Dataset schema (one record per function; ranges are min-max over the dataset):

  repo             string   7-55 chars
  path             string   4-127 chars
  func_name        string   1-88 chars
  original_string  string   75-19.8k chars
  language         string   1 value ('python')
  sha              string   40 chars
  url              string   87-242 chars
  partition        string   1 value ('train')
  idx              int64    0-252k
BlueBrain/NeuroM
neurom/io/swc.py
read
def read(filename, data_wrapper=DataWrapper):
    '''Read an SWC file and return a tuple of data, format.'''
    data = np.loadtxt(filename)
    if len(np.shape(data)) == 1:
        data = np.reshape(data, (1, -1))
    data = data[:, [X, Y, Z, R, TYPE, ID, P]]
    return data_wrapper(data, 'SWC', None)
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/swc.py#L47-L53
train
235,600
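A minimal usage sketch for the reader above. The file path is hypothetical; data_block is the array the returned DataWrapper holds (it is used the same way by soma_points further down).

from neurom.io.swc import read

rdw = read('neuron.swc')     # hypothetical SWC file, one sample per line
print(rdw.data_block.shape)  # (n_points, 7): X, Y, Z, R, TYPE, ID, P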
BlueBrain/NeuroM
neurom/io/datawrapper.py
_merge_sections
def _merge_sections(sec_a, sec_b):
    '''Merge two sections

    Merges sec_a into sec_b and sets sec_a attributes to default
    '''
    sec_b.ids = list(sec_a.ids) + list(sec_b.ids[1:])
    sec_b.ntype = sec_a.ntype
    sec_b.pid = sec_a.pid

    sec_a.ids = []
    sec_a.pid = -1
    sec_a.ntype = 0
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L89-L100
train
235,601
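A self-contained sketch of the merge semantics, using a throwaway stand-in class (Sec is hypothetical and mimics only the three DataBlockSection attributes the function touches):

class Sec:
    '''stand-in with just the attributes _merge_sections uses'''
    def __init__(self, ids, ntype, pid):
        self.ids, self.ntype, self.pid = ids, ntype, pid

def merge(sec_a, sec_b):
    # same statements as _merge_sections above
    sec_b.ids = list(sec_a.ids) + list(sec_b.ids[1:])
    sec_b.ntype = sec_a.ntype
    sec_b.pid = sec_a.pid
    sec_a.ids, sec_a.pid, sec_a.ntype = [], -1, 0

a, b = Sec([0, 1, 2], 3, -1), Sec([2, 3, 4], 3, 0)
merge(a, b)
print(b.ids, b.pid)  # [0, 1, 2, 3, 4] -1  -- b now spans both sections
print(a.ids)         # []                  -- a is emptied ('disabled')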
BlueBrain/NeuroM
neurom/io/datawrapper.py
_section_end_points
def _section_end_points(structure_block, id_map):
    '''Get the section end-points'''
    soma_idx = structure_block[:, TYPE] == POINT_TYPE.SOMA
    soma_ids = structure_block[soma_idx, ID]
    neurite_idx = structure_block[:, TYPE] != POINT_TYPE.SOMA
    neurite_rows = structure_block[neurite_idx, :]
    soma_end_pts = set(id_map[id_]
                       for id_ in soma_ids[np.in1d(soma_ids, neurite_rows[:, PID])])

    # end points have either no children or more than one
    # ie: leaf or multifurcation nodes
    n_children = defaultdict(int)
    for row in structure_block:
        n_children[row[PID]] += 1

    end_pts = set(i for i, row in enumerate(structure_block)
                  if n_children[row[ID]] != 1)

    return end_pts.union(soma_end_pts)
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L103-L120
train
235,602
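The child-count rule is the core of the function; here it is in isolation on toy (ID, PID) rows (all values hypothetical):

from collections import defaultdict

rows = [(1, -1), (2, 1), (3, 2), (4, 1), (5, 4)]  # node 1 forks into 2 and 4

n_children = defaultdict(int)
for _id, pid in rows:
    n_children[pid] += 1

# an end point is any row whose node has != 1 children:
# leaves have 0, bifurcations have 2 or more
end_pts = set(i for i, (node_id, _) in enumerate(rows) if n_children[node_id] != 1)
print(sorted(end_pts))  # [0, 2, 4]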
BlueBrain/NeuroM
neurom/io/datawrapper.py
_extract_sections
def _extract_sections(data_block):
    '''Make a list of sections from an SWC-style data wrapper block'''
    structure_block = data_block[:, COLS.TYPE:COLS.COL_COUNT].astype(np.int)

    # SWC ID -> structure_block position
    id_map = {-1: -1}
    for i, row in enumerate(structure_block):
        id_map[row[ID]] = i

    # end points have either no children, more than one, or are the start
    # of a new gap
    sec_end_pts = _section_end_points(structure_block, id_map)

    # a 'gap' is when a section has part of its segments interleaved
    # with those of another section
    gap_sections = set()
    sections = []

    def new_section():
        '''new_section'''
        sections.append(DataBlockSection())
        return sections[-1]

    curr_section = new_section()
    parent_section = {-1: -1}

    for row in structure_block:
        row_id = id_map[row[ID]]
        parent_id = id_map[row[PID]]
        if not curr_section.ids:
            # the first point in a section is its parent
            curr_section.ids.append(parent_id)
            curr_section.ntype = row[TYPE]

        gap = parent_id != curr_section.ids[-1]

        # If parent is not the previous point, create a section end-point.
        # Else add the point to this section
        if gap:
            sec_end_pts.add(row_id)
        else:
            curr_section.ids.append(row_id)

        if row_id in sec_end_pts:
            parent_section[curr_section.ids[-1]] = len(sections) - 1
            # Parent-child discontinuity section
            if gap:
                curr_section = new_section()
                curr_section.ids.extend((parent_id, row_id))
                curr_section.ntype = row[TYPE]
                gap_sections.add(len(sections) - 2)
            elif row_id != len(data_block) - 1:
                # avoid creating an extra DataBlockSection for last row if it's a leaf
                curr_section = new_section()

    for sec in sections:
        # get the section parent ID from the id of the first point.
        if sec.ids:
            sec.pid = parent_section[sec.ids[0]]

        # join gap sections and "disable" first half
        if sec.pid in gap_sections:
            _merge_sections(sections[sec.pid], sec)

    # TODO: find a way to remove empty sections. Currently they are
    # required to maintain tree integrity.
    return sections
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L142-L210
train
235,603
BlueBrain/NeuroM
neurom/io/datawrapper.py
DataWrapper.neurite_root_section_ids
def neurite_root_section_ids(self):
    '''Get the section IDs of the initial neurite sections'''
    sec = self.sections
    return [i for i, ss in enumerate(sec)
            if ss.pid > -1 and (sec[ss.pid].ntype == POINT_TYPE.SOMA and
                                ss.ntype != POINT_TYPE.SOMA)]
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L76-L81
train
235,604
BlueBrain/NeuroM
neurom/io/datawrapper.py
DataWrapper.soma_points
def soma_points(self):
    '''Get the soma points'''
    db = self.data_block
    return db[db[:, COLS.TYPE] == POINT_TYPE.SOMA]
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L83-L86
train
235,605
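The method is a single NumPy boolean mask; a standalone equivalent, assuming the [X, Y, Z, R, TYPE, ID, P] column layout from read() above and the SWC convention that soma points have type 1:

import numpy as np

TYPE, SOMA = 4, 1
db = np.array([[0., 0., 0., 1., 1, 1, -1],   # soma point
               [0., 0., 1., .5, 2, 2, 1],    # axon point
               [0., 1., 0., 1., 1, 3, 1]])   # soma point
print(db[db[:, TYPE] == SOMA].shape)         # (2, 7): only the soma rows survive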
BlueBrain/NeuroM
neurom/io/datawrapper.py
BlockNeuronBuilder.add_section
def add_section(self, id_, parent_id, section_type, points):
    '''add a section

    Args:
        id_(int): identifying number of the section
        parent_id(int): identifying number of the parent of this section
        section_type(int): the section type as defined by POINT_TYPE
        points: an array of [X, Y, Z, R] points
    '''
    # L.debug('Adding section %d, with parent %d, of type: %d with count: %d',
    #         id_, parent_id, section_type, len(points))
    assert id_ not in self.sections, 'id %s already exists in sections' % id_
    self.sections[id_] = BlockNeuronBuilder.BlockSection(parent_id, section_type, points)
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L234-L246
train
235,606
BlueBrain/NeuroM
neurom/io/datawrapper.py
BlockNeuronBuilder._make_datablock
def _make_datablock(self):
    '''Make a data_block and sections list as required by DataWrapper'''
    section_ids = sorted(self.sections)

    # create all insertion ids; this needs to be done ahead of time
    # as some of the children may have a lower id than their parents
    id_to_insert_id = {}
    row_count = 0
    for section_id in section_ids:
        row_count += len(self.sections[section_id].points)
        id_to_insert_id[section_id] = row_count - 1

    datablock = np.empty((row_count, COLS.COL_COUNT), dtype=np.float)
    datablock[:, COLS.ID] = np.arange(len(datablock))
    datablock[:, COLS.P] = datablock[:, COLS.ID] - 1

    sections = []
    insert_index = 0
    for id_ in section_ids:
        sec = self.sections[id_]
        points, section_type, parent_id = sec.points, sec.section_type, sec.parent_id
        idx = slice(insert_index, insert_index + len(points))
        datablock[idx, COLS.XYZR] = points
        datablock[idx, COLS.TYPE] = section_type
        datablock[idx.start, COLS.P] = id_to_insert_id.get(parent_id, ROOT_ID)
        sections.append(DataBlockSection(idx, section_type, parent_id))
        insert_index = idx.stop

    return datablock, sections
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L248-L277
train
235,607
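The slice-based filling above leans on two small facts worth seeing in isolation: a slice object can index array rows, and it exposes .start and .stop:

import numpy as np

block = np.zeros((5, 2))
idx = slice(0, 3)
block[idx, 0] = [10, 11, 12]  # fills rows 0..2 of column 0
block[idx.start, 1] = -1      # only the section's first row gets a special parent
print(idx.stop)               # 3 -- where the next section's rows would start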
BlueBrain/NeuroM
neurom/io/datawrapper.py
BlockNeuronBuilder._check_consistency
def _check_consistency(self):
    '''see if the sections have obvious errors'''
    type_count = defaultdict(int)
    for _, section in sorted(self.sections.items()):
        type_count[section.section_type] += 1

    if type_count[POINT_TYPE.SOMA] != 1:
        L.info('Have %d somas, expected 1', type_count[POINT_TYPE.SOMA])
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L279-L286
train
235,608
BlueBrain/NeuroM
neurom/io/datawrapper.py
BlockNeuronBuilder.get_datawrapper
def get_datawrapper(self, file_format='BlockNeuronBuilder', data_wrapper=DataWrapper):
    '''returns a DataWrapper'''
    self._check_consistency()
    datablock, sections = self._make_datablock()
    return data_wrapper(datablock, file_format, sections)
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L288-L292
train
235,609
BlueBrain/NeuroM
neurom/io/utils.py
_is_morphology_file
def _is_morphology_file(filepath):
    """ Check if `filepath` is a file with one of morphology file extensions. """
    return (
        os.path.isfile(filepath) and
        os.path.splitext(filepath)[1].lower() in ('.swc', '.h5', '.asc')
    )
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L50-L55
train
235,610
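The extension test in isolation (file names hypothetical; the real function additionally requires os.path.isfile to be true):

import os

for p in ('cell.SWC', 'cell.h5', 'morph.asc', 'notes.txt'):
    ext = os.path.splitext(p)[1].lower()
    print(p, ext in ('.swc', '.h5', '.asc'))
# cell.SWC True / cell.h5 True / morph.asc True / notes.txt False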
BlueBrain/NeuroM
neurom/io/utils.py
get_morph_files
def get_morph_files(directory):
    '''Get a list of all morphology files in a directory

    Returns:
        list with all files with extensions '.swc', '.h5' or '.asc' (case insensitive)
    '''
    lsdir = (os.path.join(directory, m) for m in os.listdir(directory))
    return list(filter(_is_morphology_file, lsdir))
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L92-L99
train
235,611
BlueBrain/NeuroM
neurom/io/utils.py
get_files_by_path
def get_files_by_path(path):
    '''Get a file or set of files from a file path

    Return list of files with path
    '''
    if os.path.isfile(path):
        return [path]
    if os.path.isdir(path):
        return get_morph_files(path)

    raise IOError('Invalid data path %s' % path)
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L102-L112
train
235,612
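A usage sketch with hypothetical paths: a directory yields all its morphology files, a single file yields a one-element list, and anything else raises IOError.

from neurom.io.utils import get_files_by_path

print(get_files_by_path('./morphologies'))             # all .swc/.h5/.asc inside
print(get_files_by_path('./morphologies/cell_a.swc'))  # ['./morphologies/cell_a.swc']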
BlueBrain/NeuroM
neurom/io/utils.py
load_neuron
def load_neuron(handle, reader=None):
    '''Build section trees from an h5 or swc file'''
    rdw = load_data(handle, reader)
    if isinstance(handle, StringType):
        name = os.path.splitext(os.path.basename(handle))[0]
    else:
        name = None
    return FstNeuron(rdw, name)
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L115-L122
train
235,613
BlueBrain/NeuroM
neurom/io/utils.py
load_neurons
def load_neurons(neurons,
                 neuron_loader=load_neuron,
                 name=None,
                 population_class=Population,
                 ignored_exceptions=()):
    '''Create a population object from all morphologies in a directory
    or from morphologies in a list of file names

    Parameters:
        neurons: directory path or list of neuron file paths
        neuron_loader: function taking a filename and returning a neuron
        population_class: class representing populations
        name (str): optional name of population. By default 'Population' or
            filepath basename depending on whether neurons is list or
            directory path respectively.

    Returns:
        neuron population object
    '''
    if isinstance(neurons, (list, tuple)):
        files = neurons
        name = name if name is not None else 'Population'
    elif isinstance(neurons, StringType):
        files = get_files_by_path(neurons)
        name = name if name is not None else os.path.basename(neurons)

    ignored_exceptions = tuple(ignored_exceptions)
    pop = []
    for f in files:
        try:
            pop.append(neuron_loader(f))
        except NeuroMError as e:
            if isinstance(e, ignored_exceptions):
                L.info('Ignoring exception "%s" for file %s',
                       e, os.path.basename(f))
                continue
            raise

    return population_class(pop, name=name)
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L125-L164
train
235,614
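A usage sketch, again with hypothetical paths; a directory gives a population named after its basename, while a list defaults to the name 'Population' unless one is passed:

import neurom as nm

pop = nm.load_neurons('./morphologies')
pair = nm.load_neurons(['./a.swc', './b.swc'], name='pair')
print(pop.name, len(pair.neurons))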
BlueBrain/NeuroM
neurom/io/utils.py
_get_file
def _get_file(handle):
    '''Returns the filename of the file to read

    If handle is a stream, a temp file is written on disk first
    and its filename is returned'''
    if not isinstance(handle, IOBase):
        return handle

    fd, temp_file = tempfile.mkstemp(str(uuid.uuid4()), prefix='neurom-')
    os.close(fd)
    with open(temp_file, 'w') as fd:
        handle.seek(0)
        shutil.copyfileobj(handle, fd)
    return temp_file
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L167-L180
train
235,615
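A sketch of both branches; the one-line SWC body is illustrative, and io.StringIO is an IOBase subclass, so it takes the temp-file path:

from io import StringIO
from neurom.io.utils import _get_file

swc = StringIO('1 1 0. 0. 0. 1. -1\n')  # in-memory stream
print(_get_file(swc))                   # a path like /tmp/neurom-...<uuid>
print(_get_file('cells/a.swc'))         # plain string: returned unchanged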
BlueBrain/NeuroM
neurom/io/utils.py
load_data
def load_data(handle, reader=None):
    '''Unpack data into a raw data wrapper'''
    if not reader:
        reader = os.path.splitext(handle)[1][1:].lower()

    if reader not in _READERS:
        raise NeuroMError('Do not have a loader for "%s" extension' % reader)

    filename = _get_file(handle)
    try:
        return _READERS[reader](filename)
    except Exception as e:
        L.exception('Error reading file %s, using "%s" loader', filename, reader)
        raise RawDataError('Error reading file %s:\n%s' % (filename, str(e)))
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L183-L196
train
235,616
BlueBrain/NeuroM
neurom/io/utils.py
_load_h5
def _load_h5(filename):
    '''Delay loading of h5py until it is needed'''
    from neurom.io import hdf5
    return hdf5.read(filename,
                     remove_duplicates=False,
                     data_wrapper=DataWrapper)
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L199-L204
train
235,617
BlueBrain/NeuroM
neurom/io/utils.py
NeuronLoader._filepath
def _filepath(self, name):
    """ File path to `name` morphology file. """
    if self.file_ext is None:
        candidates = glob.glob(os.path.join(self.directory, name + ".*"))
        try:
            return next(filter(_is_morphology_file, candidates))
        except StopIteration:
            raise NeuroMError("Can not find morphology file for '%s' " % name)
    else:
        return os.path.join(self.directory, name + self.file_ext)
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L75-L84
train
235,618
BlueBrain/NeuroM
neurom/viewer.py
draw
def draw(obj, mode='2d', **kwargs):
    '''Draw a morphology object

    Parameters:
        obj: morphology object to be drawn (neuron, tree, soma).
        mode (Optional[str]): drawing mode ('2d', '3d', 'dendrogram'). Defaults to '2d'.
        **kwargs: keyword arguments for underlying neurom.view.view functions.

    Raises:
        InvalidDrawModeError if mode is not valid
        NotDrawableError if obj is not drawable
        NotDrawableError if obj type and mode combination is not drawable

    Examples:
        >>> nrn = ...  # load a neuron
        >>> fig, _ = viewer.draw(nrn)               # 2d plot
        >>> fig.show()
        >>> fig3d, _ = viewer.draw(nrn, mode='3d')  # 3d plot
        >>> fig3d.show()
        >>> fig, _ = viewer.draw(nrn.neurites[0])   # 2d plot of neurite tree
        >>> dend, _ = viewer.draw(nrn, mode='dendrogram')
    '''
    if mode not in MODES:
        raise InvalidDrawModeError('Invalid drawing mode %s' % mode)

    if mode in ('2d', 'dendrogram'):
        fig, ax = common.get_figure()
    else:
        fig, ax = common.get_figure(params={'projection': '3d'})

    if isinstance(obj, Neuron):
        tag = 'neuron'
    elif isinstance(obj, (Tree, Neurite)):
        tag = 'tree'
    elif isinstance(obj, Soma):
        tag = 'soma'
    else:
        raise NotDrawableError('draw not implemented for %s' % obj.__class__)

    viewer = '%s_%s' % (tag, mode)
    try:
        plotter = _VIEWERS[viewer]
    except KeyError:
        raise NotDrawableError('No drawer for class %s, mode=%s' % (obj.__class__, mode))

    output_path = kwargs.pop('output_path', None)
    plotter(ax, obj, **kwargs)

    if mode != 'dendrogram':
        common.plot_style(fig=fig, ax=ax, **kwargs)

    if output_path:
        common.save_plot(fig=fig, output_path=output_path, **kwargs)

    return fig, ax
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/viewer.py#L77-L134
train
235,619
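A usage sketch assuming a hypothetical 'neuron.swc'; the Agg backend is selected so the example also runs without a display:

import matplotlib
matplotlib.use('Agg')

from neurom import load_neuron, viewer

nrn = load_neuron('neuron.swc')
fig, ax = viewer.draw(nrn)              # 2d view
fig3d, _ = viewer.draw(nrn, mode='3d')  # 3d view
fig.savefig('neuron.png')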
BlueBrain/NeuroM
examples/histogram.py
population_feature_values
def population_feature_values(pops, feature):
    '''Extracts feature values per population'''
    pops_feature_values = []
    for pop in pops:
        feature_values = [getattr(neu, 'get_' + feature)() for neu in pop.neurons]
        # ugly hack to chain in case of list of lists
        if any([isinstance(p, (list, np.ndarray)) for p in feature_values]):
            feature_values = list(chain(*feature_values))
        pops_feature_values.append(feature_values)
    return pops_feature_values
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/histogram.py#L96-L112
train
235,620
BlueBrain/NeuroM
examples/section_ids.py
get_segment
def get_segment(neuron, section_id, segment_id):
    '''Get a segment given a section and segment id

    Returns:
        array of two [x, y, z, r] points defining segment
    '''
    sec = neuron.sections[section_id]
    return sec.points[segment_id:segment_id + 2][:, COLS.XYZR]
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/section_ids.py#L37-L44
train
235,621
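The slicing in isolation, on hypothetical [X, Y, Z, R] points: rows segment_id and segment_id + 1 are the segment's two endpoints.

import numpy as np

points = np.array([[0., 0., 0., 1.],
                   [1., 0., 0., 1.],
                   [2., 0., 0., .5]])
segment_id = 1
print(points[segment_id:segment_id + 2])  # rows 1..2: the second segment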
BlueBrain/NeuroM
examples/extract_distribution.py
extract_data
def extract_data(data_path, feature):
    '''Loads a list of neurons, extracts feature
    and transforms the fitted distribution in the correct format.

    Returns the optimal distribution, corresponding parameters,
    minimum and maximum values.
    '''
    population = nm.load_neurons(data_path)
    feature_data = [nm.get(feature, n) for n in population]
    feature_data = list(chain(*feature_data))
    return stats.optimal_distribution(feature_data)
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/extract_distribution.py#L59-L70
train
235,622
BlueBrain/NeuroM
neurom/fst/_bifurcationfunc.py
bifurcation_partition
def bifurcation_partition(bif_point):
    '''Calculate the partition at a bifurcation point

    We first ensure that the input point has only two children.

    The number of nodes in each child tree is counted. The partition is
    defined as the ratio of the largest number to the smallest number.'''
    assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children'

    n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
    m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
    return max(n, m) / min(n, m)
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_bifurcationfunc.py#L80-L91
train
235,623
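The arithmetic on hypothetical counts: child subtrees of 7 and 3 nodes give a partition of 7/3, while 1.0 would mean a perfectly balanced bifurcation.

n, m = 7.0, 3.0
print(max(n, m) / min(n, m))  # 2.3333...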
BlueBrain/NeuroM
neurom/fst/_bifurcationfunc.py
partition_pair
def partition_pair(bif_point):
    '''Calculate the partition pair at a bifurcation point

    The number of nodes in each child tree is counted. The partition
    pair is the pair of node counts of the two daughter subtrees at
    the branch point.'''
    n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
    m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
    return (n, m)
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_bifurcationfunc.py#L110-L118
train
235,624
BlueBrain/NeuroM
neurom/io/neurolucida.py
_match_section
def _match_section(section, match):
    '''checks whether the `type` of section is in the `match` dictionary

    Works around the unknown ordering of s-expressions in each section.
    For instance, the `type` is the 3rd element for CellBodies:
        ("CellBody"
         (Color Yellow)
         (CellBody)
         (Set "cell10")
        )

    Returns:
        value associated with match[section_type], None if no match
    '''
    # TODO: rewrite this so it is more clear, and handles sets & dictionaries
    # for matching
    for i in range(5):
        if i >= len(section):
            return None
        if isinstance(section[i], StringType) and section[i] in match:
            return match[section[i]]
    return None
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L64-L84
train
235,625
BlueBrain/NeuroM
neurom/io/neurolucida.py
_parse_section
def _parse_section(token_iter):
    '''take a stream of tokens, and create the tree structure that is defined
    by the s-expressions
    '''
    sexp = []
    for token in token_iter:
        if token == '(':
            new_sexp = _parse_section(token_iter)
            if not _match_section(new_sexp, UNWANTED_SECTIONS):
                sexp.append(new_sexp)
        elif token == ')':
            return sexp
        else:
            sexp.append(token)
    return sexp
python
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L114-L128
train
235,626
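A token-stream sketch for the recursive parser above. Note that the caller has already consumed the opening '(' of the expression, so the iterator starts just after it; the example assumes neither 'A' nor the nested ['B', 'C'] matches UNWANTED_SECTIONS.

    from neurom.io.neurolucida import _parse_section  # assumption: private helper is importable

    # tokens for "(A (B C) D)" with the leading '(' already consumed by the caller
    tokens = iter(['A', '(', 'B', 'C', ')', 'D', ')'])
    print(_parse_section(tokens))  # -> ['A', ['B', 'C'], 'D']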
BlueBrain/NeuroM
neurom/io/neurolucida.py
_parse_sections
def _parse_sections(morph_fd): '''returns array of all the sections that exist The format is nested lists that correspond to the s-expressions ''' sections = [] token_iter = _get_tokens(morph_fd) for token in token_iter: if token == '(': # find top-level sections section = _parse_section(token_iter) if not _match_section(section, UNWANTED_SECTIONS): sections.append(section) return sections
python
def _parse_sections(morph_fd): '''returns array of all the sections that exist The format is nested lists that correspond to the s-expressions ''' sections = [] token_iter = _get_tokens(morph_fd) for token in token_iter: if token == '(': # find top-level sections section = _parse_section(token_iter) if not _match_section(section, UNWANTED_SECTIONS): sections.append(section) return sections
[ "def", "_parse_sections", "(", "morph_fd", ")", ":", "sections", "=", "[", "]", "token_iter", "=", "_get_tokens", "(", "morph_fd", ")", "for", "token", "in", "token_iter", ":", "if", "token", "==", "'('", ":", "# find top-level sections", "section", "=", "_parse_section", "(", "token_iter", ")", "if", "not", "_match_section", "(", "section", ",", "UNWANTED_SECTIONS", ")", ":", "sections", ".", "append", "(", "section", ")", "return", "sections" ]
returns array of all the sections that exist The format is nested lists that correspond to the s-expressions
[ "returns", "array", "of", "all", "the", "sections", "that", "exist" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L131-L143
train
235,627
BlueBrain/NeuroM
neurom/io/neurolucida.py
_flatten_subsection
def _flatten_subsection(subsection, _type, offset, parent): '''Flatten a subsection from its nested version Args: subsection: Nested subsection as produced by _parse_section, except one level in _type: type of section, ie: AXON, etc parent: first element has this as it's parent offset: position in the final array of the first element Returns: Generator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID] ''' for row in subsection: # TODO: Figure out what these correspond to in neurolucida if row in ('Low', 'Generated', 'High', ): continue elif isinstance(row[0], StringType): if len(row) in (4, 5, ): if len(row) == 5: assert row[4][0] == 'S', \ 'Only known usage of a fifth member is Sn, found: %s' % row[4][0] yield (float(row[0]), float(row[1]), float(row[2]), float(row[3]) / 2., _type, offset, parent) parent = offset offset += 1 elif isinstance(row[0], list): split_parent = offset - 1 start_offset = 0 slices = [] start = 0 for i, value in enumerate(row): if value == '|': slices.append(slice(start + start_offset, i)) start = i + 1 slices.append(slice(start + start_offset, len(row))) for split_slice in slices: for _row in _flatten_subsection(row[split_slice], _type, offset, split_parent): offset += 1 yield _row
python
def _flatten_subsection(subsection, _type, offset, parent): '''Flatten a subsection from its nested version Args: subsection: Nested subsection as produced by _parse_section, except one level in _type: type of section, ie: AXON, etc parent: first element has this as it's parent offset: position in the final array of the first element Returns: Generator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID] ''' for row in subsection: # TODO: Figure out what these correspond to in neurolucida if row in ('Low', 'Generated', 'High', ): continue elif isinstance(row[0], StringType): if len(row) in (4, 5, ): if len(row) == 5: assert row[4][0] == 'S', \ 'Only known usage of a fifth member is Sn, found: %s' % row[4][0] yield (float(row[0]), float(row[1]), float(row[2]), float(row[3]) / 2., _type, offset, parent) parent = offset offset += 1 elif isinstance(row[0], list): split_parent = offset - 1 start_offset = 0 slices = [] start = 0 for i, value in enumerate(row): if value == '|': slices.append(slice(start + start_offset, i)) start = i + 1 slices.append(slice(start + start_offset, len(row))) for split_slice in slices: for _row in _flatten_subsection(row[split_slice], _type, offset, split_parent): offset += 1 yield _row
[ "def", "_flatten_subsection", "(", "subsection", ",", "_type", ",", "offset", ",", "parent", ")", ":", "for", "row", "in", "subsection", ":", "# TODO: Figure out what these correspond to in neurolucida", "if", "row", "in", "(", "'Low'", ",", "'Generated'", ",", "'High'", ",", ")", ":", "continue", "elif", "isinstance", "(", "row", "[", "0", "]", ",", "StringType", ")", ":", "if", "len", "(", "row", ")", "in", "(", "4", ",", "5", ",", ")", ":", "if", "len", "(", "row", ")", "==", "5", ":", "assert", "row", "[", "4", "]", "[", "0", "]", "==", "'S'", ",", "'Only known usage of a fifth member is Sn, found: %s'", "%", "row", "[", "4", "]", "[", "0", "]", "yield", "(", "float", "(", "row", "[", "0", "]", ")", ",", "float", "(", "row", "[", "1", "]", ")", ",", "float", "(", "row", "[", "2", "]", ")", ",", "float", "(", "row", "[", "3", "]", ")", "/", "2.", ",", "_type", ",", "offset", ",", "parent", ")", "parent", "=", "offset", "offset", "+=", "1", "elif", "isinstance", "(", "row", "[", "0", "]", ",", "list", ")", ":", "split_parent", "=", "offset", "-", "1", "start_offset", "=", "0", "slices", "=", "[", "]", "start", "=", "0", "for", "i", ",", "value", "in", "enumerate", "(", "row", ")", ":", "if", "value", "==", "'|'", ":", "slices", ".", "append", "(", "slice", "(", "start", "+", "start_offset", ",", "i", ")", ")", "start", "=", "i", "+", "1", "slices", ".", "append", "(", "slice", "(", "start", "+", "start_offset", ",", "len", "(", "row", ")", ")", ")", "for", "split_slice", "in", "slices", ":", "for", "_row", "in", "_flatten_subsection", "(", "row", "[", "split_slice", "]", ",", "_type", ",", "offset", ",", "split_parent", ")", ":", "offset", "+=", "1", "yield", "_row" ]
Flatten a subsection from its nested version Args: subsection: Nested subsection as produced by _parse_section, except one level in _type: type of section, ie: AXON, etc parent: first element has this as it's parent offset: position in the final array of the first element Returns: Generator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID]
[ "Flatten", "a", "subsection", "from", "its", "nested", "version" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L146-L187
train
235,628
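A flattening sketch for the helper above: two 4-value points become rows of [X, Y, Z, R, TYPE, ID, PARENT_ID]. The fourth s-expression value is divided by two in the code (diameter to radius). Assumes the private helper is importable and that StringType covers plain str.

    from neurom.io.neurolucida import _flatten_subsection  # assumption: private helper is importable

    subsection = [['0.0', '0.0', '0.0', '2.0'],
                  ['1.0', '0.0', '0.0', '2.0']]
    print(list(_flatten_subsection(subsection, _type=2, offset=0, parent=-1)))
    # -> [(0.0, 0.0, 0.0, 1.0, 2, 0, -1), (1.0, 0.0, 0.0, 1.0, 2, 1, 0)]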
BlueBrain/NeuroM
neurom/io/neurolucida.py
_extract_section
def _extract_section(section): '''Find top level sections, and get their flat contents, and append them all Returns a numpy array with the row format: [X, Y, Z, R, TYPE, ID, PARENT_ID] Note: PARENT_ID starts at -1 for soma and 0 for neurites ''' # sections with only one element will be skipped, if len(section) == 1: assert section[0] == 'Sections', \ ('Only known usage of a single Section content is "Sections", found %s' % section[0]) return None # try and detect type _type = WANTED_SECTIONS.get(section[0][0], None) start = 1 # CellBody often has [['"CellBody"'], ['CellBody'] as its first two elements if _type is None: _type = WANTED_SECTIONS.get(section[1][0], None) if _type is None: # can't determine the type return None start = 2 parent = -1 if _type == POINT_TYPE.SOMA else 0 subsection_iter = _flatten_subsection(section[start:], _type, offset=0, parent=parent) ret = np.array([row for row in subsection_iter]) return ret
python
def _extract_section(section): '''Find top level sections, and get their flat contents, and append them all Returns a numpy array with the row format: [X, Y, Z, R, TYPE, ID, PARENT_ID] Note: PARENT_ID starts at -1 for soma and 0 for neurites ''' # sections with only one element will be skipped, if len(section) == 1: assert section[0] == 'Sections', \ ('Only known usage of a single Section content is "Sections", found %s' % section[0]) return None # try and detect type _type = WANTED_SECTIONS.get(section[0][0], None) start = 1 # CellBody often has [['"CellBody"'], ['CellBody'] as its first two elements if _type is None: _type = WANTED_SECTIONS.get(section[1][0], None) if _type is None: # can't determine the type return None start = 2 parent = -1 if _type == POINT_TYPE.SOMA else 0 subsection_iter = _flatten_subsection(section[start:], _type, offset=0, parent=parent) ret = np.array([row for row in subsection_iter]) return ret
[ "def", "_extract_section", "(", "section", ")", ":", "# sections with only one element will be skipped,", "if", "len", "(", "section", ")", "==", "1", ":", "assert", "section", "[", "0", "]", "==", "'Sections'", ",", "(", "'Only known usage of a single Section content is \"Sections\", found %s'", "%", "section", "[", "0", "]", ")", "return", "None", "# try and detect type", "_type", "=", "WANTED_SECTIONS", ".", "get", "(", "section", "[", "0", "]", "[", "0", "]", ",", "None", ")", "start", "=", "1", "# CellBody often has [['\"CellBody\"'], ['CellBody'] as its first two elements", "if", "_type", "is", "None", ":", "_type", "=", "WANTED_SECTIONS", ".", "get", "(", "section", "[", "1", "]", "[", "0", "]", ",", "None", ")", "if", "_type", "is", "None", ":", "# can't determine the type", "return", "None", "start", "=", "2", "parent", "=", "-", "1", "if", "_type", "==", "POINT_TYPE", ".", "SOMA", "else", "0", "subsection_iter", "=", "_flatten_subsection", "(", "section", "[", "start", ":", "]", ",", "_type", ",", "offset", "=", "0", ",", "parent", "=", "parent", ")", "ret", "=", "np", ".", "array", "(", "[", "row", "for", "row", "in", "subsection_iter", "]", ")", "return", "ret" ]
Find top level sections, and get their flat contents, and append them all Returns a numpy array with the row format: [X, Y, Z, R, TYPE, ID, PARENT_ID] Note: PARENT_ID starts at -1 for soma and 0 for neurites
[ "Find", "top", "level", "sections", "and", "get", "their", "flat", "contents", "and", "append", "them", "all" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L190-L222
train
235,629
BlueBrain/NeuroM
neurom/io/neurolucida.py
_sections_to_raw_data
def _sections_to_raw_data(sections): '''convert list of sections into the `raw_data` format used in neurom This finds the soma, and attaches the neurites ''' soma = None neurites = [] for section in sections: neurite = _extract_section(section) if neurite is None: continue elif neurite[0][COLS.TYPE] == POINT_TYPE.SOMA: assert soma is None, 'Multiple somas defined in file' soma = neurite else: neurites.append(neurite) assert soma is not None, 'Missing CellBody element (ie. soma)' total_length = len(soma) + sum(len(neurite) for neurite in neurites) ret = np.zeros((total_length, 7,), dtype=np.float64) pos = len(soma) ret[0:pos, :] = soma for neurite in neurites: end = pos + len(neurite) ret[pos:end, :] = neurite ret[pos:end, COLS.P] += pos ret[pos:end, COLS.ID] += pos # TODO: attach the neurite at the closest point on the soma ret[pos, COLS.P] = len(soma) - 1 pos = end return ret
python
def _sections_to_raw_data(sections): '''convert list of sections into the `raw_data` format used in neurom This finds the soma, and attaches the neurites ''' soma = None neurites = [] for section in sections: neurite = _extract_section(section) if neurite is None: continue elif neurite[0][COLS.TYPE] == POINT_TYPE.SOMA: assert soma is None, 'Multiple somas defined in file' soma = neurite else: neurites.append(neurite) assert soma is not None, 'Missing CellBody element (ie. soma)' total_length = len(soma) + sum(len(neurite) for neurite in neurites) ret = np.zeros((total_length, 7,), dtype=np.float64) pos = len(soma) ret[0:pos, :] = soma for neurite in neurites: end = pos + len(neurite) ret[pos:end, :] = neurite ret[pos:end, COLS.P] += pos ret[pos:end, COLS.ID] += pos # TODO: attach the neurite at the closest point on the soma ret[pos, COLS.P] = len(soma) - 1 pos = end return ret
[ "def", "_sections_to_raw_data", "(", "sections", ")", ":", "soma", "=", "None", "neurites", "=", "[", "]", "for", "section", "in", "sections", ":", "neurite", "=", "_extract_section", "(", "section", ")", "if", "neurite", "is", "None", ":", "continue", "elif", "neurite", "[", "0", "]", "[", "COLS", ".", "TYPE", "]", "==", "POINT_TYPE", ".", "SOMA", ":", "assert", "soma", "is", "None", ",", "'Multiple somas defined in file'", "soma", "=", "neurite", "else", ":", "neurites", ".", "append", "(", "neurite", ")", "assert", "soma", "is", "not", "None", ",", "'Missing CellBody element (ie. soma)'", "total_length", "=", "len", "(", "soma", ")", "+", "sum", "(", "len", "(", "neurite", ")", "for", "neurite", "in", "neurites", ")", "ret", "=", "np", ".", "zeros", "(", "(", "total_length", ",", "7", ",", ")", ",", "dtype", "=", "np", ".", "float64", ")", "pos", "=", "len", "(", "soma", ")", "ret", "[", "0", ":", "pos", ",", ":", "]", "=", "soma", "for", "neurite", "in", "neurites", ":", "end", "=", "pos", "+", "len", "(", "neurite", ")", "ret", "[", "pos", ":", "end", ",", ":", "]", "=", "neurite", "ret", "[", "pos", ":", "end", ",", "COLS", ".", "P", "]", "+=", "pos", "ret", "[", "pos", ":", "end", ",", "COLS", ".", "ID", "]", "+=", "pos", "# TODO: attach the neurite at the closest point on the soma", "ret", "[", "pos", ",", "COLS", ".", "P", "]", "=", "len", "(", "soma", ")", "-", "1", "pos", "=", "end", "return", "ret" ]
convert list of sections into the `raw_data` format used in neurom This finds the soma, and attaches the neurites
[ "convert", "list", "of", "sections", "into", "the", "raw_data", "format", "used", "in", "neurom" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L225-L257
train
235,630
BlueBrain/NeuroM
neurom/io/neurolucida.py
read
def read(morph_file, data_wrapper=DataWrapper): '''return a 'raw_data' np.array with the full neuron, and the format of the file suitable to be wrapped by DataWrapper ''' msg = ('This is an experimental reader. ' 'There are no guarantees regarding ability to parse ' 'Neurolucida .asc files or correctness of output.') warnings.warn(msg) L.warning(msg) with open(morph_file, encoding='utf-8', errors='replace') as morph_fd: sections = _parse_sections(morph_fd) raw_data = _sections_to_raw_data(sections) return data_wrapper(raw_data, 'NL-ASCII')
python
def read(morph_file, data_wrapper=DataWrapper): '''return a 'raw_data' np.array with the full neuron, and the format of the file suitable to be wrapped by DataWrapper ''' msg = ('This is an experimental reader. ' 'There are no guarantees regarding ability to parse ' 'Neurolucida .asc files or correctness of output.') warnings.warn(msg) L.warning(msg) with open(morph_file, encoding='utf-8', errors='replace') as morph_fd: sections = _parse_sections(morph_fd) raw_data = _sections_to_raw_data(sections) return data_wrapper(raw_data, 'NL-ASCII')
[ "def", "read", "(", "morph_file", ",", "data_wrapper", "=", "DataWrapper", ")", ":", "msg", "=", "(", "'This is an experimental reader. '", "'There are no guarantees regarding ability to parse '", "'Neurolucida .asc files or correctness of output.'", ")", "warnings", ".", "warn", "(", "msg", ")", "L", ".", "warning", "(", "msg", ")", "with", "open", "(", "morph_file", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'replace'", ")", "as", "morph_fd", ":", "sections", "=", "_parse_sections", "(", "morph_fd", ")", "raw_data", "=", "_sections_to_raw_data", "(", "sections", ")", "return", "data_wrapper", "(", "raw_data", ",", "'NL-ASCII'", ")" ]
return a 'raw_data' np.array with the full neuron, and the format of the file suitable to be wrapped by DataWrapper
[ "return", "a", "raw_data", "np", ".", "array", "with", "the", "full", "neuron", "and", "the", "format", "of", "the", "file", "suitable", "to", "be", "wrapped", "by", "DataWrapper" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L260-L275
train
235,631
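A basic usage sketch for the reader above; the file name is hypothetical, and the function itself warns that it is experimental, so expect a warning at call time.

    from neurom.io.neurolucida import read

    # 'cell.asc' is a hypothetical path; read returns a DataWrapper around
    # the raw [X, Y, Z, R, TYPE, ID, P] array in 'NL-ASCII' format.
    data = read('cell.asc')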
BlueBrain/NeuroM
examples/get_features.py
stats
def stats(data): '''Dictionary with summary stats for data Returns: dictionary with length, mean, sum, standard deviation,\ min and max of data ''' return {'len': len(data), 'mean': np.mean(data), 'sum': np.sum(data), 'std': np.std(data), 'min': np.min(data), 'max': np.max(data)}
python
def stats(data): '''Dictionary with summary stats for data Returns: dictionary with length, mean, sum, standard deviation,\ min and max of data ''' return {'len': len(data), 'mean': np.mean(data), 'sum': np.sum(data), 'std': np.std(data), 'min': np.min(data), 'max': np.max(data)}
[ "def", "stats", "(", "data", ")", ":", "return", "{", "'len'", ":", "len", "(", "data", ")", ",", "'mean'", ":", "np", ".", "mean", "(", "data", ")", ",", "'sum'", ":", "np", ".", "sum", "(", "data", ")", ",", "'std'", ":", "np", ".", "std", "(", "data", ")", ",", "'min'", ":", "np", ".", "min", "(", "data", ")", ",", "'max'", ":", "np", ".", "max", "(", "data", ")", "}" ]
Dictionary with summary stats for data Returns: dictionary with length, mean, sum, standard deviation,\ min and max of data
[ "Dictionary", "with", "summary", "stats", "for", "data" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/get_features.py#L43-L55
train
235,632
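A tiny worked example for the summary helper, assuming `stats` is defined as above with numpy imported as np. The std is the population standard deviation, per np.std's default.

    import numpy as np

    data = np.array([1.0, 2.0, 3.0])
    print(stats(data))
    # -> {'len': 3, 'mean': 2.0, 'sum': 6.0, 'std': 0.816..., 'min': 1.0, 'max': 3.0}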
BlueBrain/NeuroM
neurom/apps/__init__.py
get_config
def get_config(config, default_config): '''Load configuration from file if in config, else use default''' if not config: logging.warning('Using default config: %s', default_config) config = default_config try: with open(config, 'r') as config_file: return yaml.load(config_file) except (yaml.reader.ReaderError, yaml.parser.ParserError, yaml.scanner.ScannerError) as e: raise ConfigError('Invalid yaml file: \n %s' % str(e))
python
def get_config(config, default_config): '''Load configuration from file if in config, else use default''' if not config: logging.warning('Using default config: %s', default_config) config = default_config try: with open(config, 'r') as config_file: return yaml.load(config_file) except (yaml.reader.ReaderError, yaml.parser.ParserError, yaml.scanner.ScannerError) as e: raise ConfigError('Invalid yaml file: \n %s' % str(e))
[ "def", "get_config", "(", "config", ",", "default_config", ")", ":", "if", "not", "config", ":", "logging", ".", "warning", "(", "'Using default config: %s'", ",", "default_config", ")", "config", "=", "default_config", "try", ":", "with", "open", "(", "config", ",", "'r'", ")", "as", "config_file", ":", "return", "yaml", ".", "load", "(", "config_file", ")", "except", "(", "yaml", ".", "reader", ".", "ReaderError", ",", "yaml", ".", "parser", ".", "ParserError", ",", "yaml", ".", "scanner", ".", "ScannerError", ")", "as", "e", ":", "raise", "ConfigError", "(", "'Invalid yaml file: \\n %s'", "%", "str", "(", "e", ")", ")" ]
Load configuration from file if in config, else use default
[ "Load", "configuration", "from", "file", "if", "in", "config", "else", "use", "default" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/apps/__init__.py#L36-L48
train
235,633
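A standalone sketch of the same fallback-and-load logic. The file names are hypothetical; note that bare yaml.load without an explicit Loader is deprecated in PyYAML >= 5.1, so this sketch uses yaml.safe_load as the modern equivalent.

    import yaml

    def load_config(path, default_path):
        # fall back to the default when no path is given, then parse the YAML
        with open(path or default_path, 'r') as f:
            return yaml.safe_load(f)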
BlueBrain/NeuroM
neurom/fst/_neuronfunc.py
soma_surface_area
def soma_surface_area(nrn, neurite_type=NeuriteType.soma): '''Get the surface area of a neuron's soma. Note: The surface area is calculated by assuming the soma is spherical. ''' assert neurite_type == NeuriteType.soma, 'Neurite type must be soma' return 4 * math.pi * nrn.soma.radius ** 2
python
def soma_surface_area(nrn, neurite_type=NeuriteType.soma): '''Get the surface area of a neuron's soma. Note: The surface area is calculated by assuming the soma is spherical. ''' assert neurite_type == NeuriteType.soma, 'Neurite type must be soma' return 4 * math.pi * nrn.soma.radius ** 2
[ "def", "soma_surface_area", "(", "nrn", ",", "neurite_type", "=", "NeuriteType", ".", "soma", ")", ":", "assert", "neurite_type", "==", "NeuriteType", ".", "soma", ",", "'Neurite type must be soma'", "return", "4", "*", "math", ".", "pi", "*", "nrn", ".", "soma", ".", "radius", "**", "2" ]
Get the surface area of a neuron's soma. Note: The surface area is calculated by assuming the soma is spherical.
[ "Get", "the", "surface", "area", "of", "a", "neuron", "s", "soma", "." ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L46-L53
train
235,634
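A worked number for the spherical-surface formula used above.

    import math

    radius = 5.0
    print(4 * math.pi * radius ** 2)  # 4 * pi * 25 = 100 * pi ~= 314.159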
BlueBrain/NeuroM
neurom/fst/_neuronfunc.py
soma_surface_areas
def soma_surface_areas(nrn_pop, neurite_type=NeuriteType.soma): '''Get the surface areas of the somata in a population of neurons Note: The surface area is calculated by assuming the soma is spherical. Note: If a single neuron is passed, a single element list with the surface area of its soma member is returned. ''' nrns = neuron_population(nrn_pop) assert neurite_type == NeuriteType.soma, 'Neurite type must be soma' return [soma_surface_area(n) for n in nrns]
python
def soma_surface_areas(nrn_pop, neurite_type=NeuriteType.soma): '''Get the surface areas of the somata in a population of neurons Note: The surface area is calculated by assuming the soma is spherical. Note: If a single neuron is passed, a single element list with the surface area of its soma member is returned. ''' nrns = neuron_population(nrn_pop) assert neurite_type == NeuriteType.soma, 'Neurite type must be soma' return [soma_surface_area(n) for n in nrns]
[ "def", "soma_surface_areas", "(", "nrn_pop", ",", "neurite_type", "=", "NeuriteType", ".", "soma", ")", ":", "nrns", "=", "neuron_population", "(", "nrn_pop", ")", "assert", "neurite_type", "==", "NeuriteType", ".", "soma", ",", "'Neurite type must be soma'", "return", "[", "soma_surface_area", "(", "n", ")", "for", "n", "in", "nrns", "]" ]
Get the surface areas of the somata in a population of neurons Note: The surface area is calculated by assuming the soma is spherical. Note: If a single neuron is passed, a single element list with the surface area of its soma member is returned.
[ "Get", "the", "surface", "areas", "of", "the", "somata", "in", "a", "population", "of", "neurons" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L56-L67
train
235,635
BlueBrain/NeuroM
neurom/fst/_neuronfunc.py
soma_radii
def soma_radii(nrn_pop, neurite_type=NeuriteType.soma): ''' Get the radii of the somata of a population of neurons Note: If a single neuron is passed, a single element list with the radius of its soma member is returned. ''' assert neurite_type == NeuriteType.soma, 'Neurite type must be soma' nrns = neuron_population(nrn_pop) return [n.soma.radius for n in nrns]
python
def soma_radii(nrn_pop, neurite_type=NeuriteType.soma): ''' Get the radii of the somata of a population of neurons Note: If a single neuron is passed, a single element list with the radius of its soma member is returned. ''' assert neurite_type == NeuriteType.soma, 'Neurite type must be soma' nrns = neuron_population(nrn_pop) return [n.soma.radius for n in nrns]
[ "def", "soma_radii", "(", "nrn_pop", ",", "neurite_type", "=", "NeuriteType", ".", "soma", ")", ":", "assert", "neurite_type", "==", "NeuriteType", ".", "soma", ",", "'Neurite type must be soma'", "nrns", "=", "neuron_population", "(", "nrn_pop", ")", "return", "[", "n", ".", "soma", ".", "radius", "for", "n", "in", "nrns", "]" ]
Get the radii of the somata of a population of neurons Note: If a single neuron is passed, a single element list with the radius of its soma member is returned.
[ "Get", "the", "radii", "of", "the", "somata", "of", "a", "population", "of", "neurons" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L70-L79
train
235,636
BlueBrain/NeuroM
neurom/fst/_neuronfunc.py
trunk_section_lengths
def trunk_section_lengths(nrn, neurite_type=NeuriteType.all): '''list of lengths of trunk sections of neurites in a neuron''' neurite_filter = is_type(neurite_type) return [morphmath.section_length(s.root_node.points) for s in nrn.neurites if neurite_filter(s)]
python
def trunk_section_lengths(nrn, neurite_type=NeuriteType.all): '''list of lengths of trunk sections of neurites in a neuron''' neurite_filter = is_type(neurite_type) return [morphmath.section_length(s.root_node.points) for s in nrn.neurites if neurite_filter(s)]
[ "def", "trunk_section_lengths", "(", "nrn", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "neurite_filter", "=", "is_type", "(", "neurite_type", ")", "return", "[", "morphmath", ".", "section_length", "(", "s", ".", "root_node", ".", "points", ")", "for", "s", "in", "nrn", ".", "neurites", "if", "neurite_filter", "(", "s", ")", "]" ]
list of lengths of trunk sections of neurites in a neuron
[ "list", "of", "lengths", "of", "trunk", "sections", "of", "neurites", "in", "a", "neuron" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L82-L86
train
235,637
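A usage sketch for the trunk-length feature; the file path is hypothetical and the private module path is an assumption about this repository layout.

    import neurom as nm
    from neurom.fst._neuronfunc import trunk_section_lengths  # assumption: private module path

    nrn = nm.load_neuron('neuron.swc')  # hypothetical path
    print(trunk_section_lengths(nrn, neurite_type=nm.NeuriteType.basal_dendrite))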
BlueBrain/NeuroM
neurom/fst/_neuronfunc.py
trunk_origin_radii
def trunk_origin_radii(nrn, neurite_type=NeuriteType.all): '''radii of the trunk sections of neurites in a neuron''' neurite_filter = is_type(neurite_type) return [s.root_node.points[0][COLS.R] for s in nrn.neurites if neurite_filter(s)]
python
def trunk_origin_radii(nrn, neurite_type=NeuriteType.all): '''radii of the trunk sections of neurites in a neuron''' neurite_filter = is_type(neurite_type) return [s.root_node.points[0][COLS.R] for s in nrn.neurites if neurite_filter(s)]
[ "def", "trunk_origin_radii", "(", "nrn", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "neurite_filter", "=", "is_type", "(", "neurite_type", ")", "return", "[", "s", ".", "root_node", ".", "points", "[", "0", "]", "[", "COLS", ".", "R", "]", "for", "s", "in", "nrn", ".", "neurites", "if", "neurite_filter", "(", "s", ")", "]" ]
radii of the trunk sections of neurites in a neuron
[ "radii", "of", "the", "trunk", "sections", "of", "neurites", "in", "a", "neuron" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L89-L92
train
235,638
BlueBrain/NeuroM
neurom/fst/_neuronfunc.py
trunk_origin_azimuths
def trunk_origin_azimuths(nrn, neurite_type=NeuriteType.all): '''Get a list of all the trunk origin azimuths of a neuron or population The azimuth is defined as Angle between x-axis and the vector defined by (initial tree point - soma center) on the x-z plane. The range of the azimuth angle [-pi, pi] radians ''' neurite_filter = is_type(neurite_type) nrns = neuron_population(nrn) def _azimuth(section, soma): '''Azimuth of a section''' vector = morphmath.vector(section[0], soma.center) return np.arctan2(vector[COLS.Z], vector[COLS.X]) return [_azimuth(s.root_node.points, n.soma) for n in nrns for s in n.neurites if neurite_filter(s)]
python
def trunk_origin_azimuths(nrn, neurite_type=NeuriteType.all): '''Get a list of all the trunk origin azimuths of a neuron or population The azimuth is defined as Angle between x-axis and the vector defined by (initial tree point - soma center) on the x-z plane. The range of the azimuth angle [-pi, pi] radians ''' neurite_filter = is_type(neurite_type) nrns = neuron_population(nrn) def _azimuth(section, soma): '''Azimuth of a section''' vector = morphmath.vector(section[0], soma.center) return np.arctan2(vector[COLS.Z], vector[COLS.X]) return [_azimuth(s.root_node.points, n.soma) for n in nrns for s in n.neurites if neurite_filter(s)]
[ "def", "trunk_origin_azimuths", "(", "nrn", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "neurite_filter", "=", "is_type", "(", "neurite_type", ")", "nrns", "=", "neuron_population", "(", "nrn", ")", "def", "_azimuth", "(", "section", ",", "soma", ")", ":", "'''Azimuth of a section'''", "vector", "=", "morphmath", ".", "vector", "(", "section", "[", "0", "]", ",", "soma", ".", "center", ")", "return", "np", ".", "arctan2", "(", "vector", "[", "COLS", ".", "Z", "]", ",", "vector", "[", "COLS", ".", "X", "]", ")", "return", "[", "_azimuth", "(", "s", ".", "root_node", ".", "points", ",", "n", ".", "soma", ")", "for", "n", "in", "nrns", "for", "s", "in", "n", ".", "neurites", "if", "neurite_filter", "(", "s", ")", "]" ]
Get a list of all the trunk origin azimuths of a neuron or population. The azimuth is defined as the angle between the x-axis and the vector defined by (initial tree point - soma center) on the x-z plane. The azimuth angle ranges over [-pi, pi] radians.
[ "Get", "a", "list", "of", "all", "the", "trunk", "origin", "azimuths", "of", "a", "neuron", "or", "population" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L95-L113
train
235,639
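A worked example of the azimuth formula: for a trunk-origin vector (x, y, z) = (1, 0, 1) relative to the soma center, arctan2(z, x) = arctan2(1, 1) = pi/4.

    import numpy as np

    vector = np.array([1.0, 0.0, 1.0])
    print(np.arctan2(vector[2], vector[0]))  # ~0.785 (pi/4)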
BlueBrain/NeuroM
neurom/fst/_neuronfunc.py
trunk_origin_elevations
def trunk_origin_elevations(nrn, neurite_type=NeuriteType.all): '''Get a list of all the trunk origin elevations of a neuron or population The elevation is defined as the angle between x-axis and the vector defined by (initial tree point - soma center) on the x-y half-plane. The range of the elevation angle [-pi/2, pi/2] radians ''' neurite_filter = is_type(neurite_type) nrns = neuron_population(nrn) def _elevation(section, soma): '''Elevation of a section''' vector = morphmath.vector(section[0], soma.center) norm_vector = np.linalg.norm(vector) if norm_vector >= np.finfo(type(norm_vector)).eps: return np.arcsin(vector[COLS.Y] / norm_vector) raise ValueError("Norm of vector between soma center and section is almost zero.") return [_elevation(s.root_node.points, n.soma) for n in nrns for s in n.neurites if neurite_filter(s)]
python
def trunk_origin_elevations(nrn, neurite_type=NeuriteType.all): '''Get a list of all the trunk origin elevations of a neuron or population The elevation is defined as the angle between x-axis and the vector defined by (initial tree point - soma center) on the x-y half-plane. The range of the elevation angle [-pi/2, pi/2] radians ''' neurite_filter = is_type(neurite_type) nrns = neuron_population(nrn) def _elevation(section, soma): '''Elevation of a section''' vector = morphmath.vector(section[0], soma.center) norm_vector = np.linalg.norm(vector) if norm_vector >= np.finfo(type(norm_vector)).eps: return np.arcsin(vector[COLS.Y] / norm_vector) raise ValueError("Norm of vector between soma center and section is almost zero.") return [_elevation(s.root_node.points, n.soma) for n in nrns for s in n.neurites if neurite_filter(s)]
[ "def", "trunk_origin_elevations", "(", "nrn", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "neurite_filter", "=", "is_type", "(", "neurite_type", ")", "nrns", "=", "neuron_population", "(", "nrn", ")", "def", "_elevation", "(", "section", ",", "soma", ")", ":", "'''Elevation of a section'''", "vector", "=", "morphmath", ".", "vector", "(", "section", "[", "0", "]", ",", "soma", ".", "center", ")", "norm_vector", "=", "np", ".", "linalg", ".", "norm", "(", "vector", ")", "if", "norm_vector", ">=", "np", ".", "finfo", "(", "type", "(", "norm_vector", ")", ")", ".", "eps", ":", "return", "np", ".", "arcsin", "(", "vector", "[", "COLS", ".", "Y", "]", "/", "norm_vector", ")", "raise", "ValueError", "(", "\"Norm of vector between soma center and section is almost zero.\"", ")", "return", "[", "_elevation", "(", "s", ".", "root_node", ".", "points", ",", "n", ".", "soma", ")", "for", "n", "in", "nrns", "for", "s", "in", "n", ".", "neurites", "if", "neurite_filter", "(", "s", ")", "]" ]
Get a list of all the trunk origin elevations of a neuron or population. The elevation is defined as the angle between the x-axis and the vector defined by (initial tree point - soma center) on the x-y half-plane. The elevation angle ranges over [-pi/2, pi/2] radians.
[ "Get", "a", "list", "of", "all", "the", "trunk", "origin", "elevations", "of", "a", "neuron", "or", "population" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L116-L139
train
235,640
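A worked example of the elevation formula: for vector (1, 1, 0), arcsin(y / ||v||) = arcsin(1 / sqrt(2)) = pi/4.

    import numpy as np

    vector = np.array([1.0, 1.0, 0.0])
    print(np.arcsin(vector[1] / np.linalg.norm(vector)))  # ~0.785 (pi/4)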
BlueBrain/NeuroM
neurom/fst/_neuronfunc.py
trunk_vectors
def trunk_vectors(nrn, neurite_type=NeuriteType.all): '''Calculates the vectors between all the trunks of the neuron and the soma center. ''' neurite_filter = is_type(neurite_type) nrns = neuron_population(nrn) return np.array([morphmath.vector(s.root_node.points[0], n.soma.center) for n in nrns for s in n.neurites if neurite_filter(s)])
python
def trunk_vectors(nrn, neurite_type=NeuriteType.all): '''Calculates the vectors between all the trunks of the neuron and the soma center. ''' neurite_filter = is_type(neurite_type) nrns = neuron_population(nrn) return np.array([morphmath.vector(s.root_node.points[0], n.soma.center) for n in nrns for s in n.neurites if neurite_filter(s)])
[ "def", "trunk_vectors", "(", "nrn", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "neurite_filter", "=", "is_type", "(", "neurite_type", ")", "nrns", "=", "neuron_population", "(", "nrn", ")", "return", "np", ".", "array", "(", "[", "morphmath", ".", "vector", "(", "s", ".", "root_node", ".", "points", "[", "0", "]", ",", "n", ".", "soma", ".", "center", ")", "for", "n", "in", "nrns", "for", "s", "in", "n", ".", "neurites", "if", "neurite_filter", "(", "s", ")", "]", ")" ]
Calculates the vectors between all the trunks of the neuron and the soma center.
[ "Calculates", "the", "vectors", "between", "all", "the", "trunks", "of", "the", "neuron", "and", "the", "soma", "center", "." ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L142-L151
train
235,641
BlueBrain/NeuroM
neurom/fst/_neuronfunc.py
trunk_angles
def trunk_angles(nrn, neurite_type=NeuriteType.all): '''Calculates the angles between all the trunks of the neuron. The angles are defined on the x-y plane and the trees are sorted from the y axis and anticlock-wise. ''' vectors = trunk_vectors(nrn, neurite_type=neurite_type) # In order to avoid the failure of the process in case the neurite_type does not exist if not vectors.size: return [] def _sort_angle(p1, p2): """Angle between p1-p2 to sort vectors""" ang1 = np.arctan2(*p1[::-1]) ang2 = np.arctan2(*p2[::-1]) return (ang1 - ang2) # Sorting angles according to x-y plane order = np.argsort(np.array([_sort_angle(i / np.linalg.norm(i), [0, 1]) for i in vectors[:, 0:2]])) ordered_vectors = vectors[order][:, [COLS.X, COLS.Y]] return [morphmath.angle_between_vectors(ordered_vectors[i], ordered_vectors[i - 1]) for i, _ in enumerate(ordered_vectors)]
python
def trunk_angles(nrn, neurite_type=NeuriteType.all): '''Calculates the angles between all the trunks of the neuron. The angles are defined on the x-y plane and the trees are sorted from the y axis and anticlock-wise. ''' vectors = trunk_vectors(nrn, neurite_type=neurite_type) # In order to avoid the failure of the process in case the neurite_type does not exist if not vectors.size: return [] def _sort_angle(p1, p2): """Angle between p1-p2 to sort vectors""" ang1 = np.arctan2(*p1[::-1]) ang2 = np.arctan2(*p2[::-1]) return (ang1 - ang2) # Sorting angles according to x-y plane order = np.argsort(np.array([_sort_angle(i / np.linalg.norm(i), [0, 1]) for i in vectors[:, 0:2]])) ordered_vectors = vectors[order][:, [COLS.X, COLS.Y]] return [morphmath.angle_between_vectors(ordered_vectors[i], ordered_vectors[i - 1]) for i, _ in enumerate(ordered_vectors)]
[ "def", "trunk_angles", "(", "nrn", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "vectors", "=", "trunk_vectors", "(", "nrn", ",", "neurite_type", "=", "neurite_type", ")", "# In order to avoid the failure of the process in case the neurite_type does not exist", "if", "not", "vectors", ".", "size", ":", "return", "[", "]", "def", "_sort_angle", "(", "p1", ",", "p2", ")", ":", "\"\"\"Angle between p1-p2 to sort vectors\"\"\"", "ang1", "=", "np", ".", "arctan2", "(", "*", "p1", "[", ":", ":", "-", "1", "]", ")", "ang2", "=", "np", ".", "arctan2", "(", "*", "p2", "[", ":", ":", "-", "1", "]", ")", "return", "(", "ang1", "-", "ang2", ")", "# Sorting angles according to x-y plane", "order", "=", "np", ".", "argsort", "(", "np", ".", "array", "(", "[", "_sort_angle", "(", "i", "/", "np", ".", "linalg", ".", "norm", "(", "i", ")", ",", "[", "0", ",", "1", "]", ")", "for", "i", "in", "vectors", "[", ":", ",", "0", ":", "2", "]", "]", ")", ")", "ordered_vectors", "=", "vectors", "[", "order", "]", "[", ":", ",", "[", "COLS", ".", "X", ",", "COLS", ".", "Y", "]", "]", "return", "[", "morphmath", ".", "angle_between_vectors", "(", "ordered_vectors", "[", "i", "]", ",", "ordered_vectors", "[", "i", "-", "1", "]", ")", "for", "i", ",", "_", "in", "enumerate", "(", "ordered_vectors", ")", "]" ]
Calculates the angles between all the trunks of the neuron. The angles are defined on the x-y plane and the trees are sorted anticlockwise starting from the y-axis.
[ "Calculates", "the", "angles", "between", "all", "the", "trunks", "of", "the", "neuron", ".", "The", "angles", "are", "defined", "on", "the", "x", "-", "y", "plane", "and", "the", "trees", "are", "sorted", "from", "the", "y", "axis", "and", "anticlock", "-", "wise", "." ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L154-L177
train
235,642
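An independent sketch of the angle-between-vectors step used above. This mirrors what morphmath.angle_between_vectors is assumed to compute; the helper here is local to the example, not the library's implementation.

    import numpy as np

    def angle_between(v1, v2):
        # normalise, then take the arccos of the clipped dot product
        v1 = v1 / np.linalg.norm(v1)
        v2 = v2 / np.linalg.norm(v2)
        return np.arccos(np.clip(np.dot(v1, v2), -1.0, 1.0))

    # two trunks at right angles on the x-y plane
    print(angle_between(np.array([0.0, 1.0]), np.array([1.0, 0.0])))  # pi/2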
BlueBrain/NeuroM
neurom/fst/_neuronfunc.py
sholl_crossings
def sholl_crossings(neurites, center, radii): '''calculate crossings of neurites Args: nrn(morph): morphology on which to perform Sholl analysis radii(iterable of floats): radii for which crossings will be counted Returns: Array of same length as radii, with a count of the number of crossings for the respective radius ''' def _count_crossings(neurite, radius): '''count_crossings of segments in neurite with radius''' r2 = radius ** 2 count = 0 for start, end in iter_segments(neurite): start_dist2, end_dist2 = (morphmath.point_dist2(center, start), morphmath.point_dist2(center, end)) count += int(start_dist2 <= r2 <= end_dist2 or end_dist2 <= r2 <= start_dist2) return count return np.array([sum(_count_crossings(neurite, r) for neurite in iter_neurites(neurites)) for r in radii])
python
def sholl_crossings(neurites, center, radii): '''calculate crossings of neurites Args: nrn(morph): morphology on which to perform Sholl analysis radii(iterable of floats): radii for which crossings will be counted Returns: Array of same length as radii, with a count of the number of crossings for the respective radius ''' def _count_crossings(neurite, radius): '''count_crossings of segments in neurite with radius''' r2 = radius ** 2 count = 0 for start, end in iter_segments(neurite): start_dist2, end_dist2 = (morphmath.point_dist2(center, start), morphmath.point_dist2(center, end)) count += int(start_dist2 <= r2 <= end_dist2 or end_dist2 <= r2 <= start_dist2) return count return np.array([sum(_count_crossings(neurite, r) for neurite in iter_neurites(neurites)) for r in radii])
[ "def", "sholl_crossings", "(", "neurites", ",", "center", ",", "radii", ")", ":", "def", "_count_crossings", "(", "neurite", ",", "radius", ")", ":", "'''count_crossings of segments in neurite with radius'''", "r2", "=", "radius", "**", "2", "count", "=", "0", "for", "start", ",", "end", "in", "iter_segments", "(", "neurite", ")", ":", "start_dist2", ",", "end_dist2", "=", "(", "morphmath", ".", "point_dist2", "(", "center", ",", "start", ")", ",", "morphmath", ".", "point_dist2", "(", "center", ",", "end", ")", ")", "count", "+=", "int", "(", "start_dist2", "<=", "r2", "<=", "end_dist2", "or", "end_dist2", "<=", "r2", "<=", "start_dist2", ")", "return", "count", "return", "np", ".", "array", "(", "[", "sum", "(", "_count_crossings", "(", "neurite", ",", "r", ")", "for", "neurite", "in", "iter_neurites", "(", "neurites", ")", ")", "for", "r", "in", "radii", "]", ")" ]
calculate crossings of neurites Args: nrn(morph): morphology on which to perform Sholl analysis radii(iterable of floats): radii for which crossings will be counted Returns: Array of same length as radii, with a count of the number of crossings for the respective radius
[ "calculate", "crossings", "of", "neurites" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L180-L206
train
235,643
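A standalone check of the crossing test used above: a segment running from distance 4 to distance 6 from the center crosses the radius-5 sphere, since the squared radius falls between the two squared endpoint distances.

    import numpy as np

    center = np.zeros(3)
    start, end = np.array([4.0, 0.0, 0.0]), np.array([6.0, 0.0, 0.0])
    r2 = 5.0 ** 2
    start_d2 = np.sum((start - center) ** 2)  # 16
    end_d2 = np.sum((end - center) ** 2)      # 36
    print(start_d2 <= r2 <= end_d2 or end_d2 <= r2 <= start_d2)  # True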
BlueBrain/NeuroM
neurom/fst/_neuronfunc.py
sholl_frequency
def sholl_frequency(nrn, neurite_type=NeuriteType.all, step_size=10): '''perform Sholl frequency calculations on a population of neurites Args: nrn(morph): nrn or population neurite_type(NeuriteType): which neurites to operate on step_size(float): step size between Sholl radii Note: Given a neuron, the soma center is used for the concentric circles, which range from the soma radii, and the maximum radial distance in steps of `step_size`. When a population is given, the concentric circles range from the smallest soma radius to the largest radial neurite distance. Finally, each segment of the neuron is tested, so a neurite that bends back on itself, and crosses the same Sholl radius will get counted as having crossed multiple times. ''' nrns = neuron_population(nrn) neurite_filter = is_type(neurite_type) min_soma_edge = float('Inf') max_radii = 0 neurites_list = [] for neuron in nrns: neurites_list.extend(((neurites, neuron.soma.center) for neurites in neuron.neurites if neurite_filter(neurites))) min_soma_edge = min(min_soma_edge, neuron.soma.radius) max_radii = max(max_radii, np.max(np.abs(bounding_box(neuron)))) radii = np.arange(min_soma_edge, max_radii + step_size, step_size) ret = np.zeros_like(radii) for neurites, center in neurites_list: ret += sholl_crossings(neurites, center, radii) return ret
python
def sholl_frequency(nrn, neurite_type=NeuriteType.all, step_size=10): '''perform Sholl frequency calculations on a population of neurites Args: nrn(morph): nrn or population neurite_type(NeuriteType): which neurites to operate on step_size(float): step size between Sholl radii Note: Given a neuron, the soma center is used for the concentric circles, which range from the soma radii, and the maximum radial distance in steps of `step_size`. When a population is given, the concentric circles range from the smallest soma radius to the largest radial neurite distance. Finally, each segment of the neuron is tested, so a neurite that bends back on itself, and crosses the same Sholl radius will get counted as having crossed multiple times. ''' nrns = neuron_population(nrn) neurite_filter = is_type(neurite_type) min_soma_edge = float('Inf') max_radii = 0 neurites_list = [] for neuron in nrns: neurites_list.extend(((neurites, neuron.soma.center) for neurites in neuron.neurites if neurite_filter(neurites))) min_soma_edge = min(min_soma_edge, neuron.soma.radius) max_radii = max(max_radii, np.max(np.abs(bounding_box(neuron)))) radii = np.arange(min_soma_edge, max_radii + step_size, step_size) ret = np.zeros_like(radii) for neurites, center in neurites_list: ret += sholl_crossings(neurites, center, radii) return ret
[ "def", "sholl_frequency", "(", "nrn", ",", "neurite_type", "=", "NeuriteType", ".", "all", ",", "step_size", "=", "10", ")", ":", "nrns", "=", "neuron_population", "(", "nrn", ")", "neurite_filter", "=", "is_type", "(", "neurite_type", ")", "min_soma_edge", "=", "float", "(", "'Inf'", ")", "max_radii", "=", "0", "neurites_list", "=", "[", "]", "for", "neuron", "in", "nrns", ":", "neurites_list", ".", "extend", "(", "(", "(", "neurites", ",", "neuron", ".", "soma", ".", "center", ")", "for", "neurites", "in", "neuron", ".", "neurites", "if", "neurite_filter", "(", "neurites", ")", ")", ")", "min_soma_edge", "=", "min", "(", "min_soma_edge", ",", "neuron", ".", "soma", ".", "radius", ")", "max_radii", "=", "max", "(", "max_radii", ",", "np", ".", "max", "(", "np", ".", "abs", "(", "bounding_box", "(", "neuron", ")", ")", ")", ")", "radii", "=", "np", ".", "arange", "(", "min_soma_edge", ",", "max_radii", "+", "step_size", ",", "step_size", ")", "ret", "=", "np", ".", "zeros_like", "(", "radii", ")", "for", "neurites", ",", "center", "in", "neurites_list", ":", "ret", "+=", "sholl_crossings", "(", "neurites", ",", "center", ",", "radii", ")", "return", "ret" ]
perform Sholl frequency calculations on a population of neurites Args: nrn(morph): nrn or population neurite_type(NeuriteType): which neurites to operate on step_size(float): step size between Sholl radii Note: Given a neuron, the soma center is used for the concentric circles, which range from the soma radius to the maximum radial distance in steps of `step_size`. When a population is given, the concentric circles range from the smallest soma radius to the largest radial neurite distance. Finally, each segment of the neuron is tested, so a neurite that bends back on itself and crosses the same Sholl radius will get counted as having crossed multiple times.
[ "perform", "Sholl", "frequency", "calculations", "on", "a", "population", "of", "neurites" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L209-L245
train
235,644
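A usage sketch for the Sholl driver; the file path is hypothetical and the private module path is an assumption about this repository layout.

    import neurom as nm
    from neurom.fst._neuronfunc import sholl_frequency  # assumption: private module path

    nrn = nm.load_neuron('neuron.swc')  # hypothetical path
    counts = sholl_frequency(nrn, neurite_type=nm.NeuriteType.all, step_size=20)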
BlueBrain/NeuroM
examples/plot_features.py
dist_points
def dist_points(bin_edges, d): """Return an array of values according to a distribution Points are calculated at the center of each bin """ bc = bin_centers(bin_edges) if d is not None: d = DISTS[d['type']](d, bc) return d, bc
python
def dist_points(bin_edges, d): """Return an array of values according to a distribution Points are calculated at the center of each bin """ bc = bin_centers(bin_edges) if d is not None: d = DISTS[d['type']](d, bc) return d, bc
[ "def", "dist_points", "(", "bin_edges", ",", "d", ")", ":", "bc", "=", "bin_centers", "(", "bin_edges", ")", "if", "d", "is", "not", "None", ":", "d", "=", "DISTS", "[", "d", "[", "'type'", "]", "]", "(", "d", ",", "bc", ")", "return", "d", ",", "bc" ]
Return an array of values according to a distribution Points are calculated at the center of each bin
[ "Return", "an", "array", "of", "values", "according", "to", "a", "distribution" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/plot_features.py#L70-L78
train
235,645
BlueBrain/NeuroM
examples/plot_features.py
calc_limits
def calc_limits(data, dist=None, padding=0.25): """Calculate a suitable range for a histogram Returns: tuple of (min, max) """ dmin = sys.float_info.max if dist is None else dist.get('min', sys.float_info.max) dmax = sys.float_info.min if dist is None else dist.get('max', sys.float_info.min) _min = min(min(data), dmin) _max = max(max(data), dmax) padding = padding * (_max - _min) return _min - padding, _max + padding
python
def calc_limits(data, dist=None, padding=0.25): """Calculate a suitable range for a histogram Returns: tuple of (min, max) """ dmin = sys.float_info.max if dist is None else dist.get('min', sys.float_info.max) dmax = sys.float_info.min if dist is None else dist.get('max', sys.float_info.min) _min = min(min(data), dmin) _max = max(max(data), dmax) padding = padding * (_max - _min) return _min - padding, _max + padding
[ "def", "calc_limits", "(", "data", ",", "dist", "=", "None", ",", "padding", "=", "0.25", ")", ":", "dmin", "=", "sys", ".", "float_info", ".", "max", "if", "dist", "is", "None", "else", "dist", ".", "get", "(", "'min'", ",", "sys", ".", "float_info", ".", "max", ")", "dmax", "=", "sys", ".", "float_info", ".", "min", "if", "dist", "is", "None", "else", "dist", ".", "get", "(", "'max'", ",", "sys", ".", "float_info", ".", "min", ")", "_min", "=", "min", "(", "min", "(", "data", ")", ",", "dmin", ")", "_max", "=", "max", "(", "max", "(", "data", ")", ",", "dmax", ")", "padding", "=", "padding", "*", "(", "_max", "-", "_min", ")", "return", "_min", "-", "padding", ",", "_max", "+", "padding" ]
Calculate a suitable range for a histogram Returns: tuple of (min, max)
[ "Calculate", "a", "suitable", "range", "for", "a", "histogram" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/plot_features.py#L81-L95
train
235,646
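A worked example, assuming `calc_limits` as defined above: data spanning [2, 8] with no distribution and the default 25% padding widens the range by 0.25 * (8 - 2) = 1.5 on each side.

    print(calc_limits([2.0, 8.0]))  # -> (0.5, 9.5)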
BlueBrain/NeuroM
examples/plot_features.py
load_neurite_features
def load_neurite_features(filepath): '''Unpack relevant data into megadict''' stuff = defaultdict(lambda: defaultdict(list)) nrns = nm.load_neurons(filepath) # unpack data into arrays for nrn in nrns: for t in NEURITES_: for feat in FEATURES: stuff[feat][str(t).split('.')[1]].extend( nm.get(feat, nrn, neurite_type=t) ) return stuff
python
def load_neurite_features(filepath): '''Unpack relevant data into megadict''' stuff = defaultdict(lambda: defaultdict(list)) nrns = nm.load_neurons(filepath) # unpack data into arrays for nrn in nrns: for t in NEURITES_: for feat in FEATURES: stuff[feat][str(t).split('.')[1]].extend( nm.get(feat, nrn, neurite_type=t) ) return stuff
[ "def", "load_neurite_features", "(", "filepath", ")", ":", "stuff", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "list", ")", ")", "nrns", "=", "nm", ".", "load_neurons", "(", "filepath", ")", "# unpack data into arrays", "for", "nrn", "in", "nrns", ":", "for", "t", "in", "NEURITES_", ":", "for", "feat", "in", "FEATURES", ":", "stuff", "[", "feat", "]", "[", "str", "(", "t", ")", ".", "split", "(", "'.'", ")", "[", "1", "]", "]", ".", "extend", "(", "nm", ".", "get", "(", "feat", ",", "nrn", ",", "neurite_type", "=", "t", ")", ")", "return", "stuff" ]
Unpack relevant data into megadict
[ "Unpack", "relevant", "data", "into", "megadict" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/plot_features.py#L112-L123
train
235,647
BlueBrain/NeuroM
examples/plot_features.py
main
def main(data_dir, mtype_file): # pylint: disable=too-many-locals '''Run the stuff''' # data structure to store results stuff = load_neurite_features(data_dir) sim_params = json.load(open(mtype_file)) # load histograms, distribution parameter sets and figures into arrays. # To plot figures, do # plots[i].fig.show() # To modify an axis, do # plots[i].ax.something() _plots = [] for feat, d in stuff.items(): for typ, data in d.items(): dist = sim_params['components'][typ].get(feat, None) print('Type = %s, Feature = %s, Distribution = %s' % (typ, feat, dist)) # if no data available, skip this feature if not data: print("No data found for feature %s (%s)" % (feat, typ)) continue # print 'DATA', data num_bins = 100 limits = calc_limits(data, dist) bin_edges = np.linspace(limits[0], limits[1], num_bins + 1) histo = np.histogram(data, bin_edges, normed=True) print('PLOT LIMITS:', limits) # print 'DATA:', data # print 'BIN HEIGHT', histo[0] plot = Plot(*view_utils.get_figure(new_fig=True, subplot=111)) plot.ax.set_xlim(*limits) plot.ax.bar(histo[1][:-1], histo[0], width=bin_widths(histo[1])) dp, bc = dist_points(histo[1], dist) # print 'BIN CENTERS:', bc, len(bc) if dp is not None: # print 'DIST POINTS:', dp, len(dp) plot.ax.plot(bc, dp, 'r*') plot.ax.set_title('%s (%s)' % (feat, typ)) _plots.append(plot) return _plots
python
def main(data_dir, mtype_file): # pylint: disable=too-many-locals '''Run the stuff''' # data structure to store results stuff = load_neurite_features(data_dir) sim_params = json.load(open(mtype_file)) # load histograms, distribution parameter sets and figures into arrays. # To plot figures, do # plots[i].fig.show() # To modify an axis, do # plots[i].ax.something() _plots = [] for feat, d in stuff.items(): for typ, data in d.items(): dist = sim_params['components'][typ].get(feat, None) print('Type = %s, Feature = %s, Distribution = %s' % (typ, feat, dist)) # if no data available, skip this feature if not data: print("No data found for feature %s (%s)" % (feat, typ)) continue # print 'DATA', data num_bins = 100 limits = calc_limits(data, dist) bin_edges = np.linspace(limits[0], limits[1], num_bins + 1) histo = np.histogram(data, bin_edges, normed=True) print('PLOT LIMITS:', limits) # print 'DATA:', data # print 'BIN HEIGHT', histo[0] plot = Plot(*view_utils.get_figure(new_fig=True, subplot=111)) plot.ax.set_xlim(*limits) plot.ax.bar(histo[1][:-1], histo[0], width=bin_widths(histo[1])) dp, bc = dist_points(histo[1], dist) # print 'BIN CENTERS:', bc, len(bc) if dp is not None: # print 'DIST POINTS:', dp, len(dp) plot.ax.plot(bc, dp, 'r*') plot.ax.set_title('%s (%s)' % (feat, typ)) _plots.append(plot) return _plots
[ "def", "main", "(", "data_dir", ",", "mtype_file", ")", ":", "# pylint: disable=too-many-locals", "# data structure to store results", "stuff", "=", "load_neurite_features", "(", "data_dir", ")", "sim_params", "=", "json", ".", "load", "(", "open", "(", "mtype_file", ")", ")", "# load histograms, distribution parameter sets and figures into arrays.", "# To plot figures, do", "# plots[i].fig.show()", "# To modify an axis, do", "# plots[i].ax.something()", "_plots", "=", "[", "]", "for", "feat", ",", "d", "in", "stuff", ".", "items", "(", ")", ":", "for", "typ", ",", "data", "in", "d", ".", "items", "(", ")", ":", "dist", "=", "sim_params", "[", "'components'", "]", "[", "typ", "]", ".", "get", "(", "feat", ",", "None", ")", "print", "(", "'Type = %s, Feature = %s, Distribution = %s'", "%", "(", "typ", ",", "feat", ",", "dist", ")", ")", "# if no data available, skip this feature", "if", "not", "data", ":", "print", "(", "\"No data found for feature %s (%s)\"", "%", "(", "feat", ",", "typ", ")", ")", "continue", "# print 'DATA', data", "num_bins", "=", "100", "limits", "=", "calc_limits", "(", "data", ",", "dist", ")", "bin_edges", "=", "np", ".", "linspace", "(", "limits", "[", "0", "]", ",", "limits", "[", "1", "]", ",", "num_bins", "+", "1", ")", "histo", "=", "np", ".", "histogram", "(", "data", ",", "bin_edges", ",", "normed", "=", "True", ")", "print", "(", "'PLOT LIMITS:'", ",", "limits", ")", "# print 'DATA:', data", "# print 'BIN HEIGHT', histo[0]", "plot", "=", "Plot", "(", "*", "view_utils", ".", "get_figure", "(", "new_fig", "=", "True", ",", "subplot", "=", "111", ")", ")", "plot", ".", "ax", ".", "set_xlim", "(", "*", "limits", ")", "plot", ".", "ax", ".", "bar", "(", "histo", "[", "1", "]", "[", ":", "-", "1", "]", ",", "histo", "[", "0", "]", ",", "width", "=", "bin_widths", "(", "histo", "[", "1", "]", ")", ")", "dp", ",", "bc", "=", "dist_points", "(", "histo", "[", "1", "]", ",", "dist", ")", "# print 'BIN CENTERS:', bc, len(bc)", "if", "dp", "is", "not", "None", ":", "# print 'DIST POINTS:', dp, len(dp)", "plot", ".", "ax", ".", "plot", "(", "bc", ",", "dp", ",", "'r*'", ")", "plot", ".", "ax", ".", "set_title", "(", "'%s (%s)'", "%", "(", "feat", ",", "typ", ")", ")", "_plots", ".", "append", "(", "plot", ")", "return", "_plots" ]
Run the stuff
[ "Run", "the", "stuff" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/plot_features.py#L150-L191
train
235,648
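Note that the np.histogram call in the driver above uses normed=True, which was deprecated and later removed from NumPy; density=True is the equivalent in current releases. A sketch with synthetic data:

    import numpy as np

    data = np.random.default_rng(0).normal(size=1000)
    bin_edges = np.linspace(-4.0, 4.0, 101)
    heights, edges = np.histogram(data, bin_edges, density=True)  # replaces normed=True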
BlueBrain/NeuroM
examples/density_plot.py
extract_density
def extract_density(population, plane='xy', bins=100, neurite_type=NeuriteType.basal_dendrite): '''Extracts the 2d histogram of the center coordinates of segments in the selected plane. ''' segment_midpoints = get_feat('segment_midpoints', population, neurite_type=neurite_type) horiz = segment_midpoints[:, 'xyz'.index(plane[0])] vert = segment_midpoints[:, 'xyz'.index(plane[1])] return np.histogram2d(np.array(horiz), np.array(vert), bins=(bins, bins))
python
def extract_density(population, plane='xy', bins=100, neurite_type=NeuriteType.basal_dendrite): '''Extracts the 2d histogram of the center coordinates of segments in the selected plane. ''' segment_midpoints = get_feat('segment_midpoints', population, neurite_type=neurite_type) horiz = segment_midpoints[:, 'xyz'.index(plane[0])] vert = segment_midpoints[:, 'xyz'.index(plane[1])] return np.histogram2d(np.array(horiz), np.array(vert), bins=(bins, bins))
[ "def", "extract_density", "(", "population", ",", "plane", "=", "'xy'", ",", "bins", "=", "100", ",", "neurite_type", "=", "NeuriteType", ".", "basal_dendrite", ")", ":", "segment_midpoints", "=", "get_feat", "(", "'segment_midpoints'", ",", "population", ",", "neurite_type", "=", "neurite_type", ")", "horiz", "=", "segment_midpoints", "[", ":", ",", "'xyz'", ".", "index", "(", "plane", "[", "0", "]", ")", "]", "vert", "=", "segment_midpoints", "[", ":", ",", "'xyz'", ".", "index", "(", "plane", "[", "1", "]", ")", "]", "return", "np", ".", "histogram2d", "(", "np", ".", "array", "(", "horiz", ")", ",", "np", ".", "array", "(", "vert", ")", ",", "bins", "=", "(", "bins", ",", "bins", ")", ")" ]
Extracts the 2d histogram of the center coordinates of segments in the selected plane.
[ "Extracts", "the", "2d", "histogram", "of", "the", "center", "coordinates", "of", "segments", "in", "the", "selected", "plane", "." ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/density_plot.py#L39-L46
train
235,649
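The binning step can be replayed without NeuroM at all; a sketch under the assumption that the midpoints are already available as an (N, 3) array:

import numpy as np

def density_2d(midpoints, plane='xy', bins=100):
    # midpoints: (N, 3) array of segment-midpoint x, y, z coordinates
    horiz = midpoints[:, 'xyz'.index(plane[0])]
    vert = midpoints[:, 'xyz'.index(plane[1])]
    return np.histogram2d(horiz, vert, bins=(bins, bins))

H, xedges, yedges = density_2d(np.random.rand(500, 3), plane='xz')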
BlueBrain/NeuroM
examples/density_plot.py
plot_density
def plot_density(population, # pylint: disable=too-many-arguments, too-many-locals bins=100, new_fig=True, subplot=111, levels=None, plane='xy', colorlabel='Nodes per unit area', labelfontsize=16, color_map='Reds', no_colorbar=False, threshold=0.01, neurite_type=NeuriteType.basal_dendrite, **kwargs): '''Plots the 2d histogram of the center coordinates of segments in the selected plane. ''' fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot) H1, xedges1, yedges1 = extract_density(population, plane=plane, bins=bins, neurite_type=neurite_type) mask = H1 < threshold # mask = H1==0 H2 = np.ma.masked_array(H1, mask) getattr(plt.cm, color_map).set_bad(color='white', alpha=None) plots = ax.contourf((xedges1[:-1] + xedges1[1:]) / 2, (yedges1[:-1] + yedges1[1:]) / 2, np.transpose(H2), # / np.max(H2), cmap=getattr(plt.cm, color_map), levels=levels) if not no_colorbar: cbar = plt.colorbar(plots) cbar.ax.set_ylabel(colorlabel, fontsize=labelfontsize) kwargs['title'] = kwargs.get('title', '') kwargs['xlabel'] = kwargs.get('xlabel', plane[0]) kwargs['ylabel'] = kwargs.get('ylabel', plane[1]) return common.plot_style(fig=fig, ax=ax, **kwargs)
python
def plot_density(population, # pylint: disable=too-many-arguments, too-many-locals bins=100, new_fig=True, subplot=111, levels=None, plane='xy', colorlabel='Nodes per unit area', labelfontsize=16, color_map='Reds', no_colorbar=False, threshold=0.01, neurite_type=NeuriteType.basal_dendrite, **kwargs): '''Plots the 2d histogram of the center coordinates of segments in the selected plane. ''' fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot) H1, xedges1, yedges1 = extract_density(population, plane=plane, bins=bins, neurite_type=neurite_type) mask = H1 < threshold # mask = H1==0 H2 = np.ma.masked_array(H1, mask) getattr(plt.cm, color_map).set_bad(color='white', alpha=None) plots = ax.contourf((xedges1[:-1] + xedges1[1:]) / 2, (yedges1[:-1] + yedges1[1:]) / 2, np.transpose(H2), # / np.max(H2), cmap=getattr(plt.cm, color_map), levels=levels) if not no_colorbar: cbar = plt.colorbar(plots) cbar.ax.set_ylabel(colorlabel, fontsize=labelfontsize) kwargs['title'] = kwargs.get('title', '') kwargs['xlabel'] = kwargs.get('xlabel', plane[0]) kwargs['ylabel'] = kwargs.get('ylabel', plane[1]) return common.plot_style(fig=fig, ax=ax, **kwargs)
[ "def", "plot_density", "(", "population", ",", "# pylint: disable=too-many-arguments, too-many-locals", "bins", "=", "100", ",", "new_fig", "=", "True", ",", "subplot", "=", "111", ",", "levels", "=", "None", ",", "plane", "=", "'xy'", ",", "colorlabel", "=", "'Nodes per unit area'", ",", "labelfontsize", "=", "16", ",", "color_map", "=", "'Reds'", ",", "no_colorbar", "=", "False", ",", "threshold", "=", "0.01", ",", "neurite_type", "=", "NeuriteType", ".", "basal_dendrite", ",", "*", "*", "kwargs", ")", ":", "fig", ",", "ax", "=", "common", ".", "get_figure", "(", "new_fig", "=", "new_fig", ",", "subplot", "=", "subplot", ")", "H1", ",", "xedges1", ",", "yedges1", "=", "extract_density", "(", "population", ",", "plane", "=", "plane", ",", "bins", "=", "bins", ",", "neurite_type", "=", "neurite_type", ")", "mask", "=", "H1", "<", "threshold", "# mask = H1==0", "H2", "=", "np", ".", "ma", ".", "masked_array", "(", "H1", ",", "mask", ")", "getattr", "(", "plt", ".", "cm", ",", "color_map", ")", ".", "set_bad", "(", "color", "=", "'white'", ",", "alpha", "=", "None", ")", "plots", "=", "ax", ".", "contourf", "(", "(", "xedges1", "[", ":", "-", "1", "]", "+", "xedges1", "[", "1", ":", "]", ")", "/", "2", ",", "(", "yedges1", "[", ":", "-", "1", "]", "+", "yedges1", "[", "1", ":", "]", ")", "/", "2", ",", "np", ".", "transpose", "(", "H2", ")", ",", "# / np.max(H2),", "cmap", "=", "getattr", "(", "plt", ".", "cm", ",", "color_map", ")", ",", "levels", "=", "levels", ")", "if", "not", "no_colorbar", ":", "cbar", "=", "plt", ".", "colorbar", "(", "plots", ")", "cbar", ".", "ax", ".", "set_ylabel", "(", "colorlabel", ",", "fontsize", "=", "labelfontsize", ")", "kwargs", "[", "'title'", "]", "=", "kwargs", ".", "get", "(", "'title'", ",", "''", ")", "kwargs", "[", "'xlabel'", "]", "=", "kwargs", ".", "get", "(", "'xlabel'", ",", "plane", "[", "0", "]", ")", "kwargs", "[", "'ylabel'", "]", "=", "kwargs", ".", "get", "(", "'ylabel'", ",", "plane", "[", "1", "]", ")", "return", "common", ".", "plot_style", "(", "fig", "=", "fig", ",", "ax", "=", "ax", ",", "*", "*", "kwargs", ")" ]
Plots the 2d histogram of the center coordinates of segments in the selected plane.
[ "Plots", "the", "2d", "histogram", "of", "the", "center", "coordinates", "of", "segments", "in", "the", "selected", "plane", "." ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/density_plot.py#L49-L80
train
235,650
BlueBrain/NeuroM
examples/density_plot.py
plot_neuron_on_density
def plot_neuron_on_density(population, # pylint: disable=too-many-arguments bins=100, new_fig=True, subplot=111, levels=None, plane='xy', colorlabel='Nodes per unit area', labelfontsize=16, color_map='Reds', no_colorbar=False, threshold=0.01, neurite_type=NeuriteType.basal_dendrite, **kwargs): '''Plots the 2d histogram of the center coordinates of segments in the selected plane and superimposes the view of the first neurite of the collection. ''' _, ax = common.get_figure(new_fig=new_fig) view.plot_tree(ax, population.neurites[0]) return plot_density(population, plane=plane, bins=bins, new_fig=False, subplot=subplot, colorlabel=colorlabel, labelfontsize=labelfontsize, levels=levels, color_map=color_map, no_colorbar=no_colorbar, threshold=threshold, neurite_type=neurite_type, **kwargs)
python
def plot_neuron_on_density(population, # pylint: disable=too-many-arguments bins=100, new_fig=True, subplot=111, levels=None, plane='xy', colorlabel='Nodes per unit area', labelfontsize=16, color_map='Reds', no_colorbar=False, threshold=0.01, neurite_type=NeuriteType.basal_dendrite, **kwargs): '''Plots the 2d histogram of the center coordinates of segments in the selected plane and superimposes the view of the first neurite of the collection. ''' _, ax = common.get_figure(new_fig=new_fig) view.plot_tree(ax, population.neurites[0]) return plot_density(population, plane=plane, bins=bins, new_fig=False, subplot=subplot, colorlabel=colorlabel, labelfontsize=labelfontsize, levels=levels, color_map=color_map, no_colorbar=no_colorbar, threshold=threshold, neurite_type=neurite_type, **kwargs)
[ "def", "plot_neuron_on_density", "(", "population", ",", "# pylint: disable=too-many-arguments", "bins", "=", "100", ",", "new_fig", "=", "True", ",", "subplot", "=", "111", ",", "levels", "=", "None", ",", "plane", "=", "'xy'", ",", "colorlabel", "=", "'Nodes per unit area'", ",", "labelfontsize", "=", "16", ",", "color_map", "=", "'Reds'", ",", "no_colorbar", "=", "False", ",", "threshold", "=", "0.01", ",", "neurite_type", "=", "NeuriteType", ".", "basal_dendrite", ",", "*", "*", "kwargs", ")", ":", "_", ",", "ax", "=", "common", ".", "get_figure", "(", "new_fig", "=", "new_fig", ")", "view", ".", "plot_tree", "(", "ax", ",", "population", ".", "neurites", "[", "0", "]", ")", "return", "plot_density", "(", "population", ",", "plane", "=", "plane", ",", "bins", "=", "bins", ",", "new_fig", "=", "False", ",", "subplot", "=", "subplot", ",", "colorlabel", "=", "colorlabel", ",", "labelfontsize", "=", "labelfontsize", ",", "levels", "=", "levels", ",", "color_map", "=", "color_map", ",", "no_colorbar", "=", "no_colorbar", ",", "threshold", "=", "threshold", ",", "neurite_type", "=", "neurite_type", ",", "*", "*", "kwargs", ")" ]
Plots the 2d histogram of the center coordinates of segments in the selected plane and superimposes the view of the first neurite of the collection.
[ "Plots", "the", "2d", "histogram", "of", "the", "center", "coordinates", "of", "segments", "in", "the", "selected", "plane", "and", "superimposes", "the", "view", "of", "the", "first", "neurite", "of", "the", "collection", "." ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/density_plot.py#L83-L99
train
235,651
BlueBrain/NeuroM
neurom/check/morphtree.py
is_monotonic
def is_monotonic(neurite, tol): '''Check if neurite tree is monotonic If each child has smaller or equal diameters from its parent Args: neurite(Neurite): neurite to operate on tol(float): tolerance Returns: True if neurite monotonic ''' for node in neurite.iter_sections(): # check that points in section satisfy monotonicity sec = node.points for point_id in range(len(sec) - 1): if sec[point_id + 1][COLS.R] > sec[point_id][COLS.R] + tol: return False # Check that section boundary points satisfy monotonicity if(node.parent is not None and sec[0][COLS.R] > node.parent.points[-1][COLS.R] + tol): return False return True
python
def is_monotonic(neurite, tol): '''Check if neurite tree is monotonic If each child has smaller or equal diameters from its parent Args: neurite(Neurite): neurite to operate on tol(float): tolerance Returns: True if neurite monotonic ''' for node in neurite.iter_sections(): # check that points in section satisfy monotonicity sec = node.points for point_id in range(len(sec) - 1): if sec[point_id + 1][COLS.R] > sec[point_id][COLS.R] + tol: return False # Check that section boundary points satisfy monotonicity if(node.parent is not None and sec[0][COLS.R] > node.parent.points[-1][COLS.R] + tol): return False return True
[ "def", "is_monotonic", "(", "neurite", ",", "tol", ")", ":", "for", "node", "in", "neurite", ".", "iter_sections", "(", ")", ":", "# check that points in section satisfy monotonicity", "sec", "=", "node", ".", "points", "for", "point_id", "in", "range", "(", "len", "(", "sec", ")", "-", "1", ")", ":", "if", "sec", "[", "point_id", "+", "1", "]", "[", "COLS", ".", "R", "]", ">", "sec", "[", "point_id", "]", "[", "COLS", ".", "R", "]", "+", "tol", ":", "return", "False", "# Check that section boundary points satisfy monotonicity", "if", "(", "node", ".", "parent", "is", "not", "None", "and", "sec", "[", "0", "]", "[", "COLS", ".", "R", "]", ">", "node", ".", "parent", ".", "points", "[", "-", "1", "]", "[", "COLS", ".", "R", "]", "+", "tol", ")", ":", "return", "False", "return", "True" ]
Check if neurite tree is monotonic If each child has smaller or equal diameters from its parent Args: neurite(Neurite): neurite to operate on tol(float): tolerance Returns: True if neurite monotonic
[ "Check", "if", "neurite", "tree", "is", "monotonic" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/morphtree.py#L40-L64
train
235,652
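Stripped of the tree traversal, the per-section test reduces to checking that radii never grow by more than tol from one sample to the next; a flat-array version of that inner loop, as an illustration only:

import numpy as np

def radii_monotonic(radii, tol=1e-6):
    # radii listed root to tip along one section
    return bool(np.all(np.diff(np.asarray(radii, dtype=float)) <= tol))

assert radii_monotonic([2.0, 1.5, 1.5, 1.0])   # tapering: OK
assert not radii_monotonic([1.0, 1.5])         # widening: rejected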
BlueBrain/NeuroM
neurom/check/morphtree.py
is_flat
def is_flat(neurite, tol, method='tolerance'): '''Check if neurite is flat using the given method Args: neurite(Neurite): neurite to operate on tol(float): tolerance method(string): the method of flatness estimation: 'tolerance' returns true if any extent of the tree is smaller than the given tolerance 'ratio' returns true if the ratio of the smallest directions is smaller than tol. e.g. [1,2,3] -> 1/2 < tol Returns: True if neurite is flat ''' ext = principal_direction_extent(neurite.points[:, COLS.XYZ]) assert method in ('tolerance', 'ratio'), "Method must be one of 'tolerance', 'ratio'" if method == 'ratio': sorted_ext = np.sort(ext) return sorted_ext[0] / sorted_ext[1] < float(tol) return any(ext < float(tol))
python
def is_flat(neurite, tol, method='tolerance'): '''Check if neurite is flat using the given method Args: neurite(Neurite): neurite to operate on tol(float): tolerance method(string): the method of flatness estimation: 'tolerance' returns true if any extent of the tree is smaller than the given tolerance 'ratio' returns true if the ratio of the smallest directions is smaller than tol. e.g. [1,2,3] -> 1/2 < tol Returns: True if neurite is flat ''' ext = principal_direction_extent(neurite.points[:, COLS.XYZ]) assert method in ('tolerance', 'ratio'), "Method must be one of 'tolerance', 'ratio'" if method == 'ratio': sorted_ext = np.sort(ext) return sorted_ext[0] / sorted_ext[1] < float(tol) return any(ext < float(tol))
[ "def", "is_flat", "(", "neurite", ",", "tol", ",", "method", "=", "'tolerance'", ")", ":", "ext", "=", "principal_direction_extent", "(", "neurite", ".", "points", "[", ":", ",", "COLS", ".", "XYZ", "]", ")", "assert", "method", "in", "(", "'tolerance'", ",", "'ratio'", ")", ",", "\"Method must be one of 'tolerance', 'ratio'\"", "if", "method", "==", "'ratio'", ":", "sorted_ext", "=", "np", ".", "sort", "(", "ext", ")", "return", "sorted_ext", "[", "0", "]", "/", "sorted_ext", "[", "1", "]", "<", "float", "(", "tol", ")", "return", "any", "(", "ext", "<", "float", "(", "tol", ")", ")" ]
Check if neurite is flat using the given method Args: neurite(Neurite): neurite to operate on tol(float): tolerance method(string): the method of flatness estimation: 'tolerance' returns true if any extent of the tree is smaller than the given tolerance 'ratio' returns true if the ratio of the smallest directions is smaller than tol. e.g. [1,2,3] -> 1/2 < tol Returns: True if neurite is flat
[ "Check", "if", "neurite", "is", "flat", "using", "the", "given", "method" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/morphtree.py#L67-L88
train
235,653
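principal_direction_extent is not shown in this record; one common reading, assumed here, is the spread of the point cloud along its PCA axes, which makes both flatness criteria easy to replay:

import numpy as np

def principal_extents(points):
    # extent of an (N, 3) point cloud along each principal axis
    centered = points - points.mean(axis=0)
    _, eigvecs = np.linalg.eigh(np.cov(centered.T))
    proj = centered.dot(eigvecs)
    return proj.max(axis=0) - proj.min(axis=0)

pts = np.random.rand(200, 3) * [10.0, 10.0, 0.01]   # nearly flat in z
ext = np.sort(principal_extents(pts))
print(any(ext < 0.1))           # 'tolerance' criterion
print(ext[0] / ext[1] < 0.1)    # 'ratio' criterion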
BlueBrain/NeuroM
neurom/check/morphtree.py
is_back_tracking
def is_back_tracking(neurite): ''' Check if a neurite process backtracks to a previous node. Back-tracking takes place when a daughter of a branching process goes back and either overlaps with a previous point, or lies inside the cylindrical volume of the latter. Args: neurite(Neurite): neurite to operate on Returns: True Under the following scenaria: 1. A segment endpoint falls back and overlaps with a previous segment's point 2. The geometry of a segment overlaps with a previous one in the section ''' def pair(segs): ''' Pairs the input list into triplets''' return zip(segs, segs[1:]) def coords(node): ''' Returns the first three values of the tree that correspond to the x, y, z coordinates''' return node[COLS.XYZ] def max_radius(seg): ''' Returns maximum radius from the two segment endpoints''' return max(seg[0][COLS.R], seg[1][COLS.R]) def is_not_zero_seg(seg): ''' Returns True if segment has zero length''' return not np.allclose(coords(seg[0]), coords(seg[1])) def is_in_the_same_verse(seg1, seg2): ''' Checks if the vectors face the same direction. This is true if their dot product is greater than zero. ''' v1 = coords(seg2[1]) - coords(seg2[0]) v2 = coords(seg1[1]) - coords(seg1[0]) return np.dot(v1, v2) >= 0 def is_seg2_within_seg1_radius(dist, seg1, seg2): ''' Checks whether the orthogonal distance from the point at the end of seg1 to seg2 segment body is smaller than the sum of their radii ''' return dist <= max_radius(seg1) + max_radius(seg2) def is_seg1_overlapping_with_seg2(seg1, seg2): '''Checks if a segment is in proximity of another one upstream''' # get the coordinates of seg2 (from the origin) s1 = coords(seg2[0]) s2 = coords(seg2[1]) # vector of the center of seg2 (from the origin) C = 0.5 * (s1 + s2) # endpoint of seg1 (from the origin) P = coords(seg1[1]) # vector from the center C of seg2 to the endpoint P of seg1 CP = P - C # vector of seg2 S1S2 = s2 - s1 # projection of CP upon seg2 prj = mm.vector_projection(CP, S1S2) # check if the distance of the orthogonal complement of CP projection on S1S2 # (vertical distance from P to seg2) is smaller than the sum of the radii. (overlap) # If not exit early, because there is no way that backtracking can feasible if not is_seg2_within_seg1_radius(np.linalg.norm(CP - prj), seg1, seg2): return False # projection lies within the length of the cylinder. Check if the distance between # the center C of seg2 and the projection of the end point of seg1, P is smaller than # half of the others length plus a 5% tolerance return np.linalg.norm(prj) < 0.55 * np.linalg.norm(S1S2) def is_inside_cylinder(seg1, seg2): ''' Checks if seg2 approximately lies within a cylindrical volume of seg1. Two conditions must be satisfied: 1. The two segments are not facing the same direction (seg2 comes back to seg1) 2. seg2 is overlaping with seg1 ''' return not is_in_the_same_verse(seg1, seg2) and is_seg1_overlapping_with_seg2(seg1, seg2) # filter out single segment sections section_itr = (snode for snode in neurite.iter_sections() if snode.points.shape[0] > 2) for snode in section_itr: # group each section's points intro triplets segment_pairs = list(filter(is_not_zero_seg, pair(snode.points))) # filter out zero length segments for i, seg1 in enumerate(segment_pairs[1:]): # check if the end point of the segment lies within the previous # ones in the current sectionmake for seg2 in segment_pairs[0: i + 1]: if is_inside_cylinder(seg1, seg2): return True return False
python
def is_back_tracking(neurite): ''' Check if a neurite process backtracks to a previous node. Back-tracking takes place when a daughter of a branching process goes back and either overlaps with a previous point, or lies inside the cylindrical volume of the latter. Args: neurite(Neurite): neurite to operate on Returns: True Under the following scenaria: 1. A segment endpoint falls back and overlaps with a previous segment's point 2. The geometry of a segment overlaps with a previous one in the section ''' def pair(segs): ''' Pairs the input list into triplets''' return zip(segs, segs[1:]) def coords(node): ''' Returns the first three values of the tree that correspond to the x, y, z coordinates''' return node[COLS.XYZ] def max_radius(seg): ''' Returns maximum radius from the two segment endpoints''' return max(seg[0][COLS.R], seg[1][COLS.R]) def is_not_zero_seg(seg): ''' Returns True if segment has zero length''' return not np.allclose(coords(seg[0]), coords(seg[1])) def is_in_the_same_verse(seg1, seg2): ''' Checks if the vectors face the same direction. This is true if their dot product is greater than zero. ''' v1 = coords(seg2[1]) - coords(seg2[0]) v2 = coords(seg1[1]) - coords(seg1[0]) return np.dot(v1, v2) >= 0 def is_seg2_within_seg1_radius(dist, seg1, seg2): ''' Checks whether the orthogonal distance from the point at the end of seg1 to seg2 segment body is smaller than the sum of their radii ''' return dist <= max_radius(seg1) + max_radius(seg2) def is_seg1_overlapping_with_seg2(seg1, seg2): '''Checks if a segment is in proximity of another one upstream''' # get the coordinates of seg2 (from the origin) s1 = coords(seg2[0]) s2 = coords(seg2[1]) # vector of the center of seg2 (from the origin) C = 0.5 * (s1 + s2) # endpoint of seg1 (from the origin) P = coords(seg1[1]) # vector from the center C of seg2 to the endpoint P of seg1 CP = P - C # vector of seg2 S1S2 = s2 - s1 # projection of CP upon seg2 prj = mm.vector_projection(CP, S1S2) # check if the distance of the orthogonal complement of CP projection on S1S2 # (vertical distance from P to seg2) is smaller than the sum of the radii. (overlap) # If not exit early, because there is no way that backtracking can feasible if not is_seg2_within_seg1_radius(np.linalg.norm(CP - prj), seg1, seg2): return False # projection lies within the length of the cylinder. Check if the distance between # the center C of seg2 and the projection of the end point of seg1, P is smaller than # half of the others length plus a 5% tolerance return np.linalg.norm(prj) < 0.55 * np.linalg.norm(S1S2) def is_inside_cylinder(seg1, seg2): ''' Checks if seg2 approximately lies within a cylindrical volume of seg1. Two conditions must be satisfied: 1. The two segments are not facing the same direction (seg2 comes back to seg1) 2. seg2 is overlaping with seg1 ''' return not is_in_the_same_verse(seg1, seg2) and is_seg1_overlapping_with_seg2(seg1, seg2) # filter out single segment sections section_itr = (snode for snode in neurite.iter_sections() if snode.points.shape[0] > 2) for snode in section_itr: # group each section's points intro triplets segment_pairs = list(filter(is_not_zero_seg, pair(snode.points))) # filter out zero length segments for i, seg1 in enumerate(segment_pairs[1:]): # check if the end point of the segment lies within the previous # ones in the current sectionmake for seg2 in segment_pairs[0: i + 1]: if is_inside_cylinder(seg1, seg2): return True return False
[ "def", "is_back_tracking", "(", "neurite", ")", ":", "def", "pair", "(", "segs", ")", ":", "''' Pairs the input list into triplets'''", "return", "zip", "(", "segs", ",", "segs", "[", "1", ":", "]", ")", "def", "coords", "(", "node", ")", ":", "''' Returns the first three values of the tree that correspond to the x, y, z coordinates'''", "return", "node", "[", "COLS", ".", "XYZ", "]", "def", "max_radius", "(", "seg", ")", ":", "''' Returns maximum radius from the two segment endpoints'''", "return", "max", "(", "seg", "[", "0", "]", "[", "COLS", ".", "R", "]", ",", "seg", "[", "1", "]", "[", "COLS", ".", "R", "]", ")", "def", "is_not_zero_seg", "(", "seg", ")", ":", "''' Returns True if segment has zero length'''", "return", "not", "np", ".", "allclose", "(", "coords", "(", "seg", "[", "0", "]", ")", ",", "coords", "(", "seg", "[", "1", "]", ")", ")", "def", "is_in_the_same_verse", "(", "seg1", ",", "seg2", ")", ":", "''' Checks if the vectors face the same direction. This\n is true if their dot product is greater than zero.\n '''", "v1", "=", "coords", "(", "seg2", "[", "1", "]", ")", "-", "coords", "(", "seg2", "[", "0", "]", ")", "v2", "=", "coords", "(", "seg1", "[", "1", "]", ")", "-", "coords", "(", "seg1", "[", "0", "]", ")", "return", "np", ".", "dot", "(", "v1", ",", "v2", ")", ">=", "0", "def", "is_seg2_within_seg1_radius", "(", "dist", ",", "seg1", ",", "seg2", ")", ":", "''' Checks whether the orthogonal distance from the point at the end of\n seg1 to seg2 segment body is smaller than the sum of their radii\n '''", "return", "dist", "<=", "max_radius", "(", "seg1", ")", "+", "max_radius", "(", "seg2", ")", "def", "is_seg1_overlapping_with_seg2", "(", "seg1", ",", "seg2", ")", ":", "'''Checks if a segment is in proximity of another one upstream'''", "# get the coordinates of seg2 (from the origin)", "s1", "=", "coords", "(", "seg2", "[", "0", "]", ")", "s2", "=", "coords", "(", "seg2", "[", "1", "]", ")", "# vector of the center of seg2 (from the origin)", "C", "=", "0.5", "*", "(", "s1", "+", "s2", ")", "# endpoint of seg1 (from the origin)", "P", "=", "coords", "(", "seg1", "[", "1", "]", ")", "# vector from the center C of seg2 to the endpoint P of seg1", "CP", "=", "P", "-", "C", "# vector of seg2", "S1S2", "=", "s2", "-", "s1", "# projection of CP upon seg2", "prj", "=", "mm", ".", "vector_projection", "(", "CP", ",", "S1S2", ")", "# check if the distance of the orthogonal complement of CP projection on S1S2", "# (vertical distance from P to seg2) is smaller than the sum of the radii. (overlap)", "# If not exit early, because there is no way that backtracking can feasible", "if", "not", "is_seg2_within_seg1_radius", "(", "np", ".", "linalg", ".", "norm", "(", "CP", "-", "prj", ")", ",", "seg1", ",", "seg2", ")", ":", "return", "False", "# projection lies within the length of the cylinder. Check if the distance between", "# the center C of seg2 and the projection of the end point of seg1, P is smaller than", "# half of the others length plus a 5% tolerance", "return", "np", ".", "linalg", ".", "norm", "(", "prj", ")", "<", "0.55", "*", "np", ".", "linalg", ".", "norm", "(", "S1S2", ")", "def", "is_inside_cylinder", "(", "seg1", ",", "seg2", ")", ":", "''' Checks if seg2 approximately lies within a cylindrical volume of seg1.\n Two conditions must be satisfied:\n 1. The two segments are not facing the same direction (seg2 comes back to seg1)\n 2. 
seg2 is overlaping with seg1\n '''", "return", "not", "is_in_the_same_verse", "(", "seg1", ",", "seg2", ")", "and", "is_seg1_overlapping_with_seg2", "(", "seg1", ",", "seg2", ")", "# filter out single segment sections", "section_itr", "=", "(", "snode", "for", "snode", "in", "neurite", ".", "iter_sections", "(", ")", "if", "snode", ".", "points", ".", "shape", "[", "0", "]", ">", "2", ")", "for", "snode", "in", "section_itr", ":", "# group each section's points intro triplets", "segment_pairs", "=", "list", "(", "filter", "(", "is_not_zero_seg", ",", "pair", "(", "snode", ".", "points", ")", ")", ")", "# filter out zero length segments", "for", "i", ",", "seg1", "in", "enumerate", "(", "segment_pairs", "[", "1", ":", "]", ")", ":", "# check if the end point of the segment lies within the previous", "# ones in the current sectionmake", "for", "seg2", "in", "segment_pairs", "[", "0", ":", "i", "+", "1", "]", ":", "if", "is_inside_cylinder", "(", "seg1", ",", "seg2", ")", ":", "return", "True", "return", "False" ]
Check if a neurite process backtracks to a previous node. Back-tracking takes place when a daughter of a branching process goes back and either overlaps with a previous point, or lies inside the cylindrical volume of the latter. Args: neurite(Neurite): neurite to operate on Returns: True Under the following scenaria: 1. A segment endpoint falls back and overlaps with a previous segment's point 2. The geometry of a segment overlaps with a previous one in the section
[ "Check", "if", "a", "neurite", "process", "backtracks", "to", "a", "previous", "node", ".", "Back", "-", "tracking", "takes", "place", "when", "a", "daughter", "of", "a", "branching", "process", "goes", "back", "and", "either", "overlaps", "with", "a", "previous", "point", "or", "lies", "inside", "the", "cylindrical", "volume", "of", "the", "latter", "." ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/morphtree.py#L91-L187
train
235,654
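The geometric core is the projection test. mm.vector_projection is not included in the record, but under its usual meaning (the component of one vector along another) the overlap check can be replayed in isolation; the numbers below are made up:

import numpy as np

def vector_projection(v, onto):
    # projection of v onto the direction of `onto` (assumed non-zero)
    onto = np.asarray(onto, dtype=float)
    return np.dot(v, onto) / np.dot(onto, onto) * onto

s1, s2 = np.array([0.0, 0.0, 0.0]), np.array([2.0, 0.0, 0.0])  # seg2 body
P = np.array([1.0, 0.1, 0.0])                                  # seg1 endpoint
C = 0.5 * (s1 + s2)
prj = vector_projection(P - C, s2 - s1)
print(np.linalg.norm((P - C) - prj))                         # orthogonal distance to seg2
print(np.linalg.norm(prj) < 0.55 * np.linalg.norm(s2 - s1))  # inside the tolerant half-length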
BlueBrain/NeuroM
neurom/check/morphtree.py
get_flat_neurites
def get_flat_neurites(neuron, tol=0.1, method='ratio'): '''Check if a neuron has neurites that are flat within a tolerance Args: neurite(Neurite): neurite to operate on tol(float): the tolerance or the ratio method(string): 'tolerance' or 'ratio' described in :meth:`is_flat` Returns: Bool list corresponding to the flatness check for each neurite in neuron neurites with respect to the given criteria ''' return [n for n in neuron.neurites if is_flat(n, tol, method)]
python
def get_flat_neurites(neuron, tol=0.1, method='ratio'): '''Check if a neuron has neurites that are flat within a tolerance Args: neurite(Neurite): neurite to operate on tol(float): the tolerance or the ratio method(string): 'tolerance' or 'ratio' described in :meth:`is_flat` Returns: Bool list corresponding to the flatness check for each neurite in neuron neurites with respect to the given criteria ''' return [n for n in neuron.neurites if is_flat(n, tol, method)]
[ "def", "get_flat_neurites", "(", "neuron", ",", "tol", "=", "0.1", ",", "method", "=", "'ratio'", ")", ":", "return", "[", "n", "for", "n", "in", "neuron", ".", "neurites", "if", "is_flat", "(", "n", ",", "tol", ",", "method", ")", "]" ]
Check if a neuron has neurites that are flat within a tolerance Args: neurite(Neurite): neurite to operate on tol(float): the tolerance or the ratio method(string): 'tolerance' or 'ratio' described in :meth:`is_flat` Returns: Bool list corresponding to the flatness check for each neurite in neuron neurites with respect to the given criteria
[ "Check", "if", "a", "neuron", "has", "neurites", "that", "are", "flat", "within", "a", "tolerance" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/morphtree.py#L190-L202
train
235,655
BlueBrain/NeuroM
neurom/check/morphtree.py
get_nonmonotonic_neurites
def get_nonmonotonic_neurites(neuron, tol=1e-6): '''Get neurites that are not monotonic Args: neurite(Neurite): neurite to operate on tol(float): the tolerance or the ratio Returns: list of neurites that do not satisfy monotonicity test ''' return [n for n in neuron.neurites if not is_monotonic(n, tol)]
python
def get_nonmonotonic_neurites(neuron, tol=1e-6): '''Get neurites that are not monotonic Args: neurite(Neurite): neurite to operate on tol(float): the tolerance or the ratio Returns: list of neurites that do not satisfy monotonicity test ''' return [n for n in neuron.neurites if not is_monotonic(n, tol)]
[ "def", "get_nonmonotonic_neurites", "(", "neuron", ",", "tol", "=", "1e-6", ")", ":", "return", "[", "n", "for", "n", "in", "neuron", ".", "neurites", "if", "not", "is_monotonic", "(", "n", ",", "tol", ")", "]" ]
Get neurites that are not monotonic Args: neurite(Neurite): neurite to operate on tol(float): the tolerance or the ratio Returns: list of neurites that do not satisfy monotonicity test
[ "Get", "neurites", "that", "are", "not", "monotonic" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/morphtree.py#L205-L215
train
235,656
BlueBrain/NeuroM
examples/radius_of_gyration.py
segment_centre_of_mass
def segment_centre_of_mass(seg): '''Calculate and return centre of mass of a segment. Calculated as centre of mass of conical frustum''' h = mm.segment_length(seg) r0 = seg[0][COLS.R] r1 = seg[1][COLS.R] num = r0 * r0 + 2 * r0 * r1 + 3 * r1 * r1 denom = 4 * (r0 * r0 + r0 * r1 + r1 * r1) centre_of_mass_z_loc = num / denom return seg[0][COLS.XYZ] + (centre_of_mass_z_loc / h) * (seg[1][COLS.XYZ] - seg[0][COLS.XYZ])
python
def segment_centre_of_mass(seg): '''Calculate and return centre of mass of a segment. Calculated as centre of mass of conical frustum''' h = mm.segment_length(seg) r0 = seg[0][COLS.R] r1 = seg[1][COLS.R] num = r0 * r0 + 2 * r0 * r1 + 3 * r1 * r1 denom = 4 * (r0 * r0 + r0 * r1 + r1 * r1) centre_of_mass_z_loc = num / denom return seg[0][COLS.XYZ] + (centre_of_mass_z_loc / h) * (seg[1][COLS.XYZ] - seg[0][COLS.XYZ])
[ "def", "segment_centre_of_mass", "(", "seg", ")", ":", "h", "=", "mm", ".", "segment_length", "(", "seg", ")", "r0", "=", "seg", "[", "0", "]", "[", "COLS", ".", "R", "]", "r1", "=", "seg", "[", "1", "]", "[", "COLS", ".", "R", "]", "num", "=", "r0", "*", "r0", "+", "2", "*", "r0", "*", "r1", "+", "3", "*", "r1", "*", "r1", "denom", "=", "4", "*", "(", "r0", "*", "r0", "+", "r0", "*", "r1", "+", "r1", "*", "r1", ")", "centre_of_mass_z_loc", "=", "num", "/", "denom", "return", "seg", "[", "0", "]", "[", "COLS", ".", "XYZ", "]", "+", "(", "centre_of_mass_z_loc", "/", "h", ")", "*", "(", "seg", "[", "1", "]", "[", "COLS", ".", "XYZ", "]", "-", "seg", "[", "0", "]", "[", "COLS", ".", "XYZ", "]", ")" ]
Calculate and return centre of mass of a segment. Calculated as centre of mass of conical frustum
[ "Calculate", "and", "return", "centre", "of", "mass", "of", "a", "segment", "." ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/radius_of_gyration.py#L38-L48
train
235,657
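The num/denom ratio above is the dimensionless fractional height of a conical frustum's centroid measured from the r0 face (the textbook centroid height is that fraction times h). Two limiting cases pin the formula down:

def frustum_centroid_fraction(r0, r1):
    # fraction of the frustum height at which the centroid sits, from the r0 face
    return (r0 * r0 + 2 * r0 * r1 + 3 * r1 * r1) / (4 * (r0 * r0 + r0 * r1 + r1 * r1))

assert frustum_centroid_fraction(1.0, 1.0) == 0.5    # cylinder: halfway up
assert frustum_centroid_fraction(0.0, 1.0) == 0.75   # cone with apex at r0: 3/4 toward the base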
BlueBrain/NeuroM
examples/radius_of_gyration.py
neurite_centre_of_mass
def neurite_centre_of_mass(neurite): '''Calculate and return centre of mass of a neurite.''' centre_of_mass = np.zeros(3) total_volume = 0 seg_vol = np.array(map(mm.segment_volume, nm.iter_segments(neurite))) seg_centre_of_mass = np.array(map(segment_centre_of_mass, nm.iter_segments(neurite))) # multiply array of scalars with array of arrays # http://stackoverflow.com/questions/5795700/multiply-numpy-array-of-scalars-by-array-of-vectors seg_centre_of_mass = seg_centre_of_mass * seg_vol[:, np.newaxis] centre_of_mass = np.sum(seg_centre_of_mass, axis=0) total_volume = np.sum(seg_vol) return centre_of_mass / total_volume
python
def neurite_centre_of_mass(neurite): '''Calculate and return centre of mass of a neurite.''' centre_of_mass = np.zeros(3) total_volume = 0 seg_vol = np.array(map(mm.segment_volume, nm.iter_segments(neurite))) seg_centre_of_mass = np.array(map(segment_centre_of_mass, nm.iter_segments(neurite))) # multiply array of scalars with array of arrays # http://stackoverflow.com/questions/5795700/multiply-numpy-array-of-scalars-by-array-of-vectors seg_centre_of_mass = seg_centre_of_mass * seg_vol[:, np.newaxis] centre_of_mass = np.sum(seg_centre_of_mass, axis=0) total_volume = np.sum(seg_vol) return centre_of_mass / total_volume
[ "def", "neurite_centre_of_mass", "(", "neurite", ")", ":", "centre_of_mass", "=", "np", ".", "zeros", "(", "3", ")", "total_volume", "=", "0", "seg_vol", "=", "np", ".", "array", "(", "map", "(", "mm", ".", "segment_volume", ",", "nm", ".", "iter_segments", "(", "neurite", ")", ")", ")", "seg_centre_of_mass", "=", "np", ".", "array", "(", "map", "(", "segment_centre_of_mass", ",", "nm", ".", "iter_segments", "(", "neurite", ")", ")", ")", "# multiply array of scalars with array of arrays", "# http://stackoverflow.com/questions/5795700/multiply-numpy-array-of-scalars-by-array-of-vectors", "seg_centre_of_mass", "=", "seg_centre_of_mass", "*", "seg_vol", "[", ":", ",", "np", ".", "newaxis", "]", "centre_of_mass", "=", "np", ".", "sum", "(", "seg_centre_of_mass", ",", "axis", "=", "0", ")", "total_volume", "=", "np", ".", "sum", "(", "seg_vol", ")", "return", "centre_of_mass", "/", "total_volume" ]
Calculate and return centre of mass of a neurite.
[ "Calculate", "and", "return", "centre", "of", "mass", "of", "a", "neurite", "." ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/radius_of_gyration.py#L51-L64
train
235,658
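Note that np.array(map(...)) only builds a numeric array under Python 2; on Python 3, map returns an iterator, which numpy would wrap as a single object. A version of the same volume-weighted average that sidesteps that, with made-up inputs:

import numpy as np

def weighted_centre(volumes, centres):
    # volumes: (N,) segment volumes; centres: (N, 3) segment centres of mass
    volumes = np.asarray(volumes, dtype=float)
    centres = np.asarray(centres, dtype=float)
    return (centres * volumes[:, np.newaxis]).sum(axis=0) / volumes.sum()

print(weighted_centre([1.0, 3.0], [[0.0, 0.0, 0.0], [4.0, 0.0, 0.0]]))  # -> [3. 0. 0.]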
BlueBrain/NeuroM
examples/radius_of_gyration.py
distance_sqr
def distance_sqr(point, seg): '''Calculate and return square Euclidean distance from given point to centre of mass of given segment.''' centre_of_mass = segment_centre_of_mass(seg) return sum(pow(np.subtract(point, centre_of_mass), 2))
python
def distance_sqr(point, seg): '''Calculate and return square Euclidean distance from given point to centre of mass of given segment.''' centre_of_mass = segment_centre_of_mass(seg) return sum(pow(np.subtract(point, centre_of_mass), 2))
[ "def", "distance_sqr", "(", "point", ",", "seg", ")", ":", "centre_of_mass", "=", "segment_centre_of_mass", "(", "seg", ")", "return", "sum", "(", "pow", "(", "np", ".", "subtract", "(", "point", ",", "centre_of_mass", ")", ",", "2", ")", ")" ]
Calculate and return square Euclidean distance from given point to centre of mass of given segment.
[ "Calculate", "and", "return", "square", "Euclidian", "distance", "from", "given", "point", "to", "centre", "of", "mass", "of", "given", "segment", "." ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/radius_of_gyration.py#L67-L71
train
235,659
BlueBrain/NeuroM
examples/radius_of_gyration.py
radius_of_gyration
def radius_of_gyration(neurite): '''Calculate and return radius of gyration of a given neurite.''' centre_mass = neurite_centre_of_mass(neurite) sum_sqr_distance = 0 N = 0 dist_sqr = [distance_sqr(centre_mass, s) for s in nm.iter_segments(neurite)] sum_sqr_distance = np.sum(dist_sqr) N = len(dist_sqr) return np.sqrt(sum_sqr_distance / N)
python
def radius_of_gyration(neurite): '''Calculate and return radius of gyration of a given neurite.''' centre_mass = neurite_centre_of_mass(neurite) sum_sqr_distance = 0 N = 0 dist_sqr = [distance_sqr(centre_mass, s) for s in nm.iter_segments(neurite)] sum_sqr_distance = np.sum(dist_sqr) N = len(dist_sqr) return np.sqrt(sum_sqr_distance / N)
[ "def", "radius_of_gyration", "(", "neurite", ")", ":", "centre_mass", "=", "neurite_centre_of_mass", "(", "neurite", ")", "sum_sqr_distance", "=", "0", "N", "=", "0", "dist_sqr", "=", "[", "distance_sqr", "(", "centre_mass", ",", "s", ")", "for", "s", "in", "nm", ".", "iter_segments", "(", "neurite", ")", "]", "sum_sqr_distance", "=", "np", ".", "sum", "(", "dist_sqr", ")", "N", "=", "len", "(", "dist_sqr", ")", "return", "np", ".", "sqrt", "(", "sum_sqr_distance", "/", "N", ")" ]
Calculate and return radius of gyration of a given neurite.
[ "Calculate", "and", "return", "radius", "of", "gyration", "of", "a", "given", "neurite", "." ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/radius_of_gyration.py#L74-L82
train
235,660
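Behind the NeuroM plumbing this is just the root-mean-square distance of the segment mass points from their centre; a self-contained sketch with plain, equally weighted points:

import numpy as np

def gyration_radius(points):
    points = np.asarray(points, dtype=float)
    centre = points.mean(axis=0)
    return np.sqrt(((points - centre) ** 2).sum(axis=1).mean())

print(gyration_radius([[1.0, 0.0, 0.0], [-1.0, 0.0, 0.0]]))  # -> 1.0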
BlueBrain/NeuroM
apps/__main__.py
view
def view(input_file, plane, backend): '''A simple neuron viewer''' if backend == 'matplotlib': from neurom.viewer import draw kwargs = { 'mode': '3d' if plane == '3d' else '2d', } if plane != '3d': kwargs['plane'] = plane draw(load_neuron(input_file), **kwargs) else: from neurom.view.plotly import draw draw(load_neuron(input_file), plane=plane) if backend == 'matplotlib': import matplotlib.pyplot as plt plt.show()
python
def view(input_file, plane, backend): '''A simple neuron viewer''' if backend == 'matplotlib': from neurom.viewer import draw kwargs = { 'mode': '3d' if plane == '3d' else '2d', } if plane != '3d': kwargs['plane'] = plane draw(load_neuron(input_file), **kwargs) else: from neurom.view.plotly import draw draw(load_neuron(input_file), plane=plane) if backend == 'matplotlib': import matplotlib.pyplot as plt plt.show()
[ "def", "view", "(", "input_file", ",", "plane", ",", "backend", ")", ":", "if", "backend", "==", "'matplotlib'", ":", "from", "neurom", ".", "viewer", "import", "draw", "kwargs", "=", "{", "'mode'", ":", "'3d'", "if", "plane", "==", "'3d'", "else", "'2d'", ",", "}", "if", "plane", "!=", "'3d'", ":", "kwargs", "[", "'plane'", "]", "=", "plane", "draw", "(", "load_neuron", "(", "input_file", ")", ",", "*", "*", "kwargs", ")", "else", ":", "from", "neurom", ".", "view", ".", "plotly", "import", "draw", "draw", "(", "load_neuron", "(", "input_file", ")", ",", "plane", "=", "plane", ")", "if", "backend", "==", "'matplotlib'", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "plt", ".", "show", "(", ")" ]
A simple neuron viewer
[ "A", "simple", "neuron", "viewer" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/apps/__main__.py#L23-L39
train
235,661
BlueBrain/NeuroM
neurom/apps/annotate.py
generate_annotation
def generate_annotation(result, settings): '''Generate the annotation for a given checker Arguments neuron(Neuron): The neuron object checker: A tuple where the first item is the checking function (usually from neuron_checks) and the second item is a dictionary of settings for the annotation. It must contain the keys name, label and color Returns An S-expression-like string representing the annotation ''' if result.status: return "" header = ("\n\n" "({label} ; MUK_ANNOTATION\n" " (Color {color}) ; MUK_ANNOTATION\n" " (Name \"{name}\") ; MUK_ANNOTATION").format(**settings) points = [p for _, _points in result.info for p in _points] annotations = (" ({0} {1} {2} 0.50) ; MUK_ANNOTATION".format( p[COLS.X], p[COLS.Y], p[COLS.Z]) for p in points) footer = ") ; MUK_ANNOTATION\n" return '\n'.join(chain.from_iterable(([header], annotations, [footer])))
python
def generate_annotation(result, settings): '''Generate the annotation for a given checker Arguments neuron(Neuron): The neuron object checker: A tuple where the first item is the checking function (usually from neuron_checks) and the second item is a dictionary of settings for the annotation. It must contain the keys name, label and color Returns An S-expression-like string representing the annotation ''' if result.status: return "" header = ("\n\n" "({label} ; MUK_ANNOTATION\n" " (Color {color}) ; MUK_ANNOTATION\n" " (Name \"{name}\") ; MUK_ANNOTATION").format(**settings) points = [p for _, _points in result.info for p in _points] annotations = (" ({0} {1} {2} 0.50) ; MUK_ANNOTATION".format( p[COLS.X], p[COLS.Y], p[COLS.Z]) for p in points) footer = ") ; MUK_ANNOTATION\n" return '\n'.join(chain.from_iterable(([header], annotations, [footer])))
[ "def", "generate_annotation", "(", "result", ",", "settings", ")", ":", "if", "result", ".", "status", ":", "return", "\"\"", "header", "=", "(", "\"\\n\\n\"", "\"({label} ; MUK_ANNOTATION\\n\"", "\" (Color {color}) ; MUK_ANNOTATION\\n\"", "\" (Name \\\"{name}\\\") ; MUK_ANNOTATION\"", ")", ".", "format", "(", "*", "*", "settings", ")", "points", "=", "[", "p", "for", "_", ",", "_points", "in", "result", ".", "info", "for", "p", "in", "_points", "]", "annotations", "=", "(", "\" ({0} {1} {2} 0.50) ; MUK_ANNOTATION\"", ".", "format", "(", "p", "[", "COLS", ".", "X", "]", ",", "p", "[", "COLS", ".", "Y", "]", ",", "p", "[", "COLS", ".", "Z", "]", ")", "for", "p", "in", "points", ")", "footer", "=", "\") ; MUK_ANNOTATION\\n\"", "return", "'\\n'", ".", "join", "(", "chain", ".", "from_iterable", "(", "(", "[", "header", "]", ",", "annotations", ",", "[", "footer", "]", ")", ")", ")" ]
Generate the annotation for a given checker Arguments neuron(Neuron): The neuron object checker: A tuple where the first item is the checking function (usually from neuron_checks) and the second item is a dictionary of settings for the annotation. It must contain the keys name, label and color Returns An S-expression-like string representing the annotation
[ "Generate", "the", "annotation", "for", "a", "given", "checker" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/apps/annotate.py#L37-L62
train
235,662
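The emitted annotation is plain text; running the same formatting over a hypothetical failed-check result shows its shape (CheckResult here is a stand-in namedtuple, not the NeuroM class):

from collections import namedtuple

CheckResult = namedtuple('CheckResult', 'status info')
result = CheckResult(status=False, info=[('section 1', [(1.0, 2.0, 3.0)])])
settings = {'name': 'fat end', 'label': 'Circle3', 'color': 'Blue'}

header = ('\n\n({label} ; MUK_ANNOTATION\n'
          ' (Color {color}) ; MUK_ANNOTATION\n'
          ' (Name "{name}") ; MUK_ANNOTATION').format(**settings)
points = [p for _, pts in result.info for p in pts]
body = '\n'.join(' ({0} {1} {2} 0.50) ; MUK_ANNOTATION'.format(*p) for p in points)
print('\n'.join([header, body, ') ; MUK_ANNOTATION\n']))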
BlueBrain/NeuroM
neurom/apps/annotate.py
annotate
def annotate(results, settings): '''Concatenate the annotations of all checkers''' annotations = (generate_annotation(result, setting) for result, setting in zip(results, settings)) return '\n'.join(annot for annot in annotations if annot)
python
def annotate(results, settings): '''Concatenate the annotations of all checkers''' annotations = (generate_annotation(result, setting) for result, setting in zip(results, settings)) return '\n'.join(annot for annot in annotations if annot)
[ "def", "annotate", "(", "results", ",", "settings", ")", ":", "annotations", "=", "(", "generate_annotation", "(", "result", ",", "setting", ")", "for", "result", ",", "setting", "in", "zip", "(", "results", ",", "settings", ")", ")", "return", "'\\n'", ".", "join", "(", "annot", "for", "annot", "in", "annotations", "if", "annot", ")" ]
Concatenate the annotations of all checkers
[ "Concatenate", "the", "annotations", "of", "all", "checkers" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/apps/annotate.py#L65-L69
train
235,663
BlueBrain/NeuroM
neurom/core/point.py
as_point
def as_point(row): '''Create a Point from a data block row''' return Point(row[COLS.X], row[COLS.Y], row[COLS.Z], row[COLS.R], int(row[COLS.TYPE]))
python
def as_point(row): '''Create a Point from a data block row''' return Point(row[COLS.X], row[COLS.Y], row[COLS.Z], row[COLS.R], int(row[COLS.TYPE]))
[ "def", "as_point", "(", "row", ")", ":", "return", "Point", "(", "row", "[", "COLS", ".", "X", "]", ",", "row", "[", "COLS", ".", "Y", "]", ",", "row", "[", "COLS", ".", "Z", "]", ",", "row", "[", "COLS", ".", "R", "]", ",", "int", "(", "row", "[", "COLS", ".", "TYPE", "]", ")", ")" ]
Create a Point from a data block row
[ "Create", "a", "Point", "from", "a", "data", "block", "row" ]
254bb73535b20053d175bc4725bade662177d12b
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/point.py#L38-L41
train
235,664
jambonsw/django-improved-user
src/improved_user/managers.py
UserManager.create_superuser
def create_superuser(self, email, password, **extra_fields): """Save new User with is_staff and is_superuser set to True""" extra_fields.setdefault('is_staff', True) extra_fields.setdefault('is_superuser', True) if extra_fields.get('is_staff') is not True: raise ValueError('Superuser must have is_staff=True.') if extra_fields.get('is_superuser') is not True: raise ValueError('Superuser must have is_superuser=True.') return self._create_user(email, password, **extra_fields)
python
def create_superuser(self, email, password, **extra_fields): """Save new User with is_staff and is_superuser set to True""" extra_fields.setdefault('is_staff', True) extra_fields.setdefault('is_superuser', True) if extra_fields.get('is_staff') is not True: raise ValueError('Superuser must have is_staff=True.') if extra_fields.get('is_superuser') is not True: raise ValueError('Superuser must have is_superuser=True.') return self._create_user(email, password, **extra_fields)
[ "def", "create_superuser", "(", "self", ",", "email", ",", "password", ",", "*", "*", "extra_fields", ")", ":", "extra_fields", ".", "setdefault", "(", "'is_staff'", ",", "True", ")", "extra_fields", ".", "setdefault", "(", "'is_superuser'", ",", "True", ")", "if", "extra_fields", ".", "get", "(", "'is_staff'", ")", "is", "not", "True", ":", "raise", "ValueError", "(", "'Superuser must have is_staff=True.'", ")", "if", "extra_fields", ".", "get", "(", "'is_superuser'", ")", "is", "not", "True", ":", "raise", "ValueError", "(", "'Superuser must have is_superuser=True.'", ")", "return", "self", ".", "_create_user", "(", "email", ",", "password", ",", "*", "*", "extra_fields", ")" ]
Save new User with is_staff and is_superuser set to True
[ "Save", "new", "User", "with", "is_staff", "and", "is_superuser", "set", "to", "True" ]
e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4
https://github.com/jambonsw/django-improved-user/blob/e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4/src/improved_user/managers.py#L43-L51
train
235,665
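Inside a configured Django project this behaves like the stock manager; a sketch of both the success path and the guarded failure (the email and password values are made up):

from django.contrib.auth import get_user_model

User = get_user_model()  # assumes AUTH_USER_MODEL points at the improved user model
admin = User.objects.create_superuser('admin@example.com', 's3cret-pass')
assert admin.is_staff and admin.is_superuser

try:
    User.objects.create_superuser('other@example.com', 'pw', is_staff=False)
except ValueError as err:
    print(err)  # Superuser must have is_staff=True.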
jambonsw/django-improved-user
setup.py
load_file_contents
def load_file_contents(file_path, as_list=True): """Load file as string or list""" abs_file_path = join(HERE, file_path) with open(abs_file_path, encoding='utf-8') as file_pointer: if as_list: return file_pointer.read().splitlines() return file_pointer.read()
python
def load_file_contents(file_path, as_list=True): """Load file as string or list""" abs_file_path = join(HERE, file_path) with open(abs_file_path, encoding='utf-8') as file_pointer: if as_list: return file_pointer.read().splitlines() return file_pointer.read()
[ "def", "load_file_contents", "(", "file_path", ",", "as_list", "=", "True", ")", ":", "abs_file_path", "=", "join", "(", "HERE", ",", "file_path", ")", "with", "open", "(", "abs_file_path", ",", "encoding", "=", "'utf-8'", ")", "as", "file_pointer", ":", "if", "as_list", ":", "return", "file_pointer", ".", "read", "(", ")", ".", "splitlines", "(", ")", "return", "file_pointer", ".", "read", "(", ")" ]
Load file as string or list
[ "Load", "file", "as", "string", "or", "list" ]
e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4
https://github.com/jambonsw/django-improved-user/blob/e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4/setup.py#L22-L28
train
235,666
jambonsw/django-improved-user
src/improved_user/forms.py
AbstractUserCreationForm.clean_password2
def clean_password2(self): """ Check whether password 1 and password 2 are equivalent While ideally this would be done in clean, there is a chance a superclass could declare clean and forget to call super. We therefore opt to run this password mismatch check in password2 clean, but to show the error above password1 (as we are unsure whether password 1 or password 2 contains the typo, and putting it above password 2 may lead some users to believe the typo is in just one). """ password1 = self.cleaned_data.get('password1') password2 = self.cleaned_data.get('password2') if password1 and password2 and password1 != password2: self.add_error( 'password1', forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch', )) return password2
python
def clean_password2(self): """ Check whether password 1 and password 2 are equivalent While ideally this would be done in clean, there is a chance a superclass could declare clean and forget to call super. We therefore opt to run this password mismatch check in password2 clean, but to show the error above password1 (as we are unsure whether password 1 or password 2 contains the typo, and putting it above password 2 may lead some users to believe the typo is in just one). """ password1 = self.cleaned_data.get('password1') password2 = self.cleaned_data.get('password2') if password1 and password2 and password1 != password2: self.add_error( 'password1', forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch', )) return password2
[ "def", "clean_password2", "(", "self", ")", ":", "password1", "=", "self", ".", "cleaned_data", ".", "get", "(", "'password1'", ")", "password2", "=", "self", ".", "cleaned_data", ".", "get", "(", "'password2'", ")", "if", "password1", "and", "password2", "and", "password1", "!=", "password2", ":", "self", ".", "add_error", "(", "'password1'", ",", "forms", ".", "ValidationError", "(", "self", ".", "error_messages", "[", "'password_mismatch'", "]", ",", "code", "=", "'password_mismatch'", ",", ")", ")", "return", "password2" ]
Check whether password 1 and password 2 are equivalent While ideally this would be done in clean, there is a chance a superclass could declare clean and forget to call super. We therefore opt to run this password mismatch check in password2 clean, but to show the error above password1 (as we are unsure whether password 1 or password 2 contains the typo, and putting it above password 2 may lead some users to believe the typo is in just one).
[ "Check", "wether", "password", "1", "and", "password", "2", "are", "equivalent" ]
e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4
https://github.com/jambonsw/django-improved-user/blob/e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4/src/improved_user/forms.py#L62-L84
train
235,667
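Binding mismatched passwords to a form built on this class should attach the error to password1; a hypothetical check inside a configured Django project, assuming a concrete UserCreationForm subclass is importable:

from improved_user.forms import UserCreationForm  # concrete subclass assumed available

form = UserCreationForm(data={
    'email': 'user@example.com',
    'password1': 'first-choice',
    'password2': 'second-choice',
})
assert not form.is_valid()
assert 'password1' in form.errors  # mismatch reported above the first password field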
jambonsw/django-improved-user
src/improved_user/forms.py
AbstractUserCreationForm._post_clean
def _post_clean(self): """Run password validation after clean methods When clean methods are run, the user instance does not yet exist. To properly compare model values against the password (in the UserAttributeSimilarityValidator), we wait until we have an instance to compare against. https://code.djangoproject.com/ticket/28127 https://github.com/django/django/pull/8408 Has no effect in Django prior to 1.9 May become unnecessary in Django 2.0 (if this superclass changes) """ super()._post_clean() # updates self.instance with form data password = self.cleaned_data.get('password1') if password: try: password_validation.validate_password(password, self.instance) except ValidationError as error: self.add_error('password1', error)
python
def _post_clean(self): """Run password validation after clean methods When clean methods are run, the user instance does not yet exist. To properly compare model values against the password (in the UserAttributeSimilarityValidator), we wait until we have an instance to compare against. https://code.djangoproject.com/ticket/28127 https://github.com/django/django/pull/8408 Has no effect in Django prior to 1.9 May become unnecessary in Django 2.0 (if this superclass changes) """ super()._post_clean() # updates self.instance with form data password = self.cleaned_data.get('password1') if password: try: password_validation.validate_password(password, self.instance) except ValidationError as error: self.add_error('password1', error)
[ "def", "_post_clean", "(", "self", ")", ":", "super", "(", ")", ".", "_post_clean", "(", ")", "# updates self.instance with form data", "password", "=", "self", ".", "cleaned_data", ".", "get", "(", "'password1'", ")", "if", "password", ":", "try", ":", "password_validation", ".", "validate_password", "(", "password", ",", "self", ".", "instance", ")", "except", "ValidationError", "as", "error", ":", "self", ".", "add_error", "(", "'password1'", ",", "error", ")" ]
Run password validation after clean methods When clean methods are run, the user instance does not yet exist. To properly compare model values against the password (in the UserAttributeSimilarityValidator), we wait until we have an instance to compare against. https://code.djangoproject.com/ticket/28127 https://github.com/django/django/pull/8408 Has no effect in Django prior to 1.9 May become unnecessary in Django 2.0 (if this superclass changes)
[ "Run", "password", "validaton", "after", "clean", "methods" ]
e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4
https://github.com/jambonsw/django-improved-user/blob/e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4/src/improved_user/forms.py#L86-L107
train
235,668
jambonsw/django-improved-user
src/improved_user/model_mixins.py
EmailAuthMixin.clean
def clean(self): """Override default clean method to normalize email. Call :code:`super().clean()` if overriding. """ super().clean() self.email = self.__class__.objects.normalize_email(self.email)
python
def clean(self): """Override default clean method to normalize email. Call :code:`super().clean()` if overriding. """ super().clean() self.email = self.__class__.objects.normalize_email(self.email)
[ "def", "clean", "(", "self", ")", ":", "super", "(", ")", ".", "clean", "(", ")", "self", ".", "email", "=", "self", ".", "__class__", ".", "objects", ".", "normalize_email", "(", "self", ".", "email", ")" ]
Override default clean method to normalize email. Call :code:`super().clean()` if overriding.
[ "Override", "default", "clean", "method", "to", "normalize", "email", "." ]
e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4
https://github.com/jambonsw/django-improved-user/blob/e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4/src/improved_user/model_mixins.py#L69-L76
train
235,669
mapbox/cligj
cligj/features.py
normalize_feature_inputs
def normalize_feature_inputs(ctx, param, value): """Click callback that normalizes feature input values. Returns a generator over features from the input value. Parameters ---------- ctx: a Click context param: the name of the argument or option value: object The value argument may be one of the following: 1. A list of paths to files containing GeoJSON feature collections or feature sequences. 2. A list of string-encoded coordinate pairs of the form "[lng, lat]", or "lng, lat", or "lng lat". If no value is provided, features will be read from stdin. """ for feature_like in value or ('-',): try: with click.open_file(feature_like) as src: for feature in iter_features(iter(src)): yield feature except IOError: coords = list(coords_from_query(feature_like)) yield { 'type': 'Feature', 'properties': {}, 'geometry': { 'type': 'Point', 'coordinates': coords}}
python
def normalize_feature_inputs(ctx, param, value): """Click callback that normalizes feature input values. Returns a generator over features from the input value. Parameters ---------- ctx: a Click context param: the name of the argument or option value: object The value argument may be one of the following: 1. A list of paths to files containing GeoJSON feature collections or feature sequences. 2. A list of string-encoded coordinate pairs of the form "[lng, lat]", or "lng, lat", or "lng lat". If no value is provided, features will be read from stdin. """ for feature_like in value or ('-',): try: with click.open_file(feature_like) as src: for feature in iter_features(iter(src)): yield feature except IOError: coords = list(coords_from_query(feature_like)) yield { 'type': 'Feature', 'properties': {}, 'geometry': { 'type': 'Point', 'coordinates': coords}}
[ "def", "normalize_feature_inputs", "(", "ctx", ",", "param", ",", "value", ")", ":", "for", "feature_like", "in", "value", "or", "(", "'-'", ",", ")", ":", "try", ":", "with", "click", ".", "open_file", "(", "feature_like", ")", "as", "src", ":", "for", "feature", "in", "iter_features", "(", "iter", "(", "src", ")", ")", ":", "yield", "feature", "except", "IOError", ":", "coords", "=", "list", "(", "coords_from_query", "(", "feature_like", ")", ")", "yield", "{", "'type'", ":", "'Feature'", ",", "'properties'", ":", "{", "}", ",", "'geometry'", ":", "{", "'type'", ":", "'Point'", ",", "'coordinates'", ":", "coords", "}", "}" ]
Click callback that normalizes feature input values. Returns a generator over features from the input value. Parameters ---------- ctx: a Click context param: the name of the argument or option value: object The value argument may be one of the following: 1. A list of paths to files containing GeoJSON feature collections or feature sequences. 2. A list of string-encoded coordinate pairs of the form "[lng, lat]", or "lng, lat", or "lng lat". If no value is provided, features will be read from stdin.
[ "Click", "callback", "that", "normalizes", "feature", "input", "values", "." ]
1815692d99abfb4bc4b2d0411f67fa568f112c05
https://github.com/mapbox/cligj/blob/1815692d99abfb4bc4b2d0411f67fa568f112c05/cligj/features.py#L8-L39
train
235,670
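A usage sketch wiring the callback into a Click command, assuming click and cligj are installed; the command itself is illustrative:

import click
from cligj.features import normalize_feature_inputs

@click.command()
@click.argument('features', nargs=-1, callback=normalize_feature_inputs)
def count(features):
    """Count features from files, coordinate strings, or stdin."""
    click.echo(sum(1 for _ in features))

if __name__ == '__main__':
    count()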
mapbox/cligj
cligj/features.py
iter_features
def iter_features(geojsonfile, func=None): """Extract GeoJSON features from a text file object. Given a file-like object containing a single GeoJSON feature collection text or a sequence of GeoJSON features, iter_features() iterates over lines of the file and yields GeoJSON features. Parameters ---------- geojsonfile: a file-like object The geojsonfile implements the iterator protocol and yields lines of JSON text. func: function, optional A function that will be applied to each extracted feature. It takes a feature object and may return a replacement feature or None -- in which case iter_features does not yield. """ func = func or (lambda x: x) first_line = next(geojsonfile) # Does the geojsonfile contain RS-delimited JSON sequences? if first_line.startswith(u'\x1e'): text_buffer = first_line.strip(u'\x1e') for line in geojsonfile: if line.startswith(u'\x1e'): if text_buffer: obj = json.loads(text_buffer) if 'coordinates' in obj: obj = to_feature(obj) newfeat = func(obj) if newfeat: yield newfeat text_buffer = line.strip(u'\x1e') else: text_buffer += line # complete our parsing with a for-else clause. else: obj = json.loads(text_buffer) if 'coordinates' in obj: obj = to_feature(obj) newfeat = func(obj) if newfeat: yield newfeat # If not, it may contains LF-delimited GeoJSON objects or a single # multi-line pretty-printed GeoJSON object. else: # Try to parse LF-delimited sequences of features or feature # collections produced by, e.g., `jq -c ...`. try: obj = json.loads(first_line) if obj['type'] == 'Feature': newfeat = func(obj) if newfeat: yield newfeat for line in geojsonfile: newfeat = func(json.loads(line)) if newfeat: yield newfeat elif obj['type'] == 'FeatureCollection': for feat in obj['features']: newfeat = func(feat) if newfeat: yield newfeat elif 'coordinates' in obj: newfeat = func(to_feature(obj)) if newfeat: yield newfeat for line in geojsonfile: newfeat = func(to_feature(json.loads(line))) if newfeat: yield newfeat # Indented or pretty-printed GeoJSON features or feature # collections will fail out of the try clause above since # they'll have no complete JSON object on their first line. # To handle these, we slurp in the entire file and parse its # text. except ValueError: text = "".join(chain([first_line], geojsonfile)) obj = json.loads(text) if obj['type'] == 'Feature': newfeat = func(obj) if newfeat: yield newfeat elif obj['type'] == 'FeatureCollection': for feat in obj['features']: newfeat = func(feat) if newfeat: yield newfeat elif 'coordinates' in obj: newfeat = func(to_feature(obj)) if newfeat: yield newfeat
python
def iter_features(geojsonfile, func=None): """Extract GeoJSON features from a text file object. Given a file-like object containing a single GeoJSON feature collection text or a sequence of GeoJSON features, iter_features() iterates over lines of the file and yields GeoJSON features. Parameters ---------- geojsonfile: a file-like object The geojsonfile implements the iterator protocol and yields lines of JSON text. func: function, optional A function that will be applied to each extracted feature. It takes a feature object and may return a replacement feature or None -- in which case iter_features does not yield. """ func = func or (lambda x: x) first_line = next(geojsonfile) # Does the geojsonfile contain RS-delimited JSON sequences? if first_line.startswith(u'\x1e'): text_buffer = first_line.strip(u'\x1e') for line in geojsonfile: if line.startswith(u'\x1e'): if text_buffer: obj = json.loads(text_buffer) if 'coordinates' in obj: obj = to_feature(obj) newfeat = func(obj) if newfeat: yield newfeat text_buffer = line.strip(u'\x1e') else: text_buffer += line # complete our parsing with a for-else clause. else: obj = json.loads(text_buffer) if 'coordinates' in obj: obj = to_feature(obj) newfeat = func(obj) if newfeat: yield newfeat # If not, it may contains LF-delimited GeoJSON objects or a single # multi-line pretty-printed GeoJSON object. else: # Try to parse LF-delimited sequences of features or feature # collections produced by, e.g., `jq -c ...`. try: obj = json.loads(first_line) if obj['type'] == 'Feature': newfeat = func(obj) if newfeat: yield newfeat for line in geojsonfile: newfeat = func(json.loads(line)) if newfeat: yield newfeat elif obj['type'] == 'FeatureCollection': for feat in obj['features']: newfeat = func(feat) if newfeat: yield newfeat elif 'coordinates' in obj: newfeat = func(to_feature(obj)) if newfeat: yield newfeat for line in geojsonfile: newfeat = func(to_feature(json.loads(line))) if newfeat: yield newfeat # Indented or pretty-printed GeoJSON features or feature # collections will fail out of the try clause above since # they'll have no complete JSON object on their first line. # To handle these, we slurp in the entire file and parse its # text. except ValueError: text = "".join(chain([first_line], geojsonfile)) obj = json.loads(text) if obj['type'] == 'Feature': newfeat = func(obj) if newfeat: yield newfeat elif obj['type'] == 'FeatureCollection': for feat in obj['features']: newfeat = func(feat) if newfeat: yield newfeat elif 'coordinates' in obj: newfeat = func(to_feature(obj)) if newfeat: yield newfeat
[ "def", "iter_features", "(", "geojsonfile", ",", "func", "=", "None", ")", ":", "func", "=", "func", "or", "(", "lambda", "x", ":", "x", ")", "first_line", "=", "next", "(", "geojsonfile", ")", "# Does the geojsonfile contain RS-delimited JSON sequences?", "if", "first_line", ".", "startswith", "(", "u'\\x1e'", ")", ":", "text_buffer", "=", "first_line", ".", "strip", "(", "u'\\x1e'", ")", "for", "line", "in", "geojsonfile", ":", "if", "line", ".", "startswith", "(", "u'\\x1e'", ")", ":", "if", "text_buffer", ":", "obj", "=", "json", ".", "loads", "(", "text_buffer", ")", "if", "'coordinates'", "in", "obj", ":", "obj", "=", "to_feature", "(", "obj", ")", "newfeat", "=", "func", "(", "obj", ")", "if", "newfeat", ":", "yield", "newfeat", "text_buffer", "=", "line", ".", "strip", "(", "u'\\x1e'", ")", "else", ":", "text_buffer", "+=", "line", "# complete our parsing with a for-else clause.", "else", ":", "obj", "=", "json", ".", "loads", "(", "text_buffer", ")", "if", "'coordinates'", "in", "obj", ":", "obj", "=", "to_feature", "(", "obj", ")", "newfeat", "=", "func", "(", "obj", ")", "if", "newfeat", ":", "yield", "newfeat", "# If not, it may contains LF-delimited GeoJSON objects or a single", "# multi-line pretty-printed GeoJSON object.", "else", ":", "# Try to parse LF-delimited sequences of features or feature", "# collections produced by, e.g., `jq -c ...`.", "try", ":", "obj", "=", "json", ".", "loads", "(", "first_line", ")", "if", "obj", "[", "'type'", "]", "==", "'Feature'", ":", "newfeat", "=", "func", "(", "obj", ")", "if", "newfeat", ":", "yield", "newfeat", "for", "line", "in", "geojsonfile", ":", "newfeat", "=", "func", "(", "json", ".", "loads", "(", "line", ")", ")", "if", "newfeat", ":", "yield", "newfeat", "elif", "obj", "[", "'type'", "]", "==", "'FeatureCollection'", ":", "for", "feat", "in", "obj", "[", "'features'", "]", ":", "newfeat", "=", "func", "(", "feat", ")", "if", "newfeat", ":", "yield", "newfeat", "elif", "'coordinates'", "in", "obj", ":", "newfeat", "=", "func", "(", "to_feature", "(", "obj", ")", ")", "if", "newfeat", ":", "yield", "newfeat", "for", "line", "in", "geojsonfile", ":", "newfeat", "=", "func", "(", "to_feature", "(", "json", ".", "loads", "(", "line", ")", ")", ")", "if", "newfeat", ":", "yield", "newfeat", "# Indented or pretty-printed GeoJSON features or feature", "# collections will fail out of the try clause above since", "# they'll have no complete JSON object on their first line.", "# To handle these, we slurp in the entire file and parse its", "# text.", "except", "ValueError", ":", "text", "=", "\"\"", ".", "join", "(", "chain", "(", "[", "first_line", "]", ",", "geojsonfile", ")", ")", "obj", "=", "json", ".", "loads", "(", "text", ")", "if", "obj", "[", "'type'", "]", "==", "'Feature'", ":", "newfeat", "=", "func", "(", "obj", ")", "if", "newfeat", ":", "yield", "newfeat", "elif", "obj", "[", "'type'", "]", "==", "'FeatureCollection'", ":", "for", "feat", "in", "obj", "[", "'features'", "]", ":", "newfeat", "=", "func", "(", "feat", ")", "if", "newfeat", ":", "yield", "newfeat", "elif", "'coordinates'", "in", "obj", ":", "newfeat", "=", "func", "(", "to_feature", "(", "obj", ")", ")", "if", "newfeat", ":", "yield", "newfeat" ]
Extract GeoJSON features from a text file object. Given a file-like object containing a single GeoJSON feature collection text or a sequence of GeoJSON features, iter_features() iterates over lines of the file and yields GeoJSON features. Parameters ---------- geojsonfile: a file-like object The geojsonfile implements the iterator protocol and yields lines of JSON text. func: function, optional A function that will be applied to each extracted feature. It takes a feature object and may return a replacement feature or None -- in which case iter_features does not yield.
[ "Extract", "GeoJSON", "features", "from", "a", "text", "file", "object", "." ]
1815692d99abfb4bc4b2d0411f67fa568f112c05
https://github.com/mapbox/cligj/blob/1815692d99abfb4bc4b2d0411f67fa568f112c05/cligj/features.py#L42-L135
train
235,671
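A self-contained sketch feeding iter_features an in-memory line iterator; the single LF-delimited Feature is illustrative:

import io
from cligj.features import iter_features

src = io.StringIO(
    '{"type": "Feature", "properties": {},'
    ' "geometry": {"type": "Point", "coordinates": [0.0, 0.0]}}\n')
for feat in iter_features(iter(src)):
    print(feat['geometry']['coordinates'])  # -> [0.0, 0.0]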
mapbox/cligj
cligj/features.py
iter_query
def iter_query(query): """Accept a filename, stream, or string. Returns an iterator over lines of the query.""" try: itr = click.open_file(query).readlines() except IOError: itr = [query] return itr
python
def iter_query(query): """Accept a filename, stream, or string. Returns an iterator over lines of the query.""" try: itr = click.open_file(query).readlines() except IOError: itr = [query] return itr
[ "def", "iter_query", "(", "query", ")", ":", "try", ":", "itr", "=", "click", ".", "open_file", "(", "query", ")", ".", "readlines", "(", ")", "except", "IOError", ":", "itr", "=", "[", "query", "]", "return", "itr" ]
Accept a filename, stream, or string. Returns an iterator over lines of the query.
[ "Accept", "a", "filename", "stream", "or", "string", ".", "Returns", "an", "iterator", "over", "lines", "of", "the", "query", "." ]
1815692d99abfb4bc4b2d0411f67fa568f112c05
https://github.com/mapbox/cligj/blob/1815692d99abfb4bc4b2d0411f67fa568f112c05/cligj/features.py#L154-L161
train
235,672
mapbox/cligj
cligj/features.py
normalize_feature_objects
def normalize_feature_objects(feature_objs): """Takes an iterable of GeoJSON-like Feature mappings or an iterable of objects with a geo interface and normalizes it to the former.""" for obj in feature_objs: if hasattr(obj, "__geo_interface__") and \ 'type' in obj.__geo_interface__.keys() and \ obj.__geo_interface__['type'] == 'Feature': yield obj.__geo_interface__ elif isinstance(obj, dict) and 'type' in obj and \ obj['type'] == 'Feature': yield obj else: raise ValueError("Did not recognize object {0}" "as GeoJSON Feature".format(obj))
python
def normalize_feature_objects(feature_objs): """Takes an iterable of GeoJSON-like Feature mappings or an iterable of objects with a geo interface and normalizes it to the former.""" for obj in feature_objs: if hasattr(obj, "__geo_interface__") and \ 'type' in obj.__geo_interface__.keys() and \ obj.__geo_interface__['type'] == 'Feature': yield obj.__geo_interface__ elif isinstance(obj, dict) and 'type' in obj and \ obj['type'] == 'Feature': yield obj else: raise ValueError("Did not recognize object {0}" "as GeoJSON Feature".format(obj))
[ "def", "normalize_feature_objects", "(", "feature_objs", ")", ":", "for", "obj", "in", "feature_objs", ":", "if", "hasattr", "(", "obj", ",", "\"__geo_interface__\"", ")", "and", "'type'", "in", "obj", ".", "__geo_interface__", ".", "keys", "(", ")", "and", "obj", ".", "__geo_interface__", "[", "'type'", "]", "==", "'Feature'", ":", "yield", "obj", ".", "__geo_interface__", "elif", "isinstance", "(", "obj", ",", "dict", ")", "and", "'type'", "in", "obj", "and", "obj", "[", "'type'", "]", "==", "'Feature'", ":", "yield", "obj", "else", ":", "raise", "ValueError", "(", "\"Did not recognize object {0}\"", "\"as GeoJSON Feature\"", ".", "format", "(", "obj", ")", ")" ]
Takes an iterable of GeoJSON-like Feature mappings or an iterable of objects with a geo interface and normalizes it to the former.
[ "Takes", "an", "iterable", "of", "GeoJSON", "-", "like", "Feature", "mappings", "or", "an", "iterable", "of", "objects", "with", "a", "geo", "interface", "and", "normalizes", "it", "to", "the", "former", "." ]
1815692d99abfb4bc4b2d0411f67fa568f112c05
https://github.com/mapbox/cligj/blob/1815692d99abfb4bc4b2d0411f67fa568f112c05/cligj/features.py#L175-L189
train
235,673
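A sketch showing both accepted inputs, a plain Feature mapping and an object exposing __geo_interface__; both names are illustrative:

from cligj.features import normalize_feature_objects

class PointLike:
    # illustrative object implementing the geo interface protocol
    __geo_interface__ = {
        'type': 'Feature', 'properties': {},
        'geometry': {'type': 'Point', 'coordinates': [1.0, 2.0]}}

mapping = {'type': 'Feature', 'properties': {},
           'geometry': {'type': 'Point', 'coordinates': [3.0, 4.0]}}

for feat in normalize_feature_objects([PointLike(), mapping]):
    print(feat['geometry']['coordinates'])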
ludeeus/pytraccar
pytraccar/api.py
API.api
async def api(self, endpoint, params=None, test=False): """Comunicate with the API.""" data = {} url = "{}/{}".format(self._api, endpoint) try: async with async_timeout.timeout(8, loop=self._loop): response = await self._session.get( url, auth=self._auth, headers=HEADERS, params=params ) if response.status == 200: self._authenticated = True self._connected = True if not test: data = await response.json() elif response.status == 401: self._authenticated = False self._connected = True except asyncio.TimeoutError as error: self._authenticated, self._connected = False, False if not test: _LOGGER.warning("Timeouterror connecting to Traccar, %s", error) except aiohttp.ClientError as error: self._authenticated, self._connected = False, False if not test: _LOGGER.warning("Error connecting to Traccar, %s", error) except socket.gaierror as error: self._authenticated, self._connected = False, False if not test: _LOGGER.warning("Error connecting to Traccar, %s", error) except TypeError as error: self._authenticated, self._connected = False, False if not test: _LOGGER.warning("Error connecting to Traccar, %s", error) except Exception as error: # pylint: disable=broad-except self._authenticated, self._connected = False, False if not test: _LOGGER.warning("Error connecting to Traccar, %s", error) return data
python
async def api(self, endpoint, params=None, test=False): """Comunicate with the API.""" data = {} url = "{}/{}".format(self._api, endpoint) try: async with async_timeout.timeout(8, loop=self._loop): response = await self._session.get( url, auth=self._auth, headers=HEADERS, params=params ) if response.status == 200: self._authenticated = True self._connected = True if not test: data = await response.json() elif response.status == 401: self._authenticated = False self._connected = True except asyncio.TimeoutError as error: self._authenticated, self._connected = False, False if not test: _LOGGER.warning("Timeouterror connecting to Traccar, %s", error) except aiohttp.ClientError as error: self._authenticated, self._connected = False, False if not test: _LOGGER.warning("Error connecting to Traccar, %s", error) except socket.gaierror as error: self._authenticated, self._connected = False, False if not test: _LOGGER.warning("Error connecting to Traccar, %s", error) except TypeError as error: self._authenticated, self._connected = False, False if not test: _LOGGER.warning("Error connecting to Traccar, %s", error) except Exception as error: # pylint: disable=broad-except self._authenticated, self._connected = False, False if not test: _LOGGER.warning("Error connecting to Traccar, %s", error) return data
[ "async", "def", "api", "(", "self", ",", "endpoint", ",", "params", "=", "None", ",", "test", "=", "False", ")", ":", "data", "=", "{", "}", "url", "=", "\"{}/{}\"", ".", "format", "(", "self", ".", "_api", ",", "endpoint", ")", "try", ":", "async", "with", "async_timeout", ".", "timeout", "(", "8", ",", "loop", "=", "self", ".", "_loop", ")", ":", "response", "=", "await", "self", ".", "_session", ".", "get", "(", "url", ",", "auth", "=", "self", ".", "_auth", ",", "headers", "=", "HEADERS", ",", "params", "=", "params", ")", "if", "response", ".", "status", "==", "200", ":", "self", ".", "_authenticated", "=", "True", "self", ".", "_connected", "=", "True", "if", "not", "test", ":", "data", "=", "await", "response", ".", "json", "(", ")", "elif", "response", ".", "status", "==", "401", ":", "self", ".", "_authenticated", "=", "False", "self", ".", "_connected", "=", "True", "except", "asyncio", ".", "TimeoutError", "as", "error", ":", "self", ".", "_authenticated", ",", "self", ".", "_connected", "=", "False", ",", "False", "if", "not", "test", ":", "_LOGGER", ".", "warning", "(", "\"Timeouterror connecting to Traccar, %s\"", ",", "error", ")", "except", "aiohttp", ".", "ClientError", "as", "error", ":", "self", ".", "_authenticated", ",", "self", ".", "_connected", "=", "False", ",", "False", "if", "not", "test", ":", "_LOGGER", ".", "warning", "(", "\"Error connecting to Traccar, %s\"", ",", "error", ")", "except", "socket", ".", "gaierror", "as", "error", ":", "self", ".", "_authenticated", ",", "self", ".", "_connected", "=", "False", ",", "False", "if", "not", "test", ":", "_LOGGER", ".", "warning", "(", "\"Error connecting to Traccar, %s\"", ",", "error", ")", "except", "TypeError", "as", "error", ":", "self", ".", "_authenticated", ",", "self", ".", "_connected", "=", "False", ",", "False", "if", "not", "test", ":", "_LOGGER", ".", "warning", "(", "\"Error connecting to Traccar, %s\"", ",", "error", ")", "except", "Exception", "as", "error", ":", "# pylint: disable=broad-except", "self", ".", "_authenticated", ",", "self", ".", "_connected", "=", "False", ",", "False", "if", "not", "test", ":", "_LOGGER", ".", "warning", "(", "\"Error connecting to Traccar, %s\"", ",", "error", ")", "return", "data" ]
Communicate with the API.
[ "Communicate", "with", "the", "API", "." ]
c7c635c334cc193c2da351a9fc8213d5095f77d6
https://github.com/ludeeus/pytraccar/blob/c7c635c334cc193c2da351a9fc8213d5095f77d6/pytraccar/api.py#L39-L79
train
235,674
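A usage sketch, assuming a reachable Traccar server; the host, the credentials, and the 'devices' endpoint (from Traccar's REST API) are assumptions, not taken from the record above:

import asyncio
import aiohttp
from pytraccar.api import API

async def main(loop):
    async with aiohttp.ClientSession() as session:
        traccar = API(loop, session, 'admin', 'password', '192.168.1.2')
        devices = await traccar.api('devices')  # GET <api-base>/devices
        print(devices)

loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))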
ludeeus/pytraccar
pytraccar/cli.py
runcli
async def runcli(): """Debug of pytraccar.""" async with aiohttp.ClientSession() as session: host = input("IP: ") username = input("Username: ") password = input("Password: ") print("\n\n\n") data = API(LOOP, session, username, password, host) await data.test_connection() print("Authenticated:", data.authenticated) if data.authenticated: await data.get_device_info() print("Authentication:", data.authenticated) print("Geofences:", data.geofences) print("Devices:", data.devices) print("Positions:", data.positions) print("Device info:", data.device_info)
python
async def runcli(): """Debug of pytraccar.""" async with aiohttp.ClientSession() as session: host = input("IP: ") username = input("Username: ") password = input("Password: ") print("\n\n\n") data = API(LOOP, session, username, password, host) await data.test_connection() print("Authenticated:", data.authenticated) if data.authenticated: await data.get_device_info() print("Authentication:", data.authenticated) print("Geofences:", data.geofences) print("Devices:", data.devices) print("Positions:", data.positions) print("Device info:", data.device_info)
[ "async", "def", "runcli", "(", ")", ":", "async", "with", "aiohttp", ".", "ClientSession", "(", ")", "as", "session", ":", "host", "=", "input", "(", "\"IP: \"", ")", "username", "=", "input", "(", "\"Username: \"", ")", "password", "=", "input", "(", "\"Password: \"", ")", "print", "(", "\"\\n\\n\\n\"", ")", "data", "=", "API", "(", "LOOP", ",", "session", ",", "username", ",", "password", ",", "host", ")", "await", "data", ".", "test_connection", "(", ")", "print", "(", "\"Authenticated:\"", ",", "data", ".", "authenticated", ")", "if", "data", ".", "authenticated", ":", "await", "data", ".", "get_device_info", "(", ")", "print", "(", "\"Authentication:\"", ",", "data", ".", "authenticated", ")", "print", "(", "\"Geofences:\"", ",", "data", ".", "geofences", ")", "print", "(", "\"Devices:\"", ",", "data", ".", "devices", ")", "print", "(", "\"Positions:\"", ",", "data", ".", "positions", ")", "print", "(", "\"Device info:\"", ",", "data", ".", "device_info", ")" ]
Debug of pytraccar.
[ "Debug", "of", "pytraccar", "." ]
c7c635c334cc193c2da351a9fc8213d5095f77d6
https://github.com/ludeeus/pytraccar/blob/c7c635c334cc193c2da351a9fc8213d5095f77d6/pytraccar/cli.py#L9-L25
train
235,675
alexdej/puzpy
puz.py
restore
def restore(s, t): """ s is the source string, it can contain '.' t is the target, it's smaller than s by the number of '.'s in s Each char in s is replaced by the corresponding char in t, jumping over '.'s in s. >>> restore('ABC.DEF', 'XYZABC') 'XYZ.ABC' """ t = (c for c in t) return ''.join(next(t) if not is_blacksquare(c) else c for c in s)
python
def restore(s, t): """ s is the source string, it can contain '.' t is the target, it's smaller than s by the number of '.'s in s Each char in s is replaced by the corresponding char in t, jumping over '.'s in s. >>> restore('ABC.DEF', 'XYZABC') 'XYZ.ABC' """ t = (c for c in t) return ''.join(next(t) if not is_blacksquare(c) else c for c in s)
[ "def", "restore", "(", "s", ",", "t", ")", ":", "t", "=", "(", "c", "for", "c", "in", "t", ")", "return", "''", ".", "join", "(", "next", "(", "t", ")", "if", "not", "is_blacksquare", "(", "c", ")", "else", "c", "for", "c", "in", "s", ")" ]
s is the source string, it can contain '.' t is the target, it's smaller than s by the number of '.'s in s Each char in s is replaced by the corresponding char in t, jumping over '.'s in s. >>> restore('ABC.DEF', 'XYZABC') 'XYZ.ABC'
[ "s", "is", "the", "source", "string", "it", "can", "contain", ".", "t", "is", "the", "target", "it", "s", "smaller", "than", "s", "by", "the", "number", "of", ".", "s", "in", "s" ]
8906ab899845d1200ac3411b4c2a2067cffa15d7
https://github.com/alexdej/puzpy/blob/8906ab899845d1200ac3411b4c2a2067cffa15d7/puz.py#L696-L708
train
235,676
instacart/ahab
ahab/__init__.py
Ahab.default
def default(event, data): """The default handler prints basic event info.""" messages = defaultdict(lambda: 'Avast:') messages['start'] = 'Thar she blows!' messages['tag'] = 'Thar she blows!' messages['stop'] = 'Away into the depths:' messages['destroy'] = 'Away into the depths:' messages['delete'] = 'Away into the depths:' status = get_status(event) message = messages[status] + ' %s/%s' log.info(message, status, get_id(event)) log.debug('"data": %s', form_json(data))
python
def default(event, data): """The default handler prints basic event info.""" messages = defaultdict(lambda: 'Avast:') messages['start'] = 'Thar she blows!' messages['tag'] = 'Thar she blows!' messages['stop'] = 'Away into the depths:' messages['destroy'] = 'Away into the depths:' messages['delete'] = 'Away into the depths:' status = get_status(event) message = messages[status] + ' %s/%s' log.info(message, status, get_id(event)) log.debug('"data": %s', form_json(data))
[ "def", "default", "(", "event", ",", "data", ")", ":", "messages", "=", "defaultdict", "(", "lambda", ":", "'Avast:'", ")", "messages", "[", "'start'", "]", "=", "'Thar she blows!'", "messages", "[", "'tag'", "]", "=", "'Thar she blows!'", "messages", "[", "'stop'", "]", "=", "'Away into the depths:'", "messages", "[", "'destroy'", "]", "=", "'Away into the depths:'", "messages", "[", "'delete'", "]", "=", "'Away into the depths:'", "status", "=", "get_status", "(", "event", ")", "message", "=", "messages", "[", "status", "]", "+", "' %s/%s'", "log", ".", "info", "(", "message", ",", "status", ",", "get_id", "(", "event", ")", ")", "log", ".", "debug", "(", "'\"data\": %s'", ",", "form_json", "(", "data", ")", ")" ]
The default handler prints basic event info.
[ "The", "default", "handler", "prints", "basic", "event", "info", "." ]
da85dc6d89f5d0c49d3a26a25ea3710c7881b150
https://github.com/instacart/ahab/blob/da85dc6d89f5d0c49d3a26a25ea3710c7881b150/ahab/__init__.py#L58-L69
train
235,677
instacart/ahab
examples/nathook.py
table
def table(tab): """Access IPTables transactionally in a uniform way. Ensures all access is done without autocommit and that only the outer most task commits, and also ensures we refresh once and commit once. """ global open_tables if tab in open_tables: yield open_tables[tab] else: open_tables[tab] = iptc.Table(tab) open_tables[tab].refresh() open_tables[tab].autocommit = False yield open_tables[tab] open_tables[tab].commit() del open_tables[tab]
python
def table(tab): """Access IPTables transactionally in a uniform way. Ensures all access is done without autocommit and that only the outer most task commits, and also ensures we refresh once and commit once. """ global open_tables if tab in open_tables: yield open_tables[tab] else: open_tables[tab] = iptc.Table(tab) open_tables[tab].refresh() open_tables[tab].autocommit = False yield open_tables[tab] open_tables[tab].commit() del open_tables[tab]
[ "def", "table", "(", "tab", ")", ":", "global", "open_tables", "if", "tab", "in", "open_tables", ":", "yield", "open_tables", "[", "tab", "]", "else", ":", "open_tables", "[", "tab", "]", "=", "iptc", ".", "Table", "(", "tab", ")", "open_tables", "[", "tab", "]", ".", "refresh", "(", ")", "open_tables", "[", "tab", "]", ".", "autocommit", "=", "False", "yield", "open_tables", "[", "tab", "]", "open_tables", "[", "tab", "]", ".", "commit", "(", ")", "del", "open_tables", "[", "tab", "]" ]
Access IPTables transactionally in a uniform way. Ensures all access is done without autocommit and that only the outermost task commits, and also ensures we refresh once and commit once.
[ "Access", "IPTables", "transactionally", "in", "a", "uniform", "way", "." ]
da85dc6d89f5d0c49d3a26a25ea3710c7881b150
https://github.com/instacart/ahab/blob/da85dc6d89f5d0c49d3a26a25ea3710c7881b150/examples/nathook.py#L124-L139
train
235,678
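A usage sketch, assuming python-iptables (iptc), root privileges, and that table() is wrapped with contextlib.contextmanager as its yield-based body implies; the nesting shows why only the outermost caller commits:

from nathook import table  # hypothetical import path for this example

def inspect_nat():
    with table('nat') as nat:  # outermost caller: refresh, autocommit off
        with table('nat') as same:  # nested caller reuses the open table
            assert nat is same  # no second refresh, no early commit
    # the single commit happens here, when the outermost context exits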
hotdoc/hotdoc
hotdoc/core/formatter.py
Formatter.format_symbol
def format_symbol(self, symbol, link_resolver): """ Format a symbols.Symbol """ if not symbol: return '' if isinstance(symbol, FieldSymbol): return '' # pylint: disable=unused-variable out = self._format_symbol(symbol) template = self.get_template('symbol_wrapper.html') return template.render( {'symbol': symbol, 'formatted_doc': out})
python
def format_symbol(self, symbol, link_resolver): """ Format a symbols.Symbol """ if not symbol: return '' if isinstance(symbol, FieldSymbol): return '' # pylint: disable=unused-variable out = self._format_symbol(symbol) template = self.get_template('symbol_wrapper.html') return template.render( {'symbol': symbol, 'formatted_doc': out})
[ "def", "format_symbol", "(", "self", ",", "symbol", ",", "link_resolver", ")", ":", "if", "not", "symbol", ":", "return", "''", "if", "isinstance", "(", "symbol", ",", "FieldSymbol", ")", ":", "return", "''", "# pylint: disable=unused-variable", "out", "=", "self", ".", "_format_symbol", "(", "symbol", ")", "template", "=", "self", ".", "get_template", "(", "'symbol_wrapper.html'", ")", "return", "template", ".", "render", "(", "{", "'symbol'", ":", "symbol", ",", "'formatted_doc'", ":", "out", "}", ")" ]
Format a symbols.Symbol
[ "Format", "a", "symbols", ".", "Symbol" ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/formatter.py#L219-L235
train
235,679
hotdoc/hotdoc
hotdoc/core/database.py
Database.add_comment
def add_comment(self, comment): """ Add a comment to the database. Args: comment (hotdoc.core.Comment): comment to add """ if not comment: return self.__comments[comment.name] = comment self.comment_added_signal(self, comment)
python
def add_comment(self, comment): """ Add a comment to the database. Args: comment (hotdoc.core.Comment): comment to add """ if not comment: return self.__comments[comment.name] = comment self.comment_added_signal(self, comment)
[ "def", "add_comment", "(", "self", ",", "comment", ")", ":", "if", "not", "comment", ":", "return", "self", ".", "__comments", "[", "comment", ".", "name", "]", "=", "comment", "self", ".", "comment_added_signal", "(", "self", ",", "comment", ")" ]
Add a comment to the database. Args: comment (hotdoc.core.Comment): comment to add
[ "Add", "a", "comment", "to", "the", "database", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/database.py#L77-L88
train
235,680
hotdoc/hotdoc
hotdoc/utils/utils.py
touch
def touch(fname): """ Mimics the `touch` command Busy loops until the mtime has actually been changed, use for tests only """ orig_mtime = get_mtime(fname) while get_mtime(fname) == orig_mtime: pathlib.Path(fname).touch()
python
def touch(fname): """ Mimics the `touch` command Busy loops until the mtime has actually been changed, use for tests only """ orig_mtime = get_mtime(fname) while get_mtime(fname) == orig_mtime: pathlib.Path(fname).touch()
[ "def", "touch", "(", "fname", ")", ":", "orig_mtime", "=", "get_mtime", "(", "fname", ")", "while", "get_mtime", "(", "fname", ")", "==", "orig_mtime", ":", "pathlib", ".", "Path", "(", "fname", ")", ".", "touch", "(", ")" ]
Mimics the `touch` command. Busy loops until the mtime has actually been changed; use for tests only.
[ "Mimics", "the", "touch", "command" ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/utils/utils.py#L306-L314
train
235,681
hotdoc/hotdoc
hotdoc/core/extension.py
Extension.debug
def debug(self, message, domain=None): """ Shortcut function for `utils.loggable.debug` Args: message: see `utils.loggable.debug` domain: see `utils.loggable.debug` """ if domain is None: domain = self.extension_name debug(message, domain)
python
def debug(self, message, domain=None): """ Shortcut function for `utils.loggable.debug` Args: message: see `utils.loggable.debug` domain: see `utils.loggable.debug` """ if domain is None: domain = self.extension_name debug(message, domain)
[ "def", "debug", "(", "self", ",", "message", ",", "domain", "=", "None", ")", ":", "if", "domain", "is", "None", ":", "domain", "=", "self", ".", "extension_name", "debug", "(", "message", ",", "domain", ")" ]
Shortcut function for `utils.loggable.debug` Args: message: see `utils.loggable.debug` domain: see `utils.loggable.debug`
[ "Shortcut", "function", "for", "utils", ".", "loggable", ".", "debug" ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/extension.py#L148-L158
train
235,682
hotdoc/hotdoc
hotdoc/core/extension.py
Extension.info
def info(self, message, domain=None): """ Shortcut function for `utils.loggable.info` Args: message: see `utils.loggable.info` domain: see `utils.loggable.info` """ if domain is None: domain = self.extension_name info(message, domain)
python
def info(self, message, domain=None): """ Shortcut function for `utils.loggable.info` Args: message: see `utils.loggable.info` domain: see `utils.loggable.info` """ if domain is None: domain = self.extension_name info(message, domain)
[ "def", "info", "(", "self", ",", "message", ",", "domain", "=", "None", ")", ":", "if", "domain", "is", "None", ":", "domain", "=", "self", ".", "extension_name", "info", "(", "message", ",", "domain", ")" ]
Shortcut function for `utils.loggable.info` Args: message: see `utils.loggable.info` domain: see `utils.loggable.info`
[ "Shortcut", "function", "for", "utils", ".", "loggable", ".", "info" ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/extension.py#L160-L170
train
235,683
hotdoc/hotdoc
hotdoc/core/extension.py
Extension.parse_config
def parse_config(self, config): """ Override this, making sure to chain up first, if your extension adds its own custom command line arguments, or you want to do any further processing on the automatically added arguments. The default implementation will set attributes on the extension: - 'sources': a set of absolute paths to source files for this extension - 'index': absolute path to the index for this extension Additionally, it will set an attribute for each argument added with `Extension.add_path_argument` or `Extension.add_paths_argument`, with the extension's `Extension.argument_prefix` stripped, and dashes changed to underscores. Args: config: a `config.Config` instance """ prefix = self.argument_prefix self.sources = config.get_sources(prefix) self.smart_sources = [ self._get_smart_filename(s) for s in self.sources] self.index = config.get_index(prefix) self.source_roots = OrderedSet( config.get_paths('%s_source_roots' % prefix)) for arg, dest in list(self.paths_arguments.items()): val = config.get_paths(arg) setattr(self, dest, val) for arg, dest in list(self.path_arguments.items()): val = config.get_path(arg) setattr(self, dest, val) self.formatter.parse_config(config)
python
def parse_config(self, config): """ Override this, making sure to chain up first, if your extension adds its own custom command line arguments, or you want to do any further processing on the automatically added arguments. The default implementation will set attributes on the extension: - 'sources': a set of absolute paths to source files for this extension - 'index': absolute path to the index for this extension Additionally, it will set an attribute for each argument added with `Extension.add_path_argument` or `Extension.add_paths_argument`, with the extension's `Extension.argument_prefix` stripped, and dashes changed to underscores. Args: config: a `config.Config` instance """ prefix = self.argument_prefix self.sources = config.get_sources(prefix) self.smart_sources = [ self._get_smart_filename(s) for s in self.sources] self.index = config.get_index(prefix) self.source_roots = OrderedSet( config.get_paths('%s_source_roots' % prefix)) for arg, dest in list(self.paths_arguments.items()): val = config.get_paths(arg) setattr(self, dest, val) for arg, dest in list(self.path_arguments.items()): val = config.get_path(arg) setattr(self, dest, val) self.formatter.parse_config(config)
[ "def", "parse_config", "(", "self", ",", "config", ")", ":", "prefix", "=", "self", ".", "argument_prefix", "self", ".", "sources", "=", "config", ".", "get_sources", "(", "prefix", ")", "self", ".", "smart_sources", "=", "[", "self", ".", "_get_smart_filename", "(", "s", ")", "for", "s", "in", "self", ".", "sources", "]", "self", ".", "index", "=", "config", ".", "get_index", "(", "prefix", ")", "self", ".", "source_roots", "=", "OrderedSet", "(", "config", ".", "get_paths", "(", "'%s_source_roots'", "%", "prefix", ")", ")", "for", "arg", ",", "dest", "in", "list", "(", "self", ".", "paths_arguments", ".", "items", "(", ")", ")", ":", "val", "=", "config", ".", "get_paths", "(", "arg", ")", "setattr", "(", "self", ",", "dest", ",", "val", ")", "for", "arg", ",", "dest", "in", "list", "(", "self", ".", "path_arguments", ".", "items", "(", ")", ")", ":", "val", "=", "config", ".", "get_path", "(", "arg", ")", "setattr", "(", "self", ",", "dest", ",", "val", ")", "self", ".", "formatter", ".", "parse_config", "(", "config", ")" ]
Override this, making sure to chain up first, if your extension adds its own custom command line arguments, or you want to do any further processing on the automatically added arguments. The default implementation will set attributes on the extension: - 'sources': a set of absolute paths to source files for this extension - 'index': absolute path to the index for this extension Additionally, it will set an attribute for each argument added with `Extension.add_path_argument` or `Extension.add_paths_argument`, with the extension's `Extension.argument_prefix` stripped, and dashes changed to underscores. Args: config: a `config.Config` instance
[ "Override", "this", "making", "sure", "to", "chain", "up", "first", "if", "your", "extension", "adds", "its", "own", "custom", "command", "line", "arguments", "or", "you", "want", "to", "do", "any", "further", "processing", "on", "the", "automatically", "added", "arguments", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/extension.py#L403-L437
train
235,684
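A sketch of the chain-up pattern the docstring mandates; MyExtension and its log line are illustrative:

from hotdoc.core.extension import Extension

class MyExtension(Extension):  # illustrative subclass
    extension_name = 'my-extension'
    argument_prefix = 'my'

    def parse_config(self, config):
        super().parse_config(config)  # chain up first, as documented
        # self.sources, self.index and self.source_roots are now populated
        self.info('parsing %d source files' % len(self.sources or []))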
hotdoc/hotdoc
hotdoc/core/extension.py
Extension.add_attrs
def add_attrs(self, symbol, **kwargs): """ Helper for setting symbol extension attributes """ for key, val in kwargs.items(): symbol.add_extension_attribute(self.extension_name, key, val)
python
def add_attrs(self, symbol, **kwargs): """ Helper for setting symbol extension attributes """ for key, val in kwargs.items(): symbol.add_extension_attribute(self.extension_name, key, val)
[ "def", "add_attrs", "(", "self", ",", "symbol", ",", "*", "*", "kwargs", ")", ":", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "symbol", ".", "add_extension_attribute", "(", "self", ".", "extension_name", ",", "key", ",", "val", ")" ]
Helper for setting symbol extension attributes
[ "Helper", "for", "setting", "symbol", "extension", "attributes" ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/extension.py#L439-L444
train
235,685
hotdoc/hotdoc
hotdoc/core/extension.py
Extension.get_attr
def get_attr(self, symbol, attrname): """ Helper for getting symbol extension attributes """ return symbol.extension_attributes.get(self.extension_name, {}).get( attrname, None)
python
def get_attr(self, symbol, attrname): """ Helper for getting symbol extension attributes """ return symbol.extension_attributes.get(self.extension_name, {}).get( attrname, None)
[ "def", "get_attr", "(", "self", ",", "symbol", ",", "attrname", ")", ":", "return", "symbol", ".", "extension_attributes", ".", "get", "(", "self", ".", "extension_name", ",", "{", "}", ")", ".", "get", "(", "attrname", ",", "None", ")" ]
Helper for getting symbol extension attributes
[ "Helper", "for", "getting", "symbol", "extension", "attributes" ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/extension.py#L446-L451
train
235,686
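A sketch combining the two attribute helpers above; the annotate method, the symbol argument, and the attribute names are illustrative:

from hotdoc.core.extension import Extension

class MyExtension(Extension):  # illustrative subclass
    extension_name = 'my-extension'
    argument_prefix = 'my'

    def annotate(self, sym):
        # stores attributes under this extension's namespace on the symbol
        self.add_attrs(sym, kind='signal', since='1.2')
        assert self.get_attr(sym, 'kind') == 'signal'
        assert self.get_attr(sym, 'missing') is None  # absent -> None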
hotdoc/hotdoc
hotdoc/core/extension.py
Extension.add_index_argument
def add_index_argument(cls, group): """ Subclasses may call this to add an index argument. Args: group: arparse.ArgumentGroup, the extension argument group prefix: str, arguments have to be namespaced """ prefix = cls.argument_prefix group.add_argument( '--%s-index' % prefix, action="store", dest="%s_index" % prefix, help=("Name of the %s root markdown file, can be None" % ( cls.extension_name)))
python
def add_index_argument(cls, group): """ Subclasses may call this to add an index argument. Args: group: arparse.ArgumentGroup, the extension argument group prefix: str, arguments have to be namespaced """ prefix = cls.argument_prefix group.add_argument( '--%s-index' % prefix, action="store", dest="%s_index" % prefix, help=("Name of the %s root markdown file, can be None" % ( cls.extension_name)))
[ "def", "add_index_argument", "(", "cls", ",", "group", ")", ":", "prefix", "=", "cls", ".", "argument_prefix", "group", ".", "add_argument", "(", "'--%s-index'", "%", "prefix", ",", "action", "=", "\"store\"", ",", "dest", "=", "\"%s_index\"", "%", "prefix", ",", "help", "=", "(", "\"Name of the %s root markdown file, can be None\"", "%", "(", "cls", ".", "extension_name", ")", ")", ")" ]
Subclasses may call this to add an index argument. Args: group: argparse.ArgumentGroup, the extension argument group prefix: str, arguments have to be namespaced
[ "Subclasses", "may", "call", "this", "to", "add", "an", "index", "argument", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/extension.py#L479-L493
train
235,687
hotdoc/hotdoc
hotdoc/core/extension.py
Extension.add_sources_argument
def add_sources_argument(cls, group, allow_filters=True, prefix=None, add_root_paths=False): """ Subclasses may call this to add sources and source_filters arguments. Args: group: arparse.ArgumentGroup, the extension argument group allow_filters: bool, Whether the extension wishes to expose a source_filters argument. prefix: str, arguments have to be namespaced. """ prefix = prefix or cls.argument_prefix group.add_argument("--%s-sources" % prefix, action="store", nargs="+", dest="%s_sources" % prefix.replace('-', '_'), help="%s source files to parse" % prefix) if allow_filters: group.add_argument("--%s-source-filters" % prefix, action="store", nargs="+", dest="%s_source_filters" % prefix.replace( '-', '_'), help="%s source files to ignore" % prefix) if add_root_paths: group.add_argument("--%s-source-roots" % prefix, action="store", nargs="+", dest="%s_source_roots" % prefix.replace( '-', '_'), help="%s source root directories allowing files " "to be referenced relatively to those" % prefix)
python
def add_sources_argument(cls, group, allow_filters=True, prefix=None, add_root_paths=False): """ Subclasses may call this to add sources and source_filters arguments. Args: group: arparse.ArgumentGroup, the extension argument group allow_filters: bool, Whether the extension wishes to expose a source_filters argument. prefix: str, arguments have to be namespaced. """ prefix = prefix or cls.argument_prefix group.add_argument("--%s-sources" % prefix, action="store", nargs="+", dest="%s_sources" % prefix.replace('-', '_'), help="%s source files to parse" % prefix) if allow_filters: group.add_argument("--%s-source-filters" % prefix, action="store", nargs="+", dest="%s_source_filters" % prefix.replace( '-', '_'), help="%s source files to ignore" % prefix) if add_root_paths: group.add_argument("--%s-source-roots" % prefix, action="store", nargs="+", dest="%s_source_roots" % prefix.replace( '-', '_'), help="%s source root directories allowing files " "to be referenced relatively to those" % prefix)
[ "def", "add_sources_argument", "(", "cls", ",", "group", ",", "allow_filters", "=", "True", ",", "prefix", "=", "None", ",", "add_root_paths", "=", "False", ")", ":", "prefix", "=", "prefix", "or", "cls", ".", "argument_prefix", "group", ".", "add_argument", "(", "\"--%s-sources\"", "%", "prefix", ",", "action", "=", "\"store\"", ",", "nargs", "=", "\"+\"", ",", "dest", "=", "\"%s_sources\"", "%", "prefix", ".", "replace", "(", "'-'", ",", "'_'", ")", ",", "help", "=", "\"%s source files to parse\"", "%", "prefix", ")", "if", "allow_filters", ":", "group", ".", "add_argument", "(", "\"--%s-source-filters\"", "%", "prefix", ",", "action", "=", "\"store\"", ",", "nargs", "=", "\"+\"", ",", "dest", "=", "\"%s_source_filters\"", "%", "prefix", ".", "replace", "(", "'-'", ",", "'_'", ")", ",", "help", "=", "\"%s source files to ignore\"", "%", "prefix", ")", "if", "add_root_paths", ":", "group", ".", "add_argument", "(", "\"--%s-source-roots\"", "%", "prefix", ",", "action", "=", "\"store\"", ",", "nargs", "=", "\"+\"", ",", "dest", "=", "\"%s_source_roots\"", "%", "prefix", ".", "replace", "(", "'-'", ",", "'_'", ")", ",", "help", "=", "\"%s source root directories allowing files \"", "\"to be referenced relatively to those\"", "%", "prefix", ")" ]
Subclasses may call this to add sources and source_filters arguments. Args: group: argparse.ArgumentGroup, the extension argument group allow_filters: bool, Whether the extension wishes to expose a source_filters argument. prefix: str, arguments have to be namespaced.
[ "Subclasses", "may", "call", "this", "to", "add", "sources", "and", "source_filters", "arguments", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/extension.py#L496-L526
train
235,688
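A sketch of an extension's argument-registration hook calling these helpers; the add_arguments hook name follows hotdoc's usual extension pattern but is an assumption here:

from hotdoc.core.extension import Extension

class MyExtension(Extension):  # illustrative subclass
    extension_name = 'my-extension'
    argument_prefix = 'my'

    @staticmethod
    def add_arguments(parser):
        group = parser.add_argument_group('my extension', 'my options')
        MyExtension.add_index_argument(group)  # adds --my-index
        MyExtension.add_sources_argument(group, add_root_paths=True)
        # also adds --my-sources, --my-source-filters, --my-source-roots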
hotdoc/hotdoc
hotdoc/core/extension.py
Extension.add_path_argument
def add_path_argument(cls, group, argname, dest=None, help_=None): """ Subclasses may call this to expose a path argument. Args: group: arparse.ArgumentGroup, the extension argument group argname: str, the name of the argument, will be namespaced. dest: str, similar to the `dest` argument of `argparse.ArgumentParser.add_argument`, will be namespaced. help_: str, similar to the `help` argument of `argparse.ArgumentParser.add_argument`. """ prefixed = '%s-%s' % (cls.argument_prefix, argname) if dest is None: dest = prefixed.replace('-', '_') final_dest = dest[len(cls.argument_prefix) + 1:] else: final_dest = dest dest = '%s_%s' % (cls.argument_prefix, dest) group.add_argument('--%s' % prefixed, action='store', dest=dest, help=help_) cls.path_arguments[dest] = final_dest
python
def add_path_argument(cls, group, argname, dest=None, help_=None): """ Subclasses may call this to expose a path argument. Args: group: arparse.ArgumentGroup, the extension argument group argname: str, the name of the argument, will be namespaced. dest: str, similar to the `dest` argument of `argparse.ArgumentParser.add_argument`, will be namespaced. help_: str, similar to the `help` argument of `argparse.ArgumentParser.add_argument`. """ prefixed = '%s-%s' % (cls.argument_prefix, argname) if dest is None: dest = prefixed.replace('-', '_') final_dest = dest[len(cls.argument_prefix) + 1:] else: final_dest = dest dest = '%s_%s' % (cls.argument_prefix, dest) group.add_argument('--%s' % prefixed, action='store', dest=dest, help=help_) cls.path_arguments[dest] = final_dest
[ "def", "add_path_argument", "(", "cls", ",", "group", ",", "argname", ",", "dest", "=", "None", ",", "help_", "=", "None", ")", ":", "prefixed", "=", "'%s-%s'", "%", "(", "cls", ".", "argument_prefix", ",", "argname", ")", "if", "dest", "is", "None", ":", "dest", "=", "prefixed", ".", "replace", "(", "'-'", ",", "'_'", ")", "final_dest", "=", "dest", "[", "len", "(", "cls", ".", "argument_prefix", ")", "+", "1", ":", "]", "else", ":", "final_dest", "=", "dest", "dest", "=", "'%s_%s'", "%", "(", "cls", ".", "argument_prefix", ",", "dest", ")", "group", ".", "add_argument", "(", "'--%s'", "%", "prefixed", ",", "action", "=", "'store'", ",", "dest", "=", "dest", ",", "help", "=", "help_", ")", "cls", ".", "path_arguments", "[", "dest", "]", "=", "final_dest" ]
Subclasses may call this to expose a path argument. Args: group: argparse.ArgumentGroup, the extension argument group argname: str, the name of the argument, will be namespaced. dest: str, similar to the `dest` argument of `argparse.ArgumentParser.add_argument`, will be namespaced. help_: str, similar to the `help` argument of `argparse.ArgumentParser.add_argument`.
[ "Subclasses", "may", "call", "this", "to", "expose", "a", "path", "argument", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/extension.py#L529-L551
train
235,689
hotdoc/hotdoc
hotdoc/core/extension.py
Extension.add_paths_argument
def add_paths_argument(cls, group, argname, dest=None, help_=None): """ Subclasses may call this to expose a paths argument. Args: group: arparse.ArgumentGroup, the extension argument group argname: str, the name of the argument, will be namespaced. dest: str, similar to the `dest` argument of `argparse.ArgumentParser.add_argument`, will be namespaced. help_: str, similar to the `help` argument of `argparse.ArgumentParser.add_argument`. """ prefixed = '%s-%s' % (cls.argument_prefix, argname) if dest is None: dest = prefixed.replace('-', '_') final_dest = dest[len(cls.argument_prefix) + 1:] else: final_dest = dest dest = '%s_%s' % (cls.argument_prefix, dest) group.add_argument('--%s' % prefixed, action='store', nargs='+', dest=dest, help=help_) cls.paths_arguments[dest] = final_dest
python
def add_paths_argument(cls, group, argname, dest=None, help_=None): """ Subclasses may call this to expose a paths argument. Args: group: arparse.ArgumentGroup, the extension argument group argname: str, the name of the argument, will be namespaced. dest: str, similar to the `dest` argument of `argparse.ArgumentParser.add_argument`, will be namespaced. help_: str, similar to the `help` argument of `argparse.ArgumentParser.add_argument`. """ prefixed = '%s-%s' % (cls.argument_prefix, argname) if dest is None: dest = prefixed.replace('-', '_') final_dest = dest[len(cls.argument_prefix) + 1:] else: final_dest = dest dest = '%s_%s' % (cls.argument_prefix, dest) group.add_argument('--%s' % prefixed, action='store', nargs='+', dest=dest, help=help_) cls.paths_arguments[dest] = final_dest
[ "def", "add_paths_argument", "(", "cls", ",", "group", ",", "argname", ",", "dest", "=", "None", ",", "help_", "=", "None", ")", ":", "prefixed", "=", "'%s-%s'", "%", "(", "cls", ".", "argument_prefix", ",", "argname", ")", "if", "dest", "is", "None", ":", "dest", "=", "prefixed", ".", "replace", "(", "'-'", ",", "'_'", ")", "final_dest", "=", "dest", "[", "len", "(", "cls", ".", "argument_prefix", ")", "+", "1", ":", "]", "else", ":", "final_dest", "=", "dest", "dest", "=", "'%s_%s'", "%", "(", "cls", ".", "argument_prefix", ",", "dest", ")", "group", ".", "add_argument", "(", "'--%s'", "%", "prefixed", ",", "action", "=", "'store'", ",", "nargs", "=", "'+'", ",", "dest", "=", "dest", ",", "help", "=", "help_", ")", "cls", ".", "paths_arguments", "[", "dest", "]", "=", "final_dest" ]
Subclasses may call this to expose a paths argument. Args: group: argparse.ArgumentGroup, the extension argument group argname: str, the name of the argument, will be namespaced. dest: str, similar to the `dest` argument of `argparse.ArgumentParser.add_argument`, will be namespaced. help_: str, similar to the `help` argument of `argparse.ArgumentParser.add_argument`.
[ "Subclasses", "may", "call", "this", "to", "expose", "a", "paths", "argument", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/extension.py#L554-L576
train
235,690
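A self-contained sketch of the two path helpers against a bare argparse parser, to show the namespacing they perform; the option names are illustrative:

import argparse
from hotdoc.core.extension import Extension

class MyExtension(Extension):  # illustrative subclass
    extension_name = 'my-extension'
    argument_prefix = 'my'

parser = argparse.ArgumentParser()
group = parser.add_argument_group('my extension')
MyExtension.add_path_argument(group, 'html-theme', help_='theme directory')
MyExtension.add_paths_argument(group, 'extra-assets', help_='asset directories')
args = parser.parse_args(
    ['--my-html-theme', 'theme', '--my-extra-assets', 'a', 'b'])
print(args.my_html_theme, args.my_extra_assets)  # theme ['a', 'b']
# After parse_config() runs, the instance would expose these values as
# self.html_theme and self.extra_assets (prefix stripped, dashes to underscores).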
hotdoc/hotdoc
hotdoc/core/extension.py
Extension.create_symbol
def create_symbol(self, *args, **kwargs): """ Extensions that discover and create instances of `symbols.Symbol` should do this through this method, as it will keep an index of these which can be used when generating a "naive index". See `database.Database.create_symbol` for more information. Args: args: see `database.Database.create_symbol` kwargs: see `database.Database.create_symbol` Returns: symbols.Symbol: the created symbol, or `None`. """ if not kwargs.get('project_name'): kwargs['project_name'] = self.project.project_name sym = self.app.database.create_symbol(*args, **kwargs) if sym: # pylint: disable=unidiomatic-typecheck if type(sym) != Symbol: self._created_symbols[sym.filename].add(sym.unique_name) return sym
python
def create_symbol(self, *args, **kwargs): """ Extensions that discover and create instances of `symbols.Symbol` should do this through this method, as it will keep an index of these which can be used when generating a "naive index". See `database.Database.create_symbol` for more information. Args: args: see `database.Database.create_symbol` kwargs: see `database.Database.create_symbol` Returns: symbols.Symbol: the created symbol, or `None`. """ if not kwargs.get('project_name'): kwargs['project_name'] = self.project.project_name sym = self.app.database.create_symbol(*args, **kwargs) if sym: # pylint: disable=unidiomatic-typecheck if type(sym) != Symbol: self._created_symbols[sym.filename].add(sym.unique_name) return sym
[ "def", "create_symbol", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "kwargs", ".", "get", "(", "'project_name'", ")", ":", "kwargs", "[", "'project_name'", "]", "=", "self", ".", "project", ".", "project_name", "sym", "=", "self", ".", "app", ".", "database", ".", "create_symbol", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "sym", ":", "# pylint: disable=unidiomatic-typecheck", "if", "type", "(", "sym", ")", "!=", "Symbol", ":", "self", ".", "_created_symbols", "[", "sym", ".", "filename", "]", ".", "add", "(", "sym", ".", "unique_name", ")", "return", "sym" ]
Extensions that discover and create instances of `symbols.Symbol` should do this through this method, as it will keep an index of these which can be used when generating a "naive index". See `database.Database.create_symbol` for more information. Args: args: see `database.Database.create_symbol` kwargs: see `database.Database.create_symbol` Returns: symbols.Symbol: the created symbol, or `None`.
[ "Extensions", "that", "discover", "and", "create", "instances", "of", "symbols", ".", "Symbol", "should", "do", "this", "through", "this", "method", "as", "it", "will", "keep", "an", "index", "of", "these", "which", "can", "be", "used", "when", "generating", "a", "naive", "index", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/extension.py#L584-L609
train
235,691
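A heavily hedged sketch of symbol creation inside an extension's parsing pass; the FunctionSymbol class, its import location, and the keyword names are assumptions based on typical hotdoc usage, not taken from the record above:

from hotdoc.core.extension import Extension
from hotdoc.core.symbols import FunctionSymbol  # assumed location

class MyExtension(Extension):  # illustrative subclass
    extension_name = 'my-extension'
    argument_prefix = 'my'

    def parse_one(self, filename):
        # create_symbol indexes the result for the "naive index"
        return self.create_symbol(
            FunctionSymbol,              # assumed symbol class
            display_name='my_function',  # illustrative values
            filename=filename,
            unique_name='my_function')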
hotdoc/hotdoc
hotdoc/core/extension.py
Extension.format_page
def format_page(self, page, link_resolver, output): """ Called by `project.Project.format_page`, to leave full control to extensions over the formatting of the pages they are responsible of. Args: page: tree.Page, the page to format. link_resolver: links.LinkResolver, object responsible for resolving links potentially mentioned in `page` output: str, path to the output directory. """ debug('Formatting page %s' % page.link.ref, 'formatting') if output: actual_output = os.path.join(output, 'html') if not os.path.exists(actual_output): os.makedirs(actual_output) else: actual_output = None page.format(self.formatter, link_resolver, actual_output)
python
def format_page(self, page, link_resolver, output): """ Called by `project.Project.format_page`, to leave full control to extensions over the formatting of the pages they are responsible of. Args: page: tree.Page, the page to format. link_resolver: links.LinkResolver, object responsible for resolving links potentially mentioned in `page` output: str, path to the output directory. """ debug('Formatting page %s' % page.link.ref, 'formatting') if output: actual_output = os.path.join(output, 'html') if not os.path.exists(actual_output): os.makedirs(actual_output) else: actual_output = None page.format(self.formatter, link_resolver, actual_output)
[ "def", "format_page", "(", "self", ",", "page", ",", "link_resolver", ",", "output", ")", ":", "debug", "(", "'Formatting page %s'", "%", "page", ".", "link", ".", "ref", ",", "'formatting'", ")", "if", "output", ":", "actual_output", "=", "os", ".", "path", ".", "join", "(", "output", ",", "'html'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "actual_output", ")", ":", "os", ".", "makedirs", "(", "actual_output", ")", "else", ":", "actual_output", "=", "None", "page", ".", "format", "(", "self", ".", "formatter", ",", "link_resolver", ",", "actual_output", ")" ]
Called by `project.Project.format_page`, to leave full control to extensions over the formatting of the pages they are responsible of. Args: page: tree.Page, the page to format. link_resolver: links.LinkResolver, object responsible for resolving links potentially mentioned in `page` output: str, path to the output directory.
[ "Called", "by", "project", ".", "Project", ".", "format_page", "to", "leave", "full", "control", "to", "extensions", "over", "the", "formatting", "of", "the", "pages", "they", "are", "responsible", "of", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/extension.py#L646-L668
train
235,692
hotdoc/hotdoc
hotdoc/core/project.py
Project.add_subproject
def add_subproject(self, fname, conf_path): """Creates and adds a new subproject.""" config = Config(conf_file=conf_path) proj = Project(self.app, dependency_map=self.dependency_map) proj.parse_name_from_config(config) proj.parse_config(config) proj.setup() self.subprojects[fname] = proj
python
def add_subproject(self, fname, conf_path): """Creates and adds a new subproject.""" config = Config(conf_file=conf_path) proj = Project(self.app, dependency_map=self.dependency_map) proj.parse_name_from_config(config) proj.parse_config(config) proj.setup() self.subprojects[fname] = proj
[ "def", "add_subproject", "(", "self", ",", "fname", ",", "conf_path", ")", ":", "config", "=", "Config", "(", "conf_file", "=", "conf_path", ")", "proj", "=", "Project", "(", "self", ".", "app", ",", "dependency_map", "=", "self", ".", "dependency_map", ")", "proj", ".", "parse_name_from_config", "(", "config", ")", "proj", ".", "parse_config", "(", "config", ")", "proj", ".", "setup", "(", ")", "self", ".", "subprojects", "[", "fname", "]", "=", "proj" ]
Creates and adds a new subproject.
[ "Creates", "and", "adds", "a", "new", "subproject", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/project.py#L226-L234
train
235,693
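A short sketch of how `add_subproject` might be called; the `project` instance and the config path are assumptions for illustration:

# Hypothetical: register a subproject described by an existing hotdoc
# configuration file; the fname key doubles as the lookup name.
project.add_subproject('library.json', '/path/to/library.json')
subproj = project.subprojects['library.json']  # the freshly parsed, set-up Project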
hotdoc/hotdoc
hotdoc/core/tree.py
_no_duplicates_constructor
def _no_duplicates_constructor(loader, node, deep=False):
    """Check for duplicate keys."""
    mapping = {}
    for key_node, value_node in node.value:
        key = loader.construct_object(key_node, deep=deep)
        value = loader.construct_object(value_node, deep=deep)
        if key in mapping:
            raise ConstructorError("while constructing a mapping",
                                   node.start_mark,
                                   "found duplicate key (%s)" % key,
                                   key_node.start_mark)
        mapping[key] = value
    return loader.construct_mapping(node, deep)
python
def _no_duplicates_constructor(loader, node, deep=False):
    """Check for duplicate keys."""
    mapping = {}
    for key_node, value_node in node.value:
        key = loader.construct_object(key_node, deep=deep)
        value = loader.construct_object(value_node, deep=deep)
        if key in mapping:
            raise ConstructorError("while constructing a mapping",
                                   node.start_mark,
                                   "found duplicate key (%s)" % key,
                                   key_node.start_mark)
        mapping[key] = value
    return loader.construct_mapping(node, deep)
[ "def", "_no_duplicates_constructor", "(", "loader", ",", "node", ",", "deep", "=", "False", ")", ":", "mapping", "=", "{", "}", "for", "key_node", ",", "value_node", "in", "node", ".", "value", ":", "key", "=", "loader", ".", "construct_object", "(", "key_node", ",", "deep", "=", "deep", ")", "value", "=", "loader", ".", "construct_object", "(", "value_node", ",", "deep", "=", "deep", ")", "if", "key", "in", "mapping", ":", "raise", "ConstructorError", "(", "\"while constructing a mapping\"", ",", "node", ".", "start_mark", ",", "\"found duplicate key (%s)\"", "%", "key", ",", "key_node", ".", "start_mark", ")", "mapping", "[", "key", "]", "=", "value", "return", "loader", ".", "construct_mapping", "(", "node", ",", "deep", ")" ]
Check for duplicate keys.
[ "Check", "for", "duplicate", "keys", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/tree.py#L49-L63
train
235,694
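A minimal sketch of how such a constructor is typically hooked into PyYAML; the registration call below follows PyYAML's standard API and is an assumption, not taken from the record:

import yaml

# Register the duplicate-key guard for plain mappings on the default Loader.
yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                     _no_duplicates_constructor)

yaml.load('a: 1\nb: 2', Loader=yaml.Loader)  # parses normally
yaml.load('a: 1\na: 2', Loader=yaml.Loader)  # raises ConstructorError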
hotdoc/hotdoc
hotdoc/core/tree.py
Page.resolve_symbols
def resolve_symbols(self, tree, database, link_resolver):
    """
    When this method is called, the page's symbol names are queried
    from `database`, and added to lists of actual symbols, sorted
    by symbol class.
    """
    self.typed_symbols = self.__get_empty_typed_symbols()
    all_syms = OrderedSet()
    for sym_name in self.symbol_names:
        sym = database.get_symbol(sym_name)
        self.__query_extra_symbols(
            sym, all_syms, tree, link_resolver, database)

    if tree.project.is_toplevel:
        page_path = self.link.ref
    else:
        page_path = self.project_name + '/' + self.link.ref

    if self.meta.get("auto-sort", True):
        all_syms = sorted(all_syms, key=lambda x: x.unique_name)
    for sym in all_syms:
        sym.update_children_comments()
        self.__resolve_symbol(sym, link_resolver, page_path)
        self.symbol_names.add(sym.unique_name)

    # Always put symbols with no parent at the end
    no_parent_syms = self.by_parent_symbols.pop(None, None)
    if no_parent_syms:
        self.by_parent_symbols[None] = no_parent_syms

    for sym_type in [ClassSymbol, AliasSymbol, InterfaceSymbol,
                     StructSymbol]:
        syms = self.typed_symbols[sym_type].symbols

        if not syms:
            continue

        if self.title is None:
            self.title = syms[0].display_name
        if self.comment is None:
            self.comment = Comment(name=self.name)
            self.comment.short_description = syms[0].comment.short_description
            self.comment.title = syms[0].comment.title
        break
python
def resolve_symbols(self, tree, database, link_resolver):
    """
    When this method is called, the page's symbol names are queried
    from `database`, and added to lists of actual symbols, sorted
    by symbol class.
    """
    self.typed_symbols = self.__get_empty_typed_symbols()
    all_syms = OrderedSet()
    for sym_name in self.symbol_names:
        sym = database.get_symbol(sym_name)
        self.__query_extra_symbols(
            sym, all_syms, tree, link_resolver, database)

    if tree.project.is_toplevel:
        page_path = self.link.ref
    else:
        page_path = self.project_name + '/' + self.link.ref

    if self.meta.get("auto-sort", True):
        all_syms = sorted(all_syms, key=lambda x: x.unique_name)
    for sym in all_syms:
        sym.update_children_comments()
        self.__resolve_symbol(sym, link_resolver, page_path)
        self.symbol_names.add(sym.unique_name)

    # Always put symbols with no parent at the end
    no_parent_syms = self.by_parent_symbols.pop(None, None)
    if no_parent_syms:
        self.by_parent_symbols[None] = no_parent_syms

    for sym_type in [ClassSymbol, AliasSymbol, InterfaceSymbol,
                     StructSymbol]:
        syms = self.typed_symbols[sym_type].symbols

        if not syms:
            continue

        if self.title is None:
            self.title = syms[0].display_name
        if self.comment is None:
            self.comment = Comment(name=self.name)
            self.comment.short_description = syms[0].comment.short_description
            self.comment.title = syms[0].comment.title
        break
[ "def", "resolve_symbols", "(", "self", ",", "tree", ",", "database", ",", "link_resolver", ")", ":", "self", ".", "typed_symbols", "=", "self", ".", "__get_empty_typed_symbols", "(", ")", "all_syms", "=", "OrderedSet", "(", ")", "for", "sym_name", "in", "self", ".", "symbol_names", ":", "sym", "=", "database", ".", "get_symbol", "(", "sym_name", ")", "self", ".", "__query_extra_symbols", "(", "sym", ",", "all_syms", ",", "tree", ",", "link_resolver", ",", "database", ")", "if", "tree", ".", "project", ".", "is_toplevel", ":", "page_path", "=", "self", ".", "link", ".", "ref", "else", ":", "page_path", "=", "self", ".", "project_name", "+", "'/'", "+", "self", ".", "link", ".", "ref", "if", "self", ".", "meta", ".", "get", "(", "\"auto-sort\"", ",", "True", ")", ":", "all_syms", "=", "sorted", "(", "all_syms", ",", "key", "=", "lambda", "x", ":", "x", ".", "unique_name", ")", "for", "sym", "in", "all_syms", ":", "sym", ".", "update_children_comments", "(", ")", "self", ".", "__resolve_symbol", "(", "sym", ",", "link_resolver", ",", "page_path", ")", "self", ".", "symbol_names", ".", "add", "(", "sym", ".", "unique_name", ")", "# Always put symbols with no parent at the end", "no_parent_syms", "=", "self", ".", "by_parent_symbols", ".", "pop", "(", "None", ",", "None", ")", "if", "no_parent_syms", ":", "self", ".", "by_parent_symbols", "[", "None", "]", "=", "no_parent_syms", "for", "sym_type", "in", "[", "ClassSymbol", ",", "AliasSymbol", ",", "InterfaceSymbol", ",", "StructSymbol", "]", ":", "syms", "=", "self", ".", "typed_symbols", "[", "sym_type", "]", ".", "symbols", "if", "not", "syms", ":", "continue", "if", "self", ".", "title", "is", "None", ":", "self", ".", "title", "=", "syms", "[", "0", "]", ".", "display_name", "if", "self", ".", "comment", "is", "None", ":", "self", ".", "comment", "=", "Comment", "(", "name", "=", "self", ".", "name", ")", "self", ".", "comment", ".", "short_description", "=", "syms", "[", "0", "]", ".", "comment", ".", "short_description", "self", ".", "comment", ".", "title", "=", "syms", "[", "0", "]", ".", "comment", ".", "title", "break" ]
When this method is called, the page's symbol names are queried from `database`, and added to lists of actual symbols, sorted by symbol class.
[ "When", "this", "method", "is", "called", "the", "page", "s", "symbol", "names", "are", "queried", "from", "database", "and", "added", "to", "lists", "of", "actual", "symbols", "sorted", "by", "symbol", "class", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/tree.py#L196-L240
train
235,695
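A sketch of the resolution pass over a whole tree; the `tree`, `database` and `link_resolver` objects are assumed to come from a configured hotdoc project:

# Hypothetical driver loop: resolve every page's symbol names into
# concrete symbol objects before formatting.
for page in tree.walk():
    page.resolve_symbols(tree, database, link_resolver)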
hotdoc/hotdoc
hotdoc/core/tree.py
Tree.walk
def walk(self, parent=None):
    """Generator that yields pages in pre-order

    Args:
        parent: hotdoc.core.tree.Page, optional, the page to start
            traversal from. If None, defaults to the root of the tree.

    Yields:
        hotdoc.core.tree.Page: the next page
    """
    if parent is None:
        yield self.root
        parent = self.root

    for cpage_name in parent.subpages:
        cpage = self.__all_pages[cpage_name]
        yield cpage
        for page in self.walk(parent=cpage):
            yield page
python
def walk(self, parent=None):
    """Generator that yields pages in pre-order

    Args:
        parent: hotdoc.core.tree.Page, optional, the page to start
            traversal from. If None, defaults to the root of the tree.

    Yields:
        hotdoc.core.tree.Page: the next page
    """
    if parent is None:
        yield self.root
        parent = self.root

    for cpage_name in parent.subpages:
        cpage = self.__all_pages[cpage_name]
        yield cpage
        for page in self.walk(parent=cpage):
            yield page
[ "def", "walk", "(", "self", ",", "parent", "=", "None", ")", ":", "if", "parent", "is", "None", ":", "yield", "self", ".", "root", "parent", "=", "self", ".", "root", "for", "cpage_name", "in", "parent", ".", "subpages", ":", "cpage", "=", "self", ".", "__all_pages", "[", "cpage_name", "]", "yield", "cpage", "for", "page", "in", "self", ".", "walk", "(", "parent", "=", "cpage", ")", ":", "yield", "page" ]
Generator that yields pages in pre-order

Args:
    parent: hotdoc.core.tree.Page, optional, the page to start
        traversal from. If None, defaults to the root of the tree.

Yields:
    hotdoc.core.tree.Page: the next page
[ "Generator", "that", "yields", "pages", "in", "infix", "order" ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/tree.py#L514-L532
train
235,696
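A small usage sketch; the `tree` and `some_page` names are assumptions:

# Hypothetical: print every page ref, root first (depth-first pre-order).
for page in tree.walk():
    print(page.link.ref)

# Traversal can also start lower down; note that `some_page` itself is
# not yielded, only its descendants.
for page in tree.walk(parent=some_page):
    print(page.link.ref)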
hotdoc/hotdoc
hotdoc/extensions/__init__.py
get_extension_classes
def get_extension_classes():
    """
    Hotdoc's setuptools entry point
    """
    res = [SyntaxHighlightingExtension, SearchExtension, TagExtension,
           DevhelpExtension, LicenseExtension, GitUploadExtension,
           EditOnGitHubExtension]
    if sys.version_info >= (3, 5):
        res += [DBusExtension]
    try:
        from hotdoc.extensions.c.c_extension import CExtension
        res += [CExtension]
    except ImportError:
        pass

    try:
        from hotdoc.extensions.gi.gi_extension import GIExtension
        res += [GIExtension]
    except ImportError:
        pass

    return res
python
def get_extension_classes():
    """
    Hotdoc's setuptools entry point
    """
    res = [SyntaxHighlightingExtension, SearchExtension, TagExtension,
           DevhelpExtension, LicenseExtension, GitUploadExtension,
           EditOnGitHubExtension]
    if sys.version_info >= (3, 5):
        res += [DBusExtension]
    try:
        from hotdoc.extensions.c.c_extension import CExtension
        res += [CExtension]
    except ImportError:
        pass

    try:
        from hotdoc.extensions.gi.gi_extension import GIExtension
        res += [GIExtension]
    except ImportError:
        pass

    return res
[ "def", "get_extension_classes", "(", ")", ":", "res", "=", "[", "SyntaxHighlightingExtension", ",", "SearchExtension", ",", "TagExtension", ",", "DevhelpExtension", ",", "LicenseExtension", ",", "GitUploadExtension", ",", "EditOnGitHubExtension", "]", "if", "sys", ".", "version_info", "[", "1", "]", ">=", "5", ":", "res", "+=", "[", "DBusExtension", "]", "try", ":", "from", "hotdoc", ".", "extensions", ".", "c", ".", "c_extension", "import", "CExtension", "res", "+=", "[", "CExtension", "]", "except", "ImportError", ":", "pass", "try", ":", "from", "hotdoc", ".", "extensions", ".", "gi", ".", "gi_extension", "import", "GIExtension", "res", "+=", "[", "GIExtension", "]", "except", "ImportError", ":", "pass", "return", "res" ]
Hotdoc's setuptools entry point
[ "Hotdoc", "s", "setuptools", "entry", "point" ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/extensions/__init__.py#L40-L63
train
235,697
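A minimal, purely illustrative sketch of inspecting what the entry point returns in the current environment:

# The C and GObject-Introspection extensions only appear in the result
# when their optional dependencies import cleanly.
for ext_class in get_extension_classes():
    print(ext_class.__name__)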
hotdoc/hotdoc
hotdoc/extensions/c/clang/cindex.py
register_functions
def register_functions(lib, ignore_errors):
    """Register function prototypes with a libclang library instance.

    This must be called as part of library instantiation so Python knows how
    to call out to the shared library.
    """

    def register(item):
        return register_function(lib, item, ignore_errors)

    for f in functionList:
        register(f)
python
def register_functions(lib, ignore_errors):
    """Register function prototypes with a libclang library instance.

    This must be called as part of library instantiation so Python knows how
    to call out to the shared library.
    """

    def register(item):
        return register_function(lib, item, ignore_errors)

    for f in functionList:
        register(f)
[ "def", "register_functions", "(", "lib", ",", "ignore_errors", ")", ":", "def", "register", "(", "item", ")", ":", "return", "register_function", "(", "lib", ",", "item", ",", "ignore_errors", ")", "for", "f", "in", "functionList", ":", "register", "(", "f", ")" ]
Register function prototypes with a libclang library instance.

This must be called as part of library instantiation so Python knows how
to call out to the shared library.
[ "Register", "function", "prototypes", "with", "a", "libclang", "library", "instance", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/extensions/c/clang/cindex.py#L3814-L3825
train
235,698
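A sketch of the call, assuming the shared library is loaded through ctypes; the library filename is an assumption and varies by platform:

import ctypes

# Hypothetical: load libclang, then wire up every known prototype so
# later calls go through properly typed ctypes stubs.
lib = ctypes.cdll.LoadLibrary('libclang.so')  # e.g. libclang.dylib on macOS
register_functions(lib, ignore_errors=False)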
hotdoc/hotdoc
hotdoc/extensions/c/clang/cindex.py
SourceLocation.from_offset
def from_offset(tu, file, offset):
    """Retrieve a SourceLocation from a given character offset.

    tu -- TranslationUnit file belongs to
    file -- File instance to obtain offset from
    offset -- Integer character offset within file
    """
    return conf.lib.clang_getLocationForOffset(tu, file, offset)
python
def from_offset(tu, file, offset):
    """Retrieve a SourceLocation from a given character offset.

    tu -- TranslationUnit file belongs to
    file -- File instance to obtain offset from
    offset -- Integer character offset within file
    """
    return conf.lib.clang_getLocationForOffset(tu, file, offset)
[ "def", "from_offset", "(", "tu", ",", "file", ",", "offset", ")", ":", "return", "conf", ".", "lib", ".", "clang_getLocationForOffset", "(", "tu", ",", "file", ",", "offset", ")" ]
Retrieve a SourceLocation from a given character offset.

tu -- TranslationUnit file belongs to
file -- File instance to obtain offset from
offset -- Integer character offset within file
[ "Retrieve", "a", "SourceLocation", "from", "a", "given", "character", "offset", "." ]
1067cdc8482b585b364a38fb52ca5d904e486280
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/extensions/c/clang/cindex.py#L217-L224
train
235,699
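A minimal usage sketch built on the surrounding cindex API; the source filename and offset are assumptions:

from hotdoc.extensions.c.clang.cindex import Index, File, SourceLocation

# Hypothetical: map a raw character offset back to a line/column position.
index = Index.create()
tu = index.parse('example.c')
f = File.from_name(tu, 'example.c')
loc = SourceLocation.from_offset(tu, f, 42)
print(loc.line, loc.column)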