id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
242,500
tanghaibao/goatools
goatools/gosubdag/plot/go_name_shorten.py
ShortenText.get_short_plot_name
def get_short_plot_name(self, goobj):
    """Shorten some GO names so plots are smaller.

    Deeper (more specific) GO terms get progressively heavier abbreviation.
    Greek letters are spelled out/replaced via self.replace_greek.
    """
    name = goobj.name
    if self._keep_this(name):
        return self.replace_greek(name)
    name = name.replace("cellular response to chemical stimulus",
                        "cellular rsp. to chemical stim.")
    depth = goobj.depth
    if depth > 1:
        # Order matters: "regulation of " must be shortened before the
        # "+reg"/"-reg" patterns can match.
        for long_txt, short_txt in (
                ("regulation of ", "reg. of "),
                ("positive reg", "+reg"),
                ("negative reg", "-reg"),
                ("involved in", "in")):
            name = name.replace(long_txt, short_txt)
    if depth > 2:
        name = name.replace("antigen processing and presentation", "a.p.p")
        name = name.replace("MHC class I", "MHC-I")
        # Special-case one very long GO term name (depth==4 implies depth>2).
        if depth == 4 and goobj.id == "GO:0002460":
            before = ("adaptive immune response based on somatic recombination of "
                      "immune receptors built from immunoglobulin superfamily domains")
            name = name.replace(
                before,
                "rsp. based on somatic recombination of Ig immune receptors")
    if depth > 3:
        # "immunoglobulin superfamily domains" must precede "immunoglobulin".
        for long_txt, short_txt in (
                ("signaling pathway", "sig. pw."),
                ("response", "rsp."),
                ("immunoglobulin superfamily domains", "Ig domains"),
                ("immunoglobulin", "Ig")):
            name = name.replace(long_txt, short_txt)
    if depth > 4:
        name = name.replace("production", "prod.")
    if depth in (5, 6):
        name = name.replace("tumor necrosis factor", "TNF")
    return self.replace_greek(name)
python
def get_short_plot_name(self, goobj): name = goobj.name if self._keep_this(name): return self.replace_greek(name) name = name.replace("cellular response to chemical stimulus", "cellular rsp. to chemical stim.") depth = goobj.depth if depth > 1: name = name.replace("regulation of ", "reg. of ") name = name.replace("positive reg", "+reg") name = name.replace("negative reg", "-reg") name = name.replace("involved in", "in") if depth > 2: name = name.replace("antigen processing and presentation", "a.p.p") name = name.replace("MHC class I", "MHC-I") if depth == 4: if goobj.id == "GO:0002460": before = " ".join([ "adaptive immune response based on somatic recombination of", "immune receptors built from immunoglobulin superfamily domains"]) name = name.replace( before, "rsp. based on somatic recombination of Ig immune receptors") if depth > 3: name = name.replace("signaling pathway", "sig. pw.") name = name.replace("response", "rsp.") name = name.replace("immunoglobulin superfamily domains", "Ig domains") name = name.replace("immunoglobulin", "Ig") if depth > 4: name = name.replace("production", "prod.") if depth == 6 or depth == 5: name = name.replace("tumor necrosis factor", "TNF") name = self.replace_greek(name) return name
[ "def", "get_short_plot_name", "(", "self", ",", "goobj", ")", ":", "name", "=", "goobj", ".", "name", "if", "self", ".", "_keep_this", "(", "name", ")", ":", "return", "self", ".", "replace_greek", "(", "name", ")", "name", "=", "name", ".", "replace",...
Shorten some GO names so plots are smaller.
[ "Shorten", "some", "GO", "names", "so", "plots", "are", "smaller", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/plot/go_name_shorten.py#L26-L60
242,501
tanghaibao/goatools
goatools/gosubdag/plot/go_name_shorten.py
ShortenText.shorten_go_name_ptbl1
def shorten_go_name_ptbl1(self, name):
    """Shorten GO name for tables in paper.

    Names in self.keep (checked via _keep_this) are returned unmodified.
    """
    if self._keep_this(name):
        return name
    # Applied in order; each substitution is unconditional.
    for long_txt, short_txt in (
            ("negative", "neg."),
            ("positive", "pos."),
            ("response", "rsp."),
            ("regulation", "reg."),
            ("antigen processing and presentation", "app.")):
        name = name.replace(long_txt, short_txt)
    return name
python
def shorten_go_name_ptbl1(self, name): if self._keep_this(name): return name name = name.replace("negative", "neg.") name = name.replace("positive", "pos.") name = name.replace("response", "rsp.") name = name.replace("regulation", "reg.") name = name.replace("antigen processing and presentation", "app.") return name
[ "def", "shorten_go_name_ptbl1", "(", "self", ",", "name", ")", ":", "if", "self", ".", "_keep_this", "(", "name", ")", ":", "return", "name", "name", "=", "name", ".", "replace", "(", "\"negative\"", ",", "\"neg.\"", ")", "name", "=", "name", ".", "rep...
Shorten GO name for tables in paper.
[ "Shorten", "GO", "name", "for", "tables", "in", "paper", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/plot/go_name_shorten.py#L62-L71
242,502
tanghaibao/goatools
goatools/gosubdag/plot/go_name_shorten.py
ShortenText.shorten_go_name_ptbl3
def shorten_go_name_ptbl3(self, name, dcnt):
    """Shorten GO description for Table 3 in manuscript.

    dcnt is the term's descendant count; fewer descendants (more specific
    terms) get progressively heavier abbreviation.
    """
    if self._keep_this(name):
        return name
    name = name.replace("positive regulation of immune system process",
                        "+ reg. of immune sys. process")
    name = name.replace("positive regulation of immune response",
                        "+ reg. of immune response")
    name = name.replace("positive regulation of cytokine production",
                        "+ reg. of cytokine production")
    if dcnt < 40:
        name = name.replace("antigen processing and presentation", "a.p.p.")
    if dcnt < 10:
        name = name.replace("negative", "-")
        name = name.replace("positive", "+")
        name = name.replace("tumor necrosis factor production", "TNF production")
    if dcnt < 4:
        name = name.replace("regulation", "reg.")
        name = name.replace("exogenous ", "")
        # " via " is shortened BEFORE the T-cell rewrite below, so the
        # "via" that rewrite introduces is deliberately left intact.
        name = name.replace(" via ", " w/")
        name = name.replace("T cell mediated cytotoxicity", "cytotoxicity via T cell")
        name = name.replace('involved in', 'in')
        name = name.replace('-positive', '+')
    return name
python
def shorten_go_name_ptbl3(self, name, dcnt): if self._keep_this(name): return name name = name.replace("positive regulation of immune system process", "+ reg. of immune sys. process") name = name.replace("positive regulation of immune response", "+ reg. of immune response") name = name.replace("positive regulation of cytokine production", "+ reg. of cytokine production") if dcnt < 40: name = name.replace("antigen processing and presentation", "a.p.p.") if dcnt < 10: name = name.replace("negative", "-") name = name.replace("positive", "+") #name = name.replace("tumor necrosis factor production", "tumor necrosis factor prod.") name = name.replace("tumor necrosis factor production", "TNF production") if dcnt < 4: name = name.replace("regulation", "reg.") name = name.replace("exogenous ", "") name = name.replace(" via ", " w/") name = name.replace("T cell mediated cytotoxicity", "cytotoxicity via T cell") name = name.replace('involved in', 'in') name = name.replace('-positive', '+') return name
[ "def", "shorten_go_name_ptbl3", "(", "self", ",", "name", ",", "dcnt", ")", ":", "if", "self", ".", "_keep_this", "(", "name", ")", ":", "return", "name", "name", "=", "name", ".", "replace", "(", "\"positive regulation of immune system process\"", ",", "\"+ r...
Shorten GO description for Table 3 in manuscript.
[ "Shorten", "GO", "description", "for", "Table", "3", "in", "manuscript", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/plot/go_name_shorten.py#L73-L97
242,503
tanghaibao/goatools
goatools/gosubdag/plot/go_name_shorten.py
ShortenText.shorten_go_name_all
def shorten_go_name_all(self, name):
    """Shorten GO name for tables in paper, supplemental materials, and plots."""
    name = self.replace_greek(name)
    return name.replace("MHC class I", "MHC-I")
python
def shorten_go_name_all(self, name): name = self.replace_greek(name) name = name.replace("MHC class I", "MHC-I") return name
[ "def", "shorten_go_name_all", "(", "self", ",", "name", ")", ":", "name", "=", "self", ".", "replace_greek", "(", "name", ")", "name", "=", "name", ".", "replace", "(", "\"MHC class I\"", ",", "\"MHC-I\"", ")", "return", "name" ]
Shorten GO name for tables in paper, supplemental materials, and plots.
[ "Shorten", "GO", "name", "for", "tables", "in", "paper", "supplemental", "materials", "and", "plots", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/plot/go_name_shorten.py#L125-L129
242,504
tanghaibao/goatools
goatools/gosubdag/plot/go_name_shorten.py
ShortenText._keep_this
def _keep_this(self, name): """Return True if there are to be no modifications to name.""" for keep_name in self.keep: if name == keep_name: return True return False
python
def _keep_this(self, name): for keep_name in self.keep: if name == keep_name: return True return False
[ "def", "_keep_this", "(", "self", ",", "name", ")", ":", "for", "keep_name", "in", "self", ".", "keep", ":", "if", "name", "==", "keep_name", ":", "return", "True", "return", "False" ]
Return True if there are to be no modifications to name.
[ "Return", "True", "if", "there", "are", "to", "be", "no", "modifications", "to", "name", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/plot/go_name_shorten.py#L131-L136
242,505
tanghaibao/goatools
goatools/gosubdag/rpt/wr_xlsx.py
read_d1_letter
def read_d1_letter(fin_txt):
    """Reads letter aliases from a text file created by GoDepth1LettersWr.

    Returns a dict mapping GO ID -> single-character alias. Lines whose
    first character is a space carry no alias and are skipped.
    """
    go2letter = {}
    re_goid = re.compile(r"(GO:\d{7})")
    with open(fin_txt) as ifstrm:
        for line in ifstrm:
            mtch = re_goid.search(line)
            # Alias is expected to be the first character of the line.
            if mtch and line[:1] != ' ':
                go2letter[mtch.group(1)] = line[:1]
    return go2letter
python
def read_d1_letter(fin_txt): go2letter = {} re_goid = re.compile(r"(GO:\d{7})") with open(fin_txt) as ifstrm: for line in ifstrm: mtch = re_goid.search(line) if mtch and line[:1] != ' ': # Alias is expected to be the first character go2letter[mtch.group(1)] = line[:1] return go2letter
[ "def", "read_d1_letter", "(", "fin_txt", ")", ":", "go2letter", "=", "{", "}", "re_goid", "=", "re", ".", "compile", "(", "r\"(GO:\\d{7})\"", ")", "with", "open", "(", "fin_txt", ")", "as", "ifstrm", ":", "for", "line", "in", "ifstrm", ":", "mtch", "="...
Reads letter aliases from a text file created by GoDepth1LettersWr.
[ "Reads", "letter", "aliases", "from", "a", "text", "file", "created", "by", "GoDepth1LettersWr", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/rpt/wr_xlsx.py#L66-L76
242,506
tanghaibao/goatools
goatools/gosubdag/rpt/wr_xlsx.py
GoSubDagWr.get_goids_sections
def get_goids_sections(sections):
    """Return all the GO IDs in a 2-D sections list.

    sections is an iterable of (section_name, goids) pairs; the union of
    all goids is returned as a set.
    """
    return {goid for _, goids_sec in sections for goid in goids_sec}
python
def get_goids_sections(sections): goids_all = set() for _, goids_sec in sections: goids_all |= set(goids_sec) return goids_all
[ "def", "get_goids_sections", "(", "sections", ")", ":", "goids_all", "=", "set", "(", ")", "for", "_", ",", "goids_sec", "in", "sections", ":", "goids_all", "|=", "set", "(", "goids_sec", ")", "return", "goids_all" ]
Return all the GO IDs in a 2-D sections list.
[ "Return", "all", "the", "GO", "IDs", "in", "a", "2", "-", "D", "sections", "list", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/rpt/wr_xlsx.py#L58-L63
242,507
tanghaibao/goatools
goatools/gosubdag/rpt/wr_xlsx.py
GoDepth1LettersWr.prt_txt
def prt_txt(self, prt=sys.stdout, pre=''):
    """Print letters, descendant count, and GO information.

    Writes one line per depth-01 namedtuple to stream ``prt``, each
    prefixed with ``pre``; returns the list of namedtuples printed.
    """
    data_nts = self.get_d1nts()
    # Hoisted loop-invariant format string (literal unchanged).
    fmt = "{PRE}{L:1} {NS} {d:6,} D{D:02} {GO} {NAME}\n"
    for ntdata in data_nts:
        prt.write(fmt.format(
            PRE=pre, L=ntdata.D1, d=ntdata.dcnt, NS=ntdata.NS,
            D=ntdata.depth, GO=ntdata.GO, NAME=ntdata.name))
    return data_nts
python
def prt_txt(self, prt=sys.stdout, pre=''): data_nts = self.get_d1nts() for ntdata in data_nts: prt.write("{PRE}{L:1} {NS} {d:6,} D{D:02} {GO} {NAME}\n".format( PRE=pre, L=ntdata.D1, d=ntdata.dcnt, NS=ntdata.NS, D=ntdata.depth, GO=ntdata.GO, NAME=ntdata.name)) return data_nts
[ "def", "prt_txt", "(", "self", ",", "prt", "=", "sys", ".", "stdout", ",", "pre", "=", "''", ")", ":", "data_nts", "=", "self", ".", "get_d1nts", "(", ")", "for", "ntdata", "in", "data_nts", ":", "prt", ".", "write", "(", "\"{PRE}{L:1} {NS} {d:6,} D{D:...
Print letters, descendant count, and GO information.
[ "Print", "letters", "descendant", "count", "and", "GO", "information", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/rpt/wr_xlsx.py#L88-L100
242,508
tanghaibao/goatools
goatools/gosubdag/rpt/wr_xlsx.py
GoDepth1LettersWr.wr_xlsx
def wr_xlsx(self, fout_xlsx="gos_depth01.xlsx", **kws):
    """Write xlsx table of depth-01 GO terms and their letter representation.

    User-supplied kws override the default column widths and headers.
    """
    data_nts = self.get_d1nts()
    # Fill in defaults only when the caller did not provide them.
    kws.setdefault('fld2col_widths', {'D1': 6, 'NS': 3, 'depth': 5, 'GO': 12, 'name': 40})
    kws.setdefault('hdrs', self.hdrs)
    wr_xlsx_tbl(fout_xlsx, data_nts, **kws)
python
def wr_xlsx(self, fout_xlsx="gos_depth01.xlsx", **kws): data_nts = self.get_d1nts() if 'fld2col_widths' not in kws: kws['fld2col_widths'] = {'D1': 6, 'NS':3, 'depth': 5, 'GO': 12, 'name': 40} if 'hdrs' not in kws: kws['hdrs'] = self.hdrs wr_xlsx_tbl(fout_xlsx, data_nts, **kws)
[ "def", "wr_xlsx", "(", "self", ",", "fout_xlsx", "=", "\"gos_depth01.xlsx\"", ",", "*", "*", "kws", ")", ":", "data_nts", "=", "self", ".", "get_d1nts", "(", ")", "if", "'fld2col_widths'", "not", "in", "kws", ":", "kws", "[", "'fld2col_widths'", "]", "="...
Write xlsx table of depth-01 GO terms and their letter representation.
[ "Write", "xlsx", "table", "of", "depth", "-", "01", "GO", "terms", "and", "their", "letter", "representation", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/rpt/wr_xlsx.py#L102-L109
242,509
tanghaibao/goatools
goatools/gosubdag/rpt/wr_xlsx.py
GoDepth1LettersWr.get_d1nts
def get_d1nts(self):
    """Get letters for depth-01 GO terms, descendants count, and GO information.

    Returns a list of NtPrt namedtuples; a namespace header row (blank
    letter alias) is inserted before the first term of each namespace.
    """
    data = []
    ntdata = cx.namedtuple("NtPrt", "D1 NS dcnt depth GO name")
    namespace = None
    # Sort by namespace, then by descending descendant count, then alias.
    for ntlet in sorted(self.goone2ntletter.values(),
                        key=lambda nt: [nt.goobj.namespace, -1 * nt.dcnt, nt.D1]):
        goobj = ntlet.goobj
        goid = goobj.id
        # Depth-01 terms have exactly one parent (the namespace root).
        assert len(goobj.parents) == 1
        if namespace != goobj.namespace:
            # First term of a new namespace: emit the root-term header row.
            namespace = goobj.namespace
            ntns = self.ns2nt[namespace]
            pobj = ntns.goobj
            ns2 = self.str2ns[goobj.namespace]
            data.append(ntdata._make([" ", ns2, ntns.dcnt, pobj.depth, pobj.id, pobj.name]))
        data.append(ntdata._make(
            [ntlet.D1, self.str2ns[namespace], ntlet.dcnt, goobj.depth, goid, goobj.name]))
    return data
python
def get_d1nts(self): data = [] ntdata = cx.namedtuple("NtPrt", "D1 NS dcnt depth GO name") namespace = None for ntlet in sorted(self.goone2ntletter.values(), key=lambda nt: [nt.goobj.namespace, -1 * nt.dcnt, nt.D1]): goobj = ntlet.goobj goid = goobj.id assert len(goobj.parents) == 1 if namespace != goobj.namespace: namespace = goobj.namespace ntns = self.ns2nt[namespace] pobj = ntns.goobj ns2 = self.str2ns[goobj.namespace] data.append(ntdata._make([" ", ns2, ntns.dcnt, pobj.depth, pobj.id, pobj.name])) data.append(ntdata._make( [ntlet.D1, self.str2ns[namespace], ntlet.dcnt, goobj.depth, goid, goobj.name])) return data
[ "def", "get_d1nts", "(", "self", ")", ":", "data", "=", "[", "]", "ntdata", "=", "cx", ".", "namedtuple", "(", "\"NtPrt\"", ",", "\"D1 NS dcnt depth GO name\"", ")", "namespace", "=", "None", "for", "ntlet", "in", "sorted", "(", "self", ".", "goone2ntlette...
Get letters for depth-01 GO terms, descendants count, and GO information.
[ "Get", "letters", "for", "depth", "-", "01", "GO", "terms", "descendants", "count", "and", "GO", "information", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/rpt/wr_xlsx.py#L172-L190
242,510
tanghaibao/goatools
goatools/gosubdag/rpt/wr_xlsx.py
GoDepth1LettersWr._init_ns2nt
def _init_ns2nt(rcntobj):
    """Save depth-00 GO terms ordered using descendants cnt.

    Returns an OrderedDict mapping namespace -> NtD1 namedtuple for each
    depth-00 (root) GO term in rcntobj.
    """
    go2dcnt = rcntobj.go2dcnt
    ntobj = cx.namedtuple("NtD1", "D1 dcnt goobj")
    d0s = rcntobj.depth2goobjs[0]
    # Root terms get a blank letter alias (D1="").
    ns_nt = [(o.namespace, ntobj(D1="", dcnt=go2dcnt[o.id], goobj=o)) for o in d0s]
    return cx.OrderedDict(ns_nt)
python
def _init_ns2nt(rcntobj): go2dcnt = rcntobj.go2dcnt ntobj = cx.namedtuple("NtD1", "D1 dcnt goobj") d0s = rcntobj.depth2goobjs[0] ns_nt = [(o.namespace, ntobj(D1="", dcnt=go2dcnt[o.id], goobj=o)) for o in d0s] return cx.OrderedDict(ns_nt)
[ "def", "_init_ns2nt", "(", "rcntobj", ")", ":", "go2dcnt", "=", "rcntobj", ".", "go2dcnt", "ntobj", "=", "cx", ".", "namedtuple", "(", "\"NtD1\"", ",", "\"D1 dcnt goobj\"", ")", "d0s", "=", "rcntobj", ".", "depth2goobjs", "[", "0", "]", "ns_nt", "=", "["...
Save depth-00 GO terms ordered using descendants cnt.
[ "Save", "depth", "-", "00", "GO", "terms", "ordered", "using", "descendants", "cnt", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/rpt/wr_xlsx.py#L193-L199
242,511
tanghaibao/goatools
goatools/grouper/wrxlsx.py
WrXlsxSortedGos._get_xlsx_kws
def _get_xlsx_kws(self, **kws_usr): """Return keyword arguments relevant to writing an xlsx.""" kws_xlsx = {'fld2col_widths':self._get_fld2col_widths(**kws_usr), 'items':'GO IDs'} remaining_keys = set(['title', 'hdrs', 'prt_flds', 'fld2fmt', 'ntval2wbfmtdict', 'ntfld_wbfmt']) for usr_key, usr_val in kws_usr.items(): if usr_key in remaining_keys: kws_xlsx[usr_key] = usr_val return kws_xlsx
python
def _get_xlsx_kws(self, **kws_usr): kws_xlsx = {'fld2col_widths':self._get_fld2col_widths(**kws_usr), 'items':'GO IDs'} remaining_keys = set(['title', 'hdrs', 'prt_flds', 'fld2fmt', 'ntval2wbfmtdict', 'ntfld_wbfmt']) for usr_key, usr_val in kws_usr.items(): if usr_key in remaining_keys: kws_xlsx[usr_key] = usr_val return kws_xlsx
[ "def", "_get_xlsx_kws", "(", "self", ",", "*", "*", "kws_usr", ")", ":", "kws_xlsx", "=", "{", "'fld2col_widths'", ":", "self", ".", "_get_fld2col_widths", "(", "*", "*", "kws_usr", ")", ",", "'items'", ":", "'GO IDs'", "}", "remaining_keys", "=", "set", ...
Return keyword arguments relevant to writing an xlsx.
[ "Return", "keyword", "arguments", "relevant", "to", "writing", "an", "xlsx", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/wrxlsx.py#L113-L121
242,512
tanghaibao/goatools
goatools/grouper/wrxlsx.py
WrXlsxSortedGos._adjust_prt_flds
def _adjust_prt_flds(self, kws_xlsx, desc2nts, shade_hdrgos): """Print user-requested fields or provided fields minus info fields.""" # Use xlsx prt_flds from the user, if provided if "prt_flds" in kws_xlsx: return kws_xlsx["prt_flds"] # If the user did not provide specific fields to print in an xlsx file: dont_print = set(['hdr_idx', 'is_hdrgo', 'is_usrgo']) # Are we printing GO group headers? # Build new list of xlsx print fields, excluding those which add no new information prt_flds_adjusted = [] # Get all namedtuple fields nt_flds = self.sortobj.get_fields(desc2nts) # Keep fields intended for print and optionally gray-shade field (format_txt) # print('FFFFFFFFFFFFFFF WrXlsxSortedGos::_adjust_prt_flds:', nt_flds) for nt_fld in nt_flds: if nt_fld not in dont_print: # Only add grey-shade to hdrgo and section name rows if hdrgo_prt=True if nt_fld == "format_txt": if shade_hdrgos is True: prt_flds_adjusted.append(nt_fld) else: prt_flds_adjusted.append(nt_fld) kws_xlsx['prt_flds'] = prt_flds_adjusted
python
def _adjust_prt_flds(self, kws_xlsx, desc2nts, shade_hdrgos): # Use xlsx prt_flds from the user, if provided if "prt_flds" in kws_xlsx: return kws_xlsx["prt_flds"] # If the user did not provide specific fields to print in an xlsx file: dont_print = set(['hdr_idx', 'is_hdrgo', 'is_usrgo']) # Are we printing GO group headers? # Build new list of xlsx print fields, excluding those which add no new information prt_flds_adjusted = [] # Get all namedtuple fields nt_flds = self.sortobj.get_fields(desc2nts) # Keep fields intended for print and optionally gray-shade field (format_txt) # print('FFFFFFFFFFFFFFF WrXlsxSortedGos::_adjust_prt_flds:', nt_flds) for nt_fld in nt_flds: if nt_fld not in dont_print: # Only add grey-shade to hdrgo and section name rows if hdrgo_prt=True if nt_fld == "format_txt": if shade_hdrgos is True: prt_flds_adjusted.append(nt_fld) else: prt_flds_adjusted.append(nt_fld) kws_xlsx['prt_flds'] = prt_flds_adjusted
[ "def", "_adjust_prt_flds", "(", "self", ",", "kws_xlsx", ",", "desc2nts", ",", "shade_hdrgos", ")", ":", "# Use xlsx prt_flds from the user, if provided", "if", "\"prt_flds\"", "in", "kws_xlsx", ":", "return", "kws_xlsx", "[", "\"prt_flds\"", "]", "# If the user did not...
Print user-requested fields or provided fields minus info fields.
[ "Print", "user", "-", "requested", "fields", "or", "provided", "fields", "minus", "info", "fields", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/wrxlsx.py#L123-L145
242,513
tanghaibao/goatools
goatools/grouper/wrxlsx.py
WrXlsxSortedGos._get_fld2col_widths
def _get_fld2col_widths(self, **kws): """Return xlsx column widths based on default and user-specified field-value pairs.""" fld2col_widths = self._init_fld2col_widths() if 'fld2col_widths' not in kws: return fld2col_widths for fld, val in kws['fld2col_widths'].items(): fld2col_widths[fld] = val return fld2col_widths
python
def _get_fld2col_widths(self, **kws): fld2col_widths = self._init_fld2col_widths() if 'fld2col_widths' not in kws: return fld2col_widths for fld, val in kws['fld2col_widths'].items(): fld2col_widths[fld] = val return fld2col_widths
[ "def", "_get_fld2col_widths", "(", "self", ",", "*", "*", "kws", ")", ":", "fld2col_widths", "=", "self", ".", "_init_fld2col_widths", "(", ")", "if", "'fld2col_widths'", "not", "in", "kws", ":", "return", "fld2col_widths", "for", "fld", ",", "val", "in", ...
Return xlsx column widths based on default and user-specified field-value pairs.
[ "Return", "xlsx", "column", "widths", "based", "on", "default", "and", "user", "-", "specified", "field", "-", "value", "pairs", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/wrxlsx.py#L147-L154
242,514
tanghaibao/goatools
goatools/grouper/wrxlsx.py
WrXlsxSortedGos._init_fld2col_widths
def _init_fld2col_widths(self):
    """Return default column widths for writing an Excel Spreadsheet.

    Merges, in order of increasing precedence: GoSubDagWr's class-level
    defaults, the print-format object's defaults, and a fixed width of 2
    for each header-index field.
    """
    # GO info namedtuple fields: NS dcnt level depth GO D1 name
    # GO header namedtuple fields: format_txt hdr_idx
    fld2col_widths = GoSubDagWr.fld2col_widths.copy()
    for fld, wid in self.oprtfmt.default_fld2col_widths.items():
        fld2col_widths[fld] = wid
    for fld in get_hdridx_flds():
        fld2col_widths[fld] = 2
    return fld2col_widths
python
def _init_fld2col_widths(self): # GO info namedtuple fields: NS dcnt level depth GO D1 name # GO header namedtuple fields: format_txt hdr_idx fld2col_widths = GoSubDagWr.fld2col_widths.copy() for fld, wid in self.oprtfmt.default_fld2col_widths.items(): fld2col_widths[fld] = wid for fld in get_hdridx_flds(): fld2col_widths[fld] = 2 return fld2col_widths
[ "def", "_init_fld2col_widths", "(", "self", ")", ":", "# GO info namedtuple fields: NS dcnt level depth GO D1 name", "# GO header namedtuple fields: format_txt hdr_idx", "fld2col_widths", "=", "GoSubDagWr", ".", "fld2col_widths", ".", "copy", "(", ")", "for", "fld", ",", "wid...
Return default column widths for writing an Excel Spreadsheet.
[ "Return", "default", "column", "widths", "for", "writing", "an", "Excel", "Spreadsheet", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/wrxlsx.py#L156-L165
242,515
tanghaibao/goatools
goatools/grouper/wrxlsx.py
WrXlsxSortedGos._get_shade_hdrgos
def _get_shade_hdrgos(**kws): """If no hdrgo_prt specified, and these conditions are present -> hdrgo_prt=F.""" # KWS: shade_hdrgos hdrgo_prt section_sortby top_n if 'shade_hdrgos' in kws: return kws['shade_hdrgos'] # Return user-sepcified hdrgo_prt, if provided if 'hdrgo_prt' in kws: return kws['hdrgo_prt'] # If no hdrgo_prt provided, set hdrgo_prt to False if: # * section_sortby == True # * section_sortby = user_sort # * top_n == N if 'section_sortby' in kws and kws['section_sortby']: return False if 'top_n' in kws and isinstance(kws['top_n'], int): return False return True
python
def _get_shade_hdrgos(**kws): # KWS: shade_hdrgos hdrgo_prt section_sortby top_n if 'shade_hdrgos' in kws: return kws['shade_hdrgos'] # Return user-sepcified hdrgo_prt, if provided if 'hdrgo_prt' in kws: return kws['hdrgo_prt'] # If no hdrgo_prt provided, set hdrgo_prt to False if: # * section_sortby == True # * section_sortby = user_sort # * top_n == N if 'section_sortby' in kws and kws['section_sortby']: return False if 'top_n' in kws and isinstance(kws['top_n'], int): return False return True
[ "def", "_get_shade_hdrgos", "(", "*", "*", "kws", ")", ":", "# KWS: shade_hdrgos hdrgo_prt section_sortby top_n", "if", "'shade_hdrgos'", "in", "kws", ":", "return", "kws", "[", "'shade_hdrgos'", "]", "# Return user-sepcified hdrgo_prt, if provided", "if", "'hdrgo_prt'", ...
If no hdrgo_prt specified, and these conditions are present -> hdrgo_prt=F.
[ "If", "no", "hdrgo_prt", "specified", "and", "these", "conditions", "are", "present", "-", ">", "hdrgo_prt", "=", "F", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/wrxlsx.py#L174-L190
242,516
tanghaibao/goatools
goatools/rpt/goea_nt_xfrm.py
MgrNtGOEAs.dflt_sortby_objgoea
def dflt_sortby_objgoea(goea_res):
    """Default sorting of GOEA results.

    Direct attribute access replaces the original needless getattr calls,
    matching the sibling dflt_sortby_ntgoea.
    """
    return [goea_res.enrichment, goea_res.namespace,
            goea_res.p_uncorrected, goea_res.depth, goea_res.GO]
python
def dflt_sortby_objgoea(goea_res): return [getattr(goea_res, 'enrichment'), getattr(goea_res, 'namespace'), getattr(goea_res, 'p_uncorrected'), getattr(goea_res, 'depth'), getattr(goea_res, 'GO')]
[ "def", "dflt_sortby_objgoea", "(", "goea_res", ")", ":", "return", "[", "getattr", "(", "goea_res", ",", "'enrichment'", ")", ",", "getattr", "(", "goea_res", ",", "'namespace'", ")", ",", "getattr", "(", "goea_res", ",", "'p_uncorrected'", ")", ",", "getatt...
Default sorting of GOEA results.
[ "Default", "sorting", "of", "GOEA", "results", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/rpt/goea_nt_xfrm.py#L44-L50
242,517
tanghaibao/goatools
goatools/rpt/goea_nt_xfrm.py
MgrNtGOEAs.dflt_sortby_ntgoea
def dflt_sortby_ntgoea(ntgoea):
    """Default sorting of GOEA results stored in namedtuples."""
    return [ntgoea.enrichment, ntgoea.namespace,
            ntgoea.p_uncorrected, ntgoea.depth, ntgoea.GO]
python
def dflt_sortby_ntgoea(ntgoea): return [ntgoea.enrichment, ntgoea.namespace, ntgoea.p_uncorrected, ntgoea.depth, ntgoea.GO]
[ "def", "dflt_sortby_ntgoea", "(", "ntgoea", ")", ":", "return", "[", "ntgoea", ".", "enrichment", ",", "ntgoea", ".", "namespace", ",", "ntgoea", ".", "p_uncorrected", ",", "ntgoea", ".", "depth", ",", "ntgoea", ".", "GO", "]" ]
Default sorting of GOEA results stored in namedtuples.
[ "Default", "sorting", "of", "GOEA", "results", "stored", "in", "namedtuples", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/rpt/goea_nt_xfrm.py#L53-L59
242,518
tanghaibao/goatools
goatools/rpt/goea_nt_xfrm.py
MgrNtGOEAs.get_goea_nts_prt
def get_goea_nts_prt(self, fldnames=None, **usr_kws):
    """Return list of namedtuples removing fields which are redundant or verbose.

    Copies usr_kws so the caller's dict is never mutated, then supplies
    defaults for 'not_fldnames' and 'rpt_fmt' before delegating.
    """
    kws = usr_kws.copy()
    kws.setdefault('not_fldnames', ['goterm', 'parents', 'children', 'id'])
    kws.setdefault('rpt_fmt', True)
    return self.get_goea_nts_all(fldnames, **kws)
python
def get_goea_nts_prt(self, fldnames=None, **usr_kws): kws = usr_kws.copy() if 'not_fldnames' not in kws: kws['not_fldnames'] = ['goterm', 'parents', 'children', 'id'] if 'rpt_fmt' not in kws: kws['rpt_fmt'] = True return self.get_goea_nts_all(fldnames, **kws)
[ "def", "get_goea_nts_prt", "(", "self", ",", "fldnames", "=", "None", ",", "*", "*", "usr_kws", ")", ":", "kws", "=", "usr_kws", ".", "copy", "(", ")", "if", "'not_fldnames'", "not", "in", "kws", ":", "kws", "[", "'not_fldnames'", "]", "=", "[", "'go...
Return list of namedtuples removing fields which are redundant or verbose.
[ "Return", "list", "of", "namedtuples", "removing", "fields", "which", "are", "redundant", "or", "verbose", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/rpt/goea_nt_xfrm.py#L61-L68
242,519
tanghaibao/goatools
goatools/rpt/goea_nt_xfrm.py
MgrNtGOEAs._get_field_values
def _get_field_values(item, fldnames, rpt_fmt=None, itemid2name=None): """Return fieldnames and values of either a namedtuple or GOEnrichmentRecord.""" if hasattr(item, "_fldsdefprt"): # Is a GOEnrichmentRecord return item.get_field_values(fldnames, rpt_fmt, itemid2name) if hasattr(item, "_fields"): # Is a namedtuple return [getattr(item, f) for f in fldnames]
python
def _get_field_values(item, fldnames, rpt_fmt=None, itemid2name=None): if hasattr(item, "_fldsdefprt"): # Is a GOEnrichmentRecord return item.get_field_values(fldnames, rpt_fmt, itemid2name) if hasattr(item, "_fields"): # Is a namedtuple return [getattr(item, f) for f in fldnames]
[ "def", "_get_field_values", "(", "item", ",", "fldnames", ",", "rpt_fmt", "=", "None", ",", "itemid2name", "=", "None", ")", ":", "if", "hasattr", "(", "item", ",", "\"_fldsdefprt\"", ")", ":", "# Is a GOEnrichmentRecord", "return", "item", ".", "get_field_val...
Return fieldnames and values of either a namedtuple or GOEnrichmentRecord.
[ "Return", "fieldnames", "and", "values", "of", "either", "a", "namedtuple", "or", "GOEnrichmentRecord", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/rpt/goea_nt_xfrm.py#L103-L108
242,520
tanghaibao/goatools
goatools/rpt/goea_nt_xfrm.py
MgrNtGOEAs._get_fieldnames
def _get_fieldnames(item): """Return fieldnames of either a namedtuple or GOEnrichmentRecord.""" if hasattr(item, "_fldsdefprt"): # Is a GOEnrichmentRecord return item.get_prtflds_all() if hasattr(item, "_fields"): # Is a namedtuple return item._fields
python
def _get_fieldnames(item): if hasattr(item, "_fldsdefprt"): # Is a GOEnrichmentRecord return item.get_prtflds_all() if hasattr(item, "_fields"): # Is a namedtuple return item._fields
[ "def", "_get_fieldnames", "(", "item", ")", ":", "if", "hasattr", "(", "item", ",", "\"_fldsdefprt\"", ")", ":", "# Is a GOEnrichmentRecord", "return", "item", ".", "get_prtflds_all", "(", ")", "if", "hasattr", "(", "item", ",", "\"_fields\"", ")", ":", "# I...
Return fieldnames of either a namedtuple or GOEnrichmentRecord.
[ "Return", "fieldnames", "of", "either", "a", "namedtuple", "or", "GOEnrichmentRecord", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/rpt/goea_nt_xfrm.py#L111-L116
242,521
tanghaibao/goatools
goatools/grouper/colors.py
GrouperColors.get_bordercolor
def get_bordercolor(self): """Get bordercolor based on hdrgos and usergos.""" hdrgos_all = self.grprobj.hdrobj.get_hdrgos() hdrgos_unused = hdrgos_all.difference(self.hdrgos_actual) go2bordercolor = {} # hdrgos that went unused for hdrgo in hdrgos_unused: go2bordercolor[hdrgo] = self.hdrcol_all # hdrgos used in this grouping that are NOT usrgos for hdrgo in self.grprobj.hdrgo2usrgos.keys(): go2bordercolor[hdrgo] = self.hdrcol_all # hdrgos used in this grouping that ARE usrgos for hdrgo in self.grprobj.hdrgo_is_usrgo: go2bordercolor[hdrgo] = 'blue' # usrgos which are NOT hdrgos usrgos_rem = self.grprobj.usrgos.difference(self.grprobj.hdrgo_is_usrgo) for usrgo in usrgos_rem: go2bordercolor[usrgo] = '#029386' # teal # print("{N:5} hdrgos actual".format(N=len(self.hdrgos_actual))) # print("{N:5} hdrgos unused".format(N=len(hdrgos_unused))) # print("{N:5} hdrgos only BLACK".format(N=len(self.grprobj.hdrgo2usrgos.keys()))) # print("{N:5} usrgos".format(N=len(self.grprobj.usrgos))) # print("{N:5} usrgos AND hdrgos BLUE".format(N=len(self.grprobj.hdrgo_is_usrgo))) # print("{N:5} usrgos Only".format(N=len(usrgos_rem))) return go2bordercolor
python
def get_bordercolor(self): hdrgos_all = self.grprobj.hdrobj.get_hdrgos() hdrgos_unused = hdrgos_all.difference(self.hdrgos_actual) go2bordercolor = {} # hdrgos that went unused for hdrgo in hdrgos_unused: go2bordercolor[hdrgo] = self.hdrcol_all # hdrgos used in this grouping that are NOT usrgos for hdrgo in self.grprobj.hdrgo2usrgos.keys(): go2bordercolor[hdrgo] = self.hdrcol_all # hdrgos used in this grouping that ARE usrgos for hdrgo in self.grprobj.hdrgo_is_usrgo: go2bordercolor[hdrgo] = 'blue' # usrgos which are NOT hdrgos usrgos_rem = self.grprobj.usrgos.difference(self.grprobj.hdrgo_is_usrgo) for usrgo in usrgos_rem: go2bordercolor[usrgo] = '#029386' # teal # print("{N:5} hdrgos actual".format(N=len(self.hdrgos_actual))) # print("{N:5} hdrgos unused".format(N=len(hdrgos_unused))) # print("{N:5} hdrgos only BLACK".format(N=len(self.grprobj.hdrgo2usrgos.keys()))) # print("{N:5} usrgos".format(N=len(self.grprobj.usrgos))) # print("{N:5} usrgos AND hdrgos BLUE".format(N=len(self.grprobj.hdrgo_is_usrgo))) # print("{N:5} usrgos Only".format(N=len(usrgos_rem))) return go2bordercolor
[ "def", "get_bordercolor", "(", "self", ")", ":", "hdrgos_all", "=", "self", ".", "grprobj", ".", "hdrobj", ".", "get_hdrgos", "(", ")", "hdrgos_unused", "=", "hdrgos_all", ".", "difference", "(", "self", ".", "hdrgos_actual", ")", "go2bordercolor", "=", "{",...
Get bordercolor based on hdrgos and usergos.
[ "Get", "bordercolor", "based", "on", "hdrgos", "and", "usergos", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/colors.py#L18-L42
242,522
tanghaibao/goatools
goatools/grouper/colors.py
GrouperColors.get_go2color_users
def get_go2color_users(self, usrgo_color='#feffa3', # yellow hdrusrgo_color='#d4ffea', # green hdrgo_color='#eee6f6'): # purple """Get go2color for GO DAG plots.""" go2color = {} # Color user GO IDs for goid in self.usrgos: go2color[goid] = usrgo_color # Color header GO IDs. Headers which are also GO IDs get their own color. for goid_hdr in self.hdrgos_actual: go2color[goid_hdr] = hdrusrgo_color if goid_hdr in self.usrgos else hdrgo_color return go2color
python
def get_go2color_users(self, usrgo_color='#feffa3', # yellow hdrusrgo_color='#d4ffea', # green hdrgo_color='#eee6f6'): # purple go2color = {} # Color user GO IDs for goid in self.usrgos: go2color[goid] = usrgo_color # Color header GO IDs. Headers which are also GO IDs get their own color. for goid_hdr in self.hdrgos_actual: go2color[goid_hdr] = hdrusrgo_color if goid_hdr in self.usrgos else hdrgo_color return go2color
[ "def", "get_go2color_users", "(", "self", ",", "usrgo_color", "=", "'#feffa3'", ",", "# yellow", "hdrusrgo_color", "=", "'#d4ffea'", ",", "# green", "hdrgo_color", "=", "'#eee6f6'", ")", ":", "# purple", "go2color", "=", "{", "}", "# Color user GO IDs", "for", "...
Get go2color for GO DAG plots.
[ "Get", "go2color", "for", "GO", "DAG", "plots", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/colors.py#L44-L56
242,523
tanghaibao/goatools
goatools/grouper/aart_geneproducts_all.py
AArtGeneProductSetsAll.run
def run(self, name, goea_nts, log): """Run gene product ASCII art.""" objaart = AArtGeneProductSetsOne(name, goea_nts, self) if self.hdrobj.sections: return objaart.prt_report_grp1(log) else: return objaart.prt_report_grp0(log)
python
def run(self, name, goea_nts, log): objaart = AArtGeneProductSetsOne(name, goea_nts, self) if self.hdrobj.sections: return objaart.prt_report_grp1(log) else: return objaart.prt_report_grp0(log)
[ "def", "run", "(", "self", ",", "name", ",", "goea_nts", ",", "log", ")", ":", "objaart", "=", "AArtGeneProductSetsOne", "(", "name", ",", "goea_nts", ",", "self", ")", "if", "self", ".", "hdrobj", ".", "sections", ":", "return", "objaart", ".", "prt_r...
Run gene product ASCII art.
[ "Run", "gene", "product", "ASCII", "art", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/aart_geneproducts_all.py#L40-L46
242,524
tanghaibao/goatools
goatools/grouper/aart_geneproducts_all.py
AArtGeneProductSetsAll.get_chr2idx
def get_chr2idx(self): """Return a dict with the ASCII art character as key and its index as value.""" return {chr(ascii_int):idx for idx, ascii_int in enumerate(self.all_chrints)}
python
def get_chr2idx(self): return {chr(ascii_int):idx for idx, ascii_int in enumerate(self.all_chrints)}
[ "def", "get_chr2idx", "(", "self", ")", ":", "return", "{", "chr", "(", "ascii_int", ")", ":", "idx", "for", "idx", ",", "ascii_int", "in", "enumerate", "(", "self", ".", "all_chrints", ")", "}" ]
Return a dict with the ASCII art character as key and its index as value.
[ "Return", "a", "dict", "with", "the", "ASCII", "art", "character", "as", "key", "and", "its", "index", "as", "value", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/aart_geneproducts_all.py#L99-L101
242,525
tanghaibao/goatools
goatools/grouper/aart_geneproducts_all.py
AArtGeneProductSetsAll._init_kws
def _init_kws(self): """Fill default values for keyword args, if necessary.""" # Return user-specified GO formatting, if specfied: if 'fmtgo' not in self.kws: self.kws['fmtgo'] = self.grprdflt.gosubdag.prt_attr['fmt'] + "\n" if 'fmtgo2' not in self.kws: self.kws['fmtgo2'] = self.grprdflt.gosubdag.prt_attr['fmt'] + "\n" if 'fmtgene' not in self.kws: if 'itemid2name' not in self.kws: self.kws['fmtgene'] = "{AART} {ID}\n" else: self.kws['fmtgene'] = "{AART} {ID} {NAME}\n" if 'fmtgene2' not in self.kws: self.kws['fmtgene2'] = self.kws['fmtgene']
python
def _init_kws(self): # Return user-specified GO formatting, if specfied: if 'fmtgo' not in self.kws: self.kws['fmtgo'] = self.grprdflt.gosubdag.prt_attr['fmt'] + "\n" if 'fmtgo2' not in self.kws: self.kws['fmtgo2'] = self.grprdflt.gosubdag.prt_attr['fmt'] + "\n" if 'fmtgene' not in self.kws: if 'itemid2name' not in self.kws: self.kws['fmtgene'] = "{AART} {ID}\n" else: self.kws['fmtgene'] = "{AART} {ID} {NAME}\n" if 'fmtgene2' not in self.kws: self.kws['fmtgene2'] = self.kws['fmtgene']
[ "def", "_init_kws", "(", "self", ")", ":", "# Return user-specified GO formatting, if specfied:", "if", "'fmtgo'", "not", "in", "self", ".", "kws", ":", "self", ".", "kws", "[", "'fmtgo'", "]", "=", "self", ".", "grprdflt", ".", "gosubdag", ".", "prt_attr", ...
Fill default values for keyword args, if necessary.
[ "Fill", "default", "values", "for", "keyword", "args", "if", "necessary", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/aart_geneproducts_all.py#L103-L116
242,526
tanghaibao/goatools
goatools/gosubdag/gosubdag_init.py
InitGOs._init_relationships
def _init_relationships(self, relationships_arg): """Return a set of relationships found in all subset GO Terms.""" if relationships_arg: relationships_all = self._get_all_relationships() if relationships_arg is True: return relationships_all else: return relationships_all.intersection(relationships_arg) return set()
python
def _init_relationships(self, relationships_arg): if relationships_arg: relationships_all = self._get_all_relationships() if relationships_arg is True: return relationships_all else: return relationships_all.intersection(relationships_arg) return set()
[ "def", "_init_relationships", "(", "self", ",", "relationships_arg", ")", ":", "if", "relationships_arg", ":", "relationships_all", "=", "self", ".", "_get_all_relationships", "(", ")", "if", "relationships_arg", "is", "True", ":", "return", "relationships_all", "el...
Return a set of relationships found in all subset GO Terms.
[ "Return", "a", "set", "of", "relationships", "found", "in", "all", "subset", "GO", "Terms", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/gosubdag_init.py#L39-L47
242,527
tanghaibao/goatools
goatools/gosubdag/gosubdag_init.py
InitGOs._get_all_relationships
def _get_all_relationships(self): """Return all relationships seen in GO Dag subset.""" relationships_all = set() for goterm in self.go2obj.values(): if goterm.relationship: relationships_all.update(goterm.relationship) if goterm.relationship_rev: relationships_all.update(goterm.relationship_rev) return relationships_all
python
def _get_all_relationships(self): relationships_all = set() for goterm in self.go2obj.values(): if goterm.relationship: relationships_all.update(goterm.relationship) if goterm.relationship_rev: relationships_all.update(goterm.relationship_rev) return relationships_all
[ "def", "_get_all_relationships", "(", "self", ")", ":", "relationships_all", "=", "set", "(", ")", "for", "goterm", "in", "self", ".", "go2obj", ".", "values", "(", ")", ":", "if", "goterm", ".", "relationship", ":", "relationships_all", ".", "update", "("...
Return all relationships seen in GO Dag subset.
[ "Return", "all", "relationships", "seen", "in", "GO", "Dag", "subset", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/gosubdag_init.py#L49-L57
242,528
tanghaibao/goatools
goatools/gosubdag/gosubdag_init.py
InitGOs._init_gos
def _init_gos(self, go_sources_arg, relationships_arg): """Initialize GO sources.""" # No GO sources provided if not go_sources_arg: assert self.go2obj_orig, "go2obj MUST BE PRESENT IF go_sources IS NOT" self.go_sources = set(self.go2obj_orig) self.go2obj = self.go2obj_orig sys.stdout.write("**NOTE: {N:,} SOURCE GO IDS\n".format(N=len(self.go_sources))) return # GO sources provided go_sources = self._init_go_sources(go_sources_arg, self.go2obj_orig) # Create new go2obj_user subset matching GO sources # Fill with source and parent GO IDs and alternate GO IDs go2obj_user = {} objrel = CurNHigher(relationships_arg, self.go2obj_orig) objrel.get_id2obj_cur_n_high(go2obj_user, go_sources) # Add additional GOTerm information, if needed for user task kws_gos = {k:v for k, v in self.kws.items() if k in self.kws_aux_gos} if kws_gos: self._add_goterms_kws(go2obj_user, kws_gos) self.go_sources = go_sources self.go2obj = go2obj_user
python
def _init_gos(self, go_sources_arg, relationships_arg): # No GO sources provided if not go_sources_arg: assert self.go2obj_orig, "go2obj MUST BE PRESENT IF go_sources IS NOT" self.go_sources = set(self.go2obj_orig) self.go2obj = self.go2obj_orig sys.stdout.write("**NOTE: {N:,} SOURCE GO IDS\n".format(N=len(self.go_sources))) return # GO sources provided go_sources = self._init_go_sources(go_sources_arg, self.go2obj_orig) # Create new go2obj_user subset matching GO sources # Fill with source and parent GO IDs and alternate GO IDs go2obj_user = {} objrel = CurNHigher(relationships_arg, self.go2obj_orig) objrel.get_id2obj_cur_n_high(go2obj_user, go_sources) # Add additional GOTerm information, if needed for user task kws_gos = {k:v for k, v in self.kws.items() if k in self.kws_aux_gos} if kws_gos: self._add_goterms_kws(go2obj_user, kws_gos) self.go_sources = go_sources self.go2obj = go2obj_user
[ "def", "_init_gos", "(", "self", ",", "go_sources_arg", ",", "relationships_arg", ")", ":", "# No GO sources provided", "if", "not", "go_sources_arg", ":", "assert", "self", ".", "go2obj_orig", ",", "\"go2obj MUST BE PRESENT IF go_sources IS NOT\"", "self", ".", "go_sou...
Initialize GO sources.
[ "Initialize", "GO", "sources", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/gosubdag_init.py#L59-L80
242,529
tanghaibao/goatools
goatools/gosubdag/gosubdag_init.py
InitGOs._add_goterms_kws
def _add_goterms_kws(self, go2obj_user, kws_gos): """Add more GOTerms to go2obj_user, if requested and relevant.""" if 'go2color' in kws_gos: for goid in kws_gos['go2color'].keys(): self._add_goterms(go2obj_user, goid)
python
def _add_goterms_kws(self, go2obj_user, kws_gos): if 'go2color' in kws_gos: for goid in kws_gos['go2color'].keys(): self._add_goterms(go2obj_user, goid)
[ "def", "_add_goterms_kws", "(", "self", ",", "go2obj_user", ",", "kws_gos", ")", ":", "if", "'go2color'", "in", "kws_gos", ":", "for", "goid", "in", "kws_gos", "[", "'go2color'", "]", ".", "keys", "(", ")", ":", "self", ".", "_add_goterms", "(", "go2obj_...
Add more GOTerms to go2obj_user, if requested and relevant.
[ "Add", "more", "GOTerms", "to", "go2obj_user", "if", "requested", "and", "relevant", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/gosubdag_init.py#L82-L86
242,530
tanghaibao/goatools
goatools/gosubdag/gosubdag_init.py
InitGOs._add_goterms
def _add_goterms(self, go2obj_user, goid): """Add alt GO IDs to go2obj subset, if requested and relevant.""" goterm = self.go2obj_orig[goid] if goid != goterm.id and goterm.id in go2obj_user and goid not in go2obj_user: go2obj_user[goid] = goterm
python
def _add_goterms(self, go2obj_user, goid): goterm = self.go2obj_orig[goid] if goid != goterm.id and goterm.id in go2obj_user and goid not in go2obj_user: go2obj_user[goid] = goterm
[ "def", "_add_goterms", "(", "self", ",", "go2obj_user", ",", "goid", ")", ":", "goterm", "=", "self", ".", "go2obj_orig", "[", "goid", "]", "if", "goid", "!=", "goterm", ".", "id", "and", "goterm", ".", "id", "in", "go2obj_user", "and", "goid", "not", ...
Add alt GO IDs to go2obj subset, if requested and relevant.
[ "Add", "alt", "GO", "IDs", "to", "go2obj", "subset", "if", "requested", "and", "relevant", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/gosubdag_init.py#L88-L92
242,531
tanghaibao/goatools
goatools/gosubdag/gosubdag_init.py
InitGOs._init_go_sources
def _init_go_sources(self, go_sources_arg, go2obj_arg): """Return GO sources which are present in GODag.""" gos_user = set(go_sources_arg) if 'children' in self.kws and self.kws['children']: gos_user |= get_leaf_children(gos_user, go2obj_arg) gos_godag = set(go2obj_arg) gos_source = gos_user.intersection(gos_godag) gos_missing = gos_user.difference(gos_godag) if not gos_missing: return gos_source sys.stdout.write("{N} GO IDs NOT FOUND IN GO DAG: {GOs}\n".format( N=len(gos_missing), GOs=" ".join([str(e) for e in gos_missing]))) return gos_source
python
def _init_go_sources(self, go_sources_arg, go2obj_arg): gos_user = set(go_sources_arg) if 'children' in self.kws and self.kws['children']: gos_user |= get_leaf_children(gos_user, go2obj_arg) gos_godag = set(go2obj_arg) gos_source = gos_user.intersection(gos_godag) gos_missing = gos_user.difference(gos_godag) if not gos_missing: return gos_source sys.stdout.write("{N} GO IDs NOT FOUND IN GO DAG: {GOs}\n".format( N=len(gos_missing), GOs=" ".join([str(e) for e in gos_missing]))) return gos_source
[ "def", "_init_go_sources", "(", "self", ",", "go_sources_arg", ",", "go2obj_arg", ")", ":", "gos_user", "=", "set", "(", "go_sources_arg", ")", "if", "'children'", "in", "self", ".", "kws", "and", "self", ".", "kws", "[", "'children'", "]", ":", "gos_user"...
Return GO sources which are present in GODag.
[ "Return", "GO", "sources", "which", "are", "present", "in", "GODag", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/gosubdag_init.py#L94-L106
242,532
tanghaibao/goatools
goatools/gosubdag/gosubdag_init.py
InitFields.get_rcntobj
def get_rcntobj(self): """Return None or user-provided CountRelatives object.""" # rcntobj value in kws can be: None, False, True, CountRelatives object if 'rcntobj' in self.kws: rcntobj = self.kws['rcntobj'] if isinstance(rcntobj, CountRelatives): return rcntobj return CountRelatives( self.go2obj, # Subset go2obj contains only items needed by go_sources self.relationships, dcnt='dcnt' in self.kw_elems, go2letter=self.kws.get('go2letter'))
python
def get_rcntobj(self): # rcntobj value in kws can be: None, False, True, CountRelatives object if 'rcntobj' in self.kws: rcntobj = self.kws['rcntobj'] if isinstance(rcntobj, CountRelatives): return rcntobj return CountRelatives( self.go2obj, # Subset go2obj contains only items needed by go_sources self.relationships, dcnt='dcnt' in self.kw_elems, go2letter=self.kws.get('go2letter'))
[ "def", "get_rcntobj", "(", "self", ")", ":", "# rcntobj value in kws can be: None, False, True, CountRelatives object", "if", "'rcntobj'", "in", "self", ".", "kws", ":", "rcntobj", "=", "self", ".", "kws", "[", "'rcntobj'", "]", "if", "isinstance", "(", "rcntobj", ...
Return None or user-provided CountRelatives object.
[ "Return", "None", "or", "user", "-", "provided", "CountRelatives", "object", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/gosubdag_init.py#L123-L134
242,533
tanghaibao/goatools
goatools/gosubdag/gosubdag_init.py
InitFields.get_prt_fmt
def get_prt_fmt(self, alt=False): """Return the format for printing GO named tuples and their related information.""" # prt_fmt = [ # rcnt # '{GO} # {NS} L{level:02} D{depth:02} {GO_name}', # '{GO} # {NS} {dcnt:6,} L{level:02} D{depth:02} {D1:5} {GO_name}'] prt_fmt = [] if alt: prt_fmt.append('{GO}{alt:1}') else: prt_fmt.append('{GO}') prt_fmt.append('# {NS}') if 'dcnt' in self.prt_flds: prt_fmt.append('{dcnt:5}') if 'childcnt' in self.prt_flds: prt_fmt.append('{childcnt:3}') if 'tcnt' in self.prt_flds: prt_fmt.append("{tcnt:7,}") if 'tfreq' in self.prt_flds: prt_fmt.append("{tfreq:8.6f}") if 'tinfo' in self.prt_flds: prt_fmt.append("{tinfo:5.2f}") prt_fmt.append('L{level:02} D{depth:02}') if self.relationships: prt_fmt.append('R{reldepth:02}') if 'D1' in self.prt_flds: prt_fmt.append('{D1:5}') if 'REL' in self.prt_flds: prt_fmt.append('{REL}') prt_fmt.append('{rel}') prt_fmt.append('{GO_name}') return " ".join(prt_fmt)
python
def get_prt_fmt(self, alt=False): # prt_fmt = [ # rcnt # '{GO} # {NS} L{level:02} D{depth:02} {GO_name}', # '{GO} # {NS} {dcnt:6,} L{level:02} D{depth:02} {D1:5} {GO_name}'] prt_fmt = [] if alt: prt_fmt.append('{GO}{alt:1}') else: prt_fmt.append('{GO}') prt_fmt.append('# {NS}') if 'dcnt' in self.prt_flds: prt_fmt.append('{dcnt:5}') if 'childcnt' in self.prt_flds: prt_fmt.append('{childcnt:3}') if 'tcnt' in self.prt_flds: prt_fmt.append("{tcnt:7,}") if 'tfreq' in self.prt_flds: prt_fmt.append("{tfreq:8.6f}") if 'tinfo' in self.prt_flds: prt_fmt.append("{tinfo:5.2f}") prt_fmt.append('L{level:02} D{depth:02}') if self.relationships: prt_fmt.append('R{reldepth:02}') if 'D1' in self.prt_flds: prt_fmt.append('{D1:5}') if 'REL' in self.prt_flds: prt_fmt.append('{REL}') prt_fmt.append('{rel}') prt_fmt.append('{GO_name}') return " ".join(prt_fmt)
[ "def", "get_prt_fmt", "(", "self", ",", "alt", "=", "False", ")", ":", "# prt_fmt = [ # rcnt", "# '{GO} # {NS} L{level:02} D{depth:02} {GO_name}',", "# '{GO} # {NS} {dcnt:6,} L{level:02} D{depth:02} {D1:5} {GO_name}']", "prt_f...
Return the format for printing GO named tuples and their related information.
[ "Return", "the", "format", "for", "printing", "GO", "named", "tuples", "and", "their", "related", "information", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/gosubdag_init.py#L174-L204
242,534
tanghaibao/goatools
goatools/gosubdag/gosubdag_init.py
InitFields._init_kwelems
def _init_kwelems(self): """Init set elements.""" ret = set() if 'rcntobj' in self.kws: ret.add('dcnt') ret.add('D1') if 'tcntobj' in self.kws: ret.add('tcnt') ret.add('tfreq') ret.add('tinfo') return ret
python
def _init_kwelems(self): ret = set() if 'rcntobj' in self.kws: ret.add('dcnt') ret.add('D1') if 'tcntobj' in self.kws: ret.add('tcnt') ret.add('tfreq') ret.add('tinfo') return ret
[ "def", "_init_kwelems", "(", "self", ")", ":", "ret", "=", "set", "(", ")", "if", "'rcntobj'", "in", "self", ".", "kws", ":", "ret", ".", "add", "(", "'dcnt'", ")", "ret", ".", "add", "(", "'D1'", ")", "if", "'tcntobj'", "in", "self", ".", "kws",...
Init set elements.
[ "Init", "set", "elements", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/gosubdag_init.py#L248-L258
242,535
tanghaibao/goatools
goatools/anno/extensions/extensions.py
AnnotationExtensions.get_relations_cnt
def get_relations_cnt(self): """Get the set of all relations.""" return cx.Counter([e.relation for es in self.exts for e in es])
python
def get_relations_cnt(self): return cx.Counter([e.relation for es in self.exts for e in es])
[ "def", "get_relations_cnt", "(", "self", ")", ":", "return", "cx", ".", "Counter", "(", "[", "e", ".", "relation", "for", "es", "in", "self", ".", "exts", "for", "e", "in", "es", "]", ")" ]
Get the set of all relations.
[ "Get", "the", "set", "of", "all", "relations", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/anno/extensions/extensions.py#L40-L42
242,536
tanghaibao/goatools
goatools/gosubdag/plot/go2color.py
Go2Color._init_equiv
def _init_equiv(self): """Add equivalent GO IDs to go2color, if necessary.""" gocolored_all = set(self.go2color) go2obj_usr = self.gosubdag.go2obj go2color_add = {} for gocolored_cur, color in self.go2color.items(): # Ignore GOs in go2color that are not in the user set if gocolored_cur in go2obj_usr: goobj = go2obj_usr[gocolored_cur] goids_equiv = goobj.alt_ids.union([goobj.id]) # mrk_alt = "*" if gocolored_cur != goobj.id else "" # print("COLORED({}) KEY({}){:1} ALL({})".format( # gocolored_cur, goobj.id, mrk_alt, goids_equiv)) # Loop through GO IDs which are not colored, but are equivalent to colored GO IDs. for goid_add in goids_equiv.difference(gocolored_all): if goid_add in go2color_add: print('**TBD: TWO DIFFERENT COLORS FOR EQUIV GO ID') # pylint: disable=superfluous-parens go2color_add[goid_add] = color # print("ADDING {N} GO IDs TO go2color".format(N=len(go2color_add))) for goid, color in go2color_add.items(): self.go2color[goid] = color
python
def _init_equiv(self): gocolored_all = set(self.go2color) go2obj_usr = self.gosubdag.go2obj go2color_add = {} for gocolored_cur, color in self.go2color.items(): # Ignore GOs in go2color that are not in the user set if gocolored_cur in go2obj_usr: goobj = go2obj_usr[gocolored_cur] goids_equiv = goobj.alt_ids.union([goobj.id]) # mrk_alt = "*" if gocolored_cur != goobj.id else "" # print("COLORED({}) KEY({}){:1} ALL({})".format( # gocolored_cur, goobj.id, mrk_alt, goids_equiv)) # Loop through GO IDs which are not colored, but are equivalent to colored GO IDs. for goid_add in goids_equiv.difference(gocolored_all): if goid_add in go2color_add: print('**TBD: TWO DIFFERENT COLORS FOR EQUIV GO ID') # pylint: disable=superfluous-parens go2color_add[goid_add] = color # print("ADDING {N} GO IDs TO go2color".format(N=len(go2color_add))) for goid, color in go2color_add.items(): self.go2color[goid] = color
[ "def", "_init_equiv", "(", "self", ")", ":", "gocolored_all", "=", "set", "(", "self", ".", "go2color", ")", "go2obj_usr", "=", "self", ".", "gosubdag", ".", "go2obj", "go2color_add", "=", "{", "}", "for", "gocolored_cur", ",", "color", "in", "self", "."...
Add equivalent GO IDs to go2color, if necessary.
[ "Add", "equivalent", "GO", "IDs", "to", "go2color", "if", "necessary", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/plot/go2color.py#L78-L98
242,537
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentRecord.get_pvalue
def get_pvalue(self): """Returns pval for 1st method, if it exists. Else returns uncorrected pval.""" if self.method_flds: return getattr(self, "p_{m}".format(m=self.get_method_name())) return getattr(self, "p_uncorrected")
python
def get_pvalue(self): if self.method_flds: return getattr(self, "p_{m}".format(m=self.get_method_name())) return getattr(self, "p_uncorrected")
[ "def", "get_pvalue", "(", "self", ")", ":", "if", "self", ".", "method_flds", ":", "return", "getattr", "(", "self", ",", "\"p_{m}\"", ".", "format", "(", "m", "=", "self", ".", "get_method_name", "(", ")", ")", ")", "return", "getattr", "(", "self", ...
Returns pval for 1st method, if it exists. Else returns uncorrected pval.
[ "Returns", "pval", "for", "1st", "method", "if", "it", "exists", ".", "Else", "returns", "uncorrected", "pval", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L86-L90
242,538
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentRecord.set_corrected_pval
def set_corrected_pval(self, nt_method, pvalue): """Add object attribute based on method name.""" self.method_flds.append(nt_method) fieldname = "".join(["p_", nt_method.fieldname]) setattr(self, fieldname, pvalue)
python
def set_corrected_pval(self, nt_method, pvalue): self.method_flds.append(nt_method) fieldname = "".join(["p_", nt_method.fieldname]) setattr(self, fieldname, pvalue)
[ "def", "set_corrected_pval", "(", "self", ",", "nt_method", ",", "pvalue", ")", ":", "self", ".", "method_flds", ".", "append", "(", "nt_method", ")", "fieldname", "=", "\"\"", ".", "join", "(", "[", "\"p_\"", ",", "nt_method", ".", "fieldname", "]", ")"...
Add object attribute based on method name.
[ "Add", "object", "attribute", "based", "on", "method", "name", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L92-L96
242,539
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentRecord._chk_fields
def _chk_fields(field_data, field_formatter): """Check that expected fields are present.""" if len(field_data) == len(field_formatter): return len_dat = len(field_data) len_fmt = len(field_formatter) msg = [ "FIELD DATA({d}) != FORMATTER({f})".format(d=len_dat, f=len_fmt), "DAT({N}): {D}".format(N=len_dat, D=field_data), "FMT({N}): {F}".format(N=len_fmt, F=field_formatter)] raise Exception("\n".join(msg))
python
def _chk_fields(field_data, field_formatter): if len(field_data) == len(field_formatter): return len_dat = len(field_data) len_fmt = len(field_formatter) msg = [ "FIELD DATA({d}) != FORMATTER({f})".format(d=len_dat, f=len_fmt), "DAT({N}): {D}".format(N=len_dat, D=field_data), "FMT({N}): {F}".format(N=len_fmt, F=field_formatter)] raise Exception("\n".join(msg))
[ "def", "_chk_fields", "(", "field_data", ",", "field_formatter", ")", ":", "if", "len", "(", "field_data", ")", "==", "len", "(", "field_formatter", ")", ":", "return", "len_dat", "=", "len", "(", "field_data", ")", "len_fmt", "=", "len", "(", "field_forma...
Check that expected fields are present.
[ "Check", "that", "expected", "fields", "are", "present", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L121-L131
242,540
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentRecord.set_goterm
def set_goterm(self, go2obj): """Set goterm and copy GOTerm's name and namespace.""" if self.GO in go2obj: goterm = go2obj[self.GO] self.goterm = goterm self.name = goterm.name self.depth = goterm.depth self.NS = self.namespace2NS[self.goterm.namespace]
python
def set_goterm(self, go2obj): if self.GO in go2obj: goterm = go2obj[self.GO] self.goterm = goterm self.name = goterm.name self.depth = goterm.depth self.NS = self.namespace2NS[self.goterm.namespace]
[ "def", "set_goterm", "(", "self", ",", "go2obj", ")", ":", "if", "self", ".", "GO", "in", "go2obj", ":", "goterm", "=", "go2obj", "[", "self", ".", "GO", "]", "self", ".", "goterm", "=", "goterm", "self", ".", "name", "=", "goterm", ".", "name", ...
Set goterm and copy GOTerm's name and namespace.
[ "Set", "goterm", "and", "copy", "GOTerm", "s", "name", "and", "namespace", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L136-L143
242,541
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentRecord._init_enrichment
def _init_enrichment(self): """Mark as 'enriched' or 'purified'.""" if self.study_n: return 'e' if ((1.0 * self.study_count / self.study_n) > (1.0 * self.pop_count / self.pop_n)) else 'p' return 'p'
python
def _init_enrichment(self): if self.study_n: return 'e' if ((1.0 * self.study_count / self.study_n) > (1.0 * self.pop_count / self.pop_n)) else 'p' return 'p'
[ "def", "_init_enrichment", "(", "self", ")", ":", "if", "self", ".", "study_n", ":", "return", "'e'", "if", "(", "(", "1.0", "*", "self", ".", "study_count", "/", "self", ".", "study_n", ")", ">", "(", "1.0", "*", "self", ".", "pop_count", "/", "se...
Mark as 'enriched' or 'purified'.
[ "Mark", "as", "enriched", "or", "purified", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L145-L150
242,542
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentRecord.get_prtflds_default
def get_prtflds_default(self): """Get default fields.""" return self._fldsdefprt[:-1] + \ ["p_{M}".format(M=m.fieldname) for m in self.method_flds] + \ [self._fldsdefprt[-1]]
python
def get_prtflds_default(self): return self._fldsdefprt[:-1] + \ ["p_{M}".format(M=m.fieldname) for m in self.method_flds] + \ [self._fldsdefprt[-1]]
[ "def", "get_prtflds_default", "(", "self", ")", ":", "return", "self", ".", "_fldsdefprt", "[", ":", "-", "1", "]", "+", "[", "\"p_{M}\"", ".", "format", "(", "M", "=", "m", ".", "fieldname", ")", "for", "m", "in", "self", ".", "method_flds", "]", ...
Get default fields.
[ "Get", "default", "fields", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L160-L164
242,543
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentRecord.get_prtflds_all
def get_prtflds_all(self): """When converting to a namedtuple, get all possible fields in their original order.""" flds = [] dont_add = set(['_parents', 'method_flds', 'relationship_rev', 'relationship']) # Fields: GO NS enrichment name ratio_in_study ratio_in_pop p_uncorrected # depth study_count p_sm_bonferroni p_fdr_bh study_items self._flds_append(flds, self.get_prtflds_default(), dont_add) # Fields: GO NS goterm # ratio_in_pop pop_n pop_count pop_items name # ratio_in_study study_n study_count study_items # method_flds enrichment p_uncorrected p_sm_bonferroni p_fdr_bh self._flds_append(flds, vars(self).keys(), dont_add) # Fields: name level is_obsolete namespace id depth parents children _parents alt_ids self._flds_append(flds, vars(self.goterm).keys(), dont_add) return flds
python
def get_prtflds_all(self): flds = [] dont_add = set(['_parents', 'method_flds', 'relationship_rev', 'relationship']) # Fields: GO NS enrichment name ratio_in_study ratio_in_pop p_uncorrected # depth study_count p_sm_bonferroni p_fdr_bh study_items self._flds_append(flds, self.get_prtflds_default(), dont_add) # Fields: GO NS goterm # ratio_in_pop pop_n pop_count pop_items name # ratio_in_study study_n study_count study_items # method_flds enrichment p_uncorrected p_sm_bonferroni p_fdr_bh self._flds_append(flds, vars(self).keys(), dont_add) # Fields: name level is_obsolete namespace id depth parents children _parents alt_ids self._flds_append(flds, vars(self.goterm).keys(), dont_add) return flds
[ "def", "get_prtflds_all", "(", "self", ")", ":", "flds", "=", "[", "]", "dont_add", "=", "set", "(", "[", "'_parents'", ",", "'method_flds'", ",", "'relationship_rev'", ",", "'relationship'", "]", ")", "# Fields: GO NS enrichment name ratio_in_study ratio_in_pop p_unc...
When converting to a namedtuple, get all possible fields in their original order.
[ "When", "converting", "to", "a", "namedtuple", "get", "all", "possible", "fields", "in", "their", "original", "order", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L166-L180
242,544
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentRecord._flds_append
def _flds_append(flds, addthese, dont_add): """Retain order of fields as we add them once to the list.""" for fld in addthese: if fld not in flds and fld not in dont_add: flds.append(fld)
python
def _flds_append(flds, addthese, dont_add): for fld in addthese: if fld not in flds and fld not in dont_add: flds.append(fld)
[ "def", "_flds_append", "(", "flds", ",", "addthese", ",", "dont_add", ")", ":", "for", "fld", "in", "addthese", ":", "if", "fld", "not", "in", "flds", "and", "fld", "not", "in", "dont_add", ":", "flds", ".", "append", "(", "fld", ")" ]
Retain order of fields as we add them once to the list.
[ "Retain", "order", "of", "fields", "as", "we", "add", "them", "once", "to", "the", "list", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L183-L187
242,545
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentRecord.get_field_values
def get_field_values(self, fldnames, rpt_fmt=True, itemid2name=None): """Get flat namedtuple fields for one GOEnrichmentRecord.""" row = [] # Loop through each user field desired for fld in fldnames: # 1. Check the GOEnrichmentRecord's attributes val = getattr(self, fld, None) if val is not None: if rpt_fmt: val = self._get_rpt_fmt(fld, val, itemid2name) row.append(val) else: # 2. Check the GO object for the field val = getattr(self.goterm, fld, None) if rpt_fmt: val = self._get_rpt_fmt(fld, val, itemid2name) if val is not None: row.append(val) else: # 3. Field not found, raise Exception self._err_fld(fld, fldnames) if rpt_fmt: assert not isinstance(val, list), \ "UNEXPECTED LIST: FIELD({F}) VALUE({V}) FMT({P})".format( P=rpt_fmt, F=fld, V=val) return row
python
def get_field_values(self, fldnames, rpt_fmt=True, itemid2name=None): row = [] # Loop through each user field desired for fld in fldnames: # 1. Check the GOEnrichmentRecord's attributes val = getattr(self, fld, None) if val is not None: if rpt_fmt: val = self._get_rpt_fmt(fld, val, itemid2name) row.append(val) else: # 2. Check the GO object for the field val = getattr(self.goterm, fld, None) if rpt_fmt: val = self._get_rpt_fmt(fld, val, itemid2name) if val is not None: row.append(val) else: # 3. Field not found, raise Exception self._err_fld(fld, fldnames) if rpt_fmt: assert not isinstance(val, list), \ "UNEXPECTED LIST: FIELD({F}) VALUE({V}) FMT({P})".format( P=rpt_fmt, F=fld, V=val) return row
[ "def", "get_field_values", "(", "self", ",", "fldnames", ",", "rpt_fmt", "=", "True", ",", "itemid2name", "=", "None", ")", ":", "row", "=", "[", "]", "# Loop through each user field desired", "for", "fld", "in", "fldnames", ":", "# 1. Check the GOEnrichmentRecord...
Get flat namedtuple fields for one GOEnrichmentRecord.
[ "Get", "flat", "namedtuple", "fields", "for", "one", "GOEnrichmentRecord", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L189-L214
242,546
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentRecord._get_rpt_fmt
def _get_rpt_fmt(fld, val, itemid2name=None): """Return values in a format amenable to printing in a table.""" if fld.startswith("ratio_"): return "{N}/{TOT}".format(N=val[0], TOT=val[1]) elif fld in set(['study_items', 'pop_items', 'alt_ids']): if itemid2name is not None: val = [itemid2name.get(v, v) for v in val] return ", ".join([str(v) for v in sorted(val)]) return val
python
def _get_rpt_fmt(fld, val, itemid2name=None): if fld.startswith("ratio_"): return "{N}/{TOT}".format(N=val[0], TOT=val[1]) elif fld in set(['study_items', 'pop_items', 'alt_ids']): if itemid2name is not None: val = [itemid2name.get(v, v) for v in val] return ", ".join([str(v) for v in sorted(val)]) return val
[ "def", "_get_rpt_fmt", "(", "fld", ",", "val", ",", "itemid2name", "=", "None", ")", ":", "if", "fld", ".", "startswith", "(", "\"ratio_\"", ")", ":", "return", "\"{N}/{TOT}\"", ".", "format", "(", "N", "=", "val", "[", "0", "]", ",", "TOT", "=", "...
Return values in a format amenable to printing in a table.
[ "Return", "values", "in", "a", "format", "amenable", "to", "printing", "in", "a", "table", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L217-L225
242,547
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentRecord._err_fld
def _err_fld(self, fld, fldnames): """Unrecognized field. Print detailed Failure message.""" msg = ['ERROR. UNRECOGNIZED FIELD({F})'.format(F=fld)] actual_flds = set(self.get_prtflds_default() + self.goterm.__dict__.keys()) bad_flds = set(fldnames).difference(set(actual_flds)) if bad_flds: msg.append("\nGOEA RESULT FIELDS: {}".format(" ".join(self._fldsdefprt))) msg.append("GO FIELDS: {}".format(" ".join(self.goterm.__dict__.keys()))) msg.append("\nFATAL: {N} UNEXPECTED FIELDS({F})\n".format( N=len(bad_flds), F=" ".join(bad_flds))) msg.append(" {N} User-provided fields:".format(N=len(fldnames))) for idx, fld in enumerate(fldnames, 1): mrk = "ERROR -->" if fld in bad_flds else "" msg.append(" {M:>9} {I:>2}) {F}".format(M=mrk, I=idx, F=fld)) raise Exception("\n".join(msg))
python
def _err_fld(self, fld, fldnames): msg = ['ERROR. UNRECOGNIZED FIELD({F})'.format(F=fld)] actual_flds = set(self.get_prtflds_default() + self.goterm.__dict__.keys()) bad_flds = set(fldnames).difference(set(actual_flds)) if bad_flds: msg.append("\nGOEA RESULT FIELDS: {}".format(" ".join(self._fldsdefprt))) msg.append("GO FIELDS: {}".format(" ".join(self.goterm.__dict__.keys()))) msg.append("\nFATAL: {N} UNEXPECTED FIELDS({F})\n".format( N=len(bad_flds), F=" ".join(bad_flds))) msg.append(" {N} User-provided fields:".format(N=len(fldnames))) for idx, fld in enumerate(fldnames, 1): mrk = "ERROR -->" if fld in bad_flds else "" msg.append(" {M:>9} {I:>2}) {F}".format(M=mrk, I=idx, F=fld)) raise Exception("\n".join(msg))
[ "def", "_err_fld", "(", "self", ",", "fld", ",", "fldnames", ")", ":", "msg", "=", "[", "'ERROR. UNRECOGNIZED FIELD({F})'", ".", "format", "(", "F", "=", "fld", ")", "]", "actual_flds", "=", "set", "(", "self", ".", "get_prtflds_default", "(", ")", "+", ...
Unrecognized field. Print detailed Failure message.
[ "Unrecognized", "field", ".", "Print", "detailed", "Failure", "message", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L227-L241
242,548
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.run_study_nts
def run_study_nts(self, study, **kws): """Run GOEA on study ids. Return results as a list of namedtuples.""" goea_results = self.run_study(study, **kws) return MgrNtGOEAs(goea_results).get_goea_nts_all()
python
def run_study_nts(self, study, **kws): goea_results = self.run_study(study, **kws) return MgrNtGOEAs(goea_results).get_goea_nts_all()
[ "def", "run_study_nts", "(", "self", ",", "study", ",", "*", "*", "kws", ")", ":", "goea_results", "=", "self", ".", "run_study", "(", "study", ",", "*", "*", "kws", ")", "return", "MgrNtGOEAs", "(", "goea_results", ")", ".", "get_goea_nts_all", "(", "...
Run GOEA on study ids. Return results as a list of namedtuples.
[ "Run", "GOEA", "on", "study", "ids", ".", "Return", "results", "as", "a", "list", "of", "namedtuples", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L302-L305
242,549
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.get_results_msg
def get_results_msg(self, results, study): """Return summary for GOEA results.""" # To convert msg list to string: "\n".join(msg) msg = [] if results: fmt = "{M:6,} GO terms are associated with {N:6,} of {NT:6,}" stu_items, num_gos_stu = self.get_item_cnt(results, "study_items") pop_items, num_gos_pop = self.get_item_cnt(results, "pop_items") stu_txt = fmt.format(N=len(stu_items), M=num_gos_stu, NT=len(set(study))) pop_txt = fmt.format(N=len(pop_items), M=num_gos_pop, NT=self.pop_n) msg.append("{POP} population items".format(POP=pop_txt)) msg.append("{STU} study items".format(STU=stu_txt)) return msg
python
def get_results_msg(self, results, study): # To convert msg list to string: "\n".join(msg) msg = [] if results: fmt = "{M:6,} GO terms are associated with {N:6,} of {NT:6,}" stu_items, num_gos_stu = self.get_item_cnt(results, "study_items") pop_items, num_gos_pop = self.get_item_cnt(results, "pop_items") stu_txt = fmt.format(N=len(stu_items), M=num_gos_stu, NT=len(set(study))) pop_txt = fmt.format(N=len(pop_items), M=num_gos_pop, NT=self.pop_n) msg.append("{POP} population items".format(POP=pop_txt)) msg.append("{STU} study items".format(STU=stu_txt)) return msg
[ "def", "get_results_msg", "(", "self", ",", "results", ",", "study", ")", ":", "# To convert msg list to string: \"\\n\".join(msg)", "msg", "=", "[", "]", "if", "results", ":", "fmt", "=", "\"{M:6,} GO terms are associated with {N:6,} of {NT:6,}\"", "stu_items", ",", "n...
Return summary for GOEA results.
[ "Return", "summary", "for", "GOEA", "results", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L307-L319
242,550
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.get_pval_uncorr
def get_pval_uncorr(self, study, log=sys.stdout): """Calculate the uncorrected pvalues for study items.""" results = [] study_in_pop = self.pop.intersection(study) # " 99% 378 of 382 study items found in population" go2studyitems = get_terms("study", study_in_pop, self.assoc, self.obo_dag, log) pop_n, study_n = self.pop_n, len(study_in_pop) allterms = set(go2studyitems).union(set(self.go2popitems)) if log is not None: # Some study genes may not have been found in the population. Report from orig study_n_orig = len(study) perc = 100.0*study_n/study_n_orig if study_n_orig != 0 else 0.0 log.write("{R:3.0f}% {N:>6,} of {M:>6,} study items found in population({P})\n".format( N=study_n, M=study_n_orig, P=pop_n, R=perc)) if study_n: log.write("Calculating {N:,} uncorrected p-values using {PFNC}\n".format( N=len(allterms), PFNC=self.pval_obj.name)) # If no study genes were found in the population, return empty GOEA results if not study_n: return [] calc_pvalue = self.pval_obj.calc_pvalue for goid in allterms: study_items = go2studyitems.get(goid, set()) study_count = len(study_items) pop_items = self.go2popitems.get(goid, set()) pop_count = len(pop_items) one_record = GOEnrichmentRecord( goid, p_uncorrected=calc_pvalue(study_count, study_n, pop_count, pop_n), study_items=study_items, pop_items=pop_items, ratio_in_study=(study_count, study_n), ratio_in_pop=(pop_count, pop_n)) results.append(one_record) return results
python
def get_pval_uncorr(self, study, log=sys.stdout): results = [] study_in_pop = self.pop.intersection(study) # " 99% 378 of 382 study items found in population" go2studyitems = get_terms("study", study_in_pop, self.assoc, self.obo_dag, log) pop_n, study_n = self.pop_n, len(study_in_pop) allterms = set(go2studyitems).union(set(self.go2popitems)) if log is not None: # Some study genes may not have been found in the population. Report from orig study_n_orig = len(study) perc = 100.0*study_n/study_n_orig if study_n_orig != 0 else 0.0 log.write("{R:3.0f}% {N:>6,} of {M:>6,} study items found in population({P})\n".format( N=study_n, M=study_n_orig, P=pop_n, R=perc)) if study_n: log.write("Calculating {N:,} uncorrected p-values using {PFNC}\n".format( N=len(allterms), PFNC=self.pval_obj.name)) # If no study genes were found in the population, return empty GOEA results if not study_n: return [] calc_pvalue = self.pval_obj.calc_pvalue for goid in allterms: study_items = go2studyitems.get(goid, set()) study_count = len(study_items) pop_items = self.go2popitems.get(goid, set()) pop_count = len(pop_items) one_record = GOEnrichmentRecord( goid, p_uncorrected=calc_pvalue(study_count, study_n, pop_count, pop_n), study_items=study_items, pop_items=pop_items, ratio_in_study=(study_count, study_n), ratio_in_pop=(pop_count, pop_n)) results.append(one_record) return results
[ "def", "get_pval_uncorr", "(", "self", ",", "study", ",", "log", "=", "sys", ".", "stdout", ")", ":", "results", "=", "[", "]", "study_in_pop", "=", "self", ".", "pop", ".", "intersection", "(", "study", ")", "# \" 99% 378 of 382 study items found in pop...
Calculate the uncorrected pvalues for study items.
[ "Calculate", "the", "uncorrected", "pvalues", "for", "study", "items", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L321-L359
242,551
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.get_study_items
def get_study_items(results): """Return a list of study items associated with the given results.""" study_items = set() for obj in results: study_items.update(obj.study_items) return study_items
python
def get_study_items(results): study_items = set() for obj in results: study_items.update(obj.study_items) return study_items
[ "def", "get_study_items", "(", "results", ")", ":", "study_items", "=", "set", "(", ")", "for", "obj", "in", "results", ":", "study_items", ".", "update", "(", "obj", ".", "study_items", ")", "return", "study_items" ]
Return a list of study items associated with the given results.
[ "Return", "a", "list", "of", "study", "items", "associated", "with", "the", "given", "results", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L389-L394
242,552
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy._update_pvalcorr
def _update_pvalcorr(ntmt, corrected_pvals): """Add data members to store multiple test corrections.""" if corrected_pvals is None: return for rec, val in zip(ntmt.results, corrected_pvals): rec.set_corrected_pval(ntmt.nt_method, val)
python
def _update_pvalcorr(ntmt, corrected_pvals): if corrected_pvals is None: return for rec, val in zip(ntmt.results, corrected_pvals): rec.set_corrected_pval(ntmt.nt_method, val)
[ "def", "_update_pvalcorr", "(", "ntmt", ",", "corrected_pvals", ")", ":", "if", "corrected_pvals", "is", "None", ":", "return", "for", "rec", ",", "val", "in", "zip", "(", "ntmt", ".", "results", ",", "corrected_pvals", ")", ":", "rec", ".", "set_corrected...
Add data members to store multiple test corrections.
[ "Add", "data", "members", "to", "store", "multiple", "test", "corrections", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L429-L434
242,553
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.wr_txt
def wr_txt(self, fout_txt, goea_results, prtfmt=None, **kws): """Print GOEA results to text file.""" if not goea_results: sys.stdout.write(" 0 GOEA results. NOT WRITING {FOUT}\n".format(FOUT=fout_txt)) return with open(fout_txt, 'w') as prt: if 'title' in kws: prt.write("{TITLE}\n".format(TITLE=kws['title'])) data_nts = self.prt_txt(prt, goea_results, prtfmt, **kws) log = self.log if self.log is not None else sys.stdout log.write(" {N:>5} GOEA results for {CUR:5} study items. WROTE: {F}\n".format( N=len(data_nts), CUR=len(MgrNtGOEAs(goea_results).get_study_items()), F=fout_txt))
python
def wr_txt(self, fout_txt, goea_results, prtfmt=None, **kws): if not goea_results: sys.stdout.write(" 0 GOEA results. NOT WRITING {FOUT}\n".format(FOUT=fout_txt)) return with open(fout_txt, 'w') as prt: if 'title' in kws: prt.write("{TITLE}\n".format(TITLE=kws['title'])) data_nts = self.prt_txt(prt, goea_results, prtfmt, **kws) log = self.log if self.log is not None else sys.stdout log.write(" {N:>5} GOEA results for {CUR:5} study items. WROTE: {F}\n".format( N=len(data_nts), CUR=len(MgrNtGOEAs(goea_results).get_study_items()), F=fout_txt))
[ "def", "wr_txt", "(", "self", ",", "fout_txt", ",", "goea_results", ",", "prtfmt", "=", "None", ",", "*", "*", "kws", ")", ":", "if", "not", "goea_results", ":", "sys", ".", "stdout", ".", "write", "(", "\" 0 GOEA results. NOT WRITING {FOUT}\\n\"", ".",...
Print GOEA results to text file.
[ "Print", "GOEA", "results", "to", "text", "file", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L437-L450
242,554
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.prt_txt
def prt_txt(prt, goea_results, prtfmt=None, **kws): """Print GOEA results in text format.""" objprt = PrtFmt() if prtfmt is None: flds = ['GO', 'NS', 'p_uncorrected', 'ratio_in_study', 'ratio_in_pop', 'depth', 'name', 'study_items'] prtfmt = objprt.get_prtfmt_str(flds) prtfmt = objprt.adjust_prtfmt(prtfmt) prt_flds = RPT.get_fmtflds(prtfmt) data_nts = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws) RPT.prt_txt(prt, data_nts, prtfmt, prt_flds, **kws) return data_nts
python
def prt_txt(prt, goea_results, prtfmt=None, **kws): objprt = PrtFmt() if prtfmt is None: flds = ['GO', 'NS', 'p_uncorrected', 'ratio_in_study', 'ratio_in_pop', 'depth', 'name', 'study_items'] prtfmt = objprt.get_prtfmt_str(flds) prtfmt = objprt.adjust_prtfmt(prtfmt) prt_flds = RPT.get_fmtflds(prtfmt) data_nts = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws) RPT.prt_txt(prt, data_nts, prtfmt, prt_flds, **kws) return data_nts
[ "def", "prt_txt", "(", "prt", ",", "goea_results", ",", "prtfmt", "=", "None", ",", "*", "*", "kws", ")", ":", "objprt", "=", "PrtFmt", "(", ")", "if", "prtfmt", "is", "None", ":", "flds", "=", "[", "'GO'", ",", "'NS'", ",", "'p_uncorrected'", ",",...
Print GOEA results in text format.
[ "Print", "GOEA", "results", "in", "text", "format", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L453-L464
242,555
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.wr_xlsx
def wr_xlsx(self, fout_xlsx, goea_results, **kws): """Write a xlsx file.""" # kws: prt_if indent itemid2name(study_items) objprt = PrtFmt() prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results)) xlsx_data = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws) if 'fld2col_widths' not in kws: kws['fld2col_widths'] = {f:objprt.default_fld2col_widths.get(f, 8) for f in prt_flds} RPT.wr_xlsx(fout_xlsx, xlsx_data, **kws)
python
def wr_xlsx(self, fout_xlsx, goea_results, **kws): # kws: prt_if indent itemid2name(study_items) objprt = PrtFmt() prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results)) xlsx_data = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws) if 'fld2col_widths' not in kws: kws['fld2col_widths'] = {f:objprt.default_fld2col_widths.get(f, 8) for f in prt_flds} RPT.wr_xlsx(fout_xlsx, xlsx_data, **kws)
[ "def", "wr_xlsx", "(", "self", ",", "fout_xlsx", ",", "goea_results", ",", "*", "*", "kws", ")", ":", "# kws: prt_if indent itemid2name(study_items)", "objprt", "=", "PrtFmt", "(", ")", "prt_flds", "=", "kws", ".", "get", "(", "'prt_flds'", ",", "self", ".",...
Write a xlsx file.
[ "Write", "a", "xlsx", "file", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L466-L474
242,556
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.wr_tsv
def wr_tsv(self, fout_tsv, goea_results, **kws): """Write tab-separated table data to file""" prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results)) tsv_data = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws) RPT.wr_tsv(fout_tsv, tsv_data, **kws)
python
def wr_tsv(self, fout_tsv, goea_results, **kws): prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results)) tsv_data = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws) RPT.wr_tsv(fout_tsv, tsv_data, **kws)
[ "def", "wr_tsv", "(", "self", ",", "fout_tsv", ",", "goea_results", ",", "*", "*", "kws", ")", ":", "prt_flds", "=", "kws", ".", "get", "(", "'prt_flds'", ",", "self", ".", "get_prtflds_default", "(", "goea_results", ")", ")", "tsv_data", "=", "MgrNtGOEA...
Write tab-separated table data to file
[ "Write", "tab", "-", "separated", "table", "data", "to", "file" ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L476-L480
242,557
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.prt_tsv
def prt_tsv(self, prt, goea_results, **kws): """Write tab-separated table data""" prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results)) tsv_data = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws) RPT.prt_tsv(prt, tsv_data, **kws)
python
def prt_tsv(self, prt, goea_results, **kws): prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results)) tsv_data = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws) RPT.prt_tsv(prt, tsv_data, **kws)
[ "def", "prt_tsv", "(", "self", ",", "prt", ",", "goea_results", ",", "*", "*", "kws", ")", ":", "prt_flds", "=", "kws", ".", "get", "(", "'prt_flds'", ",", "self", ".", "get_prtflds_default", "(", "goea_results", ")", ")", "tsv_data", "=", "MgrNtGOEAs", ...
Write tab-separated table data
[ "Write", "tab", "-", "separated", "table", "data" ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L482-L486
242,558
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.get_ns2nts
def get_ns2nts(results, fldnames=None, **kws): """Get namedtuples of GOEA results, split into BP, MF, CC.""" ns2nts = cx.defaultdict(list) nts = MgrNtGOEAs(results).get_goea_nts_all(fldnames, **kws) for ntgoea in nts: ns2nts[ntgoea.NS].append(ntgoea) return ns2nts
python
def get_ns2nts(results, fldnames=None, **kws): ns2nts = cx.defaultdict(list) nts = MgrNtGOEAs(results).get_goea_nts_all(fldnames, **kws) for ntgoea in nts: ns2nts[ntgoea.NS].append(ntgoea) return ns2nts
[ "def", "get_ns2nts", "(", "results", ",", "fldnames", "=", "None", ",", "*", "*", "kws", ")", ":", "ns2nts", "=", "cx", ".", "defaultdict", "(", "list", ")", "nts", "=", "MgrNtGOEAs", "(", "results", ")", ".", "get_goea_nts_all", "(", "fldnames", ",", ...
Get namedtuples of GOEA results, split into BP, MF, CC.
[ "Get", "namedtuples", "of", "GOEA", "results", "split", "into", "BP", "MF", "CC", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L489-L495
242,559
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.print_date
def print_date(min_ratio=None, pval=0.05): """Print GOATOOLS version and the date the GOEA was run.""" import goatools # Header contains provenance and parameters date = datetime.date.today() print("# Generated by GOATOOLS v{0} ({1})".format(goatools.__version__, date)) print("# min_ratio={0} pval={1}".format(min_ratio, pval))
python
def print_date(min_ratio=None, pval=0.05): import goatools # Header contains provenance and parameters date = datetime.date.today() print("# Generated by GOATOOLS v{0} ({1})".format(goatools.__version__, date)) print("# min_ratio={0} pval={1}".format(min_ratio, pval))
[ "def", "print_date", "(", "min_ratio", "=", "None", ",", "pval", "=", "0.05", ")", ":", "import", "goatools", "# Header contains provenance and parameters", "date", "=", "datetime", ".", "date", ".", "today", "(", ")", "print", "(", "\"# Generated by GOATOOLS v{0}...
Print GOATOOLS version and the date the GOEA was run.
[ "Print", "GOATOOLS", "version", "and", "the", "date", "the", "GOEA", "was", "run", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L523-L530
242,560
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.print_results
def print_results(self, results, min_ratio=None, indent=False, pval=0.05, prt=sys.stdout): """Print GOEA results with some additional statistics calculated.""" results_adj = self.get_adj_records(results, min_ratio, pval) self.print_results_adj(results_adj, indent, prt)
python
def print_results(self, results, min_ratio=None, indent=False, pval=0.05, prt=sys.stdout): results_adj = self.get_adj_records(results, min_ratio, pval) self.print_results_adj(results_adj, indent, prt)
[ "def", "print_results", "(", "self", ",", "results", ",", "min_ratio", "=", "None", ",", "indent", "=", "False", ",", "pval", "=", "0.05", ",", "prt", "=", "sys", ".", "stdout", ")", ":", "results_adj", "=", "self", ".", "get_adj_records", "(", "result...
Print GOEA results with some additional statistics calculated.
[ "Print", "GOEA", "results", "with", "some", "additional", "statistics", "calculated", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L532-L535
242,561
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.get_adj_records
def get_adj_records(results, min_ratio=None, pval=0.05): """Return GOEA results with some additional statistics calculated.""" records = [] for rec in results: # calculate some additional statistics # (over_under, is_ratio_different) rec.update_remaining_fldsdefprt(min_ratio=min_ratio) if pval is not None and rec.p_uncorrected >= pval: continue if rec.is_ratio_different: records.append(rec) return records
python
def get_adj_records(results, min_ratio=None, pval=0.05): records = [] for rec in results: # calculate some additional statistics # (over_under, is_ratio_different) rec.update_remaining_fldsdefprt(min_ratio=min_ratio) if pval is not None and rec.p_uncorrected >= pval: continue if rec.is_ratio_different: records.append(rec) return records
[ "def", "get_adj_records", "(", "results", ",", "min_ratio", "=", "None", ",", "pval", "=", "0.05", ")", ":", "records", "=", "[", "]", "for", "rec", "in", "results", ":", "# calculate some additional statistics", "# (over_under, is_ratio_different)", "rec", ".", ...
Return GOEA results with some additional statistics calculated.
[ "Return", "GOEA", "results", "with", "some", "additional", "statistics", "calculated", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L538-L551
242,562
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.print_results_adj
def print_results_adj(results, indent=False, prt=sys.stdout): """Print GOEA results.""" # Print column headers if there are results to be printed if results: prt.write("{R}\n".format(R="\t".join(GOEnrichmentStudy.get_prtflds_default(results)))) # Print the GOEA results for rec in results: prt.write("{R}\n".format(R=rec.__str__(indent=indent)))
python
def print_results_adj(results, indent=False, prt=sys.stdout): # Print column headers if there are results to be printed if results: prt.write("{R}\n".format(R="\t".join(GOEnrichmentStudy.get_prtflds_default(results)))) # Print the GOEA results for rec in results: prt.write("{R}\n".format(R=rec.__str__(indent=indent)))
[ "def", "print_results_adj", "(", "results", ",", "indent", "=", "False", ",", "prt", "=", "sys", ".", "stdout", ")", ":", "# Print column headers if there are results to be printed", "if", "results", ":", "prt", ".", "write", "(", "\"{R}\\n\"", ".", "format", "(...
Print GOEA results.
[ "Print", "GOEA", "results", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L554-L561
242,563
tanghaibao/goatools
goatools/go_enrichment.py
GOEnrichmentStudy.wr_py_goea_results
def wr_py_goea_results(self, fout_py, goea_results, **kws): """Save GOEA results into Python package containing list of namedtuples.""" var_name = kws.get("var_name", "goea_results") docstring = kws.get("docstring", "") sortby = kws.get("sortby", None) if goea_results: from goatools.nt_utils import wr_py_nts nts_goea = goea_results # If list has GOEnrichmentRecords or verbose namedtuples, exclude some fields. if hasattr(goea_results[0], "_fldsdefprt") or hasattr(goea_results[0], 'goterm'): # Exclude some attributes from the namedtuple when saving results # to a Python file because the information is redundant or verbose. nts_goea = MgrNtGOEAs(goea_results).get_goea_nts_prt(**kws) docstring = "\n".join([docstring, "# {VER}\n\n".format(VER=self.obo_dag.version)]) assert hasattr(nts_goea[0], '_fields') if sortby is None: sortby = MgrNtGOEAs.dflt_sortby_objgoea nts_goea = sorted(nts_goea, key=sortby) wr_py_nts(fout_py, nts_goea, docstring, var_name)
python
def wr_py_goea_results(self, fout_py, goea_results, **kws): var_name = kws.get("var_name", "goea_results") docstring = kws.get("docstring", "") sortby = kws.get("sortby", None) if goea_results: from goatools.nt_utils import wr_py_nts nts_goea = goea_results # If list has GOEnrichmentRecords or verbose namedtuples, exclude some fields. if hasattr(goea_results[0], "_fldsdefprt") or hasattr(goea_results[0], 'goterm'): # Exclude some attributes from the namedtuple when saving results # to a Python file because the information is redundant or verbose. nts_goea = MgrNtGOEAs(goea_results).get_goea_nts_prt(**kws) docstring = "\n".join([docstring, "# {VER}\n\n".format(VER=self.obo_dag.version)]) assert hasattr(nts_goea[0], '_fields') if sortby is None: sortby = MgrNtGOEAs.dflt_sortby_objgoea nts_goea = sorted(nts_goea, key=sortby) wr_py_nts(fout_py, nts_goea, docstring, var_name)
[ "def", "wr_py_goea_results", "(", "self", ",", "fout_py", ",", "goea_results", ",", "*", "*", "kws", ")", ":", "var_name", "=", "kws", ".", "get", "(", "\"var_name\"", ",", "\"goea_results\"", ")", "docstring", "=", "kws", ".", "get", "(", "\"docstring\"",...
Save GOEA results into Python package containing list of namedtuples.
[ "Save", "GOEA", "results", "into", "Python", "package", "containing", "list", "of", "namedtuples", "." ]
407682e573a108864a79031f8ca19ee3bf377626
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L563-L581
242,564
tryolabs/requestium
requestium/requestium.py
_ensure_click
def _ensure_click(self): """Ensures a click gets made, because Selenium can be a bit buggy about clicks This method gets added to the selenium element returned in '__ensure_element_by_xpath'. We should probably add it to more selenium methods, such as all the 'find**' methods though. I wrote this method out of frustration with chromedriver and its problems with clicking items that need to be scrolled to in order to be clickable. In '__ensure_element_by_xpath' we scroll to the item before returning it, but chrome has some problems if it doesn't get some time to scroll to the item. This method ensures chromes gets enough time to scroll to the item before clicking it. I tried SEVERAL more 'correct' methods to get around this, but none of them worked 100% of the time (waiting for the element to be 'clickable' does not work). """ # We ensure the element is scrolled into the middle of the viewport to ensure that # it is clickable. There are two main ways an element may not be clickable: # - It is outside of the viewport # - It is under a banner or toolbar # This script solves both cases script = ("var viewPortHeight = Math.max(" "document.documentElement.clientHeight, window.innerHeight || 0);" "var elementTop = arguments[0].getBoundingClientRect().top;" "window.scrollBy(0, elementTop-(viewPortHeight/2));") self.parent.execute_script(script, self) # parent = the webdriver for _ in range(10): try: self.click() return except WebDriverException as e: exception_message = str(e) time.sleep(0.2) raise WebDriverException( "Couldn't click item after trying 10 times, got error message: \n{}".format( exception_message ) )
python
def _ensure_click(self): # We ensure the element is scrolled into the middle of the viewport to ensure that # it is clickable. There are two main ways an element may not be clickable: # - It is outside of the viewport # - It is under a banner or toolbar # This script solves both cases script = ("var viewPortHeight = Math.max(" "document.documentElement.clientHeight, window.innerHeight || 0);" "var elementTop = arguments[0].getBoundingClientRect().top;" "window.scrollBy(0, elementTop-(viewPortHeight/2));") self.parent.execute_script(script, self) # parent = the webdriver for _ in range(10): try: self.click() return except WebDriverException as e: exception_message = str(e) time.sleep(0.2) raise WebDriverException( "Couldn't click item after trying 10 times, got error message: \n{}".format( exception_message ) )
[ "def", "_ensure_click", "(", "self", ")", ":", "# We ensure the element is scrolled into the middle of the viewport to ensure that", "# it is clickable. There are two main ways an element may not be clickable:", "# - It is outside of the viewport", "# - It is under a banner or toolbar", "# T...
Ensures a click gets made, because Selenium can be a bit buggy about clicks This method gets added to the selenium element returned in '__ensure_element_by_xpath'. We should probably add it to more selenium methods, such as all the 'find**' methods though. I wrote this method out of frustration with chromedriver and its problems with clicking items that need to be scrolled to in order to be clickable. In '__ensure_element_by_xpath' we scroll to the item before returning it, but chrome has some problems if it doesn't get some time to scroll to the item. This method ensures chromes gets enough time to scroll to the item before clicking it. I tried SEVERAL more 'correct' methods to get around this, but none of them worked 100% of the time (waiting for the element to be 'clickable' does not work).
[ "Ensures", "a", "click", "gets", "made", "because", "Selenium", "can", "be", "a", "bit", "buggy", "about", "clicks" ]
9533932ae688da26f3fb78b97b3c0b05c6f24934
https://github.com/tryolabs/requestium/blob/9533932ae688da26f3fb78b97b3c0b05c6f24934/requestium/requestium.py#L365-L401
242,565
tryolabs/requestium
requestium/requestium.py
Session.transfer_session_cookies_to_driver
def transfer_session_cookies_to_driver(self, domain=None): """Copies the Session's cookies into the webdriver Using the 'domain' parameter we choose the cookies we wish to transfer, we only transfer the cookies which belong to that domain. The domain defaults to our last visited site if not provided. """ if not domain and self._last_requests_url: domain = tldextract.extract(self._last_requests_url).registered_domain elif not domain and not self._last_requests_url: raise Exception('Trying to transfer cookies to selenium without specifying a domain ' 'and without having visited any page in the current session') # Transfer cookies for c in [c for c in self.cookies if domain in c.domain]: self.driver.ensure_add_cookie({'name': c.name, 'value': c.value, 'path': c.path, 'expiry': c.expires, 'domain': c.domain})
python
def transfer_session_cookies_to_driver(self, domain=None): if not domain and self._last_requests_url: domain = tldextract.extract(self._last_requests_url).registered_domain elif not domain and not self._last_requests_url: raise Exception('Trying to transfer cookies to selenium without specifying a domain ' 'and without having visited any page in the current session') # Transfer cookies for c in [c for c in self.cookies if domain in c.domain]: self.driver.ensure_add_cookie({'name': c.name, 'value': c.value, 'path': c.path, 'expiry': c.expires, 'domain': c.domain})
[ "def", "transfer_session_cookies_to_driver", "(", "self", ",", "domain", "=", "None", ")", ":", "if", "not", "domain", "and", "self", ".", "_last_requests_url", ":", "domain", "=", "tldextract", ".", "extract", "(", "self", ".", "_last_requests_url", ")", ".",...
Copies the Session's cookies into the webdriver Using the 'domain' parameter we choose the cookies we wish to transfer, we only transfer the cookies which belong to that domain. The domain defaults to our last visited site if not provided.
[ "Copies", "the", "Session", "s", "cookies", "into", "the", "webdriver" ]
9533932ae688da26f3fb78b97b3c0b05c6f24934
https://github.com/tryolabs/requestium/blob/9533932ae688da26f3fb78b97b3c0b05c6f24934/requestium/requestium.py#L98-L114
242,566
tryolabs/requestium
requestium/requestium.py
Session.copy_user_agent_from_driver
def copy_user_agent_from_driver(self): """ Updates requests' session user-agent with the driver's user agent This method will start the browser process if its not already running. """ selenium_user_agent = self.driver.execute_script("return navigator.userAgent;") self.headers.update({"user-agent": selenium_user_agent})
python
def copy_user_agent_from_driver(self): selenium_user_agent = self.driver.execute_script("return navigator.userAgent;") self.headers.update({"user-agent": selenium_user_agent})
[ "def", "copy_user_agent_from_driver", "(", "self", ")", ":", "selenium_user_agent", "=", "self", ".", "driver", ".", "execute_script", "(", "\"return navigator.userAgent;\"", ")", "self", ".", "headers", ".", "update", "(", "{", "\"user-agent\"", ":", "selenium_user...
Updates requests' session user-agent with the driver's user agent This method will start the browser process if its not already running.
[ "Updates", "requests", "session", "user", "-", "agent", "with", "the", "driver", "s", "user", "agent" ]
9533932ae688da26f3fb78b97b3c0b05c6f24934
https://github.com/tryolabs/requestium/blob/9533932ae688da26f3fb78b97b3c0b05c6f24934/requestium/requestium.py#L138-L144
242,567
tryolabs/requestium
requestium/requestium.py
DriverMixin.ensure_add_cookie
def ensure_add_cookie(self, cookie, override_domain=None): """Ensures a cookie gets added to the driver Selenium needs the driver to be currently at the domain of the cookie before allowing you to add it, so we need to get through this limitation. The cookie parameter is a dict which must contain the keys (name, value, domain) and may contain the keys (path, expiry). We first check that we aren't currently in the cookie's domain, if we aren't, we GET the cookie's domain and then add the cookies to the driver. We can override the cookie's domain using 'override_domain'. The use for this parameter is that sometimes GETting the cookie's domain redirects you to a different sub domain, and therefore adding the cookie fails. So sometimes the user may need to override the cookie's domain to a less strict one, Eg.: 'site.com' instead of 'home.site.com', in this way even if the site redirects us to a subdomain, the cookie will stick. If you set the domain to '', the cookie gets added with whatever domain the browser is currently at (at least in chrome it does), so this ensures the cookie gets added. It also retries adding the cookie with a more permissive domain if it fails in the first try, and raises an exception if that fails. The standard selenium behaviour in this case was to not do anything, which was very hard to debug. """ if override_domain: cookie['domain'] = override_domain cookie_domain = cookie['domain'] if cookie['domain'][0] != '.' else cookie['domain'][1:] try: browser_domain = tldextract.extract(self.current_url).fqdn except AttributeError: browser_domain = '' if cookie_domain not in browser_domain: # TODO Check if hardcoding 'http' causes trouble # TODO Consider using a new proxy for this next request to not cause an anomalous # request. 
This way their server sees our ip address as continuously having the # same cookies and not have a request mid-session with no cookies self.get('http://' + cookie_domain) # Fixes phantomjs bug, all domains must start with a period if self.name == "phantomjs": cookie['domain'] = '.' + cookie['domain'] self.add_cookie(cookie) # If we fail adding the cookie, retry with a more permissive domain if not self.is_cookie_in_driver(cookie): cookie['domain'] = tldextract.extract(cookie['domain']).registered_domain self.add_cookie(cookie) if not self.is_cookie_in_driver(cookie): raise WebDriverException( "Couldn't add the following cookie to the webdriver\n{}\n".format(cookie) )
python
def ensure_add_cookie(self, cookie, override_domain=None): if override_domain: cookie['domain'] = override_domain cookie_domain = cookie['domain'] if cookie['domain'][0] != '.' else cookie['domain'][1:] try: browser_domain = tldextract.extract(self.current_url).fqdn except AttributeError: browser_domain = '' if cookie_domain not in browser_domain: # TODO Check if hardcoding 'http' causes trouble # TODO Consider using a new proxy for this next request to not cause an anomalous # request. This way their server sees our ip address as continuously having the # same cookies and not have a request mid-session with no cookies self.get('http://' + cookie_domain) # Fixes phantomjs bug, all domains must start with a period if self.name == "phantomjs": cookie['domain'] = '.' + cookie['domain'] self.add_cookie(cookie) # If we fail adding the cookie, retry with a more permissive domain if not self.is_cookie_in_driver(cookie): cookie['domain'] = tldextract.extract(cookie['domain']).registered_domain self.add_cookie(cookie) if not self.is_cookie_in_driver(cookie): raise WebDriverException( "Couldn't add the following cookie to the webdriver\n{}\n".format(cookie) )
[ "def", "ensure_add_cookie", "(", "self", ",", "cookie", ",", "override_domain", "=", "None", ")", ":", "if", "override_domain", ":", "cookie", "[", "'domain'", "]", "=", "override_domain", "cookie_domain", "=", "cookie", "[", "'domain'", "]", "if", "cookie", ...
Ensures a cookie gets added to the driver Selenium needs the driver to be currently at the domain of the cookie before allowing you to add it, so we need to get through this limitation. The cookie parameter is a dict which must contain the keys (name, value, domain) and may contain the keys (path, expiry). We first check that we aren't currently in the cookie's domain, if we aren't, we GET the cookie's domain and then add the cookies to the driver. We can override the cookie's domain using 'override_domain'. The use for this parameter is that sometimes GETting the cookie's domain redirects you to a different sub domain, and therefore adding the cookie fails. So sometimes the user may need to override the cookie's domain to a less strict one, Eg.: 'site.com' instead of 'home.site.com', in this way even if the site redirects us to a subdomain, the cookie will stick. If you set the domain to '', the cookie gets added with whatever domain the browser is currently at (at least in chrome it does), so this ensures the cookie gets added. It also retries adding the cookie with a more permissive domain if it fails in the first try, and raises an exception if that fails. The standard selenium behaviour in this case was to not do anything, which was very hard to debug.
[ "Ensures", "a", "cookie", "gets", "added", "to", "the", "driver" ]
9533932ae688da26f3fb78b97b3c0b05c6f24934
https://github.com/tryolabs/requestium/blob/9533932ae688da26f3fb78b97b3c0b05c6f24934/requestium/requestium.py#L194-L244
242,568
tryolabs/requestium
requestium/requestium.py
DriverMixin.is_cookie_in_driver
def is_cookie_in_driver(self, cookie): """We check that the cookie is correctly added to the driver We only compare name, value and domain, as the rest can produce false negatives. We are a bit lenient when comparing domains. """ for driver_cookie in self.get_cookies(): if (cookie['name'] == driver_cookie['name'] and cookie['value'] == driver_cookie['value'] and (cookie['domain'] == driver_cookie['domain'] or '.' + cookie['domain'] == driver_cookie['domain'])): return True return False
python
def is_cookie_in_driver(self, cookie): for driver_cookie in self.get_cookies(): if (cookie['name'] == driver_cookie['name'] and cookie['value'] == driver_cookie['value'] and (cookie['domain'] == driver_cookie['domain'] or '.' + cookie['domain'] == driver_cookie['domain'])): return True return False
[ "def", "is_cookie_in_driver", "(", "self", ",", "cookie", ")", ":", "for", "driver_cookie", "in", "self", ".", "get_cookies", "(", ")", ":", "if", "(", "cookie", "[", "'name'", "]", "==", "driver_cookie", "[", "'name'", "]", "and", "cookie", "[", "'value...
We check that the cookie is correctly added to the driver We only compare name, value and domain, as the rest can produce false negatives. We are a bit lenient when comparing domains.
[ "We", "check", "that", "the", "cookie", "is", "correctly", "added", "to", "the", "driver" ]
9533932ae688da26f3fb78b97b3c0b05c6f24934
https://github.com/tryolabs/requestium/blob/9533932ae688da26f3fb78b97b3c0b05c6f24934/requestium/requestium.py#L246-L258
242,569
tryolabs/requestium
requestium/requestium.py
DriverMixin.ensure_element
def ensure_element(self, locator, selector, state="present", timeout=None): """This method allows us to wait till an element appears or disappears in the browser The webdriver runs in parallel with our scripts, so we must wait for it everytime it runs javascript. Selenium automatically waits till a page loads when GETing it, but it doesn't do this when it runs javascript and makes AJAX requests. So we must explicitly wait in that case. The 'locator' argument defines what strategy we use to search for the element. The 'state' argument allows us to chose between waiting for the element to be visible, clickable, present, or invisible. Presence is more inclusive, but sometimes we want to know if the element is visible. Careful, its not always intuitive what Selenium considers to be a visible element. We can also wait for it to be clickable, although this method is a bit buggy in selenium, an element can be 'clickable' according to selenium and still fail when we try to click it. More info at: http://selenium-python.readthedocs.io/waits.html """ locators = {'id': By.ID, 'name': By.NAME, 'xpath': By.XPATH, 'link_text': By.LINK_TEXT, 'partial_link_text': By.PARTIAL_LINK_TEXT, 'tag_name': By.TAG_NAME, 'class_name': By.CLASS_NAME, 'css_selector': By.CSS_SELECTOR} locator = locators[locator] if not timeout: timeout = self.default_timeout if state == 'visible': element = WebDriverWait(self, timeout).until( EC.visibility_of_element_located((locator, selector)) ) elif state == 'clickable': element = WebDriverWait(self, timeout).until( EC.element_to_be_clickable((locator, selector)) ) elif state == 'present': element = WebDriverWait(self, timeout).until( EC.presence_of_element_located((locator, selector)) ) elif state == 'invisible': WebDriverWait(self, timeout).until( EC.invisibility_of_element_located((locator, selector)) ) element = None else: raise ValueError( "The 'state' argument must be 'visible', 'clickable', 'present' " "or 'invisible', not '{}'".format(state) ) # We 
add this method to our element to provide a more robust click. Chromedriver # sometimes needs some time before it can click an item, specially if it needs to # scroll into it first. This method ensures clicks don't fail because of this. if element: element.ensure_click = partial(_ensure_click, element) return element
python
def ensure_element(self, locator, selector, state="present", timeout=None): locators = {'id': By.ID, 'name': By.NAME, 'xpath': By.XPATH, 'link_text': By.LINK_TEXT, 'partial_link_text': By.PARTIAL_LINK_TEXT, 'tag_name': By.TAG_NAME, 'class_name': By.CLASS_NAME, 'css_selector': By.CSS_SELECTOR} locator = locators[locator] if not timeout: timeout = self.default_timeout if state == 'visible': element = WebDriverWait(self, timeout).until( EC.visibility_of_element_located((locator, selector)) ) elif state == 'clickable': element = WebDriverWait(self, timeout).until( EC.element_to_be_clickable((locator, selector)) ) elif state == 'present': element = WebDriverWait(self, timeout).until( EC.presence_of_element_located((locator, selector)) ) elif state == 'invisible': WebDriverWait(self, timeout).until( EC.invisibility_of_element_located((locator, selector)) ) element = None else: raise ValueError( "The 'state' argument must be 'visible', 'clickable', 'present' " "or 'invisible', not '{}'".format(state) ) # We add this method to our element to provide a more robust click. Chromedriver # sometimes needs some time before it can click an item, specially if it needs to # scroll into it first. This method ensures clicks don't fail because of this. if element: element.ensure_click = partial(_ensure_click, element) return element
[ "def", "ensure_element", "(", "self", ",", "locator", ",", "selector", ",", "state", "=", "\"present\"", ",", "timeout", "=", "None", ")", ":", "locators", "=", "{", "'id'", ":", "By", ".", "ID", ",", "'name'", ":", "By", ".", "NAME", ",", "'xpath'",...
This method allows us to wait till an element appears or disappears in the browser The webdriver runs in parallel with our scripts, so we must wait for it everytime it runs javascript. Selenium automatically waits till a page loads when GETing it, but it doesn't do this when it runs javascript and makes AJAX requests. So we must explicitly wait in that case. The 'locator' argument defines what strategy we use to search for the element. The 'state' argument allows us to chose between waiting for the element to be visible, clickable, present, or invisible. Presence is more inclusive, but sometimes we want to know if the element is visible. Careful, its not always intuitive what Selenium considers to be a visible element. We can also wait for it to be clickable, although this method is a bit buggy in selenium, an element can be 'clickable' according to selenium and still fail when we try to click it. More info at: http://selenium-python.readthedocs.io/waits.html
[ "This", "method", "allows", "us", "to", "wait", "till", "an", "element", "appears", "or", "disappears", "in", "the", "browser" ]
9533932ae688da26f3fb78b97b3c0b05c6f24934
https://github.com/tryolabs/requestium/blob/9533932ae688da26f3fb78b97b3c0b05c6f24934/requestium/requestium.py#L284-L342
242,570
Belval/pdf2image
pdf2image/parsers.py
parse_buffer_to_ppm
def parse_buffer_to_ppm(data): """ Parse PPM file bytes to Pillow Image """ images = [] index = 0 while index < len(data): code, size, rgb = tuple(data[index:index + 40].split(b'\n')[0:3]) size_x, size_y = tuple(size.split(b' ')) file_size = len(code) + len(size) + len(rgb) + 3 + int(size_x) * int(size_y) * 3 images.append(Image.open(BytesIO(data[index:index + file_size]))) index += file_size return images
python
def parse_buffer_to_ppm(data): images = [] index = 0 while index < len(data): code, size, rgb = tuple(data[index:index + 40].split(b'\n')[0:3]) size_x, size_y = tuple(size.split(b' ')) file_size = len(code) + len(size) + len(rgb) + 3 + int(size_x) * int(size_y) * 3 images.append(Image.open(BytesIO(data[index:index + file_size]))) index += file_size return images
[ "def", "parse_buffer_to_ppm", "(", "data", ")", ":", "images", "=", "[", "]", "index", "=", "0", "while", "index", "<", "len", "(", "data", ")", ":", "code", ",", "size", ",", "rgb", "=", "tuple", "(", "data", "[", "index", ":", "index", "+", "40...
Parse PPM file bytes to Pillow Image
[ "Parse", "PPM", "file", "bytes", "to", "Pillow", "Image" ]
48ea7ac36ad67e1f9b06593b67d7cdf2c337505c
https://github.com/Belval/pdf2image/blob/48ea7ac36ad67e1f9b06593b67d7cdf2c337505c/pdf2image/parsers.py#L9-L25
242,571
Belval/pdf2image
pdf2image/parsers.py
parse_buffer_to_jpeg
def parse_buffer_to_jpeg(data): """ Parse JPEG file bytes to Pillow Image """ return [ Image.open(BytesIO(image_data + b'\xff\xd9')) for image_data in data.split(b'\xff\xd9')[:-1] # Last element is obviously empty ]
python
def parse_buffer_to_jpeg(data): return [ Image.open(BytesIO(image_data + b'\xff\xd9')) for image_data in data.split(b'\xff\xd9')[:-1] # Last element is obviously empty ]
[ "def", "parse_buffer_to_jpeg", "(", "data", ")", ":", "return", "[", "Image", ".", "open", "(", "BytesIO", "(", "image_data", "+", "b'\\xff\\xd9'", ")", ")", "for", "image_data", "in", "data", ".", "split", "(", "b'\\xff\\xd9'", ")", "[", ":", "-", "1", ...
Parse JPEG file bytes to Pillow Image
[ "Parse", "JPEG", "file", "bytes", "to", "Pillow", "Image" ]
48ea7ac36ad67e1f9b06593b67d7cdf2c337505c
https://github.com/Belval/pdf2image/blob/48ea7ac36ad67e1f9b06593b67d7cdf2c337505c/pdf2image/parsers.py#L27-L35
242,572
Belval/pdf2image
pdf2image/parsers.py
parse_buffer_to_png
def parse_buffer_to_png(data): """ Parse PNG file bytes to Pillow Image """ images = [] c1 = 0 c2 = 0 data_len = len(data) while c1 < data_len: # IEND can appear in a PNG without being the actual end if data[c2:c2 + 4] == b'IEND' and (c2 + 8 == data_len or data[c2+9:c2+12] == b'PNG'): images.append(Image.open(BytesIO(data[c1:c2 + 8]))) c1 = c2 + 8 c2 = c1 c2 += 1 return images
python
def parse_buffer_to_png(data): images = [] c1 = 0 c2 = 0 data_len = len(data) while c1 < data_len: # IEND can appear in a PNG without being the actual end if data[c2:c2 + 4] == b'IEND' and (c2 + 8 == data_len or data[c2+9:c2+12] == b'PNG'): images.append(Image.open(BytesIO(data[c1:c2 + 8]))) c1 = c2 + 8 c2 = c1 c2 += 1 return images
[ "def", "parse_buffer_to_png", "(", "data", ")", ":", "images", "=", "[", "]", "c1", "=", "0", "c2", "=", "0", "data_len", "=", "len", "(", "data", ")", "while", "c1", "<", "data_len", ":", "# IEND can appear in a PNG without being the actual end", "if", "dat...
Parse PNG file bytes to Pillow Image
[ "Parse", "PNG", "file", "bytes", "to", "Pillow", "Image" ]
48ea7ac36ad67e1f9b06593b67d7cdf2c337505c
https://github.com/Belval/pdf2image/blob/48ea7ac36ad67e1f9b06593b67d7cdf2c337505c/pdf2image/parsers.py#L37-L55
242,573
wal-e/wal-e
wal_e/log_help.py
configure
def configure(*args, **kwargs): """ Configure logging. Borrowed from logging.basicConfig Uses the IndentFormatter instead of the regular Formatter Also, opts the caller into Syslog output, unless syslog could not be opened for some reason or another, in which case a warning will be printed to the other log handlers. """ # Configuration must only happen once: no mechanism for avoiding # duplication of handlers exists. assert len(HANDLERS) == 0 log_destinations = get_log_destinations() if 'stderr' in log_destinations: # Add stderr output. HANDLERS.append(logging.StreamHandler()) def terrible_log_output(s): import sys print(s, file=sys.stderr) places = [ # Linux '/dev/log', # FreeBSD '/var/run/log', # Macintosh '/var/run/syslog', ] default_syslog_address = places[0] for p in places: if path.exists(p): default_syslog_address = p break syslog_address = kwargs.setdefault('syslog_address', default_syslog_address) valid_facility = False if 'syslog' in log_destinations: facility, valid_facility = get_syslog_facility() if not valid_facility: terrible_log_output('invalid syslog facility level specified') try: # Add syslog output. HANDLERS.append(handlers.SysLogHandler(syslog_address, facility=facility)) except EnvironmentError as e: if e.errno in [errno.EACCES, errno.ECONNREFUSED]: message = ('wal-e: Could not set up syslog, ' 'continuing anyway. ' 'Reason: {0}').format(errno.errorcode[e.errno]) terrible_log_output(message) fs = kwargs.get("format", logging.BASIC_FORMAT) dfs = kwargs.get("datefmt", None) fmt = IndentFormatter(fs, dfs) for handler in HANDLERS: handler.setFormatter(fmt) logging.root.addHandler(handler) # Default to INFO level logging. set_level(kwargs.get('level', logging.INFO))
python
def configure(*args, **kwargs): # Configuration must only happen once: no mechanism for avoiding # duplication of handlers exists. assert len(HANDLERS) == 0 log_destinations = get_log_destinations() if 'stderr' in log_destinations: # Add stderr output. HANDLERS.append(logging.StreamHandler()) def terrible_log_output(s): import sys print(s, file=sys.stderr) places = [ # Linux '/dev/log', # FreeBSD '/var/run/log', # Macintosh '/var/run/syslog', ] default_syslog_address = places[0] for p in places: if path.exists(p): default_syslog_address = p break syslog_address = kwargs.setdefault('syslog_address', default_syslog_address) valid_facility = False if 'syslog' in log_destinations: facility, valid_facility = get_syslog_facility() if not valid_facility: terrible_log_output('invalid syslog facility level specified') try: # Add syslog output. HANDLERS.append(handlers.SysLogHandler(syslog_address, facility=facility)) except EnvironmentError as e: if e.errno in [errno.EACCES, errno.ECONNREFUSED]: message = ('wal-e: Could not set up syslog, ' 'continuing anyway. ' 'Reason: {0}').format(errno.errorcode[e.errno]) terrible_log_output(message) fs = kwargs.get("format", logging.BASIC_FORMAT) dfs = kwargs.get("datefmt", None) fmt = IndentFormatter(fs, dfs) for handler in HANDLERS: handler.setFormatter(fmt) logging.root.addHandler(handler) # Default to INFO level logging. set_level(kwargs.get('level', logging.INFO))
[ "def", "configure", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Configuration must only happen once: no mechanism for avoiding", "# duplication of handlers exists.", "assert", "len", "(", "HANDLERS", ")", "==", "0", "log_destinations", "=", "get_log_destinatio...
Configure logging. Borrowed from logging.basicConfig Uses the IndentFormatter instead of the regular Formatter Also, opts the caller into Syslog output, unless syslog could not be opened for some reason or another, in which case a warning will be printed to the other log handlers.
[ "Configure", "logging", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/log_help.py#L32-L108
242,574
wal-e/wal-e
wal_e/log_help.py
get_syslog_facility
def get_syslog_facility(): """Get syslog facility from ENV var""" facil = os.getenv('WALE_SYSLOG_FACILITY', 'user') valid_facility = True try: facility = handlers.SysLogHandler.facility_names[facil.lower()] except KeyError: valid_facility = False facility = handlers.SysLogHandler.LOG_USER return facility, valid_facility
python
def get_syslog_facility(): facil = os.getenv('WALE_SYSLOG_FACILITY', 'user') valid_facility = True try: facility = handlers.SysLogHandler.facility_names[facil.lower()] except KeyError: valid_facility = False facility = handlers.SysLogHandler.LOG_USER return facility, valid_facility
[ "def", "get_syslog_facility", "(", ")", ":", "facil", "=", "os", ".", "getenv", "(", "'WALE_SYSLOG_FACILITY'", ",", "'user'", ")", "valid_facility", "=", "True", "try", ":", "facility", "=", "handlers", ".", "SysLogHandler", ".", "facility_names", "[", "facil"...
Get syslog facility from ENV var
[ "Get", "syslog", "facility", "from", "ENV", "var" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/log_help.py#L118-L129
242,575
wal-e/wal-e
wal_e/log_help.py
set_level
def set_level(level): """Adjust the logging level of WAL-E""" for handler in HANDLERS: handler.setLevel(level) logging.root.setLevel(level)
python
def set_level(level): for handler in HANDLERS: handler.setLevel(level) logging.root.setLevel(level)
[ "def", "set_level", "(", "level", ")", ":", "for", "handler", "in", "HANDLERS", ":", "handler", ".", "setLevel", "(", "level", ")", "logging", ".", "root", ".", "setLevel", "(", "level", ")" ]
Adjust the logging level of WAL-E
[ "Adjust", "the", "logging", "level", "of", "WAL", "-", "E" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/log_help.py#L132-L137
242,576
wal-e/wal-e
wal_e/log_help.py
IndentFormatter.format
def format(self, record, *args, **kwargs): """ Format a message in the log Act like the normal format, but indent anything that is a newline within the message. """ return logging.Formatter.format( self, record, *args, **kwargs).replace('\n', '\n' + ' ' * 8)
python
def format(self, record, *args, **kwargs): return logging.Formatter.format( self, record, *args, **kwargs).replace('\n', '\n' + ' ' * 8)
[ "def", "format", "(", "self", ",", "record", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "logging", ".", "Formatter", ".", "format", "(", "self", ",", "record", ",", "*", "args", ",", "*", "*", "kwargs", ")", ".", "replace", "(...
Format a message in the log Act like the normal format, but indent anything that is a newline within the message.
[ "Format", "a", "message", "in", "the", "log" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/log_help.py#L20-L29
242,577
wal-e/wal-e
wal_e/tar_partition.py
_segmentation_guts
def _segmentation_guts(root, file_paths, max_partition_size): """Segment a series of file paths into TarPartition values These TarPartitions are disjoint and roughly below the prescribed size. """ # Canonicalize root to include the trailing slash, since root is # intended to be a directory anyway. if not root.endswith(os.path.sep): root += os.path.sep # Ensure that the root path is a directory before continuing. if not os.path.isdir(root): raise TarBadRootError(root=root) bogus_tar = None try: # Create a bogus TarFile as a contrivance to be able to run # gettarinfo and produce such instances. Some of the settings # on the TarFile are important, like whether to de-reference # symlinks. bogus_tar = tarfile.TarFile(os.devnull, 'w', dereference=False) # Bookkeeping for segmentation of tar members into partitions. partition_number = 0 partition_bytes = 0 partition_members = 0 partition = TarPartition(partition_number) for file_path in file_paths: # Ensure tar members exist within a shared root before # continuing. if not file_path.startswith(root): raise TarBadPathError(root=root, offensive_path=file_path) # Create an ExtendedTarInfo to represent the tarfile. try: et_info = ExtendedTarInfo( tarinfo=bogus_tar.gettarinfo( file_path, arcname=file_path[len(root):]), submitted_path=file_path) except EnvironmentError as e: if (e.errno == errno.ENOENT and e.filename == file_path): # log a NOTICE/INFO that the file was unlinked. # Ostensibly harmless (such unlinks should be replayed # in the WAL) but good to know. logger.debug( msg='tar member additions skipping an unlinked file', detail='Skipping {0}.'.format(et_info.submitted_path)) continue else: raise # Ensure tar members are within an expected size before # continuing. 
if et_info.tarinfo.size > max_partition_size: raise TarMemberTooBigError( et_info.tarinfo.name, max_partition_size, et_info.tarinfo.size) if (partition_bytes + et_info.tarinfo.size >= max_partition_size or partition_members >= PARTITION_MAX_MEMBERS): # Partition is full and cannot accept another member, # so yield the complete one to the caller. yield partition # Prepare a fresh partition to accrue additional file # paths into. partition_number += 1 partition_bytes = et_info.tarinfo.size partition_members = 1 partition = TarPartition( partition_number, [et_info]) else: # Partition is able to accept this member, so just add # it and increment the size counters. partition_bytes += et_info.tarinfo.size partition_members += 1 partition.append(et_info) # Partition size overflow must not to be possible # here. assert partition_bytes < max_partition_size finally: if bogus_tar is not None: bogus_tar.close() # Flush out the final partition should it be non-empty. if partition: yield partition
python
def _segmentation_guts(root, file_paths, max_partition_size): # Canonicalize root to include the trailing slash, since root is # intended to be a directory anyway. if not root.endswith(os.path.sep): root += os.path.sep # Ensure that the root path is a directory before continuing. if not os.path.isdir(root): raise TarBadRootError(root=root) bogus_tar = None try: # Create a bogus TarFile as a contrivance to be able to run # gettarinfo and produce such instances. Some of the settings # on the TarFile are important, like whether to de-reference # symlinks. bogus_tar = tarfile.TarFile(os.devnull, 'w', dereference=False) # Bookkeeping for segmentation of tar members into partitions. partition_number = 0 partition_bytes = 0 partition_members = 0 partition = TarPartition(partition_number) for file_path in file_paths: # Ensure tar members exist within a shared root before # continuing. if not file_path.startswith(root): raise TarBadPathError(root=root, offensive_path=file_path) # Create an ExtendedTarInfo to represent the tarfile. try: et_info = ExtendedTarInfo( tarinfo=bogus_tar.gettarinfo( file_path, arcname=file_path[len(root):]), submitted_path=file_path) except EnvironmentError as e: if (e.errno == errno.ENOENT and e.filename == file_path): # log a NOTICE/INFO that the file was unlinked. # Ostensibly harmless (such unlinks should be replayed # in the WAL) but good to know. logger.debug( msg='tar member additions skipping an unlinked file', detail='Skipping {0}.'.format(et_info.submitted_path)) continue else: raise # Ensure tar members are within an expected size before # continuing. if et_info.tarinfo.size > max_partition_size: raise TarMemberTooBigError( et_info.tarinfo.name, max_partition_size, et_info.tarinfo.size) if (partition_bytes + et_info.tarinfo.size >= max_partition_size or partition_members >= PARTITION_MAX_MEMBERS): # Partition is full and cannot accept another member, # so yield the complete one to the caller. 
yield partition # Prepare a fresh partition to accrue additional file # paths into. partition_number += 1 partition_bytes = et_info.tarinfo.size partition_members = 1 partition = TarPartition( partition_number, [et_info]) else: # Partition is able to accept this member, so just add # it and increment the size counters. partition_bytes += et_info.tarinfo.size partition_members += 1 partition.append(et_info) # Partition size overflow must not to be possible # here. assert partition_bytes < max_partition_size finally: if bogus_tar is not None: bogus_tar.close() # Flush out the final partition should it be non-empty. if partition: yield partition
[ "def", "_segmentation_guts", "(", "root", ",", "file_paths", ",", "max_partition_size", ")", ":", "# Canonicalize root to include the trailing slash, since root is", "# intended to be a directory anyway.", "if", "not", "root", ".", "endswith", "(", "os", ".", "path", ".", ...
Segment a series of file paths into TarPartition values These TarPartitions are disjoint and roughly below the prescribed size.
[ "Segment", "a", "series", "of", "file", "paths", "into", "TarPartition", "values" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/tar_partition.py#L351-L444
242,578
wal-e/wal-e
wal_e/tar_partition.py
TarPartition.tarfile_extract
def tarfile_extract(fileobj, dest_path): """Extract a tarfile described by a file object to a specified path. Args: fileobj (file): File object wrapping the target tarfile. dest_path (str): Path to extract the contents of the tarfile to. """ # Though this method doesn't fit cleanly into the TarPartition object, # tarballs are only ever extracted for partitions so the logic jives # for the most part. tar = tarfile.open(mode='r|', fileobj=fileobj, bufsize=pipebuf.PIPE_BUF_BYTES) # canonicalize dest_path so the prefix check below works dest_path = os.path.realpath(dest_path) # list of files that need fsyncing extracted_files = [] # Iterate through each member of the tarfile individually. We must # approach it this way because we are dealing with a pipe and the # getmembers() method will consume it before we extract any data. for member in tar: assert not member.name.startswith('/') relpath = os.path.join(dest_path, member.name) # Workaround issue with tar handling of symlink, see: # https://bugs.python.org/issue12800 if member.issym(): target_path = os.path.join(dest_path, member.name) try: os.symlink(member.linkname, target_path) except OSError as e: if e.errno == errno.EEXIST: os.remove(target_path) os.symlink(member.linkname, target_path) else: raise continue if member.isreg() and member.size >= pipebuf.PIPE_BUF_BYTES: cat_extract(tar, member, relpath) else: tar.extract(member, path=dest_path) filename = os.path.realpath(relpath) extracted_files.append(filename) # avoid accumulating an unbounded list of strings which # could be quite large for a large database if len(extracted_files) > 1000: _fsync_files(extracted_files) del extracted_files[:] tar.close() _fsync_files(extracted_files)
python
def tarfile_extract(fileobj, dest_path): # Though this method doesn't fit cleanly into the TarPartition object, # tarballs are only ever extracted for partitions so the logic jives # for the most part. tar = tarfile.open(mode='r|', fileobj=fileobj, bufsize=pipebuf.PIPE_BUF_BYTES) # canonicalize dest_path so the prefix check below works dest_path = os.path.realpath(dest_path) # list of files that need fsyncing extracted_files = [] # Iterate through each member of the tarfile individually. We must # approach it this way because we are dealing with a pipe and the # getmembers() method will consume it before we extract any data. for member in tar: assert not member.name.startswith('/') relpath = os.path.join(dest_path, member.name) # Workaround issue with tar handling of symlink, see: # https://bugs.python.org/issue12800 if member.issym(): target_path = os.path.join(dest_path, member.name) try: os.symlink(member.linkname, target_path) except OSError as e: if e.errno == errno.EEXIST: os.remove(target_path) os.symlink(member.linkname, target_path) else: raise continue if member.isreg() and member.size >= pipebuf.PIPE_BUF_BYTES: cat_extract(tar, member, relpath) else: tar.extract(member, path=dest_path) filename = os.path.realpath(relpath) extracted_files.append(filename) # avoid accumulating an unbounded list of strings which # could be quite large for a large database if len(extracted_files) > 1000: _fsync_files(extracted_files) del extracted_files[:] tar.close() _fsync_files(extracted_files)
[ "def", "tarfile_extract", "(", "fileobj", ",", "dest_path", ")", ":", "# Though this method doesn't fit cleanly into the TarPartition object,", "# tarballs are only ever extracted for partitions so the logic jives", "# for the most part.", "tar", "=", "tarfile", ".", "open", "(", "...
Extract a tarfile described by a file object to a specified path. Args: fileobj (file): File object wrapping the target tarfile. dest_path (str): Path to extract the contents of the tarfile to.
[ "Extract", "a", "tarfile", "described", "by", "a", "file", "object", "to", "a", "specified", "path", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/tar_partition.py#L258-L312
242,579
wal-e/wal-e
wal_e/operator/backup.py
Backup.backup_list
def backup_list(self, query, detail): """ Lists base backups and basic information about them """ import csv from wal_e.storage.base import BackupInfo bl = self._backup_list(detail) # If there is no query, return an exhaustive list, otherwise # find a backup instead. if query is None: bl_iter = bl else: bl_iter = bl.find_all(query) # TODO: support switchable formats for difference needs. w_csv = csv.writer(sys.stdout, dialect='excel-tab') w_csv.writerow(BackupInfo._fields) for bi in bl_iter: w_csv.writerow([getattr(bi, k) for k in BackupInfo._fields]) sys.stdout.flush()
python
def backup_list(self, query, detail): import csv from wal_e.storage.base import BackupInfo bl = self._backup_list(detail) # If there is no query, return an exhaustive list, otherwise # find a backup instead. if query is None: bl_iter = bl else: bl_iter = bl.find_all(query) # TODO: support switchable formats for difference needs. w_csv = csv.writer(sys.stdout, dialect='excel-tab') w_csv.writerow(BackupInfo._fields) for bi in bl_iter: w_csv.writerow([getattr(bi, k) for k in BackupInfo._fields]) sys.stdout.flush()
[ "def", "backup_list", "(", "self", ",", "query", ",", "detail", ")", ":", "import", "csv", "from", "wal_e", ".", "storage", ".", "base", "import", "BackupInfo", "bl", "=", "self", ".", "_backup_list", "(", "detail", ")", "# If there is no query, return an exha...
Lists base backups and basic information about them
[ "Lists", "base", "backups", "and", "basic", "information", "about", "them" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/operator/backup.py#L46-L69
242,580
wal-e/wal-e
wal_e/operator/backup.py
Backup.database_backup
def database_backup(self, data_directory, *args, **kwargs): """Uploads a PostgreSQL file cluster to S3 or Windows Azure Blob Service Mechanism: just wraps _upload_pg_cluster_dir with start/stop backup actions with exception handling. In particular there is a 'finally' block to stop the backup in most situations. """ upload_good = False backup_stop_good = False while_offline = False start_backup_info = None if 'while_offline' in kwargs: while_offline = kwargs.pop('while_offline') try: if not while_offline: start_backup_info = PgBackupStatements.run_start_backup() version = PgBackupStatements.pg_version()['version'] else: if os.path.exists(os.path.join(data_directory, 'postmaster.pid')): hint = ('Shut down postgres. ' 'If there is a stale lockfile, ' 'then remove it after being very sure postgres ' 'is not running.') raise UserException( msg='while_offline set, but pg looks to be running', detail='Found a postmaster.pid lockfile, and aborting', hint=hint) ctrl_data = PgControlDataParser(data_directory) start_backup_info = ctrl_data.last_xlog_file_name_and_offset() version = ctrl_data.pg_version() ret_tuple = self._upload_pg_cluster_dir( start_backup_info, data_directory, version=version, *args, **kwargs) spec, uploaded_to, expanded_size_bytes = ret_tuple upload_good = True finally: if not upload_good: logger.warning( 'blocking on sending WAL segments', detail=('The backup was not completed successfully, ' 'but we have to wait anyway. ' 'See README: TODO about pg_cancel_backup')) if not while_offline: stop_backup_info = PgBackupStatements.run_stop_backup() else: stop_backup_info = start_backup_info backup_stop_good = True # XXX: Ugly, this is more of a 'worker' task because it might # involve retries and error messages, something that is not # treated by the "operator" category of modules. So # basically, if this small upload fails, the whole upload # fails! 
if upload_good and backup_stop_good: # Try to write a sentinel file to the cluster backup # directory that indicates that the base backup upload has # definitely run its course and also communicates what WAL # segments are needed to get to consistency. sentinel_content = json.dumps( {'wal_segment_backup_stop': stop_backup_info['file_name'], 'wal_segment_offset_backup_stop': stop_backup_info['file_offset'], 'expanded_size_bytes': expanded_size_bytes, 'spec': spec}) # XXX: should use the storage operators. # # XXX: distinguish sentinels by *PREFIX* not suffix, # which makes searching harder. (For the next version # bump). uri_put_file(self.creds, uploaded_to + '_backup_stop_sentinel.json', BytesIO(sentinel_content.encode("utf8")), content_type='application/json') else: # NB: Other exceptions should be raised before this that # have more informative results, it is intended that this # exception never will get raised. raise UserCritical('could not complete backup process')
python
def database_backup(self, data_directory, *args, **kwargs): upload_good = False backup_stop_good = False while_offline = False start_backup_info = None if 'while_offline' in kwargs: while_offline = kwargs.pop('while_offline') try: if not while_offline: start_backup_info = PgBackupStatements.run_start_backup() version = PgBackupStatements.pg_version()['version'] else: if os.path.exists(os.path.join(data_directory, 'postmaster.pid')): hint = ('Shut down postgres. ' 'If there is a stale lockfile, ' 'then remove it after being very sure postgres ' 'is not running.') raise UserException( msg='while_offline set, but pg looks to be running', detail='Found a postmaster.pid lockfile, and aborting', hint=hint) ctrl_data = PgControlDataParser(data_directory) start_backup_info = ctrl_data.last_xlog_file_name_and_offset() version = ctrl_data.pg_version() ret_tuple = self._upload_pg_cluster_dir( start_backup_info, data_directory, version=version, *args, **kwargs) spec, uploaded_to, expanded_size_bytes = ret_tuple upload_good = True finally: if not upload_good: logger.warning( 'blocking on sending WAL segments', detail=('The backup was not completed successfully, ' 'but we have to wait anyway. ' 'See README: TODO about pg_cancel_backup')) if not while_offline: stop_backup_info = PgBackupStatements.run_stop_backup() else: stop_backup_info = start_backup_info backup_stop_good = True # XXX: Ugly, this is more of a 'worker' task because it might # involve retries and error messages, something that is not # treated by the "operator" category of modules. So # basically, if this small upload fails, the whole upload # fails! if upload_good and backup_stop_good: # Try to write a sentinel file to the cluster backup # directory that indicates that the base backup upload has # definitely run its course and also communicates what WAL # segments are needed to get to consistency. 
sentinel_content = json.dumps( {'wal_segment_backup_stop': stop_backup_info['file_name'], 'wal_segment_offset_backup_stop': stop_backup_info['file_offset'], 'expanded_size_bytes': expanded_size_bytes, 'spec': spec}) # XXX: should use the storage operators. # # XXX: distinguish sentinels by *PREFIX* not suffix, # which makes searching harder. (For the next version # bump). uri_put_file(self.creds, uploaded_to + '_backup_stop_sentinel.json', BytesIO(sentinel_content.encode("utf8")), content_type='application/json') else: # NB: Other exceptions should be raised before this that # have more informative results, it is intended that this # exception never will get raised. raise UserCritical('could not complete backup process')
[ "def", "database_backup", "(", "self", ",", "data_directory", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "upload_good", "=", "False", "backup_stop_good", "=", "False", "while_offline", "=", "False", "start_backup_info", "=", "None", "if", "'while_off...
Uploads a PostgreSQL file cluster to S3 or Windows Azure Blob Service Mechanism: just wraps _upload_pg_cluster_dir with start/stop backup actions with exception handling. In particular there is a 'finally' block to stop the backup in most situations.
[ "Uploads", "a", "PostgreSQL", "file", "cluster", "to", "S3", "or", "Windows", "Azure", "Blob", "Service" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/operator/backup.py#L158-L246
242,581
wal-e/wal-e
wal_e/operator/backup.py
Backup.wal_archive
def wal_archive(self, wal_path, concurrency=1): """ Uploads a WAL file to S3 or Windows Azure Blob Service This code is intended to typically be called from Postgres's archive_command feature. """ # Upload the segment expressly indicated. It's special # relative to other uploads when parallel wal-push is enabled, # in that it's not desirable to tweak its .ready/.done files # in archive_status. xlog_dir = os.path.dirname(wal_path) segment = WalSegment(wal_path, explicit=True) uploader = WalUploader(self.layout, self.creds, self.gpg_key_id) group = WalTransferGroup(uploader) group.start(segment) # Upload any additional wal segments up to the specified # concurrency by scanning the Postgres archive_status # directory. started = 1 seg_stream = WalSegment.from_ready_archive_status(xlog_dir) while started < concurrency: try: other_segment = next(seg_stream) except StopIteration: break if other_segment.path != wal_path: group.start(other_segment) started += 1 try: # Wait for uploads to finish. group.join() except EnvironmentError as e: if e.errno == errno.ENOENT: print(e) raise UserException( msg='could not find file for wal-push', detail=('The operating system reported: {0} {1}' .format(e.strerror, repr(e.filename)))) raise
python
def wal_archive(self, wal_path, concurrency=1): # Upload the segment expressly indicated. It's special # relative to other uploads when parallel wal-push is enabled, # in that it's not desirable to tweak its .ready/.done files # in archive_status. xlog_dir = os.path.dirname(wal_path) segment = WalSegment(wal_path, explicit=True) uploader = WalUploader(self.layout, self.creds, self.gpg_key_id) group = WalTransferGroup(uploader) group.start(segment) # Upload any additional wal segments up to the specified # concurrency by scanning the Postgres archive_status # directory. started = 1 seg_stream = WalSegment.from_ready_archive_status(xlog_dir) while started < concurrency: try: other_segment = next(seg_stream) except StopIteration: break if other_segment.path != wal_path: group.start(other_segment) started += 1 try: # Wait for uploads to finish. group.join() except EnvironmentError as e: if e.errno == errno.ENOENT: print(e) raise UserException( msg='could not find file for wal-push', detail=('The operating system reported: {0} {1}' .format(e.strerror, repr(e.filename)))) raise
[ "def", "wal_archive", "(", "self", ",", "wal_path", ",", "concurrency", "=", "1", ")", ":", "# Upload the segment expressly indicated. It's special", "# relative to other uploads when parallel wal-push is enabled,", "# in that it's not desirable to tweak its .ready/.done files", "# in...
Uploads a WAL file to S3 or Windows Azure Blob Service This code is intended to typically be called from Postgres's archive_command feature.
[ "Uploads", "a", "WAL", "file", "to", "S3", "or", "Windows", "Azure", "Blob", "Service" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/operator/backup.py#L248-L291
242,582
wal-e/wal-e
wal_e/operator/backup.py
Backup.wal_restore
def wal_restore(self, wal_name, wal_destination, prefetch_max): """ Downloads a WAL file from S3 or Windows Azure Blob Service This code is intended to typically be called from Postgres's restore_command feature. NB: Postgres doesn't guarantee that wal_name == basename(wal_path), so both are required. """ url = '{0}://{1}/{2}'.format( self.layout.scheme, self.layout.store_name(), self.layout.wal_path(wal_name)) if prefetch_max > 0: # Check for prefetch-hit. base = os.path.dirname(os.path.realpath(wal_destination)) pd = prefetch.Dirs(base) seg = WalSegment(wal_name) started = start_prefetches(seg, pd, prefetch_max) last_size = 0 while True: if pd.contains(seg): pd.promote(seg, wal_destination) logger.info( msg='promoted prefetched wal segment', structured={'action': 'wal-fetch', 'key': url, 'seg': wal_name, 'prefix': self.layout.path_prefix}) pd.clear_except(started) return True # If there is a 'running' download, wait a bit for it # to make progress or finish. However, if it doesn't # make progress in some amount of time, assume that # the prefetch process has died and go on with the # in-band downloading code. sz = pd.running_size(seg) if sz <= last_size: break last_size = sz gevent.sleep(0.5) pd.clear_except(started) logger.info( msg='begin wal restore', structured={'action': 'wal-fetch', 'key': url, 'seg': wal_name, 'prefix': self.layout.path_prefix, 'state': 'begin'}) ret = do_lzop_get(self.creds, url, wal_destination, self.gpg_key_id is not None) logger.info( msg='complete wal restore', structured={'action': 'wal-fetch', 'key': url, 'seg': wal_name, 'prefix': self.layout.path_prefix, 'state': 'complete'}) return ret
python
def wal_restore(self, wal_name, wal_destination, prefetch_max): url = '{0}://{1}/{2}'.format( self.layout.scheme, self.layout.store_name(), self.layout.wal_path(wal_name)) if prefetch_max > 0: # Check for prefetch-hit. base = os.path.dirname(os.path.realpath(wal_destination)) pd = prefetch.Dirs(base) seg = WalSegment(wal_name) started = start_prefetches(seg, pd, prefetch_max) last_size = 0 while True: if pd.contains(seg): pd.promote(seg, wal_destination) logger.info( msg='promoted prefetched wal segment', structured={'action': 'wal-fetch', 'key': url, 'seg': wal_name, 'prefix': self.layout.path_prefix}) pd.clear_except(started) return True # If there is a 'running' download, wait a bit for it # to make progress or finish. However, if it doesn't # make progress in some amount of time, assume that # the prefetch process has died and go on with the # in-band downloading code. sz = pd.running_size(seg) if sz <= last_size: break last_size = sz gevent.sleep(0.5) pd.clear_except(started) logger.info( msg='begin wal restore', structured={'action': 'wal-fetch', 'key': url, 'seg': wal_name, 'prefix': self.layout.path_prefix, 'state': 'begin'}) ret = do_lzop_get(self.creds, url, wal_destination, self.gpg_key_id is not None) logger.info( msg='complete wal restore', structured={'action': 'wal-fetch', 'key': url, 'seg': wal_name, 'prefix': self.layout.path_prefix, 'state': 'complete'}) return ret
[ "def", "wal_restore", "(", "self", ",", "wal_name", ",", "wal_destination", ",", "prefetch_max", ")", ":", "url", "=", "'{0}://{1}/{2}'", ".", "format", "(", "self", ".", "layout", ".", "scheme", ",", "self", ".", "layout", ".", "store_name", "(", ")", "...
Downloads a WAL file from S3 or Windows Azure Blob Service This code is intended to typically be called from Postgres's restore_command feature. NB: Postgres doesn't guarantee that wal_name == basename(wal_path), so both are required.
[ "Downloads", "a", "WAL", "file", "from", "S3", "or", "Windows", "Azure", "Blob", "Service" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/operator/backup.py#L293-L363
242,583
wal-e/wal-e
wal_e/operator/backup.py
Backup._upload_pg_cluster_dir
def _upload_pg_cluster_dir(self, start_backup_info, pg_cluster_dir, version, pool_size, rate_limit=None): """ Upload to url_prefix from pg_cluster_dir This function ignores the directory pg_xlog, which contains WAL files and are not generally part of a base backup. Note that this is also lzo compresses the files: thus, the number of pooled processes involves doing a full sequential scan of the uncompressed Postgres heap file that is pipelined into lzo. Once lzo is completely finished (necessary to have access to the file size) the file is sent to S3 or WABS. TODO: Investigate an optimization to decouple the compression and upload steps to make sure that the most efficient possible use of pipelining of network and disk resources occurs. Right now it possible to bounce back and forth between bottlenecking on reading from the database block device and subsequently the S3/WABS sending steps should the processes be at the same stage of the upload pipeline: this can have a very negative impact on being able to make full use of system resources. Furthermore, it desirable to overflowing the page cache: having separate tunables for number of simultanious compression jobs (which occupy /tmp space and page cache) and number of uploads (which affect upload throughput) would help. """ spec, parts = tar_partition.partition(pg_cluster_dir) # TODO :: Move arbitray path construction to StorageLayout Object backup_prefix = '{0}/basebackups_{1}/base_{file_name}_{file_offset}'\ .format(self.layout.prefix.rstrip('/'), FILE_STRUCTURE_VERSION, **start_backup_info) if rate_limit is None: per_process_limit = None else: per_process_limit = int(rate_limit / pool_size) # Reject tiny per-process rate limits. They should be # rejected more nicely elsewhere. 
assert per_process_limit is None or per_process_limit > 0 total_size = 0 # Make an attempt to upload extended version metadata extended_version_url = backup_prefix + '/extended_version.txt' logger.info( msg='start upload postgres version metadata', detail=('Uploading to {extended_version_url}.' .format(extended_version_url=extended_version_url))) uri_put_file(self.creds, extended_version_url, BytesIO(version.encode("utf8")), content_type='text/plain') logger.info(msg='postgres version metadata upload complete') uploader = PartitionUploader(self.creds, backup_prefix, per_process_limit, self.gpg_key_id) pool = TarUploadPool(uploader, pool_size) # Enqueue uploads for parallel execution for tpart in parts: total_size += tpart.total_member_size # 'put' can raise an exception for a just-failed upload, # aborting the process. pool.put(tpart) # Wait for remaining parts to upload. An exception can be # raised to signal failure of the upload. pool.join() return spec, backup_prefix, total_size
python
def _upload_pg_cluster_dir(self, start_backup_info, pg_cluster_dir, version, pool_size, rate_limit=None): spec, parts = tar_partition.partition(pg_cluster_dir) # TODO :: Move arbitray path construction to StorageLayout Object backup_prefix = '{0}/basebackups_{1}/base_{file_name}_{file_offset}'\ .format(self.layout.prefix.rstrip('/'), FILE_STRUCTURE_VERSION, **start_backup_info) if rate_limit is None: per_process_limit = None else: per_process_limit = int(rate_limit / pool_size) # Reject tiny per-process rate limits. They should be # rejected more nicely elsewhere. assert per_process_limit is None or per_process_limit > 0 total_size = 0 # Make an attempt to upload extended version metadata extended_version_url = backup_prefix + '/extended_version.txt' logger.info( msg='start upload postgres version metadata', detail=('Uploading to {extended_version_url}.' .format(extended_version_url=extended_version_url))) uri_put_file(self.creds, extended_version_url, BytesIO(version.encode("utf8")), content_type='text/plain') logger.info(msg='postgres version metadata upload complete') uploader = PartitionUploader(self.creds, backup_prefix, per_process_limit, self.gpg_key_id) pool = TarUploadPool(uploader, pool_size) # Enqueue uploads for parallel execution for tpart in parts: total_size += tpart.total_member_size # 'put' can raise an exception for a just-failed upload, # aborting the process. pool.put(tpart) # Wait for remaining parts to upload. An exception can be # raised to signal failure of the upload. pool.join() return spec, backup_prefix, total_size
[ "def", "_upload_pg_cluster_dir", "(", "self", ",", "start_backup_info", ",", "pg_cluster_dir", ",", "version", ",", "pool_size", ",", "rate_limit", "=", "None", ")", ":", "spec", ",", "parts", "=", "tar_partition", ".", "partition", "(", "pg_cluster_dir", ")", ...
Upload to url_prefix from pg_cluster_dir This function ignores the directory pg_xlog, which contains WAL files and are not generally part of a base backup. Note that this is also lzo compresses the files: thus, the number of pooled processes involves doing a full sequential scan of the uncompressed Postgres heap file that is pipelined into lzo. Once lzo is completely finished (necessary to have access to the file size) the file is sent to S3 or WABS. TODO: Investigate an optimization to decouple the compression and upload steps to make sure that the most efficient possible use of pipelining of network and disk resources occurs. Right now it possible to bounce back and forth between bottlenecking on reading from the database block device and subsequently the S3/WABS sending steps should the processes be at the same stage of the upload pipeline: this can have a very negative impact on being able to make full use of system resources. Furthermore, it desirable to overflowing the page cache: having separate tunables for number of simultanious compression jobs (which occupy /tmp space and page cache) and number of uploads (which affect upload throughput) would help.
[ "Upload", "to", "url_prefix", "from", "pg_cluster_dir" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/operator/backup.py#L430-L506
242,584
wal-e/wal-e
wal_e/operator/backup.py
Backup._exception_gather_guard
def _exception_gather_guard(self, fn): """ A higher order function to trap UserExceptions and then log them. This is to present nicer output to the user when failures are occuring in another thread of execution that may not end up at the catch-all try/except in main(). """ @functools.wraps(fn) def wrapper(*args, **kwargs): try: return fn(*args, **kwargs) except UserException as e: self.exceptions.append(e) return wrapper
python
def _exception_gather_guard(self, fn): @functools.wraps(fn) def wrapper(*args, **kwargs): try: return fn(*args, **kwargs) except UserException as e: self.exceptions.append(e) return wrapper
[ "def", "_exception_gather_guard", "(", "self", ",", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "fn", "(", "*", "args", ",", "*", "*"...
A higher order function to trap UserExceptions and then log them. This is to present nicer output to the user when failures are occuring in another thread of execution that may not end up at the catch-all try/except in main().
[ "A", "higher", "order", "function", "to", "trap", "UserExceptions", "and", "then", "log", "them", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/operator/backup.py#L508-L524
242,585
wal-e/wal-e
wal_e/worker/prefetch.py
Dirs.create
def create(self, segment): """A best-effort attempt to create directories. Warnings are issued to the user if those directories could not created or if they don't exist. The caller should only call this function if the user requested prefetching (i.e. concurrency) to avoid spurious warnings. """ def lackadaisical_mkdir(place): ok = False place = path.realpath(place) try: os.makedirs(place, 0o700) ok = True except EnvironmentError as e: if e.errno == errno.EEXIST: # Has already been created: this is the most # common situation, and is fine. ok = True else: logger.warning( msg='could not create prefetch directory', detail=('Prefetch directory creation target: {0}, {1}' .format(place, e.strerror))) return ok ok = True for d in [self.prefetched_dir, self.running]: ok &= lackadaisical_mkdir(d) lackadaisical_mkdir(self.seg_dir(segment))
python
def create(self, segment): def lackadaisical_mkdir(place): ok = False place = path.realpath(place) try: os.makedirs(place, 0o700) ok = True except EnvironmentError as e: if e.errno == errno.EEXIST: # Has already been created: this is the most # common situation, and is fine. ok = True else: logger.warning( msg='could not create prefetch directory', detail=('Prefetch directory creation target: {0}, {1}' .format(place, e.strerror))) return ok ok = True for d in [self.prefetched_dir, self.running]: ok &= lackadaisical_mkdir(d) lackadaisical_mkdir(self.seg_dir(segment))
[ "def", "create", "(", "self", ",", "segment", ")", ":", "def", "lackadaisical_mkdir", "(", "place", ")", ":", "ok", "=", "False", "place", "=", "path", ".", "realpath", "(", "place", ")", "try", ":", "os", ".", "makedirs", "(", "place", ",", "0o700",...
A best-effort attempt to create directories. Warnings are issued to the user if those directories could not created or if they don't exist. The caller should only call this function if the user requested prefetching (i.e. concurrency) to avoid spurious warnings.
[ "A", "best", "-", "effort", "attempt", "to", "create", "directories", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/prefetch.py#L91-L128
242,586
wal-e/wal-e
wal_e/pep3143daemon/pidfile.py
PidFile.acquire
def acquire(self): """Acquire the pidfile. Create the pidfile, lock it, write the pid into it and register the release with atexit. :return: None :raise: SystemExit """ try: pidfile = open(self._pidfile, "a") except IOError as err: raise SystemExit(err) try: fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError: raise SystemExit('Already running according to ' + self._pidfile) pidfile.seek(0) pidfile.truncate() pidfile.write(str(os.getpid()) + '\n') pidfile.flush() self.pidfile = pidfile atexit.register(self.release)
python
def acquire(self): try: pidfile = open(self._pidfile, "a") except IOError as err: raise SystemExit(err) try: fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError: raise SystemExit('Already running according to ' + self._pidfile) pidfile.seek(0) pidfile.truncate() pidfile.write(str(os.getpid()) + '\n') pidfile.flush() self.pidfile = pidfile atexit.register(self.release)
[ "def", "acquire", "(", "self", ")", ":", "try", ":", "pidfile", "=", "open", "(", "self", ".", "_pidfile", ",", "\"a\"", ")", "except", "IOError", "as", "err", ":", "raise", "SystemExit", "(", "err", ")", "try", ":", "fcntl", ".", "flock", "(", "pi...
Acquire the pidfile. Create the pidfile, lock it, write the pid into it and register the release with atexit. :return: None :raise: SystemExit
[ "Acquire", "the", "pidfile", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pep3143daemon/pidfile.py#L44-L67
242,587
wal-e/wal-e
wal_e/pep3143daemon/pidfile.py
PidFile.release
def release(self): """Release the pidfile. Close and delete the Pidfile. :return: None """ try: self.pidfile.close() os.remove(self._pidfile) except OSError as err: if err.errno != 2: raise
python
def release(self): try: self.pidfile.close() os.remove(self._pidfile) except OSError as err: if err.errno != 2: raise
[ "def", "release", "(", "self", ")", ":", "try", ":", "self", ".", "pidfile", ".", "close", "(", ")", "os", ".", "remove", "(", "self", ".", "_pidfile", ")", "except", "OSError", "as", "err", ":", "if", "err", ".", "errno", "!=", "2", ":", "raise"...
Release the pidfile. Close and delete the Pidfile. :return: None
[ "Release", "the", "pidfile", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pep3143daemon/pidfile.py#L69-L82
242,588
wal-e/wal-e
wal_e/pipebuf.py
_configure_buffer_sizes
def _configure_buffer_sizes(): """Set up module globals controlling buffer sizes""" global PIPE_BUF_BYTES global OS_PIPE_SZ PIPE_BUF_BYTES = 65536 OS_PIPE_SZ = None # Teach the 'fcntl' module about 'F_SETPIPE_SZ', which is a Linux-ism, # but a good one that can drastically reduce the number of syscalls # when dealing with high-throughput pipes. if not hasattr(fcntl, 'F_SETPIPE_SZ'): import platform if platform.system() == 'Linux': fcntl.F_SETPIPE_SZ = 1031 # If Linux procfs (or something that looks like it) exposes its # maximum F_SETPIPE_SZ, adjust the default buffer sizes. try: with open('/proc/sys/fs/pipe-max-size', 'r') as f: # Figure out OS pipe size, but in case it is unusually large # or small restrain it to sensible values. OS_PIPE_SZ = min(int(f.read()), 1024 * 1024) PIPE_BUF_BYTES = max(OS_PIPE_SZ, PIPE_BUF_BYTES) except Exception: pass
python
def _configure_buffer_sizes(): global PIPE_BUF_BYTES global OS_PIPE_SZ PIPE_BUF_BYTES = 65536 OS_PIPE_SZ = None # Teach the 'fcntl' module about 'F_SETPIPE_SZ', which is a Linux-ism, # but a good one that can drastically reduce the number of syscalls # when dealing with high-throughput pipes. if not hasattr(fcntl, 'F_SETPIPE_SZ'): import platform if platform.system() == 'Linux': fcntl.F_SETPIPE_SZ = 1031 # If Linux procfs (or something that looks like it) exposes its # maximum F_SETPIPE_SZ, adjust the default buffer sizes. try: with open('/proc/sys/fs/pipe-max-size', 'r') as f: # Figure out OS pipe size, but in case it is unusually large # or small restrain it to sensible values. OS_PIPE_SZ = min(int(f.read()), 1024 * 1024) PIPE_BUF_BYTES = max(OS_PIPE_SZ, PIPE_BUF_BYTES) except Exception: pass
[ "def", "_configure_buffer_sizes", "(", ")", ":", "global", "PIPE_BUF_BYTES", "global", "OS_PIPE_SZ", "PIPE_BUF_BYTES", "=", "65536", "OS_PIPE_SZ", "=", "None", "# Teach the 'fcntl' module about 'F_SETPIPE_SZ', which is a Linux-ism,", "# but a good one that can drastically reduce the ...
Set up module globals controlling buffer sizes
[ "Set", "up", "module", "globals", "controlling", "buffer", "sizes" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pipebuf.py#L18-L44
242,589
wal-e/wal-e
wal_e/pipebuf.py
set_buf_size
def set_buf_size(fd): """Set up os pipe buffer size, if applicable""" if OS_PIPE_SZ and hasattr(fcntl, 'F_SETPIPE_SZ'): fcntl.fcntl(fd, fcntl.F_SETPIPE_SZ, OS_PIPE_SZ)
python
def set_buf_size(fd): if OS_PIPE_SZ and hasattr(fcntl, 'F_SETPIPE_SZ'): fcntl.fcntl(fd, fcntl.F_SETPIPE_SZ, OS_PIPE_SZ)
[ "def", "set_buf_size", "(", "fd", ")", ":", "if", "OS_PIPE_SZ", "and", "hasattr", "(", "fcntl", ",", "'F_SETPIPE_SZ'", ")", ":", "fcntl", ".", "fcntl", "(", "fd", ",", "fcntl", ".", "F_SETPIPE_SZ", ",", "OS_PIPE_SZ", ")" ]
Set up os pipe buffer size, if applicable
[ "Set", "up", "os", "pipe", "buffer", "size", "if", "applicable" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pipebuf.py#L50-L53
242,590
wal-e/wal-e
wal_e/worker/pg/wal_transfer.py
WalSegment.mark_done
def mark_done(self): """Mark the archive status of this segment as 'done'. This is most useful when performing out-of-band parallel uploads of segments, so that Postgres doesn't try to go and upload them again. This amounts to messing with an internal bookkeeping mechanism of Postgres, but that mechanism is not changing too fast over the last five years and seems simple enough. """ # Recheck that this is not an segment explicitly passed from Postgres if self.explicit: raise UserCritical( msg='unexpected attempt to modify wal metadata detected', detail=('Segments explicitly passed from postgres should not ' 'engage in archiver metadata manipulation: {0}' .format(self.path)), hint='report a bug') # Attempt a rename of archiver metadata, wrapping unexpected # raised exceptions into a UserCritical. try: status_dir = path.join(path.dirname(self.path), 'archive_status') ready_metadata = path.join(status_dir, self.name + '.ready') done_metadata = path.join(status_dir, self.name + '.done') os.rename(ready_metadata, done_metadata) except Exception: raise UserCritical( msg='problem moving .ready archive status to .done', detail='Traceback is: {0}'.format(traceback.format_exc()), hint='report a bug')
python
def mark_done(self): # Recheck that this is not an segment explicitly passed from Postgres if self.explicit: raise UserCritical( msg='unexpected attempt to modify wal metadata detected', detail=('Segments explicitly passed from postgres should not ' 'engage in archiver metadata manipulation: {0}' .format(self.path)), hint='report a bug') # Attempt a rename of archiver metadata, wrapping unexpected # raised exceptions into a UserCritical. try: status_dir = path.join(path.dirname(self.path), 'archive_status') ready_metadata = path.join(status_dir, self.name + '.ready') done_metadata = path.join(status_dir, self.name + '.done') os.rename(ready_metadata, done_metadata) except Exception: raise UserCritical( msg='problem moving .ready archive status to .done', detail='Traceback is: {0}'.format(traceback.format_exc()), hint='report a bug')
[ "def", "mark_done", "(", "self", ")", ":", "# Recheck that this is not an segment explicitly passed from Postgres", "if", "self", ".", "explicit", ":", "raise", "UserCritical", "(", "msg", "=", "'unexpected attempt to modify wal metadata detected'", ",", "detail", "=", "(",...
Mark the archive status of this segment as 'done'. This is most useful when performing out-of-band parallel uploads of segments, so that Postgres doesn't try to go and upload them again. This amounts to messing with an internal bookkeeping mechanism of Postgres, but that mechanism is not changing too fast over the last five years and seems simple enough.
[ "Mark", "the", "archive", "status", "of", "this", "segment", "as", "done", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/pg/wal_transfer.py#L30-L65
242,591
wal-e/wal-e
wal_e/worker/pg/wal_transfer.py
WalTransferGroup.join
def join(self): """Wait for transfer to exit, raising errors as necessary.""" self.closed = True while self.expect > 0: val = self.wait_change.get() self.expect -= 1 if val is not None: # Wait a while for all running greenlets to exit, and # then attempt to force them to exit so join() # terminates in a reasonable amount of time. gevent.joinall(list(self.greenlets), timeout=30) gevent.killall(list(self.greenlets), block=True, timeout=30) raise val
python
def join(self): self.closed = True while self.expect > 0: val = self.wait_change.get() self.expect -= 1 if val is not None: # Wait a while for all running greenlets to exit, and # then attempt to force them to exit so join() # terminates in a reasonable amount of time. gevent.joinall(list(self.greenlets), timeout=30) gevent.killall(list(self.greenlets), block=True, timeout=30) raise val
[ "def", "join", "(", "self", ")", ":", "self", ".", "closed", "=", "True", "while", "self", ".", "expect", ">", "0", ":", "val", "=", "self", ".", "wait_change", ".", "get", "(", ")", "self", ".", "expect", "-=", "1", "if", "val", "is", "not", "...
Wait for transfer to exit, raising errors as necessary.
[ "Wait", "for", "transfer", "to", "exit", "raising", "errors", "as", "necessary", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/pg/wal_transfer.py#L130-L144
242,592
wal-e/wal-e
wal_e/worker/pg/wal_transfer.py
WalTransferGroup.start
def start(self, segment): """Begin transfer for an indicated wal segment.""" if self.closed: raise UserCritical(msg='attempt to transfer wal after closing', hint='report a bug') g = gevent.Greenlet(self.transferer, segment) g.link(self._complete_execution) self.greenlets.add(g) # Increment .expect before starting the greenlet, or else a # very unlucky .join could be fooled as to when pool is # complete. self.expect += 1 g.start()
python
def start(self, segment): if self.closed: raise UserCritical(msg='attempt to transfer wal after closing', hint='report a bug') g = gevent.Greenlet(self.transferer, segment) g.link(self._complete_execution) self.greenlets.add(g) # Increment .expect before starting the greenlet, or else a # very unlucky .join could be fooled as to when pool is # complete. self.expect += 1 g.start()
[ "def", "start", "(", "self", ",", "segment", ")", ":", "if", "self", ".", "closed", ":", "raise", "UserCritical", "(", "msg", "=", "'attempt to transfer wal after closing'", ",", "hint", "=", "'report a bug'", ")", "g", "=", "gevent", ".", "Greenlet", "(", ...
Begin transfer for an indicated wal segment.
[ "Begin", "transfer", "for", "an", "indicated", "wal", "segment", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/pg/wal_transfer.py#L146-L162
242,593
wal-e/wal-e
wal_e/worker/pg/wal_transfer.py
WalTransferGroup._complete_execution
def _complete_execution(self, g): """Forward any raised exceptions across a channel.""" # Triggered via completion callback. # # Runs in its own greenlet, so take care to forward the # exception, if any, to fail the entire transfer in event of # trouble. assert g.ready() self.greenlets.remove(g) placed = UserCritical(msg='placeholder bogus exception', hint='report a bug') if g.successful(): try: segment = g.get() if not segment.explicit: segment.mark_done() except BaseException as e: # Absorb and forward exceptions across the channel. placed = e else: placed = None else: placed = g.exception self.wait_change.put(placed)
python
def _complete_execution(self, g): # Triggered via completion callback. # # Runs in its own greenlet, so take care to forward the # exception, if any, to fail the entire transfer in event of # trouble. assert g.ready() self.greenlets.remove(g) placed = UserCritical(msg='placeholder bogus exception', hint='report a bug') if g.successful(): try: segment = g.get() if not segment.explicit: segment.mark_done() except BaseException as e: # Absorb and forward exceptions across the channel. placed = e else: placed = None else: placed = g.exception self.wait_change.put(placed)
[ "def", "_complete_execution", "(", "self", ",", "g", ")", ":", "# Triggered via completion callback.", "#", "# Runs in its own greenlet, so take care to forward the", "# exception, if any, to fail the entire transfer in event of", "# trouble.", "assert", "g", ".", "ready", "(", "...
Forward any raised exceptions across a channel.
[ "Forward", "any", "raised", "exceptions", "across", "a", "channel", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/pg/wal_transfer.py#L164-L192
242,594
wal-e/wal-e
wal_e/piper.py
pipe
def pipe(*args): """ Takes as parameters several dicts, each with the same parameters passed to popen. Runs the various processes in a pipeline, connecting the stdout of every process except the last with the stdin of the next process. Adapted from http://www.enricozini.org/2009/debian/python-pipes/ """ if len(args) < 2: raise ValueError("pipe needs at least 2 processes") # Set stdout=PIPE in every subprocess except the last for i in args[:-1]: i["stdout"] = subprocess.PIPE # Runs all subprocesses connecting stdins and stdouts to create the # pipeline. Closes stdouts to avoid deadlocks. popens = [popen_sp(**args[0])] for i in range(1, len(args)): args[i]["stdin"] = popens[i - 1].stdout popens.append(popen_sp(**args[i])) popens[i - 1].stdout.close() # Returns the array of subprocesses just created return popens
python
def pipe(*args): if len(args) < 2: raise ValueError("pipe needs at least 2 processes") # Set stdout=PIPE in every subprocess except the last for i in args[:-1]: i["stdout"] = subprocess.PIPE # Runs all subprocesses connecting stdins and stdouts to create the # pipeline. Closes stdouts to avoid deadlocks. popens = [popen_sp(**args[0])] for i in range(1, len(args)): args[i]["stdin"] = popens[i - 1].stdout popens.append(popen_sp(**args[i])) popens[i - 1].stdout.close() # Returns the array of subprocesses just created return popens
[ "def", "pipe", "(", "*", "args", ")", ":", "if", "len", "(", "args", ")", "<", "2", ":", "raise", "ValueError", "(", "\"pipe needs at least 2 processes\"", ")", "# Set stdout=PIPE in every subprocess except the last", "for", "i", "in", "args", "[", ":", "-", "...
Takes as parameters several dicts, each with the same parameters passed to popen. Runs the various processes in a pipeline, connecting the stdout of every process except the last with the stdin of the next process. Adapted from http://www.enricozini.org/2009/debian/python-pipes/
[ "Takes", "as", "parameters", "several", "dicts", "each", "with", "the", "same", "parameters", "passed", "to", "popen", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/piper.py#L94-L122
242,595
wal-e/wal-e
wal_e/piper.py
pipe_wait
def pipe_wait(popens): """ Given an array of Popen objects returned by the pipe method, wait for all processes to terminate and return the array with their return values. Taken from http://www.enricozini.org/2009/debian/python-pipes/ """ # Avoid mutating the passed copy popens = copy.copy(popens) results = [0] * len(popens) while popens: last = popens.pop(-1) results[len(popens)] = last.wait() return results
python
def pipe_wait(popens): # Avoid mutating the passed copy popens = copy.copy(popens) results = [0] * len(popens) while popens: last = popens.pop(-1) results[len(popens)] = last.wait() return results
[ "def", "pipe_wait", "(", "popens", ")", ":", "# Avoid mutating the passed copy", "popens", "=", "copy", ".", "copy", "(", "popens", ")", "results", "=", "[", "0", "]", "*", "len", "(", "popens", ")", "while", "popens", ":", "last", "=", "popens", ".", ...
Given an array of Popen objects returned by the pipe method, wait for all processes to terminate and return the array with their return values. Taken from http://www.enricozini.org/2009/debian/python-pipes/
[ "Given", "an", "array", "of", "Popen", "objects", "returned", "by", "the", "pipe", "method", "wait", "for", "all", "processes", "to", "terminate", "and", "return", "the", "array", "with", "their", "return", "values", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/piper.py#L125-L140
242,596
wal-e/wal-e
wal_e/blobstore/wabs/calling_format.py
CallingInfo.connect
def connect(self, creds): """Return an azure BlockBlobService instance. """ return BlockBlobService(account_name=creds.account_name, account_key=creds.account_key, sas_token=creds.access_token, protocol='https')
python
def connect(self, creds): return BlockBlobService(account_name=creds.account_name, account_key=creds.account_key, sas_token=creds.access_token, protocol='https')
[ "def", "connect", "(", "self", ",", "creds", ")", ":", "return", "BlockBlobService", "(", "account_name", "=", "creds", ".", "account_name", ",", "account_key", "=", "creds", ".", "account_key", ",", "sas_token", "=", "creds", ".", "access_token", ",", "prot...
Return an azure BlockBlobService instance.
[ "Return", "an", "azure", "BlockBlobService", "instance", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/wabs/calling_format.py#L23-L29
242,597
wal-e/wal-e
wal_e/blobstore/file/file_util.py
do_lzop_get
def do_lzop_get(creds, url, path, decrypt, do_retry): """ Get and decompress a URL This streams the content directly to lzop; the compressed version is never stored on disk. """ assert url.endswith('.lzo'), 'Expect an lzop-compressed file' with files.DeleteOnError(path) as decomp_out: key = _uri_to_key(creds, url) with get_download_pipeline(PIPE, decomp_out.f, decrypt) as pl: g = gevent.spawn(write_and_return_error, key, pl.stdin) exc = g.get() if exc is not None: raise exc logger.info( msg='completed download and decompression', detail='Downloaded and decompressed "{url}" to "{path}"' .format(url=url, path=path)) return True
python
def do_lzop_get(creds, url, path, decrypt, do_retry): assert url.endswith('.lzo'), 'Expect an lzop-compressed file' with files.DeleteOnError(path) as decomp_out: key = _uri_to_key(creds, url) with get_download_pipeline(PIPE, decomp_out.f, decrypt) as pl: g = gevent.spawn(write_and_return_error, key, pl.stdin) exc = g.get() if exc is not None: raise exc logger.info( msg='completed download and decompression', detail='Downloaded and decompressed "{url}" to "{path}"' .format(url=url, path=path)) return True
[ "def", "do_lzop_get", "(", "creds", ",", "url", ",", "path", ",", "decrypt", ",", "do_retry", ")", ":", "assert", "url", ".", "endswith", "(", "'.lzo'", ")", ",", "'Expect an lzop-compressed file'", "with", "files", ".", "DeleteOnError", "(", "path", ")", ...
Get and decompress a URL This streams the content directly to lzop; the compressed version is never stored on disk.
[ "Get", "and", "decompress", "a", "URL" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/file/file_util.py#L36-L59
242,598
wal-e/wal-e
wal_e/worker/pg/psql_worker.py
psql_csv_run
def psql_csv_run(sql_command, error_handler=None): """ Runs psql and returns a CSVReader object from the query This CSVReader includes header names as the first record in all situations. The output is fully buffered into Python. """ csv_query = ('COPY ({query}) TO STDOUT WITH CSV HEADER;' .format(query=sql_command)) new_env = os.environ.copy() new_env.setdefault('PGOPTIONS', '') new_env["PGOPTIONS"] += ' --statement-timeout=0' psql_proc = popen_nonblock([PSQL_BIN, '-d', 'postgres', '--no-password', '--no-psqlrc', '-c', csv_query], stdout=PIPE, env=new_env) stdout = psql_proc.communicate()[0].decode('utf-8') if psql_proc.returncode != 0: if error_handler is not None: error_handler(psql_proc) else: assert error_handler is None raise UserException( 'could not csv-execute a query successfully via psql', 'Query was "{query}".'.format(sql_command), 'You may have to set some libpq environment ' 'variables if you are sure the server is running.') # Previous code must raise any desired exceptions for non-zero # exit codes assert psql_proc.returncode == 0 # Fake enough iterator interface to get a CSV Reader object # that works. return csv.reader(iter(stdout.strip().split('\n')))
python
def psql_csv_run(sql_command, error_handler=None): csv_query = ('COPY ({query}) TO STDOUT WITH CSV HEADER;' .format(query=sql_command)) new_env = os.environ.copy() new_env.setdefault('PGOPTIONS', '') new_env["PGOPTIONS"] += ' --statement-timeout=0' psql_proc = popen_nonblock([PSQL_BIN, '-d', 'postgres', '--no-password', '--no-psqlrc', '-c', csv_query], stdout=PIPE, env=new_env) stdout = psql_proc.communicate()[0].decode('utf-8') if psql_proc.returncode != 0: if error_handler is not None: error_handler(psql_proc) else: assert error_handler is None raise UserException( 'could not csv-execute a query successfully via psql', 'Query was "{query}".'.format(sql_command), 'You may have to set some libpq environment ' 'variables if you are sure the server is running.') # Previous code must raise any desired exceptions for non-zero # exit codes assert psql_proc.returncode == 0 # Fake enough iterator interface to get a CSV Reader object # that works. return csv.reader(iter(stdout.strip().split('\n')))
[ "def", "psql_csv_run", "(", "sql_command", ",", "error_handler", "=", "None", ")", ":", "csv_query", "=", "(", "'COPY ({query}) TO STDOUT WITH CSV HEADER;'", ".", "format", "(", "query", "=", "sql_command", ")", ")", "new_env", "=", "os", ".", "environ", ".", ...
Runs psql and returns a CSVReader object from the query This CSVReader includes header names as the first record in all situations. The output is fully buffered into Python.
[ "Runs", "psql", "and", "returns", "a", "CSVReader", "object", "from", "the", "query" ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/pg/psql_worker.py#L34-L71
242,599
wal-e/wal-e
wal_e/worker/pg/psql_worker.py
PgBackupStatements._wal_name
def _wal_name(cls): """ Sets and returns _WAL_NAME to 'wal' or 'xlog' depending on version of postgres we are working with. It is used for handling xlog -> wal rename in postgres v10 """ if cls._WAL_NAME is None: version = cls._dict_transform(psql_csv_run( "SELECT current_setting('server_version_num')")) if int(version['current_setting']) >= 100000: cls._WAL_NAME = 'wal' else: cls._WAL_NAME = 'xlog' return cls._WAL_NAME
python
def _wal_name(cls): if cls._WAL_NAME is None: version = cls._dict_transform(psql_csv_run( "SELECT current_setting('server_version_num')")) if int(version['current_setting']) >= 100000: cls._WAL_NAME = 'wal' else: cls._WAL_NAME = 'xlog' return cls._WAL_NAME
[ "def", "_wal_name", "(", "cls", ")", ":", "if", "cls", ".", "_WAL_NAME", "is", "None", ":", "version", "=", "cls", ".", "_dict_transform", "(", "psql_csv_run", "(", "\"SELECT current_setting('server_version_num')\"", ")", ")", "if", "int", "(", "version", "[",...
Sets and returns _WAL_NAME to 'wal' or 'xlog' depending on version of postgres we are working with. It is used for handling xlog -> wal rename in postgres v10
[ "Sets", "and", "returns", "_WAL_NAME", "to", "wal", "or", "xlog", "depending", "on", "version", "of", "postgres", "we", "are", "working", "with", "." ]
027263860e72a403bc0e1497bb3e67523138e7a2
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/pg/psql_worker.py#L91-L106