repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
lordmauve/lepton
lepton/system.py
ParticleSystem.run_ahead
def run_ahead(self, time, framerate):
    """Run the particle system for the specified time frame at the
    specified framerate to move time forward as quickly as possible.
    Useful for "warming up" the particle system to reach a steady-state
    before anything is drawn or to simply "skip ahead" in time.

    time -- The amount of simulation time to skip over.

    framerate -- The framerate of the simulation in updates per unit
    time. Higher values will increase simulation accuracy, but will take
    longer to compute.
    """
    if time:
        step = 1.0 / framerate
        # Bind the bound method once so the hot loop avoids repeated
        # attribute lookups.
        tick = self.update
        steps = int(time / step)
        for _ in range(steps):
            tick(step)
python
def run_ahead(self, time, framerate): """Run the particle system for the specified time frame at the specified framerate to move time forward as quickly as possible. Useful for "warming up" the particle system to reach a steady-state before anything is drawn or to simply "skip ahead" in time. time -- The amount of simulation time to skip over. framerate -- The framerate of the simulation in updates per unit time. Higher values will increase simulation accuracy, but will take longer to compute. """ if time: td = 1.0 / framerate update = self.update for i in range(int(time / td)): update(td)
[ "def", "run_ahead", "(", "self", ",", "time", ",", "framerate", ")", ":", "if", "time", ":", "td", "=", "1.0", "/", "framerate", "update", "=", "self", ".", "update", "for", "i", "in", "range", "(", "int", "(", "time", "/", "td", ")", ")", ":", ...
Run the particle system for the specified time frame at the specified framerate to move time forward as quickly as possible. Useful for "warming up" the particle system to reach a steady-state before anything is drawn or to simply "skip ahead" in time. time -- The amount of simulation time to skip over. framerate -- The framerate of the simulation in updates per unit time. Higher values will increase simulation accuracy, but will take longer to compute.
[ "Run", "the", "particle", "system", "for", "the", "specified", "time", "frame", "at", "the", "specified", "framerate", "to", "move", "time", "forward", "as", "quickly", "as", "possible", ".", "Useful", "for", "warming", "up", "the", "particle", "system", "to...
train
https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/lepton/system.py#L72-L88
karel-brinda/rnftools
rnftools/lavender/Panel.py
Panel.get_html_column
def get_html_column(self):
    """
    Get a HTML column for this panel.

    Returns a list of HTML fragments: [header+download link,
    per-BAM link list, main graph, *overall graphs (one per SVG)].
    """
    panel_id = "panel_{}".format(self.name)

    # Panel header plus a link to the downloadable tarball.
    header_cell = "<h2>{}</h2>".format(self.title) + '<a href="{}">Download data</a>'.format(self.tar_fn())

    # One "display graph / detailed report" link row per BAM file.
    bam_links = (" <br />" + os.linesep).join(
        [
            """
            <strong>{bam_name}:</strong>
            <a onclick="document.getElementById('{panel_id}').src='{bam_svg}';document.getElementById('{panel_id}_').href='{bam_html}';return false;" href="#">display graph</a>,
            <a href="{bam_html}">detailed report</a>
            """.format(
                bam_name=bam.get_name(),
                bam_html=bam.html_fn(),
                bam_svg=bam.svg_fn(),
                panel_id=panel_id,
            )
            for bam in self.bams
        ]
        # BUG FIX: the original appended '<br /> '.format(self.tar_fn()).
        # str.format on a literal with no replacement fields discards its
        # argument, so the call was a no-op; use the plain literal.
    ) + '<br /> '

    # Main (first BAM's) graph, switched in place by the onclick handlers above.
    main_graph = """
        <div class="formats">
        <a href="{html}" id="{panel_id}_">
        <img src="{svg}" id="{panel_id}" />
        </a>
        </div>
        """.format(
        html=self.bams[0]._html_fn,
        svg=self.bams[0]._svg_fn,
        panel_id=panel_id,
    )

    # Overall summary graphs, one per panel-level SVG, each linking the GP source.
    overall_graphs = [
        """
        <div class="formats">
        <img src="{svg}" />
        <br />
        <a href="{svg}">SVG version</a> |
        <a href="{gp}" type="text/plain">GP file</a>
        </div>
        """.format(
            svg=svg,
            gp=self._gp_fn,
        )
        for svg in self._svg_fns
    ]

    return [header_cell] + [bam_links, main_graph] + overall_graphs
python
def get_html_column(self): """ Get a HTML column for this panel. """ panel_id = "panel_{}".format(self.name) return ["<h2>{}</h2>".format(self.title) + '<a href="{}">Download data</a>'.format(self.tar_fn())] + [ # list of links (" <br />" + os.linesep).join( [ """ <strong>{bam_name}:</strong> <a onclick="document.getElementById('{panel_id}').src='{bam_svg}';document.getElementById('{panel_id}_').href='{bam_html}';return false;" href="#">display graph</a>, <a href="{bam_html}">detailed report</a> """.format( bam_name=bam.get_name(), bam_html=bam.html_fn(), bam_svg=bam.svg_fn(), panel_id=panel_id, ) for bam in self.bams ] ) + '<br /> '.format(self.tar_fn()), # main graph """ <div class="formats"> <a href="{html}" id="{panel_id}_"> <img src="{svg}" id="{panel_id}" /> </a> </div> """.format( html=self.bams[0]._html_fn, svg=self.bams[0]._svg_fn, panel_id=panel_id, ), ] + [ # overall graphs """ <div class="formats"> <img src="{svg}" /> <br /> <a href="{svg}">SVG version</a> | <a href="{gp}" type="text/plain">GP file</a> </div> """.format( svg=svg, gp=self._gp_fn, ) for svg in self._svg_fns ]
[ "def", "get_html_column", "(", "self", ")", ":", "panel_id", "=", "\"panel_{}\"", ".", "format", "(", "self", ".", "name", ")", "return", "[", "\"<h2>{}</h2>\"", ".", "format", "(", "self", ".", "title", ")", "+", "'<a href=\"{}\">Download data</a>'", ".", "...
Get a HTML column for this panel.
[ "Get", "a", "HTML", "column", "for", "this", "panel", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/lavender/Panel.py#L124-L172
karel-brinda/rnftools
rnftools/lavender/Panel.py
Panel.create_gp
# create_gp: assembles a GnuPlot script for this panel from four parts joined
# with os.linesep — (1) log-scale x/x2 axis and tic setup, (2) one style line
# per BAM produced by self._gp_style_func, (3) y-axis percent format, grid and
# tab-separated-data setup, and (4) the plot commands accumulated in
# self.gp_plots — then writes the script to self._gp_fn.
# NOTE(review): the commented-out `.format(x_lab=...)` call after the join
# suggests an abandoned x-axis-label substitution; the doubled braces
# ("10^{{%L}}") in the literals only make sense if a .format pass was once
# applied — confirm the rendered tic labels look right in the output.
def create_gp(self): """ Create GnuPlot file. """ nb_bams = len(self.bams) gp_parts = [ textwrap.dedent( """\ set log x set log x2 #set format x "10^{{%L}}" set format x2 "10^{{%L}}" set x2tics unset xtics """ ), os.linesep.join([self._gp_style_func(i, nb_bams) for i in range(nb_bams)]), textwrap.dedent( """\ set format y "%g %%" set ytics set pointsize 1.5 set grid ytics lc rgb "#777777" lw 1 lt 0 front set grid x2tics lc rgb "#777777" lw 1 lt 0 front set datafile separator "\\t" set palette negative """ ), os.linesep.join(self.gp_plots) ] gp_src = os.linesep.join(gp_parts) # .format( # x_lab=self.default_x_label, # ) with open(self._gp_fn, "w+") as f: f.write(gp_src)
python
def create_gp(self): """ Create GnuPlot file. """ nb_bams = len(self.bams) gp_parts = [ textwrap.dedent( """\ set log x set log x2 #set format x "10^{{%L}}" set format x2 "10^{{%L}}" set x2tics unset xtics """ ), os.linesep.join([self._gp_style_func(i, nb_bams) for i in range(nb_bams)]), textwrap.dedent( """\ set format y "%g %%" set ytics set pointsize 1.5 set grid ytics lc rgb "#777777" lw 1 lt 0 front set grid x2tics lc rgb "#777777" lw 1 lt 0 front set datafile separator "\\t" set palette negative """ ), os.linesep.join(self.gp_plots) ] gp_src = os.linesep.join(gp_parts) # .format( # x_lab=self.default_x_label, # ) with open(self._gp_fn, "w+") as f: f.write(gp_src)
[ "def", "create_gp", "(", "self", ")", ":", "nb_bams", "=", "len", "(", "self", ".", "bams", ")", "gp_parts", "=", "[", "textwrap", ".", "dedent", "(", "\"\"\"\\\n\t\t\t\tset log x\n\t\t\t\tset log x2\n\n\n\t\t\t\t#set format x \"10^{{%L}}\"\n\t\t\t\tset format x2 \"10^{{%L}...
Create GnuPlot file.
[ "Create", "GnuPlot", "file", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/lavender/Panel.py#L255-L297
karel-brinda/rnftools
rnftools/lavender/Panel.py
Panel.create_graphics
def create_graphics(self):
    """Create images related to this panel."""
    if not self._svg_fns:
        # Nothing to render for this panel.
        return
    # Run gnuplot on the panel's GP script to produce the SVG files.
    rnftools.utils.shell('"{}" "{}"'.format("gnuplot", self._gp_fn))
    if self.render_pdf_method is not None:
        # Convert every rendered SVG into a sibling PDF.
        for svg_fn in self._svg_fns:
            pdf_fn = re.sub(r'\.svg$', r'.pdf', svg_fn)
            svg42pdf(svg_fn, pdf_fn, method=self.render_pdf_method)
python
def create_graphics(self): """Create images related to this panel.""" if len(self._svg_fns) > 0: rnftools.utils.shell('"{}" "{}"'.format("gnuplot", self._gp_fn)) if self.render_pdf_method is not None: for svg_fn in self._svg_fns: pdf_fn = re.sub(r'\.svg$', r'.pdf', svg_fn) svg42pdf(svg_fn, pdf_fn, method=self.render_pdf_method)
[ "def", "create_graphics", "(", "self", ")", ":", "if", "len", "(", "self", ".", "_svg_fns", ")", ">", "0", ":", "rnftools", ".", "utils", ".", "shell", "(", "'\"{}\" \"{}\"'", ".", "format", "(", "\"gnuplot\"", ",", "self", ".", "_gp_fn", ")", ")", "...
Create images related to this panel.
[ "Create", "images", "related", "to", "this", "panel", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/lavender/Panel.py#L299-L308
karel-brinda/rnftools
rnftools/lavender/Panel.py
Panel.create_tar
# create_tar: packs the per-BAM ROC and GP files plus the panel-level GP file
# and a generated Makefile into the tar archive self._tar_fn.  The helpers
# add_file_to_tar / add_text_to_tar create tar members from a disk file or an
# in-memory string (optionally transformed by `func` first); strip_lines
# normalizes tabs and runs of spaces and strips each line so the rewritten
# files stay tidy.  The `func` lambdas rewrite absolute roc/svg/graphics
# paths to archive-relative names so the tarball is self-contained.
# NOTE(review): each lambda capturing roc_fn/svg_fn is invoked immediately in
# the same loop iteration, so Python's late-binding closure pitfall does not
# bite here.  The TarFile is never explicitly closed — presumably relies on
# interpreter cleanup; confirm, or wrap in a `with` upstream.
def create_tar(self): """Create a tar file with all the files.""" def add_file_to_tar(tar, orig_fn, new_fn, func=None): tf = tarfile.TarInfo(name=new_fn) with open(orig_fn) as f: tfs = f.read() if func is not None: tfs = func(tfs) tf.size = len(tfs) tfs = io.BytesIO(tfs.encode('utf8')) tar.addfile(tarinfo=tf, fileobj=tfs) def add_text_to_tar(tar, new_fn, text, func=None): tf = tarfile.TarInfo(name=new_fn) if func is not None: text = func(text) tf.size = len(text) tfs = io.BytesIO(text.encode('utf8')) tar.addfile(tarinfo=tf, fileobj=tfs) def strip_lines(text): text = text.replace("\t", " ") while text.find(" ") != -1: text = text.replace(" ", " ") lines = [x.strip() for x in text.strip().split("\n")] return "\n".join(lines) + "\n" tar = tarfile.TarFile(self._tar_fn, "w") for i in range(len(self.bams)): roc_fn = self.bams[i].roc_fn() t_roc_fn = os.path.basename(roc_fn) gp_fn = self.bams[i].gp_fn() t_gp_fn = os.path.basename(gp_fn) svg_fn = self.bams[i].svg_fn() t_svg_fn = os.path.basename(svg_fn) add_file_to_tar(tar, roc_fn, t_roc_fn) add_file_to_tar( tar, gp_fn, t_gp_fn, lambda x: strip_lines(x.replace(roc_fn, t_roc_fn).replace(svg_fn, t_svg_fn)) ) gp_fn = self._gp_fn t_gp_fn = os.path.basename(gp_fn) svg_dir = os.path.join(self.panel_dir, "graphics") + "/" roc_dir = os.path.join(self.panel_dir, "roc") + "/" add_file_to_tar(tar, gp_fn, t_gp_fn, lambda x: strip_lines(x.replace(svg_dir, "").replace(roc_dir, ""))) makefile = [ ".PHONY: all", "all:", "\tgnuplot *.gp", "clean:", "\trm -f *.svg", "", ] add_text_to_tar(tar, "Makefile", "\n".join(makefile))
python
def create_tar(self): """Create a tar file with all the files.""" def add_file_to_tar(tar, orig_fn, new_fn, func=None): tf = tarfile.TarInfo(name=new_fn) with open(orig_fn) as f: tfs = f.read() if func is not None: tfs = func(tfs) tf.size = len(tfs) tfs = io.BytesIO(tfs.encode('utf8')) tar.addfile(tarinfo=tf, fileobj=tfs) def add_text_to_tar(tar, new_fn, text, func=None): tf = tarfile.TarInfo(name=new_fn) if func is not None: text = func(text) tf.size = len(text) tfs = io.BytesIO(text.encode('utf8')) tar.addfile(tarinfo=tf, fileobj=tfs) def strip_lines(text): text = text.replace("\t", " ") while text.find(" ") != -1: text = text.replace(" ", " ") lines = [x.strip() for x in text.strip().split("\n")] return "\n".join(lines) + "\n" tar = tarfile.TarFile(self._tar_fn, "w") for i in range(len(self.bams)): roc_fn = self.bams[i].roc_fn() t_roc_fn = os.path.basename(roc_fn) gp_fn = self.bams[i].gp_fn() t_gp_fn = os.path.basename(gp_fn) svg_fn = self.bams[i].svg_fn() t_svg_fn = os.path.basename(svg_fn) add_file_to_tar(tar, roc_fn, t_roc_fn) add_file_to_tar( tar, gp_fn, t_gp_fn, lambda x: strip_lines(x.replace(roc_fn, t_roc_fn).replace(svg_fn, t_svg_fn)) ) gp_fn = self._gp_fn t_gp_fn = os.path.basename(gp_fn) svg_dir = os.path.join(self.panel_dir, "graphics") + "/" roc_dir = os.path.join(self.panel_dir, "roc") + "/" add_file_to_tar(tar, gp_fn, t_gp_fn, lambda x: strip_lines(x.replace(svg_dir, "").replace(roc_dir, ""))) makefile = [ ".PHONY: all", "all:", "\tgnuplot *.gp", "clean:", "\trm -f *.svg", "", ] add_text_to_tar(tar, "Makefile", "\n".join(makefile))
[ "def", "create_tar", "(", "self", ")", ":", "def", "add_file_to_tar", "(", "tar", ",", "orig_fn", ",", "new_fn", ",", "func", "=", "None", ")", ":", "tf", "=", "tarfile", ".", "TarInfo", "(", "name", "=", "new_fn", ")", "with", "open", "(", "orig_fn"...
Create a tar file with all the files.
[ "Create", "a", "tar", "file", "with", "all", "the", "files", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/lavender/Panel.py#L310-L369
LIVVkit/LIVVkit
livvkit/util/functions.py
merge_dicts
def merge_dicts(dict1, dict2):
    """ Merge two dictionaries and return the result """
    # Copy first (preserving dict1's concrete type via .copy()), then let
    # dict2's entries win on key collisions; neither input is mutated.
    merged = dict1.copy()
    merged.update(dict2)
    return merged
python
def merge_dicts(dict1, dict2): """ Merge two dictionaries and return the result """ tmp = dict1.copy() tmp.update(dict2) return tmp
[ "def", "merge_dicts", "(", "dict1", ",", "dict2", ")", ":", "tmp", "=", "dict1", ".", "copy", "(", ")", "tmp", ".", "update", "(", "dict2", ")", "return", "tmp" ]
Merge two dictionaries and return the result
[ "Merge", "two", "dictionaries", "and", "return", "the", "result" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/functions.py#L70-L74
LIVVkit/LIVVkit
livvkit/util/functions.py
parse_gptl
def parse_gptl(file_path, var_list):
    """ Read a GPTL timing file and extract some data.

    Args:
        file_path: the path to the GPTL timing file
        var_list: a list of strings to look for in the file

    Returns:
        A dict containing key-value pairs of the livvkit
        and the times associated with them
    """
    timing_result = dict()
    if os.path.isfile(file_path):
        with open(file_path, 'r') as f:
            lines = f.readlines()
        # BUG FIX: the original iterated the open file handle inside the
        # per-variable loop, so each variable's scan resumed where the
        # previous match stopped; any variable appearing earlier in the file
        # than an already-matched one was silently never found.  Reading the
        # lines once and rescanning per variable makes lookup order-independent.
        for var in var_list:
            for line in lines:
                if var in line:
                    fields = line.split()
                    # wallclock total (col 4) divided by call count (col 2)
                    timing_result[var] = float(fields[4]) / int(fields[2])
                    break
    return timing_result
python
def parse_gptl(file_path, var_list): """ Read a GPTL timing file and extract some data. Args: file_path: the path to the GPTL timing file var_list: a list of strings to look for in the file Returns: A dict containing key-value pairs of the livvkit and the times associated with them """ timing_result = dict() if os.path.isfile(file_path): with open(file_path, 'r') as f: for var in var_list: for line in f: if var in line: timing_result[var] = float(line.split()[4])/int(line.split()[2]) break return timing_result
[ "def", "parse_gptl", "(", "file_path", ",", "var_list", ")", ":", "timing_result", "=", "dict", "(", ")", "if", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "with", "open", "(", "file_path", ",", "'r'", ")", "as", "f", ":", "for", ...
Read a GPTL timing file and extract some data. Args: file_path: the path to the GPTL timing file var_list: a list of strings to look for in the file Returns: A dict containing key-value pairs of the livvkit and the times associated with them
[ "Read", "a", "GPTL", "timing", "file", "and", "extract", "some", "data", "." ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/functions.py#L77-L97
LIVVkit/LIVVkit
livvkit/util/functions.py
find_file
def find_file(search_dir, file_pattern):
    """ Search for a file in a directory, and return the first match.
    If the file is not found return an empty string

    Args:
        search_dir: The root directory to search in
        file_pattern: A unix-style wildcard pattern representing
            the file to find

    Returns:
        The path to the file if it was found, otherwise an empty string
    """
    for dirpath, _subdirs, filenames in os.walk(search_dir):
        # fnmatch.filter keeps filenames' order, so the first hit here is the
        # same file the per-name fnmatch loop would have returned.
        hits = fnmatch.filter(filenames, file_pattern)
        if hits:
            return os.path.join(dirpath, hits[0])
    return ""
python
def find_file(search_dir, file_pattern): """ Search for a file in a directory, and return the first match. If the file is not found return an empty string Args: search_dir: The root directory to search in file_pattern: A unix-style wildcard pattern representing the file to find Returns: The path to the file if it was found, otherwise an empty string """ for root, dirnames, fnames in os.walk(search_dir): for fname in fnames: if fnmatch.fnmatch(fname, file_pattern): return os.path.join(root, fname) return ""
[ "def", "find_file", "(", "search_dir", ",", "file_pattern", ")", ":", "for", "root", ",", "dirnames", ",", "fnames", "in", "os", ".", "walk", "(", "search_dir", ")", ":", "for", "fname", "in", "fnames", ":", "if", "fnmatch", ".", "fnmatch", "(", "fname...
Search for a file in a directory, and return the first match. If the file is not found return an empty string Args: search_dir: The root directory to search in file_pattern: A unix-style wildcard pattern representing the file to find Returns: The path to the file if it was found, otherwise an empty string
[ "Search", "for", "a", "file", "in", "a", "directory", "and", "return", "the", "first", "match", ".", "If", "the", "file", "is", "not", "found", "return", "an", "empty", "string" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/functions.py#L100-L117
LIVVkit/LIVVkit
livvkit/util/functions.py
create_page_from_template
def create_page_from_template(template_file, output_path):
    """ Copy the correct html template file to the output directory """
    # Make sure the destination directory exists before copying.
    mkdir_p(os.path.dirname(output_path))
    template_src = os.path.join(livvkit.resource_dir, template_file)
    shutil.copy(template_src, output_path)
python
def create_page_from_template(template_file, output_path): """ Copy the correct html template file to the output directory """ mkdir_p(os.path.dirname(output_path)) shutil.copy(os.path.join(livvkit.resource_dir, template_file), output_path)
[ "def", "create_page_from_template", "(", "template_file", ",", "output_path", ")", ":", "mkdir_p", "(", "os", ".", "path", ".", "dirname", "(", "output_path", ")", ")", "shutil", ".", "copy", "(", "os", ".", "path", ".", "join", "(", "livvkit", ".", "res...
Copy the correct html template file to the output directory
[ "Copy", "the", "correct", "html", "template", "file", "to", "the", "output", "directory" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/functions.py#L130-L133
LIVVkit/LIVVkit
livvkit/util/functions.py
read_json
def read_json(file_path):
    """ Read in a json file and return a dictionary representation """
    banner = ' ' + '!' * 58
    try:
        with open(file_path, 'r') as f:
            parsed = json_tricks.load(f)
    except ValueError:
        # Surface a friendly hint before re-raising the parse error.
        print(banner)
        print(' Woops! Looks the JSON syntax is not valid in:')
        print(' {}'.format(file_path))
        print(' Note: commonly this is a result of having a trailing comma \n in the file')
        print(banner)
        raise
    return parsed
python
def read_json(file_path): """ Read in a json file and return a dictionary representation """ try: with open(file_path, 'r') as f: config = json_tricks.load(f) except ValueError: print(' '+'!'*58) print(' Woops! Looks the JSON syntax is not valid in:') print(' {}'.format(file_path)) print(' Note: commonly this is a result of having a trailing comma \n in the file') print(' '+'!'*58) raise return config
[ "def", "read_json", "(", "file_path", ")", ":", "try", ":", "with", "open", "(", "file_path", ",", "'r'", ")", "as", "f", ":", "config", "=", "json_tricks", ".", "load", "(", "f", ")", "except", "ValueError", ":", "print", "(", "' '", "+", "'!'", ...
Read in a json file and return a dictionary representation
[ "Read", "in", "a", "json", "file", "and", "return", "a", "dictionary", "representation" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/functions.py#L136-L149
LIVVkit/LIVVkit
livvkit/util/functions.py
write_json
def write_json(data, path, file_name):
    """ Write out data to a json file.

    Args:
        data: A dictionary representation of the data to write out
        path: The directory to output the file in
        file_name: The name of the file to write out
    """
    if os.path.exists(path):
        # Refuse to write when the target path exists but is not a directory.
        if not os.path.isdir(path):
            return
    else:
        mkdir_p(path)
    out_file = os.path.join(path, file_name)
    with open(out_file, 'w') as f:
        json_tricks.dump(data, f, indent=4, primitives=True, allow_nan=True)
python
def write_json(data, path, file_name): """ Write out data to a json file. Args: data: A dictionary representation of the data to write out path: The directory to output the file in file_name: The name of the file to write out """ if os.path.exists(path) and not os.path.isdir(path): return elif not os.path.exists(path): mkdir_p(path) with open(os.path.join(path, file_name), 'w') as f: json_tricks.dump(data, f, indent=4, primitives=True, allow_nan=True)
[ "def", "write_json", "(", "data", ",", "path", ",", "file_name", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", "and", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "return", "elif", "not", "os", ".", "path",...
Write out data to a json file. Args: data: A dictionary representation of the data to write out path: The directory to output the file in file_name: The name of the file to write out
[ "Write", "out", "data", "to", "a", "json", "file", "." ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/functions.py#L152-L166
LIVVkit/LIVVkit
livvkit/util/functions.py
collect_cases
def collect_cases(data_dir):
    """ Find all cases and subcases of a particular run type """
    cases = {}
    for root, dirs, _files in os.walk(data_dir):
        # Only leaf directories (no subdirectories) count as subcases.
        if dirs:
            continue
        parts = os.path.relpath(root, data_dir).split(os.path.sep)
        # First path component is the case; the rest joined by '-' is the subcase.
        cases.setdefault(parts[0], []).append("-".join(parts[1:]))
    return cases
python
def collect_cases(data_dir): """ Find all cases and subcases of a particular run type """ cases = {} for root, dirs, files in os.walk(data_dir): if not dirs: split_case = os.path.relpath(root, data_dir).split(os.path.sep) if split_case[0] not in cases: cases[split_case[0]] = [] cases[split_case[0]].append("-".join(split_case[1:])) return cases
[ "def", "collect_cases", "(", "data_dir", ")", ":", "cases", "=", "{", "}", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "data_dir", ")", ":", "if", "not", "dirs", ":", "split_case", "=", "os", ".", "path", ".", "relpath",...
Find all cases and subcases of a particular run type
[ "Find", "all", "cases", "and", "subcases", "of", "a", "particular", "run", "type" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/functions.py#L169-L178
LIVVkit/LIVVkit
livvkit/util/functions.py
setup_output
def setup_output(cssd=None, jsd=None, imgd=None):
    """ Set up the directory structure for the output.
    Copies old run data into a timestamped directory
    and sets up the new directory

    Args:
        cssd: optional override directory for css resources
        jsd: optional override directory for js resources
        imgd: optional override directory for image resources
    """
    # Check if we need to back up an old run
    if os.path.isdir(livvkit.index_dir):
        print("-------------------------------------------------------------------")
        print(' Previous output data found in output directory!')
        try:
            f = open(os.path.join(livvkit.index_dir, "data.txt"), "r")
            prev_time = f.readline().replace(":", "").replace("-", "").replace(" ", "_").rstrip()
            f.close()
        except IOError:
            # No readable timestamp from the old run — fall back to "now".
            prev_time = "bkd_" + datetime.now().strftime("%Y%m%d_%H%M%S")
        print(' Backing up data to:')
        print(' ' + livvkit.index_dir + "_" + prev_time)
        print("-------------------------------------------------------------------")
        shutil.move(livvkit.index_dir, livvkit.index_dir + "_" + prev_time)
    else:
        print("-------------------------------------------------------------------")
    # Copy over js, css, & imgs directories from source
    if cssd:
        shutil.copytree(cssd, os.path.join(livvkit.index_dir, "css"))
    else:
        shutil.copytree(os.path.join(livvkit.resource_dir, "css"), os.path.join(livvkit.index_dir, "css"))
    if jsd:
        shutil.copytree(jsd, os.path.join(livvkit.index_dir, "js"))
    else:
        shutil.copytree(os.path.join(livvkit.resource_dir, "js"), os.path.join(livvkit.index_dir, "js"))
    if imgd:
        # BUG FIX: the original copied a user-supplied image directory into
        # "js" (copy-paste error), colliding with the js tree and leaving
        # "imgs" missing; copy it to "imgs" like the default branch does.
        shutil.copytree(imgd, os.path.join(livvkit.index_dir, "imgs"))
    else:
        shutil.copytree(os.path.join(livvkit.resource_dir, "imgs"), os.path.join(livvkit.index_dir, "imgs"))
    # Get the index template from the resource directory
    shutil.copy(os.path.join(livvkit.resource_dir, "index.html"), os.path.join(livvkit.index_dir, "index.html"))
    # Record when this data was recorded so we can make nice backups
    with open(os.path.join(livvkit.index_dir, "data.txt"), "w") as f:
        f.write(livvkit.timestamp + "\n")
        f.write(livvkit.comment)
python
def setup_output(cssd=None, jsd=None, imgd=None): """ Set up the directory structure for the output. Copies old run data into a timestamped directory and sets up the new directory """ # Check if we need to back up an old run if os.path.isdir(livvkit.index_dir): print("-------------------------------------------------------------------") print(' Previous output data found in output directory!') try: f = open(os.path.join(livvkit.index_dir, "data.txt"), "r") prev_time = f.readline().replace(":", "").replace("-", "").replace(" ", "_").rstrip() f.close() except IOError: prev_time = "bkd_"+datetime.now().strftime("%Y%m%d_%H%M%S") print(' Backing up data to:') print(' ' + livvkit.index_dir + "_" + prev_time) print("-------------------------------------------------------------------") shutil.move(livvkit.index_dir, livvkit.index_dir + "_" + prev_time) else: print("-------------------------------------------------------------------") # Copy over js, css, & imgs directories from source if cssd: shutil.copytree(cssd, os.path.join(livvkit.index_dir, "css")) else: shutil.copytree(os.path.join(livvkit.resource_dir, "css"), os.path.join(livvkit.index_dir, "css")) if jsd: shutil.copytree(jsd, os.path.join(livvkit.index_dir, "js")) else: shutil.copytree(os.path.join(livvkit.resource_dir, "js"), os.path.join(livvkit.index_dir, "js")) if imgd: shutil.copytree(imgd, os.path.join(livvkit.index_dir, "js")) else: shutil.copytree(os.path.join(livvkit.resource_dir, "imgs"), os.path.join(livvkit.index_dir, "imgs")) # Get the index template from the resource directory shutil.copy(os.path.join(livvkit.resource_dir, "index.html"), os.path.join(livvkit.index_dir, "index.html")) # Record when this data was recorded so we can make nice backups with open(os.path.join(livvkit.index_dir, "data.txt"), "w") as f: f.write(livvkit.timestamp + "\n") f.write(livvkit.comment)
[ "def", "setup_output", "(", "cssd", "=", "None", ",", "jsd", "=", "None", ",", "imgd", "=", "None", ")", ":", "# Check if we need to back up an old run", "if", "os", ".", "path", ".", "isdir", "(", "livvkit", ".", "index_dir", ")", ":", "print", "(", "\"...
Set up the directory structure for the output. Copies old run data into a timestamped directory and sets up the new directory
[ "Set", "up", "the", "directory", "structure", "for", "the", "output", ".", "Copies", "old", "run", "data", "into", "a", "timestamped", "directory", "and", "sets", "up", "the", "new", "directory" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/functions.py#L181-L226
Stranger6667/pyoffers
pyoffers/utils.py
prepare_query_params
def prepare_query_params(**kwargs):
    """ Prepares given parameters to be used in querystring. """
    pairs = []
    for key, value in kwargs.items():
        # expand() flattens nested structures into (key, value) pairs;
        # drop pairs whose value is None so they never reach the querystring.
        for sub_key, sub_value in expand(value, key):
            if sub_value is not None:
                pairs.append((sub_key, sub_value))
    return pairs
python
def prepare_query_params(**kwargs): """ Prepares given parameters to be used in querystring. """ return [ (sub_key, sub_value) for key, value in kwargs.items() for sub_key, sub_value in expand(value, key) if sub_value is not None ]
[ "def", "prepare_query_params", "(", "*", "*", "kwargs", ")", ":", "return", "[", "(", "sub_key", ",", "sub_value", ")", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", "for", "sub_key", ",", "sub_value", "in", "expand", "(", "value"...
Prepares given parameters to be used in querystring.
[ "Prepares", "given", "parameters", "to", "be", "used", "in", "querystring", "." ]
train
https://github.com/Stranger6667/pyoffers/blob/9575d6cdc878096242268311a22cc5fdd4f64b37/pyoffers/utils.py#L17-L26
Stranger6667/pyoffers
pyoffers/utils.py
Sort.prepend_model
def prepend_model(self, value, model):
    """
    Prepends model name if it is not already prepended. For example model is "Offer":

    key -> Offer.key
    -key -> -Offer.key
    Offer.key -> Offer.key
    -Offer.key -> -Offer.key
    """
    # A dot means a model is already attached — return unchanged.
    if '.' in value:
        return value
    direction = ''
    if value.startswith('-'):
        # Preserve the leading descending-sort marker outside the model name.
        direction = '-'
        value = value[1:]
    return '%s%s.%s' % (direction, model, value)
python
def prepend_model(self, value, model): """ Prepends model name if it is not already prepended. For example model is "Offer": key -> Offer.key -key -> -Offer.key Offer.key -> Offer.key -Offer.key -> -Offer.key """ if '.' not in value: direction = '' if value.startswith('-'): value = value[1:] direction = '-' value = '%s%s.%s' % (direction, model, value) return value
[ "def", "prepend_model", "(", "self", ",", "value", ",", "model", ")", ":", "if", "'.'", "not", "in", "value", ":", "direction", "=", "''", "if", "value", ".", "startswith", "(", "'-'", ")", ":", "value", "=", "value", "[", "1", ":", "]", "direction...
Prepends model name if it is not already prepended. For example model is "Offer": key -> Offer.key -key -> -Offer.key Offer.key -> Offer.key -Offer.key -> -Offer.key
[ "Prepends", "model", "name", "if", "it", "is", "not", "already", "prepended", ".", "For", "example", "model", "is", "Offer", ":" ]
train
https://github.com/Stranger6667/pyoffers/blob/9575d6cdc878096242268311a22cc5fdd4f64b37/pyoffers/utils.py#L49-L65
lordmauve/lepton
examples/fireworks.py
on_draw
# on_draw: pyglet window draw callback — clears the window, resets the
# modelview matrix, moves the camera back 100 units, rotates the scene `yrot`
# degrees about the Y axis, then renders the particle system.  The trailing
# triple-quoted string is dead (commented-out) point-sprite debug code and is
# a no-op expression at runtime.
# NOTE(review): `global yrot` is unnecessary here — yrot is only read, never
# assigned, in this function.
def on_draw(): global yrot win.clear() glLoadIdentity() glTranslatef(0, 0, -100) glRotatef(yrot, 0.0, 1.0, 0.0) default_system.draw() ''' glBindTexture(GL_TEXTURE_2D, 1) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(0,0) glEnd() glBindTexture(GL_TEXTURE_2D, 2) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(50,0) glEnd() glBindTexture(GL_TEXTURE_2D, 0) '''
python
def on_draw(): global yrot win.clear() glLoadIdentity() glTranslatef(0, 0, -100) glRotatef(yrot, 0.0, 1.0, 0.0) default_system.draw() ''' glBindTexture(GL_TEXTURE_2D, 1) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(0,0) glEnd() glBindTexture(GL_TEXTURE_2D, 2) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(50,0) glEnd() glBindTexture(GL_TEXTURE_2D, 0) '''
[ "def", "on_draw", "(", ")", ":", "global", "yrot", "win", ".", "clear", "(", ")", "glLoadIdentity", "(", ")", "glTranslatef", "(", "0", ",", "0", ",", "-", "100", ")", "glRotatef", "(", "yrot", ",", "0.0", ",", "1.0", ",", "0.0", ")", "default_syst...
glBindTexture(GL_TEXTURE_2D, 1) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(0,0) glEnd() glBindTexture(GL_TEXTURE_2D, 2) glEnable(GL_TEXTURE_2D) glEnable(GL_POINT_SPRITE) glPointSize(100); glBegin(GL_POINTS) glVertex2f(50,0) glEnd() glBindTexture(GL_TEXTURE_2D, 0)
[ "glBindTexture", "(", "GL_TEXTURE_2D", "1", ")", "glEnable", "(", "GL_TEXTURE_2D", ")", "glEnable", "(", "GL_POINT_SPRITE", ")", "glPointSize", "(", "100", ")", ";", "glBegin", "(", "GL_POINTS", ")", "glVertex2f", "(", "0", "0", ")", "glEnd", "()", "glBindTe...
train
https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/fireworks.py#L132-L155
Adarnof/adarnauth-esi
esi/tasks.py
cleanup_callbackredirect
def cleanup_callbackredirect(max_age=300):
    """
    Delete old :model:`esi.CallbackRedirect` models.
    Accepts a max_age parameter, in seconds (default 300).
    """
    # Compute the cutoff timestamp; anything created at or before it is purged.
    cutoff = timezone.now() - timedelta(seconds=max_age)
    logger.debug("Deleting all callback redirects created before {0}".format(cutoff.strftime("%b %d %Y %H:%M:%S")))
    CallbackRedirect.objects.filter(created__lte=cutoff).delete()
python
def cleanup_callbackredirect(max_age=300): """ Delete old :model:`esi.CallbackRedirect` models. Accepts a max_age parameter, in seconds (default 300). """ max_age = timezone.now() - timedelta(seconds=max_age) logger.debug("Deleting all callback redirects created before {0}".format(max_age.strftime("%b %d %Y %H:%M:%S"))) CallbackRedirect.objects.filter(created__lte=max_age).delete()
[ "def", "cleanup_callbackredirect", "(", "max_age", "=", "300", ")", ":", "max_age", "=", "timezone", ".", "now", "(", ")", "-", "timedelta", "(", "seconds", "=", "max_age", ")", "logger", ".", "debug", "(", "\"Deleting all callback redirects created before {0}\"",...
Delete old :model:`esi.CallbackRedirect` models. Accepts a max_age parameter, in seconds (default 300).
[ "Delete", "old", ":", "model", ":", "esi", ".", "CallbackRedirect", "models", ".", "Accepts", "a", "max_age", "parameter", "in", "seconds", "(", "default", "300", ")", "." ]
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/tasks.py#L13-L20
robmoggach/django-token-auth
src/token_auth/admin.py
TokenAdmin.send_token_email
def send_token_email(self, request, queryset):
    """
    Sends token email(s) for the selected users.
    """
    # Only tokens that are still valid get forwarded.
    valid_tokens = (t for t in queryset if not t.expired)
    for t in valid_tokens:
        forward_token(t)
python
def send_token_email(self, request, queryset): """ Sends token email(s) for the selected users. """ for token in queryset: if not token.expired: forward_token(token)
[ "def", "send_token_email", "(", "self", ",", "request", ",", "queryset", ")", ":", "for", "token", "in", "queryset", ":", "if", "not", "token", ".", "expired", ":", "forward_token", "(", "token", ")" ]
Sends token email(s) for the selected users.
[ "Sends", "token", "email", "(", "s", ")", "for", "the", "selected", "users", "." ]
train
https://github.com/robmoggach/django-token-auth/blob/7ef6e10c27b0168c1272347d0169cdbd96232ed3/src/token_auth/admin.py#L35-L41
theonion/django-bulbs
bulbs/content/__init__.py
TagCache.count
def count(cls, slug):
    """get the number of objects in the cache for a given slug

    :param slug: cache key
    :return: `int`
    """
    from .models import Content

    # Look the count up in the (hopefully warm) in-memory cache first.
    cached = cls._cache.get(slug)
    if cached is not None:
        return cached
    # Cache miss: ask the search backend and remember the answer.
    cached = Content.search_objects.search(tags=[slug]).count()
    cls._cache[slug] = cached
    return cached
python
def count(cls, slug): """get the number of objects in the cache for a given slug :param slug: cache key :return: `int` """ from .models import Content # Gets the count for a tag, hopefully form an in-memory cache. cnt = cls._cache.get(slug) if cnt is None: cnt = Content.search_objects.search(tags=[slug]).count() cls._cache[slug] = cnt return cnt
[ "def", "count", "(", "cls", ",", "slug", ")", ":", "from", ".", "models", "import", "Content", "# Gets the count for a tag, hopefully form an in-memory cache.", "cnt", "=", "cls", ".", "_cache", ".", "get", "(", "slug", ")", "if", "cnt", "is", "None", ":", "...
get the number of objects in the cache for a given slug :param slug: cache key :return: `int`
[ "get", "the", "number", "of", "objects", "in", "the", "cache", "for", "a", "given", "slug" ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/content/__init__.py#L9-L21
mrstephenneal/mysql-toolkit
mysql/toolkit/datatypes/text.py
Text.is_varchar
def is_varchar(self):
    """Determine if a data record is of the type VARCHAR."""
    spec = DATA_TYPES['varchar']
    # Guard clauses: wrong exact type or too long -> not a VARCHAR.
    if type(self.data) is not spec['type']:
        return None
    if len(self.data) >= spec['max']:
        return None
    self.type = 'VARCHAR'
    self.len = len(self.data)
    return True
python
def is_varchar(self): """Determine if a data record is of the type VARCHAR.""" dt = DATA_TYPES['varchar'] if type(self.data) is dt['type'] and len(self.data) < dt['max']: self.type = 'VARCHAR' self.len = len(self.data) return True
[ "def", "is_varchar", "(", "self", ")", ":", "dt", "=", "DATA_TYPES", "[", "'varchar'", "]", "if", "type", "(", "self", ".", "data", ")", "is", "dt", "[", "'type'", "]", "and", "len", "(", "self", ".", "data", ")", "<", "dt", "[", "'max'", "]", ...
Determine if a data record is of the type VARCHAR.
[ "Determine", "if", "a", "data", "record", "is", "of", "the", "type", "VARCHAR", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/datatypes/text.py#L10-L16
mrstephenneal/mysql-toolkit
mysql/toolkit/datatypes/text.py
Text._is_text_data
def _is_text_data(self, data_type):
    """Private method for testing text data types.

    Sets ``self.type``/``self.len`` and returns True when ``self.data``
    matches the given text type's spec; falls through (None) otherwise.
    """
    spec = DATA_TYPES[data_type]
    if (
        type(self.data) is spec['type']
        and len(self.data) < spec['max']
        # isinstance is the idiomatic type test (was ``type(char) == str``);
        # it guards against non-str elements in sequence-typed data.
        and all(isinstance(char, str) for char in self.data)
    ):
        self.type = data_type.upper()
        self.len = len(self.data)
        return True
python
def _is_text_data(self, data_type): """Private method for testing text data types.""" dt = DATA_TYPES[data_type] if type(self.data) is dt['type'] and len(self.data) < dt['max'] and all(type(char) == str for char in self.data): self.type = data_type.upper() self.len = len(self.data) return True
[ "def", "_is_text_data", "(", "self", ",", "data_type", ")", ":", "dt", "=", "DATA_TYPES", "[", "data_type", "]", "if", "type", "(", "self", ".", "data", ")", "is", "dt", "[", "'type'", "]", "and", "len", "(", "self", ".", "data", ")", "<", "dt", ...
Private method for testing text data types.
[ "Private", "method", "for", "testing", "text", "data", "types", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/datatypes/text.py#L30-L36
lordmauve/lepton
examples/games/bonk/controls.py
Controls.bind_key_name
def bind_key_name(self, function, object_name):
    """Bind a key to an object name.

    Points the *function* entry of ``self.name_map`` at *object_name*.
    Unknown *function* values are a no-op, matching the original
    scan-and-replace behavior.
    """
    # Direct membership test instead of scanning every item: assigning to
    # an existing key is exactly what the original loop did, in O(1).
    if function in self.name_map:
        self.name_map[function] = object_name
python
def bind_key_name(self, function, object_name): """Bind a key to an object name""" for funcname, name in self.name_map.items(): if funcname == function: self.name_map[ funcname] = object_name
[ "def", "bind_key_name", "(", "self", ",", "function", ",", "object_name", ")", ":", "for", "funcname", ",", "name", "in", "self", ".", "name_map", ".", "items", "(", ")", ":", "if", "funcname", "==", "function", ":", "self", ".", "name_map", "[", "func...
Bind a key to an object name
[ "Bind", "a", "key", "to", "an", "object", "name" ]
train
https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/controls.py#L78-L83
lordmauve/lepton
examples/games/bonk/controls.py
Controls.bind_keys
def bind_keys(self, objects):
    """Configure name map: associate each named object with the key
    functions it declares.

    :param objects: iterable of objects carrying ``keys`` (a list of
        function names, possibly containing None, or None itself) and
        ``name`` attributes.
    """
    # ``obj`` instead of ``object`` — don't shadow the builtin.
    for obj in objects:
        if obj.keys is None:  # ``is None`` is the idiomatic None test
            continue
        for key in obj.keys:
            if key is not None:
                self.bind_key_name(key, obj.name)
python
def bind_keys(self, objects): """Configure name map: My goal here is to associate a named object with a specific function""" for object in objects: if object.keys != None: for key in object.keys: if key != None: self.bind_key_name(key, object.name)
[ "def", "bind_keys", "(", "self", ",", "objects", ")", ":", "for", "object", "in", "objects", ":", "if", "object", ".", "keys", "!=", "None", ":", "for", "key", "in", "object", ".", "keys", ":", "if", "key", "!=", "None", ":", "self", ".", "bind_key...
Configure name map: My goal here is to associate a named object with a specific function
[ "Configure", "name", "map", ":", "My", "goal", "here", "is", "to", "associate", "a", "named", "object", "with", "a", "specific", "function" ]
train
https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/controls.py#L85-L92
lordmauve/lepton
examples/games/bonk/controls.py
Controls.configure_keys
def configure_keys(self):
    """Configure key map: build the key -> bound-method dispatch table."""
    self.active_functions = set()
    # key_map maps function name -> key; invert it into key -> callable.
    self.key2func = {
        key: getattr(self, funcname)
        for funcname, key in self.key_map.items()
    }
python
def configure_keys(self): """Configure key map""" self.active_functions = set() self.key2func = {} for funcname, key in self.key_map.items(): self.key2func[key] = getattr(self, funcname)
[ "def", "configure_keys", "(", "self", ")", ":", "self", ".", "active_functions", "=", "set", "(", ")", "self", ".", "key2func", "=", "{", "}", "for", "funcname", ",", "key", "in", "self", ".", "key_map", ".", "items", "(", ")", ":", "self", ".", "k...
Configure key map
[ "Configure", "key", "map" ]
train
https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/controls.py#L95-L100
Fizzadar/pydocs
pydocs/__init__.py
_parse_module_list
def _parse_module_list(module_list):
    '''Loop through all the modules and parse them.'''
    for meta in module_list:
        # Import the module by dotted name, parse it, and attach the
        # rendered output to the record.
        module = import_module(meta['module'])
        meta['content'] = parse_module(module)
python
def _parse_module_list(module_list): '''Loop through all the modules and parse them.''' for module_meta in module_list: name = module_meta['module'] # Import & parse module module = import_module(name) output = parse_module(module) # Assign to meta.content module_meta['content'] = output
[ "def", "_parse_module_list", "(", "module_list", ")", ":", "for", "module_meta", "in", "module_list", ":", "name", "=", "module_meta", "[", "'module'", "]", "# Import & parse module", "module", "=", "import_module", "(", "name", ")", "output", "=", "parse_module",...
Loop through all the modules and parse them.
[ "Loop", "through", "all", "the", "modules", "and", "parse", "them", "." ]
train
https://github.com/Fizzadar/pydocs/blob/72713201dc4cf40335f9c3d380c9111b23c2c38b/pydocs/__init__.py#L16-L26
Fizzadar/pydocs
pydocs/__init__.py
_build_module_list
def _build_module_list(source_module, index_filename, ignore_modules):
    '''Builds a list of python modules in the current directory.

    Walks the cwd (the caller has already chdir'd into the source
    directory) and returns one dict per discovered ``.py`` file with
    keys: directory, file, module, output, source.
    '''
    out = []
    # Directories known to contain an __init__.py (i.e. real packages).
    dirs_with_init = set()
    module_prefix = '' if source_module == '.' else source_module
    for root, _, filenames in walk('.'):
        # Strip the leading './' that os.walk produces.
        root = root[2:]
        module_root = root.replace('/', '.')
        # Keep only python sources, with the .py extension dropped.
        file_names = [filename[:-3] for filename in filenames if filename.endswith('.py')]
        for filename in file_names:
            if filename == '__init__':
                # Package marker: remember the directory and name the
                # module after the package itself.
                dirs_with_init.add(root)
                module_name = '.'.join([module_prefix, module_root]) if module_root else source_module
            elif not root:
                module_name = '.'.join([module_prefix, filename])
            else:
                module_name = '.'.join([module_prefix, root.replace('/', '.'), filename])
            # Drop the leading '.' left behind when module_prefix is empty.
            if module_name.startswith('.'):
                module_name = module_name[1:]
            if module_name in ignore_modules:
                print 'Ignored file: {}{}.py'.format('{}/'.format(root), filename)
                continue
            # Skip modules in directories without an __init__.py.
            # NOTE(review): this assumes '__init__' is seen before its
            # sibling modules in file_names; directory listing order is
            # not guaranteed — confirm.
            if root and root not in dirs_with_init:
                print 'No __init__.py, skipping: {}{}.py'.format('{}/'.format(root), filename)
                continue
            source_name = '{}.py'.format(filename)
            if root:
                source_name = '{}/{}'.format(root, source_name)
            # __init__.py renders to the configured index file name.
            if filename == '__init__':
                output_name = '{}.md'.format(index_filename)
            else:
                output_name = '{}.md'.format(filename)
            if root:
                output_name = '{}/{}'.format(root, output_name)
            out.append({
                'directory': root,
                'file': filename,
                'module': module_name,
                'output': output_name,
                'source': source_name
            })
    return out
python
def _build_module_list(source_module, index_filename, ignore_modules): '''Builds a list of python modules in the current directory.''' out = [] dirs_with_init = set() module_prefix = '' if source_module == '.' else source_module for root, _, filenames in walk('.'): root = root[2:] module_root = root.replace('/', '.') file_names = [filename[:-3] for filename in filenames if filename.endswith('.py')] for filename in file_names: if filename == '__init__': dirs_with_init.add(root) module_name = '.'.join([module_prefix, module_root]) if module_root else source_module elif not root: module_name = '.'.join([module_prefix, filename]) else: module_name = '.'.join([module_prefix, root.replace('/', '.'), filename]) if module_name.startswith('.'): module_name = module_name[1:] if module_name in ignore_modules: print 'Ignored file: {}{}.py'.format('{}/'.format(root), filename) continue if root and root not in dirs_with_init: print 'No __init__.py, skipping: {}{}.py'.format('{}/'.format(root), filename) continue source_name = '{}.py'.format(filename) if root: source_name = '{}/{}'.format(root, source_name) if filename == '__init__': output_name = '{}.md'.format(index_filename) else: output_name = '{}.md'.format(filename) if root: output_name = '{}/{}'.format(root, output_name) out.append({ 'directory': root, 'file': filename, 'module': module_name, 'output': output_name, 'source': source_name }) return out
[ "def", "_build_module_list", "(", "source_module", ",", "index_filename", ",", "ignore_modules", ")", ":", "out", "=", "[", "]", "dirs_with_init", "=", "set", "(", ")", "module_prefix", "=", "''", "if", "source_module", "==", "'.'", "else", "source_module", "f...
Builds a list of python modules in the current directory.
[ "Builds", "a", "list", "of", "python", "modules", "in", "the", "current", "directory", "." ]
train
https://github.com/Fizzadar/pydocs/blob/72713201dc4cf40335f9c3d380c9111b23c2c38b/pydocs/__init__.py#L28-L78
Fizzadar/pydocs
pydocs/__init__.py
_write_docs
def _write_docs(module_list, output_dir): '''Write the document meta to our output location.''' for module_meta in module_list: directory = module_meta['directory'] # Ensure target directory if directory and not path.isdir(directory): makedirs(directory) # Write the file file = open(module_meta['output'], 'w') file.write(module_meta['content']) file.close()
python
def _write_docs(module_list, output_dir): '''Write the document meta to our output location.''' for module_meta in module_list: directory = module_meta['directory'] # Ensure target directory if directory and not path.isdir(directory): makedirs(directory) # Write the file file = open(module_meta['output'], 'w') file.write(module_meta['content']) file.close()
[ "def", "_write_docs", "(", "module_list", ",", "output_dir", ")", ":", "for", "module_meta", "in", "module_list", ":", "directory", "=", "module_meta", "[", "'directory'", "]", "# Ensure target directory", "if", "directory", "and", "not", "path", ".", "isdir", "...
Write the document meta to our output location.
[ "Write", "the", "document", "meta", "to", "our", "output", "location", "." ]
train
https://github.com/Fizzadar/pydocs/blob/72713201dc4cf40335f9c3d380c9111b23c2c38b/pydocs/__init__.py#L80-L91
Fizzadar/pydocs
pydocs/__init__.py
build
def build(
    root, source_module, output_dir,
    json_dump=False, ignore_modules=None, index_filename='index'
):
    '''
    Build markdown documentation from a directory and/or python module.

    # ignore_modules: takes a list of module names to ignore
    # index_filename: __init__.py output (default index.md), use README.md for github indexes
    '''
    # Normalise root: no trailing slash.
    if root.endswith('/'):
        root = root[:-1]
    # ignore_modules may arrive as None, a list, or a comma separated string.
    if ignore_modules is None:
        ignore_modules = []
    elif isinstance(ignore_modules, str):
        ignore_modules = ignore_modules.split(',')
    # Ensure output_dir format (no / at start, / at end)
    if output_dir.startswith('/'):
        output_dir = output_dir[1:]
    if not output_dir.endswith('/'):
        output_dir = '{}/'.format(output_dir)
    # Apply root to it, make the directory if not exists
    output_dir = '{}/{}'.format(root, output_dir)
    if not path.isdir(output_dir):
        makedirs(output_dir)
    # Source dir is either the root itself or the module path under it.
    if source_module == '.':
        source_dir = '{}/'.format(root)
    else:
        source_dir = '{}/{}/'.format(root, source_module.replace('.', os.sep))
    # Cd into the source directory
    chdir(source_dir)
    # And build the module list
    module_list = _build_module_list(
        source_module,
        index_filename=index_filename,
        ignore_modules=ignore_modules
    )
    # Cd back to old directory
    chdir(root)
    # And parse all the modules (each is imported, so import-time side
    # effects in the target package run here)
    _parse_module_list(module_list)
    # Cd into the target directory
    chdir(output_dir)
    # Finally, write the module list to our output dir
    _write_docs(module_list, output_dir)
    if json_dump:
        print json.dumps(module_list, indent=4)
python
def build( root, source_module, output_dir, json_dump=False, ignore_modules=None, index_filename='index' ): ''' Build markdown documentation from a directory and/or python module. # ignore_modules: takes a list of module names to ignore # index_filename: __init__.py output (default index.md), use README.md for github indexes ''' if root.endswith('/'): root = root[:-1] if ignore_modules is None: ignore_modules = [] elif isinstance(ignore_modules, str): ignore_modules = ignore_modules.split(',') # Ensure output_dir format (no / at start, / at end) if output_dir.startswith('/'): output_dir = output_dir[1:] if not output_dir.endswith('/'): output_dir = '{}/'.format(output_dir) # Apply root to it, make the directory if not exists output_dir = '{}/{}'.format(root, output_dir) if not path.isdir(output_dir): makedirs(output_dir) if source_module == '.': source_dir = '{}/'.format(root) else: source_dir = '{}/{}/'.format(root, source_module.replace('.', os.sep)) # Cd into the source directory chdir(source_dir) # And build the module list module_list = _build_module_list( source_module, index_filename=index_filename, ignore_modules=ignore_modules ) # Cd back to old directory chdir(root) # And parse all the modules _parse_module_list(module_list) # Cd inot the target directory chdir(output_dir) # Finally, write the module list to our output dir _write_docs(module_list, output_dir) if json_dump: print json.dumps(module_list, indent=4)
[ "def", "build", "(", "root", ",", "source_module", ",", "output_dir", ",", "json_dump", "=", "False", ",", "ignore_modules", "=", "None", ",", "index_filename", "=", "'index'", ")", ":", "if", "root", ".", "endswith", "(", "'/'", ")", ":", "root", "=", ...
Build markdown documentation from a directory and/or python module. # ignore_modules: takes a list of module names to ignore # index_filename: __init__.py output (default index.md), use README.md for github indexes
[ "Build", "markdown", "documentation", "from", "a", "directory", "and", "/", "or", "python", "module", "." ]
train
https://github.com/Fizzadar/pydocs/blob/72713201dc4cf40335f9c3d380c9111b23c2c38b/pydocs/__init__.py#L93-L148
Fizzadar/pydocs
pydocs/__init__.py
main
def main():
    '''Main in a function in case you place a build.py for pydocs inside the root directory.'''
    # docopt parses this usage text itself: it is runtime data, not a
    # docstring, so its exact wording/layout matters.
    options = '''
    pydocs

    Usage:
        pydocs SOURCE OUTPUT_DIR
        pydocs SOURCE OUTPUT_DIR [--json] [--index NAME] [--ignore FILE,NAMES]
        pydocs --help

    Options:
        SOURCE               Source module, or . for current directory.
        OUTPUT_DIR           The location to output the generated markdown.
        --json               Dump meta in JSON format upon completion.
        --index NAME         Name of the index file (default index.md) to generate.
        --ignore FILE,NAMES  Comma separated modules to ignore/skip.
        -h --help            Show this screen.
        --version            Show version.
    '''
    args = docopt(options)
    # Build from the current working directory.
    build(
        getcwd(),
        args['SOURCE'],
        args['OUTPUT_DIR'],
        json_dump=args['--json'],
        ignore_modules=args['--ignore'],
        index_filename=args['--index'] or 'index'
    )
python
def main(): '''Main in a function in case you place a build.py for pydocs inside the root directory.''' options = ''' pydocs Usage: pydocs SOURCE OUTPUT_DIR pydocs SOURCE OUTPUT_DIR [--json] [--index NAME] [--ignore FILE,NAMES] pydocs --help Options: SOURCE Source module, or . for current directory. OUTPUT_DIR The location to output the generated markdown. --json Dump meta in JSON format upon completion. --index NAME Name of the index file (default index.md) to generate. --ignore FILE,NAMES Comma separated modules to ignore/skip. -h --help Show this screen. --version Show version. ''' args = docopt(options) build( getcwd(), args['SOURCE'], args['OUTPUT_DIR'], json_dump=args['--json'], ignore_modules=args['--ignore'], index_filename=args['--index'] or 'index' )
[ "def", "main", "(", ")", ":", "options", "=", "'''\n pydocs\n\n Usage:\n pydocs SOURCE OUTPUT_DIR\n pydocs SOURCE OUTPUT_DIR [--json] [--index NAME] [--ignore FILE,NAMES]\n pydocs --help\n\n Options:\n SOURCE Source modu...
Main in a function in case you place a build.py for pydocs inside the root directory.
[ "Main", "in", "a", "function", "in", "case", "you", "place", "a", "build", ".", "py", "for", "pydocs", "inside", "the", "root", "directory", "." ]
train
https://github.com/Fizzadar/pydocs/blob/72713201dc4cf40335f9c3d380c9111b23c2c38b/pydocs/__init__.py#L151-L175
shmir/PyIxNetwork
ixnetwork/ixn_statistics_view.py
IxnStatisticsView.read_stats
def read_stats(self):
    """ Reads the statistics view from IXN and saves it in statistics dictionary. """
    captions, rows = self._get_pages()
    # Split the object-name column off from the statistic columns.
    name_index = captions.index(self.name_caption)
    captions.pop(name_index)
    self.captions = captions
    # row.pop runs before the popped row is stored as the value.
    self.statistics = OrderedDict((row.pop(name_index), row) for row in rows)
python
def read_stats(self): """ Reads the statistics view from IXN and saves it in statistics dictionary. """ captions, rows = self._get_pages() name_caption_index = captions.index(self.name_caption) captions.pop(name_caption_index) self.captions = captions self.statistics = OrderedDict() for row in rows: name = row.pop(name_caption_index) self.statistics[name] = row
[ "def", "read_stats", "(", "self", ")", ":", "captions", ",", "rows", "=", "self", ".", "_get_pages", "(", ")", "name_caption_index", "=", "captions", ".", "index", "(", "self", ".", "name_caption", ")", "captions", ".", "pop", "(", "name_caption_index", ")...
Reads the statistics view from IXN and saves it in statistics dictionary.
[ "Reads", "the", "statistics", "view", "from", "IXN", "and", "saves", "it", "in", "statistics", "dictionary", "." ]
train
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_statistics_view.py#L31-L41
shmir/PyIxNetwork
ixnetwork/ixn_statistics_view.py
IxnStatisticsView.get_all_stats
def get_all_stats(self): """ :returns: all statistics values for all objects. """ all_stats = OrderedDict() for obj_name in self.statistics: all_stats[obj_name] = dict(zip(self.captions, self.statistics[obj_name])) return all_stats
python
def get_all_stats(self): """ :returns: all statistics values for all objects. """ all_stats = OrderedDict() for obj_name in self.statistics: all_stats[obj_name] = dict(zip(self.captions, self.statistics[obj_name])) return all_stats
[ "def", "get_all_stats", "(", "self", ")", ":", "all_stats", "=", "OrderedDict", "(", ")", "for", "obj_name", "in", "self", ".", "statistics", ":", "all_stats", "[", "obj_name", "]", "=", "dict", "(", "zip", "(", "self", ".", "captions", ",", "self", "....
:returns: all statistics values for all objects.
[ ":", "returns", ":", "all", "statistics", "values", "for", "all", "objects", "." ]
train
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_statistics_view.py#L43-L51
shmir/PyIxNetwork
ixnetwork/ixn_statistics_view.py
IxnStatisticsView.get_object_stats
def get_object_stats(self, obj_name): """ :param obj_name: requested object name :returns: all statistics values for the requested object. """ return dict(zip(self.captions, self.statistics[obj_name]))
python
def get_object_stats(self, obj_name): """ :param obj_name: requested object name :returns: all statistics values for the requested object. """ return dict(zip(self.captions, self.statistics[obj_name]))
[ "def", "get_object_stats", "(", "self", ",", "obj_name", ")", ":", "return", "dict", "(", "zip", "(", "self", ".", "captions", ",", "self", ".", "statistics", "[", "obj_name", "]", ")", ")" ]
:param obj_name: requested object name :returns: all statistics values for the requested object.
[ ":", "param", "obj_name", ":", "requested", "object", "name", ":", "returns", ":", "all", "statistics", "values", "for", "the", "requested", "object", "." ]
train
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_statistics_view.py#L53-L59
shmir/PyIxNetwork
ixnetwork/ixn_statistics_view.py
IxnStatisticsView.get_stats
def get_stats(self, stat_name): """ :param stat_name: requested statistics name. :returns: all values of the requested statistic for all objects. """ return [self.get_stat(r, stat_name) for r in self.statistics.keys()]
python
def get_stats(self, stat_name): """ :param stat_name: requested statistics name. :returns: all values of the requested statistic for all objects. """ return [self.get_stat(r, stat_name) for r in self.statistics.keys()]
[ "def", "get_stats", "(", "self", ",", "stat_name", ")", ":", "return", "[", "self", ".", "get_stat", "(", "r", ",", "stat_name", ")", "for", "r", "in", "self", ".", "statistics", ".", "keys", "(", ")", "]" ]
:param stat_name: requested statistics name. :returns: all values of the requested statistic for all objects.
[ ":", "param", "stat_name", ":", "requested", "statistics", "name", ".", ":", "returns", ":", "all", "values", "of", "the", "requested", "statistic", "for", "all", "objects", "." ]
train
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_statistics_view.py#L61-L67
shmir/PyIxNetwork
ixnetwork/ixn_statistics_view.py
IxnStatisticsView.get_stat
def get_stat(self, obj_name, stat_name): """ :param obj_name: requested object name. :param stat_name: requested statistics name. :return: str, the value of the requested statics for the requested object. """ return self.statistics[obj_name][self.captions.index(stat_name)]
python
def get_stat(self, obj_name, stat_name): """ :param obj_name: requested object name. :param stat_name: requested statistics name. :return: str, the value of the requested statics for the requested object. """ return self.statistics[obj_name][self.captions.index(stat_name)]
[ "def", "get_stat", "(", "self", ",", "obj_name", ",", "stat_name", ")", ":", "return", "self", ".", "statistics", "[", "obj_name", "]", "[", "self", ".", "captions", ".", "index", "(", "stat_name", ")", "]" ]
:param obj_name: requested object name. :param stat_name: requested statistics name. :return: str, the value of the requested statics for the requested object.
[ ":", "param", "obj_name", ":", "requested", "object", "name", ".", ":", "param", "stat_name", ":", "requested", "statistics", "name", ".", ":", "return", ":", "str", "the", "value", "of", "the", "requested", "statics", "for", "the", "requested", "object", ...
train
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_statistics_view.py#L69-L76
bast/flanders
cmake/autocmake/configure.py
check_cmake_exists
def check_cmake_exists(cmake_command):
    """
    Check whether CMake is installed. If not, print
    informative error message and quits.
    """
    from subprocess import Popen, PIPE

    p = Popen(
        '{0} --version'.format(cmake_command),
        shell=True,
        stdin=PIPE,
        stdout=PIPE)
    # ``not in`` is the idiomatic negated membership test
    # (was ``not ('cmake version' in ...)``).
    if 'cmake version' not in p.communicate()[0].decode('UTF-8'):
        sys.stderr.write(' This code is built using CMake\n\n')
        sys.stderr.write(' CMake is not found\n')
        sys.stderr.write(' get CMake at http://www.cmake.org/\n')
        sys.stderr.write(' on many clusters CMake is installed\n')
        sys.stderr.write(' but you have to load it first:\n')
        sys.stderr.write(' $ module load cmake\n')
        sys.exit(1)
python
def check_cmake_exists(cmake_command): """ Check whether CMake is installed. If not, print informative error message and quits. """ from subprocess import Popen, PIPE p = Popen( '{0} --version'.format(cmake_command), shell=True, stdin=PIPE, stdout=PIPE) if not ('cmake version' in p.communicate()[0].decode('UTF-8')): sys.stderr.write(' This code is built using CMake\n\n') sys.stderr.write(' CMake is not found\n') sys.stderr.write(' get CMake at http://www.cmake.org/\n') sys.stderr.write(' on many clusters CMake is installed\n') sys.stderr.write(' but you have to load it first:\n') sys.stderr.write(' $ module load cmake\n') sys.exit(1)
[ "def", "check_cmake_exists", "(", "cmake_command", ")", ":", "from", "subprocess", "import", "Popen", ",", "PIPE", "p", "=", "Popen", "(", "'{0} --version'", ".", "format", "(", "cmake_command", ")", ",", "shell", "=", "True", ",", "stdin", "=", "PIPE", ",...
Check whether CMake is installed. If not, print informative error message and quits.
[ "Check", "whether", "CMake", "is", "installed", ".", "If", "not", "print", "informative", "error", "message", "and", "quits", "." ]
train
https://github.com/bast/flanders/blob/792f9eed8511cb553e67a25b6c5ce60fd6ae97bc/cmake/autocmake/configure.py#L14-L33
bast/flanders
cmake/autocmake/configure.py
setup_build_path
def setup_build_path(build_path): """ Create build directory. If this already exists, print informative error message and quit. """ if os.path.isdir(build_path): fname = os.path.join(build_path, 'CMakeCache.txt') if os.path.exists(fname): sys.stderr.write('aborting setup\n') sys.stderr.write( 'build directory {0} which contains CMakeCache.txt already exists\n'. format(build_path)) sys.stderr.write( 'remove the build directory and then rerun setup\n') sys.exit(1) else: os.makedirs(build_path, 0o755)
python
def setup_build_path(build_path): """ Create build directory. If this already exists, print informative error message and quit. """ if os.path.isdir(build_path): fname = os.path.join(build_path, 'CMakeCache.txt') if os.path.exists(fname): sys.stderr.write('aborting setup\n') sys.stderr.write( 'build directory {0} which contains CMakeCache.txt already exists\n'. format(build_path)) sys.stderr.write( 'remove the build directory and then rerun setup\n') sys.exit(1) else: os.makedirs(build_path, 0o755)
[ "def", "setup_build_path", "(", "build_path", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "build_path", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "build_path", ",", "'CMakeCache.txt'", ")", "if", "os", ".", "path", ".", "...
Create build directory. If this already exists, print informative error message and quit.
[ "Create", "build", "directory", ".", "If", "this", "already", "exists", "print", "informative", "error", "message", "and", "quit", "." ]
train
https://github.com/bast/flanders/blob/792f9eed8511cb553e67a25b6c5ce60fd6ae97bc/cmake/autocmake/configure.py#L36-L52
bast/flanders
cmake/autocmake/configure.py
run_cmake
def run_cmake(command, build_path, default_build_path): """ Execute CMake command. """ from subprocess import Popen, PIPE from shutil import rmtree topdir = os.getcwd() p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE) stdout_coded, stderr_coded = p.communicate() stdout = stdout_coded.decode('UTF-8') stderr = stderr_coded.decode('UTF-8') # print cmake output to screen print(stdout) if stderr: # we write out stderr but we do not stop yet # this is because CMake warnings are sent to stderr # and they might be benign sys.stderr.write(stderr) # write cmake output to file with open(os.path.join(build_path, 'cmake_output'), 'w') as f: f.write(stdout) # change directory and return os.chdir(topdir) # to figure out whether configuration was a success # we check for 3 sentences that should be part of stdout configuring_done = '-- Configuring done' in stdout generating_done = '-- Generating done' in stdout build_files_written = '-- Build files have been written to' in stdout configuration_successful = configuring_done and generating_done and build_files_written if configuration_successful: save_setup_command(sys.argv, build_path) print_build_help(build_path, default_build_path) else: if (build_path == default_build_path): # remove build_path iff not set by the user # otherwise removal can be dangerous rmtree(default_build_path)
python
def run_cmake(command, build_path, default_build_path): """ Execute CMake command. """ from subprocess import Popen, PIPE from shutil import rmtree topdir = os.getcwd() p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE) stdout_coded, stderr_coded = p.communicate() stdout = stdout_coded.decode('UTF-8') stderr = stderr_coded.decode('UTF-8') # print cmake output to screen print(stdout) if stderr: # we write out stderr but we do not stop yet # this is because CMake warnings are sent to stderr # and they might be benign sys.stderr.write(stderr) # write cmake output to file with open(os.path.join(build_path, 'cmake_output'), 'w') as f: f.write(stdout) # change directory and return os.chdir(topdir) # to figure out whether configuration was a success # we check for 3 sentences that should be part of stdout configuring_done = '-- Configuring done' in stdout generating_done = '-- Generating done' in stdout build_files_written = '-- Build files have been written to' in stdout configuration_successful = configuring_done and generating_done and build_files_written if configuration_successful: save_setup_command(sys.argv, build_path) print_build_help(build_path, default_build_path) else: if (build_path == default_build_path): # remove build_path iff not set by the user # otherwise removal can be dangerous rmtree(default_build_path)
[ "def", "run_cmake", "(", "command", ",", "build_path", ",", "default_build_path", ")", ":", "from", "subprocess", "import", "Popen", ",", "PIPE", "from", "shutil", "import", "rmtree", "topdir", "=", "os", ".", "getcwd", "(", ")", "p", "=", "Popen", "(", ...
Execute CMake command.
[ "Execute", "CMake", "command", "." ]
train
https://github.com/bast/flanders/blob/792f9eed8511cb553e67a25b6c5ce60fd6ae97bc/cmake/autocmake/configure.py#L55-L98
bast/flanders
cmake/autocmake/configure.py
print_build_help
def print_build_help(build_path, default_build_path): """ Print help text after configuration step is done. """ print(' configure step is done') print(' now you need to compile the sources:') if (build_path == default_build_path): print(' $ cd build') else: print(' $ cd ' + build_path) print(' $ make')
python
def print_build_help(build_path, default_build_path): """ Print help text after configuration step is done. """ print(' configure step is done') print(' now you need to compile the sources:') if (build_path == default_build_path): print(' $ cd build') else: print(' $ cd ' + build_path) print(' $ make')
[ "def", "print_build_help", "(", "build_path", ",", "default_build_path", ")", ":", "print", "(", "' configure step is done'", ")", "print", "(", "' now you need to compile the sources:'", ")", "if", "(", "build_path", "==", "default_build_path", ")", ":", "print", ...
Print help text after configuration step is done.
[ "Print", "help", "text", "after", "configuration", "step", "is", "done", "." ]
train
https://github.com/bast/flanders/blob/792f9eed8511cb553e67a25b6c5ce60fd6ae97bc/cmake/autocmake/configure.py#L101-L111
bast/flanders
cmake/autocmake/configure.py
save_setup_command
def save_setup_command(argv, build_path): """ Save setup command to a file. """ file_name = os.path.join(build_path, 'setup_command') with open(file_name, 'w') as f: f.write(' '.join(argv[:]) + '\n')
python
def save_setup_command(argv, build_path): """ Save setup command to a file. """ file_name = os.path.join(build_path, 'setup_command') with open(file_name, 'w') as f: f.write(' '.join(argv[:]) + '\n')
[ "def", "save_setup_command", "(", "argv", ",", "build_path", ")", ":", "file_name", "=", "os", ".", "path", ".", "join", "(", "build_path", ",", "'setup_command'", ")", "with", "open", "(", "file_name", ",", "'w'", ")", "as", "f", ":", "f", ".", "write...
Save setup command to a file.
[ "Save", "setup", "command", "to", "a", "file", "." ]
train
https://github.com/bast/flanders/blob/792f9eed8511cb553e67a25b6c5ce60fd6ae97bc/cmake/autocmake/configure.py#L114-L120
bast/flanders
cmake/autocmake/configure.py
configure
def configure(root_directory, build_path, cmake_command, only_show): """ Main configure function. """ default_build_path = os.path.join(root_directory, 'build') # check that CMake is available, if not stop check_cmake_exists('cmake') # deal with build path if build_path is None: build_path = default_build_path if not only_show: setup_build_path(build_path) cmake_command += ' -B' + build_path print('{0}\n'.format(cmake_command)) if only_show: sys.exit(0) run_cmake(cmake_command, build_path, default_build_path)
python
def configure(root_directory, build_path, cmake_command, only_show): """ Main configure function. """ default_build_path = os.path.join(root_directory, 'build') # check that CMake is available, if not stop check_cmake_exists('cmake') # deal with build path if build_path is None: build_path = default_build_path if not only_show: setup_build_path(build_path) cmake_command += ' -B' + build_path print('{0}\n'.format(cmake_command)) if only_show: sys.exit(0) run_cmake(cmake_command, build_path, default_build_path)
[ "def", "configure", "(", "root_directory", ",", "build_path", ",", "cmake_command", ",", "only_show", ")", ":", "default_build_path", "=", "os", ".", "path", ".", "join", "(", "root_directory", ",", "'build'", ")", "# check that CMake is available, if not stop", "ch...
Main configure function.
[ "Main", "configure", "function", "." ]
train
https://github.com/bast/flanders/blob/792f9eed8511cb553e67a25b6c5ce60fd6ae97bc/cmake/autocmake/configure.py#L123-L143
robmoggach/django-token-auth
src/token_auth/models.py
Token.generate_hash
def generate_hash(self): """ Create a unique SHA token/hash using the project SECRET_KEY, URL, email address and current datetime. """ from django.utils.hashcompat import sha_constructor hash = sha_constructor(settings.SECRET_KEY + self.url + self.email + unicode(datetime.now()) ).hexdigest() return hash[::2]
python
def generate_hash(self): """ Create a unique SHA token/hash using the project SECRET_KEY, URL, email address and current datetime. """ from django.utils.hashcompat import sha_constructor hash = sha_constructor(settings.SECRET_KEY + self.url + self.email + unicode(datetime.now()) ).hexdigest() return hash[::2]
[ "def", "generate_hash", "(", "self", ")", ":", "from", "django", ".", "utils", ".", "hashcompat", "import", "sha_constructor", "hash", "=", "sha_constructor", "(", "settings", ".", "SECRET_KEY", "+", "self", ".", "url", "+", "self", ".", "email", "+", "uni...
Create a unique SHA token/hash using the project SECRET_KEY, URL, email address and current datetime.
[ "Create", "a", "unique", "SHA", "token", "/", "hash", "using", "the", "project", "SECRET_KEY", "URL", "email", "address", "and", "current", "datetime", "." ]
train
https://github.com/robmoggach/django-token-auth/blob/7ef6e10c27b0168c1272347d0169cdbd96232ed3/src/token_auth/models.py#L60-L67
PGower/PyCanvas
pycanvas/apis/poll_sessions.py
PollSessionsAPI.create_single_poll_session
def create_single_poll_session(self, poll_id, poll_sessions_course_id, poll_sessions_course_section_id=None, poll_sessions_has_public_results=None): """ Create a single poll session. Create a new poll session for this poll """ path = {} data = {} params = {} # REQUIRED - PATH - poll_id """ID""" path["poll_id"] = poll_id # REQUIRED - poll_sessions[course_id] """The id of the course this session is associated with.""" data["poll_sessions[course_id]"] = poll_sessions_course_id # OPTIONAL - poll_sessions[course_section_id] """The id of the course section this session is associated with.""" if poll_sessions_course_section_id is not None: data["poll_sessions[course_section_id]"] = poll_sessions_course_section_id # OPTIONAL - poll_sessions[has_public_results] """Whether or not results are viewable by students.""" if poll_sessions_has_public_results is not None: data["poll_sessions[has_public_results]"] = poll_sessions_has_public_results self.logger.debug("POST /api/v1/polls/{poll_id}/poll_sessions with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/polls/{poll_id}/poll_sessions".format(**path), data=data, params=params, no_data=True)
python
def create_single_poll_session(self, poll_id, poll_sessions_course_id, poll_sessions_course_section_id=None, poll_sessions_has_public_results=None): """ Create a single poll session. Create a new poll session for this poll """ path = {} data = {} params = {} # REQUIRED - PATH - poll_id """ID""" path["poll_id"] = poll_id # REQUIRED - poll_sessions[course_id] """The id of the course this session is associated with.""" data["poll_sessions[course_id]"] = poll_sessions_course_id # OPTIONAL - poll_sessions[course_section_id] """The id of the course section this session is associated with.""" if poll_sessions_course_section_id is not None: data["poll_sessions[course_section_id]"] = poll_sessions_course_section_id # OPTIONAL - poll_sessions[has_public_results] """Whether or not results are viewable by students.""" if poll_sessions_has_public_results is not None: data["poll_sessions[has_public_results]"] = poll_sessions_has_public_results self.logger.debug("POST /api/v1/polls/{poll_id}/poll_sessions with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/polls/{poll_id}/poll_sessions".format(**path), data=data, params=params, no_data=True)
[ "def", "create_single_poll_session", "(", "self", ",", "poll_id", ",", "poll_sessions_course_id", ",", "poll_sessions_course_section_id", "=", "None", ",", "poll_sessions_has_public_results", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "pa...
Create a single poll session. Create a new poll session for this poll
[ "Create", "a", "single", "poll", "session", ".", "Create", "a", "new", "poll", "session", "for", "this", "poll" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/poll_sessions.py#L57-L86
jaraco/hgtools
hgtools/managers/library.py
MercurialInProcManager._invoke
def _invoke(self, *params): """ Run the self.exe command in-process with the supplied params. """ cmd = [self.exe, '-R', self.location] + list(params) with reentry.in_process_context(cmd) as result: sys.modules['mercurial.dispatch'].run() stdout = result.stdio.stdout.getvalue() stderr = result.stdio.stderr.getvalue() if not result.returncode == 0: raise RuntimeError(stderr.strip() or stdout.strip()) return stdout.decode('utf-8')
python
def _invoke(self, *params): """ Run the self.exe command in-process with the supplied params. """ cmd = [self.exe, '-R', self.location] + list(params) with reentry.in_process_context(cmd) as result: sys.modules['mercurial.dispatch'].run() stdout = result.stdio.stdout.getvalue() stderr = result.stdio.stderr.getvalue() if not result.returncode == 0: raise RuntimeError(stderr.strip() or stdout.strip()) return stdout.decode('utf-8')
[ "def", "_invoke", "(", "self", ",", "*", "params", ")", ":", "cmd", "=", "[", "self", ".", "exe", ",", "'-R'", ",", "self", ".", "location", "]", "+", "list", "(", "params", ")", "with", "reentry", ".", "in_process_context", "(", "cmd", ")", "as", ...
Run the self.exe command in-process with the supplied params.
[ "Run", "the", "self", ".", "exe", "command", "in", "-", "process", "with", "the", "supplied", "params", "." ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/managers/library.py#L15-L26
tariqdaouda/rabaDB
rabaDB/Raba.py
removeFromRegistery
def removeFromRegistery(obj) : """Removes an object/rabalist from registery. This is useful if you want to allow the garbage collector to free the memory taken by the objects you've already loaded. Be careful might cause some discrepenties in your scripts. For objects, cascades to free the registeries of related rabalists also""" if isRabaObject(obj) : _unregisterRabaObjectInstance(obj) elif isRabaList(obj) : _unregisterRabaListInstance(obj)
python
def removeFromRegistery(obj) : """Removes an object/rabalist from registery. This is useful if you want to allow the garbage collector to free the memory taken by the objects you've already loaded. Be careful might cause some discrepenties in your scripts. For objects, cascades to free the registeries of related rabalists also""" if isRabaObject(obj) : _unregisterRabaObjectInstance(obj) elif isRabaList(obj) : _unregisterRabaListInstance(obj)
[ "def", "removeFromRegistery", "(", "obj", ")", ":", "if", "isRabaObject", "(", "obj", ")", ":", "_unregisterRabaObjectInstance", "(", "obj", ")", "elif", "isRabaList", "(", "obj", ")", ":", "_unregisterRabaListInstance", "(", "obj", ")" ]
Removes an object/rabalist from registery. This is useful if you want to allow the garbage collector to free the memory taken by the objects you've already loaded. Be careful might cause some discrepenties in your scripts. For objects, cascades to free the registeries of related rabalists also
[ "Removes", "an", "object", "/", "rabalist", "from", "registery", ".", "This", "is", "useful", "if", "you", "want", "to", "allow", "the", "garbage", "collector", "to", "free", "the", "memory", "taken", "by", "the", "objects", "you", "ve", "already", "loaded...
train
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L326-L334
tariqdaouda/rabaDB
rabaDB/Raba.py
RabaPupa.getDctDescription
def getDctDescription(self) : "returns a dict describing the object" return {'type' : RabaFields.RABA_FIELD_TYPE_IS_RABA_OBJECT, 'className' : self._rabaClass.__name__, 'raba_id' : self.raba_id, 'raba_namespace' : self._raba_namespace}
python
def getDctDescription(self) : "returns a dict describing the object" return {'type' : RabaFields.RABA_FIELD_TYPE_IS_RABA_OBJECT, 'className' : self._rabaClass.__name__, 'raba_id' : self.raba_id, 'raba_namespace' : self._raba_namespace}
[ "def", "getDctDescription", "(", "self", ")", ":", "return", "{", "'type'", ":", "RabaFields", ".", "RABA_FIELD_TYPE_IS_RABA_OBJECT", ",", "'className'", ":", "self", ".", "_rabaClass", ".", "__name__", ",", "'raba_id'", ":", "self", ".", "raba_id", ",", "'rab...
returns a dict describing the object
[ "returns", "a", "dict", "describing", "the", "object" ]
train
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L373-L375
tariqdaouda/rabaDB
rabaDB/Raba.py
Raba.unreference
def unreference(self) : "explicit deletes the object from the singleton reference dictionary. This is mandatory to be able to delete the object using del(). Also, any attempt to reload an object with the same parameters will result un a new instance being created" try : del(self.__class__._instances[makeRabaObjectSingletonKey(self.__class__.__name__, self._raba_namespace, self.raba_id)]) except KeyError : pass
python
def unreference(self) : "explicit deletes the object from the singleton reference dictionary. This is mandatory to be able to delete the object using del(). Also, any attempt to reload an object with the same parameters will result un a new instance being created" try : del(self.__class__._instances[makeRabaObjectSingletonKey(self.__class__.__name__, self._raba_namespace, self.raba_id)]) except KeyError : pass
[ "def", "unreference", "(", "self", ")", ":", "try", ":", "del", "(", "self", ".", "__class__", ".", "_instances", "[", "makeRabaObjectSingletonKey", "(", "self", ".", "__class__", ".", "__name__", ",", "self", ".", "_raba_namespace", ",", "self", ".", "rab...
explicit deletes the object from the singleton reference dictionary. This is mandatory to be able to delete the object using del(). Also, any attempt to reload an object with the same parameters will result un a new instance being created
[ "explicit", "deletes", "the", "object", "from", "the", "singleton", "reference", "dictionary", ".", "This", "is", "mandatory", "to", "be", "able", "to", "delete", "the", "object", "using", "del", "()", ".", "Also", "any", "attempt", "to", "reload", "an", "...
train
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L404-L409
tariqdaouda/rabaDB
rabaDB/Raba.py
Raba.ensureIndex
def ensureIndex(cls, fields, where = '', whereValues = []) : """Add an index for field, indexes take place and slow down saves and deletes but they speed up a lot everything else. If you are going to do a lot of saves/deletes drop the indexes first re-add them afterwards Fields can be a list of fields for Multi-Column Indices or simply the name of a single field. But as RabaList are basicaly in separate tables you cannot create a multicolumn indice on them. A single index will be create for the RabaList alone""" con = RabaConnection(cls._raba_namespace) rlf, ff = cls._parseIndex(fields) ww = [] for i in range(len(whereValues)) : if isRabaObject(whereValues[i]) : ww.append(whereValues[i].getJsonEncoding()) for name in rlf : con.createIndex(name, 'anchor_raba_id') if len(ff) > 0 : con.createIndex(cls.__name__, ff, where = where, whereValues = ww) con.commit()
python
def ensureIndex(cls, fields, where = '', whereValues = []) : """Add an index for field, indexes take place and slow down saves and deletes but they speed up a lot everything else. If you are going to do a lot of saves/deletes drop the indexes first re-add them afterwards Fields can be a list of fields for Multi-Column Indices or simply the name of a single field. But as RabaList are basicaly in separate tables you cannot create a multicolumn indice on them. A single index will be create for the RabaList alone""" con = RabaConnection(cls._raba_namespace) rlf, ff = cls._parseIndex(fields) ww = [] for i in range(len(whereValues)) : if isRabaObject(whereValues[i]) : ww.append(whereValues[i].getJsonEncoding()) for name in rlf : con.createIndex(name, 'anchor_raba_id') if len(ff) > 0 : con.createIndex(cls.__name__, ff, where = where, whereValues = ww) con.commit()
[ "def", "ensureIndex", "(", "cls", ",", "fields", ",", "where", "=", "''", ",", "whereValues", "=", "[", "]", ")", ":", "con", "=", "RabaConnection", "(", "cls", ".", "_raba_namespace", ")", "rlf", ",", "ff", "=", "cls", ".", "_parseIndex", "(", "fiel...
Add an index for field, indexes take place and slow down saves and deletes but they speed up a lot everything else. If you are going to do a lot of saves/deletes drop the indexes first re-add them afterwards Fields can be a list of fields for Multi-Column Indices or simply the name of a single field. But as RabaList are basicaly in separate tables you cannot create a multicolumn indice on them. A single index will be create for the RabaList alone
[ "Add", "an", "index", "for", "field", "indexes", "take", "place", "and", "slow", "down", "saves", "and", "deletes", "but", "they", "speed", "up", "a", "lot", "everything", "else", ".", "If", "you", "are", "going", "to", "do", "a", "lot", "of", "saves",...
train
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L498-L514
tariqdaouda/rabaDB
rabaDB/Raba.py
Raba.dropIndex
def dropIndex(cls, fields) : "removes an index created with ensureIndex " con = RabaConnection(cls._raba_namespace) rlf, ff = cls._parseIndex(fields) for name in rlf : con.dropIndex(name, 'anchor_raba_id') con.dropIndex(cls.__name__, ff) con.commit()
python
def dropIndex(cls, fields) : "removes an index created with ensureIndex " con = RabaConnection(cls._raba_namespace) rlf, ff = cls._parseIndex(fields) for name in rlf : con.dropIndex(name, 'anchor_raba_id') con.dropIndex(cls.__name__, ff) con.commit()
[ "def", "dropIndex", "(", "cls", ",", "fields", ")", ":", "con", "=", "RabaConnection", "(", "cls", ".", "_raba_namespace", ")", "rlf", ",", "ff", "=", "cls", ".", "_parseIndex", "(", "fields", ")", "for", "name", "in", "rlf", ":", "con", ".", "dropIn...
removes an index created with ensureIndex
[ "removes", "an", "index", "created", "with", "ensureIndex" ]
train
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L517-L526
tariqdaouda/rabaDB
rabaDB/Raba.py
Raba.getIndexes
def getIndexes(cls) : "returns a list of the indexes of a class" con = RabaConnection(cls._raba_namespace) idxs = [] for idx in con.getIndexes(rabaOnly = True) : if idx[2] == cls.__name__ : idxs.append(idx) else : for k in cls.columns : if RabaFields.isRabaListField(getattr(cls, k)) and idx[2] == con.makeRabaListTableName(cls.__name__, k) : idxs.append(idx) return idxs
python
def getIndexes(cls) : "returns a list of the indexes of a class" con = RabaConnection(cls._raba_namespace) idxs = [] for idx in con.getIndexes(rabaOnly = True) : if idx[2] == cls.__name__ : idxs.append(idx) else : for k in cls.columns : if RabaFields.isRabaListField(getattr(cls, k)) and idx[2] == con.makeRabaListTableName(cls.__name__, k) : idxs.append(idx) return idxs
[ "def", "getIndexes", "(", "cls", ")", ":", "con", "=", "RabaConnection", "(", "cls", ".", "_raba_namespace", ")", "idxs", "=", "[", "]", "for", "idx", "in", "con", ".", "getIndexes", "(", "rabaOnly", "=", "True", ")", ":", "if", "idx", "[", "2", "]...
returns a list of the indexes of a class
[ "returns", "a", "list", "of", "the", "indexes", "of", "a", "class" ]
train
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L529-L540
tariqdaouda/rabaDB
rabaDB/Raba.py
Raba.flushIndexes
def flushIndexes(cls) : "drops all indexes for a class" con = RabaConnection(cls._raba_namespace) for idx in cls.getIndexes() : con.dropIndexByName(idx[1])
python
def flushIndexes(cls) : "drops all indexes for a class" con = RabaConnection(cls._raba_namespace) for idx in cls.getIndexes() : con.dropIndexByName(idx[1])
[ "def", "flushIndexes", "(", "cls", ")", ":", "con", "=", "RabaConnection", "(", "cls", ".", "_raba_namespace", ")", "for", "idx", "in", "cls", ".", "getIndexes", "(", ")", ":", "con", ".", "dropIndexByName", "(", "idx", "[", "1", "]", ")" ]
drops all indexes for a class
[ "drops", "all", "indexes", "for", "a", "class" ]
train
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L543-L547
tariqdaouda/rabaDB
rabaDB/Raba.py
Raba.set
def set(self, **args) : "set multiple values quickly, ex : name = woopy" for k, v in args.items() : setattr(self, k, v)
python
def set(self, **args) : "set multiple values quickly, ex : name = woopy" for k, v in args.items() : setattr(self, k, v)
[ "def", "set", "(", "self", ",", "*", "*", "args", ")", ":", "for", "k", ",", "v", "in", "args", ".", "items", "(", ")", ":", "setattr", "(", "self", ",", "k", ",", "v", ")" ]
set multiple values quickly, ex : name = woopy
[ "set", "multiple", "values", "quickly", "ex", ":", "name", "=", "woopy" ]
train
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L604-L607
tariqdaouda/rabaDB
rabaDB/Raba.py
Raba.getFields
def getFields(cls) : """returns a set of the available fields. In order to be able ti securely loop of the fields, "raba_id" and "json" are not included in the set""" s = set(cls.columns.keys()) s.remove('json') s.remove('raba_id') return s
python
def getFields(cls) : """returns a set of the available fields. In order to be able ti securely loop of the fields, "raba_id" and "json" are not included in the set""" s = set(cls.columns.keys()) s.remove('json') s.remove('raba_id') return s
[ "def", "getFields", "(", "cls", ")", ":", "s", "=", "set", "(", "cls", ".", "columns", ".", "keys", "(", ")", ")", "s", ".", "remove", "(", "'json'", ")", "s", ".", "remove", "(", "'raba_id'", ")", "return", "s" ]
returns a set of the available fields. In order to be able ti securely loop of the fields, "raba_id" and "json" are not included in the set
[ "returns", "a", "set", "of", "the", "available", "fields", ".", "In", "order", "to", "be", "able", "ti", "securely", "loop", "of", "the", "fields", "raba_id", "and", "json", "are", "not", "included", "in", "the", "set" ]
train
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L672-L677
tariqdaouda/rabaDB
rabaDB/Raba.py
RabaListPupa._attachToObject
def _attachToObject(self, anchorObj, relationName) : "dummy fct for compatibility reasons, a RabaListPupa is attached by default" #MutableSequence.__getattribute__(self, "develop")() self.develop() self._attachToObject(anchorObj, relationName)
python
def _attachToObject(self, anchorObj, relationName) : "dummy fct for compatibility reasons, a RabaListPupa is attached by default" #MutableSequence.__getattribute__(self, "develop")() self.develop() self._attachToObject(anchorObj, relationName)
[ "def", "_attachToObject", "(", "self", ",", "anchorObj", ",", "relationName", ")", ":", "#MutableSequence.__getattribute__(self, \"develop\")()", "self", ".", "develop", "(", ")", "self", ".", "_attachToObject", "(", "anchorObj", ",", "relationName", ")" ]
dummy fct for compatibility reasons, a RabaListPupa is attached by default
[ "dummy", "fct", "for", "compatibility", "reasons", "a", "RabaListPupa", "is", "attached", "by", "default" ]
train
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L739-L743
tariqdaouda/rabaDB
rabaDB/Raba.py
RabaList.pupatizeElements
def pupatizeElements(self) : """Transform all raba object into pupas""" for i in range(len(self)) : self[i] = self[i].pupa()
python
def pupatizeElements(self) : """Transform all raba object into pupas""" for i in range(len(self)) : self[i] = self[i].pupa()
[ "def", "pupatizeElements", "(", "self", ")", ":", "for", "i", "in", "range", "(", "len", "(", "self", ")", ")", ":", "self", "[", "i", "]", "=", "self", "[", "i", "]", ".", "pupa", "(", ")" ]
Transform all raba object into pupas
[ "Transform", "all", "raba", "object", "into", "pupas" ]
train
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L849-L852
tariqdaouda/rabaDB
rabaDB/Raba.py
RabaList._save
def _save(self) : """saves the RabaList into it's own table. This a private function that should be called directly Before saving the entire list corresponding to the anchorObj is wiped out before being rewritten. The alternative would be to keep the sync between the list and the table in real time (remove in both). If the current solution proves to be to slow, i'll consider the alternative""" if self.connection.registerSave(self) : if len(self) == 0 : self.connection.updateRabaListLength(self.raba_id, len(self)) return True else : if self.relationName == None or self.anchorObj == None : raise ValueError('%s has not been attached to any object, impossible to save it' % s) #if self.raba_id == None : # self.raba_id, self.tableName = self.connection.registerRabalist(self.anchorObj._rabaClass.__name__, self.anchorObj.raba_id, self.relationName) if self._saved : self.empty() values = [] for e in self.data : if isRabaObject(e) : e.save() objDct = e.getDctDescription() values.append((self.anchorObj.raba_id, None, RabaFields.RABA_FIELD_TYPE_IS_RABA_OBJECT, e._rabaClass.__name__, e.raba_id, e._raba_namespace)) elif isPythonPrimitive(e) : values.append((self.anchorObj.raba_id, e, RabaFields.RABA_FIELD_TYPE_IS_PRIMITIVE, None, None, None)) else : values.append((self.anchorObj.raba_id, buffer(cPickle.dumps(e)), RabaFields.RABA_FIELD_TYPE_IS_PRIMITIVE, None, None, None)) self.connection.executeMany('INSERT INTO %s (anchor_raba_id, value, type, obj_raba_class_name, obj_raba_id, obj_raba_namespace) VALUES (?, ?, ?, ?, ?, ?)' % self.tableName, values) #self.connection.updateRabaListLength(self.raba_id, len(self)) self._saved = True self._mutated = False return True else : return False
python
def _save(self) : """saves the RabaList into it's own table. This a private function that should be called directly Before saving the entire list corresponding to the anchorObj is wiped out before being rewritten. The alternative would be to keep the sync between the list and the table in real time (remove in both). If the current solution proves to be to slow, i'll consider the alternative""" if self.connection.registerSave(self) : if len(self) == 0 : self.connection.updateRabaListLength(self.raba_id, len(self)) return True else : if self.relationName == None or self.anchorObj == None : raise ValueError('%s has not been attached to any object, impossible to save it' % s) #if self.raba_id == None : # self.raba_id, self.tableName = self.connection.registerRabalist(self.anchorObj._rabaClass.__name__, self.anchorObj.raba_id, self.relationName) if self._saved : self.empty() values = [] for e in self.data : if isRabaObject(e) : e.save() objDct = e.getDctDescription() values.append((self.anchorObj.raba_id, None, RabaFields.RABA_FIELD_TYPE_IS_RABA_OBJECT, e._rabaClass.__name__, e.raba_id, e._raba_namespace)) elif isPythonPrimitive(e) : values.append((self.anchorObj.raba_id, e, RabaFields.RABA_FIELD_TYPE_IS_PRIMITIVE, None, None, None)) else : values.append((self.anchorObj.raba_id, buffer(cPickle.dumps(e)), RabaFields.RABA_FIELD_TYPE_IS_PRIMITIVE, None, None, None)) self.connection.executeMany('INSERT INTO %s (anchor_raba_id, value, type, obj_raba_class_name, obj_raba_id, obj_raba_namespace) VALUES (?, ?, ?, ?, ?, ?)' % self.tableName, values) #self.connection.updateRabaListLength(self.raba_id, len(self)) self._saved = True self._mutated = False return True else : return False
[ "def", "_save", "(", "self", ")", ":", "if", "self", ".", "connection", ".", "registerSave", "(", "self", ")", ":", "if", "len", "(", "self", ")", "==", "0", ":", "self", ".", "connection", ".", "updateRabaListLength", "(", "self", ".", "raba_id", ",...
saves the RabaList into it's own table. This a private function that should be called directly Before saving the entire list corresponding to the anchorObj is wiped out before being rewritten. The alternative would be to keep the sync between the list and the table in real time (remove in both). If the current solution proves to be to slow, i'll consider the alternative
[ "saves", "the", "RabaList", "into", "it", "s", "own", "table", ".", "This", "a", "private", "function", "that", "should", "be", "called", "directly", "Before", "saving", "the", "entire", "list", "corresponding", "to", "the", "anchorObj", "is", "wiped", "out"...
train
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L859-L897
tariqdaouda/rabaDB
rabaDB/Raba.py
RabaList._attachToObject
def _attachToObject(self, anchorObj, relationName) : "Attaches the rabalist to a raba object. Only attached rabalists can be saved" if self.anchorObj == None : self.relationName = relationName self.anchorObj = anchorObj self._setNamespaceConAndConf(anchorObj._rabaClass._raba_namespace) self.tableName = self.connection.makeRabaListTableName(self.anchorObj._rabaClass.__name__, self.relationName) faultyElmt = self._checkSelf() if faultyElmt != None : raise ValueError("Element %s violates specified list or relation constraints" % faultyElmt) elif self.anchorObj is not anchorObj : raise ValueError("Ouch: attempt to steal rabalist, use RabaLict.copy() instead.\nthief: %s\nvictim: %s\nlist: %s" % (anchorObj, self.anchorObj, self))
python
def _attachToObject(self, anchorObj, relationName) : "Attaches the rabalist to a raba object. Only attached rabalists can be saved" if self.anchorObj == None : self.relationName = relationName self.anchorObj = anchorObj self._setNamespaceConAndConf(anchorObj._rabaClass._raba_namespace) self.tableName = self.connection.makeRabaListTableName(self.anchorObj._rabaClass.__name__, self.relationName) faultyElmt = self._checkSelf() if faultyElmt != None : raise ValueError("Element %s violates specified list or relation constraints" % faultyElmt) elif self.anchorObj is not anchorObj : raise ValueError("Ouch: attempt to steal rabalist, use RabaLict.copy() instead.\nthief: %s\nvictim: %s\nlist: %s" % (anchorObj, self.anchorObj, self))
[ "def", "_attachToObject", "(", "self", ",", "anchorObj", ",", "relationName", ")", ":", "if", "self", ".", "anchorObj", "==", "None", ":", "self", ".", "relationName", "=", "relationName", "self", ".", "anchorObj", "=", "anchorObj", "self", ".", "_setNamespa...
Attaches the rabalist to a raba object. Only attached rabalists can be saved
[ "Attaches", "the", "rabalist", "to", "a", "raba", "object", ".", "Only", "attached", "rabalists", "can", "be", "saved" ]
train
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L899-L910
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.list_files_courses
def list_files_courses(self, course_id, content_types=None, include=None, only=None, order=None, search_term=None, sort=None): """ List files. Returns the paginated list of files for the folder or course. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - content_types """Filter results by content-type. You can specify type/subtype pairs (e.g., 'image/jpeg'), or simply types (e.g., 'image', which will match 'image/gif', 'image/jpeg', etc.).""" if content_types is not None: params["content_types"] = content_types # OPTIONAL - search_term """The partial name of the files to match and return.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - include """Array of additional information to include. "user":: the user who uploaded the file or last edited its content "usage_rights":: copyright and license information for the file (see UsageRights)""" if include is not None: self._validate_enum(include, ["user"]) params["include"] = include # OPTIONAL - only """Array of information to restrict to. Overrides include[] "names":: only returns file name information""" if only is not None: params["only"] = only # OPTIONAL - sort """Sort results by this field. Defaults to 'name'. Note that `sort=user` implies `include[]=user`.""" if sort is not None: self._validate_enum(sort, ["name", "size", "created_at", "updated_at", "content_type", "user"]) params["sort"] = sort # OPTIONAL - order """The sorting order. Defaults to 'asc'.""" if order is not None: self._validate_enum(order, ["asc", "desc"]) params["order"] = order self.logger.debug("GET /api/v1/courses/{course_id}/files with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/files".format(**path), data=data, params=params, all_pages=True)
python
def list_files_courses(self, course_id, content_types=None, include=None, only=None, order=None, search_term=None, sort=None): """ List files. Returns the paginated list of files for the folder or course. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - content_types """Filter results by content-type. You can specify type/subtype pairs (e.g., 'image/jpeg'), or simply types (e.g., 'image', which will match 'image/gif', 'image/jpeg', etc.).""" if content_types is not None: params["content_types"] = content_types # OPTIONAL - search_term """The partial name of the files to match and return.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - include """Array of additional information to include. "user":: the user who uploaded the file or last edited its content "usage_rights":: copyright and license information for the file (see UsageRights)""" if include is not None: self._validate_enum(include, ["user"]) params["include"] = include # OPTIONAL - only """Array of information to restrict to. Overrides include[] "names":: only returns file name information""" if only is not None: params["only"] = only # OPTIONAL - sort """Sort results by this field. Defaults to 'name'. Note that `sort=user` implies `include[]=user`.""" if sort is not None: self._validate_enum(sort, ["name", "size", "created_at", "updated_at", "content_type", "user"]) params["sort"] = sort # OPTIONAL - order """The sorting order. Defaults to 'asc'.""" if order is not None: self._validate_enum(order, ["asc", "desc"]) params["order"] = order self.logger.debug("GET /api/v1/courses/{course_id}/files with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/files".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_files_courses", "(", "self", ",", "course_id", ",", "content_types", "=", "None", ",", "include", "=", "None", ",", "only", "=", "None", ",", "order", "=", "None", ",", "search_term", "=", "None", ",", "sort", "=", "None", ")", ":", "path"...
List files. Returns the paginated list of files for the folder or course.
[ "List", "files", ".", "Returns", "the", "paginated", "list", "of", "files", "for", "the", "folder", "or", "course", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L70-L125
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.get_public_inline_preview_url
def get_public_inline_preview_url(self, id, submission_id=None): """ Get public inline preview url. Determine the URL that should be used for inline preview of the file. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - submission_id """The id of the submission the file is associated with. Provide this argument to gain access to a file that has been submitted to an assignment (Canvas will verify that the file belongs to the submission and the calling user has rights to view the submission).""" if submission_id is not None: params["submission_id"] = submission_id self.logger.debug("GET /api/v1/files/{id}/public_url with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/files/{id}/public_url".format(**path), data=data, params=params, no_data=True)
python
def get_public_inline_preview_url(self, id, submission_id=None): """ Get public inline preview url. Determine the URL that should be used for inline preview of the file. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - submission_id """The id of the submission the file is associated with. Provide this argument to gain access to a file that has been submitted to an assignment (Canvas will verify that the file belongs to the submission and the calling user has rights to view the submission).""" if submission_id is not None: params["submission_id"] = submission_id self.logger.debug("GET /api/v1/files/{id}/public_url with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/files/{id}/public_url".format(**path), data=data, params=params, no_data=True)
[ "def", "get_public_inline_preview_url", "(", "self", ",", "id", ",", "submission_id", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - id\r", "\"\"\"ID\"\"\"", "path", "[", "\"id\"", "]", "=...
Get public inline preview url. Determine the URL that should be used for inline preview of the file.
[ "Get", "public", "inline", "preview", "url", ".", "Determine", "the", "URL", "that", "should", "be", "used", "for", "inline", "preview", "of", "the", "file", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L298-L320
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.get_file_courses
def get_file_courses(self, id, course_id, include=None): """ Get file. Returns the standard attachment json object """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - include """Array of additional information to include. "user":: the user who uploaded the file or last edited its content "usage_rights":: copyright and license information for the file (see UsageRights)""" if include is not None: self._validate_enum(include, ["user"]) params["include"] = include self.logger.debug("GET /api/v1/courses/{course_id}/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/files/{id}".format(**path), data=data, params=params, single_item=True)
python
def get_file_courses(self, id, course_id, include=None): """ Get file. Returns the standard attachment json object """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - include """Array of additional information to include. "user":: the user who uploaded the file or last edited its content "usage_rights":: copyright and license information for the file (see UsageRights)""" if include is not None: self._validate_enum(include, ["user"]) params["include"] = include self.logger.debug("GET /api/v1/courses/{course_id}/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/files/{id}".format(**path), data=data, params=params, single_item=True)
[ "def", "get_file_courses", "(", "self", ",", "id", ",", "course_id", ",", "include", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_i...
Get file. Returns the standard attachment json object
[ "Get", "file", ".", "Returns", "the", "standard", "attachment", "json", "object" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L348-L376
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.update_file
def update_file(self, id, hidden=None, lock_at=None, locked=None, name=None, on_duplicate=None, parent_folder_id=None, unlock_at=None): """ Update file. Update some settings on the specified file """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - name """The new display name of the file""" if name is not None: data["name"] = name # OPTIONAL - parent_folder_id """The id of the folder to move this file into. The new folder must be in the same context as the original parent folder. If the file is in a context without folders this does not apply.""" if parent_folder_id is not None: data["parent_folder_id"] = parent_folder_id # OPTIONAL - on_duplicate """If the file is moved to a folder containing a file with the same name, or renamed to a name matching an existing file, the API call will fail unless this parameter is supplied. "overwrite":: Replace the existing file with the same name "rename":: Add a qualifier to make the new filename unique""" if on_duplicate is not None: self._validate_enum(on_duplicate, ["overwrite", "rename"]) data["on_duplicate"] = on_duplicate # OPTIONAL - lock_at """The datetime to lock the file at""" if lock_at is not None: data["lock_at"] = lock_at # OPTIONAL - unlock_at """The datetime to unlock the file at""" if unlock_at is not None: data["unlock_at"] = unlock_at # OPTIONAL - locked """Flag the file as locked""" if locked is not None: data["locked"] = locked # OPTIONAL - hidden """Flag the file as hidden""" if hidden is not None: data["hidden"] = hidden self.logger.debug("PUT /api/v1/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/files/{id}".format(**path), data=data, params=params, single_item=True)
python
def update_file(self, id, hidden=None, lock_at=None, locked=None, name=None, on_duplicate=None, parent_folder_id=None, unlock_at=None): """ Update file. Update some settings on the specified file """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - name """The new display name of the file""" if name is not None: data["name"] = name # OPTIONAL - parent_folder_id """The id of the folder to move this file into. The new folder must be in the same context as the original parent folder. If the file is in a context without folders this does not apply.""" if parent_folder_id is not None: data["parent_folder_id"] = parent_folder_id # OPTIONAL - on_duplicate """If the file is moved to a folder containing a file with the same name, or renamed to a name matching an existing file, the API call will fail unless this parameter is supplied. "overwrite":: Replace the existing file with the same name "rename":: Add a qualifier to make the new filename unique""" if on_duplicate is not None: self._validate_enum(on_duplicate, ["overwrite", "rename"]) data["on_duplicate"] = on_duplicate # OPTIONAL - lock_at """The datetime to lock the file at""" if lock_at is not None: data["lock_at"] = lock_at # OPTIONAL - unlock_at """The datetime to unlock the file at""" if unlock_at is not None: data["unlock_at"] = unlock_at # OPTIONAL - locked """Flag the file as locked""" if locked is not None: data["locked"] = locked # OPTIONAL - hidden """Flag the file as hidden""" if hidden is not None: data["hidden"] = hidden self.logger.debug("PUT /api/v1/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/files/{id}".format(**path), data=data, params=params, single_item=True)
[ "def", "update_file", "(", "self", ",", "id", ",", "hidden", "=", "None", ",", "lock_at", "=", "None", ",", "locked", "=", "None", ",", "name", "=", "None", ",", "on_duplicate", "=", "None", ",", "parent_folder_id", "=", "None", ",", "unlock_at", "=", ...
Update file. Update some settings on the specified file
[ "Update", "file", ".", "Update", "some", "settings", "on", "the", "specified", "file" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L438-L496
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.delete_file
def delete_file(self, id): """ Delete file. Remove the specified file curl -XDELETE 'https://<canvas>/api/v1/files/<file_id>' \ -H 'Authorization: Bearer <token>' """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("DELETE /api/v1/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/files/{id}".format(**path), data=data, params=params, no_data=True)
python
def delete_file(self, id): """ Delete file. Remove the specified file curl -XDELETE 'https://<canvas>/api/v1/files/<file_id>' \ -H 'Authorization: Bearer <token>' """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("DELETE /api/v1/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/files/{id}".format(**path), data=data, params=params, no_data=True)
[ "def", "delete_file", "(", "self", ",", "id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - id\r", "\"\"\"ID\"\"\"", "path", "[", "\"id\"", "]", "=", "id", "self", ".", "logger", ".", "debug", "...
Delete file. Remove the specified file curl -XDELETE 'https://<canvas>/api/v1/files/<file_id>' \ -H 'Authorization: Bearer <token>'
[ "Delete", "file", ".", "Remove", "the", "specified", "file", "curl", "-", "XDELETE", "https", ":", "//", "<canvas", ">", "/", "api", "/", "v1", "/", "files", "/", "<file_id", ">", "\\", "-", "H", "Authorization", ":", "Bearer", "<token", ">" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L498-L516
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.get_folder_groups
def get_folder_groups(self, id, group_id): """ Get folder. Returns the details for a folder You can get the root folder from a context by using 'root' as the :id. For example, you could get the root folder for a course like: """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("GET /api/v1/groups/{group_id}/folders/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/folders/{id}".format(**path), data=data, params=params, single_item=True)
python
def get_folder_groups(self, id, group_id): """ Get folder. Returns the details for a folder You can get the root folder from a context by using 'root' as the :id. For example, you could get the root folder for a course like: """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("GET /api/v1/groups/{group_id}/folders/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/folders/{id}".format(**path), data=data, params=params, single_item=True)
[ "def", "get_folder_groups", "(", "self", ",", "id", ",", "group_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - group_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"group_id\"", "]", "=", "group_id", "# ...
Get folder. Returns the details for a folder You can get the root folder from a context by using 'root' as the :id. For example, you could get the root folder for a course like:
[ "Get", "folder", ".", "Returns", "the", "details", "for", "a", "folder", "You", "can", "get", "the", "root", "folder", "from", "a", "context", "by", "using", "root", "as", "the", ":", "id", ".", "For", "example", "you", "could", "get", "the", "root", ...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L769-L791
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.update_folder
def update_folder(self, id, hidden=None, lock_at=None, locked=None, name=None, parent_folder_id=None, position=None, unlock_at=None): """ Update folder. Updates a folder """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - name """The new name of the folder""" if name is not None: data["name"] = name # OPTIONAL - parent_folder_id """The id of the folder to move this folder into. The new folder must be in the same context as the original parent folder.""" if parent_folder_id is not None: data["parent_folder_id"] = parent_folder_id # OPTIONAL - lock_at """The datetime to lock the folder at""" if lock_at is not None: data["lock_at"] = lock_at # OPTIONAL - unlock_at """The datetime to unlock the folder at""" if unlock_at is not None: data["unlock_at"] = unlock_at # OPTIONAL - locked """Flag the folder as locked""" if locked is not None: data["locked"] = locked # OPTIONAL - hidden """Flag the folder as hidden""" if hidden is not None: data["hidden"] = hidden # OPTIONAL - position """Set an explicit sort position for the folder""" if position is not None: data["position"] = position self.logger.debug("PUT /api/v1/folders/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/folders/{id}".format(**path), data=data, params=params, single_item=True)
python
def update_folder(self, id, hidden=None, lock_at=None, locked=None, name=None, parent_folder_id=None, position=None, unlock_at=None): """ Update folder. Updates a folder """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - name """The new name of the folder""" if name is not None: data["name"] = name # OPTIONAL - parent_folder_id """The id of the folder to move this folder into. The new folder must be in the same context as the original parent folder.""" if parent_folder_id is not None: data["parent_folder_id"] = parent_folder_id # OPTIONAL - lock_at """The datetime to lock the folder at""" if lock_at is not None: data["lock_at"] = lock_at # OPTIONAL - unlock_at """The datetime to unlock the folder at""" if unlock_at is not None: data["unlock_at"] = unlock_at # OPTIONAL - locked """Flag the folder as locked""" if locked is not None: data["locked"] = locked # OPTIONAL - hidden """Flag the folder as hidden""" if hidden is not None: data["hidden"] = hidden # OPTIONAL - position """Set an explicit sort position for the folder""" if position is not None: data["position"] = position self.logger.debug("PUT /api/v1/folders/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/folders/{id}".format(**path), data=data, params=params, single_item=True)
[ "def", "update_folder", "(", "self", ",", "id", ",", "hidden", "=", "None", ",", "lock_at", "=", "None", ",", "locked", "=", "None", ",", "name", "=", "None", ",", "parent_folder_id", "=", "None", ",", "position", "=", "None", ",", "unlock_at", "=", ...
Update folder. Updates a folder
[ "Update", "folder", ".", "Updates", "a", "folder" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L813-L863
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.create_folder_courses
def create_folder_courses(self, name, course_id, hidden=None, lock_at=None, locked=None, parent_folder_id=None, parent_folder_path=None, position=None, unlock_at=None): """ Create folder. Creates a folder in the specified context """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - name """The name of the folder""" data["name"] = name # OPTIONAL - parent_folder_id """The id of the folder to store the file in. If this and parent_folder_path are sent an error will be returned. If neither is given, a default folder will be used.""" if parent_folder_id is not None: data["parent_folder_id"] = parent_folder_id # OPTIONAL - parent_folder_path """The path of the folder to store the new folder in. The path separator is the forward slash `/`, never a back slash. The parent folder will be created if it does not already exist. This parameter only applies to new folders in a context that has folders, such as a user, a course, or a group. If this and parent_folder_id are sent an error will be returned. 
If neither is given, a default folder will be used.""" if parent_folder_path is not None: data["parent_folder_path"] = parent_folder_path # OPTIONAL - lock_at """The datetime to lock the folder at""" if lock_at is not None: data["lock_at"] = lock_at # OPTIONAL - unlock_at """The datetime to unlock the folder at""" if unlock_at is not None: data["unlock_at"] = unlock_at # OPTIONAL - locked """Flag the folder as locked""" if locked is not None: data["locked"] = locked # OPTIONAL - hidden """Flag the folder as hidden""" if hidden is not None: data["hidden"] = hidden # OPTIONAL - position """Set an explicit sort position for the folder""" if position is not None: data["position"] = position self.logger.debug("POST /api/v1/courses/{course_id}/folders with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/folders".format(**path), data=data, params=params, single_item=True)
python
def create_folder_courses(self, name, course_id, hidden=None, lock_at=None, locked=None, parent_folder_id=None, parent_folder_path=None, position=None, unlock_at=None): """ Create folder. Creates a folder in the specified context """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - name """The name of the folder""" data["name"] = name # OPTIONAL - parent_folder_id """The id of the folder to store the file in. If this and parent_folder_path are sent an error will be returned. If neither is given, a default folder will be used.""" if parent_folder_id is not None: data["parent_folder_id"] = parent_folder_id # OPTIONAL - parent_folder_path """The path of the folder to store the new folder in. The path separator is the forward slash `/`, never a back slash. The parent folder will be created if it does not already exist. This parameter only applies to new folders in a context that has folders, such as a user, a course, or a group. If this and parent_folder_id are sent an error will be returned. 
If neither is given, a default folder will be used.""" if parent_folder_path is not None: data["parent_folder_path"] = parent_folder_path # OPTIONAL - lock_at """The datetime to lock the folder at""" if lock_at is not None: data["lock_at"] = lock_at # OPTIONAL - unlock_at """The datetime to unlock the folder at""" if unlock_at is not None: data["unlock_at"] = unlock_at # OPTIONAL - locked """Flag the folder as locked""" if locked is not None: data["locked"] = locked # OPTIONAL - hidden """Flag the folder as hidden""" if hidden is not None: data["hidden"] = hidden # OPTIONAL - position """Set an explicit sort position for the folder""" if position is not None: data["position"] = position self.logger.debug("POST /api/v1/courses/{course_id}/folders with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/folders".format(**path), data=data, params=params, single_item=True)
[ "def", "create_folder_courses", "(", "self", ",", "name", ",", "course_id", ",", "hidden", "=", "None", ",", "lock_at", "=", "None", ",", "locked", "=", "None", ",", "parent_folder_id", "=", "None", ",", "parent_folder_path", "=", "None", ",", "position", ...
Create folder. Creates a folder in the specified context
[ "Create", "folder", ".", "Creates", "a", "folder", "in", "the", "specified", "context" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L865-L919
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.delete_folder
def delete_folder(self, id, force=None): """ Delete folder. Remove the specified folder. You can only delete empty folders unless you set the 'force' flag """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - force """Set to 'true' to allow deleting a non-empty folder""" if force is not None: params["force"] = force self.logger.debug("DELETE /api/v1/folders/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/folders/{id}".format(**path), data=data, params=params, no_data=True)
python
def delete_folder(self, id, force=None): """ Delete folder. Remove the specified folder. You can only delete empty folders unless you set the 'force' flag """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - force """Set to 'true' to allow deleting a non-empty folder""" if force is not None: params["force"] = force self.logger.debug("DELETE /api/v1/folders/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/folders/{id}".format(**path), data=data, params=params, no_data=True)
[ "def", "delete_folder", "(", "self", ",", "id", ",", "force", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - id\r", "\"\"\"ID\"\"\"", "path", "[", "\"id\"", "]", "=", "id", "# OPTIONAL...
Delete folder. Remove the specified folder. You can only delete empty folders unless you set the 'force' flag
[ "Delete", "folder", ".", "Remove", "the", "specified", "folder", ".", "You", "can", "only", "delete", "empty", "folders", "unless", "you", "set", "the", "force", "flag" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L1089-L1110
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.copy_file
def copy_file(self, dest_folder_id, source_file_id, on_duplicate=None): """ Copy a file. Copy a file from elsewhere in Canvas into a folder. Copying a file across contexts (between courses and users) is permitted, but the source and destination must belong to the same institution. """ path = {} data = {} params = {} # REQUIRED - PATH - dest_folder_id """ID""" path["dest_folder_id"] = dest_folder_id # REQUIRED - source_file_id """The id of the source file""" data["source_file_id"] = source_file_id # OPTIONAL - on_duplicate """What to do if a file with the same name already exists at the destination. If such a file exists and this parameter is not given, the call will fail. "overwrite":: Replace an existing file with the same name "rename":: Add a qualifier to make the new filename unique""" if on_duplicate is not None: self._validate_enum(on_duplicate, ["overwrite", "rename"]) data["on_duplicate"] = on_duplicate self.logger.debug("POST /api/v1/folders/{dest_folder_id}/copy_file with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/folders/{dest_folder_id}/copy_file".format(**path), data=data, params=params, single_item=True)
python
def copy_file(self, dest_folder_id, source_file_id, on_duplicate=None): """ Copy a file. Copy a file from elsewhere in Canvas into a folder. Copying a file across contexts (between courses and users) is permitted, but the source and destination must belong to the same institution. """ path = {} data = {} params = {} # REQUIRED - PATH - dest_folder_id """ID""" path["dest_folder_id"] = dest_folder_id # REQUIRED - source_file_id """The id of the source file""" data["source_file_id"] = source_file_id # OPTIONAL - on_duplicate """What to do if a file with the same name already exists at the destination. If such a file exists and this parameter is not given, the call will fail. "overwrite":: Replace an existing file with the same name "rename":: Add a qualifier to make the new filename unique""" if on_duplicate is not None: self._validate_enum(on_duplicate, ["overwrite", "rename"]) data["on_duplicate"] = on_duplicate self.logger.debug("POST /api/v1/folders/{dest_folder_id}/copy_file with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/folders/{dest_folder_id}/copy_file".format(**path), data=data, params=params, single_item=True)
[ "def", "copy_file", "(", "self", ",", "dest_folder_id", ",", "source_file_id", ",", "on_duplicate", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - dest_folder_id\r", "\"\"\"ID\"\"\"", "path", ...
Copy a file. Copy a file from elsewhere in Canvas into a folder. Copying a file across contexts (between courses and users) is permitted, but the source and destination must belong to the same institution.
[ "Copy", "a", "file", ".", "Copy", "a", "file", "from", "elsewhere", "in", "Canvas", "into", "a", "folder", ".", "Copying", "a", "file", "across", "contexts", "(", "between", "courses", "and", "users", ")", "is", "permitted", "but", "the", "source", "and"...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L1136-L1168
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.copy_folder
def copy_folder(self, dest_folder_id, source_folder_id): """ Copy a folder. Copy a folder (and its contents) from elsewhere in Canvas into a folder. Copying a folder across contexts (between courses and users) is permitted, but the source and destination must belong to the same institution. If the source and destination folders are in the same context, the source folder may not contain the destination folder. A folder will be renamed at its destination if another folder with the same name already exists. """ path = {} data = {} params = {} # REQUIRED - PATH - dest_folder_id """ID""" path["dest_folder_id"] = dest_folder_id # REQUIRED - source_folder_id """The id of the source folder""" data["source_folder_id"] = source_folder_id self.logger.debug("POST /api/v1/folders/{dest_folder_id}/copy_folder with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/folders/{dest_folder_id}/copy_folder".format(**path), data=data, params=params, single_item=True)
python
def copy_folder(self, dest_folder_id, source_folder_id): """ Copy a folder. Copy a folder (and its contents) from elsewhere in Canvas into a folder. Copying a folder across contexts (between courses and users) is permitted, but the source and destination must belong to the same institution. If the source and destination folders are in the same context, the source folder may not contain the destination folder. A folder will be renamed at its destination if another folder with the same name already exists. """ path = {} data = {} params = {} # REQUIRED - PATH - dest_folder_id """ID""" path["dest_folder_id"] = dest_folder_id # REQUIRED - source_folder_id """The id of the source folder""" data["source_folder_id"] = source_folder_id self.logger.debug("POST /api/v1/folders/{dest_folder_id}/copy_folder with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/folders/{dest_folder_id}/copy_folder".format(**path), data=data, params=params, single_item=True)
[ "def", "copy_folder", "(", "self", ",", "dest_folder_id", ",", "source_folder_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - dest_folder_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"dest_folder_id\"", "]", ...
Copy a folder. Copy a folder (and its contents) from elsewhere in Canvas into a folder. Copying a folder across contexts (between courses and users) is permitted, but the source and destination must belong to the same institution. If the source and destination folders are in the same context, the source folder may not contain the destination folder. A folder will be renamed at its destination if another folder with the same name already exists.
[ "Copy", "a", "folder", ".", "Copy", "a", "folder", "(", "and", "its", "contents", ")", "from", "elsewhere", "in", "Canvas", "into", "a", "folder", ".", "Copying", "a", "folder", "across", "contexts", "(", "between", "courses", "and", "users", ")", "is", ...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L1170-L1196
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.set_usage_rights_courses
def set_usage_rights_courses(self, file_ids, course_id, usage_rights_use_justification, folder_ids=None, publish=None, usage_rights_legal_copyright=None, usage_rights_license=None): """ Set usage rights. Sets copyright and license information for one or more files """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - file_ids """List of ids of files to set usage rights for.""" data["file_ids"] = file_ids # OPTIONAL - folder_ids """List of ids of folders to search for files to set usage rights for. Note that new files uploaded to these folders do not automatically inherit these rights.""" if folder_ids is not None: data["folder_ids"] = folder_ids # OPTIONAL - publish """Whether the file(s) or folder(s) should be published on save, provided that usage rights have been specified (set to `true` to publish on save).""" if publish is not None: data["publish"] = publish # REQUIRED - usage_rights[use_justification] """The intellectual property justification for using the files in Canvas""" self._validate_enum(usage_rights_use_justification, ["own_copyright", "used_by_permission", "fair_use", "public_domain", "creative_commons"]) data["usage_rights[use_justification]"] = usage_rights_use_justification # OPTIONAL - usage_rights[legal_copyright] """The legal copyright line for the files""" if usage_rights_legal_copyright is not None: data["usage_rights[legal_copyright]"] = usage_rights_legal_copyright # OPTIONAL - usage_rights[license] """The license that applies to the files. 
See the {api:UsageRightsController#licenses List licenses endpoint} for the supported license types.""" if usage_rights_license is not None: data["usage_rights[license]"] = usage_rights_license self.logger.debug("PUT /api/v1/courses/{course_id}/usage_rights with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/usage_rights".format(**path), data=data, params=params, single_item=True)
python
def set_usage_rights_courses(self, file_ids, course_id, usage_rights_use_justification, folder_ids=None, publish=None, usage_rights_legal_copyright=None, usage_rights_license=None): """ Set usage rights. Sets copyright and license information for one or more files """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - file_ids """List of ids of files to set usage rights for.""" data["file_ids"] = file_ids # OPTIONAL - folder_ids """List of ids of folders to search for files to set usage rights for. Note that new files uploaded to these folders do not automatically inherit these rights.""" if folder_ids is not None: data["folder_ids"] = folder_ids # OPTIONAL - publish """Whether the file(s) or folder(s) should be published on save, provided that usage rights have been specified (set to `true` to publish on save).""" if publish is not None: data["publish"] = publish # REQUIRED - usage_rights[use_justification] """The intellectual property justification for using the files in Canvas""" self._validate_enum(usage_rights_use_justification, ["own_copyright", "used_by_permission", "fair_use", "public_domain", "creative_commons"]) data["usage_rights[use_justification]"] = usage_rights_use_justification # OPTIONAL - usage_rights[legal_copyright] """The legal copyright line for the files""" if usage_rights_legal_copyright is not None: data["usage_rights[legal_copyright]"] = usage_rights_legal_copyright # OPTIONAL - usage_rights[license] """The license that applies to the files. 
See the {api:UsageRightsController#licenses List licenses endpoint} for the supported license types.""" if usage_rights_license is not None: data["usage_rights[license]"] = usage_rights_license self.logger.debug("PUT /api/v1/courses/{course_id}/usage_rights with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/usage_rights".format(**path), data=data, params=params, single_item=True)
[ "def", "set_usage_rights_courses", "(", "self", ",", "file_ids", ",", "course_id", ",", "usage_rights_use_justification", ",", "folder_ids", "=", "None", ",", "publish", "=", "None", ",", "usage_rights_legal_copyright", "=", "None", ",", "usage_rights_license", "=", ...
Set usage rights. Sets copyright and license information for one or more files
[ "Set", "usage", "rights", ".", "Sets", "copyright", "and", "license", "information", "for", "one", "or", "more", "files" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L1198-L1243
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.remove_usage_rights_courses
def remove_usage_rights_courses(self, file_ids, course_id, folder_ids=None): """ Remove usage rights. Removes copyright and license information associated with one or more files """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - file_ids """List of ids of files to remove associated usage rights from.""" params["file_ids"] = file_ids # OPTIONAL - folder_ids """List of ids of folders. Usage rights will be removed from all files in these folders.""" if folder_ids is not None: params["folder_ids"] = folder_ids self.logger.debug("DELETE /api/v1/courses/{course_id}/usage_rights with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/courses/{course_id}/usage_rights".format(**path), data=data, params=params, no_data=True)
python
def remove_usage_rights_courses(self, file_ids, course_id, folder_ids=None): """ Remove usage rights. Removes copyright and license information associated with one or more files """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - file_ids """List of ids of files to remove associated usage rights from.""" params["file_ids"] = file_ids # OPTIONAL - folder_ids """List of ids of folders. Usage rights will be removed from all files in these folders.""" if folder_ids is not None: params["folder_ids"] = folder_ids self.logger.debug("DELETE /api/v1/courses/{course_id}/usage_rights with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/courses/{course_id}/usage_rights".format(**path), data=data, params=params, no_data=True)
[ "def", "remove_usage_rights_courses", "(", "self", ",", "file_ids", ",", "course_id", ",", "folder_ids", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", ...
Remove usage rights. Removes copyright and license information associated with one or more files
[ "Remove", "usage", "rights", ".", "Removes", "copyright", "and", "license", "information", "associated", "with", "one", "or", "more", "files" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L1339-L1363
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.remove_usage_rights_groups
def remove_usage_rights_groups(self, group_id, file_ids, folder_ids=None): """ Remove usage rights. Removes copyright and license information associated with one or more files """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - file_ids """List of ids of files to remove associated usage rights from.""" params["file_ids"] = file_ids # OPTIONAL - folder_ids """List of ids of folders. Usage rights will be removed from all files in these folders.""" if folder_ids is not None: params["folder_ids"] = folder_ids self.logger.debug("DELETE /api/v1/groups/{group_id}/usage_rights with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/groups/{group_id}/usage_rights".format(**path), data=data, params=params, no_data=True)
python
def remove_usage_rights_groups(self, group_id, file_ids, folder_ids=None): """ Remove usage rights. Removes copyright and license information associated with one or more files """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - file_ids """List of ids of files to remove associated usage rights from.""" params["file_ids"] = file_ids # OPTIONAL - folder_ids """List of ids of folders. Usage rights will be removed from all files in these folders.""" if folder_ids is not None: params["folder_ids"] = folder_ids self.logger.debug("DELETE /api/v1/groups/{group_id}/usage_rights with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/groups/{group_id}/usage_rights".format(**path), data=data, params=params, no_data=True)
[ "def", "remove_usage_rights_groups", "(", "self", ",", "group_id", ",", "file_ids", ",", "folder_ids", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - group_id\r", "\"\"\"ID\"\"\"", "path", "...
Remove usage rights. Removes copyright and license information associated with one or more files
[ "Remove", "usage", "rights", ".", "Removes", "copyright", "and", "license", "information", "associated", "with", "one", "or", "more", "files" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L1365-L1389
PGower/PyCanvas
pycanvas/apis/files.py
FilesAPI.remove_usage_rights_users
def remove_usage_rights_users(self, user_id, file_ids, folder_ids=None): """ Remove usage rights. Removes copyright and license information associated with one or more files """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # REQUIRED - file_ids """List of ids of files to remove associated usage rights from.""" params["file_ids"] = file_ids # OPTIONAL - folder_ids """List of ids of folders. Usage rights will be removed from all files in these folders.""" if folder_ids is not None: params["folder_ids"] = folder_ids self.logger.debug("DELETE /api/v1/users/{user_id}/usage_rights with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/users/{user_id}/usage_rights".format(**path), data=data, params=params, no_data=True)
python
def remove_usage_rights_users(self, user_id, file_ids, folder_ids=None): """ Remove usage rights. Removes copyright and license information associated with one or more files """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # REQUIRED - file_ids """List of ids of files to remove associated usage rights from.""" params["file_ids"] = file_ids # OPTIONAL - folder_ids """List of ids of folders. Usage rights will be removed from all files in these folders.""" if folder_ids is not None: params["folder_ids"] = folder_ids self.logger.debug("DELETE /api/v1/users/{user_id}/usage_rights with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/users/{user_id}/usage_rights".format(**path), data=data, params=params, no_data=True)
[ "def", "remove_usage_rights_users", "(", "self", ",", "user_id", ",", "file_ids", ",", "folder_ids", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - user_id\r", "\"\"\"ID\"\"\"", "path", "[",...
Remove usage rights. Removes copyright and license information associated with one or more files
[ "Remove", "usage", "rights", ".", "Removes", "copyright", "and", "license", "information", "associated", "with", "one", "or", "more", "files" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L1391-L1415
theonion/django-bulbs
bulbs/utils/methods.py
today_as_utc_datetime
def today_as_utc_datetime(): """Datetime/Date comparisons aren't great, and someone might configure TODAY, to be a date.""" now = today() if not isinstance(now, datetime) and isinstance(now, date): now = datetime.combine(now, datetime.min.time()) now = now.replace(tzinfo=tz.gettz('UTC')) return now
python
def today_as_utc_datetime(): """Datetime/Date comparisons aren't great, and someone might configure TODAY, to be a date.""" now = today() if not isinstance(now, datetime) and isinstance(now, date): now = datetime.combine(now, datetime.min.time()) now = now.replace(tzinfo=tz.gettz('UTC')) return now
[ "def", "today_as_utc_datetime", "(", ")", ":", "now", "=", "today", "(", ")", "if", "not", "isinstance", "(", "now", ",", "datetime", ")", "and", "isinstance", "(", "now", ",", "date", ")", ":", "now", "=", "datetime", ".", "combine", "(", "now", ","...
Datetime/Date comparisons aren't great, and someone might configure TODAY, to be a date.
[ "Datetime", "/", "Date", "comparisons", "aren", "t", "great", "and", "someone", "might", "configure", "TODAY", "to", "be", "a", "date", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/utils/methods.py#L31-L37
theonion/django-bulbs
bulbs/utils/methods.py
import_class
def import_class(name): """Load class from fully-qualified python module name. ex: import_class('bulbs.content.models.Content') """ module, _, klass = name.rpartition('.') mod = import_module(module) return getattr(mod, klass)
python
def import_class(name): """Load class from fully-qualified python module name. ex: import_class('bulbs.content.models.Content') """ module, _, klass = name.rpartition('.') mod = import_module(module) return getattr(mod, klass)
[ "def", "import_class", "(", "name", ")", ":", "module", ",", "_", ",", "klass", "=", "name", ".", "rpartition", "(", "'.'", ")", "mod", "=", "import_module", "(", "module", ")", "return", "getattr", "(", "mod", ",", "klass", ")" ]
Load class from fully-qualified python module name. ex: import_class('bulbs.content.models.Content')
[ "Load", "class", "from", "fully", "-", "qualified", "python", "module", "name", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/utils/methods.py#L125-L133
jaraco/hgtools
hgtools/managers/base.py
RepoManager.get_valid_managers
def get_valid_managers(cls, location): """ Get the valid RepoManagers for this location. """ def by_priority_attr(c): return getattr(c, 'priority', 0) classes = sorted( iter_subclasses(cls), key=by_priority_attr, reverse=True) all_managers = (c(location) for c in classes) return (mgr for mgr in all_managers if mgr.is_valid())
python
def get_valid_managers(cls, location): """ Get the valid RepoManagers for this location. """ def by_priority_attr(c): return getattr(c, 'priority', 0) classes = sorted( iter_subclasses(cls), key=by_priority_attr, reverse=True) all_managers = (c(location) for c in classes) return (mgr for mgr in all_managers if mgr.is_valid())
[ "def", "get_valid_managers", "(", "cls", ",", "location", ")", ":", "def", "by_priority_attr", "(", "c", ")", ":", "return", "getattr", "(", "c", ",", "'priority'", ",", "0", ")", "classes", "=", "sorted", "(", "iter_subclasses", "(", "cls", ")", ",", ...
Get the valid RepoManagers for this location.
[ "Get", "the", "valid", "RepoManagers", "for", "this", "location", "." ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/managers/base.py#L30-L40
jaraco/hgtools
hgtools/managers/base.py
RepoManager.get_parent_tags
def get_parent_tags(self, rev=None): """ Return the tags for the parent revision (or None if no single parent can be identified). """ try: parent_rev = one(self.get_parent_revs(rev)) except Exception: return None return self.get_tags(parent_rev)
python
def get_parent_tags(self, rev=None): """ Return the tags for the parent revision (or None if no single parent can be identified). """ try: parent_rev = one(self.get_parent_revs(rev)) except Exception: return None return self.get_tags(parent_rev)
[ "def", "get_parent_tags", "(", "self", ",", "rev", "=", "None", ")", ":", "try", ":", "parent_rev", "=", "one", "(", "self", ".", "get_parent_revs", "(", "rev", ")", ")", "except", "Exception", ":", "return", "None", "return", "self", ".", "get_tags", ...
Return the tags for the parent revision (or None if no single parent can be identified).
[ "Return", "the", "tags", "for", "the", "parent", "revision", "(", "or", "None", "if", "no", "single", "parent", "can", "be", "identified", ")", "." ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/managers/base.py#L79-L88
jaraco/hgtools
hgtools/managers/base.py
RepoManager.find_all_files
def find_all_files(self): """ Find files including those in subrepositories. """ files = self.find_files() subrepo_files = ( posixpath.join(subrepo.location, filename) for subrepo in self.subrepos() for filename in subrepo.find_files() ) return itertools.chain(files, subrepo_files)
python
def find_all_files(self): """ Find files including those in subrepositories. """ files = self.find_files() subrepo_files = ( posixpath.join(subrepo.location, filename) for subrepo in self.subrepos() for filename in subrepo.find_files() ) return itertools.chain(files, subrepo_files)
[ "def", "find_all_files", "(", "self", ")", ":", "files", "=", "self", ".", "find_files", "(", ")", "subrepo_files", "=", "(", "posixpath", ".", "join", "(", "subrepo", ".", "location", ",", "filename", ")", "for", "subrepo", "in", "self", ".", "subrepos"...
Find files including those in subrepositories.
[ "Find", "files", "including", "those", "in", "subrepositories", "." ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/managers/base.py#L101-L111
bharadwaj-raju/libdesktop
libdesktop/system.py
get_cmd_out
def get_cmd_out(command): '''Get the output of a command. Gets a nice Unicode no-extra-whitespace string of the ``stdout`` of a given command. Args: command (str or list): A string of the command, or a list of the arguments (as would be used in :class:`subprocess.Popen`). Note: If ``command`` is a ``str``, it will be evaluated with ``shell=True`` i.e. in the default shell (for example, bash). Returns: str: The ``stdout`` of the command.''' if isinstance(command, list): result = sp.check_output(command) else: result = sp.check_output(command, shell=True) return result.decode('utf-8').rstrip()
python
def get_cmd_out(command): '''Get the output of a command. Gets a nice Unicode no-extra-whitespace string of the ``stdout`` of a given command. Args: command (str or list): A string of the command, or a list of the arguments (as would be used in :class:`subprocess.Popen`). Note: If ``command`` is a ``str``, it will be evaluated with ``shell=True`` i.e. in the default shell (for example, bash). Returns: str: The ``stdout`` of the command.''' if isinstance(command, list): result = sp.check_output(command) else: result = sp.check_output(command, shell=True) return result.decode('utf-8').rstrip()
[ "def", "get_cmd_out", "(", "command", ")", ":", "if", "isinstance", "(", "command", ",", "list", ")", ":", "result", "=", "sp", ".", "check_output", "(", "command", ")", "else", ":", "result", "=", "sp", ".", "check_output", "(", "command", ",", "shell...
Get the output of a command. Gets a nice Unicode no-extra-whitespace string of the ``stdout`` of a given command. Args: command (str or list): A string of the command, or a list of the arguments (as would be used in :class:`subprocess.Popen`). Note: If ``command`` is a ``str``, it will be evaluated with ``shell=True`` i.e. in the default shell (for example, bash). Returns: str: The ``stdout`` of the command.
[ "Get", "the", "output", "of", "a", "command", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/system.py#L34-L53
bharadwaj-raju/libdesktop
libdesktop/system.py
get_name
def get_name(): '''Get desktop environment or OS. Get the OS name or desktop environment. **List of Possible Values** +-------------------------+---------------+ | Windows | windows | +-------------------------+---------------+ | Mac OS X | mac | +-------------------------+---------------+ | GNOME 3+ | gnome | +-------------------------+---------------+ | GNOME 2 | gnome2 | +-------------------------+---------------+ | XFCE | xfce4 | +-------------------------+---------------+ | KDE | kde | +-------------------------+---------------+ | Unity | unity | +-------------------------+---------------+ | LXDE | lxde | +-------------------------+---------------+ | i3wm | i3 | +-------------------------+---------------+ | \*box | \*box | +-------------------------+---------------+ | Trinity (KDE 3 fork) | trinity | +-------------------------+---------------+ | MATE | mate | +-------------------------+---------------+ | IceWM | icewm | +-------------------------+---------------+ | Pantheon (elementaryOS) | pantheon | +-------------------------+---------------+ | LXQt | lxqt | +-------------------------+---------------+ | Awesome WM | awesome | +-------------------------+---------------+ | Enlightenment | enlightenment | +-------------------------+---------------+ | AfterStep | afterstep | +-------------------------+---------------+ | WindowMaker | windowmaker | +-------------------------+---------------+ | [Other] | unknown | +-------------------------+---------------+ Returns: str: The name of the desktop environment or OS. 
''' if sys.platform in ['win32', 'cygwin']: return 'windows' elif sys.platform == 'darwin': return 'mac' else: desktop_session = os.environ.get( 'XDG_CURRENT_DESKTOP') or os.environ.get('DESKTOP_SESSION') if desktop_session is not None: desktop_session = desktop_session.lower() # Fix for X-Cinnamon etc if desktop_session.startswith('x-'): desktop_session = desktop_session.replace('x-', '') if desktop_session in ['gnome', 'unity', 'cinnamon', 'mate', 'xfce4', 'lxde', 'fluxbox', 'blackbox', 'openbox', 'icewm', 'jwm', 'afterstep', 'trinity', 'kde', 'pantheon', 'i3', 'lxqt', 'awesome', 'enlightenment']: return desktop_session #-- Special cases --# # Canonical sets environment var to Lubuntu rather than # LXDE if using LXDE. # There is no guarantee that they will not do the same # with the other desktop environments. elif 'xfce' in desktop_session: return 'xfce4' elif desktop_session.startswith('ubuntu'): return 'unity' elif desktop_session.startswith('xubuntu'): return 'xfce4' elif desktop_session.startswith('lubuntu'): return 'lxde' elif desktop_session.startswith('kubuntu'): return 'kde' elif desktop_session.startswith('razor'): return 'razor-qt' elif desktop_session.startswith('wmaker'): return 'windowmaker' if os.environ.get('KDE_FULL_SESSION') == 'true': return 'kde' elif os.environ.get('GNOME_DESKTOP_SESSION_ID'): if not 'deprecated' in os.environ.get('GNOME_DESKTOP_SESSION_ID'): return 'gnome2' elif is_running('xfce-mcs-manage'): return 'xfce4' elif is_running('ksmserver'): return 'kde' return 'unknown'
python
def get_name(): '''Get desktop environment or OS. Get the OS name or desktop environment. **List of Possible Values** +-------------------------+---------------+ | Windows | windows | +-------------------------+---------------+ | Mac OS X | mac | +-------------------------+---------------+ | GNOME 3+ | gnome | +-------------------------+---------------+ | GNOME 2 | gnome2 | +-------------------------+---------------+ | XFCE | xfce4 | +-------------------------+---------------+ | KDE | kde | +-------------------------+---------------+ | Unity | unity | +-------------------------+---------------+ | LXDE | lxde | +-------------------------+---------------+ | i3wm | i3 | +-------------------------+---------------+ | \*box | \*box | +-------------------------+---------------+ | Trinity (KDE 3 fork) | trinity | +-------------------------+---------------+ | MATE | mate | +-------------------------+---------------+ | IceWM | icewm | +-------------------------+---------------+ | Pantheon (elementaryOS) | pantheon | +-------------------------+---------------+ | LXQt | lxqt | +-------------------------+---------------+ | Awesome WM | awesome | +-------------------------+---------------+ | Enlightenment | enlightenment | +-------------------------+---------------+ | AfterStep | afterstep | +-------------------------+---------------+ | WindowMaker | windowmaker | +-------------------------+---------------+ | [Other] | unknown | +-------------------------+---------------+ Returns: str: The name of the desktop environment or OS. 
''' if sys.platform in ['win32', 'cygwin']: return 'windows' elif sys.platform == 'darwin': return 'mac' else: desktop_session = os.environ.get( 'XDG_CURRENT_DESKTOP') or os.environ.get('DESKTOP_SESSION') if desktop_session is not None: desktop_session = desktop_session.lower() # Fix for X-Cinnamon etc if desktop_session.startswith('x-'): desktop_session = desktop_session.replace('x-', '') if desktop_session in ['gnome', 'unity', 'cinnamon', 'mate', 'xfce4', 'lxde', 'fluxbox', 'blackbox', 'openbox', 'icewm', 'jwm', 'afterstep', 'trinity', 'kde', 'pantheon', 'i3', 'lxqt', 'awesome', 'enlightenment']: return desktop_session #-- Special cases --# # Canonical sets environment var to Lubuntu rather than # LXDE if using LXDE. # There is no guarantee that they will not do the same # with the other desktop environments. elif 'xfce' in desktop_session: return 'xfce4' elif desktop_session.startswith('ubuntu'): return 'unity' elif desktop_session.startswith('xubuntu'): return 'xfce4' elif desktop_session.startswith('lubuntu'): return 'lxde' elif desktop_session.startswith('kubuntu'): return 'kde' elif desktop_session.startswith('razor'): return 'razor-qt' elif desktop_session.startswith('wmaker'): return 'windowmaker' if os.environ.get('KDE_FULL_SESSION') == 'true': return 'kde' elif os.environ.get('GNOME_DESKTOP_SESSION_ID'): if not 'deprecated' in os.environ.get('GNOME_DESKTOP_SESSION_ID'): return 'gnome2' elif is_running('xfce-mcs-manage'): return 'xfce4' elif is_running('ksmserver'): return 'kde' return 'unknown'
[ "def", "get_name", "(", ")", ":", "if", "sys", ".", "platform", "in", "[", "'win32'", ",", "'cygwin'", "]", ":", "return", "'windows'", "elif", "sys", ".", "platform", "==", "'darwin'", ":", "return", "'mac'", "else", ":", "desktop_session", "=", "os", ...
Get desktop environment or OS. Get the OS name or desktop environment. **List of Possible Values** +-------------------------+---------------+ | Windows | windows | +-------------------------+---------------+ | Mac OS X | mac | +-------------------------+---------------+ | GNOME 3+ | gnome | +-------------------------+---------------+ | GNOME 2 | gnome2 | +-------------------------+---------------+ | XFCE | xfce4 | +-------------------------+---------------+ | KDE | kde | +-------------------------+---------------+ | Unity | unity | +-------------------------+---------------+ | LXDE | lxde | +-------------------------+---------------+ | i3wm | i3 | +-------------------------+---------------+ | \*box | \*box | +-------------------------+---------------+ | Trinity (KDE 3 fork) | trinity | +-------------------------+---------------+ | MATE | mate | +-------------------------+---------------+ | IceWM | icewm | +-------------------------+---------------+ | Pantheon (elementaryOS) | pantheon | +-------------------------+---------------+ | LXQt | lxqt | +-------------------------+---------------+ | Awesome WM | awesome | +-------------------------+---------------+ | Enlightenment | enlightenment | +-------------------------+---------------+ | AfterStep | afterstep | +-------------------------+---------------+ | WindowMaker | windowmaker | +-------------------------+---------------+ | [Other] | unknown | +-------------------------+---------------+ Returns: str: The name of the desktop environment or OS.
[ "Get", "desktop", "environment", "or", "OS", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/system.py#L56-L174
bharadwaj-raju/libdesktop
libdesktop/system.py
is_in_path
def is_in_path(program): ''' Check if a program is in the system ``PATH``. Checks if a given program is in the user's ``PATH`` or not. Args: program (str): The program to try to find in ``PATH``. Returns: bool: Is the program in ``PATH``? ''' if sys.version_info.major == 2: path = os.getenv('PATH') if os.name == 'nt': path = path.split(';') else: path = path.split(':') else: path = os.get_exec_path() for i in path: if os.path.isdir(i): if program in os.listdir(i): return True
python
def is_in_path(program): ''' Check if a program is in the system ``PATH``. Checks if a given program is in the user's ``PATH`` or not. Args: program (str): The program to try to find in ``PATH``. Returns: bool: Is the program in ``PATH``? ''' if sys.version_info.major == 2: path = os.getenv('PATH') if os.name == 'nt': path = path.split(';') else: path = path.split(':') else: path = os.get_exec_path() for i in path: if os.path.isdir(i): if program in os.listdir(i): return True
[ "def", "is_in_path", "(", "program", ")", ":", "if", "sys", ".", "version_info", ".", "major", "==", "2", ":", "path", "=", "os", ".", "getenv", "(", "'PATH'", ")", "if", "os", ".", "name", "==", "'nt'", ":", "path", "=", "path", ".", "split", "(...
Check if a program is in the system ``PATH``. Checks if a given program is in the user's ``PATH`` or not. Args: program (str): The program to try to find in ``PATH``. Returns: bool: Is the program in ``PATH``?
[ "Check", "if", "a", "program", "is", "in", "the", "system", "PATH", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/system.py#L177-L202
bharadwaj-raju/libdesktop
libdesktop/system.py
is_running
def is_running(process): ''' Check if process is running. Check if the given process name is running or not. Note: On a Linux system, kernel threads (like ``kthreadd`` etc.) are excluded. Args: process (str): The name of the process. Returns: bool: Is the process running? ''' if os.name == 'nt': process_list = get_cmd_out(['tasklist', '/v']) return process in process_list else: process_list = get_cmd_out('ps axw | awk \'{print $5}\'') for i in process_list.split('\n'): # 'COMMAND' is the column heading # [*] indicates kernel-level processes like \ # kthreadd, which manages threads in the Linux kernel if not i == 'COMMAND' or i.startswith('['): if i == process: return True elif os.path.basename(i) == process: # check i without executable path # for example, if 'process' arguments is 'sshd' # and '/usr/bin/sshd' is listed in ps, return True return True return False
python
def is_running(process): ''' Check if process is running. Check if the given process name is running or not. Note: On a Linux system, kernel threads (like ``kthreadd`` etc.) are excluded. Args: process (str): The name of the process. Returns: bool: Is the process running? ''' if os.name == 'nt': process_list = get_cmd_out(['tasklist', '/v']) return process in process_list else: process_list = get_cmd_out('ps axw | awk \'{print $5}\'') for i in process_list.split('\n'): # 'COMMAND' is the column heading # [*] indicates kernel-level processes like \ # kthreadd, which manages threads in the Linux kernel if not i == 'COMMAND' or i.startswith('['): if i == process: return True elif os.path.basename(i) == process: # check i without executable path # for example, if 'process' arguments is 'sshd' # and '/usr/bin/sshd' is listed in ps, return True return True return False
[ "def", "is_running", "(", "process", ")", ":", "if", "os", ".", "name", "==", "'nt'", ":", "process_list", "=", "get_cmd_out", "(", "[", "'tasklist'", ",", "'/v'", "]", ")", "return", "process", "in", "process_list", "else", ":", "process_list", "=", "ge...
Check if process is running. Check if the given process name is running or not. Note: On a Linux system, kernel threads (like ``kthreadd`` etc.) are excluded. Args: process (str): The name of the process. Returns: bool: Is the process running?
[ "Check", "if", "process", "is", "running", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/system.py#L205-L243
theonion/django-bulbs
bulbs/content/filters.py
parse_datetime
def parse_datetime(value): """Returns a datetime object for a given argument This helps to convert strings, dates and datetimes to proper tz-enabled datetime objects.""" if isinstance(value, (string_types, text_type, binary_type)): value = dateutil.parser.parse(value) value.replace(tzinfo=dateutil.tz.tzutc()) return value elif isinstance(value, datetime.datetime): value.replace(tzinfo=dateutil.tz.tzutc()) return value elif isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) value.replace(tzinfo=dateutil.tz.tzutc()) return value else: raise ValueError('Value must be parsable to datetime object. Got `{}`'.format(type(value)))
python
def parse_datetime(value): """Returns a datetime object for a given argument This helps to convert strings, dates and datetimes to proper tz-enabled datetime objects.""" if isinstance(value, (string_types, text_type, binary_type)): value = dateutil.parser.parse(value) value.replace(tzinfo=dateutil.tz.tzutc()) return value elif isinstance(value, datetime.datetime): value.replace(tzinfo=dateutil.tz.tzutc()) return value elif isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) value.replace(tzinfo=dateutil.tz.tzutc()) return value else: raise ValueError('Value must be parsable to datetime object. Got `{}`'.format(type(value)))
[ "def", "parse_datetime", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "string_types", ",", "text_type", ",", "binary_type", ")", ")", ":", "value", "=", "dateutil", ".", "parser", ".", "parse", "(", "value", ")", "value", ".", "r...
Returns a datetime object for a given argument This helps to convert strings, dates and datetimes to proper tz-enabled datetime objects.
[ "Returns", "a", "datetime", "object", "for", "a", "given", "argument" ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/content/filters.py#L18-L36
theonion/django-bulbs
bulbs/content/filters.py
NegateQueryFilter
def NegateQueryFilter(es_query): # noqa """ Return a filter removing the contents of the provided query. """ query = es_query.to_dict().get("query", {}) filtered = query.get("filtered", {}) negated_filter = filtered.get("filter", {}) return Not(**negated_filter)
python
def NegateQueryFilter(es_query): # noqa """ Return a filter removing the contents of the provided query. """ query = es_query.to_dict().get("query", {}) filtered = query.get("filtered", {}) negated_filter = filtered.get("filter", {}) return Not(**negated_filter)
[ "def", "NegateQueryFilter", "(", "es_query", ")", ":", "# noqa", "query", "=", "es_query", ".", "to_dict", "(", ")", ".", "get", "(", "\"query\"", ",", "{", "}", ")", "filtered", "=", "query", ".", "get", "(", "\"filtered\"", ",", "{", "}", ")", "neg...
Return a filter removing the contents of the provided query.
[ "Return", "a", "filter", "removing", "the", "contents", "of", "the", "provided", "query", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/content/filters.py#L92-L99
anomaly/prestans
prestans/rest/request.py
Request.set_deserializer_by_mime_type
def set_deserializer_by_mime_type(self, mime_type): """ :param mime_type: :return: Used by content_type_set to set get a reference to the serializer object """ for deserializer in self._deserializers: if deserializer.content_type() == mime_type: self._selected_deserializer = deserializer return raise exception.UnsupportedContentTypeError(mime_type, self.supported_mime_types_str)
python
def set_deserializer_by_mime_type(self, mime_type): """ :param mime_type: :return: Used by content_type_set to set get a reference to the serializer object """ for deserializer in self._deserializers: if deserializer.content_type() == mime_type: self._selected_deserializer = deserializer return raise exception.UnsupportedContentTypeError(mime_type, self.supported_mime_types_str)
[ "def", "set_deserializer_by_mime_type", "(", "self", ",", "mime_type", ")", ":", "for", "deserializer", "in", "self", ".", "_deserializers", ":", "if", "deserializer", ".", "content_type", "(", ")", "==", "mime_type", ":", "self", ".", "_selected_deserializer", ...
:param mime_type: :return: Used by content_type_set to set get a reference to the serializer object
[ ":", "param", "mime_type", ":", ":", "return", ":" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/rest/request.py#L70-L83
anomaly/prestans
prestans/rest/request.py
Request.body_template
def body_template(self, value): """ Must be an instance of a prestans.types.DataCollection subclass; this is generally set during the RequestHandler lifecycle. Setting this spwans the parsing process of the body. If the HTTP verb is GET an AssertionError is thrown. Use with extreme caution. """ if self.method == VERB.GET: raise AssertionError("body_template cannot be set for GET requests") if value is None: self.logger.warning("body_template is None, parsing will be ignored") return if not isinstance(value, DataCollection): msg = "body_template must be an instance of %s.%s" % ( DataCollection.__module__, DataCollection.__name__ ) raise AssertionError(msg) self._body_template = value # get a deserializer based on the Content-Type header # do this here so the handler gets a chance to setup extra serializers self.set_deserializer_by_mime_type(self.content_type)
python
def body_template(self, value): """ Must be an instance of a prestans.types.DataCollection subclass; this is generally set during the RequestHandler lifecycle. Setting this spwans the parsing process of the body. If the HTTP verb is GET an AssertionError is thrown. Use with extreme caution. """ if self.method == VERB.GET: raise AssertionError("body_template cannot be set for GET requests") if value is None: self.logger.warning("body_template is None, parsing will be ignored") return if not isinstance(value, DataCollection): msg = "body_template must be an instance of %s.%s" % ( DataCollection.__module__, DataCollection.__name__ ) raise AssertionError(msg) self._body_template = value # get a deserializer based on the Content-Type header # do this here so the handler gets a chance to setup extra serializers self.set_deserializer_by_mime_type(self.content_type)
[ "def", "body_template", "(", "self", ",", "value", ")", ":", "if", "self", ".", "method", "==", "VERB", ".", "GET", ":", "raise", "AssertionError", "(", "\"body_template cannot be set for GET requests\"", ")", "if", "value", "is", "None", ":", "self", ".", "...
Must be an instance of a prestans.types.DataCollection subclass; this is generally set during the RequestHandler lifecycle. Setting this spwans the parsing process of the body. If the HTTP verb is GET an AssertionError is thrown. Use with extreme caution.
[ "Must", "be", "an", "instance", "of", "a", "prestans", ".", "types", ".", "DataCollection", "subclass", ";", "this", "is", "generally", "set", "during", "the", "RequestHandler", "lifecycle", ".", "Setting", "this", "spwans", "the", "parsing", "process", "of", ...
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/rest/request.py#L116-L142
anomaly/prestans
prestans/rest/request.py
Request.get_response_attribute_filter
def get_response_attribute_filter(self, template_filter, template_model=None): """ Prestans-Response-Attribute-List can contain a client's requested definition for attributes required in the response. This should match the response_attribute_filter_template? :param template_filter: :param template_model: the expected model that this filter corresponds to :return: :rtype: None | AttributeFilter """ if template_filter is None: return None if 'Prestans-Response-Attribute-List' not in self.headers: return None # header not set results in a None attribute_list_str = self.headers['Prestans-Response-Attribute-List'] # deserialize the header contents json_deserializer = deserializer.JSON() attribute_list_dictionary = json_deserializer.loads(attribute_list_str) # construct an AttributeFilter attribute_filter = AttributeFilter( from_dictionary=attribute_list_dictionary, template_model=template_model ) #: Check template? Do this even through we might have template_model #: in case users have made a custom filter evaluated_filter = attribute_filter.conforms_to_template_filter(template_filter) return evaluated_filter
python
def get_response_attribute_filter(self, template_filter, template_model=None): """ Prestans-Response-Attribute-List can contain a client's requested definition for attributes required in the response. This should match the response_attribute_filter_template? :param template_filter: :param template_model: the expected model that this filter corresponds to :return: :rtype: None | AttributeFilter """ if template_filter is None: return None if 'Prestans-Response-Attribute-List' not in self.headers: return None # header not set results in a None attribute_list_str = self.headers['Prestans-Response-Attribute-List'] # deserialize the header contents json_deserializer = deserializer.JSON() attribute_list_dictionary = json_deserializer.loads(attribute_list_str) # construct an AttributeFilter attribute_filter = AttributeFilter( from_dictionary=attribute_list_dictionary, template_model=template_model ) #: Check template? Do this even through we might have template_model #: in case users have made a custom filter evaluated_filter = attribute_filter.conforms_to_template_filter(template_filter) return evaluated_filter
[ "def", "get_response_attribute_filter", "(", "self", ",", "template_filter", ",", "template_model", "=", "None", ")", ":", "if", "template_filter", "is", "None", ":", "return", "None", "if", "'Prestans-Response-Attribute-List'", "not", "in", "self", ".", "headers", ...
Prestans-Response-Attribute-List can contain a client's requested definition for attributes required in the response. This should match the response_attribute_filter_template? :param template_filter: :param template_model: the expected model that this filter corresponds to :return: :rtype: None | AttributeFilter
[ "Prestans", "-", "Response", "-", "Attribute", "-", "List", "can", "contain", "a", "client", "s", "requested", "definition", "for", "attributes", "required", "in", "the", "response", ".", "This", "should", "match", "the", "response_attribute_filter_template?" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/rest/request.py#L174-L209
shmir/PyIxNetwork
ixnetwork/api/ixn_python.py
IxnPythonWrapper.add
def add(self, parent, obj_type, **attributes): """ IXN API add command @param parent: object parent - object will be created under this parent. @param object_type: object type. @param attributes: additional attributes. @return: IXN object reference. """ return self.ixn.add(parent.obj_ref(), obj_type, *self._get_args_list(**attributes))
python
def add(self, parent, obj_type, **attributes): """ IXN API add command @param parent: object parent - object will be created under this parent. @param object_type: object type. @param attributes: additional attributes. @return: IXN object reference. """ return self.ixn.add(parent.obj_ref(), obj_type, *self._get_args_list(**attributes))
[ "def", "add", "(", "self", ",", "parent", ",", "obj_type", ",", "*", "*", "attributes", ")", ":", "return", "self", ".", "ixn", ".", "add", "(", "parent", ".", "obj_ref", "(", ")", ",", "obj_type", ",", "*", "self", ".", "_get_args_list", "(", "*",...
IXN API add command @param parent: object parent - object will be created under this parent. @param object_type: object type. @param attributes: additional attributes. @return: IXN object reference.
[ "IXN", "API", "add", "command" ]
train
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/api/ixn_python.py#L88-L97
jaraco/hgtools
hgtools/managers/cmd.py
Command.version
def version(self): """ Return the underlying version """ lines = iter(self._invoke('version').splitlines()) version = next(lines).strip() return self._parse_version(version)
python
def version(self): """ Return the underlying version """ lines = iter(self._invoke('version').splitlines()) version = next(lines).strip() return self._parse_version(version)
[ "def", "version", "(", "self", ")", ":", "lines", "=", "iter", "(", "self", ".", "_invoke", "(", "'version'", ")", ".", "splitlines", "(", ")", ")", "version", "=", "next", "(", "lines", ")", ".", "strip", "(", ")", "return", "self", ".", "_parse_v...
Return the underlying version
[ "Return", "the", "underlying", "version" ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/managers/cmd.py#L24-L30
jaraco/hgtools
hgtools/managers/cmd.py
Mercurial.find_files
def find_files(self): """ Find versioned files in self.location """ all_files = self._invoke('locate', '-I', '.').splitlines() # now we have a list of all files in self.location relative to # self.find_root() # Remove the parent dirs from them. from_root = os.path.relpath(self.location, self.find_root()) loc_rel_paths = [ os.path.relpath(path, from_root) for path in all_files] return loc_rel_paths
python
def find_files(self): """ Find versioned files in self.location """ all_files = self._invoke('locate', '-I', '.').splitlines() # now we have a list of all files in self.location relative to # self.find_root() # Remove the parent dirs from them. from_root = os.path.relpath(self.location, self.find_root()) loc_rel_paths = [ os.path.relpath(path, from_root) for path in all_files] return loc_rel_paths
[ "def", "find_files", "(", "self", ")", ":", "all_files", "=", "self", ".", "_invoke", "(", "'locate'", ",", "'-I'", ",", "'.'", ")", ".", "splitlines", "(", ")", "# now we have a list of all files in self.location relative to", "# self.find_root()", "# Remove the par...
Find versioned files in self.location
[ "Find", "versioned", "files", "in", "self", ".", "location" ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/managers/cmd.py#L47-L59
jaraco/hgtools
hgtools/managers/cmd.py
Mercurial.get_tags
def get_tags(self, rev=None): """ Get the tags for the given revision specifier (or the current revision if not specified). """ rev_num = self._get_rev_num(rev) # rev_num might end with '+', indicating local modifications. return ( set(self._read_tags_for_rev(rev_num)) if not rev_num.endswith('+') else set([]) )
python
def get_tags(self, rev=None): """ Get the tags for the given revision specifier (or the current revision if not specified). """ rev_num = self._get_rev_num(rev) # rev_num might end with '+', indicating local modifications. return ( set(self._read_tags_for_rev(rev_num)) if not rev_num.endswith('+') else set([]) )
[ "def", "get_tags", "(", "self", ",", "rev", "=", "None", ")", ":", "rev_num", "=", "self", ".", "_get_rev_num", "(", "rev", ")", "# rev_num might end with '+', indicating local modifications.", "return", "(", "set", "(", "self", ".", "_read_tags_for_rev", "(", "...
Get the tags for the given revision specifier (or the current revision if not specified).
[ "Get", "the", "tags", "for", "the", "given", "revision", "specifier", "(", "or", "the", "current", "revision", "if", "not", "specified", ")", "." ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/managers/cmd.py#L73-L84
jaraco/hgtools
hgtools/managers/cmd.py
Mercurial._read_tags_for_revset
def _read_tags_for_revset(self, spec): """ Return TaggedRevision for each tag/rev combination in the revset spec """ cmd = [ 'log', '--style', 'default', '--config', 'defaults.log=', '-r', spec] res = self._invoke(*cmd) header_pattern = re.compile(r'(?P<header>\w+?):\s+(?P<value>.*)') match_res = map(header_pattern.match, res.splitlines()) matched_lines = filter(None, match_res) matches = (match.groupdict() for match in matched_lines) for match in matches: if match['header'] == 'changeset': id, sep, rev = match['value'].partition(':') if match['header'] == 'tag': tag = match['value'] yield TaggedRevision(tag, rev)
python
def _read_tags_for_revset(self, spec): """ Return TaggedRevision for each tag/rev combination in the revset spec """ cmd = [ 'log', '--style', 'default', '--config', 'defaults.log=', '-r', spec] res = self._invoke(*cmd) header_pattern = re.compile(r'(?P<header>\w+?):\s+(?P<value>.*)') match_res = map(header_pattern.match, res.splitlines()) matched_lines = filter(None, match_res) matches = (match.groupdict() for match in matched_lines) for match in matches: if match['header'] == 'changeset': id, sep, rev = match['value'].partition(':') if match['header'] == 'tag': tag = match['value'] yield TaggedRevision(tag, rev)
[ "def", "_read_tags_for_revset", "(", "self", ",", "spec", ")", ":", "cmd", "=", "[", "'log'", ",", "'--style'", ",", "'default'", ",", "'--config'", ",", "'defaults.log='", ",", "'-r'", ",", "spec", "]", "res", "=", "self", ".", "_invoke", "(", "*", "c...
Return TaggedRevision for each tag/rev combination in the revset spec
[ "Return", "TaggedRevision", "for", "each", "tag", "/", "rev", "combination", "in", "the", "revset", "spec" ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/managers/cmd.py#L93-L110
jaraco/hgtools
hgtools/managers/cmd.py
Mercurial._get_rev_num
def _get_rev_num(self, rev=None): """ Determine the revision number for a given revision specifier. """ # first, determine the numeric ID cmd = ['identify', '--num'] # workaround for #4 cmd.extend(['--config', 'defaults.identify=']) if rev: cmd.extend(['--rev', rev]) res = self._invoke(*cmd) return res.strip()
python
def _get_rev_num(self, rev=None): """ Determine the revision number for a given revision specifier. """ # first, determine the numeric ID cmd = ['identify', '--num'] # workaround for #4 cmd.extend(['--config', 'defaults.identify=']) if rev: cmd.extend(['--rev', rev]) res = self._invoke(*cmd) return res.strip()
[ "def", "_get_rev_num", "(", "self", ",", "rev", "=", "None", ")", ":", "# first, determine the numeric ID", "cmd", "=", "[", "'identify'", ",", "'--num'", "]", "# workaround for #4", "cmd", ".", "extend", "(", "[", "'--config'", ",", "'defaults.identify='", "]",...
Determine the revision number for a given revision specifier.
[ "Determine", "the", "revision", "number", "for", "a", "given", "revision", "specifier", "." ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/managers/cmd.py#L112-L123
jaraco/hgtools
hgtools/managers/cmd.py
Mercurial._get_tags_by_num
def _get_tags_by_num(self): """ Return a dictionary mapping revision number to tags for that number. """ by_revision = operator.attrgetter('revision') tags = sorted(self.get_tags(), key=by_revision) revision_tags = itertools.groupby(tags, key=by_revision) def get_id(rev): return rev.split(':', 1)[0] return dict( (get_id(rev), [tr.tag for tr in tr_list]) for rev, tr_list in revision_tags )
python
def _get_tags_by_num(self): """ Return a dictionary mapping revision number to tags for that number. """ by_revision = operator.attrgetter('revision') tags = sorted(self.get_tags(), key=by_revision) revision_tags = itertools.groupby(tags, key=by_revision) def get_id(rev): return rev.split(':', 1)[0] return dict( (get_id(rev), [tr.tag for tr in tr_list]) for rev, tr_list in revision_tags )
[ "def", "_get_tags_by_num", "(", "self", ")", ":", "by_revision", "=", "operator", ".", "attrgetter", "(", "'revision'", ")", "tags", "=", "sorted", "(", "self", ".", "get_tags", "(", ")", ",", "key", "=", "by_revision", ")", "revision_tags", "=", "itertool...
Return a dictionary mapping revision number to tags for that number.
[ "Return", "a", "dictionary", "mapping", "revision", "number", "to", "tags", "for", "that", "number", "." ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/managers/cmd.py#L125-L138
jaraco/hgtools
hgtools/managers/cmd.py
Git.get_tags
def get_tags(self, rev=None): """ Return the tags for the current revision as a set """ rev = rev or 'HEAD' return set(self._invoke('tag', '--points-at', rev).splitlines())
python
def get_tags(self, rev=None): """ Return the tags for the current revision as a set """ rev = rev or 'HEAD' return set(self._invoke('tag', '--points-at', rev).splitlines())
[ "def", "get_tags", "(", "self", ",", "rev", "=", "None", ")", ":", "rev", "=", "rev", "or", "'HEAD'", "return", "set", "(", "self", ".", "_invoke", "(", "'tag'", ",", "'--points-at'", ",", "rev", ")", ".", "splitlines", "(", ")", ")" ]
Return the tags for the current revision as a set
[ "Return", "the", "tags", "for", "the", "current", "revision", "as", "a", "set" ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/managers/cmd.py#L182-L187
inveniosoftware/invenio-pages
invenio_pages/models.py
Page.validate_template_name
def validate_template_name(self, key, value): """Validate template name. :param key: The template path. :param value: The template name. :raises ValueError: If template name is wrong. """ if value not in dict(current_app.config['PAGES_TEMPLATES']): raise ValueError( 'Template "{0}" does not exist.'.format(value)) return value
python
def validate_template_name(self, key, value): """Validate template name. :param key: The template path. :param value: The template name. :raises ValueError: If template name is wrong. """ if value not in dict(current_app.config['PAGES_TEMPLATES']): raise ValueError( 'Template "{0}" does not exist.'.format(value)) return value
[ "def", "validate_template_name", "(", "self", ",", "key", ",", "value", ")", ":", "if", "value", "not", "in", "dict", "(", "current_app", ".", "config", "[", "'PAGES_TEMPLATES'", "]", ")", ":", "raise", "ValueError", "(", "'Template \"{0}\" does not exist.'", ...
Validate template name. :param key: The template path. :param value: The template name. :raises ValueError: If template name is wrong.
[ "Validate", "template", "name", "." ]
train
https://github.com/inveniosoftware/invenio-pages/blob/8d544d72fb4c22b7134c521f435add0abed42544/invenio_pages/models.py#L71-L81
bioasp/caspo
caspo/classify.py
Classifier.classify
def classify(self, n_jobs=-1, configure=None): """ Returns input-output behaviors for the list of logical networks in the attribute :attr:`networks` Example:: >>> from caspo import core, classify >>> networks = core.LogicalNetworkList.from_csv('networks.csv') >>> setup = core.Setup.from_json('setup.json') >>> classifier = classify.Classifier(networks, setup) >>> behaviors = classifier.classify() >>> behaviors.to_csv('behaviors.csv', networks=True) n_jobs : int Number of jobs to run in parallel. Default to -1 (all cores available) configure : callable Callable object responsible of setting clingo configuration Returns ------- caspo.core.logicalnetwork.LogicalNetworkList The list of networks with one representative for each behavior """ start = timeit.default_timer() networks = self.networks n = len(networks) cpu = n_jobs if n_jobs > -1 else mp.cpu_count() if cpu > 1: lpart = int(np.ceil(n / float(cpu))) if n > cpu else 1 parts = networks.split(np.arange(lpart, n, lpart)) behaviors_parts = Parallel(n_jobs=n_jobs)(delayed(__learn_io__)(part, self.setup, configure) for part in parts) networks = core.LogicalNetworkList.from_hypergraph(networks.hg) for behavior in behaviors_parts: networks = networks.concat(behavior) behaviors = __learn_io__(networks, self.setup, configure) self.stats['time_io'] = timeit.default_timer() - start self._logger.info("%s input-output logical behaviors found in %.4fs", len(behaviors), self.stats['time_io']) return behaviors
python
def classify(self, n_jobs=-1, configure=None): """ Returns input-output behaviors for the list of logical networks in the attribute :attr:`networks` Example:: >>> from caspo import core, classify >>> networks = core.LogicalNetworkList.from_csv('networks.csv') >>> setup = core.Setup.from_json('setup.json') >>> classifier = classify.Classifier(networks, setup) >>> behaviors = classifier.classify() >>> behaviors.to_csv('behaviors.csv', networks=True) n_jobs : int Number of jobs to run in parallel. Default to -1 (all cores available) configure : callable Callable object responsible of setting clingo configuration Returns ------- caspo.core.logicalnetwork.LogicalNetworkList The list of networks with one representative for each behavior """ start = timeit.default_timer() networks = self.networks n = len(networks) cpu = n_jobs if n_jobs > -1 else mp.cpu_count() if cpu > 1: lpart = int(np.ceil(n / float(cpu))) if n > cpu else 1 parts = networks.split(np.arange(lpart, n, lpart)) behaviors_parts = Parallel(n_jobs=n_jobs)(delayed(__learn_io__)(part, self.setup, configure) for part in parts) networks = core.LogicalNetworkList.from_hypergraph(networks.hg) for behavior in behaviors_parts: networks = networks.concat(behavior) behaviors = __learn_io__(networks, self.setup, configure) self.stats['time_io'] = timeit.default_timer() - start self._logger.info("%s input-output logical behaviors found in %.4fs", len(behaviors), self.stats['time_io']) return behaviors
[ "def", "classify", "(", "self", ",", "n_jobs", "=", "-", "1", ",", "configure", "=", "None", ")", ":", "start", "=", "timeit", ".", "default_timer", "(", ")", "networks", "=", "self", ".", "networks", "n", "=", "len", "(", "networks", ")", "cpu", "...
Returns input-output behaviors for the list of logical networks in the attribute :attr:`networks` Example:: >>> from caspo import core, classify >>> networks = core.LogicalNetworkList.from_csv('networks.csv') >>> setup = core.Setup.from_json('setup.json') >>> classifier = classify.Classifier(networks, setup) >>> behaviors = classifier.classify() >>> behaviors.to_csv('behaviors.csv', networks=True) n_jobs : int Number of jobs to run in parallel. Default to -1 (all cores available) configure : callable Callable object responsible of setting clingo configuration Returns ------- caspo.core.logicalnetwork.LogicalNetworkList The list of networks with one representative for each behavior
[ "Returns", "input", "-", "output", "behaviors", "for", "the", "list", "of", "logical", "networks", "in", "the", "attribute", ":", "attr", ":", "networks" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/classify.py#L93-L141
PGower/PyCanvas
pycanvas/apis/user_observees.py
UserObserveesAPI.add_observee_with_credentials
def add_observee_with_credentials(self, user_id, access_token=None, observee_password=None, observee_unique_id=None): """ Add an observee with credentials. Register the given user to observe another user, given the observee's credentials. *Note:* all users are allowed to add their own observees, given the observee's credentials or access token are provided. Administrators can add observees given credentials, access token or the {api:UserObserveesController#update observee's id}. """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # OPTIONAL - observee[unique_id] """The login id for the user to observe. Required if access_token is omitted.""" if observee_unique_id is not None: data["observee[unique_id]"] = observee_unique_id # OPTIONAL - observee[password] """The password for the user to observe. Required if access_token is omitted.""" if observee_password is not None: data["observee[password]"] = observee_password # OPTIONAL - access_token """The access token for the user to observe. Required if <tt>observee[unique_id]</tt> or <tt>observee[password]</tt> are omitted.""" if access_token is not None: data["access_token"] = access_token self.logger.debug("POST /api/v1/users/{user_id}/observees with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/users/{user_id}/observees".format(**path), data=data, params=params, single_item=True)
python
def add_observee_with_credentials(self, user_id, access_token=None, observee_password=None, observee_unique_id=None): """ Add an observee with credentials. Register the given user to observe another user, given the observee's credentials. *Note:* all users are allowed to add their own observees, given the observee's credentials or access token are provided. Administrators can add observees given credentials, access token or the {api:UserObserveesController#update observee's id}. """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # OPTIONAL - observee[unique_id] """The login id for the user to observe. Required if access_token is omitted.""" if observee_unique_id is not None: data["observee[unique_id]"] = observee_unique_id # OPTIONAL - observee[password] """The password for the user to observe. Required if access_token is omitted.""" if observee_password is not None: data["observee[password]"] = observee_password # OPTIONAL - access_token """The access token for the user to observe. Required if <tt>observee[unique_id]</tt> or <tt>observee[password]</tt> are omitted.""" if access_token is not None: data["access_token"] = access_token self.logger.debug("POST /api/v1/users/{user_id}/observees with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/users/{user_id}/observees".format(**path), data=data, params=params, single_item=True)
[ "def", "add_observee_with_credentials", "(", "self", ",", "user_id", ",", "access_token", "=", "None", ",", "observee_password", "=", "None", ",", "observee_unique_id", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{"...
Add an observee with credentials. Register the given user to observe another user, given the observee's credentials. *Note:* all users are allowed to add their own observees, given the observee's credentials or access token are provided. Administrators can add observees given credentials, access token or the {api:UserObserveesController#update observee's id}.
[ "Add", "an", "observee", "with", "credentials", ".", "Register", "the", "given", "user", "to", "observe", "another", "user", "given", "the", "observee", "s", "credentials", ".", "*", "Note", ":", "*", "all", "users", "are", "allowed", "to", "add", "their",...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/user_observees.py#L45-L79
PGower/PyCanvas
pycanvas/apis/user_observees.py
UserObserveesAPI.show_observee
def show_observee(self, user_id, observee_id): """ Show an observee. Gets information about an observed user. *Note:* all users are allowed to view their own observees. """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # REQUIRED - PATH - observee_id """ID""" path["observee_id"] = observee_id self.logger.debug("GET /api/v1/users/{user_id}/observees/{observee_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/users/{user_id}/observees/{observee_id}".format(**path), data=data, params=params, single_item=True)
python
def show_observee(self, user_id, observee_id): """ Show an observee. Gets information about an observed user. *Note:* all users are allowed to view their own observees. """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # REQUIRED - PATH - observee_id """ID""" path["observee_id"] = observee_id self.logger.debug("GET /api/v1/users/{user_id}/observees/{observee_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/users/{user_id}/observees/{observee_id}".format(**path), data=data, params=params, single_item=True)
[ "def", "show_observee", "(", "self", ",", "user_id", ",", "observee_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - user_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"user_id\"", "]", "=", "user_id", "#...
Show an observee. Gets information about an observed user. *Note:* all users are allowed to view their own observees.
[ "Show", "an", "observee", ".", "Gets", "information", "about", "an", "observed", "user", ".", "*", "Note", ":", "*", "all", "users", "are", "allowed", "to", "view", "their", "own", "observees", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/user_observees.py#L81-L102
bharadwaj-raju/libdesktop
libdesktop/startup.py
add_item
def add_item(name, command, system_wide=False): '''Adds a program to startup. Adds a program to user startup. Args: name (str) : The name of the startup entry. command (str) : The command to run. system_wide (bool): Add to system-wide startup. Note: ``system_wide`` requires superuser/admin privileges. ''' desktop_env = system.get_name() if os.path.isfile(command): command_is_file = True if not desktop_env == 'windows': # Will not exit program if insufficient permissions sp.Popen(['chmod +x %s' % command], shell=True) if desktop_env == 'windows': import winreg if system_wide: startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\Windows\\Start Menu\\Programs\\Startup') else: startup_dir = os.path.join(get_config_dir()[0], 'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup') if not command_is_file: with open(os.path.join(startup_dir, name + '.bat'), 'w') as f: f.write(command) else: shutil.copy(command, startup_dir) elif desktop_env == 'mac': sp.Popen(['launchctl submit -l %s -- %s'] % (name, command), shell=True) # system-wide will be handled by running the above as root # which will auto-happen if current process is root. else: # Linux/Unix if desktop_env == 'unknown': # CLI if system_wide: login_file = '/etc/profile' else: login_file = os.path.expanduser('~/.profile') with open(login_file, 'a') as f: f.write(command) else: try: desktop_file_name = name + '.desktop' startup_file = os.path.join(get_config_dir('autostart', system_wide=system_wide)[0], desktop_file_name) # .desktop files' Terminal option uses an independent method to find terminal emulator desktop_str = desktopfile.construct(name=name, exec_=command, additional_opts={'X-GNOME-Autostart-enabled': 'true'}) with open(startup_file, 'w') as f: f.write(desktop_str) except: pass
python
def add_item(name, command, system_wide=False): '''Adds a program to startup. Adds a program to user startup. Args: name (str) : The name of the startup entry. command (str) : The command to run. system_wide (bool): Add to system-wide startup. Note: ``system_wide`` requires superuser/admin privileges. ''' desktop_env = system.get_name() if os.path.isfile(command): command_is_file = True if not desktop_env == 'windows': # Will not exit program if insufficient permissions sp.Popen(['chmod +x %s' % command], shell=True) if desktop_env == 'windows': import winreg if system_wide: startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\Windows\\Start Menu\\Programs\\Startup') else: startup_dir = os.path.join(get_config_dir()[0], 'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup') if not command_is_file: with open(os.path.join(startup_dir, name + '.bat'), 'w') as f: f.write(command) else: shutil.copy(command, startup_dir) elif desktop_env == 'mac': sp.Popen(['launchctl submit -l %s -- %s'] % (name, command), shell=True) # system-wide will be handled by running the above as root # which will auto-happen if current process is root. else: # Linux/Unix if desktop_env == 'unknown': # CLI if system_wide: login_file = '/etc/profile' else: login_file = os.path.expanduser('~/.profile') with open(login_file, 'a') as f: f.write(command) else: try: desktop_file_name = name + '.desktop' startup_file = os.path.join(get_config_dir('autostart', system_wide=system_wide)[0], desktop_file_name) # .desktop files' Terminal option uses an independent method to find terminal emulator desktop_str = desktopfile.construct(name=name, exec_=command, additional_opts={'X-GNOME-Autostart-enabled': 'true'}) with open(startup_file, 'w') as f: f.write(desktop_str) except: pass
[ "def", "add_item", "(", "name", ",", "command", ",", "system_wide", "=", "False", ")", ":", "desktop_env", "=", "system", ".", "get_name", "(", ")", "if", "os", ".", "path", ".", "isfile", "(", "command", ")", ":", "command_is_file", "=", "True", "if",...
Adds a program to startup. Adds a program to user startup. Args: name (str) : The name of the startup entry. command (str) : The command to run. system_wide (bool): Add to system-wide startup. Note: ``system_wide`` requires superuser/admin privileges.
[ "Adds", "a", "program", "to", "startup", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/startup.py#L38-L107
bharadwaj-raju/libdesktop
libdesktop/startup.py
list_items
def list_items(system_wide=False): '''List startup programs. List the programs set to run at startup. Args: system_wide (bool): Gets the programs that run at system-wide startup. Returns: list: A list of dictionaries in this format: .. code-block:: python { 'name': 'The name of the entry.', 'command': 'The command used to run it.' } ''' desktop_env = system.get_name() result = [] if desktop_env == 'windows': sys_startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\Windows\\Start Menu\\Programs\\Startup') user_startup_dir = os.path.join(get_config_dir()[0], 'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup') startup_dir = sys_startup_dir if system_wide else user_startup_dir for file in os.listdir(startup_dir): file_path = os.path.join(startup_dir, file) result.append({ 'name': file, 'command': os.path.join(startup_dir, file) }) elif desktop_env == 'mac': items_list = system.get_cmd_out('launchtl list | awk \'{print $3}\'') for item in items_list.split('\n'): # launchd stores each job as a .plist file (pseudo-xml) launchd_plist_paths = ['~/Library/LaunchAgents', '/Library/LaunchAgents', '/Library/LaunchDaemons', '/System/Library/LaunchAgents', '/System/Library/LaunchDaemons'] for path in launchd_plist_paths: if item + '.plist' in os.listdir(path): plist_file = os.path.join(path, item + '.plist') # Parse the plist if sys.version_info.major == 2: plist_parsed = plistlib.readPlist(plist_file) else: with open(plist_file) as f: plist_parsed = plistlib.load(f) if 'Program' in plist_parsed: cmd = plist_parsed['Program'] if 'ProgramArguments' in plist_parsed: cmd += ' '.join(plist_parsed['ProgramArguments']) elif 'ProgramArguments' in plist_parsed: cmd = ' '.join(plist_parsed['ProgramArguments']) else: cmd = '' result.append({ 'name': item, 'command': cmd }) # system-wide will be handled by running the above as root # which will auto-happen if current process is root. 
else: # Linux/Unix # CLI profile = os.path.expanduser('~/.profile') if os.path.isfile(profile): with open(profile) as f: for line in f: if system.is_in_path(line.lstrip().split(' ')[0]): cmd_name = line.lstrip().split(' ')[0] result.append({ 'name': cmd_name, 'command': line.strip() }) # /etc/profile.d if system_wide: if os.path.isdir('/etc/profile.d'): for file in os.listdir('/etc/profile.d'): file_path = os.path.join('/etc/profile.d', file) result.append({ 'name': file, 'command': 'sh %s' % file_path }) # GUI try: startup_dir = directories.get_config_dir('autostart', system_wide=system_wide)[0] for file in os.listdir(startup_dir): file_parsed = desktopfile.parse(os.path.join(startup_dir, file)) if 'Name' in file_parsed: name = file_parsed['Name'] else: name = file.replace('.desktop', '') if 'Exec' in file_parsed: if file_parsed['Terminal']: cmd = applications.terminal(exec_=file_parsed['Exec'], return_cmd=True) else: cmd = file_parsed['Exec'] else: cmd = '' if not file_parsed.get('Hidden', False): result.append({ 'name': name, 'command': cmd }) except IndexError: pass return result
python
def list_items(system_wide=False): '''List startup programs. List the programs set to run at startup. Args: system_wide (bool): Gets the programs that run at system-wide startup. Returns: list: A list of dictionaries in this format: .. code-block:: python { 'name': 'The name of the entry.', 'command': 'The command used to run it.' } ''' desktop_env = system.get_name() result = [] if desktop_env == 'windows': sys_startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\Windows\\Start Menu\\Programs\\Startup') user_startup_dir = os.path.join(get_config_dir()[0], 'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup') startup_dir = sys_startup_dir if system_wide else user_startup_dir for file in os.listdir(startup_dir): file_path = os.path.join(startup_dir, file) result.append({ 'name': file, 'command': os.path.join(startup_dir, file) }) elif desktop_env == 'mac': items_list = system.get_cmd_out('launchtl list | awk \'{print $3}\'') for item in items_list.split('\n'): # launchd stores each job as a .plist file (pseudo-xml) launchd_plist_paths = ['~/Library/LaunchAgents', '/Library/LaunchAgents', '/Library/LaunchDaemons', '/System/Library/LaunchAgents', '/System/Library/LaunchDaemons'] for path in launchd_plist_paths: if item + '.plist' in os.listdir(path): plist_file = os.path.join(path, item + '.plist') # Parse the plist if sys.version_info.major == 2: plist_parsed = plistlib.readPlist(plist_file) else: with open(plist_file) as f: plist_parsed = plistlib.load(f) if 'Program' in plist_parsed: cmd = plist_parsed['Program'] if 'ProgramArguments' in plist_parsed: cmd += ' '.join(plist_parsed['ProgramArguments']) elif 'ProgramArguments' in plist_parsed: cmd = ' '.join(plist_parsed['ProgramArguments']) else: cmd = '' result.append({ 'name': item, 'command': cmd }) # system-wide will be handled by running the above as root # which will auto-happen if current process is root. 
else: # Linux/Unix # CLI profile = os.path.expanduser('~/.profile') if os.path.isfile(profile): with open(profile) as f: for line in f: if system.is_in_path(line.lstrip().split(' ')[0]): cmd_name = line.lstrip().split(' ')[0] result.append({ 'name': cmd_name, 'command': line.strip() }) # /etc/profile.d if system_wide: if os.path.isdir('/etc/profile.d'): for file in os.listdir('/etc/profile.d'): file_path = os.path.join('/etc/profile.d', file) result.append({ 'name': file, 'command': 'sh %s' % file_path }) # GUI try: startup_dir = directories.get_config_dir('autostart', system_wide=system_wide)[0] for file in os.listdir(startup_dir): file_parsed = desktopfile.parse(os.path.join(startup_dir, file)) if 'Name' in file_parsed: name = file_parsed['Name'] else: name = file.replace('.desktop', '') if 'Exec' in file_parsed: if file_parsed['Terminal']: cmd = applications.terminal(exec_=file_parsed['Exec'], return_cmd=True) else: cmd = file_parsed['Exec'] else: cmd = '' if not file_parsed.get('Hidden', False): result.append({ 'name': name, 'command': cmd }) except IndexError: pass return result
[ "def", "list_items", "(", "system_wide", "=", "False", ")", ":", "desktop_env", "=", "system", ".", "get_name", "(", ")", "result", "=", "[", "]", "if", "desktop_env", "==", "'windows'", ":", "sys_startup_dir", "=", "os", ".", "path", ".", "join", "(", ...
List startup programs. List the programs set to run at startup. Args: system_wide (bool): Gets the programs that run at system-wide startup. Returns: list: A list of dictionaries in this format: .. code-block:: python { 'name': 'The name of the entry.', 'command': 'The command used to run it.' }
[ "List", "startup", "programs", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/startup.py#L109-L233
bharadwaj-raju/libdesktop
libdesktop/startup.py
remove_item
def remove_item(name, system_wide=False): '''Removes a program from startup. Removes a program from startup. Args: name (str) : The name of the program (as known to the system) to remove. See :func:``list_items``. system_wide (bool): Remove it from system-wide startup. Note: ``system_wide`` requires superuser/admin privileges. ''' desktop_env = system.get_name() if desktop_env == 'windows': import winreg if system_wide: startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\Windows\\Start Menu\\Programs\\Startup') else: startup_dir = os.path.join(directories.get_config_dir()[0], 'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup') for startup_file in os.path.listdir(start_dir): if startup_file == name or startup_file.split('.')[0] == name: os.remove(os.path.join(startup_dir, startup_file)) elif desktop_env == 'mac': sp.Popen(['launchctl', 'remove', name]) # system-wide will be handled by running the above as root # which will auto-happen if current process is root. else: # Linux/Unix if desktop_env == 'unknown': # CLI if system_wide: login_file = '/etc/profile' else: login_file = os.path.expanduser('~/.profile') with open(login_file) as f: login_file_contents = f.read() final_login_file_contents = '' for line in login_file_contents.split('\n'): if line.split(' ')[0] != name: final_login_file_contents += line with open(login_file, 'w') as f: f.write(final_login_file_contents) else: try: desktop_file_name = name + '.desktop' startup_file = os.path.join(directories.get_config_dir('autostart', system_wide=system_wide)[0], desktop_file_name) if not os.path.isfile(startup_file): for possible_startup_file in os.listdir(directories.get_config_dir('autostart', system_wide=system_wide)[0]): possible_startup_file_parsed = desktopfile.parse(possible_startup_file) if possible_startup_file_parsed['Name'] == name: startup_file = possible_startup_file os.remove(startup_file) except IndexError: pass
python
def remove_item(name, system_wide=False): '''Removes a program from startup. Removes a program from startup. Args: name (str) : The name of the program (as known to the system) to remove. See :func:``list_items``. system_wide (bool): Remove it from system-wide startup. Note: ``system_wide`` requires superuser/admin privileges. ''' desktop_env = system.get_name() if desktop_env == 'windows': import winreg if system_wide: startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\Windows\\Start Menu\\Programs\\Startup') else: startup_dir = os.path.join(directories.get_config_dir()[0], 'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup') for startup_file in os.path.listdir(start_dir): if startup_file == name or startup_file.split('.')[0] == name: os.remove(os.path.join(startup_dir, startup_file)) elif desktop_env == 'mac': sp.Popen(['launchctl', 'remove', name]) # system-wide will be handled by running the above as root # which will auto-happen if current process is root. else: # Linux/Unix if desktop_env == 'unknown': # CLI if system_wide: login_file = '/etc/profile' else: login_file = os.path.expanduser('~/.profile') with open(login_file) as f: login_file_contents = f.read() final_login_file_contents = '' for line in login_file_contents.split('\n'): if line.split(' ')[0] != name: final_login_file_contents += line with open(login_file, 'w') as f: f.write(final_login_file_contents) else: try: desktop_file_name = name + '.desktop' startup_file = os.path.join(directories.get_config_dir('autostart', system_wide=system_wide)[0], desktop_file_name) if not os.path.isfile(startup_file): for possible_startup_file in os.listdir(directories.get_config_dir('autostart', system_wide=system_wide)[0]): possible_startup_file_parsed = desktopfile.parse(possible_startup_file) if possible_startup_file_parsed['Name'] == name: startup_file = possible_startup_file os.remove(startup_file) except IndexError: pass
[ "def", "remove_item", "(", "name", ",", "system_wide", "=", "False", ")", ":", "desktop_env", "=", "system", ".", "get_name", "(", ")", "if", "desktop_env", "==", "'windows'", ":", "import", "winreg", "if", "system_wide", ":", "startup_dir", "=", "os", "."...
Removes a program from startup. Removes a program from startup. Args: name (str) : The name of the program (as known to the system) to remove. See :func:``list_items``. system_wide (bool): Remove it from system-wide startup. Note: ``system_wide`` requires superuser/admin privileges.
[ "Removes", "a", "program", "from", "startup", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/startup.py#L235-L306