repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
simodalla/pygmount
thirdparty/PyZenity-0.1.7/PyZenity.py
GetDate
def GetDate(text=None, selected=None, **kwargs):
    """Prompt the user for a date.

    This will raise a Zenity Calendar Dialog for the user to pick a date.
    It will return a datetime.date object with the date or None if the
    user hit cancel.

    text     - Text to be displayed in the calendar dialog.
    selected - A datetime.date object that will be the pre-selected date.
    kwargs   - Optional command line parameters for Zenity such as
               height, width, etc."""
    # Fix the output format so the result can be parsed unambiguously below.
    args = ['--date-format=%d/%m/%Y']
    if text:
        args.append('--text=%s' % text)
    if selected:
        args.extend(['--day=%d' % selected.day,
                     '--month=%d' % selected.month,
                     '--year=%d' % selected.year])
    args.extend('--%s=%s' % pair for pair in kwargs_helper(kwargs))
    proc = run_zenity('--calendar', *args)
    # Exit status 0 means the user confirmed a date; anything else
    # (cancel/close) falls through and returns None implicitly.
    if proc.wait() == 0:
        raw = proc.stdout.read().strip()
        day_s, month_s, year_s = raw.split('/')
        return date(int(year_s), int(month_s), int(day_s))
python
def GetDate(text=None, selected=None, **kwargs): """Prompt the user for a date. This will raise a Zenity Calendar Dialog for the user to pick a date. It will return a datetime.date object with the date or None if the user hit cancel. text - Text to be displayed in the calendar dialog. selected - A datetime.date object that will be the pre-selected date. kwargs - Optional command line parameters for Zenity such as height, width, etc.""" args = ['--date-format=%d/%m/%Y'] if text: args.append('--text=%s' % text) if selected: args.append('--day=%d' % selected.day) args.append('--month=%d' % selected.month) args.append('--year=%d' % selected.year) for generic_args in kwargs_helper(kwargs): args.append('--%s=%s' % generic_args) p = run_zenity('--calendar', *args) if p.wait() == 0: retval = p.stdout.read().strip() day, month, year = [int(x) for x in retval.split('/')] return date(year, month, day)
[ "def", "GetDate", "(", "text", "=", "None", ",", "selected", "=", "None", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "'--date-format=%d/%m/%Y'", "]", "if", "text", ":", "args", ".", "append", "(", "'--text=%s'", "%", "text", ")", "if", "sel...
Prompt the user for a date. This will raise a Zenity Calendar Dialog for the user to pick a date. It will return a datetime.date object with the date or None if the user hit cancel. text - Text to be displayed in the calendar dialog. selected - A datetime.date object that will be the pre-selected date. kwargs - Optional command line parameters for Zenity such as height, width, etc.
[ "Prompt", "the", "user", "for", "a", "date", ".", "This", "will", "raise", "a", "Zenity", "Calendar", "Dialog", "for", "the", "user", "to", "pick", "a", "date", ".", "It", "will", "return", "a", "datetime", ".", "date", "object", "with", "the", "date",...
train
https://github.com/simodalla/pygmount/blob/8027cfa2ed5fa8e9207d72b6013ecec7fcf2e5f5/thirdparty/PyZenity-0.1.7/PyZenity.py#L84-L112
simodalla/pygmount
thirdparty/PyZenity-0.1.7/PyZenity.py
GetFilename
def GetFilename(multiple=False, sep='|', **kwargs):
    """Prompt the user for a filename.

    This will raise a Zenity File Selection Dialog.  It will return a
    list with the selected files or None if the user hit cancel.

    multiple - True to allow the user to select multiple files.
    sep      - Token to use as the path separator when parsing Zenity's
               return string.
    kwargs   - Optional command line parameters for Zenity such as
               height, width, etc."""
    args = []
    if multiple:
        args.append('--multiple')
    if sep != '|':
        args.append('--separator=%s' % sep)
    for generic_args in kwargs_helper(kwargs):
        args.append('--%s=%s' % generic_args)
    p = run_zenity('--file-selection', *args)
    if p.wait() == 0:
        # BUG FIX: the output is joined by the separator we asked zenity
        # for, so split on `sep` — the original split on a hard-coded '|',
        # which silently broke any call that passed a custom separator.
        # ([:-1] drops the trailing newline zenity appends.)
        return p.stdout.read()[:-1].split(sep)
python
def GetFilename(multiple=False, sep='|', **kwargs): """Prompt the user for a filename. This will raise a Zenity File Selection Dialog. It will return a list with the selected files or None if the user hit cancel. multiple - True to allow the user to select multiple files. sep - Token to use as the path separator when parsing Zenity's return string. kwargs - Optional command line parameters for Zenity such as height, width, etc.""" args = [] if multiple: args.append('--multiple') if sep != '|': args.append('--separator=%s' % sep) for generic_args in kwargs_helper(kwargs): args.append('--%s=%s' % generic_args) p = run_zenity('--file-selection', *args) if p.wait() == 0: return p.stdout.read()[:-1].split('|')
[ "def", "GetFilename", "(", "multiple", "=", "False", ",", "sep", "=", "'|'", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "]", "if", "multiple", ":", "args", ".", "append", "(", "'--multiple'", ")", "if", "sep", "!=", "'|'", ":", "args", ...
Prompt the user for a filename. This will raise a Zenity File Selection Dialog. It will return a list with the selected files or None if the user hit cancel. multiple - True to allow the user to select multiple files. sep - Token to use as the path separator when parsing Zenity's return string. kwargs - Optional command line parameters for Zenity such as height, width, etc.
[ "Prompt", "the", "user", "for", "a", "filename", ".", "This", "will", "raise", "a", "Zenity", "File", "Selection", "Dialog", ".", "It", "will", "return", "a", "list", "with", "the", "selected", "files", "or", "None", "if", "the", "user", "hit", "cancel",...
train
https://github.com/simodalla/pygmount/blob/8027cfa2ed5fa8e9207d72b6013ecec7fcf2e5f5/thirdparty/PyZenity-0.1.7/PyZenity.py#L115-L139
simodalla/pygmount
thirdparty/PyZenity-0.1.7/PyZenity.py
GetDirectory
def GetDirectory(multiple=False, selected=None, sep=None, **kwargs):
    """Prompt the user for a directory.

    This will raise a Zenity Directory Selection Dialog.  It will return
    a list with the selected directories or None if the user hit cancel.

    multiple - True to allow the user to select multiple directories.
    selected - Path to the directory to be selected on startup.
    sep      - Token to use as the path separator when parsing Zenity's
               return string.
    kwargs   - Optional command line parameters for Zenity such as
               height, width, etc."""
    args = ['--directory']
    if multiple:
        args.append('--multiple')
    if selected:
        # lexists (not exists) so a dangling symlink still counts as present.
        if not path.lexists(selected):
            raise ValueError("File %s does not exist!" % selected)
        args.append('--filename=%s' % selected)
    if sep:
        args.append('--separator=%s' % sep)
    for generic_args in kwargs_helper(kwargs):
        args.append('--%s=%s' % generic_args)
    p = run_zenity('--file-selection', *args)
    if p.wait() == 0:
        # BUG FIX: split on the separator we actually passed to zenity
        # (default '|') — the original always split on '|', breaking any
        # call that supplied a custom `sep`.
        return p.stdout.read().strip().split(sep or '|')
python
def GetDirectory(multiple=False, selected=None, sep=None, **kwargs): """Prompt the user for a directory. This will raise a Zenity Directory Selection Dialog. It will return a list with the selected directories or None if the user hit cancel. multiple - True to allow the user to select multiple directories. selected - Path to the directory to be selected on startup. sep - Token to use as the path separator when parsing Zenity's return string. kwargs - Optional command line parameters for Zenity such as height, width, etc.""" args = ['--directory'] if multiple: args.append('--multiple') if selected: if not path.lexists(selected): raise ValueError("File %s does not exist!" % selected) args.append('--filename=%s' % selected) if sep: args.append('--separator=%s' % sep) for generic_args in kwargs_helper(kwargs): args.append('--%s=%s' % generic_args) p = run_zenity('--file-selection', *args) if p.wait() == 0: return p.stdout.read().strip().split('|')
[ "def", "GetDirectory", "(", "multiple", "=", "False", ",", "selected", "=", "None", ",", "sep", "=", "None", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "'--directory'", "]", "if", "multiple", ":", "args", ".", "append", "(", "'--multiple'", ...
Prompt the user for a directory. This will raise a Zenity Directory Selection Dialog. It will return a list with the selected directories or None if the user hit cancel. multiple - True to allow the user to select multiple directories. selected - Path to the directory to be selected on startup. sep - Token to use as the path separator when parsing Zenity's return string. kwargs - Optional command line parameters for Zenity such as height, width, etc.
[ "Prompt", "the", "user", "for", "a", "directory", ".", "This", "will", "raise", "a", "Zenity", "Directory", "Selection", "Dialog", ".", "It", "will", "return", "a", "list", "with", "the", "selected", "directories", "or", "None", "if", "the", "user", "hit",...
train
https://github.com/simodalla/pygmount/blob/8027cfa2ed5fa8e9207d72b6013ecec7fcf2e5f5/thirdparty/PyZenity-0.1.7/PyZenity.py#L142-L171
simodalla/pygmount
thirdparty/PyZenity-0.1.7/PyZenity.py
GetSavename
def GetSavename(default=None, **kwargs):
    """Prompt the user for a filename to save as.

    This will raise a Zenity Save As Dialog.  Returns the chosen name
    (note: wrapped in a single-element list, as the result is split on
    '|') or None if the user hit cancel.

    default - The default name that should appear in the save as dialog.
    kwargs  - Optional command line parameters for Zenity such as
              height, width, etc."""
    args = ['--save']
    if default:
        args.append('--filename=%s' % default)
    args.extend('--%s=%s' % extra for extra in kwargs_helper(kwargs))
    dialog = run_zenity('--file-selection', *args)
    if dialog.wait() != 0:
        # Cancelled or closed.
        return None
    return dialog.stdout.read().strip().split('|')
python
def GetSavename(default=None, **kwargs): """Prompt the user for a filename to save as. This will raise a Zenity Save As Dialog. It will return the name to save a file as or None if the user hit cancel. default - The default name that should appear in the save as dialog. kwargs - Optional command line parameters for Zenity such as height, width, etc.""" args = ['--save'] if default: args.append('--filename=%s' % default) for generic_args in kwargs_helper(kwargs): args.append('--%s=%s' % generic_args) p = run_zenity('--file-selection', *args) if p.wait() == 0: return p.stdout.read().strip().split('|')
[ "def", "GetSavename", "(", "default", "=", "None", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "'--save'", "]", "if", "default", ":", "args", ".", "append", "(", "'--filename=%s'", "%", "default", ")", "for", "generic_args", "in", "kwargs_helper...
Prompt the user for a filename to save as. This will raise a Zenity Save As Dialog. It will return the name to save a file as or None if the user hit cancel. default - The default name that should appear in the save as dialog. kwargs - Optional command line parameters for Zenity such as height, width, etc.
[ "Prompt", "the", "user", "for", "a", "filename", "to", "save", "as", ".", "This", "will", "raise", "a", "Zenity", "Save", "As", "Dialog", ".", "It", "will", "return", "the", "name", "to", "save", "a", "file", "as", "or", "None", "if", "the", "user", ...
train
https://github.com/simodalla/pygmount/blob/8027cfa2ed5fa8e9207d72b6013ecec7fcf2e5f5/thirdparty/PyZenity-0.1.7/PyZenity.py#L174-L194
simodalla/pygmount
thirdparty/PyZenity-0.1.7/PyZenity.py
Notification
def Notification(text=None, window_icon=None, **kwargs):
    """Put an icon in the notification area.

    This will put an icon in the notification area and return when the
    user clicks on it.

    text        - The tooltip that will show when the user hovers over it.
    window_icon - The stock icon ("question", "info", "warning", "error")
                  or path to the icon to show.
    kwargs      - Optional command line parameters for Zenity such as
                  height, width, etc."""
    args = []
    if text:
        args.append('--text=%s' % text)
    if window_icon:
        args.append('--window-icon=%s' % window_icon)
    args.extend('--%s=%s' % extra for extra in kwargs_helper(kwargs))
    # Block until the user dismisses the notification icon.
    run_zenity('--notification', *args).wait()
python
def Notification(text=None, window_icon=None, **kwargs): """Put an icon in the notification area. This will put an icon in the notification area and return when the user clicks on it. text - The tooltip that will show when the user hovers over it. window_icon - The stock icon ("question", "info", "warning", "error") or path to the icon to show. kwargs - Optional command line parameters for Zenity such as height, width, etc.""" args = [] if text: args.append('--text=%s' % text) if window_icon: args.append('--window-icon=%s' % window_icon) for generic_args in kwargs_helper(kwargs): args.append('--%s=%s' % generic_args) p = run_zenity('--notification', *args) p.wait()
[ "def", "Notification", "(", "text", "=", "None", ",", "window_icon", "=", "None", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "]", "if", "text", ":", "args", ".", "append", "(", "'--text=%s'", "%", "text", ")", "if", "window_icon", ":", "a...
Put an icon in the notification area. This will put an icon in the notification area and return when the user clicks on it. text - The tooltip that will show when the user hovers over it. window_icon - The stock icon ("question", "info", "warning", "error") or path to the icon to show. kwargs - Optional command line parameters for Zenity such as height, width, etc.
[ "Put", "an", "icon", "in", "the", "notification", "area", ".", "This", "will", "put", "an", "icon", "in", "the", "notification", "area", "and", "return", "when", "the", "user", "clicks", "on", "it", ".", "text", "-", "The", "tooltip", "that", "will", "...
train
https://github.com/simodalla/pygmount/blob/8027cfa2ed5fa8e9207d72b6013ecec7fcf2e5f5/thirdparty/PyZenity-0.1.7/PyZenity.py#L197-L219
simodalla/pygmount
thirdparty/PyZenity-0.1.7/PyZenity.py
List
def List(column_names, title=None, boolstyle=None, editable=False,
         select_col=None, sep='|', data=(), **kwargs):
    """Present a list of items to select.

    This will raise a Zenity List Dialog populated with the colomns and
    rows specified and return either the cell or row that was selected
    or None if the user hit cancel.

    column_names - A tuple or list containing the names of the columns.
    title        - The title of the dialog box.
    boolstyle    - Whether the first columns should be a bool option
                   ("checklist", "radiolist") or None if it should be a
                   text field.
    editable     - True if the user can edit the cells.
    select_col   - The column number of the selected cell to return or
                   "ALL" to return the entire row.
    sep          - Token to use as the row separator when parsing
                   Zenity's return.  Cells should not contain this token.
    data         - A list or tuple of tuples that contain the cells in
                   the row.  The size of the row's tuple must be equal to
                   the number of columns.
    kwargs       - Optional command line parameters for Zenity such as
                   height, width, etc."""
    # BUG FIX: default was the mutable `data=[]`, which is shared across
    # calls; an empty tuple is equivalent here (only iterated) and safe.
    args = []
    for column in column_names:
        args.append('--column=%s' % column)
    if title:
        args.append('--title=%s' % title)
    if boolstyle:
        if boolstyle not in ('checklist', 'radiolist'):
            raise ValueError('"%s" is not a proper boolean column style.'
                             % boolstyle)
        args.append('--' + boolstyle)
    if editable:
        args.append('--editable')
    if select_col:
        args.append('--print-column=%s' % select_col)
    if sep != '|':
        args.append('--separator=%s' % sep)
    for generic_args in kwargs_helper(kwargs):
        args.append('--%s=%s' % generic_args)
    # Flatten the rows: zenity takes cells as positional arguments.
    for datum in chain(*data):
        args.append(str(datum))
    p = run_zenity('--list', *args)
    if p.wait() == 0:
        return p.stdout.read().strip().split(sep)
python
def List(column_names, title=None, boolstyle=None, editable=False, select_col=None, sep='|', data=[], **kwargs): """Present a list of items to select. This will raise a Zenity List Dialog populated with the colomns and rows specified and return either the cell or row that was selected or None if the user hit cancel. column_names - A tuple or list containing the names of the columns. title - The title of the dialog box. boolstyle - Whether the first columns should be a bool option ("checklist", "radiolist") or None if it should be a text field. editable - True if the user can edit the cells. select_col - The column number of the selected cell to return or "ALL" to return the entire row. sep - Token to use as the row separator when parsing Zenity's return. Cells should not contain this token. data - A list or tuple of tuples that contain the cells in the row. The size of the row's tuple must be equal to the number of columns. kwargs - Optional command line parameters for Zenity such as height, width, etc.""" args = [] for column in column_names: args.append('--column=%s' % column) if title: args.append('--title=%s' % title) if boolstyle: if not (boolstyle == 'checklist' or boolstyle == 'radiolist'): raise ValueError('"%s" is not a proper boolean column style.' % boolstyle) args.append('--' + boolstyle) if editable: args.append('--editable') if select_col: args.append('--print-column=%s' % select_col) if sep != '|': args.append('--separator=%s' % sep) for generic_args in kwargs_helper(kwargs): args.append('--%s=%s' % generic_args) for datum in chain(*data): args.append(str(datum)) p = run_zenity('--list', *args) if p.wait() == 0: return p.stdout.read().strip().split(sep)
[ "def", "List", "(", "column_names", ",", "title", "=", "None", ",", "boolstyle", "=", "None", ",", "editable", "=", "False", ",", "select_col", "=", "None", ",", "sep", "=", "'|'", ",", "data", "=", "[", "]", ",", "*", "*", "kwargs", ")", ":", "a...
Present a list of items to select. This will raise a Zenity List Dialog populated with the colomns and rows specified and return either the cell or row that was selected or None if the user hit cancel. column_names - A tuple or list containing the names of the columns. title - The title of the dialog box. boolstyle - Whether the first columns should be a bool option ("checklist", "radiolist") or None if it should be a text field. editable - True if the user can edit the cells. select_col - The column number of the selected cell to return or "ALL" to return the entire row. sep - Token to use as the row separator when parsing Zenity's return. Cells should not contain this token. data - A list or tuple of tuples that contain the cells in the row. The size of the row's tuple must be equal to the number of columns. kwargs - Optional command line parameters for Zenity such as height, width, etc.
[ "Present", "a", "list", "of", "items", "to", "select", ".", "This", "will", "raise", "a", "Zenity", "List", "Dialog", "populated", "with", "the", "colomns", "and", "rows", "specified", "and", "return", "either", "the", "cell", "or", "row", "that", "was", ...
train
https://github.com/simodalla/pygmount/blob/8027cfa2ed5fa8e9207d72b6013ecec7fcf2e5f5/thirdparty/PyZenity-0.1.7/PyZenity.py#L222-L271
simodalla/pygmount
thirdparty/PyZenity-0.1.7/PyZenity.py
ErrorMessage
def ErrorMessage(text, **kwargs):
    """Show an error message dialog to the user.

    This will raise a Zenity Error Dialog with a description of the
    error.

    text   - A description of the error.
    kwargs - Optional command line parameters for Zenity such as
             height, width, etc."""
    args = ['--text=%s' % text]
    args.extend('--%s=%s' % extra for extra in kwargs_helper(kwargs))
    dialog = run_zenity('--error', *args)
    # Block until the user acknowledges the error.
    dialog.wait()
python
def ErrorMessage(text, **kwargs): """Show an error message dialog to the user. This will raise a Zenity Error Dialog with a description of the error. text - A description of the error. kwargs - Optional command line parameters for Zenity such as height, width, etc.""" args = ['--text=%s' % text] for generic_args in kwargs_helper(kwargs): args.append('--%s=%s' % generic_args) run_zenity('--error', *args).wait()
[ "def", "ErrorMessage", "(", "text", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "'--text=%s'", "%", "text", "]", "for", "generic_args", "in", "kwargs_helper", "(", "kwargs", ")", ":", "args", ".", "append", "(", "'--%s=%s'", "%", "generic_args",...
Show an error message dialog to the user. This will raise a Zenity Error Dialog with a description of the error. text - A description of the error. kwargs - Optional command line parameters for Zenity such as height, width, etc.
[ "Show", "an", "error", "message", "dialog", "to", "the", "user", ".", "This", "will", "raise", "a", "Zenity", "Error", "Dialog", "with", "a", "description", "of", "the", "error", ".", "text", "-", "A", "description", "of", "the", "error", ".", "kwargs", ...
train
https://github.com/simodalla/pygmount/blob/8027cfa2ed5fa8e9207d72b6013ecec7fcf2e5f5/thirdparty/PyZenity-0.1.7/PyZenity.py#L274-L287
simodalla/pygmount
thirdparty/PyZenity-0.1.7/PyZenity.py
Progress
def Progress(text='', percentage=0, auto_close=False, pulsate=False, **kwargs):
    """Show a progress dialog to the user.

    This will raise a Zenity Progress Dialog.  It returns a callback
    that accepts two arguments.  The first is a numeric value of the
    percent complete.  The second is a message about the progress.

    NOTE: This function sends the SIGHUP signal if the user hits the
    cancel button.  You must connect to this signal if you do not want
    your application to exit.

    text       - The initial message about the progress.
    percentage - The initial percentage to set the progress bar to.
    auto_close - True if the dialog should close automatically if it
                 reaches 100%.
    pulsate    - True is the status should pulsate instead of progress.
    kwargs     - Optional command line parameters for Zenity such as
                 height, width, etc."""
    args = []
    if text:
        args.append('--text=%s' % text)
    if percentage:
        args.append('--percentage=%s' % percentage)
    # BUG FIX: --auto-close and --pulsate are bare boolean flags in
    # zenity; the original passed '--auto-close=True' / '--pulsate=True',
    # which zenity does not accept as flag syntax.
    if auto_close:
        args.append('--auto-close')
    if pulsate:
        args.append('--pulsate')
    for generic_args in kwargs_helper(kwargs):
        args.append('--%s=%s' % generic_args)
    p = Popen([zen_exec, '--progress'] + args, stdin=PIPE, stdout=PIPE)

    def update(percent, message=''):
        """Push a new percentage (int, or float fraction) and optional
        message to the running dialog; returns the process returncode
        (None while the dialog is still open)."""
        if isinstance(percent, float):
            percent = int(percent * 100)
        p.stdin.write(str(percent) + '\n')
        if message:
            # Lines starting with '# ' update the dialog text.
            p.stdin.write('# %s\n' % message)
        # BUG FIX: the pipe is block-buffered, so without an explicit
        # flush updates do not reach zenity until the buffer fills or
        # the pipe is closed.
        p.stdin.flush()
        return p.returncode

    return update
python
def Progress(text='', percentage=0, auto_close=False, pulsate=False, **kwargs): """Show a progress dialog to the user. This will raise a Zenity Progress Dialog. It returns a callback that accepts two arguments. The first is a numeric value of the percent complete. The second is a message about the progress. NOTE: This function sends the SIGHUP signal if the user hits the cancel button. You must connect to this signal if you do not want your application to exit. text - The initial message about the progress. percentage - The initial percentage to set the progress bar to. auto_close - True if the dialog should close automatically if it reaches 100%. pulsate - True is the status should pulsate instead of progress. kwargs - Optional command line parameters for Zenity such as height, width, etc.""" args = [] if text: args.append('--text=%s' % text) if percentage: args.append('--percentage=%s' % percentage) if auto_close: args.append('--auto-close=%s' % auto_close) if pulsate: args.append('--pulsate=%s' % pulsate) for generic_args in kwargs_helper(kwargs): args.append('--%s=%s' % generic_args) p = Popen([zen_exec, '--progress'] + args, stdin=PIPE, stdout=PIPE) def update(percent, message=''): if type(percent) == float: percent = int(percent * 100) p.stdin.write(str(percent) + '\n') if message: p.stdin.write('# %s\n' % message) return p.returncode return update
[ "def", "Progress", "(", "text", "=", "''", ",", "percentage", "=", "0", ",", "auto_close", "=", "False", ",", "pulsate", "=", "False", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "]", "if", "text", ":", "args", ".", "append", "(", "'--te...
Show a progress dialog to the user. This will raise a Zenity Progress Dialog. It returns a callback that accepts two arguments. The first is a numeric value of the percent complete. The second is a message about the progress. NOTE: This function sends the SIGHUP signal if the user hits the cancel button. You must connect to this signal if you do not want your application to exit. text - The initial message about the progress. percentage - The initial percentage to set the progress bar to. auto_close - True if the dialog should close automatically if it reaches 100%. pulsate - True is the status should pulsate instead of progress. kwargs - Optional command line parameters for Zenity such as height, width, etc.
[ "Show", "a", "progress", "dialog", "to", "the", "user", ".", "This", "will", "raise", "a", "Zenity", "Progress", "Dialog", ".", "It", "returns", "a", "callback", "that", "accepts", "two", "arguments", ".", "The", "first", "is", "a", "numeric", "value", "...
train
https://github.com/simodalla/pygmount/blob/8027cfa2ed5fa8e9207d72b6013ecec7fcf2e5f5/thirdparty/PyZenity-0.1.7/PyZenity.py#L341-L383
simodalla/pygmount
thirdparty/PyZenity-0.1.7/PyZenity.py
GetText
def GetText(text='', entry_text='', password=False, **kwargs):
    """Get some text from the user.

    This will raise a Zenity Text Entry Dialog.  It returns the text the
    user entered or None if the user hit cancel.

    text       - A description of the text to enter.
    entry_text - The initial value of the text entry box.
    password   - True if text entered should be hidden by stars.
    kwargs     - Optional command line parameters for Zenity such as
                 height, width, etc."""
    args = []
    if text:
        args.append('--text=%s' % text)
    if entry_text:
        args.append('--entry-text=%s' % entry_text)
    if password:
        args.append('--hide-text')
    args.extend('--%s=%s' % extra for extra in kwargs_helper(kwargs))
    proc = run_zenity('--entry', *args)
    if proc.wait() != 0:
        # Cancelled or closed.
        return None
    # [:-1] drops the trailing newline zenity appends.
    return proc.stdout.read()[:-1]
python
def GetText(text='', entry_text='', password=False, **kwargs): """Get some text from the user. This will raise a Zenity Text Entry Dialog. It returns the text the user entered or None if the user hit cancel. text - A description of the text to enter. entry_text - The initial value of the text entry box. password - True if text entered should be hidden by stars. kwargs - Optional command line parameters for Zenity such as height, width, etc.""" args = [] if text: args.append('--text=%s' % text) if entry_text: args.append('--entry-text=%s' % entry_text) if password: args.append('--hide-text') for generic_args in kwargs_helper(kwargs): args.append('--%s=%s' % generic_args) p = run_zenity('--entry', *args) if p.wait() == 0: return p.stdout.read()[:-1]
[ "def", "GetText", "(", "text", "=", "''", ",", "entry_text", "=", "''", ",", "password", "=", "False", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "]", "if", "text", ":", "args", ".", "append", "(", "'--text=%s'", "%", "text", ")", "if",...
Get some text from the user. This will raise a Zenity Text Entry Dialog. It returns the text the user entered or None if the user hit cancel. text - A description of the text to enter. entry_text - The initial value of the text entry box. password - True if text entered should be hidden by stars. kwargs - Optional command line parameters for Zenity such as height, width, etc.
[ "Get", "some", "text", "from", "the", "user", "." ]
train
https://github.com/simodalla/pygmount/blob/8027cfa2ed5fa8e9207d72b6013ecec7fcf2e5f5/thirdparty/PyZenity-0.1.7/PyZenity.py#L386-L412
simodalla/pygmount
thirdparty/PyZenity-0.1.7/PyZenity.py
TextInfo
def TextInfo(filename=None, editable=False, **kwargs):
    """Show the text of a file to the user.

    This will raise a Zenity Text Information Dialog presenting the user
    with the contents of a file.  It returns the contents of the text
    box.

    filename - The path to the file to show.
    editable - True if the text should be editable.
    kwargs   - Optional command line parameters for Zenity such as
               height, width, etc."""
    args = []
    if filename:
        args.append('--filename=%s' % filename)
    if editable:
        args.append('--editable')
    args.extend('--%s=%s' % extra for extra in kwargs_helper(kwargs))
    proc = run_zenity('--text-info', *args)
    if proc.wait() != 0:
        # Cancelled or closed.
        return None
    return proc.stdout.read()
python
def TextInfo(filename=None, editable=False, **kwargs): """Show the text of a file to the user. This will raise a Zenity Text Information Dialog presenting the user with the contents of a file. It returns the contents of the text box. filename - The path to the file to show. editable - True if the text should be editable. kwargs - Optional command line parameters for Zenity such as height, width, etc.""" args = [] if filename: args.append('--filename=%s' % filename) if editable: args.append('--editable') for generic_args in kwargs_helper(kwargs): args.append('--%s=%s' % generic_args) p = run_zenity('--text-info', *args) if p.wait() == 0: return p.stdout.read()
[ "def", "TextInfo", "(", "filename", "=", "None", ",", "editable", "=", "False", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "]", "if", "filename", ":", "args", ".", "append", "(", "'--filename=%s'", "%", "filename", ")", "if", "editable", ":...
Show the text of a file to the user. This will raise a Zenity Text Information Dialog presenting the user with the contents of a file. It returns the contents of the text box. filename - The path to the file to show. editable - True if the text should be editable. kwargs - Optional command line parameters for Zenity such as height, width, etc.
[ "Show", "the", "text", "of", "a", "file", "to", "the", "user", "." ]
train
https://github.com/simodalla/pygmount/blob/8027cfa2ed5fa8e9207d72b6013ecec7fcf2e5f5/thirdparty/PyZenity-0.1.7/PyZenity.py#L415-L438
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/__init__.py
VcsSupport.get_backend_name
def get_backend_name(self, location):
    """
    Return the name of the version control backend if found at given
    location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')
    """
    # A backend "claims" a checkout when its marker directory
    # (e.g. '.git', '.hg') exists under `location`.
    for backend in self._registry.values():
        marker = os.path.join(location, backend.dirname)
        if os.path.exists(marker):
            return backend.name
    return None
python
def get_backend_name(self, location): """ Return the name of the version control backend if found at given location, e.g. vcs.get_backend_name('/path/to/vcs/checkout') """ for vc_type in self._registry.values(): path = os.path.join(location, vc_type.dirname) if os.path.exists(path): return vc_type.name return None
[ "def", "get_backend_name", "(", "self", ",", "location", ")", ":", "for", "vc_type", "in", "self", ".", "_registry", ".", "values", "(", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "location", ",", "vc_type", ".", "dirname", ")", "if"...
Return the name of the version control backend if found at given location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')
[ "Return", "the", "name", "of", "the", "version", "control", "backend", "if", "found", "at", "given", "location", "e", ".", "g", ".", "vcs", ".", "get_backend_name", "(", "/", "path", "/", "to", "/", "vcs", "/", "checkout", ")" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/__init__.py#L57-L66
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/__init__.py
VersionControl.check_destination
def check_destination(self, dest, url, rev_options, rev_display): """ Prepare a location to receive a checkout/clone. Return True if the location is ready for (and requires) a checkout/clone, False otherwise. """ checkout = True prompt = False if os.path.exists(dest): checkout = False if os.path.exists(os.path.join(dest, self.dirname)): existing_url = self.get_url(dest) if self.compare_urls(existing_url, url): logger.info('%s in %s exists, and has correct URL (%s)' % (self.repo_name.title(), display_path(dest), url)) logger.notify('Updating %s %s%s' % (display_path(dest), self.repo_name, rev_display)) self.update(dest, rev_options) else: logger.warn('%s %s in %s exists with URL %s' % (self.name, self.repo_name, display_path(dest), existing_url)) prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', ('s', 'i', 'w', 'b')) else: logger.warn('Directory %s already exists, and is not a %s %s.' % (dest, self.name, self.repo_name)) prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b')) if prompt: logger.warn('The plan is to install the %s repository %s' % (self.name, url)) response = ask('What to do? %s' % prompt[0], prompt[1]) if response == 's': logger.notify('Switching %s %s to %s%s' % (self.repo_name, display_path(dest), url, rev_display)) self.switch(dest, url, rev_options) elif response == 'i': # do nothing pass elif response == 'w': logger.warn('Deleting %s' % display_path(dest)) rmtree(dest) checkout = True elif response == 'b': dest_dir = backup_dir(dest) logger.warn('Backing up %s to %s' % (display_path(dest), dest_dir)) shutil.move(dest, dest_dir) checkout = True return checkout
python
def check_destination(self, dest, url, rev_options, rev_display): """ Prepare a location to receive a checkout/clone. Return True if the location is ready for (and requires) a checkout/clone, False otherwise. """ checkout = True prompt = False if os.path.exists(dest): checkout = False if os.path.exists(os.path.join(dest, self.dirname)): existing_url = self.get_url(dest) if self.compare_urls(existing_url, url): logger.info('%s in %s exists, and has correct URL (%s)' % (self.repo_name.title(), display_path(dest), url)) logger.notify('Updating %s %s%s' % (display_path(dest), self.repo_name, rev_display)) self.update(dest, rev_options) else: logger.warn('%s %s in %s exists with URL %s' % (self.name, self.repo_name, display_path(dest), existing_url)) prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', ('s', 'i', 'w', 'b')) else: logger.warn('Directory %s already exists, and is not a %s %s.' % (dest, self.name, self.repo_name)) prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b')) if prompt: logger.warn('The plan is to install the %s repository %s' % (self.name, url)) response = ask('What to do? %s' % prompt[0], prompt[1]) if response == 's': logger.notify('Switching %s %s to %s%s' % (self.repo_name, display_path(dest), url, rev_display)) self.switch(dest, url, rev_options) elif response == 'i': # do nothing pass elif response == 'w': logger.warn('Deleting %s' % display_path(dest)) rmtree(dest) checkout = True elif response == 'b': dest_dir = backup_dir(dest) logger.warn('Backing up %s to %s' % (display_path(dest), dest_dir)) shutil.move(dest, dest_dir) checkout = True return checkout
[ "def", "check_destination", "(", "self", ",", "dest", ",", "url", ",", "rev_options", ",", "rev_display", ")", ":", "checkout", "=", "True", "prompt", "=", "False", "if", "os", ".", "path", ".", "exists", "(", "dest", ")", ":", "checkout", "=", "False"...
Prepare a location to receive a checkout/clone. Return True if the location is ready for (and requires) a checkout/clone, False otherwise.
[ "Prepare", "a", "location", "to", "receive", "a", "checkout", "/", "clone", "." ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/__init__.py#L171-L220
sqrvrtx/plaid
plaid/yml_check.py
parse
def parse(file_contents, file_name): """ This takes a list of filenames and their paths of expected yaml files and tried to parse them, erroring if there are any parsing issues. Args: file_contents (str): Contents of a yml file Raises: yaml.parser.ParserError: Raises an error if the file contents cannot be parsed and interpreted as yaml """ try: yaml.load(file_contents) except Exception: _, exc_value, _ = sys.exc_info() return("Cannot Parse: {file_name}: \n {exc_value}" .format(file_name=file_name, exc_value=exc_value))
python
def parse(file_contents, file_name): """ This takes a list of filenames and their paths of expected yaml files and tried to parse them, erroring if there are any parsing issues. Args: file_contents (str): Contents of a yml file Raises: yaml.parser.ParserError: Raises an error if the file contents cannot be parsed and interpreted as yaml """ try: yaml.load(file_contents) except Exception: _, exc_value, _ = sys.exc_info() return("Cannot Parse: {file_name}: \n {exc_value}" .format(file_name=file_name, exc_value=exc_value))
[ "def", "parse", "(", "file_contents", ",", "file_name", ")", ":", "try", ":", "yaml", ".", "load", "(", "file_contents", ")", "except", "Exception", ":", "_", ",", "exc_value", ",", "_", "=", "sys", ".", "exc_info", "(", ")", "return", "(", "\"Cannot P...
This takes a list of filenames and their paths of expected yaml files and tried to parse them, erroring if there are any parsing issues. Args: file_contents (str): Contents of a yml file Raises: yaml.parser.ParserError: Raises an error if the file contents cannot be parsed and interpreted as yaml
[ "This", "takes", "a", "list", "of", "filenames", "and", "their", "paths", "of", "expected", "yaml", "files", "and", "tried", "to", "parse", "them", "erroring", "if", "there", "are", "any", "parsing", "issues", "." ]
train
https://github.com/sqrvrtx/plaid/blob/2b6162f896e40e7c490e767839de143e042c2a18/plaid/yml_check.py#L11-L30
thejunglejane/datums
datums/models/base.py
GhostBase.get_or_create
def get_or_create(cls, **kwargs): ''' If a record matching the instance already exists in the database, then return it, otherwise create a new record. ''' q = cls._get_instance(**kwargs) if q: return q q = cls(**kwargs) _action_and_commit(q, session.add) return q
python
def get_or_create(cls, **kwargs): ''' If a record matching the instance already exists in the database, then return it, otherwise create a new record. ''' q = cls._get_instance(**kwargs) if q: return q q = cls(**kwargs) _action_and_commit(q, session.add) return q
[ "def", "get_or_create", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "q", "=", "cls", ".", "_get_instance", "(", "*", "*", "kwargs", ")", "if", "q", ":", "return", "q", "q", "=", "cls", "(", "*", "*", "kwargs", ")", "_action_and_commit", "(", "...
If a record matching the instance already exists in the database, then return it, otherwise create a new record.
[ "If", "a", "record", "matching", "the", "instance", "already", "exists", "in", "the", "database", "then", "return", "it", "otherwise", "create", "a", "new", "record", "." ]
train
https://github.com/thejunglejane/datums/blob/2250b365e37ba952c2426edc615c1487afabae6e/datums/models/base.py#L56-L66
thejunglejane/datums
datums/models/base.py
GhostBase.update
def update(cls, **kwargs): ''' If a record matching the instance id already exists in the database, update it. If a record matching the instance id does not already exist, create a new record. ''' q = cls._get_instance(**{'id': kwargs['id']}) if q: for k, v in kwargs.items(): setattr(q, k, v) _action_and_commit(q, session.add) else: cls.get_or_create(**kwargs)
python
def update(cls, **kwargs): ''' If a record matching the instance id already exists in the database, update it. If a record matching the instance id does not already exist, create a new record. ''' q = cls._get_instance(**{'id': kwargs['id']}) if q: for k, v in kwargs.items(): setattr(q, k, v) _action_and_commit(q, session.add) else: cls.get_or_create(**kwargs)
[ "def", "update", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "q", "=", "cls", ".", "_get_instance", "(", "*", "*", "{", "'id'", ":", "kwargs", "[", "'id'", "]", "}", ")", "if", "q", ":", "for", "k", ",", "v", "in", "kwargs", ".", "items", ...
If a record matching the instance id already exists in the database, update it. If a record matching the instance id does not already exist, create a new record.
[ "If", "a", "record", "matching", "the", "instance", "id", "already", "exists", "in", "the", "database", "update", "it", ".", "If", "a", "record", "matching", "the", "instance", "id", "does", "not", "already", "exist", "create", "a", "new", "record", "." ]
train
https://github.com/thejunglejane/datums/blob/2250b365e37ba952c2426edc615c1487afabae6e/datums/models/base.py#L70-L82
thejunglejane/datums
datums/models/base.py
GhostBase.delete
def delete(cls, **kwargs): ''' If a record matching the instance id exists in the database, delete it. ''' q = cls._get_instance(**kwargs) if q: _action_and_commit(q, session.delete)
python
def delete(cls, **kwargs): ''' If a record matching the instance id exists in the database, delete it. ''' q = cls._get_instance(**kwargs) if q: _action_and_commit(q, session.delete)
[ "def", "delete", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "q", "=", "cls", ".", "_get_instance", "(", "*", "*", "kwargs", ")", "if", "q", ":", "_action_and_commit", "(", "q", ",", "session", ".", "delete", ")" ]
If a record matching the instance id exists in the database, delete it.
[ "If", "a", "record", "matching", "the", "instance", "id", "exists", "in", "the", "database", "delete", "it", "." ]
train
https://github.com/thejunglejane/datums/blob/2250b365e37ba952c2426edc615c1487afabae6e/datums/models/base.py#L85-L91
thejunglejane/datums
datums/models/base.py
ResponseClassLegacyAccessor._get_instance
def _get_instance(self, **kwargs): '''Return the first existing instance of the response record. ''' return session.query(self.response_class).filter_by(**kwargs).first()
python
def _get_instance(self, **kwargs): '''Return the first existing instance of the response record. ''' return session.query(self.response_class).filter_by(**kwargs).first()
[ "def", "_get_instance", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "session", ".", "query", "(", "self", ".", "response_class", ")", ".", "filter_by", "(", "*", "*", "kwargs", ")", ".", "first", "(", ")" ]
Return the first existing instance of the response record.
[ "Return", "the", "first", "existing", "instance", "of", "the", "response", "record", "." ]
train
https://github.com/thejunglejane/datums/blob/2250b365e37ba952c2426edc615c1487afabae6e/datums/models/base.py#L101-L104
thejunglejane/datums
datums/models/base.py
ResponseClassLegacyAccessor.get_or_create_from_legacy_response
def get_or_create_from_legacy_response(self, response, **kwargs): ''' If a record matching the instance already does not already exist in the database, then create a new record. ''' response_cls = self.response_class(**kwargs).get_or_create(**kwargs) if not getattr(response_cls, self.column): setattr(response_cls, self.column, self.accessor(response)) _action_and_commit(response_cls, session.add)
python
def get_or_create_from_legacy_response(self, response, **kwargs): ''' If a record matching the instance already does not already exist in the database, then create a new record. ''' response_cls = self.response_class(**kwargs).get_or_create(**kwargs) if not getattr(response_cls, self.column): setattr(response_cls, self.column, self.accessor(response)) _action_and_commit(response_cls, session.add)
[ "def", "get_or_create_from_legacy_response", "(", "self", ",", "response", ",", "*", "*", "kwargs", ")", ":", "response_cls", "=", "self", ".", "response_class", "(", "*", "*", "kwargs", ")", ".", "get_or_create", "(", "*", "*", "kwargs", ")", "if", "not",...
If a record matching the instance already does not already exist in the database, then create a new record.
[ "If", "a", "record", "matching", "the", "instance", "already", "does", "not", "already", "exist", "in", "the", "database", "then", "create", "a", "new", "record", "." ]
train
https://github.com/thejunglejane/datums/blob/2250b365e37ba952c2426edc615c1487afabae6e/datums/models/base.py#L106-L114
thejunglejane/datums
datums/models/base.py
ResponseClassLegacyAccessor.update
def update(self, response, **kwargs): ''' If a record matching the instance already exists in the database, update it, else create a new record. ''' response_cls = self._get_instance(**kwargs) if response_cls: setattr(response_cls, self.column, self.accessor(response)) _action_and_commit(response_cls, session.add) else: self.get_or_create_from_legacy_response(response, **kwargs)
python
def update(self, response, **kwargs): ''' If a record matching the instance already exists in the database, update it, else create a new record. ''' response_cls = self._get_instance(**kwargs) if response_cls: setattr(response_cls, self.column, self.accessor(response)) _action_and_commit(response_cls, session.add) else: self.get_or_create_from_legacy_response(response, **kwargs)
[ "def", "update", "(", "self", ",", "response", ",", "*", "*", "kwargs", ")", ":", "response_cls", "=", "self", ".", "_get_instance", "(", "*", "*", "kwargs", ")", "if", "response_cls", ":", "setattr", "(", "response_cls", ",", "self", ".", "column", ",...
If a record matching the instance already exists in the database, update it, else create a new record.
[ "If", "a", "record", "matching", "the", "instance", "already", "exists", "in", "the", "database", "update", "it", "else", "create", "a", "new", "record", "." ]
train
https://github.com/thejunglejane/datums/blob/2250b365e37ba952c2426edc615c1487afabae6e/datums/models/base.py#L116-L126
thejunglejane/datums
datums/models/base.py
ResponseClassLegacyAccessor.delete
def delete(self, response, **kwargs): ''' If a record matching the instance id exists in the database, delete it. ''' response_cls = self._get_instance(**kwargs) if response_cls: _action_and_commit(response_cls, session.delete)
python
def delete(self, response, **kwargs): ''' If a record matching the instance id exists in the database, delete it. ''' response_cls = self._get_instance(**kwargs) if response_cls: _action_and_commit(response_cls, session.delete)
[ "def", "delete", "(", "self", ",", "response", ",", "*", "*", "kwargs", ")", ":", "response_cls", "=", "self", ".", "_get_instance", "(", "*", "*", "kwargs", ")", "if", "response_cls", ":", "_action_and_commit", "(", "response_cls", ",", "session", ".", ...
If a record matching the instance id exists in the database, delete it.
[ "If", "a", "record", "matching", "the", "instance", "id", "exists", "in", "the", "database", "delete", "it", "." ]
train
https://github.com/thejunglejane/datums/blob/2250b365e37ba952c2426edc615c1487afabae6e/datums/models/base.py#L128-L134
thejunglejane/datums
datums/models/base.py
LocationResponseClassLegacyAccessor.get_or_create_from_legacy_response
def get_or_create_from_legacy_response(self, response, **kwargs): ''' If a record matching the instance already does not already exist in the database, then create a new record. ''' response_cls = self.response_class(**kwargs).get_or_create(**kwargs) if not getattr(response_cls, self.column): setattr(response_cls, self.column, self.accessor(response)) _action_and_commit(response_cls, session.add) if not getattr(response_cls, self.venue_column): setattr( response_cls, self.venue_column, self.venue_accessor(response)) _action_and_commit(response_cls, session.add)
python
def get_or_create_from_legacy_response(self, response, **kwargs): ''' If a record matching the instance already does not already exist in the database, then create a new record. ''' response_cls = self.response_class(**kwargs).get_or_create(**kwargs) if not getattr(response_cls, self.column): setattr(response_cls, self.column, self.accessor(response)) _action_and_commit(response_cls, session.add) if not getattr(response_cls, self.venue_column): setattr( response_cls, self.venue_column, self.venue_accessor(response)) _action_and_commit(response_cls, session.add)
[ "def", "get_or_create_from_legacy_response", "(", "self", ",", "response", ",", "*", "*", "kwargs", ")", ":", "response_cls", "=", "self", ".", "response_class", "(", "*", "*", "kwargs", ")", ".", "get_or_create", "(", "*", "*", "kwargs", ")", "if", "not",...
If a record matching the instance already does not already exist in the database, then create a new record.
[ "If", "a", "record", "matching", "the", "instance", "already", "does", "not", "already", "exist", "in", "the", "database", "then", "create", "a", "new", "record", "." ]
train
https://github.com/thejunglejane/datums/blob/2250b365e37ba952c2426edc615c1487afabae6e/datums/models/base.py#L147-L159
thejunglejane/datums
datums/models/base.py
LocationResponseClassLegacyAccessor.update
def update(self, response, **kwargs): ''' If a record matching the instance already exists in the database, update both the column and venue column attributes, else create a new record. ''' response_cls = super( LocationResponseClassLegacyAccessor, self)._get_instance(**kwargs) if response_cls: setattr(response_cls, self.column, self.accessor(response)) setattr( response_cls, self.venue_column, self.venue_accessor(response)) _action_and_commit(response_cls, session.add)
python
def update(self, response, **kwargs): ''' If a record matching the instance already exists in the database, update both the column and venue column attributes, else create a new record. ''' response_cls = super( LocationResponseClassLegacyAccessor, self)._get_instance(**kwargs) if response_cls: setattr(response_cls, self.column, self.accessor(response)) setattr( response_cls, self.venue_column, self.venue_accessor(response)) _action_and_commit(response_cls, session.add)
[ "def", "update", "(", "self", ",", "response", ",", "*", "*", "kwargs", ")", ":", "response_cls", "=", "super", "(", "LocationResponseClassLegacyAccessor", ",", "self", ")", ".", "_get_instance", "(", "*", "*", "kwargs", ")", "if", "response_cls", ":", "se...
If a record matching the instance already exists in the database, update both the column and venue column attributes, else create a new record.
[ "If", "a", "record", "matching", "the", "instance", "already", "exists", "in", "the", "database", "update", "both", "the", "column", "and", "venue", "column", "attributes", "else", "create", "a", "new", "record", "." ]
train
https://github.com/thejunglejane/datums/blob/2250b365e37ba952c2426edc615c1487afabae6e/datums/models/base.py#L161-L172
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.common_update_sys
def common_update_sys(self): """ update system package """ try: sudo('apt-get update -y --fix-missing') except Exception as e: print(e) print(green('System package is up to date.')) print()
python
def common_update_sys(self): """ update system package """ try: sudo('apt-get update -y --fix-missing') except Exception as e: print(e) print(green('System package is up to date.')) print()
[ "def", "common_update_sys", "(", "self", ")", ":", "try", ":", "sudo", "(", "'apt-get update -y --fix-missing'", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "print", "(", "green", "(", "'System package is up to date.'", ")", ")", "print...
update system package
[ "update", "system", "package" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L34-L44
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.common_install_mysql
def common_install_mysql(self): """ Install mysql """ sudo("debconf-set-selections <<< 'mysql-server mysql-server/root_password password {0}'".format(self.mysql_password)) sudo("debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password {0}'".format(self.mysql_password)) sudo('apt-get install mysql-server -y') print(green(' * Installed MySql server in the system.')) print(green(' * Done')) print()
python
def common_install_mysql(self): """ Install mysql """ sudo("debconf-set-selections <<< 'mysql-server mysql-server/root_password password {0}'".format(self.mysql_password)) sudo("debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password {0}'".format(self.mysql_password)) sudo('apt-get install mysql-server -y') print(green(' * Installed MySql server in the system.')) print(green(' * Done')) print()
[ "def", "common_install_mysql", "(", "self", ")", ":", "sudo", "(", "\"debconf-set-selections <<< 'mysql-server mysql-server/root_password password {0}'\"", ".", "format", "(", "self", ".", "mysql_password", ")", ")", "sudo", "(", "\"debconf-set-selections <<< 'mysql-server mysq...
Install mysql
[ "Install", "mysql" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L46-L56
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.update_source_list
def update_source_list(self): """ update ubuntu 16 source list :return: """ with cd('/etc/apt'): sudo('mv sources.list sources.list.bak') put(StringIO(bigdata_conf.ubuntu_source_list_16), 'sources.list', use_sudo=True) sudo('apt-get update -y --fix-missing')
python
def update_source_list(self): """ update ubuntu 16 source list :return: """ with cd('/etc/apt'): sudo('mv sources.list sources.list.bak') put(StringIO(bigdata_conf.ubuntu_source_list_16), 'sources.list', use_sudo=True) sudo('apt-get update -y --fix-missing')
[ "def", "update_source_list", "(", "self", ")", ":", "with", "cd", "(", "'/etc/apt'", ")", ":", "sudo", "(", "'mv sources.list sources.list.bak'", ")", "put", "(", "StringIO", "(", "bigdata_conf", ".", "ubuntu_source_list_16", ")", ",", "'sources.list'", ",", "us...
update ubuntu 16 source list :return:
[ "update", "ubuntu", "16", "source", "list", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L58-L67
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.common_install_nginx
def common_install_nginx(self): """ Install nginx """ run('echo "deb http://ppa.launchpad.net/nginx/stable/ubuntu $(lsb_release -sc) main" | sudo tee /etc/apt/sources.list.d/nginx-stable.list') sudo('apt-key adv --keyserver keyserver.ubuntu.com --recv-keys C300EE8C') sudo('apt-get update -y') sudo('apt-get install nginx -y') print(green(' * Installed Nginx in the system.')) print(green(' * Done')) print()
python
def common_install_nginx(self): """ Install nginx """ run('echo "deb http://ppa.launchpad.net/nginx/stable/ubuntu $(lsb_release -sc) main" | sudo tee /etc/apt/sources.list.d/nginx-stable.list') sudo('apt-key adv --keyserver keyserver.ubuntu.com --recv-keys C300EE8C') sudo('apt-get update -y') sudo('apt-get install nginx -y') print(green(' * Installed Nginx in the system.')) print(green(' * Done')) print()
[ "def", "common_install_nginx", "(", "self", ")", ":", "run", "(", "'echo \"deb http://ppa.launchpad.net/nginx/stable/ubuntu $(lsb_release -sc) main\" | sudo tee /etc/apt/sources.list.d/nginx-stable.list'", ")", "sudo", "(", "'apt-key adv --keyserver keyserver.ubuntu.com --recv-keys C300EE8C'...
Install nginx
[ "Install", "nginx" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L69-L80
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.common_config_nginx_ssl
def common_config_nginx_ssl(self): """ Convert nginx server from http to https """ if prompt(red(' * Change url from http to https (y/n)?'), default='n') == 'y': if not exists(self.nginx_ssl_dir): sudo('mkdir -p {0}'.format(self.nginx_ssl_dir)) # generate ssh key sudo('openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout {0}/cert.key -out {0}/cert.pem'.format(self.nginx_ssl_dir)) # do nginx config config put(StringIO(self.nginx_web_ssl_config), '/etc/nginx/sites-available/default', use_sudo=True) sudo('service nginx restart') print(green(' * Make Nginx from http to https.')) print(green(' * Done')) print()
python
def common_config_nginx_ssl(self): """ Convert nginx server from http to https """ if prompt(red(' * Change url from http to https (y/n)?'), default='n') == 'y': if not exists(self.nginx_ssl_dir): sudo('mkdir -p {0}'.format(self.nginx_ssl_dir)) # generate ssh key sudo('openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout {0}/cert.key -out {0}/cert.pem'.format(self.nginx_ssl_dir)) # do nginx config config put(StringIO(self.nginx_web_ssl_config), '/etc/nginx/sites-available/default', use_sudo=True) sudo('service nginx restart') print(green(' * Make Nginx from http to https.')) print(green(' * Done')) print()
[ "def", "common_config_nginx_ssl", "(", "self", ")", ":", "if", "prompt", "(", "red", "(", "' * Change url from http to https (y/n)?'", ")", ",", "default", "=", "'n'", ")", "==", "'y'", ":", "if", "not", "exists", "(", "self", ".", "nginx_ssl_dir", ")", ":",...
Convert nginx server from http to https
[ "Convert", "nginx", "server", "from", "http", "to", "https" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L82-L100
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.common_install_apache2
def common_install_apache2(self): """ Install apache2 web server """ try: sudo('apt-get install apache2 -y') except Exception as e: print(e) print(green(' * Installed Apache2 in the system.')) print(green(' * Done')) print()
python
def common_install_apache2(self): """ Install apache2 web server """ try: sudo('apt-get install apache2 -y') except Exception as e: print(e) print(green(' * Installed Apache2 in the system.')) print(green(' * Done')) print()
[ "def", "common_install_apache2", "(", "self", ")", ":", "try", ":", "sudo", "(", "'apt-get install apache2 -y'", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "print", "(", "green", "(", "' * Installed Apache2 in the system.'", ")", ")", ...
Install apache2 web server
[ "Install", "apache2", "web", "server" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L102-L113
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.common_install_python_env
def common_install_python_env(self): """ Install python virtualenv """ sudo('apt-get install python3 python3-pip -y') sudo('pip3 install virtualenv') run('virtualenv {0}'.format(self.python_env_dir)) print(green(' * Installed Python3 virtual environment in the system.')) print(green(' * Done')) print()
python
def common_install_python_env(self): """ Install python virtualenv """ sudo('apt-get install python3 python3-pip -y') sudo('pip3 install virtualenv') run('virtualenv {0}'.format(self.python_env_dir)) print(green(' * Installed Python3 virtual environment in the system.')) print(green(' * Done')) print()
[ "def", "common_install_python_env", "(", "self", ")", ":", "sudo", "(", "'apt-get install python3 python3-pip -y'", ")", "sudo", "(", "'pip3 install virtualenv'", ")", "run", "(", "'virtualenv {0}'", ".", "format", "(", "self", ".", "python_env_dir", ")", ")", "prin...
Install python virtualenv
[ "Install", "python", "virtualenv" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L115-L126
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.systemctl_autostart
def systemctl_autostart(self, service_name, start_cmd, stop_cmd): """ ubuntu 16.04 systemctl service config :param service_name: :param start_cmd: :param stop_cmd: :return: """ # get config content service_content = bigdata_conf.systemctl_config.format( service_name=service_name, start_cmd=start_cmd, stop_cmd=stop_cmd ) # write config into file with cd('/lib/systemd/system'): if not exists(service_name): sudo('touch {0}'.format(service_name)) put(StringIO(service_content), service_name, use_sudo=True) # make service auto-start sudo('systemctl daemon-reload') sudo('systemctl disable {0}'.format(service_name)) sudo('systemctl stop {0}'.format(service_name)) sudo('systemctl enable {0}'.format(service_name)) sudo('systemctl start {0}'.format(service_name))
python
def systemctl_autostart(self, service_name, start_cmd, stop_cmd): """ ubuntu 16.04 systemctl service config :param service_name: :param start_cmd: :param stop_cmd: :return: """ # get config content service_content = bigdata_conf.systemctl_config.format( service_name=service_name, start_cmd=start_cmd, stop_cmd=stop_cmd ) # write config into file with cd('/lib/systemd/system'): if not exists(service_name): sudo('touch {0}'.format(service_name)) put(StringIO(service_content), service_name, use_sudo=True) # make service auto-start sudo('systemctl daemon-reload') sudo('systemctl disable {0}'.format(service_name)) sudo('systemctl stop {0}'.format(service_name)) sudo('systemctl enable {0}'.format(service_name)) sudo('systemctl start {0}'.format(service_name))
[ "def", "systemctl_autostart", "(", "self", ",", "service_name", ",", "start_cmd", ",", "stop_cmd", ")", ":", "# get config content", "service_content", "=", "bigdata_conf", ".", "systemctl_config", ".", "format", "(", "service_name", "=", "service_name", ",", "start...
ubuntu 16.04 systemctl service config :param service_name: :param start_cmd: :param stop_cmd: :return:
[ "ubuntu", "16", ".", "04", "systemctl", "service", "config", ":", "param", "service_name", ":", ":", "param", "start_cmd", ":", ":", "param", "stop_cmd", ":", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L128-L154
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.java_install
def java_install(self): """ install java :return: """ sudo('apt-get install openjdk-8-jdk -y') java_home = run('readlink -f /usr/bin/java | ' 'sed "s:/jre/bin/java::"') append(bigdata_conf.global_env_home, 'export JAVA_HOME={0}'.format( java_home ), use_sudo=True) run('source {0}'.format(bigdata_conf.global_env_home))
python
def java_install(self): """ install java :return: """ sudo('apt-get install openjdk-8-jdk -y') java_home = run('readlink -f /usr/bin/java | ' 'sed "s:/jre/bin/java::"') append(bigdata_conf.global_env_home, 'export JAVA_HOME={0}'.format( java_home ), use_sudo=True) run('source {0}'.format(bigdata_conf.global_env_home))
[ "def", "java_install", "(", "self", ")", ":", "sudo", "(", "'apt-get install openjdk-8-jdk -y'", ")", "java_home", "=", "run", "(", "'readlink -f /usr/bin/java | '", "'sed \"s:/jre/bin/java::\"'", ")", "append", "(", "bigdata_conf", ".", "global_env_home", ",", "'export...
install java :return:
[ "install", "java", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L156-L169
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.kafka_install
def kafka_install(self): """ kafka download and install :return: """ with cd('/tmp'): if not exists('kafka.tgz'): sudo('wget {0} -O kafka.tgz'.format( bigdata_conf.kafka_download_url )) sudo('tar -zxf kafka.tgz') sudo('rm -rf {0}'.format(bigdata_conf.kafka_home)) sudo('mv kafka_* {0}'.format(bigdata_conf.kafka_home))
python
def kafka_install(self): """ kafka download and install :return: """ with cd('/tmp'): if not exists('kafka.tgz'): sudo('wget {0} -O kafka.tgz'.format( bigdata_conf.kafka_download_url )) sudo('tar -zxf kafka.tgz') sudo('rm -rf {0}'.format(bigdata_conf.kafka_home)) sudo('mv kafka_* {0}'.format(bigdata_conf.kafka_home))
[ "def", "kafka_install", "(", "self", ")", ":", "with", "cd", "(", "'/tmp'", ")", ":", "if", "not", "exists", "(", "'kafka.tgz'", ")", ":", "sudo", "(", "'wget {0} -O kafka.tgz'", ".", "format", "(", "bigdata_conf", ".", "kafka_download_url", ")", ")", "sud...
kafka download and install :return:
[ "kafka", "download", "and", "install", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L171-L185
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.kafka_config
def kafka_config(self): """ kafka config :return: """ # 读取配置文件中的端口 config_obj = self.configure[self.args.config[1]] kafka_ports = config_obj.get('KAFKA_PORTS') # 默认端口9092 if not kafka_ports: kafka_ports_arr = ['9092'] else: kafka_ports_arr = kafka_ports.replace(' ', '').split(',') # chmod project root owner sudo('chown {user}:{user} -R {path}'.format( user=config_obj.get('user'), path=bigdata_conf.project_root )) # change kafka bin permission for JAVA sudo('chmod -R 777 {0}/bin'.format(bigdata_conf.kafka_home)) # 配置zookeeper服务 self.systemctl_autostart( 'zookeeper.service', '/opt/kafka/bin/zookeeper-server-start.sh /opt/kafka/config/zookeeper.properties', '/opt/kafka/bin/zookeeper-server-stop.sh /opt/kafka/config/zookeeper.properties' ) # 循环生成kafka配置文件 with cd('{0}/config'.format(bigdata_conf.kafka_home)): for idx, k_port in enumerate(kafka_ports_arr): conf_file = 'server.properties-{0}'.format(k_port) run('cp server.properties {0}'.format(conf_file)) # 修改kafka配置文件 sed(conf_file, 'broker.id=.*', 'broker.id={0}'.format(idx)) uncomment(conf_file, 'listeners=PLAINTEXT') sed(conf_file, 'PLAINTEXT://.*', 'PLAINTEXT://{0}:{1}'.format( env.host_string, k_port )) sed(conf_file, 'log.dirs=.*', 'log.dirs=/tmp/kafka-log-{0}'.format(k_port)) # 配置kafka服务 self.systemctl_autostart( 'kafka-{0}.service'.format(k_port), '/opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/{0}'.format(conf_file), '/opt/kafka/bin/kafka-server-stop.sh /opt/kafka/config/{0}'.format(conf_file) )
python
def kafka_config(self): """ kafka config :return: """ # 读取配置文件中的端口 config_obj = self.configure[self.args.config[1]] kafka_ports = config_obj.get('KAFKA_PORTS') # 默认端口9092 if not kafka_ports: kafka_ports_arr = ['9092'] else: kafka_ports_arr = kafka_ports.replace(' ', '').split(',') # chmod project root owner sudo('chown {user}:{user} -R {path}'.format( user=config_obj.get('user'), path=bigdata_conf.project_root )) # change kafka bin permission for JAVA sudo('chmod -R 777 {0}/bin'.format(bigdata_conf.kafka_home)) # 配置zookeeper服务 self.systemctl_autostart( 'zookeeper.service', '/opt/kafka/bin/zookeeper-server-start.sh /opt/kafka/config/zookeeper.properties', '/opt/kafka/bin/zookeeper-server-stop.sh /opt/kafka/config/zookeeper.properties' ) # 循环生成kafka配置文件 with cd('{0}/config'.format(bigdata_conf.kafka_home)): for idx, k_port in enumerate(kafka_ports_arr): conf_file = 'server.properties-{0}'.format(k_port) run('cp server.properties {0}'.format(conf_file)) # 修改kafka配置文件 sed(conf_file, 'broker.id=.*', 'broker.id={0}'.format(idx)) uncomment(conf_file, 'listeners=PLAINTEXT') sed(conf_file, 'PLAINTEXT://.*', 'PLAINTEXT://{0}:{1}'.format( env.host_string, k_port )) sed(conf_file, 'log.dirs=.*', 'log.dirs=/tmp/kafka-log-{0}'.format(k_port)) # 配置kafka服务 self.systemctl_autostart( 'kafka-{0}.service'.format(k_port), '/opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/{0}'.format(conf_file), '/opt/kafka/bin/kafka-server-stop.sh /opt/kafka/config/{0}'.format(conf_file) )
[ "def", "kafka_config", "(", "self", ")", ":", "# 读取配置文件中的端口", "config_obj", "=", "self", ".", "configure", "[", "self", ".", "args", ".", "config", "[", "1", "]", "]", "kafka_ports", "=", "config_obj", ".", "get", "(", "'KAFKA_PORTS'", ")", "# 默认端口9092", ...
kafka config :return:
[ "kafka", "config", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L187-L236
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.elastic_install
def elastic_install(self): """ elasticsearch install :return: """ with cd('/tmp'): if not exists('elastic.deb'): sudo('wget {0} -O elastic.deb'.format( bigdata_conf.elastic_download_url )) sudo('dpkg -i elastic.deb') sudo('apt-get install -y')
python
def elastic_install(self): """ elasticsearch install :return: """ with cd('/tmp'): if not exists('elastic.deb'): sudo('wget {0} -O elastic.deb'.format( bigdata_conf.elastic_download_url )) sudo('dpkg -i elastic.deb') sudo('apt-get install -y')
[ "def", "elastic_install", "(", "self", ")", ":", "with", "cd", "(", "'/tmp'", ")", ":", "if", "not", "exists", "(", "'elastic.deb'", ")", ":", "sudo", "(", "'wget {0} -O elastic.deb'", ".", "format", "(", "bigdata_conf", ".", "elastic_download_url", ")", ")"...
elasticsearch install :return:
[ "elasticsearch", "install", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L238-L250
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.logstash_install
def logstash_install(self): """ logstash install :return: """ with cd('/tmp'): if not exists('logstash.deb'): sudo('wget {0} -O logstash.deb'.format( bigdata_conf.logstash_download_url )) sudo('dpkg -i logstash.deb') sudo('apt-get install -y')
python
def logstash_install(self): """ logstash install :return: """ with cd('/tmp'): if not exists('logstash.deb'): sudo('wget {0} -O logstash.deb'.format( bigdata_conf.logstash_download_url )) sudo('dpkg -i logstash.deb') sudo('apt-get install -y')
[ "def", "logstash_install", "(", "self", ")", ":", "with", "cd", "(", "'/tmp'", ")", ":", "if", "not", "exists", "(", "'logstash.deb'", ")", ":", "sudo", "(", "'wget {0} -O logstash.deb'", ".", "format", "(", "bigdata_conf", ".", "logstash_download_url", ")", ...
logstash install :return:
[ "logstash", "install", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L262-L274
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.kibana_install
def kibana_install(self): """ kibana install :return: """ with cd('/tmp'): if not exists('kibana.deb'): sudo('wget {0} -O kibana.deb'.format( bigdata_conf.kibana_download_url )) sudo('dpkg -i kibana.deb') sudo('apt-get install -y')
python
def kibana_install(self): """ kibana install :return: """ with cd('/tmp'): if not exists('kibana.deb'): sudo('wget {0} -O kibana.deb'.format( bigdata_conf.kibana_download_url )) sudo('dpkg -i kibana.deb') sudo('apt-get install -y')
[ "def", "kibana_install", "(", "self", ")", ":", "with", "cd", "(", "'/tmp'", ")", ":", "if", "not", "exists", "(", "'kibana.deb'", ")", ":", "sudo", "(", "'wget {0} -O kibana.deb'", ".", "format", "(", "bigdata_conf", ".", "kibana_download_url", ")", ")", ...
kibana install :return:
[ "kibana", "install", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L286-L298
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.kibana_config
def kibana_config(self): """ config kibana :return: """ uncomment("/etc/kibana/kibana.yml", "#server.host:", use_sudo=True) sed('/etc/kibana/kibana.yml', 'server.host:.*', 'server.host: "{0}"'.format(env.host_string), use_sudo=True) sudo('systemctl stop kibana.service') sudo('systemctl daemon-reload') sudo('systemctl enable kibana.service') sudo('systemctl start kibana.service')
python
def kibana_config(self): """ config kibana :return: """ uncomment("/etc/kibana/kibana.yml", "#server.host:", use_sudo=True) sed('/etc/kibana/kibana.yml', 'server.host:.*', 'server.host: "{0}"'.format(env.host_string), use_sudo=True) sudo('systemctl stop kibana.service') sudo('systemctl daemon-reload') sudo('systemctl enable kibana.service') sudo('systemctl start kibana.service')
[ "def", "kibana_config", "(", "self", ")", ":", "uncomment", "(", "\"/etc/kibana/kibana.yml\"", ",", "\"#server.host:\"", ",", "use_sudo", "=", "True", ")", "sed", "(", "'/etc/kibana/kibana.yml'", ",", "'server.host:.*'", ",", "'server.host: \"{0}\"'", ".", "format", ...
config kibana :return:
[ "config", "kibana", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L300-L312
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.hadoop_install
def hadoop_install(self): """ install hadoop :return: """ with cd('/tmp'): if not exists('hadoop.tar.gz'): sudo('wget {0} -O hadoop.tar.gz'.format( bigdata_conf.hadoop_download_url )) sudo('rm -rf hadoop-*') sudo('tar -zxf hadoop.tar.gz') sudo('rm -rf {0}'.format(bigdata_conf.hadoop_home)) sudo('mv hadoop-* {0}'.format(bigdata_conf.hadoop_home))
python
def hadoop_install(self): """ install hadoop :return: """ with cd('/tmp'): if not exists('hadoop.tar.gz'): sudo('wget {0} -O hadoop.tar.gz'.format( bigdata_conf.hadoop_download_url )) sudo('rm -rf hadoop-*') sudo('tar -zxf hadoop.tar.gz') sudo('rm -rf {0}'.format(bigdata_conf.hadoop_home)) sudo('mv hadoop-* {0}'.format(bigdata_conf.hadoop_home))
[ "def", "hadoop_install", "(", "self", ")", ":", "with", "cd", "(", "'/tmp'", ")", ":", "if", "not", "exists", "(", "'hadoop.tar.gz'", ")", ":", "sudo", "(", "'wget {0} -O hadoop.tar.gz'", ".", "format", "(", "bigdata_conf", ".", "hadoop_download_url", ")", "...
install hadoop :return:
[ "install", "hadoop", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L314-L328
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.spark_install
def spark_install(self): """ download and install spark :return: """ sudo('apt-get -y install build-essential python-dev python-six \ python-virtualenv libcurl4-nss-dev libsasl2-dev libsasl2-modules \ maven libapr1-dev libsvn-dev zlib1g-dev') with cd('/tmp'): if not exists('spark.tgz'): sudo('wget {0} -O spark.tgz'.format( bigdata_conf.spark_download_url )) sudo('rm -rf spark-*') sudo('tar -zxf spark.tgz') sudo('rm -rf {0}'.format(bigdata_conf.spark_home)) sudo('mv spark-* {0}'.format(bigdata_conf.spark_home))
python
def spark_install(self): """ download and install spark :return: """ sudo('apt-get -y install build-essential python-dev python-six \ python-virtualenv libcurl4-nss-dev libsasl2-dev libsasl2-modules \ maven libapr1-dev libsvn-dev zlib1g-dev') with cd('/tmp'): if not exists('spark.tgz'): sudo('wget {0} -O spark.tgz'.format( bigdata_conf.spark_download_url )) sudo('rm -rf spark-*') sudo('tar -zxf spark.tgz') sudo('rm -rf {0}'.format(bigdata_conf.spark_home)) sudo('mv spark-* {0}'.format(bigdata_conf.spark_home))
[ "def", "spark_install", "(", "self", ")", ":", "sudo", "(", "'apt-get -y install build-essential python-dev python-six \\\n python-virtualenv libcurl4-nss-dev libsasl2-dev libsasl2-modules \\\n maven libapr1-dev libsvn-dev zlib1g-dev'", ")", "with", "cd", "(", "'/tm...
download and install spark :return:
[ "download", "and", "install", "spark", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L330-L348
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.spark_config
def spark_config(self): """ config spark :return: """ configs = [ 'export LD_LIBRARY_PATH={0}/lib/native/:$LD_LIBRARY_PATH'.format( bigdata_conf.hadoop_home ), 'export SPARK_LOCAL_IP={0}'.format(env.host_string) ] append(bigdata_conf.global_env_home, configs, use_sudo=True) run('source {0}'.format(bigdata_conf.global_env_home))
python
def spark_config(self): """ config spark :return: """ configs = [ 'export LD_LIBRARY_PATH={0}/lib/native/:$LD_LIBRARY_PATH'.format( bigdata_conf.hadoop_home ), 'export SPARK_LOCAL_IP={0}'.format(env.host_string) ] append(bigdata_conf.global_env_home, configs, use_sudo=True) run('source {0}'.format(bigdata_conf.global_env_home))
[ "def", "spark_config", "(", "self", ")", ":", "configs", "=", "[", "'export LD_LIBRARY_PATH={0}/lib/native/:$LD_LIBRARY_PATH'", ".", "format", "(", "bigdata_conf", ".", "hadoop_home", ")", ",", "'export SPARK_LOCAL_IP={0}'", ".", "format", "(", "env", ".", "host_strin...
config spark :return:
[ "config", "spark", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L350-L363
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.reset_server_env
def reset_server_env(self, server_name, configure): """ reset server env to server-name :param server_name: :param configure: :return: """ env.host_string = configure[server_name]['host'] env.user = configure[server_name]['user'] env.password = configure[server_name]['passwd']
python
def reset_server_env(self, server_name, configure): """ reset server env to server-name :param server_name: :param configure: :return: """ env.host_string = configure[server_name]['host'] env.user = configure[server_name]['user'] env.password = configure[server_name]['passwd']
[ "def", "reset_server_env", "(", "self", ",", "server_name", ",", "configure", ")", ":", "env", ".", "host_string", "=", "configure", "[", "server_name", "]", "[", "'host'", "]", "env", ".", "user", "=", "configure", "[", "server_name", "]", "[", "'user'", ...
reset server env to server-name :param server_name: :param configure: :return:
[ "reset", "server", "env", "to", "server", "-", "name", ":", "param", "server_name", ":", ":", "param", "configure", ":", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L365-L374
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.generate_ssh
def generate_ssh(self, server, args, configure): """ 异步同时执行SSH生成 generate ssh :param server: :param args: :param configure: :return: """ self.reset_server_env(server, configure) # chmod project root owner sudo('chown {user}:{user} -R {path}'.format( user=configure[server]['user'], path=bigdata_conf.project_root )) # generate ssh key if not exists('~/.ssh/id_rsa.pub'): run('ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa')
python
def generate_ssh(self, server, args, configure): """ 异步同时执行SSH生成 generate ssh :param server: :param args: :param configure: :return: """ self.reset_server_env(server, configure) # chmod project root owner sudo('chown {user}:{user} -R {path}'.format( user=configure[server]['user'], path=bigdata_conf.project_root )) # generate ssh key if not exists('~/.ssh/id_rsa.pub'): run('ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa')
[ "def", "generate_ssh", "(", "self", ",", "server", ",", "args", ",", "configure", ")", ":", "self", ".", "reset_server_env", "(", "server", ",", "configure", ")", "# chmod project root owner", "sudo", "(", "'chown {user}:{user} -R {path}'", ".", "format", "(", "...
异步同时执行SSH生成 generate ssh :param server: :param args: :param configure: :return:
[ "异步同时执行SSH生成", "generate", "ssh", ":", "param", "server", ":", ":", "param", "args", ":", ":", "param", "configure", ":", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L376-L394
zhexiao/ezhost
ezhost/ServerCommon.py
ServerCommon.add_spark_slave
def add_spark_slave(self, master, slave, configure): """ add spark slave :return: """ # go to master server, add config self.reset_server_env(master, configure) with cd(bigdata_conf.spark_home): if not exists('conf/spark-env.sh'): sudo('cp conf/spark-env.sh.template conf/spark-env.sh') spark_env = bigdata_conf.spark_env.format( spark_home=bigdata_conf.spark_home, hadoop_home=bigdata_conf.hadoop_home, host=env.host_string, SPARK_WORKER_MEMORY=configure[master].get( 'SPARK_WORKER_MEMORY', '512M' ) ) put(StringIO(spark_env), 'conf/spark-env.sh', use_sudo=True) if not exists('conf/slaves'): sudo('cp conf/slaves.template conf/slaves') # comment slaves localhost comment('{0}/conf/slaves'.format(bigdata_conf.spark_home), 'localhost', use_sudo=True) # add slave into config append('{0}/conf/slaves'.format(bigdata_conf.spark_home), '\n{0}'.format(configure[slave]['host']), use_sudo=True) run('scp -r {0} {1}@{2}:/opt'.format( bigdata_conf.spark_home, configure[slave]['user'], configure[slave]['host'] )) # go to slave server self.reset_server_env(slave, configure) append(bigdata_conf.global_env_home, 'export SPARK_LOCAL_IP={0}'.format( configure[slave]['host'] ), use_sudo=True) run('source {0}'.format(bigdata_conf.global_env_home)) # go to master server, restart server self.reset_server_env(master, configure) with cd(bigdata_conf.spark_home): run('./sbin/stop-master.sh') run('./sbin/stop-slaves.sh') run('./sbin/start-master.sh') run('./sbin/start-slaves.sh')
python
def add_spark_slave(self, master, slave, configure): """ add spark slave :return: """ # go to master server, add config self.reset_server_env(master, configure) with cd(bigdata_conf.spark_home): if not exists('conf/spark-env.sh'): sudo('cp conf/spark-env.sh.template conf/spark-env.sh') spark_env = bigdata_conf.spark_env.format( spark_home=bigdata_conf.spark_home, hadoop_home=bigdata_conf.hadoop_home, host=env.host_string, SPARK_WORKER_MEMORY=configure[master].get( 'SPARK_WORKER_MEMORY', '512M' ) ) put(StringIO(spark_env), 'conf/spark-env.sh', use_sudo=True) if not exists('conf/slaves'): sudo('cp conf/slaves.template conf/slaves') # comment slaves localhost comment('{0}/conf/slaves'.format(bigdata_conf.spark_home), 'localhost', use_sudo=True) # add slave into config append('{0}/conf/slaves'.format(bigdata_conf.spark_home), '\n{0}'.format(configure[slave]['host']), use_sudo=True) run('scp -r {0} {1}@{2}:/opt'.format( bigdata_conf.spark_home, configure[slave]['user'], configure[slave]['host'] )) # go to slave server self.reset_server_env(slave, configure) append(bigdata_conf.global_env_home, 'export SPARK_LOCAL_IP={0}'.format( configure[slave]['host'] ), use_sudo=True) run('source {0}'.format(bigdata_conf.global_env_home)) # go to master server, restart server self.reset_server_env(master, configure) with cd(bigdata_conf.spark_home): run('./sbin/stop-master.sh') run('./sbin/stop-slaves.sh') run('./sbin/start-master.sh') run('./sbin/start-slaves.sh')
[ "def", "add_spark_slave", "(", "self", ",", "master", ",", "slave", ",", "configure", ")", ":", "# go to master server, add config", "self", ".", "reset_server_env", "(", "master", ",", "configure", ")", "with", "cd", "(", "bigdata_conf", ".", "spark_home", ")",...
add spark slave :return:
[ "add", "spark", "slave", ":", "return", ":" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L396-L449
robehickman/simple-http-file-sync
shttpfs/storage.py
storage.new_tmp
def new_tmp(self): """ Create a new temp file allocation """ self.tmp_idx += 1 return p.join(self.tmp_dir, 'tmp_' + str(self.tmp_idx))
python
def new_tmp(self): """ Create a new temp file allocation """ self.tmp_idx += 1 return p.join(self.tmp_dir, 'tmp_' + str(self.tmp_idx))
[ "def", "new_tmp", "(", "self", ")", ":", "self", ".", "tmp_idx", "+=", "1", "return", "p", ".", "join", "(", "self", ".", "tmp_dir", ",", "'tmp_'", "+", "str", "(", "self", ".", "tmp_idx", ")", ")" ]
Create a new temp file allocation
[ "Create", "a", "new", "temp", "file", "allocation" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/storage.py#L31-L35
robehickman/simple-http-file-sync
shttpfs/storage.py
storage.new_backup
def new_backup(self, src): """ Create a new backup file allocation """ backup_id_file = p.join(self.backup_dir, '.bk_idx') backup_num = file_or_default(backup_id_file, 1, int) backup_name = str(backup_num) + "_" + os.path.basename(src) backup_num += 1 file_put_contents(backup_id_file, str(backup_num)) return p.join(self.backup_dir, backup_name)
python
def new_backup(self, src): """ Create a new backup file allocation """ backup_id_file = p.join(self.backup_dir, '.bk_idx') backup_num = file_or_default(backup_id_file, 1, int) backup_name = str(backup_num) + "_" + os.path.basename(src) backup_num += 1 file_put_contents(backup_id_file, str(backup_num)) return p.join(self.backup_dir, backup_name)
[ "def", "new_backup", "(", "self", ",", "src", ")", ":", "backup_id_file", "=", "p", ".", "join", "(", "self", ".", "backup_dir", ",", "'.bk_idx'", ")", "backup_num", "=", "file_or_default", "(", "backup_id_file", ",", "1", ",", "int", ")", "backup_name", ...
Create a new backup file allocation
[ "Create", "a", "new", "backup", "file", "allocation" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/storage.py#L38-L47
robehickman/simple-http-file-sync
shttpfs/storage.py
storage.begin
def begin(self): """ Begin a transaction """ if self.journal != None: raise Exception('Storage is already active, nested begin not supported') # under normal operation journal is deleted at end of transaction # if it does exist we need to roll back if os.path.isfile(self.j_file): self.rollback() self.journal = open(self.j_file, 'w')
python
def begin(self): """ Begin a transaction """ if self.journal != None: raise Exception('Storage is already active, nested begin not supported') # under normal operation journal is deleted at end of transaction # if it does exist we need to roll back if os.path.isfile(self.j_file): self.rollback() self.journal = open(self.j_file, 'w')
[ "def", "begin", "(", "self", ")", ":", "if", "self", ".", "journal", "!=", "None", ":", "raise", "Exception", "(", "'Storage is already active, nested begin not supported'", ")", "# under normal operation journal is deleted at end of transaction", "# if it does exist we need to...
Begin a transaction
[ "Begin", "a", "transaction" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/storage.py#L50-L60
robehickman/simple-http-file-sync
shttpfs/storage.py
storage.do_action
def do_action(self, command, journal = True): """ Implementation for declarative file operations. """ cmd = 0; src = 1; path = 1; data = 2; dst = 2 if journal is True: self.journal.write(json.dumps(command['undo']) + "\n") self.journal.flush() d = command['do'] if d[cmd] == 'copy': shutil.copy(d[src], d[dst]) elif d[cmd] == 'move': shutil.move(d[src], d[dst]) elif d[cmd] == 'backup': shutil.move(d[src], self.new_backup(d[src])) elif d[cmd] == 'write' : if callable(d[data]): d[data](d[path]) else: file_put_contents(d[path], d[data])
python
def do_action(self, command, journal = True): """ Implementation for declarative file operations. """ cmd = 0; src = 1; path = 1; data = 2; dst = 2 if journal is True: self.journal.write(json.dumps(command['undo']) + "\n") self.journal.flush() d = command['do'] if d[cmd] == 'copy': shutil.copy(d[src], d[dst]) elif d[cmd] == 'move': shutil.move(d[src], d[dst]) elif d[cmd] == 'backup': shutil.move(d[src], self.new_backup(d[src])) elif d[cmd] == 'write' : if callable(d[data]): d[data](d[path]) else: file_put_contents(d[path], d[data])
[ "def", "do_action", "(", "self", ",", "command", ",", "journal", "=", "True", ")", ":", "cmd", "=", "0", "src", "=", "1", "path", "=", "1", "data", "=", "2", "dst", "=", "2", "if", "journal", "is", "True", ":", "self", ".", "journal", ".", "wri...
Implementation for declarative file operations.
[ "Implementation", "for", "declarative", "file", "operations", "." ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/storage.py#L63-L78
robehickman/simple-http-file-sync
shttpfs/storage.py
storage.rollback
def rollback(self): """ Do journal rollback """ # Close the journal for writing, if this is an automatic rollback following a crash, # the file descriptor will not be open, so don't need to do anything. if self.journal != None: self.journal.close() self.journal = None # Read the journal journ_list = [] with open(self.j_file) as fle: for l in fle: journ_list.append(json.loads(l)) journ_subtract = deque(reversed(journ_list)) for j_itm in reversed(journ_list): try: self.do_action({'do' : j_itm}, False) except IOError: pass # As each item is completed remove it from the journal file, in case # something fails during the rollback we can pick up where it stopped. journ_subtract.popleft() with open(self.j_file, 'w') as f: for data in list(journ_subtract): f.write(json.dumps(data) + "\n") f.flush() # Rollback is complete so delete the journal file os.remove(self.j_file)
python
def rollback(self): """ Do journal rollback """ # Close the journal for writing, if this is an automatic rollback following a crash, # the file descriptor will not be open, so don't need to do anything. if self.journal != None: self.journal.close() self.journal = None # Read the journal journ_list = [] with open(self.j_file) as fle: for l in fle: journ_list.append(json.loads(l)) journ_subtract = deque(reversed(journ_list)) for j_itm in reversed(journ_list): try: self.do_action({'do' : j_itm}, False) except IOError: pass # As each item is completed remove it from the journal file, in case # something fails during the rollback we can pick up where it stopped. journ_subtract.popleft() with open(self.j_file, 'w') as f: for data in list(journ_subtract): f.write(json.dumps(data) + "\n") f.flush() # Rollback is complete so delete the journal file os.remove(self.j_file)
[ "def", "rollback", "(", "self", ")", ":", "# Close the journal for writing, if this is an automatic rollback following a crash,", "# the file descriptor will not be open, so don't need to do anything.", "if", "self", ".", "journal", "!=", "None", ":", "self", ".", "journal", ".",...
Do journal rollback
[ "Do", "journal", "rollback" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/storage.py#L81-L109
robehickman/simple-http-file-sync
shttpfs/storage.py
storage.commit
def commit(self, cont = False): """ Finish a transaction """ self.journal.close() self.journal = None os.remove(self.j_file) for itm in os.listdir(self.tmp_dir): os.remove(cpjoin(self.tmp_dir, itm)) if cont is True: self.begin()
python
def commit(self, cont = False): """ Finish a transaction """ self.journal.close() self.journal = None os.remove(self.j_file) for itm in os.listdir(self.tmp_dir): os.remove(cpjoin(self.tmp_dir, itm)) if cont is True: self.begin()
[ "def", "commit", "(", "self", ",", "cont", "=", "False", ")", ":", "self", ".", "journal", ".", "close", "(", ")", "self", ".", "journal", "=", "None", "os", ".", "remove", "(", "self", ".", "j_file", ")", "for", "itm", "in", "os", ".", "listdir"...
Finish a transaction
[ "Finish", "a", "transaction" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/storage.py#L112-L121
robehickman/simple-http-file-sync
shttpfs/storage.py
storage.file_get_contents
def file_get_contents(self, path): """ Returns contents of file located at 'path', not changing FS so does not require journaling """ with open(self.get_full_file_path(path), 'r') as f: return f.read()
python
def file_get_contents(self, path): """ Returns contents of file located at 'path', not changing FS so does not require journaling """ with open(self.get_full_file_path(path), 'r') as f: return f.read()
[ "def", "file_get_contents", "(", "self", ",", "path", ")", ":", "with", "open", "(", "self", ".", "get_full_file_path", "(", "path", ")", ",", "'r'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")" ]
Returns contents of file located at 'path', not changing FS so does not require journaling
[ "Returns", "contents", "of", "file", "located", "at", "path", "not", "changing", "FS", "so", "does", "not", "require", "journaling" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/storage.py#L124-L128
robehickman/simple-http-file-sync
shttpfs/storage.py
storage.file_put_contents
def file_put_contents(self, path, data): """ Put passed contents into file located at 'path' """ path = self.get_full_file_path(path) # if file exists, create a temp copy to allow rollback if os.path.isfile(path): tmp_path = self.new_tmp() self.do_action({ 'do' : ['copy', path, tmp_path], 'undo' : ['move', tmp_path, path]}) self.do_action( {'do' : ['write', path, data], 'undo' : ['backup', path]})
python
def file_put_contents(self, path, data): """ Put passed contents into file located at 'path' """ path = self.get_full_file_path(path) # if file exists, create a temp copy to allow rollback if os.path.isfile(path): tmp_path = self.new_tmp() self.do_action({ 'do' : ['copy', path, tmp_path], 'undo' : ['move', tmp_path, path]}) self.do_action( {'do' : ['write', path, data], 'undo' : ['backup', path]})
[ "def", "file_put_contents", "(", "self", ",", "path", ",", "data", ")", ":", "path", "=", "self", ".", "get_full_file_path", "(", "path", ")", "# if file exists, create a temp copy to allow rollback", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", "...
Put passed contents into file located at 'path'
[ "Put", "passed", "contents", "into", "file", "located", "at", "path" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/storage.py#L131-L145
robehickman/simple-http-file-sync
shttpfs/storage.py
storage.move_file
def move_file(self, src, dst): """ Move file from src to dst """ src = self.get_full_file_path(src); dst = self.get_full_file_path(dst) # record where file moved if os.path.isfile(src): # if destination file exists, copy it to tmp first if os.path.isfile(dst): tmp_path = self.new_tmp() self.do_action({ 'do' : ['copy', dst, tmp_path], 'undo' : ['move', tmp_path, dst]}) self.do_action( {'do' : ['move', src, dst], 'undo' : ['move', dst, src]})
python
def move_file(self, src, dst): """ Move file from src to dst """ src = self.get_full_file_path(src); dst = self.get_full_file_path(dst) # record where file moved if os.path.isfile(src): # if destination file exists, copy it to tmp first if os.path.isfile(dst): tmp_path = self.new_tmp() self.do_action({ 'do' : ['copy', dst, tmp_path], 'undo' : ['move', tmp_path, dst]}) self.do_action( {'do' : ['move', src, dst], 'undo' : ['move', dst, src]})
[ "def", "move_file", "(", "self", ",", "src", ",", "dst", ")", ":", "src", "=", "self", ".", "get_full_file_path", "(", "src", ")", "dst", "=", "self", ".", "get_full_file_path", "(", "dst", ")", "# record where file moved", "if", "os", ".", "path", ".", ...
Move file from src to dst
[ "Move", "file", "from", "src", "to", "dst" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/storage.py#L148-L164
robehickman/simple-http-file-sync
shttpfs/storage.py
storage.delete_file
def delete_file(self, path): """ delete a file """ path = self.get_full_file_path(path) # if file exists, create a temp copy to allow rollback if os.path.isfile(path): tmp_path = self.new_tmp() self.do_action({ 'do' : ['move', path, tmp_path], 'undo' : ['move', tmp_path, path]}) else: raise OSError(errno.ENOENT, 'No such file or directory', path)
python
def delete_file(self, path): """ delete a file """ path = self.get_full_file_path(path) # if file exists, create a temp copy to allow rollback if os.path.isfile(path): tmp_path = self.new_tmp() self.do_action({ 'do' : ['move', path, tmp_path], 'undo' : ['move', tmp_path, path]}) else: raise OSError(errno.ENOENT, 'No such file or directory', path)
[ "def", "delete_file", "(", "self", ",", "path", ")", ":", "path", "=", "self", ".", "get_full_file_path", "(", "path", ")", "# if file exists, create a temp copy to allow rollback", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "tmp_path", "="...
delete a file
[ "delete", "a", "file" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/storage.py#L167-L180
mixmastamyk/fr
fr/darwin.py
get_label_map
def get_label_map(opts): ''' Find volume labels from filesystem and return in dict format. ''' result = {} try: # get labels from filesystem for entry in os.scandir(diskdir): if entry.name.startswith('.'): continue if islink(entry.path): target = os.readlink(entry.path) else: target = entry.path result[target] = entry.name if opts.debug: print('\n\nlabel_map:', result) except FileNotFoundError: pass return result
python
def get_label_map(opts): ''' Find volume labels from filesystem and return in dict format. ''' result = {} try: # get labels from filesystem for entry in os.scandir(diskdir): if entry.name.startswith('.'): continue if islink(entry.path): target = os.readlink(entry.path) else: target = entry.path result[target] = entry.name if opts.debug: print('\n\nlabel_map:', result) except FileNotFoundError: pass return result
[ "def", "get_label_map", "(", "opts", ")", ":", "result", "=", "{", "}", "try", ":", "# get labels from filesystem", "for", "entry", "in", "os", ".", "scandir", "(", "diskdir", ")", ":", "if", "entry", ".", "name", ".", "startswith", "(", "'.'", ")", ":...
Find volume labels from filesystem and return in dict format.
[ "Find", "volume", "labels", "from", "filesystem", "and", "return", "in", "dict", "format", "." ]
train
https://github.com/mixmastamyk/fr/blob/f96df8ed7210a033b9e711bbed768d4116213bfb/fr/darwin.py#L37-L54
mixmastamyk/fr
fr/darwin.py
get_diskinfo
def get_diskinfo(opts, show_all=False, debug=False, local_only=False): ''' Returns a list holding the current disk info, stats divided by the ouptut unit. ''' outunit = opts.outunit disks = [] try: label_map = get_label_map(opts) lines = run(diskcmd).splitlines()[1:] # dump header for line in lines: tokens = line.split() mntp = b' '.join(tokens[8:]) dev = basename(tokens[0]) disk = DiskInfo() if (dev in devfilter) or (mntp in mntfilter): if show_all: if dev == b'map': # fix alignment :-/ dev = tokens[0] = b'%b %b' % (dev, tokens[1]) del tokens[1] disk.isram = True else: continue # convert to bytes as integer, then output units disk.dev = dev = dev.decode('ascii') disk.ocap = float(tokens[1]) * 1024 disk.cap = disk.ocap / outunit disk.free = float(tokens[3]) * 1024 / outunit disk.pcnt = int(tokens[4][:-1]) disk.used = float(tokens[2]) * 1024 / outunit disk.mntp = mntp.decode('utf8') disk.label = label_map.get(disk.mntp) disk.ismntd = bool(disk.mntp) disk.isnet = ':' in dev # cheesy but may work? (macos) if local_only and disk.isnet: continue if disk.ismntd: if disk.mntp == '/': disk.rw = True else: disk.rw = os.access(disk.mntp, os.W_OK) # ~ disk.isopt = None # TODO: not sure how to get these # ~ disk.isrem = None disks.append(disk) except IOError as err: print(err) return None if opts.debug: print() for disk in disks: print(disk.dev, disk) print() disks.sort() return disks
python
def get_diskinfo(opts, show_all=False, debug=False, local_only=False): ''' Returns a list holding the current disk info, stats divided by the ouptut unit. ''' outunit = opts.outunit disks = [] try: label_map = get_label_map(opts) lines = run(diskcmd).splitlines()[1:] # dump header for line in lines: tokens = line.split() mntp = b' '.join(tokens[8:]) dev = basename(tokens[0]) disk = DiskInfo() if (dev in devfilter) or (mntp in mntfilter): if show_all: if dev == b'map': # fix alignment :-/ dev = tokens[0] = b'%b %b' % (dev, tokens[1]) del tokens[1] disk.isram = True else: continue # convert to bytes as integer, then output units disk.dev = dev = dev.decode('ascii') disk.ocap = float(tokens[1]) * 1024 disk.cap = disk.ocap / outunit disk.free = float(tokens[3]) * 1024 / outunit disk.pcnt = int(tokens[4][:-1]) disk.used = float(tokens[2]) * 1024 / outunit disk.mntp = mntp.decode('utf8') disk.label = label_map.get(disk.mntp) disk.ismntd = bool(disk.mntp) disk.isnet = ':' in dev # cheesy but may work? (macos) if local_only and disk.isnet: continue if disk.ismntd: if disk.mntp == '/': disk.rw = True else: disk.rw = os.access(disk.mntp, os.W_OK) # ~ disk.isopt = None # TODO: not sure how to get these # ~ disk.isrem = None disks.append(disk) except IOError as err: print(err) return None if opts.debug: print() for disk in disks: print(disk.dev, disk) print() disks.sort() return disks
[ "def", "get_diskinfo", "(", "opts", ",", "show_all", "=", "False", ",", "debug", "=", "False", ",", "local_only", "=", "False", ")", ":", "outunit", "=", "opts", ".", "outunit", "disks", "=", "[", "]", "try", ":", "label_map", "=", "get_label_map", "("...
Returns a list holding the current disk info, stats divided by the ouptut unit.
[ "Returns", "a", "list", "holding", "the", "current", "disk", "info", "stats", "divided", "by", "the", "ouptut", "unit", "." ]
train
https://github.com/mixmastamyk/fr/blob/f96df8ed7210a033b9e711bbed768d4116213bfb/fr/darwin.py#L57-L113
mixmastamyk/fr
fr/darwin.py
get_meminfo
def get_meminfo(opts): ''' Returns a dictionary holding the current memory info, divided by the ouptut unit. If mem info can't be read, returns None. For Darwin / Mac OS X, interrogates the output of the sysctl and vm_stat utilities rather than /proc/meminfo ''' outunit = opts.outunit meminfo = MemInfo() sysinf = parse_sysctl(run(syscmd)) vmstat = parse_vmstat(run(vmscmd)) if opts.debug: print('\n') print('sysinf', sysinf) print('vmstat:', vmstat) print() # mem set meminfo.memtotal = sysinf['hw.memsize'] / outunit meminfo.memfree = vmstat.free / outunit meminfo.used = (vmstat.wire + vmstat.active) / outunit meminfo.cached = (vmstat.inactive + vmstat.speculative) / outunit meminfo.buffers = 0 # TODO: investigate # swap set swaptotal, swapused, swapfree = sysinf['vm.swapusage'] meminfo.swaptotal = swaptotal / outunit meminfo.swapused = swapused / outunit meminfo.swapfree = swapfree / outunit meminfo.swapcached = 0 # alternative to calculating used: #~ meminfo.swapused = (meminfo.swaptotal - meminfo.swapcached - #~ meminfo.swapfree) if opts.debug: print('meminfo:', meminfo) return meminfo
python
def get_meminfo(opts): ''' Returns a dictionary holding the current memory info, divided by the ouptut unit. If mem info can't be read, returns None. For Darwin / Mac OS X, interrogates the output of the sysctl and vm_stat utilities rather than /proc/meminfo ''' outunit = opts.outunit meminfo = MemInfo() sysinf = parse_sysctl(run(syscmd)) vmstat = parse_vmstat(run(vmscmd)) if opts.debug: print('\n') print('sysinf', sysinf) print('vmstat:', vmstat) print() # mem set meminfo.memtotal = sysinf['hw.memsize'] / outunit meminfo.memfree = vmstat.free / outunit meminfo.used = (vmstat.wire + vmstat.active) / outunit meminfo.cached = (vmstat.inactive + vmstat.speculative) / outunit meminfo.buffers = 0 # TODO: investigate # swap set swaptotal, swapused, swapfree = sysinf['vm.swapusage'] meminfo.swaptotal = swaptotal / outunit meminfo.swapused = swapused / outunit meminfo.swapfree = swapfree / outunit meminfo.swapcached = 0 # alternative to calculating used: #~ meminfo.swapused = (meminfo.swaptotal - meminfo.swapcached - #~ meminfo.swapfree) if opts.debug: print('meminfo:', meminfo) return meminfo
[ "def", "get_meminfo", "(", "opts", ")", ":", "outunit", "=", "opts", ".", "outunit", "meminfo", "=", "MemInfo", "(", ")", "sysinf", "=", "parse_sysctl", "(", "run", "(", "syscmd", ")", ")", "vmstat", "=", "parse_vmstat", "(", "run", "(", "vmscmd", ")",...
Returns a dictionary holding the current memory info, divided by the ouptut unit. If mem info can't be read, returns None. For Darwin / Mac OS X, interrogates the output of the sysctl and vm_stat utilities rather than /proc/meminfo
[ "Returns", "a", "dictionary", "holding", "the", "current", "memory", "info", "divided", "by", "the", "ouptut", "unit", ".", "If", "mem", "info", "can", "t", "be", "read", "returns", "None", ".", "For", "Darwin", "/", "Mac", "OS", "X", "interrogates", "th...
train
https://github.com/mixmastamyk/fr/blob/f96df8ed7210a033b9e711bbed768d4116213bfb/fr/darwin.py#L116-L152
mixmastamyk/fr
fr/darwin.py
parse_sysctl
def parse_sysctl(text): ''' Parse sysctl output. ''' lines = text.splitlines() results = {} for line in lines: key, _, value = line.decode('ascii').partition(': ') if key == 'hw.memsize': value = int(value) elif key == 'vm.swapusage': values = value.split()[2::3] # every third token su_unit = values[0][-1].lower() # get unit, 'M' PAGESIZE = 1024 if su_unit == 'm': PAGESIZE = 1024 * 1024 value = [ (float(val[:-1]) * PAGESIZE) for val in values ] results[key] = value return results
python
def parse_sysctl(text): ''' Parse sysctl output. ''' lines = text.splitlines() results = {} for line in lines: key, _, value = line.decode('ascii').partition(': ') if key == 'hw.memsize': value = int(value) elif key == 'vm.swapusage': values = value.split()[2::3] # every third token su_unit = values[0][-1].lower() # get unit, 'M' PAGESIZE = 1024 if su_unit == 'm': PAGESIZE = 1024 * 1024 value = [ (float(val[:-1]) * PAGESIZE) for val in values ] results[key] = value return results
[ "def", "parse_sysctl", "(", "text", ")", ":", "lines", "=", "text", ".", "splitlines", "(", ")", "results", "=", "{", "}", "for", "line", "in", "lines", ":", "key", ",", "_", ",", "value", "=", "line", ".", "decode", "(", "'ascii'", ")", ".", "pa...
Parse sysctl output.
[ "Parse", "sysctl", "output", "." ]
train
https://github.com/mixmastamyk/fr/blob/f96df8ed7210a033b9e711bbed768d4116213bfb/fr/darwin.py#L155-L175
mixmastamyk/fr
fr/darwin.py
parse_vmstat
def parse_vmstat(text): ''' Parse vmstat output. ''' lines = text.splitlines() results = Info() # TODO use MemInfo try: PAGESIZE = int(lines[0].split()[-2]) except IndexError: PAGESIZE = 4096 for line in lines[1:]: # dump header if not line[0] == 80: # b'P' startswith Page... break tokens = line.split() name, value = tokens[1][:-1].decode('ascii'), tokens[-1][:-1] results[name] = int(value) * PAGESIZE return results
python
def parse_vmstat(text): ''' Parse vmstat output. ''' lines = text.splitlines() results = Info() # TODO use MemInfo try: PAGESIZE = int(lines[0].split()[-2]) except IndexError: PAGESIZE = 4096 for line in lines[1:]: # dump header if not line[0] == 80: # b'P' startswith Page... break tokens = line.split() name, value = tokens[1][:-1].decode('ascii'), tokens[-1][:-1] results[name] = int(value) * PAGESIZE return results
[ "def", "parse_vmstat", "(", "text", ")", ":", "lines", "=", "text", ".", "splitlines", "(", ")", "results", "=", "Info", "(", ")", "# TODO use MemInfo", "try", ":", "PAGESIZE", "=", "int", "(", "lines", "[", "0", "]", ".", "split", "(", ")", "[", "...
Parse vmstat output.
[ "Parse", "vmstat", "output", "." ]
train
https://github.com/mixmastamyk/fr/blob/f96df8ed7210a033b9e711bbed768d4116213bfb/fr/darwin.py#L178-L195
vadimk2016/v-vk-api
v_vk_api/utils.py
get_base_url
def get_base_url(html: str) -> str: """ Search for login url from VK login page """ forms = BeautifulSoup(html, 'html.parser').find_all('form') if not forms: raise VVKBaseUrlException('Form for login not found') elif len(forms) > 1: raise VVKBaseUrlException('More than one login form found') login_url = forms[0].get('action') if not login_url: raise VVKBaseUrlException('No action tag in form') return login_url
python
def get_base_url(html: str) -> str: """ Search for login url from VK login page """ forms = BeautifulSoup(html, 'html.parser').find_all('form') if not forms: raise VVKBaseUrlException('Form for login not found') elif len(forms) > 1: raise VVKBaseUrlException('More than one login form found') login_url = forms[0].get('action') if not login_url: raise VVKBaseUrlException('No action tag in form') return login_url
[ "def", "get_base_url", "(", "html", ":", "str", ")", "->", "str", ":", "forms", "=", "BeautifulSoup", "(", "html", ",", "'html.parser'", ")", ".", "find_all", "(", "'form'", ")", "if", "not", "forms", ":", "raise", "VVKBaseUrlException", "(", "'Form for lo...
Search for login url from VK login page
[ "Search", "for", "login", "url", "from", "VK", "login", "page" ]
train
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/utils.py#L8-L20
vadimk2016/v-vk-api
v_vk_api/utils.py
get_url_params
def get_url_params(url: str, fragment: bool = False) -> dict: """ Parse URL params """ parsed_url = urlparse(url) if fragment: url_query = parse_qsl(parsed_url.fragment) else: url_query = parse_qsl(parsed_url.query) return dict(url_query)
python
def get_url_params(url: str, fragment: bool = False) -> dict: """ Parse URL params """ parsed_url = urlparse(url) if fragment: url_query = parse_qsl(parsed_url.fragment) else: url_query = parse_qsl(parsed_url.query) return dict(url_query)
[ "def", "get_url_params", "(", "url", ":", "str", ",", "fragment", ":", "bool", "=", "False", ")", "->", "dict", ":", "parsed_url", "=", "urlparse", "(", "url", ")", "if", "fragment", ":", "url_query", "=", "parse_qsl", "(", "parsed_url", ".", "fragment",...
Parse URL params
[ "Parse", "URL", "params" ]
train
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/utils.py#L23-L32
vadimk2016/v-vk-api
v_vk_api/utils.py
check_page_for_warnings
def check_page_for_warnings(html: str) -> None: """ Checks if is any warnings on page if so raises an exception """ soup = BeautifulSoup(html, 'html.parser') warnings = soup.find_all('div', {'class': 'service_msg_warning'}) if warnings: exception_msg = '; '.join((warning.get_text() for warning in warnings)) raise VVKPageWarningException(exception_msg)
python
def check_page_for_warnings(html: str) -> None: """ Checks if is any warnings on page if so raises an exception """ soup = BeautifulSoup(html, 'html.parser') warnings = soup.find_all('div', {'class': 'service_msg_warning'}) if warnings: exception_msg = '; '.join((warning.get_text() for warning in warnings)) raise VVKPageWarningException(exception_msg)
[ "def", "check_page_for_warnings", "(", "html", ":", "str", ")", "->", "None", ":", "soup", "=", "BeautifulSoup", "(", "html", ",", "'html.parser'", ")", "warnings", "=", "soup", ".", "find_all", "(", "'div'", ",", "{", "'class'", ":", "'service_msg_warning'"...
Checks if is any warnings on page if so raises an exception
[ "Checks", "if", "is", "any", "warnings", "on", "page", "if", "so", "raises", "an", "exception" ]
train
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/utils.py#L35-L43
NerdWalletOSS/savage
src/savage/utils.py
result_to_dict
def result_to_dict(res): """ :param res: :any:`sqlalchemy.engine.ResultProxy` :return: a list of dicts where each dict represents a row in the query where the key \ is the column name and the value is the value of that column. """ keys = res.keys() return [dict(itertools.izip(keys, row)) for row in res]
python
def result_to_dict(res): """ :param res: :any:`sqlalchemy.engine.ResultProxy` :return: a list of dicts where each dict represents a row in the query where the key \ is the column name and the value is the value of that column. """ keys = res.keys() return [dict(itertools.izip(keys, row)) for row in res]
[ "def", "result_to_dict", "(", "res", ")", ":", "keys", "=", "res", ".", "keys", "(", ")", "return", "[", "dict", "(", "itertools", ".", "izip", "(", "keys", ",", "row", ")", ")", "for", "row", "in", "res", "]" ]
:param res: :any:`sqlalchemy.engine.ResultProxy` :return: a list of dicts where each dict represents a row in the query where the key \ is the column name and the value is the value of that column.
[ ":", "param", "res", ":", ":", "any", ":", "sqlalchemy", ".", "engine", ".", "ResultProxy" ]
train
https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L11-L19
NerdWalletOSS/savage
src/savage/utils.py
get_bind_processor
def get_bind_processor(column_type, dialect): """ Returns a bind processor for a column type and dialect, with special handling for JSON/JSONB column types to return dictionaries instead of serialized JSON strings. NOTE: This is a workaround for https://github.com/NerdWalletOSS/savage/issues/8 :param column_type: :py:class:`~sqlalchemy.sql.type_api.TypeEngine` :param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect` :return: bind processor for given column type and dialect """ if column_type.compile(dialect) not in {'JSON', 'JSONB'}: # For non-JSON/JSONB column types, return the column type's bind processor return column_type.bind_processor(dialect) if type(column_type) in {JSON, JSONB}: # For bare JSON/JSONB types, we simply skip bind processing altogether return None elif isinstance(column_type, TypeDecorator) and column_type._has_bind_processor: # For decorated JSON/JSONB types, we return the custom bind processor (if any) return partial(column_type.process_bind_param, dialect=dialect) else: # For all other cases, we fall back to deserializing the result of the bind processor def wrapped_bind_processor(value): json_deserializer = dialect._json_deserializer or json.loads return json_deserializer(column_type.bind_processor(dialect)(value)) return wrapped_bind_processor
python
def get_bind_processor(column_type, dialect): """ Returns a bind processor for a column type and dialect, with special handling for JSON/JSONB column types to return dictionaries instead of serialized JSON strings. NOTE: This is a workaround for https://github.com/NerdWalletOSS/savage/issues/8 :param column_type: :py:class:`~sqlalchemy.sql.type_api.TypeEngine` :param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect` :return: bind processor for given column type and dialect """ if column_type.compile(dialect) not in {'JSON', 'JSONB'}: # For non-JSON/JSONB column types, return the column type's bind processor return column_type.bind_processor(dialect) if type(column_type) in {JSON, JSONB}: # For bare JSON/JSONB types, we simply skip bind processing altogether return None elif isinstance(column_type, TypeDecorator) and column_type._has_bind_processor: # For decorated JSON/JSONB types, we return the custom bind processor (if any) return partial(column_type.process_bind_param, dialect=dialect) else: # For all other cases, we fall back to deserializing the result of the bind processor def wrapped_bind_processor(value): json_deserializer = dialect._json_deserializer or json.loads return json_deserializer(column_type.bind_processor(dialect)(value)) return wrapped_bind_processor
[ "def", "get_bind_processor", "(", "column_type", ",", "dialect", ")", ":", "if", "column_type", ".", "compile", "(", "dialect", ")", "not", "in", "{", "'JSON'", ",", "'JSONB'", "}", ":", "# For non-JSON/JSONB column types, return the column type's bind processor", "re...
Returns a bind processor for a column type and dialect, with special handling for JSON/JSONB column types to return dictionaries instead of serialized JSON strings. NOTE: This is a workaround for https://github.com/NerdWalletOSS/savage/issues/8 :param column_type: :py:class:`~sqlalchemy.sql.type_api.TypeEngine` :param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect` :return: bind processor for given column type and dialect
[ "Returns", "a", "bind", "processor", "for", "a", "column", "type", "and", "dialect", "with", "special", "handling", "for", "JSON", "/", "JSONB", "column", "types", "to", "return", "dictionaries", "instead", "of", "serialized", "JSON", "strings", "." ]
train
https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L22-L48
NerdWalletOSS/savage
src/savage/utils.py
get_column_attribute
def get_column_attribute(row, col_name, use_dirty=True, dialect=None): """ :param row: the row object :param col_name: the column name :param use_dirty: whether to return the dirty value of the column :param dialect: if not None, should be a :py:class:`~sqlalchemy.engine.interfaces.Dialect`. If \ specified, this function will process the column attribute into the dialect type before \ returning it; useful if one is using user defined column types in their mappers. :return: if :any:`use_dirty`, this will return the value of col_name on the row before it was \ changed; else this will return getattr(row, col_name) """ def identity(x): return x bind_processor = None if dialect: column_type = getattr(type(row), col_name).type bind_processor = get_bind_processor(column_type, dialect) bind_processor = bind_processor or identity current_value = bind_processor(getattr(row, col_name)) if use_dirty: return current_value hist = getattr(inspect(row).attrs, col_name).history if not hist.has_changes(): return current_value elif hist.deleted: return bind_processor(hist.deleted[0]) return None
python
def get_column_attribute(row, col_name, use_dirty=True, dialect=None): """ :param row: the row object :param col_name: the column name :param use_dirty: whether to return the dirty value of the column :param dialect: if not None, should be a :py:class:`~sqlalchemy.engine.interfaces.Dialect`. If \ specified, this function will process the column attribute into the dialect type before \ returning it; useful if one is using user defined column types in their mappers. :return: if :any:`use_dirty`, this will return the value of col_name on the row before it was \ changed; else this will return getattr(row, col_name) """ def identity(x): return x bind_processor = None if dialect: column_type = getattr(type(row), col_name).type bind_processor = get_bind_processor(column_type, dialect) bind_processor = bind_processor or identity current_value = bind_processor(getattr(row, col_name)) if use_dirty: return current_value hist = getattr(inspect(row).attrs, col_name).history if not hist.has_changes(): return current_value elif hist.deleted: return bind_processor(hist.deleted[0]) return None
[ "def", "get_column_attribute", "(", "row", ",", "col_name", ",", "use_dirty", "=", "True", ",", "dialect", "=", "None", ")", ":", "def", "identity", "(", "x", ")", ":", "return", "x", "bind_processor", "=", "None", "if", "dialect", ":", "column_type", "=...
:param row: the row object :param col_name: the column name :param use_dirty: whether to return the dirty value of the column :param dialect: if not None, should be a :py:class:`~sqlalchemy.engine.interfaces.Dialect`. If \ specified, this function will process the column attribute into the dialect type before \ returning it; useful if one is using user defined column types in their mappers. :return: if :any:`use_dirty`, this will return the value of col_name on the row before it was \ changed; else this will return getattr(row, col_name)
[ ":", "param", "row", ":", "the", "row", "object", ":", "param", "col_name", ":", "the", "column", "name", ":", "param", "use_dirty", ":", "whether", "to", "return", "the", "dirty", "value", "of", "the", "column", ":", "param", "dialect", ":", "if", "no...
train
https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L51-L80
NerdWalletOSS/savage
src/savage/utils.py
get_column_keys_and_names
def get_column_keys_and_names(table): """ Return a generator of tuples k, c such that k is the name of the python attribute for the column and c is the name of the column in the sql table. """ ins = inspect(table) return ((k, c.name) for k, c in ins.mapper.c.items())
python
def get_column_keys_and_names(table): """ Return a generator of tuples k, c such that k is the name of the python attribute for the column and c is the name of the column in the sql table. """ ins = inspect(table) return ((k, c.name) for k, c in ins.mapper.c.items())
[ "def", "get_column_keys_and_names", "(", "table", ")", ":", "ins", "=", "inspect", "(", "table", ")", "return", "(", "(", "k", ",", "c", ".", "name", ")", "for", "k", ",", "c", "in", "ins", ".", "mapper", ".", "c", ".", "items", "(", ")", ")" ]
Return a generator of tuples k, c such that k is the name of the python attribute for the column and c is the name of the column in the sql table.
[ "Return", "a", "generator", "of", "tuples", "k", "c", "such", "that", "k", "is", "the", "name", "of", "the", "python", "attribute", "for", "the", "column", "and", "c", "is", "the", "name", "of", "the", "column", "in", "the", "sql", "table", "." ]
train
https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L93-L99
NerdWalletOSS/savage
src/savage/utils.py
has_constraint
def has_constraint(model, engine, *col_names): # pragma: no cover """ :param model: model class to check :param engine: SQLAlchemy engine :param col_names: the name of columns which the unique constraint should contain :rtype: bool :return: True if the given columns are part of a unique constraint on model """ table_name = model.__tablename__ if engine.dialect.has_table(engine, table_name): # Use SQLAlchemy reflection to determine unique constraints insp = Inspector.from_engine(engine) constraints = itertools.chain( (sorted(x['column_names']) for x in insp.get_unique_constraints(table_name)), sorted(insp.get_pk_constraint(table_name)['constrained_columns']), ) return sorted(col_names) in constraints else: # Needed to validate test models pre-creation constrained_cols = set() for arg in getattr(model, '__table_args__', []): if isinstance(arg, UniqueConstraint): constrained_cols.update([c.name for c in arg.columns]) for c in model.__table__.columns: if c.primary_key or c.unique: constrained_cols.add(c.name) return constrained_cols.issuperset(col_names)
python
def has_constraint(model, engine, *col_names): # pragma: no cover """ :param model: model class to check :param engine: SQLAlchemy engine :param col_names: the name of columns which the unique constraint should contain :rtype: bool :return: True if the given columns are part of a unique constraint on model """ table_name = model.__tablename__ if engine.dialect.has_table(engine, table_name): # Use SQLAlchemy reflection to determine unique constraints insp = Inspector.from_engine(engine) constraints = itertools.chain( (sorted(x['column_names']) for x in insp.get_unique_constraints(table_name)), sorted(insp.get_pk_constraint(table_name)['constrained_columns']), ) return sorted(col_names) in constraints else: # Needed to validate test models pre-creation constrained_cols = set() for arg in getattr(model, '__table_args__', []): if isinstance(arg, UniqueConstraint): constrained_cols.update([c.name for c in arg.columns]) for c in model.__table__.columns: if c.primary_key or c.unique: constrained_cols.add(c.name) return constrained_cols.issuperset(col_names)
[ "def", "has_constraint", "(", "model", ",", "engine", ",", "*", "col_names", ")", ":", "# pragma: no cover", "table_name", "=", "model", ".", "__tablename__", "if", "engine", ".", "dialect", ".", "has_table", "(", "engine", ",", "table_name", ")", ":", "# Us...
:param model: model class to check :param engine: SQLAlchemy engine :param col_names: the name of columns which the unique constraint should contain :rtype: bool :return: True if the given columns are part of a unique constraint on model
[ ":", "param", "model", ":", "model", "class", "to", "check", ":", "param", "engine", ":", "SQLAlchemy", "engine", ":", "param", "col_names", ":", "the", "name", "of", "columns", "which", "the", "unique", "constraint", "should", "contain" ]
train
https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L106-L133
NerdWalletOSS/savage
src/savage/utils.py
is_modified
def is_modified(row, dialect): """ Has the row data been modified? This method inspects the row, and iterates over all columns looking for changes to the (processed) data, skipping over unmodified columns. :param row: SQLAlchemy model instance :param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect` :return: True if any columns were modified, else False """ ins = inspect(row) modified_cols = set(get_column_keys(ins.mapper)) - ins.unmodified for col_name in modified_cols: current_value = get_column_attribute(row, col_name, dialect=dialect) previous_value = get_column_attribute(row, col_name, use_dirty=False, dialect=dialect) if previous_value != current_value: return True return False
python
def is_modified(row, dialect): """ Has the row data been modified? This method inspects the row, and iterates over all columns looking for changes to the (processed) data, skipping over unmodified columns. :param row: SQLAlchemy model instance :param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect` :return: True if any columns were modified, else False """ ins = inspect(row) modified_cols = set(get_column_keys(ins.mapper)) - ins.unmodified for col_name in modified_cols: current_value = get_column_attribute(row, col_name, dialect=dialect) previous_value = get_column_attribute(row, col_name, use_dirty=False, dialect=dialect) if previous_value != current_value: return True return False
[ "def", "is_modified", "(", "row", ",", "dialect", ")", ":", "ins", "=", "inspect", "(", "row", ")", "modified_cols", "=", "set", "(", "get_column_keys", "(", "ins", ".", "mapper", ")", ")", "-", "ins", ".", "unmodified", "for", "col_name", "in", "modif...
Has the row data been modified? This method inspects the row, and iterates over all columns looking for changes to the (processed) data, skipping over unmodified columns. :param row: SQLAlchemy model instance :param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect` :return: True if any columns were modified, else False
[ "Has", "the", "row", "data", "been", "modified?" ]
train
https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L136-L154
Brightmd/TxPx
txpx/process.py
registerLoggers
def registerLoggers(info, error, debug): """ Add logging functions to this module. Functions will be called on various severities (log, error, or debug respectively). Each function must have the signature: fn(message, **kwargs) If Python str.format()-style placeholders are in message, kwargs will be interpolated. """ global log_info global log_error global log_debug log_info = info log_error = error log_debug = debug
python
def registerLoggers(info, error, debug): """ Add logging functions to this module. Functions will be called on various severities (log, error, or debug respectively). Each function must have the signature: fn(message, **kwargs) If Python str.format()-style placeholders are in message, kwargs will be interpolated. """ global log_info global log_error global log_debug log_info = info log_error = error log_debug = debug
[ "def", "registerLoggers", "(", "info", ",", "error", ",", "debug", ")", ":", "global", "log_info", "global", "log_error", "global", "log_debug", "log_info", "=", "info", "log_error", "=", "error", "log_debug", "=", "debug" ]
Add logging functions to this module. Functions will be called on various severities (log, error, or debug respectively). Each function must have the signature: fn(message, **kwargs) If Python str.format()-style placeholders are in message, kwargs will be interpolated.
[ "Add", "logging", "functions", "to", "this", "module", "." ]
train
https://github.com/Brightmd/TxPx/blob/403c18b3006fc68842ec05b259e8611fe80763aa/txpx/process.py#L25-L44
Brightmd/TxPx
txpx/process.py
background
def background(cl, proto=EchoProcess, **kw): """ Use the reactor to run a process in the background. Keep the pid around. ``proto'' may be any callable which returns an instance of ProcessProtocol """ if isinstance(cl, basestring): cl = shlex.split(cl) if not cl[0].startswith('/'): path = which(cl[0]) assert path, '%s not found' % cl[0] cl[0] = path[0] d = Deferred() proc = reactor.spawnProcess( proto(name=basename(cl[0]), deferred=d), cl[0], cl, env=os.environ, **kw) daycare.add(proc.pid) return d
python
def background(cl, proto=EchoProcess, **kw): """ Use the reactor to run a process in the background. Keep the pid around. ``proto'' may be any callable which returns an instance of ProcessProtocol """ if isinstance(cl, basestring): cl = shlex.split(cl) if not cl[0].startswith('/'): path = which(cl[0]) assert path, '%s not found' % cl[0] cl[0] = path[0] d = Deferred() proc = reactor.spawnProcess( proto(name=basename(cl[0]), deferred=d), cl[0], cl, env=os.environ, **kw) daycare.add(proc.pid) return d
[ "def", "background", "(", "cl", ",", "proto", "=", "EchoProcess", ",", "*", "*", "kw", ")", ":", "if", "isinstance", "(", "cl", ",", "basestring", ")", ":", "cl", "=", "shlex", ".", "split", "(", "cl", ")", "if", "not", "cl", "[", "0", "]", "."...
Use the reactor to run a process in the background. Keep the pid around. ``proto'' may be any callable which returns an instance of ProcessProtocol
[ "Use", "the", "reactor", "to", "run", "a", "process", "in", "the", "background", "." ]
train
https://github.com/Brightmd/TxPx/blob/403c18b3006fc68842ec05b259e8611fe80763aa/txpx/process.py#L128-L153
Brightmd/TxPx
txpx/process.py
runner
def runner(Options, buffering=True): """ Return a standard "run" function that wraps an Options class If buffering=False, turn off stdout/stderr buffering for this process """ def run(argv=None): if not buffering: sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0) if argv is None: argv = sys.argv o = Options() try: o.parseOptions(argv[1:]) except usage.UsageError, e: if hasattr(o, 'subOptions'): print str(o.subOptions) else: print str(o) print str(e) return 1 return 0 return run
python
def runner(Options, buffering=True): """ Return a standard "run" function that wraps an Options class If buffering=False, turn off stdout/stderr buffering for this process """ def run(argv=None): if not buffering: sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0) if argv is None: argv = sys.argv o = Options() try: o.parseOptions(argv[1:]) except usage.UsageError, e: if hasattr(o, 'subOptions'): print str(o.subOptions) else: print str(o) print str(e) return 1 return 0 return run
[ "def", "runner", "(", "Options", ",", "buffering", "=", "True", ")", ":", "def", "run", "(", "argv", "=", "None", ")", ":", "if", "not", "buffering", ":", "sys", ".", "stdout", "=", "os", ".", "fdopen", "(", "sys", ".", "stdout", ".", "fileno", "...
Return a standard "run" function that wraps an Options class If buffering=False, turn off stdout/stderr buffering for this process
[ "Return", "a", "standard", "run", "function", "that", "wraps", "an", "Options", "class" ]
train
https://github.com/Brightmd/TxPx/blob/403c18b3006fc68842ec05b259e8611fe80763aa/txpx/process.py#L197-L223
Brightmd/TxPx
txpx/process.py
DayCare.killall
def killall(self): """ Kill all children """ for pid in set(self): try: os.kill(pid, signal.SIGTERM) except OSError, e: # pragma: nocover if e.errno == errno.ESRCH: "Process previously died on its own" self.remove(pid)
python
def killall(self): """ Kill all children """ for pid in set(self): try: os.kill(pid, signal.SIGTERM) except OSError, e: # pragma: nocover if e.errno == errno.ESRCH: "Process previously died on its own" self.remove(pid)
[ "def", "killall", "(", "self", ")", ":", "for", "pid", "in", "set", "(", "self", ")", ":", "try", ":", "os", ".", "kill", "(", "pid", ",", "signal", ".", "SIGTERM", ")", "except", "OSError", ",", "e", ":", "# pragma: nocover", "if", "e", ".", "er...
Kill all children
[ "Kill", "all", "children" ]
train
https://github.com/Brightmd/TxPx/blob/403c18b3006fc68842ec05b259e8611fe80763aa/txpx/process.py#L55-L65
Brightmd/TxPx
txpx/process.py
EchoProcess.processEnded
def processEnded(self, reason): """ Connected process shut down """ log_debug("{name} process exited", name=self.name) if self.deferred: if reason.type == ProcessDone: self.deferred.callback(reason.value.exitCode) elif reason.type == ProcessTerminated: self.deferred.errback(reason) return self.deferred
python
def processEnded(self, reason): """ Connected process shut down """ log_debug("{name} process exited", name=self.name) if self.deferred: if reason.type == ProcessDone: self.deferred.callback(reason.value.exitCode) elif reason.type == ProcessTerminated: self.deferred.errback(reason) return self.deferred
[ "def", "processEnded", "(", "self", ",", "reason", ")", ":", "log_debug", "(", "\"{name} process exited\"", ",", "name", "=", "self", ".", "name", ")", "if", "self", ".", "deferred", ":", "if", "reason", ".", "type", "==", "ProcessDone", ":", "self", "."...
Connected process shut down
[ "Connected", "process", "shut", "down" ]
train
https://github.com/Brightmd/TxPx/blob/403c18b3006fc68842ec05b259e8611fe80763aa/txpx/process.py#L86-L96
Brightmd/TxPx
txpx/process.py
EchoProcess.errReceived
def errReceived(self, data): """ Connected process wrote to stderr """ lines = data.splitlines() for line in lines: log_error("*** {name} stderr *** {line}", name=self.name, line=self.errFilter(line))
python
def errReceived(self, data): """ Connected process wrote to stderr """ lines = data.splitlines() for line in lines: log_error("*** {name} stderr *** {line}", name=self.name, line=self.errFilter(line))
[ "def", "errReceived", "(", "self", ",", "data", ")", ":", "lines", "=", "data", ".", "splitlines", "(", ")", "for", "line", "in", "lines", ":", "log_error", "(", "\"*** {name} stderr *** {line}\"", ",", "name", "=", "self", ".", "name", ",", "line", "=",...
Connected process wrote to stderr
[ "Connected", "process", "wrote", "to", "stderr" ]
train
https://github.com/Brightmd/TxPx/blob/403c18b3006fc68842ec05b259e8611fe80763aa/txpx/process.py#L104-L112
Brightmd/TxPx
txpx/process.py
EchoProcess.outLineReceived
def outLineReceived(self, line): """ Handle data via stdout linewise. This is useful if you turned off buffering. In your subclass, override this if you want to handle the line as a protocol line in addition to logging it. (You may upcall this function safely.) """ log_debug('<<< {name} stdout >>> {line}', name=self.name, line=self.outFilter(line))
python
def outLineReceived(self, line): """ Handle data via stdout linewise. This is useful if you turned off buffering. In your subclass, override this if you want to handle the line as a protocol line in addition to logging it. (You may upcall this function safely.) """ log_debug('<<< {name} stdout >>> {line}', name=self.name, line=self.outFilter(line))
[ "def", "outLineReceived", "(", "self", ",", "line", ")", ":", "log_debug", "(", "'<<< {name} stdout >>> {line}'", ",", "name", "=", "self", ".", "name", ",", "line", "=", "self", ".", "outFilter", "(", "line", ")", ")" ]
Handle data via stdout linewise. This is useful if you turned off buffering. In your subclass, override this if you want to handle the line as a protocol line in addition to logging it. (You may upcall this function safely.)
[ "Handle", "data", "via", "stdout", "linewise", ".", "This", "is", "useful", "if", "you", "turned", "off", "buffering", "." ]
train
https://github.com/Brightmd/TxPx/blob/403c18b3006fc68842ec05b259e8611fe80763aa/txpx/process.py#L114-L125
Brightmd/TxPx
txpx/process.py
Tee.write
def write(self, *a, **kw): """ Write to both files If either one has an error, try writing the error to the other one. """ fl = None try: self.file1.write(*a, **kw) self.file1.flush() except IOError: badFile, fl = 1, failure.Failure() try: self.file2.write(*a, **kw) self.file2.flush() except IOError: badFile, fl = 2, failure.Failure() if fl: out = self.file2 if badFile == 1 else self.file1 out.write(str(fl) + '\n') out.flush() fl.raiseException()
python
def write(self, *a, **kw): """ Write to both files If either one has an error, try writing the error to the other one. """ fl = None try: self.file1.write(*a, **kw) self.file1.flush() except IOError: badFile, fl = 1, failure.Failure() try: self.file2.write(*a, **kw) self.file2.flush() except IOError: badFile, fl = 2, failure.Failure() if fl: out = self.file2 if badFile == 1 else self.file1 out.write(str(fl) + '\n') out.flush() fl.raiseException()
[ "def", "write", "(", "self", ",", "*", "a", ",", "*", "*", "kw", ")", ":", "fl", "=", "None", "try", ":", "self", ".", "file1", ".", "write", "(", "*", "a", ",", "*", "*", "kw", ")", "self", ".", "file1", ".", "flush", "(", ")", "except", ...
Write to both files If either one has an error, try writing the error to the other one.
[ "Write", "to", "both", "files" ]
train
https://github.com/Brightmd/TxPx/blob/403c18b3006fc68842ec05b259e8611fe80763aa/txpx/process.py#L164-L187
littlemo/moear-package-mobi
moear_package_mobi/pipelines.py
MoEarImagesPipeline.item_completed
def item_completed(self, results, item, info): ''' 在正常图片本地化处理管道业务执行完毕后,使用缩略图路径替换原 ``result[path]`` 路径, 从而使最终打包时使用缩略图,并根据配置,对缩略图进行灰度处理 :param item: 爬取到的数据模型 :type item: :class:`.MoearPackageMobiItem` or dict ''' # 处理 results 中的 path 使用缩略图路径替代 for ok, result in results: if not ok: continue path = result['path'] path = re.sub(r'full', os.path.join('thumbs', 'kindle'), path) result['path'] = path # 处理缩略图为灰度图,为便于在电纸书上节省空间 if info.spider.options.get('img_convert_to_gray'): images_store = info.spider.settings.get('IMAGES_STORE') for ok, result in results: if not ok: continue img_path = os.path.join(images_store, result['path']) with open(img_path, 'rb+') as fh: output = img.gray_image(fh.read()) fh.seek(0) fh.truncate() fh.write(output) info.spider._logger.debug(results) item = super(MoEarImagesPipeline, self).item_completed( results, item, info) return item
python
def item_completed(self, results, item, info): ''' 在正常图片本地化处理管道业务执行完毕后,使用缩略图路径替换原 ``result[path]`` 路径, 从而使最终打包时使用缩略图,并根据配置,对缩略图进行灰度处理 :param item: 爬取到的数据模型 :type item: :class:`.MoearPackageMobiItem` or dict ''' # 处理 results 中的 path 使用缩略图路径替代 for ok, result in results: if not ok: continue path = result['path'] path = re.sub(r'full', os.path.join('thumbs', 'kindle'), path) result['path'] = path # 处理缩略图为灰度图,为便于在电纸书上节省空间 if info.spider.options.get('img_convert_to_gray'): images_store = info.spider.settings.get('IMAGES_STORE') for ok, result in results: if not ok: continue img_path = os.path.join(images_store, result['path']) with open(img_path, 'rb+') as fh: output = img.gray_image(fh.read()) fh.seek(0) fh.truncate() fh.write(output) info.spider._logger.debug(results) item = super(MoEarImagesPipeline, self).item_completed( results, item, info) return item
[ "def", "item_completed", "(", "self", ",", "results", ",", "item", ",", "info", ")", ":", "# 处理 results 中的 path 使用缩略图路径替代", "for", "ok", ",", "result", "in", "results", ":", "if", "not", "ok", ":", "continue", "path", "=", "result", "[", "'path'", "]", "...
在正常图片本地化处理管道业务执行完毕后,使用缩略图路径替换原 ``result[path]`` 路径, 从而使最终打包时使用缩略图,并根据配置,对缩略图进行灰度处理 :param item: 爬取到的数据模型 :type item: :class:`.MoearPackageMobiItem` or dict
[ "在正常图片本地化处理管道业务执行完毕后,使用缩略图路径替换原", "result", "[", "path", "]", "路径,", "从而使最终打包时使用缩略图,并根据配置,对缩略图进行灰度处理" ]
train
https://github.com/littlemo/moear-package-mobi/blob/189a077bd0ad5309607957b3f1c0b65eae40ec90/moear_package_mobi/pipelines.py#L31-L64
littlemo/moear-package-mobi
moear_package_mobi/pipelines.py
PagePersistentPipeline.process_item
def process_item(self, item, spider): ''' 将从图片处理管道流过的数据模型中的缩略图链接更新到文章中的相应图片 URL 上, 并对其中的,已删除图片 ``item['image_urls_removed']`` 进行处理, 使其显示内建的删除图标。 最终使用文章模板,对数据模型中的数据进行渲染并输出到指定路径中,完成本地化, 等待最终 ``mobi`` 打包 :param item: 爬取到的数据模型 :type item: :class:`.MoearPackageMobiItem` or dict :param spider: 当前爬虫对象 :type spider: :class:`.MobiSpider` ''' soup = BeautifulSoup(item.get('content', ''), "lxml") if item.get('images'): # 将content中的全部img替换为本地化后的url img_list = soup.find_all('img') for i in img_list: img_src = i.get('src') # 删除image_urls_removed中的img,避免由于未本地化造成mobi生成失败 if img_src in item.get('image_urls_removed', []): i['src'] = '../icons/delete.jpg' for result in item.get('images', []): if img_src == result['url']: i['src'] = os.path.join('..', 'images', result['path']) spider._logger.debug( '文章({})的正文img保存成功: {}'.format( item['title'], img_src)) break # 填充toc_thumbnail路径值 for result in item['images']: if item['cover_image'] == result['url']: item['toc_thumbnail'] = os.path.join( 'images', result['path']) break # 过滤掉不支持的标签 unsupport_tag = spider.options.get('kindlegen_unsupport_tag', []) for tag in unsupport_tag: for i in soup.find_all(tag): delete_img = soup.new_tag('img') delete_img['src'] = '../icons/delete.jpg' i.replace_with(delete_img) item['content'] = str(soup.div) # 将item['content']保存到本地 article_html_name = hashlib.md5(to_bytes(item['url'])).hexdigest() html_name = '{}.html'.format(article_html_name) item['url_local'] = os.path.join('html', html_name) page_store = os.path.join(spider.build_source_dir, item['url_local']) # 将item中的生成字段添加到post中 idx = 0 post = None for section in spider.data.items(): for p in section[1]: idx += 1 if p.get('origin_url') == item.get('url'): post = p p['idx'] = 'post_{:0>3}'.format(idx) p['playOrder'] = idx p['content'] = item.get('content') p['url_local'] = item.get('url_local') p['toc_thumbnail'] = item.get('toc_thumbnail') # 若为最后一篇文章,则添加相应标志 if idx == spider.post_num: spider._logger.info( '标记为最后一篇文章: {}'.format(p.get('title'))) 
p['last_one'] = True break # 创建目标dirname dirname = os.path.dirname(page_store) if not os.path.exists(dirname): os.makedirs(dirname) # 基于预设模板,将文章正文本地化 with codecs.open(page_store, 'wb', 'utf-8') as fh: fh.write(spider.template_post.render( post=post, options=spider.options)) # 为优化log打印信息,清空已处理过的字段 item.pop('content', '') item.pop('image_urls', []) item.pop('images', []) return item
python
def process_item(self, item, spider): ''' 将从图片处理管道流过的数据模型中的缩略图链接更新到文章中的相应图片 URL 上, 并对其中的,已删除图片 ``item['image_urls_removed']`` 进行处理, 使其显示内建的删除图标。 最终使用文章模板,对数据模型中的数据进行渲染并输出到指定路径中,完成本地化, 等待最终 ``mobi`` 打包 :param item: 爬取到的数据模型 :type item: :class:`.MoearPackageMobiItem` or dict :param spider: 当前爬虫对象 :type spider: :class:`.MobiSpider` ''' soup = BeautifulSoup(item.get('content', ''), "lxml") if item.get('images'): # 将content中的全部img替换为本地化后的url img_list = soup.find_all('img') for i in img_list: img_src = i.get('src') # 删除image_urls_removed中的img,避免由于未本地化造成mobi生成失败 if img_src in item.get('image_urls_removed', []): i['src'] = '../icons/delete.jpg' for result in item.get('images', []): if img_src == result['url']: i['src'] = os.path.join('..', 'images', result['path']) spider._logger.debug( '文章({})的正文img保存成功: {}'.format( item['title'], img_src)) break # 填充toc_thumbnail路径值 for result in item['images']: if item['cover_image'] == result['url']: item['toc_thumbnail'] = os.path.join( 'images', result['path']) break # 过滤掉不支持的标签 unsupport_tag = spider.options.get('kindlegen_unsupport_tag', []) for tag in unsupport_tag: for i in soup.find_all(tag): delete_img = soup.new_tag('img') delete_img['src'] = '../icons/delete.jpg' i.replace_with(delete_img) item['content'] = str(soup.div) # 将item['content']保存到本地 article_html_name = hashlib.md5(to_bytes(item['url'])).hexdigest() html_name = '{}.html'.format(article_html_name) item['url_local'] = os.path.join('html', html_name) page_store = os.path.join(spider.build_source_dir, item['url_local']) # 将item中的生成字段添加到post中 idx = 0 post = None for section in spider.data.items(): for p in section[1]: idx += 1 if p.get('origin_url') == item.get('url'): post = p p['idx'] = 'post_{:0>3}'.format(idx) p['playOrder'] = idx p['content'] = item.get('content') p['url_local'] = item.get('url_local') p['toc_thumbnail'] = item.get('toc_thumbnail') # 若为最后一篇文章,则添加相应标志 if idx == spider.post_num: spider._logger.info( '标记为最后一篇文章: {}'.format(p.get('title'))) 
p['last_one'] = True break # 创建目标dirname dirname = os.path.dirname(page_store) if not os.path.exists(dirname): os.makedirs(dirname) # 基于预设模板,将文章正文本地化 with codecs.open(page_store, 'wb', 'utf-8') as fh: fh.write(spider.template_post.render( post=post, options=spider.options)) # 为优化log打印信息,清空已处理过的字段 item.pop('content', '') item.pop('image_urls', []) item.pop('images', []) return item
[ "def", "process_item", "(", "self", ",", "item", ",", "spider", ")", ":", "soup", "=", "BeautifulSoup", "(", "item", ".", "get", "(", "'content'", ",", "''", ")", ",", "\"lxml\"", ")", "if", "item", ".", "get", "(", "'images'", ")", ":", "# 将content中...
将从图片处理管道流过的数据模型中的缩略图链接更新到文章中的相应图片 URL 上, 并对其中的,已删除图片 ``item['image_urls_removed']`` 进行处理, 使其显示内建的删除图标。 最终使用文章模板,对数据模型中的数据进行渲染并输出到指定路径中,完成本地化, 等待最终 ``mobi`` 打包 :param item: 爬取到的数据模型 :type item: :class:`.MoearPackageMobiItem` or dict :param spider: 当前爬虫对象 :type spider: :class:`.MobiSpider`
[ "将从图片处理管道流过的数据模型中的缩略图链接更新到文章中的相应图片", "URL", "上,", "并对其中的,已删除图片", "item", "[", "image_urls_removed", "]", "进行处理,", "使其显示内建的删除图标。" ]
train
https://github.com/littlemo/moear-package-mobi/blob/189a077bd0ad5309607957b3f1c0b65eae40ec90/moear_package_mobi/pipelines.py#L71-L164
twneale/visitors
visitors/ext/etree.py
from_etree
def from_etree( el, node=None, node_cls=None, tagsub=functools.partial(re.sub, r'\{.+?\}', ''), Node=Node): '''Convert the element tree to a tater tree. ''' node_cls = node_cls or Node if node is None: node = node_cls() tag = tagsub(el.tag) attrib = dict((tagsub(k), v) for (k, v) in el.attrib.items()) node.update(attrib, tag=tag) if el.text: node['text'] = el.text for child in el: child = from_etree(child, node_cls=node_cls) node.append(child) if el.tail: node['tail'] = el.tail return node
python
def from_etree( el, node=None, node_cls=None, tagsub=functools.partial(re.sub, r'\{.+?\}', ''), Node=Node): '''Convert the element tree to a tater tree. ''' node_cls = node_cls or Node if node is None: node = node_cls() tag = tagsub(el.tag) attrib = dict((tagsub(k), v) for (k, v) in el.attrib.items()) node.update(attrib, tag=tag) if el.text: node['text'] = el.text for child in el: child = from_etree(child, node_cls=node_cls) node.append(child) if el.tail: node['tail'] = el.tail return node
[ "def", "from_etree", "(", "el", ",", "node", "=", "None", ",", "node_cls", "=", "None", ",", "tagsub", "=", "functools", ".", "partial", "(", "re", ".", "sub", ",", "r'\\{.+?\\}'", ",", "''", ")", ",", "Node", "=", "Node", ")", ":", "node_cls", "="...
Convert the element tree to a tater tree.
[ "Convert", "the", "element", "tree", "to", "a", "tater", "tree", "." ]
train
https://github.com/twneale/visitors/blob/17a2759fb0ddc0a039cf42e1bbb053295b3b2445/visitors/ext/etree.py#L69-L89
berndca/xmodels
xmodels/models.py
Model.from_dict
def from_dict(cls, raw_data, **kwargs): """ This factory for :class:`Model` creates a Model from a dict object. """ instance = cls() instance.populate(raw_data, **kwargs) instance.validate(**kwargs) return instance
python
def from_dict(cls, raw_data, **kwargs): """ This factory for :class:`Model` creates a Model from a dict object. """ instance = cls() instance.populate(raw_data, **kwargs) instance.validate(**kwargs) return instance
[ "def", "from_dict", "(", "cls", ",", "raw_data", ",", "*", "*", "kwargs", ")", ":", "instance", "=", "cls", "(", ")", "instance", ".", "populate", "(", "raw_data", ",", "*", "*", "kwargs", ")", "instance", ".", "validate", "(", "*", "*", "kwargs", ...
This factory for :class:`Model` creates a Model from a dict object.
[ "This", "factory", "for", ":", "class", ":", "Model", "creates", "a", "Model", "from", "a", "dict", "object", "." ]
train
https://github.com/berndca/xmodels/blob/8265522229a1ce482a2866cdbd1938293a74bb67/xmodels/models.py#L252-L259
The-Politico/politico-civic-almanac
almanac/utils/auth.py
secure
def secure(view): """ Authentication decorator for views. If DEBUG is on, we serve the view without authenticating. Default is 'django.contrib.auth.decorators.login_required'. Can also be 'django.contrib.admin.views.decorators.staff_member_required' or a custom decorator. """ auth_decorator = import_class(settings.AUTH_DECORATOR) return ( view if project_settings.DEBUG else method_decorator(auth_decorator, name='dispatch')(view) )
python
def secure(view): """ Authentication decorator for views. If DEBUG is on, we serve the view without authenticating. Default is 'django.contrib.auth.decorators.login_required'. Can also be 'django.contrib.admin.views.decorators.staff_member_required' or a custom decorator. """ auth_decorator = import_class(settings.AUTH_DECORATOR) return ( view if project_settings.DEBUG else method_decorator(auth_decorator, name='dispatch')(view) )
[ "def", "secure", "(", "view", ")", ":", "auth_decorator", "=", "import_class", "(", "settings", ".", "AUTH_DECORATOR", ")", "return", "(", "view", "if", "project_settings", ".", "DEBUG", "else", "method_decorator", "(", "auth_decorator", ",", "name", "=", "'di...
Authentication decorator for views. If DEBUG is on, we serve the view without authenticating. Default is 'django.contrib.auth.decorators.login_required'. Can also be 'django.contrib.admin.views.decorators.staff_member_required' or a custom decorator.
[ "Authentication", "decorator", "for", "views", "." ]
train
https://github.com/The-Politico/politico-civic-almanac/blob/f97521fabd445c8a0fa97a435f6d39f517ef3892/almanac/utils/auth.py#L8-L21
bitesofcode/xqt
xqt/gui/xfiledialog.py
XFileDialog.getOpenFileName
def getOpenFileName(*args): """ Normalizes the getOpenFileName method between the different Qt wrappers. :return (<str> filename, <bool> accepted) """ result = QtGui.QFileDialog.getOpenFileName(*args) # PyQt4 returns just a string if type(result) is not tuple: return result, bool(result) # PySide returns a tuple of str, bool else: return result
python
def getOpenFileName(*args): """ Normalizes the getOpenFileName method between the different Qt wrappers. :return (<str> filename, <bool> accepted) """ result = QtGui.QFileDialog.getOpenFileName(*args) # PyQt4 returns just a string if type(result) is not tuple: return result, bool(result) # PySide returns a tuple of str, bool else: return result
[ "def", "getOpenFileName", "(", "*", "args", ")", ":", "result", "=", "QtGui", ".", "QFileDialog", ".", "getOpenFileName", "(", "*", "args", ")", "# PyQt4 returns just a string\r", "if", "type", "(", "result", ")", "is", "not", "tuple", ":", "return", "result...
Normalizes the getOpenFileName method between the different Qt wrappers. :return (<str> filename, <bool> accepted)
[ "Normalizes", "the", "getOpenFileName", "method", "between", "the", "different", "Qt", "wrappers", ".", ":", "return", "(", "<str", ">", "filename", "<bool", ">", "accepted", ")" ]
train
https://github.com/bitesofcode/xqt/blob/befa649a2f2104a20d49c8c78ffdba5907fd94d2/xqt/gui/xfiledialog.py#L6-L21
bitesofcode/xqt
xqt/gui/xfiledialog.py
XFileDialog.getDirectory
def getDirectory(*args): """ Normalizes the getDirectory method between the different Qt wrappers. :return (<str> filename, <bool> accepted) """ result = QtGui.QFileDialog.getDirectory(*args) # PyQt4 returns just a string if type(result) is not tuple: return result, bool(result) # PySide returns a tuple of str, bool else: return result
python
def getDirectory(*args): """ Normalizes the getDirectory method between the different Qt wrappers. :return (<str> filename, <bool> accepted) """ result = QtGui.QFileDialog.getDirectory(*args) # PyQt4 returns just a string if type(result) is not tuple: return result, bool(result) # PySide returns a tuple of str, bool else: return result
[ "def", "getDirectory", "(", "*", "args", ")", ":", "result", "=", "QtGui", ".", "QFileDialog", ".", "getDirectory", "(", "*", "args", ")", "# PyQt4 returns just a string\r", "if", "type", "(", "result", ")", "is", "not", "tuple", ":", "return", "result", "...
Normalizes the getDirectory method between the different Qt wrappers. :return (<str> filename, <bool> accepted)
[ "Normalizes", "the", "getDirectory", "method", "between", "the", "different", "Qt", "wrappers", ".", ":", "return", "(", "<str", ">", "filename", "<bool", ">", "accepted", ")" ]
train
https://github.com/bitesofcode/xqt/blob/befa649a2f2104a20d49c8c78ffdba5907fd94d2/xqt/gui/xfiledialog.py#L24-L39
bitesofcode/xqt
xqt/gui/xfiledialog.py
XFileDialog.getSaveFileName
def getSaveFileName(*args): """ Normalizes the getSaveFileName method between the different Qt wrappers. :return (<str> filename, <bool> accepted) """ result = QtGui.QFileDialog.getSaveFileName(*args) # PyQt4 returns just a string if type(result) is not tuple: return result, bool(result) # PySide returns a tuple of str, bool else: return result
python
def getSaveFileName(*args): """ Normalizes the getSaveFileName method between the different Qt wrappers. :return (<str> filename, <bool> accepted) """ result = QtGui.QFileDialog.getSaveFileName(*args) # PyQt4 returns just a string if type(result) is not tuple: return result, bool(result) # PySide returns a tuple of str, bool else: return result
[ "def", "getSaveFileName", "(", "*", "args", ")", ":", "result", "=", "QtGui", ".", "QFileDialog", ".", "getSaveFileName", "(", "*", "args", ")", "# PyQt4 returns just a string\r", "if", "type", "(", "result", ")", "is", "not", "tuple", ":", "return", "result...
Normalizes the getSaveFileName method between the different Qt wrappers. :return (<str> filename, <bool> accepted)
[ "Normalizes", "the", "getSaveFileName", "method", "between", "the", "different", "Qt", "wrappers", ".", ":", "return", "(", "<str", ">", "filename", "<bool", ">", "accepted", ")" ]
train
https://github.com/bitesofcode/xqt/blob/befa649a2f2104a20d49c8c78ffdba5907fd94d2/xqt/gui/xfiledialog.py#L42-L57
uw-it-aca/uw-restclients-uwnetid
uw_uwnetid/category.py
get_netid_categories
def get_netid_categories(netid, category_codes): """ Return a list of uwnetid.models Category objects corresponding to the netid and category code or list provided """ url = _netid_category_url(netid, category_codes) response = get_resource(url) return _json_to_categories(response)
python
def get_netid_categories(netid, category_codes): """ Return a list of uwnetid.models Category objects corresponding to the netid and category code or list provided """ url = _netid_category_url(netid, category_codes) response = get_resource(url) return _json_to_categories(response)
[ "def", "get_netid_categories", "(", "netid", ",", "category_codes", ")", ":", "url", "=", "_netid_category_url", "(", "netid", ",", "category_codes", ")", "response", "=", "get_resource", "(", "url", ")", "return", "_json_to_categories", "(", "response", ")" ]
Return a list of uwnetid.models Category objects corresponding to the netid and category code or list provided
[ "Return", "a", "list", "of", "uwnetid", ".", "models", "Category", "objects", "corresponding", "to", "the", "netid", "and", "category", "code", "or", "list", "provided" ]
train
https://github.com/uw-it-aca/uw-restclients-uwnetid/blob/58c78b564f9c920a8f8fd408eec959ddd5605b0b/uw_uwnetid/category.py#L13-L20
uw-it-aca/uw-restclients-uwnetid
uw_uwnetid/category.py
update_catagory
def update_catagory(netid, category_code, status): """ Post a subscriptionfor the given netid and category_code """ url = "{0}/category".format(url_version()) body = { "categoryCode": category_code, "status": status, "categoryList": [{"netid": netid}] } response = post_resource(url, json.dumps(body)) return json.loads(response)
python
def update_catagory(netid, category_code, status): """ Post a subscriptionfor the given netid and category_code """ url = "{0}/category".format(url_version()) body = { "categoryCode": category_code, "status": status, "categoryList": [{"netid": netid}] } response = post_resource(url, json.dumps(body)) return json.loads(response)
[ "def", "update_catagory", "(", "netid", ",", "category_code", ",", "status", ")", ":", "url", "=", "\"{0}/category\"", ".", "format", "(", "url_version", "(", ")", ")", "body", "=", "{", "\"categoryCode\"", ":", "category_code", ",", "\"status\"", ":", "stat...
Post a subscriptionfor the given netid and category_code
[ "Post", "a", "subscriptionfor", "the", "given", "netid", "and", "category_code" ]
train
https://github.com/uw-it-aca/uw-restclients-uwnetid/blob/58c78b564f9c920a8f8fd408eec959ddd5605b0b/uw_uwnetid/category.py#L23-L36
uw-it-aca/uw-restclients-uwnetid
uw_uwnetid/category.py
_netid_category_url
def _netid_category_url(netid, category_codes): """ Return UWNetId resource for provided netid and category code or code list """ return "{0}/{1}/category/{2}".format( url_base(), netid, (','.join([str(n) for n in category_codes]) if isinstance(category_codes, (list, tuple)) else category_codes))
python
def _netid_category_url(netid, category_codes): """ Return UWNetId resource for provided netid and category code or code list """ return "{0}/{1}/category/{2}".format( url_base(), netid, (','.join([str(n) for n in category_codes]) if isinstance(category_codes, (list, tuple)) else category_codes))
[ "def", "_netid_category_url", "(", "netid", ",", "category_codes", ")", ":", "return", "\"{0}/{1}/category/{2}\"", ".", "format", "(", "url_base", "(", ")", ",", "netid", ",", "(", "','", ".", "join", "(", "[", "str", "(", "n", ")", "for", "n", "in", "...
Return UWNetId resource for provided netid and category code or code list
[ "Return", "UWNetId", "resource", "for", "provided", "netid", "and", "category", "code", "or", "code", "list" ]
train
https://github.com/uw-it-aca/uw-restclients-uwnetid/blob/58c78b564f9c920a8f8fd408eec959ddd5605b0b/uw_uwnetid/category.py#L39-L48
uw-it-aca/uw-restclients-uwnetid
uw_uwnetid/category.py
_json_to_categories
def _json_to_categories(response_body): """ Returns a list of Category objects """ data = json.loads(response_body) categories = [] for category_data in data.get("categoryList", []): categories.append(Category().from_json( data.get('uwNetID'), category_data)) return categories
python
def _json_to_categories(response_body): """ Returns a list of Category objects """ data = json.loads(response_body) categories = [] for category_data in data.get("categoryList", []): categories.append(Category().from_json( data.get('uwNetID'), category_data)) return categories
[ "def", "_json_to_categories", "(", "response_body", ")", ":", "data", "=", "json", ".", "loads", "(", "response_body", ")", "categories", "=", "[", "]", "for", "category_data", "in", "data", ".", "get", "(", "\"categoryList\"", ",", "[", "]", ")", ":", "...
Returns a list of Category objects
[ "Returns", "a", "list", "of", "Category", "objects" ]
train
https://github.com/uw-it-aca/uw-restclients-uwnetid/blob/58c78b564f9c920a8f8fd408eec959ddd5605b0b/uw_uwnetid/category.py#L51-L61
benley/butcher
butcher/buildfile_context.py
exec_function
def exec_function(ast, globals_map): """Execute a python code object in the given environment. Args: globals_map: Dictionary to use as the globals context. Returns: locals_map: Dictionary of locals from the environment after execution. """ locals_map = globals_map exec ast in globals_map, locals_map return locals_map
python
def exec_function(ast, globals_map): """Execute a python code object in the given environment. Args: globals_map: Dictionary to use as the globals context. Returns: locals_map: Dictionary of locals from the environment after execution. """ locals_map = globals_map exec ast in globals_map, locals_map return locals_map
[ "def", "exec_function", "(", "ast", ",", "globals_map", ")", ":", "locals_map", "=", "globals_map", "exec", "ast", "in", "globals_map", ",", "locals_map", "return", "locals_map" ]
Execute a python code object in the given environment. Args: globals_map: Dictionary to use as the globals context. Returns: locals_map: Dictionary of locals from the environment after execution.
[ "Execute", "a", "python", "code", "object", "in", "the", "given", "environment", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/buildfile_context.py#L91-L101
benley/butcher
butcher/buildfile_context.py
ParseContext.activate
def activate(ctx): """Activate the given ParseContext.""" if hasattr(ctx, '_on_context_exit'): raise ContextError( 'Context actions registered outside this ' 'parse context are active') try: ParseContext._active.append(ctx) ctx._on_context_exit = [] yield finally: for func, args, kwargs in ctx._on_context_exit: func(*args, **kwargs) del ctx._on_context_exit ParseContext._active.pop()
python
def activate(ctx): """Activate the given ParseContext.""" if hasattr(ctx, '_on_context_exit'): raise ContextError( 'Context actions registered outside this ' 'parse context are active') try: ParseContext._active.append(ctx) ctx._on_context_exit = [] yield finally: for func, args, kwargs in ctx._on_context_exit: func(*args, **kwargs) del ctx._on_context_exit ParseContext._active.pop()
[ "def", "activate", "(", "ctx", ")", ":", "if", "hasattr", "(", "ctx", ",", "'_on_context_exit'", ")", ":", "raise", "ContextError", "(", "'Context actions registered outside this '", "'parse context are active'", ")", "try", ":", "ParseContext", ".", "_active", ".",...
Activate the given ParseContext.
[ "Activate", "the", "given", "ParseContext", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/buildfile_context.py#L39-L54
benley/butcher
butcher/buildfile_context.py
ParseContext.parse
def parse(self, **global_args): """Entry point to parsing a BUILD file. Args: **global_args: Variables to include in the parsing environment. """ if self.build_file not in ParseContext._parsed: # http://en.wikipedia.org/wiki/Abstract_syntax_tree # http://martinfowler.com/books/dsl.html butcher_context = {} for str_to_exec in self._strs_to_exec: ast = compile(str_to_exec, '<string>', 'exec') exec_function(ast, butcher_context) with ParseContext.activate(self): startdir = os.path.abspath(os.curdir) try: os.chdir(self.build_file.path_on_disk) if self.build_file not in ParseContext._parsed: ParseContext._parsed.add(self.build_file) eval_globals = copy.copy(butcher_context) eval_globals.update( {'ROOT_DIR': self.build_file.path_on_disk, '__file__': 'bogus please fix this'}) eval_globals.update(global_args) exec_function(self.build_file.code, eval_globals) finally: os.chdir(startdir)
python
def parse(self, **global_args): """Entry point to parsing a BUILD file. Args: **global_args: Variables to include in the parsing environment. """ if self.build_file not in ParseContext._parsed: # http://en.wikipedia.org/wiki/Abstract_syntax_tree # http://martinfowler.com/books/dsl.html butcher_context = {} for str_to_exec in self._strs_to_exec: ast = compile(str_to_exec, '<string>', 'exec') exec_function(ast, butcher_context) with ParseContext.activate(self): startdir = os.path.abspath(os.curdir) try: os.chdir(self.build_file.path_on_disk) if self.build_file not in ParseContext._parsed: ParseContext._parsed.add(self.build_file) eval_globals = copy.copy(butcher_context) eval_globals.update( {'ROOT_DIR': self.build_file.path_on_disk, '__file__': 'bogus please fix this'}) eval_globals.update(global_args) exec_function(self.build_file.code, eval_globals) finally: os.chdir(startdir)
[ "def", "parse", "(", "self", ",", "*", "*", "global_args", ")", ":", "if", "self", ".", "build_file", "not", "in", "ParseContext", ".", "_parsed", ":", "# http://en.wikipedia.org/wiki/Abstract_syntax_tree", "# http://martinfowler.com/books/dsl.html", "butcher_context", ...
Entry point to parsing a BUILD file. Args: **global_args: Variables to include in the parsing environment.
[ "Entry", "point", "to", "parsing", "a", "BUILD", "file", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/buildfile_context.py#L60-L88
COLORFULBOARD/revision
revision/client_manager.py
ClientManager.instantiate_client
def instantiate_client(self, config): """ :param config: The config object. :type config: dict :return: The instantiated class. :rtype: :class:`revision.client.Client` """ modules = config.module.split('.') class_name = modules.pop() module_path = '.'.join(modules) client_instance = getattr( __import__(module_path, {}, {}, ['']), class_name )() client_instance.add_config(config) return client_instance
python
def instantiate_client(self, config): """ :param config: The config object. :type config: dict :return: The instantiated class. :rtype: :class:`revision.client.Client` """ modules = config.module.split('.') class_name = modules.pop() module_path = '.'.join(modules) client_instance = getattr( __import__(module_path, {}, {}, ['']), class_name )() client_instance.add_config(config) return client_instance
[ "def", "instantiate_client", "(", "self", ",", "config", ")", ":", "modules", "=", "config", ".", "module", ".", "split", "(", "'.'", ")", "class_name", "=", "modules", ".", "pop", "(", ")", "module_path", "=", "'.'", ".", "join", "(", "modules", ")", ...
:param config: The config object. :type config: dict :return: The instantiated class. :rtype: :class:`revision.client.Client`
[ ":", "param", "config", ":", "The", "config", "object", ".", ":", "type", "config", ":", "dict", ":", "return", ":", "The", "instantiated", "class", ".", ":", "rtype", ":", ":", "class", ":", "revision", ".", "client", ".", "Client" ]
train
https://github.com/COLORFULBOARD/revision/blob/2f22e72cce5b60032a80c002ac45c2ecef0ed987/revision/client_manager.py#L29-L47
COLORFULBOARD/revision
revision/client_manager.py
ClientManager.add_client
def add_client(self, client): """ Adds the specified client to this manager. :param client: The client to add into this manager. :type client: :class:`revision.client.Client` :return: The ClientManager instance (method chaining) :rtype: :class:`revision.client_manager.ClientManager` """ if not isinstance(client, Client): raise InvalidArgType() if self.has_client(client.key): return self self[client.key] = client return self
python
def add_client(self, client): """ Adds the specified client to this manager. :param client: The client to add into this manager. :type client: :class:`revision.client.Client` :return: The ClientManager instance (method chaining) :rtype: :class:`revision.client_manager.ClientManager` """ if not isinstance(client, Client): raise InvalidArgType() if self.has_client(client.key): return self self[client.key] = client return self
[ "def", "add_client", "(", "self", ",", "client", ")", ":", "if", "not", "isinstance", "(", "client", ",", "Client", ")", ":", "raise", "InvalidArgType", "(", ")", "if", "self", ".", "has_client", "(", "client", ".", "key", ")", ":", "return", "self", ...
Adds the specified client to this manager. :param client: The client to add into this manager. :type client: :class:`revision.client.Client` :return: The ClientManager instance (method chaining) :rtype: :class:`revision.client_manager.ClientManager`
[ "Adds", "the", "specified", "client", "to", "this", "manager", "." ]
train
https://github.com/COLORFULBOARD/revision/blob/2f22e72cce5b60032a80c002ac45c2ecef0ed987/revision/client_manager.py#L67-L84
BD2KOnFHIR/i2b2model
i2b2model/scripts/genconffile.py
generate_conf_file
def generate_conf_file(argv: List[str]) -> bool: """ Convert a set of FHIR resources into their corresponding i2b2 counterparts. :param argv: Command line arguments. See: create_parser for details :return: """ parser = ArgumentParser(description="Generate SQL db_conf file template") parser.add_argument("-f", "--configfile", help="File name to generate (Default: db_conf)", metavar="Config File", default="db_conf") opts = parser.parse_args(argv) if os.path.exists(opts.configfile): print(f"{opts.configfile} already exists!") return False with open(opts.configfile, 'w') as f: f.write(conf_template) print(f"{opts.configfile} generated") return True
python
def generate_conf_file(argv: List[str]) -> bool: """ Convert a set of FHIR resources into their corresponding i2b2 counterparts. :param argv: Command line arguments. See: create_parser for details :return: """ parser = ArgumentParser(description="Generate SQL db_conf file template") parser.add_argument("-f", "--configfile", help="File name to generate (Default: db_conf)", metavar="Config File", default="db_conf") opts = parser.parse_args(argv) if os.path.exists(opts.configfile): print(f"{opts.configfile} already exists!") return False with open(opts.configfile, 'w') as f: f.write(conf_template) print(f"{opts.configfile} generated") return True
[ "def", "generate_conf_file", "(", "argv", ":", "List", "[", "str", "]", ")", "->", "bool", ":", "parser", "=", "ArgumentParser", "(", "description", "=", "\"Generate SQL db_conf file template\"", ")", "parser", ".", "add_argument", "(", "\"-f\"", ",", "\"--confi...
Convert a set of FHIR resources into their corresponding i2b2 counterparts. :param argv: Command line arguments. See: create_parser for details :return:
[ "Convert", "a", "set", "of", "FHIR", "resources", "into", "their", "corresponding", "i2b2", "counterparts", "." ]
train
https://github.com/BD2KOnFHIR/i2b2model/blob/9d49bb53b0733dd83ab5b716014865e270a3c903/i2b2model/scripts/genconffile.py#L14-L31
tonyfast/whatever-forever
whatever/chain.py
Chain.compute
def compute(self, *args, **kwargs)->[Any, None]: """Compose and evaluate the function. """ return super().compute( self.compose, *args, **kwargs )
python
def compute(self, *args, **kwargs)->[Any, None]: """Compose and evaluate the function. """ return super().compute( self.compose, *args, **kwargs )
[ "def", "compute", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "[", "Any", ",", "None", "]", ":", "return", "super", "(", ")", ".", "compute", "(", "self", ".", "compose", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Compose and evaluate the function.
[ "Compose", "and", "evaluate", "the", "function", "." ]
train
https://github.com/tonyfast/whatever-forever/blob/455f80473c61c90121a589d9a66a4c3021375cc2/whatever/chain.py#L105-L110
tonyfast/whatever-forever
whatever/chain.py
Chain.copy
def copy(self, klass=None): """Create a new instance of the current chain. """ chain = ( klass if klass else self.__class__ )(*self._args, **self._kwargs) chain._tokens = self._tokens.copy() return chain
python
def copy(self, klass=None): """Create a new instance of the current chain. """ chain = ( klass if klass else self.__class__ )(*self._args, **self._kwargs) chain._tokens = self._tokens.copy() return chain
[ "def", "copy", "(", "self", ",", "klass", "=", "None", ")", ":", "chain", "=", "(", "klass", "if", "klass", "else", "self", ".", "__class__", ")", "(", "*", "self", ".", "_args", ",", "*", "*", "self", ".", "_kwargs", ")", "chain", ".", "_tokens"...
Create a new instance of the current chain.
[ "Create", "a", "new", "instance", "of", "the", "current", "chain", "." ]
train
https://github.com/tonyfast/whatever-forever/blob/455f80473c61c90121a589d9a66a4c3021375cc2/whatever/chain.py#L135-L142
tonyfast/whatever-forever
whatever/chain.py
ThisComposer.call
def call(self, tokens, *args, **kwargs): """Add args and kwargs to the tokens. """ tokens.append([evaluate, [args, kwargs], {}]) return tokens
python
def call(self, tokens, *args, **kwargs): """Add args and kwargs to the tokens. """ tokens.append([evaluate, [args, kwargs], {}]) return tokens
[ "def", "call", "(", "self", ",", "tokens", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "tokens", ".", "append", "(", "[", "evaluate", ",", "[", "args", ",", "kwargs", "]", ",", "{", "}", "]", ")", "return", "tokens" ]
Add args and kwargs to the tokens.
[ "Add", "args", "and", "kwargs", "to", "the", "tokens", "." ]
train
https://github.com/tonyfast/whatever-forever/blob/455f80473c61c90121a589d9a66a4c3021375cc2/whatever/chain.py#L282-L286
tonyfast/whatever-forever
whatever/chain.py
_this.copy
def copy(self, klass=_x): """A new chain beginning with the current chain tokens and argument. """ chain = super().copy() new_chain = klass(chain._args[0]) new_chain._tokens = [[ chain.compose, [], {}, ]] return new_chain
python
def copy(self, klass=_x): """A new chain beginning with the current chain tokens and argument. """ chain = super().copy() new_chain = klass(chain._args[0]) new_chain._tokens = [[ chain.compose, [], {}, ]] return new_chain
[ "def", "copy", "(", "self", ",", "klass", "=", "_x", ")", ":", "chain", "=", "super", "(", ")", ".", "copy", "(", ")", "new_chain", "=", "klass", "(", "chain", ".", "_args", "[", "0", "]", ")", "new_chain", ".", "_tokens", "=", "[", "[", "chain...
A new chain beginning with the current chain tokens and argument.
[ "A", "new", "chain", "beginning", "with", "the", "current", "chain", "tokens", "and", "argument", "." ]
train
https://github.com/tonyfast/whatever-forever/blob/455f80473c61c90121a589d9a66a4c3021375cc2/whatever/chain.py#L306-L314
azraq27/neural
neural/dsets.py
prefix
def prefix(filename): ''' strips common fMRI dataset suffixes from filenames ''' return os.path.split(re.sub(_afni_suffix_regex,"",str(filename)))[1]
python
def prefix(filename): ''' strips common fMRI dataset suffixes from filenames ''' return os.path.split(re.sub(_afni_suffix_regex,"",str(filename)))[1]
[ "def", "prefix", "(", "filename", ")", ":", "return", "os", ".", "path", ".", "split", "(", "re", ".", "sub", "(", "_afni_suffix_regex", ",", "\"\"", ",", "str", "(", "filename", ")", ")", ")", "[", "1", "]" ]
strips common fMRI dataset suffixes from filenames
[ "strips", "common", "fMRI", "dataset", "suffixes", "from", "filenames" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L8-L10
azraq27/neural
neural/dsets.py
suffix
def suffix(filename,suffix): ''' returns a filenames with ``suffix`` inserted before the dataset suffix ''' return os.path.split(re.sub(_afni_suffix_regex,"%s\g<1>" % suffix,str(filename)))[1]
python
def suffix(filename,suffix): ''' returns a filenames with ``suffix`` inserted before the dataset suffix ''' return os.path.split(re.sub(_afni_suffix_regex,"%s\g<1>" % suffix,str(filename)))[1]
[ "def", "suffix", "(", "filename", ",", "suffix", ")", ":", "return", "os", ".", "path", ".", "split", "(", "re", ".", "sub", "(", "_afni_suffix_regex", ",", "\"%s\\g<1>\"", "%", "suffix", ",", "str", "(", "filename", ")", ")", ")", "[", "1", "]" ]
returns a filenames with ``suffix`` inserted before the dataset suffix
[ "returns", "a", "filenames", "with", "suffix", "inserted", "before", "the", "dataset", "suffix" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L12-L14
azraq27/neural
neural/dsets.py
afni_copy
def afni_copy(filename): ''' creates a ``+orig`` copy of the given dataset and returns the filename as a string ''' if nl.pkg_available('afni',True): afni_filename = "%s+orig" % nl.prefix(filename) if not os.path.exists(afni_filename + ".HEAD"): nl.calc(filename,'a',prefix=nl.prefix(filename)) return afni_filename
python
def afni_copy(filename): ''' creates a ``+orig`` copy of the given dataset and returns the filename as a string ''' if nl.pkg_available('afni',True): afni_filename = "%s+orig" % nl.prefix(filename) if not os.path.exists(afni_filename + ".HEAD"): nl.calc(filename,'a',prefix=nl.prefix(filename)) return afni_filename
[ "def", "afni_copy", "(", "filename", ")", ":", "if", "nl", ".", "pkg_available", "(", "'afni'", ",", "True", ")", ":", "afni_filename", "=", "\"%s+orig\"", "%", "nl", ".", "prefix", "(", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", ...
creates a ``+orig`` copy of the given dataset and returns the filename as a string
[ "creates", "a", "+", "orig", "copy", "of", "the", "given", "dataset", "and", "returns", "the", "filename", "as", "a", "string" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L20-L26