Dataset schema (column, type, observed range):

repo               string   lengths 7-55
path               string   lengths 4-127
func_name          string   lengths 1-88
original_string    string   lengths 75-19.8k
language           string   1 class (python)
code               string   lengths 75-19.8k
code_tokens        list     lengths 20-707
docstring          string   lengths 3-17.3k
docstring_tokens   list     lengths 3-222
sha                string   lengths 40-40
url                string   lengths 87-242
partition          string   1 class (train)
idx                int64    0-252k
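Each record below follows this schema, one field per line. A minimal sketch of iterating such records programmatically, assuming a JSON Lines export of this split; "functions.train.jsonl" is a hypothetical filename, not one named in this dump:

import json

# Iterate records that follow the schema above, assuming a JSON Lines
# export; the filename here is hypothetical.
with open("functions.train.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        print(record["repo"], record["func_name"], record["url"])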
manns/pyspread
pyspread/src/gui/_dialogs.py
CsvParameterWidgets.OnDialectChoice
python
def OnDialectChoice(self, event):
    """Updates all param widgets conforming to the selected dialect"""

    dialect_name = event.GetString()
    value = list(self.choices['dialects']).index(dialect_name)

    if dialect_name == 'sniffer':
        if self.csvfilepath is None:
            event.Skip()
            return None
        dialect, self.has_header = sniff(self.csvfilepath)
    elif dialect_name == 'user':
        event.Skip()
        return None
    else:
        dialect = csv.get_dialect(dialect_name)

    self._update_settings(dialect)
    self.choice_dialects.SetValue(value)
Updates all param widgets conforming to the selected dialect
[ "Updates", "all", "param", "widgets", "confirming", "to", "the", "selcted", "dialect" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L386-L405
train
231,700
manns/pyspread
pyspread/src/gui/_dialogs.py
CsvParameterWidgets.OnWidget
python
def OnWidget(self, event):
    """Update the dialect widget to 'user'"""

    self.choice_dialects.SetValue(len(self.choices['dialects']) - 1)
    event.Skip()
Update the dialect widget to 'user'
[ "Update", "the", "dialect", "widget", "to", "user" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L407-L411
train
231,701
manns/pyspread
pyspread/src/gui/_dialogs.py
CsvParameterWidgets.get_dialect
python
def get_dialect(self):
    """Returns a new dialect that implements the current selection"""

    parameters = {}

    for parameter in self.csv_params[2:]:
        pname, ptype, plabel, phelp = parameter

        widget = self._widget_from_p(pname, ptype)

        if ptype is types.StringType or ptype is types.UnicodeType:
            parameters[pname] = str(widget.GetValue())
        elif ptype is types.BooleanType:
            parameters[pname] = widget.GetValue()
        elif pname == 'quoting':
            choice = self.choices['quoting'][widget.GetSelection()]
            parameters[pname] = getattr(csv, choice)
        else:
            raise TypeError(_("{type} unknown.").format(type=ptype))

    has_header = parameters.pop("self.has_header")

    try:
        csv.register_dialect('user', **parameters)
    except TypeError, err:
        msg = _("The dialect is invalid. \n "
                "\nError message:\n{msg}").format(msg=err)
        dlg = wx.MessageDialog(self.parent, msg, style=wx.ID_CANCEL)
        dlg.ShowModal()
        dlg.Destroy()
        raise TypeError(err)

    return csv.get_dialect('user'), has_header
Returns a new dialect that implements the current selection
[ "Returns", "a", "new", "dialect", "that", "implements", "the", "current", "selection" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L413-L446
train
231,702
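get_dialect above registers the collected parameters under the name 'user' and reads them back; a minimal standalone sketch of the csv dialect registry it relies on (the format parameters here are illustrative, not pyspread defaults):

import csv

# Register a named dialect from keyword format parameters, then read it
# back; this is the same stdlib API that get_dialect above uses.
csv.register_dialect('user', delimiter=';', quotechar='"',
                     quoting=csv.QUOTE_MINIMAL)

dialect = csv.get_dialect('user')
print(dialect.delimiter)  # ';'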
manns/pyspread
pyspread/src/gui/_dialogs.py
CSVPreviewGrid.OnMouse
python
def OnMouse(self, event):
    """Reduces clicks to enter an edit control"""

    self.SetGridCursor(event.Row, event.Col)
    self.EnableCellEditControl(True)

    event.Skip()
Reduces clicks to enter an edit control
[ "Reduces", "clicks", "to", "enter", "an", "edit", "control" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L487-L492
train
231,703
manns/pyspread
pyspread/src/gui/_dialogs.py
CSVPreviewGrid.OnGridEditorCreated
python
def OnGridEditorCreated(self, event):
    """Used to capture Editor close events"""

    editor = event.GetControl()
    editor.Bind(wx.EVT_KILL_FOCUS, self.OnGridEditorClosed)

    event.Skip()
Used to capture Editor close events
[ "Used", "to", "capture", "Editor", "close", "events" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L499-L505
train
231,704
manns/pyspread
pyspread/src/gui/_dialogs.py
CSVPreviewGrid.OnGridEditorClosed
python
def OnGridEditorClosed(self, event):
    """Event handler for end of output type choice"""

    try:
        dialect, self.has_header = \
            self.parent.csvwidgets.get_dialect()
    except TypeError:
        event.Skip()
        return 0

    self.fill_cells(dialect, self.has_header, choices=False)
Event handler for end of output type choice
[ "Event", "handler", "for", "end", "of", "output", "type", "choice" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L507-L517
train
231,705
manns/pyspread
pyspread/src/gui/_dialogs.py
CSVPreviewGrid.get_digest_keys
python
def get_digest_keys(self):
    """Returns a list of the type choices"""

    digest_keys = []

    for col in xrange(self.GetNumberCols()):
        digest_key = self.GetCellValue(self.has_header, col)

        if digest_key == "":
            digest_key = self.digest_types.keys()[0]

        digest_keys.append(digest_key)

    return digest_keys
Returns a list of the type choices
[ "Returns", "a", "list", "of", "the", "type", "choices" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L580-L590
train
231,706
manns/pyspread
pyspread/src/gui/_dialogs.py
CsvExportDialog.OnButtonApply
python
def OnButtonApply(self, event):
    """Updates the preview_textctrl"""

    try:
        dialect, self.has_header = self.csvwidgets.get_dialect()
    except TypeError:
        event.Skip()
        return 0

    self.preview_textctrl.fill(data=self.data, dialect=dialect)

    event.Skip()
Updates the preview_textctrl
[ "Updates", "the", "preview_textctrl" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L785-L796
train
231,707
manns/pyspread
pyspread/src/gui/_dialogs.py
MacroPanel._set_properties
python
def _set_properties(self):
    """Setup title, size and tooltips"""

    self.codetext_ctrl.SetToolTipString(_("Enter python code here."))
    self.apply_button.SetToolTipString(_("Apply changes to current macro"))
    self.splitter.SetBackgroundStyle(wx.BG_STYLE_COLOUR)
    self.result_ctrl.SetMinSize((10, 10))
Setup title, size and tooltips
[ "Setup", "title", "size", "and", "tooltips" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L868-L874
train
231,708
manns/pyspread
pyspread/src/gui/_dialogs.py
MacroPanel.OnApply
python
def OnApply(self, event):
    """Event handler for Apply button"""

    # See if we have valid python
    try:
        ast.parse(self.macros)
    except:
        # Grab the traceback and print it for the user
        s = StringIO()
        e = exc_info()
        # usr_tb will more than likely be None because ast raises
        # SyntaxErrors as occurring outside of the current
        # execution frame
        usr_tb = get_user_codeframe(e[2]) or None
        print_exception(e[0], e[1], usr_tb, None, s)
        post_command_event(self.parent, self.MacroErrorMsg,
                           err=s.getvalue())
        success = False
    else:
        self.result_ctrl.SetValue('')
        post_command_event(self.parent, self.MacroReplaceMsg,
                           macros=self.macros)
        post_command_event(self.parent, self.MacroExecuteMsg)
        success = True

    event.Skip()

    return success
Event handler for Apply button
[ "Event", "handler", "for", "Apply", "button" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L881-L907
train
231,709
manns/pyspread
pyspread/src/gui/_dialogs.py
MacroPanel.update_result_ctrl
python
def update_result_ctrl(self, event):
    """Update event result following execution by main window"""

    # Check to see if macro window still exists
    if not self:
        return

    printLen = 0
    self.result_ctrl.SetValue('')

    if hasattr(event, 'msg'):
        # Output of script (from print statements, for example)
        self.result_ctrl.AppendText(event.msg)
        printLen = len(event.msg)

    if hasattr(event, 'err'):
        # Error messages
        errLen = len(event.err)
        errStyle = wx.TextAttr(wx.RED)
        self.result_ctrl.AppendText(event.err)
        self.result_ctrl.SetStyle(printLen, printLen + errLen, errStyle)

    if not hasattr(event, 'err') or event.err == '':
        # No error passed. Close dialog if user requested it.
        if self._ok_pressed:
            self.Destroy()
        self._ok_pressed = False
Update event result following execution by main window
[ "Update", "event", "result", "following", "execution", "by", "main", "window" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L909-L933
train
231,710
manns/pyspread
pyspread/src/gui/_dialogs.py
DimensionsEntryDialog._ondim
python
def _ondim(self, dimension, valuestring):
    """Converts valuestring to int and assigns result to self.dim

    If there is an error (such as an empty valuestring) or if the value
    is < 1, the value 1 is assigned to self.dim

    Parameters
    ----------
    dimension: int
    \tDimension that is to be updated. Must be in [1:4]
    valuestring: string
    \t A string that can be converted to an int

    """

    try:
        self.dimensions[dimension] = int(valuestring)
    except ValueError:
        self.dimensions[dimension] = 1
        self.textctrls[dimension].SetValue(str(1))

    if self.dimensions[dimension] < 1:
        self.dimensions[dimension] = 1
        self.textctrls[dimension].SetValue(str(1))
Converts valuestring to int and assigns result to self.dim

If there is an error (such as an empty valuestring) or if the value is < 1, the value 1 is assigned to self.dim

Parameters
----------
dimension: int
\tDimension that is to be updated. Must be in [1:4]
valuestring: string
\t A string that can be converted to an int
[ "Converts", "valuestring", "to", "int", "and", "assigns", "result", "to", "self", ".", "dim" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L1001-L1025
train
231,711
manns/pyspread
pyspread/src/gui/_dialogs.py
CellEntryDialog.OnOk
python
def OnOk(self, event):
    """Posts a command event that makes the grid show the entered cell"""

    # Get key values from textctrls
    key_strings = [self.row_textctrl.GetValue(),
                   self.col_textctrl.GetValue(),
                   self.tab_textctrl.GetValue()]

    key = []
    for key_string in key_strings:
        try:
            key.append(int(key_string))
        except ValueError:
            key.append(0)

    # Post event
    post_command_event(self.parent, self.GotoCellMsg, key=tuple(key))
Posts a command event that makes the grid show the entered cell
[ "Posts", "a", "command", "event", "that", "makes", "the", "grid", "show", "the", "entered", "cell" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L1106-L1125
train
231,712
manns/pyspread
pyspread/src/gui/_dialogs.py
AboutDialog._set_properties
python
def _set_properties(self):
    """Setup title and label"""

    self.SetTitle(_("About pyspread"))

    label = _("pyspread {version}\nCopyright Martin Manns")
    label = label.format(version=VERSION)
    self.about_label.SetLabel(label)
Setup title and label
[ "Setup", "title", "and", "label" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L1163-L1171
train
231,713
manns/pyspread
pyspread/src/gui/_dialogs.py
PasteAsDialog.get_max_dim
python
def get_max_dim(self, obj):
    """Returns maximum dimensionality over which obj is iterable <= 2"""

    try:
        iter(obj)
    except TypeError:
        return 0

    try:
        for o in obj:
            iter(o)
            break
    except TypeError:
        return 1

    return 2
Returns maximum dimensionality over which obj is iterable <= 2
[ "Returns", "maximum", "dimensionality", "over", "which", "obj", "is", "iterable", "<", "=", "2" ]
0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L1473-L1490
train
231,714
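A quick sketch of the dimensionality probe above, using a standalone copy of the method body (the PasteAsDialog instance is not needed for the logic):

def get_max_dim(obj):
    # Standalone copy of PasteAsDialog.get_max_dim, for illustration only
    try:
        iter(obj)
    except TypeError:
        return 0
    try:
        for o in obj:
            iter(o)
            break
    except TypeError:
        return 1
    return 2

print(get_max_dim(42))            # 0: not iterable
print(get_max_dim([1, 2, 3]))     # 1: iterable of non-iterables
print(get_max_dim([[1], [2]]))    # 2: iterable of iterables
print(get_max_dim(["ab", "cd"]))  # 2: strings are themselves iterable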
dgraph-io/pydgraph
pydgraph/client_stub.py
DgraphClientStub.alter
python
def alter(self, operation, timeout=None, metadata=None, credentials=None):
    """Runs alter operation."""
    return self.stub.Alter(operation, timeout=timeout, metadata=metadata,
                           credentials=credentials)
Runs alter operation.
[ "Runs", "alter", "operation", "." ]
0fe85f6593cb2148475750bc8555a6fdf509054b
https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/client_stub.py#L43-L46
train
231,715
dgraph-io/pydgraph
pydgraph/client_stub.py
DgraphClientStub.query
python
def query(self, req, timeout=None, metadata=None, credentials=None):
    """Runs query operation."""
    return self.stub.Query(req, timeout=timeout, metadata=metadata,
                           credentials=credentials)
Runs query operation.
[ "Runs", "query", "operation", "." ]
0fe85f6593cb2148475750bc8555a6fdf509054b
https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/client_stub.py#L48-L51
train
231,716
dgraph-io/pydgraph
pydgraph/client_stub.py
DgraphClientStub.mutate
python
def mutate(self, mutation, timeout=None, metadata=None, credentials=None):
    """Runs mutate operation."""
    return self.stub.Mutate(mutation, timeout=timeout, metadata=metadata,
                            credentials=credentials)
Runs mutate operation.
[ "Runs", "mutate", "operation", "." ]
0fe85f6593cb2148475750bc8555a6fdf509054b
https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/client_stub.py#L53-L56
train
231,717
dgraph-io/pydgraph
pydgraph/client_stub.py
DgraphClientStub.commit_or_abort
python
def commit_or_abort(self, ctx, timeout=None, metadata=None,
                    credentials=None):
    """Runs commit or abort operation."""
    return self.stub.CommitOrAbort(ctx, timeout=timeout, metadata=metadata,
                                   credentials=credentials)
Runs commit or abort operation.
[ "Runs", "commit", "or", "abort", "operation", "." ]
0fe85f6593cb2148475750bc8555a6fdf509054b
https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/client_stub.py#L58-L62
train
231,718
dgraph-io/pydgraph
pydgraph/client_stub.py
DgraphClientStub.check_version
python
def check_version(self, check, timeout=None, metadata=None,
                  credentials=None):
    """Returns the version of the Dgraph instance."""
    return self.stub.CheckVersion(check, timeout=timeout,
                                  metadata=metadata,
                                  credentials=credentials)
Returns the version of the Dgraph instance.
[ "Returns", "the", "version", "of", "the", "Dgraph", "instance", "." ]
0fe85f6593cb2148475750bc8555a6fdf509054b
https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/client_stub.py#L64-L69
train
231,719
dgraph-io/pydgraph
pydgraph/client.py
DgraphClient.alter
python
def alter(self, operation, timeout=None, metadata=None, credentials=None):
    """Runs a modification via this client."""
    new_metadata = self.add_login_metadata(metadata)

    try:
        return self.any_client().alter(operation, timeout=timeout,
                                       metadata=new_metadata,
                                       credentials=credentials)
    except Exception as error:
        if util.is_jwt_expired(error):
            self.retry_login()
            new_metadata = self.add_login_metadata(metadata)
            return self.any_client().alter(operation, timeout=timeout,
                                           metadata=new_metadata,
                                           credentials=credentials)
        else:
            raise error
Runs a modification via this client.
[ "Runs", "a", "modification", "via", "this", "client", "." ]
0fe85f6593cb2148475750bc8555a6fdf509054b
https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/client.py#L71-L87
train
231,720
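The same retry-once-on-expired-JWT pattern recurs in the query, mutate, commit, and discard records below; a minimal generic sketch of the idea, with hypothetical refresh_login and is_jwt_expired callables standing in for the client internals:

def call_with_relogin(call, refresh_login, is_jwt_expired):
    # Try the RPC once; if the failure looks like an expired JWT,
    # refresh credentials and retry a single time, otherwise re-raise.
    try:
        return call()
    except Exception as error:
        if not is_jwt_expired(error):
            raise
        refresh_login()
        return call()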
dgraph-io/pydgraph
pydgraph/client.py
DgraphClient.txn
python
def txn(self, read_only=False, best_effort=False):
    """Creates a transaction."""
    return txn.Txn(self, read_only=read_only, best_effort=best_effort)
Creates a transaction.
[ "Creates", "a", "transaction", "." ]
0fe85f6593cb2148475750bc8555a6fdf509054b
https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/client.py#L89-L91
train
231,721
dgraph-io/pydgraph
pydgraph/txn.py
Txn.query
python
def query(self, query, variables=None, timeout=None, metadata=None,
          credentials=None):
    """Adds a query operation to the transaction."""
    new_metadata = self._dg.add_login_metadata(metadata)
    req = self._common_query(query, variables=variables)

    try:
        res = self._dc.query(req, timeout=timeout, metadata=new_metadata,
                             credentials=credentials)
    except Exception as error:
        if util.is_jwt_expired(error):
            self._dg.retry_login()
            new_metadata = self._dg.add_login_metadata(metadata)
            res = self._dc.query(req, timeout=timeout,
                                 metadata=new_metadata,
                                 credentials=credentials)
        else:
            raise error

    self.merge_context(res.txn)
    return res
Adds a query operation to the transaction.
[ "Adds", "a", "query", "operation", "to", "the", "transaction", "." ]
0fe85f6593cb2148475750bc8555a6fdf509054b
https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/txn.py#L65-L85
train
231,722
dgraph-io/pydgraph
pydgraph/txn.py
Txn.mutate
python
def mutate(self, mutation=None, set_obj=None, del_obj=None,
           set_nquads=None, del_nquads=None, commit_now=None,
           ignore_index_conflict=None, timeout=None, metadata=None,
           credentials=None):
    """Adds a mutate operation to the transaction."""
    mutation = self._common_mutate(
        mutation=mutation, set_obj=set_obj, del_obj=del_obj,
        set_nquads=set_nquads, del_nquads=del_nquads,
        commit_now=commit_now,
        ignore_index_conflict=ignore_index_conflict)

    new_metadata = self._dg.add_login_metadata(metadata)
    mutate_error = None

    try:
        assigned = self._dc.mutate(mutation, timeout=timeout,
                                   metadata=new_metadata,
                                   credentials=credentials)
    except Exception as error:
        if util.is_jwt_expired(error):
            self._dg.retry_login()
            new_metadata = self._dg.add_login_metadata(metadata)
            try:
                assigned = self._dc.mutate(mutation, timeout=timeout,
                                           metadata=new_metadata,
                                           credentials=credentials)
            except Exception as error:
                mutate_error = error
        else:
            mutate_error = error

    if mutate_error is not None:
        try:
            self.discard(timeout=timeout, metadata=metadata,
                         credentials=credentials)
        except:
            # Ignore error - user should see the original error.
            pass

        self._common_except_mutate(mutate_error)

    if mutation.commit_now:
        self._finished = True

    self.merge_context(assigned.context)
    return assigned
Adds a mutate operation to the transaction.
[ "Adds", "a", "mutate", "operation", "to", "the", "transaction", "." ]
0fe85f6593cb2148475750bc8555a6fdf509054b
https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/txn.py#L104-L147
train
231,723
dgraph-io/pydgraph
pydgraph/txn.py
Txn.commit
python
def commit(self, timeout=None, metadata=None, credentials=None):
    """Commits the transaction."""
    if not self._common_commit():
        return

    new_metadata = self._dg.add_login_metadata(metadata)

    try:
        self._dc.commit_or_abort(self._ctx, timeout=timeout,
                                 metadata=new_metadata,
                                 credentials=credentials)
    except Exception as error:
        if util.is_jwt_expired(error):
            self._dg.retry_login()
            new_metadata = self._dg.add_login_metadata(metadata)
            try:
                self._dc.commit_or_abort(self._ctx, timeout=timeout,
                                         metadata=new_metadata,
                                         credentials=credentials)
            except Exception as error:
                return self._common_except_commit(error)

        self._common_except_commit(error)
Commits the transaction.
[ "Commits", "the", "transaction", "." ]
0fe85f6593cb2148475750bc8555a6fdf509054b
https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/txn.py#L188-L209
train
231,724
dgraph-io/pydgraph
pydgraph/txn.py
Txn.discard
python
def discard(self, timeout=None, metadata=None, credentials=None):
    """Discards the transaction."""
    if not self._common_discard():
        return

    new_metadata = self._dg.add_login_metadata(metadata)

    try:
        self._dc.commit_or_abort(self._ctx, timeout=timeout,
                                 metadata=new_metadata,
                                 credentials=credentials)
    except Exception as error:
        if util.is_jwt_expired(error):
            self._dg.retry_login()
            new_metadata = self._dg.add_login_metadata(metadata)
            self._dc.commit_or_abort(self._ctx, timeout=timeout,
                                     metadata=new_metadata,
                                     credentials=credentials)
        else:
            raise error
Discards the transaction.
[ "Discards", "the", "transaction", "." ]
0fe85f6593cb2148475750bc8555a6fdf509054b
https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/txn.py#L232-L250
train
231,725
dgraph-io/pydgraph
pydgraph/txn.py
Txn.merge_context
python
def merge_context(self, src=None):
    """Merges context from this instance with src."""
    if src is None:
        # This condition will be true only if the server doesn't return a
        # txn context after a query or mutation.
        return

    if self._ctx.start_ts == 0:
        self._ctx.start_ts = src.start_ts
    elif self._ctx.start_ts != src.start_ts:
        # This condition should never be true.
        raise Exception('StartTs mismatch')

    self._ctx.keys.extend(src.keys)
    self._ctx.preds.extend(src.preds)
Merges context from this instance with src.
[ "Merges", "context", "from", "this", "instance", "with", "src", "." ]
0fe85f6593cb2148475750bc8555a6fdf509054b
https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/txn.py#L263-L277
train
231,726
anx-ckreuzberger/django-rest-passwordreset
django_rest_passwordreset/tokens.py
RandomStringTokenGenerator.generate_token
python
def generate_token(self, *args, **kwargs):
    """ generates a pseudo random code using os.urandom and binascii.hexlify """
    # determine the length based on min_length and max_length
    length = random.randint(self.min_length, self.max_length)

    # generate the token using os.urandom and hexlify
    return binascii.hexlify(
        os.urandom(self.max_length)
    ).decode()[0:length]
generates a pseudo random code using os.urandom and binascii.hexlify
[ "generates", "a", "pseudo", "random", "code", "using", "os", ".", "urandom", "and", "binascii", ".", "hexlify" ]
7118d430d4b21f78a23530c88a33390c9b6a4f95
https://github.com/anx-ckreuzberger/django-rest-passwordreset/blob/7118d430d4b21f78a23530c88a33390c9b6a4f95/django_rest_passwordreset/tokens.py#L61-L69
train
231,727
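Note that hexlify yields two hex characters per byte, so urandom(max_length) produces 2 * max_length characters and the [0:length] slice always has enough material; a standalone sketch of the same recipe (the default lengths here are illustrative):

import binascii
import os
import random

def random_hex_token(min_length=10, max_length=50):
    # hexlify doubles the byte count, so max_length random bytes cover
    # any requested length <= max_length (indeed, up to 2 * max_length)
    length = random.randint(min_length, max_length)
    return binascii.hexlify(os.urandom(max_length)).decode()[:length]

print(random_hex_token())  # e.g. '9f2c4e...' of random length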
pinax/pinax-blog
pinax/blog/parsers/creole_parser.py
HtmlEmitter.get_text
python
def get_text(self, node):
    """Try to emit whatever text is in the node."""
    try:
        return node.children[0].content or ""
    except (AttributeError, IndexError):
        return node.content or ""
Try to emit whatever text is in the node.
[ "Try", "to", "emit", "whatever", "text", "is", "in", "the", "node", "." ]
be1d64946381b47d197b258a488d5de56aacccce
https://github.com/pinax/pinax-blog/blob/be1d64946381b47d197b258a488d5de56aacccce/pinax/blog/parsers/creole_parser.py#L35-L40
train
231,728
pinax/pinax-blog
pinax/blog/parsers/creole_parser.py
HtmlEmitter.emit_children
python
def emit_children(self, node):
    """Emit all the children of a node."""
    return "".join([self.emit_node(child) for child in node.children])
Emit all the children of a node.
[ "Emit", "all", "the", "children", "of", "a", "node", "." ]
be1d64946381b47d197b258a488d5de56aacccce
https://github.com/pinax/pinax-blog/blob/be1d64946381b47d197b258a488d5de56aacccce/pinax/blog/parsers/creole_parser.py#L138-L140
train
231,729
pinax/pinax-blog
pinax/blog/parsers/creole_parser.py
HtmlEmitter.emit_node
python
def emit_node(self, node):
    """Emit a single node."""
    emit = getattr(self, "%s_emit" % node.kind, self.default_emit)
    return emit(node)
Emit a single node.
[ "Emit", "a", "single", "node", "." ]
be1d64946381b47d197b258a488d5de56aacccce
https://github.com/pinax/pinax-blog/blob/be1d64946381b47d197b258a488d5de56aacccce/pinax/blog/parsers/creole_parser.py#L142-L145
train
231,730
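emit_node above dispatches on node.kind via method-name lookup with a default fallback; a self-contained sketch of that pattern (the Node and MiniEmitter types here are illustrative, not the creole parser's own):

from collections import namedtuple

Node = namedtuple("Node", "kind content")

class MiniEmitter(object):
    # getattr-based dispatch: "<kind>_emit" if defined, else default_emit.
    def default_emit(self, node):
        return ""

    def text_emit(self, node):
        return node.content

    def emit_node(self, node):
        emit = getattr(self, "%s_emit" % node.kind, self.default_emit)
        return emit(node)

print(MiniEmitter().emit_node(Node("text", "hello")))  # 'hello'
print(MiniEmitter().emit_node(Node("break", "")))      # '' (fallback)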
pinax/pinax-blog
pinax/blog/views.py
ajax_preview
python
def ajax_preview(request, **kwargs):
    """
    Currently only supports markdown
    """
    data = {
        "html": render_to_string("pinax/blog/_preview.html", {
            "content": parse(request.POST.get("markup"))
        })
    }
    return JsonResponse(data)
Currently only supports markdown
[ "Currently", "only", "supports", "markdown" ]
be1d64946381b47d197b258a488d5de56aacccce
https://github.com/pinax/pinax-blog/blob/be1d64946381b47d197b258a488d5de56aacccce/pinax/blog/views.py#L285-L294
train
231,731
closeio/tasktiger
tasktiger/redis_semaphore.py
Semaphore.set_system_lock
python
def set_system_lock(cls, redis, name, timeout):
    """
    Set system lock for the semaphore.

    Sets a system lock that will expire in timeout seconds. This
    overrides all other locks. Existing locks cannot be renewed and no
    new locks will be permitted until the system lock expires.

    Arguments:
        redis: Redis client
        name: Name of lock. Used as ZSET key.
        timeout: Timeout in seconds for system lock
    """
    pipeline = redis.pipeline()
    pipeline.zadd(name, SYSTEM_LOCK_ID, time.time() + timeout)
    pipeline.expire(name, timeout + 10)  # timeout plus buffer for troubleshooting
    pipeline.execute()
Set system lock for the semaphore. Sets a system lock that will expire in timeout seconds. This overrides all other locks. Existing locks cannot be renewed and no new locks will be permitted until the system lock expires. Arguments: redis: Redis client name: Name of lock. Used as ZSET key. timeout: Timeout in seconds for system lock
[ "Set", "system", "lock", "for", "the", "semaphore", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/redis_semaphore.py#L51-L69
train
231,732
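A minimal sketch of pausing a semaphore with the classmethod above; the key name is an example value, and a redis-py 2.x client is assumed to match the zadd calling convention used in the record.

import redis
from tasktiger.redis_semaphore import Semaphore

conn = redis.Redis()
# Refuse all new and renewed locks on this semaphore for the next 5 minutes.
Semaphore.set_system_lock(conn, 't:lockv2:email', 300)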
closeio/tasktiger
tasktiger/redis_semaphore.py
Semaphore.acquire
def acquire(self): """ Obtain a semaphore lock. Returns: Tuple that contains True/False if the lock was acquired and number of locks in semaphore. """ acquired, locks = self._semaphore(keys=[self.name], args=[self.lock_id, self.max_locks, self.timeout, time.time()]) # Convert Lua boolean returns to Python booleans acquired = True if acquired == 1 else False return acquired, locks
python
def acquire(self): """ Obtain a semaphore lock. Returns: Tuple that contains True/False if the lock was acquired and number of locks in semaphore. """ acquired, locks = self._semaphore(keys=[self.name], args=[self.lock_id, self.max_locks, self.timeout, time.time()]) # Convert Lua boolean returns to Python booleans acquired = True if acquired == 1 else False return acquired, locks
[ "def", "acquire", "(", "self", ")", ":", "acquired", ",", "locks", "=", "self", ".", "_semaphore", "(", "keys", "=", "[", "self", ".", "name", "]", ",", "args", "=", "[", "self", ".", "lock_id", ",", "self", ".", "max_locks", ",", "self", ".", "timeout", ",", "time", ".", "time", "(", ")", "]", ")", "# Convert Lua boolean returns to Python booleans", "acquired", "=", "True", "if", "acquired", "==", "1", "else", "False", "return", "acquired", ",", "locks" ]
Obtain a semaphore lock. Returns: Tuple that contains True/False if the lock was acquired and number of locks in semaphore.
[ "Obtain", "a", "semaphore", "lock", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/redis_semaphore.py#L76-L91
train
231,733
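A sketch of taking a slot, mirroring the constructor arguments Worker._get_queue_lock uses further down (connection, key, lock id, max_locks, timeout); the key name, lock id, and limits here are example values.

import uuid
import redis
from tasktiger.redis_semaphore import Semaphore

conn = redis.Redis()
sem = Semaphore(conn, 't:lockv2:email', str(uuid.uuid4()),
                max_locks=2, timeout=60)
acquired, locks = sem.acquire()
if acquired:
    print('got a slot (%d of 2 in use)' % locks)
else:
    print('semaphore full (%d locks held)' % locks)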
closeio/tasktiger
tasktiger/redis_lock.py
NewStyleLock.renew
def renew(self, new_timeout): """ Sets a new timeout for an already acquired lock. ``new_timeout`` can be specified as an integer or a float, both representing the number of seconds. """ if self.local.token is None: raise LockError("Cannot extend an unlocked lock") if self.timeout is None: raise LockError("Cannot extend a lock with no timeout") return self.do_renew(new_timeout)
python
def renew(self, new_timeout): """ Sets a new timeout for an already acquired lock. ``new_timeout`` can be specified as an integer or a float, both representing the number of seconds. """ if self.local.token is None: raise LockError("Cannot extend an unlocked lock") if self.timeout is None: raise LockError("Cannot extend a lock with no timeout") return self.do_renew(new_timeout)
[ "def", "renew", "(", "self", ",", "new_timeout", ")", ":", "if", "self", ".", "local", ".", "token", "is", "None", ":", "raise", "LockError", "(", "\"Cannot extend an unlocked lock\"", ")", "if", "self", ".", "timeout", "is", "None", ":", "raise", "LockError", "(", "\"Cannot extend a lock with no timeout\"", ")", "return", "self", ".", "do_renew", "(", "new_timeout", ")" ]
Sets a new timeout for an already acquired lock. ``new_timeout`` can be specified as an integer or a float, both representing the number of seconds.
[ "Sets", "a", "new", "timeout", "for", "an", "already", "acquired", "lock", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/redis_lock.py#L122-L133
train
231,734
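A sketch of defensively extending a lock with the renew method above. The record shows renew raising LockError for an unlocked lock or one without a timeout; importing it from redis.exceptions (redis-py's own exception) is an assumption about where the class used here is defined.

from redis.exceptions import LockError

def extend(lock, seconds):
    # renew() raises LockError for an unlocked lock or one with no timeout.
    try:
        lock.renew(seconds)
        return True
    except LockError:
        return False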
closeio/tasktiger
tasktiger/__init__.py
TaskTiger.task
def task(self, _fn=None, queue=None, hard_timeout=None, unique=None, lock=None, lock_key=None, retry=None, retry_on=None, retry_method=None, schedule=None, batch=False, max_queue_size=None): """ Function decorator that defines the behavior of the function when it is used as a task. To use the default behavior, tasks don't need to be decorated. See README.rst for an explanation of the options. """ def _delay(func): def _delay_inner(*args, **kwargs): return self.delay(func, args=args, kwargs=kwargs) return _delay_inner # Periodic tasks are unique. if schedule is not None: unique = True def _wrap(func): if hard_timeout is not None: func._task_hard_timeout = hard_timeout if queue is not None: func._task_queue = queue if unique is not None: func._task_unique = unique if lock is not None: func._task_lock = lock if lock_key is not None: func._task_lock_key = lock_key if retry is not None: func._task_retry = retry if retry_on is not None: func._task_retry_on = retry_on if retry_method is not None: func._task_retry_method = retry_method if batch is not None: func._task_batch = batch if schedule is not None: func._task_schedule = schedule if max_queue_size is not None: func._task_max_queue_size = max_queue_size func.delay = _delay(func) if schedule is not None: serialized_func = serialize_func_name(func) assert serialized_func not in self.periodic_task_funcs, \ "attempted duplicate registration of periodic task" self.periodic_task_funcs[serialized_func] = func return func return _wrap if _fn is None else _wrap(_fn)
python
def task(self, _fn=None, queue=None, hard_timeout=None, unique=None, lock=None, lock_key=None, retry=None, retry_on=None, retry_method=None, schedule=None, batch=False, max_queue_size=None): """ Function decorator that defines the behavior of the function when it is used as a task. To use the default behavior, tasks don't need to be decorated. See README.rst for an explanation of the options. """ def _delay(func): def _delay_inner(*args, **kwargs): return self.delay(func, args=args, kwargs=kwargs) return _delay_inner # Periodic tasks are unique. if schedule is not None: unique = True def _wrap(func): if hard_timeout is not None: func._task_hard_timeout = hard_timeout if queue is not None: func._task_queue = queue if unique is not None: func._task_unique = unique if lock is not None: func._task_lock = lock if lock_key is not None: func._task_lock_key = lock_key if retry is not None: func._task_retry = retry if retry_on is not None: func._task_retry_on = retry_on if retry_method is not None: func._task_retry_method = retry_method if batch is not None: func._task_batch = batch if schedule is not None: func._task_schedule = schedule if max_queue_size is not None: func._task_max_queue_size = max_queue_size func.delay = _delay(func) if schedule is not None: serialized_func = serialize_func_name(func) assert serialized_func not in self.periodic_task_funcs, \ "attempted duplicate registration of periodic task" self.periodic_task_funcs[serialized_func] = func return func return _wrap if _fn is None else _wrap(_fn)
[ "def", "task", "(", "self", ",", "_fn", "=", "None", ",", "queue", "=", "None", ",", "hard_timeout", "=", "None", ",", "unique", "=", "None", ",", "lock", "=", "None", ",", "lock_key", "=", "None", ",", "retry", "=", "None", ",", "retry_on", "=", "None", ",", "retry_method", "=", "None", ",", "schedule", "=", "None", ",", "batch", "=", "False", ",", "max_queue_size", "=", "None", ")", ":", "def", "_delay", "(", "func", ")", ":", "def", "_delay_inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "delay", "(", "func", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")", "return", "_delay_inner", "# Periodic tasks are unique.", "if", "schedule", "is", "not", "None", ":", "unique", "=", "True", "def", "_wrap", "(", "func", ")", ":", "if", "hard_timeout", "is", "not", "None", ":", "func", ".", "_task_hard_timeout", "=", "hard_timeout", "if", "queue", "is", "not", "None", ":", "func", ".", "_task_queue", "=", "queue", "if", "unique", "is", "not", "None", ":", "func", ".", "_task_unique", "=", "unique", "if", "lock", "is", "not", "None", ":", "func", ".", "_task_lock", "=", "lock", "if", "lock_key", "is", "not", "None", ":", "func", ".", "_task_lock_key", "=", "lock_key", "if", "retry", "is", "not", "None", ":", "func", ".", "_task_retry", "=", "retry", "if", "retry_on", "is", "not", "None", ":", "func", ".", "_task_retry_on", "=", "retry_on", "if", "retry_method", "is", "not", "None", ":", "func", ".", "_task_retry_method", "=", "retry_method", "if", "batch", "is", "not", "None", ":", "func", ".", "_task_batch", "=", "batch", "if", "schedule", "is", "not", "None", ":", "func", ".", "_task_schedule", "=", "schedule", "if", "max_queue_size", "is", "not", "None", ":", "func", ".", "_task_max_queue_size", "=", "max_queue_size", "func", ".", "delay", "=", "_delay", "(", "func", ")", "if", "schedule", "is", "not", "None", ":", "serialized_func", "=", "serialize_func_name", "(", "func", ")", "assert", "serialized_func", "not", "in", "self", ".", "periodic_task_funcs", ",", "\"attempted duplicate registration of periodic task\"", "self", ".", "periodic_task_funcs", "[", "serialized_func", "]", "=", "func", "return", "func", "return", "_wrap", "if", "_fn", "is", "None", "else", "_wrap", "(", "_fn", ")" ]
Function decorator that defines the behavior of the function when it is used as a task. To use the default behavior, tasks don't need to be decorated. See README.rst for an explanation of the options.
[ "Function", "decorator", "that", "defines", "the", "behavior", "of", "the", "function", "when", "it", "is", "used", "as", "a", "task", ".", "To", "use", "the", "default", "behavior", "tasks", "don", "t", "need", "to", "be", "decorated", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/__init__.py#L242-L297
train
231,735
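A hedged usage sketch of the decorator above. The keyword arguments are taken from the record's signature; the TaskTiger constructor call, queue name, and task body are example values.

import redis
from tasktiger import TaskTiger

tiger = TaskTiger(connection=redis.Redis())

@tiger.task(queue='email', hard_timeout=60, unique=True, retry=True)
def send_welcome_email(user_id):
    print('sending welcome email to', user_id)

# The decorator attaches a .delay() helper that queues the task:
send_welcome_email.delay(42)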
closeio/tasktiger
tasktiger/__init__.py
TaskTiger.run_worker
def run_worker(self, queues=None, module=None, exclude_queues=None, max_workers_per_queue=None, store_tracebacks=None): """ Main worker entry point method. The arguments are explained in the module-level run_worker() method's click options. """ try: module_names = module or '' for module_name in module_names.split(','): module_name = module_name.strip() if module_name: importlib.import_module(module_name) self.log.debug('imported module', module_name=module_name) worker = Worker(self, queues.split(',') if queues else None, exclude_queues.split(',') if exclude_queues else None, max_workers_per_queue=max_workers_per_queue, store_tracebacks=store_tracebacks) worker.run() except Exception: self.log.exception('Unhandled exception') raise
python
def run_worker(self, queues=None, module=None, exclude_queues=None, max_workers_per_queue=None, store_tracebacks=None): """ Main worker entry point method. The arguments are explained in the module-level run_worker() method's click options. """ try: module_names = module or '' for module_name in module_names.split(','): module_name = module_name.strip() if module_name: importlib.import_module(module_name) self.log.debug('imported module', module_name=module_name) worker = Worker(self, queues.split(',') if queues else None, exclude_queues.split(',') if exclude_queues else None, max_workers_per_queue=max_workers_per_queue, store_tracebacks=store_tracebacks) worker.run() except Exception: self.log.exception('Unhandled exception') raise
[ "def", "run_worker", "(", "self", ",", "queues", "=", "None", ",", "module", "=", "None", ",", "exclude_queues", "=", "None", ",", "max_workers_per_queue", "=", "None", ",", "store_tracebacks", "=", "None", ")", ":", "try", ":", "module_names", "=", "module", "or", "''", "for", "module_name", "in", "module_names", ".", "split", "(", "','", ")", ":", "module_name", "=", "module_name", ".", "strip", "(", ")", "if", "module_name", ":", "importlib", ".", "import_module", "(", "module_name", ")", "self", ".", "log", ".", "debug", "(", "'imported module'", ",", "module_name", "=", "module_name", ")", "worker", "=", "Worker", "(", "self", ",", "queues", ".", "split", "(", "','", ")", "if", "queues", "else", "None", ",", "exclude_queues", ".", "split", "(", "','", ")", "if", "exclude_queues", "else", "None", ",", "max_workers_per_queue", "=", "max_workers_per_queue", ",", "store_tracebacks", "=", "store_tracebacks", ")", "worker", ".", "run", "(", ")", "except", "Exception", ":", "self", ".", "log", ".", "exception", "(", "'Unhandled exception'", ")", "raise" ]
Main worker entry point method. The arguments are explained in the module-level run_worker() method's click options.
[ "Main", "worker", "entry", "point", "method", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/__init__.py#L306-L331
train
231,736
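Continuing the tiger instance from the decorator sketch above, a sketch of starting a worker in-process; 'myapp.tasks' is a hypothetical module that registers its @tiger.task functions at import time, which is what the record's import loop expects.

# Import task modules, then process only the 'email' and 'default' queues
# (including their dotted subqueues).
tiger.run_worker(queues='email,default', module='myapp.tasks')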
closeio/tasktiger
tasktiger/__init__.py
TaskTiger.delay
def delay(self, func, args=None, kwargs=None, queue=None, hard_timeout=None, unique=None, lock=None, lock_key=None, when=None, retry=None, retry_on=None, retry_method=None, max_queue_size=None): """ Queues a task. See README.rst for an explanation of the options. """ task = Task(self, func, args=args, kwargs=kwargs, queue=queue, hard_timeout=hard_timeout, unique=unique, lock=lock, lock_key=lock_key, retry=retry, retry_on=retry_on, retry_method=retry_method) task.delay(when=when, max_queue_size=max_queue_size) return task
python
def delay(self, func, args=None, kwargs=None, queue=None, hard_timeout=None, unique=None, lock=None, lock_key=None, when=None, retry=None, retry_on=None, retry_method=None, max_queue_size=None): """ Queues a task. See README.rst for an explanation of the options. """ task = Task(self, func, args=args, kwargs=kwargs, queue=queue, hard_timeout=hard_timeout, unique=unique, lock=lock, lock_key=lock_key, retry=retry, retry_on=retry_on, retry_method=retry_method) task.delay(when=when, max_queue_size=max_queue_size) return task
[ "def", "delay", "(", "self", ",", "func", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "queue", "=", "None", ",", "hard_timeout", "=", "None", ",", "unique", "=", "None", ",", "lock", "=", "None", ",", "lock_key", "=", "None", ",", "when", "=", "None", ",", "retry", "=", "None", ",", "retry_on", "=", "None", ",", "retry_method", "=", "None", ",", "max_queue_size", "=", "None", ")", ":", "task", "=", "Task", "(", "self", ",", "func", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "queue", "=", "queue", ",", "hard_timeout", "=", "hard_timeout", ",", "unique", "=", "unique", ",", "lock", "=", "lock", ",", "lock_key", "=", "lock_key", ",", "retry", "=", "retry", ",", "retry_on", "=", "retry_on", ",", "retry_method", "=", "retry_method", ")", "task", ".", "delay", "(", "when", "=", "when", ",", "max_queue_size", "=", "max_queue_size", ")", "return", "task" ]
Queues a task. See README.rst for an explanation of the options.
[ "Queues", "a", "task", ".", "See", "README", ".", "rst", "for", "an", "explanation", "of", "the", "options", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/__init__.py#L333-L348
train
231,737
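Continuing the same sketch, a hedged example of queueing with tiger.delay(). Whether `when` accepts a timedelta, a datetime, or both is not shown in this record, so the timedelta below is an assumption.

from datetime import timedelta

task = tiger.delay(send_welcome_email, args=(42,), queue='email',
                   when=timedelta(minutes=5),  # assumed: relative delay
                   max_queue_size=10000)
print(task.id)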
closeio/tasktiger
tasktiger/__init__.py
TaskTiger.get_queue_sizes
def get_queue_sizes(self, queue): """ Get the queue's number of tasks in each state. Returns dict with queue size for the QUEUED, SCHEDULED, and ACTIVE states. Does not include size of error queue. """ states = [QUEUED, SCHEDULED, ACTIVE] pipeline = self.connection.pipeline() for state in states: pipeline.zcard(self._key(state, queue)) results = pipeline.execute() return dict(zip(states, results))
python
def get_queue_sizes(self, queue): """ Get the queue's number of tasks in each state. Returns dict with queue size for the QUEUED, SCHEDULED, and ACTIVE states. Does not include size of error queue. """ states = [QUEUED, SCHEDULED, ACTIVE] pipeline = self.connection.pipeline() for state in states: pipeline.zcard(self._key(state, queue)) results = pipeline.execute() return dict(zip(states, results))
[ "def", "get_queue_sizes", "(", "self", ",", "queue", ")", ":", "states", "=", "[", "QUEUED", ",", "SCHEDULED", ",", "ACTIVE", "]", "pipeline", "=", "self", ".", "connection", ".", "pipeline", "(", ")", "for", "state", "in", "states", ":", "pipeline", ".", "zcard", "(", "self", ".", "_key", "(", "state", ",", "queue", ")", ")", "results", "=", "pipeline", ".", "execute", "(", ")", "return", "dict", "(", "zip", "(", "states", ",", "results", ")", ")" ]
Get the queue's number of tasks in each state. Returns dict with queue size for the QUEUED, SCHEDULED, and ACTIVE states. Does not include size of error queue.
[ "Get", "the", "queue", "s", "number", "of", "tasks", "in", "each", "state", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/__init__.py#L350-L363
train
231,738
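A short usage sketch for the record above; the returned dict is keyed by the QUEUED, SCHEDULED, and ACTIVE state constants.

for state, size in tiger.get_queue_sizes('email').items():
    print(state, size)  # e.g. queued 12 / scheduled 3 / active 1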
closeio/tasktiger
tasktiger/__init__.py
TaskTiger.get_queue_system_lock
def get_queue_system_lock(self, queue): """ Get system lock timeout Returns time system lock expires or None if lock does not exist """ key = self._key(LOCK_REDIS_KEY, queue) return Semaphore.get_system_lock(self.connection, key)
python
def get_queue_system_lock(self, queue): """ Get system lock timeout Returns time system lock expires or None if lock does not exist """ key = self._key(LOCK_REDIS_KEY, queue) return Semaphore.get_system_lock(self.connection, key)
[ "def", "get_queue_system_lock", "(", "self", ",", "queue", ")", ":", "key", "=", "self", ".", "_key", "(", "LOCK_REDIS_KEY", ",", "queue", ")", "return", "Semaphore", ".", "get_system_lock", "(", "self", ".", "connection", ",", "key", ")" ]
Get system lock timeout Returns time system lock expires or None if lock does not exist
[ "Get", "system", "lock", "timeout" ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/__init__.py#L370-L378
train
231,739
closeio/tasktiger
tasktiger/__init__.py
TaskTiger.set_queue_system_lock
def set_queue_system_lock(self, queue, timeout): """ Set system lock on a queue. Max workers for this queue must be used for this to have any effect. This will keep workers from processing tasks for this queue until the timeout has expired. Active tasks will continue processing their current task. timeout is number of seconds to hold the lock """ key = self._key(LOCK_REDIS_KEY, queue) Semaphore.set_system_lock(self.connection, key, timeout)
python
def set_queue_system_lock(self, queue, timeout): """ Set system lock on a queue. Max workers for this queue must be used for this to have any effect. This will keep workers from processing tasks for this queue until the timeout has expired. Active tasks will continue processing their current task. timeout is number of seconds to hold the lock """ key = self._key(LOCK_REDIS_KEY, queue) Semaphore.set_system_lock(self.connection, key, timeout)
[ "def", "set_queue_system_lock", "(", "self", ",", "queue", ",", "timeout", ")", ":", "key", "=", "self", ".", "_key", "(", "LOCK_REDIS_KEY", ",", "queue", ")", "Semaphore", ".", "set_system_lock", "(", "self", ".", "connection", ",", "key", ",", "timeout", ")" ]
Set system lock on a queue. Max workers for this queue must be used for this to have any effect. This will keep workers from processing tasks for this queue until the timeout has expired. Active tasks will continue processing their current task. timeout is number of seconds to hold the lock
[ "Set", "system", "lock", "on", "a", "queue", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/__init__.py#L380-L394
train
231,740
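A sketch combining the two system-lock records above: pause a queue, then read back the expiry. Per the docstring, this only has an effect when max workers per queue is in use.

tiger.set_queue_system_lock('email', 600)    # hold off workers for 10 minutes
print(tiger.get_queue_system_lock('email'))  # epoch seconds, or None if unset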
closeio/tasktiger
tasktiger/worker.py
Worker._install_signal_handlers
def _install_signal_handlers(self): """ Sets up signal handlers for safely stopping the worker. """ def request_stop(signum, frame): self._stop_requested = True self.log.info('stop requested, waiting for task to finish') signal.signal(signal.SIGINT, request_stop) signal.signal(signal.SIGTERM, request_stop)
python
def _install_signal_handlers(self): """ Sets up signal handlers for safely stopping the worker. """ def request_stop(signum, frame): self._stop_requested = True self.log.info('stop requested, waiting for task to finish') signal.signal(signal.SIGINT, request_stop) signal.signal(signal.SIGTERM, request_stop)
[ "def", "_install_signal_handlers", "(", "self", ")", ":", "def", "request_stop", "(", "signum", ",", "frame", ")", ":", "self", ".", "_stop_requested", "=", "True", "self", ".", "log", ".", "info", "(", "'stop requested, waiting for task to finish'", ")", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "request_stop", ")", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "request_stop", ")" ]
Sets up signal handlers for safely stopping the worker.
[ "Sets", "up", "signal", "handlers", "for", "safely", "stopping", "the", "worker", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L105-L113
train
231,741
closeio/tasktiger
tasktiger/worker.py
Worker._uninstall_signal_handlers
def _uninstall_signal_handlers(self): """ Restores default signal handlers. """ signal.signal(signal.SIGINT, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL)
python
def _uninstall_signal_handlers(self): """ Restores default signal handlers. """ signal.signal(signal.SIGINT, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL)
[ "def", "_uninstall_signal_handlers", "(", "self", ")", ":", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIG_DFL", ")", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "signal", ".", "SIG_DFL", ")" ]
Restores default signal handlers.
[ "Restores", "default", "signal", "handlers", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L115-L120
train
231,742
closeio/tasktiger
tasktiger/worker.py
Worker._filter_queues
def _filter_queues(self, queues): """ Applies the queue filter to the given list of queues and returns the queues that match. Note that a queue name matches any subqueues starting with the name, followed by a dot. For example, "foo" will match both "foo" and "foo.bar". """ def match(queue): """ Returns whether the given queue should be included by checking each part of the queue name. """ for part in reversed_dotted_parts(queue): if part in self.exclude_queues: return False if part in self.only_queues: return True return not self.only_queues return [q for q in queues if match(q)]
python
def _filter_queues(self, queues): """ Applies the queue filter to the given list of queues and returns the queues that match. Note that a queue name matches any subqueues starting with the name, followed by a dot. For example, "foo" will match both "foo" and "foo.bar". """ def match(queue): """ Returns whether the given queue should be included by checking each part of the queue name. """ for part in reversed_dotted_parts(queue): if part in self.exclude_queues: return False if part in self.only_queues: return True return not self.only_queues return [q for q in queues if match(q)]
[ "def", "_filter_queues", "(", "self", ",", "queues", ")", ":", "def", "match", "(", "queue", ")", ":", "\"\"\"\n Returns whether the given queue should be included by checking each\n part of the queue name.\n \"\"\"", "for", "part", "in", "reversed_dotted_parts", "(", "queue", ")", ":", "if", "part", "in", "self", ".", "exclude_queues", ":", "return", "False", "if", "part", "in", "self", ".", "only_queues", ":", "return", "True", "return", "not", "self", ".", "only_queues", "return", "[", "q", "for", "q", "in", "queues", "if", "match", "(", "q", ")", "]" ]
Applies the queue filter to the given list of queues and returns the queues that match. Note that a queue name matches any subqueues starting with the name, followed by a dot. For example, "foo" will match both "foo" and "foo.bar".
[ "Applies", "the", "queue", "filter", "to", "the", "given", "list", "of", "queues", "and", "returns", "the", "queues", "that", "match", ".", "Note", "that", "a", "queue", "name", "matches", "any", "subqueues", "starting", "with", "the", "name", "followed", "by", "a", "date", ".", "For", "example", "foo", "will", "match", "both", "foo", "and", "foo", ".", "bar", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L122-L142
train
231,743
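reversed_dotted_parts is not shown in these records; the self-contained sketch below reconstructs the matching semantics the docstring describes (most specific dotted prefix checked first, excludes beating includes). It is an illustration of the rule, not the library's own helper.

def dotted_prefixes_most_specific_first(queue):
    parts = queue.split('.')
    return ['.'.join(parts[:i]) for i in range(len(parts), 0, -1)]

only_queues = {'email'}
exclude_queues = {'email.bulk'}

def match(queue):
    for part in dotted_prefixes_most_specific_first(queue):
        if part in exclude_queues:
            return False
        if part in only_queues:
            return True
    # No filter entry matched: include only if no include-list was given.
    return not only_queues

for q in ('email', 'email.welcome', 'email.bulk', 'reports'):
    print(q, match(q))   # True, True, False, False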
closeio/tasktiger
tasktiger/worker.py
Worker._worker_queue_scheduled_tasks
def _worker_queue_scheduled_tasks(self): """ Helper method that takes due tasks from the SCHEDULED queue and puts them in the QUEUED queue for execution. This should be called periodically. """ queues = set(self._filter_queues(self.connection.smembers( self._key(SCHEDULED)))) now = time.time() for queue in queues: # Move due items from the SCHEDULED queue to the QUEUED queue. If # items were moved, remove the queue from the scheduled set if it # is empty, and add it to the queued set so the task gets picked # up. If any unique tasks are already queued, don't update their # queue time (because the new queue time would be later). result = self.scripts.zpoppush( self._key(SCHEDULED, queue), self._key(QUEUED, queue), self.config['SCHEDULED_TASK_BATCH_SIZE'], now, now, if_exists=('noupdate',), on_success=('update_sets', queue, self._key(SCHEDULED), self._key(QUEUED)), ) self.log.debug('scheduled tasks', queue=queue, qty=len(result)) # XXX: ideally this would be in the same pipeline, but we only want # to announce if there was a result. if result: self.connection.publish(self._key('activity'), queue) self._did_work = True
python
def _worker_queue_scheduled_tasks(self): """ Helper method that takes due tasks from the SCHEDULED queue and puts them in the QUEUED queue for execution. This should be called periodically. """ queues = set(self._filter_queues(self.connection.smembers( self._key(SCHEDULED)))) now = time.time() for queue in queues: # Move due items from the SCHEDULED queue to the QUEUED queue. If # items were moved, remove the queue from the scheduled set if it # is empty, and add it to the queued set so the task gets picked # up. If any unique tasks are already queued, don't update their # queue time (because the new queue time would be later). result = self.scripts.zpoppush( self._key(SCHEDULED, queue), self._key(QUEUED, queue), self.config['SCHEDULED_TASK_BATCH_SIZE'], now, now, if_exists=('noupdate',), on_success=('update_sets', queue, self._key(SCHEDULED), self._key(QUEUED)), ) self.log.debug('scheduled tasks', queue=queue, qty=len(result)) # XXX: ideally this would be in the same pipeline, but we only want # to announce if there was a result. if result: self.connection.publish(self._key('activity'), queue) self._did_work = True
[ "def", "_worker_queue_scheduled_tasks", "(", "self", ")", ":", "queues", "=", "set", "(", "self", ".", "_filter_queues", "(", "self", ".", "connection", ".", "smembers", "(", "self", ".", "_key", "(", "SCHEDULED", ")", ")", ")", ")", "now", "=", "time", ".", "time", "(", ")", "for", "queue", "in", "queues", ":", "# Move due items from the SCHEDULED queue to the QUEUED queue. If", "# items were moved, remove the queue from the scheduled set if it", "# is empty, and add it to the queued set so the task gets picked", "# up. If any unique tasks are already queued, don't update their", "# queue time (because the new queue time would be later).", "result", "=", "self", ".", "scripts", ".", "zpoppush", "(", "self", ".", "_key", "(", "SCHEDULED", ",", "queue", ")", ",", "self", ".", "_key", "(", "QUEUED", ",", "queue", ")", ",", "self", ".", "config", "[", "'SCHEDULED_TASK_BATCH_SIZE'", "]", ",", "now", ",", "now", ",", "if_exists", "=", "(", "'noupdate'", ",", ")", ",", "on_success", "=", "(", "'update_sets'", ",", "queue", ",", "self", ".", "_key", "(", "SCHEDULED", ")", ",", "self", ".", "_key", "(", "QUEUED", ")", ")", ",", ")", "self", ".", "log", ".", "debug", "(", "'scheduled tasks'", ",", "queue", "=", "queue", ",", "qty", "=", "len", "(", "result", ")", ")", "# XXX: ideally this would be in the same pipeline, but we only want", "# to announce if there was a result.", "if", "result", ":", "self", ".", "connection", ".", "publish", "(", "self", ".", "_key", "(", "'activity'", ")", ",", "queue", ")", "self", ".", "_did_work", "=", "True" ]
Helper method that takes due tasks from the SCHEDULED queue and puts them in the QUEUED queue for execution. This should be called periodically.
[ "Helper", "method", "that", "takes", "due", "tasks", "from", "the", "SCHEDULED", "queue", "and", "puts", "them", "in", "the", "QUEUED", "queue", "for", "execution", ".", "This", "should", "be", "called", "periodically", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L144-L175
train
231,744
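zpoppush is a custom Lua script that is not part of this record; below is a simplified, non-atomic illustration of the same idea (move due members from a scheduled ZSET to a queued ZSET), using the redis-py 2.x zadd member/score convention seen elsewhere in these records. Key names are examples.

import time
import redis

conn = redis.Redis()
now = time.time()
# Fetch up to 100 members whose score (queue time) is due.
due = conn.zrangebyscore('t:scheduled:email', '-inf', now, start=0, num=100)
pipeline = conn.pipeline()
for task_id in due:
    pipeline.zadd('t:queued:email', task_id, now)  # member, score (redis-py 2.x)
    pipeline.zrem('t:scheduled:email', task_id)
pipeline.execute()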
closeio/tasktiger
tasktiger/worker.py
Worker._wait_for_new_tasks
def _wait_for_new_tasks(self, timeout=0, batch_timeout=0): """ Check activity channel and wait as necessary. This method is also used to slow down the main processing loop to reduce the effects of rapidly sending Redis commands. This method will exit for any of these conditions: 1. _did_work is True, suggests there could be more work pending 2. Found new queue and after batch timeout. Note batch timeout can be zero so it will exit immediately. 3. Timeout seconds have passed, this is the maximum time to stay in this method """ new_queue_found = False start_time = batch_exit = time.time() while True: # Check to see if batch_exit has been updated if batch_exit > start_time: pubsub_sleep = batch_exit - time.time() else: pubsub_sleep = start_time + timeout - time.time() message = self._pubsub.get_message(timeout=0 if pubsub_sleep < 0 or self._did_work else pubsub_sleep) # Pull remaining messages off of channel while message: if message['type'] == 'message': new_queue_found, batch_exit = self._process_queue_message( message['data'], new_queue_found, batch_exit, start_time, timeout, batch_timeout ) message = self._pubsub.get_message() if self._did_work: break # Exit immediately if we did work during the last # execution loop because there might be more work to do elif time.time() >= batch_exit and new_queue_found: break # After finding a new queue we can wait until the # batch timeout expires elif time.time() - start_time > timeout: break
python
def _wait_for_new_tasks(self, timeout=0, batch_timeout=0): """ Check activity channel and wait as necessary. This method is also used to slow down the main processing loop to reduce the effects of rapidly sending Redis commands. This method will exit for any of these conditions: 1. _did_work is True, suggests there could be more work pending 2. Found new queue and after batch timeout. Note batch timeout can be zero so it will exit immediately. 3. Timeout seconds have passed, this is the maximum time to stay in this method """ new_queue_found = False start_time = batch_exit = time.time() while True: # Check to see if batch_exit has been updated if batch_exit > start_time: pubsub_sleep = batch_exit - time.time() else: pubsub_sleep = start_time + timeout - time.time() message = self._pubsub.get_message(timeout=0 if pubsub_sleep < 0 or self._did_work else pubsub_sleep) # Pull remaining messages off of channel while message: if message['type'] == 'message': new_queue_found, batch_exit = self._process_queue_message( message['data'], new_queue_found, batch_exit, start_time, timeout, batch_timeout ) message = self._pubsub.get_message() if self._did_work: break # Exit immediately if we did work during the last # execution loop because there might be more work to do elif time.time() >= batch_exit and new_queue_found: break # After finding a new queue we can wait until the # batch timeout expires elif time.time() - start_time > timeout: break
[ "def", "_wait_for_new_tasks", "(", "self", ",", "timeout", "=", "0", ",", "batch_timeout", "=", "0", ")", ":", "new_queue_found", "=", "False", "start_time", "=", "batch_exit", "=", "time", ".", "time", "(", ")", "while", "True", ":", "# Check to see if batch_exit has been updated", "if", "batch_exit", ">", "start_time", ":", "pubsub_sleep", "=", "batch_exit", "-", "time", ".", "time", "(", ")", "else", ":", "pubsub_sleep", "=", "start_time", "+", "timeout", "-", "time", ".", "time", "(", ")", "message", "=", "self", ".", "_pubsub", ".", "get_message", "(", "timeout", "=", "0", "if", "pubsub_sleep", "<", "0", "or", "self", ".", "_did_work", "else", "pubsub_sleep", ")", "# Pull remaining messages off of channel", "while", "message", ":", "if", "message", "[", "'type'", "]", "==", "'message'", ":", "new_queue_found", ",", "batch_exit", "=", "self", ".", "_process_queue_message", "(", "message", "[", "'data'", "]", ",", "new_queue_found", ",", "batch_exit", ",", "start_time", ",", "timeout", ",", "batch_timeout", ")", "message", "=", "self", ".", "_pubsub", ".", "get_message", "(", ")", "if", "self", ".", "_did_work", ":", "break", "# Exit immediately if we did work during the last", "# execution loop because there might be more work to do", "elif", "time", ".", "time", "(", ")", ">=", "batch_exit", "and", "new_queue_found", ":", "break", "# After finding a new queue we can wait until the", "# batch timeout expires", "elif", "time", ".", "time", "(", ")", "-", "start_time", ">", "timeout", ":", "break" ]
Check activity channel and wait as necessary. This method is also used to slow down the main processing loop to reduce the effects of rapidly sending Redis commands. This method will exit for any of these conditions: 1. _did_work is True, suggests there could be more work pending 2. Found new queue and after batch timeout. Note batch timeout can be zero so it will exit immediately. 3. Timeout seconds have passed, this is the maximum time to stay in this method
[ "Check", "activity", "channel", "and", "wait", "as", "necessary", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L177-L220
train
231,745
closeio/tasktiger
tasktiger/worker.py
Worker._execute_forked
def _execute_forked(self, tasks, log): """ Executes the tasks in the forked process. Multiple tasks can be passed for batch processing. However, they must all use the same function and will share the execution entry. """ success = False execution = {} assert len(tasks) task_func = tasks[0].serialized_func assert all([task_func == task.serialized_func for task in tasks[1:]]) execution['time_started'] = time.time() exc = None exc_info = None try: func = tasks[0].func is_batch_func = getattr(func, '_task_batch', False) g['current_task_is_batch'] = is_batch_func if is_batch_func: # Batch process if the task supports it. params = [{ 'args': task.args, 'kwargs': task.kwargs, } for task in tasks] task_timeouts = [task.hard_timeout for task in tasks if task.hard_timeout is not None] hard_timeout = ((max(task_timeouts) if task_timeouts else None) or getattr(func, '_task_hard_timeout', None) or self.config['DEFAULT_HARD_TIMEOUT']) g['current_tasks'] = tasks with UnixSignalDeathPenalty(hard_timeout): func(params) else: # Process sequentially. for task in tasks: hard_timeout = (task.hard_timeout or getattr(func, '_task_hard_timeout', None) or self.config['DEFAULT_HARD_TIMEOUT']) g['current_tasks'] = [task] with UnixSignalDeathPenalty(hard_timeout): func(*task.args, **task.kwargs) except RetryException as exc: execution['retry'] = True if exc.method: execution['retry_method'] = serialize_retry_method(exc.method) execution['log_error'] = exc.log_error execution['exception_name'] = serialize_func_name(exc.__class__) exc_info = exc.exc_info or sys.exc_info() except (JobTimeoutException, Exception) as exc: execution['exception_name'] = serialize_func_name(exc.__class__) exc_info = sys.exc_info() else: success = True if not success: execution['time_failed'] = time.time() if self.store_tracebacks: # Currently we only log failed task executions to Redis. execution['traceback'] = \ ''.join(traceback.format_exception(*exc_info)) execution['success'] = success execution['host'] = socket.gethostname() serialized_execution = json.dumps(execution) for task in tasks: self.connection.rpush(self._key('task', task.id, 'executions'), serialized_execution) return success
python
def _execute_forked(self, tasks, log): """ Executes the tasks in the forked process. Multiple tasks can be passed for batch processing. However, they must all use the same function and will share the execution entry. """ success = False execution = {} assert len(tasks) task_func = tasks[0].serialized_func assert all([task_func == task.serialized_func for task in tasks[1:]]) execution['time_started'] = time.time() exc = None exc_info = None try: func = tasks[0].func is_batch_func = getattr(func, '_task_batch', False) g['current_task_is_batch'] = is_batch_func if is_batch_func: # Batch process if the task supports it. params = [{ 'args': task.args, 'kwargs': task.kwargs, } for task in tasks] task_timeouts = [task.hard_timeout for task in tasks if task.hard_timeout is not None] hard_timeout = ((max(task_timeouts) if task_timeouts else None) or getattr(func, '_task_hard_timeout', None) or self.config['DEFAULT_HARD_TIMEOUT']) g['current_tasks'] = tasks with UnixSignalDeathPenalty(hard_timeout): func(params) else: # Process sequentially. for task in tasks: hard_timeout = (task.hard_timeout or getattr(func, '_task_hard_timeout', None) or self.config['DEFAULT_HARD_TIMEOUT']) g['current_tasks'] = [task] with UnixSignalDeathPenalty(hard_timeout): func(*task.args, **task.kwargs) except RetryException as exc: execution['retry'] = True if exc.method: execution['retry_method'] = serialize_retry_method(exc.method) execution['log_error'] = exc.log_error execution['exception_name'] = serialize_func_name(exc.__class__) exc_info = exc.exc_info or sys.exc_info() except (JobTimeoutException, Exception) as exc: execution['exception_name'] = serialize_func_name(exc.__class__) exc_info = sys.exc_info() else: success = True if not success: execution['time_failed'] = time.time() if self.store_tracebacks: # Currently we only log failed task executions to Redis. execution['traceback'] = \ ''.join(traceback.format_exception(*exc_info)) execution['success'] = success execution['host'] = socket.gethostname() serialized_execution = json.dumps(execution) for task in tasks: self.connection.rpush(self._key('task', task.id, 'executions'), serialized_execution) return success
[ "def", "_execute_forked", "(", "self", ",", "tasks", ",", "log", ")", ":", "success", "=", "False", "execution", "=", "{", "}", "assert", "len", "(", "tasks", ")", "task_func", "=", "tasks", "[", "0", "]", ".", "serialized_func", "assert", "all", "(", "[", "task_func", "==", "task", ".", "serialized_func", "for", "task", "in", "tasks", "[", "1", ":", "]", "]", ")", "execution", "[", "'time_started'", "]", "=", "time", ".", "time", "(", ")", "exc", "=", "None", "exc_info", "=", "None", "try", ":", "func", "=", "tasks", "[", "0", "]", ".", "func", "is_batch_func", "=", "getattr", "(", "func", ",", "'_task_batch'", ",", "False", ")", "g", "[", "'current_task_is_batch'", "]", "=", "is_batch_func", "if", "is_batch_func", ":", "# Batch process if the task supports it.", "params", "=", "[", "{", "'args'", ":", "task", ".", "args", ",", "'kwargs'", ":", "task", ".", "kwargs", ",", "}", "for", "task", "in", "tasks", "]", "task_timeouts", "=", "[", "task", ".", "hard_timeout", "for", "task", "in", "tasks", "if", "task", ".", "hard_timeout", "is", "not", "None", "]", "hard_timeout", "=", "(", "(", "max", "(", "task_timeouts", ")", "if", "task_timeouts", "else", "None", ")", "or", "getattr", "(", "func", ",", "'_task_hard_timeout'", ",", "None", ")", "or", "self", ".", "config", "[", "'DEFAULT_HARD_TIMEOUT'", "]", ")", "g", "[", "'current_tasks'", "]", "=", "tasks", "with", "UnixSignalDeathPenalty", "(", "hard_timeout", ")", ":", "func", "(", "params", ")", "else", ":", "# Process sequentially.", "for", "task", "in", "tasks", ":", "hard_timeout", "=", "(", "task", ".", "hard_timeout", "or", "getattr", "(", "func", ",", "'_task_hard_timeout'", ",", "None", ")", "or", "self", ".", "config", "[", "'DEFAULT_HARD_TIMEOUT'", "]", ")", "g", "[", "'current_tasks'", "]", "=", "[", "task", "]", "with", "UnixSignalDeathPenalty", "(", "hard_timeout", ")", ":", "func", "(", "*", "task", ".", "args", ",", "*", "*", "task", ".", "kwargs", ")", "except", "RetryException", "as", "exc", ":", "execution", "[", "'retry'", "]", "=", "True", "if", "exc", ".", "method", ":", "execution", "[", "'retry_method'", "]", "=", "serialize_retry_method", "(", "exc", ".", "method", ")", "execution", "[", "'log_error'", "]", "=", "exc", ".", "log_error", "execution", "[", "'exception_name'", "]", "=", "serialize_func_name", "(", "exc", ".", "__class__", ")", "exc_info", "=", "exc", ".", "exc_info", "or", "sys", ".", "exc_info", "(", ")", "except", "(", "JobTimeoutException", ",", "Exception", ")", "as", "exc", ":", "execution", "[", "'exception_name'", "]", "=", "serialize_func_name", "(", "exc", ".", "__class__", ")", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "else", ":", "success", "=", "True", "if", "not", "success", ":", "execution", "[", "'time_failed'", "]", "=", "time", ".", "time", "(", ")", "if", "self", ".", "store_tracebacks", ":", "# Currently we only log failed task executions to Redis.", "execution", "[", "'traceback'", "]", "=", "''", ".", "join", "(", "traceback", ".", "format_exception", "(", "*", "exc_info", ")", ")", "execution", "[", "'success'", "]", "=", "success", "execution", "[", "'host'", "]", "=", "socket", ".", "gethostname", "(", ")", "serialized_execution", "=", "json", ".", "dumps", "(", "execution", ")", "for", "task", "in", "tasks", ":", "self", ".", "connection", ".", "rpush", "(", "self", ".", "_key", "(", "'task'", ",", "task", ".", "id", ",", "'executions'", ")", ",", "serialized_execution", ")", "return", "success" ]
Executes the tasks in the forked process. Multiple tasks can be passed for batch processing. However, they must all use the same function and will share the execution entry.
[ "Executes", "the", "tasks", "in", "the", "forked", "process", ".", "Multiple", "tasks", "can", "be", "passed", "for", "batch", "processing", ".", "However", "they", "must", "all", "use", "the", "same", "function", "and", "will", "share", "the", "execution", "entry", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L292-L370
train
231,746
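A sketch of a batch task consuming the params list built in _execute_forked above: with batch=True the function receives one {'args': ..., 'kwargs': ...} dict per queued call. The tiger instance is reused from the earlier decorator sketch.

@tiger.task(queue='email', batch=True)
def send_emails(params):
    for p in params:
        print('sending with', p['args'], p['kwargs'])

send_emails.delay('a@example.com')
send_emails.delay('b@example.com')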
closeio/tasktiger
tasktiger/worker.py
Worker._get_queue_batch_size
def _get_queue_batch_size(self, queue): """Get queue batch size.""" # Fetch one item unless this is a batch queue. # XXX: It would be more efficient to loop in reverse order and break. batch_queues = self.config['BATCH_QUEUES'] batch_size = 1 for part in dotted_parts(queue): if part in batch_queues: batch_size = batch_queues[part] return batch_size
python
def _get_queue_batch_size(self, queue): """Get queue batch size.""" # Fetch one item unless this is a batch queue. # XXX: It would be more efficient to loop in reverse order and break. batch_queues = self.config['BATCH_QUEUES'] batch_size = 1 for part in dotted_parts(queue): if part in batch_queues: batch_size = batch_queues[part] return batch_size
[ "def", "_get_queue_batch_size", "(", "self", ",", "queue", ")", ":", "# Fetch one item unless this is a batch queue.", "# XXX: It would be more efficient to loop in reverse order and break.", "batch_queues", "=", "self", ".", "config", "[", "'BATCH_QUEUES'", "]", "batch_size", "=", "1", "for", "part", "in", "dotted_parts", "(", "queue", ")", ":", "if", "part", "in", "batch_queues", ":", "batch_size", "=", "batch_queues", "[", "part", "]", "return", "batch_size" ]
Get queue batch size.
[ "Get", "queue", "batch", "size", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L372-L383
train
231,747
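dotted_parts is not shown in these records; the sketch below inlines an equivalent prefix expansion to show why the most specific BATCH_QUEUES entry wins: later, more dotted parts overwrite batch_size.

BATCH_QUEUES = {'email': 10, 'email.bulk': 50}

def batch_size_for(queue):
    parts = queue.split('.')
    size = 1
    for i in range(1, len(parts) + 1):        # 'email', 'email.bulk', ...
        prefix = '.'.join(parts[:i])
        if prefix in BATCH_QUEUES:
            size = BATCH_QUEUES[prefix]
    return size

print(batch_size_for('email'))            # 10
print(batch_size_for('email.bulk.jan'))   # 50
print(batch_size_for('reports'))          # 1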
closeio/tasktiger
tasktiger/worker.py
Worker._get_queue_lock
def _get_queue_lock(self, queue, log): """Get queue lock for max worker queues. For max worker queues it returns a Lock if acquired and whether it failed to acquire the lock. """ max_workers = self.max_workers_per_queue # Check if this is single worker queue for part in dotted_parts(queue): if part in self.single_worker_queues: log.debug('single worker queue') max_workers = 1 break # Max worker queues require us to get a queue lock before # moving tasks if max_workers: queue_lock = Semaphore(self.connection, self._key(LOCK_REDIS_KEY, queue), self.id, max_locks=max_workers, timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) acquired, locks = queue_lock.acquire() if not acquired: return None, True log.debug('acquired queue lock', locks=locks) else: queue_lock = None return queue_lock, False
python
def _get_queue_lock(self, queue, log): """Get queue lock for max worker queues. For max worker queues it returns a Lock if acquired and whether it failed to acquire the lock. """ max_workers = self.max_workers_per_queue # Check if this is single worker queue for part in dotted_parts(queue): if part in self.single_worker_queues: log.debug('single worker queue') max_workers = 1 break # Max worker queues require us to get a queue lock before # moving tasks if max_workers: queue_lock = Semaphore(self.connection, self._key(LOCK_REDIS_KEY, queue), self.id, max_locks=max_workers, timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) acquired, locks = queue_lock.acquire() if not acquired: return None, True log.debug('acquired queue lock', locks=locks) else: queue_lock = None return queue_lock, False
[ "def", "_get_queue_lock", "(", "self", ",", "queue", ",", "log", ")", ":", "max_workers", "=", "self", ".", "max_workers_per_queue", "# Check if this is single worker queue", "for", "part", "in", "dotted_parts", "(", "queue", ")", ":", "if", "part", "in", "self", ".", "single_worker_queues", ":", "log", ".", "debug", "(", "'single worker queue'", ")", "max_workers", "=", "1", "break", "# Max worker queues require us to get a queue lock before", "# moving tasks", "if", "max_workers", ":", "queue_lock", "=", "Semaphore", "(", "self", ".", "connection", ",", "self", ".", "_key", "(", "LOCK_REDIS_KEY", ",", "queue", ")", ",", "self", ".", "id", ",", "max_locks", "=", "max_workers", ",", "timeout", "=", "self", ".", "config", "[", "'ACTIVE_TASK_UPDATE_TIMEOUT'", "]", ")", "acquired", ",", "locks", "=", "queue_lock", ".", "acquire", "(", ")", "if", "not", "acquired", ":", "return", "None", ",", "True", "log", ".", "debug", "(", "'acquired queue lock'", ",", "locks", "=", "locks", ")", "else", ":", "queue_lock", "=", "None", "return", "queue_lock", ",", "False" ]
Get queue lock for max worker queues. For max worker queues it returns a Lock if acquired and whether it failed to acquire the lock.
[ "Get", "queue", "lock", "for", "max", "worker", "queues", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L385-L414
train
231,748
closeio/tasktiger
tasktiger/worker.py
Worker._heartbeat
def _heartbeat(self, queue, task_ids): """ Updates the heartbeat for the given task IDs to prevent them from timing out and being requeued. """ now = time.time() self.connection.zadd(self._key(ACTIVE, queue), **{task_id: now for task_id in task_ids})
python
def _heartbeat(self, queue, task_ids): """ Updates the heartbeat for the given task IDs to prevent them from timing out and being requeued. """ now = time.time() self.connection.zadd(self._key(ACTIVE, queue), **{task_id: now for task_id in task_ids})
[ "def", "_heartbeat", "(", "self", ",", "queue", ",", "task_ids", ")", ":", "now", "=", "time", ".", "time", "(", ")", "self", ".", "connection", ".", "zadd", "(", "self", ".", "_key", "(", "ACTIVE", ",", "queue", ")", ",", "*", "*", "{", "task_id", ":", "now", "for", "task_id", "in", "task_ids", "}", ")" ]
Updates the heartbeat for the given task IDs to prevent them from timing out and being requeued.
[ "Updates", "the", "heartbeat", "for", "the", "given", "task", "IDs", "to", "prevent", "them", "from", "timing", "out", "and", "being", "requeued", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L416-L423
train
231,749
closeio/tasktiger
tasktiger/worker.py
Worker._execute
def _execute(self, queue, tasks, log, locks, queue_lock, all_task_ids): """ Executes the given tasks. Returns a boolean indicating whether the tasks were executed successfully. """ # The tasks must use the same function. assert len(tasks) task_func = tasks[0].serialized_func assert all([task_func == task.serialized_func for task in tasks[1:]]) # Before executing periodic tasks, queue them for the next period. if task_func in self.tiger.periodic_task_funcs: tasks[0]._queue_for_next_period() with g_fork_lock: child_pid = os.fork() if child_pid == 0: # Child process log = log.bind(child_pid=os.getpid()) # Disconnect the Redis connection inherited from the main process. # Note that this doesn't disconnect the socket in the main process. self.connection.connection_pool.disconnect() random.seed() # Ignore Ctrl+C in the child so we don't abort the job -- the main # process already takes care of a graceful shutdown. signal.signal(signal.SIGINT, signal.SIG_IGN) with WorkerContextManagerStack(self.config['CHILD_CONTEXT_MANAGERS']): success = self._execute_forked(tasks, log) # Wait for any threads that might be running in the child, just # like sys.exit() would. Note we don't call sys.exit() directly # because it would perform additional cleanup (e.g. calling atexit # handlers twice). See also: https://bugs.python.org/issue18966 threading._shutdown() os._exit(int(not success)) else: # Main process log = log.bind(child_pid=child_pid) for task in tasks: log.info('processing', func=task_func, task_id=task.id, params={'args': task.args, 'kwargs': task.kwargs}) # Attach a signal handler to SIGCHLD (sent when the child process # exits) so we can capture it. signal.signal(signal.SIGCHLD, sigchld_handler) # Since newer Python versions retry interrupted system calls we can't # rely on the fact that select() is interrupted with EINTR. Instead, # we'll set up a wake-up file descriptor below. # Create a new pipe and apply the non-blocking flag (required for # set_wakeup_fd). pipe_r, pipe_w = os.pipe() flags = fcntl.fcntl(pipe_w, fcntl.F_GETFL, 0) flags = flags | os.O_NONBLOCK fcntl.fcntl(pipe_w, fcntl.F_SETFL, flags) # A byte will be written to pipe_w if a signal occurs (and can be # read from pipe_r). old_wakeup_fd = signal.set_wakeup_fd(pipe_w) def check_child_exit(): """ Do a non-blocking check to see if the child process exited. Returns None if the process is still running, or the exit code value of the child process. """ try: pid, return_code = os.waitpid(child_pid, os.WNOHANG) if pid != 0: # The child process is done. return return_code except OSError as e: # Of course EINTR can happen if the child process exits # while we're checking whether it exited. In this case it # should be safe to retry. if e.errno == errno.EINTR: return check_child_exit() else: raise # Wait for the child to exit and perform a periodic heartbeat. # We check for the child twice in this loop so that we avoid # unnecessary waiting if the child exited just before entering # the while loop or while renewing heartbeat/locks. while True: return_code = check_child_exit() if return_code is not None: break # Wait until the timeout or a signal / child exit occurs. try: select.select([pipe_r], [], [], self.config['ACTIVE_TASK_UPDATE_TIMER']) except select.error as e: if e.args[0] != errno.EINTR: raise return_code = check_child_exit() if return_code is not None: break try: self._heartbeat(queue, all_task_ids) for lock in locks: lock.renew(self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) if queue_lock: acquired, current_locks = queue_lock.renew() if not acquired: log.debug('queue lock renew failure') except OSError as e: # EINTR happens if the task completed. Since we're just # renewing locks/heartbeat it's okay if we get interrupted. if e.errno != errno.EINTR: raise # Restore signals / clean up signal.signal(signal.SIGCHLD, signal.SIG_DFL) signal.set_wakeup_fd(old_wakeup_fd) os.close(pipe_r) os.close(pipe_w) success = (return_code == 0) return success
python
def _execute(self, queue, tasks, log, locks, queue_lock, all_task_ids): """ Executes the given tasks. Returns a boolean indicating whether the tasks were executed successfully. """ # The tasks must use the same function. assert len(tasks) task_func = tasks[0].serialized_func assert all([task_func == task.serialized_func for task in tasks[1:]]) # Before executing periodic tasks, queue them for the next period. if task_func in self.tiger.periodic_task_funcs: tasks[0]._queue_for_next_period() with g_fork_lock: child_pid = os.fork() if child_pid == 0: # Child process log = log.bind(child_pid=os.getpid()) # Disconnect the Redis connection inherited from the main process. # Note that this doesn't disconnect the socket in the main process. self.connection.connection_pool.disconnect() random.seed() # Ignore Ctrl+C in the child so we don't abort the job -- the main # process already takes care of a graceful shutdown. signal.signal(signal.SIGINT, signal.SIG_IGN) with WorkerContextManagerStack(self.config['CHILD_CONTEXT_MANAGERS']): success = self._execute_forked(tasks, log) # Wait for any threads that might be running in the child, just # like sys.exit() would. Note we don't call sys.exit() directly # because it would perform additional cleanup (e.g. calling atexit # handlers twice). See also: https://bugs.python.org/issue18966 threading._shutdown() os._exit(int(not success)) else: # Main process log = log.bind(child_pid=child_pid) for task in tasks: log.info('processing', func=task_func, task_id=task.id, params={'args': task.args, 'kwargs': task.kwargs}) # Attach a signal handler to SIGCHLD (sent when the child process # exits) so we can capture it. signal.signal(signal.SIGCHLD, sigchld_handler) # Since newer Python versions retry interrupted system calls we can't # rely on the fact that select() is interrupted with EINTR. Instead, # we'll set up a wake-up file descriptor below. # Create a new pipe and apply the non-blocking flag (required for # set_wakeup_fd). pipe_r, pipe_w = os.pipe() flags = fcntl.fcntl(pipe_w, fcntl.F_GETFL, 0) flags = flags | os.O_NONBLOCK fcntl.fcntl(pipe_w, fcntl.F_SETFL, flags) # A byte will be written to pipe_w if a signal occurs (and can be # read from pipe_r). old_wakeup_fd = signal.set_wakeup_fd(pipe_w) def check_child_exit(): """ Do a non-blocking check to see if the child process exited. Returns None if the process is still running, or the exit code value of the child process. """ try: pid, return_code = os.waitpid(child_pid, os.WNOHANG) if pid != 0: # The child process is done. return return_code except OSError as e: # Of course EINTR can happen if the child process exits # while we're checking whether it exited. In this case it # should be safe to retry. if e.errno == errno.EINTR: return check_child_exit() else: raise # Wait for the child to exit and perform a periodic heartbeat. # We check for the child twice in this loop so that we avoid # unnecessary waiting if the child exited just before entering # the while loop or while renewing heartbeat/locks. while True: return_code = check_child_exit() if return_code is not None: break # Wait until the timeout or a signal / child exit occurs. try: select.select([pipe_r], [], [], self.config['ACTIVE_TASK_UPDATE_TIMER']) except select.error as e: if e.args[0] != errno.EINTR: raise return_code = check_child_exit() if return_code is not None: break try: self._heartbeat(queue, all_task_ids) for lock in locks: lock.renew(self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) if queue_lock: acquired, current_locks = queue_lock.renew() if not acquired: log.debug('queue lock renew failure') except OSError as e: # EINTR happens if the task completed. Since we're just # renewing locks/heartbeat it's okay if we get interrupted. if e.errno != errno.EINTR: raise # Restore signals / clean up signal.signal(signal.SIGCHLD, signal.SIG_DFL) signal.set_wakeup_fd(old_wakeup_fd) os.close(pipe_r) os.close(pipe_w) success = (return_code == 0) return success
[ "def", "_execute", "(", "self", ",", "queue", ",", "tasks", ",", "log", ",", "locks", ",", "queue_lock", ",", "all_task_ids", ")", ":", "# The tasks must use the same function.", "assert", "len", "(", "tasks", ")", "task_func", "=", "tasks", "[", "0", "]", ".", "serialized_func", "assert", "all", "(", "[", "task_func", "==", "task", ".", "serialized_func", "for", "task", "in", "tasks", "[", "1", ":", "]", "]", ")", "# Before executing periodic tasks, queue them for the next period.", "if", "task_func", "in", "self", ".", "tiger", ".", "periodic_task_funcs", ":", "tasks", "[", "0", "]", ".", "_queue_for_next_period", "(", ")", "with", "g_fork_lock", ":", "child_pid", "=", "os", ".", "fork", "(", ")", "if", "child_pid", "==", "0", ":", "# Child process", "log", "=", "log", ".", "bind", "(", "child_pid", "=", "os", ".", "getpid", "(", ")", ")", "# Disconnect the Redis connection inherited from the main process.", "# Note that this doesn't disconnect the socket in the main process.", "self", ".", "connection", ".", "connection_pool", ".", "disconnect", "(", ")", "random", ".", "seed", "(", ")", "# Ignore Ctrl+C in the child so we don't abort the job -- the main", "# process already takes care of a graceful shutdown.", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIG_IGN", ")", "with", "WorkerContextManagerStack", "(", "self", ".", "config", "[", "'CHILD_CONTEXT_MANAGERS'", "]", ")", ":", "success", "=", "self", ".", "_execute_forked", "(", "tasks", ",", "log", ")", "# Wait for any threads that might be running in the child, just", "# like sys.exit() would. Note we don't call sys.exit() directly", "# because it would perform additional cleanup (e.g. calling atexit", "# handlers twice). See also: https://bugs.python.org/issue18966", "threading", ".", "_shutdown", "(", ")", "os", ".", "_exit", "(", "int", "(", "not", "success", ")", ")", "else", ":", "# Main process", "log", "=", "log", ".", "bind", "(", "child_pid", "=", "child_pid", ")", "for", "task", "in", "tasks", ":", "log", ".", "info", "(", "'processing'", ",", "func", "=", "task_func", ",", "task_id", "=", "task", ".", "id", ",", "params", "=", "{", "'args'", ":", "task", ".", "args", ",", "'kwargs'", ":", "task", ".", "kwargs", "}", ")", "# Attach a signal handler to SIGCHLD (sent when the child process", "# exits) so we can capture it.", "signal", ".", "signal", "(", "signal", ".", "SIGCHLD", ",", "sigchld_handler", ")", "# Since newer Python versions retry interrupted system calls we can't", "# rely on the fact that select() is interrupted with EINTR. 
Instead,", "# we'll set up a wake-up file descriptor below.", "# Create a new pipe and apply the non-blocking flag (required for", "# set_wakeup_fd).", "pipe_r", ",", "pipe_w", "=", "os", ".", "pipe", "(", ")", "flags", "=", "fcntl", ".", "fcntl", "(", "pipe_w", ",", "fcntl", ".", "F_GETFL", ",", "0", ")", "flags", "=", "flags", "|", "os", ".", "O_NONBLOCK", "fcntl", ".", "fcntl", "(", "pipe_w", ",", "fcntl", ".", "F_SETFL", ",", "flags", ")", "# A byte will be written to pipe_w if a signal occurs (and can be", "# read from pipe_r).", "old_wakeup_fd", "=", "signal", ".", "set_wakeup_fd", "(", "pipe_w", ")", "def", "check_child_exit", "(", ")", ":", "\"\"\"\n Do a non-blocking check to see if the child process exited.\n Returns None if the process is still running, or the exit code\n value of the child process.\n \"\"\"", "try", ":", "pid", ",", "return_code", "=", "os", ".", "waitpid", "(", "child_pid", ",", "os", ".", "WNOHANG", ")", "if", "pid", "!=", "0", ":", "# The child process is done.", "return", "return_code", "except", "OSError", "as", "e", ":", "# Of course EINTR can happen if the child process exits", "# while we're checking whether it exited. In this case it", "# should be safe to retry.", "if", "e", ".", "errno", "==", "errno", ".", "EINTR", ":", "return", "check_child_exit", "(", ")", "else", ":", "raise", "# Wait for the child to exit and perform a periodic heartbeat.", "# We check for the child twice in this loop so that we avoid", "# unnecessary waiting if the child exited just before entering", "# the while loop or while renewing heartbeat/locks.", "while", "True", ":", "return_code", "=", "check_child_exit", "(", ")", "if", "return_code", "is", "not", "None", ":", "break", "# Wait until the timeout or a signal / child exit occurs.", "try", ":", "select", ".", "select", "(", "[", "pipe_r", "]", ",", "[", "]", ",", "[", "]", ",", "self", ".", "config", "[", "'ACTIVE_TASK_UPDATE_TIMER'", "]", ")", "except", "select", ".", "error", "as", "e", ":", "if", "e", ".", "args", "[", "0", "]", "!=", "errno", ".", "EINTR", ":", "raise", "return_code", "=", "check_child_exit", "(", ")", "if", "return_code", "is", "not", "None", ":", "break", "try", ":", "self", ".", "_heartbeat", "(", "queue", ",", "all_task_ids", ")", "for", "lock", "in", "locks", ":", "lock", ".", "renew", "(", "self", ".", "config", "[", "'ACTIVE_TASK_UPDATE_TIMEOUT'", "]", ")", "if", "queue_lock", ":", "acquired", ",", "current_locks", "=", "queue_lock", ".", "renew", "(", ")", "if", "not", "acquired", ":", "log", ".", "debug", "(", "'queue lock renew failure'", ")", "except", "OSError", "as", "e", ":", "# EINTR happens if the task completed. Since we're just", "# renewing locks/heartbeat it's okay if we get interrupted.", "if", "e", ".", "errno", "!=", "errno", ".", "EINTR", ":", "raise", "# Restore signals / clean up", "signal", ".", "signal", "(", "signal", ".", "SIGCHLD", ",", "signal", ".", "SIG_DFL", ")", "signal", ".", "set_wakeup_fd", "(", "old_wakeup_fd", ")", "os", ".", "close", "(", "pipe_r", ")", "os", ".", "close", "(", "pipe_w", ")", "success", "=", "(", "return_code", "==", "0", ")", "return", "success" ]
Executes the given tasks. Returns a boolean indicating whether the tasks were executed successfully.
[ "Executes", "the", "given", "tasks", ".", "Returns", "a", "boolean", "indicating", "whether", "the", "tasks", "were", "executed", "successfully", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L425-L554
train
231,750
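The main-process branch of _execute above is an instance of the self-pipe trick: signal.set_wakeup_fd() makes the interpreter write a byte to a non-blocking pipe whenever a handled signal (here SIGCHLD) arrives, so select() wakes up promptly even on Python versions that transparently retry interrupted system calls. A minimal standalone sketch of the pattern follows; all names are illustrative and this is not tasktiger code:

    import errno, fcntl, os, select, signal

    def wait_for_child(child_pid, poll_interval=2.0):
        # A Python-level handler must be installed for the wakeup fd to fire.
        signal.signal(signal.SIGCHLD, lambda signum, frame: None)
        pipe_r, pipe_w = os.pipe()
        flags = fcntl.fcntl(pipe_w, fcntl.F_GETFL, 0)
        fcntl.fcntl(pipe_w, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        old_fd = signal.set_wakeup_fd(pipe_w)  # requires a non-blocking fd
        try:
            while True:
                pid, status = os.waitpid(child_pid, os.WNOHANG)
                if pid != 0:
                    return status  # child exited; heartbeats would run here otherwise
                try:
                    # Returns early when a signal writes its wakeup byte to the pipe.
                    select.select([pipe_r], [], [], poll_interval)
                except select.error as e:
                    if e.args[0] != errno.EINTR:
                        raise
        finally:
            signal.signal(signal.SIGCHLD, signal.SIG_DFL)
            signal.set_wakeup_fd(old_fd)
            os.close(pipe_r)
            os.close(pipe_w)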
closeio/tasktiger
tasktiger/worker.py
Worker._process_queue_message
def _process_queue_message(self, message_queue, new_queue_found, batch_exit, start_time, timeout, batch_timeout): """Process a queue message from activity channel.""" for queue in self._filter_queues([message_queue]): if queue not in self._queue_set: if not new_queue_found: new_queue_found = True batch_exit = time.time() + batch_timeout # Limit batch_exit to max timeout if batch_exit > start_time + timeout: batch_exit = start_time + timeout self._queue_set.add(queue) self.log.debug('new queue', queue=queue) return new_queue_found, batch_exit
python
def _process_queue_message(self, message_queue, new_queue_found, batch_exit, start_time, timeout, batch_timeout): """Process a queue message from activity channel.""" for queue in self._filter_queues([message_queue]): if queue not in self._queue_set: if not new_queue_found: new_queue_found = True batch_exit = time.time() + batch_timeout # Limit batch_exit to max timeout if batch_exit > start_time + timeout: batch_exit = start_time + timeout self._queue_set.add(queue) self.log.debug('new queue', queue=queue) return new_queue_found, batch_exit
[ "def", "_process_queue_message", "(", "self", ",", "message_queue", ",", "new_queue_found", ",", "batch_exit", ",", "start_time", ",", "timeout", ",", "batch_timeout", ")", ":", "for", "queue", "in", "self", ".", "_filter_queues", "(", "[", "message_queue", "]", ")", ":", "if", "queue", "not", "in", "self", ".", "_queue_set", ":", "if", "not", "new_queue_found", ":", "new_queue_found", "=", "True", "batch_exit", "=", "time", ".", "time", "(", ")", "+", "batch_timeout", "# Limit batch_exit to max timeout", "if", "batch_exit", ">", "start_time", "+", "timeout", ":", "batch_exit", "=", "start_time", "+", "timeout", "self", ".", "_queue_set", ".", "add", "(", "queue", ")", "self", ".", "log", ".", "debug", "(", "'new queue'", ",", "queue", "=", "queue", ")", "return", "new_queue_found", ",", "batch_exit" ]
Process a queue message from activity channel.
[ "Process", "a", "queue", "message", "from", "activity", "channel", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L556-L571
train
231,751
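The deadline logic in _process_queue_message is a clamp: discovering the first new queue extends the select phase by batch_timeout, but never past the overall timeout. With illustrative numbers:

    # start_time=100.0, timeout=10, batch_timeout=2; first new queue seen at t=109.5
    batch_exit = 109.5 + 2                     # 111.5 would overshoot the overall deadline
    batch_exit = min(batch_exit, 100.0 + 10)   # clamped to 110.0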
closeio/tasktiger
tasktiger/worker.py
Worker._process_queue_tasks
def _process_queue_tasks(self, queue, queue_lock, task_ids, now, log): """Process tasks in queue.""" processed_count = 0 # Get all tasks serialized_tasks = self.connection.mget([ self._key('task', task_id) for task_id in task_ids ]) # Parse tasks tasks = [] for task_id, serialized_task in zip(task_ids, serialized_tasks): if serialized_task: task_data = json.loads(serialized_task) else: # In the rare case where we don't find the task which is # queued (see ReliabilityTestCase.test_task_disappears), # we log an error and remove the task below. We need to # at least initialize the Task object with an ID so we can # remove it. task_data = {'id': task_id} task = Task(self.tiger, queue=queue, _data=task_data, _state=ACTIVE, _ts=now) if not serialized_task: # Remove task as per comment above log.error('not found', task_id=task_id) task._move() elif task.id != task_id: log.error('task ID mismatch', task_id=task_id) # Remove task task._move() else: tasks.append(task) # List of task IDs that exist and we will update the heartbeat on. valid_task_ids = set(task.id for task in tasks) # Group by task func tasks_by_func = OrderedDict() for task in tasks: func = task.serialized_func if func in tasks_by_func: tasks_by_func[func].append(task) else: tasks_by_func[func] = [task] # Execute tasks for each task func for tasks in tasks_by_func.values(): success, processed_tasks = self._execute_task_group(queue, tasks, valid_task_ids, queue_lock) processed_count = processed_count + len(processed_tasks) log.debug('processed', attempted=len(tasks), processed=processed_count) for task in processed_tasks: self._finish_task_processing(queue, task, success) return processed_count
python
def _process_queue_tasks(self, queue, queue_lock, task_ids, now, log): """Process tasks in queue.""" processed_count = 0 # Get all tasks serialized_tasks = self.connection.mget([ self._key('task', task_id) for task_id in task_ids ]) # Parse tasks tasks = [] for task_id, serialized_task in zip(task_ids, serialized_tasks): if serialized_task: task_data = json.loads(serialized_task) else: # In the rare case where we don't find the task which is # queued (see ReliabilityTestCase.test_task_disappears), # we log an error and remove the task below. We need to # at least initialize the Task object with an ID so we can # remove it. task_data = {'id': task_id} task = Task(self.tiger, queue=queue, _data=task_data, _state=ACTIVE, _ts=now) if not serialized_task: # Remove task as per comment above log.error('not found', task_id=task_id) task._move() elif task.id != task_id: log.error('task ID mismatch', task_id=task_id) # Remove task task._move() else: tasks.append(task) # List of task IDs that exist and we will update the heartbeat on. valid_task_ids = set(task.id for task in tasks) # Group by task func tasks_by_func = OrderedDict() for task in tasks: func = task.serialized_func if func in tasks_by_func: tasks_by_func[func].append(task) else: tasks_by_func[func] = [task] # Execute tasks for each task func for tasks in tasks_by_func.values(): success, processed_tasks = self._execute_task_group(queue, tasks, valid_task_ids, queue_lock) processed_count = processed_count + len(processed_tasks) log.debug('processed', attempted=len(tasks), processed=processed_count) for task in processed_tasks: self._finish_task_processing(queue, task, success) return processed_count
[ "def", "_process_queue_tasks", "(", "self", ",", "queue", ",", "queue_lock", ",", "task_ids", ",", "now", ",", "log", ")", ":", "processed_count", "=", "0", "# Get all tasks", "serialized_tasks", "=", "self", ".", "connection", ".", "mget", "(", "[", "self", ".", "_key", "(", "'task'", ",", "task_id", ")", "for", "task_id", "in", "task_ids", "]", ")", "# Parse tasks", "tasks", "=", "[", "]", "for", "task_id", ",", "serialized_task", "in", "zip", "(", "task_ids", ",", "serialized_tasks", ")", ":", "if", "serialized_task", ":", "task_data", "=", "json", ".", "loads", "(", "serialized_task", ")", "else", ":", "# In the rare case where we don't find the task which is", "# queued (see ReliabilityTestCase.test_task_disappears),", "# we log an error and remove the task below. We need to", "# at least initialize the Task object with an ID so we can", "# remove it.", "task_data", "=", "{", "'id'", ":", "task_id", "}", "task", "=", "Task", "(", "self", ".", "tiger", ",", "queue", "=", "queue", ",", "_data", "=", "task_data", ",", "_state", "=", "ACTIVE", ",", "_ts", "=", "now", ")", "if", "not", "serialized_task", ":", "# Remove task as per comment above", "log", ".", "error", "(", "'not found'", ",", "task_id", "=", "task_id", ")", "task", ".", "_move", "(", ")", "elif", "task", ".", "id", "!=", "task_id", ":", "log", ".", "error", "(", "'task ID mismatch'", ",", "task_id", "=", "task_id", ")", "# Remove task", "task", ".", "_move", "(", ")", "else", ":", "tasks", ".", "append", "(", "task", ")", "# List of task IDs that exist and we will update the heartbeat on.", "valid_task_ids", "=", "set", "(", "task", ".", "id", "for", "task", "in", "tasks", ")", "# Group by task func", "tasks_by_func", "=", "OrderedDict", "(", ")", "for", "task", "in", "tasks", ":", "func", "=", "task", ".", "serialized_func", "if", "func", "in", "tasks_by_func", ":", "tasks_by_func", "[", "func", "]", ".", "append", "(", "task", ")", "else", ":", "tasks_by_func", "[", "func", "]", "=", "[", "task", "]", "# Execute tasks for each task func", "for", "tasks", "in", "tasks_by_func", ".", "values", "(", ")", ":", "success", ",", "processed_tasks", "=", "self", ".", "_execute_task_group", "(", "queue", ",", "tasks", ",", "valid_task_ids", ",", "queue_lock", ")", "processed_count", "=", "processed_count", "+", "len", "(", "processed_tasks", ")", "log", ".", "debug", "(", "'processed'", ",", "attempted", "=", "len", "(", "tasks", ")", ",", "processed", "=", "processed_count", ")", "for", "task", "in", "processed_tasks", ":", "self", ".", "_finish_task_processing", "(", "queue", ",", "task", ",", "success", ")", "return", "processed_count" ]
Process tasks in queue.
[ "Process", "tasks", "in", "queue", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L573-L632
train
231,752
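The OrderedDict grouping in _process_queue_tasks keeps task functions in first-seen order, so batches execute roughly in the order their tasks were dequeued. An equivalent standalone sketch (illustrative task objects, assumed to expose serialized_func):

    from collections import OrderedDict

    def group_by_func(tasks):
        # Group tasks by their serialized function, preserving first-seen order.
        groups = OrderedDict()
        for task in tasks:
            groups.setdefault(task.serialized_func, []).append(task)
        return groups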
closeio/tasktiger
tasktiger/worker.py
Worker._process_from_queue
def _process_from_queue(self, queue): """ Internal method to process a task batch from the given queue. Args: queue: Queue name to be processed Returns: Task IDs: List of tasks that were processed (even if there was an error so that client code can assume the queue is empty if nothing was returned) Count: The number of tasks that were attempted to be executed or -1 if the queue lock couldn't be acquired. """ now = time.time() log = self.log.bind(queue=queue) batch_size = self._get_queue_batch_size(queue) queue_lock, failed_to_acquire = self._get_queue_lock(queue, log) if failed_to_acquire: return [], -1 # Move an item to the active queue, if available. # We need to be careful when moving unique tasks: We currently don't # support concurrent processing of multiple unique tasks. If the task # is already in the ACTIVE queue, we need to execute the queued task # later, i.e. move it to the SCHEDULED queue (prefer the earliest # time if it's already scheduled). We want to make sure that the last # queued instance of the task always gets executed no earlier than it # was queued. later = time.time() + self.config['LOCK_RETRY'] task_ids = self.scripts.zpoppush( self._key(QUEUED, queue), self._key(ACTIVE, queue), batch_size, None, now, if_exists=('add', self._key(SCHEDULED, queue), later, 'min'), on_success=('update_sets', queue, self._key(QUEUED), self._key(ACTIVE), self._key(SCHEDULED)) ) log.debug('moved tasks', src_queue=QUEUED, dest_queue=ACTIVE, qty=len(task_ids)) processed_count = 0 if task_ids: processed_count = self._process_queue_tasks(queue, queue_lock, task_ids, now, log) if queue_lock: queue_lock.release() log.debug('released swq lock') return task_ids, processed_count
python
def _process_from_queue(self, queue): """ Internal method to process a task batch from the given queue. Args: queue: Queue name to be processed Returns: Task IDs: List of tasks that were processed (even if there was an error so that client code can assume the queue is empty if nothing was returned) Count: The number of tasks that were attempted to be executed or -1 if the queue lock couldn't be acquired. """ now = time.time() log = self.log.bind(queue=queue) batch_size = self._get_queue_batch_size(queue) queue_lock, failed_to_acquire = self._get_queue_lock(queue, log) if failed_to_acquire: return [], -1 # Move an item to the active queue, if available. # We need to be careful when moving unique tasks: We currently don't # support concurrent processing of multiple unique tasks. If the task # is already in the ACTIVE queue, we need to execute the queued task # later, i.e. move it to the SCHEDULED queue (prefer the earliest # time if it's already scheduled). We want to make sure that the last # queued instance of the task always gets executed no earlier than it # was queued. later = time.time() + self.config['LOCK_RETRY'] task_ids = self.scripts.zpoppush( self._key(QUEUED, queue), self._key(ACTIVE, queue), batch_size, None, now, if_exists=('add', self._key(SCHEDULED, queue), later, 'min'), on_success=('update_sets', queue, self._key(QUEUED), self._key(ACTIVE), self._key(SCHEDULED)) ) log.debug('moved tasks', src_queue=QUEUED, dest_queue=ACTIVE, qty=len(task_ids)) processed_count = 0 if task_ids: processed_count = self._process_queue_tasks(queue, queue_lock, task_ids, now, log) if queue_lock: queue_lock.release() log.debug('released swq lock') return task_ids, processed_count
[ "def", "_process_from_queue", "(", "self", ",", "queue", ")", ":", "now", "=", "time", ".", "time", "(", ")", "log", "=", "self", ".", "log", ".", "bind", "(", "queue", "=", "queue", ")", "batch_size", "=", "self", ".", "_get_queue_batch_size", "(", "queue", ")", "queue_lock", ",", "failed_to_acquire", "=", "self", ".", "_get_queue_lock", "(", "queue", ",", "log", ")", "if", "failed_to_acquire", ":", "return", "[", "]", ",", "-", "1", "# Move an item to the active queue, if available.", "# We need to be careful when moving unique tasks: We currently don't", "# support concurrent processing of multiple unique tasks. If the task", "# is already in the ACTIVE queue, we need to execute the queued task", "# later, i.e. move it to the SCHEDULED queue (prefer the earliest", "# time if it's already scheduled). We want to make sure that the last", "# queued instance of the task always gets executed no earlier than it", "# was queued.", "later", "=", "time", ".", "time", "(", ")", "+", "self", ".", "config", "[", "'LOCK_RETRY'", "]", "task_ids", "=", "self", ".", "scripts", ".", "zpoppush", "(", "self", ".", "_key", "(", "QUEUED", ",", "queue", ")", ",", "self", ".", "_key", "(", "ACTIVE", ",", "queue", ")", ",", "batch_size", ",", "None", ",", "now", ",", "if_exists", "=", "(", "'add'", ",", "self", ".", "_key", "(", "SCHEDULED", ",", "queue", ")", ",", "later", ",", "'min'", ")", ",", "on_success", "=", "(", "'update_sets'", ",", "queue", ",", "self", ".", "_key", "(", "QUEUED", ")", ",", "self", ".", "_key", "(", "ACTIVE", ")", ",", "self", ".", "_key", "(", "SCHEDULED", ")", ")", ")", "log", ".", "debug", "(", "'moved tasks'", ",", "src_queue", "=", "QUEUED", ",", "dest_queue", "=", "ACTIVE", ",", "qty", "=", "len", "(", "task_ids", ")", ")", "processed_count", "=", "0", "if", "task_ids", ":", "processed_count", "=", "self", ".", "_process_queue_tasks", "(", "queue", ",", "queue_lock", ",", "task_ids", ",", "now", ",", "log", ")", "if", "queue_lock", ":", "queue_lock", ".", "release", "(", ")", "log", ".", "debug", "(", "'released swq lock'", ")", "return", "task_ids", ",", "processed_count" ]
Internal method to process a task batch from the given queue. Args: queue: Queue name to be processed Returns: Task IDs: List of tasks that were processed (even if there was an error so that client code can assume the queue is empty if nothing was returned) Count: The number of tasks that were attempted to be executed or -1 if the queue lock couldn't be acquired.
[ "Internal", "method", "to", "process", "a", "task", "batch", "from", "the", "given", "queue", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L634-L690
train
231,753
closeio/tasktiger
tasktiger/worker.py
Worker._execute_task_group
def _execute_task_group(self, queue, tasks, all_task_ids, queue_lock): """ Executes the given tasks in the queue. Updates the heartbeat for task IDs passed in all_task_ids. This internal method is only meant to be called from within _process_from_queue. """ log = self.log.bind(queue=queue) locks = [] # Keep track of the acquired locks: If two tasks in the list require # the same lock we only acquire it once. lock_ids = set() ready_tasks = [] for task in tasks: if task.lock: if task.lock_key: kwargs = task.kwargs lock_id = gen_unique_id( task.serialized_func, None, {key: kwargs.get(key) for key in task.lock_key}, ) else: lock_id = gen_unique_id( task.serialized_func, task.args, task.kwargs, ) if lock_id not in lock_ids: lock = Lock(self.connection, self._key('lock', lock_id), timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) acquired = lock.acquire(blocking=False) if acquired: lock_ids.add(lock_id) locks.append(lock) else: log.info('could not acquire lock', task_id=task.id) # Reschedule the task (but if the task is already # scheduled in case of a unique task, don't prolong # the schedule date). when = time.time() + self.config['LOCK_RETRY'] task._move(from_state=ACTIVE, to_state=SCHEDULED, when=when, mode='min') # Make sure to remove it from this list so we don't # re-add to the ACTIVE queue by updating the heartbeat. all_task_ids.remove(task.id) continue ready_tasks.append(task) if not ready_tasks: return True, [] if self.stats_thread: self.stats_thread.report_task_start() success = self._execute(queue, ready_tasks, log, locks, queue_lock, all_task_ids) if self.stats_thread: self.stats_thread.report_task_end() for lock in locks: lock.release() return success, ready_tasks
python
def _execute_task_group(self, queue, tasks, all_task_ids, queue_lock): """ Executes the given tasks in the queue. Updates the heartbeat for task IDs passed in all_task_ids. This internal method is only meant to be called from within _process_from_queue. """ log = self.log.bind(queue=queue) locks = [] # Keep track of the acquired locks: If two tasks in the list require # the same lock we only acquire it once. lock_ids = set() ready_tasks = [] for task in tasks: if task.lock: if task.lock_key: kwargs = task.kwargs lock_id = gen_unique_id( task.serialized_func, None, {key: kwargs.get(key) for key in task.lock_key}, ) else: lock_id = gen_unique_id( task.serialized_func, task.args, task.kwargs, ) if lock_id not in lock_ids: lock = Lock(self.connection, self._key('lock', lock_id), timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) acquired = lock.acquire(blocking=False) if acquired: lock_ids.add(lock_id) locks.append(lock) else: log.info('could not acquire lock', task_id=task.id) # Reschedule the task (but if the task is already # scheduled in case of a unique task, don't prolong # the schedule date). when = time.time() + self.config['LOCK_RETRY'] task._move(from_state=ACTIVE, to_state=SCHEDULED, when=when, mode='min') # Make sure to remove it from this list so we don't # re-add to the ACTIVE queue by updating the heartbeat. all_task_ids.remove(task.id) continue ready_tasks.append(task) if not ready_tasks: return True, [] if self.stats_thread: self.stats_thread.report_task_start() success = self._execute(queue, ready_tasks, log, locks, queue_lock, all_task_ids) if self.stats_thread: self.stats_thread.report_task_end() for lock in locks: lock.release() return success, ready_tasks
[ "def", "_execute_task_group", "(", "self", ",", "queue", ",", "tasks", ",", "all_task_ids", ",", "queue_lock", ")", ":", "log", "=", "self", ".", "log", ".", "bind", "(", "queue", "=", "queue", ")", "locks", "=", "[", "]", "# Keep track of the acquired locks: If two tasks in the list require", "# the same lock we only acquire it once.", "lock_ids", "=", "set", "(", ")", "ready_tasks", "=", "[", "]", "for", "task", "in", "tasks", ":", "if", "task", ".", "lock", ":", "if", "task", ".", "lock_key", ":", "kwargs", "=", "task", ".", "kwargs", "lock_id", "=", "gen_unique_id", "(", "task", ".", "serialized_func", ",", "None", ",", "{", "key", ":", "kwargs", ".", "get", "(", "key", ")", "for", "key", "in", "task", ".", "lock_key", "}", ",", ")", "else", ":", "lock_id", "=", "gen_unique_id", "(", "task", ".", "serialized_func", ",", "task", ".", "args", ",", "task", ".", "kwargs", ",", ")", "if", "lock_id", "not", "in", "lock_ids", ":", "lock", "=", "Lock", "(", "self", ".", "connection", ",", "self", ".", "_key", "(", "'lock'", ",", "lock_id", ")", ",", "timeout", "=", "self", ".", "config", "[", "'ACTIVE_TASK_UPDATE_TIMEOUT'", "]", ")", "acquired", "=", "lock", ".", "acquire", "(", "blocking", "=", "False", ")", "if", "acquired", ":", "lock_ids", ".", "add", "(", "lock_id", ")", "locks", ".", "append", "(", "lock", ")", "else", ":", "log", ".", "info", "(", "'could not acquire lock'", ",", "task_id", "=", "task", ".", "id", ")", "# Reschedule the task (but if the task is already", "# scheduled in case of a unique task, don't prolong", "# the schedule date).", "when", "=", "time", ".", "time", "(", ")", "+", "self", ".", "config", "[", "'LOCK_RETRY'", "]", "task", ".", "_move", "(", "from_state", "=", "ACTIVE", ",", "to_state", "=", "SCHEDULED", ",", "when", "=", "when", ",", "mode", "=", "'min'", ")", "# Make sure to remove it from this list so we don't", "# re-add to the ACTIVE queue by updating the heartbeat.", "all_task_ids", ".", "remove", "(", "task", ".", "id", ")", "continue", "ready_tasks", ".", "append", "(", "task", ")", "if", "not", "ready_tasks", ":", "return", "True", ",", "[", "]", "if", "self", ".", "stats_thread", ":", "self", ".", "stats_thread", ".", "report_task_start", "(", ")", "success", "=", "self", ".", "_execute", "(", "queue", ",", "ready_tasks", ",", "log", ",", "locks", ",", "queue_lock", ",", "all_task_ids", ")", "if", "self", ".", "stats_thread", ":", "self", ".", "stats_thread", ".", "report_task_end", "(", ")", "for", "lock", "in", "locks", ":", "lock", ".", "release", "(", ")", "return", "success", ",", "ready_tasks" ]
Executes the given tasks in the queue. Updates the heartbeat for task IDs passed in all_task_ids. This internal method is only meant to be called from within _process_from_queue.
[ "Executes", "the", "given", "tasks", "in", "the", "queue", ".", "Updates", "the", "heartbeat", "for", "task", "IDs", "passed", "in", "all_task_ids", ".", "This", "internal", "method", "is", "only", "meant", "to", "be", "called", "from", "within", "_process_from_queue", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L692-L757
train
231,754
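The lock handling in _execute_task_group follows the standard non-blocking Redis lock pattern: try to acquire, and if the lock is held elsewhere, reschedule rather than wait. A minimal sketch of the same pattern using redis-py's stock Lock (tasktiger uses an equivalent lock object with a renew operation; the key name here is illustrative):

    import redis
    from redis.lock import Lock

    conn = redis.Redis()
    lock = Lock(conn, 'tiger:lock:<unique-id>', timeout=10)
    if lock.acquire(blocking=False):
        try:
            pass  # safe to run the task group; renew the lock while it runs
        finally:
            lock.release()
    else:
        pass  # lock held by another worker: reschedule the task for later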
closeio/tasktiger
tasktiger/worker.py
Worker._finish_task_processing
def _finish_task_processing(self, queue, task, success): """ After a task is executed, this method is called and ensures that the task gets properly removed from the ACTIVE queue and, in case of an error, retried or marked as failed. """ log = self.log.bind(queue=queue, task_id=task.id) def _mark_done(): # Remove the task from active queue task._move(from_state=ACTIVE) log.info('done') if success: _mark_done() else: should_retry = False should_log_error = True # Get execution info (for logging and retry purposes) execution = self.connection.lindex( self._key('task', task.id, 'executions'), -1) if execution: execution = json.loads(execution) if execution and execution.get('retry'): if 'retry_method' in execution: retry_func, retry_args = execution['retry_method'] else: # We expect the serialized method here. retry_func, retry_args = serialize_retry_method( \ self.config['DEFAULT_RETRY_METHOD']) should_log_error = execution['log_error'] should_retry = True if task.retry_method and not should_retry: retry_func, retry_args = task.retry_method if task.retry_on: if execution: exception_name = execution.get('exception_name') try: exception_class = import_attribute(exception_name) except TaskImportError: log.error('could not import exception', exception_name=exception_name) else: if task.should_retry_on(exception_class, logger=log): should_retry = True else: should_retry = True state = ERROR when = time.time() log_context = { 'func': task.serialized_func } if should_retry: retry_num = task.n_executions() log_context['retry_func'] = retry_func log_context['retry_num'] = retry_num try: func = import_attribute(retry_func) except TaskImportError: log.error('could not import retry function', func=retry_func) else: try: retry_delay = func(retry_num, *retry_args) log_context['retry_delay'] = retry_delay when += retry_delay except StopRetry: pass else: state = SCHEDULED if execution: if state == ERROR and should_log_error: log_func = log.error else: log_func = log.warning log_context.update({ 'time_failed': execution.get('time_failed'), 'traceback': execution.get('traceback'), 'exception_name': execution.get('exception_name'), }) log_func('task error', **log_context) else: log.error('execution not found', **log_context) # Move task to the scheduled queue for retry, or move to error # queue if we don't want to retry. if state == ERROR and not should_log_error: _mark_done() else: task._move(from_state=ACTIVE, to_state=state, when=when)
python
def _finish_task_processing(self, queue, task, success): """ After a task is executed, this method is called and ensures that the task gets properly removed from the ACTIVE queue and, in case of an error, retried or marked as failed. """ log = self.log.bind(queue=queue, task_id=task.id) def _mark_done(): # Remove the task from active queue task._move(from_state=ACTIVE) log.info('done') if success: _mark_done() else: should_retry = False should_log_error = True # Get execution info (for logging and retry purposes) execution = self.connection.lindex( self._key('task', task.id, 'executions'), -1) if execution: execution = json.loads(execution) if execution and execution.get('retry'): if 'retry_method' in execution: retry_func, retry_args = execution['retry_method'] else: # We expect the serialized method here. retry_func, retry_args = serialize_retry_method( \ self.config['DEFAULT_RETRY_METHOD']) should_log_error = execution['log_error'] should_retry = True if task.retry_method and not should_retry: retry_func, retry_args = task.retry_method if task.retry_on: if execution: exception_name = execution.get('exception_name') try: exception_class = import_attribute(exception_name) except TaskImportError: log.error('could not import exception', exception_name=exception_name) else: if task.should_retry_on(exception_class, logger=log): should_retry = True else: should_retry = True state = ERROR when = time.time() log_context = { 'func': task.serialized_func } if should_retry: retry_num = task.n_executions() log_context['retry_func'] = retry_func log_context['retry_num'] = retry_num try: func = import_attribute(retry_func) except TaskImportError: log.error('could not import retry function', func=retry_func) else: try: retry_delay = func(retry_num, *retry_args) log_context['retry_delay'] = retry_delay when += retry_delay except StopRetry: pass else: state = SCHEDULED if execution: if state == ERROR and should_log_error: log_func = log.error else: log_func = log.warning log_context.update({ 'time_failed': execution.get('time_failed'), 'traceback': execution.get('traceback'), 'exception_name': execution.get('exception_name'), }) log_func('task error', **log_context) else: log.error('execution not found', **log_context) # Move task to the scheduled queue for retry, or move to error # queue if we don't want to retry. if state == ERROR and not should_log_error: _mark_done() else: task._move(from_state=ACTIVE, to_state=state, when=when)
[ "def", "_finish_task_processing", "(", "self", ",", "queue", ",", "task", ",", "success", ")", ":", "log", "=", "self", ".", "log", ".", "bind", "(", "queue", "=", "queue", ",", "task_id", "=", "task", ".", "id", ")", "def", "_mark_done", "(", ")", ":", "# Remove the task from active queue", "task", ".", "_move", "(", "from_state", "=", "ACTIVE", ")", "log", ".", "info", "(", "'done'", ")", "if", "success", ":", "_mark_done", "(", ")", "else", ":", "should_retry", "=", "False", "should_log_error", "=", "True", "# Get execution info (for logging and retry purposes)", "execution", "=", "self", ".", "connection", ".", "lindex", "(", "self", ".", "_key", "(", "'task'", ",", "task", ".", "id", ",", "'executions'", ")", ",", "-", "1", ")", "if", "execution", ":", "execution", "=", "json", ".", "loads", "(", "execution", ")", "if", "execution", "and", "execution", ".", "get", "(", "'retry'", ")", ":", "if", "'retry_method'", "in", "execution", ":", "retry_func", ",", "retry_args", "=", "execution", "[", "'retry_method'", "]", "else", ":", "# We expect the serialized method here.", "retry_func", ",", "retry_args", "=", "serialize_retry_method", "(", "self", ".", "config", "[", "'DEFAULT_RETRY_METHOD'", "]", ")", "should_log_error", "=", "execution", "[", "'log_error'", "]", "should_retry", "=", "True", "if", "task", ".", "retry_method", "and", "not", "should_retry", ":", "retry_func", ",", "retry_args", "=", "task", ".", "retry_method", "if", "task", ".", "retry_on", ":", "if", "execution", ":", "exception_name", "=", "execution", ".", "get", "(", "'exception_name'", ")", "try", ":", "exception_class", "=", "import_attribute", "(", "exception_name", ")", "except", "TaskImportError", ":", "log", ".", "error", "(", "'could not import exception'", ",", "exception_name", "=", "exception_name", ")", "else", ":", "if", "task", ".", "should_retry_on", "(", "exception_class", ",", "logger", "=", "log", ")", ":", "should_retry", "=", "True", "else", ":", "should_retry", "=", "True", "state", "=", "ERROR", "when", "=", "time", ".", "time", "(", ")", "log_context", "=", "{", "'func'", ":", "task", ".", "serialized_func", "}", "if", "should_retry", ":", "retry_num", "=", "task", ".", "n_executions", "(", ")", "log_context", "[", "'retry_func'", "]", "=", "retry_func", "log_context", "[", "'retry_num'", "]", "=", "retry_num", "try", ":", "func", "=", "import_attribute", "(", "retry_func", ")", "except", "TaskImportError", ":", "log", ".", "error", "(", "'could not import retry function'", ",", "func", "=", "retry_func", ")", "else", ":", "try", ":", "retry_delay", "=", "func", "(", "retry_num", ",", "*", "retry_args", ")", "log_context", "[", "'retry_delay'", "]", "=", "retry_delay", "when", "+=", "retry_delay", "except", "StopRetry", ":", "pass", "else", ":", "state", "=", "SCHEDULED", "if", "execution", ":", "if", "state", "==", "ERROR", "and", "should_log_error", ":", "log_func", "=", "log", ".", "error", "else", ":", "log_func", "=", "log", ".", "warning", "log_context", ".", "update", "(", "{", "'time_failed'", ":", "execution", ".", "get", "(", "'time_failed'", ")", ",", "'traceback'", ":", "execution", ".", "get", "(", "'traceback'", ")", ",", "'exception_name'", ":", "execution", ".", "get", "(", "'exception_name'", ")", ",", "}", ")", "log_func", "(", "'task error'", ",", "*", "*", "log_context", ")", "else", ":", "log", ".", "error", "(", "'execution not found'", ",", "*", "*", "log_context", ")", "# Move task to the scheduled queue for retry, or move to error", "# queue if we don't want to 
retry.", "if", "state", "==", "ERROR", "and", "not", "should_log_error", ":", "_mark_done", "(", ")", "else", ":", "task", ".", "_move", "(", "from_state", "=", "ACTIVE", ",", "to_state", "=", "state", ",", "when", "=", "when", ")" ]
After a task is executed, this method is called and ensures that the task gets properly removed from the ACTIVE queue and, in case of an error, retried or marked as failed.
[ "After", "a", "task", "is", "executed", "this", "method", "is", "called", "and", "ensures", "that", "the", "task", "gets", "properly", "removed", "from", "the", "ACTIVE", "queue", "and", "in", "case", "of", "an", "error", "retried", "or", "marked", "as", "failed", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L759-L859
train
231,755
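A retry method, as consumed in _finish_task_processing, is any callable invoked as func(retry_num, *retry_args) that returns a delay in seconds or raises StopRetry to stop retrying. A sketch of an exponential-backoff variant, assuming StopRetry is importable from tasktiger.exceptions:

    from tasktiger.exceptions import StopRetry  # assumed import location

    def exponential(retry, delay, factor, max_retries):
        # retry is 1 on the first retry; stop once max_retries is exhausted.
        if retry > max_retries:
            raise StopRetry()
        return delay * factor ** (retry - 1)

    # e.g. retry_func=exponential, retry_args=(10, 2, 4) yields delays of 10, 20, 40, 80s.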
closeio/tasktiger
tasktiger/worker.py
Worker.run
def run(self, once=False, force_once=False): """ Main loop of the worker. Use once=True to execute any queued tasks and then exit. Use force_once=True with once=True to always exit after one processing loop even if tasks remain queued. """ self.log.info('ready', id=self.id, queues=sorted(self.only_queues), exclude_queues=sorted(self.exclude_queues), single_worker_queues=sorted(self.single_worker_queues), max_workers=self.max_workers_per_queue) if not self.scripts.can_replicate_commands: # Older Redis versions may create additional overhead when # executing pipelines. self.log.warn('using old Redis version') if self.config['STATS_INTERVAL']: self.stats_thread = StatsThread(self) self.stats_thread.start() # Queue any periodic tasks that are not queued yet. self._queue_periodic_tasks() # First scan all the available queues for new items until they're empty. # Then, listen to the activity channel. # XXX: This can get inefficient when having lots of queues. self._pubsub = self.connection.pubsub() self._pubsub.subscribe(self._key('activity')) self._queue_set = set(self._filter_queues( self.connection.smembers(self._key(QUEUED)))) try: while True: # Update the queue set on every iteration so we don't get stuck # on processing a specific queue. self._wait_for_new_tasks(timeout=self.config['SELECT_TIMEOUT'], batch_timeout=self.config['SELECT_BATCH_TIMEOUT']) self._install_signal_handlers() self._did_work = False self._worker_run() self._uninstall_signal_handlers() if once and (not self._queue_set or force_once): break if self._stop_requested: raise KeyboardInterrupt() except KeyboardInterrupt: pass except Exception as e: self.log.exception(event='exception') raise finally: if self.stats_thread: self.stats_thread.stop() self.stats_thread = None # Free up Redis connection self._pubsub.reset() self.log.info('done')
python
def run(self, once=False, force_once=False): """ Main loop of the worker. Use once=True to execute any queued tasks and then exit. Use force_once=True with once=True to always exit after one processing loop even if tasks remain queued. """ self.log.info('ready', id=self.id, queues=sorted(self.only_queues), exclude_queues=sorted(self.exclude_queues), single_worker_queues=sorted(self.single_worker_queues), max_workers=self.max_workers_per_queue) if not self.scripts.can_replicate_commands: # Older Redis versions may create additional overhead when # executing pipelines. self.log.warn('using old Redis version') if self.config['STATS_INTERVAL']: self.stats_thread = StatsThread(self) self.stats_thread.start() # Queue any periodic tasks that are not queued yet. self._queue_periodic_tasks() # First scan all the available queues for new items until they're empty. # Then, listen to the activity channel. # XXX: This can get inefficient when having lots of queues. self._pubsub = self.connection.pubsub() self._pubsub.subscribe(self._key('activity')) self._queue_set = set(self._filter_queues( self.connection.smembers(self._key(QUEUED)))) try: while True: # Update the queue set on every iteration so we don't get stuck # on processing a specific queue. self._wait_for_new_tasks(timeout=self.config['SELECT_TIMEOUT'], batch_timeout=self.config['SELECT_BATCH_TIMEOUT']) self._install_signal_handlers() self._did_work = False self._worker_run() self._uninstall_signal_handlers() if once and (not self._queue_set or force_once): break if self._stop_requested: raise KeyboardInterrupt() except KeyboardInterrupt: pass except Exception as e: self.log.exception(event='exception') raise finally: if self.stats_thread: self.stats_thread.stop() self.stats_thread = None # Free up Redis connection self._pubsub.reset() self.log.info('done')
[ "def", "run", "(", "self", ",", "once", "=", "False", ",", "force_once", "=", "False", ")", ":", "self", ".", "log", ".", "info", "(", "'ready'", ",", "id", "=", "self", ".", "id", ",", "queues", "=", "sorted", "(", "self", ".", "only_queues", ")", ",", "exclude_queues", "=", "sorted", "(", "self", ".", "exclude_queues", ")", ",", "single_worker_queues", "=", "sorted", "(", "self", ".", "single_worker_queues", ")", ",", "max_workers", "=", "self", ".", "max_workers_per_queue", ")", "if", "not", "self", ".", "scripts", ".", "can_replicate_commands", ":", "# Older Redis versions may create additional overhead when", "# executing pipelines.", "self", ".", "log", ".", "warn", "(", "'using old Redis version'", ")", "if", "self", ".", "config", "[", "'STATS_INTERVAL'", "]", ":", "self", ".", "stats_thread", "=", "StatsThread", "(", "self", ")", "self", ".", "stats_thread", ".", "start", "(", ")", "# Queue any periodic tasks that are not queued yet.", "self", ".", "_queue_periodic_tasks", "(", ")", "# First scan all the available queues for new items until they're empty.", "# Then, listen to the activity channel.", "# XXX: This can get inefficient when having lots of queues.", "self", ".", "_pubsub", "=", "self", ".", "connection", ".", "pubsub", "(", ")", "self", ".", "_pubsub", ".", "subscribe", "(", "self", ".", "_key", "(", "'activity'", ")", ")", "self", ".", "_queue_set", "=", "set", "(", "self", ".", "_filter_queues", "(", "self", ".", "connection", ".", "smembers", "(", "self", ".", "_key", "(", "QUEUED", ")", ")", ")", ")", "try", ":", "while", "True", ":", "# Update the queue set on every iteration so we don't get stuck", "# on processing a specific queue.", "self", ".", "_wait_for_new_tasks", "(", "timeout", "=", "self", ".", "config", "[", "'SELECT_TIMEOUT'", "]", ",", "batch_timeout", "=", "self", ".", "config", "[", "'SELECT_BATCH_TIMEOUT'", "]", ")", "self", ".", "_install_signal_handlers", "(", ")", "self", ".", "_did_work", "=", "False", "self", ".", "_worker_run", "(", ")", "self", ".", "_uninstall_signal_handlers", "(", ")", "if", "once", "and", "(", "not", "self", ".", "_queue_set", "or", "force_once", ")", ":", "break", "if", "self", ".", "_stop_requested", ":", "raise", "KeyboardInterrupt", "(", ")", "except", "KeyboardInterrupt", ":", "pass", "except", "Exception", "as", "e", ":", "self", ".", "log", ".", "exception", "(", "event", "=", "'exception'", ")", "raise", "finally", ":", "if", "self", ".", "stats_thread", ":", "self", ".", "stats_thread", ".", "stop", "(", ")", "self", ".", "stats_thread", "=", "None", "# Free up Redis connection", "self", ".", "_pubsub", ".", "reset", "(", ")", "self", ".", "log", ".", "info", "(", "'done'", ")" ]
Main loop of the worker. Use once=True to execute any queued tasks and then exit. Use force_once=True with once=True to always exit after one processing loop even if tasks remain queued.
[ "Main", "loop", "of", "the", "worker", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L938-L1004
train
231,756
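A minimal way to drive this loop from application code might look as follows; this is a sketch, the Worker constructor arguments shown are assumptions, and the connection is assumed to need decode_responses=True:

    import redis
    from tasktiger import TaskTiger, Worker

    tiger = TaskTiger(connection=redis.Redis(decode_responses=True))
    worker = Worker(tiger, queues=['default'])  # queue filter assumed optional
    worker.run(once=True)  # process what is currently queued, then exit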
closeio/tasktiger
tasktiger/redis_scripts.py
RedisScripts.can_replicate_commands
def can_replicate_commands(self): """ Whether Redis supports single command replication. """ if not hasattr(self, '_can_replicate_commands'): info = self.redis.info('server') version_info = info['redis_version'].split('.') major, minor = int(version_info[0]), int(version_info[1]) result = major > 3 or major == 3 and minor >= 2 self._can_replicate_commands = result return self._can_replicate_commands
python
def can_replicate_commands(self): """ Whether Redis supports single command replication. """ if not hasattr(self, '_can_replicate_commands'): info = self.redis.info('server') version_info = info['redis_version'].split('.') major, minor = int(version_info[0]), int(version_info[1]) result = major > 3 or major == 3 and minor >= 2 self._can_replicate_commands = result return self._can_replicate_commands
[ "def", "can_replicate_commands", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_can_replicate_commands'", ")", ":", "info", "=", "self", ".", "redis", ".", "info", "(", "'server'", ")", "version_info", "=", "info", "[", "'redis_version'", "]", ".", "split", "(", "'.'", ")", "major", ",", "minor", "=", "int", "(", "version_info", "[", "0", "]", ")", ",", "int", "(", "version_info", "[", "1", "]", ")", "result", "=", "major", ">", "3", "or", "major", "==", "3", "and", "minor", ">=", "2", "self", ".", "_can_replicate_commands", "=", "result", "return", "self", ".", "_can_replicate_commands" ]
Whether Redis supports single command replication.
[ "Whether", "Redis", "supports", "single", "command", "replication", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/redis_scripts.py#L301-L311
train
231,757
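For example, a server reporting redis_version '3.2.6' parses to major=3, minor=2, so the check passes; Redis 3.2 is the first version that replicates scripts by their effects, which is what makes the single-command replication path usable:

    info = {'redis_version': '3.2.6'}  # illustrative INFO server payload
    major, minor = (int(x) for x in info['redis_version'].split('.')[:2])
    can_replicate = major > 3 or (major == 3 and minor >= 2)  # True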
closeio/tasktiger
tasktiger/redis_scripts.py
RedisScripts.zpoppush
def zpoppush(self, source, destination, count, score, new_score, client=None, withscores=False, on_success=None, if_exists=None): """ Pops the first ``count`` members from the ZSET ``source`` and adds them to the ZSET ``destination`` with a score of ``new_score``. If ``score`` is not None, only members up to a score of ``score`` are used. Returns the members that were moved and, if ``withscores`` is True, their original scores. If items were moved, the action defined in ``on_success`` is executed. The only implemented option is a tuple in the form ('update_sets', ``set_value``, ``remove_from_set``, ``add_to_set`` [, ``add_to_set_if_exists``]). If no items are left in the ``source`` ZSET, the ``set_value`` is removed from ``remove_from_set``. If any items were moved to the ``destination`` ZSET, the ``set_value`` is added to ``add_to_set``. If any items were moved to the ``if_exists_key`` ZSET (see below), the ``set_value`` is added to the ``add_to_set_if_exists`` set. If ``if_exists`` is specified as a tuple ('add', if_exists_key, if_exists_score, if_exists_mode), then members that are already in the ``destination`` set will not be returned or updated, but they will be added to a ZSET ``if_exists_key`` with a score of ``if_exists_score`` and the given behavior specified in ``if_exists_mode`` for members that already exist in the ``if_exists_key`` ZSET. ``if_exists_mode`` can be one of the following: - "nx": Don't update the score - "min": Use the smaller of the given and existing score - "max": Use the larger of the given and existing score If ``if_exists`` is specified as a tuple ('noupdate',), then no action will be taken for members that are already in the ``destination`` ZSET (their score will not be updated). """ if score is None: score = '+inf' # Include all elements. if withscores: if on_success: raise NotImplementedError() return self._zpoppush_withscores( keys=[source, destination], args=[score, count, new_score], client=client) else: if if_exists and if_exists[0] == 'add': _, if_exists_key, if_exists_score, if_exists_mode = if_exists if if_exists_mode != 'min': raise NotImplementedError() if not on_success or on_success[0] != 'update_sets': raise NotImplementedError() set_value, remove_from_set, add_to_set, add_to_set_if_exists \ = on_success[1:] return self._zpoppush_exists_min_update_sets( keys=[source, destination, remove_from_set, add_to_set, add_to_set_if_exists, if_exists_key], args=[score, count, new_score, set_value, if_exists_score], ) elif if_exists and if_exists[0] == 'noupdate': if not on_success or on_success[0] != 'update_sets': raise NotImplementedError() set_value, remove_from_set, add_to_set \ = on_success[1:] return self._zpoppush_exists_ignore_update_sets( keys=[source, destination, remove_from_set, add_to_set], args=[score, count, new_score, set_value], ) if on_success: if on_success[0] != 'update_sets': raise NotImplementedError() else: set_value, remove_from_set, add_to_set = on_success[1:] return self._zpoppush_update_sets( keys=[source, destination, remove_from_set, add_to_set], args=[score, count, new_score, set_value], client=client) else: return self._zpoppush( keys=[source, destination], args=[score, count, new_score], client=client)
python
def zpoppush(self, source, destination, count, score, new_score, client=None, withscores=False, on_success=None, if_exists=None): """ Pops the first ``count`` members from the ZSET ``source`` and adds them to the ZSET ``destination`` with a score of ``new_score``. If ``score`` is not None, only members up to a score of ``score`` are used. Returns the members that were moved and, if ``withscores`` is True, their original scores. If items were moved, the action defined in ``on_success`` is executed. The only implemented option is a tuple in the form ('update_sets', ``set_value``, ``remove_from_set``, ``add_to_set`` [, ``add_to_set_if_exists``]). If no items are left in the ``source`` ZSET, the ``set_value`` is removed from ``remove_from_set``. If any items were moved to the ``destination`` ZSET, the ``set_value`` is added to ``add_to_set``. If any items were moved to the ``if_exists_key`` ZSET (see below), the ``set_value`` is added to the ``add_to_set_if_exists`` set. If ``if_exists`` is specified as a tuple ('add', if_exists_key, if_exists_score, if_exists_mode), then members that are already in the ``destination`` set will not be returned or updated, but they will be added to a ZSET ``if_exists_key`` with a score of ``if_exists_score`` and the given behavior specified in ``if_exists_mode`` for members that already exist in the ``if_exists_key`` ZSET. ``if_exists_mode`` can be one of the following: - "nx": Don't update the score - "min": Use the smaller of the given and existing score - "max": Use the larger of the given and existing score If ``if_exists`` is specified as a tuple ('noupdate',), then no action will be taken for members that are already in the ``destination`` ZSET (their score will not be updated). """ if score is None: score = '+inf' # Include all elements. if withscores: if on_success: raise NotImplementedError() return self._zpoppush_withscores( keys=[source, destination], args=[score, count, new_score], client=client) else: if if_exists and if_exists[0] == 'add': _, if_exists_key, if_exists_score, if_exists_mode = if_exists if if_exists_mode != 'min': raise NotImplementedError() if not on_success or on_success[0] != 'update_sets': raise NotImplementedError() set_value, remove_from_set, add_to_set, add_to_set_if_exists \ = on_success[1:] return self._zpoppush_exists_min_update_sets( keys=[source, destination, remove_from_set, add_to_set, add_to_set_if_exists, if_exists_key], args=[score, count, new_score, set_value, if_exists_score], ) elif if_exists and if_exists[0] == 'noupdate': if not on_success or on_success[0] != 'update_sets': raise NotImplementedError() set_value, remove_from_set, add_to_set \ = on_success[1:] return self._zpoppush_exists_ignore_update_sets( keys=[source, destination, remove_from_set, add_to_set], args=[score, count, new_score, set_value], ) if on_success: if on_success[0] != 'update_sets': raise NotImplementedError() else: set_value, remove_from_set, add_to_set = on_success[1:] return self._zpoppush_update_sets( keys=[source, destination, remove_from_set, add_to_set], args=[score, count, new_score, set_value], client=client) else: return self._zpoppush( keys=[source, destination], args=[score, count, new_score], client=client)
[ "def", "zpoppush", "(", "self", ",", "source", ",", "destination", ",", "count", ",", "score", ",", "new_score", ",", "client", "=", "None", ",", "withscores", "=", "False", ",", "on_success", "=", "None", ",", "if_exists", "=", "None", ")", ":", "if", "score", "is", "None", ":", "score", "=", "'+inf'", "# Include all elements.", "if", "withscores", ":", "if", "on_success", ":", "raise", "NotImplementedError", "(", ")", "return", "self", ".", "_zpoppush_withscores", "(", "keys", "=", "[", "source", ",", "destination", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", "]", ",", "client", "=", "client", ")", "else", ":", "if", "if_exists", "and", "if_exists", "[", "0", "]", "==", "'add'", ":", "_", ",", "if_exists_key", ",", "if_exists_score", ",", "if_exists_mode", "=", "if_exists", "if", "if_exists_mode", "!=", "'min'", ":", "raise", "NotImplementedError", "(", ")", "if", "not", "on_success", "or", "on_success", "[", "0", "]", "!=", "'update_sets'", ":", "raise", "NotImplementedError", "(", ")", "set_value", ",", "remove_from_set", ",", "add_to_set", ",", "add_to_set_if_exists", "=", "on_success", "[", "1", ":", "]", "return", "self", ".", "_zpoppush_exists_min_update_sets", "(", "keys", "=", "[", "source", ",", "destination", ",", "remove_from_set", ",", "add_to_set", ",", "add_to_set_if_exists", ",", "if_exists_key", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", ",", "set_value", ",", "if_exists_score", "]", ",", ")", "elif", "if_exists", "and", "if_exists", "[", "0", "]", "==", "'noupdate'", ":", "if", "not", "on_success", "or", "on_success", "[", "0", "]", "!=", "'update_sets'", ":", "raise", "NotImplementedError", "(", ")", "set_value", ",", "remove_from_set", ",", "add_to_set", "=", "on_success", "[", "1", ":", "]", "return", "self", ".", "_zpoppush_exists_ignore_update_sets", "(", "keys", "=", "[", "source", ",", "destination", ",", "remove_from_set", ",", "add_to_set", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", ",", "set_value", "]", ",", ")", "if", "on_success", ":", "if", "on_success", "[", "0", "]", "!=", "'update_sets'", ":", "raise", "NotImplementedError", "(", ")", "else", ":", "set_value", ",", "remove_from_set", ",", "add_to_set", "=", "on_success", "[", "1", ":", "]", "return", "self", ".", "_zpoppush_update_sets", "(", "keys", "=", "[", "source", ",", "destination", ",", "remove_from_set", ",", "add_to_set", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", ",", "set_value", "]", ",", "client", "=", "client", ")", "else", ":", "return", "self", ".", "_zpoppush", "(", "keys", "=", "[", "source", ",", "destination", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", "]", ",", "client", "=", "client", ")" ]
Pops the first ``count`` members from the ZSET ``source`` and adds them to the ZSET ``destination`` with a score of ``new_score``. If ``score`` is not None, only members up to a score of ``score`` are used. Returns the members that were moved and, if ``withscores`` is True, their original scores. If items were moved, the action defined in ``on_success`` is executed. The only implemented option is a tuple in the form ('update_sets', ``set_value``, ``remove_from_set``, ``add_to_set`` [, ``add_to_set_if_exists``]). If no items are left in the ``source`` ZSET, the ``set_value`` is removed from ``remove_from_set``. If any items were moved to the ``destination`` ZSET, the ``set_value`` is added to ``add_to_set``. If any items were moved to the ``if_exists_key`` ZSET (see below), the ``set_value`` is added to the ``add_to_set_if_exists`` set. If ``if_exists`` is specified as a tuple ('add', if_exists_key, if_exists_score, if_exists_mode), then members that are already in the ``destination`` set will not be returned or updated, but they will be added to a ZSET ``if_exists_key`` with a score of ``if_exists_score`` and the given behavior specified in ``if_exists_mode`` for members that already exist in the ``if_exists_key`` ZSET. ``if_exists_mode`` can be one of the following: - "nx": Don't update the score - "min": Use the smaller of the given and existing score - "max": Use the larger of the given and existing score If ``if_exists`` is specified as a tuple ('noupdate',), then no action will be taken for members that are already in the ``destination`` ZSET (their score will not be updated).
[ "Pops", "the", "first", "count", "members", "from", "the", "ZSET", "source", "and", "adds", "them", "to", "the", "ZSET", "destination", "with", "a", "score", "of", "new_score", ".", "If", "score", "is", "not", "None", "only", "members", "up", "to", "a", "score", "of", "score", "are", "used", ".", "Returns", "the", "members", "that", "were", "moved", "and", "if", "withscores", "is", "True", "their", "original", "scores", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/redis_scripts.py#L339-L423
train
231,758
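The call site in _process_from_queue above exercises the ('add', ...) / ('update_sets', ...) combination; schematically, with illustrative key names:

    task_ids = scripts.zpoppush(
        't:queued:default',    # source ZSET
        't:active:default',    # destination ZSET
        10,                    # move at most 10 members
        None,                  # no score ceiling: consider all members
        now,                   # score given to moved members
        if_exists=('add', 't:scheduled:default', now + 3, 'min'),
        on_success=('update_sets', 'default', 't:queued', 't:active', 't:scheduled'),
    )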
closeio/tasktiger
tasktiger/redis_scripts.py
RedisScripts.execute_pipeline
def execute_pipeline(self, pipeline, client=None): """ Executes the given Redis pipeline as a Lua script. When an error occurs, the transaction stops executing, and an exception is raised. This differs from Redis transactions, where execution continues after an error. On success, a list of results is returned. The pipeline is cleared after execution and can no longer be reused. Example: p = conn.pipeline() p.lrange('x', 0, -1) p.set('success', 1) # If "x" is empty or a list, an array [[...], True] is returned. # Otherwise, ResponseError is raised and "success" is not set. results = redis_scripts.execute_pipeline(p) """ client = client or self.redis executing_pipeline = None try: # Prepare args stack = pipeline.command_stack script_args = [int(self.can_replicate_commands), len(stack)] for args, options in stack: script_args += [len(args)-1] + list(args) # Run the pipeline if self.can_replicate_commands: # Redis 3.2 or higher # Make sure scripts exist if pipeline.scripts: pipeline.load_scripts() raw_results = self._execute_pipeline(args=script_args, client=client) else: executing_pipeline = client.pipeline() # Always load scripts to avoid issues when Redis loads data # from AOF file / when replicating. for s in pipeline.scripts: executing_pipeline.script_load(s.script) # Run actual pipeline lua script self._execute_pipeline(args=script_args, client=executing_pipeline) # Always load all scripts and run actual pipeline lua script raw_results = executing_pipeline.execute()[-1] # Run response callbacks on results. results = [] response_callbacks = pipeline.response_callbacks for ((args, options), result) in zip(stack, raw_results): command_name = args[0] if command_name in response_callbacks: result = response_callbacks[command_name](result, **options) results.append(result) return results finally: if executing_pipeline: executing_pipeline.reset() pipeline.reset()
python
def execute_pipeline(self, pipeline, client=None): """ Executes the given Redis pipeline as a Lua script. When an error occurs, the transaction stops executing, and an exception is raised. This differs from Redis transactions, where execution continues after an error. On success, a list of results is returned. The pipeline is cleared after execution and can no longer be reused. Example: p = conn.pipeline() p.lrange('x', 0, -1) p.set('success', 1) # If "x" is empty or a list, an array [[...], True] is returned. # Otherwise, ResponseError is raised and "success" is not set. results = redis_scripts.execute_pipeline(p) """ client = client or self.redis executing_pipeline = None try: # Prepare args stack = pipeline.command_stack script_args = [int(self.can_replicate_commands), len(stack)] for args, options in stack: script_args += [len(args)-1] + list(args) # Run the pipeline if self.can_replicate_commands: # Redis 3.2 or higher # Make sure scripts exist if pipeline.scripts: pipeline.load_scripts() raw_results = self._execute_pipeline(args=script_args, client=client) else: executing_pipeline = client.pipeline() # Always load scripts to avoid issues when Redis loads data # from AOF file / when replicating. for s in pipeline.scripts: executing_pipeline.script_load(s.script) # Run actual pipeline lua script self._execute_pipeline(args=script_args, client=executing_pipeline) # Always load all scripts and run actual pipeline lua script raw_results = executing_pipeline.execute()[-1] # Run response callbacks on results. results = [] response_callbacks = pipeline.response_callbacks for ((args, options), result) in zip(stack, raw_results): command_name = args[0] if command_name in response_callbacks: result = response_callbacks[command_name](result, **options) results.append(result) return results finally: if executing_pipeline: executing_pipeline.reset() pipeline.reset()
[ "def", "execute_pipeline", "(", "self", ",", "pipeline", ",", "client", "=", "None", ")", ":", "client", "=", "client", "or", "self", ".", "redis", "executing_pipeline", "=", "None", "try", ":", "# Prepare args", "stack", "=", "pipeline", ".", "command_stack", "script_args", "=", "[", "int", "(", "self", ".", "can_replicate_commands", ")", ",", "len", "(", "stack", ")", "]", "for", "args", ",", "options", "in", "stack", ":", "script_args", "+=", "[", "len", "(", "args", ")", "-", "1", "]", "+", "list", "(", "args", ")", "# Run the pipeline", "if", "self", ".", "can_replicate_commands", ":", "# Redis 3.2 or higher", "# Make sure scripts exist", "if", "pipeline", ".", "scripts", ":", "pipeline", ".", "load_scripts", "(", ")", "raw_results", "=", "self", ".", "_execute_pipeline", "(", "args", "=", "script_args", ",", "client", "=", "client", ")", "else", ":", "executing_pipeline", "=", "client", ".", "pipeline", "(", ")", "# Always load scripts to avoid issues when Redis loads data", "# from AOF file / when replicating.", "for", "s", "in", "pipeline", ".", "scripts", ":", "executing_pipeline", ".", "script_load", "(", "s", ".", "script", ")", "# Run actual pipeline lua script", "self", ".", "_execute_pipeline", "(", "args", "=", "script_args", ",", "client", "=", "executing_pipeline", ")", "# Always load all scripts and run actual pipeline lua script", "raw_results", "=", "executing_pipeline", ".", "execute", "(", ")", "[", "-", "1", "]", "# Run response callbacks on results.", "results", "=", "[", "]", "response_callbacks", "=", "pipeline", ".", "response_callbacks", "for", "(", "(", "args", ",", "options", ")", ",", "result", ")", "in", "zip", "(", "stack", ",", "raw_results", ")", ":", "command_name", "=", "args", "[", "0", "]", "if", "command_name", "in", "response_callbacks", ":", "result", "=", "response_callbacks", "[", "command_name", "]", "(", "result", ",", "*", "*", "options", ")", "results", ".", "append", "(", "result", ")", "return", "results", "finally", ":", "if", "executing_pipeline", ":", "executing_pipeline", ".", "reset", "(", ")", "pipeline", ".", "reset", "(", ")" ]
Executes the given Redis pipeline as a Lua script. When an error occurs, the transaction stops executing, and an exception is raised. This differs from Redis transactions, where execution continues after an error. On success, a list of results is returned. The pipeline is cleared after execution and can no longer be reused. Example: p = conn.pipeline() p.lrange('x', 0, -1) p.set('success', 1) # If "x" is empty or a list, an array [[...], True] is returned. # Otherwise, ResponseError is raised and "success" is not set. results = redis_scripts.execute_pipeline(p)
[ "Executes", "the", "given", "Redis", "pipeline", "as", "a", "Lua", "script", ".", "When", "an", "error", "occurs", "the", "transaction", "stops", "executing", "and", "an", "exception", "is", "raised", ".", "This", "differs", "from", "Redis", "transactions", "where", "execution", "continues", "after", "an", "error", ".", "On", "success", "a", "list", "of", "results", "is", "returned", ".", "The", "pipeline", "is", "cleared", "after", "execution", "and", "can", "no", "longer", "be", "reused", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/redis_scripts.py#L466-L534
train
231,759
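A minimal usage sketch for the record above, assuming a configured TaskTiger instance `tiger` that exposes the redis-py client as `tiger.connection` and this RedisScripts helper as `tiger.scripts` (those attribute names are an assumption, not taken from the record):

p = tiger.connection.pipeline()
p.lrange('x', 0, -1)
p.set('success', 1)
# Runs atomically as a single Lua script; on the first failing command a
# ResponseError is raised and the remaining commands are not executed.
results = tiger.scripts.execute_pipeline(p)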
closeio/tasktiger
tasktiger/_internal.py
gen_unique_id
def gen_unique_id(serialized_name, args, kwargs): """ Generates and returns a hex-encoded 256-bit ID for the given task name and args. Used to generate IDs for unique tasks or for task locks. """ return hashlib.sha256(json.dumps({ 'func': serialized_name, 'args': args, 'kwargs': kwargs, }, sort_keys=True).encode('utf8')).hexdigest()
python
def gen_unique_id(serialized_name, args, kwargs): """ Generates and returns a hex-encoded 256-bit ID for the given task name and args. Used to generate IDs for unique tasks or for task locks. """ return hashlib.sha256(json.dumps({ 'func': serialized_name, 'args': args, 'kwargs': kwargs, }, sort_keys=True).encode('utf8')).hexdigest()
[ "def", "gen_unique_id", "(", "serialized_name", ",", "args", ",", "kwargs", ")", ":", "return", "hashlib", ".", "sha256", "(", "json", ".", "dumps", "(", "{", "'func'", ":", "serialized_name", ",", "'args'", ":", "args", ",", "'kwargs'", ":", "kwargs", ",", "}", ",", "sort_keys", "=", "True", ")", ".", "encode", "(", "'utf8'", ")", ")", ".", "hexdigest", "(", ")" ]
Generates and returns a hex-encoded 256-bit ID for the given task name and args. Used to generate IDs for unique tasks or for task locks.
[ "Generates", "and", "returns", "a", "hex", "-", "encoded", "256", "-", "bit", "ID", "for", "the", "given", "task", "name", "and", "args", ".", "Used", "to", "generate", "IDs", "for", "unique", "tasks", "or", "for", "task", "locks", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/_internal.py#L56-L65
train
231,760
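A minimal sketch of how these IDs behave, assuming the function is importable from the record's tasktiger/_internal.py module path (note it is an internal module):

from tasktiger._internal import gen_unique_id

# The same serialized name, args and kwargs always hash to the same
# 64-character SHA-256 hex digest, which is what lets unique tasks and
# task locks deduplicate.
a = gen_unique_id('myapp.tasks:send_email', ['user-1'], {'force': True})
b = gen_unique_id('myapp.tasks:send_email', ['user-1'], {'force': True})
assert a == b and len(a) == 64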
closeio/tasktiger
tasktiger/_internal.py
serialize_func_name
def serialize_func_name(func): """ Returns the dotted serialized path to the passed function. """ if func.__module__ == '__main__': raise ValueError('Functions from the __main__ module cannot be ' 'processed by workers.') try: # This will only work on Python 3.3 or above, but it will allow us to use static/classmethods func_name = func.__qualname__ except AttributeError: func_name = func.__name__ return ':'.join([func.__module__, func_name])
python
def serialize_func_name(func): """ Returns the dotted serialized path to the passed function. """ if func.__module__ == '__main__': raise ValueError('Functions from the __main__ module cannot be ' 'processed by workers.') try: # This will only work on Python 3.3 or above, but it will allow us to use static/classmethods func_name = func.__qualname__ except AttributeError: func_name = func.__name__ return ':'.join([func.__module__, func_name])
[ "def", "serialize_func_name", "(", "func", ")", ":", "if", "func", ".", "__module__", "==", "'__main__'", ":", "raise", "ValueError", "(", "'Functions from the __main__ module cannot be '", "'processed by workers.'", ")", "try", ":", "# This will only work on Python 3.3 or above, but it will allow us to use static/classmethods", "func_name", "=", "func", ".", "__qualname__", "except", "AttributeError", ":", "func_name", "=", "func", ".", "__name__", "return", "':'", ".", "join", "(", "[", "func", ".", "__module__", ",", "func_name", "]", ")" ]
Returns the dotted serialized path to the passed function.
[ "Returns", "the", "dotted", "serialized", "path", "to", "the", "passed", "function", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/_internal.py#L67-L79
train
231,761
closeio/tasktiger
tasktiger/_internal.py
dotted_parts
def dotted_parts(s): """ For a string "a.b.c", yields "a", "a.b", "a.b.c". """ idx = -1 while s: idx = s.find('.', idx+1) if idx == -1: yield s break yield s[:idx]
python
def dotted_parts(s): """ For a string "a.b.c", yields "a", "a.b", "a.b.c". """ idx = -1 while s: idx = s.find('.', idx+1) if idx == -1: yield s break yield s[:idx]
[ "def", "dotted_parts", "(", "s", ")", ":", "idx", "=", "-", "1", "while", "s", ":", "idx", "=", "s", ".", "find", "(", "'.'", ",", "idx", "+", "1", ")", "if", "idx", "==", "-", "1", ":", "yield", "s", "break", "yield", "s", "[", ":", "idx", "]" ]
For a string "a.b.c", yields "a", "a.b", "a.b.c".
[ "For", "a", "string", "a", ".", "b", ".", "c", "yields", "a", "a", ".", "b", "a", ".", "b", ".", "c", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/_internal.py#L81-L91
train
231,762
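A quick behavior sketch (import path assumed from the record):

from tasktiger._internal import dotted_parts

# Yields prefixes from least to most specific.
assert list(dotted_parts('a.b.c')) == ['a', 'a.b', 'a.b.c']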
closeio/tasktiger
tasktiger/_internal.py
reversed_dotted_parts
def reversed_dotted_parts(s): """ For a string "a.b.c", yields "a.b.c", "a.b", "a". """ idx = -1 if s: yield s while s: idx = s.rfind('.', 0, idx) if idx == -1: break yield s[:idx]
python
def reversed_dotted_parts(s): """ For a string "a.b.c", yields "a.b.c", "a.b", "a". """ idx = -1 if s: yield s while s: idx = s.rfind('.', 0, idx) if idx == -1: break yield s[:idx]
[ "def", "reversed_dotted_parts", "(", "s", ")", ":", "idx", "=", "-", "1", "if", "s", ":", "yield", "s", "while", "s", ":", "idx", "=", "s", ".", "rfind", "(", "'.'", ",", "0", ",", "idx", ")", "if", "idx", "==", "-", "1", ":", "break", "yield", "s", "[", ":", "idx", "]" ]
For a string "a.b.c", yields "a.b.c", "a.b", "a".
[ "For", "a", "string", "a", ".", "b", ".", "c", "yields", "a", ".", "b", ".", "c", "a", ".", "b", "a", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/_internal.py#L93-L104
train
231,763
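The mirror-image sketch for the reversed variant (import path assumed from the record):

from tasktiger._internal import reversed_dotted_parts

# Yields prefixes from most to least specific, e.g. for queue-name matching.
assert list(reversed_dotted_parts('a.b.c')) == ['a.b.c', 'a.b', 'a']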
closeio/tasktiger
tasktiger/logging.py
tasktiger_processor
def tasktiger_processor(logger, method_name, event_dict): """ TaskTiger structlog processor. Inject the current task id for non-batch tasks. """ if g['current_tasks'] is not None and not g['current_task_is_batch']: event_dict['task_id'] = g['current_tasks'][0].id return event_dict
python
def tasktiger_processor(logger, method_name, event_dict): """ TaskTiger structlog processor. Inject the current task id for non-batch tasks. """ if g['current_tasks'] is not None and not g['current_task_is_batch']: event_dict['task_id'] = g['current_tasks'][0].id return event_dict
[ "def", "tasktiger_processor", "(", "logger", ",", "method_name", ",", "event_dict", ")", ":", "if", "g", "[", "'current_tasks'", "]", "is", "not", "None", "and", "not", "g", "[", "'current_task_is_batch'", "]", ":", "event_dict", "[", "'task_id'", "]", "=", "g", "[", "'current_tasks'", "]", "[", "0", "]", ".", "id", "return", "event_dict" ]
TaskTiger structlog processor. Inject the current task id for non-batch tasks.
[ "TaskTiger", "structlog", "processor", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/logging.py#L4-L14
train
231,764
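A minimal structlog wiring sketch for this processor; the configuration shown is an assumption, not taken from the repo:

import structlog
from tasktiger.logging import tasktiger_processor

structlog.configure(processors=[
    tasktiger_processor,                   # injects task_id for non-batch tasks
    structlog.processors.JSONRenderer(),   # render the event dict as JSON
])
log = structlog.get_logger()
log.info('handling')  # inside a worker: {"event": "handling", "task_id": "..."}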
closeio/tasktiger
tasktiger/task.py
Task.should_retry_on
def should_retry_on(self, exception_class, logger=None): """ Whether this task should be retried when the given exception occurs. """ for n in (self.retry_on or []): try: if issubclass(exception_class, import_attribute(n)): return True except TaskImportError: if logger: logger.error('should_retry_on could not import class', exception_name=n) return False
python
def should_retry_on(self, exception_class, logger=None): """ Whether this task should be retried when the given exception occurs. """ for n in (self.retry_on or []): try: if issubclass(exception_class, import_attribute(n)): return True except TaskImportError: if logger: logger.error('should_retry_on could not import class', exception_name=n) return False
[ "def", "should_retry_on", "(", "self", ",", "exception_class", ",", "logger", "=", "None", ")", ":", "for", "n", "in", "(", "self", ".", "retry_on", "or", "[", "]", ")", ":", "try", ":", "if", "issubclass", "(", "exception_class", ",", "import_attribute", "(", "n", ")", ")", ":", "return", "True", "except", "TaskImportError", ":", "if", "logger", ":", "logger", ".", "error", "(", "'should_retry_on could not import class'", ",", "exception_name", "=", "n", ")", "return", "False" ]
Whether this task should be retried when the given exception occurs.
[ "Whether", "this", "task", "should", "be", "retried", "when", "the", "given", "exception", "occurs", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/task.py#L155-L167
train
231,765
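A hedged sketch of the retry check; `myapp.tasks.send_email` is a hypothetical importable function and a local Redis instance is assumed:

from tasktiger import TaskTiger
from myapp.tasks import send_email  # hypothetical module-level task function

tiger = TaskTiger()
task = tiger.delay(send_email, retry_on=[ValueError])  # stored as dotted paths
assert task.should_retry_on(ValueError)     # listed class matches
assert task.should_retry_on(UnicodeError)   # subclasses of a listed class match too
assert not task.should_retry_on(KeyError)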
closeio/tasktiger
tasktiger/task.py
Task.update_scheduled_time
def update_scheduled_time(self, when): """ Updates a scheduled task's date to the given date. If the task is not scheduled, a TaskNotFound exception is raised. """ tiger = self.tiger ts = get_timestamp(when) assert ts pipeline = tiger.connection.pipeline() key = tiger._key(SCHEDULED, self.queue) tiger.scripts.zadd(key, ts, self.id, mode='xx', client=pipeline) pipeline.zscore(key, self.id) _, score = pipeline.execute() if not score: raise TaskNotFound('Task {} not found in queue "{}" in state "{}".'.format( self.id, self.queue, SCHEDULED )) self._ts = ts
python
def update_scheduled_time(self, when): """ Updates a scheduled task's date to the given date. If the task is not scheduled, a TaskNotFound exception is raised. """ tiger = self.tiger ts = get_timestamp(when) assert ts pipeline = tiger.connection.pipeline() key = tiger._key(SCHEDULED, self.queue) tiger.scripts.zadd(key, ts, self.id, mode='xx', client=pipeline) pipeline.zscore(key, self.id) _, score = pipeline.execute() if not score: raise TaskNotFound('Task {} not found in queue "{}" in state "{}".'.format( self.id, self.queue, SCHEDULED )) self._ts = ts
[ "def", "update_scheduled_time", "(", "self", ",", "when", ")", ":", "tiger", "=", "self", ".", "tiger", "ts", "=", "get_timestamp", "(", "when", ")", "assert", "ts", "pipeline", "=", "tiger", ".", "connection", ".", "pipeline", "(", ")", "key", "=", "tiger", ".", "_key", "(", "SCHEDULED", ",", "self", ".", "queue", ")", "tiger", ".", "scripts", ".", "zadd", "(", "key", ",", "ts", ",", "self", ".", "id", ",", "mode", "=", "'xx'", ",", "client", "=", "pipeline", ")", "pipeline", ".", "zscore", "(", "key", ",", "self", ".", "id", ")", "_", ",", "score", "=", "pipeline", ".", "execute", "(", ")", "if", "not", "score", ":", "raise", "TaskNotFound", "(", "'Task {} not found in queue \"{}\" in state \"{}\".'", ".", "format", "(", "self", ".", "id", ",", "self", ".", "queue", ",", "SCHEDULED", ")", ")", "self", ".", "_ts", "=", "ts" ]
Updates a scheduled task's date to the given date. If the task is not scheduled, a TaskNotFound exception is raised.
[ "Updates", "a", "scheduled", "task", "s", "date", "to", "the", "given", "date", ".", "If", "the", "task", "is", "not", "scheduled", "a", "TaskNotFound", "exception", "is", "raised", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/task.py#L322-L342
train
231,766
closeio/tasktiger
tasktiger/task.py
Task.n_executions
def n_executions(self): """ Queries and returns the number of past task executions. """ pipeline = self.tiger.connection.pipeline() pipeline.exists(self.tiger._key('task', self.id)) pipeline.llen(self.tiger._key('task', self.id, 'executions')) exists, n_executions = pipeline.execute() if not exists: raise TaskNotFound('Task {} not found.'.format( self.id )) return n_executions
python
def n_executions(self): """ Queries and returns the number of past task executions. """ pipeline = self.tiger.connection.pipeline() pipeline.exists(self.tiger._key('task', self.id)) pipeline.llen(self.tiger._key('task', self.id, 'executions')) exists, n_executions = pipeline.execute() if not exists: raise TaskNotFound('Task {} not found.'.format( self.id )) return n_executions
[ "def", "n_executions", "(", "self", ")", ":", "pipeline", "=", "self", ".", "tiger", ".", "connection", ".", "pipeline", "(", ")", "pipeline", ".", "exists", "(", "self", ".", "tiger", ".", "_key", "(", "'task'", ",", "self", ".", "id", ")", ")", "pipeline", ".", "llen", "(", "self", ".", "tiger", ".", "_key", "(", "'task'", ",", "self", ".", "id", ",", "'executions'", ")", ")", "exists", ",", "n_executions", "=", "pipeline", ".", "execute", "(", ")", "if", "not", "exists", ":", "raise", "TaskNotFound", "(", "'Task {} not found.'", ".", "format", "(", "self", ".", "id", ")", ")", "return", "n_executions" ]
Queries and returns the number of past task executions.
[ "Queries", "and", "returns", "the", "number", "of", "past", "task", "executions", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/task.py#L421-L433
train
231,767
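A small sketch of querying execution counts; the exception import path is an assumption based on TaskTiger's exceptions module:

from tasktiger.exceptions import TaskNotFound

try:
    n = task.n_executions()   # `task` is any Task object, e.g. from tiger.delay(...)
    print('past executions:', n)
except TaskNotFound:
    print('task data expired or was deleted')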
aewallin/allantools
allantools/noise_kasdin.py
Noise.set_input
def set_input(self, nr=2, qd=1, b=0): """ Set inputs after initialization Parameters ------- nr: integer length of generated time-series number must be power of two qd: float discrete variance b: float noise type: 0 : White Phase Modulation (WPM) -1 : Flicker Phase Modulation (FPM) -2 : White Frequency Modulation (WFM) -3 : Flicker Frequency Modulation (FFM) -4 : Random Walk Frequency Modulation (RWFM) """ self.nr = nr self.qd = qd self.b = b
python
def set_input(self, nr=2, qd=1, b=0): """ Set inputs after initialization Parameters ------- nr: integer length of generated time-series number must be power of two qd: float discrete variance b: float noise type: 0 : White Phase Modulation (WPM) -1 : Flicker Phase Modulation (FPM) -2 : White Frequency Modulation (WFM) -3 : Flicker Frequency Modulation (FFM) -4 : Random Walk Frequency Modulation (RWFM) """ self.nr = nr self.qd = qd self.b = b
[ "def", "set_input", "(", "self", ",", "nr", "=", "2", ",", "qd", "=", "1", ",", "b", "=", "0", ")", ":", "self", ".", "nr", "=", "nr", "self", ".", "qd", "=", "qd", "self", ".", "b", "=", "b" ]
Set inputs after initialization Parameters ------- nr: integer length of generated time-series number must be power of two qd: float discrete variance b: float noise type: 0 : White Phase Modulation (WPM) -1 : Flicker Phase Modulation (FPM) -2 : White Frequency Modulation (WFM) -3 : Flicker Frequency Modulation (FFM) -4 : Random Walk Frequency Modulation (RWFM)
[ "Set", "inputs", "after", "initialization" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise_kasdin.py#L81-L102
train
231,768
aewallin/allantools
allantools/noise_kasdin.py
Noise.generateNoise
def generateNoise(self): """ Generate noise time series based on input parameters Returns ------- time_series: np.array Time series with colored noise. len(time_series) == nr """ # Fill wfb array with white noise based on given discrete variance wfb = np.zeros(self.nr*2) wfb[:self.nr] = np.random.normal(0, np.sqrt(self.qd), self.nr) # Generate the hfb coefficients based on the noise type mhb = -self.b/2.0 hfb = np.zeros(self.nr*2) hfb[0] = 1.0 indices = np.arange(self.nr-1) hfb[1:self.nr] = (mhb+indices)/(indices+1.0) hfb[:self.nr] = np.multiply.accumulate(hfb[:self.nr]) # Perform discrete Fourier transform of wfb and hfb time series wfb_fft = np.fft.rfft(wfb) hfb_fft = np.fft.rfft(hfb) # Perform inverse Fourier transform of the product of wfb and hfb FFTs time_series = np.fft.irfft(wfb_fft*hfb_fft)[:self.nr] self.time_series = time_series
python
def generateNoise(self): """ Generate noise time series based on input parameters Returns ------- time_series: np.array Time series with colored noise. len(time_series) == nr """ # Fill wfb array with white noise based on given discrete variance wfb = np.zeros(self.nr*2) wfb[:self.nr] = np.random.normal(0, np.sqrt(self.qd), self.nr) # Generate the hfb coefficients based on the noise type mhb = -self.b/2.0 hfb = np.zeros(self.nr*2) hfb[0] = 1.0 indices = np.arange(self.nr-1) hfb[1:self.nr] = (mhb+indices)/(indices+1.0) hfb[:self.nr] = np.multiply.accumulate(hfb[:self.nr]) # Perform discrete Fourier transform of wfb and hfb time series wfb_fft = np.fft.rfft(wfb) hfb_fft = np.fft.rfft(hfb) # Perform inverse Fourier transform of the product of wfb and hfb FFTs time_series = np.fft.irfft(wfb_fft*hfb_fft)[:self.nr] self.time_series = time_series
[ "def", "generateNoise", "(", "self", ")", ":", "# Fill wfb array with white noise based on given discrete variance", "wfb", "=", "np", ".", "zeros", "(", "self", ".", "nr", "*", "2", ")", "wfb", "[", ":", "self", ".", "nr", "]", "=", "np", ".", "random", ".", "normal", "(", "0", ",", "np", ".", "sqrt", "(", "self", ".", "qd", ")", ",", "self", ".", "nr", ")", "# Generate the hfb coefficients based on the noise type", "mhb", "=", "-", "self", ".", "b", "/", "2.0", "hfb", "=", "np", ".", "zeros", "(", "self", ".", "nr", "*", "2", ")", "hfb", "=", "np", ".", "zeros", "(", "self", ".", "nr", "*", "2", ")", "hfb", "[", "0", "]", "=", "1.0", "indices", "=", "np", ".", "arange", "(", "self", ".", "nr", "-", "1", ")", "hfb", "[", "1", ":", "self", ".", "nr", "]", "=", "(", "mhb", "+", "indices", ")", "/", "(", "indices", "+", "1.0", ")", "hfb", "[", ":", "self", ".", "nr", "]", "=", "np", ".", "multiply", ".", "accumulate", "(", "hfb", "[", ":", "self", ".", "nr", "]", ")", "# Perform discrete Fourier transform of wfb and hfb time series", "wfb_fft", "=", "np", ".", "fft", ".", "rfft", "(", "wfb", ")", "hfb_fft", "=", "np", ".", "fft", ".", "rfft", "(", "hfb", ")", "# Perform inverse Fourier transform of the product of wfb and hfb FFTs", "time_series", "=", "np", ".", "fft", ".", "irfft", "(", "wfb_fft", "*", "hfb_fft", ")", "[", ":", "self", ".", "nr", "]", "self", ".", "time_series", "=", "time_series" ]
Generate noise time series based on input parameters Returns ------- time_series: np.array Time series with colored noise. len(time_series) == nr
[ "Generate", "noise", "time", "series", "based", "on", "input", "parameters" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise_kasdin.py#L104-L130
train
231,769
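A hedged end-to-end sketch of the Kasdin-Walter noise generator, assuming the Noise class is constructible with defaults as the set_input record suggests:

from allantools.noise_kasdin import Noise

noise = Noise()
noise.set_input(nr=2**14, qd=1e-20, b=-2)  # nr must be a power of two; b=-2 is white FM
noise.generateNoise()
x = noise.time_series                      # colored-noise series, len(x) == nr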
aewallin/allantools
allantools/noise_kasdin.py
Noise.adev
def adev(self, tau0, tau): """ return predicted ADEV of noise-type at given tau """ prefactor = self.adev_from_qd(tau0=tau0, tau=tau) c = self.c_avar() avar = pow(prefactor, 2)*pow(tau, c) return np.sqrt(avar)
python
def adev(self, tau0, tau): """ return predicted ADEV of noise-type at given tau """ prefactor = self.adev_from_qd(tau0=tau0, tau=tau) c = self.c_avar() avar = pow(prefactor, 2)*pow(tau, c) return np.sqrt(avar)
[ "def", "adev", "(", "self", ",", "tau0", ",", "tau", ")", ":", "prefactor", "=", "self", ".", "adev_from_qd", "(", "tau0", "=", "tau0", ",", "tau", "=", "tau", ")", "c", "=", "self", ".", "c_avar", "(", ")", "avar", "=", "pow", "(", "prefactor", ",", "2", ")", "*", "pow", "(", "tau", ",", "c", ")", "return", "np", ".", "sqrt", "(", "avar", ")" ]
return predicted ADEV of noise-type at given tau
[ "return", "predicted", "ADEV", "of", "noise", "-", "type", "at", "given", "tau" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise_kasdin.py#L161-L168
train
231,770
aewallin/allantools
allantools/noise_kasdin.py
Noise.mdev
def mdev(self, tau0, tau): """ return predicted MDEV of noise-type at given tau """ prefactor = self.mdev_from_qd(tau0=tau0, tau=tau) c = self.c_mvar() mvar = pow(prefactor, 2)*pow(tau, c) return np.sqrt(mvar)
python
def mdev(self, tau0, tau): """ return predicted MDEV of noise-type at given tau """ prefactor = self.mdev_from_qd(tau0=tau0, tau=tau) c = self.c_mvar() mvar = pow(prefactor, 2)*pow(tau, c) return np.sqrt(mvar)
[ "def", "mdev", "(", "self", ",", "tau0", ",", "tau", ")", ":", "prefactor", "=", "self", ".", "mdev_from_qd", "(", "tau0", "=", "tau0", ",", "tau", "=", "tau", ")", "c", "=", "self", ".", "c_mvar", "(", ")", "mvar", "=", "pow", "(", "prefactor", ",", "2", ")", "*", "pow", "(", "tau", ",", "c", ")", "return", "np", ".", "sqrt", "(", "mvar", ")" ]
return predicted MDEV of noise-type at given tau
[ "return", "predicted", "MDEV", "of", "noise", "-", "type", "at", "given", "tau" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise_kasdin.py#L170-L177
train
231,771
aewallin/allantools
allantools/noise.py
scipy_psd
def scipy_psd(x, f_sample=1.0, nr_segments=4): """ PSD routine from scipy we can compare our own numpy result against this one """ f_axis, psd_of_x = scipy.signal.welch(x, f_sample, nperseg=len(x)//nr_segments) return f_axis, psd_of_x
python
def scipy_psd(x, f_sample=1.0, nr_segments=4): """ PSD routine from scipy we can compare our own numpy result against this one """ f_axis, psd_of_x = scipy.signal.welch(x, f_sample, nperseg=len(x)//nr_segments) return f_axis, psd_of_x
[ "def", "scipy_psd", "(", "x", ",", "f_sample", "=", "1.0", ",", "nr_segments", "=", "4", ")", ":", "f_axis", ",", "psd_of_x", "=", "scipy", ".", "signal", ".", "welch", "(", "x", ",", "f_sample", ",", "nperseg", "=", "len", "(", "x", ")", "/", "nr_segments", ")", "return", "f_axis", ",", "psd_of_x" ]
PSD routine from scipy we can compare our own numpy result against this one
[ "PSD", "routine", "from", "scipy", "we", "can", "compare", "our", "own", "numpy", "result", "against", "this", "one" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise.py#L37-L42
train
231,772
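A quick sanity-check sketch (import path from the record):

import numpy as np
from allantools.noise import scipy_psd

x = np.random.randn(4096)                       # white noise
f, Pxx = scipy_psd(x, f_sample=1.0, nr_segments=4)
# For white noise the Welch PSD should come out roughly flat across f.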
aewallin/allantools
allantools/noise.py
iterpink
def iterpink(depth=20): """Generate a sequence of samples of pink noise. pink noise generator from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/ Based on the Voss-McCartney algorithm, discussion and code examples at http://www.firstpr.com.au/dsp/pink-noise/ depth: Use this many samples of white noise to calculate the output. A higher number is slower to run, but renders low frequencies with more correct power spectra. Generates a never-ending sequence of floating-point values. Any continuous set of these samples will tend to have a 1/f power spectrum. """ values = numpy.random.randn(depth) smooth = numpy.random.randn(depth) source = numpy.random.randn(depth) sumvals = values.sum() i = 0 while True: yield sumvals + smooth[i] # advance the index by 1. if the index wraps, generate noise to use in # the calculations, but do not update any of the pink noise values. i += 1 if i == depth: i = 0 smooth = numpy.random.randn(depth) source = numpy.random.randn(depth) continue # count trailing zeros in i c = 0 while not (i >> c) & 1: c += 1 # replace value c with a new source element sumvals += source[i] - values[c] values[c] = source[i]
python
def iterpink(depth=20): """Generate a sequence of samples of pink noise. pink noise generator from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/ Based on the Voss-McCartney algorithm, discussion and code examples at http://www.firstpr.com.au/dsp/pink-noise/ depth: Use this many samples of white noise to calculate the output. A higher number is slower to run, but renders low frequencies with more correct power spectra. Generates a never-ending sequence of floating-point values. Any continuous set of these samples will tend to have a 1/f power spectrum. """ values = numpy.random.randn(depth) smooth = numpy.random.randn(depth) source = numpy.random.randn(depth) sumvals = values.sum() i = 0 while True: yield sumvals + smooth[i] # advance the index by 1. if the index wraps, generate noise to use in # the calculations, but do not update any of the pink noise values. i += 1 if i == depth: i = 0 smooth = numpy.random.randn(depth) source = numpy.random.randn(depth) continue # count trailing zeros in i c = 0 while not (i >> c) & 1: c += 1 # replace value c with a new source element sumvals += source[i] - values[c] values[c] = source[i]
[ "def", "iterpink", "(", "depth", "=", "20", ")", ":", "values", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "smooth", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "source", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "sumvals", "=", "values", ".", "sum", "(", ")", "i", "=", "0", "while", "True", ":", "yield", "sumvals", "+", "smooth", "[", "i", "]", "# advance the index by 1. if the index wraps, generate noise to use in", "# the calculations, but do not update any of the pink noise values.", "i", "+=", "1", "if", "i", "==", "depth", ":", "i", "=", "0", "smooth", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "source", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "continue", "# count trailing zeros in i", "c", "=", "0", "while", "not", "(", "i", ">>", "c", ")", "&", "1", ":", "c", "+=", "1", "# replace value c with a new source element", "sumvals", "+=", "source", "[", "i", "]", "-", "values", "[", "c", "]", "values", "[", "c", "]", "=", "source", "[", "i", "]" ]
Generate a sequence of samples of pink noise. pink noise generator from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/ Based on the Voss-McCartney algorithm, discussion and code examples at http://www.firstpr.com.au/dsp/pink-noise/ depth: Use this many samples of white noise to calculate the output. A higher number is slower to run, but renders low frequencies with more correct power spectra. Generates a never-ending sequence of floating-point values. Any continuous set of these samples will tend to have a 1/f power spectrum.
[ "Generate", "a", "sequence", "of", "samples", "of", "pink", "noise", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise.py#L85-L125
train
231,773
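Since iterpink never terminates, here is a sketch of taking a finite slice (import path from the record):

import itertools
from allantools.noise import iterpink

pink = iterpink(depth=20)
samples = list(itertools.islice(pink, 8192))    # finite chunk of the 1/f stream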
aewallin/allantools
examples/noise-color-demo.py
plotline
def plotline(plt, alpha, taus, style,label=""): """ plot a line with the slope alpha """ y = [pow(tt, alpha) for tt in taus] plt.loglog(taus, y, style,label=label)
python
def plotline(plt, alpha, taus, style,label=""): """ plot a line with the slope alpha """ y = [pow(tt, alpha) for tt in taus] plt.loglog(taus, y, style,label=label)
[ "def", "plotline", "(", "plt", ",", "alpha", ",", "taus", ",", "style", ",", "label", "=", "\"\"", ")", ":", "y", "=", "[", "pow", "(", "tt", ",", "alpha", ")", "for", "tt", "in", "taus", "]", "plt", ".", "loglog", "(", "taus", ",", "y", ",", "style", ",", "label", "=", "label", ")" ]
plot a line with the slope alpha
[ "plot", "a", "line", "with", "the", "slope", "alpha" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/noise-color-demo.py#L38-L41
train
231,774
aewallin/allantools
examples/b1_noise_id_figure.py
b1_noise_id
def b1_noise_id(x, af, rate): """ B1 ratio for noise identification ratio of Standard Variance to AVAR """ (taus,devs,errs,ns) = at.adev(x,taus=[af*rate],data_type="phase", rate=rate) oadev_x = devs[0] y = np.diff(x) y_cut = np.array( y[:len(y)-(len(y)%af)] ) # cut to length assert len(y_cut)%af == 0 y_shaped = y_cut.reshape( ( int(len(y_cut)/af), af) ) y_averaged = np.average(y_shaped,axis=1) # average var = np.var(y_averaged, ddof=1) return var/pow(oadev_x,2.0)
python
def b1_noise_id(x, af, rate): """ B1 ratio for noise identification ratio of Standard Variance to AVAR """ (taus,devs,errs,ns) = at.adev(x,taus=[af*rate],data_type="phase", rate=rate) oadev_x = devs[0] y = np.diff(x) y_cut = np.array( y[:len(y)-(len(y)%af)] ) # cut to length assert len(y_cut)%af == 0 y_shaped = y_cut.reshape( ( int(len(y_cut)/af), af) ) y_averaged = np.average(y_shaped,axis=1) # average var = np.var(y_averaged, ddof=1) return var/pow(oadev_x,2.0)
[ "def", "b1_noise_id", "(", "x", ",", "af", ",", "rate", ")", ":", "(", "taus", ",", "devs", ",", "errs", ",", "ns", ")", "=", "at", ".", "adev", "(", "x", ",", "taus", "=", "[", "af", "*", "rate", "]", ",", "data_type", "=", "\"phase\"", ",", "rate", "=", "rate", ")", "oadev_x", "=", "devs", "[", "0", "]", "y", "=", "np", ".", "diff", "(", "x", ")", "y_cut", "=", "np", ".", "array", "(", "y", "[", ":", "len", "(", "y", ")", "-", "(", "len", "(", "y", ")", "%", "af", ")", "]", ")", "# cut to length", "assert", "len", "(", "y_cut", ")", "%", "af", "==", "0", "y_shaped", "=", "y_cut", ".", "reshape", "(", "(", "int", "(", "len", "(", "y_cut", ")", "/", "af", ")", ",", "af", ")", ")", "y_averaged", "=", "np", ".", "average", "(", "y_shaped", ",", "axis", "=", "1", ")", "# average", "var", "=", "np", ".", "var", "(", "y_averaged", ",", "ddof", "=", "1", ")", "return", "var", "/", "pow", "(", "oadev_x", ",", "2.0", ")" ]
B1 ratio for noise identification ratio of Standard Variance to AVAR
[ "B1", "ratio", "for", "noise", "identification", "ratio", "of", "Standard", "Variace", "to", "AVAR" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/b1_noise_id_figure.py#L5-L19
train
231,775
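b1_noise_id lives in an example script rather than the package, so this sketch assumes the function from the record above is already defined in the current scope:

import numpy as np

x = np.cumsum(np.random.randn(10000))   # random-walk phase, i.e. white FM noise
ratio = b1_noise_id(x, af=4, rate=1.0)  # B1 ratio used to classify the noise type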
aewallin/allantools
allantools/plot.py
Plot.plot
def plot(self, atDataset, errorbars=False, grid=False): """ use matplotlib methods for plotting Parameters ---------- atDataset : allantools.Dataset() a dataset with computed data errorbars : boolean Plot errorbars. Defaults to False grid : boolean Plot grid. Defaults to False """ if errorbars: self.ax.errorbar(atDataset.out["taus"], atDataset.out["stat"], yerr=atDataset.out["stat_err"], ) else: self.ax.plot(atDataset.out["taus"], atDataset.out["stat"], ) self.ax.set_xlabel("Tau") self.ax.set_ylabel(atDataset.out["stat_id"]) self.ax.grid(grid, which="minor", ls="-", color='0.65') self.ax.grid(grid, which="major", ls="-", color='0.25')
python
def plot(self, atDataset, errorbars=False, grid=False): """ use matplotlib methods for plotting Parameters ---------- atDataset : allantools.Dataset() a dataset with computed data errorbars : boolean Plot errorbars. Defaults to False grid : boolean Plot grid. Defaults to False """ if errorbars: self.ax.errorbar(atDataset.out["taus"], atDataset.out["stat"], yerr=atDataset.out["stat_err"], ) else: self.ax.plot(atDataset.out["taus"], atDataset.out["stat"], ) self.ax.set_xlabel("Tau") self.ax.set_ylabel(atDataset.out["stat_id"]) self.ax.grid(grid, which="minor", ls="-", color='0.65') self.ax.grid(grid, which="major", ls="-", color='0.25')
[ "def", "plot", "(", "self", ",", "atDataset", ",", "errorbars", "=", "False", ",", "grid", "=", "False", ")", ":", "if", "errorbars", ":", "self", ".", "ax", ".", "errorbar", "(", "atDataset", ".", "out", "[", "\"taus\"", "]", ",", "atDataset", ".", "out", "[", "\"stat\"", "]", ",", "yerr", "=", "atDataset", ".", "out", "[", "\"stat_err\"", "]", ",", ")", "else", ":", "self", ".", "ax", ".", "plot", "(", "atDataset", ".", "out", "[", "\"taus\"", "]", ",", "atDataset", ".", "out", "[", "\"stat\"", "]", ",", ")", "self", ".", "ax", ".", "set_xlabel", "(", "\"Tau\"", ")", "self", ".", "ax", ".", "set_ylabel", "(", "atDataset", ".", "out", "[", "\"stat_id\"", "]", ")", "self", ".", "ax", ".", "grid", "(", "grid", ",", "which", "=", "\"minor\"", ",", "ls", "=", "\"-\"", ",", "color", "=", "'0.65'", ")", "self", ".", "ax", ".", "grid", "(", "grid", ",", "which", "=", "\"major\"", ",", "ls", "=", "\"-\"", ",", "color", "=", "'0.25'", ")" ]
use matplotlib methods for plotting Parameters ---------- atDataset : allantools.Dataset() a dataset with computed data errorbars : boolean Plot errorbars. Defaults to False grid : boolean Plot grid. Defaults to False
[ "use", "matplotlib", "methods", "for", "plotting" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/plot.py#L66-L92
train
231,776
aewallin/allantools
allantools/ci.py
greenhall_table2
def greenhall_table2(alpha, d): """ Table 2 from Greenhall 2004 """ row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6 assert(row_idx in [0, 1, 2, 3, 4, 5, 6]) col_idx = int(d-1) table2 = [[(3.0/2.0, 1.0/2.0), (35.0/18.0, 1.0), (231.0/100.0, 3.0/2.0)], # alpha=+2 [(78.6, 25.2), (790.0, 410.0), (9950.0, 6520.0)], [(2.0/3.0, 1.0/6.0), (2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0)], # alpha=0 [(-1, -1), (0.852, 0.375), (0.997, 0.617)], # -1 [(-1, -1), (1.079, 0.368), (1.033, 0.607)], #-2 [(-1, -1), (-1, -1), (1.053, 0.553)], #-3 [(-1, -1), (-1, -1), (1.302, 0.535)], # alpha=-4 ] #print("table2 = ", table2[row_idx][col_idx]) return table2[row_idx][col_idx]
python
def greenhall_table2(alpha, d): """ Table 2 from Greenhall 2004 """ row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6 assert(row_idx in [0, 1, 2, 3, 4, 5, 6]) col_idx = int(d-1) table2 = [[(3.0/2.0, 1.0/2.0), (35.0/18.0, 1.0), (231.0/100.0, 3.0/2.0)], # alpha=+2 [(78.6, 25.2), (790.0, 410.0), (9950.0, 6520.0)], [(2.0/3.0, 1.0/6.0), (2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0)], # alpha=0 [(-1, -1), (0.852, 0.375), (0.997, 0.617)], # -1 [(-1, -1), (1.079, 0.368), (1.033, 0.607)], #-2 [(-1, -1), (-1, -1), (1.053, 0.553)], #-3 [(-1, -1), (-1, -1), (1.302, 0.535)], # alpha=-4 ] #print("table2 = ", table2[row_idx][col_idx]) return table2[row_idx][col_idx]
[ "def", "greenhall_table2", "(", "alpha", ",", "d", ")", ":", "row_idx", "=", "int", "(", "-", "alpha", "+", "2", ")", "# map 2-> row0 and -4-> row6", "assert", "(", "row_idx", "in", "[", "0", ",", "1", ",", "2", ",", "3", ",", "4", ",", "5", "]", ")", "col_idx", "=", "int", "(", "d", "-", "1", ")", "table2", "=", "[", "[", "(", "3.0", "/", "2.0", ",", "1.0", "/", "2.0", ")", ",", "(", "35.0", "/", "18.0", ",", "1.0", ")", ",", "(", "231.0", "/", "100.0", ",", "3.0", "/", "2.0", ")", "]", ",", "# alpha=+2", "[", "(", "78.6", ",", "25.2", ")", ",", "(", "790.0", ",", "410.0", ")", ",", "(", "9950.0", ",", "6520.0", ")", "]", ",", "[", "(", "2.0", "/", "3.0", ",", "1.0", "/", "6.0", ")", ",", "(", "2.0", "/", "3.0", ",", "1.0", "/", "3.0", ")", ",", "(", "7.0", "/", "9.0", ",", "1.0", "/", "2.0", ")", "]", ",", "# alpha=0", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "0.852", ",", "0.375", ")", ",", "(", "0.997", ",", "0.617", ")", "]", ",", "# -1", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.079", ",", "0.368", ")", ",", "(", "1.033", ",", "0.607", ")", "]", ",", "#-2", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.053", ",", "0.553", ")", "]", ",", "#-3", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.302", ",", "0.535", ")", "]", ",", "# alpha=-4", "]", "#print(\"table2 = \", table2[row_idx][col_idx])", "return", "table2", "[", "row_idx", "]", "[", "col_idx", "]" ]
Table 2 from Greenhall 2004
[ "Table", "2", "from", "Greenhall", "2004" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L662-L676
train
231,777
aewallin/allantools
allantools/ci.py
greenhall_table1
def greenhall_table1(alpha, d): """ Table 1 from Greenhall 2004 """ row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6 col_idx = int(d-1) table1 = [[(2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0), (22.0/25.0, 2.0/3.0)], # alpha=+2 [(0.840, 0.345), (0.997, 0.616), (1.141, 0.843)], [(1.079, 0.368), (1.033, 0.607), (1.184, 0.848)], [(-1, -1), (1.048, 0.534), (1.180, 0.816)], # -1 [(-1, -1), (1.302, 0.535), (1.175, 0.777)], #-2 [(-1, -1), (-1, -1), (1.194, 0.703)], #-3 [(-1, -1), (-1, -1), (1.489, 0.702)], # alpha=-4 ] #print("table1 = ", table1[row_idx][col_idx]) return table1[row_idx][col_idx]
python
def greenhall_table1(alpha, d): """ Table 1 from Greenhall 2004 """ row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6 col_idx = int(d-1) table1 = [[(2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0), (22.0/25.0, 2.0/3.0)], # alpha=+2 [(0.840, 0.345), (0.997, 0.616), (1.141, 0.843)], [(1.079, 0.368), (1.033, 0.607), (1.184, 0.848)], [(-1, -1), (1.048, 0.534), (1.180, 0.816)], # -1 [(-1, -1), (1.302, 0.535), (1.175, 0.777)], #-2 [(-1, -1), (-1, -1), (1.194, 0.703)], #-3 [(-1, -1), (-1, -1), (1.489, 0.702)], # alpha=-4 ] #print("table1 = ", table1[row_idx][col_idx]) return table1[row_idx][col_idx]
[ "def", "greenhall_table1", "(", "alpha", ",", "d", ")", ":", "row_idx", "=", "int", "(", "-", "alpha", "+", "2", ")", "# map 2-> row0 and -4-> row6", "col_idx", "=", "int", "(", "d", "-", "1", ")", "table1", "=", "[", "[", "(", "2.0", "/", "3.0", ",", "1.0", "/", "3.0", ")", ",", "(", "7.0", "/", "9.0", ",", "1.0", "/", "2.0", ")", ",", "(", "22.0", "/", "25.0", ",", "2.0", "/", "3.0", ")", "]", ",", "# alpha=+2", "[", "(", "0.840", ",", "0.345", ")", ",", "(", "0.997", ",", "0.616", ")", ",", "(", "1.141", ",", "0.843", ")", "]", ",", "[", "(", "1.079", ",", "0.368", ")", ",", "(", "1.033", ",", "0.607", ")", ",", "(", "1.184", ",", "0.848", ")", "]", ",", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.048", ",", "0.534", ")", ",", "(", "1.180", ",", "0.816", ")", "]", ",", "# -1", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.302", ",", "0.535", ")", ",", "(", "1.175", ",", "0.777", ")", "]", ",", "#-2", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.194", ",", "0.703", ")", "]", ",", "#-3", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.489", ",", "0.702", ")", "]", ",", "# alpha=-4", "]", "#print(\"table1 = \", table1[row_idx][col_idx])", "return", "table1", "[", "row_idx", "]", "[", "col_idx", "]" ]
Table 1 from Greenhall 2004
[ "Table", "1", "from", "Greenhall", "2004" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L678-L691
train
231,778
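A lookup sketch for both tables (import path assumed from the records; expected values read straight off the tables above):

from allantools.ci import greenhall_table1, greenhall_table2

# alpha = 0 (white FM), d = 2 (second difference):
assert greenhall_table1(alpha=0, d=2) == (1.033, 0.607)
assert greenhall_table2(alpha=0, d=2) == (2.0/3.0, 1.0/3.0)
# (-1, -1) entries mark alpha/d combinations Greenhall 2004 does not tabulate.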
aewallin/allantools
allantools/ci.py
edf_mtotdev
def edf_mtotdev(N, m, alpha): """ Equivalent degrees of freedom for Modified Total Deviation NIST SP1065 page 41, Table 8 """ assert(alpha in [2, 1, 0, -1, -2]) NIST_SP1065_table8 = [(1.90, 2.1), (1.20, 1.40), (1.10, 1.2), (0.85, 0.50), (0.75, 0.31)] #(b, c) = NIST_SP1065_table8[ abs(alpha-2) ] (b, c) = NIST_SP1065_table8[abs(alpha-2)] edf = b*(float(N)/float(m))-c print("mtotdev b,c= ", (b, c), " edf=", edf) return edf
python
def edf_mtotdev(N, m, alpha): """ Equivalent degrees of freedom for Modified Total Deviation NIST SP1065 page 41, Table 8 """ assert(alpha in [2, 1, 0, -1, -2]) NIST_SP1065_table8 = [(1.90, 2.1), (1.20, 1.40), (1.10, 1.2), (0.85, 0.50), (0.75, 0.31)] #(b, c) = NIST_SP1065_table8[ abs(alpha-2) ] (b, c) = NIST_SP1065_table8[abs(alpha-2)] edf = b*(float(N)/float(m))-c print("mtotdev b,c= ", (b, c), " edf=", edf) return edf
[ "def", "edf_mtotdev", "(", "N", ",", "m", ",", "alpha", ")", ":", "assert", "(", "alpha", "in", "[", "2", ",", "1", ",", "0", ",", "-", "1", ",", "-", "2", "]", ")", "NIST_SP1065_table8", "=", "[", "(", "1.90", ",", "2.1", ")", ",", "(", "1.20", ",", "1.40", ")", ",", "(", "1.10", ",", "1.2", ")", ",", "(", "0.85", ",", "0.50", ")", ",", "(", "0.75", ",", "0.31", ")", "]", "#(b, c) = NIST_SP1065_table8[ abs(alpha-2) ]", "(", "b", ",", "c", ")", "=", "NIST_SP1065_table8", "[", "abs", "(", "alpha", "-", "2", ")", "]", "edf", "=", "b", "*", "(", "float", "(", "N", ")", "/", "float", "(", "m", ")", ")", "-", "c", "print", "(", "\"mtotdev b,c= \"", ",", "(", "b", ",", "c", ")", ",", "\" edf=\"", ",", "edf", ")", "return", "edf" ]
Equivalent degrees of freedom for Modified Total Deviation NIST SP1065 page 41, Table 8
[ "Equivalent", "degrees", "of", "freedom", "for", "Modified", "Total", "Deviation", "NIST", "SP1065", "page", "41", "Table", "8" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L710-L721
train
231,779
aewallin/allantools
allantools/ci.py
edf_simple
def edf_simple(N, m, alpha): """Equivalent degrees of freedom. Simple approximate formulae. Parameters ---------- N : int the number of phase samples m : int averaging factor, tau = m * tau0 alpha: int exponent of f for the frequency PSD: 'wp' returns white phase noise. alpha=+2 'wf' returns white frequency noise. alpha= 0 'fp' returns flicker phase noise. alpha=+1 'ff' returns flicker frequency noise. alpha=-1 'rf' returns random walk frequency noise. alpha=-2 If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Notes ----- S. Stein, Frequency and Time - Their Measurement and Characterization. Precision Frequency Control Vol 2, 1985, pp 191-416. http://tf.boulder.nist.gov/general/pdf/666.pdf Returns ------- edf : float Equivalent degrees of freedom """ N = float(N) m = float(m) if alpha in [2, 1, 0, -1, -2]: # NIST SP 1065, Table 5 if alpha == +2: edf = (N + 1) * (N - 2*m) / (2 * (N - m)) if alpha == 0: edf = (((3 * (N - 1) / (2 * m)) - (2 * (N - 2) / N)) * ((4*pow(m, 2)) / ((4*pow(m, 2)) + 5))) if alpha == 1: a = (N - 1)/(2 * m) b = (2 * m + 1) * (N - 1) / 4 edf = np.exp(np.sqrt(np.log(a) * np.log(b))) if alpha == -1: if m == 1: edf = 2 * (N - 2) /(2.3 * N - 4.9) if m >= 2: edf = 5 * N**2 / (4 * m * (N + (3 * m))) if alpha == -2: a = (N - 2) / (m * (N - 3)**2) b = (N - 1)**2 c = 3 * m * (N - 1) d = 4 * m **2 edf = a * (b - c + d) else: edf = (N - 1) print("Noise type not recognized. Defaulting to N - 1 degrees of freedom.") return edf
python
def edf_simple(N, m, alpha): """Equivalent degrees of freedom. Simple approximate formulae. Parameters ---------- N : int the number of phase samples m : int averaging factor, tau = m * tau0 alpha: int exponent of f for the frequency PSD: 'wp' returns white phase noise. alpha=+2 'wf' returns white frequency noise. alpha= 0 'fp' returns flicker phase noise. alpha=+1 'ff' returns flicker frequency noise. alpha=-1 'rf' returns random walk frequency noise. alpha=-2 If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Notes ----- S. Stein, Frequency and Time - Their Measurement and Characterization. Precision Frequency Control Vol 2, 1985, pp 191-416. http://tf.boulder.nist.gov/general/pdf/666.pdf Returns ------- edf : float Equivalent degrees of freedom """ N = float(N) m = float(m) if alpha in [2, 1, 0, -1, -2]: # NIST SP 1065, Table 5 if alpha == +2: edf = (N + 1) * (N - 2*m) / (2 * (N - m)) if alpha == 0: edf = (((3 * (N - 1) / (2 * m)) - (2 * (N - 2) / N)) * ((4*pow(m, 2)) / ((4*pow(m, 2)) + 5))) if alpha == 1: a = (N - 1)/(2 * m) b = (2 * m + 1) * (N - 1) / 4 edf = np.exp(np.sqrt(np.log(a) * np.log(b))) if alpha == -1: if m == 1: edf = 2 * (N - 2) /(2.3 * N - 4.9) if m >= 2: edf = 5 * N**2 / (4 * m * (N + (3 * m))) if alpha == -2: a = (N - 2) / (m * (N - 3)**2) b = (N - 1)**2 c = 3 * m * (N - 1) d = 4 * m **2 edf = a * (b - c + d) else: edf = (N - 1) print("Noise type not recognized. Defaulting to N - 1 degrees of freedom.") return edf
[ "def", "edf_simple", "(", "N", ",", "m", ",", "alpha", ")", ":", "N", "=", "float", "(", "N", ")", "m", "=", "float", "(", "m", ")", "if", "alpha", "in", "[", "2", ",", "1", ",", "0", ",", "-", "1", ",", "-", "2", "]", ":", "# NIST SP 1065, Table 5", "if", "alpha", "==", "+", "2", ":", "edf", "=", "(", "N", "+", "1", ")", "*", "(", "N", "-", "2", "*", "m", ")", "/", "(", "2", "*", "(", "N", "-", "m", ")", ")", "if", "alpha", "==", "0", ":", "edf", "=", "(", "(", "(", "3", "*", "(", "N", "-", "1", ")", "/", "(", "2", "*", "m", ")", ")", "-", "(", "2", "*", "(", "N", "-", "2", ")", "/", "N", ")", ")", "*", "(", "(", "4", "*", "pow", "(", "m", ",", "2", ")", ")", "/", "(", "(", "4", "*", "pow", "(", "m", ",", "2", ")", ")", "+", "5", ")", ")", ")", "if", "alpha", "==", "1", ":", "a", "=", "(", "N", "-", "1", ")", "/", "(", "2", "*", "m", ")", "b", "=", "(", "2", "*", "m", "+", "1", ")", "*", "(", "N", "-", "1", ")", "/", "4", "edf", "=", "np", ".", "exp", "(", "np", ".", "sqrt", "(", "np", ".", "log", "(", "a", ")", "*", "np", ".", "log", "(", "b", ")", ")", ")", "if", "alpha", "==", "-", "1", ":", "if", "m", "==", "1", ":", "edf", "=", "2", "*", "(", "N", "-", "2", ")", "/", "(", "2.3", "*", "N", "-", "4.9", ")", "if", "m", ">=", "2", ":", "edf", "=", "5", "*", "N", "**", "2", "/", "(", "4", "*", "m", "*", "(", "N", "+", "(", "3", "*", "m", ")", ")", ")", "if", "alpha", "==", "-", "2", ":", "a", "=", "(", "N", "-", "2", ")", "/", "(", "m", "*", "(", "N", "-", "3", ")", "**", "2", ")", "b", "=", "(", "N", "-", "1", ")", "**", "2", "c", "=", "3", "*", "m", "*", "(", "N", "-", "1", ")", "d", "=", "4", "*", "m", "**", "2", "edf", "=", "a", "*", "(", "b", "-", "c", "+", "d", ")", "else", ":", "edf", "=", "(", "N", "-", "1", ")", "print", "(", "\"Noise type not recognized. Defaulting to N - 1 degrees of freedom.\"", ")", "return", "edf" ]
Equivalent degrees of freedom. Simple approximate formulae. Parameters ---------- N : int the number of phase samples m : int averaging factor, tau = m * tau0 alpha: int exponent of f for the frequency PSD: 'wp' returns white phase noise. alpha=+2 'wf' returns white frequency noise. alpha= 0 'fp' returns flicker phase noise. alpha=+1 'ff' returns flicker frequency noise. alpha=-1 'rf' returns random walk frequency noise. alpha=-2 If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Notes ----- S. Stein, Frequency and Time - Their Measurement and Characterization. Precision Frequency Control Vol 2, 1985, pp 191-416. http://tf.boulder.nist.gov/general/pdf/666.pdf Returns ------- edf : float Equivalent degrees of freedom
[ "Equivalent", "degrees", "of", "freedom", ".", "Simple", "approximate", "formulae", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L723-L789
train
231,780
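A worked-number sketch (import path assumed from the record):

from allantools.ci import edf_simple

# White PM (alpha = +2), N = 1024 phase samples, averaging factor m = 8:
edf = edf_simple(1024, 8, 2)
# edf = (N+1)(N-2m) / (2(N-m)) = 1025*1008 / (2*1016) ≈ 508.5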
aewallin/allantools
examples/gradev-demo.py
example1
def example1(): """ Compute the GRADEV of a white phase noise. Compares two different scenarios. 1) The original data and 2) ADEV estimate with gap robust ADEV. """ N = 1000 f = 1 y = np.random.randn(1,N)[0,:] x = [xx for xx in np.linspace(1,len(y),len(y))] x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h],label='GRADEV, no gaps') y[int(np.floor(0.4*N)):int(np.floor(0.6*N))] = np.NaN # Simulate missing data x_ax, y_ax, (err_l, err_h) , ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h], label='GRADEV, with gaps') plt.xscale('log') plt.yscale('log') plt.grid() plt.legend() plt.xlabel('Tau / s') plt.ylabel('Overlapping Allan deviation') plt.show()
python
def example1(): """ Compute the GRADEV of a white phase noise. Compares two different scenarios. 1) The original data and 2) ADEV estimate with gap robust ADEV. """ N = 1000 f = 1 y = np.random.randn(1,N)[0,:] x = [xx for xx in np.linspace(1,len(y),len(y))] x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h],label='GRADEV, no gaps') y[int(np.floor(0.4*N)):int(np.floor(0.6*N))] = np.NaN # Simulate missing data x_ax, y_ax, (err_l, err_h) , ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h], label='GRADEV, with gaps') plt.xscale('log') plt.yscale('log') plt.grid() plt.legend() plt.xlabel('Tau / s') plt.ylabel('Overlapping Allan deviation') plt.show()
[ "def", "example1", "(", ")", ":", "N", "=", "1000", "f", "=", "1", "y", "=", "np", ".", "random", ".", "randn", "(", "1", ",", "N", ")", "[", "0", ",", ":", "]", "x", "=", "[", "xx", "for", "xx", "in", "np", ".", "linspace", "(", "1", ",", "len", "(", "y", ")", ",", "len", "(", "y", ")", ")", "]", "x_ax", ",", "y_ax", ",", "(", "err_l", ",", "err_h", ")", ",", "ns", "=", "allan", ".", "gradev", "(", "y", ",", "data_type", "=", "'phase'", ",", "rate", "=", "f", ",", "taus", "=", "x", ")", "plt", ".", "errorbar", "(", "x_ax", ",", "y_ax", ",", "yerr", "=", "[", "err_l", ",", "err_h", "]", ",", "label", "=", "'GRADEV, no gaps'", ")", "y", "[", "int", "(", "np", ".", "floor", "(", "0.4", "*", "N", ")", ")", ":", "int", "(", "np", ".", "floor", "(", "0.6", "*", "N", ")", ")", "]", "=", "np", ".", "NaN", "# Simulate missing data", "x_ax", ",", "y_ax", ",", "(", "err_l", ",", "err_h", ")", ",", "ns", "=", "allan", ".", "gradev", "(", "y", ",", "data_type", "=", "'phase'", ",", "rate", "=", "f", ",", "taus", "=", "x", ")", "plt", ".", "errorbar", "(", "x_ax", ",", "y_ax", ",", "yerr", "=", "[", "err_l", ",", "err_h", "]", ",", "label", "=", "'GRADEV, with gaps'", ")", "plt", ".", "xscale", "(", "'log'", ")", "plt", ".", "yscale", "(", "'log'", ")", "plt", ".", "grid", "(", ")", "plt", ".", "legend", "(", ")", "plt", ".", "xlabel", "(", "'Tau / s'", ")", "plt", ".", "ylabel", "(", "'Overlapping Allan deviation'", ")", "plt", ".", "show", "(", ")" ]
Compute the GRADEV of a white phase noise. Compares two different scenarios. 1) The original data and 2) ADEV estimate with gap robust ADEV.
[ "Compute", "the", "GRADEV", "of", "a", "white", "phase", "noise", ".", "Compares", "two", "different", "scenarios", ".", "1", ")", "The", "original", "data", "and", "2", ")", "ADEV", "estimate", "with", "gap", "robust", "ADEV", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/gradev-demo.py#L10-L32
train
231,781
aewallin/allantools
examples/gradev-demo.py
example2
def example2(): """ Compute the GRADEV of a nonstationary white phase noise. """ N=1000 # number of samples f = 1 # data samples per second s=1+5/N*np.arange(0,N) y=s*np.random.randn(1,N)[0,:] x = [xx for xx in np.linspace(1,len(y),len(y))] x_ax, y_ax, (err_l, err_h) , ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.loglog(x_ax, y_ax,'b.',label="No gaps") y[int(0.4*N):int(0.6*N,)] = np.NaN # Simulate missing data x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.loglog(x_ax, y_ax,'g.',label="With gaps") plt.grid() plt.legend() plt.xlabel('Tau / s') plt.ylabel('Overlapping Allan deviation') plt.show()
python
def example2(): """ Compute the GRADEV of a nonstationary white phase noise. """ N=1000 # number of samples f = 1 # data samples per second s=1+5/N*np.arange(0,N) y=s*np.random.randn(1,N)[0,:] x = [xx for xx in np.linspace(1,len(y),len(y))] x_ax, y_ax, (err_l, err_h) , ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.loglog(x_ax, y_ax,'b.',label="No gaps") y[int(0.4*N):int(0.6*N,)] = np.NaN # Simulate missing data x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.loglog(x_ax, y_ax,'g.',label="With gaps") plt.grid() plt.legend() plt.xlabel('Tau / s') plt.ylabel('Overlapping Allan deviation') plt.show()
[ "def", "example2", "(", ")", ":", "N", "=", "1000", "# number of samples", "f", "=", "1", "# data samples per second", "s", "=", "1", "+", "5", "/", "N", "*", "np", ".", "arange", "(", "0", ",", "N", ")", "y", "=", "s", "*", "np", ".", "random", ".", "randn", "(", "1", ",", "N", ")", "[", "0", ",", ":", "]", "x", "=", "[", "xx", "for", "xx", "in", "np", ".", "linspace", "(", "1", ",", "len", "(", "y", ")", ",", "len", "(", "y", ")", ")", "]", "x_ax", ",", "y_ax", ",", "(", "err_l", ",", "err_h", ")", ",", "ns", "=", "allan", ".", "gradev", "(", "y", ",", "data_type", "=", "'phase'", ",", "rate", "=", "f", ",", "taus", "=", "x", ")", "plt", ".", "loglog", "(", "x_ax", ",", "y_ax", ",", "'b.'", ",", "label", "=", "\"No gaps\"", ")", "y", "[", "int", "(", "0.4", "*", "N", ")", ":", "int", "(", "0.6", "*", "N", ",", ")", "]", "=", "np", ".", "NaN", "# Simulate missing data", "x_ax", ",", "y_ax", ",", "(", "err_l", ",", "err_h", ")", ",", "ns", "=", "allan", ".", "gradev", "(", "y", ",", "data_type", "=", "'phase'", ",", "rate", "=", "f", ",", "taus", "=", "x", ")", "plt", ".", "loglog", "(", "x_ax", ",", "y_ax", ",", "'g.'", ",", "label", "=", "\"With gaps\"", ")", "plt", ".", "grid", "(", ")", "plt", ".", "legend", "(", ")", "plt", ".", "xlabel", "(", "'Tau / s'", ")", "plt", ".", "ylabel", "(", "'Overlapping Allan deviation'", ")", "plt", ".", "show", "(", ")" ]
Compute the GRADEV of a nonstationary white phase noise.
[ "Compute", "the", "GRADEV", "of", "a", "nonstationary", "white", "phase", "noise", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/gradev-demo.py#L34-L52
train
231,782
aewallin/allantools
allantools/allantools.py
tdev
def tdev(data, rate=1.0, data_type="phase", taus=None): """ Time deviation. Based on modified Allan variance. .. math:: \\sigma^2_{TDEV}( \\tau ) = { \\tau^2 \\over 3 } \\sigma^2_{MDEV}( \\tau ) Note that TDEV has a unit of seconds. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus, tdev, tdev_error, ns): tuple Tuple of values taus: np.array Tau values for which td computed tdev: np.array Computed time deviations (in seconds) for each tau value tdev_errors: np.array Time deviation errors ns: np.array Values of N used in mdev_phase() Notes ----- http://en.wikipedia.org/wiki/Time_deviation """ phase = input_to_phase(data, rate, data_type) (taus, md, mde, ns) = mdev(phase, rate=rate, taus=taus) td = taus * md / np.sqrt(3.0) tde = td / np.sqrt(ns) return taus, td, tde, ns
python
def tdev(data, rate=1.0, data_type="phase", taus=None): """ Time deviation. Based on modified Allan variance. .. math:: \\sigma^2_{TDEV}( \\tau ) = { \\tau^2 \\over 3 } \\sigma^2_{MDEV}( \\tau ) Note that TDEV has a unit of seconds. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus, tdev, tdev_error, ns): tuple Tuple of values taus: np.array Tau values for which td computed tdev: np.array Computed time deviations (in seconds) for each tau value tdev_errors: np.array Time deviation errors ns: np.array Values of N used in mdev_phase() Notes ----- http://en.wikipedia.org/wiki/Time_deviation """ phase = input_to_phase(data, rate, data_type) (taus, md, mde, ns) = mdev(phase, rate=rate, taus=taus) td = taus * md / np.sqrt(3.0) tde = td / np.sqrt(ns) return taus, td, tde, ns
[ "def", "tdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "taus", ",", "md", ",", "mde", ",", "ns", ")", "=", "mdev", "(", "phase", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "td", "=", "taus", "*", "md", "/", "np", ".", "sqrt", "(", "3.0", ")", "tde", "=", "td", "/", "np", ".", "sqrt", "(", "ns", ")", "return", "taus", ",", "td", ",", "tde", ",", "ns" ]
Time deviation. Based on modified Allan variance. .. math:: \\sigma^2_{TDEV}( \\tau ) = { \\tau^2 \\over 3 } \\sigma^2_{MDEV}( \\tau ) Note that TDEV has a unit of seconds. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus, tdev, tdev_error, ns): tuple Tuple of values taus: np.array Tau values for which td computed tdev: np.array Computed time deviations (in seconds) for each tau value tdev_errors: np.array Time deviation errors ns: np.array Values of N used in mdev_phase() Notes ----- http://en.wikipedia.org/wiki/Time_deviation
[ "Time", "deviation", ".", "Based", "on", "modified", "Allan", "variance", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L109-L155
train
231,783
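The tdev record above defines TDEV purely through MDEV, so the relation can be checked numerically. A minimal sketch, assuming allantools is installed; the white-phase-noise input, seed, and sample count are illustrative assumptions:

import numpy as np
import allantools

rng = np.random.default_rng(42)
phase = rng.standard_normal(10000)  # simulated white phase noise, seconds

# tdev() calls mdev() internally, so tdev(tau) = tau * mdev(tau) / sqrt(3)
# holds exactly, not just statistically.
taus, td, tde, ns = allantools.tdev(phase, rate=1.0, taus="octave")
taus2, md, mde, ns2 = allantools.mdev(phase, rate=1.0, taus="octave")
assert np.allclose(td, taus2 * md / np.sqrt(3.0))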
aewallin/allantools
allantools/allantools.py
mdev
def mdev(data, rate=1.0, data_type="phase", taus=None):
    """  Modified Allan deviation.
    Used to distinguish between White and Flicker Phase Modulation.

    .. math::

        \\sigma^2_{MDEV}(m\\tau_0) = { 1 \\over 2 m^2 (m \\tau_0 )^2 (N-3m+1) }
        \\sum_{j=1}^{N-3m+1} \\lbrace
        \\sum_{i=j}^{j+m-1} {x}_{i+2m} - 2x_{i+m} + x_{i} \\rbrace^2

    Parameters
    ----------
    data: np.array
        Input data. Provide either phase or frequency (fractional,
        adimensional).
    rate: float
        The sampling rate for data, in Hz. Defaults to 1.0
    data_type: {'phase', 'freq'}
        Data type, i.e. phase or frequency. Defaults to "phase".
    taus: np.array
        Array of tau values, in seconds, for which to compute statistic.
        Optionally set taus=["all"|"octave"|"decade"] for automatic
        tau-list generation.

    Returns
    -------
    (taus2, md, mde, ns): tuple
        Tuple of values
    taus2: np.array
        Tau values for which td computed
    md: np.array
        Computed mdev for each tau value
    mde: np.array
        mdev errors
    ns: np.array
        Values of N used in each mdev calculation

    Notes
    -----
    see http://www.leapsecond.com/tools/adev_lib.c

    NIST SP 1065 eqn (14) and (15), page 17
    """
    phase = input_to_phase(data, rate, data_type)
    (phase, ms, taus_used) = tau_generator(phase, rate, taus=taus)
    data, taus = np.array(phase), np.array(taus)

    md = np.zeros_like(ms)
    mderr = np.zeros_like(ms)
    ns = np.zeros_like(ms)

    # this is a 'loop-unrolled' algorithm following
    # http://www.leapsecond.com/tools/adev_lib.c
    for idx, m in enumerate(ms):
        m = int(m)  # without this we get: VisibleDeprecationWarning:
                    # using a non-integer number instead of an integer
                    # will result in an error in the future
        tau = taus_used[idx]

        # First loop sum
        d0 = phase[0:m]
        d1 = phase[m:2*m]
        d2 = phase[2*m:3*m]
        e = min(len(d0), len(d1), len(d2))

        v = np.sum(d2[:e] - 2* d1[:e] + d0[:e])
        s = v * v

        # Second part of sum
        d3 = phase[3*m:]
        d2 = phase[2*m:]
        d1 = phase[1*m:]
        d0 = phase[0:]

        e = min(len(d0), len(d1), len(d2), len(d3))
        n = e + 1

        v_arr = v + np.cumsum(d3[:e] - 3 * d2[:e] + 3 * d1[:e] - d0[:e])

        s = s + np.sum(v_arr * v_arr)
        s /= 2.0 * m * m * tau * tau * n
        s = np.sqrt(s)

        md[idx] = s
        mderr[idx] = (s / np.sqrt(n))
        ns[idx] = n

    return remove_small_ns(taus_used, md, mderr, ns)
python
def mdev(data, rate=1.0, data_type="phase", taus=None):
    """  Modified Allan deviation.
    Used to distinguish between White and Flicker Phase Modulation.

    .. math::

        \\sigma^2_{MDEV}(m\\tau_0) = { 1 \\over 2 m^2 (m \\tau_0 )^2 (N-3m+1) }
        \\sum_{j=1}^{N-3m+1} \\lbrace
        \\sum_{i=j}^{j+m-1} {x}_{i+2m} - 2x_{i+m} + x_{i} \\rbrace^2

    Parameters
    ----------
    data: np.array
        Input data. Provide either phase or frequency (fractional,
        adimensional).
    rate: float
        The sampling rate for data, in Hz. Defaults to 1.0
    data_type: {'phase', 'freq'}
        Data type, i.e. phase or frequency. Defaults to "phase".
    taus: np.array
        Array of tau values, in seconds, for which to compute statistic.
        Optionally set taus=["all"|"octave"|"decade"] for automatic
        tau-list generation.

    Returns
    -------
    (taus2, md, mde, ns): tuple
        Tuple of values
    taus2: np.array
        Tau values for which td computed
    md: np.array
        Computed mdev for each tau value
    mde: np.array
        mdev errors
    ns: np.array
        Values of N used in each mdev calculation

    Notes
    -----
    see http://www.leapsecond.com/tools/adev_lib.c

    NIST SP 1065 eqn (14) and (15), page 17
    """
    phase = input_to_phase(data, rate, data_type)
    (phase, ms, taus_used) = tau_generator(phase, rate, taus=taus)
    data, taus = np.array(phase), np.array(taus)

    md = np.zeros_like(ms)
    mderr = np.zeros_like(ms)
    ns = np.zeros_like(ms)

    # this is a 'loop-unrolled' algorithm following
    # http://www.leapsecond.com/tools/adev_lib.c
    for idx, m in enumerate(ms):
        m = int(m)  # without this we get: VisibleDeprecationWarning:
                    # using a non-integer number instead of an integer
                    # will result in an error in the future
        tau = taus_used[idx]

        # First loop sum
        d0 = phase[0:m]
        d1 = phase[m:2*m]
        d2 = phase[2*m:3*m]
        e = min(len(d0), len(d1), len(d2))

        v = np.sum(d2[:e] - 2* d1[:e] + d0[:e])
        s = v * v

        # Second part of sum
        d3 = phase[3*m:]
        d2 = phase[2*m:]
        d1 = phase[1*m:]
        d0 = phase[0:]

        e = min(len(d0), len(d1), len(d2), len(d3))
        n = e + 1

        v_arr = v + np.cumsum(d3[:e] - 3 * d2[:e] + 3 * d1[:e] - d0[:e])

        s = s + np.sum(v_arr * v_arr)
        s /= 2.0 * m * m * tau * tau * n
        s = np.sqrt(s)

        md[idx] = s
        mderr[idx] = (s / np.sqrt(n))
        ns[idx] = n

    return remove_small_ns(taus_used, md, mderr, ns)
[ "def", "mdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "ms", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", "=", "taus", ")", "data", ",", "taus", "=", "np", ".", "array", "(", "phase", ")", ",", "np", ".", "array", "(", "taus", ")", "md", "=", "np", ".", "zeros_like", "(", "ms", ")", "mderr", "=", "np", ".", "zeros_like", "(", "ms", ")", "ns", "=", "np", ".", "zeros_like", "(", "ms", ")", "# this is a 'loop-unrolled' algorithm following", "# http://www.leapsecond.com/tools/adev_lib.c", "for", "idx", ",", "m", "in", "enumerate", "(", "ms", ")", ":", "m", "=", "int", "(", "m", ")", "# without this we get: VisibleDeprecationWarning:", "# using a non-integer number instead of an integer", "# will result in an error in the future", "tau", "=", "taus_used", "[", "idx", "]", "# First loop sum", "d0", "=", "phase", "[", "0", ":", "m", "]", "d1", "=", "phase", "[", "m", ":", "2", "*", "m", "]", "d2", "=", "phase", "[", "2", "*", "m", ":", "3", "*", "m", "]", "e", "=", "min", "(", "len", "(", "d0", ")", ",", "len", "(", "d1", ")", ",", "len", "(", "d2", ")", ")", "v", "=", "np", ".", "sum", "(", "d2", "[", ":", "e", "]", "-", "2", "*", "d1", "[", ":", "e", "]", "+", "d0", "[", ":", "e", "]", ")", "s", "=", "v", "*", "v", "# Second part of sum", "d3", "=", "phase", "[", "3", "*", "m", ":", "]", "d2", "=", "phase", "[", "2", "*", "m", ":", "]", "d1", "=", "phase", "[", "1", "*", "m", ":", "]", "d0", "=", "phase", "[", "0", ":", "]", "e", "=", "min", "(", "len", "(", "d0", ")", ",", "len", "(", "d1", ")", ",", "len", "(", "d2", ")", ",", "len", "(", "d3", ")", ")", "n", "=", "e", "+", "1", "v_arr", "=", "v", "+", "np", ".", "cumsum", "(", "d3", "[", ":", "e", "]", "-", "3", "*", "d2", "[", ":", "e", "]", "+", "3", "*", "d1", "[", ":", "e", "]", "-", "d0", "[", ":", "e", "]", ")", "s", "=", "s", "+", "np", ".", "sum", "(", "v_arr", "*", "v_arr", ")", "s", "/=", "2.0", "*", "m", "*", "m", "*", "tau", "*", "tau", "*", "n", "s", "=", "np", ".", "sqrt", "(", "s", ")", "md", "[", "idx", "]", "=", "s", "mderr", "[", "idx", "]", "=", "(", "s", "/", "np", ".", "sqrt", "(", "n", ")", ")", "ns", "[", "idx", "]", "=", "n", "return", "remove_small_ns", "(", "taus_used", ",", "md", ",", "mderr", ",", "ns", ")" ]
Modified Allan deviation.
    Used to distinguish between White and Flicker Phase Modulation.

    .. math::

        \\sigma^2_{MDEV}(m\\tau_0) = { 1 \\over 2 m^2 (m \\tau_0 )^2 (N-3m+1) }
        \\sum_{j=1}^{N-3m+1} \\lbrace
        \\sum_{i=j}^{j+m-1} {x}_{i+2m} - 2x_{i+m} + x_{i} \\rbrace^2

    Parameters
    ----------
    data: np.array
        Input data. Provide either phase or frequency (fractional,
        adimensional).
    rate: float
        The sampling rate for data, in Hz. Defaults to 1.0
    data_type: {'phase', 'freq'}
        Data type, i.e. phase or frequency. Defaults to "phase".
    taus: np.array
        Array of tau values, in seconds, for which to compute statistic.
        Optionally set taus=["all"|"octave"|"decade"] for automatic
        tau-list generation.

    Returns
    -------
    (taus2, md, mde, ns): tuple
        Tuple of values
    taus2: np.array
        Tau values for which td computed
    md: np.array
        Computed mdev for each tau value
    mde: np.array
        mdev errors
    ns: np.array
        Values of N used in each mdev calculation

    Notes
    -----
    see http://www.leapsecond.com/tools/adev_lib.c

    NIST SP 1065 eqn (14) and (15), page 17
[ "Modified", "Allan", "deviation", ".", "Used", "to", "distinguish", "between", "White", "and", "Flicker", "Phase", "Modulation", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L157-L245
train
231,784
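As a slow cross-check of the loop-unrolled implementation above, the double sum of NIST SP 1065 eqn (14) can be evaluated directly for a single averaging factor. A minimal sketch; the data, seed, and m are illustrative assumptions:

import numpy as np
import allantools

rng = np.random.default_rng(1)
x = np.cumsum(rng.standard_normal(512))  # phase samples, tau0 = 1 s
m, tau0 = 4, 1.0
N = len(x)

# Direct evaluation of the double sum, with tau = m*tau0 and
# n = N - 3m + 1 outer terms, matching the fast version's normalization.
acc = 0.0
for j in range(N - 3*m + 1):
    inner = sum(x[i + 2*m] - 2.0*x[i + m] + x[i] for i in range(j, j + m))
    acc += inner**2
mdev_direct = np.sqrt(acc / (2.0 * m**2 * (m*tau0)**2 * (N - 3*m + 1)))

taus, md, mde, ns = allantools.mdev(x, rate=1.0/tau0, taus=[m*tau0])
assert np.isclose(md[0], mdev_direct)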
aewallin/allantools
allantools/allantools.py
adev
def adev(data, rate=1.0, data_type="phase", taus=None): """ Allan deviation. Classic - use only if required - relatively poor confidence. .. math:: \\sigma^2_{ADEV}(\\tau) = { 1 \\over 2 \\tau^2 } \\langle ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 \\rangle = { 1 \\over 2 (N-2) \\tau^2 } \\sum_{n=1}^{N-2} ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 where :math:`x_n` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau`, and with length :math:`N`. Or alternatively calculated from a time-series of fractional frequency: .. math:: \\sigma^{2}_{ADEV}(\\tau) = { 1 \\over 2 } \\langle ( \\bar{y}_{n+1} - \\bar{y}_n )^2 \\rangle where :math:`\\bar{y}_n` is the time-series of fractional frequency at averaging time :math:`\\tau` NIST SP 1065 eqn (6) and (7), pages 14 and 15 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, ad, ade, ns): tuple Tuple of values taus2: np.array Tau values for which td computed ad: np.array Computed adev for each tau value ade: np.array adev errors ns: np.array Values of N used in each adev calculation """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) ad = np.zeros_like(taus_used) ade = np.zeros_like(taus_used) adn = np.zeros_like(taus_used) for idx, mj in enumerate(m): # loop through each tau value m(j) (ad[idx], ade[idx], adn[idx]) = calc_adev_phase(phase, rate, mj, mj) return remove_small_ns(taus_used, ad, ade, adn)
python
def adev(data, rate=1.0, data_type="phase", taus=None): """ Allan deviation. Classic - use only if required - relatively poor confidence. .. math:: \\sigma^2_{ADEV}(\\tau) = { 1 \\over 2 \\tau^2 } \\langle ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 \\rangle = { 1 \\over 2 (N-2) \\tau^2 } \\sum_{n=1}^{N-2} ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 where :math:`x_n` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau`, and with length :math:`N`. Or alternatively calculated from a time-series of fractional frequency: .. math:: \\sigma^{2}_{ADEV}(\\tau) = { 1 \\over 2 } \\langle ( \\bar{y}_{n+1} - \\bar{y}_n )^2 \\rangle where :math:`\\bar{y}_n` is the time-series of fractional frequency at averaging time :math:`\\tau` NIST SP 1065 eqn (6) and (7), pages 14 and 15 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, ad, ade, ns): tuple Tuple of values taus2: np.array Tau values for which td computed ad: np.array Computed adev for each tau value ade: np.array adev errors ns: np.array Values of N used in each adev calculation """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) ad = np.zeros_like(taus_used) ade = np.zeros_like(taus_used) adn = np.zeros_like(taus_used) for idx, mj in enumerate(m): # loop through each tau value m(j) (ad[idx], ade[idx], adn[idx]) = calc_adev_phase(phase, rate, mj, mj) return remove_small_ns(taus_used, ad, ade, adn)
[ "def", "adev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "ad", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ade", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "adn", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "# loop through each tau value m(j)", "(", "ad", "[", "idx", "]", ",", "ade", "[", "idx", "]", ",", "adn", "[", "idx", "]", ")", "=", "calc_adev_phase", "(", "phase", ",", "rate", ",", "mj", ",", "mj", ")", "return", "remove_small_ns", "(", "taus_used", ",", "ad", ",", "ade", ",", "adn", ")" ]
Allan deviation. Classic - use only if required - relatively poor confidence. .. math:: \\sigma^2_{ADEV}(\\tau) = { 1 \\over 2 \\tau^2 } \\langle ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 \\rangle = { 1 \\over 2 (N-2) \\tau^2 } \\sum_{n=1}^{N-2} ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 where :math:`x_n` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau`, and with length :math:`N`. Or alternatively calculated from a time-series of fractional frequency: .. math:: \\sigma^{2}_{ADEV}(\\tau) = { 1 \\over 2 } \\langle ( \\bar{y}_{n+1} - \\bar{y}_n )^2 \\rangle where :math:`\\bar{y}_n` is the time-series of fractional frequency at averaging time :math:`\\tau` NIST SP 1065 eqn (6) and (7), pages 14 and 15 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, ad, ade, ns): tuple Tuple of values taus2: np.array Tau values for which td computed ad: np.array Computed adev for each tau value ade: np.array adev errors ns: np.array Values of N used in each adev calculation
[ "Allan", "deviation", ".", "Classic", "-", "use", "only", "if", "required", "-", "relatively", "poor", "confidence", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L247-L311
train
231,785
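For frequency input, pass data_type="freq". A quick sanity sketch: white FM noise should give ADEV falling roughly as tau**-0.5, which a log-log slope fit confirms. Seed and length are illustrative assumptions:

import numpy as np
import allantools

rng = np.random.default_rng(3)
y = rng.standard_normal(100000)  # white FM: fractional frequency at 1 Hz

taus, ad, ade, ns = allantools.adev(y, rate=1.0, data_type="freq",
                                    taus="octave")
slope, _ = np.polyfit(np.log10(taus), np.log10(ad), 1)
print(slope)  # expect a value near -0.5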
aewallin/allantools
allantools/allantools.py
ohdev
def ohdev(data, rate=1.0, data_type="phase", taus=None): """ Overlapping Hadamard deviation. Better confidence than normal Hadamard. .. math:: \\sigma^2_{OHDEV}(m\\tau_0) = { 1 \\over 6 (m \\tau_0 )^2 (N-3m) } \\sum_{i=1}^{N-3m} ( {x}_{i+3m} - 3x_{i+2m} + 3x_{i+m} - x_{i} )^2 where :math:`x_i` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau_0`, and with length :math:`N`. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, hd, hde, ns): tuple Tuple of values taus2: np.array Tau values for which td computed hd: np.array Computed hdev for each tau value hde: np.array hdev errors ns: np.array Values of N used in each hdev calculation """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) hdevs = np.zeros_like(taus_used) hdeverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): (hdevs[idx], hdeverrs[idx], ns[idx]) = calc_hdev_phase(phase, rate, mj, 1) return remove_small_ns(taus_used, hdevs, hdeverrs, ns)
python
def ohdev(data, rate=1.0, data_type="phase", taus=None): """ Overlapping Hadamard deviation. Better confidence than normal Hadamard. .. math:: \\sigma^2_{OHDEV}(m\\tau_0) = { 1 \\over 6 (m \\tau_0 )^2 (N-3m) } \\sum_{i=1}^{N-3m} ( {x}_{i+3m} - 3x_{i+2m} + 3x_{i+m} - x_{i} )^2 where :math:`x_i` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau_0`, and with length :math:`N`. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, hd, hde, ns): tuple Tuple of values taus2: np.array Tau values for which td computed hd: np.array Computed hdev for each tau value hde: np.array hdev errors ns: np.array Values of N used in each hdev calculation """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) hdevs = np.zeros_like(taus_used) hdeverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): (hdevs[idx], hdeverrs[idx], ns[idx]) = calc_hdev_phase(phase, rate, mj, 1) return remove_small_ns(taus_used, hdevs, hdeverrs, ns)
[ "def", "ohdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "hdevs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "hdeverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "(", "hdevs", "[", "idx", "]", ",", "hdeverrs", "[", "idx", "]", ",", "ns", "[", "idx", "]", ")", "=", "calc_hdev_phase", "(", "phase", ",", "rate", ",", "mj", ",", "1", ")", "return", "remove_small_ns", "(", "taus_used", ",", "hdevs", ",", "hdeverrs", ",", "ns", ")" ]
Overlapping Hadamard deviation. Better confidence than normal Hadamard. .. math:: \\sigma^2_{OHDEV}(m\\tau_0) = { 1 \\over 6 (m \\tau_0 )^2 (N-3m) } \\sum_{i=1}^{N-3m} ( {x}_{i+3m} - 3x_{i+2m} + 3x_{i+m} - x_{i} )^2 where :math:`x_i` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau_0`, and with length :math:`N`. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, hd, hde, ns): tuple Tuple of values taus2: np.array Tau values for which td computed hd: np.array Computed hdev for each tau value hde: np.array hdev errors ns: np.array Values of N used in each hdev calculation
[ "Overlapping", "Hadamard", "deviation", ".", "Better", "confidence", "than", "normal", "Hadamard", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L420-L471
train
231,786
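The confidence advantage over the plain Hadamard deviation comes from the larger term count n at each tau. A comparison sketch; allantools.hdev is assumed to exist alongside ohdev (the calc_hdev_phase helper below serves both), and the random-walk phase input is an illustrative assumption:

import numpy as np
import allantools

rng = np.random.default_rng(7)
x = np.cumsum(rng.standard_normal(4096))  # random-walk phase, tau0 = 1 s

t_o, d_o, e_o, n_o = allantools.ohdev(x, rate=1.0, taus="octave")
t_h, d_h, e_h, n_h = allantools.hdev(x, rate=1.0, taus="octave")
# The overlapping estimator averages more squared third differences per
# tau, so n_o >= n_h and the error bars e_o are correspondingly tighter.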
aewallin/allantools
allantools/allantools.py
calc_hdev_phase
def calc_hdev_phase(phase, rate, mj, stride):
    """ main calculation function for HDEV and OHDEV

    Parameters
    ----------
    phase: np.array
        Phase data in seconds.
    rate: float
        The sampling rate for phase or frequency, in Hz
    mj: int
        M index value for stride
    stride: int
        Size of stride

    Returns
    -------
    (dev, deverr, n): tuple
        Array of computed values.

    Notes
    -----
    http://www.leapsecond.com/tools/adev_lib.c

                         1        N-3
         s2y(t) = --------------- sum [x(i+3) - 3x(i+2) + 3x(i+1) - x(i) ]^2
                  6*tau^2 (N-3m)  i=1

        N=M+1 phase measurements
        m is averaging factor

    NIST SP 1065 eqn (18) and (20) pages 20 and 21
    """
    tau0 = 1.0 / float(rate)
    mj = int(mj)
    stride = int(stride)
    d3 = phase[3 * mj::stride]
    d2 = phase[2 * mj::stride]
    d1 = phase[1 * mj::stride]
    d0 = phase[::stride]

    n = min(len(d0), len(d1), len(d2), len(d3))

    v_arr = d3[:n] - 3 * d2[:n] + 3 * d1[:n] - d0[:n]

    s = np.sum(v_arr * v_arr)

    if n == 0:
        n = 1

    h = np.sqrt(s / 6.0 / float(n)) / float(tau0 * mj)
    e = h / np.sqrt(n)
    return h, e, n
python
def calc_hdev_phase(phase, rate, mj, stride):
    """ main calculation function for HDEV and OHDEV

    Parameters
    ----------
    phase: np.array
        Phase data in seconds.
    rate: float
        The sampling rate for phase or frequency, in Hz
    mj: int
        M index value for stride
    stride: int
        Size of stride

    Returns
    -------
    (dev, deverr, n): tuple
        Array of computed values.

    Notes
    -----
    http://www.leapsecond.com/tools/adev_lib.c

                         1        N-3
         s2y(t) = --------------- sum [x(i+3) - 3x(i+2) + 3x(i+1) - x(i) ]^2
                  6*tau^2 (N-3m)  i=1

        N=M+1 phase measurements
        m is averaging factor

    NIST SP 1065 eqn (18) and (20) pages 20 and 21
    """
    tau0 = 1.0 / float(rate)
    mj = int(mj)
    stride = int(stride)
    d3 = phase[3 * mj::stride]
    d2 = phase[2 * mj::stride]
    d1 = phase[1 * mj::stride]
    d0 = phase[::stride]

    n = min(len(d0), len(d1), len(d2), len(d3))

    v_arr = d3[:n] - 3 * d2[:n] + 3 * d1[:n] - d0[:n]

    s = np.sum(v_arr * v_arr)

    if n == 0:
        n = 1

    h = np.sqrt(s / 6.0 / float(n)) / float(tau0 * mj)
    e = h / np.sqrt(n)
    return h, e, n
[ "def", "calc_hdev_phase", "(", "phase", ",", "rate", ",", "mj", ",", "stride", ")", ":", "tau0", "=", "1.0", "/", "float", "(", "rate", ")", "mj", "=", "int", "(", "mj", ")", "stride", "=", "int", "(", "stride", ")", "d3", "=", "phase", "[", "3", "*", "mj", ":", ":", "stride", "]", "d2", "=", "phase", "[", "2", "*", "mj", ":", ":", "stride", "]", "d1", "=", "phase", "[", "1", "*", "mj", ":", ":", "stride", "]", "d0", "=", "phase", "[", ":", ":", "stride", "]", "n", "=", "min", "(", "len", "(", "d0", ")", ",", "len", "(", "d1", ")", ",", "len", "(", "d2", ")", ",", "len", "(", "d3", ")", ")", "v_arr", "=", "d3", "[", ":", "n", "]", "-", "3", "*", "d2", "[", ":", "n", "]", "+", "3", "*", "d1", "[", ":", "n", "]", "-", "d0", "[", ":", "n", "]", "s", "=", "np", ".", "sum", "(", "v_arr", "*", "v_arr", ")", "if", "n", "==", "0", ":", "n", "=", "1", "h", "=", "np", ".", "sqrt", "(", "s", "/", "6.0", "/", "float", "(", "n", ")", ")", "/", "float", "(", "tau0", "*", "mj", ")", "e", "=", "h", "/", "np", ".", "sqrt", "(", "n", ")", "return", "h", ",", "e", ",", "n" ]
main calculation function for HDEV and OHDEV

    Parameters
    ----------
    phase: np.array
        Phase data in seconds.
    rate: float
        The sampling rate for phase or frequency, in Hz
    mj: int
        M index value for stride
    stride: int
        Size of stride

    Returns
    -------
    (dev, deverr, n): tuple
        Array of computed values.

    Notes
    -----
    http://www.leapsecond.com/tools/adev_lib.c

                         1        N-3
         s2y(t) = --------------- sum [x(i+3) - 3x(i+2) + 3x(i+1) - x(i) ]^2
                  6*tau^2 (N-3m)  i=1

        N=M+1 phase measurements
        m is averaging factor

    NIST SP 1065 eqn (18) and (20) pages 20 and 21
[ "main", "calculation", "fungtion", "for", "HDEV", "and", "OHDEV" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L515-L566
train
231,787
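The stride argument is what distinguishes the callers: ohdev above passes stride=1 (fully overlapping), while a plain Hadamard deviation passes stride=mj. A sketch invoking the helper directly, assuming it can be imported from the allantools.allantools module named in the path field; the phase data and mj are illustrative assumptions:

import numpy as np
from allantools.allantools import calc_hdev_phase

rng = np.random.default_rng(11)
phase = np.cumsum(rng.standard_normal(2048))  # phase in seconds, rate 1 Hz
mj = 8

h_over, e_over, n_over = calc_hdev_phase(phase, 1.0, mj, 1)   # overlapping
h_non, e_non, n_non = calc_hdev_phase(phase, 1.0, mj, mj)     # non-overlapping
# Both estimate sigma_y at tau = 8 s; n_over > n_non, so e_over is smaller.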
aewallin/allantools
allantools/allantools.py
totdev
def totdev(data, rate=1.0, data_type="phase", taus=None): """ Total deviation. Better confidence at long averages for Allan. .. math:: \\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) } \\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2 Where :math:`x^*_i` is a new time-series of length :math:`3N-4` derived from the original phase time-series :math:`x_n` of length :math:`N` by reflection at both ends. FIXME: better description of reflection operation. the original data x is in the center of x*: x*(1-j) = 2x(1) - x(1+j) for j=1..N-2 x*(i) = x(i) for i=1..N x*(N+j) = 2x(N) - x(N-j) for j=1..N-2 x* has length 3N-4 tau = m*tau0 FIXME: bias correction http://www.wriley.com/CI2.pdf page 5 Parameters ---------- phase: np.array Phase data in seconds. Provide either phase or frequency. frequency: np.array Fractional frequency data (nondimensional). Provide either frequency or phase. rate: float The sampling rate for phase or frequency, in Hz taus: np.array Array of tau values for which to compute measurement References ---------- David A. Howe, *The total deviation approach to long-term characterization of frequency stability*, IEEE tr. UFFC vol 47 no 5 (2000) NIST SP 1065 eqn (25) page 23 """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) N = len(phase) # totdev requires a new dataset # Begin by adding reflected data before dataset x1 = 2.0 * phase[0] * np.ones((N - 2,)) x1 = x1 - phase[1:-1] x1 = x1[::-1] # Reflected data at end of dataset x2 = 2.0 * phase[-1] * np.ones((N - 2,)) x2 = x2 - phase[1:-1][::-1] # check length of new dataset assert len(x1)+len(phase)+len(x2) == 3*N - 4 # Combine into a single array x = np.zeros((3*N - 4)) x[0:N-2] = x1 x[N-2:2*(N-2)+2] = phase # original data in the middle x[2*(N-2)+2:] = x2 devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) mid = len(x1) for idx, mj in enumerate(m): mj = int(mj) d0 = x[mid + 1:] d1 = x[mid + mj + 1:] d1n = x[mid - mj + 1:] e = min(len(d0), len(d1), len(d1n)) v_arr = d1n[:e] - 2.0 * d0[:e] + d1[:e] dev = np.sum(v_arr[:mid] * v_arr[:mid]) dev /= float(2 * pow(mj / rate, 2) * (N - 2)) dev = np.sqrt(dev) devs[idx] = dev deverrs[idx] = dev / np.sqrt(mid) ns[idx] = mid return remove_small_ns(taus_used, devs, deverrs, ns)
python
def totdev(data, rate=1.0, data_type="phase", taus=None): """ Total deviation. Better confidence at long averages for Allan. .. math:: \\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) } \\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2 Where :math:`x^*_i` is a new time-series of length :math:`3N-4` derived from the original phase time-series :math:`x_n` of length :math:`N` by reflection at both ends. FIXME: better description of reflection operation. the original data x is in the center of x*: x*(1-j) = 2x(1) - x(1+j) for j=1..N-2 x*(i) = x(i) for i=1..N x*(N+j) = 2x(N) - x(N-j) for j=1..N-2 x* has length 3N-4 tau = m*tau0 FIXME: bias correction http://www.wriley.com/CI2.pdf page 5 Parameters ---------- phase: np.array Phase data in seconds. Provide either phase or frequency. frequency: np.array Fractional frequency data (nondimensional). Provide either frequency or phase. rate: float The sampling rate for phase or frequency, in Hz taus: np.array Array of tau values for which to compute measurement References ---------- David A. Howe, *The total deviation approach to long-term characterization of frequency stability*, IEEE tr. UFFC vol 47 no 5 (2000) NIST SP 1065 eqn (25) page 23 """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) N = len(phase) # totdev requires a new dataset # Begin by adding reflected data before dataset x1 = 2.0 * phase[0] * np.ones((N - 2,)) x1 = x1 - phase[1:-1] x1 = x1[::-1] # Reflected data at end of dataset x2 = 2.0 * phase[-1] * np.ones((N - 2,)) x2 = x2 - phase[1:-1][::-1] # check length of new dataset assert len(x1)+len(phase)+len(x2) == 3*N - 4 # Combine into a single array x = np.zeros((3*N - 4)) x[0:N-2] = x1 x[N-2:2*(N-2)+2] = phase # original data in the middle x[2*(N-2)+2:] = x2 devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) mid = len(x1) for idx, mj in enumerate(m): mj = int(mj) d0 = x[mid + 1:] d1 = x[mid + mj + 1:] d1n = x[mid - mj + 1:] e = min(len(d0), len(d1), len(d1n)) v_arr = d1n[:e] - 2.0 * d0[:e] + d1[:e] dev = np.sum(v_arr[:mid] * v_arr[:mid]) dev /= float(2 * pow(mj / rate, 2) * (N - 2)) dev = np.sqrt(dev) devs[idx] = dev deverrs[idx] = dev / np.sqrt(mid) ns[idx] = mid return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "totdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "N", "=", "len", "(", "phase", ")", "# totdev requires a new dataset", "# Begin by adding reflected data before dataset", "x1", "=", "2.0", "*", "phase", "[", "0", "]", "*", "np", ".", "ones", "(", "(", "N", "-", "2", ",", ")", ")", "x1", "=", "x1", "-", "phase", "[", "1", ":", "-", "1", "]", "x1", "=", "x1", "[", ":", ":", "-", "1", "]", "# Reflected data at end of dataset", "x2", "=", "2.0", "*", "phase", "[", "-", "1", "]", "*", "np", ".", "ones", "(", "(", "N", "-", "2", ",", ")", ")", "x2", "=", "x2", "-", "phase", "[", "1", ":", "-", "1", "]", "[", ":", ":", "-", "1", "]", "# check length of new dataset", "assert", "len", "(", "x1", ")", "+", "len", "(", "phase", ")", "+", "len", "(", "x2", ")", "==", "3", "*", "N", "-", "4", "# Combine into a single array", "x", "=", "np", ".", "zeros", "(", "(", "3", "*", "N", "-", "4", ")", ")", "x", "[", "0", ":", "N", "-", "2", "]", "=", "x1", "x", "[", "N", "-", "2", ":", "2", "*", "(", "N", "-", "2", ")", "+", "2", "]", "=", "phase", "# original data in the middle", "x", "[", "2", "*", "(", "N", "-", "2", ")", "+", "2", ":", "]", "=", "x2", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "mid", "=", "len", "(", "x1", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "mj", "=", "int", "(", "mj", ")", "d0", "=", "x", "[", "mid", "+", "1", ":", "]", "d1", "=", "x", "[", "mid", "+", "mj", "+", "1", ":", "]", "d1n", "=", "x", "[", "mid", "-", "mj", "+", "1", ":", "]", "e", "=", "min", "(", "len", "(", "d0", ")", ",", "len", "(", "d1", ")", ",", "len", "(", "d1n", ")", ")", "v_arr", "=", "d1n", "[", ":", "e", "]", "-", "2.0", "*", "d0", "[", ":", "e", "]", "+", "d1", "[", ":", "e", "]", "dev", "=", "np", ".", "sum", "(", "v_arr", "[", ":", "mid", "]", "*", "v_arr", "[", ":", "mid", "]", ")", "dev", "/=", "float", "(", "2", "*", "pow", "(", "mj", "/", "rate", ",", "2", ")", "*", "(", "N", "-", "2", ")", ")", "dev", "=", "np", ".", "sqrt", "(", "dev", ")", "devs", "[", "idx", "]", "=", "dev", "deverrs", "[", "idx", "]", "=", "dev", "/", "np", ".", "sqrt", "(", "mid", ")", "ns", "[", "idx", "]", "=", "mid", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
Total deviation. Better confidence at long averages for Allan. .. math:: \\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) } \\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2 Where :math:`x^*_i` is a new time-series of length :math:`3N-4` derived from the original phase time-series :math:`x_n` of length :math:`N` by reflection at both ends. FIXME: better description of reflection operation. the original data x is in the center of x*: x*(1-j) = 2x(1) - x(1+j) for j=1..N-2 x*(i) = x(i) for i=1..N x*(N+j) = 2x(N) - x(N-j) for j=1..N-2 x* has length 3N-4 tau = m*tau0 FIXME: bias correction http://www.wriley.com/CI2.pdf page 5 Parameters ---------- phase: np.array Phase data in seconds. Provide either phase or frequency. frequency: np.array Fractional frequency data (nondimensional). Provide either frequency or phase. rate: float The sampling rate for phase or frequency, in Hz taus: np.array Array of tau values for which to compute measurement References ---------- David A. Howe, *The total deviation approach to long-term characterization of frequency stability*, IEEE tr. UFFC vol 47 no 5 (2000) NIST SP 1065 eqn (25) page 23
[ "Total", "deviation", ".", "Better", "confidence", "at", "long", "averages", "for", "Allan", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L568-L660
train
231,788
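The reflection step that the docstring flags with a FIXME is easiest to see stand-alone. This sketch reproduces the x* construction from the function body; the four-point input is chosen purely for illustration:

import numpy as np

x = np.array([1.0, 2.0, 4.0, 7.0])       # phase series, N = 4
N = len(x)
left = (2.0 * x[0] - x[1:-1])[::-1]      # x*(1-j) = 2x(1) - x(1+j)
right = 2.0 * x[-1] - x[1:-1][::-1]      # x*(N+j) = 2x(N) - x(N-j)
x_star = np.concatenate([left, x, right])
assert len(x_star) == 3*N - 4            # 8 points for N = 4
print(x_star)  # [-2.  0.  1.  2.  4.  7. 10. 12.]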
aewallin/allantools
allantools/allantools.py
mtotdev
def mtotdev(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Modified Total deviation. Better confidence at long averages for modified Allan FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6 The variance is scaled up (divided by this number) based on the noise-type identified. WPM 0.94 FPM 0.83 WFM 0.73 FFM 0.70 RWFM 0.69 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. NIST SP 1065 eqn (27) page 25 """ phase = input_to_phase(data, rate, data_type) (phase, ms, taus_used) = tau_generator(phase, rate, taus, maximum_m=float(len(phase))/3.0) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(ms): devs[idx], deverrs[idx], ns[idx] = calc_mtotdev_phase(phase, rate, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
python
def mtotdev(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Modified Total deviation. Better confidence at long averages for modified Allan FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6 The variance is scaled up (divided by this number) based on the noise-type identified. WPM 0.94 FPM 0.83 WFM 0.73 FFM 0.70 RWFM 0.69 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. NIST SP 1065 eqn (27) page 25 """ phase = input_to_phase(data, rate, data_type) (phase, ms, taus_used) = tau_generator(phase, rate, taus, maximum_m=float(len(phase))/3.0) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(ms): devs[idx], deverrs[idx], ns[idx] = calc_mtotdev_phase(phase, rate, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "mtotdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "ms", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ",", "maximum_m", "=", "float", "(", "len", "(", "phase", ")", ")", "/", "3.0", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "ms", ")", ":", "devs", "[", "idx", "]", ",", "deverrs", "[", "idx", "]", ",", "ns", "[", "idx", "]", "=", "calc_mtotdev_phase", "(", "phase", ",", "rate", ",", "mj", ")", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
PRELIMINARY - REQUIRES FURTHER TESTING. Modified Total deviation. Better confidence at long averages for modified Allan FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6 The variance is scaled up (divided by this number) based on the noise-type identified. WPM 0.94 FPM 0.83 WFM 0.73 FFM 0.70 RWFM 0.69 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. NIST SP 1065 eqn (27) page 25
[ "PRELIMINARY", "-", "REQUIRES", "FURTHER", "TESTING", ".", "Modified", "Total", "deviation", ".", "Better", "confidence", "at", "long", "averages", "for", "modified", "Allan" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L674-L716
train
231,789
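A usage sketch; note the maximum_m=len(phase)/3 cap handed to tau_generator, which silently drops longer requested taus, and note that the bias factors quoted in the docstring are not applied by this function. The input data is an illustrative assumption:

import numpy as np
import allantools

rng = np.random.default_rng(5)
x = np.cumsum(rng.standard_normal(3000))  # phase, tau0 = 1 s

# Only averaging factors up to len(x)/3 = 1000 survive the cap.
taus, dev, err, ns = allantools.mtotdev(x, rate=1.0, taus="octave")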
aewallin/allantools
allantools/allantools.py
htotdev
def htotdev(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Hadamard Total deviation. Better confidence at long averages for Hadamard deviation FIXME: bias corrections from http://www.wriley.com/CI2.pdf W FM 0.995 alpha= 0 F FM 0.851 alpha=-1 RW FM 0.771 alpha=-2 FW FM 0.717 alpha=-3 RR FM 0.679 alpha=-4 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ if data_type == "phase": phase = data freq = phase2frequency(phase, rate) elif data_type == "freq": phase = frequency2phase(data, rate) freq = data else: raise Exception("unknown data_type: " + data_type) rate = float(rate) (freq, ms, taus_used) = tau_generator(freq, rate, taus, maximum_m=float(len(freq))/3.0) phase = np.array(phase) freq = np.array(freq) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) # NOTE at mj==1 we use ohdev(), based on comment from here: # http://www.wriley.com/paper4ht.htm # "For best consistency, the overlapping Hadamard variance is used # instead of the Hadamard total variance at m=1" # FIXME: this uses both freq and phase datasets, which uses double the memory really needed... for idx, mj in enumerate(ms): if int(mj) == 1: (devs[idx], deverrs[idx], ns[idx]) = calc_hdev_phase(phase, rate, mj, 1) else: (devs[idx], deverrs[idx], ns[idx]) = calc_htotdev_freq(freq, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
python
def htotdev(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Hadamard Total deviation. Better confidence at long averages for Hadamard deviation FIXME: bias corrections from http://www.wriley.com/CI2.pdf W FM 0.995 alpha= 0 F FM 0.851 alpha=-1 RW FM 0.771 alpha=-2 FW FM 0.717 alpha=-3 RR FM 0.679 alpha=-4 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ if data_type == "phase": phase = data freq = phase2frequency(phase, rate) elif data_type == "freq": phase = frequency2phase(data, rate) freq = data else: raise Exception("unknown data_type: " + data_type) rate = float(rate) (freq, ms, taus_used) = tau_generator(freq, rate, taus, maximum_m=float(len(freq))/3.0) phase = np.array(phase) freq = np.array(freq) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) # NOTE at mj==1 we use ohdev(), based on comment from here: # http://www.wriley.com/paper4ht.htm # "For best consistency, the overlapping Hadamard variance is used # instead of the Hadamard total variance at m=1" # FIXME: this uses both freq and phase datasets, which uses double the memory really needed... for idx, mj in enumerate(ms): if int(mj) == 1: (devs[idx], deverrs[idx], ns[idx]) = calc_hdev_phase(phase, rate, mj, 1) else: (devs[idx], deverrs[idx], ns[idx]) = calc_htotdev_freq(freq, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "htotdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "if", "data_type", "==", "\"phase\"", ":", "phase", "=", "data", "freq", "=", "phase2frequency", "(", "phase", ",", "rate", ")", "elif", "data_type", "==", "\"freq\"", ":", "phase", "=", "frequency2phase", "(", "data", ",", "rate", ")", "freq", "=", "data", "else", ":", "raise", "Exception", "(", "\"unknown data_type: \"", "+", "data_type", ")", "rate", "=", "float", "(", "rate", ")", "(", "freq", ",", "ms", ",", "taus_used", ")", "=", "tau_generator", "(", "freq", ",", "rate", ",", "taus", ",", "maximum_m", "=", "float", "(", "len", "(", "freq", ")", ")", "/", "3.0", ")", "phase", "=", "np", ".", "array", "(", "phase", ")", "freq", "=", "np", ".", "array", "(", "freq", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "# NOTE at mj==1 we use ohdev(), based on comment from here:", "# http://www.wriley.com/paper4ht.htm", "# \"For best consistency, the overlapping Hadamard variance is used", "# instead of the Hadamard total variance at m=1\"", "# FIXME: this uses both freq and phase datasets, which uses double the memory really needed...", "for", "idx", ",", "mj", "in", "enumerate", "(", "ms", ")", ":", "if", "int", "(", "mj", ")", "==", "1", ":", "(", "devs", "[", "idx", "]", ",", "deverrs", "[", "idx", "]", ",", "ns", "[", "idx", "]", ")", "=", "calc_hdev_phase", "(", "phase", ",", "rate", ",", "mj", ",", "1", ")", "else", ":", "(", "devs", "[", "idx", "]", ",", "deverrs", "[", "idx", "]", ",", "ns", "[", "idx", "]", ")", "=", "calc_htotdev_freq", "(", "freq", ",", "mj", ")", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
PRELIMINARY - REQUIRES FURTHER TESTING. Hadamard Total deviation. Better confidence at long averages for Hadamard deviation FIXME: bias corrections from http://www.wriley.com/CI2.pdf W FM 0.995 alpha= 0 F FM 0.851 alpha=-1 RW FM 0.771 alpha=-2 FW FM 0.717 alpha=-3 RR FM 0.679 alpha=-4 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation.
[ "PRELIMINARY", "-", "REQUIRES", "FURTHER", "TESTING", ".", "Hadamard", "Total", "deviation", ".", "Better", "confidence", "at", "long", "averages", "for", "Hadamard", "deviation" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L787-L847
train
231,790
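The m == 1 branch falls back to the overlapping Hadamard estimator, so the first point should coincide with ohdev. A sketch checking that; the synthetic phase data is an illustrative assumption:

import numpy as np
import allantools

rng = np.random.default_rng(9)
x = np.cumsum(rng.standard_normal(2000))  # phase, tau0 = 1 s

t_ht, d_ht, e_ht, n_ht = allantools.htotdev(x, rate=1.0, taus=[1.0])
t_oh, d_oh, e_oh, n_oh = allantools.ohdev(x, rate=1.0, taus=[1.0])
assert np.isclose(d_ht[0], d_oh[0])  # same calc_hdev_phase call at m == 1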
aewallin/allantools
allantools/allantools.py
theo1
def theo1(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Theo1 is a two-sample variance with improved confidence and extended averaging factor range. .. math:: \\sigma^2_{THEO1}(m\\tau_0) = { 1 \\over (m \\tau_0 )^2 (N-m) } \\sum_{i=1}^{N-m} \\sum_{\\delta=0}^{m/2-1} {1\\over m/2-\\delta}\\lbrace ({x}_{i} - x_{i-\\delta +m/2}) + (x_{i+m}- x_{i+\\delta +m/2}) \\rbrace^2 Where :math:`10<=m<=N-1` is even. FIXME: bias correction NIST SP 1065 eq (30) page 29 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ phase = input_to_phase(data, rate, data_type) tau0 = 1.0/rate (phase, ms, taus_used) = tau_generator(phase, rate, taus, even=True) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) N = len(phase) for idx, m in enumerate(ms): m = int(m) # to avoid: VisibleDeprecationWarning: using a # non-integer number instead of an integer will # result in an error in the future assert m % 2 == 0 # m must be even dev = 0 n = 0 for i in range(int(N-m)): s = 0 for d in range(int(m/2)): # inner sum pre = 1.0 / (float(m)/2 - float(d)) s += pre*pow(phase[i]-phase[i-d+int(m/2)] + phase[i+m]-phase[i+d+int(m/2)], 2) n = n+1 dev += s assert n == (N-m)*m/2 # N-m outer sums, m/2 inner sums dev = dev/(0.75*(N-m)*pow(m*tau0, 2)) # factor 0.75 used here? http://tf.nist.gov/general/pdf/1990.pdf # but not here? http://tf.nist.gov/timefreq/general/pdf/2220.pdf page 29 devs[idx] = np.sqrt(dev) deverrs[idx] = devs[idx] / np.sqrt(N-m) ns[idx] = n return remove_small_ns(taus_used, devs, deverrs, ns)
python
def theo1(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Theo1 is a two-sample variance with improved confidence and extended averaging factor range. .. math:: \\sigma^2_{THEO1}(m\\tau_0) = { 1 \\over (m \\tau_0 )^2 (N-m) } \\sum_{i=1}^{N-m} \\sum_{\\delta=0}^{m/2-1} {1\\over m/2-\\delta}\\lbrace ({x}_{i} - x_{i-\\delta +m/2}) + (x_{i+m}- x_{i+\\delta +m/2}) \\rbrace^2 Where :math:`10<=m<=N-1` is even. FIXME: bias correction NIST SP 1065 eq (30) page 29 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ phase = input_to_phase(data, rate, data_type) tau0 = 1.0/rate (phase, ms, taus_used) = tau_generator(phase, rate, taus, even=True) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) N = len(phase) for idx, m in enumerate(ms): m = int(m) # to avoid: VisibleDeprecationWarning: using a # non-integer number instead of an integer will # result in an error in the future assert m % 2 == 0 # m must be even dev = 0 n = 0 for i in range(int(N-m)): s = 0 for d in range(int(m/2)): # inner sum pre = 1.0 / (float(m)/2 - float(d)) s += pre*pow(phase[i]-phase[i-d+int(m/2)] + phase[i+m]-phase[i+d+int(m/2)], 2) n = n+1 dev += s assert n == (N-m)*m/2 # N-m outer sums, m/2 inner sums dev = dev/(0.75*(N-m)*pow(m*tau0, 2)) # factor 0.75 used here? http://tf.nist.gov/general/pdf/1990.pdf # but not here? http://tf.nist.gov/timefreq/general/pdf/2220.pdf page 29 devs[idx] = np.sqrt(dev) deverrs[idx] = devs[idx] / np.sqrt(N-m) ns[idx] = n return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "theo1", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "tau0", "=", "1.0", "/", "rate", "(", "phase", ",", "ms", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ",", "even", "=", "True", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "N", "=", "len", "(", "phase", ")", "for", "idx", ",", "m", "in", "enumerate", "(", "ms", ")", ":", "m", "=", "int", "(", "m", ")", "# to avoid: VisibleDeprecationWarning: using a", "# non-integer number instead of an integer will", "# result in an error in the future", "assert", "m", "%", "2", "==", "0", "# m must be even", "dev", "=", "0", "n", "=", "0", "for", "i", "in", "range", "(", "int", "(", "N", "-", "m", ")", ")", ":", "s", "=", "0", "for", "d", "in", "range", "(", "int", "(", "m", "/", "2", ")", ")", ":", "# inner sum", "pre", "=", "1.0", "/", "(", "float", "(", "m", ")", "/", "2", "-", "float", "(", "d", ")", ")", "s", "+=", "pre", "*", "pow", "(", "phase", "[", "i", "]", "-", "phase", "[", "i", "-", "d", "+", "int", "(", "m", "/", "2", ")", "]", "+", "phase", "[", "i", "+", "m", "]", "-", "phase", "[", "i", "+", "d", "+", "int", "(", "m", "/", "2", ")", "]", ",", "2", ")", "n", "=", "n", "+", "1", "dev", "+=", "s", "assert", "n", "==", "(", "N", "-", "m", ")", "*", "m", "/", "2", "# N-m outer sums, m/2 inner sums", "dev", "=", "dev", "/", "(", "0.75", "*", "(", "N", "-", "m", ")", "*", "pow", "(", "m", "*", "tau0", ",", "2", ")", ")", "# factor 0.75 used here? http://tf.nist.gov/general/pdf/1990.pdf", "# but not here? http://tf.nist.gov/timefreq/general/pdf/2220.pdf page 29", "devs", "[", "idx", "]", "=", "np", ".", "sqrt", "(", "dev", ")", "deverrs", "[", "idx", "]", "=", "devs", "[", "idx", "]", "/", "np", ".", "sqrt", "(", "N", "-", "m", ")", "ns", "[", "idx", "]", "=", "n", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
PRELIMINARY - REQUIRES FURTHER TESTING. Theo1 is a two-sample variance with improved confidence and extended averaging factor range. .. math:: \\sigma^2_{THEO1}(m\\tau_0) = { 1 \\over (m \\tau_0 )^2 (N-m) } \\sum_{i=1}^{N-m} \\sum_{\\delta=0}^{m/2-1} {1\\over m/2-\\delta}\\lbrace ({x}_{i} - x_{i-\\delta +m/2}) + (x_{i+m}- x_{i+\\delta +m/2}) \\rbrace^2 Where :math:`10<=m<=N-1` is even. FIXME: bias correction NIST SP 1065 eq (30) page 29 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation.
[ "PRELIMINARY", "-", "REQUIRES", "FURTHER", "TESTING", ".", "Theo1", "is", "a", "two", "-", "sample", "variance", "with", "improved", "confidence", "and", "extended", "averaging", "factor", "range", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L919-L987
train
231,791
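theo1 evaluates only even averaging factors (tau_generator is called with even=True) and costs O(N*m) per point, so keep the requested tau list modest. Data and taus below are illustrative assumptions:

import numpy as np
import allantools

rng = np.random.default_rng(13)
x = np.cumsum(rng.standard_normal(1024))  # phase, tau0 = 1 s

# m = 10 and m = 16 are even, satisfying the assert m % 2 == 0 in the loop.
taus, dev, err, ns = allantools.theo1(x, rate=1.0, taus=[10.0, 16.0])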
aewallin/allantools
allantools/allantools.py
tierms
def tierms(data, rate=1.0, data_type="phase", taus=None): """ Time Interval Error RMS. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) count = len(phase) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): mj = int(mj) # This seems like an unusual way to phases = np.column_stack((phase[:-mj], phase[mj:])) p_max = np.max(phases, axis=1) p_min = np.min(phases, axis=1) phases = p_max - p_min tie = np.sqrt(np.mean(phases * phases)) ncount = count - mj devs[idx] = tie deverrs[idx] = 0 / np.sqrt(ncount) # TODO! I THINK THIS IS WRONG! ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
python
def tierms(data, rate=1.0, data_type="phase", taus=None): """ Time Interval Error RMS. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) count = len(phase) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): mj = int(mj) # This seems like an unusual way to phases = np.column_stack((phase[:-mj], phase[mj:])) p_max = np.max(phases, axis=1) p_min = np.min(phases, axis=1) phases = p_max - p_min tie = np.sqrt(np.mean(phases * phases)) ncount = count - mj devs[idx] = tie deverrs[idx] = 0 / np.sqrt(ncount) # TODO! I THINK THIS IS WRONG! ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "tierms", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "data", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "count", "=", "len", "(", "phase", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "mj", "=", "int", "(", "mj", ")", "# This seems like an unusual way to", "phases", "=", "np", ".", "column_stack", "(", "(", "phase", "[", ":", "-", "mj", "]", ",", "phase", "[", "mj", ":", "]", ")", ")", "p_max", "=", "np", ".", "max", "(", "phases", ",", "axis", "=", "1", ")", "p_min", "=", "np", ".", "min", "(", "phases", ",", "axis", "=", "1", ")", "phases", "=", "p_max", "-", "p_min", "tie", "=", "np", ".", "sqrt", "(", "np", ".", "mean", "(", "phases", "*", "phases", ")", ")", "ncount", "=", "count", "-", "mj", "devs", "[", "idx", "]", "=", "tie", "deverrs", "[", "idx", "]", "=", "0", "/", "np", ".", "sqrt", "(", "ncount", ")", "# TODO! I THINK THIS IS WRONG!", "ns", "[", "idx", "]", "=", "ncount", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
Time Interval Error RMS. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation.
[ "Time", "Interval", "Error", "RMS", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L990-L1033
train
231,792
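A minimal usage sketch for the tierms() record above, written here for illustration. It assumes the allantools package from this repository is installed and that tierms() is exposed at the package level, and it reads the docstring's taus=["all"|"octave"|"decade"] option as the string choice "octave":

import numpy as np
import allantools

phase = np.random.randn(10000) * 1e-9   # white phase noise, in seconds, 1 Hz sampling
taus, devs, errs, ns = allantools.tierms(phase, rate=1.0,
                                         data_type="phase", taus="octave")
for t, d in zip(taus, devs):
    print("tau = %g s  TIE rms = %g s" % (t, d))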
aewallin/allantools
allantools/allantools.py
mtie
def mtie(data, rate=1.0, data_type="phase", taus=None): """ Maximum Time Interval Error. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Notes ----- this seems to correspond to Stable32 setting "Fast(u)" Stable32 also has "Decade" and "Octave" modes where the dataset is extended somehow? """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): rw = mtie_rolling_window(phase, int(mj + 1)) win_max = np.max(rw, axis=1) win_min = np.min(rw, axis=1) tie = win_max - win_min dev = np.max(tie) ncount = phase.shape[0] - mj devs[idx] = dev deverrs[idx] = dev / np.sqrt(ncount) ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
python
def mtie(data, rate=1.0, data_type="phase", taus=None): """ Maximum Time Interval Error. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Notes ----- this seems to correspond to Stable32 setting "Fast(u)" Stable32 also has "Decade" and "Octave" modes where the dataset is extended somehow? """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): rw = mtie_rolling_window(phase, int(mj + 1)) win_max = np.max(rw, axis=1) win_min = np.min(rw, axis=1) tie = win_max - win_min dev = np.max(tie) ncount = phase.shape[0] - mj devs[idx] = dev deverrs[idx] = dev / np.sqrt(ncount) ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "mtie", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "rw", "=", "mtie_rolling_window", "(", "phase", ",", "int", "(", "mj", "+", "1", ")", ")", "win_max", "=", "np", ".", "max", "(", "rw", ",", "axis", "=", "1", ")", "win_min", "=", "np", ".", "min", "(", "rw", ",", "axis", "=", "1", ")", "tie", "=", "win_max", "-", "win_min", "dev", "=", "np", ".", "max", "(", "tie", ")", "ncount", "=", "phase", ".", "shape", "[", "0", "]", "-", "mj", "devs", "[", "idx", "]", "=", "dev", "deverrs", "[", "idx", "]", "=", "dev", "/", "np", ".", "sqrt", "(", "ncount", ")", "ns", "[", "idx", "]", "=", "ncount", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
Maximum Time Interval Error. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Notes ----- this seems to correspond to Stable32 setting "Fast(u)" Stable32 also has "Decade" and "Octave" modes where the dataset is extended somehow?
[ "Maximum", "Time", "Interval", "Error", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1061-L1101
train
231,793
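mtie() above takes the max-minus-min of a rolling window at each averaging factor mj. Below is a brute-force sketch of that idea for a single window length, independent of the repository's mtie_rolling_window() helper; mtie_one_tau is a name invented here for illustration:

import numpy as np

def mtie_one_tau(phase, mj):
    # largest peak-to-peak phase excursion over all windows of mj+1 samples
    return max(np.ptp(phase[i:i + mj + 1]) for i in range(len(phase) - mj))

phase = np.cumsum(np.random.randn(1000))  # random-walk phase
print(mtie_one_tau(phase, 10))

The rolling-window approach materializes every window, so its cost grows with window length; that is presumably why the Bregni-based variant in the next record exists.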
aewallin/allantools
allantools/allantools.py
mtie_phase_fast
def mtie_phase_fast(phase, rate=1.0, data_type="phase", taus=None): """ fast binary decomposition algorithm for MTIE See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in Characterization of Network Synchronization Performance" """ rate = float(rate) phase = np.asarray(phase) k_max = int(np.floor(np.log2(len(phase)))) phase = phase[0:pow(2, k_max)] # truncate data to 2**k_max datapoints assert len(phase) == pow(2, k_max) taus = [pow(2, k) for k in range(k_max)] # window lengths are powers of two devs = np.zeros(len(taus)) deverrs = np.zeros(len(taus)) # no error estimate is computed by this fast variant ns = np.zeros(len(taus)) taus_used = np.array(taus) # [(1.0/rate)*t for t in taus] # matrices to store results mtie_max = np.zeros((len(phase)-1, k_max)) mtie_min = np.zeros((len(phase)-1, k_max)) for kidx in range(k_max): k = kidx + 1 imax = len(phase) - pow(2, k) + 1 tie = np.zeros(imax) ns[kidx] = imax for i in range(imax): if k == 1: mtie_max[i, kidx] = max(phase[i], phase[i+1]) mtie_min[i, kidx] = min(phase[i], phase[i+1]) else: # a 2**k window is the union of two overlapping 2**(k-1) windows p = int(pow(2, k-1)) mtie_max[i, kidx] = max(mtie_max[i, kidx-1], mtie_max[i+p, kidx-1]) mtie_min[i, kidx] = min(mtie_min[i, kidx-1], mtie_min[i+p, kidx-1]) tie[i] = mtie_max[i, kidx] - mtie_min[i, kidx] devs[kidx] = np.amax(tie) # maximum TIE over all windows of this length return remove_small_ns(taus_used, devs, deverrs, ns)
python
def mtie_phase_fast(phase, rate=1.0, data_type="phase", taus=None): """ fast binary decomposition algorithm for MTIE See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in Characterization of Network Synchronization Performance" """ rate = float(rate) phase = np.asarray(phase) k_max = int(np.floor(np.log2(len(phase)))) phase = phase[0:pow(2, k_max)] # truncate data to 2**k_max datapoints assert len(phase) == pow(2, k_max) taus = [pow(2, k) for k in range(k_max)] # window lengths are powers of two devs = np.zeros(len(taus)) deverrs = np.zeros(len(taus)) # no error estimate is computed by this fast variant ns = np.zeros(len(taus)) taus_used = np.array(taus) # [(1.0/rate)*t for t in taus] # matrices to store results mtie_max = np.zeros((len(phase)-1, k_max)) mtie_min = np.zeros((len(phase)-1, k_max)) for kidx in range(k_max): k = kidx + 1 imax = len(phase) - pow(2, k) + 1 tie = np.zeros(imax) ns[kidx] = imax for i in range(imax): if k == 1: mtie_max[i, kidx] = max(phase[i], phase[i+1]) mtie_min[i, kidx] = min(phase[i], phase[i+1]) else: # a 2**k window is the union of two overlapping 2**(k-1) windows p = int(pow(2, k-1)) mtie_max[i, kidx] = max(mtie_max[i, kidx-1], mtie_max[i+p, kidx-1]) mtie_min[i, kidx] = min(mtie_min[i, kidx-1], mtie_min[i+p, kidx-1]) tie[i] = mtie_max[i, kidx] - mtie_min[i, kidx] devs[kidx] = np.amax(tie) # maximum TIE over all windows of this length return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "mtie_phase_fast", "(", "phase", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "rate", "=", "float", "(", "rate", ")", "phase", "=", "np", ".", "asarray", "(", "phase", ")", "k_max", "=", "int", "(", "np", ".", "floor", "(", "np", ".", "log2", "(", "len", "(", "phase", ")", ")", ")", ")", "phase", "=", "phase", "[", "0", ":", "pow", "(", "2", ",", "k_max", ")", "]", "# truncate data to 2**k_max datapoints", "assert", "len", "(", "phase", ")", "==", "pow", "(", "2", ",", "k_max", ")", "#k = 1", "taus", "=", "[", "pow", "(", "2", ",", "k", ")", "for", "k", "in", "range", "(", "k_max", ")", "]", "#while k <= k_max:", "# tau = pow(2, k)", "# taus.append(tau)", "#print tau", "# k += 1", "print", "(", "\"taus N=\"", ",", "len", "(", "taus", ")", ",", "\" \"", ",", "taus", ")", "devs", "=", "np", ".", "zeros", "(", "len", "(", "taus", ")", ")", "deverrs", "=", "np", ".", "zeros", "(", "len", "(", "taus", ")", ")", "ns", "=", "np", ".", "zeros", "(", "len", "(", "taus", ")", ")", "taus_used", "=", "np", ".", "array", "(", "taus", ")", "# [(1.0/rate)*t for t in taus]", "# matrices to store results", "mtie_max", "=", "np", ".", "zeros", "(", "(", "len", "(", "phase", ")", "-", "1", ",", "k_max", ")", ")", "mtie_min", "=", "np", ".", "zeros", "(", "(", "len", "(", "phase", ")", "-", "1", ",", "k_max", ")", ")", "for", "kidx", "in", "range", "(", "k_max", ")", ":", "k", "=", "kidx", "+", "1", "imax", "=", "len", "(", "phase", ")", "-", "pow", "(", "2", ",", "k", ")", "+", "1", "#print k, imax", "tie", "=", "np", ".", "zeros", "(", "imax", ")", "ns", "[", "kidx", "]", "=", "imax", "#print np.max( tie )", "for", "i", "in", "range", "(", "imax", ")", ":", "if", "k", "==", "1", ":", "mtie_max", "[", "i", ",", "kidx", "]", "=", "max", "(", "phase", "[", "i", "]", ",", "phase", "[", "i", "+", "1", "]", ")", "mtie_min", "[", "i", ",", "kidx", "]", "=", "min", "(", "phase", "[", "i", "]", ",", "phase", "[", "i", "+", "1", "]", ")", "else", ":", "p", "=", "int", "(", "pow", "(", "2", ",", "k", "-", "1", ")", ")", "mtie_max", "[", "i", ",", "kidx", "]", "=", "max", "(", "mtie_max", "[", "i", ",", "kidx", "-", "1", "]", ",", "mtie_max", "[", "i", "+", "p", ",", "kidx", "-", "1", "]", ")", "mtie_min", "[", "i", ",", "kidx", "]", "=", "min", "(", "mtie_min", "[", "i", ",", "kidx", "-", "1", "]", ",", "mtie_min", "[", "i", "+", "p", ",", "kidx", "-", "1", "]", ")", "#for i in range(imax):", "tie", "[", "i", "]", "=", "mtie_max", "[", "i", ",", "kidx", "]", "-", "mtie_min", "[", "i", ",", "kidx", "]", "#print tie[i]", "devs", "[", "kidx", "]", "=", "np", ".", "amax", "(", "tie", ")", "# maximum along axis", "#print \"maximum %2.4f\" % devs[kidx]", "#print np.amax( tie )", "#for tau in taus:", "#for", "devs", "=", "np", ".", "array", "(", "devs", ")", "print", "(", "\"devs N=\"", ",", "len", "(", "devs", ")", ",", "\" \"", ",", "devs", ")", "print", "(", "\"taus N=\"", ",", "len", "(", "taus_used", ")", ",", "\" \"", ",", "taus_used", ")", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
fast binary decomposition algorithm for MTIE See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in Characterization of Network Synchronization Performance"
[ "fast", "binary", "decomposition", "algorithm", "for", "MTIE" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1108-L1163
train
231,794
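The core of mtie_phase_fast() is Bregni's observation that the maximum (or minimum) over a window of 2**k points equals the max of two overlapping windows of 2**(k-1) points, so each level of the decomposition reuses the previous one. A standalone check of that recursion, with names chosen here for illustration:

import numpy as np

x = np.random.randn(16)   # 16 = 2**4 samples
prev = x.copy()           # level k=0: windows of length 1
for k in range(1, 5):
    half = 2 ** (k - 1)
    prev = np.maximum(prev[:-half], prev[half:])   # combine two half-windows
    direct = np.array([x[i:i + 2 ** k].max()
                       for i in range(len(x) - 2 ** k + 1)])
    assert np.allclose(prev, direct)   # matches the brute-force window max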
aewallin/allantools
allantools/allantools.py
gradev
def gradev(data, rate=1.0, data_type="phase", taus=None, ci=0.9, noisetype='wp'): """ gap resistant overlapping Allan deviation Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). Warning: phase data works better (frequency data is first transformed into phase using the numpy.cumsum() function, which can lead to poor results). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. ci: float the total confidence interval desired, i.e. if ci = 0.9, the bounds will be at 0.05 and 0.95. noisetype: string the type of noise desired: 'wp' returns white phase noise. 'wf' returns white frequency noise. 'fp' returns flicker phase noise. 'ff' returns flicker frequency noise. 'rf' returns random walk frequency noise. If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Returns ------- taus: np.array list of tau values in seconds adev: np.array deviations [err_l, err_h] : list of len()==2, np.array the upper and lower bounds of the confidence interval taken as distances from the estimated two sample variance. ns: np.array number of terms n in the adev estimate. """ if (data_type == "freq"): print("Warning: phase data is preferred as input to gradev()") phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) ad = np.zeros_like(taus_used) ade_l = np.zeros_like(taus_used) ade_h = np.zeros_like(taus_used) adn = np.zeros_like(taus_used) for idx, mj in enumerate(m): (dev, deverr, n) = calc_gradev_phase(data, rate, mj, 1, ci, noisetype) # stride=1 for overlapping ADEV ad[idx] = dev ade_l[idx] = deverr[0] ade_h[idx] = deverr[1] adn[idx] = n # Note that errors are split in 2 arrays return remove_small_ns(taus_used, ad, [ade_l, ade_h], adn)
python
def gradev(data, rate=1.0, data_type="phase", taus=None, ci=0.9, noisetype='wp'): """ gap resistant overlapping Allan deviation Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). Warning: phase data works better (frequency data is first transformed into phase using the numpy.cumsum() function, which can lead to poor results). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. ci: float the total confidence interval desired, i.e. if ci = 0.9, the bounds will be at 0.05 and 0.95. noisetype: string the type of noise desired: 'wp' returns white phase noise. 'wf' returns white frequency noise. 'fp' returns flicker phase noise. 'ff' returns flicker frequency noise. 'rf' returns random walk frequency noise. If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Returns ------- taus: np.array list of tau values in seconds adev: np.array deviations [err_l, err_h] : list of len()==2, np.array the upper and lower bounds of the confidence interval taken as distances from the estimated two sample variance. ns: np.array number of terms n in the adev estimate. """ if (data_type == "freq"): print("Warning: phase data is preferred as input to gradev()") phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) ad = np.zeros_like(taus_used) ade_l = np.zeros_like(taus_used) ade_h = np.zeros_like(taus_used) adn = np.zeros_like(taus_used) for idx, mj in enumerate(m): (dev, deverr, n) = calc_gradev_phase(data, rate, mj, 1, ci, noisetype) # stride=1 for overlapping ADEV ad[idx] = dev ade_l[idx] = deverr[0] ade_h[idx] = deverr[1] adn[idx] = n # Note that errors are split in 2 arrays return remove_small_ns(taus_used, ad, [ade_l, ade_h], adn)
[ "def", "gradev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ",", "ci", "=", "0.9", ",", "noisetype", "=", "'wp'", ")", ":", "if", "(", "data_type", "==", "\"freq\"", ")", ":", "print", "(", "\"Warning : phase data is preferred as input to gradev()\"", ")", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "data", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "ad", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ade_l", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ade_h", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "adn", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "(", "dev", ",", "deverr", ",", "n", ")", "=", "calc_gradev_phase", "(", "data", ",", "rate", ",", "mj", ",", "1", ",", "ci", ",", "noisetype", ")", "# stride=1 for overlapping ADEV", "ad", "[", "idx", "]", "=", "dev", "ade_l", "[", "idx", "]", "=", "deverr", "[", "0", "]", "ade_h", "[", "idx", "]", "=", "deverr", "[", "1", "]", "adn", "[", "idx", "]", "=", "n", "# Note that errors are split in 2 arrays", "return", "remove_small_ns", "(", "taus_used", ",", "ad", ",", "[", "ade_l", ",", "ade_h", "]", ",", "adn", ")" ]
gap resistant overlapping Allan deviation Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). Warning: phase data works better (frequency data is first transformed into phase using the numpy.cumsum() function, which can lead to poor results). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. ci: float the total confidence interval desired, i.e. if ci = 0.9, the bounds will be at 0.05 and 0.95. noisetype: string the type of noise desired: 'wp' returns white phase noise. 'wf' returns white frequency noise. 'fp' returns flicker phase noise. 'ff' returns flicker frequency noise. 'rf' returns random walk frequency noise. If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Returns ------- taus: np.array list of tau values in seconds adev: np.array deviations [err_l, err_h] : list of len()==2, np.array the upper and lower bounds of the confidence interval taken as distances from the estimated two sample variance. ns: np.array number of terms n in the adev estimate.
[ "gap", "resistant", "overlapping", "Allan", "deviation" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1173-L1242
train
231,795
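A usage sketch for gradev() with a gapped dataset; the NaN entries stand in for the missing measurements this gap-resistant variant is meant to handle. It assumes gradev() is exposed at the allantools package level:

import numpy as np
import allantools

phase = np.cumsum(np.random.randn(2000)) * 1e-9   # random-walk phase, seconds
phase[500:520] = np.nan                           # simulate a measurement gap

taus, ad, errs, ns = allantools.gradev(phase, rate=1.0, data_type="phase",
                                       taus="octave", ci=0.9, noisetype='wp')
# errs holds the lower and upper confidence bounds as two arrays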
aewallin/allantools
allantools/allantools.py
input_to_phase
def input_to_phase(data, rate, data_type): """ Take either phase or frequency as input and return phase """ if data_type == "phase": return data elif data_type == "freq": return frequency2phase(data, rate) else: raise Exception("unknown data_type: " + data_type)
python
def input_to_phase(data, rate, data_type): """ Take either phase or frequency as input and return phase """ if data_type == "phase": return data elif data_type == "freq": return frequency2phase(data, rate) else: raise Exception("unknown data_type: " + data_type)
[ "def", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", ":", "if", "data_type", "==", "\"phase\"", ":", "return", "data", "elif", "data_type", "==", "\"freq\"", ":", "return", "frequency2phase", "(", "data", ",", "rate", ")", "else", ":", "raise", "Exception", "(", "\"unknown data_type: \"", "+", "data_type", ")" ]
Take either phase or frequency as input and return phase
[ "Take", "either", "phase", "or", "frequency", "as", "input", "and", "return", "phase" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1297-L1305
train
231,796
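input_to_phase() is the small dispatcher called at the top of every deviation above; passing frequency data through it is equivalent to calling frequency2phase() directly. A sketch, assuming the helpers are importable from the allantools.allantools module named in the path field:

import numpy as np
from allantools.allantools import input_to_phase, frequency2phase

freq = np.random.randn(100) * 1e-12   # fractional frequency, 1 Hz sampling
p1 = input_to_phase(freq, 1.0, "freq")
p2 = frequency2phase(freq, 1.0)
assert np.allclose(p1, p2)
# any other data_type string raises an Exception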
aewallin/allantools
allantools/allantools.py
trim_data
def trim_data(x): """ Trim leading and trailing NaNs from dataset This is done by browsing the array from each end, storing the index of the first non-NaN in each case, and then returning the appropriate slice of the array """ # Find indices for first and last valid data first = 0 while np.isnan(x[first]): first += 1 last = len(x) while np.isnan(x[last - 1]): last -= 1 return x[first:last]
python
def trim_data(x): """ Trim leading and trailing NaNs from dataset This is done by browsing the array from each end, storing the index of the first non-NaN in each case, and then returning the appropriate slice of the array """ # Find indices for first and last valid data first = 0 while np.isnan(x[first]): first += 1 last = len(x) while np.isnan(x[last - 1]): last -= 1 return x[first:last]
[ "def", "trim_data", "(", "x", ")", ":", "# Find indices for first and last valid data", "first", "=", "0", "while", "np", ".", "isnan", "(", "x", "[", "first", "]", ")", ":", "first", "+=", "1", "last", "=", "len", "(", "x", ")", "while", "np", ".", "isnan", "(", "x", "[", "last", "-", "1", "]", ")", ":", "last", "-=", "1", "return", "x", "[", "first", ":", "last", "]" ]
Trim leading and trailing NaNs from dataset This is done by browsing the array from each end, storing the index of the first non-NaN in each case, and then returning the appropriate slice of the array
[ "Trim", "leading", "and", "trailing", "NaNs", "from", "dataset", "This", "is", "done", "by", "browsing", "the", "array", "from", "each", "end", "and", "store", "the", "index", "of", "the", "first", "non", "-", "NaN", "in", "each", "case", "the", "return", "the", "appropriate", "slice", "of", "the", "array" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1498-L1511
train
231,797
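A short demonstration of trim_data(): only the leading and trailing NaNs are removed, while interior gaps survive, which is exactly what the gap-resistant statistics rely on. Assumes the helper is importable from allantools.allantools as above:

import numpy as np
from allantools.allantools import trim_data

x = np.array([np.nan, np.nan, 1.0, 2.0, np.nan, 3.0, np.nan])
print(trim_data(x))   # [ 1.  2. nan  3.] -- the interior NaN is kept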
aewallin/allantools
allantools/allantools.py
three_cornered_hat_phase
def three_cornered_hat_phase(phasedata_ab, phasedata_bc, phasedata_ca, rate, taus, function): """ Three Cornered Hat Method Given three clocks A, B, C, we seek to find their variances :math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`. We measure three phase differences, assuming no correlation between the clocks, the measurements have variances: .. math:: \\sigma^2_{AB} = \\sigma^2_{A} + \\sigma^2_{B} \\sigma^2_{BC} = \\sigma^2_{B} + \\sigma^2_{C} \\sigma^2_{CA} = \\sigma^2_{C} + \\sigma^2_{A} Which allows solving for the variance of one clock as: .. math:: \\sigma^2_{A} = {1 \\over 2} ( \\sigma^2_{AB} + \\sigma^2_{CA} - \\sigma^2_{BC} ) and similarly cyclic permutations for :math:`\\sigma^2_B` and :math:`\\sigma^2_C` Parameters ---------- phasedata_ab: np.array phase measurements between clock A and B, in seconds phasedata_bc: np.array phase measurements between clock B and C, in seconds phasedata_ca: np.array phase measurements between clock C and A, in seconds rate: float The sampling rate for phase, in Hz taus: np.array The tau values for deviations, in seconds function: allantools deviation function The type of statistic to compute, e.g. allantools.oadev Returns ------- tau_ab: np.array Tau values corresponding to output deviations dev_a: np.array List of computed values for clock A References ---------- http://www.wriley.com/3-CornHat.htm """ (tau_ab, dev_ab, err_ab, ns_ab) = function(phasedata_ab, data_type='phase', rate=rate, taus=taus) (tau_bc, dev_bc, err_bc, ns_bc) = function(phasedata_bc, data_type='phase', rate=rate, taus=taus) (tau_ca, dev_ca, err_ca, ns_ca) = function(phasedata_ca, data_type='phase', rate=rate, taus=taus) var_ab = dev_ab * dev_ab var_bc = dev_bc * dev_bc var_ca = dev_ca * dev_ca assert len(var_ab) == len(var_bc) == len(var_ca) var_a = 0.5 * (var_ab + var_ca - var_bc) var_a[var_a < 0] = 0 # don't return imaginary deviations (?) dev_a = np.sqrt(var_a) err_a = [d/np.sqrt(nn) for (d, nn) in zip(dev_a, ns_ab)] return tau_ab, dev_a, err_a, ns_ab
python
def three_cornered_hat_phase(phasedata_ab, phasedata_bc, phasedata_ca, rate, taus, function): """ Three Cornered Hat Method Given three clocks A, B, C, we seek to find their variances :math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`. We measure three phase differences, assuming no correlation between the clocks, the measurements have variances: .. math:: \\sigma^2_{AB} = \\sigma^2_{A} + \\sigma^2_{B} \\sigma^2_{BC} = \\sigma^2_{B} + \\sigma^2_{C} \\sigma^2_{CA} = \\sigma^2_{C} + \\sigma^2_{A} Which allows solving for the variance of one clock as: .. math:: \\sigma^2_{A} = {1 \\over 2} ( \\sigma^2_{AB} + \\sigma^2_{CA} - \\sigma^2_{BC} ) and similarly cyclic permutations for :math:`\\sigma^2_B` and :math:`\\sigma^2_C` Parameters ---------- phasedata_ab: np.array phase measurements between clock A and B, in seconds phasedata_bc: np.array phase measurements between clock B and C, in seconds phasedata_ca: np.array phase measurements between clock C and A, in seconds rate: float The sampling rate for phase, in Hz taus: np.array The tau values for deviations, in seconds function: allantools deviation function The type of statistic to compute, e.g. allantools.oadev Returns ------- tau_ab: np.array Tau values corresponding to output deviations dev_a: np.array List of computed values for clock A References ---------- http://www.wriley.com/3-CornHat.htm """ (tau_ab, dev_ab, err_ab, ns_ab) = function(phasedata_ab, data_type='phase', rate=rate, taus=taus) (tau_bc, dev_bc, err_bc, ns_bc) = function(phasedata_bc, data_type='phase', rate=rate, taus=taus) (tau_ca, dev_ca, err_ca, ns_ca) = function(phasedata_ca, data_type='phase', rate=rate, taus=taus) var_ab = dev_ab * dev_ab var_bc = dev_bc * dev_bc var_ca = dev_ca * dev_ca assert len(var_ab) == len(var_bc) == len(var_ca) var_a = 0.5 * (var_ab + var_ca - var_bc) var_a[var_a < 0] = 0 # don't return imaginary deviations (?) dev_a = np.sqrt(var_a) err_a = [d/np.sqrt(nn) for (d, nn) in zip(dev_a, ns_ab)] return tau_ab, dev_a, err_a, ns_ab
[ "def", "three_cornered_hat_phase", "(", "phasedata_ab", ",", "phasedata_bc", ",", "phasedata_ca", ",", "rate", ",", "taus", ",", "function", ")", ":", "(", "tau_ab", ",", "dev_ab", ",", "err_ab", ",", "ns_ab", ")", "=", "function", "(", "phasedata_ab", ",", "data_type", "=", "'phase'", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "(", "tau_bc", ",", "dev_bc", ",", "err_bc", ",", "ns_bc", ")", "=", "function", "(", "phasedata_bc", ",", "data_type", "=", "'phase'", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "(", "tau_ca", ",", "dev_ca", ",", "err_ca", ",", "ns_ca", ")", "=", "function", "(", "phasedata_ca", ",", "data_type", "=", "'phase'", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "var_ab", "=", "dev_ab", "*", "dev_ab", "var_bc", "=", "dev_bc", "*", "dev_bc", "var_ca", "=", "dev_ca", "*", "dev_ca", "assert", "len", "(", "var_ab", ")", "==", "len", "(", "var_bc", ")", "==", "len", "(", "var_ca", ")", "var_a", "=", "0.5", "*", "(", "var_ab", "+", "var_ca", "-", "var_bc", ")", "var_a", "[", "var_a", "<", "0", "]", "=", "0", "# don't return imaginary deviations (?)", "dev_a", "=", "np", ".", "sqrt", "(", "var_a", ")", "err_a", "=", "[", "d", "/", "np", ".", "sqrt", "(", "nn", ")", "for", "(", "d", ",", "nn", ")", "in", "zip", "(", "dev_a", ",", "ns_ab", ")", "]", "return", "tau_ab", ",", "dev_a", ",", "err_a", ",", "ns_ab" ]
Three Cornered Hat Method Given three clocks A, B, C, we seek to find their variances :math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`. We measure three phase differences, assuming no correlation between the clocks, the measurements have variances: .. math:: \\sigma^2_{AB} = \\sigma^2_{A} + \\sigma^2_{B} \\sigma^2_{BC} = \\sigma^2_{B} + \\sigma^2_{C} \\sigma^2_{CA} = \\sigma^2_{C} + \\sigma^2_{A} Which allows solving for the variance of one clock as: .. math:: \\sigma^2_{A} = {1 \\over 2} ( \\sigma^2_{AB} + \\sigma^2_{CA} - \\sigma^2_{BC} ) and similarly cyclic permutations for :math:`\\sigma^2_B` and :math:`\\sigma^2_C` Parameters ---------- phasedata_ab: np.array phase measurements between clock A and B, in seconds phasedata_bc: np.array phase measurements between clock B and C, in seconds phasedata_ca: np.array phase measurements between clock C and A, in seconds rate: float The sampling rate for phase, in Hz taus: np.array The tau values for deviations, in seconds function: allantools deviation function The type of statistic to compute, e.g. allantools.oadev Returns ------- tau_ab: np.array Tau values corresponding to output deviations dev_a: np.array List of computed values for clock A References ---------- http://www.wriley.com/3-CornHat.htm
[ "Three", "Cornered", "Hat", "Method" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1513-L1588
train
231,798
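A usage sketch of the three-cornered hat with three simulated clocks; the pairwise phase differences are constructed so that each measured series really is the difference of two independent clocks. Assumes three_cornered_hat_phase() and oadev() are exposed at the package level:

import numpy as np
import allantools

n = 10000
a = np.cumsum(np.random.randn(n)) * 1e-9   # clock A phase, seconds
b = np.cumsum(np.random.randn(n)) * 2e-9   # clock B, deliberately noisier
c = np.cumsum(np.random.randn(n)) * 1e-9   # clock C

taus, dev_a, err_a, ns = allantools.three_cornered_hat_phase(
    a - b, b - c, c - a, rate=1.0, taus="octave",
    function=allantools.oadev)
# dev_a estimates clock A's deviation alone, without a better reference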
aewallin/allantools
allantools/allantools.py
frequency2phase
def frequency2phase(freqdata, rate): """ integrate fractional frequency data and output phase data Parameters ---------- freqdata: np.array Data array of fractional frequency measurements (nondimensional) rate: float The sampling rate for phase or frequency, in Hz Returns ------- phasedata: np.array Time integral of fractional frequency data, i.e. phase (time) data in units of seconds. For phase in units of radians, see phase2radians() """ dt = 1.0 / float(rate) # Protect against NaN values in input array (issue #60) # Reintroduces data trimming as in commit 503cb82 freqdata = trim_data(freqdata) phasedata = np.cumsum(freqdata) * dt phasedata = np.insert(phasedata, 0, 0) # prepend 0 so that phase starts at zero and len(phase) == len(freq)+1 (N frequency samples are the N increments between N+1 phase points) return phasedata
python
def frequency2phase(freqdata, rate): """ integrate fractional frequency data and output phase data Parameters ---------- freqdata: np.array Data array of fractional frequency measurements (nondimensional) rate: float The sampling rate for phase or frequency, in Hz Returns ------- phasedata: np.array Time integral of fractional frequency data, i.e. phase (time) data in units of seconds. For phase in units of radians, see phase2radians() """ dt = 1.0 / float(rate) # Protect against NaN values in input array (issue #60) # Reintroduces data trimming as in commit 503cb82 freqdata = trim_data(freqdata) phasedata = np.cumsum(freqdata) * dt phasedata = np.insert(phasedata, 0, 0) # prepend 0 so that phase starts at zero and len(phase) == len(freq)+1 (N frequency samples are the N increments between N+1 phase points) return phasedata
[ "def", "frequency2phase", "(", "freqdata", ",", "rate", ")", ":", "dt", "=", "1.0", "/", "float", "(", "rate", ")", "# Protect against NaN values in input array (issue #60)", "# Reintroduces data trimming as in commit 503cb82", "freqdata", "=", "trim_data", "(", "freqdata", ")", "phasedata", "=", "np", ".", "cumsum", "(", "freqdata", ")", "*", "dt", "phasedata", "=", "np", ".", "insert", "(", "phasedata", ",", "0", ",", "0", ")", "# FIXME: why do we do this?", "# so that phase starts at zero and len(phase)=len(freq)+1 ??", "return", "phasedata" ]
integrate fractional frequency data and output phase data Parameters ---------- freqdata: np.array Data array of fractional frequency measurements (nondimensional) rate: float The sampling rate for phase or frequency, in Hz Returns ------- phasedata: np.array Time integral of fractional frequency data, i.e. phase (time) data in units of seconds. For phase in units of radians, see phase2radians()
[ "integrate", "fractional", "frequency", "data", "and", "output", "phase", "data" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1595-L1619
train
231,799
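frequency2phase() is a cumulative sum scaled by the sample interval, so np.diff() undoes it; a quick round-trip check, assuming the helper is importable as in the records above:

import numpy as np
from allantools.allantools import frequency2phase

rate = 1.0
freq = np.random.randn(1000) * 1e-12        # fractional frequency
phase = frequency2phase(freq, rate)         # len(phase) == len(freq) + 1
assert np.allclose(np.diff(phase) * rate, freq)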