repository_name
stringclasses
316 values
func_path_in_repository
stringlengths
6
223
func_name
stringlengths
1
134
language
stringclasses
1 value
func_code_string
stringlengths
57
65.5k
func_documentation_string
stringlengths
1
46.3k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
called_functions
listlengths
1
156
enclosing_scope
stringlengths
2
1.48M
fkarb/xltable
xltable/worksheet.py
Worksheet.add_value
python
def add_value(self, value, row, col): self.__values[(row, col)] = value
Adds a single value (cell) to a worksheet at (row, col). Return the (row, col) where the value has been put. :param value: Value to write to the sheet. :param row: Row where the value should be written. :param col: Column where the value should be written.
train
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L62-L71
null
class Worksheet(object): """ A worksheet is a collection of tables placed at specific locations. Once all tables have been placed the worksheet can be written out or the rows can be iterated over, and any expressions present in the tables will be resolved to absolute cell references. :param str name: Worksheet name. """ _xlsx_unsupported_types = tuple() def __init__(self, name="Sheet1"): self.__name = name self.__tables = {} self.__values = {} self.__charts = [] self.__next_row = 0 self.__groups = [] @property def name(self): """Worksheet name""" return self.__name def add_table(self, table, row=None, col=0, row_spaces=1): """ Adds a table to the worksheet at (row, col). Return the (row, col) where the table has been put. :param xltable.Table table: Table to add to the worksheet. :param int row: Row to start the table at (defaults to the next free row). :param int col: Column to start the table at. :param int row_spaces: Number of rows to leave between this table and the next. """ name = table.name assert name is not None, "Tables must have a name" assert name not in self.__tables, "Table %s already exists in this worksheet" % name if row is None: row = self.__next_row self.__next_row = max(row + table.height + row_spaces, self.__next_row) self.__tables[name] = (table, (row, col)) return row, col def add_chart(self, chart, row, col): """ Adds a chart to the worksheet at (row, col). :param xltable.Chart Chart: chart to add to the workbook. :param int row: Row to add the chart at. 
""" self.__charts.append((chart, (row, col))) def add_row_group(self, tables, collapsed=True): """ Adds a group over all the given tables (will include any rows between the first row over all tables, and the last row over all tables) Initially collapsed if collapsed is True (True by default) """ self.__groups.append((tables, collapsed)) @property def next_row(self): """Row the next table will start at unless another row is specified.""" return self.__next_row @next_row.setter def next_row(self, value): self.__next_row = value def get_table_pos(self, tablename): """ :param str tablename: Name of table to get position of. :return: Upper left (row, col) coordinate of the named table. """ _table, (row, col) = self.__tables[tablename] return (row, col) def get_table(self, tablename): """ :param str tablename: Name of table to find. :return: A :py:class:`xltable.Table` instance from the table name. """ table, (_row, _col) = self.__tables[tablename] return table def iterrows(self, workbook=None): """ Yield rows as lists of data. The data is exactly as it is in the source pandas DataFrames and any formulas are not resolved. """ resolved_tables = [] max_height = 0 max_width = 0 # while yielding rows __formula_values is updated with any formula values set on Expressions self.__formula_values = {} for name, (table, (row, col)) in list(self.__tables.items()): # get the resolved 2d data array from the table # # expressions with no explicit table will use None when calling # get_table/get_table_pos, which should return the current table. 
# self.__tables[None] = (table, (row, col)) data = table.get_data(workbook, row, col, self.__formula_values) del self.__tables[None] height, width = data.shape upper_left = (row, col) lower_right = (row + height - 1, col + width - 1) max_height = max(max_height, lower_right[0] + 1) max_width = max(max_width, lower_right[1] + 1) resolved_tables.append((name, data, upper_left, lower_right)) for row, col in self.__values.keys(): max_width = max(max_width, row+1) max_height = max(max_height, col+1) # Build the whole table up-front. Doing it row by row is too slow. table = [[None] * max_width for i in range(max_height)] for name, data, upper_left, lower_right in resolved_tables: for i, r in enumerate(range(upper_left[0], lower_right[0]+1)): for j, c in enumerate(range(upper_left[1], lower_right[1]+1)): table[r][c] = data[i][j] for (r, c), value in self.__values.items(): if isinstance(value, Value): value = value.value if isinstance(value, Expression): if value.has_value: self.__formula_values[(r, c)] = value.value value = value.get_formula(workbook, r, c) table[r][c] = value for row in table: yield row def to_csv(self, writer): """ Writes worksheet to a csv.writer object. :param writer: csv writer instance. """ for row in self.iterrows(): writer.writerow(row) def _get_column_widths(self): """return a dictionary of {col -> width}""" col_widths = {} for table, (row, col) in self.__tables.values(): for colname, width in table.column_widths.items(): ic = col + table.get_column_offset(colname) current_width = col_widths.setdefault(ic, width) col_widths[ic] = max(width, current_width) return col_widths def _get_all_styles(self): """ return a dictionary of {(row, col) -> CellStyle} for all cells that use a non-default style. 
""" _styles = {} def _get_style(bold=False, bg_col=None, border=None): if (bold, bg_col, border) not in _styles: _styles[(bold, bg_col, border)] = CellStyle(bold=bold, bg_color=bg_col, border=border) return _styles[(bold, bg_col, border)] ws_styles = {} for table, (row, col) in self.__tables.values(): for r in range(row, row + table.header_height): for c in range(col, col + table.width): if isinstance(table.header_style, dict): col_name = table.dataframe.columns[c - col] style = table.header_style.get(col_name, _get_style(bold=True)) else: style = table.header_style or _get_style(bold=True) ws_styles[(r, c)] = style for c in range(col, col + table.row_labels_width): for r in range(row + table.header_height, row + table.height): if isinstance(table.index_style, dict): row_name = table.dataframe.index[r - row] style = table.index_style.get(row_name, _get_style(bold=True)) else: style = table.index_style or _get_style(bold=True) ws_styles[(r, c)] = style if table.style.stripe_colors or table.style.border: num_bg_cols = len(table.style.stripe_colors) if \ table.style.stripe_colors else 1 bg_cols = table.style.stripe_colors if \ table.style.stripe_colors else None for i, row_offset in enumerate(range(table.header_height, table.height)): for c in range(col, col + table.width): bg_col = bg_cols[i % num_bg_cols] if bg_cols else None style = _get_style(bold=None, bg_col=bg_col, border=table.style.border) if (row + row_offset, c) in ws_styles: style = style + ws_styles[(row + row_offset, c)] ws_styles[(row + row_offset, c)] = style for col_name, col_style in table.column_styles.items(): try: col_offset = table.get_column_offset(col_name) except KeyError: continue for i, r in enumerate(range(row + table.header_height, row + table.height)): style = col_style if (r, col + col_offset) in ws_styles: style = ws_styles[(r, col + col_offset)] + style ws_styles[(r, col + col_offset)] = style for row_name, row_style in table.row_styles.items(): try: row_offset = 
table.get_row_offset(row_name) except KeyError: continue for i, c in enumerate(range(col + table.row_labels_width, col + table.width)): style = row_style if (row + row_offset, c) in ws_styles: style = ws_styles[(row + row_offset, c)] + style ws_styles[(row + row_offset, c)] = style for (row_name, col_name), cell_style in table.cell_styles.items(): try: col_offset = table.get_column_offset(col_name) row_offset = table.get_row_offset(row_name) except KeyError: continue style = cell_style if (row + row_offset, col + col_offset) in ws_styles: style = ws_styles[(row + row_offset, col + col_offset)] + style ws_styles[(row + row_offset, col + col_offset)] = style for (row, col), value in self.__values.items(): if isinstance(value, Value): style = value.style if style: if (row, col) in ws_styles: style = style + ws_styles[(row, col)] ws_styles[(row, col)] = style return ws_styles def to_excel(self, workbook=None, worksheet=None, xl_app=None, clear=True, rename=True, resize_columns=True): """ Writes worksheet to an Excel Worksheet COM object. Requires :py:module:`pywin32` to be installed. :param workbook: xltable.Workbook this sheet belongs to. :param worksheet: Excel COM Worksheet instance to write to. :param xl_app: Excel COM Excel Application to write to. :param bool clear: If a worksheet is provided, clear worksheet before writing. :param bool rename: If a worksheet is provided, rename self to match the worksheet. :param bool resize_columns: Resize sheet columns after writing. 
""" from win32com.client import Dispatch, constants, gencache if xl_app is None: if worksheet is not None: xl_app = worksheet.Parent.Application elif workbook is not None and hasattr(workbook.workbook_obj, "Application"): xl_app = workbook.workbook_obj.Application else: xl_app = Dispatch("Excel.Application") xl = xl_app = gencache.EnsureDispatch(xl_app) # Create a workbook if there isn't one already if not workbook: from .workbook import Workbook workbook = Workbook(worksheets=[self]) if worksheet is None: # If there's no worksheet then call Workbook.to_excel which will create one return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns) if rename: self.__name = worksheet.Name # set manual calculation and turn off screen updating while we update the cells calculation = xl.Calculation screen_updating = xl.ScreenUpdating xl.Calculation = constants.xlCalculationManual xl.ScreenUpdating = False try: # clear the worksheet and reset the styles if clear: worksheet.Cells.ClearContents() worksheet.Cells.Font.Bold = False worksheet.Cells.Font.Size = 11 worksheet.Cells.Font.Color = 0x000000 worksheet.Cells.Interior.ColorIndex = 0 worksheet.Cells.NumberFormat = "General" # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False origin = worksheet.Range("A1") xl_cell = origin for r, row in enumerate(self.iterrows(workbook)): row = _to_pywintypes(row) # set the value and formulae to the excel range (it's much quicker to # write a row at a time and update the formula than it is it do it # cell by cell) if clear: xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, 
len(row))) xl_row.Value = row else: for c, value in enumerate(row): if value is not None: xl_cell.Offset(1, 1 + c).Value = value for c, value in enumerate(row): if isinstance(value, str): if value.startswith("="): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).Formula = value elif value.startswith("{=") \ and not _is_in_array_formula_table(r, c): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).FormulaArray = value # move to the next row xl_cell = xl_cell.Offset(2, 1) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): data = table.get_data(workbook, row, col) height, width = data.shape upper_left = origin.Offset(row+1, col+1) lower_right = origin.Offset(row + height, col + width) xl_range = worksheet.Range(upper_left, lower_right) xl_range.FormulaArray = table.formula.get_formula(workbook, row, col) # set any formatting for (row, col), style in self._get_all_styles().items(): r = origin.Offset(1 + row, 1 + col) if style.bold: r.Font.Bold = True if style.excel_number_format is not None: r.NumberFormat = style.excel_number_format if style.size is not None: r.Font.Size = style.size if style.text_color is not None: r.Font.Color = _to_bgr(style.text_color) if style.bg_color is not None: r.Interior.Color = _to_bgr(style.bg_color) if style.text_wrap or style.border: raise Exception("text wrap and border not implemented") # add any charts for chart, (row, col) in self.__charts: top_left = origin.Offset(1 + row, 1 + col) xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype) if chart.title: xl_chart.ChartTitle = chart.title for series in chart.iter_series(self, row, col): xl_series = xl_chart.SeriesCollection().NewSeries() xl_series.Values = "=%s!%s" % (self.name, 
series["values"].lstrip("=")) if "categories" in series: xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("=")) if "name" in series: xl_series.Name = series["name"] finally: xl.ScreenUpdating = screen_updating xl.Calculation = calculation if resize_columns: try: worksheet.Cells.EntireColumn.AutoFit() except: pass def to_xlsx(self, filename=None, workbook=None): """ Write worksheet to a .xlsx file using xlsxwriter. :param str filename: Filename to write to. If None no file is written. :param xltable.Workbook: Workbook this sheet belongs to. If None a new workbook will be created with this worksheet as the only sheet. :return: :py:class:`xlsxwriter.workbook.Workbook` instance. """ from .workbook import Workbook if not workbook: workbook = Workbook(filename=filename) workbook.append(self) return workbook.to_xlsx() ws = workbook.add_xlsx_worksheet(self, self.name) _styles = {} def _get_xlsx_style(cell_style): """ convert rb.excel style to xlsx writer style """ style_args = ( cell_style.bold, cell_style.excel_number_format, cell_style.text_color, cell_style.bg_color, cell_style.size, cell_style.text_wrap, cell_style.text_wrap, cell_style.border, cell_style.align, cell_style.valign ) if (style_args) not in _styles: style = workbook.add_format() if cell_style.bold: style.set_bold() if cell_style.excel_number_format is not None: style.set_num_format(cell_style.excel_number_format) if cell_style.text_color is not None: style.set_font_color("#%06x" % cell_style.text_color) if cell_style.bg_color is not None: style.set_bg_color("#%06x" % cell_style.bg_color) if cell_style.size is not None: style.set_font_size(cell_style.size) if cell_style.text_wrap: style.set_text_wrap() if cell_style.border: if isinstance(cell_style.border, frozenset): for border_position, border_style in cell_style.border: if border_position == "bottom": style.set_bottom(border_style) elif border_position == "top": style.set_top(border_style) elif border_position == "left": 
style.set_left(border_style) elif border_position == "right": style.set_right(border_style) else: raise AssertionError("Unknown border position '%s'." % border_position) else: style.set_border(cell_style.border) if cell_style.align: style.set_align(cell_style.align) if cell_style.valign: style.set_valign(cell_style.valign) _styles[style_args] = style return _styles[style_args] # pre-compute the cells with non-default styles ws_styles = self._get_all_styles() ws_styles = {(r, c): _get_xlsx_style(s) for ((r, c), s) in ws_styles.items()} plain_style = _get_xlsx_style(CellStyle()) # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False # write the rows to the worksheet for ir, row in enumerate(self.iterrows(workbook)): for ic, cell in enumerate(row): style = ws_styles.get((ir, ic), plain_style) if isinstance(cell, str): if cell.startswith("="): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_formula(ir, ic, cell, style, value=formula_value) elif cell.startswith("{="): # array formulas tables are written after everything else, # but individual cells can also be array formulas if not _is_in_array_formula_table(ir, ic): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_array_formula(ir, ic, ir, ic, cell, style, value=formula_value) else: ws.write(ir, ic, cell, style) else: if isinstance(cell, self._xlsx_unsupported_types): ws.write(ir, ic, str(cell), style) else: try: ws.write(ir, ic, cell, style) except TypeError: ws.write(ir, ic, str(cell), style) unsupported_types = set(self._xlsx_unsupported_types) unsupported_types.add(type(cell)) 
self.__class__._xlsx_unsupported_types = tuple(unsupported_types) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): style = ws_styles.get((row, col), plain_style) data = table.get_data(workbook, row, col) height, width = data.shape bottom, right = (row + height - 1, col + width -1) formula = table.formula.get_formula(workbook, row, col) ws.write_array_formula(row, col, bottom, right, formula, style, value=data[0][0]) for y in range(height): for x in range(width): if y == 0 and x == 0: continue ir, ic = row + y, col + x style = ws_styles.get((ir, ic), plain_style) cell = data[y][x] if isinstance(cell, str): cell_str = cell.encode("ascii", "xmlcharrefreplace").decode("ascii") ws.write_formula(ir, ic, cell_str, style) else: ws.write(ir, ic, cell, style) # set any non-default column widths for ic, width in self._get_column_widths().items(): ws.set_column(ic, ic, width) # add any charts for chart, (row, col) in self.__charts: kwargs = {"type": chart.type} if chart.subtype: kwargs["subtype"] = chart.subtype xl_chart = workbook.workbook_obj.add_chart(kwargs) if chart.show_blanks: xl_chart.show_blanks_as(chart.show_blanks) for series in chart.iter_series(workbook, row, col): # xlsxwriter expects the sheetname in the formula values = series.get("values") if isinstance(values, str) and values.startswith("=") and "!" not in values: series["values"] = "='%s'!%s" % (self.name, values.lstrip("=")) categories = series.get("categories") if isinstance(categories, str) and categories.startswith("=") and "!" 
not in categories: series["categories"] = "='%s'!%s" % (self.name, categories.lstrip("=")) xl_chart.add_series(series) xl_chart.set_size({"width": chart.width, "height": chart.height}) if chart.title: xl_chart.set_title({"name": chart.title}) if chart.legend_position: xl_chart.set_legend({"position": chart.legend_position}) if chart.x_axis: xl_chart.set_x_axis(chart.x_axis) if chart.y_axis: xl_chart.set_y_axis(chart.y_axis) ws.insert_chart(row, col, xl_chart) # add any groups for tables, collapsed in self.__groups: min_row, max_row = 1000000, -1 for table, (row, col) in self.__tables.values(): if table in tables: min_row = min(min_row, row) max_row = max(max_row, row + table.height) for i in range(min_row, max_row+1): ws.set_row(i, None, None, {'level': 1, 'hidden': collapsed}) if filename: workbook.close() return workbook
fkarb/xltable
xltable/worksheet.py
Worksheet.add_chart
python
def add_chart(self, chart, row, col): self.__charts.append((chart, (row, col)))
Adds a chart to the worksheet at (row, col). :param xltable.Chart Chart: chart to add to the workbook. :param int row: Row to add the chart at.
train
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L73-L80
null
class Worksheet(object): """ A worksheet is a collection of tables placed at specific locations. Once all tables have been placed the worksheet can be written out or the rows can be iterated over, and any expressions present in the tables will be resolved to absolute cell references. :param str name: Worksheet name. """ _xlsx_unsupported_types = tuple() def __init__(self, name="Sheet1"): self.__name = name self.__tables = {} self.__values = {} self.__charts = [] self.__next_row = 0 self.__groups = [] @property def name(self): """Worksheet name""" return self.__name def add_table(self, table, row=None, col=0, row_spaces=1): """ Adds a table to the worksheet at (row, col). Return the (row, col) where the table has been put. :param xltable.Table table: Table to add to the worksheet. :param int row: Row to start the table at (defaults to the next free row). :param int col: Column to start the table at. :param int row_spaces: Number of rows to leave between this table and the next. """ name = table.name assert name is not None, "Tables must have a name" assert name not in self.__tables, "Table %s already exists in this worksheet" % name if row is None: row = self.__next_row self.__next_row = max(row + table.height + row_spaces, self.__next_row) self.__tables[name] = (table, (row, col)) return row, col def add_value(self, value, row, col): """ Adds a single value (cell) to a worksheet at (row, col). Return the (row, col) where the value has been put. :param value: Value to write to the sheet. :param row: Row where the value should be written. :param col: Column where the value should be written. 
""" self.__values[(row, col)] = value def add_row_group(self, tables, collapsed=True): """ Adds a group over all the given tables (will include any rows between the first row over all tables, and the last row over all tables) Initially collapsed if collapsed is True (True by default) """ self.__groups.append((tables, collapsed)) @property def next_row(self): """Row the next table will start at unless another row is specified.""" return self.__next_row @next_row.setter def next_row(self, value): self.__next_row = value def get_table_pos(self, tablename): """ :param str tablename: Name of table to get position of. :return: Upper left (row, col) coordinate of the named table. """ _table, (row, col) = self.__tables[tablename] return (row, col) def get_table(self, tablename): """ :param str tablename: Name of table to find. :return: A :py:class:`xltable.Table` instance from the table name. """ table, (_row, _col) = self.__tables[tablename] return table def iterrows(self, workbook=None): """ Yield rows as lists of data. The data is exactly as it is in the source pandas DataFrames and any formulas are not resolved. """ resolved_tables = [] max_height = 0 max_width = 0 # while yielding rows __formula_values is updated with any formula values set on Expressions self.__formula_values = {} for name, (table, (row, col)) in list(self.__tables.items()): # get the resolved 2d data array from the table # # expressions with no explicit table will use None when calling # get_table/get_table_pos, which should return the current table. 
# self.__tables[None] = (table, (row, col)) data = table.get_data(workbook, row, col, self.__formula_values) del self.__tables[None] height, width = data.shape upper_left = (row, col) lower_right = (row + height - 1, col + width - 1) max_height = max(max_height, lower_right[0] + 1) max_width = max(max_width, lower_right[1] + 1) resolved_tables.append((name, data, upper_left, lower_right)) for row, col in self.__values.keys(): max_width = max(max_width, row+1) max_height = max(max_height, col+1) # Build the whole table up-front. Doing it row by row is too slow. table = [[None] * max_width for i in range(max_height)] for name, data, upper_left, lower_right in resolved_tables: for i, r in enumerate(range(upper_left[0], lower_right[0]+1)): for j, c in enumerate(range(upper_left[1], lower_right[1]+1)): table[r][c] = data[i][j] for (r, c), value in self.__values.items(): if isinstance(value, Value): value = value.value if isinstance(value, Expression): if value.has_value: self.__formula_values[(r, c)] = value.value value = value.get_formula(workbook, r, c) table[r][c] = value for row in table: yield row def to_csv(self, writer): """ Writes worksheet to a csv.writer object. :param writer: csv writer instance. """ for row in self.iterrows(): writer.writerow(row) def _get_column_widths(self): """return a dictionary of {col -> width}""" col_widths = {} for table, (row, col) in self.__tables.values(): for colname, width in table.column_widths.items(): ic = col + table.get_column_offset(colname) current_width = col_widths.setdefault(ic, width) col_widths[ic] = max(width, current_width) return col_widths def _get_all_styles(self): """ return a dictionary of {(row, col) -> CellStyle} for all cells that use a non-default style. 
""" _styles = {} def _get_style(bold=False, bg_col=None, border=None): if (bold, bg_col, border) not in _styles: _styles[(bold, bg_col, border)] = CellStyle(bold=bold, bg_color=bg_col, border=border) return _styles[(bold, bg_col, border)] ws_styles = {} for table, (row, col) in self.__tables.values(): for r in range(row, row + table.header_height): for c in range(col, col + table.width): if isinstance(table.header_style, dict): col_name = table.dataframe.columns[c - col] style = table.header_style.get(col_name, _get_style(bold=True)) else: style = table.header_style or _get_style(bold=True) ws_styles[(r, c)] = style for c in range(col, col + table.row_labels_width): for r in range(row + table.header_height, row + table.height): if isinstance(table.index_style, dict): row_name = table.dataframe.index[r - row] style = table.index_style.get(row_name, _get_style(bold=True)) else: style = table.index_style or _get_style(bold=True) ws_styles[(r, c)] = style if table.style.stripe_colors or table.style.border: num_bg_cols = len(table.style.stripe_colors) if \ table.style.stripe_colors else 1 bg_cols = table.style.stripe_colors if \ table.style.stripe_colors else None for i, row_offset in enumerate(range(table.header_height, table.height)): for c in range(col, col + table.width): bg_col = bg_cols[i % num_bg_cols] if bg_cols else None style = _get_style(bold=None, bg_col=bg_col, border=table.style.border) if (row + row_offset, c) in ws_styles: style = style + ws_styles[(row + row_offset, c)] ws_styles[(row + row_offset, c)] = style for col_name, col_style in table.column_styles.items(): try: col_offset = table.get_column_offset(col_name) except KeyError: continue for i, r in enumerate(range(row + table.header_height, row + table.height)): style = col_style if (r, col + col_offset) in ws_styles: style = ws_styles[(r, col + col_offset)] + style ws_styles[(r, col + col_offset)] = style for row_name, row_style in table.row_styles.items(): try: row_offset = 
table.get_row_offset(row_name) except KeyError: continue for i, c in enumerate(range(col + table.row_labels_width, col + table.width)): style = row_style if (row + row_offset, c) in ws_styles: style = ws_styles[(row + row_offset, c)] + style ws_styles[(row + row_offset, c)] = style for (row_name, col_name), cell_style in table.cell_styles.items(): try: col_offset = table.get_column_offset(col_name) row_offset = table.get_row_offset(row_name) except KeyError: continue style = cell_style if (row + row_offset, col + col_offset) in ws_styles: style = ws_styles[(row + row_offset, col + col_offset)] + style ws_styles[(row + row_offset, col + col_offset)] = style for (row, col), value in self.__values.items(): if isinstance(value, Value): style = value.style if style: if (row, col) in ws_styles: style = style + ws_styles[(row, col)] ws_styles[(row, col)] = style return ws_styles def to_excel(self, workbook=None, worksheet=None, xl_app=None, clear=True, rename=True, resize_columns=True): """ Writes worksheet to an Excel Worksheet COM object. Requires :py:module:`pywin32` to be installed. :param workbook: xltable.Workbook this sheet belongs to. :param worksheet: Excel COM Worksheet instance to write to. :param xl_app: Excel COM Excel Application to write to. :param bool clear: If a worksheet is provided, clear worksheet before writing. :param bool rename: If a worksheet is provided, rename self to match the worksheet. :param bool resize_columns: Resize sheet columns after writing. 
""" from win32com.client import Dispatch, constants, gencache if xl_app is None: if worksheet is not None: xl_app = worksheet.Parent.Application elif workbook is not None and hasattr(workbook.workbook_obj, "Application"): xl_app = workbook.workbook_obj.Application else: xl_app = Dispatch("Excel.Application") xl = xl_app = gencache.EnsureDispatch(xl_app) # Create a workbook if there isn't one already if not workbook: from .workbook import Workbook workbook = Workbook(worksheets=[self]) if worksheet is None: # If there's no worksheet then call Workbook.to_excel which will create one return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns) if rename: self.__name = worksheet.Name # set manual calculation and turn off screen updating while we update the cells calculation = xl.Calculation screen_updating = xl.ScreenUpdating xl.Calculation = constants.xlCalculationManual xl.ScreenUpdating = False try: # clear the worksheet and reset the styles if clear: worksheet.Cells.ClearContents() worksheet.Cells.Font.Bold = False worksheet.Cells.Font.Size = 11 worksheet.Cells.Font.Color = 0x000000 worksheet.Cells.Interior.ColorIndex = 0 worksheet.Cells.NumberFormat = "General" # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False origin = worksheet.Range("A1") xl_cell = origin for r, row in enumerate(self.iterrows(workbook)): row = _to_pywintypes(row) # set the value and formulae to the excel range (it's much quicker to # write a row at a time and update the formula than it is it do it # cell by cell) if clear: xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, 
len(row))) xl_row.Value = row else: for c, value in enumerate(row): if value is not None: xl_cell.Offset(1, 1 + c).Value = value for c, value in enumerate(row): if isinstance(value, str): if value.startswith("="): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).Formula = value elif value.startswith("{=") \ and not _is_in_array_formula_table(r, c): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).FormulaArray = value # move to the next row xl_cell = xl_cell.Offset(2, 1) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): data = table.get_data(workbook, row, col) height, width = data.shape upper_left = origin.Offset(row+1, col+1) lower_right = origin.Offset(row + height, col + width) xl_range = worksheet.Range(upper_left, lower_right) xl_range.FormulaArray = table.formula.get_formula(workbook, row, col) # set any formatting for (row, col), style in self._get_all_styles().items(): r = origin.Offset(1 + row, 1 + col) if style.bold: r.Font.Bold = True if style.excel_number_format is not None: r.NumberFormat = style.excel_number_format if style.size is not None: r.Font.Size = style.size if style.text_color is not None: r.Font.Color = _to_bgr(style.text_color) if style.bg_color is not None: r.Interior.Color = _to_bgr(style.bg_color) if style.text_wrap or style.border: raise Exception("text wrap and border not implemented") # add any charts for chart, (row, col) in self.__charts: top_left = origin.Offset(1 + row, 1 + col) xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype) if chart.title: xl_chart.ChartTitle = chart.title for series in chart.iter_series(self, row, col): xl_series = xl_chart.SeriesCollection().NewSeries() xl_series.Values = "=%s!%s" % (self.name, 
series["values"].lstrip("=")) if "categories" in series: xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("=")) if "name" in series: xl_series.Name = series["name"] finally: xl.ScreenUpdating = screen_updating xl.Calculation = calculation if resize_columns: try: worksheet.Cells.EntireColumn.AutoFit() except: pass def to_xlsx(self, filename=None, workbook=None): """ Write worksheet to a .xlsx file using xlsxwriter. :param str filename: Filename to write to. If None no file is written. :param xltable.Workbook: Workbook this sheet belongs to. If None a new workbook will be created with this worksheet as the only sheet. :return: :py:class:`xlsxwriter.workbook.Workbook` instance. """ from .workbook import Workbook if not workbook: workbook = Workbook(filename=filename) workbook.append(self) return workbook.to_xlsx() ws = workbook.add_xlsx_worksheet(self, self.name) _styles = {} def _get_xlsx_style(cell_style): """ convert rb.excel style to xlsx writer style """ style_args = ( cell_style.bold, cell_style.excel_number_format, cell_style.text_color, cell_style.bg_color, cell_style.size, cell_style.text_wrap, cell_style.text_wrap, cell_style.border, cell_style.align, cell_style.valign ) if (style_args) not in _styles: style = workbook.add_format() if cell_style.bold: style.set_bold() if cell_style.excel_number_format is not None: style.set_num_format(cell_style.excel_number_format) if cell_style.text_color is not None: style.set_font_color("#%06x" % cell_style.text_color) if cell_style.bg_color is not None: style.set_bg_color("#%06x" % cell_style.bg_color) if cell_style.size is not None: style.set_font_size(cell_style.size) if cell_style.text_wrap: style.set_text_wrap() if cell_style.border: if isinstance(cell_style.border, frozenset): for border_position, border_style in cell_style.border: if border_position == "bottom": style.set_bottom(border_style) elif border_position == "top": style.set_top(border_style) elif border_position == "left": 
style.set_left(border_style) elif border_position == "right": style.set_right(border_style) else: raise AssertionError("Unknown border position '%s'." % border_position) else: style.set_border(cell_style.border) if cell_style.align: style.set_align(cell_style.align) if cell_style.valign: style.set_valign(cell_style.valign) _styles[style_args] = style return _styles[style_args] # pre-compute the cells with non-default styles ws_styles = self._get_all_styles() ws_styles = {(r, c): _get_xlsx_style(s) for ((r, c), s) in ws_styles.items()} plain_style = _get_xlsx_style(CellStyle()) # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False # write the rows to the worksheet for ir, row in enumerate(self.iterrows(workbook)): for ic, cell in enumerate(row): style = ws_styles.get((ir, ic), plain_style) if isinstance(cell, str): if cell.startswith("="): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_formula(ir, ic, cell, style, value=formula_value) elif cell.startswith("{="): # array formulas tables are written after everything else, # but individual cells can also be array formulas if not _is_in_array_formula_table(ir, ic): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_array_formula(ir, ic, ir, ic, cell, style, value=formula_value) else: ws.write(ir, ic, cell, style) else: if isinstance(cell, self._xlsx_unsupported_types): ws.write(ir, ic, str(cell), style) else: try: ws.write(ir, ic, cell, style) except TypeError: ws.write(ir, ic, str(cell), style) unsupported_types = set(self._xlsx_unsupported_types) unsupported_types.add(type(cell)) 
self.__class__._xlsx_unsupported_types = tuple(unsupported_types) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): style = ws_styles.get((row, col), plain_style) data = table.get_data(workbook, row, col) height, width = data.shape bottom, right = (row + height - 1, col + width -1) formula = table.formula.get_formula(workbook, row, col) ws.write_array_formula(row, col, bottom, right, formula, style, value=data[0][0]) for y in range(height): for x in range(width): if y == 0 and x == 0: continue ir, ic = row + y, col + x style = ws_styles.get((ir, ic), plain_style) cell = data[y][x] if isinstance(cell, str): cell_str = cell.encode("ascii", "xmlcharrefreplace").decode("ascii") ws.write_formula(ir, ic, cell_str, style) else: ws.write(ir, ic, cell, style) # set any non-default column widths for ic, width in self._get_column_widths().items(): ws.set_column(ic, ic, width) # add any charts for chart, (row, col) in self.__charts: kwargs = {"type": chart.type} if chart.subtype: kwargs["subtype"] = chart.subtype xl_chart = workbook.workbook_obj.add_chart(kwargs) if chart.show_blanks: xl_chart.show_blanks_as(chart.show_blanks) for series in chart.iter_series(workbook, row, col): # xlsxwriter expects the sheetname in the formula values = series.get("values") if isinstance(values, str) and values.startswith("=") and "!" not in values: series["values"] = "='%s'!%s" % (self.name, values.lstrip("=")) categories = series.get("categories") if isinstance(categories, str) and categories.startswith("=") and "!" 
not in categories: series["categories"] = "='%s'!%s" % (self.name, categories.lstrip("=")) xl_chart.add_series(series) xl_chart.set_size({"width": chart.width, "height": chart.height}) if chart.title: xl_chart.set_title({"name": chart.title}) if chart.legend_position: xl_chart.set_legend({"position": chart.legend_position}) if chart.x_axis: xl_chart.set_x_axis(chart.x_axis) if chart.y_axis: xl_chart.set_y_axis(chart.y_axis) ws.insert_chart(row, col, xl_chart) # add any groups for tables, collapsed in self.__groups: min_row, max_row = 1000000, -1 for table, (row, col) in self.__tables.values(): if table in tables: min_row = min(min_row, row) max_row = max(max_row, row + table.height) for i in range(min_row, max_row+1): ws.set_row(i, None, None, {'level': 1, 'hidden': collapsed}) if filename: workbook.close() return workbook
fkarb/xltable
xltable/worksheet.py
Worksheet.add_row_group
python
def add_row_group(self, tables, collapsed=True): self.__groups.append((tables, collapsed))
Adds a group over all the given tables (will include any rows between the first row over all tables, and the last row over all tables) Initially collapsed if collapsed is True (True by default)
train
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L82-L88
null
class Worksheet(object): """ A worksheet is a collection of tables placed at specific locations. Once all tables have been placed the worksheet can be written out or the rows can be iterated over, and any expressions present in the tables will be resolved to absolute cell references. :param str name: Worksheet name. """ _xlsx_unsupported_types = tuple() def __init__(self, name="Sheet1"): self.__name = name self.__tables = {} self.__values = {} self.__charts = [] self.__next_row = 0 self.__groups = [] @property def name(self): """Worksheet name""" return self.__name def add_table(self, table, row=None, col=0, row_spaces=1): """ Adds a table to the worksheet at (row, col). Return the (row, col) where the table has been put. :param xltable.Table table: Table to add to the worksheet. :param int row: Row to start the table at (defaults to the next free row). :param int col: Column to start the table at. :param int row_spaces: Number of rows to leave between this table and the next. """ name = table.name assert name is not None, "Tables must have a name" assert name not in self.__tables, "Table %s already exists in this worksheet" % name if row is None: row = self.__next_row self.__next_row = max(row + table.height + row_spaces, self.__next_row) self.__tables[name] = (table, (row, col)) return row, col def add_value(self, value, row, col): """ Adds a single value (cell) to a worksheet at (row, col). Return the (row, col) where the value has been put. :param value: Value to write to the sheet. :param row: Row where the value should be written. :param col: Column where the value should be written. """ self.__values[(row, col)] = value def add_chart(self, chart, row, col): """ Adds a chart to the worksheet at (row, col). :param xltable.Chart Chart: chart to add to the workbook. :param int row: Row to add the chart at. 
""" self.__charts.append((chart, (row, col))) @property def next_row(self): """Row the next table will start at unless another row is specified.""" return self.__next_row @next_row.setter def next_row(self, value): self.__next_row = value def get_table_pos(self, tablename): """ :param str tablename: Name of table to get position of. :return: Upper left (row, col) coordinate of the named table. """ _table, (row, col) = self.__tables[tablename] return (row, col) def get_table(self, tablename): """ :param str tablename: Name of table to find. :return: A :py:class:`xltable.Table` instance from the table name. """ table, (_row, _col) = self.__tables[tablename] return table def iterrows(self, workbook=None): """ Yield rows as lists of data. The data is exactly as it is in the source pandas DataFrames and any formulas are not resolved. """ resolved_tables = [] max_height = 0 max_width = 0 # while yielding rows __formula_values is updated with any formula values set on Expressions self.__formula_values = {} for name, (table, (row, col)) in list(self.__tables.items()): # get the resolved 2d data array from the table # # expressions with no explicit table will use None when calling # get_table/get_table_pos, which should return the current table. # self.__tables[None] = (table, (row, col)) data = table.get_data(workbook, row, col, self.__formula_values) del self.__tables[None] height, width = data.shape upper_left = (row, col) lower_right = (row + height - 1, col + width - 1) max_height = max(max_height, lower_right[0] + 1) max_width = max(max_width, lower_right[1] + 1) resolved_tables.append((name, data, upper_left, lower_right)) for row, col in self.__values.keys(): max_width = max(max_width, row+1) max_height = max(max_height, col+1) # Build the whole table up-front. Doing it row by row is too slow. 
table = [[None] * max_width for i in range(max_height)] for name, data, upper_left, lower_right in resolved_tables: for i, r in enumerate(range(upper_left[0], lower_right[0]+1)): for j, c in enumerate(range(upper_left[1], lower_right[1]+1)): table[r][c] = data[i][j] for (r, c), value in self.__values.items(): if isinstance(value, Value): value = value.value if isinstance(value, Expression): if value.has_value: self.__formula_values[(r, c)] = value.value value = value.get_formula(workbook, r, c) table[r][c] = value for row in table: yield row def to_csv(self, writer): """ Writes worksheet to a csv.writer object. :param writer: csv writer instance. """ for row in self.iterrows(): writer.writerow(row) def _get_column_widths(self): """return a dictionary of {col -> width}""" col_widths = {} for table, (row, col) in self.__tables.values(): for colname, width in table.column_widths.items(): ic = col + table.get_column_offset(colname) current_width = col_widths.setdefault(ic, width) col_widths[ic] = max(width, current_width) return col_widths def _get_all_styles(self): """ return a dictionary of {(row, col) -> CellStyle} for all cells that use a non-default style. 
""" _styles = {} def _get_style(bold=False, bg_col=None, border=None): if (bold, bg_col, border) not in _styles: _styles[(bold, bg_col, border)] = CellStyle(bold=bold, bg_color=bg_col, border=border) return _styles[(bold, bg_col, border)] ws_styles = {} for table, (row, col) in self.__tables.values(): for r in range(row, row + table.header_height): for c in range(col, col + table.width): if isinstance(table.header_style, dict): col_name = table.dataframe.columns[c - col] style = table.header_style.get(col_name, _get_style(bold=True)) else: style = table.header_style or _get_style(bold=True) ws_styles[(r, c)] = style for c in range(col, col + table.row_labels_width): for r in range(row + table.header_height, row + table.height): if isinstance(table.index_style, dict): row_name = table.dataframe.index[r - row] style = table.index_style.get(row_name, _get_style(bold=True)) else: style = table.index_style or _get_style(bold=True) ws_styles[(r, c)] = style if table.style.stripe_colors or table.style.border: num_bg_cols = len(table.style.stripe_colors) if \ table.style.stripe_colors else 1 bg_cols = table.style.stripe_colors if \ table.style.stripe_colors else None for i, row_offset in enumerate(range(table.header_height, table.height)): for c in range(col, col + table.width): bg_col = bg_cols[i % num_bg_cols] if bg_cols else None style = _get_style(bold=None, bg_col=bg_col, border=table.style.border) if (row + row_offset, c) in ws_styles: style = style + ws_styles[(row + row_offset, c)] ws_styles[(row + row_offset, c)] = style for col_name, col_style in table.column_styles.items(): try: col_offset = table.get_column_offset(col_name) except KeyError: continue for i, r in enumerate(range(row + table.header_height, row + table.height)): style = col_style if (r, col + col_offset) in ws_styles: style = ws_styles[(r, col + col_offset)] + style ws_styles[(r, col + col_offset)] = style for row_name, row_style in table.row_styles.items(): try: row_offset = 
table.get_row_offset(row_name) except KeyError: continue for i, c in enumerate(range(col + table.row_labels_width, col + table.width)): style = row_style if (row + row_offset, c) in ws_styles: style = ws_styles[(row + row_offset, c)] + style ws_styles[(row + row_offset, c)] = style for (row_name, col_name), cell_style in table.cell_styles.items(): try: col_offset = table.get_column_offset(col_name) row_offset = table.get_row_offset(row_name) except KeyError: continue style = cell_style if (row + row_offset, col + col_offset) in ws_styles: style = ws_styles[(row + row_offset, col + col_offset)] + style ws_styles[(row + row_offset, col + col_offset)] = style for (row, col), value in self.__values.items(): if isinstance(value, Value): style = value.style if style: if (row, col) in ws_styles: style = style + ws_styles[(row, col)] ws_styles[(row, col)] = style return ws_styles def to_excel(self, workbook=None, worksheet=None, xl_app=None, clear=True, rename=True, resize_columns=True): """ Writes worksheet to an Excel Worksheet COM object. Requires :py:module:`pywin32` to be installed. :param workbook: xltable.Workbook this sheet belongs to. :param worksheet: Excel COM Worksheet instance to write to. :param xl_app: Excel COM Excel Application to write to. :param bool clear: If a worksheet is provided, clear worksheet before writing. :param bool rename: If a worksheet is provided, rename self to match the worksheet. :param bool resize_columns: Resize sheet columns after writing. 
""" from win32com.client import Dispatch, constants, gencache if xl_app is None: if worksheet is not None: xl_app = worksheet.Parent.Application elif workbook is not None and hasattr(workbook.workbook_obj, "Application"): xl_app = workbook.workbook_obj.Application else: xl_app = Dispatch("Excel.Application") xl = xl_app = gencache.EnsureDispatch(xl_app) # Create a workbook if there isn't one already if not workbook: from .workbook import Workbook workbook = Workbook(worksheets=[self]) if worksheet is None: # If there's no worksheet then call Workbook.to_excel which will create one return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns) if rename: self.__name = worksheet.Name # set manual calculation and turn off screen updating while we update the cells calculation = xl.Calculation screen_updating = xl.ScreenUpdating xl.Calculation = constants.xlCalculationManual xl.ScreenUpdating = False try: # clear the worksheet and reset the styles if clear: worksheet.Cells.ClearContents() worksheet.Cells.Font.Bold = False worksheet.Cells.Font.Size = 11 worksheet.Cells.Font.Color = 0x000000 worksheet.Cells.Interior.ColorIndex = 0 worksheet.Cells.NumberFormat = "General" # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False origin = worksheet.Range("A1") xl_cell = origin for r, row in enumerate(self.iterrows(workbook)): row = _to_pywintypes(row) # set the value and formulae to the excel range (it's much quicker to # write a row at a time and update the formula than it is it do it # cell by cell) if clear: xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, 
len(row))) xl_row.Value = row else: for c, value in enumerate(row): if value is not None: xl_cell.Offset(1, 1 + c).Value = value for c, value in enumerate(row): if isinstance(value, str): if value.startswith("="): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).Formula = value elif value.startswith("{=") \ and not _is_in_array_formula_table(r, c): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).FormulaArray = value # move to the next row xl_cell = xl_cell.Offset(2, 1) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): data = table.get_data(workbook, row, col) height, width = data.shape upper_left = origin.Offset(row+1, col+1) lower_right = origin.Offset(row + height, col + width) xl_range = worksheet.Range(upper_left, lower_right) xl_range.FormulaArray = table.formula.get_formula(workbook, row, col) # set any formatting for (row, col), style in self._get_all_styles().items(): r = origin.Offset(1 + row, 1 + col) if style.bold: r.Font.Bold = True if style.excel_number_format is not None: r.NumberFormat = style.excel_number_format if style.size is not None: r.Font.Size = style.size if style.text_color is not None: r.Font.Color = _to_bgr(style.text_color) if style.bg_color is not None: r.Interior.Color = _to_bgr(style.bg_color) if style.text_wrap or style.border: raise Exception("text wrap and border not implemented") # add any charts for chart, (row, col) in self.__charts: top_left = origin.Offset(1 + row, 1 + col) xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype) if chart.title: xl_chart.ChartTitle = chart.title for series in chart.iter_series(self, row, col): xl_series = xl_chart.SeriesCollection().NewSeries() xl_series.Values = "=%s!%s" % (self.name, 
series["values"].lstrip("=")) if "categories" in series: xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("=")) if "name" in series: xl_series.Name = series["name"] finally: xl.ScreenUpdating = screen_updating xl.Calculation = calculation if resize_columns: try: worksheet.Cells.EntireColumn.AutoFit() except: pass def to_xlsx(self, filename=None, workbook=None): """ Write worksheet to a .xlsx file using xlsxwriter. :param str filename: Filename to write to. If None no file is written. :param xltable.Workbook: Workbook this sheet belongs to. If None a new workbook will be created with this worksheet as the only sheet. :return: :py:class:`xlsxwriter.workbook.Workbook` instance. """ from .workbook import Workbook if not workbook: workbook = Workbook(filename=filename) workbook.append(self) return workbook.to_xlsx() ws = workbook.add_xlsx_worksheet(self, self.name) _styles = {} def _get_xlsx_style(cell_style): """ convert rb.excel style to xlsx writer style """ style_args = ( cell_style.bold, cell_style.excel_number_format, cell_style.text_color, cell_style.bg_color, cell_style.size, cell_style.text_wrap, cell_style.text_wrap, cell_style.border, cell_style.align, cell_style.valign ) if (style_args) not in _styles: style = workbook.add_format() if cell_style.bold: style.set_bold() if cell_style.excel_number_format is not None: style.set_num_format(cell_style.excel_number_format) if cell_style.text_color is not None: style.set_font_color("#%06x" % cell_style.text_color) if cell_style.bg_color is not None: style.set_bg_color("#%06x" % cell_style.bg_color) if cell_style.size is not None: style.set_font_size(cell_style.size) if cell_style.text_wrap: style.set_text_wrap() if cell_style.border: if isinstance(cell_style.border, frozenset): for border_position, border_style in cell_style.border: if border_position == "bottom": style.set_bottom(border_style) elif border_position == "top": style.set_top(border_style) elif border_position == "left": 
style.set_left(border_style) elif border_position == "right": style.set_right(border_style) else: raise AssertionError("Unknown border position '%s'." % border_position) else: style.set_border(cell_style.border) if cell_style.align: style.set_align(cell_style.align) if cell_style.valign: style.set_valign(cell_style.valign) _styles[style_args] = style return _styles[style_args] # pre-compute the cells with non-default styles ws_styles = self._get_all_styles() ws_styles = {(r, c): _get_xlsx_style(s) for ((r, c), s) in ws_styles.items()} plain_style = _get_xlsx_style(CellStyle()) # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False # write the rows to the worksheet for ir, row in enumerate(self.iterrows(workbook)): for ic, cell in enumerate(row): style = ws_styles.get((ir, ic), plain_style) if isinstance(cell, str): if cell.startswith("="): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_formula(ir, ic, cell, style, value=formula_value) elif cell.startswith("{="): # array formulas tables are written after everything else, # but individual cells can also be array formulas if not _is_in_array_formula_table(ir, ic): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_array_formula(ir, ic, ir, ic, cell, style, value=formula_value) else: ws.write(ir, ic, cell, style) else: if isinstance(cell, self._xlsx_unsupported_types): ws.write(ir, ic, str(cell), style) else: try: ws.write(ir, ic, cell, style) except TypeError: ws.write(ir, ic, str(cell), style) unsupported_types = set(self._xlsx_unsupported_types) unsupported_types.add(type(cell)) 
self.__class__._xlsx_unsupported_types = tuple(unsupported_types) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): style = ws_styles.get((row, col), plain_style) data = table.get_data(workbook, row, col) height, width = data.shape bottom, right = (row + height - 1, col + width -1) formula = table.formula.get_formula(workbook, row, col) ws.write_array_formula(row, col, bottom, right, formula, style, value=data[0][0]) for y in range(height): for x in range(width): if y == 0 and x == 0: continue ir, ic = row + y, col + x style = ws_styles.get((ir, ic), plain_style) cell = data[y][x] if isinstance(cell, str): cell_str = cell.encode("ascii", "xmlcharrefreplace").decode("ascii") ws.write_formula(ir, ic, cell_str, style) else: ws.write(ir, ic, cell, style) # set any non-default column widths for ic, width in self._get_column_widths().items(): ws.set_column(ic, ic, width) # add any charts for chart, (row, col) in self.__charts: kwargs = {"type": chart.type} if chart.subtype: kwargs["subtype"] = chart.subtype xl_chart = workbook.workbook_obj.add_chart(kwargs) if chart.show_blanks: xl_chart.show_blanks_as(chart.show_blanks) for series in chart.iter_series(workbook, row, col): # xlsxwriter expects the sheetname in the formula values = series.get("values") if isinstance(values, str) and values.startswith("=") and "!" not in values: series["values"] = "='%s'!%s" % (self.name, values.lstrip("=")) categories = series.get("categories") if isinstance(categories, str) and categories.startswith("=") and "!" 
not in categories: series["categories"] = "='%s'!%s" % (self.name, categories.lstrip("=")) xl_chart.add_series(series) xl_chart.set_size({"width": chart.width, "height": chart.height}) if chart.title: xl_chart.set_title({"name": chart.title}) if chart.legend_position: xl_chart.set_legend({"position": chart.legend_position}) if chart.x_axis: xl_chart.set_x_axis(chart.x_axis) if chart.y_axis: xl_chart.set_y_axis(chart.y_axis) ws.insert_chart(row, col, xl_chart) # add any groups for tables, collapsed in self.__groups: min_row, max_row = 1000000, -1 for table, (row, col) in self.__tables.values(): if table in tables: min_row = min(min_row, row) max_row = max(max_row, row + table.height) for i in range(min_row, max_row+1): ws.set_row(i, None, None, {'level': 1, 'hidden': collapsed}) if filename: workbook.close() return workbook
fkarb/xltable
xltable/worksheet.py
Worksheet.get_table_pos
python
def get_table_pos(self, tablename): _table, (row, col) = self.__tables[tablename] return (row, col)
:param str tablename: Name of table to get position of. :return: Upper left (row, col) coordinate of the named table.
train
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L99-L105
null
class Worksheet(object): """ A worksheet is a collection of tables placed at specific locations. Once all tables have been placed the worksheet can be written out or the rows can be iterated over, and any expressions present in the tables will be resolved to absolute cell references. :param str name: Worksheet name. """ _xlsx_unsupported_types = tuple() def __init__(self, name="Sheet1"): self.__name = name self.__tables = {} self.__values = {} self.__charts = [] self.__next_row = 0 self.__groups = [] @property def name(self): """Worksheet name""" return self.__name def add_table(self, table, row=None, col=0, row_spaces=1): """ Adds a table to the worksheet at (row, col). Return the (row, col) where the table has been put. :param xltable.Table table: Table to add to the worksheet. :param int row: Row to start the table at (defaults to the next free row). :param int col: Column to start the table at. :param int row_spaces: Number of rows to leave between this table and the next. """ name = table.name assert name is not None, "Tables must have a name" assert name not in self.__tables, "Table %s already exists in this worksheet" % name if row is None: row = self.__next_row self.__next_row = max(row + table.height + row_spaces, self.__next_row) self.__tables[name] = (table, (row, col)) return row, col def add_value(self, value, row, col): """ Adds a single value (cell) to a worksheet at (row, col). Return the (row, col) where the value has been put. :param value: Value to write to the sheet. :param row: Row where the value should be written. :param col: Column where the value should be written. """ self.__values[(row, col)] = value def add_chart(self, chart, row, col): """ Adds a chart to the worksheet at (row, col). :param xltable.Chart Chart: chart to add to the workbook. :param int row: Row to add the chart at. 
""" self.__charts.append((chart, (row, col))) def add_row_group(self, tables, collapsed=True): """ Adds a group over all the given tables (will include any rows between the first row over all tables, and the last row over all tables) Initially collapsed if collapsed is True (True by default) """ self.__groups.append((tables, collapsed)) @property def next_row(self): """Row the next table will start at unless another row is specified.""" return self.__next_row @next_row.setter def next_row(self, value): self.__next_row = value def get_table(self, tablename): """ :param str tablename: Name of table to find. :return: A :py:class:`xltable.Table` instance from the table name. """ table, (_row, _col) = self.__tables[tablename] return table def iterrows(self, workbook=None): """ Yield rows as lists of data. The data is exactly as it is in the source pandas DataFrames and any formulas are not resolved. """ resolved_tables = [] max_height = 0 max_width = 0 # while yielding rows __formula_values is updated with any formula values set on Expressions self.__formula_values = {} for name, (table, (row, col)) in list(self.__tables.items()): # get the resolved 2d data array from the table # # expressions with no explicit table will use None when calling # get_table/get_table_pos, which should return the current table. # self.__tables[None] = (table, (row, col)) data = table.get_data(workbook, row, col, self.__formula_values) del self.__tables[None] height, width = data.shape upper_left = (row, col) lower_right = (row + height - 1, col + width - 1) max_height = max(max_height, lower_right[0] + 1) max_width = max(max_width, lower_right[1] + 1) resolved_tables.append((name, data, upper_left, lower_right)) for row, col in self.__values.keys(): max_width = max(max_width, row+1) max_height = max(max_height, col+1) # Build the whole table up-front. Doing it row by row is too slow. 
table = [[None] * max_width for i in range(max_height)] for name, data, upper_left, lower_right in resolved_tables: for i, r in enumerate(range(upper_left[0], lower_right[0]+1)): for j, c in enumerate(range(upper_left[1], lower_right[1]+1)): table[r][c] = data[i][j] for (r, c), value in self.__values.items(): if isinstance(value, Value): value = value.value if isinstance(value, Expression): if value.has_value: self.__formula_values[(r, c)] = value.value value = value.get_formula(workbook, r, c) table[r][c] = value for row in table: yield row def to_csv(self, writer): """ Writes worksheet to a csv.writer object. :param writer: csv writer instance. """ for row in self.iterrows(): writer.writerow(row) def _get_column_widths(self): """return a dictionary of {col -> width}""" col_widths = {} for table, (row, col) in self.__tables.values(): for colname, width in table.column_widths.items(): ic = col + table.get_column_offset(colname) current_width = col_widths.setdefault(ic, width) col_widths[ic] = max(width, current_width) return col_widths def _get_all_styles(self): """ return a dictionary of {(row, col) -> CellStyle} for all cells that use a non-default style. 
""" _styles = {} def _get_style(bold=False, bg_col=None, border=None): if (bold, bg_col, border) not in _styles: _styles[(bold, bg_col, border)] = CellStyle(bold=bold, bg_color=bg_col, border=border) return _styles[(bold, bg_col, border)] ws_styles = {} for table, (row, col) in self.__tables.values(): for r in range(row, row + table.header_height): for c in range(col, col + table.width): if isinstance(table.header_style, dict): col_name = table.dataframe.columns[c - col] style = table.header_style.get(col_name, _get_style(bold=True)) else: style = table.header_style or _get_style(bold=True) ws_styles[(r, c)] = style for c in range(col, col + table.row_labels_width): for r in range(row + table.header_height, row + table.height): if isinstance(table.index_style, dict): row_name = table.dataframe.index[r - row] style = table.index_style.get(row_name, _get_style(bold=True)) else: style = table.index_style or _get_style(bold=True) ws_styles[(r, c)] = style if table.style.stripe_colors or table.style.border: num_bg_cols = len(table.style.stripe_colors) if \ table.style.stripe_colors else 1 bg_cols = table.style.stripe_colors if \ table.style.stripe_colors else None for i, row_offset in enumerate(range(table.header_height, table.height)): for c in range(col, col + table.width): bg_col = bg_cols[i % num_bg_cols] if bg_cols else None style = _get_style(bold=None, bg_col=bg_col, border=table.style.border) if (row + row_offset, c) in ws_styles: style = style + ws_styles[(row + row_offset, c)] ws_styles[(row + row_offset, c)] = style for col_name, col_style in table.column_styles.items(): try: col_offset = table.get_column_offset(col_name) except KeyError: continue for i, r in enumerate(range(row + table.header_height, row + table.height)): style = col_style if (r, col + col_offset) in ws_styles: style = ws_styles[(r, col + col_offset)] + style ws_styles[(r, col + col_offset)] = style for row_name, row_style in table.row_styles.items(): try: row_offset = 
table.get_row_offset(row_name) except KeyError: continue for i, c in enumerate(range(col + table.row_labels_width, col + table.width)): style = row_style if (row + row_offset, c) in ws_styles: style = ws_styles[(row + row_offset, c)] + style ws_styles[(row + row_offset, c)] = style for (row_name, col_name), cell_style in table.cell_styles.items(): try: col_offset = table.get_column_offset(col_name) row_offset = table.get_row_offset(row_name) except KeyError: continue style = cell_style if (row + row_offset, col + col_offset) in ws_styles: style = ws_styles[(row + row_offset, col + col_offset)] + style ws_styles[(row + row_offset, col + col_offset)] = style for (row, col), value in self.__values.items(): if isinstance(value, Value): style = value.style if style: if (row, col) in ws_styles: style = style + ws_styles[(row, col)] ws_styles[(row, col)] = style return ws_styles def to_excel(self, workbook=None, worksheet=None, xl_app=None, clear=True, rename=True, resize_columns=True): """ Writes worksheet to an Excel Worksheet COM object. Requires :py:module:`pywin32` to be installed. :param workbook: xltable.Workbook this sheet belongs to. :param worksheet: Excel COM Worksheet instance to write to. :param xl_app: Excel COM Excel Application to write to. :param bool clear: If a worksheet is provided, clear worksheet before writing. :param bool rename: If a worksheet is provided, rename self to match the worksheet. :param bool resize_columns: Resize sheet columns after writing. 
""" from win32com.client import Dispatch, constants, gencache if xl_app is None: if worksheet is not None: xl_app = worksheet.Parent.Application elif workbook is not None and hasattr(workbook.workbook_obj, "Application"): xl_app = workbook.workbook_obj.Application else: xl_app = Dispatch("Excel.Application") xl = xl_app = gencache.EnsureDispatch(xl_app) # Create a workbook if there isn't one already if not workbook: from .workbook import Workbook workbook = Workbook(worksheets=[self]) if worksheet is None: # If there's no worksheet then call Workbook.to_excel which will create one return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns) if rename: self.__name = worksheet.Name # set manual calculation and turn off screen updating while we update the cells calculation = xl.Calculation screen_updating = xl.ScreenUpdating xl.Calculation = constants.xlCalculationManual xl.ScreenUpdating = False try: # clear the worksheet and reset the styles if clear: worksheet.Cells.ClearContents() worksheet.Cells.Font.Bold = False worksheet.Cells.Font.Size = 11 worksheet.Cells.Font.Color = 0x000000 worksheet.Cells.Interior.ColorIndex = 0 worksheet.Cells.NumberFormat = "General" # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False origin = worksheet.Range("A1") xl_cell = origin for r, row in enumerate(self.iterrows(workbook)): row = _to_pywintypes(row) # set the value and formulae to the excel range (it's much quicker to # write a row at a time and update the formula than it is it do it # cell by cell) if clear: xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, 
len(row))) xl_row.Value = row else: for c, value in enumerate(row): if value is not None: xl_cell.Offset(1, 1 + c).Value = value for c, value in enumerate(row): if isinstance(value, str): if value.startswith("="): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).Formula = value elif value.startswith("{=") \ and not _is_in_array_formula_table(r, c): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).FormulaArray = value # move to the next row xl_cell = xl_cell.Offset(2, 1) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): data = table.get_data(workbook, row, col) height, width = data.shape upper_left = origin.Offset(row+1, col+1) lower_right = origin.Offset(row + height, col + width) xl_range = worksheet.Range(upper_left, lower_right) xl_range.FormulaArray = table.formula.get_formula(workbook, row, col) # set any formatting for (row, col), style in self._get_all_styles().items(): r = origin.Offset(1 + row, 1 + col) if style.bold: r.Font.Bold = True if style.excel_number_format is not None: r.NumberFormat = style.excel_number_format if style.size is not None: r.Font.Size = style.size if style.text_color is not None: r.Font.Color = _to_bgr(style.text_color) if style.bg_color is not None: r.Interior.Color = _to_bgr(style.bg_color) if style.text_wrap or style.border: raise Exception("text wrap and border not implemented") # add any charts for chart, (row, col) in self.__charts: top_left = origin.Offset(1 + row, 1 + col) xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype) if chart.title: xl_chart.ChartTitle = chart.title for series in chart.iter_series(self, row, col): xl_series = xl_chart.SeriesCollection().NewSeries() xl_series.Values = "=%s!%s" % (self.name, 
series["values"].lstrip("=")) if "categories" in series: xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("=")) if "name" in series: xl_series.Name = series["name"] finally: xl.ScreenUpdating = screen_updating xl.Calculation = calculation if resize_columns: try: worksheet.Cells.EntireColumn.AutoFit() except: pass def to_xlsx(self, filename=None, workbook=None): """ Write worksheet to a .xlsx file using xlsxwriter. :param str filename: Filename to write to. If None no file is written. :param xltable.Workbook: Workbook this sheet belongs to. If None a new workbook will be created with this worksheet as the only sheet. :return: :py:class:`xlsxwriter.workbook.Workbook` instance. """ from .workbook import Workbook if not workbook: workbook = Workbook(filename=filename) workbook.append(self) return workbook.to_xlsx() ws = workbook.add_xlsx_worksheet(self, self.name) _styles = {} def _get_xlsx_style(cell_style): """ convert rb.excel style to xlsx writer style """ style_args = ( cell_style.bold, cell_style.excel_number_format, cell_style.text_color, cell_style.bg_color, cell_style.size, cell_style.text_wrap, cell_style.text_wrap, cell_style.border, cell_style.align, cell_style.valign ) if (style_args) not in _styles: style = workbook.add_format() if cell_style.bold: style.set_bold() if cell_style.excel_number_format is not None: style.set_num_format(cell_style.excel_number_format) if cell_style.text_color is not None: style.set_font_color("#%06x" % cell_style.text_color) if cell_style.bg_color is not None: style.set_bg_color("#%06x" % cell_style.bg_color) if cell_style.size is not None: style.set_font_size(cell_style.size) if cell_style.text_wrap: style.set_text_wrap() if cell_style.border: if isinstance(cell_style.border, frozenset): for border_position, border_style in cell_style.border: if border_position == "bottom": style.set_bottom(border_style) elif border_position == "top": style.set_top(border_style) elif border_position == "left": 
style.set_left(border_style) elif border_position == "right": style.set_right(border_style) else: raise AssertionError("Unknown border position '%s'." % border_position) else: style.set_border(cell_style.border) if cell_style.align: style.set_align(cell_style.align) if cell_style.valign: style.set_valign(cell_style.valign) _styles[style_args] = style return _styles[style_args] # pre-compute the cells with non-default styles ws_styles = self._get_all_styles() ws_styles = {(r, c): _get_xlsx_style(s) for ((r, c), s) in ws_styles.items()} plain_style = _get_xlsx_style(CellStyle()) # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False # write the rows to the worksheet for ir, row in enumerate(self.iterrows(workbook)): for ic, cell in enumerate(row): style = ws_styles.get((ir, ic), plain_style) if isinstance(cell, str): if cell.startswith("="): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_formula(ir, ic, cell, style, value=formula_value) elif cell.startswith("{="): # array formulas tables are written after everything else, # but individual cells can also be array formulas if not _is_in_array_formula_table(ir, ic): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_array_formula(ir, ic, ir, ic, cell, style, value=formula_value) else: ws.write(ir, ic, cell, style) else: if isinstance(cell, self._xlsx_unsupported_types): ws.write(ir, ic, str(cell), style) else: try: ws.write(ir, ic, cell, style) except TypeError: ws.write(ir, ic, str(cell), style) unsupported_types = set(self._xlsx_unsupported_types) unsupported_types.add(type(cell)) 
self.__class__._xlsx_unsupported_types = tuple(unsupported_types) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): style = ws_styles.get((row, col), plain_style) data = table.get_data(workbook, row, col) height, width = data.shape bottom, right = (row + height - 1, col + width -1) formula = table.formula.get_formula(workbook, row, col) ws.write_array_formula(row, col, bottom, right, formula, style, value=data[0][0]) for y in range(height): for x in range(width): if y == 0 and x == 0: continue ir, ic = row + y, col + x style = ws_styles.get((ir, ic), plain_style) cell = data[y][x] if isinstance(cell, str): cell_str = cell.encode("ascii", "xmlcharrefreplace").decode("ascii") ws.write_formula(ir, ic, cell_str, style) else: ws.write(ir, ic, cell, style) # set any non-default column widths for ic, width in self._get_column_widths().items(): ws.set_column(ic, ic, width) # add any charts for chart, (row, col) in self.__charts: kwargs = {"type": chart.type} if chart.subtype: kwargs["subtype"] = chart.subtype xl_chart = workbook.workbook_obj.add_chart(kwargs) if chart.show_blanks: xl_chart.show_blanks_as(chart.show_blanks) for series in chart.iter_series(workbook, row, col): # xlsxwriter expects the sheetname in the formula values = series.get("values") if isinstance(values, str) and values.startswith("=") and "!" not in values: series["values"] = "='%s'!%s" % (self.name, values.lstrip("=")) categories = series.get("categories") if isinstance(categories, str) and categories.startswith("=") and "!" 
not in categories: series["categories"] = "='%s'!%s" % (self.name, categories.lstrip("=")) xl_chart.add_series(series) xl_chart.set_size({"width": chart.width, "height": chart.height}) if chart.title: xl_chart.set_title({"name": chart.title}) if chart.legend_position: xl_chart.set_legend({"position": chart.legend_position}) if chart.x_axis: xl_chart.set_x_axis(chart.x_axis) if chart.y_axis: xl_chart.set_y_axis(chart.y_axis) ws.insert_chart(row, col, xl_chart) # add any groups for tables, collapsed in self.__groups: min_row, max_row = 1000000, -1 for table, (row, col) in self.__tables.values(): if table in tables: min_row = min(min_row, row) max_row = max(max_row, row + table.height) for i in range(min_row, max_row+1): ws.set_row(i, None, None, {'level': 1, 'hidden': collapsed}) if filename: workbook.close() return workbook
fkarb/xltable
xltable/worksheet.py
Worksheet.get_table
python
def get_table(self, tablename): table, (_row, _col) = self.__tables[tablename] return table
:param str tablename: Name of table to find. :return: A :py:class:`xltable.Table` instance from the table name.
train
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L107-L113
null
class Worksheet(object): """ A worksheet is a collection of tables placed at specific locations. Once all tables have been placed the worksheet can be written out or the rows can be iterated over, and any expressions present in the tables will be resolved to absolute cell references. :param str name: Worksheet name. """ _xlsx_unsupported_types = tuple() def __init__(self, name="Sheet1"): self.__name = name self.__tables = {} self.__values = {} self.__charts = [] self.__next_row = 0 self.__groups = [] @property def name(self): """Worksheet name""" return self.__name def add_table(self, table, row=None, col=0, row_spaces=1): """ Adds a table to the worksheet at (row, col). Return the (row, col) where the table has been put. :param xltable.Table table: Table to add to the worksheet. :param int row: Row to start the table at (defaults to the next free row). :param int col: Column to start the table at. :param int row_spaces: Number of rows to leave between this table and the next. """ name = table.name assert name is not None, "Tables must have a name" assert name not in self.__tables, "Table %s already exists in this worksheet" % name if row is None: row = self.__next_row self.__next_row = max(row + table.height + row_spaces, self.__next_row) self.__tables[name] = (table, (row, col)) return row, col def add_value(self, value, row, col): """ Adds a single value (cell) to a worksheet at (row, col). Return the (row, col) where the value has been put. :param value: Value to write to the sheet. :param row: Row where the value should be written. :param col: Column where the value should be written. """ self.__values[(row, col)] = value def add_chart(self, chart, row, col): """ Adds a chart to the worksheet at (row, col). :param xltable.Chart Chart: chart to add to the workbook. :param int row: Row to add the chart at. 
""" self.__charts.append((chart, (row, col))) def add_row_group(self, tables, collapsed=True): """ Adds a group over all the given tables (will include any rows between the first row over all tables, and the last row over all tables) Initially collapsed if collapsed is True (True by default) """ self.__groups.append((tables, collapsed)) @property def next_row(self): """Row the next table will start at unless another row is specified.""" return self.__next_row @next_row.setter def next_row(self, value): self.__next_row = value def get_table_pos(self, tablename): """ :param str tablename: Name of table to get position of. :return: Upper left (row, col) coordinate of the named table. """ _table, (row, col) = self.__tables[tablename] return (row, col) def iterrows(self, workbook=None): """ Yield rows as lists of data. The data is exactly as it is in the source pandas DataFrames and any formulas are not resolved. """ resolved_tables = [] max_height = 0 max_width = 0 # while yielding rows __formula_values is updated with any formula values set on Expressions self.__formula_values = {} for name, (table, (row, col)) in list(self.__tables.items()): # get the resolved 2d data array from the table # # expressions with no explicit table will use None when calling # get_table/get_table_pos, which should return the current table. # self.__tables[None] = (table, (row, col)) data = table.get_data(workbook, row, col, self.__formula_values) del self.__tables[None] height, width = data.shape upper_left = (row, col) lower_right = (row + height - 1, col + width - 1) max_height = max(max_height, lower_right[0] + 1) max_width = max(max_width, lower_right[1] + 1) resolved_tables.append((name, data, upper_left, lower_right)) for row, col in self.__values.keys(): max_width = max(max_width, row+1) max_height = max(max_height, col+1) # Build the whole table up-front. Doing it row by row is too slow. 
table = [[None] * max_width for i in range(max_height)] for name, data, upper_left, lower_right in resolved_tables: for i, r in enumerate(range(upper_left[0], lower_right[0]+1)): for j, c in enumerate(range(upper_left[1], lower_right[1]+1)): table[r][c] = data[i][j] for (r, c), value in self.__values.items(): if isinstance(value, Value): value = value.value if isinstance(value, Expression): if value.has_value: self.__formula_values[(r, c)] = value.value value = value.get_formula(workbook, r, c) table[r][c] = value for row in table: yield row def to_csv(self, writer): """ Writes worksheet to a csv.writer object. :param writer: csv writer instance. """ for row in self.iterrows(): writer.writerow(row) def _get_column_widths(self): """return a dictionary of {col -> width}""" col_widths = {} for table, (row, col) in self.__tables.values(): for colname, width in table.column_widths.items(): ic = col + table.get_column_offset(colname) current_width = col_widths.setdefault(ic, width) col_widths[ic] = max(width, current_width) return col_widths def _get_all_styles(self): """ return a dictionary of {(row, col) -> CellStyle} for all cells that use a non-default style. 
""" _styles = {} def _get_style(bold=False, bg_col=None, border=None): if (bold, bg_col, border) not in _styles: _styles[(bold, bg_col, border)] = CellStyle(bold=bold, bg_color=bg_col, border=border) return _styles[(bold, bg_col, border)] ws_styles = {} for table, (row, col) in self.__tables.values(): for r in range(row, row + table.header_height): for c in range(col, col + table.width): if isinstance(table.header_style, dict): col_name = table.dataframe.columns[c - col] style = table.header_style.get(col_name, _get_style(bold=True)) else: style = table.header_style or _get_style(bold=True) ws_styles[(r, c)] = style for c in range(col, col + table.row_labels_width): for r in range(row + table.header_height, row + table.height): if isinstance(table.index_style, dict): row_name = table.dataframe.index[r - row] style = table.index_style.get(row_name, _get_style(bold=True)) else: style = table.index_style or _get_style(bold=True) ws_styles[(r, c)] = style if table.style.stripe_colors or table.style.border: num_bg_cols = len(table.style.stripe_colors) if \ table.style.stripe_colors else 1 bg_cols = table.style.stripe_colors if \ table.style.stripe_colors else None for i, row_offset in enumerate(range(table.header_height, table.height)): for c in range(col, col + table.width): bg_col = bg_cols[i % num_bg_cols] if bg_cols else None style = _get_style(bold=None, bg_col=bg_col, border=table.style.border) if (row + row_offset, c) in ws_styles: style = style + ws_styles[(row + row_offset, c)] ws_styles[(row + row_offset, c)] = style for col_name, col_style in table.column_styles.items(): try: col_offset = table.get_column_offset(col_name) except KeyError: continue for i, r in enumerate(range(row + table.header_height, row + table.height)): style = col_style if (r, col + col_offset) in ws_styles: style = ws_styles[(r, col + col_offset)] + style ws_styles[(r, col + col_offset)] = style for row_name, row_style in table.row_styles.items(): try: row_offset = 
table.get_row_offset(row_name) except KeyError: continue for i, c in enumerate(range(col + table.row_labels_width, col + table.width)): style = row_style if (row + row_offset, c) in ws_styles: style = ws_styles[(row + row_offset, c)] + style ws_styles[(row + row_offset, c)] = style for (row_name, col_name), cell_style in table.cell_styles.items(): try: col_offset = table.get_column_offset(col_name) row_offset = table.get_row_offset(row_name) except KeyError: continue style = cell_style if (row + row_offset, col + col_offset) in ws_styles: style = ws_styles[(row + row_offset, col + col_offset)] + style ws_styles[(row + row_offset, col + col_offset)] = style for (row, col), value in self.__values.items(): if isinstance(value, Value): style = value.style if style: if (row, col) in ws_styles: style = style + ws_styles[(row, col)] ws_styles[(row, col)] = style return ws_styles def to_excel(self, workbook=None, worksheet=None, xl_app=None, clear=True, rename=True, resize_columns=True): """ Writes worksheet to an Excel Worksheet COM object. Requires :py:module:`pywin32` to be installed. :param workbook: xltable.Workbook this sheet belongs to. :param worksheet: Excel COM Worksheet instance to write to. :param xl_app: Excel COM Excel Application to write to. :param bool clear: If a worksheet is provided, clear worksheet before writing. :param bool rename: If a worksheet is provided, rename self to match the worksheet. :param bool resize_columns: Resize sheet columns after writing. 
""" from win32com.client import Dispatch, constants, gencache if xl_app is None: if worksheet is not None: xl_app = worksheet.Parent.Application elif workbook is not None and hasattr(workbook.workbook_obj, "Application"): xl_app = workbook.workbook_obj.Application else: xl_app = Dispatch("Excel.Application") xl = xl_app = gencache.EnsureDispatch(xl_app) # Create a workbook if there isn't one already if not workbook: from .workbook import Workbook workbook = Workbook(worksheets=[self]) if worksheet is None: # If there's no worksheet then call Workbook.to_excel which will create one return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns) if rename: self.__name = worksheet.Name # set manual calculation and turn off screen updating while we update the cells calculation = xl.Calculation screen_updating = xl.ScreenUpdating xl.Calculation = constants.xlCalculationManual xl.ScreenUpdating = False try: # clear the worksheet and reset the styles if clear: worksheet.Cells.ClearContents() worksheet.Cells.Font.Bold = False worksheet.Cells.Font.Size = 11 worksheet.Cells.Font.Color = 0x000000 worksheet.Cells.Interior.ColorIndex = 0 worksheet.Cells.NumberFormat = "General" # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False origin = worksheet.Range("A1") xl_cell = origin for r, row in enumerate(self.iterrows(workbook)): row = _to_pywintypes(row) # set the value and formulae to the excel range (it's much quicker to # write a row at a time and update the formula than it is it do it # cell by cell) if clear: xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, 
len(row))) xl_row.Value = row else: for c, value in enumerate(row): if value is not None: xl_cell.Offset(1, 1 + c).Value = value for c, value in enumerate(row): if isinstance(value, str): if value.startswith("="): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).Formula = value elif value.startswith("{=") \ and not _is_in_array_formula_table(r, c): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).FormulaArray = value # move to the next row xl_cell = xl_cell.Offset(2, 1) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): data = table.get_data(workbook, row, col) height, width = data.shape upper_left = origin.Offset(row+1, col+1) lower_right = origin.Offset(row + height, col + width) xl_range = worksheet.Range(upper_left, lower_right) xl_range.FormulaArray = table.formula.get_formula(workbook, row, col) # set any formatting for (row, col), style in self._get_all_styles().items(): r = origin.Offset(1 + row, 1 + col) if style.bold: r.Font.Bold = True if style.excel_number_format is not None: r.NumberFormat = style.excel_number_format if style.size is not None: r.Font.Size = style.size if style.text_color is not None: r.Font.Color = _to_bgr(style.text_color) if style.bg_color is not None: r.Interior.Color = _to_bgr(style.bg_color) if style.text_wrap or style.border: raise Exception("text wrap and border not implemented") # add any charts for chart, (row, col) in self.__charts: top_left = origin.Offset(1 + row, 1 + col) xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype) if chart.title: xl_chart.ChartTitle = chart.title for series in chart.iter_series(self, row, col): xl_series = xl_chart.SeriesCollection().NewSeries() xl_series.Values = "=%s!%s" % (self.name, 
series["values"].lstrip("=")) if "categories" in series: xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("=")) if "name" in series: xl_series.Name = series["name"] finally: xl.ScreenUpdating = screen_updating xl.Calculation = calculation if resize_columns: try: worksheet.Cells.EntireColumn.AutoFit() except: pass def to_xlsx(self, filename=None, workbook=None): """ Write worksheet to a .xlsx file using xlsxwriter. :param str filename: Filename to write to. If None no file is written. :param xltable.Workbook: Workbook this sheet belongs to. If None a new workbook will be created with this worksheet as the only sheet. :return: :py:class:`xlsxwriter.workbook.Workbook` instance. """ from .workbook import Workbook if not workbook: workbook = Workbook(filename=filename) workbook.append(self) return workbook.to_xlsx() ws = workbook.add_xlsx_worksheet(self, self.name) _styles = {} def _get_xlsx_style(cell_style): """ convert rb.excel style to xlsx writer style """ style_args = ( cell_style.bold, cell_style.excel_number_format, cell_style.text_color, cell_style.bg_color, cell_style.size, cell_style.text_wrap, cell_style.text_wrap, cell_style.border, cell_style.align, cell_style.valign ) if (style_args) not in _styles: style = workbook.add_format() if cell_style.bold: style.set_bold() if cell_style.excel_number_format is not None: style.set_num_format(cell_style.excel_number_format) if cell_style.text_color is not None: style.set_font_color("#%06x" % cell_style.text_color) if cell_style.bg_color is not None: style.set_bg_color("#%06x" % cell_style.bg_color) if cell_style.size is not None: style.set_font_size(cell_style.size) if cell_style.text_wrap: style.set_text_wrap() if cell_style.border: if isinstance(cell_style.border, frozenset): for border_position, border_style in cell_style.border: if border_position == "bottom": style.set_bottom(border_style) elif border_position == "top": style.set_top(border_style) elif border_position == "left": 
style.set_left(border_style) elif border_position == "right": style.set_right(border_style) else: raise AssertionError("Unknown border position '%s'." % border_position) else: style.set_border(cell_style.border) if cell_style.align: style.set_align(cell_style.align) if cell_style.valign: style.set_valign(cell_style.valign) _styles[style_args] = style return _styles[style_args] # pre-compute the cells with non-default styles ws_styles = self._get_all_styles() ws_styles = {(r, c): _get_xlsx_style(s) for ((r, c), s) in ws_styles.items()} plain_style = _get_xlsx_style(CellStyle()) # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False # write the rows to the worksheet for ir, row in enumerate(self.iterrows(workbook)): for ic, cell in enumerate(row): style = ws_styles.get((ir, ic), plain_style) if isinstance(cell, str): if cell.startswith("="): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_formula(ir, ic, cell, style, value=formula_value) elif cell.startswith("{="): # array formulas tables are written after everything else, # but individual cells can also be array formulas if not _is_in_array_formula_table(ir, ic): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_array_formula(ir, ic, ir, ic, cell, style, value=formula_value) else: ws.write(ir, ic, cell, style) else: if isinstance(cell, self._xlsx_unsupported_types): ws.write(ir, ic, str(cell), style) else: try: ws.write(ir, ic, cell, style) except TypeError: ws.write(ir, ic, str(cell), style) unsupported_types = set(self._xlsx_unsupported_types) unsupported_types.add(type(cell)) 
self.__class__._xlsx_unsupported_types = tuple(unsupported_types) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): style = ws_styles.get((row, col), plain_style) data = table.get_data(workbook, row, col) height, width = data.shape bottom, right = (row + height - 1, col + width -1) formula = table.formula.get_formula(workbook, row, col) ws.write_array_formula(row, col, bottom, right, formula, style, value=data[0][0]) for y in range(height): for x in range(width): if y == 0 and x == 0: continue ir, ic = row + y, col + x style = ws_styles.get((ir, ic), plain_style) cell = data[y][x] if isinstance(cell, str): cell_str = cell.encode("ascii", "xmlcharrefreplace").decode("ascii") ws.write_formula(ir, ic, cell_str, style) else: ws.write(ir, ic, cell, style) # set any non-default column widths for ic, width in self._get_column_widths().items(): ws.set_column(ic, ic, width) # add any charts for chart, (row, col) in self.__charts: kwargs = {"type": chart.type} if chart.subtype: kwargs["subtype"] = chart.subtype xl_chart = workbook.workbook_obj.add_chart(kwargs) if chart.show_blanks: xl_chart.show_blanks_as(chart.show_blanks) for series in chart.iter_series(workbook, row, col): # xlsxwriter expects the sheetname in the formula values = series.get("values") if isinstance(values, str) and values.startswith("=") and "!" not in values: series["values"] = "='%s'!%s" % (self.name, values.lstrip("=")) categories = series.get("categories") if isinstance(categories, str) and categories.startswith("=") and "!" 
not in categories: series["categories"] = "='%s'!%s" % (self.name, categories.lstrip("=")) xl_chart.add_series(series) xl_chart.set_size({"width": chart.width, "height": chart.height}) if chart.title: xl_chart.set_title({"name": chart.title}) if chart.legend_position: xl_chart.set_legend({"position": chart.legend_position}) if chart.x_axis: xl_chart.set_x_axis(chart.x_axis) if chart.y_axis: xl_chart.set_y_axis(chart.y_axis) ws.insert_chart(row, col, xl_chart) # add any groups for tables, collapsed in self.__groups: min_row, max_row = 1000000, -1 for table, (row, col) in self.__tables.values(): if table in tables: min_row = min(min_row, row) max_row = max(max_row, row + table.height) for i in range(min_row, max_row+1): ws.set_row(i, None, None, {'level': 1, 'hidden': collapsed}) if filename: workbook.close() return workbook
fkarb/xltable
xltable/worksheet.py
Worksheet.iterrows
python
def iterrows(self, workbook=None): resolved_tables = [] max_height = 0 max_width = 0 # while yielding rows __formula_values is updated with any formula values set on Expressions self.__formula_values = {} for name, (table, (row, col)) in list(self.__tables.items()): # get the resolved 2d data array from the table # # expressions with no explicit table will use None when calling # get_table/get_table_pos, which should return the current table. # self.__tables[None] = (table, (row, col)) data = table.get_data(workbook, row, col, self.__formula_values) del self.__tables[None] height, width = data.shape upper_left = (row, col) lower_right = (row + height - 1, col + width - 1) max_height = max(max_height, lower_right[0] + 1) max_width = max(max_width, lower_right[1] + 1) resolved_tables.append((name, data, upper_left, lower_right)) for row, col in self.__values.keys(): max_width = max(max_width, row+1) max_height = max(max_height, col+1) # Build the whole table up-front. Doing it row by row is too slow. table = [[None] * max_width for i in range(max_height)] for name, data, upper_left, lower_right in resolved_tables: for i, r in enumerate(range(upper_left[0], lower_right[0]+1)): for j, c in enumerate(range(upper_left[1], lower_right[1]+1)): table[r][c] = data[i][j] for (r, c), value in self.__values.items(): if isinstance(value, Value): value = value.value if isinstance(value, Expression): if value.has_value: self.__formula_values[(r, c)] = value.value value = value.get_formula(workbook, r, c) table[r][c] = value for row in table: yield row
Yield rows as lists of data. The data is exactly as it is in the source pandas DataFrames and any formulas are not resolved.
train
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L115-L169
null
class Worksheet(object): """ A worksheet is a collection of tables placed at specific locations. Once all tables have been placed the worksheet can be written out or the rows can be iterated over, and any expressions present in the tables will be resolved to absolute cell references. :param str name: Worksheet name. """ _xlsx_unsupported_types = tuple() def __init__(self, name="Sheet1"): self.__name = name self.__tables = {} self.__values = {} self.__charts = [] self.__next_row = 0 self.__groups = [] @property def name(self): """Worksheet name""" return self.__name def add_table(self, table, row=None, col=0, row_spaces=1): """ Adds a table to the worksheet at (row, col). Return the (row, col) where the table has been put. :param xltable.Table table: Table to add to the worksheet. :param int row: Row to start the table at (defaults to the next free row). :param int col: Column to start the table at. :param int row_spaces: Number of rows to leave between this table and the next. """ name = table.name assert name is not None, "Tables must have a name" assert name not in self.__tables, "Table %s already exists in this worksheet" % name if row is None: row = self.__next_row self.__next_row = max(row + table.height + row_spaces, self.__next_row) self.__tables[name] = (table, (row, col)) return row, col def add_value(self, value, row, col): """ Adds a single value (cell) to a worksheet at (row, col). Return the (row, col) where the value has been put. :param value: Value to write to the sheet. :param row: Row where the value should be written. :param col: Column where the value should be written. """ self.__values[(row, col)] = value def add_chart(self, chart, row, col): """ Adds a chart to the worksheet at (row, col). :param xltable.Chart Chart: chart to add to the workbook. :param int row: Row to add the chart at. 
""" self.__charts.append((chart, (row, col))) def add_row_group(self, tables, collapsed=True): """ Adds a group over all the given tables (will include any rows between the first row over all tables, and the last row over all tables) Initially collapsed if collapsed is True (True by default) """ self.__groups.append((tables, collapsed)) @property def next_row(self): """Row the next table will start at unless another row is specified.""" return self.__next_row @next_row.setter def next_row(self, value): self.__next_row = value def get_table_pos(self, tablename): """ :param str tablename: Name of table to get position of. :return: Upper left (row, col) coordinate of the named table. """ _table, (row, col) = self.__tables[tablename] return (row, col) def get_table(self, tablename): """ :param str tablename: Name of table to find. :return: A :py:class:`xltable.Table` instance from the table name. """ table, (_row, _col) = self.__tables[tablename] return table def iterrows(self, workbook=None): """ Yield rows as lists of data. The data is exactly as it is in the source pandas DataFrames and any formulas are not resolved. """ resolved_tables = [] max_height = 0 max_width = 0 # while yielding rows __formula_values is updated with any formula values set on Expressions self.__formula_values = {} for name, (table, (row, col)) in list(self.__tables.items()): # get the resolved 2d data array from the table # # expressions with no explicit table will use None when calling # get_table/get_table_pos, which should return the current table. 
# self.__tables[None] = (table, (row, col)) data = table.get_data(workbook, row, col, self.__formula_values) del self.__tables[None] height, width = data.shape upper_left = (row, col) lower_right = (row + height - 1, col + width - 1) max_height = max(max_height, lower_right[0] + 1) max_width = max(max_width, lower_right[1] + 1) resolved_tables.append((name, data, upper_left, lower_right)) for row, col in self.__values.keys(): max_width = max(max_width, row+1) max_height = max(max_height, col+1) # Build the whole table up-front. Doing it row by row is too slow. table = [[None] * max_width for i in range(max_height)] for name, data, upper_left, lower_right in resolved_tables: for i, r in enumerate(range(upper_left[0], lower_right[0]+1)): for j, c in enumerate(range(upper_left[1], lower_right[1]+1)): table[r][c] = data[i][j] for (r, c), value in self.__values.items(): if isinstance(value, Value): value = value.value if isinstance(value, Expression): if value.has_value: self.__formula_values[(r, c)] = value.value value = value.get_formula(workbook, r, c) table[r][c] = value for row in table: yield row def to_csv(self, writer): """ Writes worksheet to a csv.writer object. :param writer: csv writer instance. """ for row in self.iterrows(): writer.writerow(row) def _get_column_widths(self): """return a dictionary of {col -> width}""" col_widths = {} for table, (row, col) in self.__tables.values(): for colname, width in table.column_widths.items(): ic = col + table.get_column_offset(colname) current_width = col_widths.setdefault(ic, width) col_widths[ic] = max(width, current_width) return col_widths def _get_all_styles(self): """ return a dictionary of {(row, col) -> CellStyle} for all cells that use a non-default style. 
""" _styles = {} def _get_style(bold=False, bg_col=None, border=None): if (bold, bg_col, border) not in _styles: _styles[(bold, bg_col, border)] = CellStyle(bold=bold, bg_color=bg_col, border=border) return _styles[(bold, bg_col, border)] ws_styles = {} for table, (row, col) in self.__tables.values(): for r in range(row, row + table.header_height): for c in range(col, col + table.width): if isinstance(table.header_style, dict): col_name = table.dataframe.columns[c - col] style = table.header_style.get(col_name, _get_style(bold=True)) else: style = table.header_style or _get_style(bold=True) ws_styles[(r, c)] = style for c in range(col, col + table.row_labels_width): for r in range(row + table.header_height, row + table.height): if isinstance(table.index_style, dict): row_name = table.dataframe.index[r - row] style = table.index_style.get(row_name, _get_style(bold=True)) else: style = table.index_style or _get_style(bold=True) ws_styles[(r, c)] = style if table.style.stripe_colors or table.style.border: num_bg_cols = len(table.style.stripe_colors) if \ table.style.stripe_colors else 1 bg_cols = table.style.stripe_colors if \ table.style.stripe_colors else None for i, row_offset in enumerate(range(table.header_height, table.height)): for c in range(col, col + table.width): bg_col = bg_cols[i % num_bg_cols] if bg_cols else None style = _get_style(bold=None, bg_col=bg_col, border=table.style.border) if (row + row_offset, c) in ws_styles: style = style + ws_styles[(row + row_offset, c)] ws_styles[(row + row_offset, c)] = style for col_name, col_style in table.column_styles.items(): try: col_offset = table.get_column_offset(col_name) except KeyError: continue for i, r in enumerate(range(row + table.header_height, row + table.height)): style = col_style if (r, col + col_offset) in ws_styles: style = ws_styles[(r, col + col_offset)] + style ws_styles[(r, col + col_offset)] = style for row_name, row_style in table.row_styles.items(): try: row_offset = 
table.get_row_offset(row_name) except KeyError: continue for i, c in enumerate(range(col + table.row_labels_width, col + table.width)): style = row_style if (row + row_offset, c) in ws_styles: style = ws_styles[(row + row_offset, c)] + style ws_styles[(row + row_offset, c)] = style for (row_name, col_name), cell_style in table.cell_styles.items(): try: col_offset = table.get_column_offset(col_name) row_offset = table.get_row_offset(row_name) except KeyError: continue style = cell_style if (row + row_offset, col + col_offset) in ws_styles: style = ws_styles[(row + row_offset, col + col_offset)] + style ws_styles[(row + row_offset, col + col_offset)] = style for (row, col), value in self.__values.items(): if isinstance(value, Value): style = value.style if style: if (row, col) in ws_styles: style = style + ws_styles[(row, col)] ws_styles[(row, col)] = style return ws_styles def to_excel(self, workbook=None, worksheet=None, xl_app=None, clear=True, rename=True, resize_columns=True): """ Writes worksheet to an Excel Worksheet COM object. Requires :py:module:`pywin32` to be installed. :param workbook: xltable.Workbook this sheet belongs to. :param worksheet: Excel COM Worksheet instance to write to. :param xl_app: Excel COM Excel Application to write to. :param bool clear: If a worksheet is provided, clear worksheet before writing. :param bool rename: If a worksheet is provided, rename self to match the worksheet. :param bool resize_columns: Resize sheet columns after writing. 
""" from win32com.client import Dispatch, constants, gencache if xl_app is None: if worksheet is not None: xl_app = worksheet.Parent.Application elif workbook is not None and hasattr(workbook.workbook_obj, "Application"): xl_app = workbook.workbook_obj.Application else: xl_app = Dispatch("Excel.Application") xl = xl_app = gencache.EnsureDispatch(xl_app) # Create a workbook if there isn't one already if not workbook: from .workbook import Workbook workbook = Workbook(worksheets=[self]) if worksheet is None: # If there's no worksheet then call Workbook.to_excel which will create one return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns) if rename: self.__name = worksheet.Name # set manual calculation and turn off screen updating while we update the cells calculation = xl.Calculation screen_updating = xl.ScreenUpdating xl.Calculation = constants.xlCalculationManual xl.ScreenUpdating = False try: # clear the worksheet and reset the styles if clear: worksheet.Cells.ClearContents() worksheet.Cells.Font.Bold = False worksheet.Cells.Font.Size = 11 worksheet.Cells.Font.Color = 0x000000 worksheet.Cells.Interior.ColorIndex = 0 worksheet.Cells.NumberFormat = "General" # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False origin = worksheet.Range("A1") xl_cell = origin for r, row in enumerate(self.iterrows(workbook)): row = _to_pywintypes(row) # set the value and formulae to the excel range (it's much quicker to # write a row at a time and update the formula than it is it do it # cell by cell) if clear: xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, 
len(row))) xl_row.Value = row else: for c, value in enumerate(row): if value is not None: xl_cell.Offset(1, 1 + c).Value = value for c, value in enumerate(row): if isinstance(value, str): if value.startswith("="): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).Formula = value elif value.startswith("{=") \ and not _is_in_array_formula_table(r, c): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).FormulaArray = value # move to the next row xl_cell = xl_cell.Offset(2, 1) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): data = table.get_data(workbook, row, col) height, width = data.shape upper_left = origin.Offset(row+1, col+1) lower_right = origin.Offset(row + height, col + width) xl_range = worksheet.Range(upper_left, lower_right) xl_range.FormulaArray = table.formula.get_formula(workbook, row, col) # set any formatting for (row, col), style in self._get_all_styles().items(): r = origin.Offset(1 + row, 1 + col) if style.bold: r.Font.Bold = True if style.excel_number_format is not None: r.NumberFormat = style.excel_number_format if style.size is not None: r.Font.Size = style.size if style.text_color is not None: r.Font.Color = _to_bgr(style.text_color) if style.bg_color is not None: r.Interior.Color = _to_bgr(style.bg_color) if style.text_wrap or style.border: raise Exception("text wrap and border not implemented") # add any charts for chart, (row, col) in self.__charts: top_left = origin.Offset(1 + row, 1 + col) xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype) if chart.title: xl_chart.ChartTitle = chart.title for series in chart.iter_series(self, row, col): xl_series = xl_chart.SeriesCollection().NewSeries() xl_series.Values = "=%s!%s" % (self.name, 
series["values"].lstrip("=")) if "categories" in series: xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("=")) if "name" in series: xl_series.Name = series["name"] finally: xl.ScreenUpdating = screen_updating xl.Calculation = calculation if resize_columns: try: worksheet.Cells.EntireColumn.AutoFit() except: pass def to_xlsx(self, filename=None, workbook=None): """ Write worksheet to a .xlsx file using xlsxwriter. :param str filename: Filename to write to. If None no file is written. :param xltable.Workbook: Workbook this sheet belongs to. If None a new workbook will be created with this worksheet as the only sheet. :return: :py:class:`xlsxwriter.workbook.Workbook` instance. """ from .workbook import Workbook if not workbook: workbook = Workbook(filename=filename) workbook.append(self) return workbook.to_xlsx() ws = workbook.add_xlsx_worksheet(self, self.name) _styles = {} def _get_xlsx_style(cell_style): """ convert rb.excel style to xlsx writer style """ style_args = ( cell_style.bold, cell_style.excel_number_format, cell_style.text_color, cell_style.bg_color, cell_style.size, cell_style.text_wrap, cell_style.text_wrap, cell_style.border, cell_style.align, cell_style.valign ) if (style_args) not in _styles: style = workbook.add_format() if cell_style.bold: style.set_bold() if cell_style.excel_number_format is not None: style.set_num_format(cell_style.excel_number_format) if cell_style.text_color is not None: style.set_font_color("#%06x" % cell_style.text_color) if cell_style.bg_color is not None: style.set_bg_color("#%06x" % cell_style.bg_color) if cell_style.size is not None: style.set_font_size(cell_style.size) if cell_style.text_wrap: style.set_text_wrap() if cell_style.border: if isinstance(cell_style.border, frozenset): for border_position, border_style in cell_style.border: if border_position == "bottom": style.set_bottom(border_style) elif border_position == "top": style.set_top(border_style) elif border_position == "left": 
style.set_left(border_style) elif border_position == "right": style.set_right(border_style) else: raise AssertionError("Unknown border position '%s'." % border_position) else: style.set_border(cell_style.border) if cell_style.align: style.set_align(cell_style.align) if cell_style.valign: style.set_valign(cell_style.valign) _styles[style_args] = style return _styles[style_args] # pre-compute the cells with non-default styles ws_styles = self._get_all_styles() ws_styles = {(r, c): _get_xlsx_style(s) for ((r, c), s) in ws_styles.items()} plain_style = _get_xlsx_style(CellStyle()) # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False # write the rows to the worksheet for ir, row in enumerate(self.iterrows(workbook)): for ic, cell in enumerate(row): style = ws_styles.get((ir, ic), plain_style) if isinstance(cell, str): if cell.startswith("="): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_formula(ir, ic, cell, style, value=formula_value) elif cell.startswith("{="): # array formulas tables are written after everything else, # but individual cells can also be array formulas if not _is_in_array_formula_table(ir, ic): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_array_formula(ir, ic, ir, ic, cell, style, value=formula_value) else: ws.write(ir, ic, cell, style) else: if isinstance(cell, self._xlsx_unsupported_types): ws.write(ir, ic, str(cell), style) else: try: ws.write(ir, ic, cell, style) except TypeError: ws.write(ir, ic, str(cell), style) unsupported_types = set(self._xlsx_unsupported_types) unsupported_types.add(type(cell)) 
self.__class__._xlsx_unsupported_types = tuple(unsupported_types) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): style = ws_styles.get((row, col), plain_style) data = table.get_data(workbook, row, col) height, width = data.shape bottom, right = (row + height - 1, col + width -1) formula = table.formula.get_formula(workbook, row, col) ws.write_array_formula(row, col, bottom, right, formula, style, value=data[0][0]) for y in range(height): for x in range(width): if y == 0 and x == 0: continue ir, ic = row + y, col + x style = ws_styles.get((ir, ic), plain_style) cell = data[y][x] if isinstance(cell, str): cell_str = cell.encode("ascii", "xmlcharrefreplace").decode("ascii") ws.write_formula(ir, ic, cell_str, style) else: ws.write(ir, ic, cell, style) # set any non-default column widths for ic, width in self._get_column_widths().items(): ws.set_column(ic, ic, width) # add any charts for chart, (row, col) in self.__charts: kwargs = {"type": chart.type} if chart.subtype: kwargs["subtype"] = chart.subtype xl_chart = workbook.workbook_obj.add_chart(kwargs) if chart.show_blanks: xl_chart.show_blanks_as(chart.show_blanks) for series in chart.iter_series(workbook, row, col): # xlsxwriter expects the sheetname in the formula values = series.get("values") if isinstance(values, str) and values.startswith("=") and "!" not in values: series["values"] = "='%s'!%s" % (self.name, values.lstrip("=")) categories = series.get("categories") if isinstance(categories, str) and categories.startswith("=") and "!" 
not in categories: series["categories"] = "='%s'!%s" % (self.name, categories.lstrip("=")) xl_chart.add_series(series) xl_chart.set_size({"width": chart.width, "height": chart.height}) if chart.title: xl_chart.set_title({"name": chart.title}) if chart.legend_position: xl_chart.set_legend({"position": chart.legend_position}) if chart.x_axis: xl_chart.set_x_axis(chart.x_axis) if chart.y_axis: xl_chart.set_y_axis(chart.y_axis) ws.insert_chart(row, col, xl_chart) # add any groups for tables, collapsed in self.__groups: min_row, max_row = 1000000, -1 for table, (row, col) in self.__tables.values(): if table in tables: min_row = min(min_row, row) max_row = max(max_row, row + table.height) for i in range(min_row, max_row+1): ws.set_row(i, None, None, {'level': 1, 'hidden': collapsed}) if filename: workbook.close() return workbook
fkarb/xltable
xltable/worksheet.py
Worksheet._get_column_widths
python
def _get_column_widths(self): col_widths = {} for table, (row, col) in self.__tables.values(): for colname, width in table.column_widths.items(): ic = col + table.get_column_offset(colname) current_width = col_widths.setdefault(ic, width) col_widths[ic] = max(width, current_width) return col_widths
return a dictionary of {col -> width}
train
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L179-L187
null
class Worksheet(object): """ A worksheet is a collection of tables placed at specific locations. Once all tables have been placed the worksheet can be written out or the rows can be iterated over, and any expressions present in the tables will be resolved to absolute cell references. :param str name: Worksheet name. """ _xlsx_unsupported_types = tuple() def __init__(self, name="Sheet1"): self.__name = name self.__tables = {} self.__values = {} self.__charts = [] self.__next_row = 0 self.__groups = [] @property def name(self): """Worksheet name""" return self.__name def add_table(self, table, row=None, col=0, row_spaces=1): """ Adds a table to the worksheet at (row, col). Return the (row, col) where the table has been put. :param xltable.Table table: Table to add to the worksheet. :param int row: Row to start the table at (defaults to the next free row). :param int col: Column to start the table at. :param int row_spaces: Number of rows to leave between this table and the next. """ name = table.name assert name is not None, "Tables must have a name" assert name not in self.__tables, "Table %s already exists in this worksheet" % name if row is None: row = self.__next_row self.__next_row = max(row + table.height + row_spaces, self.__next_row) self.__tables[name] = (table, (row, col)) return row, col def add_value(self, value, row, col): """ Adds a single value (cell) to a worksheet at (row, col). Return the (row, col) where the value has been put. :param value: Value to write to the sheet. :param row: Row where the value should be written. :param col: Column where the value should be written. """ self.__values[(row, col)] = value def add_chart(self, chart, row, col): """ Adds a chart to the worksheet at (row, col). :param xltable.Chart Chart: chart to add to the workbook. :param int row: Row to add the chart at. 
""" self.__charts.append((chart, (row, col))) def add_row_group(self, tables, collapsed=True): """ Adds a group over all the given tables (will include any rows between the first row over all tables, and the last row over all tables) Initially collapsed if collapsed is True (True by default) """ self.__groups.append((tables, collapsed)) @property def next_row(self): """Row the next table will start at unless another row is specified.""" return self.__next_row @next_row.setter def next_row(self, value): self.__next_row = value def get_table_pos(self, tablename): """ :param str tablename: Name of table to get position of. :return: Upper left (row, col) coordinate of the named table. """ _table, (row, col) = self.__tables[tablename] return (row, col) def get_table(self, tablename): """ :param str tablename: Name of table to find. :return: A :py:class:`xltable.Table` instance from the table name. """ table, (_row, _col) = self.__tables[tablename] return table def iterrows(self, workbook=None): """ Yield rows as lists of data. The data is exactly as it is in the source pandas DataFrames and any formulas are not resolved. """ resolved_tables = [] max_height = 0 max_width = 0 # while yielding rows __formula_values is updated with any formula values set on Expressions self.__formula_values = {} for name, (table, (row, col)) in list(self.__tables.items()): # get the resolved 2d data array from the table # # expressions with no explicit table will use None when calling # get_table/get_table_pos, which should return the current table. 
# self.__tables[None] = (table, (row, col)) data = table.get_data(workbook, row, col, self.__formula_values) del self.__tables[None] height, width = data.shape upper_left = (row, col) lower_right = (row + height - 1, col + width - 1) max_height = max(max_height, lower_right[0] + 1) max_width = max(max_width, lower_right[1] + 1) resolved_tables.append((name, data, upper_left, lower_right)) for row, col in self.__values.keys(): max_width = max(max_width, row+1) max_height = max(max_height, col+1) # Build the whole table up-front. Doing it row by row is too slow. table = [[None] * max_width for i in range(max_height)] for name, data, upper_left, lower_right in resolved_tables: for i, r in enumerate(range(upper_left[0], lower_right[0]+1)): for j, c in enumerate(range(upper_left[1], lower_right[1]+1)): table[r][c] = data[i][j] for (r, c), value in self.__values.items(): if isinstance(value, Value): value = value.value if isinstance(value, Expression): if value.has_value: self.__formula_values[(r, c)] = value.value value = value.get_formula(workbook, r, c) table[r][c] = value for row in table: yield row def to_csv(self, writer): """ Writes worksheet to a csv.writer object. :param writer: csv writer instance. """ for row in self.iterrows(): writer.writerow(row) def _get_all_styles(self): """ return a dictionary of {(row, col) -> CellStyle} for all cells that use a non-default style. 
""" _styles = {} def _get_style(bold=False, bg_col=None, border=None): if (bold, bg_col, border) not in _styles: _styles[(bold, bg_col, border)] = CellStyle(bold=bold, bg_color=bg_col, border=border) return _styles[(bold, bg_col, border)] ws_styles = {} for table, (row, col) in self.__tables.values(): for r in range(row, row + table.header_height): for c in range(col, col + table.width): if isinstance(table.header_style, dict): col_name = table.dataframe.columns[c - col] style = table.header_style.get(col_name, _get_style(bold=True)) else: style = table.header_style or _get_style(bold=True) ws_styles[(r, c)] = style for c in range(col, col + table.row_labels_width): for r in range(row + table.header_height, row + table.height): if isinstance(table.index_style, dict): row_name = table.dataframe.index[r - row] style = table.index_style.get(row_name, _get_style(bold=True)) else: style = table.index_style or _get_style(bold=True) ws_styles[(r, c)] = style if table.style.stripe_colors or table.style.border: num_bg_cols = len(table.style.stripe_colors) if \ table.style.stripe_colors else 1 bg_cols = table.style.stripe_colors if \ table.style.stripe_colors else None for i, row_offset in enumerate(range(table.header_height, table.height)): for c in range(col, col + table.width): bg_col = bg_cols[i % num_bg_cols] if bg_cols else None style = _get_style(bold=None, bg_col=bg_col, border=table.style.border) if (row + row_offset, c) in ws_styles: style = style + ws_styles[(row + row_offset, c)] ws_styles[(row + row_offset, c)] = style for col_name, col_style in table.column_styles.items(): try: col_offset = table.get_column_offset(col_name) except KeyError: continue for i, r in enumerate(range(row + table.header_height, row + table.height)): style = col_style if (r, col + col_offset) in ws_styles: style = ws_styles[(r, col + col_offset)] + style ws_styles[(r, col + col_offset)] = style for row_name, row_style in table.row_styles.items(): try: row_offset = 
table.get_row_offset(row_name) except KeyError: continue for i, c in enumerate(range(col + table.row_labels_width, col + table.width)): style = row_style if (row + row_offset, c) in ws_styles: style = ws_styles[(row + row_offset, c)] + style ws_styles[(row + row_offset, c)] = style for (row_name, col_name), cell_style in table.cell_styles.items(): try: col_offset = table.get_column_offset(col_name) row_offset = table.get_row_offset(row_name) except KeyError: continue style = cell_style if (row + row_offset, col + col_offset) in ws_styles: style = ws_styles[(row + row_offset, col + col_offset)] + style ws_styles[(row + row_offset, col + col_offset)] = style for (row, col), value in self.__values.items(): if isinstance(value, Value): style = value.style if style: if (row, col) in ws_styles: style = style + ws_styles[(row, col)] ws_styles[(row, col)] = style return ws_styles def to_excel(self, workbook=None, worksheet=None, xl_app=None, clear=True, rename=True, resize_columns=True): """ Writes worksheet to an Excel Worksheet COM object. Requires :py:module:`pywin32` to be installed. :param workbook: xltable.Workbook this sheet belongs to. :param worksheet: Excel COM Worksheet instance to write to. :param xl_app: Excel COM Excel Application to write to. :param bool clear: If a worksheet is provided, clear worksheet before writing. :param bool rename: If a worksheet is provided, rename self to match the worksheet. :param bool resize_columns: Resize sheet columns after writing. 
""" from win32com.client import Dispatch, constants, gencache if xl_app is None: if worksheet is not None: xl_app = worksheet.Parent.Application elif workbook is not None and hasattr(workbook.workbook_obj, "Application"): xl_app = workbook.workbook_obj.Application else: xl_app = Dispatch("Excel.Application") xl = xl_app = gencache.EnsureDispatch(xl_app) # Create a workbook if there isn't one already if not workbook: from .workbook import Workbook workbook = Workbook(worksheets=[self]) if worksheet is None: # If there's no worksheet then call Workbook.to_excel which will create one return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns) if rename: self.__name = worksheet.Name # set manual calculation and turn off screen updating while we update the cells calculation = xl.Calculation screen_updating = xl.ScreenUpdating xl.Calculation = constants.xlCalculationManual xl.ScreenUpdating = False try: # clear the worksheet and reset the styles if clear: worksheet.Cells.ClearContents() worksheet.Cells.Font.Bold = False worksheet.Cells.Font.Size = 11 worksheet.Cells.Font.Color = 0x000000 worksheet.Cells.Interior.ColorIndex = 0 worksheet.Cells.NumberFormat = "General" # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False origin = worksheet.Range("A1") xl_cell = origin for r, row in enumerate(self.iterrows(workbook)): row = _to_pywintypes(row) # set the value and formulae to the excel range (it's much quicker to # write a row at a time and update the formula than it is it do it # cell by cell) if clear: xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, 
len(row))) xl_row.Value = row else: for c, value in enumerate(row): if value is not None: xl_cell.Offset(1, 1 + c).Value = value for c, value in enumerate(row): if isinstance(value, str): if value.startswith("="): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).Formula = value elif value.startswith("{=") \ and not _is_in_array_formula_table(r, c): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).FormulaArray = value # move to the next row xl_cell = xl_cell.Offset(2, 1) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): data = table.get_data(workbook, row, col) height, width = data.shape upper_left = origin.Offset(row+1, col+1) lower_right = origin.Offset(row + height, col + width) xl_range = worksheet.Range(upper_left, lower_right) xl_range.FormulaArray = table.formula.get_formula(workbook, row, col) # set any formatting for (row, col), style in self._get_all_styles().items(): r = origin.Offset(1 + row, 1 + col) if style.bold: r.Font.Bold = True if style.excel_number_format is not None: r.NumberFormat = style.excel_number_format if style.size is not None: r.Font.Size = style.size if style.text_color is not None: r.Font.Color = _to_bgr(style.text_color) if style.bg_color is not None: r.Interior.Color = _to_bgr(style.bg_color) if style.text_wrap or style.border: raise Exception("text wrap and border not implemented") # add any charts for chart, (row, col) in self.__charts: top_left = origin.Offset(1 + row, 1 + col) xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype) if chart.title: xl_chart.ChartTitle = chart.title for series in chart.iter_series(self, row, col): xl_series = xl_chart.SeriesCollection().NewSeries() xl_series.Values = "=%s!%s" % (self.name, 
series["values"].lstrip("=")) if "categories" in series: xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("=")) if "name" in series: xl_series.Name = series["name"] finally: xl.ScreenUpdating = screen_updating xl.Calculation = calculation if resize_columns: try: worksheet.Cells.EntireColumn.AutoFit() except: pass def to_xlsx(self, filename=None, workbook=None): """ Write worksheet to a .xlsx file using xlsxwriter. :param str filename: Filename to write to. If None no file is written. :param xltable.Workbook: Workbook this sheet belongs to. If None a new workbook will be created with this worksheet as the only sheet. :return: :py:class:`xlsxwriter.workbook.Workbook` instance. """ from .workbook import Workbook if not workbook: workbook = Workbook(filename=filename) workbook.append(self) return workbook.to_xlsx() ws = workbook.add_xlsx_worksheet(self, self.name) _styles = {} def _get_xlsx_style(cell_style): """ convert rb.excel style to xlsx writer style """ style_args = ( cell_style.bold, cell_style.excel_number_format, cell_style.text_color, cell_style.bg_color, cell_style.size, cell_style.text_wrap, cell_style.text_wrap, cell_style.border, cell_style.align, cell_style.valign ) if (style_args) not in _styles: style = workbook.add_format() if cell_style.bold: style.set_bold() if cell_style.excel_number_format is not None: style.set_num_format(cell_style.excel_number_format) if cell_style.text_color is not None: style.set_font_color("#%06x" % cell_style.text_color) if cell_style.bg_color is not None: style.set_bg_color("#%06x" % cell_style.bg_color) if cell_style.size is not None: style.set_font_size(cell_style.size) if cell_style.text_wrap: style.set_text_wrap() if cell_style.border: if isinstance(cell_style.border, frozenset): for border_position, border_style in cell_style.border: if border_position == "bottom": style.set_bottom(border_style) elif border_position == "top": style.set_top(border_style) elif border_position == "left": 
style.set_left(border_style) elif border_position == "right": style.set_right(border_style) else: raise AssertionError("Unknown border position '%s'." % border_position) else: style.set_border(cell_style.border) if cell_style.align: style.set_align(cell_style.align) if cell_style.valign: style.set_valign(cell_style.valign) _styles[style_args] = style return _styles[style_args] # pre-compute the cells with non-default styles ws_styles = self._get_all_styles() ws_styles = {(r, c): _get_xlsx_style(s) for ((r, c), s) in ws_styles.items()} plain_style = _get_xlsx_style(CellStyle()) # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False # write the rows to the worksheet for ir, row in enumerate(self.iterrows(workbook)): for ic, cell in enumerate(row): style = ws_styles.get((ir, ic), plain_style) if isinstance(cell, str): if cell.startswith("="): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_formula(ir, ic, cell, style, value=formula_value) elif cell.startswith("{="): # array formulas tables are written after everything else, # but individual cells can also be array formulas if not _is_in_array_formula_table(ir, ic): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_array_formula(ir, ic, ir, ic, cell, style, value=formula_value) else: ws.write(ir, ic, cell, style) else: if isinstance(cell, self._xlsx_unsupported_types): ws.write(ir, ic, str(cell), style) else: try: ws.write(ir, ic, cell, style) except TypeError: ws.write(ir, ic, str(cell), style) unsupported_types = set(self._xlsx_unsupported_types) unsupported_types.add(type(cell)) 
self.__class__._xlsx_unsupported_types = tuple(unsupported_types) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): style = ws_styles.get((row, col), plain_style) data = table.get_data(workbook, row, col) height, width = data.shape bottom, right = (row + height - 1, col + width -1) formula = table.formula.get_formula(workbook, row, col) ws.write_array_formula(row, col, bottom, right, formula, style, value=data[0][0]) for y in range(height): for x in range(width): if y == 0 and x == 0: continue ir, ic = row + y, col + x style = ws_styles.get((ir, ic), plain_style) cell = data[y][x] if isinstance(cell, str): cell_str = cell.encode("ascii", "xmlcharrefreplace").decode("ascii") ws.write_formula(ir, ic, cell_str, style) else: ws.write(ir, ic, cell, style) # set any non-default column widths for ic, width in self._get_column_widths().items(): ws.set_column(ic, ic, width) # add any charts for chart, (row, col) in self.__charts: kwargs = {"type": chart.type} if chart.subtype: kwargs["subtype"] = chart.subtype xl_chart = workbook.workbook_obj.add_chart(kwargs) if chart.show_blanks: xl_chart.show_blanks_as(chart.show_blanks) for series in chart.iter_series(workbook, row, col): # xlsxwriter expects the sheetname in the formula values = series.get("values") if isinstance(values, str) and values.startswith("=") and "!" not in values: series["values"] = "='%s'!%s" % (self.name, values.lstrip("=")) categories = series.get("categories") if isinstance(categories, str) and categories.startswith("=") and "!" 
not in categories: series["categories"] = "='%s'!%s" % (self.name, categories.lstrip("=")) xl_chart.add_series(series) xl_chart.set_size({"width": chart.width, "height": chart.height}) if chart.title: xl_chart.set_title({"name": chart.title}) if chart.legend_position: xl_chart.set_legend({"position": chart.legend_position}) if chart.x_axis: xl_chart.set_x_axis(chart.x_axis) if chart.y_axis: xl_chart.set_y_axis(chart.y_axis) ws.insert_chart(row, col, xl_chart) # add any groups for tables, collapsed in self.__groups: min_row, max_row = 1000000, -1 for table, (row, col) in self.__tables.values(): if table in tables: min_row = min(min_row, row) max_row = max(max_row, row + table.height) for i in range(min_row, max_row+1): ws.set_row(i, None, None, {'level': 1, 'hidden': collapsed}) if filename: workbook.close() return workbook
fkarb/xltable
xltable/worksheet.py
Worksheet._get_all_styles
python
def _get_all_styles(self): _styles = {} def _get_style(bold=False, bg_col=None, border=None): if (bold, bg_col, border) not in _styles: _styles[(bold, bg_col, border)] = CellStyle(bold=bold, bg_color=bg_col, border=border) return _styles[(bold, bg_col, border)] ws_styles = {} for table, (row, col) in self.__tables.values(): for r in range(row, row + table.header_height): for c in range(col, col + table.width): if isinstance(table.header_style, dict): col_name = table.dataframe.columns[c - col] style = table.header_style.get(col_name, _get_style(bold=True)) else: style = table.header_style or _get_style(bold=True) ws_styles[(r, c)] = style for c in range(col, col + table.row_labels_width): for r in range(row + table.header_height, row + table.height): if isinstance(table.index_style, dict): row_name = table.dataframe.index[r - row] style = table.index_style.get(row_name, _get_style(bold=True)) else: style = table.index_style or _get_style(bold=True) ws_styles[(r, c)] = style if table.style.stripe_colors or table.style.border: num_bg_cols = len(table.style.stripe_colors) if \ table.style.stripe_colors else 1 bg_cols = table.style.stripe_colors if \ table.style.stripe_colors else None for i, row_offset in enumerate(range(table.header_height, table.height)): for c in range(col, col + table.width): bg_col = bg_cols[i % num_bg_cols] if bg_cols else None style = _get_style(bold=None, bg_col=bg_col, border=table.style.border) if (row + row_offset, c) in ws_styles: style = style + ws_styles[(row + row_offset, c)] ws_styles[(row + row_offset, c)] = style for col_name, col_style in table.column_styles.items(): try: col_offset = table.get_column_offset(col_name) except KeyError: continue for i, r in enumerate(range(row + table.header_height, row + table.height)): style = col_style if (r, col + col_offset) in ws_styles: style = ws_styles[(r, col + col_offset)] + style ws_styles[(r, col + col_offset)] = style for row_name, row_style in table.row_styles.items(): try: row_offset = 
table.get_row_offset(row_name) except KeyError: continue for i, c in enumerate(range(col + table.row_labels_width, col + table.width)): style = row_style if (row + row_offset, c) in ws_styles: style = ws_styles[(row + row_offset, c)] + style ws_styles[(row + row_offset, c)] = style for (row_name, col_name), cell_style in table.cell_styles.items(): try: col_offset = table.get_column_offset(col_name) row_offset = table.get_row_offset(row_name) except KeyError: continue style = cell_style if (row + row_offset, col + col_offset) in ws_styles: style = ws_styles[(row + row_offset, col + col_offset)] + style ws_styles[(row + row_offset, col + col_offset)] = style for (row, col), value in self.__values.items(): if isinstance(value, Value): style = value.style if style: if (row, col) in ws_styles: style = style + ws_styles[(row, col)] ws_styles[(row, col)] = style return ws_styles
return a dictionary of {(row, col) -> CellStyle} for all cells that use a non-default style.
train
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L189-L278
[ "def _get_style(bold=False, bg_col=None, border=None):\n if (bold, bg_col, border) not in _styles:\n _styles[(bold, bg_col, border)] = CellStyle(bold=bold,\n bg_color=bg_col,\n border=border)\n return ...
class Worksheet(object): """ A worksheet is a collection of tables placed at specific locations. Once all tables have been placed the worksheet can be written out or the rows can be iterated over, and any expressions present in the tables will be resolved to absolute cell references. :param str name: Worksheet name. """ _xlsx_unsupported_types = tuple() def __init__(self, name="Sheet1"): self.__name = name self.__tables = {} self.__values = {} self.__charts = [] self.__next_row = 0 self.__groups = [] @property def name(self): """Worksheet name""" return self.__name def add_table(self, table, row=None, col=0, row_spaces=1): """ Adds a table to the worksheet at (row, col). Return the (row, col) where the table has been put. :param xltable.Table table: Table to add to the worksheet. :param int row: Row to start the table at (defaults to the next free row). :param int col: Column to start the table at. :param int row_spaces: Number of rows to leave between this table and the next. """ name = table.name assert name is not None, "Tables must have a name" assert name not in self.__tables, "Table %s already exists in this worksheet" % name if row is None: row = self.__next_row self.__next_row = max(row + table.height + row_spaces, self.__next_row) self.__tables[name] = (table, (row, col)) return row, col def add_value(self, value, row, col): """ Adds a single value (cell) to a worksheet at (row, col). Return the (row, col) where the value has been put. :param value: Value to write to the sheet. :param row: Row where the value should be written. :param col: Column where the value should be written. """ self.__values[(row, col)] = value def add_chart(self, chart, row, col): """ Adds a chart to the worksheet at (row, col). :param xltable.Chart Chart: chart to add to the workbook. :param int row: Row to add the chart at. 
""" self.__charts.append((chart, (row, col))) def add_row_group(self, tables, collapsed=True): """ Adds a group over all the given tables (will include any rows between the first row over all tables, and the last row over all tables) Initially collapsed if collapsed is True (True by default) """ self.__groups.append((tables, collapsed)) @property def next_row(self): """Row the next table will start at unless another row is specified.""" return self.__next_row @next_row.setter def next_row(self, value): self.__next_row = value def get_table_pos(self, tablename): """ :param str tablename: Name of table to get position of. :return: Upper left (row, col) coordinate of the named table. """ _table, (row, col) = self.__tables[tablename] return (row, col) def get_table(self, tablename): """ :param str tablename: Name of table to find. :return: A :py:class:`xltable.Table` instance from the table name. """ table, (_row, _col) = self.__tables[tablename] return table def iterrows(self, workbook=None): """ Yield rows as lists of data. The data is exactly as it is in the source pandas DataFrames and any formulas are not resolved. """ resolved_tables = [] max_height = 0 max_width = 0 # while yielding rows __formula_values is updated with any formula values set on Expressions self.__formula_values = {} for name, (table, (row, col)) in list(self.__tables.items()): # get the resolved 2d data array from the table # # expressions with no explicit table will use None when calling # get_table/get_table_pos, which should return the current table. 
# self.__tables[None] = (table, (row, col)) data = table.get_data(workbook, row, col, self.__formula_values) del self.__tables[None] height, width = data.shape upper_left = (row, col) lower_right = (row + height - 1, col + width - 1) max_height = max(max_height, lower_right[0] + 1) max_width = max(max_width, lower_right[1] + 1) resolved_tables.append((name, data, upper_left, lower_right)) for row, col in self.__values.keys(): max_width = max(max_width, row+1) max_height = max(max_height, col+1) # Build the whole table up-front. Doing it row by row is too slow. table = [[None] * max_width for i in range(max_height)] for name, data, upper_left, lower_right in resolved_tables: for i, r in enumerate(range(upper_left[0], lower_right[0]+1)): for j, c in enumerate(range(upper_left[1], lower_right[1]+1)): table[r][c] = data[i][j] for (r, c), value in self.__values.items(): if isinstance(value, Value): value = value.value if isinstance(value, Expression): if value.has_value: self.__formula_values[(r, c)] = value.value value = value.get_formula(workbook, r, c) table[r][c] = value for row in table: yield row def to_csv(self, writer): """ Writes worksheet to a csv.writer object. :param writer: csv writer instance. """ for row in self.iterrows(): writer.writerow(row) def _get_column_widths(self): """return a dictionary of {col -> width}""" col_widths = {} for table, (row, col) in self.__tables.values(): for colname, width in table.column_widths.items(): ic = col + table.get_column_offset(colname) current_width = col_widths.setdefault(ic, width) col_widths[ic] = max(width, current_width) return col_widths def to_excel(self, workbook=None, worksheet=None, xl_app=None, clear=True, rename=True, resize_columns=True): """ Writes worksheet to an Excel Worksheet COM object. Requires :py:module:`pywin32` to be installed. :param workbook: xltable.Workbook this sheet belongs to. :param worksheet: Excel COM Worksheet instance to write to. 
:param xl_app: Excel COM Excel Application to write to. :param bool clear: If a worksheet is provided, clear worksheet before writing. :param bool rename: If a worksheet is provided, rename self to match the worksheet. :param bool resize_columns: Resize sheet columns after writing. """ from win32com.client import Dispatch, constants, gencache if xl_app is None: if worksheet is not None: xl_app = worksheet.Parent.Application elif workbook is not None and hasattr(workbook.workbook_obj, "Application"): xl_app = workbook.workbook_obj.Application else: xl_app = Dispatch("Excel.Application") xl = xl_app = gencache.EnsureDispatch(xl_app) # Create a workbook if there isn't one already if not workbook: from .workbook import Workbook workbook = Workbook(worksheets=[self]) if worksheet is None: # If there's no worksheet then call Workbook.to_excel which will create one return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns) if rename: self.__name = worksheet.Name # set manual calculation and turn off screen updating while we update the cells calculation = xl.Calculation screen_updating = xl.ScreenUpdating xl.Calculation = constants.xlCalculationManual xl.ScreenUpdating = False try: # clear the worksheet and reset the styles if clear: worksheet.Cells.ClearContents() worksheet.Cells.Font.Bold = False worksheet.Cells.Font.Size = 11 worksheet.Cells.Font.Color = 0x000000 worksheet.Cells.Interior.ColorIndex = 0 worksheet.Cells.NumberFormat = "General" # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False origin = worksheet.Range("A1") xl_cell = origin for r, row 
in enumerate(self.iterrows(workbook)): row = _to_pywintypes(row) # set the value and formulae to the excel range (it's much quicker to # write a row at a time and update the formula than it is it do it # cell by cell) if clear: xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, len(row))) xl_row.Value = row else: for c, value in enumerate(row): if value is not None: xl_cell.Offset(1, 1 + c).Value = value for c, value in enumerate(row): if isinstance(value, str): if value.startswith("="): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).Formula = value elif value.startswith("{=") \ and not _is_in_array_formula_table(r, c): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).FormulaArray = value # move to the next row xl_cell = xl_cell.Offset(2, 1) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): data = table.get_data(workbook, row, col) height, width = data.shape upper_left = origin.Offset(row+1, col+1) lower_right = origin.Offset(row + height, col + width) xl_range = worksheet.Range(upper_left, lower_right) xl_range.FormulaArray = table.formula.get_formula(workbook, row, col) # set any formatting for (row, col), style in self._get_all_styles().items(): r = origin.Offset(1 + row, 1 + col) if style.bold: r.Font.Bold = True if style.excel_number_format is not None: r.NumberFormat = style.excel_number_format if style.size is not None: r.Font.Size = style.size if style.text_color is not None: r.Font.Color = _to_bgr(style.text_color) if style.bg_color is not None: r.Interior.Color = _to_bgr(style.bg_color) if style.text_wrap or style.border: raise Exception("text wrap and border not implemented") # add any charts for chart, (row, col) in self.__charts: top_left = origin.Offset(1 + row, 1 + col) xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 
360, 220).Chart xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype) if chart.title: xl_chart.ChartTitle = chart.title for series in chart.iter_series(self, row, col): xl_series = xl_chart.SeriesCollection().NewSeries() xl_series.Values = "=%s!%s" % (self.name, series["values"].lstrip("=")) if "categories" in series: xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("=")) if "name" in series: xl_series.Name = series["name"] finally: xl.ScreenUpdating = screen_updating xl.Calculation = calculation if resize_columns: try: worksheet.Cells.EntireColumn.AutoFit() except: pass def to_xlsx(self, filename=None, workbook=None): """ Write worksheet to a .xlsx file using xlsxwriter. :param str filename: Filename to write to. If None no file is written. :param xltable.Workbook: Workbook this sheet belongs to. If None a new workbook will be created with this worksheet as the only sheet. :return: :py:class:`xlsxwriter.workbook.Workbook` instance. """ from .workbook import Workbook if not workbook: workbook = Workbook(filename=filename) workbook.append(self) return workbook.to_xlsx() ws = workbook.add_xlsx_worksheet(self, self.name) _styles = {} def _get_xlsx_style(cell_style): """ convert rb.excel style to xlsx writer style """ style_args = ( cell_style.bold, cell_style.excel_number_format, cell_style.text_color, cell_style.bg_color, cell_style.size, cell_style.text_wrap, cell_style.text_wrap, cell_style.border, cell_style.align, cell_style.valign ) if (style_args) not in _styles: style = workbook.add_format() if cell_style.bold: style.set_bold() if cell_style.excel_number_format is not None: style.set_num_format(cell_style.excel_number_format) if cell_style.text_color is not None: style.set_font_color("#%06x" % cell_style.text_color) if cell_style.bg_color is not None: style.set_bg_color("#%06x" % cell_style.bg_color) if cell_style.size is not None: style.set_font_size(cell_style.size) if cell_style.text_wrap: style.set_text_wrap() if 
cell_style.border: if isinstance(cell_style.border, frozenset): for border_position, border_style in cell_style.border: if border_position == "bottom": style.set_bottom(border_style) elif border_position == "top": style.set_top(border_style) elif border_position == "left": style.set_left(border_style) elif border_position == "right": style.set_right(border_style) else: raise AssertionError("Unknown border position '%s'." % border_position) else: style.set_border(cell_style.border) if cell_style.align: style.set_align(cell_style.align) if cell_style.valign: style.set_valign(cell_style.valign) _styles[style_args] = style return _styles[style_args] # pre-compute the cells with non-default styles ws_styles = self._get_all_styles() ws_styles = {(r, c): _get_xlsx_style(s) for ((r, c), s) in ws_styles.items()} plain_style = _get_xlsx_style(CellStyle()) # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False # write the rows to the worksheet for ir, row in enumerate(self.iterrows(workbook)): for ic, cell in enumerate(row): style = ws_styles.get((ir, ic), plain_style) if isinstance(cell, str): if cell.startswith("="): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_formula(ir, ic, cell, style, value=formula_value) elif cell.startswith("{="): # array formulas tables are written after everything else, # but individual cells can also be array formulas if not _is_in_array_formula_table(ir, ic): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_array_formula(ir, ic, ir, ic, cell, style, value=formula_value) else: ws.write(ir, ic, cell, style) 
else: if isinstance(cell, self._xlsx_unsupported_types): ws.write(ir, ic, str(cell), style) else: try: ws.write(ir, ic, cell, style) except TypeError: ws.write(ir, ic, str(cell), style) unsupported_types = set(self._xlsx_unsupported_types) unsupported_types.add(type(cell)) self.__class__._xlsx_unsupported_types = tuple(unsupported_types) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): style = ws_styles.get((row, col), plain_style) data = table.get_data(workbook, row, col) height, width = data.shape bottom, right = (row + height - 1, col + width -1) formula = table.formula.get_formula(workbook, row, col) ws.write_array_formula(row, col, bottom, right, formula, style, value=data[0][0]) for y in range(height): for x in range(width): if y == 0 and x == 0: continue ir, ic = row + y, col + x style = ws_styles.get((ir, ic), plain_style) cell = data[y][x] if isinstance(cell, str): cell_str = cell.encode("ascii", "xmlcharrefreplace").decode("ascii") ws.write_formula(ir, ic, cell_str, style) else: ws.write(ir, ic, cell, style) # set any non-default column widths for ic, width in self._get_column_widths().items(): ws.set_column(ic, ic, width) # add any charts for chart, (row, col) in self.__charts: kwargs = {"type": chart.type} if chart.subtype: kwargs["subtype"] = chart.subtype xl_chart = workbook.workbook_obj.add_chart(kwargs) if chart.show_blanks: xl_chart.show_blanks_as(chart.show_blanks) for series in chart.iter_series(workbook, row, col): # xlsxwriter expects the sheetname in the formula values = series.get("values") if isinstance(values, str) and values.startswith("=") and "!" not in values: series["values"] = "='%s'!%s" % (self.name, values.lstrip("=")) categories = series.get("categories") if isinstance(categories, str) and categories.startswith("=") and "!" 
not in categories: series["categories"] = "='%s'!%s" % (self.name, categories.lstrip("=")) xl_chart.add_series(series) xl_chart.set_size({"width": chart.width, "height": chart.height}) if chart.title: xl_chart.set_title({"name": chart.title}) if chart.legend_position: xl_chart.set_legend({"position": chart.legend_position}) if chart.x_axis: xl_chart.set_x_axis(chart.x_axis) if chart.y_axis: xl_chart.set_y_axis(chart.y_axis) ws.insert_chart(row, col, xl_chart) # add any groups for tables, collapsed in self.__groups: min_row, max_row = 1000000, -1 for table, (row, col) in self.__tables.values(): if table in tables: min_row = min(min_row, row) max_row = max(max_row, row + table.height) for i in range(min_row, max_row+1): ws.set_row(i, None, None, {'level': 1, 'hidden': collapsed}) if filename: workbook.close() return workbook
fkarb/xltable
xltable/worksheet.py
Worksheet.to_excel
python
def to_excel(self, workbook=None, worksheet=None, xl_app=None, clear=True, rename=True, resize_columns=True): from win32com.client import Dispatch, constants, gencache if xl_app is None: if worksheet is not None: xl_app = worksheet.Parent.Application elif workbook is not None and hasattr(workbook.workbook_obj, "Application"): xl_app = workbook.workbook_obj.Application else: xl_app = Dispatch("Excel.Application") xl = xl_app = gencache.EnsureDispatch(xl_app) # Create a workbook if there isn't one already if not workbook: from .workbook import Workbook workbook = Workbook(worksheets=[self]) if worksheet is None: # If there's no worksheet then call Workbook.to_excel which will create one return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns) if rename: self.__name = worksheet.Name # set manual calculation and turn off screen updating while we update the cells calculation = xl.Calculation screen_updating = xl.ScreenUpdating xl.Calculation = constants.xlCalculationManual xl.ScreenUpdating = False try: # clear the worksheet and reset the styles if clear: worksheet.Cells.ClearContents() worksheet.Cells.Font.Bold = False worksheet.Cells.Font.Size = 11 worksheet.Cells.Font.Color = 0x000000 worksheet.Cells.Interior.ColorIndex = 0 worksheet.Cells.NumberFormat = "General" # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False origin = worksheet.Range("A1") xl_cell = origin for r, row in enumerate(self.iterrows(workbook)): row = _to_pywintypes(row) # set the value and formulae to the excel range (it's much quicker to # write a row at a time and update the 
formula than it is it do it # cell by cell) if clear: xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, len(row))) xl_row.Value = row else: for c, value in enumerate(row): if value is not None: xl_cell.Offset(1, 1 + c).Value = value for c, value in enumerate(row): if isinstance(value, str): if value.startswith("="): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).Formula = value elif value.startswith("{=") \ and not _is_in_array_formula_table(r, c): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).FormulaArray = value # move to the next row xl_cell = xl_cell.Offset(2, 1) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): data = table.get_data(workbook, row, col) height, width = data.shape upper_left = origin.Offset(row+1, col+1) lower_right = origin.Offset(row + height, col + width) xl_range = worksheet.Range(upper_left, lower_right) xl_range.FormulaArray = table.formula.get_formula(workbook, row, col) # set any formatting for (row, col), style in self._get_all_styles().items(): r = origin.Offset(1 + row, 1 + col) if style.bold: r.Font.Bold = True if style.excel_number_format is not None: r.NumberFormat = style.excel_number_format if style.size is not None: r.Font.Size = style.size if style.text_color is not None: r.Font.Color = _to_bgr(style.text_color) if style.bg_color is not None: r.Interior.Color = _to_bgr(style.bg_color) if style.text_wrap or style.border: raise Exception("text wrap and border not implemented") # add any charts for chart, (row, col) in self.__charts: top_left = origin.Offset(1 + row, 1 + col) xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype) if chart.title: xl_chart.ChartTitle = chart.title for series in chart.iter_series(self, 
row, col): xl_series = xl_chart.SeriesCollection().NewSeries() xl_series.Values = "=%s!%s" % (self.name, series["values"].lstrip("=")) if "categories" in series: xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("=")) if "name" in series: xl_series.Name = series["name"] finally: xl.ScreenUpdating = screen_updating xl.Calculation = calculation if resize_columns: try: worksheet.Cells.EntireColumn.AutoFit() except: pass
Writes worksheet to an Excel Worksheet COM object. Requires :py:module:`pywin32` to be installed. :param workbook: xltable.Workbook this sheet belongs to. :param worksheet: Excel COM Worksheet instance to write to. :param xl_app: Excel COM Excel Application to write to. :param bool clear: If a worksheet is provided, clear worksheet before writing. :param bool rename: If a worksheet is provided, rename self to match the worksheet. :param bool resize_columns: Resize sheet columns after writing.
train
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L280-L431
[ "def _to_pywintypes(row):\n \"\"\"convert values in a row to types accepted by excel\"\"\"\n def _pywintype(x):\n if isinstance(x, dt.date):\n return dt.datetime(x.year, x.month, x.day, tzinfo=dt.timezone.utc)\n\n elif isinstance(x, (dt.datetime, pa.Timestamp)):\n if x.tzin...
class Worksheet(object): """ A worksheet is a collection of tables placed at specific locations. Once all tables have been placed the worksheet can be written out or the rows can be iterated over, and any expressions present in the tables will be resolved to absolute cell references. :param str name: Worksheet name. """ _xlsx_unsupported_types = tuple() def __init__(self, name="Sheet1"): self.__name = name self.__tables = {} self.__values = {} self.__charts = [] self.__next_row = 0 self.__groups = [] @property def name(self): """Worksheet name""" return self.__name def add_table(self, table, row=None, col=0, row_spaces=1): """ Adds a table to the worksheet at (row, col). Return the (row, col) where the table has been put. :param xltable.Table table: Table to add to the worksheet. :param int row: Row to start the table at (defaults to the next free row). :param int col: Column to start the table at. :param int row_spaces: Number of rows to leave between this table and the next. """ name = table.name assert name is not None, "Tables must have a name" assert name not in self.__tables, "Table %s already exists in this worksheet" % name if row is None: row = self.__next_row self.__next_row = max(row + table.height + row_spaces, self.__next_row) self.__tables[name] = (table, (row, col)) return row, col def add_value(self, value, row, col): """ Adds a single value (cell) to a worksheet at (row, col). Return the (row, col) where the value has been put. :param value: Value to write to the sheet. :param row: Row where the value should be written. :param col: Column where the value should be written. """ self.__values[(row, col)] = value def add_chart(self, chart, row, col): """ Adds a chart to the worksheet at (row, col). :param xltable.Chart Chart: chart to add to the workbook. :param int row: Row to add the chart at. 
""" self.__charts.append((chart, (row, col))) def add_row_group(self, tables, collapsed=True): """ Adds a group over all the given tables (will include any rows between the first row over all tables, and the last row over all tables) Initially collapsed if collapsed is True (True by default) """ self.__groups.append((tables, collapsed)) @property def next_row(self): """Row the next table will start at unless another row is specified.""" return self.__next_row @next_row.setter def next_row(self, value): self.__next_row = value def get_table_pos(self, tablename): """ :param str tablename: Name of table to get position of. :return: Upper left (row, col) coordinate of the named table. """ _table, (row, col) = self.__tables[tablename] return (row, col) def get_table(self, tablename): """ :param str tablename: Name of table to find. :return: A :py:class:`xltable.Table` instance from the table name. """ table, (_row, _col) = self.__tables[tablename] return table def iterrows(self, workbook=None): """ Yield rows as lists of data. The data is exactly as it is in the source pandas DataFrames and any formulas are not resolved. """ resolved_tables = [] max_height = 0 max_width = 0 # while yielding rows __formula_values is updated with any formula values set on Expressions self.__formula_values = {} for name, (table, (row, col)) in list(self.__tables.items()): # get the resolved 2d data array from the table # # expressions with no explicit table will use None when calling # get_table/get_table_pos, which should return the current table. 
# self.__tables[None] = (table, (row, col)) data = table.get_data(workbook, row, col, self.__formula_values) del self.__tables[None] height, width = data.shape upper_left = (row, col) lower_right = (row + height - 1, col + width - 1) max_height = max(max_height, lower_right[0] + 1) max_width = max(max_width, lower_right[1] + 1) resolved_tables.append((name, data, upper_left, lower_right)) for row, col in self.__values.keys(): max_width = max(max_width, row+1) max_height = max(max_height, col+1) # Build the whole table up-front. Doing it row by row is too slow. table = [[None] * max_width for i in range(max_height)] for name, data, upper_left, lower_right in resolved_tables: for i, r in enumerate(range(upper_left[0], lower_right[0]+1)): for j, c in enumerate(range(upper_left[1], lower_right[1]+1)): table[r][c] = data[i][j] for (r, c), value in self.__values.items(): if isinstance(value, Value): value = value.value if isinstance(value, Expression): if value.has_value: self.__formula_values[(r, c)] = value.value value = value.get_formula(workbook, r, c) table[r][c] = value for row in table: yield row def to_csv(self, writer): """ Writes worksheet to a csv.writer object. :param writer: csv writer instance. """ for row in self.iterrows(): writer.writerow(row) def _get_column_widths(self): """return a dictionary of {col -> width}""" col_widths = {} for table, (row, col) in self.__tables.values(): for colname, width in table.column_widths.items(): ic = col + table.get_column_offset(colname) current_width = col_widths.setdefault(ic, width) col_widths[ic] = max(width, current_width) return col_widths def _get_all_styles(self): """ return a dictionary of {(row, col) -> CellStyle} for all cells that use a non-default style. 
""" _styles = {} def _get_style(bold=False, bg_col=None, border=None): if (bold, bg_col, border) not in _styles: _styles[(bold, bg_col, border)] = CellStyle(bold=bold, bg_color=bg_col, border=border) return _styles[(bold, bg_col, border)] ws_styles = {} for table, (row, col) in self.__tables.values(): for r in range(row, row + table.header_height): for c in range(col, col + table.width): if isinstance(table.header_style, dict): col_name = table.dataframe.columns[c - col] style = table.header_style.get(col_name, _get_style(bold=True)) else: style = table.header_style or _get_style(bold=True) ws_styles[(r, c)] = style for c in range(col, col + table.row_labels_width): for r in range(row + table.header_height, row + table.height): if isinstance(table.index_style, dict): row_name = table.dataframe.index[r - row] style = table.index_style.get(row_name, _get_style(bold=True)) else: style = table.index_style or _get_style(bold=True) ws_styles[(r, c)] = style if table.style.stripe_colors or table.style.border: num_bg_cols = len(table.style.stripe_colors) if \ table.style.stripe_colors else 1 bg_cols = table.style.stripe_colors if \ table.style.stripe_colors else None for i, row_offset in enumerate(range(table.header_height, table.height)): for c in range(col, col + table.width): bg_col = bg_cols[i % num_bg_cols] if bg_cols else None style = _get_style(bold=None, bg_col=bg_col, border=table.style.border) if (row + row_offset, c) in ws_styles: style = style + ws_styles[(row + row_offset, c)] ws_styles[(row + row_offset, c)] = style for col_name, col_style in table.column_styles.items(): try: col_offset = table.get_column_offset(col_name) except KeyError: continue for i, r in enumerate(range(row + table.header_height, row + table.height)): style = col_style if (r, col + col_offset) in ws_styles: style = ws_styles[(r, col + col_offset)] + style ws_styles[(r, col + col_offset)] = style for row_name, row_style in table.row_styles.items(): try: row_offset = 
table.get_row_offset(row_name) except KeyError: continue for i, c in enumerate(range(col + table.row_labels_width, col + table.width)): style = row_style if (row + row_offset, c) in ws_styles: style = ws_styles[(row + row_offset, c)] + style ws_styles[(row + row_offset, c)] = style for (row_name, col_name), cell_style in table.cell_styles.items(): try: col_offset = table.get_column_offset(col_name) row_offset = table.get_row_offset(row_name) except KeyError: continue style = cell_style if (row + row_offset, col + col_offset) in ws_styles: style = ws_styles[(row + row_offset, col + col_offset)] + style ws_styles[(row + row_offset, col + col_offset)] = style for (row, col), value in self.__values.items(): if isinstance(value, Value): style = value.style if style: if (row, col) in ws_styles: style = style + ws_styles[(row, col)] ws_styles[(row, col)] = style return ws_styles def to_xlsx(self, filename=None, workbook=None): """ Write worksheet to a .xlsx file using xlsxwriter. :param str filename: Filename to write to. If None no file is written. :param xltable.Workbook: Workbook this sheet belongs to. If None a new workbook will be created with this worksheet as the only sheet. :return: :py:class:`xlsxwriter.workbook.Workbook` instance. 
""" from .workbook import Workbook if not workbook: workbook = Workbook(filename=filename) workbook.append(self) return workbook.to_xlsx() ws = workbook.add_xlsx_worksheet(self, self.name) _styles = {} def _get_xlsx_style(cell_style): """ convert rb.excel style to xlsx writer style """ style_args = ( cell_style.bold, cell_style.excel_number_format, cell_style.text_color, cell_style.bg_color, cell_style.size, cell_style.text_wrap, cell_style.text_wrap, cell_style.border, cell_style.align, cell_style.valign ) if (style_args) not in _styles: style = workbook.add_format() if cell_style.bold: style.set_bold() if cell_style.excel_number_format is not None: style.set_num_format(cell_style.excel_number_format) if cell_style.text_color is not None: style.set_font_color("#%06x" % cell_style.text_color) if cell_style.bg_color is not None: style.set_bg_color("#%06x" % cell_style.bg_color) if cell_style.size is not None: style.set_font_size(cell_style.size) if cell_style.text_wrap: style.set_text_wrap() if cell_style.border: if isinstance(cell_style.border, frozenset): for border_position, border_style in cell_style.border: if border_position == "bottom": style.set_bottom(border_style) elif border_position == "top": style.set_top(border_style) elif border_position == "left": style.set_left(border_style) elif border_position == "right": style.set_right(border_style) else: raise AssertionError("Unknown border position '%s'." 
% border_position) else: style.set_border(cell_style.border) if cell_style.align: style.set_align(cell_style.align) if cell_style.valign: style.set_valign(cell_style.valign) _styles[style_args] = style return _styles[style_args] # pre-compute the cells with non-default styles ws_styles = self._get_all_styles() ws_styles = {(r, c): _get_xlsx_style(s) for ((r, c), s) in ws_styles.items()} plain_style = _get_xlsx_style(CellStyle()) # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False # write the rows to the worksheet for ir, row in enumerate(self.iterrows(workbook)): for ic, cell in enumerate(row): style = ws_styles.get((ir, ic), plain_style) if isinstance(cell, str): if cell.startswith("="): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_formula(ir, ic, cell, style, value=formula_value) elif cell.startswith("{="): # array formulas tables are written after everything else, # but individual cells can also be array formulas if not _is_in_array_formula_table(ir, ic): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_array_formula(ir, ic, ir, ic, cell, style, value=formula_value) else: ws.write(ir, ic, cell, style) else: if isinstance(cell, self._xlsx_unsupported_types): ws.write(ir, ic, str(cell), style) else: try: ws.write(ir, ic, cell, style) except TypeError: ws.write(ir, ic, str(cell), style) unsupported_types = set(self._xlsx_unsupported_types) unsupported_types.add(type(cell)) self.__class__._xlsx_unsupported_types = tuple(unsupported_types) # set any array formulas for table, (row, col) in self.__tables.values(): if 
isinstance(table, ArrayFormula): style = ws_styles.get((row, col), plain_style) data = table.get_data(workbook, row, col) height, width = data.shape bottom, right = (row + height - 1, col + width -1) formula = table.formula.get_formula(workbook, row, col) ws.write_array_formula(row, col, bottom, right, formula, style, value=data[0][0]) for y in range(height): for x in range(width): if y == 0 and x == 0: continue ir, ic = row + y, col + x style = ws_styles.get((ir, ic), plain_style) cell = data[y][x] if isinstance(cell, str): cell_str = cell.encode("ascii", "xmlcharrefreplace").decode("ascii") ws.write_formula(ir, ic, cell_str, style) else: ws.write(ir, ic, cell, style) # set any non-default column widths for ic, width in self._get_column_widths().items(): ws.set_column(ic, ic, width) # add any charts for chart, (row, col) in self.__charts: kwargs = {"type": chart.type} if chart.subtype: kwargs["subtype"] = chart.subtype xl_chart = workbook.workbook_obj.add_chart(kwargs) if chart.show_blanks: xl_chart.show_blanks_as(chart.show_blanks) for series in chart.iter_series(workbook, row, col): # xlsxwriter expects the sheetname in the formula values = series.get("values") if isinstance(values, str) and values.startswith("=") and "!" not in values: series["values"] = "='%s'!%s" % (self.name, values.lstrip("=")) categories = series.get("categories") if isinstance(categories, str) and categories.startswith("=") and "!" 
not in categories: series["categories"] = "='%s'!%s" % (self.name, categories.lstrip("=")) xl_chart.add_series(series) xl_chart.set_size({"width": chart.width, "height": chart.height}) if chart.title: xl_chart.set_title({"name": chart.title}) if chart.legend_position: xl_chart.set_legend({"position": chart.legend_position}) if chart.x_axis: xl_chart.set_x_axis(chart.x_axis) if chart.y_axis: xl_chart.set_y_axis(chart.y_axis) ws.insert_chart(row, col, xl_chart) # add any groups for tables, collapsed in self.__groups: min_row, max_row = 1000000, -1 for table, (row, col) in self.__tables.values(): if table in tables: min_row = min(min_row, row) max_row = max(max_row, row + table.height) for i in range(min_row, max_row+1): ws.set_row(i, None, None, {'level': 1, 'hidden': collapsed}) if filename: workbook.close() return workbook
fkarb/xltable
xltable/worksheet.py
Worksheet.to_xlsx
python
def to_xlsx(self, filename=None, workbook=None): from .workbook import Workbook if not workbook: workbook = Workbook(filename=filename) workbook.append(self) return workbook.to_xlsx() ws = workbook.add_xlsx_worksheet(self, self.name) _styles = {} def _get_xlsx_style(cell_style): """ convert rb.excel style to xlsx writer style """ style_args = ( cell_style.bold, cell_style.excel_number_format, cell_style.text_color, cell_style.bg_color, cell_style.size, cell_style.text_wrap, cell_style.text_wrap, cell_style.border, cell_style.align, cell_style.valign ) if (style_args) not in _styles: style = workbook.add_format() if cell_style.bold: style.set_bold() if cell_style.excel_number_format is not None: style.set_num_format(cell_style.excel_number_format) if cell_style.text_color is not None: style.set_font_color("#%06x" % cell_style.text_color) if cell_style.bg_color is not None: style.set_bg_color("#%06x" % cell_style.bg_color) if cell_style.size is not None: style.set_font_size(cell_style.size) if cell_style.text_wrap: style.set_text_wrap() if cell_style.border: if isinstance(cell_style.border, frozenset): for border_position, border_style in cell_style.border: if border_position == "bottom": style.set_bottom(border_style) elif border_position == "top": style.set_top(border_style) elif border_position == "left": style.set_left(border_style) elif border_position == "right": style.set_right(border_style) else: raise AssertionError("Unknown border position '%s'." 
% border_position) else: style.set_border(cell_style.border) if cell_style.align: style.set_align(cell_style.align) if cell_style.valign: style.set_valign(cell_style.valign) _styles[style_args] = style return _styles[style_args] # pre-compute the cells with non-default styles ws_styles = self._get_all_styles() ws_styles = {(r, c): _get_xlsx_style(s) for ((r, c), s) in ws_styles.items()} plain_style = _get_xlsx_style(CellStyle()) # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False # write the rows to the worksheet for ir, row in enumerate(self.iterrows(workbook)): for ic, cell in enumerate(row): style = ws_styles.get((ir, ic), plain_style) if isinstance(cell, str): if cell.startswith("="): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_formula(ir, ic, cell, style, value=formula_value) elif cell.startswith("{="): # array formulas tables are written after everything else, # but individual cells can also be array formulas if not _is_in_array_formula_table(ir, ic): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_array_formula(ir, ic, ir, ic, cell, style, value=formula_value) else: ws.write(ir, ic, cell, style) else: if isinstance(cell, self._xlsx_unsupported_types): ws.write(ir, ic, str(cell), style) else: try: ws.write(ir, ic, cell, style) except TypeError: ws.write(ir, ic, str(cell), style) unsupported_types = set(self._xlsx_unsupported_types) unsupported_types.add(type(cell)) self.__class__._xlsx_unsupported_types = tuple(unsupported_types) # set any array formulas for table, (row, col) in self.__tables.values(): if 
isinstance(table, ArrayFormula): style = ws_styles.get((row, col), plain_style) data = table.get_data(workbook, row, col) height, width = data.shape bottom, right = (row + height - 1, col + width -1) formula = table.formula.get_formula(workbook, row, col) ws.write_array_formula(row, col, bottom, right, formula, style, value=data[0][0]) for y in range(height): for x in range(width): if y == 0 and x == 0: continue ir, ic = row + y, col + x style = ws_styles.get((ir, ic), plain_style) cell = data[y][x] if isinstance(cell, str): cell_str = cell.encode("ascii", "xmlcharrefreplace").decode("ascii") ws.write_formula(ir, ic, cell_str, style) else: ws.write(ir, ic, cell, style) # set any non-default column widths for ic, width in self._get_column_widths().items(): ws.set_column(ic, ic, width) # add any charts for chart, (row, col) in self.__charts: kwargs = {"type": chart.type} if chart.subtype: kwargs["subtype"] = chart.subtype xl_chart = workbook.workbook_obj.add_chart(kwargs) if chart.show_blanks: xl_chart.show_blanks_as(chart.show_blanks) for series in chart.iter_series(workbook, row, col): # xlsxwriter expects the sheetname in the formula values = series.get("values") if isinstance(values, str) and values.startswith("=") and "!" not in values: series["values"] = "='%s'!%s" % (self.name, values.lstrip("=")) categories = series.get("categories") if isinstance(categories, str) and categories.startswith("=") and "!" 
not in categories: series["categories"] = "='%s'!%s" % (self.name, categories.lstrip("=")) xl_chart.add_series(series) xl_chart.set_size({"width": chart.width, "height": chart.height}) if chart.title: xl_chart.set_title({"name": chart.title}) if chart.legend_position: xl_chart.set_legend({"position": chart.legend_position}) if chart.x_axis: xl_chart.set_x_axis(chart.x_axis) if chart.y_axis: xl_chart.set_y_axis(chart.y_axis) ws.insert_chart(row, col, xl_chart) # add any groups for tables, collapsed in self.__groups: min_row, max_row = 1000000, -1 for table, (row, col) in self.__tables.values(): if table in tables: min_row = min(min_row, row) max_row = max(max_row, row + table.height) for i in range(min_row, max_row+1): ws.set_row(i, None, None, {'level': 1, 'hidden': collapsed}) if filename: workbook.close() return workbook
Write worksheet to a .xlsx file using xlsxwriter. :param str filename: Filename to write to. If None no file is written. :param xltable.Workbook workbook: Workbook this sheet belongs to. If None a new workbook will be created with this worksheet as the only sheet. :return: :py:class:`xlsxwriter.workbook.Workbook` instance.
train
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L433-L630
[ "def add_sheet(self, worksheet):\n \"\"\"\n Adds a worksheet to the workbook.\n \"\"\"\n self.worksheets.append(worksheet)\n", "def to_xlsx(self, **kwargs):\n \"\"\"\n Write workbook to a .xlsx file using xlsxwriter.\n Return a xlsxwriter.workbook.Workbook.\n\n :param kwargs: Extra argumen...
class Worksheet(object): """ A worksheet is a collection of tables placed at specific locations. Once all tables have been placed the worksheet can be written out or the rows can be iterated over, and any expressions present in the tables will be resolved to absolute cell references. :param str name: Worksheet name. """ _xlsx_unsupported_types = tuple() def __init__(self, name="Sheet1"): self.__name = name self.__tables = {} self.__values = {} self.__charts = [] self.__next_row = 0 self.__groups = [] @property def name(self): """Worksheet name""" return self.__name def add_table(self, table, row=None, col=0, row_spaces=1): """ Adds a table to the worksheet at (row, col). Return the (row, col) where the table has been put. :param xltable.Table table: Table to add to the worksheet. :param int row: Row to start the table at (defaults to the next free row). :param int col: Column to start the table at. :param int row_spaces: Number of rows to leave between this table and the next. """ name = table.name assert name is not None, "Tables must have a name" assert name not in self.__tables, "Table %s already exists in this worksheet" % name if row is None: row = self.__next_row self.__next_row = max(row + table.height + row_spaces, self.__next_row) self.__tables[name] = (table, (row, col)) return row, col def add_value(self, value, row, col): """ Adds a single value (cell) to a worksheet at (row, col). Return the (row, col) where the value has been put. :param value: Value to write to the sheet. :param row: Row where the value should be written. :param col: Column where the value should be written. """ self.__values[(row, col)] = value def add_chart(self, chart, row, col): """ Adds a chart to the worksheet at (row, col). :param xltable.Chart Chart: chart to add to the workbook. :param int row: Row to add the chart at. 
""" self.__charts.append((chart, (row, col))) def add_row_group(self, tables, collapsed=True): """ Adds a group over all the given tables (will include any rows between the first row over all tables, and the last row over all tables) Initially collapsed if collapsed is True (True by default) """ self.__groups.append((tables, collapsed)) @property def next_row(self): """Row the next table will start at unless another row is specified.""" return self.__next_row @next_row.setter def next_row(self, value): self.__next_row = value def get_table_pos(self, tablename): """ :param str tablename: Name of table to get position of. :return: Upper left (row, col) coordinate of the named table. """ _table, (row, col) = self.__tables[tablename] return (row, col) def get_table(self, tablename): """ :param str tablename: Name of table to find. :return: A :py:class:`xltable.Table` instance from the table name. """ table, (_row, _col) = self.__tables[tablename] return table def iterrows(self, workbook=None): """ Yield rows as lists of data. The data is exactly as it is in the source pandas DataFrames and any formulas are not resolved. """ resolved_tables = [] max_height = 0 max_width = 0 # while yielding rows __formula_values is updated with any formula values set on Expressions self.__formula_values = {} for name, (table, (row, col)) in list(self.__tables.items()): # get the resolved 2d data array from the table # # expressions with no explicit table will use None when calling # get_table/get_table_pos, which should return the current table. 
# self.__tables[None] = (table, (row, col)) data = table.get_data(workbook, row, col, self.__formula_values) del self.__tables[None] height, width = data.shape upper_left = (row, col) lower_right = (row + height - 1, col + width - 1) max_height = max(max_height, lower_right[0] + 1) max_width = max(max_width, lower_right[1] + 1) resolved_tables.append((name, data, upper_left, lower_right)) for row, col in self.__values.keys(): max_width = max(max_width, row+1) max_height = max(max_height, col+1) # Build the whole table up-front. Doing it row by row is too slow. table = [[None] * max_width for i in range(max_height)] for name, data, upper_left, lower_right in resolved_tables: for i, r in enumerate(range(upper_left[0], lower_right[0]+1)): for j, c in enumerate(range(upper_left[1], lower_right[1]+1)): table[r][c] = data[i][j] for (r, c), value in self.__values.items(): if isinstance(value, Value): value = value.value if isinstance(value, Expression): if value.has_value: self.__formula_values[(r, c)] = value.value value = value.get_formula(workbook, r, c) table[r][c] = value for row in table: yield row def to_csv(self, writer): """ Writes worksheet to a csv.writer object. :param writer: csv writer instance. """ for row in self.iterrows(): writer.writerow(row) def _get_column_widths(self): """return a dictionary of {col -> width}""" col_widths = {} for table, (row, col) in self.__tables.values(): for colname, width in table.column_widths.items(): ic = col + table.get_column_offset(colname) current_width = col_widths.setdefault(ic, width) col_widths[ic] = max(width, current_width) return col_widths def _get_all_styles(self): """ return a dictionary of {(row, col) -> CellStyle} for all cells that use a non-default style. 
""" _styles = {} def _get_style(bold=False, bg_col=None, border=None): if (bold, bg_col, border) not in _styles: _styles[(bold, bg_col, border)] = CellStyle(bold=bold, bg_color=bg_col, border=border) return _styles[(bold, bg_col, border)] ws_styles = {} for table, (row, col) in self.__tables.values(): for r in range(row, row + table.header_height): for c in range(col, col + table.width): if isinstance(table.header_style, dict): col_name = table.dataframe.columns[c - col] style = table.header_style.get(col_name, _get_style(bold=True)) else: style = table.header_style or _get_style(bold=True) ws_styles[(r, c)] = style for c in range(col, col + table.row_labels_width): for r in range(row + table.header_height, row + table.height): if isinstance(table.index_style, dict): row_name = table.dataframe.index[r - row] style = table.index_style.get(row_name, _get_style(bold=True)) else: style = table.index_style or _get_style(bold=True) ws_styles[(r, c)] = style if table.style.stripe_colors or table.style.border: num_bg_cols = len(table.style.stripe_colors) if \ table.style.stripe_colors else 1 bg_cols = table.style.stripe_colors if \ table.style.stripe_colors else None for i, row_offset in enumerate(range(table.header_height, table.height)): for c in range(col, col + table.width): bg_col = bg_cols[i % num_bg_cols] if bg_cols else None style = _get_style(bold=None, bg_col=bg_col, border=table.style.border) if (row + row_offset, c) in ws_styles: style = style + ws_styles[(row + row_offset, c)] ws_styles[(row + row_offset, c)] = style for col_name, col_style in table.column_styles.items(): try: col_offset = table.get_column_offset(col_name) except KeyError: continue for i, r in enumerate(range(row + table.header_height, row + table.height)): style = col_style if (r, col + col_offset) in ws_styles: style = ws_styles[(r, col + col_offset)] + style ws_styles[(r, col + col_offset)] = style for row_name, row_style in table.row_styles.items(): try: row_offset = 
table.get_row_offset(row_name) except KeyError: continue for i, c in enumerate(range(col + table.row_labels_width, col + table.width)): style = row_style if (row + row_offset, c) in ws_styles: style = ws_styles[(row + row_offset, c)] + style ws_styles[(row + row_offset, c)] = style for (row_name, col_name), cell_style in table.cell_styles.items(): try: col_offset = table.get_column_offset(col_name) row_offset = table.get_row_offset(row_name) except KeyError: continue style = cell_style if (row + row_offset, col + col_offset) in ws_styles: style = ws_styles[(row + row_offset, col + col_offset)] + style ws_styles[(row + row_offset, col + col_offset)] = style for (row, col), value in self.__values.items(): if isinstance(value, Value): style = value.style if style: if (row, col) in ws_styles: style = style + ws_styles[(row, col)] ws_styles[(row, col)] = style return ws_styles def to_excel(self, workbook=None, worksheet=None, xl_app=None, clear=True, rename=True, resize_columns=True): """ Writes worksheet to an Excel Worksheet COM object. Requires :py:module:`pywin32` to be installed. :param workbook: xltable.Workbook this sheet belongs to. :param worksheet: Excel COM Worksheet instance to write to. :param xl_app: Excel COM Excel Application to write to. :param bool clear: If a worksheet is provided, clear worksheet before writing. :param bool rename: If a worksheet is provided, rename self to match the worksheet. :param bool resize_columns: Resize sheet columns after writing. 
""" from win32com.client import Dispatch, constants, gencache if xl_app is None: if worksheet is not None: xl_app = worksheet.Parent.Application elif workbook is not None and hasattr(workbook.workbook_obj, "Application"): xl_app = workbook.workbook_obj.Application else: xl_app = Dispatch("Excel.Application") xl = xl_app = gencache.EnsureDispatch(xl_app) # Create a workbook if there isn't one already if not workbook: from .workbook import Workbook workbook = Workbook(worksheets=[self]) if worksheet is None: # If there's no worksheet then call Workbook.to_excel which will create one return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns) if rename: self.__name = worksheet.Name # set manual calculation and turn off screen updating while we update the cells calculation = xl.Calculation screen_updating = xl.ScreenUpdating xl.Calculation = constants.xlCalculationManual xl.ScreenUpdating = False try: # clear the worksheet and reset the styles if clear: worksheet.Cells.ClearContents() worksheet.Cells.Font.Bold = False worksheet.Cells.Font.Size = 11 worksheet.Cells.Font.Color = 0x000000 worksheet.Cells.Interior.ColorIndex = 0 worksheet.Cells.NumberFormat = "General" # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False origin = worksheet.Range("A1") xl_cell = origin for r, row in enumerate(self.iterrows(workbook)): row = _to_pywintypes(row) # set the value and formulae to the excel range (it's much quicker to # write a row at a time and update the formula than it is it do it # cell by cell) if clear: xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, 
len(row))) xl_row.Value = row else: for c, value in enumerate(row): if value is not None: xl_cell.Offset(1, 1 + c).Value = value for c, value in enumerate(row): if isinstance(value, str): if value.startswith("="): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).Formula = value elif value.startswith("{=") \ and not _is_in_array_formula_table(r, c): formula_value = self.__formula_values.get((r, c), 0) xl_cell.Offset(1, 1 + c).Value = formula_value xl_cell.Offset(1, 1 + c).FormulaArray = value # move to the next row xl_cell = xl_cell.Offset(2, 1) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): data = table.get_data(workbook, row, col) height, width = data.shape upper_left = origin.Offset(row+1, col+1) lower_right = origin.Offset(row + height, col + width) xl_range = worksheet.Range(upper_left, lower_right) xl_range.FormulaArray = table.formula.get_formula(workbook, row, col) # set any formatting for (row, col), style in self._get_all_styles().items(): r = origin.Offset(1 + row, 1 + col) if style.bold: r.Font.Bold = True if style.excel_number_format is not None: r.NumberFormat = style.excel_number_format if style.size is not None: r.Font.Size = style.size if style.text_color is not None: r.Font.Color = _to_bgr(style.text_color) if style.bg_color is not None: r.Interior.Color = _to_bgr(style.bg_color) if style.text_wrap or style.border: raise Exception("text wrap and border not implemented") # add any charts for chart, (row, col) in self.__charts: top_left = origin.Offset(1 + row, 1 + col) xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype) if chart.title: xl_chart.ChartTitle = chart.title for series in chart.iter_series(self, row, col): xl_series = xl_chart.SeriesCollection().NewSeries() xl_series.Values = "=%s!%s" % (self.name, 
series["values"].lstrip("=")) if "categories" in series: xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("=")) if "name" in series: xl_series.Name = series["name"] finally: xl.ScreenUpdating = screen_updating xl.Calculation = calculation if resize_columns: try: worksheet.Cells.EntireColumn.AutoFit() except: pass def to_xlsx(self, filename=None, workbook=None): """ Write worksheet to a .xlsx file using xlsxwriter. :param str filename: Filename to write to. If None no file is written. :param xltable.Workbook: Workbook this sheet belongs to. If None a new workbook will be created with this worksheet as the only sheet. :return: :py:class:`xlsxwriter.workbook.Workbook` instance. """ from .workbook import Workbook if not workbook: workbook = Workbook(filename=filename) workbook.append(self) return workbook.to_xlsx() ws = workbook.add_xlsx_worksheet(self, self.name) _styles = {} def _get_xlsx_style(cell_style): """ convert rb.excel style to xlsx writer style """ style_args = ( cell_style.bold, cell_style.excel_number_format, cell_style.text_color, cell_style.bg_color, cell_style.size, cell_style.text_wrap, cell_style.text_wrap, cell_style.border, cell_style.align, cell_style.valign ) if (style_args) not in _styles: style = workbook.add_format() if cell_style.bold: style.set_bold() if cell_style.excel_number_format is not None: style.set_num_format(cell_style.excel_number_format) if cell_style.text_color is not None: style.set_font_color("#%06x" % cell_style.text_color) if cell_style.bg_color is not None: style.set_bg_color("#%06x" % cell_style.bg_color) if cell_style.size is not None: style.set_font_size(cell_style.size) if cell_style.text_wrap: style.set_text_wrap() if cell_style.border: if isinstance(cell_style.border, frozenset): for border_position, border_style in cell_style.border: if border_position == "bottom": style.set_bottom(border_style) elif border_position == "top": style.set_top(border_style) elif border_position == "left": 
style.set_left(border_style) elif border_position == "right": style.set_right(border_style) else: raise AssertionError("Unknown border position '%s'." % border_position) else: style.set_border(cell_style.border) if cell_style.align: style.set_align(cell_style.align) if cell_style.valign: style.set_valign(cell_style.valign) _styles[style_args] = style return _styles[style_args] # pre-compute the cells with non-default styles ws_styles = self._get_all_styles() ws_styles = {(r, c): _get_xlsx_style(s) for ((r, c), s) in ws_styles.items()} plain_style = _get_xlsx_style(CellStyle()) # get any array formula tables array_formula_tables = [] for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): array_formula_tables.append((row, col, row + table.height, col + table.width)) def _is_in_array_formula_table(row, col): """returns True if this formula cell is part of an array formula table""" for top, left, bottom, right in array_formula_tables: if bottom >= row >= top and left <= col <= right: return True return False # write the rows to the worksheet for ir, row in enumerate(self.iterrows(workbook)): for ic, cell in enumerate(row): style = ws_styles.get((ir, ic), plain_style) if isinstance(cell, str): if cell.startswith("="): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_formula(ir, ic, cell, style, value=formula_value) elif cell.startswith("{="): # array formulas tables are written after everything else, # but individual cells can also be array formulas if not _is_in_array_formula_table(ir, ic): formula_value = self.__formula_values.get((ir, ic), 0) ws.write_array_formula(ir, ic, ir, ic, cell, style, value=formula_value) else: ws.write(ir, ic, cell, style) else: if isinstance(cell, self._xlsx_unsupported_types): ws.write(ir, ic, str(cell), style) else: try: ws.write(ir, ic, cell, style) except TypeError: ws.write(ir, ic, str(cell), style) unsupported_types = set(self._xlsx_unsupported_types) unsupported_types.add(type(cell)) 
self.__class__._xlsx_unsupported_types = tuple(unsupported_types) # set any array formulas for table, (row, col) in self.__tables.values(): if isinstance(table, ArrayFormula): style = ws_styles.get((row, col), plain_style) data = table.get_data(workbook, row, col) height, width = data.shape bottom, right = (row + height - 1, col + width -1) formula = table.formula.get_formula(workbook, row, col) ws.write_array_formula(row, col, bottom, right, formula, style, value=data[0][0]) for y in range(height): for x in range(width): if y == 0 and x == 0: continue ir, ic = row + y, col + x style = ws_styles.get((ir, ic), plain_style) cell = data[y][x] if isinstance(cell, str): cell_str = cell.encode("ascii", "xmlcharrefreplace").decode("ascii") ws.write_formula(ir, ic, cell_str, style) else: ws.write(ir, ic, cell, style) # set any non-default column widths for ic, width in self._get_column_widths().items(): ws.set_column(ic, ic, width) # add any charts for chart, (row, col) in self.__charts: kwargs = {"type": chart.type} if chart.subtype: kwargs["subtype"] = chart.subtype xl_chart = workbook.workbook_obj.add_chart(kwargs) if chart.show_blanks: xl_chart.show_blanks_as(chart.show_blanks) for series in chart.iter_series(workbook, row, col): # xlsxwriter expects the sheetname in the formula values = series.get("values") if isinstance(values, str) and values.startswith("=") and "!" not in values: series["values"] = "='%s'!%s" % (self.name, values.lstrip("=")) categories = series.get("categories") if isinstance(categories, str) and categories.startswith("=") and "!" 
not in categories: series["categories"] = "='%s'!%s" % (self.name, categories.lstrip("=")) xl_chart.add_series(series) xl_chart.set_size({"width": chart.width, "height": chart.height}) if chart.title: xl_chart.set_title({"name": chart.title}) if chart.legend_position: xl_chart.set_legend({"position": chart.legend_position}) if chart.x_axis: xl_chart.set_x_axis(chart.x_axis) if chart.y_axis: xl_chart.set_y_axis(chart.y_axis) ws.insert_chart(row, col, xl_chart) # add any groups for tables, collapsed in self.__groups: min_row, max_row = 1000000, -1 for table, (row, col) in self.__tables.values(): if table in tables: min_row = min(min_row, row) max_row = max(max_row, row + table.height) for i in range(min_row, max_row+1): ws.set_row(i, None, None, {'level': 1, 'hidden': collapsed}) if filename: workbook.close() return workbook
fkarb/xltable
xltable/chart.py
Chart.add_series
python
def add_series(self, values, **kwargs): series = {"values": values} series.update(kwargs) self.__series.append(series)
Adds a series to the chart. :param values: A :py:class:`xltable.Expression` object that evaluates to the data series. :param categories: A :py:class:`xltable.Expression` object that evaluates to the data series. :param name: Name to show in the legend for the series :param line: Line style, eg {'color': 'blue', 'width': 3.25} or {'none': True} :param marker: dict specifying how the markers should look, eg {type: square}. :param trendline: dict specifying how the trendline should be drawn, eg {type: linear}.
train
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/chart.py#L70-L83
null
class Chart(object): """ Chart objects reference data from Table instances and are written to Excel worksheets as Excel charts. :param str type: Chart type (see below). :param str subtype: Chart sub type (see below). :param str title: Chart title :param str legend_position: right (default), left, top, bottom or 'none' for no legend. :param int width: Chart width. :param int height: Chart height. Chart types and sub-types: - area: - stacked - percent_stacked - bar: - stacked - perecent_stacked - column: - stacked - perecent_stacked - line - scatter: - straight_with_markers - straight - smooth_with_markers - smooth - stock - radar: - with_markers - filled """ def __init__(self, type, subtype=None, title=None, legend_position=None, x_axis=None, y_axis=None, show_blanks=None, # set to 'gap', 'zero' or 'span' width=480, height=288): self.type = type self.subtype = subtype self.title = title self.legend_position = legend_position self.x_axis = dict(x_axis) if x_axis else x_axis self.y_axis = dict(y_axis) if y_axis else y_axis self.show_blanks = show_blanks self.width = width self.height = height self.__series = [] # convert dates in the axis args to serial dates for axis in (self.x_axis, self.y_axis): if axis: for key, value in list(axis.items()): if isinstance(value, dt.date): axis[key] = (value - dt.date(1900, 1, 1)).days + 2 def add_series(self, values, **kwargs): """ Adds a series to the chart. :param values: A :py:class:`xltable.Expression` object that evaluates to the data series. :param categories: A :py:class:`xltable.Expression` object that evaluates to the data series. :param name: Name to show in the legend for the series :param line: Line style, eg {'color': 'blue', 'width': 3.25} or {'none': True} :param marker: dict specifying how the markers should look, eg {type: square}. :param trendline: dict specifying how the trendline should be drawn, eg {type: linear}. 
""" series = {"values": values} series.update(kwargs) self.__series.append(series) def iter_series(self, workbook, row, col): """ Yield series dictionaries with values resolved to the final excel formulas. """ for series in self.__series: series = dict(series) series["values"] = series["values"].get_formula(workbook, row, col) if "categories" in series: series["categories"] = series["categories"].get_formula(workbook, row, col) yield series
fkarb/xltable
xltable/chart.py
Chart.iter_series
python
def iter_series(self, workbook, row, col): for series in self.__series: series = dict(series) series["values"] = series["values"].get_formula(workbook, row, col) if "categories" in series: series["categories"] = series["categories"].get_formula(workbook, row, col) yield series
Yield series dictionaries with values resolved to the final excel formulas.
train
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/chart.py#L85-L94
null
class Chart(object): """ Chart objects reference data from Table instances and are written to Excel worksheets as Excel charts. :param str type: Chart type (see below). :param str subtype: Chart sub type (see below). :param str title: Chart title :param str legend_position: right (default), left, top, bottom or 'none' for no legend. :param int width: Chart width. :param int height: Chart height. Chart types and sub-types: - area: - stacked - percent_stacked - bar: - stacked - perecent_stacked - column: - stacked - perecent_stacked - line - scatter: - straight_with_markers - straight - smooth_with_markers - smooth - stock - radar: - with_markers - filled """ def __init__(self, type, subtype=None, title=None, legend_position=None, x_axis=None, y_axis=None, show_blanks=None, # set to 'gap', 'zero' or 'span' width=480, height=288): self.type = type self.subtype = subtype self.title = title self.legend_position = legend_position self.x_axis = dict(x_axis) if x_axis else x_axis self.y_axis = dict(y_axis) if y_axis else y_axis self.show_blanks = show_blanks self.width = width self.height = height self.__series = [] # convert dates in the axis args to serial dates for axis in (self.x_axis, self.y_axis): if axis: for key, value in list(axis.items()): if isinstance(value, dt.date): axis[key] = (value - dt.date(1900, 1, 1)).days + 2 def add_series(self, values, **kwargs): """ Adds a series to the chart. :param values: A :py:class:`xltable.Expression` object that evaluates to the data series. :param categories: A :py:class:`xltable.Expression` object that evaluates to the data series. :param name: Name to show in the legend for the series :param line: Line style, eg {'color': 'blue', 'width': 3.25} or {'none': True} :param marker: dict specifying how the markers should look, eg {type: square}. :param trendline: dict specifying how the trendline should be drawn, eg {type: linear}. """ series = {"values": values} series.update(kwargs) self.__series.append(series)
gebn/wood
wood/__init__.py
_normalise_path
python
def _normalise_path(path: Union[str, pathlib.Path]) -> pathlib.Path: if isinstance(path, str): return pathlib.Path(path) return path
Ensures a path is parsed. :param path: A path string or Path object. :return: The path as a Path object.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/__init__.py#L30-L39
null
# -*- coding: utf-8 -*- from typing import Union from pkg_resources import get_distribution, DistributionNotFound import os import pathlib from wood.entities import Root as _Root, Entity as _Entity from wood.comparison import Comparison from wood.integrations import cloudflare, cloudfront, s3 __title__ = 'wood' __author__ = 'George Brighton' __license__ = 'MIT' __copyright__ = 'Copyright 2017 George Brighton' # adapted from http://stackoverflow.com/a/17638236 try: dist = get_distribution(__title__) dist_path = os.path.normcase(dist.location) pwd = os.path.normcase(__file__) if not pwd.startswith(os.path.join(dist_path, __title__)): raise DistributionNotFound() __version__ = dist.version except DistributionNotFound: __version__ = 'unknown' def root(path: Union[str, pathlib.Path]) -> _Root: """ Retrieve a root directory object from a path. :param path: The path string or Path object. :return: The created root object. """ return _Root.from_path(_normalise_path(path)) def entity(path: Union[str, pathlib.Path]) -> _Entity: """ Retrieve an appropriate entity object from a path. :param path: The path of the entity to represent, either a string or Path object. :return: An entity representing the input path. """ return _Entity.from_path(_normalise_path(path)) def compare(left: Union[str, pathlib.Path, _Entity], right: Union[str, pathlib.Path, _Entity]) -> Comparison: """ Compare two paths. :param left: The left side or "before" entity. :param right: The right side or "after" entity. :return: A comparison details what has changed from the left side to the right side. """ def normalise(param: Union[str, pathlib.Path, _Entity]) -> _Entity: """ Turns any one of a number of types of input into an entity. :param param: The input - either a path string, a path object, or a full blown entity. :return: The input param as an entity. 
""" if isinstance(param, str): param = pathlib.Path(param) if isinstance(param, pathlib.Path): param = _Entity.from_path(param) return param return Comparison.compare(normalise(left), normalise(right))
gebn/wood
wood/__init__.py
root
python
def root(path: Union[str, pathlib.Path]) -> _Root: return _Root.from_path(_normalise_path(path))
Retrieve a root directory object from a path. :param path: The path string or Path object. :return: The created root object.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/__init__.py#L42-L49
[ "def _normalise_path(path: Union[str, pathlib.Path]) -> pathlib.Path:\n \"\"\"\n Ensures a path is parsed.\n\n :param path: A path string or Path object.\n :return: The path as a Path object.\n \"\"\"\n if isinstance(path, str):\n return pathlib.Path(path)\n return path\n", "def from_p...
# -*- coding: utf-8 -*- from typing import Union from pkg_resources import get_distribution, DistributionNotFound import os import pathlib from wood.entities import Root as _Root, Entity as _Entity from wood.comparison import Comparison from wood.integrations import cloudflare, cloudfront, s3 __title__ = 'wood' __author__ = 'George Brighton' __license__ = 'MIT' __copyright__ = 'Copyright 2017 George Brighton' # adapted from http://stackoverflow.com/a/17638236 try: dist = get_distribution(__title__) dist_path = os.path.normcase(dist.location) pwd = os.path.normcase(__file__) if not pwd.startswith(os.path.join(dist_path, __title__)): raise DistributionNotFound() __version__ = dist.version except DistributionNotFound: __version__ = 'unknown' def _normalise_path(path: Union[str, pathlib.Path]) -> pathlib.Path: """ Ensures a path is parsed. :param path: A path string or Path object. :return: The path as a Path object. """ if isinstance(path, str): return pathlib.Path(path) return path def entity(path: Union[str, pathlib.Path]) -> _Entity: """ Retrieve an appropriate entity object from a path. :param path: The path of the entity to represent, either a string or Path object. :return: An entity representing the input path. """ return _Entity.from_path(_normalise_path(path)) def compare(left: Union[str, pathlib.Path, _Entity], right: Union[str, pathlib.Path, _Entity]) -> Comparison: """ Compare two paths. :param left: The left side or "before" entity. :param right: The right side or "after" entity. :return: A comparison details what has changed from the left side to the right side. """ def normalise(param: Union[str, pathlib.Path, _Entity]) -> _Entity: """ Turns any one of a number of types of input into an entity. :param param: The input - either a path string, a path object, or a full blown entity. :return: The input param as an entity. 
""" if isinstance(param, str): param = pathlib.Path(param) if isinstance(param, pathlib.Path): param = _Entity.from_path(param) return param return Comparison.compare(normalise(left), normalise(right))
gebn/wood
wood/__init__.py
entity
python
def entity(path: Union[str, pathlib.Path]) -> _Entity: return _Entity.from_path(_normalise_path(path))
Retrieve an appropriate entity object from a path. :param path: The path of the entity to represent, either a string or Path object. :return: An entity representing the input path.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/__init__.py#L52-L60
[ "def _normalise_path(path: Union[str, pathlib.Path]) -> pathlib.Path:\n \"\"\"\n Ensures a path is parsed.\n\n :param path: A path string or Path object.\n :return: The path as a Path object.\n \"\"\"\n if isinstance(path, str):\n return pathlib.Path(path)\n return path\n", "def from_p...
# -*- coding: utf-8 -*- from typing import Union from pkg_resources import get_distribution, DistributionNotFound import os import pathlib from wood.entities import Root as _Root, Entity as _Entity from wood.comparison import Comparison from wood.integrations import cloudflare, cloudfront, s3 __title__ = 'wood' __author__ = 'George Brighton' __license__ = 'MIT' __copyright__ = 'Copyright 2017 George Brighton' # adapted from http://stackoverflow.com/a/17638236 try: dist = get_distribution(__title__) dist_path = os.path.normcase(dist.location) pwd = os.path.normcase(__file__) if not pwd.startswith(os.path.join(dist_path, __title__)): raise DistributionNotFound() __version__ = dist.version except DistributionNotFound: __version__ = 'unknown' def _normalise_path(path: Union[str, pathlib.Path]) -> pathlib.Path: """ Ensures a path is parsed. :param path: A path string or Path object. :return: The path as a Path object. """ if isinstance(path, str): return pathlib.Path(path) return path def root(path: Union[str, pathlib.Path]) -> _Root: """ Retrieve a root directory object from a path. :param path: The path string or Path object. :return: The created root object. """ return _Root.from_path(_normalise_path(path)) def compare(left: Union[str, pathlib.Path, _Entity], right: Union[str, pathlib.Path, _Entity]) -> Comparison: """ Compare two paths. :param left: The left side or "before" entity. :param right: The right side or "after" entity. :return: A comparison details what has changed from the left side to the right side. """ def normalise(param: Union[str, pathlib.Path, _Entity]) -> _Entity: """ Turns any one of a number of types of input into an entity. :param param: The input - either a path string, a path object, or a full blown entity. :return: The input param as an entity. 
""" if isinstance(param, str): param = pathlib.Path(param) if isinstance(param, pathlib.Path): param = _Entity.from_path(param) return param return Comparison.compare(normalise(left), normalise(right))
gebn/wood
wood/__init__.py
compare
python
def compare(left: Union[str, pathlib.Path, _Entity], right: Union[str, pathlib.Path, _Entity]) -> Comparison: def normalise(param: Union[str, pathlib.Path, _Entity]) -> _Entity: """ Turns any one of a number of types of input into an entity. :param param: The input - either a path string, a path object, or a full blown entity. :return: The input param as an entity. """ if isinstance(param, str): param = pathlib.Path(param) if isinstance(param, pathlib.Path): param = _Entity.from_path(param) return param return Comparison.compare(normalise(left), normalise(right))
Compare two paths. :param left: The left side or "before" entity. :param right: The right side or "after" entity. :return: A comparison details what has changed from the left side to the right side.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/__init__.py#L63-L88
[ "def normalise(param: Union[str, pathlib.Path, _Entity]) -> _Entity:\n \"\"\"\n Turns any one of a number of types of input into an entity.\n\n :param param: The input - either a path string, a path object, or a\n full blown entity.\n :return: The input param as an entity.\n \"\"\"\n...
# -*- coding: utf-8 -*- from typing import Union from pkg_resources import get_distribution, DistributionNotFound import os import pathlib from wood.entities import Root as _Root, Entity as _Entity from wood.comparison import Comparison from wood.integrations import cloudflare, cloudfront, s3 __title__ = 'wood' __author__ = 'George Brighton' __license__ = 'MIT' __copyright__ = 'Copyright 2017 George Brighton' # adapted from http://stackoverflow.com/a/17638236 try: dist = get_distribution(__title__) dist_path = os.path.normcase(dist.location) pwd = os.path.normcase(__file__) if not pwd.startswith(os.path.join(dist_path, __title__)): raise DistributionNotFound() __version__ = dist.version except DistributionNotFound: __version__ = 'unknown' def _normalise_path(path: Union[str, pathlib.Path]) -> pathlib.Path: """ Ensures a path is parsed. :param path: A path string or Path object. :return: The path as a Path object. """ if isinstance(path, str): return pathlib.Path(path) return path def root(path: Union[str, pathlib.Path]) -> _Root: """ Retrieve a root directory object from a path. :param path: The path string or Path object. :return: The created root object. """ return _Root.from_path(_normalise_path(path)) def entity(path: Union[str, pathlib.Path]) -> _Entity: """ Retrieve an appropriate entity object from a path. :param path: The path of the entity to represent, either a string or Path object. :return: An entity representing the input path. """ return _Entity.from_path(_normalise_path(path))
gebn/wood
wood/comparison.py
Comparison.new
python
def new(self, base: pathlib.PurePath = pathlib.PurePath(), include_intermediates: bool = True) -> Iterator[str]: if self.is_new: yield str(base / self.right.name)
Find the list of new paths in this comparison. :param base: The base directory to prepend to the right entity's name. :param include_intermediates: Whether to include new non-empty directories in the returned iterable. If you only care about files, or are using flat key-based storage system like S3 where directories are a made-up concept, this can be set to false. :return: An iterator of the new paths.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L61-L76
null
class Comparison(Generic[L, R], metaclass=abc.ABCMeta): """ Represents a comparison between any two entities, e.g. a File and a File, a File and Directory etc. """ # the number of spaces to indent per level _INDENT_SIZE = 4 def __init__(self, left: Optional[L], right: Optional[R]): """ Initialise a new comparison. :param left: The left or "original"/"old" entity. Omit if the entity is new. :param right: The right or "current"/"new" entity. Omit if the entity has been deleted. :raises ValueError: If both the left and right entities are None. """ if left is None and right is None: raise ValueError('The left and right side cannot both be None') self.left = left self.right = right @property def is_empty(self) -> bool: """ Find whether this entity is empty. :return: True if the entity is empty, false otherwise. """ raise NotImplementedError() @property def is_new(self) -> bool: """ Find whether this comparison represents a new file. :return: True if the right is a new file, false otherwise. """ return self.left is None @property @abc.abstractmethod def is_modified(self) -> bool: """ Find whether this comparison is for a modified file. Note that directory comparisons are always not modified, as a directory itself can only be added (new) or removed (deleted). :return: True if this comparison represents a modified file, false otherwise. """ raise NotImplementedError() def modified(self, base: pathlib.PurePath = pathlib.PurePath()) \ -> Iterator[str]: """ Find the paths of modified files. There is no option to include intermediate directories, as all files and directories exist in both the left and right trees. :param base: The base directory to recursively append to the right entity. :return: An iterable of paths of modified files. """ # N.B. 
this method will only ever return files, as directories cannot # be "modified" if self.is_modified: yield str(base / self.right.name) @property def is_deleted(self) -> bool: """ Find whether this comparison represents a deleted entity. :return: True if the left entity has been deleted, false otherwise. """ return self.right is None def deleted(self, base: pathlib.PurePath = pathlib.PurePath(), include_children: bool = True, include_directories: bool = True) -> Iterator[str]: """ Find the paths of entities deleted between the left and right entities in this comparison. :param base: The base directory to recursively append to entities. :param include_children: Whether to recursively include children of deleted directories. These are themselves deleted by definition, however it may be useful to the caller to list them explicitly. :param include_directories: Whether to include directories in the returned iterable. :return: An iterable of deleted paths. """ if self.is_deleted: yield str(base / self.left.name) @property @abc.abstractmethod def invalidate(self) -> bool: """ Find whether this entity should be invalidated. :return: True if it should be, false otherwise. """ raise NotImplementedError() def invalidations(self) -> Iterator[str]: """ Get a set of invalidation prefixes for this entity and its children. These prefixes will resemble relative paths - they will not have a leading slash. :return: The set of invalidations. """ if self.invalidate: yield self.left.name @staticmethod def compare(left: Optional[L], right: Optional[R]) -> 'Comparison[L, R]': """ Calculate the comparison of two entities. 
| left | right | Return Type | |===========|===========|=========================| | file | file | FileComparison | | file | directory | FileDirectoryComparison | | file | None | FileComparison | | directory | file | DirectoryFileComparison | | directory | directory | DirectoryComparison | | directory | None | DirectoryComparison | | None | file | FileComparison | | None | directory | DirectoryComparison | | None | None | TypeError | :param left: The left side or "before" entity. :param right: The right side or "after" entity. :return: See table above. """ if isinstance(left, File) and isinstance(right, Directory): return FileDirectoryComparison(left, right) if isinstance(left, Directory) and isinstance(right, File): return DirectoryFileComparison(left, right) if isinstance(left, File) or isinstance(right, File): return FileComparison(left, right) if isinstance(left, Directory) or isinstance(right, Directory): return DirectoryComparison(left, right) raise TypeError(f'Cannot compare entities: {left}, {right}') def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) \ -> None: """ Print this comparison and its children with indentation to represent nesting. :param level: The level of indentation to use. This is mostly for internal use, but you can use it to inset the root comparison. :param file: The stream to print to. Defaults to stdout. """ print(' ' * self._INDENT_SIZE * level + str(self), file=file) def __str__(self): return f'{self.__class__.__name__}(' \ f'{self.left or None}|{self.right or None}, ' \ f'is_new: {self.is_new}, ' \ f'is_modified: {self.is_modified}, ' \ f'is_deleted: {self.is_deleted}, ' \ f'invalidate: {self.invalidate})'
gebn/wood
wood/comparison.py
Comparison.modified
python
def modified(self, base: pathlib.PurePath = pathlib.PurePath()) \ -> Iterator[str]: # N.B. this method will only ever return files, as directories cannot # be "modified" if self.is_modified: yield str(base / self.right.name)
Find the paths of modified files. There is no option to include intermediate directories, as all files and directories exist in both the left and right trees. :param base: The base directory to recursively append to the right entity. :return: An iterable of paths of modified files.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L91-L105
null
class Comparison(Generic[L, R], metaclass=abc.ABCMeta): """ Represents a comparison between any two entities, e.g. a File and a File, a File and Directory etc. """ # the number of spaces to indent per level _INDENT_SIZE = 4 def __init__(self, left: Optional[L], right: Optional[R]): """ Initialise a new comparison. :param left: The left or "original"/"old" entity. Omit if the entity is new. :param right: The right or "current"/"new" entity. Omit if the entity has been deleted. :raises ValueError: If both the left and right entities are None. """ if left is None and right is None: raise ValueError('The left and right side cannot both be None') self.left = left self.right = right @property def is_empty(self) -> bool: """ Find whether this entity is empty. :return: True if the entity is empty, false otherwise. """ raise NotImplementedError() @property def is_new(self) -> bool: """ Find whether this comparison represents a new file. :return: True if the right is a new file, false otherwise. """ return self.left is None def new(self, base: pathlib.PurePath = pathlib.PurePath(), include_intermediates: bool = True) -> Iterator[str]: """ Find the list of new paths in this comparison. :param base: The base directory to prepend to the right entity's name. :param include_intermediates: Whether to include new non-empty directories in the returned iterable. If you only care about files, or are using flat key-based storage system like S3 where directories are a made-up concept, this can be set to false. :return: An iterator of the new paths. """ if self.is_new: yield str(base / self.right.name) @property @abc.abstractmethod def is_modified(self) -> bool: """ Find whether this comparison is for a modified file. Note that directory comparisons are always not modified, as a directory itself can only be added (new) or removed (deleted). :return: True if this comparison represents a modified file, false otherwise. 
""" raise NotImplementedError() @property def is_deleted(self) -> bool: """ Find whether this comparison represents a deleted entity. :return: True if the left entity has been deleted, false otherwise. """ return self.right is None def deleted(self, base: pathlib.PurePath = pathlib.PurePath(), include_children: bool = True, include_directories: bool = True) -> Iterator[str]: """ Find the paths of entities deleted between the left and right entities in this comparison. :param base: The base directory to recursively append to entities. :param include_children: Whether to recursively include children of deleted directories. These are themselves deleted by definition, however it may be useful to the caller to list them explicitly. :param include_directories: Whether to include directories in the returned iterable. :return: An iterable of deleted paths. """ if self.is_deleted: yield str(base / self.left.name) @property @abc.abstractmethod def invalidate(self) -> bool: """ Find whether this entity should be invalidated. :return: True if it should be, false otherwise. """ raise NotImplementedError() def invalidations(self) -> Iterator[str]: """ Get a set of invalidation prefixes for this entity and its children. These prefixes will resemble relative paths - they will not have a leading slash. :return: The set of invalidations. """ if self.invalidate: yield self.left.name @staticmethod def compare(left: Optional[L], right: Optional[R]) -> 'Comparison[L, R]': """ Calculate the comparison of two entities. 
| left | right | Return Type | |===========|===========|=========================| | file | file | FileComparison | | file | directory | FileDirectoryComparison | | file | None | FileComparison | | directory | file | DirectoryFileComparison | | directory | directory | DirectoryComparison | | directory | None | DirectoryComparison | | None | file | FileComparison | | None | directory | DirectoryComparison | | None | None | TypeError | :param left: The left side or "before" entity. :param right: The right side or "after" entity. :return: See table above. """ if isinstance(left, File) and isinstance(right, Directory): return FileDirectoryComparison(left, right) if isinstance(left, Directory) and isinstance(right, File): return DirectoryFileComparison(left, right) if isinstance(left, File) or isinstance(right, File): return FileComparison(left, right) if isinstance(left, Directory) or isinstance(right, Directory): return DirectoryComparison(left, right) raise TypeError(f'Cannot compare entities: {left}, {right}') def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) \ -> None: """ Print this comparison and its children with indentation to represent nesting. :param level: The level of indentation to use. This is mostly for internal use, but you can use it to inset the root comparison. :param file: The stream to print to. Defaults to stdout. """ print(' ' * self._INDENT_SIZE * level + str(self), file=file) def __str__(self): return f'{self.__class__.__name__}(' \ f'{self.left or None}|{self.right or None}, ' \ f'is_new: {self.is_new}, ' \ f'is_modified: {self.is_modified}, ' \ f'is_deleted: {self.is_deleted}, ' \ f'invalidate: {self.invalidate})'
gebn/wood
wood/comparison.py
Comparison.deleted
python
def deleted(self, base: pathlib.PurePath = pathlib.PurePath(), include_children: bool = True, include_directories: bool = True) -> Iterator[str]: if self.is_deleted: yield str(base / self.left.name)
Find the paths of entities deleted between the left and right entities in this comparison. :param base: The base directory to recursively append to entities. :param include_children: Whether to recursively include children of deleted directories. These are themselves deleted by definition, however it may be useful to the caller to list them explicitly. :param include_directories: Whether to include directories in the returned iterable. :return: An iterable of deleted paths.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L116-L133
null
class Comparison(Generic[L, R], metaclass=abc.ABCMeta): """ Represents a comparison between any two entities, e.g. a File and a File, a File and Directory etc. """ # the number of spaces to indent per level _INDENT_SIZE = 4 def __init__(self, left: Optional[L], right: Optional[R]): """ Initialise a new comparison. :param left: The left or "original"/"old" entity. Omit if the entity is new. :param right: The right or "current"/"new" entity. Omit if the entity has been deleted. :raises ValueError: If both the left and right entities are None. """ if left is None and right is None: raise ValueError('The left and right side cannot both be None') self.left = left self.right = right @property def is_empty(self) -> bool: """ Find whether this entity is empty. :return: True if the entity is empty, false otherwise. """ raise NotImplementedError() @property def is_new(self) -> bool: """ Find whether this comparison represents a new file. :return: True if the right is a new file, false otherwise. """ return self.left is None def new(self, base: pathlib.PurePath = pathlib.PurePath(), include_intermediates: bool = True) -> Iterator[str]: """ Find the list of new paths in this comparison. :param base: The base directory to prepend to the right entity's name. :param include_intermediates: Whether to include new non-empty directories in the returned iterable. If you only care about files, or are using flat key-based storage system like S3 where directories are a made-up concept, this can be set to false. :return: An iterator of the new paths. """ if self.is_new: yield str(base / self.right.name) @property @abc.abstractmethod def is_modified(self) -> bool: """ Find whether this comparison is for a modified file. Note that directory comparisons are always not modified, as a directory itself can only be added (new) or removed (deleted). :return: True if this comparison represents a modified file, false otherwise. 
""" raise NotImplementedError() def modified(self, base: pathlib.PurePath = pathlib.PurePath()) \ -> Iterator[str]: """ Find the paths of modified files. There is no option to include intermediate directories, as all files and directories exist in both the left and right trees. :param base: The base directory to recursively append to the right entity. :return: An iterable of paths of modified files. """ # N.B. this method will only ever return files, as directories cannot # be "modified" if self.is_modified: yield str(base / self.right.name) @property def is_deleted(self) -> bool: """ Find whether this comparison represents a deleted entity. :return: True if the left entity has been deleted, false otherwise. """ return self.right is None @property @abc.abstractmethod def invalidate(self) -> bool: """ Find whether this entity should be invalidated. :return: True if it should be, false otherwise. """ raise NotImplementedError() def invalidations(self) -> Iterator[str]: """ Get a set of invalidation prefixes for this entity and its children. These prefixes will resemble relative paths - they will not have a leading slash. :return: The set of invalidations. """ if self.invalidate: yield self.left.name @staticmethod def compare(left: Optional[L], right: Optional[R]) -> 'Comparison[L, R]': """ Calculate the comparison of two entities. | left | right | Return Type | |===========|===========|=========================| | file | file | FileComparison | | file | directory | FileDirectoryComparison | | file | None | FileComparison | | directory | file | DirectoryFileComparison | | directory | directory | DirectoryComparison | | directory | None | DirectoryComparison | | None | file | FileComparison | | None | directory | DirectoryComparison | | None | None | TypeError | :param left: The left side or "before" entity. :param right: The right side or "after" entity. :return: See table above. 
""" if isinstance(left, File) and isinstance(right, Directory): return FileDirectoryComparison(left, right) if isinstance(left, Directory) and isinstance(right, File): return DirectoryFileComparison(left, right) if isinstance(left, File) or isinstance(right, File): return FileComparison(left, right) if isinstance(left, Directory) or isinstance(right, Directory): return DirectoryComparison(left, right) raise TypeError(f'Cannot compare entities: {left}, {right}') def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) \ -> None: """ Print this comparison and its children with indentation to represent nesting. :param level: The level of indentation to use. This is mostly for internal use, but you can use it to inset the root comparison. :param file: The stream to print to. Defaults to stdout. """ print(' ' * self._INDENT_SIZE * level + str(self), file=file) def __str__(self): return f'{self.__class__.__name__}(' \ f'{self.left or None}|{self.right or None}, ' \ f'is_new: {self.is_new}, ' \ f'is_modified: {self.is_modified}, ' \ f'is_deleted: {self.is_deleted}, ' \ f'invalidate: {self.invalidate})'
gebn/wood
wood/comparison.py
Comparison.compare
python
def compare(left: Optional[L], right: Optional[R]) -> 'Comparison[L, R]': if isinstance(left, File) and isinstance(right, Directory): return FileDirectoryComparison(left, right) if isinstance(left, Directory) and isinstance(right, File): return DirectoryFileComparison(left, right) if isinstance(left, File) or isinstance(right, File): return FileComparison(left, right) if isinstance(left, Directory) or isinstance(right, Directory): return DirectoryComparison(left, right) raise TypeError(f'Cannot compare entities: {left}, {right}')
Calculate the comparison of two entities. | left | right | Return Type | |===========|===========|=========================| | file | file | FileComparison | | file | directory | FileDirectoryComparison | | file | None | FileComparison | | directory | file | DirectoryFileComparison | | directory | directory | DirectoryComparison | | directory | None | DirectoryComparison | | None | file | FileComparison | | None | directory | DirectoryComparison | | None | None | TypeError | :param left: The left side or "before" entity. :param right: The right side or "after" entity. :return: See table above.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L157-L189
null
class Comparison(Generic[L, R], metaclass=abc.ABCMeta): """ Represents a comparison between any two entities, e.g. a File and a File, a File and Directory etc. """ # the number of spaces to indent per level _INDENT_SIZE = 4 def __init__(self, left: Optional[L], right: Optional[R]): """ Initialise a new comparison. :param left: The left or "original"/"old" entity. Omit if the entity is new. :param right: The right or "current"/"new" entity. Omit if the entity has been deleted. :raises ValueError: If both the left and right entities are None. """ if left is None and right is None: raise ValueError('The left and right side cannot both be None') self.left = left self.right = right @property def is_empty(self) -> bool: """ Find whether this entity is empty. :return: True if the entity is empty, false otherwise. """ raise NotImplementedError() @property def is_new(self) -> bool: """ Find whether this comparison represents a new file. :return: True if the right is a new file, false otherwise. """ return self.left is None def new(self, base: pathlib.PurePath = pathlib.PurePath(), include_intermediates: bool = True) -> Iterator[str]: """ Find the list of new paths in this comparison. :param base: The base directory to prepend to the right entity's name. :param include_intermediates: Whether to include new non-empty directories in the returned iterable. If you only care about files, or are using flat key-based storage system like S3 where directories are a made-up concept, this can be set to false. :return: An iterator of the new paths. """ if self.is_new: yield str(base / self.right.name) @property @abc.abstractmethod def is_modified(self) -> bool: """ Find whether this comparison is for a modified file. Note that directory comparisons are always not modified, as a directory itself can only be added (new) or removed (deleted). :return: True if this comparison represents a modified file, false otherwise. 
""" raise NotImplementedError() def modified(self, base: pathlib.PurePath = pathlib.PurePath()) \ -> Iterator[str]: """ Find the paths of modified files. There is no option to include intermediate directories, as all files and directories exist in both the left and right trees. :param base: The base directory to recursively append to the right entity. :return: An iterable of paths of modified files. """ # N.B. this method will only ever return files, as directories cannot # be "modified" if self.is_modified: yield str(base / self.right.name) @property def is_deleted(self) -> bool: """ Find whether this comparison represents a deleted entity. :return: True if the left entity has been deleted, false otherwise. """ return self.right is None def deleted(self, base: pathlib.PurePath = pathlib.PurePath(), include_children: bool = True, include_directories: bool = True) -> Iterator[str]: """ Find the paths of entities deleted between the left and right entities in this comparison. :param base: The base directory to recursively append to entities. :param include_children: Whether to recursively include children of deleted directories. These are themselves deleted by definition, however it may be useful to the caller to list them explicitly. :param include_directories: Whether to include directories in the returned iterable. :return: An iterable of deleted paths. """ if self.is_deleted: yield str(base / self.left.name) @property @abc.abstractmethod def invalidate(self) -> bool: """ Find whether this entity should be invalidated. :return: True if it should be, false otherwise. """ raise NotImplementedError() def invalidations(self) -> Iterator[str]: """ Get a set of invalidation prefixes for this entity and its children. These prefixes will resemble relative paths - they will not have a leading slash. :return: The set of invalidations. 
""" if self.invalidate: yield self.left.name @staticmethod def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) \ -> None: """ Print this comparison and its children with indentation to represent nesting. :param level: The level of indentation to use. This is mostly for internal use, but you can use it to inset the root comparison. :param file: The stream to print to. Defaults to stdout. """ print(' ' * self._INDENT_SIZE * level + str(self), file=file) def __str__(self): return f'{self.__class__.__name__}(' \ f'{self.left or None}|{self.right or None}, ' \ f'is_new: {self.is_new}, ' \ f'is_modified: {self.is_modified}, ' \ f'is_deleted: {self.is_deleted}, ' \ f'invalidate: {self.invalidate})'
gebn/wood
wood/comparison.py
Comparison.print_hierarchy
python
def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) \ -> None: print(' ' * self._INDENT_SIZE * level + str(self), file=file)
Print this comparison and its children with indentation to represent nesting. :param level: The level of indentation to use. This is mostly for internal use, but you can use it to inset the root comparison. :param file: The stream to print to. Defaults to stdout.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L191-L202
null
class Comparison(Generic[L, R], metaclass=abc.ABCMeta): """ Represents a comparison between any two entities, e.g. a File and a File, a File and Directory etc. """ # the number of spaces to indent per level _INDENT_SIZE = 4 def __init__(self, left: Optional[L], right: Optional[R]): """ Initialise a new comparison. :param left: The left or "original"/"old" entity. Omit if the entity is new. :param right: The right or "current"/"new" entity. Omit if the entity has been deleted. :raises ValueError: If both the left and right entities are None. """ if left is None and right is None: raise ValueError('The left and right side cannot both be None') self.left = left self.right = right @property def is_empty(self) -> bool: """ Find whether this entity is empty. :return: True if the entity is empty, false otherwise. """ raise NotImplementedError() @property def is_new(self) -> bool: """ Find whether this comparison represents a new file. :return: True if the right is a new file, false otherwise. """ return self.left is None def new(self, base: pathlib.PurePath = pathlib.PurePath(), include_intermediates: bool = True) -> Iterator[str]: """ Find the list of new paths in this comparison. :param base: The base directory to prepend to the right entity's name. :param include_intermediates: Whether to include new non-empty directories in the returned iterable. If you only care about files, or are using flat key-based storage system like S3 where directories are a made-up concept, this can be set to false. :return: An iterator of the new paths. """ if self.is_new: yield str(base / self.right.name) @property @abc.abstractmethod def is_modified(self) -> bool: """ Find whether this comparison is for a modified file. Note that directory comparisons are always not modified, as a directory itself can only be added (new) or removed (deleted). :return: True if this comparison represents a modified file, false otherwise. 
""" raise NotImplementedError() def modified(self, base: pathlib.PurePath = pathlib.PurePath()) \ -> Iterator[str]: """ Find the paths of modified files. There is no option to include intermediate directories, as all files and directories exist in both the left and right trees. :param base: The base directory to recursively append to the right entity. :return: An iterable of paths of modified files. """ # N.B. this method will only ever return files, as directories cannot # be "modified" if self.is_modified: yield str(base / self.right.name) @property def is_deleted(self) -> bool: """ Find whether this comparison represents a deleted entity. :return: True if the left entity has been deleted, false otherwise. """ return self.right is None def deleted(self, base: pathlib.PurePath = pathlib.PurePath(), include_children: bool = True, include_directories: bool = True) -> Iterator[str]: """ Find the paths of entities deleted between the left and right entities in this comparison. :param base: The base directory to recursively append to entities. :param include_children: Whether to recursively include children of deleted directories. These are themselves deleted by definition, however it may be useful to the caller to list them explicitly. :param include_directories: Whether to include directories in the returned iterable. :return: An iterable of deleted paths. """ if self.is_deleted: yield str(base / self.left.name) @property @abc.abstractmethod def invalidate(self) -> bool: """ Find whether this entity should be invalidated. :return: True if it should be, false otherwise. """ raise NotImplementedError() def invalidations(self) -> Iterator[str]: """ Get a set of invalidation prefixes for this entity and its children. These prefixes will resemble relative paths - they will not have a leading slash. :return: The set of invalidations. 
""" if self.invalidate: yield self.left.name @staticmethod def compare(left: Optional[L], right: Optional[R]) -> 'Comparison[L, R]': """ Calculate the comparison of two entities. | left | right | Return Type | |===========|===========|=========================| | file | file | FileComparison | | file | directory | FileDirectoryComparison | | file | None | FileComparison | | directory | file | DirectoryFileComparison | | directory | directory | DirectoryComparison | | directory | None | DirectoryComparison | | None | file | FileComparison | | None | directory | DirectoryComparison | | None | None | TypeError | :param left: The left side or "before" entity. :param right: The right side or "after" entity. :return: See table above. """ if isinstance(left, File) and isinstance(right, Directory): return FileDirectoryComparison(left, right) if isinstance(left, Directory) and isinstance(right, File): return DirectoryFileComparison(left, right) if isinstance(left, File) or isinstance(right, File): return FileComparison(left, right) if isinstance(left, Directory) or isinstance(right, Directory): return DirectoryComparison(left, right) raise TypeError(f'Cannot compare entities: {left}, {right}') def __str__(self): return f'{self.__class__.__name__}(' \ f'{self.left or None}|{self.right or None}, ' \ f'is_new: {self.is_new}, ' \ f'is_modified: {self.is_modified}, ' \ f'is_deleted: {self.is_deleted}, ' \ f'invalidate: {self.invalidate})'
gebn/wood
wood/comparison.py
FileComparison.is_modified
python
def is_modified(self) -> bool:
    """
    Find whether the files on the left and right are different.

    Modification implies the file exists on both sides, so this is
    always False for newly created or deleted files.

    :return: Whether the file has been modified.
    """
    # a file that only exists on one side cannot be "modified"
    exists_on_both_sides = not (self.is_new or self.is_deleted)
    return exists_on_both_sides and self.left.md5 != self.right.md5
Find whether the files on the left and right are different. Note, modified implies the contents of the file have changed, which is predicated on the file existing on both the left and right. Therefore this will be false if the file on the left has been deleted, or the file on the right is new. :return: Whether the file has been modified.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L224-L236
null
class FileComparison(Comparison[File, File]): """ A comparison of two files. """ @property def is_empty(self) -> bool: # we have no insight into the contents of files return False @property @property def invalidate(self) -> bool: return self.is_modified or self.is_deleted def __init__(self, left: Optional[File], right: Optional[File]): """ Initialise a new file comparison. :param left: The "original" or "old" file. Cannot be None if right is None. :param right: The "current" or "new" file. Cannot be None if left is None. """ super().__init__(left, right)
gebn/wood
wood/entities.py
Entity.walk_paths
python
def walk_paths(self, base: pathlib.PurePath = pathlib.PurePath()) \
        -> Iterator[pathlib.PurePath]:
    """
    Recursively traverse all paths inside this entity, including the
    entity itself.

    :param base: The base path to prepend to the entity name. Defaults
                 to an empty relative path.
    :return: An iterator of paths.
    """
    # N.B. the default value is never None, so the previous
    # `Optional[pathlib.PurePath]` annotation was misleading; PurePath
    # is immutable, making it safe as a default argument.
    raise NotImplementedError()
Recursively traverse all paths inside this entity, including the entity itself. :param base: The base path to prepend to the entity name. :return: An iterator of paths.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/entities.py#L27-L37
null
class Entity(metaclass=abc.ABCMeta): """ Represents a snapshot of a file or directory. """ # the number of spaces to indent per level _INDENT_SIZE = 4 def __init__(self, name): """ Initialise a new entity. :param name: The name of the entity, e.g. "essay.txt" or "photos". """ self.name = name @abc.abstractmethod def _walk_paths(self, base: pathlib.PurePath) \ -> Iterator[pathlib.PurePath]: """ Internal helper for walking paths. This is required to exclude the name of the root entity from the walk. :param base: The base path to prepend to the entity name. :return: An iterator of paths. """ return self.walk_paths(base) @abc.abstractmethod def walk_files(self) -> Iterator['File']: """ Recursively traverse all files inside this entity, including the entity itself. :return: An iterator of files. """ raise NotImplementedError() @classmethod def from_path(cls, path: pathlib.Path) -> 'Entity': """ Create an entity from a local path. :param path: The path to the entity, either a file or directory. :return: An entity instance representing the path. """ if path.is_file(): return File.from_path(path) return Directory.from_path(path) def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) \ -> None: """ Print this entity and its children with indentation to represent nesting. :param level: The level of indentation to use. This is mostly for internal use, but you can use it to inset the root entity. :param file: The stream to print to. Defaults to stdout. """ print(' ' * self._INDENT_SIZE * level + str(self), file=file) def __str__(self) -> str: return f'{self.__class__.__name__}({self.name})' def __repr__(self) -> str: return f'<{self}>'
gebn/wood
wood/entities.py
Entity._walk_paths
python
def _walk_paths(self, base: pathlib.PurePath) \ -> Iterator[pathlib.PurePath]: return self.walk_paths(base)
Internal helper for walking paths. This is required to exclude the name of the root entity from the walk. :param base: The base path to prepend to the entity name. :return: An iterator of paths.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/entities.py#L39-L48
null
class Entity(metaclass=abc.ABCMeta): """ Represents a snapshot of a file or directory. """ # the number of spaces to indent per level _INDENT_SIZE = 4 def __init__(self, name): """ Initialise a new entity. :param name: The name of the entity, e.g. "essay.txt" or "photos". """ self.name = name @abc.abstractmethod def walk_paths(self, base: Optional[pathlib.PurePath] = pathlib.PurePath()) \ -> Iterator[pathlib.PurePath]: """ Recursively traverse all paths inside this entity, including the entity itself. :param base: The base path to prepend to the entity name. :return: An iterator of paths. """ raise NotImplementedError() @abc.abstractmethod def walk_files(self) -> Iterator['File']: """ Recursively traverse all files inside this entity, including the entity itself. :return: An iterator of files. """ raise NotImplementedError() @classmethod def from_path(cls, path: pathlib.Path) -> 'Entity': """ Create an entity from a local path. :param path: The path to the entity, either a file or directory. :return: An entity instance representing the path. """ if path.is_file(): return File.from_path(path) return Directory.from_path(path) def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) \ -> None: """ Print this entity and its children with indentation to represent nesting. :param level: The level of indentation to use. This is mostly for internal use, but you can use it to inset the root entity. :param file: The stream to print to. Defaults to stdout. """ print(' ' * self._INDENT_SIZE * level + str(self), file=file) def __str__(self) -> str: return f'{self.__class__.__name__}({self.name})' def __repr__(self) -> str: return f'<{self}>'
gebn/wood
wood/entities.py
Entity.from_path
python
def from_path(cls, path: pathlib.Path) -> 'Entity':
    """
    Create an entity from a local path.

    :param path: The path to the entity, either a file or directory.
    :return: An entity instance representing the path.
    """
    # files get a File entity; anything else is treated as a directory
    factory = File.from_path if path.is_file() else Directory.from_path
    return factory(path)
Create an entity from a local path. :param path: The path to the entity, either a file or directory. :return: An entity instance representing the path.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/entities.py#L61-L70
[ "def from_path(cls, path: pathlib.Path) -> 'File':\n \"\"\"\n Create a file entity from a file path.\n\n :param path: The path of the file.\n :return: A file entity instance representing the file.\n :raises ValueError: If the path does not point to a file.\n \"\"\"\n if not path.is_file():\n ...
class Entity(metaclass=abc.ABCMeta): """ Represents a snapshot of a file or directory. """ # the number of spaces to indent per level _INDENT_SIZE = 4 def __init__(self, name): """ Initialise a new entity. :param name: The name of the entity, e.g. "essay.txt" or "photos". """ self.name = name @abc.abstractmethod def walk_paths(self, base: Optional[pathlib.PurePath] = pathlib.PurePath()) \ -> Iterator[pathlib.PurePath]: """ Recursively traverse all paths inside this entity, including the entity itself. :param base: The base path to prepend to the entity name. :return: An iterator of paths. """ raise NotImplementedError() def _walk_paths(self, base: pathlib.PurePath) \ -> Iterator[pathlib.PurePath]: """ Internal helper for walking paths. This is required to exclude the name of the root entity from the walk. :param base: The base path to prepend to the entity name. :return: An iterator of paths. """ return self.walk_paths(base) @abc.abstractmethod def walk_files(self) -> Iterator['File']: """ Recursively traverse all files inside this entity, including the entity itself. :return: An iterator of files. """ raise NotImplementedError() @classmethod def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) \ -> None: """ Print this entity and its children with indentation to represent nesting. :param level: The level of indentation to use. This is mostly for internal use, but you can use it to inset the root entity. :param file: The stream to print to. Defaults to stdout. """ print(' ' * self._INDENT_SIZE * level + str(self), file=file) def __str__(self) -> str: return f'{self.__class__.__name__}({self.name})' def __repr__(self) -> str: return f'<{self}>'
gebn/wood
wood/entities.py
File._md5
python
def _md5(path: pathlib.PurePath): hash_ = hashlib.md5() with open(path, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): hash_.update(chunk) return hash_.hexdigest()
Calculate the MD5 checksum of a file. :param path: The path of the file whose checksum to calculate. :return: The lowercase hex representation of the file's MD5 checksum, exactly 32 chars long.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/entities.py#L119-L131
null
class File(Entity): """ Represents a file. """ def __init__(self, name, size, md5): """ Initialise a new file entity. :param name: The file name, including extension if any. :param size: The size of the file in bytes. :param md5: The lowercase hex representation of the file's MD5 checksum. Should be exactly 32 chars long. """ super().__init__(name) self.size = size self.md5 = md5 def walk_paths(self, base: Optional[pathlib.PurePath] = pathlib.PurePath()) \ -> Iterator[pathlib.PurePath]: yield base / self.name def walk_files(self) -> Iterator['File']: yield self @staticmethod @classmethod def from_path(cls, path: pathlib.Path) -> 'File': """ Create a file entity from a file path. :param path: The path of the file. :return: A file entity instance representing the file. :raises ValueError: If the path does not point to a file. """ if not path.is_file(): raise ValueError('Path does not point to a file') return File(path.name, path.stat().st_size, cls._md5(path)) def __str__(self) -> str: return f'{self.__class__.__name__}({self.name}, {self.size}, ' \ f'{self.md5})'
gebn/wood
wood/entities.py
File.from_path
python
def from_path(cls, path: pathlib.Path) -> 'File':
    """
    Create a file entity from a file path.

    :param path: The path of the file.
    :return: A file entity instance representing the file.
    :raises ValueError: If the path does not point to a file.
    """
    # guard clause: only regular files are acceptable here
    if not path.is_file():
        raise ValueError('Path does not point to a file')
    size = path.stat().st_size
    checksum = cls._md5(path)
    return File(path.name, size, checksum)
Create a file entity from a file path. :param path: The path of the file. :return: A file entity instance representing the file. :raises ValueError: If the path does not point to a file.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/entities.py#L134-L144
null
class File(Entity): """ Represents a file. """ def __init__(self, name, size, md5): """ Initialise a new file entity. :param name: The file name, including extension if any. :param size: The size of the file in bytes. :param md5: The lowercase hex representation of the file's MD5 checksum. Should be exactly 32 chars long. """ super().__init__(name) self.size = size self.md5 = md5 def walk_paths(self, base: Optional[pathlib.PurePath] = pathlib.PurePath()) \ -> Iterator[pathlib.PurePath]: yield base / self.name def walk_files(self) -> Iterator['File']: yield self @staticmethod def _md5(path: pathlib.PurePath): """ Calculate the MD5 checksum of a file. :param path: The path of the file whose checksum to calculate. :return: The lowercase hex representation of the file's MD5 checksum, exactly 32 chars long. """ hash_ = hashlib.md5() with open(path, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): hash_.update(chunk) return hash_.hexdigest() @classmethod def __str__(self) -> str: return f'{self.__class__.__name__}({self.name}, {self.size}, ' \ f'{self.md5})'
gebn/wood
wood/entities.py
Directory.from_path
python
def from_path(cls, path: pathlib.Path) -> 'Directory':
    """
    Create a directory entity from a directory path.

    :param path: The path of the directory.
    :return: A directory entity instance representing the directory.
    :raises ValueError: If the path does not point to a directory.
    """
    # guard clause: refuse anything that is not a directory
    if not path.is_dir():
        raise ValueError('Path does not point to a directory')
    children = {}
    for child in path.iterdir():
        children[child.name] = Entity.from_path(child)
    return Directory(path.name, children)
Create a directory entity from a directory path. :param path: The path of the directory. :return: A directory entity instance representing the directory. :raises ValueError: If the path does not point to a directory.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/entities.py#L186-L197
null
class Directory(Entity): """ Represents a directory. """ def __init__(self, name, children: Optional[Dict[str, Entity]] = None): """ Initialise a new directory entity. :param name: The name of the directory, e.g. "photos". :param children: Entities within this directory. Keys are entity names, whose values correspond to the entity object. Defaults to no children, i.e. an empty directory. """ super().__init__(name) self.children = children or {} # type: Dict[str, Entity] def walk_paths(self, base: Optional[pathlib.PurePath] = pathlib.PurePath()) \ -> Iterator[pathlib.PurePath]: yield from itertools.chain.from_iterable( [child._walk_paths(base) for child in self.children.values()]) def _walk_paths(self, base: pathlib.PurePath) \ -> Iterator[pathlib.PurePath]: us = base / self.name yield us yield from itertools.chain.from_iterable( [child.walk_paths(us) for child in self.children.values()]) def walk_files(self) -> Iterator[File]: for child in self.children.values(): yield from child.walk_files() @classmethod def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) \ -> None: print(' ' * self._INDENT_SIZE * level + str(self), file=file) for name, child in self.children.items(): child.print_hierarchy(level + 1, file)
gebn/wood
wood/integrations/cloudflare.py
Invalidator.invalidate
python
def invalidate(self, comparison: Comparison[Entity, Entity]) -> None:
    """
    Invalidate paths in a zone.

    See https://api.cloudflare.com
    /#zone-purge-individual-files-by-url-and-cache-tags

    :param comparison: The comparison whose changes to invalidate.
    :raises requests.exceptions.RequestException: On request failure.
    :raises RuntimeError: If the request succeeded but could not be
                          carried out.
    """

    @backoff.on_exception(backoff.expo,
                          requests.exceptions.RequestException,
                          max_tries=5,
                          # e.response is None for connection-level
                          # failures, which are worth retrying; only
                          # give up on definitive client (4xx) errors
                          giveup=lambda e: e.response is not None
                          and 400 <= e.response.status_code < 500)
    def _request(chunk: List[str]) -> requests.Response:
        """
        Send a purge cache request to Cloudflare. This method will
        automatically retry with a back-off in case of server-side
        error.

        :param chunk: The list of paths to purge. These should not have
                      a leading slash, and will be combined with the
                      prefix to form a URL.
        :return: Cloudflare's response to our successful request.
        :raises requests.exceptions.RequestException: If the request
                                                      fails on the 5th
                                                      attempt.
        """
        response = self._session.delete(
            f'{self._API_BASE}/client/v4/zones/{self._zone}/purge_cache',
            headers={
                'X-Auth-Email': self._email,
                'X-Auth-Key': self._key
            },
            json={
                'files': [self._prefix + path for path in chunk]
            })
        response.raise_for_status()
        return response

    # both deletions and modifications require their URL to be purged
    paths = itertools.chain(comparison.deleted(), comparison.modified())
    for chunk_ in util.chunk(paths, self._MAX_INVALIDATIONS_PER_REQUEST):
        chunk_ = list(chunk_)
        if not chunk_:
            # nothing to do
            return
        logger.info('Invalidating %d paths (%s)', len(chunk_),
                    ', '.join(chunk_))
        response_ = _request(chunk_)
        logger.debug('Cloudflare invalidation response [%d]: %s',
                     response_.status_code, response_.text)
        json_ = response_.json()
        if not json_['success']:
            # this would be strange - the API returned a success response
            # code, but success was not "true"
            # TODO more appropriate exception, with handling upstream
            raise RuntimeError('Cloudflare reported failure')
        logger.info('Created invalidation %s', json_['result']['id'])
Invalidate paths in a zone. See https://api.cloudflare.com /#zone-purge-individual-files-by-url-and-cache-tags :param comparison: The comparison whose changes to invalidate. :raises requests.exceptions.RequestException: On request failure. :raises RuntimeError: If the request succeeded but could not be carried out.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/integrations/cloudflare.py#L44-L102
[ "def chunk(iterable: Iterable[A], n: int) \\\n -> Iterable[more_itertools.more.peekable]:\n \"\"\"\n Produce an iterable of interables of a maximum length from a (presumably\n longer) iterable. This is useful when only so many elements can be\n processed at once, such as an API that limits to n t...
class Invalidator(GenericInvalidator): """ Invalidates URLs within a Cloudflare zone. """ _API_BASE = 'https://api.cloudflare.com' _MAX_INVALIDATIONS_PER_REQUEST = 30 def __init__(self, session: requests.Session, email: str, key: str, zone: str, prefix: str): """ Initialise a new Cloudflare cache invalidator. :param session: The requests session to use when interacting with Cloudflare. :param zone: The zone ID to purge from. :param prefix: The full URL prefix to append asset paths to, e.g. https://example.com/webroot/. Should always end with a trailing slash. """ self._session = session self._email = email self._key = key self._zone = zone self._prefix = prefix
gebn/wood
wood/integrations/s3.py
objects_to_root
python
def objects_to_root(objects: List) -> Root:
    """
    Convert a list of s3 ObjectSummaries into a directory tree.

    :param objects: The list of objects, e.g. the result of calling
                    `.objects.all()` on a bucket.
    :return: The tree structure, contained within a root node.
    """

    def _build_tree(summaries: Iterable) -> Dict:
        """
        Arrange a flat object listing into a nested dict. Directory
        nodes are dicts keyed by name; file nodes hold the raw
        `ObjectSummary`.
        """
        tree = {}
        for summary in summaries:
            # keys ending in a slash denote (possibly empty) directories
            is_directory_key = summary.key.endswith('/')
            parts = [part for part in summary.key.split('/') if part]
            last_index = len(parts) - 1
            cursor = tree
            for index, part in enumerate(parts):
                if index == last_index and not is_directory_key:
                    # leaf file: store the summary itself
                    cursor[part] = summary
                else:
                    # intermediate (or explicit) directory: descend,
                    # creating the node if it does not yet exist
                    cursor = cursor.setdefault(part, {})
        return tree

    def _build_entity(name: str, node: Union[Dict, Any]) -> Entity:
        """
        Recursively convert a tree node into the matching entity type.
        """
        if isinstance(node, dict):
            children = {child_name: _build_entity(child_name, child_node)
                        for child_name, child_node in node.items()}
            return Directory(name, children)
        # ObjectSummary e_tags are wrapped in literal double quotes
        return File(pathlib.PurePath(node.key).name, node.size,
                    node.e_tag.strip('"'))

    tree_ = _build_tree(objects)
    return Root({pathlib.PurePath(name).name: _build_entity(name, node)
                 for name, node in tree_.items()})
Convert a list of s3 ObjectSummaries into a directory tree. :param objects: The list of objects, e.g. the result of calling `.objects.all()` on a bucket. :return: The tree structure, contained within a root node.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/integrations/s3.py#L16-L77
[ "def _to_tree(objs: Iterable) -> Dict:\n \"\"\"\n Build a tree structure from a flat list of objects.\n\n :param objs: The raw iterable of S3 `ObjectSummary`s, as returned by a\n bucket listing.\n :return: The listing as a nested dictionary where keys are directory\n and file...
# -*- coding: utf-8 -*- from typing import List, Dict, Union, Any, Iterable, Tuple import pathlib import logging import mimetypes from wood import util from wood.comparison import Comparison from wood.entities import Root, Directory, File, Entity from wood.sync import Syncer as GenericSyncer logger = logging.getLogger(__name__) class Syncer(GenericSyncer[Root, Directory]): """ Synchronises a local directory with an S3 bucket. """ # the maximum number of keys that can be specified in a single delete call _MAX_DELETES_PER_REQUEST = 1_000 def __init__(self, base: pathlib.PurePath, bucket, prefix: str = ''): """ Initialise a new S3 syncer. :param base: The local base directory. :param bucket: The bucket to upload to. :param prefix: The prefix within which to work, within the bucket. Defaults to no prefix, i.e. the root of the bucket. """ self._base = base self._bucket = bucket self._prefix = prefix def _delete(self, paths: Iterable[str]) -> None: """ Delete a collection of paths from S3. :param paths: The paths to delete. The prefix will be prepended to each one. :raises ClientError: If any request fails. """ for chunk in util.chunk(paths, self._MAX_DELETES_PER_REQUEST): keys = list([self._prefix + key for key in chunk]) logger.info('Deleting %d objects (%s)', len(keys), ', '.join(keys)) response = self._bucket.delete_objects(Delete={ 'Objects': [{'Key': key} for key in keys], 'Quiet': True }) logger.debug('Delete objects response: %s', response) def _upload(self, items: Iterable[Tuple[str, str]]) -> None: """ Upload a collection of paths to S3. :param items: An iterable of pairs containing the local path of the file to upload, and the remote path to upload it to. The prefix will be appended to each remote path. 
""" for src, key in items: logger.info(f'Uploading {src} to {key}') mimetype, _ = mimetypes.guess_type(src) if mimetype is None: logger.warning(f'Could not guess MIME type for {src}') mimetype = 'application/octet-stream' logger.debug(f'Deduced MIME type: {mimetype}') self._bucket.upload_file(src, key, ExtraArgs={ 'ContentType': mimetype }) def sync(self, comparison: Comparison[Root, Directory]) -> None: self._delete(comparison.deleted(include_directories=False)) self._upload(zip(comparison.new(self._base, include_intermediates=False), comparison.new(include_intermediates=False))) self._upload(zip(comparison.modified(self._base), comparison.modified()))
gebn/wood
wood/integrations/s3.py
Syncer._delete
python
def _delete(self, paths: Iterable[str]) -> None:
    """
    Delete a collection of paths from S3.

    :param paths: The paths to delete. The prefix will be prepended to
                  each one.
    :raises ClientError: If any request fails.
    """
    # S3 caps the number of keys per delete call, so batch the paths
    for batch in util.chunk(paths, self._MAX_DELETES_PER_REQUEST):
        keys = [self._prefix + key for key in batch]
        logger.info('Deleting %d objects (%s)', len(keys), ', '.join(keys))
        response = self._bucket.delete_objects(Delete={
            'Objects': [{'Key': key} for key in keys],
            'Quiet': True
        })
        logger.debug('Delete objects response: %s', response)
Delete a collection of paths from S3. :param paths: The paths to delete. The prefix will be prepended to each one. :raises ClientError: If any request fails.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/integrations/s3.py#L101-L116
null
class Syncer(GenericSyncer[Root, Directory]): """ Synchronises a local directory with an S3 bucket. """ # the maximum number of keys that can be specified in a single delete call _MAX_DELETES_PER_REQUEST = 1_000 def __init__(self, base: pathlib.PurePath, bucket, prefix: str = ''): """ Initialise a new S3 syncer. :param base: The local base directory. :param bucket: The bucket to upload to. :param prefix: The prefix within which to work, within the bucket. Defaults to no prefix, i.e. the root of the bucket. """ self._base = base self._bucket = bucket self._prefix = prefix def _upload(self, items: Iterable[Tuple[str, str]]) -> None: """ Upload a collection of paths to S3. :param items: An iterable of pairs containing the local path of the file to upload, and the remote path to upload it to. The prefix will be appended to each remote path. """ for src, key in items: logger.info(f'Uploading {src} to {key}') mimetype, _ = mimetypes.guess_type(src) if mimetype is None: logger.warning(f'Could not guess MIME type for {src}') mimetype = 'application/octet-stream' logger.debug(f'Deduced MIME type: {mimetype}') self._bucket.upload_file(src, key, ExtraArgs={ 'ContentType': mimetype }) def sync(self, comparison: Comparison[Root, Directory]) -> None: self._delete(comparison.deleted(include_directories=False)) self._upload(zip(comparison.new(self._base, include_intermediates=False), comparison.new(include_intermediates=False))) self._upload(zip(comparison.modified(self._base), comparison.modified()))
gebn/wood
wood/integrations/s3.py
Syncer._upload
python
def _upload(self, items: Iterable[Tuple[str, str]]) -> None:
    """
    Upload a collection of paths to S3.

    :param items: An iterable of pairs containing the local path of the
                  file to upload, and the remote path to upload it to.
                  The prefix will be appended to each remote path.
    """
    for local_path, remote_key in items:
        logger.info(f'Uploading {local_path} to {remote_key}')
        # S3 needs an explicit content type, otherwise everything is
        # served as binary
        mime, _ = mimetypes.guess_type(local_path)
        if mime is None:
            logger.warning(f'Could not guess MIME type for {local_path}')
            mime = 'application/octet-stream'
        logger.debug(f'Deduced MIME type: {mime}')
        self._bucket.upload_file(local_path, remote_key, ExtraArgs={
            'ContentType': mime
        })
Upload a collection of paths to S3. :param items: An iterable of pairs containing the local path of the file to upload, and the remote path to upload it to. The prefix will be appended to each remote path.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/integrations/s3.py#L118-L136
null
class Syncer(GenericSyncer[Root, Directory]): """ Synchronises a local directory with an S3 bucket. """ # the maximum number of keys that can be specified in a single delete call _MAX_DELETES_PER_REQUEST = 1_000 def __init__(self, base: pathlib.PurePath, bucket, prefix: str = ''): """ Initialise a new S3 syncer. :param base: The local base directory. :param bucket: The bucket to upload to. :param prefix: The prefix within which to work, within the bucket. Defaults to no prefix, i.e. the root of the bucket. """ self._base = base self._bucket = bucket self._prefix = prefix def _delete(self, paths: Iterable[str]) -> None: """ Delete a collection of paths from S3. :param paths: The paths to delete. The prefix will be prepended to each one. :raises ClientError: If any request fails. """ for chunk in util.chunk(paths, self._MAX_DELETES_PER_REQUEST): keys = list([self._prefix + key for key in chunk]) logger.info('Deleting %d objects (%s)', len(keys), ', '.join(keys)) response = self._bucket.delete_objects(Delete={ 'Objects': [{'Key': key} for key in keys], 'Quiet': True }) logger.debug('Delete objects response: %s', response) def sync(self, comparison: Comparison[Root, Directory]) -> None: self._delete(comparison.deleted(include_directories=False)) self._upload(zip(comparison.new(self._base, include_intermediates=False), comparison.new(include_intermediates=False))) self._upload(zip(comparison.modified(self._base), comparison.modified()))
gebn/wood
setup.py
_read_file
python
def _read_file(name, encoding='utf-8') -> str: with open(name, encoding=encoding) as f: return f.read()
Read the contents of a file. :param name: The name of the file in the current directory. :param encoding: The encoding of the file; defaults to utf-8. :return: The contents of the file.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/setup.py#L5-L14
null
# -*- coding: utf-8 -*- from setuptools import setup, find_packages setup( name='wood', version='1.0.3', description='Compare directories, efficiently sync changes to AWS, and ' 'invalidate CDNs.', long_description=_read_file('README.rst'), license='MIT', url='https://github.com/gebn/wood', author='George Brighton', author_email='oss@gebn.co.uk', packages=find_packages(), zip_safe=True, install_requires=[ 'backoff', 'boto3', 'botocore', 'more-itertools', 'requests' ], test_suite='nose.collector', tests_require=[ 'coverage', 'coveralls', 'mock', 'nose' ], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Topic :: Software Development :: Libraries :: Python Modules' ] )
gebn/wood
wood/util.py
zip_dict
python
def zip_dict(a: Dict[str, A], b: Dict[str, B]) \
        -> Dict[str, Tuple[Optional[A], Optional[B]]]:
    """
    Combine the values within two dictionaries by key.

    :param a: The first dictionary.
    :param b: The second dictionary.
    :return: A dictionary containing all keys that appear in the union
             of a and b. Values are pairs where the first part is a's
             value for the key, and the second part b's value.
    """
    combined = {}
    # the key-view union yields every key present in either dict
    for key in a.keys() | b.keys():
        combined[key] = (a.get(key), b.get(key))
    return combined
Combine the values within two dictionaries by key. :param a: The first dictionary. :param b: The second dictionary. :return: A dictionary containing all keys that appear in the union of a and b. Values are pairs where the first part is a's value for the key, and right second part b's value.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/util.py#L11-L22
null
# -*- coding: utf-8 -*- from typing import Dict, Tuple, Optional, TypeVar, Iterable import itertools import more_itertools A = TypeVar('A') B = TypeVar('B') def chunk(iterable: Iterable[A], n: int) \ -> Iterable[more_itertools.more.peekable]: """ Produce an iterable of interables of a maximum length from a (presumably longer) iterable. This is useful when only so many elements can be processed at once, such as an API that limits to n things per request. :param iterable: The iterable to chunk into iterables of size up to n. :param n: The maximum length of each iterable. :return: An iterable of iterables. Each iterable will be of size n, except possibly the last one which will contain fewer elements. """ iterator = iter(iterable) while True: chunk_ = more_itertools.peekable(itertools.islice(iterator, n)) try: chunk_.peek() except StopIteration: return yield chunk_
gebn/wood
wood/util.py
chunk
python
def chunk(iterable: Iterable[A], n: int) \
        -> Iterable[more_itertools.more.peekable]:
    """
    Produce an iterable of iterables of a maximum length from a
    (presumably longer) iterable. This is useful when only so many
    elements can be processed at once, such as an API that limits to n
    things per request.

    :param iterable: The iterable to chunk into iterables of size up
                     to n.
    :param n: The maximum length of each iterable.
    :return: An iterable of iterables. Each iterable will be of size n,
             except possibly the last one which will contain fewer
             elements.
    """
    source = iter(iterable)
    while True:
        # take the next slice lazily; peekable lets us test for
        # exhaustion without consuming the first element
        window = more_itertools.peekable(itertools.islice(source, n))
        try:
            window.peek()
        except StopIteration:
            # the source is exhausted - stop producing chunks
            return
        yield window
Produce an iterable of iterables of a maximum length from a (presumably longer) iterable. This is useful when only so many elements can be processed at once, such as an API that limits to n things per request. :param iterable: The iterable to chunk into iterables of size up to n. :param n: The maximum length of each iterable. :return: An iterable of iterables. Each iterable will be of size n, except possibly the last one which will contain fewer elements.
train
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/util.py#L25-L44
null
# -*- coding: utf-8 -*- from typing import Dict, Tuple, Optional, TypeVar, Iterable import itertools import more_itertools A = TypeVar('A') B = TypeVar('B') def zip_dict(a: Dict[str, A], b: Dict[str, B]) \ -> Dict[str, Tuple[Optional[A], Optional[B]]]: """ Combine the values within two dictionaries by key. :param a: The first dictionary. :param b: The second dictionary. :return: A dictionary containing all keys that appear in the union of a and b. Values are pairs where the first part is a's value for the key, and right second part b's value. """ return {key: (a.get(key), b.get(key)) for key in a.keys() | b.keys()}
markfinger/assembla
assembla/api.py
API._get_json
python
def _get_json(self, model, space=None, rel_path=None, extra_params=None,
              get_all=None):
    """
    Base level method for fetching data from the API.

    :param model: The model class to instantiate for each returned
                  object.
    :param space: The space the results belong to; only API.spaces and
                  API.stream may omit it.
    :param rel_path: Override for the model's relative API path.
    :param extra_params: Extra query-string parameters.
    :param get_all: Whether to follow pagination and fetch every page.
    :return: A list of model instances.
    """
    # Only API.spaces and API.event should not provide
    # the `space` argument
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._get_json` should always '
            'be called with a `space` argument.'
        )

    params = extra_params if extra_params else {}

    # Handle pagination for requests carrying large amounts of data
    params['page'] = params.get('page', 1)

    # Generate the url to hit
    url = '{0}/{1}/{2}.json?{3}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        urllib.urlencode(params),
    )

    # If the cache is being used and the url has been hit already,
    # reuse the stored response rather than refetching
    if self.cache_responses and url in self.cache:
        response = self.cache[url]
    else:
        # Fetch the data
        response = self.session.get(url=url, headers={
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
        })
        # If the cache is being used, update it
        if self.cache_responses:
            self.cache[url] = response

    if response.status_code == 204:
        # No Content
        return []

    if response.status_code != 200:
        # Most likely a 404 Not Found
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code, url, response.text
            )
        )

    # OK: wrap each returned object in a model instance
    payload = response.json()
    results = []
    for data in payload:
        instance = model(data=data)
        instance.api = self
        if space:
            instance.space = space
        results.append(instance)

    # If it looks like there are more pages to fetch,
    # try and fetch the next one
    per_page = params.get('per_page', None)
    if (
        get_all and
        per_page and
        len(payload) and
        per_page == len(payload)
    ):
        params['page'] += 1
        results = results + self._get_json(model, space, rel_path, params,
                                           get_all=get_all)
    return results
Base level method for fetching data from the API
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L42-L112
null
class API(object): cache_responses = False cache = {} def __init__(self, key=None, secret=None): """ :key, :secret Your Assembla API access details, available from https://www.assembla.com/user/edit/manage_clients """ if not key or not secret: raise Exception( 'The Assembla API requires your API \'key\' and \'secret\', ' 'accessible from https://www.assembla.com/user/edit/manage_clients' ) self.key = key self.secret = secret self.session = requests.Session() @assembla_filter def stream(self, extra_params=None): """ All Events available """ return self._get_json(Event, extra_params=extra_params) @assembla_filter def spaces(self, extra_params=None): """ All Spaces available """ return self._get_json(Space, extra_params=extra_params) def _post_json(self, instance, space=None, rel_path=None, extra_params=None): """ Base level method for updating data via the API """ model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._post_json` should always ' 'be called with a `space` argument.' ) if 'number' in instance.data: raise AttributeError( 'You cannot create a ticket which already has a number' ) if not extra_params: extra_params = {} # Generate the url to hit url = '{0}/{1}/{2}?{3}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, urllib.urlencode(extra_params), ) # Fetch the data response = requests.post( url=url, data=json.dumps(instance.data), headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 201: # OK instance = model(data=response.json()) instance.api = self if space: instance.space = space return instance else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. 
Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _put_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None): """ Base level method for adding new data to the API """ model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._put_json` should always ' 'be called with a `space` argument.' ) if not extra_params: extra_params = {} if not id_field: id_field = 'number' # Generate the url to hit url = '{0}/{1}/{2}/{3}.json?{4}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, instance[id_field], urllib.urlencode(extra_params), ) # Fetch the data response = requests.put( url=url, data=json.dumps(instance.data), headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 204: # OK return instance else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _delete_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None, append_to_path=None): """ Base level method for removing data from the API """ model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._delete_json` should always ' 'be called with a `space` argument.' 
) if not extra_params: extra_params = {} if not id_field: id_field = 'number' if not instance.get(id_field, None): raise AttributeError( '%s does not have a value for the id field \'%s\'' % ( instance.__class__.__name__, id_field ) ) # Generate the url to hit url = '{0}/{1}/{2}/{3}{4}.json?{5}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, instance[id_field], append_to_path or '', urllib.urlencode(extra_params), ) # Fetch the data response = requests.delete( url=url, headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 204: # OK return True else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _bind_variables(self, instance, space): """ Bind related variables to the instance """ instance.api = self if space: instance.space = space return instance
markfinger/assembla
assembla/api.py
API._post_json
python
def _post_json(self, instance, space=None, rel_path=None, extra_params=None): model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._post_json` should always ' 'be called with a `space` argument.' ) if 'number' in instance.data: raise AttributeError( 'You cannot create a ticket which already has a number' ) if not extra_params: extra_params = {} # Generate the url to hit url = '{0}/{1}/{2}?{3}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, urllib.urlencode(extra_params), ) # Fetch the data response = requests.post( url=url, data=json.dumps(instance.data), headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 201: # OK instance = model(data=response.json()) instance.api = self if space: instance.space = space return instance else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) )
Base level method for updating data via the API
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L114-L169
null
class API(object): cache_responses = False cache = {} def __init__(self, key=None, secret=None): """ :key, :secret Your Assembla API access details, available from https://www.assembla.com/user/edit/manage_clients """ if not key or not secret: raise Exception( 'The Assembla API requires your API \'key\' and \'secret\', ' 'accessible from https://www.assembla.com/user/edit/manage_clients' ) self.key = key self.secret = secret self.session = requests.Session() @assembla_filter def stream(self, extra_params=None): """ All Events available """ return self._get_json(Event, extra_params=extra_params) @assembla_filter def spaces(self, extra_params=None): """ All Spaces available """ return self._get_json(Space, extra_params=extra_params) def _get_json(self, model, space=None, rel_path=None, extra_params=None, get_all=None): """ Base level method for fetching data from the API """ # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._get_json` should always ' 'be called with a `space` argument.' 
) if not extra_params: extra_params = {} # Handle pagination for requests carrying large amounts of data extra_params['page'] = extra_params.get('page', 1) # Generate the url to hit url = '{0}/{1}/{2}.json?{3}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, urllib.urlencode(extra_params), ) # If the cache is being used and the url has been hit already if self.cache_responses and url in self.cache: response = self.cache[url] else: # Fetch the data headers = { 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, } response = self.session.get(url=url, headers=headers) # If the cache is being used, update it if self.cache_responses: self.cache[url] = response if response.status_code == 200: # OK results = [] json_response = response.json() for obj in json_response: instance = model(data=obj) instance.api = self if space: instance.space = space results.append(instance) # If it looks like there are more pages to fetch, # try and fetch the next one per_page = extra_params.get('per_page', None) if ( get_all and per_page and len(json_response) and per_page == len(json_response) ): extra_params['page'] += 1 results = results + self._get_json(model, space, rel_path, extra_params, get_all=get_all) return results elif response.status_code == 204: # No Content return [] else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _put_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None): """ Base level method for adding new data to the API """ model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._put_json` should always ' 'be called with a `space` argument.' 
) if not extra_params: extra_params = {} if not id_field: id_field = 'number' # Generate the url to hit url = '{0}/{1}/{2}/{3}.json?{4}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, instance[id_field], urllib.urlencode(extra_params), ) # Fetch the data response = requests.put( url=url, data=json.dumps(instance.data), headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 204: # OK return instance else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _delete_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None, append_to_path=None): """ Base level method for removing data from the API """ model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._delete_json` should always ' 'be called with a `space` argument.' ) if not extra_params: extra_params = {} if not id_field: id_field = 'number' if not instance.get(id_field, None): raise AttributeError( '%s does not have a value for the id field \'%s\'' % ( instance.__class__.__name__, id_field ) ) # Generate the url to hit url = '{0}/{1}/{2}/{3}{4}.json?{5}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, instance[id_field], append_to_path or '', urllib.urlencode(extra_params), ) # Fetch the data response = requests.delete( url=url, headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 204: # OK return True else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. 
Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _bind_variables(self, instance, space): """ Bind related variables to the instance """ instance.api = self if space: instance.space = space return instance
markfinger/assembla
assembla/api.py
API._put_json
python
def _put_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None): model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._put_json` should always ' 'be called with a `space` argument.' ) if not extra_params: extra_params = {} if not id_field: id_field = 'number' # Generate the url to hit url = '{0}/{1}/{2}/{3}.json?{4}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, instance[id_field], urllib.urlencode(extra_params), ) # Fetch the data response = requests.put( url=url, data=json.dumps(instance.data), headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 204: # OK return instance else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) )
Base level method for adding new data to the API
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L171-L221
null
class API(object): cache_responses = False cache = {} def __init__(self, key=None, secret=None): """ :key, :secret Your Assembla API access details, available from https://www.assembla.com/user/edit/manage_clients """ if not key or not secret: raise Exception( 'The Assembla API requires your API \'key\' and \'secret\', ' 'accessible from https://www.assembla.com/user/edit/manage_clients' ) self.key = key self.secret = secret self.session = requests.Session() @assembla_filter def stream(self, extra_params=None): """ All Events available """ return self._get_json(Event, extra_params=extra_params) @assembla_filter def spaces(self, extra_params=None): """ All Spaces available """ return self._get_json(Space, extra_params=extra_params) def _get_json(self, model, space=None, rel_path=None, extra_params=None, get_all=None): """ Base level method for fetching data from the API """ # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._get_json` should always ' 'be called with a `space` argument.' 
) if not extra_params: extra_params = {} # Handle pagination for requests carrying large amounts of data extra_params['page'] = extra_params.get('page', 1) # Generate the url to hit url = '{0}/{1}/{2}.json?{3}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, urllib.urlencode(extra_params), ) # If the cache is being used and the url has been hit already if self.cache_responses and url in self.cache: response = self.cache[url] else: # Fetch the data headers = { 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, } response = self.session.get(url=url, headers=headers) # If the cache is being used, update it if self.cache_responses: self.cache[url] = response if response.status_code == 200: # OK results = [] json_response = response.json() for obj in json_response: instance = model(data=obj) instance.api = self if space: instance.space = space results.append(instance) # If it looks like there are more pages to fetch, # try and fetch the next one per_page = extra_params.get('per_page', None) if ( get_all and per_page and len(json_response) and per_page == len(json_response) ): extra_params['page'] += 1 results = results + self._get_json(model, space, rel_path, extra_params, get_all=get_all) return results elif response.status_code == 204: # No Content return [] else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _post_json(self, instance, space=None, rel_path=None, extra_params=None): """ Base level method for updating data via the API """ model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._post_json` should always ' 'be called with a `space` argument.' 
) if 'number' in instance.data: raise AttributeError( 'You cannot create a ticket which already has a number' ) if not extra_params: extra_params = {} # Generate the url to hit url = '{0}/{1}/{2}?{3}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, urllib.urlencode(extra_params), ) # Fetch the data response = requests.post( url=url, data=json.dumps(instance.data), headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 201: # OK instance = model(data=response.json()) instance.api = self if space: instance.space = space return instance else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _delete_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None, append_to_path=None): """ Base level method for removing data from the API """ model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._delete_json` should always ' 'be called with a `space` argument.' ) if not extra_params: extra_params = {} if not id_field: id_field = 'number' if not instance.get(id_field, None): raise AttributeError( '%s does not have a value for the id field \'%s\'' % ( instance.__class__.__name__, id_field ) ) # Generate the url to hit url = '{0}/{1}/{2}/{3}{4}.json?{5}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, instance[id_field], append_to_path or '', urllib.urlencode(extra_params), ) # Fetch the data response = requests.delete( url=url, headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 204: # OK return True else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. 
Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _bind_variables(self, instance, space): """ Bind related variables to the instance """ instance.api = self if space: instance.space = space return instance
markfinger/assembla
assembla/api.py
API._delete_json
python
def _delete_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None, append_to_path=None): model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._delete_json` should always ' 'be called with a `space` argument.' ) if not extra_params: extra_params = {} if not id_field: id_field = 'number' if not instance.get(id_field, None): raise AttributeError( '%s does not have a value for the id field \'%s\'' % ( instance.__class__.__name__, id_field ) ) # Generate the url to hit url = '{0}/{1}/{2}/{3}{4}.json?{5}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, instance[id_field], append_to_path or '', urllib.urlencode(extra_params), ) # Fetch the data response = requests.delete( url=url, headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 204: # OK return True else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) )
Base level method for removing data from the API
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L223-L281
null
class API(object): cache_responses = False cache = {} def __init__(self, key=None, secret=None): """ :key, :secret Your Assembla API access details, available from https://www.assembla.com/user/edit/manage_clients """ if not key or not secret: raise Exception( 'The Assembla API requires your API \'key\' and \'secret\', ' 'accessible from https://www.assembla.com/user/edit/manage_clients' ) self.key = key self.secret = secret self.session = requests.Session() @assembla_filter def stream(self, extra_params=None): """ All Events available """ return self._get_json(Event, extra_params=extra_params) @assembla_filter def spaces(self, extra_params=None): """ All Spaces available """ return self._get_json(Space, extra_params=extra_params) def _get_json(self, model, space=None, rel_path=None, extra_params=None, get_all=None): """ Base level method for fetching data from the API """ # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._get_json` should always ' 'be called with a `space` argument.' 
) if not extra_params: extra_params = {} # Handle pagination for requests carrying large amounts of data extra_params['page'] = extra_params.get('page', 1) # Generate the url to hit url = '{0}/{1}/{2}.json?{3}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, urllib.urlencode(extra_params), ) # If the cache is being used and the url has been hit already if self.cache_responses and url in self.cache: response = self.cache[url] else: # Fetch the data headers = { 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, } response = self.session.get(url=url, headers=headers) # If the cache is being used, update it if self.cache_responses: self.cache[url] = response if response.status_code == 200: # OK results = [] json_response = response.json() for obj in json_response: instance = model(data=obj) instance.api = self if space: instance.space = space results.append(instance) # If it looks like there are more pages to fetch, # try and fetch the next one per_page = extra_params.get('per_page', None) if ( get_all and per_page and len(json_response) and per_page == len(json_response) ): extra_params['page'] += 1 results = results + self._get_json(model, space, rel_path, extra_params, get_all=get_all) return results elif response.status_code == 204: # No Content return [] else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _post_json(self, instance, space=None, rel_path=None, extra_params=None): """ Base level method for updating data via the API """ model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._post_json` should always ' 'be called with a `space` argument.' 
) if 'number' in instance.data: raise AttributeError( 'You cannot create a ticket which already has a number' ) if not extra_params: extra_params = {} # Generate the url to hit url = '{0}/{1}/{2}?{3}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, urllib.urlencode(extra_params), ) # Fetch the data response = requests.post( url=url, data=json.dumps(instance.data), headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 201: # OK instance = model(data=response.json()) instance.api = self if space: instance.space = space return instance else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _put_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None): """ Base level method for adding new data to the API """ model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._put_json` should always ' 'be called with a `space` argument.' ) if not extra_params: extra_params = {} if not id_field: id_field = 'number' # Generate the url to hit url = '{0}/{1}/{2}/{3}.json?{4}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, instance[id_field], urllib.urlencode(extra_params), ) # Fetch the data response = requests.put( url=url, data=json.dumps(instance.data), headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 204: # OK return instance else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. 
Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _bind_variables(self, instance, space): """ Bind related variables to the instance """ instance.api = self if space: instance.space = space return instance
markfinger/assembla
assembla/api.py
API._bind_variables
python
def _bind_variables(self, instance, space): instance.api = self if space: instance.space = space return instance
Bind related variables to the instance
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L283-L290
null
class API(object): cache_responses = False cache = {} def __init__(self, key=None, secret=None): """ :key, :secret Your Assembla API access details, available from https://www.assembla.com/user/edit/manage_clients """ if not key or not secret: raise Exception( 'The Assembla API requires your API \'key\' and \'secret\', ' 'accessible from https://www.assembla.com/user/edit/manage_clients' ) self.key = key self.secret = secret self.session = requests.Session() @assembla_filter def stream(self, extra_params=None): """ All Events available """ return self._get_json(Event, extra_params=extra_params) @assembla_filter def spaces(self, extra_params=None): """ All Spaces available """ return self._get_json(Space, extra_params=extra_params) def _get_json(self, model, space=None, rel_path=None, extra_params=None, get_all=None): """ Base level method for fetching data from the API """ # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._get_json` should always ' 'be called with a `space` argument.' 
) if not extra_params: extra_params = {} # Handle pagination for requests carrying large amounts of data extra_params['page'] = extra_params.get('page', 1) # Generate the url to hit url = '{0}/{1}/{2}.json?{3}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, urllib.urlencode(extra_params), ) # If the cache is being used and the url has been hit already if self.cache_responses and url in self.cache: response = self.cache[url] else: # Fetch the data headers = { 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, } response = self.session.get(url=url, headers=headers) # If the cache is being used, update it if self.cache_responses: self.cache[url] = response if response.status_code == 200: # OK results = [] json_response = response.json() for obj in json_response: instance = model(data=obj) instance.api = self if space: instance.space = space results.append(instance) # If it looks like there are more pages to fetch, # try and fetch the next one per_page = extra_params.get('per_page', None) if ( get_all and per_page and len(json_response) and per_page == len(json_response) ): extra_params['page'] += 1 results = results + self._get_json(model, space, rel_path, extra_params, get_all=get_all) return results elif response.status_code == 204: # No Content return [] else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _post_json(self, instance, space=None, rel_path=None, extra_params=None): """ Base level method for updating data via the API """ model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._post_json` should always ' 'be called with a `space` argument.' 
) if 'number' in instance.data: raise AttributeError( 'You cannot create a ticket which already has a number' ) if not extra_params: extra_params = {} # Generate the url to hit url = '{0}/{1}/{2}?{3}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, urllib.urlencode(extra_params), ) # Fetch the data response = requests.post( url=url, data=json.dumps(instance.data), headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 201: # OK instance = model(data=response.json()) instance.api = self if space: instance.space = space return instance else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _put_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None): """ Base level method for adding new data to the API """ model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._put_json` should always ' 'be called with a `space` argument.' ) if not extra_params: extra_params = {} if not id_field: id_field = 'number' # Generate the url to hit url = '{0}/{1}/{2}/{3}.json?{4}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, instance[id_field], urllib.urlencode(extra_params), ) # Fetch the data response = requests.put( url=url, data=json.dumps(instance.data), headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 204: # OK return instance else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. 
Response text: "{2}".'.format( response.status_code, url, response.text ) ) def _delete_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None, append_to_path=None): """ Base level method for removing data from the API """ model = type(instance) # Only API.spaces and API.event should not provide # the `space argument if space is None and model not in (Space, Event): raise Exception( 'In general, `API._delete_json` should always ' 'be called with a `space` argument.' ) if not extra_params: extra_params = {} if not id_field: id_field = 'number' if not instance.get(id_field, None): raise AttributeError( '%s does not have a value for the id field \'%s\'' % ( instance.__class__.__name__, id_field ) ) # Generate the url to hit url = '{0}/{1}/{2}/{3}{4}.json?{5}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, instance[id_field], append_to_path or '', urllib.urlencode(extra_params), ) # Fetch the data response = requests.delete( url=url, headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 204: # OK return True else: # Most likely a 404 Not Found raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) )
markfinger/assembla
assembla/api.py
Space.tickets
python
def tickets(self, extra_params=None): # Default params params = { 'per_page': settings.MAX_PER_PAGE, 'report': 0, # Report 0 is all tickets } if extra_params: params.update(extra_params) return self.api._get_json( Ticket, space=self, rel_path=self._build_rel_path('tickets'), extra_params=params, get_all=True, # Retrieve all tickets in the space )
All Tickets in this Space
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L301-L321
[ "def _build_rel_path(self, to_append=None):\n \"\"\"\n Build a relative path to the API endpoint\n \"\"\"\n return '{0}/{1}/{2}'.format(\n self.rel_path,\n self['id'],\n to_append if to_append else ''\n )\n" ]
class Space(AssemblaObject): rel_path = 'spaces' @assembla_filter @assembla_filter def milestones(self, extra_params=None): """ All Milestones in this Space """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Milestone, space=self, rel_path=self._build_rel_path('milestones/all'), extra_params=params, get_all=True, # Retrieve all milestones in the space ) @assembla_filter def tools(self, extra_params=None): """" All Tools in this Space """ return self.api._get_json( SpaceTool, space=self, rel_path=self._build_rel_path('space_tools'), extra_params=extra_params, ) @assembla_filter def components(self, extra_params=None): """" All components in this Space """ return self.api._get_json( Component, space=self, rel_path=self._build_rel_path('ticket_components'), extra_params=extra_params, ) @assembla_filter def users(self, extra_params=None): """ All Users with access to this Space """ return self.api._get_json( User, space=self, rel_path=self._build_rel_path('users'), extra_params=extra_params, ) @assembla_filter def tags(self, extra_params=None): """" All Tags in this Space """ return self.api._get_json( Tag, space=self, rel_path=self._build_rel_path('tags'), extra_params=extra_params, ) @assembla_filter def wiki_pages(self, extra_params=None): """ All Wiki Pages with access to this Space """ return self.api._get_json( WikiPage, space=self, rel_path=self._build_rel_path('wiki_pages'), extra_params=extra_params, ) def _build_rel_path(self, to_append=None): """ Build a relative path to the API endpoint """ return '{0}/{1}/{2}'.format( self.rel_path, self['id'], to_append if to_append else '' )
markfinger/assembla
assembla/api.py
Space.milestones
python
def milestones(self, extra_params=None): # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Milestone, space=self, rel_path=self._build_rel_path('milestones/all'), extra_params=params, get_all=True, # Retrieve all milestones in the space )
All Milestones in this Space
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L324-L343
[ "def _build_rel_path(self, to_append=None):\n \"\"\"\n Build a relative path to the API endpoint\n \"\"\"\n return '{0}/{1}/{2}'.format(\n self.rel_path,\n self['id'],\n to_append if to_append else ''\n )\n" ]
class Space(AssemblaObject): rel_path = 'spaces' @assembla_filter def tickets(self, extra_params=None): """ All Tickets in this Space """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, 'report': 0, # Report 0 is all tickets } if extra_params: params.update(extra_params) return self.api._get_json( Ticket, space=self, rel_path=self._build_rel_path('tickets'), extra_params=params, get_all=True, # Retrieve all tickets in the space ) @assembla_filter def milestones(self, extra_params=None): """ All Milestones in this Space """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Milestone, space=self, rel_path=self._build_rel_path('milestones/all'), extra_params=params, get_all=True, # Retrieve all milestones in the space ) @assembla_filter def tools(self, extra_params=None): """" All Tools in this Space """ return self.api._get_json( SpaceTool, space=self, rel_path=self._build_rel_path('space_tools'), extra_params=extra_params, ) @assembla_filter def components(self, extra_params=None): """" All components in this Space """ return self.api._get_json( Component, space=self, rel_path=self._build_rel_path('ticket_components'), extra_params=extra_params, ) @assembla_filter def users(self, extra_params=None): """ All Users with access to this Space """ return self.api._get_json( User, space=self, rel_path=self._build_rel_path('users'), extra_params=extra_params, ) @assembla_filter def tags(self, extra_params=None): """" All Tags in this Space """ return self.api._get_json( Tag, space=self, rel_path=self._build_rel_path('tags'), extra_params=extra_params, ) @assembla_filter def wiki_pages(self, extra_params=None): """ All Wiki Pages with access to this Space """ return self.api._get_json( WikiPage, space=self, rel_path=self._build_rel_path('wiki_pages'), extra_params=extra_params, ) def _build_rel_path(self, to_append=None): """ Build a relative path to the API endpoint """ 
return '{0}/{1}/{2}'.format( self.rel_path, self['id'], to_append if to_append else '' )
markfinger/assembla
assembla/api.py
Space.tools
python
def tools(self, extra_params=None): " return self.api._get_json( SpaceTool, space=self, rel_path=self._build_rel_path('space_tools'), extra_params=extra_params, )
All Tools in this Space
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L346-L355
[ "def _build_rel_path(self, to_append=None):\n \"\"\"\n Build a relative path to the API endpoint\n \"\"\"\n return '{0}/{1}/{2}'.format(\n self.rel_path,\n self['id'],\n to_append if to_append else ''\n )\n" ]
class Space(AssemblaObject): rel_path = 'spaces' @assembla_filter def tickets(self, extra_params=None): """ All Tickets in this Space """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, 'report': 0, # Report 0 is all tickets } if extra_params: params.update(extra_params) return self.api._get_json( Ticket, space=self, rel_path=self._build_rel_path('tickets'), extra_params=params, get_all=True, # Retrieve all tickets in the space ) @assembla_filter def milestones(self, extra_params=None): """ All Milestones in this Space """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Milestone, space=self, rel_path=self._build_rel_path('milestones/all'), extra_params=params, get_all=True, # Retrieve all milestones in the space ) @assembla_filter @assembla_filter def components(self, extra_params=None): """" All components in this Space """ return self.api._get_json( Component, space=self, rel_path=self._build_rel_path('ticket_components'), extra_params=extra_params, ) @assembla_filter def users(self, extra_params=None): """ All Users with access to this Space """ return self.api._get_json( User, space=self, rel_path=self._build_rel_path('users'), extra_params=extra_params, ) @assembla_filter def tags(self, extra_params=None): """" All Tags in this Space """ return self.api._get_json( Tag, space=self, rel_path=self._build_rel_path('tags'), extra_params=extra_params, ) @assembla_filter def wiki_pages(self, extra_params=None): """ All Wiki Pages with access to this Space """ return self.api._get_json( WikiPage, space=self, rel_path=self._build_rel_path('wiki_pages'), extra_params=extra_params, ) def _build_rel_path(self, to_append=None): """ Build a relative path to the API endpoint """ return '{0}/{1}/{2}'.format( self.rel_path, self['id'], to_append if to_append else '' )
markfinger/assembla
assembla/api.py
Space.components
python
def components(self, extra_params=None): " return self.api._get_json( Component, space=self, rel_path=self._build_rel_path('ticket_components'), extra_params=extra_params, )
All components in this Space
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L358-L367
[ "def _build_rel_path(self, to_append=None):\n \"\"\"\n Build a relative path to the API endpoint\n \"\"\"\n return '{0}/{1}/{2}'.format(\n self.rel_path,\n self['id'],\n to_append if to_append else ''\n )\n" ]
class Space(AssemblaObject): rel_path = 'spaces' @assembla_filter def tickets(self, extra_params=None): """ All Tickets in this Space """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, 'report': 0, # Report 0 is all tickets } if extra_params: params.update(extra_params) return self.api._get_json( Ticket, space=self, rel_path=self._build_rel_path('tickets'), extra_params=params, get_all=True, # Retrieve all tickets in the space ) @assembla_filter def milestones(self, extra_params=None): """ All Milestones in this Space """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Milestone, space=self, rel_path=self._build_rel_path('milestones/all'), extra_params=params, get_all=True, # Retrieve all milestones in the space ) @assembla_filter def tools(self, extra_params=None): """" All Tools in this Space """ return self.api._get_json( SpaceTool, space=self, rel_path=self._build_rel_path('space_tools'), extra_params=extra_params, ) @assembla_filter @assembla_filter def users(self, extra_params=None): """ All Users with access to this Space """ return self.api._get_json( User, space=self, rel_path=self._build_rel_path('users'), extra_params=extra_params, ) @assembla_filter def tags(self, extra_params=None): """" All Tags in this Space """ return self.api._get_json( Tag, space=self, rel_path=self._build_rel_path('tags'), extra_params=extra_params, ) @assembla_filter def wiki_pages(self, extra_params=None): """ All Wiki Pages with access to this Space """ return self.api._get_json( WikiPage, space=self, rel_path=self._build_rel_path('wiki_pages'), extra_params=extra_params, ) def _build_rel_path(self, to_append=None): """ Build a relative path to the API endpoint """ return '{0}/{1}/{2}'.format( self.rel_path, self['id'], to_append if to_append else '' )
markfinger/assembla
assembla/api.py
Space.users
python
def users(self, extra_params=None): return self.api._get_json( User, space=self, rel_path=self._build_rel_path('users'), extra_params=extra_params, )
All Users with access to this Space
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L370-L379
[ "def _build_rel_path(self, to_append=None):\n \"\"\"\n Build a relative path to the API endpoint\n \"\"\"\n return '{0}/{1}/{2}'.format(\n self.rel_path,\n self['id'],\n to_append if to_append else ''\n )\n" ]
class Space(AssemblaObject): rel_path = 'spaces' @assembla_filter def tickets(self, extra_params=None): """ All Tickets in this Space """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, 'report': 0, # Report 0 is all tickets } if extra_params: params.update(extra_params) return self.api._get_json( Ticket, space=self, rel_path=self._build_rel_path('tickets'), extra_params=params, get_all=True, # Retrieve all tickets in the space ) @assembla_filter def milestones(self, extra_params=None): """ All Milestones in this Space """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Milestone, space=self, rel_path=self._build_rel_path('milestones/all'), extra_params=params, get_all=True, # Retrieve all milestones in the space ) @assembla_filter def tools(self, extra_params=None): """" All Tools in this Space """ return self.api._get_json( SpaceTool, space=self, rel_path=self._build_rel_path('space_tools'), extra_params=extra_params, ) @assembla_filter def components(self, extra_params=None): """" All components in this Space """ return self.api._get_json( Component, space=self, rel_path=self._build_rel_path('ticket_components'), extra_params=extra_params, ) @assembla_filter @assembla_filter def tags(self, extra_params=None): """" All Tags in this Space """ return self.api._get_json( Tag, space=self, rel_path=self._build_rel_path('tags'), extra_params=extra_params, ) @assembla_filter def wiki_pages(self, extra_params=None): """ All Wiki Pages with access to this Space """ return self.api._get_json( WikiPage, space=self, rel_path=self._build_rel_path('wiki_pages'), extra_params=extra_params, ) def _build_rel_path(self, to_append=None): """ Build a relative path to the API endpoint """ return '{0}/{1}/{2}'.format( self.rel_path, self['id'], to_append if to_append else '' )
markfinger/assembla
assembla/api.py
Space.tags
python
def tags(self, extra_params=None): " return self.api._get_json( Tag, space=self, rel_path=self._build_rel_path('tags'), extra_params=extra_params, )
All Tags in this Space
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L382-L391
[ "def _build_rel_path(self, to_append=None):\n \"\"\"\n Build a relative path to the API endpoint\n \"\"\"\n return '{0}/{1}/{2}'.format(\n self.rel_path,\n self['id'],\n to_append if to_append else ''\n )\n" ]
class Space(AssemblaObject): rel_path = 'spaces' @assembla_filter def tickets(self, extra_params=None): """ All Tickets in this Space """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, 'report': 0, # Report 0 is all tickets } if extra_params: params.update(extra_params) return self.api._get_json( Ticket, space=self, rel_path=self._build_rel_path('tickets'), extra_params=params, get_all=True, # Retrieve all tickets in the space ) @assembla_filter def milestones(self, extra_params=None): """ All Milestones in this Space """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Milestone, space=self, rel_path=self._build_rel_path('milestones/all'), extra_params=params, get_all=True, # Retrieve all milestones in the space ) @assembla_filter def tools(self, extra_params=None): """" All Tools in this Space """ return self.api._get_json( SpaceTool, space=self, rel_path=self._build_rel_path('space_tools'), extra_params=extra_params, ) @assembla_filter def components(self, extra_params=None): """" All components in this Space """ return self.api._get_json( Component, space=self, rel_path=self._build_rel_path('ticket_components'), extra_params=extra_params, ) @assembla_filter def users(self, extra_params=None): """ All Users with access to this Space """ return self.api._get_json( User, space=self, rel_path=self._build_rel_path('users'), extra_params=extra_params, ) @assembla_filter @assembla_filter def wiki_pages(self, extra_params=None): """ All Wiki Pages with access to this Space """ return self.api._get_json( WikiPage, space=self, rel_path=self._build_rel_path('wiki_pages'), extra_params=extra_params, ) def _build_rel_path(self, to_append=None): """ Build a relative path to the API endpoint """ return '{0}/{1}/{2}'.format( self.rel_path, self['id'], to_append if to_append else '' )
markfinger/assembla
assembla/api.py
Space.wiki_pages
python
def wiki_pages(self, extra_params=None): return self.api._get_json( WikiPage, space=self, rel_path=self._build_rel_path('wiki_pages'), extra_params=extra_params, )
All Wiki Pages with access to this Space
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L394-L403
[ "def _build_rel_path(self, to_append=None):\n \"\"\"\n Build a relative path to the API endpoint\n \"\"\"\n return '{0}/{1}/{2}'.format(\n self.rel_path,\n self['id'],\n to_append if to_append else ''\n )\n" ]
class Space(AssemblaObject): rel_path = 'spaces' @assembla_filter def tickets(self, extra_params=None): """ All Tickets in this Space """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, 'report': 0, # Report 0 is all tickets } if extra_params: params.update(extra_params) return self.api._get_json( Ticket, space=self, rel_path=self._build_rel_path('tickets'), extra_params=params, get_all=True, # Retrieve all tickets in the space ) @assembla_filter def milestones(self, extra_params=None): """ All Milestones in this Space """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Milestone, space=self, rel_path=self._build_rel_path('milestones/all'), extra_params=params, get_all=True, # Retrieve all milestones in the space ) @assembla_filter def tools(self, extra_params=None): """" All Tools in this Space """ return self.api._get_json( SpaceTool, space=self, rel_path=self._build_rel_path('space_tools'), extra_params=extra_params, ) @assembla_filter def components(self, extra_params=None): """" All components in this Space """ return self.api._get_json( Component, space=self, rel_path=self._build_rel_path('ticket_components'), extra_params=extra_params, ) @assembla_filter def users(self, extra_params=None): """ All Users with access to this Space """ return self.api._get_json( User, space=self, rel_path=self._build_rel_path('users'), extra_params=extra_params, ) @assembla_filter def tags(self, extra_params=None): """" All Tags in this Space """ return self.api._get_json( Tag, space=self, rel_path=self._build_rel_path('tags'), extra_params=extra_params, ) @assembla_filter def _build_rel_path(self, to_append=None): """ Build a relative path to the API endpoint """ return '{0}/{1}/{2}'.format( self.rel_path, self['id'], to_append if to_append else '' )
markfinger/assembla
assembla/api.py
Milestone.tickets
python
def tickets(self, extra_params=None): return filter( lambda ticket: ticket.get('milestone_id', None) == self['id'], self.space.tickets(extra_params=extra_params) )
All Tickets which are a part of this Milestone
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L426-L433
null
class Milestone(AssemblaObject): @assembla_filter
markfinger/assembla
assembla/api.py
Ticket.tags
python
def tags(self, extra_params=None): # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Tag, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/tags' % self['number'] ), extra_params=params, get_all=True, # Retrieve all tags in the ticket )
All Tags in this Ticket
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L438-L459
null
class Ticket(AssemblaObject): def milestone(self, extra_params=None): """ The Milestone that the Ticket is a part of """ if self.get('milestone_id', None): milestones = self.space.milestones(id=self['milestone_id'], extra_params=extra_params) if milestones: return milestones[0] def user(self, extra_params=None): """ The User currently assigned to the Ticket """ if self.get('assigned_to_id', None): users = self.space.users( id=self['assigned_to_id'], extra_params=extra_params ) if users: return users[0] def component(self, extra_params=None): """ The Component currently assigned to the Ticket """ if self.get('component_id', None): components = self.space.components(id=self['component_id'], extra_params=extra_params) if components: return components[0] @assembla_filter def comments(self, extra_params=None): """ All Comments in this Ticket """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( TicketComment, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/ticket_comments' % self['number'] ), extra_params=params, get_all=True, # Retrieve all comments in the ticket ) def write(self): """ Create or update the Ticket on Assembla """ if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can write it to Assembla.") if self.get('number'): # Modifying an existing ticket method = self.space.api._put_json else: # Creating a new ticket method = self.space.api._post_json return method( self, space=self.space, rel_path=self.space._build_rel_path('tickets'), ) def delete(self): """ Remove the Ticket from Assembla """ if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can remove it from Assembla.") return self.space.api._delete_json( self, space=self.space, rel_path=self.space._build_rel_path('tickets'), )
markfinger/assembla
assembla/api.py
Ticket.milestone
python
def milestone(self, extra_params=None): if self.get('milestone_id', None): milestones = self.space.milestones(id=self['milestone_id'], extra_params=extra_params) if milestones: return milestones[0]
The Milestone that the Ticket is a part of
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L462-L469
[ "def get(self, *args, **kwargs):\n return self.data.get(*args, **kwargs)\n" ]
class Ticket(AssemblaObject): def tags(self, extra_params=None): """ All Tags in this Ticket """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Tag, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/tags' % self['number'] ), extra_params=params, get_all=True, # Retrieve all tags in the ticket ) def user(self, extra_params=None): """ The User currently assigned to the Ticket """ if self.get('assigned_to_id', None): users = self.space.users( id=self['assigned_to_id'], extra_params=extra_params ) if users: return users[0] def component(self, extra_params=None): """ The Component currently assigned to the Ticket """ if self.get('component_id', None): components = self.space.components(id=self['component_id'], extra_params=extra_params) if components: return components[0] @assembla_filter def comments(self, extra_params=None): """ All Comments in this Ticket """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( TicketComment, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/ticket_comments' % self['number'] ), extra_params=params, get_all=True, # Retrieve all comments in the ticket ) def write(self): """ Create or update the Ticket on Assembla """ if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can write it to Assembla.") if self.get('number'): # Modifying an existing ticket method = self.space.api._put_json else: # Creating a new ticket method = self.space.api._post_json return method( self, space=self.space, rel_path=self.space._build_rel_path('tickets'), ) def delete(self): """ Remove the Ticket from Assembla """ if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can remove it from Assembla.") return self.space.api._delete_json( self, space=self.space, 
rel_path=self.space._build_rel_path('tickets'), )
markfinger/assembla
assembla/api.py
Ticket.user
python
def user(self, extra_params=None): if self.get('assigned_to_id', None): users = self.space.users( id=self['assigned_to_id'], extra_params=extra_params ) if users: return users[0]
The User currently assigned to the Ticket
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L471-L481
[ "def get(self, *args, **kwargs):\n return self.data.get(*args, **kwargs)\n" ]
class Ticket(AssemblaObject): def tags(self, extra_params=None): """ All Tags in this Ticket """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Tag, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/tags' % self['number'] ), extra_params=params, get_all=True, # Retrieve all tags in the ticket ) def milestone(self, extra_params=None): """ The Milestone that the Ticket is a part of """ if self.get('milestone_id', None): milestones = self.space.milestones(id=self['milestone_id'], extra_params=extra_params) if milestones: return milestones[0] def component(self, extra_params=None): """ The Component currently assigned to the Ticket """ if self.get('component_id', None): components = self.space.components(id=self['component_id'], extra_params=extra_params) if components: return components[0] @assembla_filter def comments(self, extra_params=None): """ All Comments in this Ticket """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( TicketComment, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/ticket_comments' % self['number'] ), extra_params=params, get_all=True, # Retrieve all comments in the ticket ) def write(self): """ Create or update the Ticket on Assembla """ if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can write it to Assembla.") if self.get('number'): # Modifying an existing ticket method = self.space.api._put_json else: # Creating a new ticket method = self.space.api._post_json return method( self, space=self.space, rel_path=self.space._build_rel_path('tickets'), ) def delete(self): """ Remove the Ticket from Assembla """ if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can remove it from Assembla.") return self.space.api._delete_json( self, 
space=self.space, rel_path=self.space._build_rel_path('tickets'), )
markfinger/assembla
assembla/api.py
Ticket.component
python
def component(self, extra_params=None): if self.get('component_id', None): components = self.space.components(id=self['component_id'], extra_params=extra_params) if components: return components[0]
The Component currently assigned to the Ticket
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L483-L490
[ "def get(self, *args, **kwargs):\n return self.data.get(*args, **kwargs)\n" ]
class Ticket(AssemblaObject): def tags(self, extra_params=None): """ All Tags in this Ticket """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Tag, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/tags' % self['number'] ), extra_params=params, get_all=True, # Retrieve all tags in the ticket ) def milestone(self, extra_params=None): """ The Milestone that the Ticket is a part of """ if self.get('milestone_id', None): milestones = self.space.milestones(id=self['milestone_id'], extra_params=extra_params) if milestones: return milestones[0] def user(self, extra_params=None): """ The User currently assigned to the Ticket """ if self.get('assigned_to_id', None): users = self.space.users( id=self['assigned_to_id'], extra_params=extra_params ) if users: return users[0] @assembla_filter def comments(self, extra_params=None): """ All Comments in this Ticket """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( TicketComment, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/ticket_comments' % self['number'] ), extra_params=params, get_all=True, # Retrieve all comments in the ticket ) def write(self): """ Create or update the Ticket on Assembla """ if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can write it to Assembla.") if self.get('number'): # Modifying an existing ticket method = self.space.api._put_json else: # Creating a new ticket method = self.space.api._post_json return method( self, space=self.space, rel_path=self.space._build_rel_path('tickets'), ) def delete(self): """ Remove the Ticket from Assembla """ if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can remove it from Assembla.") return self.space.api._delete_json( self, space=self.space, 
rel_path=self.space._build_rel_path('tickets'), )
markfinger/assembla
assembla/api.py
Ticket.comments
python
def comments(self, extra_params=None): # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( TicketComment, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/ticket_comments' % self['number'] ), extra_params=params, get_all=True, # Retrieve all comments in the ticket )
All Comments in this Ticket
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L493-L514
null
class Ticket(AssemblaObject): def tags(self, extra_params=None): """ All Tags in this Ticket """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Tag, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/tags' % self['number'] ), extra_params=params, get_all=True, # Retrieve all tags in the ticket ) def milestone(self, extra_params=None): """ The Milestone that the Ticket is a part of """ if self.get('milestone_id', None): milestones = self.space.milestones(id=self['milestone_id'], extra_params=extra_params) if milestones: return milestones[0] def user(self, extra_params=None): """ The User currently assigned to the Ticket """ if self.get('assigned_to_id', None): users = self.space.users( id=self['assigned_to_id'], extra_params=extra_params ) if users: return users[0] def component(self, extra_params=None): """ The Component currently assigned to the Ticket """ if self.get('component_id', None): components = self.space.components(id=self['component_id'], extra_params=extra_params) if components: return components[0] @assembla_filter def write(self): """ Create or update the Ticket on Assembla """ if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can write it to Assembla.") if self.get('number'): # Modifying an existing ticket method = self.space.api._put_json else: # Creating a new ticket method = self.space.api._post_json return method( self, space=self.space, rel_path=self.space._build_rel_path('tickets'), ) def delete(self): """ Remove the Ticket from Assembla """ if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can remove it from Assembla.") return self.space.api._delete_json( self, space=self.space, rel_path=self.space._build_rel_path('tickets'), )
markfinger/assembla
assembla/api.py
Ticket.write
python
def write(self): if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can write it to Assembla.") if self.get('number'): # Modifying an existing ticket method = self.space.api._put_json else: # Creating a new ticket method = self.space.api._post_json return method( self, space=self.space, rel_path=self.space._build_rel_path('tickets'), )
Create or update the Ticket on Assembla
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L517-L533
null
class Ticket(AssemblaObject): def tags(self, extra_params=None): """ All Tags in this Ticket """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Tag, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/tags' % self['number'] ), extra_params=params, get_all=True, # Retrieve all tags in the ticket ) def milestone(self, extra_params=None): """ The Milestone that the Ticket is a part of """ if self.get('milestone_id', None): milestones = self.space.milestones(id=self['milestone_id'], extra_params=extra_params) if milestones: return milestones[0] def user(self, extra_params=None): """ The User currently assigned to the Ticket """ if self.get('assigned_to_id', None): users = self.space.users( id=self['assigned_to_id'], extra_params=extra_params ) if users: return users[0] def component(self, extra_params=None): """ The Component currently assigned to the Ticket """ if self.get('component_id', None): components = self.space.components(id=self['component_id'], extra_params=extra_params) if components: return components[0] @assembla_filter def comments(self, extra_params=None): """ All Comments in this Ticket """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( TicketComment, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/ticket_comments' % self['number'] ), extra_params=params, get_all=True, # Retrieve all comments in the ticket ) def delete(self): """ Remove the Ticket from Assembla """ if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can remove it from Assembla.") return self.space.api._delete_json( self, space=self.space, rel_path=self.space._build_rel_path('tickets'), )
markfinger/assembla
assembla/api.py
Ticket.delete
python
def delete(self): if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can remove it from Assembla.") return self.space.api._delete_json( self, space=self.space, rel_path=self.space._build_rel_path('tickets'), )
Remove the Ticket from Assembla
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L535-L546
null
class Ticket(AssemblaObject): def tags(self, extra_params=None): """ All Tags in this Ticket """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( Tag, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/tags' % self['number'] ), extra_params=params, get_all=True, # Retrieve all tags in the ticket ) def milestone(self, extra_params=None): """ The Milestone that the Ticket is a part of """ if self.get('milestone_id', None): milestones = self.space.milestones(id=self['milestone_id'], extra_params=extra_params) if milestones: return milestones[0] def user(self, extra_params=None): """ The User currently assigned to the Ticket """ if self.get('assigned_to_id', None): users = self.space.users( id=self['assigned_to_id'], extra_params=extra_params ) if users: return users[0] def component(self, extra_params=None): """ The Component currently assigned to the Ticket """ if self.get('component_id', None): components = self.space.components(id=self['component_id'], extra_params=extra_params) if components: return components[0] @assembla_filter def comments(self, extra_params=None): """ All Comments in this Ticket """ # Default params params = { 'per_page': settings.MAX_PER_PAGE, } if extra_params: params.update(extra_params) return self.api._get_json( TicketComment, space=self, rel_path=self.space._build_rel_path( 'tickets/%s/ticket_comments' % self['number'] ), extra_params=params, get_all=True, # Retrieve all comments in the ticket ) def write(self): """ Create or update the Ticket on Assembla """ if not hasattr(self, 'space'): raise AttributeError("A ticket must have a 'space' attribute before you can write it to Assembla.") if self.get('number'): # Modifying an existing ticket method = self.space.api._put_json else: # Creating a new ticket method = self.space.api._post_json return method( self, space=self.space, rel_path=self.space._build_rel_path('tickets'), )
markfinger/assembla
assembla/api.py
User.tickets
python
def tickets(self, extra_params=None): tickets = [] for space in self.api.spaces(): tickets += filter( lambda ticket: ticket.get('assigned_to_id', None) == self['id'], space.tickets(extra_params=extra_params) ) return tickets
A User's tickets across all available spaces
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L559-L569
null
class User(AssemblaObject): @assembla_filter
markfinger/assembla
assembla/api.py
WikiPage.write
python
def write(self): if not hasattr(self, 'space'): raise AttributeError("A WikiPage must have a 'space' attribute before you can write it to Assembla.") self.api = self.space.api if self.get('id'): # We are modifying an existing wiki page return self.api._put_json( self, space=self.space, rel_path=self.space._build_rel_path('wiki_pages'), id_field='id' ) else: # Creating a new wiki page return self.api._post_json( self, space=self.space, rel_path=self.space._build_rel_path('wiki_pages'), )
Create or update a Wiki Page on Assembla
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L573-L594
null
class WikiPage(AssemblaObject): def delete(self): """ Remove the WikiPage from Assembla """ if not hasattr(self, 'space'): raise AttributeError("A WikiPage must have a 'space' attribute before you can remove it from Assembla.") self.api = self.space.api return self.api._delete_json( self, space=self.space, rel_path=self.space._build_rel_path('wiki_pages'), id_field='id', append_to_path='/container' )
markfinger/assembla
assembla/lib.py
assembla_filter
python
def assembla_filter(func): @wraps(func) def wrapper(class_instance, **kwargs): # Get the result extra_params = kwargs.get('extra_params', None) if extra_params: del kwargs['extra_params'] results = func(class_instance, extra_params) # Filter the result if kwargs: results = filter( # Find the objects who have an equal number of matching attr/value # combinations as `len(kwargs)` lambda obj: len(kwargs) == len( filter( lambda boolean: boolean, [obj.get(attr_name) == value for attr_name, value in kwargs.iteritems()] ) ), results ) return results return wrapper
Filters :data for the objects in it which possess attributes equal in name/value to a key/value in kwargs. Each key/value combination in kwargs is compared against the object, so multiple keyword arguments can be passed in to constrain the filtering.
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/lib.py#L43-L76
null
from functools import wraps class AssemblaObject(object): """ Proxies getitem calls (eg: `instance['id']`) to a dictionary `instance.data['id']`. """ def __init__(self, data={}): self.data = data def __getitem__(self, key): return self.data[key] def __setitem__(self, key, value): self.data[key] = value def keys(self): return self.data.keys() def values(self): return self.data.values() def get(self, *args, **kwargs): return self.data.get(*args, **kwargs) def __repr__(self): # Most objects for field in ('menu_name', 'page_name', 'name',): if field in self.data: return '<%s: %s>' % (type(self).__name__, self.data[field]) # Tickets if ('number' in self.data) and ('summary' in self.data): return "<%s: #%s - %s>" % (type(self).__name__, self.data['number'], self.data['summary']) # Ticket Comments if 'id' in self.data: return "<%s: #%s>" % (type(self).__name__, self.data['id']) return super(AssemblaObject, self).__repr__()
rchatterjee/pwmodels
src/pwmodel/helper.py
gen_n_random_num
python
def gen_n_random_num(n, MAX_NUM=MAX_INT, unique=True): fmt = "<%dI" % n t = struct.calcsize(fmt) D = [d % MAX_NUM for d in struct.unpack(fmt, os.urandom(t))] if unique: D = set(D) assert MAX_NUM > n, \ "Cannot have {0} unique integers less than {1}".format(n, MAX_NUM) while len(D) < n: print("Number of collision: {}. Regenerating!".format(n - len(D))) fmt = "<%dI" % (n - len(D)) t = struct.calcsize(fmt) extra = struct.unpack(fmt, os.urandom(t)) D |= set(d % MAX_NUM for d in extra) D = list(D) return D
Returns @n @unique random unsigned integers (4 bytes) \ between 0 and @MAX_NUM.
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/helper.py#L123-L142
null
#!/usr/bin/python # from Crypto.Random import random import itertools import operator import os import random import re import string import struct import sys from functools import reduce import bz2 import gzip import functools from math import sqrt import dawg BASE_DIR = os.getcwd() sys.path.append(BASE_DIR) MAX_INT = 2 ** 64 - 1 DEBUG = os.getenv('DEBUG', False) START = '\x02' # chr(0x02) ascii for STX (start of text) END = '\x03' # chr(0x03) ascii for ETX (end of text) home = os.path.expanduser("~") thisdir = os.path.dirname(os.path.abspath(__file__)) ROCKYOU_TOTAL_CNT = 32603388.0 pw_characters = string.ascii_letters + string.digits + string.punctuation + ' ' L33T = {v: k for k,v in [('a', '@'), ('e', '3'), ('H', '#'), ('i', '1'), ('l', '1'), ('o', '0'), ('O', '0'), ('t', '1'), ('w', 'v')]} class memoized(object): '''Decorator. Caches a function's return value each time it is called. If called later with the same arguments, the cached value is returned (not reevaluated). ''' def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): # if not isinstance(args, collections.Hashable): # # uncacheable. a list, for instance. # # better to not cache than blow up. 
# print ("Uncachebale", args) # return self.func(*args) try: return self.cache[args[0]][args[1:]] except KeyError: value = self.func(*args) try: self.cache[args[0]][args[1:]] = value except KeyError: self.cache[args[0]] = {args[1:]: value} # if random.randint(0,10000)==0: # print ("Printing cache size:", file=sys.stderr) # for k,v in self.cache.items(): # print (">>", repr(k), len(v), file=sys.stderr) return value def __repr__(self): '''Return the function's docstring.''' return self.func.__doc__ def __get__(self, obj, objtype): '''Support instance methods.''' return functools.partial(self.__call__, obj) class random: @staticmethod def randints(s, e, n=1): """ returns n uniform random numbers from [s, e] """ assert e >= s, "Wrong range: [{}, {})".format(s, e) n = max(1, n) arr = [s + a % (e - s) for a in struct.unpack('<%dL' % n, os.urandom(4 * n))] return arr @staticmethod def randint(s, e): """ returns one random integer between s and e. Try using @randints in case you need multiple random integer. @randints is more efficient """ return random.randints(s, e, 1)[0] @staticmethod def choice(arr): i = random.randint(0, len(arr)) return arr[i] @staticmethod def sample(arr, n, unique=False): if unique: arr = set(arr) assert len(arr) > n, "Cannot sample uniquely from a small array." if len(arr) == n: return arr if n > len(arr) / 2: res = list(arr) while len(res) > n: del res[random.randint(0, len(res))] else: res = [] arr = list(arr) while len(res) < n: i = random.randint(0, len(arr)) res.append(arr[i]) del arr[i] else: return [arr[i] for i in random.randints(0, len(arr), n)] def sample_following_dist(handle_iter, n, totalf): """Samples n passwords following the distribution from the handle @handle_iter is an iterator that gives (pw,f) @n is the total number of samle asked for @totalf is the total number of users, which is euqal to sum(f for pw,f in handle_iter) As, handle_iterator is an iterator and can only traverse once. @totalf needs to be supplied to the funciton. 
@handle_iter must be sorted in decreasing order Returns, an array of @n tuples (id, pw) sampled from @handle_iter. """ multiplier = 1.0 if totalf == 1.0: multiplier = 1e8 # print "WARNING!! I don't except probabilities" totalf = totalf * multiplier # print("# Population Size", totalf) A = gen_n_random_num(n, totalf, unique=False) A.sort(reverse=True) # Uniqueness check, non necessarily required, but not very # computationally intensive assert len(A) == n, "Not enough randomnumbers generated" \ "Requried {}, generated only {}".format(n, len(A)) # if not all(A[i] != A[i-1] for i in range(1,n,1)): # for i in range(1,n,1): # if A[i] == A[i-1]: # print i, A[i], A[i-1] j, sampled = 0, 0 val = A.pop() # print(handle_iter) for w, f in handle_iter: j += f * multiplier while val < j: sampled += 1 if sampled % 5000 == 0: print("Sampled:", sampled) yield (val, w) if A: val = A.pop() else: val = -1 break if not A and val == -1: break # print("# Stopped at:", w, f, j, '\n', file=sys.stderr) while A and val < j: yield (val, w) if A: i, val = A.pop() else: break try: # import pyximport; pyximport.install() from ._fast import compute_ngrams def ngramsofw(word, n, maxn=0): return compute_ngrams(word, n, maxn) except ImportError as ex: print(ex) exit(0) def ngramsofw(word, n, maxn=0): """Returns the @n-grams of a word @w """ print(">>> SLOW ngram computation") word = START + word + END ngrams = [] for ngram_length in range(n, min(len(word), maxn) + 1): for i in range(0, len(word) - ngram_length + 1): ngrams.append(word[i:i + ngram_length]) return ngrams def MILLION(n): return int(n * 1e6) def file_type(filename, param='rb'): """returns the type of file, e.g., gz, bz2, normal""" magic_dict = { b"\x1f\x8b\x08": "gz", b"\x42\x5a\x68": "bz2", b"\x50\x4b\x03\x04": "zip" } if param.startswith('w'): return filename.split('.')[-1] max_len = max(len(x) for x in magic_dict) with open(filename, 'rb') as f: file_start = f.read(max_len) for magic, filetype in list(magic_dict.items()): if 
file_start.startswith(magic): return filetype return "no match" def open_(filename, mode='rb'): type_ = file_type(filename, mode) errors = 'ignore' if 't' in mode else None if type_ == "bz2": f = bz2.open(filename, mode, errors=errors) elif type_ == "gz": f = gzip.open(filename, mode, errors=errors) else: f = open(filename, mode) return f def load_dawg(f, t=dawg.IntDAWG): if not f.endswith('.gz'): if not os.path.exists(f): f += '.gz' T = t() T.read(open_(f, 'rb')) return T def save_dawg(T, fname): if not fname.endswith('gz'): fname = fname + '.gz' with gzip.open(fname, 'wb') as f: T.write(f) def getallgroups(arr, k=-1): """ returns all the subset of @arr of size less than equalto @k the return array will be of size \sum_{i=1}^k nCi, n = len(arr) """ if k < 0: k = len(arr) return itertools.chain.from_iterable(itertools.combinations(set(arr), j) for j in range(1, k + 1)) def isascii(s): try: s.encode('ascii') return True except (UnicodeDecodeError, UnicodeEncodeError) as e: # warning("UnicodeError:", s, str(e)) return False def get_line(file_object, limit=-1, sep=r'\s+', pw_filter=lambda x: True, errors='replace'): # regex = re.compile(r'\s*([0-9]+) (.*)$') i = 0 print("sep={!r}".format(sep)) for l in file_object: if limit > 0 and limit <= i: break t = re.split(sep, l.rstrip('\n').lstrip(), maxsplit=1) if len(t) != 2: continue c, w = t c = int(c) w = w.replace('\x00', '\\x00') # if not isascii(w): # print("Not ascii, ignoring...") # continue if w and pw_filter(w) and c > 0: i += 1 yield w, c else: pass # warning ("Filter Failed or malformed string: ", w, c) def open_get_line(filename, limit=-1, **kwargs): """Opens the password file named @filename and reads first @limit passwords. @kwargs are passed to get_line for further processing. For example, pw_filter etc. 
@fielname: string @limit: integer """ allowed_keys_for_get_line = {'sep', 'pw_filter', 'errors'} for k in list(kwargs.keys()): if k not in allowed_keys_for_get_line: del kwargs[k] print("After filtering: {}".format(kwargs)) with open_(filename, 'rt') as f: for w, c in get_line(f, limit, **kwargs): yield w, c # TODO - Optimize the tokenization process regex = r'([A-Za-z_]+)|([0-9]+)|(\W+)' def print_err(*args): if DEBUG: sys.stderr.write(' '.join([str(a) for a in args]) + '\n') def tokens(w): T = [] while w: m = re.match(regex, w) T.append(m.group(0)) w = w[len(T[-1]):] return T def whatchar(c): if c.isalpha(): return 'L'; if c.isdigit(): return 'D'; else: return 'Y' def mean_sd(arr): s = sum(arr) s2 = sum([x * x for x in arr]) n = len(arr) m = s / float(n) sd = sqrt(float(s2) / n - m * m) return m, sd def prod(arr): return reduce(operator.mul, arr, 1) def convert2group(t, totalC): return t + random.randint(0, (MAX_INT - t) / totalC) * totalC def warning(*objs): if DEBUG: print("WARNING: ", *objs, file=sys.stderr) # assumes last element in the array(A) is the sum of all elements def getIndex(p, A): p %= A[-1] i = 0; for i, v in enumerate(A): p -= v; if p < 0: break return i def dp(**kwargs): print(kwargs, file=sys.stderr) if __name__ == "__main__": print(list(getallgroups([1, 2, 3, 4, 5, 6, 7, 8, 9], 5))) # unittest.main()
rchatterjee/pwmodels
src/pwmodel/helper.py
sample_following_dist
python
def sample_following_dist(handle_iter, n, totalf): multiplier = 1.0 if totalf == 1.0: multiplier = 1e8 # print "WARNING!! I don't except probabilities" totalf = totalf * multiplier # print("# Population Size", totalf) A = gen_n_random_num(n, totalf, unique=False) A.sort(reverse=True) # Uniqueness check, non necessarily required, but not very # computationally intensive assert len(A) == n, "Not enough randomnumbers generated" \ "Requried {}, generated only {}".format(n, len(A)) # if not all(A[i] != A[i-1] for i in range(1,n,1)): # for i in range(1,n,1): # if A[i] == A[i-1]: # print i, A[i], A[i-1] j, sampled = 0, 0 val = A.pop() # print(handle_iter) for w, f in handle_iter: j += f * multiplier while val < j: sampled += 1 if sampled % 5000 == 0: print("Sampled:", sampled) yield (val, w) if A: val = A.pop() else: val = -1 break if not A and val == -1: break # print("# Stopped at:", w, f, j, '\n', file=sys.stderr) while A and val < j: yield (val, w) if A: i, val = A.pop() else: break
Samples n passwords following the distribution from the handle @handle_iter is an iterator that gives (pw,f) @n is the total number of samle asked for @totalf is the total number of users, which is euqal to sum(f for pw,f in handle_iter) As, handle_iterator is an iterator and can only traverse once. @totalf needs to be supplied to the funciton. @handle_iter must be sorted in decreasing order Returns, an array of @n tuples (id, pw) sampled from @handle_iter.
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/helper.py#L145-L199
[ "def gen_n_random_num(n, MAX_NUM=MAX_INT, unique=True):\n \"\"\"\n Returns @n @unique random unsigned integers (4 bytes) \\\n between 0 and @MAX_NUM.\n \"\"\"\n fmt = \"<%dI\" % n\n t = struct.calcsize(fmt)\n D = [d % MAX_NUM for d in struct.unpack(fmt, os.urandom(t))]\n if unique:\n ...
#!/usr/bin/python # from Crypto.Random import random import itertools import operator import os import random import re import string import struct import sys from functools import reduce import bz2 import gzip import functools from math import sqrt import dawg BASE_DIR = os.getcwd() sys.path.append(BASE_DIR) MAX_INT = 2 ** 64 - 1 DEBUG = os.getenv('DEBUG', False) START = '\x02' # chr(0x02) ascii for STX (start of text) END = '\x03' # chr(0x03) ascii for ETX (end of text) home = os.path.expanduser("~") thisdir = os.path.dirname(os.path.abspath(__file__)) ROCKYOU_TOTAL_CNT = 32603388.0 pw_characters = string.ascii_letters + string.digits + string.punctuation + ' ' L33T = {v: k for k,v in [('a', '@'), ('e', '3'), ('H', '#'), ('i', '1'), ('l', '1'), ('o', '0'), ('O', '0'), ('t', '1'), ('w', 'v')]} class memoized(object): '''Decorator. Caches a function's return value each time it is called. If called later with the same arguments, the cached value is returned (not reevaluated). ''' def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): # if not isinstance(args, collections.Hashable): # # uncacheable. a list, for instance. # # better to not cache than blow up. 
# print ("Uncachebale", args) # return self.func(*args) try: return self.cache[args[0]][args[1:]] except KeyError: value = self.func(*args) try: self.cache[args[0]][args[1:]] = value except KeyError: self.cache[args[0]] = {args[1:]: value} # if random.randint(0,10000)==0: # print ("Printing cache size:", file=sys.stderr) # for k,v in self.cache.items(): # print (">>", repr(k), len(v), file=sys.stderr) return value def __repr__(self): '''Return the function's docstring.''' return self.func.__doc__ def __get__(self, obj, objtype): '''Support instance methods.''' return functools.partial(self.__call__, obj) class random: @staticmethod def randints(s, e, n=1): """ returns n uniform random numbers from [s, e] """ assert e >= s, "Wrong range: [{}, {})".format(s, e) n = max(1, n) arr = [s + a % (e - s) for a in struct.unpack('<%dL' % n, os.urandom(4 * n))] return arr @staticmethod def randint(s, e): """ returns one random integer between s and e. Try using @randints in case you need multiple random integer. @randints is more efficient """ return random.randints(s, e, 1)[0] @staticmethod def choice(arr): i = random.randint(0, len(arr)) return arr[i] @staticmethod def sample(arr, n, unique=False): if unique: arr = set(arr) assert len(arr) > n, "Cannot sample uniquely from a small array." if len(arr) == n: return arr if n > len(arr) / 2: res = list(arr) while len(res) > n: del res[random.randint(0, len(res))] else: res = [] arr = list(arr) while len(res) < n: i = random.randint(0, len(arr)) res.append(arr[i]) del arr[i] else: return [arr[i] for i in random.randints(0, len(arr), n)] def gen_n_random_num(n, MAX_NUM=MAX_INT, unique=True): """ Returns @n @unique random unsigned integers (4 bytes) \ between 0 and @MAX_NUM. 
""" fmt = "<%dI" % n t = struct.calcsize(fmt) D = [d % MAX_NUM for d in struct.unpack(fmt, os.urandom(t))] if unique: D = set(D) assert MAX_NUM > n, \ "Cannot have {0} unique integers less than {1}".format(n, MAX_NUM) while len(D) < n: print("Number of collision: {}. Regenerating!".format(n - len(D))) fmt = "<%dI" % (n - len(D)) t = struct.calcsize(fmt) extra = struct.unpack(fmt, os.urandom(t)) D |= set(d % MAX_NUM for d in extra) D = list(D) return D try: # import pyximport; pyximport.install() from ._fast import compute_ngrams def ngramsofw(word, n, maxn=0): return compute_ngrams(word, n, maxn) except ImportError as ex: print(ex) exit(0) def ngramsofw(word, n, maxn=0): """Returns the @n-grams of a word @w """ print(">>> SLOW ngram computation") word = START + word + END ngrams = [] for ngram_length in range(n, min(len(word), maxn) + 1): for i in range(0, len(word) - ngram_length + 1): ngrams.append(word[i:i + ngram_length]) return ngrams def MILLION(n): return int(n * 1e6) def file_type(filename, param='rb'): """returns the type of file, e.g., gz, bz2, normal""" magic_dict = { b"\x1f\x8b\x08": "gz", b"\x42\x5a\x68": "bz2", b"\x50\x4b\x03\x04": "zip" } if param.startswith('w'): return filename.split('.')[-1] max_len = max(len(x) for x in magic_dict) with open(filename, 'rb') as f: file_start = f.read(max_len) for magic, filetype in list(magic_dict.items()): if file_start.startswith(magic): return filetype return "no match" def open_(filename, mode='rb'): type_ = file_type(filename, mode) errors = 'ignore' if 't' in mode else None if type_ == "bz2": f = bz2.open(filename, mode, errors=errors) elif type_ == "gz": f = gzip.open(filename, mode, errors=errors) else: f = open(filename, mode) return f def load_dawg(f, t=dawg.IntDAWG): if not f.endswith('.gz'): if not os.path.exists(f): f += '.gz' T = t() T.read(open_(f, 'rb')) return T def save_dawg(T, fname): if not fname.endswith('gz'): fname = fname + '.gz' with gzip.open(fname, 'wb') as f: T.write(f) def 
getallgroups(arr, k=-1): """ returns all the subset of @arr of size less than equalto @k the return array will be of size \sum_{i=1}^k nCi, n = len(arr) """ if k < 0: k = len(arr) return itertools.chain.from_iterable(itertools.combinations(set(arr), j) for j in range(1, k + 1)) def isascii(s): try: s.encode('ascii') return True except (UnicodeDecodeError, UnicodeEncodeError) as e: # warning("UnicodeError:", s, str(e)) return False def get_line(file_object, limit=-1, sep=r'\s+', pw_filter=lambda x: True, errors='replace'): # regex = re.compile(r'\s*([0-9]+) (.*)$') i = 0 print("sep={!r}".format(sep)) for l in file_object: if limit > 0 and limit <= i: break t = re.split(sep, l.rstrip('\n').lstrip(), maxsplit=1) if len(t) != 2: continue c, w = t c = int(c) w = w.replace('\x00', '\\x00') # if not isascii(w): # print("Not ascii, ignoring...") # continue if w and pw_filter(w) and c > 0: i += 1 yield w, c else: pass # warning ("Filter Failed or malformed string: ", w, c) def open_get_line(filename, limit=-1, **kwargs): """Opens the password file named @filename and reads first @limit passwords. @kwargs are passed to get_line for further processing. For example, pw_filter etc. 
@fielname: string @limit: integer """ allowed_keys_for_get_line = {'sep', 'pw_filter', 'errors'} for k in list(kwargs.keys()): if k not in allowed_keys_for_get_line: del kwargs[k] print("After filtering: {}".format(kwargs)) with open_(filename, 'rt') as f: for w, c in get_line(f, limit, **kwargs): yield w, c # TODO - Optimize the tokenization process regex = r'([A-Za-z_]+)|([0-9]+)|(\W+)' def print_err(*args): if DEBUG: sys.stderr.write(' '.join([str(a) for a in args]) + '\n') def tokens(w): T = [] while w: m = re.match(regex, w) T.append(m.group(0)) w = w[len(T[-1]):] return T def whatchar(c): if c.isalpha(): return 'L'; if c.isdigit(): return 'D'; else: return 'Y' def mean_sd(arr): s = sum(arr) s2 = sum([x * x for x in arr]) n = len(arr) m = s / float(n) sd = sqrt(float(s2) / n - m * m) return m, sd def prod(arr): return reduce(operator.mul, arr, 1) def convert2group(t, totalC): return t + random.randint(0, (MAX_INT - t) / totalC) * totalC def warning(*objs): if DEBUG: print("WARNING: ", *objs, file=sys.stderr) # assumes last element in the array(A) is the sum of all elements def getIndex(p, A): p %= A[-1] i = 0; for i, v in enumerate(A): p -= v; if p < 0: break return i def dp(**kwargs): print(kwargs, file=sys.stderr) if __name__ == "__main__": print(list(getallgroups([1, 2, 3, 4, 5, 6, 7, 8, 9], 5))) # unittest.main()
rchatterjee/pwmodels
src/pwmodel/helper.py
file_type
python
def file_type(filename, param='rb'): magic_dict = { b"\x1f\x8b\x08": "gz", b"\x42\x5a\x68": "bz2", b"\x50\x4b\x03\x04": "zip" } if param.startswith('w'): return filename.split('.')[-1] max_len = max(len(x) for x in magic_dict) with open(filename, 'rb') as f: file_start = f.read(max_len) for magic, filetype in list(magic_dict.items()): if file_start.startswith(magic): return filetype return "no match"
returns the type of file, e.g., gz, bz2, normal
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/helper.py#L227-L242
null
#!/usr/bin/python # from Crypto.Random import random import itertools import operator import os import random import re import string import struct import sys from functools import reduce import bz2 import gzip import functools from math import sqrt import dawg BASE_DIR = os.getcwd() sys.path.append(BASE_DIR) MAX_INT = 2 ** 64 - 1 DEBUG = os.getenv('DEBUG', False) START = '\x02' # chr(0x02) ascii for STX (start of text) END = '\x03' # chr(0x03) ascii for ETX (end of text) home = os.path.expanduser("~") thisdir = os.path.dirname(os.path.abspath(__file__)) ROCKYOU_TOTAL_CNT = 32603388.0 pw_characters = string.ascii_letters + string.digits + string.punctuation + ' ' L33T = {v: k for k,v in [('a', '@'), ('e', '3'), ('H', '#'), ('i', '1'), ('l', '1'), ('o', '0'), ('O', '0'), ('t', '1'), ('w', 'v')]} class memoized(object): '''Decorator. Caches a function's return value each time it is called. If called later with the same arguments, the cached value is returned (not reevaluated). ''' def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): # if not isinstance(args, collections.Hashable): # # uncacheable. a list, for instance. # # better to not cache than blow up. 
# print ("Uncachebale", args) # return self.func(*args) try: return self.cache[args[0]][args[1:]] except KeyError: value = self.func(*args) try: self.cache[args[0]][args[1:]] = value except KeyError: self.cache[args[0]] = {args[1:]: value} # if random.randint(0,10000)==0: # print ("Printing cache size:", file=sys.stderr) # for k,v in self.cache.items(): # print (">>", repr(k), len(v), file=sys.stderr) return value def __repr__(self): '''Return the function's docstring.''' return self.func.__doc__ def __get__(self, obj, objtype): '''Support instance methods.''' return functools.partial(self.__call__, obj) class random: @staticmethod def randints(s, e, n=1): """ returns n uniform random numbers from [s, e] """ assert e >= s, "Wrong range: [{}, {})".format(s, e) n = max(1, n) arr = [s + a % (e - s) for a in struct.unpack('<%dL' % n, os.urandom(4 * n))] return arr @staticmethod def randint(s, e): """ returns one random integer between s and e. Try using @randints in case you need multiple random integer. @randints is more efficient """ return random.randints(s, e, 1)[0] @staticmethod def choice(arr): i = random.randint(0, len(arr)) return arr[i] @staticmethod def sample(arr, n, unique=False): if unique: arr = set(arr) assert len(arr) > n, "Cannot sample uniquely from a small array." if len(arr) == n: return arr if n > len(arr) / 2: res = list(arr) while len(res) > n: del res[random.randint(0, len(res))] else: res = [] arr = list(arr) while len(res) < n: i = random.randint(0, len(arr)) res.append(arr[i]) del arr[i] else: return [arr[i] for i in random.randints(0, len(arr), n)] def gen_n_random_num(n, MAX_NUM=MAX_INT, unique=True): """ Returns @n @unique random unsigned integers (4 bytes) \ between 0 and @MAX_NUM. 
""" fmt = "<%dI" % n t = struct.calcsize(fmt) D = [d % MAX_NUM for d in struct.unpack(fmt, os.urandom(t))] if unique: D = set(D) assert MAX_NUM > n, \ "Cannot have {0} unique integers less than {1}".format(n, MAX_NUM) while len(D) < n: print("Number of collision: {}. Regenerating!".format(n - len(D))) fmt = "<%dI" % (n - len(D)) t = struct.calcsize(fmt) extra = struct.unpack(fmt, os.urandom(t)) D |= set(d % MAX_NUM for d in extra) D = list(D) return D def sample_following_dist(handle_iter, n, totalf): """Samples n passwords following the distribution from the handle @handle_iter is an iterator that gives (pw,f) @n is the total number of samle asked for @totalf is the total number of users, which is euqal to sum(f for pw,f in handle_iter) As, handle_iterator is an iterator and can only traverse once. @totalf needs to be supplied to the funciton. @handle_iter must be sorted in decreasing order Returns, an array of @n tuples (id, pw) sampled from @handle_iter. """ multiplier = 1.0 if totalf == 1.0: multiplier = 1e8 # print "WARNING!! 
I don't except probabilities" totalf = totalf * multiplier # print("# Population Size", totalf) A = gen_n_random_num(n, totalf, unique=False) A.sort(reverse=True) # Uniqueness check, non necessarily required, but not very # computationally intensive assert len(A) == n, "Not enough randomnumbers generated" \ "Requried {}, generated only {}".format(n, len(A)) # if not all(A[i] != A[i-1] for i in range(1,n,1)): # for i in range(1,n,1): # if A[i] == A[i-1]: # print i, A[i], A[i-1] j, sampled = 0, 0 val = A.pop() # print(handle_iter) for w, f in handle_iter: j += f * multiplier while val < j: sampled += 1 if sampled % 5000 == 0: print("Sampled:", sampled) yield (val, w) if A: val = A.pop() else: val = -1 break if not A and val == -1: break # print("# Stopped at:", w, f, j, '\n', file=sys.stderr) while A and val < j: yield (val, w) if A: i, val = A.pop() else: break try: # import pyximport; pyximport.install() from ._fast import compute_ngrams def ngramsofw(word, n, maxn=0): return compute_ngrams(word, n, maxn) except ImportError as ex: print(ex) exit(0) def ngramsofw(word, n, maxn=0): """Returns the @n-grams of a word @w """ print(">>> SLOW ngram computation") word = START + word + END ngrams = [] for ngram_length in range(n, min(len(word), maxn) + 1): for i in range(0, len(word) - ngram_length + 1): ngrams.append(word[i:i + ngram_length]) return ngrams def MILLION(n): return int(n * 1e6) def open_(filename, mode='rb'): type_ = file_type(filename, mode) errors = 'ignore' if 't' in mode else None if type_ == "bz2": f = bz2.open(filename, mode, errors=errors) elif type_ == "gz": f = gzip.open(filename, mode, errors=errors) else: f = open(filename, mode) return f def load_dawg(f, t=dawg.IntDAWG): if not f.endswith('.gz'): if not os.path.exists(f): f += '.gz' T = t() T.read(open_(f, 'rb')) return T def save_dawg(T, fname): if not fname.endswith('gz'): fname = fname + '.gz' with gzip.open(fname, 'wb') as f: T.write(f) def getallgroups(arr, k=-1): """ returns all the subset 
of @arr of size less than equalto @k the return array will be of size \sum_{i=1}^k nCi, n = len(arr) """ if k < 0: k = len(arr) return itertools.chain.from_iterable(itertools.combinations(set(arr), j) for j in range(1, k + 1)) def isascii(s): try: s.encode('ascii') return True except (UnicodeDecodeError, UnicodeEncodeError) as e: # warning("UnicodeError:", s, str(e)) return False def get_line(file_object, limit=-1, sep=r'\s+', pw_filter=lambda x: True, errors='replace'): # regex = re.compile(r'\s*([0-9]+) (.*)$') i = 0 print("sep={!r}".format(sep)) for l in file_object: if limit > 0 and limit <= i: break t = re.split(sep, l.rstrip('\n').lstrip(), maxsplit=1) if len(t) != 2: continue c, w = t c = int(c) w = w.replace('\x00', '\\x00') # if not isascii(w): # print("Not ascii, ignoring...") # continue if w and pw_filter(w) and c > 0: i += 1 yield w, c else: pass # warning ("Filter Failed or malformed string: ", w, c) def open_get_line(filename, limit=-1, **kwargs): """Opens the password file named @filename and reads first @limit passwords. @kwargs are passed to get_line for further processing. For example, pw_filter etc. 
@fielname: string @limit: integer """ allowed_keys_for_get_line = {'sep', 'pw_filter', 'errors'} for k in list(kwargs.keys()): if k not in allowed_keys_for_get_line: del kwargs[k] print("After filtering: {}".format(kwargs)) with open_(filename, 'rt') as f: for w, c in get_line(f, limit, **kwargs): yield w, c # TODO - Optimize the tokenization process regex = r'([A-Za-z_]+)|([0-9]+)|(\W+)' def print_err(*args): if DEBUG: sys.stderr.write(' '.join([str(a) for a in args]) + '\n') def tokens(w): T = [] while w: m = re.match(regex, w) T.append(m.group(0)) w = w[len(T[-1]):] return T def whatchar(c): if c.isalpha(): return 'L'; if c.isdigit(): return 'D'; else: return 'Y' def mean_sd(arr): s = sum(arr) s2 = sum([x * x for x in arr]) n = len(arr) m = s / float(n) sd = sqrt(float(s2) / n - m * m) return m, sd def prod(arr): return reduce(operator.mul, arr, 1) def convert2group(t, totalC): return t + random.randint(0, (MAX_INT - t) / totalC) * totalC def warning(*objs): if DEBUG: print("WARNING: ", *objs, file=sys.stderr) # assumes last element in the array(A) is the sum of all elements def getIndex(p, A): p %= A[-1] i = 0; for i, v in enumerate(A): p -= v; if p < 0: break return i def dp(**kwargs): print(kwargs, file=sys.stderr) if __name__ == "__main__": print(list(getallgroups([1, 2, 3, 4, 5, 6, 7, 8, 9], 5))) # unittest.main()
rchatterjee/pwmodels
src/pwmodel/helper.py
getallgroups
python
def getallgroups(arr, k=-1): if k < 0: k = len(arr) return itertools.chain.from_iterable(itertools.combinations(set(arr), j) for j in range(1, k + 1))
returns all the subset of @arr of size less than equalto @k the return array will be of size \sum_{i=1}^k nCi, n = len(arr)
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/helper.py#L273-L281
null
#!/usr/bin/python # from Crypto.Random import random import itertools import operator import os import random import re import string import struct import sys from functools import reduce import bz2 import gzip import functools from math import sqrt import dawg BASE_DIR = os.getcwd() sys.path.append(BASE_DIR) MAX_INT = 2 ** 64 - 1 DEBUG = os.getenv('DEBUG', False) START = '\x02' # chr(0x02) ascii for STX (start of text) END = '\x03' # chr(0x03) ascii for ETX (end of text) home = os.path.expanduser("~") thisdir = os.path.dirname(os.path.abspath(__file__)) ROCKYOU_TOTAL_CNT = 32603388.0 pw_characters = string.ascii_letters + string.digits + string.punctuation + ' ' L33T = {v: k for k,v in [('a', '@'), ('e', '3'), ('H', '#'), ('i', '1'), ('l', '1'), ('o', '0'), ('O', '0'), ('t', '1'), ('w', 'v')]} class memoized(object): '''Decorator. Caches a function's return value each time it is called. If called later with the same arguments, the cached value is returned (not reevaluated). ''' def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): # if not isinstance(args, collections.Hashable): # # uncacheable. a list, for instance. # # better to not cache than blow up. 
# print ("Uncachebale", args) # return self.func(*args) try: return self.cache[args[0]][args[1:]] except KeyError: value = self.func(*args) try: self.cache[args[0]][args[1:]] = value except KeyError: self.cache[args[0]] = {args[1:]: value} # if random.randint(0,10000)==0: # print ("Printing cache size:", file=sys.stderr) # for k,v in self.cache.items(): # print (">>", repr(k), len(v), file=sys.stderr) return value def __repr__(self): '''Return the function's docstring.''' return self.func.__doc__ def __get__(self, obj, objtype): '''Support instance methods.''' return functools.partial(self.__call__, obj) class random: @staticmethod def randints(s, e, n=1): """ returns n uniform random numbers from [s, e] """ assert e >= s, "Wrong range: [{}, {})".format(s, e) n = max(1, n) arr = [s + a % (e - s) for a in struct.unpack('<%dL' % n, os.urandom(4 * n))] return arr @staticmethod def randint(s, e): """ returns one random integer between s and e. Try using @randints in case you need multiple random integer. @randints is more efficient """ return random.randints(s, e, 1)[0] @staticmethod def choice(arr): i = random.randint(0, len(arr)) return arr[i] @staticmethod def sample(arr, n, unique=False): if unique: arr = set(arr) assert len(arr) > n, "Cannot sample uniquely from a small array." if len(arr) == n: return arr if n > len(arr) / 2: res = list(arr) while len(res) > n: del res[random.randint(0, len(res))] else: res = [] arr = list(arr) while len(res) < n: i = random.randint(0, len(arr)) res.append(arr[i]) del arr[i] else: return [arr[i] for i in random.randints(0, len(arr), n)] def gen_n_random_num(n, MAX_NUM=MAX_INT, unique=True): """ Returns @n @unique random unsigned integers (4 bytes) \ between 0 and @MAX_NUM. 
""" fmt = "<%dI" % n t = struct.calcsize(fmt) D = [d % MAX_NUM for d in struct.unpack(fmt, os.urandom(t))] if unique: D = set(D) assert MAX_NUM > n, \ "Cannot have {0} unique integers less than {1}".format(n, MAX_NUM) while len(D) < n: print("Number of collision: {}. Regenerating!".format(n - len(D))) fmt = "<%dI" % (n - len(D)) t = struct.calcsize(fmt) extra = struct.unpack(fmt, os.urandom(t)) D |= set(d % MAX_NUM for d in extra) D = list(D) return D def sample_following_dist(handle_iter, n, totalf): """Samples n passwords following the distribution from the handle @handle_iter is an iterator that gives (pw,f) @n is the total number of samle asked for @totalf is the total number of users, which is euqal to sum(f for pw,f in handle_iter) As, handle_iterator is an iterator and can only traverse once. @totalf needs to be supplied to the funciton. @handle_iter must be sorted in decreasing order Returns, an array of @n tuples (id, pw) sampled from @handle_iter. """ multiplier = 1.0 if totalf == 1.0: multiplier = 1e8 # print "WARNING!! 
I don't except probabilities" totalf = totalf * multiplier # print("# Population Size", totalf) A = gen_n_random_num(n, totalf, unique=False) A.sort(reverse=True) # Uniqueness check, non necessarily required, but not very # computationally intensive assert len(A) == n, "Not enough randomnumbers generated" \ "Requried {}, generated only {}".format(n, len(A)) # if not all(A[i] != A[i-1] for i in range(1,n,1)): # for i in range(1,n,1): # if A[i] == A[i-1]: # print i, A[i], A[i-1] j, sampled = 0, 0 val = A.pop() # print(handle_iter) for w, f in handle_iter: j += f * multiplier while val < j: sampled += 1 if sampled % 5000 == 0: print("Sampled:", sampled) yield (val, w) if A: val = A.pop() else: val = -1 break if not A and val == -1: break # print("# Stopped at:", w, f, j, '\n', file=sys.stderr) while A and val < j: yield (val, w) if A: i, val = A.pop() else: break try: # import pyximport; pyximport.install() from ._fast import compute_ngrams def ngramsofw(word, n, maxn=0): return compute_ngrams(word, n, maxn) except ImportError as ex: print(ex) exit(0) def ngramsofw(word, n, maxn=0): """Returns the @n-grams of a word @w """ print(">>> SLOW ngram computation") word = START + word + END ngrams = [] for ngram_length in range(n, min(len(word), maxn) + 1): for i in range(0, len(word) - ngram_length + 1): ngrams.append(word[i:i + ngram_length]) return ngrams def MILLION(n): return int(n * 1e6) def file_type(filename, param='rb'): """returns the type of file, e.g., gz, bz2, normal""" magic_dict = { b"\x1f\x8b\x08": "gz", b"\x42\x5a\x68": "bz2", b"\x50\x4b\x03\x04": "zip" } if param.startswith('w'): return filename.split('.')[-1] max_len = max(len(x) for x in magic_dict) with open(filename, 'rb') as f: file_start = f.read(max_len) for magic, filetype in list(magic_dict.items()): if file_start.startswith(magic): return filetype return "no match" def open_(filename, mode='rb'): type_ = file_type(filename, mode) errors = 'ignore' if 't' in mode else None if type_ == "bz2": f = 
bz2.open(filename, mode, errors=errors) elif type_ == "gz": f = gzip.open(filename, mode, errors=errors) else: f = open(filename, mode) return f def load_dawg(f, t=dawg.IntDAWG): if not f.endswith('.gz'): if not os.path.exists(f): f += '.gz' T = t() T.read(open_(f, 'rb')) return T def save_dawg(T, fname): if not fname.endswith('gz'): fname = fname + '.gz' with gzip.open(fname, 'wb') as f: T.write(f) def isascii(s): try: s.encode('ascii') return True except (UnicodeDecodeError, UnicodeEncodeError) as e: # warning("UnicodeError:", s, str(e)) return False def get_line(file_object, limit=-1, sep=r'\s+', pw_filter=lambda x: True, errors='replace'): # regex = re.compile(r'\s*([0-9]+) (.*)$') i = 0 print("sep={!r}".format(sep)) for l in file_object: if limit > 0 and limit <= i: break t = re.split(sep, l.rstrip('\n').lstrip(), maxsplit=1) if len(t) != 2: continue c, w = t c = int(c) w = w.replace('\x00', '\\x00') # if not isascii(w): # print("Not ascii, ignoring...") # continue if w and pw_filter(w) and c > 0: i += 1 yield w, c else: pass # warning ("Filter Failed or malformed string: ", w, c) def open_get_line(filename, limit=-1, **kwargs): """Opens the password file named @filename and reads first @limit passwords. @kwargs are passed to get_line for further processing. For example, pw_filter etc. 
@fielname: string @limit: integer """ allowed_keys_for_get_line = {'sep', 'pw_filter', 'errors'} for k in list(kwargs.keys()): if k not in allowed_keys_for_get_line: del kwargs[k] print("After filtering: {}".format(kwargs)) with open_(filename, 'rt') as f: for w, c in get_line(f, limit, **kwargs): yield w, c # TODO - Optimize the tokenization process regex = r'([A-Za-z_]+)|([0-9]+)|(\W+)' def print_err(*args): if DEBUG: sys.stderr.write(' '.join([str(a) for a in args]) + '\n') def tokens(w): T = [] while w: m = re.match(regex, w) T.append(m.group(0)) w = w[len(T[-1]):] return T def whatchar(c): if c.isalpha(): return 'L'; if c.isdigit(): return 'D'; else: return 'Y' def mean_sd(arr): s = sum(arr) s2 = sum([x * x for x in arr]) n = len(arr) m = s / float(n) sd = sqrt(float(s2) / n - m * m) return m, sd def prod(arr): return reduce(operator.mul, arr, 1) def convert2group(t, totalC): return t + random.randint(0, (MAX_INT - t) / totalC) * totalC def warning(*objs): if DEBUG: print("WARNING: ", *objs, file=sys.stderr) # assumes last element in the array(A) is the sum of all elements def getIndex(p, A): p %= A[-1] i = 0; for i, v in enumerate(A): p -= v; if p < 0: break return i def dp(**kwargs): print(kwargs, file=sys.stderr) if __name__ == "__main__": print(list(getallgroups([1, 2, 3, 4, 5, 6, 7, 8, 9], 5))) # unittest.main()
rchatterjee/pwmodels
src/pwmodel/helper.py
open_get_line
python
def open_get_line(filename, limit=-1, **kwargs): allowed_keys_for_get_line = {'sep', 'pw_filter', 'errors'} for k in list(kwargs.keys()): if k not in allowed_keys_for_get_line: del kwargs[k] print("After filtering: {}".format(kwargs)) with open_(filename, 'rt') as f: for w, c in get_line(f, limit, **kwargs): yield w, c
Opens the password file named @filename and reads first @limit passwords. @kwargs are passed to get_line for further processing. For example, pw_filter etc. @fielname: string @limit: integer
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/helper.py#L317-L331
[ "def open_(filename, mode='rb'):\n type_ = file_type(filename, mode)\n errors = 'ignore' if 't' in mode else None\n if type_ == \"bz2\":\n f = bz2.open(filename, mode, errors=errors)\n elif type_ == \"gz\":\n f = gzip.open(filename, mode, errors=errors)\n else:\n f = open(filenam...
#!/usr/bin/python # from Crypto.Random import random import itertools import operator import os import random import re import string import struct import sys from functools import reduce import bz2 import gzip import functools from math import sqrt import dawg BASE_DIR = os.getcwd() sys.path.append(BASE_DIR) MAX_INT = 2 ** 64 - 1 DEBUG = os.getenv('DEBUG', False) START = '\x02' # chr(0x02) ascii for STX (start of text) END = '\x03' # chr(0x03) ascii for ETX (end of text) home = os.path.expanduser("~") thisdir = os.path.dirname(os.path.abspath(__file__)) ROCKYOU_TOTAL_CNT = 32603388.0 pw_characters = string.ascii_letters + string.digits + string.punctuation + ' ' L33T = {v: k for k,v in [('a', '@'), ('e', '3'), ('H', '#'), ('i', '1'), ('l', '1'), ('o', '0'), ('O', '0'), ('t', '1'), ('w', 'v')]} class memoized(object): '''Decorator. Caches a function's return value each time it is called. If called later with the same arguments, the cached value is returned (not reevaluated). ''' def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): # if not isinstance(args, collections.Hashable): # # uncacheable. a list, for instance. # # better to not cache than blow up. 
# print ("Uncachebale", args) # return self.func(*args) try: return self.cache[args[0]][args[1:]] except KeyError: value = self.func(*args) try: self.cache[args[0]][args[1:]] = value except KeyError: self.cache[args[0]] = {args[1:]: value} # if random.randint(0,10000)==0: # print ("Printing cache size:", file=sys.stderr) # for k,v in self.cache.items(): # print (">>", repr(k), len(v), file=sys.stderr) return value def __repr__(self): '''Return the function's docstring.''' return self.func.__doc__ def __get__(self, obj, objtype): '''Support instance methods.''' return functools.partial(self.__call__, obj) class random: @staticmethod def randints(s, e, n=1): """ returns n uniform random numbers from [s, e] """ assert e >= s, "Wrong range: [{}, {})".format(s, e) n = max(1, n) arr = [s + a % (e - s) for a in struct.unpack('<%dL' % n, os.urandom(4 * n))] return arr @staticmethod def randint(s, e): """ returns one random integer between s and e. Try using @randints in case you need multiple random integer. @randints is more efficient """ return random.randints(s, e, 1)[0] @staticmethod def choice(arr): i = random.randint(0, len(arr)) return arr[i] @staticmethod def sample(arr, n, unique=False): if unique: arr = set(arr) assert len(arr) > n, "Cannot sample uniquely from a small array." if len(arr) == n: return arr if n > len(arr) / 2: res = list(arr) while len(res) > n: del res[random.randint(0, len(res))] else: res = [] arr = list(arr) while len(res) < n: i = random.randint(0, len(arr)) res.append(arr[i]) del arr[i] else: return [arr[i] for i in random.randints(0, len(arr), n)] def gen_n_random_num(n, MAX_NUM=MAX_INT, unique=True): """ Returns @n @unique random unsigned integers (4 bytes) \ between 0 and @MAX_NUM. 
""" fmt = "<%dI" % n t = struct.calcsize(fmt) D = [d % MAX_NUM for d in struct.unpack(fmt, os.urandom(t))] if unique: D = set(D) assert MAX_NUM > n, \ "Cannot have {0} unique integers less than {1}".format(n, MAX_NUM) while len(D) < n: print("Number of collision: {}. Regenerating!".format(n - len(D))) fmt = "<%dI" % (n - len(D)) t = struct.calcsize(fmt) extra = struct.unpack(fmt, os.urandom(t)) D |= set(d % MAX_NUM for d in extra) D = list(D) return D def sample_following_dist(handle_iter, n, totalf): """Samples n passwords following the distribution from the handle @handle_iter is an iterator that gives (pw,f) @n is the total number of samle asked for @totalf is the total number of users, which is euqal to sum(f for pw,f in handle_iter) As, handle_iterator is an iterator and can only traverse once. @totalf needs to be supplied to the funciton. @handle_iter must be sorted in decreasing order Returns, an array of @n tuples (id, pw) sampled from @handle_iter. """ multiplier = 1.0 if totalf == 1.0: multiplier = 1e8 # print "WARNING!! 
I don't except probabilities" totalf = totalf * multiplier # print("# Population Size", totalf) A = gen_n_random_num(n, totalf, unique=False) A.sort(reverse=True) # Uniqueness check, non necessarily required, but not very # computationally intensive assert len(A) == n, "Not enough randomnumbers generated" \ "Requried {}, generated only {}".format(n, len(A)) # if not all(A[i] != A[i-1] for i in range(1,n,1)): # for i in range(1,n,1): # if A[i] == A[i-1]: # print i, A[i], A[i-1] j, sampled = 0, 0 val = A.pop() # print(handle_iter) for w, f in handle_iter: j += f * multiplier while val < j: sampled += 1 if sampled % 5000 == 0: print("Sampled:", sampled) yield (val, w) if A: val = A.pop() else: val = -1 break if not A and val == -1: break # print("# Stopped at:", w, f, j, '\n', file=sys.stderr) while A and val < j: yield (val, w) if A: i, val = A.pop() else: break try: # import pyximport; pyximport.install() from ._fast import compute_ngrams def ngramsofw(word, n, maxn=0): return compute_ngrams(word, n, maxn) except ImportError as ex: print(ex) exit(0) def ngramsofw(word, n, maxn=0): """Returns the @n-grams of a word @w """ print(">>> SLOW ngram computation") word = START + word + END ngrams = [] for ngram_length in range(n, min(len(word), maxn) + 1): for i in range(0, len(word) - ngram_length + 1): ngrams.append(word[i:i + ngram_length]) return ngrams def MILLION(n): return int(n * 1e6) def file_type(filename, param='rb'): """returns the type of file, e.g., gz, bz2, normal""" magic_dict = { b"\x1f\x8b\x08": "gz", b"\x42\x5a\x68": "bz2", b"\x50\x4b\x03\x04": "zip" } if param.startswith('w'): return filename.split('.')[-1] max_len = max(len(x) for x in magic_dict) with open(filename, 'rb') as f: file_start = f.read(max_len) for magic, filetype in list(magic_dict.items()): if file_start.startswith(magic): return filetype return "no match" def open_(filename, mode='rb'): type_ = file_type(filename, mode) errors = 'ignore' if 't' in mode else None if type_ == "bz2": f = 
bz2.open(filename, mode, errors=errors) elif type_ == "gz": f = gzip.open(filename, mode, errors=errors) else: f = open(filename, mode) return f def load_dawg(f, t=dawg.IntDAWG): if not f.endswith('.gz'): if not os.path.exists(f): f += '.gz' T = t() T.read(open_(f, 'rb')) return T def save_dawg(T, fname): if not fname.endswith('gz'): fname = fname + '.gz' with gzip.open(fname, 'wb') as f: T.write(f) def getallgroups(arr, k=-1): """ returns all the subset of @arr of size less than equalto @k the return array will be of size \sum_{i=1}^k nCi, n = len(arr) """ if k < 0: k = len(arr) return itertools.chain.from_iterable(itertools.combinations(set(arr), j) for j in range(1, k + 1)) def isascii(s): try: s.encode('ascii') return True except (UnicodeDecodeError, UnicodeEncodeError) as e: # warning("UnicodeError:", s, str(e)) return False def get_line(file_object, limit=-1, sep=r'\s+', pw_filter=lambda x: True, errors='replace'): # regex = re.compile(r'\s*([0-9]+) (.*)$') i = 0 print("sep={!r}".format(sep)) for l in file_object: if limit > 0 and limit <= i: break t = re.split(sep, l.rstrip('\n').lstrip(), maxsplit=1) if len(t) != 2: continue c, w = t c = int(c) w = w.replace('\x00', '\\x00') # if not isascii(w): # print("Not ascii, ignoring...") # continue if w and pw_filter(w) and c > 0: i += 1 yield w, c else: pass # warning ("Filter Failed or malformed string: ", w, c) # TODO - Optimize the tokenization process regex = r'([A-Za-z_]+)|([0-9]+)|(\W+)' def print_err(*args): if DEBUG: sys.stderr.write(' '.join([str(a) for a in args]) + '\n') def tokens(w): T = [] while w: m = re.match(regex, w) T.append(m.group(0)) w = w[len(T[-1]):] return T def whatchar(c): if c.isalpha(): return 'L'; if c.isdigit(): return 'D'; else: return 'Y' def mean_sd(arr): s = sum(arr) s2 = sum([x * x for x in arr]) n = len(arr) m = s / float(n) sd = sqrt(float(s2) / n - m * m) return m, sd def prod(arr): return reduce(operator.mul, arr, 1) def convert2group(t, totalC): return t + random.randint(0, 
(MAX_INT - t) / totalC) * totalC def warning(*objs): if DEBUG: print("WARNING: ", *objs, file=sys.stderr) # assumes last element in the array(A) is the sum of all elements def getIndex(p, A): p %= A[-1] i = 0; for i, v in enumerate(A): p -= v; if p < 0: break return i def dp(**kwargs): print(kwargs, file=sys.stderr) if __name__ == "__main__": print(list(getallgroups([1, 2, 3, 4, 5, 6, 7, 8, 9], 5))) # unittest.main()
rchatterjee/pwmodels
src/pwmodel/helper.py
random.randints
python
def randints(s, e, n=1): assert e >= s, "Wrong range: [{}, {})".format(s, e) n = max(1, n) arr = [s + a % (e - s) for a in struct.unpack('<%dL' % n, os.urandom(4 * n))] return arr
returns n uniform random numbers from [s, e]
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/helper.py#L79-L86
null
class random: @staticmethod @staticmethod def randint(s, e): """ returns one random integer between s and e. Try using @randints in case you need multiple random integer. @randints is more efficient """ return random.randints(s, e, 1)[0] @staticmethod def choice(arr): i = random.randint(0, len(arr)) return arr[i] @staticmethod def sample(arr, n, unique=False): if unique: arr = set(arr) assert len(arr) > n, "Cannot sample uniquely from a small array." if len(arr) == n: return arr if n > len(arr) / 2: res = list(arr) while len(res) > n: del res[random.randint(0, len(res))] else: res = [] arr = list(arr) while len(res) < n: i = random.randint(0, len(arr)) res.append(arr[i]) del arr[i] else: return [arr[i] for i in random.randints(0, len(arr), n)]
rchatterjee/pwmodels
src/pwmodel/readpw.py
sample_following_dist
python
def sample_following_dist(handle_iter, n, totalf): multiplier = 1.0 if totalf == 1.0: multiplier = 1e8 # print "WARNING!! I don't except probabilities" totalf = totalf * multiplier print("# Population Size", totalf) A = np.sort(np.unique(np.random.randint(0, totalf, size=n*2))[:n]) A = A[::-1] # Uniqueness check, non necessarily required, but not very # computationally intensive assert len(A) == n, "Not enough randomnumbers generated"\ "Requried {}, generated only {}".format(n, len(A)) j = 0 sampled = 0 val = A.pop() # print handle_iter for _,w,f in handle_iter: j += f*multiplier if not A: break while val<j: sampled += 1 if sampled %5000 == 0: print ("Sampled:",sampled) yield (val, w) if A: val = A.pop() else: break print ("# Stopped at:", w, f, j, '\n') while A and val<j: yield (val, w) if A: i, val = A.pop() else: break
Samples n passwords following the distribution from the handle @handle_iter is an iterator that gives (pw,f) @n is the total number of samle asked for @totalf is the total number of users, which is euqal to sum(f for pw,f in handle_iter) As, handle_iterator is an iterator and can only traverse once, @totalf needs to be supplied to the funciton. Returns, an array of @n tuples (id, pw) sampled from @handle_iter.
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/readpw.py#L31-L77
null
#!/usr/bin/env python3 from __future__ import print_function import sys, os import bz2, re import itertools import operator import marisa_trie import numpy as np from os.path import (expanduser) from math import sqrt # opens file checking whether it is bz2 compressed or not. import tarfile from .helper import open_get_line """A simple password library. Has function to put passwords into nice data structure for fast look up. The file creates a cache databse file in home folder with the name .passwords. In Unix you can find it using `~/.pypasswords`. Run with Python 3 your life will be much easier. """ MAX_INT = 2**64-1 DEBUG = True home = expanduser("~") pass_dir = os.path.join(home, '.pypasswords') ROCKYOU_TOTAL_CNT = 32603388.0 def getallgroups(arr, k=-1): """ returns all the subset of @arr of size less than equalto @k the return array will be of size sum_{i=1}^k nCi, n = len(arr) """ if k<0: k = len(arr) return itertools.chain.from_iterable( itertools.combinations(set(arr), j) for j in range(1,k+1) ) def is_asciistring(s): try: s.decode('ascii') return True except (UnicodeDecodeError, UnicodeEncodeError) as e: # warning("UnicodeError:", s, str(e)) return False regex = r'([A-Za-z_]+)|([0-9]+)|(\W+)' def print_err( *args ): if DEBUG: sys.stderr.write(' '.join([str(a) for a in args])+'\n') def tokens(w): T = [] while w: m = re.match(regex, w) T.append(m.group(0)) w = w[len(T[-1]):] return T def whatchar(c): return 'L' if c.isalpha() else \ 'D' if c.isdigit else 'Y' def mean_sd(arr): s = sum(arr) s2 = sum([x * x for x in arr]) n = len(arr) m = s / float(n) sd = sqrt(float(s2) / n - m * m) return m, sd def convert2group(t, totalC): """ What is this? """ return t + np.random.randint(0, (MAX_INT-t)/totalC) * totalC # assumes last element in the array(A) is the sum of all elements def getIndex(p, A): p %= A[-1] i = 0 for i, v in enumerate(A): p -= v if p < 0: break return i class Passwords(object): """Its a class to efficiently store and read large password file. 
It creates two files for each password in the under the directory 'eff_data/' in home+.pypassword directory (~/.pypasswords). First file is a trie, which just stores all the password in efficient prefix trie format using "marisa_trie" module. The second is a numy large array, containing the indicies. This is what I found the most memory and compute efficient way of accessing passwords in Python. @pass_file: the path of the file you want to process. The file should countain freq and the password similar to the output of unix "uniq -c" command. @max_pass_len, min_pass_len defines the range of password to consider. Note, this filtering does not effect the totalf, and only changes the iterpws() function. """ def __init__(self, pass_file, max_pass_len=40, min_pass_len=1, **kwargs): self.fbasename = os.path.basename(pass_file).split('.',1)[0] _dirname = '{}/eff_data/'.format(pass_dir) if not os.path.exists(_dirname): os.makedirs(_dirname) self._max_pass_len = max_pass_len self._min_pass_len = min_pass_len self._file_trie = os.path.join(_dirname, self.fbasename + '.trie') self._file_freq = os.path.join(_dirname, self.fbasename + '.npz') self._T, self._freq_list, self._totalf = None, None, None if not kwargs.get('freshall', False) and os.path.exists(self._file_trie) and os.path.exists(self._file_freq): self.load_data() else: if 'freshall' in kwargs: del kwargs['freshall'] self.create_data_structure(pass_file, **kwargs) assert self._T, "Could not initialize the trie." 
def create_data_structure(self, pass_file, **kwargs): # Record trie, Slow, and not memory efficient # self._T = marisa_trie.RecordTrie( # '<II', ((unicode(w), (c,)) # for i, (w,c) in # enumerate(passwords.open_get_line(pass_file))) # ) print(kwargs) tmp_dict = {w: c for w,c in open_get_line(pass_file, **kwargs)} self._T = marisa_trie.Trie(tmp_dict.keys()) self._freq_list = np.zeros(len(self._T), dtype=int) for k in self._T.iterkeys(): self._freq_list[self._T.key_id(k)] = tmp_dict[k] self._T.save(self._file_trie) self._totalf = self._freq_list.sum() np.savez_compressed( self._file_freq, freq=self._freq_list, fsum=self._totalf ) def sample_pws(self, n, asperdist=True): """Returns n passwords sampled from this password dataset. if asperdist is True, then returns the password sampled according the password histogram distribution (with replacement). Passwords are always sampled with replacement. TODO: The sample users, instead of passwords perse. """ if asperdist: sample = np.random.choice( self._freq_list.shape[0], size=n, p=self._freq_list/self._totalf ) else: sample = np.random.choice(len(self._T), size=n) return (self._T.restore_key(i) for i in sample) def load_data(self): self._T = marisa_trie.Trie() self._T.load(self._file_trie) np_f = np.load(self._file_freq) self._freq_list, self._totalf = np_f['freq'], np_f['fsum'] def totalf(self): return self._totalf def pw2id(self, pw): try: return self._T.key_id(pw) except KeyError: return -1 except UnicodeDecodeError as e: print(repr(pw), e) raise ValueError(e) def id2pw(self, _id): try: return self._T.restore_key(_id) except KeyError: return '' def prob(self, pw): return self.__getitem__(pw)/self._totalf def pw2freq(self, pw): try: return self._freq_list[self._T.key_id(pw)] # return self._T.get(unicode(pw), 0) except KeyError: return 0 def id2freq(self, _id): _id = int(_id) try: return self._freq_list[_id] except ValueError: return 0 def sumvalues(self, q=0): """Sum of top q passowrd frequencies """ if q == 0: return 
self._totalf else: return -np.partition(-self._freq_list, q)[:q].sum() def iterpws(self, n): """ Returns passwords in order of their frequencies. @n: The numebr of passwords to return Return: pwid, password, frequency Every password is assigned an uniq id, for efficient access. """ for _id in np.argsort(self._freq_list)[::-1][:n]: pw = self._T.restore_key(_id) if self._min_pass_len <= len(pw) <= self._max_pass_len: yield _id, pw, self._freq_list[_id] def justiter(self): for w, _id in self._T.iteritems(): yield _id, w, self._freq_list[_id] def keys(self): return self._T.iterkeys() def values(self): return self._freq_list def __iter__(self): for _id in np.argsort(self._freq_list)[::-1]: yield _id, self._freq_list[_id] def __getitem__(self, k): if isinstance(k, int): return self._freq_list[k] if isinstance(k, str): return self._freq_list[self.pw2id(k)] raise TypeError("_id is wrong type ({}) expects str or int" .format(type(k))) def __len__(self): return self._freq_list.shape[0] import unittest class TestPasswords(unittest.TestCase): def test_pw2freq(self): passwords = Passwords( os.path.expanduser('~/passwords/rockyou-withcount.txt.bz2') ) for pw, f in {'michelle': 12714, 'george': 4749, 'familia': 1975, 'honeybunny': 242, 'asdfasdf2wg': 0, ' 234 adsf': 0}.items(): pw = pw self.assertEqual(passwords.pw2freq(pw), f) def test_getallgroups(self): for inp, res in [( [1,2,3], set([ (1,), (2,), (3,), (1,2), (2,3), (1,3), (1,2,3)]) )]: res1 = set(getallgroups(inp)) self.assertEqual(res1, res) if __name__ == "__main__": # print(list(getallgroups([1,2,3,4,5,6,7,8,9], 5))) unittest.main()
rchatterjee/pwmodels
src/pwmodel/readpw.py
Passwords.sample_pws
python
def sample_pws(self, n, asperdist=True): if asperdist: sample = np.random.choice( self._freq_list.shape[0], size=n, p=self._freq_list/self._totalf ) else: sample = np.random.choice(len(self._T), size=n) return (self._T.restore_key(i) for i in sample)
Returns n passwords sampled from this password dataset. if asperdist is True, then returns the password sampled according the password histogram distribution (with replacement). Passwords are always sampled with replacement. TODO: The sample users, instead of passwords perse.
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/readpw.py#L198-L212
null
class Passwords(object): """Its a class to efficiently store and read large password file. It creates two files for each password in the under the directory 'eff_data/' in home+.pypassword directory (~/.pypasswords). First file is a trie, which just stores all the password in efficient prefix trie format using "marisa_trie" module. The second is a numy large array, containing the indicies. This is what I found the most memory and compute efficient way of accessing passwords in Python. @pass_file: the path of the file you want to process. The file should countain freq and the password similar to the output of unix "uniq -c" command. @max_pass_len, min_pass_len defines the range of password to consider. Note, this filtering does not effect the totalf, and only changes the iterpws() function. """ def __init__(self, pass_file, max_pass_len=40, min_pass_len=1, **kwargs): self.fbasename = os.path.basename(pass_file).split('.',1)[0] _dirname = '{}/eff_data/'.format(pass_dir) if not os.path.exists(_dirname): os.makedirs(_dirname) self._max_pass_len = max_pass_len self._min_pass_len = min_pass_len self._file_trie = os.path.join(_dirname, self.fbasename + '.trie') self._file_freq = os.path.join(_dirname, self.fbasename + '.npz') self._T, self._freq_list, self._totalf = None, None, None if not kwargs.get('freshall', False) and os.path.exists(self._file_trie) and os.path.exists(self._file_freq): self.load_data() else: if 'freshall' in kwargs: del kwargs['freshall'] self.create_data_structure(pass_file, **kwargs) assert self._T, "Could not initialize the trie." 
def create_data_structure(self, pass_file, **kwargs): # Record trie, Slow, and not memory efficient # self._T = marisa_trie.RecordTrie( # '<II', ((unicode(w), (c,)) # for i, (w,c) in # enumerate(passwords.open_get_line(pass_file))) # ) print(kwargs) tmp_dict = {w: c for w,c in open_get_line(pass_file, **kwargs)} self._T = marisa_trie.Trie(tmp_dict.keys()) self._freq_list = np.zeros(len(self._T), dtype=int) for k in self._T.iterkeys(): self._freq_list[self._T.key_id(k)] = tmp_dict[k] self._T.save(self._file_trie) self._totalf = self._freq_list.sum() np.savez_compressed( self._file_freq, freq=self._freq_list, fsum=self._totalf ) def load_data(self): self._T = marisa_trie.Trie() self._T.load(self._file_trie) np_f = np.load(self._file_freq) self._freq_list, self._totalf = np_f['freq'], np_f['fsum'] def totalf(self): return self._totalf def pw2id(self, pw): try: return self._T.key_id(pw) except KeyError: return -1 except UnicodeDecodeError as e: print(repr(pw), e) raise ValueError(e) def id2pw(self, _id): try: return self._T.restore_key(_id) except KeyError: return '' def prob(self, pw): return self.__getitem__(pw)/self._totalf def pw2freq(self, pw): try: return self._freq_list[self._T.key_id(pw)] # return self._T.get(unicode(pw), 0) except KeyError: return 0 def id2freq(self, _id): _id = int(_id) try: return self._freq_list[_id] except ValueError: return 0 def sumvalues(self, q=0): """Sum of top q passowrd frequencies """ if q == 0: return self._totalf else: return -np.partition(-self._freq_list, q)[:q].sum() def iterpws(self, n): """ Returns passwords in order of their frequencies. @n: The numebr of passwords to return Return: pwid, password, frequency Every password is assigned an uniq id, for efficient access. 
""" for _id in np.argsort(self._freq_list)[::-1][:n]: pw = self._T.restore_key(_id) if self._min_pass_len <= len(pw) <= self._max_pass_len: yield _id, pw, self._freq_list[_id] def justiter(self): for w, _id in self._T.iteritems(): yield _id, w, self._freq_list[_id] def keys(self): return self._T.iterkeys() def values(self): return self._freq_list def __iter__(self): for _id in np.argsort(self._freq_list)[::-1]: yield _id, self._freq_list[_id] def __getitem__(self, k): if isinstance(k, int): return self._freq_list[k] if isinstance(k, str): return self._freq_list[self.pw2id(k)] raise TypeError("_id is wrong type ({}) expects str or int" .format(type(k))) def __len__(self): return self._freq_list.shape[0]
rchatterjee/pwmodels
src/pwmodel/readpw.py
Passwords.sumvalues
python
def sumvalues(self, q=0): if q == 0: return self._totalf else: return -np.partition(-self._freq_list, q)[:q].sum()
Sum of the top q password frequencies
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/readpw.py#L255-L261
null
class Passwords(object): """Its a class to efficiently store and read large password file. It creates two files for each password in the under the directory 'eff_data/' in home+.pypassword directory (~/.pypasswords). First file is a trie, which just stores all the password in efficient prefix trie format using "marisa_trie" module. The second is a numy large array, containing the indicies. This is what I found the most memory and compute efficient way of accessing passwords in Python. @pass_file: the path of the file you want to process. The file should countain freq and the password similar to the output of unix "uniq -c" command. @max_pass_len, min_pass_len defines the range of password to consider. Note, this filtering does not effect the totalf, and only changes the iterpws() function. """ def __init__(self, pass_file, max_pass_len=40, min_pass_len=1, **kwargs): self.fbasename = os.path.basename(pass_file).split('.',1)[0] _dirname = '{}/eff_data/'.format(pass_dir) if not os.path.exists(_dirname): os.makedirs(_dirname) self._max_pass_len = max_pass_len self._min_pass_len = min_pass_len self._file_trie = os.path.join(_dirname, self.fbasename + '.trie') self._file_freq = os.path.join(_dirname, self.fbasename + '.npz') self._T, self._freq_list, self._totalf = None, None, None if not kwargs.get('freshall', False) and os.path.exists(self._file_trie) and os.path.exists(self._file_freq): self.load_data() else: if 'freshall' in kwargs: del kwargs['freshall'] self.create_data_structure(pass_file, **kwargs) assert self._T, "Could not initialize the trie." 
def create_data_structure(self, pass_file, **kwargs): # Record trie, Slow, and not memory efficient # self._T = marisa_trie.RecordTrie( # '<II', ((unicode(w), (c,)) # for i, (w,c) in # enumerate(passwords.open_get_line(pass_file))) # ) print(kwargs) tmp_dict = {w: c for w,c in open_get_line(pass_file, **kwargs)} self._T = marisa_trie.Trie(tmp_dict.keys()) self._freq_list = np.zeros(len(self._T), dtype=int) for k in self._T.iterkeys(): self._freq_list[self._T.key_id(k)] = tmp_dict[k] self._T.save(self._file_trie) self._totalf = self._freq_list.sum() np.savez_compressed( self._file_freq, freq=self._freq_list, fsum=self._totalf ) def sample_pws(self, n, asperdist=True): """Returns n passwords sampled from this password dataset. if asperdist is True, then returns the password sampled according the password histogram distribution (with replacement). Passwords are always sampled with replacement. TODO: The sample users, instead of passwords perse. """ if asperdist: sample = np.random.choice( self._freq_list.shape[0], size=n, p=self._freq_list/self._totalf ) else: sample = np.random.choice(len(self._T), size=n) return (self._T.restore_key(i) for i in sample) def load_data(self): self._T = marisa_trie.Trie() self._T.load(self._file_trie) np_f = np.load(self._file_freq) self._freq_list, self._totalf = np_f['freq'], np_f['fsum'] def totalf(self): return self._totalf def pw2id(self, pw): try: return self._T.key_id(pw) except KeyError: return -1 except UnicodeDecodeError as e: print(repr(pw), e) raise ValueError(e) def id2pw(self, _id): try: return self._T.restore_key(_id) except KeyError: return '' def prob(self, pw): return self.__getitem__(pw)/self._totalf def pw2freq(self, pw): try: return self._freq_list[self._T.key_id(pw)] # return self._T.get(unicode(pw), 0) except KeyError: return 0 def id2freq(self, _id): _id = int(_id) try: return self._freq_list[_id] except ValueError: return 0 def iterpws(self, n): """ Returns passwords in order of their frequencies. 
@n: The numebr of passwords to return Return: pwid, password, frequency Every password is assigned an uniq id, for efficient access. """ for _id in np.argsort(self._freq_list)[::-1][:n]: pw = self._T.restore_key(_id) if self._min_pass_len <= len(pw) <= self._max_pass_len: yield _id, pw, self._freq_list[_id] def justiter(self): for w, _id in self._T.iteritems(): yield _id, w, self._freq_list[_id] def keys(self): return self._T.iterkeys() def values(self): return self._freq_list def __iter__(self): for _id in np.argsort(self._freq_list)[::-1]: yield _id, self._freq_list[_id] def __getitem__(self, k): if isinstance(k, int): return self._freq_list[k] if isinstance(k, str): return self._freq_list[self.pw2id(k)] raise TypeError("_id is wrong type ({}) expects str or int" .format(type(k))) def __len__(self): return self._freq_list.shape[0]
rchatterjee/pwmodels
src/pwmodel/readpw.py
Passwords.iterpws
python
def iterpws(self, n): for _id in np.argsort(self._freq_list)[::-1][:n]: pw = self._T.restore_key(_id) if self._min_pass_len <= len(pw) <= self._max_pass_len: yield _id, pw, self._freq_list[_id]
Returns passwords in order of their frequencies. @n: The number of passwords to return Return: pwid, password, frequency Every password is assigned a unique id, for efficient access.
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/readpw.py#L263-L273
null
class Passwords(object): """Its a class to efficiently store and read large password file. It creates two files for each password in the under the directory 'eff_data/' in home+.pypassword directory (~/.pypasswords). First file is a trie, which just stores all the password in efficient prefix trie format using "marisa_trie" module. The second is a numy large array, containing the indicies. This is what I found the most memory and compute efficient way of accessing passwords in Python. @pass_file: the path of the file you want to process. The file should countain freq and the password similar to the output of unix "uniq -c" command. @max_pass_len, min_pass_len defines the range of password to consider. Note, this filtering does not effect the totalf, and only changes the iterpws() function. """ def __init__(self, pass_file, max_pass_len=40, min_pass_len=1, **kwargs): self.fbasename = os.path.basename(pass_file).split('.',1)[0] _dirname = '{}/eff_data/'.format(pass_dir) if not os.path.exists(_dirname): os.makedirs(_dirname) self._max_pass_len = max_pass_len self._min_pass_len = min_pass_len self._file_trie = os.path.join(_dirname, self.fbasename + '.trie') self._file_freq = os.path.join(_dirname, self.fbasename + '.npz') self._T, self._freq_list, self._totalf = None, None, None if not kwargs.get('freshall', False) and os.path.exists(self._file_trie) and os.path.exists(self._file_freq): self.load_data() else: if 'freshall' in kwargs: del kwargs['freshall'] self.create_data_structure(pass_file, **kwargs) assert self._T, "Could not initialize the trie." 
def create_data_structure(self, pass_file, **kwargs): # Record trie, Slow, and not memory efficient # self._T = marisa_trie.RecordTrie( # '<II', ((unicode(w), (c,)) # for i, (w,c) in # enumerate(passwords.open_get_line(pass_file))) # ) print(kwargs) tmp_dict = {w: c for w,c in open_get_line(pass_file, **kwargs)} self._T = marisa_trie.Trie(tmp_dict.keys()) self._freq_list = np.zeros(len(self._T), dtype=int) for k in self._T.iterkeys(): self._freq_list[self._T.key_id(k)] = tmp_dict[k] self._T.save(self._file_trie) self._totalf = self._freq_list.sum() np.savez_compressed( self._file_freq, freq=self._freq_list, fsum=self._totalf ) def sample_pws(self, n, asperdist=True): """Returns n passwords sampled from this password dataset. if asperdist is True, then returns the password sampled according the password histogram distribution (with replacement). Passwords are always sampled with replacement. TODO: The sample users, instead of passwords perse. """ if asperdist: sample = np.random.choice( self._freq_list.shape[0], size=n, p=self._freq_list/self._totalf ) else: sample = np.random.choice(len(self._T), size=n) return (self._T.restore_key(i) for i in sample) def load_data(self): self._T = marisa_trie.Trie() self._T.load(self._file_trie) np_f = np.load(self._file_freq) self._freq_list, self._totalf = np_f['freq'], np_f['fsum'] def totalf(self): return self._totalf def pw2id(self, pw): try: return self._T.key_id(pw) except KeyError: return -1 except UnicodeDecodeError as e: print(repr(pw), e) raise ValueError(e) def id2pw(self, _id): try: return self._T.restore_key(_id) except KeyError: return '' def prob(self, pw): return self.__getitem__(pw)/self._totalf def pw2freq(self, pw): try: return self._freq_list[self._T.key_id(pw)] # return self._T.get(unicode(pw), 0) except KeyError: return 0 def id2freq(self, _id): _id = int(_id) try: return self._freq_list[_id] except ValueError: return 0 def sumvalues(self, q=0): """Sum of top q passowrd frequencies """ if q == 0: return 
self._totalf else: return -np.partition(-self._freq_list, q)[:q].sum() def justiter(self): for w, _id in self._T.iteritems(): yield _id, w, self._freq_list[_id] def keys(self): return self._T.iterkeys() def values(self): return self._freq_list def __iter__(self): for _id in np.argsort(self._freq_list)[::-1]: yield _id, self._freq_list[_id] def __getitem__(self, k): if isinstance(k, int): return self._freq_list[k] if isinstance(k, str): return self._freq_list[self.pw2id(k)] raise TypeError("_id is wrong type ({}) expects str or int" .format(type(k))) def __len__(self): return self._freq_list.shape[0]
rchatterjee/pwmodels
src/pwmodel/fast_fuzzysearch.py
Fast1FuzzySearch.query
python
def query(self, w, ed=1): # Can only handle ed=1 assert ed <= self._ed if ed == 0: return [w] if w in self._L else [''] w = str(w) n = len(w) prefix, suffix = w[:n // 2], w[n // 2:][::-1] options_w_prefix = self._L.keys(prefix) options_w_suffix = [x[::-1] for x in self._R.iterkeys(suffix)] return [ _w for _w in set(itertools.chain(options_w_prefix, options_w_suffix)) if abs(len(_w) - len(w)) <= 1 and lvdistance(str(_w), str(w), 1) <= 1 ]
Finds the fuzzy matches (within edit distance 1) of w from words
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/fast_fuzzysearch.py#L107-L123
null
class Fast1FuzzySearch(object): """This is an implementation of fuzzy string matching using dawgs. Good for only edit distance 1. Idea is for the query take word and look at words with similar prifix, or the ones with simlar suffix. We are looking for words at distance 1, so, the edit must be either on the first half of the word, or on the last half, and we can safely check that using prefix, and suffix match. """ _ed = 1 def __init__(self, words): # good for 1 edit distance self._L, self._R = self._process_list(list(set(words))) def _process_list(self, words): rev_words = [w[::-1] for w in words] norm_dawg = dawg.CompletionDAWG(words) rev_dawg = dawg.CompletionDAWG(rev_words) return norm_dawg, rev_dawg def words_with_prefix(self, prefix): return self._L.iterkeys(str(prefix)) def words_with_suffix(self, suffix): return (w[::-1] for w in self._R.iterkeys(str(suffix[::-1])))
rchatterjee/pwmodels
src/pwmodel/models.py
create_model
python
def create_model(modelfunc, fname='', listw=[], outfname='', limit=int(3e6), min_pwlen=6, topk=10000, sep=r'\s+'): def length_filter(pw): pw = ''.join(c for c in pw if c in VALID_CHARS) return len(pw) >= min_pwlen pws = [] if fname: pws = helper.open_get_line(fname, limit=limit, pw_filter=length_filter, sep=sep) big_dict = defaultdict(int) total_f, total_e = 0, 0 # Add topk passwords from the input dataset to the list topk_pws = [] for pw, c in itertools.chain(pws, listw): for ng in modelfunc(pw): big_dict[ng] += c total_f += c total_e += 1 if len(big_dict) % 100000 == 0: print(("Dictionary size: {} (Total_freq: {}; Total_pws: {}"\ .format(len(big_dict), total_f, total_e))) if len(topk_pws) >= topk: heapq.heappushpop(topk_pws, (c, pw)) else: heapq.heappush(topk_pws, (c, pw)) # Adding topk password to deal with probability reduction of popular # passwords. Mostly effective for n-gram models print("topk={}".format(topk)) if topk > 0: for c, pw in topk_pws: tpw = helper.START + pw + helper.END big_dict[tpw] += c total_f += c total_e += 1 big_dict[NPWS_W] = total_e big_dict[TOTALF_W] = total_f nDawg = dawg.IntCompletionDAWG(big_dict) if not outfname: outfname = 'tmpmodel.dawg.gz' elif not outfname.endswith('.gz'): outfname += '.gz' pathlib.Path(outfname).parent.mkdir(parents=True, exist_ok=True) helper.save_dawg(nDawg, outfname) return nDawg
:modelfunc: is a function that takes a word and returns its splits. For an ngram model this function returns all the ngrams of a word; for PCFG it will return splits of the password. @modelfunc: func: string -> [list of strings] @fname: name of the file to read from @listw: list of passwords. Uses passwords from both the file and listw if provided. @outfname: the file to write down the model.
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L25-L81
[ "def save_dawg(T, fname):\n if not fname.endswith('gz'):\n fname = fname + '.gz'\n with gzip.open(fname, 'wb') as f:\n T.write(f)\n", "def open_get_line(filename, limit=-1, **kwargs):\n \"\"\"Opens the password file named @filename and reads first @limit\n passwords. @kwargs are passed t...
import heapq import os import sys import itertools from collections import defaultdict import operator import dawg import functools import pathlib import json import string from . import helper from .fast_fuzzysearch import fast_fuzzysearch TOTALF_W = '\x02__TOTALF__\x03' NPWS_W = '\x02__NPWS__\x03' reserved_words = {TOTALF_W, NPWS_W} # Valid characters for passwords # string.digits + string.ascii_letters + string.punctuation VALID_CHARS = set(string.printable[:-6] + helper.START + helper.END) N_VALID_CHARS = len(VALID_CHARS) def read_dawg(fname): print(("reading {fname}".format(fname=fname))) return helper.load_dawg(fname, dawg.IntCompletionDAWG) def get_data_path(fname): data_dir = os.path.join(helper.home, '.pwmodel') if helper.DEBUG: data_dir = os.path.join(helper.thisdir, 'data') if not os.path.exists(data_dir): os.mkdir(data_dir) return os.path.join(data_dir, fname) class PwModel(object): def __init__(self, **kwargs): pwfilename = kwargs.get('pwfilename', '') self._leak = os.path.basename(pwfilename).split('-')[0] freshall = kwargs.get('freshall', False) self.modelname = kwargs.get('modelname', 'ngram-3') if not self._leak: self._leak = kwargs.get('leak', 'tmp') freshall = True self._modelf = get_data_path( '{}-{}.dawg.gz'.format(self._leak, self.modelname) ) self._T = None if kwargs.get('T') is not None: self._T = kwargs.get('T') return if freshall: try: os.remove(self._modelf) except OSError as e: print("File ({!r}) does not exist. 
ERROR: {}" .format(self._modelf, e), file=sys.stderr) if self._leak != 'tmp': try: self._T = read_dawg(self._modelf) except IOError as ex: print(("ex={}\nHang on while I am creating the model {!r}!\n" .format(ex, self.modelname))) if self._T is None: self._T = create_model( fname=pwfilename, listw=kwargs.get('listw', []), outfname=self._modelf, modelfunc=kwargs.get('modelfunc', self.modelfunc), limit=int(kwargs.get('limit', 3e6)), topk=kwargs.get('topk', -1), sep=kwargs.get('sep', r'\s+') ) def modelfunc(self, w): raise Exception("Not implemented") def prob(self, word): raise Exception("Not implemented") def qth_pw(self, q): """ returns the qth most probable element in the dawg. """ return heapq.nlargest(q + 2, self._T.iteritems(), key=operator.itemgetter(1))[-1] def get(self, pw): """Returns password probability""" return self.prob(pw) def __str__(self): return 'Pwmodel<{}-{}>'.format(self.modelname, self._leak) def npws(self): return self._T[NPWS_W] def totalf(self): return self._T[TOTALF_W] def leakname(self): return self._leak ################################################################################ MIN_PROB = 1e-10 class PcfgPw(PwModel): """Creates a pcfg model from the password in @pwfilename. """ def __init__(self, pwfilename, **kwargs): kwargs['modelfunc'] = self.pcfgtokensofw kwargs['modelname'] = 'weir-pcfg' kwargs['topk'] = 10000 super(PcfgPw, self).__init__(pwfilename=pwfilename, **kwargs) def pcfgtokensofw(self, word): """This splits the word into chunks similar to as described in Weir et al Oakland'14 paper. 
E.g., >> ngrampw.pcfgtokensofw('password@123') ['password', '@', '123', '__L8__', '__Y1__', '__D3__'] """ tok = helper.tokens(word) sym = ['__{0}{1}__'.format(helper.whatchar(w), len(w)) for w in tok] S = ['__S__' + ''.join(sym).replace('_', '') + '__'] return S + sym + tok def tokprob(self, tok, nonT): """ return P[nonT -> tok], e.g., P[ W3 -> 'abc'] """ p = self._T.get(tok, 0) / float(self._T.get(nonT, 1)) if not p: p = MIN_PROB return p def prob(self, pw): """ Return the probability of pw under the Weir PCFG model. P[{S -> L2D1Y3, L2 -> 'ab', D1 -> '1', Y3 -> '!@#'}] """ tokens = self.pcfgtokensofw(pw) S, tokens = tokens[0], tokens[1:] l = len(tokens) assert l % 2 == 0, "Expecting even number of tokens!. got {}".format(tokens) p = float(self._T.get(S, 0.0)) / sum(v for k, v in self._T.items('__S__')) for i, t in enumerate(tokens): f = self._T.get(t, 0.0) if f == 0: return 0.0 if i < l / 2: p /= f else: p *= f # print pw, p, t, self._T.get(t) return p ################################################################################ class NGramPw(PwModel): """Create a list of ngrams from a file @fname. NOTE: the file must be in password file format. See smaple/pw.txt file for the format. If the file is empty string, then it will try to read the passwords from the @listw which is a list of tuples [(w1, f1), (w2, f2)...]. (It can be an iterator.) @n is again the 'n' of n-gram Writes the ngrams in a file at @outfname. if outfname is empty string, it will print the ngrams on a file named 'ngrams.dawg' :param pwfilename: a `password' file :param n: an integer (NOTE: you should provide a `n`. 
`n` is default to 3) """ def __init__(self, pwfilename='', **kwargs): kwargs['modelfunc'] = self.ngramsofw kwargs['n'] = kwargs.get('n', 3) self._n = kwargs.get('n', 3) kwargs['modelname'] = 'ngram-{}'.format(self._n) kwargs['topk'] = -1 super(NGramPw, self).__init__(pwfilename=pwfilename, **kwargs) self._leet = self._T.compile_replaces(helper.L33T) @functools.lru_cache(maxsize=100000) def sum_freq(self, pre): if not isinstance(pre, str): pre = str(pre) return float(sum(v for k, v in self._T.iteritems(pre))) @functools.lru_cache(maxsize=100000) def get_freq(self, x): """get freq of x with or without L33t transformations """ # This is causing problem with ngram-probabilities. # > pwm.prob('s@f@r!') # > 1.441957095339684 # keys = self._T.similar_keys(x, self._leet) return self._T.get(x, 0.0) # # print("get_freq: {!r} -> {!r}".format(x, keys)) # if len(keys) > 0: # return self._T[keys[0]] # else: # return 0.0 def cprob(self, c, history): """ :param history: string :param c: character P[c | history] = (f(history+c) + 1)/(f(history) + |V|-1) Implement add-1 smoothing with backoff for simplicty. 
TODO: Does it make sense returns P[c | history] """ if not history: return 1 hist = history[:] if len(history) >= self._n: history = history[-(self._n-1):] if not isinstance(history, str): history = str(history) d, n = 0.0, 0.0 while (d == 0.0) and len(history) >= 1: try: d = self.get_freq(history) n = self.get_freq(history + c) except UnicodeDecodeError as e: print(("ERROR:", repr(history), e)) raise e history = history[1:] assert d != 0, "ERROR: Denominator zero!\n" \ "d={} n={} history={!r} c={!r} ({})" \ .format(d, n, hist, c, self._n) return (n + 1) / (d + N_VALID_CHARS-1) def ngramsofw(self, word): return helper.ngramsofw(word, 1, self._n) def _get_next(self, history): """Get the next set of characters and their probabilities""" orig_history = history if not history: return helper.START history = history[-(self._n-1):] while history and not self._T.get(history): history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if not (k in reserved_words or k == history)] total = sum(v for k, v in kv) while total == 0 and len(history) > 0: history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if not (k in reserved_words or k == history)] total = sum(v for k, v in kv) assert total > 0, "Sorry there is no n-gram with {!r}".format(orig_history) d = defaultdict(float) total = self._T.get(history) for k, v in kv: k = k[len(history):] d[k] += (v+1)/(total + N_VALID_CHARS-1) return d def _gen_next(self, history): """Generate next character sampled from the distribution of characters next. 
""" orig_history = history if not history: return helper.START history = history[-(self._n-1):] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) while total == 0 and len(history) > 0: history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) assert total > 0, "Sorry there is no n-gram with {!r}".format(orig_history) _, sampled_k = list(helper.sample_following_dist(kv, 1, total))[0] # print(">>>", repr(sampled_k), len(history)) return sampled_k[len(history)] def sample_pw(self): s = helper.START while s[-1] != helper.END: s += self._gen_next(s) return s[1:-1] def generate_pws_in_order(self, n, filter_func=None, N_max=1e6): """ Generates passwords in order between upto N_max @N_max is the maximum size of the priority queue will be tolerated, so if the size of the queue is bigger than 1.5 * N_max, it will shrink the size to 0.75 * N_max @n is the number of password to generate. **This function is expensive, and shuold be called only if necessary. Cache its call as much as possible** # TODO: Need to recheck how to make sure this is working. 
""" # assert alpha < beta, 'alpha={} must be less than beta={}'.format(alpha, beta) states = [(-1.0, helper.START)] # get the topk first p_min = 1e-9 / (n**2) # max 1 million entries in the heap ret = [] done = set() already_added_in_heap = set() while len(ret) < n and len(states) > 0: # while n > 0 and len(states) > 0: p, s = heapq.heappop(states) if p < 0: p = -p if s in done: continue assert s[0] == helper.START, "Broken s: {!r}".format(s) if s[-1] == helper.END: done.add(s) clean_s = s[1:-1] if filter_func is None or filter_func(clean_s): ret.append((clean_s, p)) # n -= 1 # yield (clean_s, p) else: for c, f in self._get_next(s).items(): if (f*p < p_min or (s+c) in done or (s+c) in already_added_in_heap): continue already_added_in_heap.add(s+c) heapq.heappush(states, (-f*p, s+c)) if len(states) > N_max * 3 / 2: print("Heap size: {}. ret={}. (expected: {}) s={!r}" .format(len(states), len(ret), n, s)) print("The size of states={}. Still need={} pws. Truncating" .format(len(states), n - len(ret))) states = heapq.nsmallest(int(N_max * 3/4), states) print("Done") return ret def _get_largest_prefix(self, pw): s = self._T.prefixes(pw) if not s or len(s[-1]) <= self._n: return ('', 0.0), pw pre = s[-1] rest = pw[len(pre):] pre_prob = self._T.get(pre)/self.totalf() return (pre, pre_prob), rest def _prob(self, pw, given=''): p = 1.0 while pw: (pre, pre_prob), rest_pw = self._get_largest_prefix(pw) # print("pw={!r} given={!r} p={}".format(pw, given, p)) if pre_prob > 0.0: p *= pre_prob pw, given = rest_pw, pre else: try: p *= self.cprob(pw[0], given) pw, given = pw[1:], given+pw[0] except Exception as e: print((repr(pw))) raise e return p @functools.lru_cache(maxsize=100000) def prob(self, pw): new_pw = helper.START + pw + helper.END return self._prob(new_pw) def normalize(pw): """ Lower case, and change the symbols to closest characters""" pw_lower = pw.lower() return ''.join(helper.L33T.get(c, c) for c in pw_lower) 
################################################################################ class HistPw(PwModel): """ Creates a histograms from the given file. Just converts the password file into a .dawg file. """ def __init__(self, pwfilename, fuzzysearch=False, **kwargs): kwargs['modelfunc'] = lambda x: [x] kwargs['modelname'] = 'histogram' super(HistPw, self).__init__(pwfilename=pwfilename, **kwargs) self.sep = kwargs.get('sep', r'\s+') self.pwfilename = pwfilename if fuzzysearch: self.ffs = fast_fuzzysearch(self._T.keys(), ed=2) else: self.ffs = None def similarpws(self, pw, ed=2): return self.ffs.query(pw, ed) def probsum(self, pws): """Sum of probs of all passwords in @pws.""" return sum(self.prob(pw) for pw in pws) def prob(self, pw): """ returns the probabiltiy of pw in the model. P[pw] = n(pw)/n(__total__) """ return float(self._T.get(pw, 0)) / self._T[TOTALF_W] def prob_correction(self, f=1): """ Corrects the probability error due to truncating the distribution. """ total = {'rockyou': 32602160} return f * self._T[TOTALF_W] / total.get(self._leak, self._T[TOTALF_W]) def iterpasswords(self, n=-1): return helper.open_get_line(self.pwfilename, limit=n, sep=self.sep) if __name__ == "__main__": import sys if len(sys.argv) == 3: if sys.argv[1] == '-createHpw': pwf = sys.argv[2] pwm = HistPw(pwf, freshall=True) print(pwm) print((pwm.prob('password12'))) elif sys.argv[1] == '-ngramGen': pwf = sys.argv[2] pwm = NGramPw(pwfilename=pwf) # print(pwm.sample_pw()) print(json.dumps(pwm.generate_pws_in_order(1000, filter_func=lambda x: len(x)>6), indent=4))
rchatterjee/pwmodels
src/pwmodel/models.py
normalize
python
def normalize(pw): pw_lower = pw.lower() return ''.join(helper.L33T.get(c, c) for c in pw_lower)
Lower-case the password, and change each symbol to its closest character equivalent
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L441-L444
null
import heapq import os import sys import itertools from collections import defaultdict import operator import dawg import functools import pathlib import json import string from . import helper from .fast_fuzzysearch import fast_fuzzysearch TOTALF_W = '\x02__TOTALF__\x03' NPWS_W = '\x02__NPWS__\x03' reserved_words = {TOTALF_W, NPWS_W} # Valid characters for passwords # string.digits + string.ascii_letters + string.punctuation VALID_CHARS = set(string.printable[:-6] + helper.START + helper.END) N_VALID_CHARS = len(VALID_CHARS) def create_model(modelfunc, fname='', listw=[], outfname='', limit=int(3e6), min_pwlen=6, topk=10000, sep=r'\s+'): """:modelfunc: is a function that takes a word and returns its splits. for ngram model this function returns all the ngrams of a word, for PCFG it will return splits of the password. @modelfunc: func: string -> [list of strings] @fname: name of the file to read from @listw: list of passwords. Used passwords from both the files and listw if provided. @outfname: the file to write down the model. """ def length_filter(pw): pw = ''.join(c for c in pw if c in VALID_CHARS) return len(pw) >= min_pwlen pws = [] if fname: pws = helper.open_get_line(fname, limit=limit, pw_filter=length_filter, sep=sep) big_dict = defaultdict(int) total_f, total_e = 0, 0 # Add topk passwords from the input dataset to the list topk_pws = [] for pw, c in itertools.chain(pws, listw): for ng in modelfunc(pw): big_dict[ng] += c total_f += c total_e += 1 if len(big_dict) % 100000 == 0: print(("Dictionary size: {} (Total_freq: {}; Total_pws: {}"\ .format(len(big_dict), total_f, total_e))) if len(topk_pws) >= topk: heapq.heappushpop(topk_pws, (c, pw)) else: heapq.heappush(topk_pws, (c, pw)) # Adding topk password to deal with probability reduction of popular # passwords. 
Mostly effective for n-gram models print("topk={}".format(topk)) if topk > 0: for c, pw in topk_pws: tpw = helper.START + pw + helper.END big_dict[tpw] += c total_f += c total_e += 1 big_dict[NPWS_W] = total_e big_dict[TOTALF_W] = total_f nDawg = dawg.IntCompletionDAWG(big_dict) if not outfname: outfname = 'tmpmodel.dawg.gz' elif not outfname.endswith('.gz'): outfname += '.gz' pathlib.Path(outfname).parent.mkdir(parents=True, exist_ok=True) helper.save_dawg(nDawg, outfname) return nDawg def read_dawg(fname): print(("reading {fname}".format(fname=fname))) return helper.load_dawg(fname, dawg.IntCompletionDAWG) def get_data_path(fname): data_dir = os.path.join(helper.home, '.pwmodel') if helper.DEBUG: data_dir = os.path.join(helper.thisdir, 'data') if not os.path.exists(data_dir): os.mkdir(data_dir) return os.path.join(data_dir, fname) class PwModel(object): def __init__(self, **kwargs): pwfilename = kwargs.get('pwfilename', '') self._leak = os.path.basename(pwfilename).split('-')[0] freshall = kwargs.get('freshall', False) self.modelname = kwargs.get('modelname', 'ngram-3') if not self._leak: self._leak = kwargs.get('leak', 'tmp') freshall = True self._modelf = get_data_path( '{}-{}.dawg.gz'.format(self._leak, self.modelname) ) self._T = None if kwargs.get('T') is not None: self._T = kwargs.get('T') return if freshall: try: os.remove(self._modelf) except OSError as e: print("File ({!r}) does not exist. 
ERROR: {}" .format(self._modelf, e), file=sys.stderr) if self._leak != 'tmp': try: self._T = read_dawg(self._modelf) except IOError as ex: print(("ex={}\nHang on while I am creating the model {!r}!\n" .format(ex, self.modelname))) if self._T is None: self._T = create_model( fname=pwfilename, listw=kwargs.get('listw', []), outfname=self._modelf, modelfunc=kwargs.get('modelfunc', self.modelfunc), limit=int(kwargs.get('limit', 3e6)), topk=kwargs.get('topk', -1), sep=kwargs.get('sep', r'\s+') ) def modelfunc(self, w): raise Exception("Not implemented") def prob(self, word): raise Exception("Not implemented") def qth_pw(self, q): """ returns the qth most probable element in the dawg. """ return heapq.nlargest(q + 2, self._T.iteritems(), key=operator.itemgetter(1))[-1] def get(self, pw): """Returns password probability""" return self.prob(pw) def __str__(self): return 'Pwmodel<{}-{}>'.format(self.modelname, self._leak) def npws(self): return self._T[NPWS_W] def totalf(self): return self._T[TOTALF_W] def leakname(self): return self._leak ################################################################################ MIN_PROB = 1e-10 class PcfgPw(PwModel): """Creates a pcfg model from the password in @pwfilename. """ def __init__(self, pwfilename, **kwargs): kwargs['modelfunc'] = self.pcfgtokensofw kwargs['modelname'] = 'weir-pcfg' kwargs['topk'] = 10000 super(PcfgPw, self).__init__(pwfilename=pwfilename, **kwargs) def pcfgtokensofw(self, word): """This splits the word into chunks similar to as described in Weir et al Oakland'14 paper. 
E.g., >> ngrampw.pcfgtokensofw('password@123') ['password', '@', '123', '__L8__', '__Y1__', '__D3__'] """ tok = helper.tokens(word) sym = ['__{0}{1}__'.format(helper.whatchar(w), len(w)) for w in tok] S = ['__S__' + ''.join(sym).replace('_', '') + '__'] return S + sym + tok def tokprob(self, tok, nonT): """ return P[nonT -> tok], e.g., P[ W3 -> 'abc'] """ p = self._T.get(tok, 0) / float(self._T.get(nonT, 1)) if not p: p = MIN_PROB return p def prob(self, pw): """ Return the probability of pw under the Weir PCFG model. P[{S -> L2D1Y3, L2 -> 'ab', D1 -> '1', Y3 -> '!@#'}] """ tokens = self.pcfgtokensofw(pw) S, tokens = tokens[0], tokens[1:] l = len(tokens) assert l % 2 == 0, "Expecting even number of tokens!. got {}".format(tokens) p = float(self._T.get(S, 0.0)) / sum(v for k, v in self._T.items('__S__')) for i, t in enumerate(tokens): f = self._T.get(t, 0.0) if f == 0: return 0.0 if i < l / 2: p /= f else: p *= f # print pw, p, t, self._T.get(t) return p ################################################################################ class NGramPw(PwModel): """Create a list of ngrams from a file @fname. NOTE: the file must be in password file format. See smaple/pw.txt file for the format. If the file is empty string, then it will try to read the passwords from the @listw which is a list of tuples [(w1, f1), (w2, f2)...]. (It can be an iterator.) @n is again the 'n' of n-gram Writes the ngrams in a file at @outfname. if outfname is empty string, it will print the ngrams on a file named 'ngrams.dawg' :param pwfilename: a `password' file :param n: an integer (NOTE: you should provide a `n`. 
`n` is default to 3) """ def __init__(self, pwfilename='', **kwargs): kwargs['modelfunc'] = self.ngramsofw kwargs['n'] = kwargs.get('n', 3) self._n = kwargs.get('n', 3) kwargs['modelname'] = 'ngram-{}'.format(self._n) kwargs['topk'] = -1 super(NGramPw, self).__init__(pwfilename=pwfilename, **kwargs) self._leet = self._T.compile_replaces(helper.L33T) @functools.lru_cache(maxsize=100000) def sum_freq(self, pre): if not isinstance(pre, str): pre = str(pre) return float(sum(v for k, v in self._T.iteritems(pre))) @functools.lru_cache(maxsize=100000) def get_freq(self, x): """get freq of x with or without L33t transformations """ # This is causing problem with ngram-probabilities. # > pwm.prob('s@f@r!') # > 1.441957095339684 # keys = self._T.similar_keys(x, self._leet) return self._T.get(x, 0.0) # # print("get_freq: {!r} -> {!r}".format(x, keys)) # if len(keys) > 0: # return self._T[keys[0]] # else: # return 0.0 def cprob(self, c, history): """ :param history: string :param c: character P[c | history] = (f(history+c) + 1)/(f(history) + |V|-1) Implement add-1 smoothing with backoff for simplicty. 
TODO: Does it make sense returns P[c | history] """ if not history: return 1 hist = history[:] if len(history) >= self._n: history = history[-(self._n-1):] if not isinstance(history, str): history = str(history) d, n = 0.0, 0.0 while (d == 0.0) and len(history) >= 1: try: d = self.get_freq(history) n = self.get_freq(history + c) except UnicodeDecodeError as e: print(("ERROR:", repr(history), e)) raise e history = history[1:] assert d != 0, "ERROR: Denominator zero!\n" \ "d={} n={} history={!r} c={!r} ({})" \ .format(d, n, hist, c, self._n) return (n + 1) / (d + N_VALID_CHARS-1) def ngramsofw(self, word): return helper.ngramsofw(word, 1, self._n) def _get_next(self, history): """Get the next set of characters and their probabilities""" orig_history = history if not history: return helper.START history = history[-(self._n-1):] while history and not self._T.get(history): history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if not (k in reserved_words or k == history)] total = sum(v for k, v in kv) while total == 0 and len(history) > 0: history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if not (k in reserved_words or k == history)] total = sum(v for k, v in kv) assert total > 0, "Sorry there is no n-gram with {!r}".format(orig_history) d = defaultdict(float) total = self._T.get(history) for k, v in kv: k = k[len(history):] d[k] += (v+1)/(total + N_VALID_CHARS-1) return d def _gen_next(self, history): """Generate next character sampled from the distribution of characters next. 
""" orig_history = history if not history: return helper.START history = history[-(self._n-1):] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) while total == 0 and len(history) > 0: history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) assert total > 0, "Sorry there is no n-gram with {!r}".format(orig_history) _, sampled_k = list(helper.sample_following_dist(kv, 1, total))[0] # print(">>>", repr(sampled_k), len(history)) return sampled_k[len(history)] def sample_pw(self): s = helper.START while s[-1] != helper.END: s += self._gen_next(s) return s[1:-1] def generate_pws_in_order(self, n, filter_func=None, N_max=1e6): """ Generates passwords in order between upto N_max @N_max is the maximum size of the priority queue will be tolerated, so if the size of the queue is bigger than 1.5 * N_max, it will shrink the size to 0.75 * N_max @n is the number of password to generate. **This function is expensive, and shuold be called only if necessary. Cache its call as much as possible** # TODO: Need to recheck how to make sure this is working. 
""" # assert alpha < beta, 'alpha={} must be less than beta={}'.format(alpha, beta) states = [(-1.0, helper.START)] # get the topk first p_min = 1e-9 / (n**2) # max 1 million entries in the heap ret = [] done = set() already_added_in_heap = set() while len(ret) < n and len(states) > 0: # while n > 0 and len(states) > 0: p, s = heapq.heappop(states) if p < 0: p = -p if s in done: continue assert s[0] == helper.START, "Broken s: {!r}".format(s) if s[-1] == helper.END: done.add(s) clean_s = s[1:-1] if filter_func is None or filter_func(clean_s): ret.append((clean_s, p)) # n -= 1 # yield (clean_s, p) else: for c, f in self._get_next(s).items(): if (f*p < p_min or (s+c) in done or (s+c) in already_added_in_heap): continue already_added_in_heap.add(s+c) heapq.heappush(states, (-f*p, s+c)) if len(states) > N_max * 3 / 2: print("Heap size: {}. ret={}. (expected: {}) s={!r}" .format(len(states), len(ret), n, s)) print("The size of states={}. Still need={} pws. Truncating" .format(len(states), n - len(ret))) states = heapq.nsmallest(int(N_max * 3/4), states) print("Done") return ret def _get_largest_prefix(self, pw): s = self._T.prefixes(pw) if not s or len(s[-1]) <= self._n: return ('', 0.0), pw pre = s[-1] rest = pw[len(pre):] pre_prob = self._T.get(pre)/self.totalf() return (pre, pre_prob), rest def _prob(self, pw, given=''): p = 1.0 while pw: (pre, pre_prob), rest_pw = self._get_largest_prefix(pw) # print("pw={!r} given={!r} p={}".format(pw, given, p)) if pre_prob > 0.0: p *= pre_prob pw, given = rest_pw, pre else: try: p *= self.cprob(pw[0], given) pw, given = pw[1:], given+pw[0] except Exception as e: print((repr(pw))) raise e return p @functools.lru_cache(maxsize=100000) def prob(self, pw): new_pw = helper.START + pw + helper.END return self._prob(new_pw) ################################################################################ class HistPw(PwModel): """ Creates a histograms from the given file. Just converts the password file into a .dawg file. 
""" def __init__(self, pwfilename, fuzzysearch=False, **kwargs): kwargs['modelfunc'] = lambda x: [x] kwargs['modelname'] = 'histogram' super(HistPw, self).__init__(pwfilename=pwfilename, **kwargs) self.sep = kwargs.get('sep', r'\s+') self.pwfilename = pwfilename if fuzzysearch: self.ffs = fast_fuzzysearch(self._T.keys(), ed=2) else: self.ffs = None def similarpws(self, pw, ed=2): return self.ffs.query(pw, ed) def probsum(self, pws): """Sum of probs of all passwords in @pws.""" return sum(self.prob(pw) for pw in pws) def prob(self, pw): """ returns the probabiltiy of pw in the model. P[pw] = n(pw)/n(__total__) """ return float(self._T.get(pw, 0)) / self._T[TOTALF_W] def prob_correction(self, f=1): """ Corrects the probability error due to truncating the distribution. """ total = {'rockyou': 32602160} return f * self._T[TOTALF_W] / total.get(self._leak, self._T[TOTALF_W]) def iterpasswords(self, n=-1): return helper.open_get_line(self.pwfilename, limit=n, sep=self.sep) if __name__ == "__main__": import sys if len(sys.argv) == 3: if sys.argv[1] == '-createHpw': pwf = sys.argv[2] pwm = HistPw(pwf, freshall=True) print(pwm) print((pwm.prob('password12'))) elif sys.argv[1] == '-ngramGen': pwf = sys.argv[2] pwm = NGramPw(pwfilename=pwf) # print(pwm.sample_pw()) print(json.dumps(pwm.generate_pws_in_order(1000, filter_func=lambda x: len(x)>6), indent=4))
rchatterjee/pwmodels
src/pwmodel/models.py
PwModel.qth_pw
python
def qth_pw(self, q): return heapq.nlargest(q + 2, self._T.iteritems(), key=operator.itemgetter(1))[-1]
returns the qth most probable element in the dawg.
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L142-L147
null
class PwModel(object): def __init__(self, **kwargs): pwfilename = kwargs.get('pwfilename', '') self._leak = os.path.basename(pwfilename).split('-')[0] freshall = kwargs.get('freshall', False) self.modelname = kwargs.get('modelname', 'ngram-3') if not self._leak: self._leak = kwargs.get('leak', 'tmp') freshall = True self._modelf = get_data_path( '{}-{}.dawg.gz'.format(self._leak, self.modelname) ) self._T = None if kwargs.get('T') is not None: self._T = kwargs.get('T') return if freshall: try: os.remove(self._modelf) except OSError as e: print("File ({!r}) does not exist. ERROR: {}" .format(self._modelf, e), file=sys.stderr) if self._leak != 'tmp': try: self._T = read_dawg(self._modelf) except IOError as ex: print(("ex={}\nHang on while I am creating the model {!r}!\n" .format(ex, self.modelname))) if self._T is None: self._T = create_model( fname=pwfilename, listw=kwargs.get('listw', []), outfname=self._modelf, modelfunc=kwargs.get('modelfunc', self.modelfunc), limit=int(kwargs.get('limit', 3e6)), topk=kwargs.get('topk', -1), sep=kwargs.get('sep', r'\s+') ) def modelfunc(self, w): raise Exception("Not implemented") def prob(self, word): raise Exception("Not implemented") def get(self, pw): """Returns password probability""" return self.prob(pw) def __str__(self): return 'Pwmodel<{}-{}>'.format(self.modelname, self._leak) def npws(self): return self._T[NPWS_W] def totalf(self): return self._T[TOTALF_W] def leakname(self): return self._leak
rchatterjee/pwmodels
src/pwmodel/models.py
PcfgPw.pcfgtokensofw
python
def pcfgtokensofw(self, word): tok = helper.tokens(word) sym = ['__{0}{1}__'.format(helper.whatchar(w), len(w)) for w in tok] S = ['__S__' + ''.join(sym).replace('_', '') + '__'] return S + sym + tok
This splits the word into chunks similar to as described in Weir et al Oakland'14 paper. E.g., >> ngrampw.pcfgtokensofw('password@123') ['password', '@', '123', '__L8__', '__Y1__', '__D3__']
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L180-L193
[ "def tokens(w):\n T = []\n while w:\n m = re.match(regex, w)\n T.append(m.group(0))\n w = w[len(T[-1]):]\n return T\n" ]
class PcfgPw(PwModel): """Creates a pcfg model from the password in @pwfilename. """ def __init__(self, pwfilename, **kwargs): kwargs['modelfunc'] = self.pcfgtokensofw kwargs['modelname'] = 'weir-pcfg' kwargs['topk'] = 10000 super(PcfgPw, self).__init__(pwfilename=pwfilename, **kwargs) def tokprob(self, tok, nonT): """ return P[nonT -> tok], e.g., P[ W3 -> 'abc'] """ p = self._T.get(tok, 0) / float(self._T.get(nonT, 1)) if not p: p = MIN_PROB return p def prob(self, pw): """ Return the probability of pw under the Weir PCFG model. P[{S -> L2D1Y3, L2 -> 'ab', D1 -> '1', Y3 -> '!@#'}] """ tokens = self.pcfgtokensofw(pw) S, tokens = tokens[0], tokens[1:] l = len(tokens) assert l % 2 == 0, "Expecting even number of tokens!. got {}".format(tokens) p = float(self._T.get(S, 0.0)) / sum(v for k, v in self._T.items('__S__')) for i, t in enumerate(tokens): f = self._T.get(t, 0.0) if f == 0: return 0.0 if i < l / 2: p /= f else: p *= f # print pw, p, t, self._T.get(t) return p
rchatterjee/pwmodels
src/pwmodel/models.py
PcfgPw.tokprob
python
def tokprob(self, tok, nonT): p = self._T.get(tok, 0) / float(self._T.get(nonT, 1)) if not p: p = MIN_PROB return p
return P[nonT -> tok], e.g., P[ W3 -> 'abc']
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L195-L204
null
class PcfgPw(PwModel): """Creates a pcfg model from the password in @pwfilename. """ def __init__(self, pwfilename, **kwargs): kwargs['modelfunc'] = self.pcfgtokensofw kwargs['modelname'] = 'weir-pcfg' kwargs['topk'] = 10000 super(PcfgPw, self).__init__(pwfilename=pwfilename, **kwargs) def pcfgtokensofw(self, word): """This splits the word into chunks similar to as described in Weir et al Oakland'14 paper. E.g., >> ngrampw.pcfgtokensofw('password@123') ['password', '@', '123', '__L8__', '__Y1__', '__D3__'] """ tok = helper.tokens(word) sym = ['__{0}{1}__'.format(helper.whatchar(w), len(w)) for w in tok] S = ['__S__' + ''.join(sym).replace('_', '') + '__'] return S + sym + tok def prob(self, pw): """ Return the probability of pw under the Weir PCFG model. P[{S -> L2D1Y3, L2 -> 'ab', D1 -> '1', Y3 -> '!@#'}] """ tokens = self.pcfgtokensofw(pw) S, tokens = tokens[0], tokens[1:] l = len(tokens) assert l % 2 == 0, "Expecting even number of tokens!. got {}".format(tokens) p = float(self._T.get(S, 0.0)) / sum(v for k, v in self._T.items('__S__')) for i, t in enumerate(tokens): f = self._T.get(t, 0.0) if f == 0: return 0.0 if i < l / 2: p /= f else: p *= f # print pw, p, t, self._T.get(t) return p
rchatterjee/pwmodels
src/pwmodel/models.py
PcfgPw.prob
python
def prob(self, pw): tokens = self.pcfgtokensofw(pw) S, tokens = tokens[0], tokens[1:] l = len(tokens) assert l % 2 == 0, "Expecting even number of tokens!. got {}".format(tokens) p = float(self._T.get(S, 0.0)) / sum(v for k, v in self._T.items('__S__')) for i, t in enumerate(tokens): f = self._T.get(t, 0.0) if f == 0: return 0.0 if i < l / 2: p /= f else: p *= f # print pw, p, t, self._T.get(t) return p
Return the probability of pw under the Weir PCFG model. P[{S -> L2D1Y3, L2 -> 'ab', D1 -> '1', Y3 -> '!@#'}]
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L206-L227
[ "def pcfgtokensofw(self, word):\n \"\"\"This splits the word into chunks similar to as described in Weir\n et al Oakland'14 paper.\n E.g.,\n >> ngrampw.pcfgtokensofw('password@123')\n ['password', '@', '123', '__L8__', '__Y1__', '__D3__']\n\n \"\"\"\n tok = helper.tokens(word)\n\n sym = ['__...
class PcfgPw(PwModel): """Creates a pcfg model from the password in @pwfilename. """ def __init__(self, pwfilename, **kwargs): kwargs['modelfunc'] = self.pcfgtokensofw kwargs['modelname'] = 'weir-pcfg' kwargs['topk'] = 10000 super(PcfgPw, self).__init__(pwfilename=pwfilename, **kwargs) def pcfgtokensofw(self, word): """This splits the word into chunks similar to as described in Weir et al Oakland'14 paper. E.g., >> ngrampw.pcfgtokensofw('password@123') ['password', '@', '123', '__L8__', '__Y1__', '__D3__'] """ tok = helper.tokens(word) sym = ['__{0}{1}__'.format(helper.whatchar(w), len(w)) for w in tok] S = ['__S__' + ''.join(sym).replace('_', '') + '__'] return S + sym + tok def tokprob(self, tok, nonT): """ return P[nonT -> tok], e.g., P[ W3 -> 'abc'] """ p = self._T.get(tok, 0) / float(self._T.get(nonT, 1)) if not p: p = MIN_PROB return p
rchatterjee/pwmodels
src/pwmodel/models.py
NGramPw.cprob
python
def cprob(self, c, history): if not history: return 1 hist = history[:] if len(history) >= self._n: history = history[-(self._n-1):] if not isinstance(history, str): history = str(history) d, n = 0.0, 0.0 while (d == 0.0) and len(history) >= 1: try: d = self.get_freq(history) n = self.get_freq(history + c) except UnicodeDecodeError as e: print(("ERROR:", repr(history), e)) raise e history = history[1:] assert d != 0, "ERROR: Denominator zero!\n" \ "d={} n={} history={!r} c={!r} ({})" \ .format(d, n, hist, c, self._n) return (n + 1) / (d + N_VALID_CHARS-1)
:param history: string :param c: character P[c | history] = (f(history+c) + 1)/(f(history) + |V|-1) Implement add-1 smoothing with backoff for simplicty. TODO: Does it make sense returns P[c | history]
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L276-L306
null
class NGramPw(PwModel): """Create a list of ngrams from a file @fname. NOTE: the file must be in password file format. See smaple/pw.txt file for the format. If the file is empty string, then it will try to read the passwords from the @listw which is a list of tuples [(w1, f1), (w2, f2)...]. (It can be an iterator.) @n is again the 'n' of n-gram Writes the ngrams in a file at @outfname. if outfname is empty string, it will print the ngrams on a file named 'ngrams.dawg' :param pwfilename: a `password' file :param n: an integer (NOTE: you should provide a `n`. `n` is default to 3) """ def __init__(self, pwfilename='', **kwargs): kwargs['modelfunc'] = self.ngramsofw kwargs['n'] = kwargs.get('n', 3) self._n = kwargs.get('n', 3) kwargs['modelname'] = 'ngram-{}'.format(self._n) kwargs['topk'] = -1 super(NGramPw, self).__init__(pwfilename=pwfilename, **kwargs) self._leet = self._T.compile_replaces(helper.L33T) @functools.lru_cache(maxsize=100000) def sum_freq(self, pre): if not isinstance(pre, str): pre = str(pre) return float(sum(v for k, v in self._T.iteritems(pre))) @functools.lru_cache(maxsize=100000) def get_freq(self, x): """get freq of x with or without L33t transformations """ # This is causing problem with ngram-probabilities. 
# > pwm.prob('s@f@r!') # > 1.441957095339684 # keys = self._T.similar_keys(x, self._leet) return self._T.get(x, 0.0) # # print("get_freq: {!r} -> {!r}".format(x, keys)) # if len(keys) > 0: # return self._T[keys[0]] # else: # return 0.0 def ngramsofw(self, word): return helper.ngramsofw(word, 1, self._n) def _get_next(self, history): """Get the next set of characters and their probabilities""" orig_history = history if not history: return helper.START history = history[-(self._n-1):] while history and not self._T.get(history): history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if not (k in reserved_words or k == history)] total = sum(v for k, v in kv) while total == 0 and len(history) > 0: history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if not (k in reserved_words or k == history)] total = sum(v for k, v in kv) assert total > 0, "Sorry there is no n-gram with {!r}".format(orig_history) d = defaultdict(float) total = self._T.get(history) for k, v in kv: k = k[len(history):] d[k] += (v+1)/(total + N_VALID_CHARS-1) return d def _gen_next(self, history): """Generate next character sampled from the distribution of characters next. 
""" orig_history = history if not history: return helper.START history = history[-(self._n-1):] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) while total == 0 and len(history) > 0: history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) assert total > 0, "Sorry there is no n-gram with {!r}".format(orig_history) _, sampled_k = list(helper.sample_following_dist(kv, 1, total))[0] # print(">>>", repr(sampled_k), len(history)) return sampled_k[len(history)] def sample_pw(self): s = helper.START while s[-1] != helper.END: s += self._gen_next(s) return s[1:-1] def generate_pws_in_order(self, n, filter_func=None, N_max=1e6): """ Generates passwords in order between upto N_max @N_max is the maximum size of the priority queue will be tolerated, so if the size of the queue is bigger than 1.5 * N_max, it will shrink the size to 0.75 * N_max @n is the number of password to generate. **This function is expensive, and shuold be called only if necessary. Cache its call as much as possible** # TODO: Need to recheck how to make sure this is working. 
""" # assert alpha < beta, 'alpha={} must be less than beta={}'.format(alpha, beta) states = [(-1.0, helper.START)] # get the topk first p_min = 1e-9 / (n**2) # max 1 million entries in the heap ret = [] done = set() already_added_in_heap = set() while len(ret) < n and len(states) > 0: # while n > 0 and len(states) > 0: p, s = heapq.heappop(states) if p < 0: p = -p if s in done: continue assert s[0] == helper.START, "Broken s: {!r}".format(s) if s[-1] == helper.END: done.add(s) clean_s = s[1:-1] if filter_func is None or filter_func(clean_s): ret.append((clean_s, p)) # n -= 1 # yield (clean_s, p) else: for c, f in self._get_next(s).items(): if (f*p < p_min or (s+c) in done or (s+c) in already_added_in_heap): continue already_added_in_heap.add(s+c) heapq.heappush(states, (-f*p, s+c)) if len(states) > N_max * 3 / 2: print("Heap size: {}. ret={}. (expected: {}) s={!r}" .format(len(states), len(ret), n, s)) print("The size of states={}. Still need={} pws. Truncating" .format(len(states), n - len(ret))) states = heapq.nsmallest(int(N_max * 3/4), states) print("Done") return ret def _get_largest_prefix(self, pw): s = self._T.prefixes(pw) if not s or len(s[-1]) <= self._n: return ('', 0.0), pw pre = s[-1] rest = pw[len(pre):] pre_prob = self._T.get(pre)/self.totalf() return (pre, pre_prob), rest def _prob(self, pw, given=''): p = 1.0 while pw: (pre, pre_prob), rest_pw = self._get_largest_prefix(pw) # print("pw={!r} given={!r} p={}".format(pw, given, p)) if pre_prob > 0.0: p *= pre_prob pw, given = rest_pw, pre else: try: p *= self.cprob(pw[0], given) pw, given = pw[1:], given+pw[0] except Exception as e: print((repr(pw))) raise e return p @functools.lru_cache(maxsize=100000) def prob(self, pw): new_pw = helper.START + pw + helper.END return self._prob(new_pw)
rchatterjee/pwmodels
src/pwmodel/models.py
NGramPw._get_next
python
def _get_next(self, history): orig_history = history if not history: return helper.START history = history[-(self._n-1):] while history and not self._T.get(history): history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if not (k in reserved_words or k == history)] total = sum(v for k, v in kv) while total == 0 and len(history) > 0: history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if not (k in reserved_words or k == history)] total = sum(v for k, v in kv) assert total > 0, "Sorry there is no n-gram with {!r}".format(orig_history) d = defaultdict(float) total = self._T.get(history) for k, v in kv: k = k[len(history):] d[k] += (v+1)/(total + N_VALID_CHARS-1) return d
Get the next set of characters and their probabilities
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L311-L333
null
class NGramPw(PwModel): """Create a list of ngrams from a file @fname. NOTE: the file must be in password file format. See smaple/pw.txt file for the format. If the file is empty string, then it will try to read the passwords from the @listw which is a list of tuples [(w1, f1), (w2, f2)...]. (It can be an iterator.) @n is again the 'n' of n-gram Writes the ngrams in a file at @outfname. if outfname is empty string, it will print the ngrams on a file named 'ngrams.dawg' :param pwfilename: a `password' file :param n: an integer (NOTE: you should provide a `n`. `n` is default to 3) """ def __init__(self, pwfilename='', **kwargs): kwargs['modelfunc'] = self.ngramsofw kwargs['n'] = kwargs.get('n', 3) self._n = kwargs.get('n', 3) kwargs['modelname'] = 'ngram-{}'.format(self._n) kwargs['topk'] = -1 super(NGramPw, self).__init__(pwfilename=pwfilename, **kwargs) self._leet = self._T.compile_replaces(helper.L33T) @functools.lru_cache(maxsize=100000) def sum_freq(self, pre): if not isinstance(pre, str): pre = str(pre) return float(sum(v for k, v in self._T.iteritems(pre))) @functools.lru_cache(maxsize=100000) def get_freq(self, x): """get freq of x with or without L33t transformations """ # This is causing problem with ngram-probabilities. # > pwm.prob('s@f@r!') # > 1.441957095339684 # keys = self._T.similar_keys(x, self._leet) return self._T.get(x, 0.0) # # print("get_freq: {!r} -> {!r}".format(x, keys)) # if len(keys) > 0: # return self._T[keys[0]] # else: # return 0.0 def cprob(self, c, history): """ :param history: string :param c: character P[c | history] = (f(history+c) + 1)/(f(history) + |V|-1) Implement add-1 smoothing with backoff for simplicty. 
TODO: Does it make sense returns P[c | history] """ if not history: return 1 hist = history[:] if len(history) >= self._n: history = history[-(self._n-1):] if not isinstance(history, str): history = str(history) d, n = 0.0, 0.0 while (d == 0.0) and len(history) >= 1: try: d = self.get_freq(history) n = self.get_freq(history + c) except UnicodeDecodeError as e: print(("ERROR:", repr(history), e)) raise e history = history[1:] assert d != 0, "ERROR: Denominator zero!\n" \ "d={} n={} history={!r} c={!r} ({})" \ .format(d, n, hist, c, self._n) return (n + 1) / (d + N_VALID_CHARS-1) def ngramsofw(self, word): return helper.ngramsofw(word, 1, self._n) def _gen_next(self, history): """Generate next character sampled from the distribution of characters next. """ orig_history = history if not history: return helper.START history = history[-(self._n-1):] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) while total == 0 and len(history) > 0: history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) assert total > 0, "Sorry there is no n-gram with {!r}".format(orig_history) _, sampled_k = list(helper.sample_following_dist(kv, 1, total))[0] # print(">>>", repr(sampled_k), len(history)) return sampled_k[len(history)] def sample_pw(self): s = helper.START while s[-1] != helper.END: s += self._gen_next(s) return s[1:-1] def generate_pws_in_order(self, n, filter_func=None, N_max=1e6): """ Generates passwords in order between upto N_max @N_max is the maximum size of the priority queue will be tolerated, so if the size of the queue is bigger than 1.5 * N_max, it will shrink the size to 0.75 * N_max @n is the number of password to generate. **This function is expensive, and shuold be called only if necessary. Cache its call as much as possible** # TODO: Need to recheck how to make sure this is working. 
""" # assert alpha < beta, 'alpha={} must be less than beta={}'.format(alpha, beta) states = [(-1.0, helper.START)] # get the topk first p_min = 1e-9 / (n**2) # max 1 million entries in the heap ret = [] done = set() already_added_in_heap = set() while len(ret) < n and len(states) > 0: # while n > 0 and len(states) > 0: p, s = heapq.heappop(states) if p < 0: p = -p if s in done: continue assert s[0] == helper.START, "Broken s: {!r}".format(s) if s[-1] == helper.END: done.add(s) clean_s = s[1:-1] if filter_func is None or filter_func(clean_s): ret.append((clean_s, p)) # n -= 1 # yield (clean_s, p) else: for c, f in self._get_next(s).items(): if (f*p < p_min or (s+c) in done or (s+c) in already_added_in_heap): continue already_added_in_heap.add(s+c) heapq.heappush(states, (-f*p, s+c)) if len(states) > N_max * 3 / 2: print("Heap size: {}. ret={}. (expected: {}) s={!r}" .format(len(states), len(ret), n, s)) print("The size of states={}. Still need={} pws. Truncating" .format(len(states), n - len(ret))) states = heapq.nsmallest(int(N_max * 3/4), states) print("Done") return ret def _get_largest_prefix(self, pw): s = self._T.prefixes(pw) if not s or len(s[-1]) <= self._n: return ('', 0.0), pw pre = s[-1] rest = pw[len(pre):] pre_prob = self._T.get(pre)/self.totalf() return (pre, pre_prob), rest def _prob(self, pw, given=''): p = 1.0 while pw: (pre, pre_prob), rest_pw = self._get_largest_prefix(pw) # print("pw={!r} given={!r} p={}".format(pw, given, p)) if pre_prob > 0.0: p *= pre_prob pw, given = rest_pw, pre else: try: p *= self.cprob(pw[0], given) pw, given = pw[1:], given+pw[0] except Exception as e: print((repr(pw))) raise e return p @functools.lru_cache(maxsize=100000) def prob(self, pw): new_pw = helper.START + pw + helper.END return self._prob(new_pw)
rchatterjee/pwmodels
src/pwmodel/models.py
NGramPw._gen_next
python
def _gen_next(self, history): orig_history = history if not history: return helper.START history = history[-(self._n-1):] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) while total == 0 and len(history) > 0: history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) assert total > 0, "Sorry there is no n-gram with {!r}".format(orig_history) _, sampled_k = list(helper.sample_following_dist(kv, 1, total))[0] # print(">>>", repr(sampled_k), len(history)) return sampled_k[len(history)]
Generate next character sampled from the distribution of characters next.
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L335-L353
null
class NGramPw(PwModel): """Create a list of ngrams from a file @fname. NOTE: the file must be in password file format. See smaple/pw.txt file for the format. If the file is empty string, then it will try to read the passwords from the @listw which is a list of tuples [(w1, f1), (w2, f2)...]. (It can be an iterator.) @n is again the 'n' of n-gram Writes the ngrams in a file at @outfname. if outfname is empty string, it will print the ngrams on a file named 'ngrams.dawg' :param pwfilename: a `password' file :param n: an integer (NOTE: you should provide a `n`. `n` is default to 3) """ def __init__(self, pwfilename='', **kwargs): kwargs['modelfunc'] = self.ngramsofw kwargs['n'] = kwargs.get('n', 3) self._n = kwargs.get('n', 3) kwargs['modelname'] = 'ngram-{}'.format(self._n) kwargs['topk'] = -1 super(NGramPw, self).__init__(pwfilename=pwfilename, **kwargs) self._leet = self._T.compile_replaces(helper.L33T) @functools.lru_cache(maxsize=100000) def sum_freq(self, pre): if not isinstance(pre, str): pre = str(pre) return float(sum(v for k, v in self._T.iteritems(pre))) @functools.lru_cache(maxsize=100000) def get_freq(self, x): """get freq of x with or without L33t transformations """ # This is causing problem with ngram-probabilities. # > pwm.prob('s@f@r!') # > 1.441957095339684 # keys = self._T.similar_keys(x, self._leet) return self._T.get(x, 0.0) # # print("get_freq: {!r} -> {!r}".format(x, keys)) # if len(keys) > 0: # return self._T[keys[0]] # else: # return 0.0 def cprob(self, c, history): """ :param history: string :param c: character P[c | history] = (f(history+c) + 1)/(f(history) + |V|-1) Implement add-1 smoothing with backoff for simplicty. 
TODO: Does it make sense returns P[c | history] """ if not history: return 1 hist = history[:] if len(history) >= self._n: history = history[-(self._n-1):] if not isinstance(history, str): history = str(history) d, n = 0.0, 0.0 while (d == 0.0) and len(history) >= 1: try: d = self.get_freq(history) n = self.get_freq(history + c) except UnicodeDecodeError as e: print(("ERROR:", repr(history), e)) raise e history = history[1:] assert d != 0, "ERROR: Denominator zero!\n" \ "d={} n={} history={!r} c={!r} ({})" \ .format(d, n, hist, c, self._n) return (n + 1) / (d + N_VALID_CHARS-1) def ngramsofw(self, word): return helper.ngramsofw(word, 1, self._n) def _get_next(self, history): """Get the next set of characters and their probabilities""" orig_history = history if not history: return helper.START history = history[-(self._n-1):] while history and not self._T.get(history): history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if not (k in reserved_words or k == history)] total = sum(v for k, v in kv) while total == 0 and len(history) > 0: history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if not (k in reserved_words or k == history)] total = sum(v for k, v in kv) assert total > 0, "Sorry there is no n-gram with {!r}".format(orig_history) d = defaultdict(float) total = self._T.get(history) for k, v in kv: k = k[len(history):] d[k] += (v+1)/(total + N_VALID_CHARS-1) return d def sample_pw(self): s = helper.START while s[-1] != helper.END: s += self._gen_next(s) return s[1:-1] def generate_pws_in_order(self, n, filter_func=None, N_max=1e6): """ Generates passwords in order between upto N_max @N_max is the maximum size of the priority queue will be tolerated, so if the size of the queue is bigger than 1.5 * N_max, it will shrink the size to 0.75 * N_max @n is the number of password to generate. **This function is expensive, and shuold be called only if necessary. 
Cache its call as much as possible** # TODO: Need to recheck how to make sure this is working. """ # assert alpha < beta, 'alpha={} must be less than beta={}'.format(alpha, beta) states = [(-1.0, helper.START)] # get the topk first p_min = 1e-9 / (n**2) # max 1 million entries in the heap ret = [] done = set() already_added_in_heap = set() while len(ret) < n and len(states) > 0: # while n > 0 and len(states) > 0: p, s = heapq.heappop(states) if p < 0: p = -p if s in done: continue assert s[0] == helper.START, "Broken s: {!r}".format(s) if s[-1] == helper.END: done.add(s) clean_s = s[1:-1] if filter_func is None or filter_func(clean_s): ret.append((clean_s, p)) # n -= 1 # yield (clean_s, p) else: for c, f in self._get_next(s).items(): if (f*p < p_min or (s+c) in done or (s+c) in already_added_in_heap): continue already_added_in_heap.add(s+c) heapq.heappush(states, (-f*p, s+c)) if len(states) > N_max * 3 / 2: print("Heap size: {}. ret={}. (expected: {}) s={!r}" .format(len(states), len(ret), n, s)) print("The size of states={}. Still need={} pws. Truncating" .format(len(states), n - len(ret))) states = heapq.nsmallest(int(N_max * 3/4), states) print("Done") return ret def _get_largest_prefix(self, pw): s = self._T.prefixes(pw) if not s or len(s[-1]) <= self._n: return ('', 0.0), pw pre = s[-1] rest = pw[len(pre):] pre_prob = self._T.get(pre)/self.totalf() return (pre, pre_prob), rest def _prob(self, pw, given=''): p = 1.0 while pw: (pre, pre_prob), rest_pw = self._get_largest_prefix(pw) # print("pw={!r} given={!r} p={}".format(pw, given, p)) if pre_prob > 0.0: p *= pre_prob pw, given = rest_pw, pre else: try: p *= self.cprob(pw[0], given) pw, given = pw[1:], given+pw[0] except Exception as e: print((repr(pw))) raise e return p @functools.lru_cache(maxsize=100000) def prob(self, pw): new_pw = helper.START + pw + helper.END return self._prob(new_pw)
rchatterjee/pwmodels
src/pwmodel/models.py
NGramPw.generate_pws_in_order
python
def generate_pws_in_order(self, n, filter_func=None, N_max=1e6): # assert alpha < beta, 'alpha={} must be less than beta={}'.format(alpha, beta) states = [(-1.0, helper.START)] # get the topk first p_min = 1e-9 / (n**2) # max 1 million entries in the heap ret = [] done = set() already_added_in_heap = set() while len(ret) < n and len(states) > 0: # while n > 0 and len(states) > 0: p, s = heapq.heappop(states) if p < 0: p = -p if s in done: continue assert s[0] == helper.START, "Broken s: {!r}".format(s) if s[-1] == helper.END: done.add(s) clean_s = s[1:-1] if filter_func is None or filter_func(clean_s): ret.append((clean_s, p)) # n -= 1 # yield (clean_s, p) else: for c, f in self._get_next(s).items(): if (f*p < p_min or (s+c) in done or (s+c) in already_added_in_heap): continue already_added_in_heap.add(s+c) heapq.heappush(states, (-f*p, s+c)) if len(states) > N_max * 3 / 2: print("Heap size: {}. ret={}. (expected: {}) s={!r}" .format(len(states), len(ret), n, s)) print("The size of states={}. Still need={} pws. Truncating" .format(len(states), n - len(ret))) states = heapq.nsmallest(int(N_max * 3/4), states) print("Done") return ret
Generates passwords in order between upto N_max @N_max is the maximum size of the priority queue will be tolerated, so if the size of the queue is bigger than 1.5 * N_max, it will shrink the size to 0.75 * N_max @n is the number of password to generate. **This function is expensive, and shuold be called only if necessary. Cache its call as much as possible** # TODO: Need to recheck how to make sure this is working.
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L361-L407
[ "pws = pwm.generate_pws_in_order(int(sys.argv[1]), filter_func=lambda x: 6 <= len(x) <= 30)\n", "def _get_next(self, history):\n \"\"\"Get the next set of characters and their probabilities\"\"\"\n orig_history = history\n if not history:\n return helper.START\n history = history[-(self._n-1):]...
class NGramPw(PwModel): """Create a list of ngrams from a file @fname. NOTE: the file must be in password file format. See smaple/pw.txt file for the format. If the file is empty string, then it will try to read the passwords from the @listw which is a list of tuples [(w1, f1), (w2, f2)...]. (It can be an iterator.) @n is again the 'n' of n-gram Writes the ngrams in a file at @outfname. if outfname is empty string, it will print the ngrams on a file named 'ngrams.dawg' :param pwfilename: a `password' file :param n: an integer (NOTE: you should provide a `n`. `n` is default to 3) """ def __init__(self, pwfilename='', **kwargs): kwargs['modelfunc'] = self.ngramsofw kwargs['n'] = kwargs.get('n', 3) self._n = kwargs.get('n', 3) kwargs['modelname'] = 'ngram-{}'.format(self._n) kwargs['topk'] = -1 super(NGramPw, self).__init__(pwfilename=pwfilename, **kwargs) self._leet = self._T.compile_replaces(helper.L33T) @functools.lru_cache(maxsize=100000) def sum_freq(self, pre): if not isinstance(pre, str): pre = str(pre) return float(sum(v for k, v in self._T.iteritems(pre))) @functools.lru_cache(maxsize=100000) def get_freq(self, x): """get freq of x with or without L33t transformations """ # This is causing problem with ngram-probabilities. # > pwm.prob('s@f@r!') # > 1.441957095339684 # keys = self._T.similar_keys(x, self._leet) return self._T.get(x, 0.0) # # print("get_freq: {!r} -> {!r}".format(x, keys)) # if len(keys) > 0: # return self._T[keys[0]] # else: # return 0.0 def cprob(self, c, history): """ :param history: string :param c: character P[c | history] = (f(history+c) + 1)/(f(history) + |V|-1) Implement add-1 smoothing with backoff for simplicty. 
TODO: Does it make sense returns P[c | history] """ if not history: return 1 hist = history[:] if len(history) >= self._n: history = history[-(self._n-1):] if not isinstance(history, str): history = str(history) d, n = 0.0, 0.0 while (d == 0.0) and len(history) >= 1: try: d = self.get_freq(history) n = self.get_freq(history + c) except UnicodeDecodeError as e: print(("ERROR:", repr(history), e)) raise e history = history[1:] assert d != 0, "ERROR: Denominator zero!\n" \ "d={} n={} history={!r} c={!r} ({})" \ .format(d, n, hist, c, self._n) return (n + 1) / (d + N_VALID_CHARS-1) def ngramsofw(self, word): return helper.ngramsofw(word, 1, self._n) def _get_next(self, history): """Get the next set of characters and their probabilities""" orig_history = history if not history: return helper.START history = history[-(self._n-1):] while history and not self._T.get(history): history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if not (k in reserved_words or k == history)] total = sum(v for k, v in kv) while total == 0 and len(history) > 0: history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if not (k in reserved_words or k == history)] total = sum(v for k, v in kv) assert total > 0, "Sorry there is no n-gram with {!r}".format(orig_history) d = defaultdict(float) total = self._T.get(history) for k, v in kv: k = k[len(history):] d[k] += (v+1)/(total + N_VALID_CHARS-1) return d def _gen_next(self, history): """Generate next character sampled from the distribution of characters next. 
""" orig_history = history if not history: return helper.START history = history[-(self._n-1):] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) while total == 0 and len(history) > 0: history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) assert total > 0, "Sorry there is no n-gram with {!r}".format(orig_history) _, sampled_k = list(helper.sample_following_dist(kv, 1, total))[0] # print(">>>", repr(sampled_k), len(history)) return sampled_k[len(history)] def sample_pw(self): s = helper.START while s[-1] != helper.END: s += self._gen_next(s) return s[1:-1] def _get_largest_prefix(self, pw): s = self._T.prefixes(pw) if not s or len(s[-1]) <= self._n: return ('', 0.0), pw pre = s[-1] rest = pw[len(pre):] pre_prob = self._T.get(pre)/self.totalf() return (pre, pre_prob), rest def _prob(self, pw, given=''): p = 1.0 while pw: (pre, pre_prob), rest_pw = self._get_largest_prefix(pw) # print("pw={!r} given={!r} p={}".format(pw, given, p)) if pre_prob > 0.0: p *= pre_prob pw, given = rest_pw, pre else: try: p *= self.cprob(pw[0], given) pw, given = pw[1:], given+pw[0] except Exception as e: print((repr(pw))) raise e return p @functools.lru_cache(maxsize=100000) def prob(self, pw): new_pw = helper.START + pw + helper.END return self._prob(new_pw)
rchatterjee/pwmodels
src/pwmodel/models.py
HistPw.prob
python
def prob(self, pw): return float(self._T.get(pw, 0)) / self._T[TOTALF_W]
returns the probabiltiy of pw in the model. P[pw] = n(pw)/n(__total__)
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L474-L479
null
class HistPw(PwModel): """ Creates a histograms from the given file. Just converts the password file into a .dawg file. """ def __init__(self, pwfilename, fuzzysearch=False, **kwargs): kwargs['modelfunc'] = lambda x: [x] kwargs['modelname'] = 'histogram' super(HistPw, self).__init__(pwfilename=pwfilename, **kwargs) self.sep = kwargs.get('sep', r'\s+') self.pwfilename = pwfilename if fuzzysearch: self.ffs = fast_fuzzysearch(self._T.keys(), ed=2) else: self.ffs = None def similarpws(self, pw, ed=2): return self.ffs.query(pw, ed) def probsum(self, pws): """Sum of probs of all passwords in @pws.""" return sum(self.prob(pw) for pw in pws) def prob_correction(self, f=1): """ Corrects the probability error due to truncating the distribution. """ total = {'rockyou': 32602160} return f * self._T[TOTALF_W] / total.get(self._leak, self._T[TOTALF_W]) def iterpasswords(self, n=-1): return helper.open_get_line(self.pwfilename, limit=n, sep=self.sep)
rchatterjee/pwmodels
src/pwmodel/models.py
HistPw.prob_correction
python
def prob_correction(self, f=1): total = {'rockyou': 32602160} return f * self._T[TOTALF_W] / total.get(self._leak, self._T[TOTALF_W])
Corrects the probability error due to truncating the distribution.
train
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L481-L486
null
class HistPw(PwModel): """ Creates a histograms from the given file. Just converts the password file into a .dawg file. """ def __init__(self, pwfilename, fuzzysearch=False, **kwargs): kwargs['modelfunc'] = lambda x: [x] kwargs['modelname'] = 'histogram' super(HistPw, self).__init__(pwfilename=pwfilename, **kwargs) self.sep = kwargs.get('sep', r'\s+') self.pwfilename = pwfilename if fuzzysearch: self.ffs = fast_fuzzysearch(self._T.keys(), ed=2) else: self.ffs = None def similarpws(self, pw, ed=2): return self.ffs.query(pw, ed) def probsum(self, pws): """Sum of probs of all passwords in @pws.""" return sum(self.prob(pw) for pw in pws) def prob(self, pw): """ returns the probabiltiy of pw in the model. P[pw] = n(pw)/n(__total__) """ return float(self._T.get(pw, 0)) / self._T[TOTALF_W] def iterpasswords(self, n=-1): return helper.open_get_line(self.pwfilename, limit=n, sep=self.sep)
kevinconway/venvctrl
venvctrl/venv/create.py
CreateMixin.create
python
def create(self, python=None, system_site=False, always_copy=False): command = 'virtualenv' if python: command = '{0} --python={1}'.format(command, python) if system_site: command = '{0} --system-site-packages'.format(command) if always_copy: command = '{0} --always-copy'.format(command) command = '{0} {1}'.format(command, self.path) self._execute(command)
Create a new virtual environment. Args: python (str): The name or path of a python interpreter to use while creating the virtual environment. system_site (bool): Whether or not use use the system site packages within the virtual environment. Default is False. always_copy (bool): Whether or not to force copying instead of symlinking in the virtual environment. Default is False.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/create.py#L16-L41
[ "def _execute(cmd):\n \"\"\"Run a command in a subshell.\"\"\"\n cmd_parts = shlex.split(cmd)\n if sys.version_info[0] < 3:\n\n cmd_parts = shlex.split(cmd.encode('ascii'))\n\n proc = subprocess.Popen(\n cmd_parts,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n ...
class CreateMixin(object): """Can create new virtual environments. This mixin requires the command mixin. """
kevinconway/venvctrl
venvctrl/cli/relocate.py
relocate
python
def relocate(source, destination, move=False): venv = api.VirtualEnvironment(source) if not move: venv.relocate(destination) return None venv.move(destination) return None
Adjust the virtual environment settings and optional move it. Args: source (str): Path to the existing virtual environment. destination (str): Desired path of the virtual environment. move (bool): Whether or not to actually move the files. Default False.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/cli/relocate.py#L13-L28
[ "def relocate(self, destination):\n \"\"\"Configure the virtual environment for another path.\n\n Args:\n destination (str): The target path of the virtual environment.\n\n Note:\n This does not actually move the virtual environment. Is only\n rewrites the metadata required to support ...
"""Relocate a virtual environment.""" from __future__ import division from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals import argparse from .. import api def main(): """Relocate a virtual environment.""" parser = argparse.ArgumentParser( description='Relocate a virtual environment.' ) parser.add_argument( '--source', help='The existing virtual environment.', required=True, ) parser.add_argument( '--destination', help='The location for which to configure the virtual environment.', required=True, ) parser.add_argument( '--move', help='Move the virtual environment to the destination.', default=False, action='store_true', ) args = parser.parse_args() relocate(args.source, args.destination, args.move) if __name__ == '__main__': main()
kevinconway/venvctrl
venvctrl/cli/relocate.py
main
python
def main(): parser = argparse.ArgumentParser( description='Relocate a virtual environment.' ) parser.add_argument( '--source', help='The existing virtual environment.', required=True, ) parser.add_argument( '--destination', help='The location for which to configure the virtual environment.', required=True, ) parser.add_argument( '--move', help='Move the virtual environment to the destination.', default=False, action='store_true', ) args = parser.parse_args() relocate(args.source, args.destination, args.move)
Relocate a virtual environment.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/cli/relocate.py#L31-L54
[ "def relocate(source, destination, move=False):\n \"\"\"Adjust the virtual environment settings and optional move it.\n\n Args:\n source (str): Path to the existing virtual environment.\n destination (str): Desired path of the virtual environment.\n move (bool): Whether or not to actually...
"""Relocate a virtual environment.""" from __future__ import division from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals import argparse from .. import api def relocate(source, destination, move=False): """Adjust the virtual environment settings and optional move it. Args: source (str): Path to the existing virtual environment. destination (str): Desired path of the virtual environment. move (bool): Whether or not to actually move the files. Default False. """ venv = api.VirtualEnvironment(source) if not move: venv.relocate(destination) return None venv.move(destination) return None if __name__ == '__main__': main()
kevinconway/venvctrl
venvctrl/venv/pip.py
PipMixin.install_package
python
def install_package(self, name, index=None, force=False, update=False): cmd = 'install' if force: cmd = '{0} {1}'.format(cmd, '--force-reinstall') if update: cmd = '{0} {1}'.format(cmd, '--update') if index: cmd = '{0} {1}'.format(cmd, '--index-url {0}'.format(index)) self.pip('{0} {1}'.format(cmd, name))
Install a given package. Args: name (str): The package name to install. This can be any valid pip package specification. index (str): The URL for a pypi index to use. force (bool): For the reinstall of packages during updates. update (bool): Update the package if it is out of date.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/pip.py#L27-L50
null
class PipMixin(object): """Perform pip operations within a virtual environment. This mixin depends on the command mixin. """ def has_package(self, name): """Determine if the given package is installed. Args: name (str): The package name to find. Returns: bool: True if installed else false. """ return name in self.pip('list').out def install_requirements(self, path, index=None): """Install packages from a requirements.txt file. Args: path (str): The path to the requirements file. index (str): The URL for a pypi index to use. """ cmd = 'install -r {0}'.format(path) if index: cmd = 'install --index-url {0} -r {1}'.format(index, path) self.pip(cmd) def uninstall_package(self, name): """Uninstall a given package. Args: name (str): The name of the package to uninstall. """ self.pip('{0} --yes {1}'.format('uninstall', name))
kevinconway/venvctrl
venvctrl/venv/pip.py
PipMixin.install_requirements
python
def install_requirements(self, path, index=None): cmd = 'install -r {0}'.format(path) if index: cmd = 'install --index-url {0} -r {1}'.format(index, path) self.pip(cmd)
Install packages from a requirements.txt file. Args: path (str): The path to the requirements file. index (str): The URL for a pypi index to use.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/pip.py#L52-L64
null
class PipMixin(object): """Perform pip operations within a virtual environment. This mixin depends on the command mixin. """ def has_package(self, name): """Determine if the given package is installed. Args: name (str): The package name to find. Returns: bool: True if installed else false. """ return name in self.pip('list').out def install_package(self, name, index=None, force=False, update=False): """Install a given package. Args: name (str): The package name to install. This can be any valid pip package specification. index (str): The URL for a pypi index to use. force (bool): For the reinstall of packages during updates. update (bool): Update the package if it is out of date. """ cmd = 'install' if force: cmd = '{0} {1}'.format(cmd, '--force-reinstall') if update: cmd = '{0} {1}'.format(cmd, '--update') if index: cmd = '{0} {1}'.format(cmd, '--index-url {0}'.format(index)) self.pip('{0} {1}'.format(cmd, name)) def uninstall_package(self, name): """Uninstall a given package. Args: name (str): The name of the package to uninstall. """ self.pip('{0} --yes {1}'.format('uninstall', name))
kevinconway/venvctrl
venvctrl/venv/relocate.py
RelocateMixin.relocate
python
def relocate(self, destination): for activate in self.bin.activates: activate.vpath = destination for binfile in self.bin.files: if binfile.shebang and ( 'python' in binfile.shebang or 'pypy' in binfile.shebang ): binfile.shebang = '#!{0}'.format( os.path.join(destination, 'bin', 'python') )
Configure the virtual environment for another path. Args: destination (str): The target path of the virtual environment. Note: This does not actually move the virtual environment. Is only rewrites the metadata required to support a move.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/relocate.py#L16-L38
null
class RelocateMixin(object): """Mixin which adds the ability to relocate a virtual environment.""" def move(self, destination): """Reconfigure and move the virtual environment to another path. Args: destination (str): The target path of the virtual environment. Note: Unlike `relocate`, this method *will* move the virtual to the given path. """ self.relocate(destination) shutil.move(self.path, destination) self._path = destination
kevinconway/venvctrl
venvctrl/venv/relocate.py
RelocateMixin.move
python
def move(self, destination): self.relocate(destination) shutil.move(self.path, destination) self._path = destination
Reconfigure and move the virtual environment to another path. Args: destination (str): The target path of the virtual environment. Note: Unlike `relocate`, this method *will* move the virtual to the given path.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/relocate.py#L40-L52
[ "def relocate(self, destination):\n \"\"\"Configure the virtual environment for another path.\n\n Args:\n destination (str): The target path of the virtual environment.\n\n Note:\n This does not actually move the virtual environment. Is only\n rewrites the metadata required to support ...
class RelocateMixin(object): """Mixin which adds the ability to relocate a virtual environment.""" def relocate(self, destination): """Configure the virtual environment for another path. Args: destination (str): The target path of the virtual environment. Note: This does not actually move the virtual environment. Is only rewrites the metadata required to support a move. """ for activate in self.bin.activates: activate.vpath = destination for binfile in self.bin.files: if binfile.shebang and ( 'python' in binfile.shebang or 'pypy' in binfile.shebang ): binfile.shebang = '#!{0}'.format( os.path.join(destination, 'bin', 'python') )
kevinconway/venvctrl
venvctrl/venv/command.py
CommandMixin._execute
python
def _execute(cmd): cmd_parts = shlex.split(cmd) if sys.version_info[0] < 3: cmd_parts = shlex.split(cmd.encode('ascii')) proc = subprocess.Popen( cmd_parts, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) out, err = proc.communicate() if proc.returncode != 0: raise subprocess.CalledProcessError( returncode=proc.returncode, cmd=cmd, output=err, ) return CommandResult( code=proc.returncode, out=out.decode('utf8'), err=err.decode('utf8'), )
Run a command in a subshell.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/command.py#L22-L47
null
class CommandMixin(object): """Mixin which adds ability to execute commands.""" @staticmethod def cmd_path(self, cmd): """Get the path of a command in the virtual if it exists. Args: cmd (str): The command to look for. Returns: str: The full path to the command. Raises: ValueError: If the command is not present. """ for binscript in self.bin.files: if binscript.path.endswith('/{0}'.format(cmd)): return binscript.path raise ValueError('The command {0} was not found.'.format(cmd)) def run(self, cmd): """Execute a script from the virtual environment /bin directory.""" return self._execute(self.cmd_path(cmd)) def python(self, cmd): """Execute a python script using the virtual environment python.""" python_bin = self.cmd_path('python') cmd = '{0} {1}'.format(python_bin, cmd) return self._execute(cmd) def pip(self, cmd): """Execute some pip function using the virtual environment pip.""" pip_bin = self.cmd_path('pip') cmd = '{0} {1}'.format(pip_bin, cmd) return self._execute(cmd)
kevinconway/venvctrl
venvctrl/venv/command.py
CommandMixin.cmd_path
python
def cmd_path(self, cmd): for binscript in self.bin.files: if binscript.path.endswith('/{0}'.format(cmd)): return binscript.path raise ValueError('The command {0} was not found.'.format(cmd))
Get the path of a command in the virtual if it exists. Args: cmd (str): The command to look for. Returns: str: The full path to the command. Raises: ValueError: If the command is not present.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/command.py#L49-L67
null
class CommandMixin(object): """Mixin which adds ability to execute commands.""" @staticmethod def _execute(cmd): """Run a command in a subshell.""" cmd_parts = shlex.split(cmd) if sys.version_info[0] < 3: cmd_parts = shlex.split(cmd.encode('ascii')) proc = subprocess.Popen( cmd_parts, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) out, err = proc.communicate() if proc.returncode != 0: raise subprocess.CalledProcessError( returncode=proc.returncode, cmd=cmd, output=err, ) return CommandResult( code=proc.returncode, out=out.decode('utf8'), err=err.decode('utf8'), ) def run(self, cmd): """Execute a script from the virtual environment /bin directory.""" return self._execute(self.cmd_path(cmd)) def python(self, cmd): """Execute a python script using the virtual environment python.""" python_bin = self.cmd_path('python') cmd = '{0} {1}'.format(python_bin, cmd) return self._execute(cmd) def pip(self, cmd): """Execute some pip function using the virtual environment pip.""" pip_bin = self.cmd_path('pip') cmd = '{0} {1}'.format(pip_bin, cmd) return self._execute(cmd)
kevinconway/venvctrl
venvctrl/venv/command.py
CommandMixin.python
python
def python(self, cmd): python_bin = self.cmd_path('python') cmd = '{0} {1}'.format(python_bin, cmd) return self._execute(cmd)
Execute a python script using the virtual environment python.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/command.py#L73-L77
[ "def _execute(cmd):\n \"\"\"Run a command in a subshell.\"\"\"\n cmd_parts = shlex.split(cmd)\n if sys.version_info[0] < 3:\n\n cmd_parts = shlex.split(cmd.encode('ascii'))\n\n proc = subprocess.Popen(\n cmd_parts,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n ...
class CommandMixin(object): """Mixin which adds ability to execute commands.""" @staticmethod def _execute(cmd): """Run a command in a subshell.""" cmd_parts = shlex.split(cmd) if sys.version_info[0] < 3: cmd_parts = shlex.split(cmd.encode('ascii')) proc = subprocess.Popen( cmd_parts, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) out, err = proc.communicate() if proc.returncode != 0: raise subprocess.CalledProcessError( returncode=proc.returncode, cmd=cmd, output=err, ) return CommandResult( code=proc.returncode, out=out.decode('utf8'), err=err.decode('utf8'), ) def cmd_path(self, cmd): """Get the path of a command in the virtual if it exists. Args: cmd (str): The command to look for. Returns: str: The full path to the command. Raises: ValueError: If the command is not present. """ for binscript in self.bin.files: if binscript.path.endswith('/{0}'.format(cmd)): return binscript.path raise ValueError('The command {0} was not found.'.format(cmd)) def run(self, cmd): """Execute a script from the virtual environment /bin directory.""" return self._execute(self.cmd_path(cmd)) def pip(self, cmd): """Execute some pip function using the virtual environment pip.""" pip_bin = self.cmd_path('pip') cmd = '{0} {1}'.format(pip_bin, cmd) return self._execute(cmd)
kevinconway/venvctrl
venvctrl/venv/command.py
CommandMixin.pip
python
def pip(self, cmd): pip_bin = self.cmd_path('pip') cmd = '{0} {1}'.format(pip_bin, cmd) return self._execute(cmd)
Execute some pip function using the virtual environment pip.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/command.py#L79-L83
[ "def _execute(cmd):\n \"\"\"Run a command in a subshell.\"\"\"\n cmd_parts = shlex.split(cmd)\n if sys.version_info[0] < 3:\n\n cmd_parts = shlex.split(cmd.encode('ascii'))\n\n proc = subprocess.Popen(\n cmd_parts,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n ...
class CommandMixin(object): """Mixin which adds ability to execute commands.""" @staticmethod def _execute(cmd): """Run a command in a subshell.""" cmd_parts = shlex.split(cmd) if sys.version_info[0] < 3: cmd_parts = shlex.split(cmd.encode('ascii')) proc = subprocess.Popen( cmd_parts, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) out, err = proc.communicate() if proc.returncode != 0: raise subprocess.CalledProcessError( returncode=proc.returncode, cmd=cmd, output=err, ) return CommandResult( code=proc.returncode, out=out.decode('utf8'), err=err.decode('utf8'), ) def cmd_path(self, cmd): """Get the path of a command in the virtual if it exists. Args: cmd (str): The command to look for. Returns: str: The full path to the command. Raises: ValueError: If the command is not present. """ for binscript in self.bin.files: if binscript.path.endswith('/{0}'.format(cmd)): return binscript.path raise ValueError('The command {0} was not found.'.format(cmd)) def run(self, cmd): """Execute a script from the virtual environment /bin directory.""" return self._execute(self.cmd_path(cmd)) def python(self, cmd): """Execute a python script using the virtual environment python.""" python_bin = self.cmd_path('python') cmd = '{0} {1}'.format(python_bin, cmd) return self._execute(cmd)
kevinconway/venvctrl
venvctrl/venv/base.py
VenvFile.writeline
python
def writeline(self, line, line_number): tmp_file = tempfile.TemporaryFile('w+') if not line.endswith(os.linesep): line += os.linesep try: with open(self.path, 'r') as file_handle: for count, new_line in enumerate(file_handle): if count == line_number: new_line = line tmp_file.write(new_line) tmp_file.seek(0) with open(self.path, 'w') as file_handle: for new_line in tmp_file: file_handle.write(new_line) finally: tmp_file.close()
Rewrite a single line in the file. Args: line (str): The new text to write to the file. line_number (int): The line of the file to rewrite. Numbering starts at 0.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/base.py#L70-L102
null
class VenvFile(VenvPath): """A file within a virtual environment."""
kevinconway/venvctrl
venvctrl/venv/base.py
VenvDir.paths
python
def paths(self): contents = os.listdir(self.path) contents = (os.path.join(self.path, path) for path in contents) contents = (VenvPath(path) for path in contents) return contents
Get an iter of VenvPaths within the directory.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/base.py#L110-L115
null
class VenvDir(VenvPath): """A directory within a virtual environment.""" @property @property def files(self): """Get an iter of VenvFiles within the directory.""" contents = self.paths contents = (VenvFile(path.path) for path in contents if path.is_file) return contents @property def dirs(self): """Get an iter of VenvDirs within the directory.""" contents = self.paths contents = (VenvDir(path.path) for path in contents if path.is_dir) return contents @property def items(self): """Get an iter of VenvDirs and VenvFiles within the directory.""" contents = self.paths contents = ( VenvFile(path.path) if path.is_file else VenvDir(path.path) for path in contents ) return contents def __iter__(self): """Iter over items.""" return iter(self.items)
kevinconway/venvctrl
venvctrl/venv/base.py
VenvDir.files
python
def files(self): contents = self.paths contents = (VenvFile(path.path) for path in contents if path.is_file) return contents
Get an iter of VenvFiles within the directory.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/base.py#L118-L122
null
class VenvDir(VenvPath): """A directory within a virtual environment.""" @property def paths(self): """Get an iter of VenvPaths within the directory.""" contents = os.listdir(self.path) contents = (os.path.join(self.path, path) for path in contents) contents = (VenvPath(path) for path in contents) return contents @property @property def dirs(self): """Get an iter of VenvDirs within the directory.""" contents = self.paths contents = (VenvDir(path.path) for path in contents if path.is_dir) return contents @property def items(self): """Get an iter of VenvDirs and VenvFiles within the directory.""" contents = self.paths contents = ( VenvFile(path.path) if path.is_file else VenvDir(path.path) for path in contents ) return contents def __iter__(self): """Iter over items.""" return iter(self.items)
kevinconway/venvctrl
venvctrl/venv/base.py
VenvDir.dirs
python
def dirs(self): contents = self.paths contents = (VenvDir(path.path) for path in contents if path.is_dir) return contents
Get an iter of VenvDirs within the directory.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/base.py#L125-L129
null
class VenvDir(VenvPath): """A directory within a virtual environment.""" @property def paths(self): """Get an iter of VenvPaths within the directory.""" contents = os.listdir(self.path) contents = (os.path.join(self.path, path) for path in contents) contents = (VenvPath(path) for path in contents) return contents @property def files(self): """Get an iter of VenvFiles within the directory.""" contents = self.paths contents = (VenvFile(path.path) for path in contents if path.is_file) return contents @property @property def items(self): """Get an iter of VenvDirs and VenvFiles within the directory.""" contents = self.paths contents = ( VenvFile(path.path) if path.is_file else VenvDir(path.path) for path in contents ) return contents def __iter__(self): """Iter over items.""" return iter(self.items)
kevinconway/venvctrl
venvctrl/venv/base.py
VenvDir.items
python
def items(self): contents = self.paths contents = ( VenvFile(path.path) if path.is_file else VenvDir(path.path) for path in contents ) return contents
Get an iter of VenvDirs and VenvFiles within the directory.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/base.py#L132-L139
null
class VenvDir(VenvPath): """A directory within a virtual environment.""" @property def paths(self): """Get an iter of VenvPaths within the directory.""" contents = os.listdir(self.path) contents = (os.path.join(self.path, path) for path in contents) contents = (VenvPath(path) for path in contents) return contents @property def files(self): """Get an iter of VenvFiles within the directory.""" contents = self.paths contents = (VenvFile(path.path) for path in contents if path.is_file) return contents @property def dirs(self): """Get an iter of VenvDirs within the directory.""" contents = self.paths contents = (VenvDir(path.path) for path in contents if path.is_dir) return contents @property def __iter__(self): """Iter over items.""" return iter(self.items)
kevinconway/venvctrl
venvctrl/venv/base.py
BinFile.shebang
python
def shebang(self): with open(self.path, 'rb') as file_handle: hashtag = file_handle.read(2) if hashtag == b'#!': file_handle.seek(0) return file_handle.readline().decode('utf8') return None
Get the file shebang if is has one.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/base.py#L151-L160
[ "def writeline(self, line, line_number):\n \"\"\"Rewrite a single line in the file.\n\n Args:\n line (str): The new text to write to the file.\n line_number (int): The line of the file to rewrite. Numbering\n starts at 0.\n \"\"\"\n tmp_file = tempfile.TemporaryFile('w+')\n i...
class BinFile(VenvFile): """An executable file from a virtual environment.""" @property @shebang.setter def shebang(self, new_shebang): """Write a new shebang to the file. Raises: ValueError: If the file has no shebang to modify. ValueError: If the new shebang is invalid. """ if not self.shebang: raise ValueError('Cannot modify a shebang if it does not exist.') if not new_shebang.startswith('#!'): raise ValueError('Invalid shebang.') self.writeline(new_shebang, 0)
kevinconway/venvctrl
venvctrl/venv/base.py
BinFile.shebang
python
def shebang(self, new_shebang): if not self.shebang: raise ValueError('Cannot modify a shebang if it does not exist.') if not new_shebang.startswith('#!'): raise ValueError('Invalid shebang.') self.writeline(new_shebang, 0)
Write a new shebang to the file. Raises: ValueError: If the file has no shebang to modify. ValueError: If the new shebang is invalid.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/base.py#L163-L178
[ "def writeline(self, line, line_number):\n \"\"\"Rewrite a single line in the file.\n\n Args:\n line (str): The new text to write to the file.\n line_number (int): The line of the file to rewrite. Numbering\n starts at 0.\n \"\"\"\n tmp_file = tempfile.TemporaryFile('w+')\n i...
class BinFile(VenvFile): """An executable file from a virtual environment.""" @property def shebang(self): """Get the file shebang if is has one.""" with open(self.path, 'rb') as file_handle: hashtag = file_handle.read(2) if hashtag == b'#!': file_handle.seek(0) return file_handle.readline().decode('utf8') return None @shebang.setter
kevinconway/venvctrl
venvctrl/venv/base.py
ActivateFile._find_vpath
python
def _find_vpath(self): with open(self.path, 'r') as file_handle: for count, line in enumerate(file_handle): match = self.read_pattern.match(line) if match: return match.group(1), count return None, None
Find the VIRTUAL_ENV path entry.
train
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/base.py#L188-L199
null
class ActivateFile(BinFile): """The virtual environment /bin/activate script.""" read_pattern = re.compile(r'^VIRTUAL_ENV="(.*)"$') write_pattern = 'VIRTUAL_ENV="{0}"' @property def vpath(self): """Get the path to the virtual environment.""" return self._find_vpath()[0] @vpath.setter def vpath(self, new_vpath): """Change the path to the virtual environment.""" _, line_number = self._find_vpath() new_vpath = self.write_pattern.format(new_vpath) self.writeline(new_vpath, line_number)