Dataset rows from codeparrot/codeparrot-clean (all rows shown: language = unknown, source = codeparrot/codeparrot-clean). Columns: code (string, 1–25.8M chars), language (18 classes), source (4 classes), repo (78 classes), path (string, 0–268 chars).

---
#!/usr/bin/env python3
import math
import autoprop
from vecrec import Vector, Rect
from glooey.helpers import *
@autoprop
class Grid:
def __init__(self, *, bounding_rect=None, min_cell_rects=None,
num_rows=0, num_cols=0, padding=None, inner_padding=None,
outer_padding=None, row_heights=None, col_widths=None,
default_row_height='expand', default_col_width='expand'):
# Attributes that the user can set to affect the shape of the grid.
self._bounding_rect = bounding_rect or Rect.null()
self._min_cell_rects = min_cell_rects or {}
self._requested_num_rows = num_rows
self._requested_num_cols = num_cols
self._inner_padding = first_not_none((inner_padding, padding, 0))
self._outer_padding = first_not_none((outer_padding, padding, 0))
self._requested_row_heights = row_heights or {}
self._requested_col_widths = col_widths or {}
self._default_row_height = default_row_height
self._default_col_width = default_col_width
# Read-only attributes that reflect the current state of the grid.
self._num_rows = 0
self._num_cols = 0
self._max_cell_heights = {}
self._max_cell_widths = {}
self._fixed_rows = set()
self._expandable_rows = set()
self._fixed_cols = set()
self._expandable_cols = set()
self._fixed_row_heights = {}
self._fixed_col_widths = {}
self._min_expandable_row_heights = {}
self._min_expandable_col_widths = {}
self._padding_height = 0
self._padding_width = 0
self._min_height = 0
self._min_width = 0
self._row_heights = {}
self._col_widths = {}
self._width = 0
self._height = 0
self._row_tops = {}
self._col_lefts = {}
self._cell_rects = {}
# Attributes that manage the cache.
self._is_shape_stale = True
self._is_claim_stale = True
self._are_cells_stale = True
def make_claim(self, min_cell_rects=None):
if min_cell_rects is not None:
self.min_cell_rects = min_cell_rects
self._update_claim()
return self._min_width, self._min_height
def make_cells(self, bounding_rect=None):
if bounding_rect is not None:
self.bounding_rect = bounding_rect
self._update_cells()
return self._cell_rects
def find_cell_under_mouse(self, x, y):
# The >=/<= comparisons in this method were chosen to be compatible
# with the comparisons in Widget.is_under_mouse(). That method counts
# points that are on any edge of a widget as being over that widget.
# The >=/<= comparisons do the same thing here.
#
# I initially wrote this method using an inclusive operator on one side
# and an exclusive one on the other, to avoid any ambiguity in the case
# where there's no padding. For example, imagine a 2x2 grid with no
# padding. In theory, the point exactly in the middle is over all four
# cells. In practice, the algorithm will identify the top-left-most
# cell first and return it. So the algorithm isn't really ambiguous,
# but it is more dependent on what's really an implementation detail.
# Find the row the mouse is over.
for i in range(self._num_rows):
row_top = self._row_tops[i]
row_bottom = row_top - self._row_heights[i]
if row_top >= y >= row_bottom:
break
else:
return None
# Find the col the mouse is over.
for j in range(self._num_cols):
col_left = self._col_lefts[j]
col_right = col_left + self._col_widths[j]
if col_left <= x <= col_right:
break
else:
return None
return i, j
def get_width(self):
return self._width
def get_height(self):
return self._height
def get_rect(self):
return Rect.from_size(self._width, self._height)
def get_min_width(self):
return self._min_width
def get_min_height(self):
return self._min_height
min_height = property(get_min_height)
def get_min_bounding_rect(self):
return Rect.from_size(self._min_width, self._min_height)
def get_cell_rects(self):
return self._cell_rects
cell_rects = property(get_cell_rects)
def get_bounding_rect(self):
return self._bounding_rect
def set_bounding_rect(self, new_rect):
if self._bounding_rect != new_rect:
self._bounding_rect = new_rect
self._invalidate_cells()
def get_min_cell_rect(self, i, j):
return self._min_cell_rects[i,j]
def set_min_cell_rect(self, i, j, new_rect):
if (i,j) not in self._min_cell_rects or \
self._min_cell_rects[i,j] != new_rect:
self._min_cell_rects[i,j] = new_rect
self._invalidate_shape()
def del_min_cell_rect(self, i, j):
if (i,j) in self._min_cell_rects:
del self._min_cell_rects[i,j]
self._invalidate_shape()
def get_min_cell_rects(self):
return self._min_cell_rects
def set_min_cell_rects(self, new_rects):
if self._min_cell_rects != new_rects:
self._min_cell_rects = new_rects
self._invalidate_shape()
def del_min_cell_rects(self):
if self._min_cell_rects:
self._min_cell_rects = {}
self._invalidate_shape()
def get_num_rows(self):
return self._num_rows
def set_num_rows(self, new_num):
self._requested_num_rows = new_num
self._invalidate_shape()
def get_num_cols(self):
return self._num_cols
def set_num_cols(self, new_num):
self._requested_num_cols = new_num
self._invalidate_shape()
def get_padding(self):
return self._inner_padding, self._outer_padding
def set_padding(self, new_padding):
self._inner_padding = new_padding
self._outer_padding = new_padding
self._invalidate_claim()
def get_inner_padding(self):
return self._inner_padding
def set_inner_padding(self, new_padding):
self._inner_padding = new_padding
self._invalidate_claim()
def get_outer_padding(self):
return self._outer_padding
def set_outer_padding(self, new_padding):
self._outer_padding = new_padding
self._invalidate_claim()
def get_row_height(self, i):
return self._row_heights[i]
def set_row_height(self, i, new_height):
self._requested_row_heights[i] = new_height
self._invalidate_claim()
def del_row_height(self, i):
if i in self._requested_row_heights:
del self._requested_row_heights[i]
self._invalidate_claim()
def get_row_heights(self):
return self._row_heights
def set_row_heights(self, new_heights):
self._requested_row_heights = new_heights
self._invalidate_claim()
def del_row_heights(self):
self._requested_row_heights = {}
self._invalidate_claim()
def get_col_width(self, j):
return self._col_widths[j]
def set_col_width(self, j, new_width):
self._requested_col_widths[j] = new_width
self._invalidate_claim()
def del_col_width(self, j):
if j in self._requested_col_widths:
del self._requested_col_widths[j]
self._invalidate_claim()
def get_col_widths(self):
return self._col_widths
def set_col_widths(self, new_widths):
self._requested_col_widths = new_widths
self._invalidate_claim()
def del_col_widths(self):
self._requested_col_widths = {}
self._invalidate_claim()
def get_default_row_height(self):
return self._default_row_height
def set_default_row_height(self, new_height):
self._default_row_height = new_height
self._invalidate_claim()
def get_default_col_width(self):
return self._default_col_width
def set_default_col_width(self, new_width):
self._default_col_width = new_width
self._invalidate_claim()
def get_requested_num_rows(self):
return self._requested_num_rows
def get_requested_num_cols(self):
return self._requested_num_cols
requested_num_cols = property(get_requested_num_cols)
def get_requested_row_height(self, i):
return self._requested_row_heights[i]
def get_requested_row_heights(self):
return self._requested_row_heights
def get_requested_col_width(self, i):
return self._requested_col_widths[i]
def get_requested_col_widths(self):
return self._requested_col_widths
def _invalidate_shape(self):
self._is_shape_stale = True
self._invalidate_claim()
def _invalidate_claim(self):
self._is_claim_stale = True
self._invalidate_cells()
def _invalidate_cells(self):
self._are_cells_stale = True
def _update_shape(self):
if self._is_shape_stale:
self._find_num_rows()
self._find_num_cols()
self._find_max_cell_dimensions()
            self._is_shape_stale = False
def _update_claim(self):
if self._is_claim_stale:
self._update_shape()
self._find_which_rows_expand()
self._find_which_cols_expand()
self._find_fixed_row_heights()
self._find_fixed_col_widths()
self._find_min_expandable_row_heights()
self._find_min_expandable_col_widths()
self._find_padding_height()
self._find_padding_width()
self._find_min_height()
self._find_min_width()
self._is_claim_stale = False
def _update_cells(self):
if self._are_cells_stale:
self._update_claim()
if self._bounding_rect.width < self._min_width:
raise UsageError("grid cannot fit in {0[0]}x{0[1]}, need to be at least {1} px wide.".format(self._bounding_rect.size, self._min_width))
if self._bounding_rect.height < self._min_height:
raise UsageError("grid cannot fit in {0[0]}x{0[1]}, need to be at least {1} px tall.".format(self._bounding_rect.size, self._min_height))
self._find_row_heights()
self._find_col_widths()
self._find_cell_rects()
self._are_cells_stale = False
def _find_num_rows(self):
min_num_rows = 0
for i,j in self._min_cell_rects:
min_num_rows = max(i+1, min_num_rows)
if self._requested_num_rows:
self._num_rows = self._requested_num_rows
else:
self._num_rows = min_num_rows
if self._num_rows < min_num_rows:
raise UsageError("not enough rows requested")
def _find_num_cols(self):
min_num_cols = 0
for i,j in self._min_cell_rects:
min_num_cols = max(j+1, min_num_cols)
if self._requested_num_cols:
self._num_cols = self._requested_num_cols
else:
self._num_cols = min_num_cols
if self._num_cols < min_num_cols:
raise UsageError("not enough columns requested")
def _find_max_cell_dimensions(self):
"""
Find the tallest and widest cell in each dimension.
"""
self._max_cell_heights = {}
self._max_cell_widths = {}
for i,j in self._min_cell_rects:
# Use -math.inf so that negative cell sizes can be used.
self._max_cell_heights[i] = max(
self._min_cell_rects[i,j].height,
self._max_cell_heights.get(i, -math.inf))
self._max_cell_widths[j] = max(
self._min_cell_rects[i,j].width,
self._max_cell_widths.get(j, -math.inf))
def _find_which_rows_expand(self):
self._fixed_rows = set()
self._expandable_rows = set()
for i in range(self._num_rows):
size_request = self._get_requested_row_height(i)
if isinstance(size_request, int):
self._fixed_rows.add(i)
elif size_request == 'expand':
self._expandable_rows.add(i)
else:
raise UsageError("illegal row height: {}".format(repr(size_request)))
self._num_fixed_rows = len(self._fixed_rows)
self._num_expandable_rows = len(self._expandable_rows)
def _find_which_cols_expand(self):
self._fixed_cols = set()
self._expandable_cols = set()
for j in range(self._num_cols):
size_request = self._get_requested_col_width(j)
if isinstance(size_request, int):
self._fixed_cols.add(j)
elif size_request == 'expand':
self._expandable_cols.add(j)
else:
raise UsageError("illegal col width: {}".format(repr(size_request)))
self._num_fixed_cols = len(self._fixed_cols)
self._num_expandable_cols = len(self._expandable_cols)
def _find_fixed_row_heights(self):
self._fixed_row_heights = {}
for i in self._fixed_rows:
# Use -math.inf so that negative cell sizes can be used.
self._fixed_row_heights[i] = max(
self._get_requested_row_height(i),
self._max_cell_heights.get(i, -math.inf))
def _find_fixed_col_widths(self):
self._fixed_col_widths = {}
for j in self._fixed_cols:
# Use -math.inf so that negative cell sizes can be used.
self._fixed_col_widths[j] = max(
self._get_requested_col_width(j),
self._max_cell_widths.get(j, -math.inf))
def _find_min_expandable_row_heights(self):
self._min_expandable_row_heights = {}
for i in self._expandable_rows:
self._min_expandable_row_heights[i] = \
self._max_cell_heights.get(i, 0)
def _find_min_expandable_col_widths(self):
self._min_expandable_col_widths = {}
for j in self._expandable_cols:
self._min_expandable_col_widths[j] = \
self._max_cell_widths.get(j, 0)
def _find_padding_height(self):
self._padding_height = \
+ self._inner_padding * (self._num_rows - 1) \
+ self._outer_padding * 2
def _find_padding_width(self):
self._padding_width = \
+ self._inner_padding * (self._num_cols - 1) \
+ self._outer_padding * 2
def _find_min_height(self):
min_expandable_height = max(
self._min_expandable_row_heights.values() or [0])
self._min_height = \
+ sum(self._fixed_row_heights.values()) \
+ min_expandable_height * self._num_expandable_rows \
+ self._padding_height
def _find_min_width(self):
min_expandable_width = max(
self._min_expandable_col_widths.values() or [0])
self._min_width = \
+ sum(self._fixed_col_widths.values()) \
+ min_expandable_width * self._num_expandable_cols \
+ self._padding_width
def _find_row_heights(self):
self._row_heights = self._fixed_row_heights.copy()
if self._num_expandable_rows:
expandable_row_height = (
+ self._bounding_rect.height
- sum(self._fixed_row_heights.values())
- self._padding_height
) / self._num_expandable_rows
for i in self._expandable_rows:
self._row_heights[i] = expandable_row_height
self._height = \
+ sum(self._row_heights.values()) \
+ self._padding_height
def _find_col_widths(self):
self._col_widths = self._fixed_col_widths.copy()
if self._num_expandable_cols:
expandable_col_width = (
+ self._bounding_rect.width
- sum(self._fixed_col_widths.values())
- self._padding_width
) / self._num_expandable_cols
for j in self._expandable_cols:
self._col_widths[j] = expandable_col_width
self._width = \
+ sum(self._col_widths.values()) \
+ self._padding_width
def _find_cell_rects(self):
self._row_tops = {}
self._col_lefts = {}
self._cell_rects = {}
top_cursor = self._bounding_rect.top
for i in range(self._num_rows):
top_cursor -= self._get_row_padding(i)
left_cursor = self._bounding_rect.left
row_height = self._row_heights[i]
self._row_tops[i] = top_cursor
for j in range(self._num_cols):
left_cursor += self._get_col_padding(j)
col_width = self._col_widths[j]
self._cell_rects[i,j] = Rect.from_size(col_width, row_height)
self._cell_rects[i,j].top_left = left_cursor, top_cursor
self._col_lefts[j] = left_cursor
left_cursor += col_width
top_cursor -= row_height
def _get_requested_row_height(self, i):
return self._requested_row_heights.get(i, self._default_row_height)
def _get_requested_col_width(self, j):
return self._requested_col_widths.get(j, self._default_col_width)
def _get_row_padding(self, i):
return self._outer_padding if i == 0 else self._inner_padding
def _get_col_padding(self, j):
return self._outer_padding if j == 0 else self._inner_padding
def make_grid(rect, cells={}, num_rows=0, num_cols=0, padding=None,
inner_padding=None, outer_padding=None, row_heights={}, col_widths={},
default_row_height='expand', default_col_width='expand'):
"""
Return rectangles for each cell in the specified grid. The rectangles are
returned in a dictionary where the keys are (row, col) tuples.
"""
grid = Grid(
bounding_rect=rect,
min_cell_rects=cells,
num_rows=num_rows,
num_cols=num_cols,
padding=padding,
inner_padding=inner_padding,
outer_padding=outer_padding,
row_heights=row_heights,
col_widths=col_widths,
default_row_height=default_row_height,
default_col_width=default_col_width,
)
return grid.make_cells()
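
A short usage sketch of `make_grid` (a minimal example; the sizes and padding below are illustrative, and `Rect` is the vecrec class imported at the top of this file):

# Hedged usage sketch: lay out a 2x2 grid of expandable cells inside a
# 200x100 bounding rect. Keys of the returned dict are (row, col) tuples,
# as the make_grid() docstring states.
bounds = Rect.from_size(200, 100)
cells = {
    (0, 0): Rect.from_size(50, 20),  # minimum size requested for this cell
    (1, 1): Rect.from_size(50, 20),
}
rects = make_grid(bounds, cells, padding=5)
for (i, j), rect in sorted(rects.items()):
    print(f"cell ({i},{j}): {rect}")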
---
""" Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1026',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'{' # 0x48 -> LEFT CURLY BRACKET
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xc7' # 0x4A -> LATIN CAPITAL LETTER C WITH CEDILLA
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'\u011e' # 0x5A -> LATIN CAPITAL LETTER G WITH BREVE
u'\u0130' # 0x5B -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'[' # 0x68 -> LEFT SQUARE BRACKET
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u015f' # 0x6A -> LATIN SMALL LETTER S WITH CEDILLA
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u0131' # 0x79 -> LATIN SMALL LETTER DOTLESS I
u':' # 0x7A -> COLON
u'\xd6' # 0x7B -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u015e' # 0x7C -> LATIN CAPITAL LETTER S WITH CEDILLA
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'\xdc' # 0x7F -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'}' # 0x8C -> RIGHT CURLY BRACKET
u'`' # 0x8D -> GRAVE ACCENT
u'\xa6' # 0x8E -> BROKEN BAR
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'\xf6' # 0xA1 -> LATIN SMALL LETTER O WITH DIAERESIS
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u']' # 0xAC -> RIGHT SQUARE BRACKET
u'$' # 0xAD -> DOLLAR SIGN
u'@' # 0xAE -> COMMERCIAL AT
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'\xe7' # 0xC0 -> LATIN SMALL LETTER C WITH CEDILLA
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'~' # 0xCC -> TILDE
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'\u011f' # 0xD0 -> LATIN SMALL LETTER G WITH BREVE
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\\' # 0xDC -> REVERSE SOLIDUS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xfc' # 0xE0 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'#' # 0xEC -> NUMBER SIGN
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'"' # 0xFC -> QUOTATION MARK
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
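
The module follows the standard `encodings`-package contract, so `getregentry()` can be handed to a codec search function. A minimal, hedged sketch of exercising and registering it by hand (the module name `cp1026` is an assumption about where this file is saved; in a normal installation the stdlib registers this codec automatically):

import codecs
import cp1026  # assumption: the file above saved as cp1026.py on sys.path

# Exercise the codec directly through its CodecInfo record...
info = cp1026.getregentry()
data, _ = info.encode(u'Ünïcode!')        # charmap_encode via encoding_table
assert info.decode(data)[0] == u'Ünïcode!'  # charmap_decode round-trip

# ...or register it so str.encode()/bytes.decode() can find it by name.
codecs.register(lambda name: info if name == 'cp1026' else None)
assert data.decode('cp1026') == u'Ünïcode!'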
---
from common_fixtures import * # NOQA
from gdapi import ApiError
@pytest.fixture(scope='module')
def user_client(context):
return context.user_client
def _user_preference(client, name=None):
if name is None:
name = random_str()
preference = client.wait_success(client.create_user_preference(
name=name, value=random_str()))
got_preference = client.by_id('userPreference', preference.id)
assert preference.id == got_preference.id
assert name == got_preference.name
assert preference.value == got_preference.value
return got_preference
def test_create_user_preference(user_client):
_user_preference(user_client)
def test_delete_user_preference(user_client):
preference = _user_preference(user_client)
preference = user_client.wait_success(preference.deactivate())
preference = user_client.wait_success(preference.remove())
preference = user_client.wait_success(preference.purge())
preference = user_client.by_id('userPreference', preference.id)
assert preference.state == 'purged'
preference = _user_preference(user_client)
preference = user_client.wait_success(preference.remove())
assert preference.state == 'removed'
preference = user_client.wait_success(preference.purge())
assert preference.state == 'purged'
def test_update_user_preference(user_client):
preference = _user_preference(user_client)
new_value = random_str()
user_client.update(preference, value=new_value)
got_preference = user_client.by_id('userPreference', preference.id)
assert got_preference.value == new_value
def test_update_user_preference_pass_name(user_client):
preference = _user_preference(user_client)
new_value = random_str()
user_client.update(preference, name=preference.name, value=new_value)
got_preference = user_client.by_id('userPreference', preference.id)
assert got_preference.value == new_value
def test_unique_user_preference(user_client, admin_user_client):
rand_str = random_str()
_user_preference(user_client, name=rand_str)
with pytest.raises(ApiError) as e:
_user_preference(user_client, name=rand_str)
assert e.value.error.status == 422
_user_preference(admin_user_client, name=rand_str)
with pytest.raises(ApiError) as e:
_user_preference(admin_user_client, name=rand_str)
assert e.value.error.status == 422
---
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.Harddisk import harddiskmanager
from Components.MenuList import MenuList
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.Task import job_manager
from Screens.MessageBox import MessageBox
from Tools.BoundFunction import boundFunction
import Screens.InfoBar
class HarddiskSetup(Screen):
def __init__(self, session, hdd, action, text, question):
self.setTitle(_("Setup Harddisk"))
Screen.__init__(self, session)
self.action = action
self.question = question
        self.currentService = None
self["model"] = Label(_("Model: ") + hdd.model())
self["capacity"] = Label(_("Capacity: ") + hdd.capacity())
self["bus"] = Label(_("Bus: ") + hdd.bus())
self["initialize"] = Pixmap()
self["initializetext"] = Label(text)
self["actions"] = ActionMap(["OkCancelActions"],
{
"ok": self.hddQuestion,
"cancel": self.close
})
self["shortcuts"] = ActionMap(["ShortcutActions"],
{
"red": self.hddQuestion
})
def hddQuestion(self, answer=False):
        print 'answer:', answer
if Screens.InfoBar.InfoBar.instance.timeshiftEnabled():
            message = self.question + "\n\n" + _("You seem to be in timeshift, the service will briefly stop as timeshift stops.")
message += '\n' + _("Do you want to continue?")
self.session.openWithCallback(self.stopTimeshift, MessageBox, message)
else:
message = self.question + "\n" + _("You can continue watching TV etc. while this is running.")
self.session.openWithCallback(self.hddConfirmed, MessageBox, message)
def stopTimeshift(self, confirmed):
if confirmed:
            self.currentService = self.session.nav.getCurrentlyPlayingServiceReference()
self.session.nav.stopService()
Screens.InfoBar.InfoBar.instance.stopTimeshiftcheckTimeshiftRunningCallback(True)
self.hddConfirmed(True)
def hddConfirmed(self, confirmed):
if not confirmed:
return
try:
job_manager.AddJob(self.action())
for job in job_manager.getPendingJobs():
if job.name in (_("Initializing storage device..."), _("Checking filesystem..."),_("Converting ext3 to ext4...")):
self.showJobView(job)
break
except Exception, ex:
self.session.open(MessageBox, str(ex), type=MessageBox.TYPE_ERROR, timeout=10)
        if self.currentService:
            self.session.nav.playService(self.currentService)
self.close()
def showJobView(self, job):
from Screens.TaskView import JobView
job_manager.in_background = False
self.session.openWithCallback(self.JobViewCB, JobView, job, cancelable=False, afterEventChangeable=False, afterEvent="close")
def JobViewCB(self, in_background):
job_manager.in_background = in_background
class HarddiskSelection(Screen):
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Initialization"))
self.skinName = "HarddiskSelection" # For derived classes
if harddiskmanager.HDDCount() == 0:
tlist = [(_("no storage devices found"), 0)]
self["hddlist"] = MenuList(tlist)
else:
self["hddlist"] = MenuList(harddiskmanager.HDDList())
self["actions"] = ActionMap(["OkCancelActions"],
{
"ok": self.okbuttonClick,
"cancel": self.close
})
def doIt(self, selection):
self.session.openWithCallback(self.close, HarddiskSetup, selection,
action=selection.createInitializeJob,
text=_("Initialize"),
question=_("Do you really want to initialize the device?\nAll data on the disk will be lost!"))
def okbuttonClick(self):
selection = self["hddlist"].getCurrent()
if selection[1] != 0:
self.doIt(selection[1])
self.close(True)
# This is actually just HarddiskSelection but with correct type
class HarddiskFsckSelection(HarddiskSelection):
def __init__(self, session):
HarddiskSelection.__init__(self, session)
Screen.setTitle(self, _("Filesystem Check"))
self.skinName = "HarddiskSelection"
def doIt(self, selection):
self.session.openWithCallback(self.close, HarddiskSetup, selection,
action=selection.createCheckJob,
text=_("Check"),
question=_("Do you really want to check the filesystem?\nThis could take a long time!"))
class HarddiskConvertExt4Selection(HarddiskSelection):
def __init__(self, session):
HarddiskSelection.__init__(self, session)
Screen.setTitle(self, _("Convert filesystem ext3 to ext4"))
self.skinName = "HarddiskSelection"
def doIt(self, selection):
self.session.openWithCallback(self.close, HarddiskSetup, selection,
action=selection.createExt4ConversionJob,
text=_("Convert ext3 to ext4"),
question=_("Do you really want to convert the filesystem?\nYou cannot go back!"))
---
import codecs
import operator
import os
import tempfile
import textwrap
from mako.template import Template
from .. import Messages, blocks
from ..Constants import TOP_BLOCK_FILE_MODE
from .FlowGraphProxy import FlowGraphProxy
from ..utils import expr_utils
DATA_DIR = os.path.dirname(__file__)
PYTHON_TEMPLATE = os.path.join(DATA_DIR, 'flow_graph.py.mako')
python_template = Template(filename=PYTHON_TEMPLATE)
class TopBlockGenerator(object):
def __init__(self, flow_graph, file_path):
"""
Initialize the top block generator object.
Args:
flow_graph: the flow graph object
file_path: the path to write the file to
"""
self._flow_graph = FlowGraphProxy(flow_graph)
self._generate_options = self._flow_graph.get_option('generate_options')
self._mode = TOP_BLOCK_FILE_MODE
# Handle the case where the directory is read-only
# In this case, use the system's temp directory
if not os.access(file_path, os.W_OK):
file_path = tempfile.gettempdir()
filename = self._flow_graph.get_option('id') + '.py'
self.file_path = os.path.join(file_path, filename)
self._dirname = file_path
def _warnings(self):
throttling_blocks = [b for b in self._flow_graph.get_enabled_blocks()
if b.flags.throttle]
if not throttling_blocks and not self._generate_options.startswith('hb'):
Messages.send_warning("This flow graph may not have flow control: "
"no audio or RF hardware blocks found. "
"Add a Misc->Throttle block to your flow "
"graph to avoid CPU congestion.")
if len(throttling_blocks) > 1:
keys = set([b.key for b in throttling_blocks])
if len(keys) > 1 and 'blocks_throttle' in keys:
Messages.send_warning("This flow graph contains a throttle "
"block and another rate limiting block, "
"e.g. a hardware source or sink. "
"This is usually undesired. Consider "
"removing the throttle block.")
deprecated_block_keys = {b.name for b in self._flow_graph.get_enabled_blocks() if b.flags.deprecated}
for key in deprecated_block_keys:
Messages.send_warning("The block {!r} is deprecated.".format(key))
def write(self):
"""generate output and write it to files"""
self._warnings()
fg = self._flow_graph
self.title = fg.get_option('title') or fg.get_option('id').replace('_', ' ').title()
variables = fg.get_variables()
parameters = fg.get_parameters()
monitors = fg.get_monitors()
self.namespace = {
'flow_graph': fg,
'variables': variables,
'parameters': parameters,
'monitors': monitors,
'generate_options': self._generate_options,
}
for filename, data in self._build_python_code_from_template():
with codecs.open(filename, 'w', encoding='utf-8') as fp:
fp.write(data)
if filename == self.file_path:
os.chmod(filename, self._mode)
def _build_python_code_from_template(self):
"""
Convert the flow graph to python code.
Returns:
a string of python code
"""
output = []
fg = self._flow_graph
platform = fg.parent
title = fg.get_option('title') or fg.get_option('id').replace('_', ' ').title()
variables = fg.get_variables()
parameters = fg.get_parameters()
monitors = fg.get_monitors()
for block in fg.iter_enabled_blocks():
key = block.key
file_path = os.path.join(self._dirname, block.name + '.py')
if key == 'epy_block':
src = block.params['_source_code'].get_value()
output.append((file_path, src))
elif key == 'epy_module':
src = block.params['source_code'].get_value()
output.append((file_path, src))
self.namespace = {
'flow_graph': fg,
'variables': variables,
'parameters': parameters,
'monitors': monitors,
'generate_options': self._generate_options,
'version': platform.config.version
}
flow_graph_code = python_template.render(
title=title,
imports=self._imports(),
blocks=self._blocks(),
callbacks=self._callbacks(),
connections=self._connections(),
**self.namespace
)
# strip trailing white-space
flow_graph_code = "\n".join(line.rstrip() for line in flow_graph_code.split("\n"))
output.append((self.file_path, flow_graph_code))
return output
def _imports(self):
fg = self._flow_graph
imports = fg.imports()
seen = set()
output = []
need_path_hack = any(imp.endswith("# grc-generated hier_block") for imp in imports)
if need_path_hack:
output.insert(0, textwrap.dedent("""\
import os
import sys
sys.path.append(os.environ.get('GRC_HIER_PATH', os.path.expanduser('~/.grc_gnuradio')))
"""))
seen.add('import os')
seen.add('import sys')
if fg.get_option('qt_qss_theme'):
imports.append('import os')
imports.append('import sys')
if fg.get_option('thread_safe_setters'):
imports.append('import threading')
def is_duplicate(l):
if (l.startswith('import') or l.startswith('from')) and l in seen:
return True
            seen.add(l)
return False
for import_ in sorted(imports):
lines = import_.strip().split('\n')
if not lines[0]:
continue
for line in lines:
line = line.rstrip()
if not is_duplicate(line):
output.append(line)
return output
def _blocks(self):
fg = self._flow_graph
parameters = fg.get_parameters()
# List of blocks not including variables and imports and parameters and disabled
def _get_block_sort_text(block):
code = block.templates.render('make').replace(block.name, ' ')
try:
code += block.params['gui_hint'].get_value() # Newer gui markup w/ qtgui
except KeyError:
# No gui hint
pass
return code
blocks = [
b for b in fg.blocks
if b.enabled and not (b.get_bypassed() or b.is_import or b in parameters or b.key == 'options')
]
blocks = expr_utils.sort_objects(blocks, operator.attrgetter('name'), _get_block_sort_text)
blocks_make = []
for block in blocks:
make = block.templates.render('make')
if make:
if not (block.is_variable or block.is_virtual_or_pad):
make = 'self.' + block.name + ' = ' + make
blocks_make.append((block, make))
return blocks_make
def _callbacks(self):
fg = self._flow_graph
variables = fg.get_variables()
parameters = fg.get_parameters()
# List of variable names
var_ids = [var.name for var in parameters + variables]
replace_dict = dict((var_id, 'self.' + var_id) for var_id in var_ids)
callbacks_all = []
for block in fg.iter_enabled_blocks():
callbacks_all.extend(expr_utils.expr_replace(cb, replace_dict) for cb in block.get_callbacks())
# Map var id to callbacks
def uses_var_id(callback):
used = expr_utils.get_variable_dependencies(callback, [var_id])
return used and (('self.' + var_id in callback) or ('this->' + var_id in callback)) # callback might contain var_id itself
callbacks = {}
for var_id in var_ids:
callbacks[var_id] = [callback for callback in callbacks_all if uses_var_id(callback)]
return callbacks
def _connections(self):
fg = self._flow_graph
templates = {key: Template(text)
for key, text in fg.parent_platform.connection_templates.items()}
def make_port_sig(port):
# TODO: make sense of this
if port.parent.key in ('pad_source', 'pad_sink'):
block = 'self'
key = fg.get_pad_port_global_key(port)
else:
block = 'self.' + port.parent_block.name
key = port.key
if not key.isdigit():
key = repr(key)
return '({block}, {key})'.format(block=block, key=key)
connections = fg.get_enabled_connections()
# Get the virtual blocks and resolve their connections
connection_factory = fg.parent_platform.Connection
virtual_source_connections = [c for c in connections if isinstance(c.source_block, blocks.VirtualSource)]
for connection in virtual_source_connections:
sink = connection.sink_port
for source in connection.source_port.resolve_virtual_source():
resolved = connection_factory(fg.orignal_flowgraph, source, sink)
connections.append(resolved)
virtual_connections = [c for c in connections if (isinstance(c.source_block, blocks.VirtualSource) or isinstance(c.sink_block, blocks.VirtualSink))]
for connection in virtual_connections:
# Remove the virtual connection
connections.remove(connection)
# Bypassing blocks: Need to find all the enabled connections for the block using
# the *connections* object rather than get_connections(). Create new connections
# that bypass the selected block and remove the existing ones. This allows adjacent
# bypassed blocks to see the newly created connections to downstream blocks,
# allowing them to correctly construct bypass connections.
bypassed_blocks = fg.get_bypassed_blocks()
for block in bypassed_blocks:
# Get the upstream connection (off of the sink ports)
# Use *connections* not get_connections()
source_connection = [c for c in connections if c.sink_port == block.sinks[0]]
# The source connection should never have more than one element.
assert (len(source_connection) == 1)
# Get the source of the connection.
source_port = source_connection[0].source_port
# Loop through all the downstream connections
for sink in (c for c in connections if c.source_port == block.sources[0]):
if not sink.enabled:
# Ignore disabled connections
continue
connection = connection_factory(fg.orignal_flowgraph, source_port, sink.sink_port)
connections.append(connection)
# Remove this sink connection
connections.remove(sink)
# Remove the source connection
connections.remove(source_connection[0])
# List of connections where each endpoint is enabled (sorted by domains, block names)
def by_domain_and_blocks(c):
return c.type, c.source_block.name, c.sink_block.name
rendered = []
for con in sorted(connections, key=by_domain_and_blocks):
template = templates[con.type]
code = template.render(make_port_sig=make_port_sig, source=con.source_port, sink=con.sink_port)
rendered.append(code)
return rendered
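
A hedged sketch of driving the generator above (the `flow_graph` object and output directory are hypothetical; inside GRC this class is normally invoked by the platform's generator wrapper rather than by hand):

# Hypothetical usage: `flow_graph` would be a loaded GRC flow graph object.
gen = TopBlockGenerator(flow_graph, '/tmp/grc_out')
gen.write()                    # renders flow_graph.py.mako and writes the files
print("wrote", gen.file_path)  # e.g. /tmp/grc_out/<flow_graph_id>.py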
---
#!/usr/bin/env python
#
# Generated Thu Jun 11 18:43:54 2009 by generateDS.py.
#
import sys
import getopt
from string import lower as str_lower
from xml.dom import minidom
from xml.dom import Node
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper:
def format_string(self, input_data, input_name=''):
return input_data
def format_integer(self, input_data, input_name=''):
return '%d' % input_data
def format_float(self, input_data, input_name=''):
return '%f' % input_data
def format_double(self, input_data, input_name=''):
return '%e' % input_data
def format_boolean(self, input_data, input_name=''):
return '%s' % input_data
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace):
if self.category == MixedContainer.CategoryText:
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace,name)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write('MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class _MemberSpec(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type(self): return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
#
# Data representation classes.
#
class DoxygenType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, version=None, compound=None):
self.version = version
if compound is None:
self.compound = []
else:
self.compound = compound
def factory(*args_, **kwargs_):
if DoxygenType.subclass:
return DoxygenType.subclass(*args_, **kwargs_)
else:
return DoxygenType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_compound(self): return self.compound
def set_compound(self, compound): self.compound = compound
def add_compound(self, value): self.compound.append(value)
def insert_compound(self, index, value): self.compound[index] = value
def get_version(self): return self.version
def set_version(self, version): self.version = version
def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='DoxygenType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
outfile.write(' version=%s' % (self.format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), ))
def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
for compound_ in self.compound:
compound_.export(outfile, level, namespace_, name_='compound')
def hasContent_(self):
if (
self.compound is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DoxygenType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.version is not None:
showIndent(outfile, level)
outfile.write('version = %s,\n' % (self.version,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('compound=[\n')
level += 1
for compound in self.compound:
showIndent(outfile, level)
outfile.write('model_.compound(\n')
compound.exportLiteral(outfile, level, name_='compound')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('version'):
self.version = attrs.get('version').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'compound':
obj_ = CompoundType.factory()
obj_.build(child_)
self.compound.append(obj_)
# end class DoxygenType
class CompoundType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, kind=None, refid=None, name=None, member=None):
self.kind = kind
self.refid = refid
self.name = name
if member is None:
self.member = []
else:
self.member = member
def factory(*args_, **kwargs_):
if CompoundType.subclass:
return CompoundType.subclass(*args_, **kwargs_)
else:
return CompoundType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def export(self, outfile, level, namespace_='', name_='CompoundType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='CompoundType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='CompoundType'):
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
def exportChildren(self, outfile, level, namespace_='', name_='CompoundType'):
if self.name is not None:
showIndent(outfile, level)
outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
def hasContent_(self):
if (
self.name is not None or
self.member is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='CompoundType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
outfile.write('kind = "%s",\n' % (self.kind,))
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
for member in self.member:
showIndent(outfile, level)
outfile.write('model_.member(\n')
member.exportLiteral(outfile, level, name_='member')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
self.name = name_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'member':
obj_ = MemberType.factory()
obj_.build(child_)
self.member.append(obj_)
# end class CompoundType
class MemberType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, kind=None, refid=None, name=None):
self.kind = kind
self.refid = refid
self.name = name
def factory(*args_, **kwargs_):
if MemberType.subclass:
return MemberType.subclass(*args_, **kwargs_)
else:
return MemberType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='MemberType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='MemberType'):
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
def exportChildren(self, outfile, level, namespace_='', name_='MemberType'):
if self.name is not None:
showIndent(outfile, level)
outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
def hasContent_(self):
if (
self.name is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='MemberType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
outfile.write('kind = "%s",\n' % (self.kind,))
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
self.name = name_
# end class MemberType
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
Options:
-s Use the SAX parser, not the minidom parser.
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def parse(inFileName):
doc = minidom.parse(inFileName)
rootNode = doc.documentElement
rootObj = DoxygenType.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="doxygenindex",
namespacedef_='')
return rootObj
def parseString(inString):
doc = minidom.parseString(inString)
rootNode = doc.documentElement
rootObj = DoxygenType.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="doxygenindex",
namespacedef_='')
return rootObj
def parseLiteral(inFileName):
doc = minidom.parse(inFileName)
rootNode = doc.documentElement
rootObj = DoxygenType.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('from index import *\n\n')
sys.stdout.write('rootObj = doxygenindex(\n')
rootObj.exportLiteral(sys.stdout, 0, name_="doxygenindex")
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
main()
#import pdb
#pdb.run('main()')
|
unknown
|
codeparrot/codeparrot-clean
| ||
import { expect, test } from 'vitest'
import { buildDesignSystem } from '../../design-system'
import { Theme } from '../../theme'
import { resolveConfig } from './resolve-config'
test('top level theme keys are replaced', () => {
let design = buildDesignSystem(new Theme())
let { resolvedConfig, replacedThemeKeys } = resolveConfig(design, [
{
config: {
theme: {
colors: {
red: 'red',
},
fontFamily: {
sans: 'SF Pro Display',
},
},
},
base: '/root',
reference: false,
src: undefined,
},
{
config: {
theme: {
colors: {
green: 'green',
},
},
},
base: '/root',
reference: false,
src: undefined,
},
{
config: {
theme: {
colors: {
blue: 'blue',
},
},
},
base: '/root',
reference: false,
src: undefined,
},
])
expect(resolvedConfig).toMatchObject({
theme: {
colors: {
blue: 'blue',
},
fontFamily: {
sans: 'SF Pro Display',
},
},
})
expect(replacedThemeKeys).toEqual(new Set(['colors', 'fontFamily']))
})
test('theme can be extended', () => {
let design = buildDesignSystem(new Theme())
let { resolvedConfig, replacedThemeKeys } = resolveConfig(design, [
{
config: {
theme: {
colors: {
red: 'red',
},
fontFamily: {
sans: 'SF Pro Display',
},
},
},
base: '/root',
reference: false,
src: undefined,
},
{
config: {
theme: {
extend: {
colors: {
blue: 'blue',
},
},
},
},
base: '/root',
reference: false,
src: undefined,
},
])
expect(resolvedConfig).toMatchObject({
theme: {
colors: {
red: 'red',
blue: 'blue',
},
fontFamily: {
sans: 'SF Pro Display',
},
},
})
expect(replacedThemeKeys).toEqual(new Set(['colors', 'fontFamily']))
})
test('theme keys can reference other theme keys using the theme function regardless of order', ({
expect,
}) => {
let design = buildDesignSystem(new Theme())
let { resolvedConfig, replacedThemeKeys } = resolveConfig(design, [
{
config: {
theme: {
colors: {
red: 'red',
},
placeholderColor: {
green: 'green',
},
},
},
base: '/root',
reference: false,
src: undefined,
},
{
config: {
theme: {
extend: {
colors: ({ theme }) => ({
...theme('placeholderColor'),
blue: 'blue',
}),
},
},
},
base: '/root',
reference: false,
src: undefined,
},
{
config: {
theme: {
extend: {
caretColor: ({ theme }) => theme('accentColor'),
accentColor: ({ theme }) => theme('backgroundColor'),
backgroundColor: ({ theme }) => theme('colors'),
},
},
},
base: '/root',
reference: false,
src: undefined,
},
])
expect(resolvedConfig).toMatchObject({
theme: {
colors: {
red: 'red',
green: 'green',
blue: 'blue',
},
accentColor: {
red: 'red',
green: 'green',
blue: 'blue',
},
backgroundColor: {
red: 'red',
green: 'green',
blue: 'blue',
},
caretColor: {
red: 'red',
green: 'green',
blue: 'blue',
},
},
})
expect(replacedThemeKeys).toEqual(new Set(['colors', 'placeholderColor']))
})
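// Note on the test above: the caretColor -> accentColor -> backgroundColor ->
// colors chain resolves even though each link is declared before the key it
// reads, which suggests `theme()` lookups are deferred until all config
// objects have been merged, rather than evaluated eagerly per config.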
test('theme keys can read from the CSS theme', () => {
let theme = new Theme()
theme.add('--color-green', 'green')
let design = buildDesignSystem(theme)
let { resolvedConfig, replacedThemeKeys } = resolveConfig(design, [
{
config: {
theme: {
colors: ({ theme }) => ({
// Reads from the --color-* namespace
...theme('color'),
red: 'red',
}),
accentColor: ({ theme }) => ({
// Reads from the --color-* namespace through `colors`
...theme('colors'),
}),
placeholderColor: ({ theme }) => ({
// Reads from the --color-* namespace through `colors`
primary: theme('colors.green'),
// Reads from the --color-* namespace directly
secondary: theme('color.green'),
}),
caretColor: ({ colors }) => ({
// Gives access to the colors object directly
primary: colors.green,
}),
transitionColor: (theme: any) => ({
// The parameter object is also the theme function
...theme('colors'),
}),
},
},
base: '/root',
reference: false,
src: undefined,
},
])
expect(resolvedConfig).toMatchObject({
theme: {
colors: {
red: 'red',
green: 'green',
},
accentColor: {
red: 'red',
green: 'green',
},
placeholderColor: {
primary: 'green',
secondary: 'green',
},
caretColor: {
primary: {
'100': 'oklch(96.2% 0.044 156.743)',
'200': 'oklch(92.5% 0.084 155.995)',
'300': 'oklch(87.1% 0.15 154.449)',
'400': 'oklch(79.2% 0.209 151.711)',
'50': 'oklch(98.2% 0.018 155.826)',
'500': 'oklch(72.3% 0.219 149.579)',
'600': 'oklch(62.7% 0.194 149.214)',
'700': 'oklch(52.7% 0.154 150.069)',
'800': 'oklch(44.8% 0.119 151.328)',
'900': 'oklch(39.3% 0.095 152.535)',
'950': 'oklch(26.6% 0.065 152.934)',
},
},
transitionColor: {
red: 'red',
green: 'green',
},
},
})
expect(replacedThemeKeys).toEqual(
new Set(['colors', 'accentColor', 'placeholderColor', 'caretColor', 'transitionColor']),
)
})
test('handles null as theme values', () => {
let theme = new Theme()
theme.add('--color-red-50', 'red')
theme.add('--color-red-100', 'red')
let design = buildDesignSystem(theme)
let { resolvedConfig, replacedThemeKeys } = resolveConfig(design, [
{
config: {
theme: {
colors: ({ theme }) => ({
// Reads from the --color-* namespace
...theme('color'),
}),
},
},
base: '/root',
reference: false,
src: undefined,
},
{
config: {
theme: {
extend: {
colors: {
red: null,
},
},
},
},
base: '/root',
reference: false,
src: undefined,
},
])
expect(resolvedConfig).toMatchObject({
theme: {
colors: {
red: null,
},
},
})
expect(replacedThemeKeys).toEqual(new Set(['colors']))
})
|
typescript
|
github
|
https://github.com/tailwindlabs/tailwindcss
|
packages/tailwindcss/src/compat/config/resolve-config.test.ts
|
:host {
display: inline;
.function {
&:hover {
background: var(--blue-02);
color: white;
cursor: pointer;
}
}
}
|
unknown
|
github
|
https://github.com/angular/angular
|
devtools/projects/ng-devtools/src/lib/shared/object-tree-explorer/property-preview/property-preview.component.scss
|
package client
import (
"context"
"encoding/json"
"fmt"
"io"
"math/rand"
"net/http"
"testing"
"time"
cerrdefs "github.com/containerd/errdefs"
"github.com/moby/moby/api/types/common"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
// TestSetHostHeader verifies that a fake host is set for local communications
// and that the real host is set for normal communications.
func TestSetHostHeader(t *testing.T) {
const testEndpoint = "/test"
testCases := []struct {
host string
expectedHost string
expectedURLHost string
}{
{
host: "unix:///var/run/docker.sock",
expectedHost: DummyHost,
expectedURLHost: "/var/run/docker.sock",
},
{
host: "npipe:////./pipe/docker_engine",
expectedHost: DummyHost,
expectedURLHost: "//./pipe/docker_engine",
},
{
host: "tcp://0.0.0.0:4243",
expectedHost: "",
expectedURLHost: "0.0.0.0:4243",
},
{
host: "tcp://localhost:4243",
expectedHost: "",
expectedURLHost: "localhost:4243",
},
}
for _, tc := range testCases {
t.Run(tc.host, func(t *testing.T) {
client, err := New(WithMockClient(func(req *http.Request) (*http.Response, error) {
if err := assertRequest(req, http.MethodGet, testEndpoint); err != nil {
return nil, err
}
if req.Host != tc.expectedHost {
return nil, fmt.Errorf("wxpected host %q, got %q", tc.expectedHost, req.Host)
}
if req.URL.Host != tc.expectedURLHost {
return nil, fmt.Errorf("expected URL host %q, got %q", tc.expectedURLHost, req.URL.Host)
}
return mockResponse(http.StatusOK, nil, "")(req)
}), WithHost(tc.host))
assert.NilError(t, err)
_, err = client.sendRequest(t.Context(), http.MethodGet, testEndpoint, nil, nil, nil)
assert.NilError(t, err)
})
}
}
// TestPlainTextError tests the server returning an error in plain text.
// API versions < 1.24 returned plain text errors, but we may encounter
// other situations where a non-JSON error is returned.
func TestPlainTextError(t *testing.T) {
client, err := New(WithMockClient(mockResponse(http.StatusInternalServerError, nil, "Server error")))
assert.NilError(t, err)
_, err = client.ContainerList(t.Context(), ContainerListOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
}
// TestResponseErrors tests handling of error responses returned by the API.
// It includes test-cases for malformed and invalid error-responses, as well
// as plain text errors for backwards compatibility with API versions <1.24.
func TestResponseErrors(t *testing.T) {
errorResponse, err := json.Marshal(&common.ErrorResponse{
Message: "Some error occurred",
})
assert.NilError(t, err)
tests := []struct {
doc string
apiVersion string
contentType string
response string
expected string
}{
{
			// Valid [common.ErrorResponse] error, marshaled at runtime rather than taken from a fixture, to validate the current implementation.
doc: "JSON error (non-fixture)",
contentType: "application/json",
response: string(errorResponse),
expected: `Error response from daemon: Some error occurred`,
},
{
// Valid [common.ErrorResponse] error.
doc: "JSON error",
contentType: "application/json",
response: `{"message":"Some error occurred"}`,
expected: `Error response from daemon: Some error occurred`,
},
{
// Valid [common.ErrorResponse] error with additional fields.
doc: "JSON error with extra fields",
contentType: "application/json",
response: `{"message":"Some error occurred", "other_field": "some other field that's not part of common.ErrorResponse"}`,
expected: `Error response from daemon: Some error occurred`,
},
{
			// API versions before 1.24 did not support JSON errors. Technically,
			// we no longer downgrade to older API versions, but we make an
			// exception for errors so that older clients print a more readable
			// error.
doc: "JSON error on old API",
apiVersion: "1.23",
contentType: "text/plain; charset=utf-8",
response: `client version 1.10 is too old. Minimum supported API version is 1.24, please upgrade your client to a newer version`,
expected: `Error response from daemon: client version 1.10 is too old. Minimum supported API version is 1.24, please upgrade your client to a newer version`,
},
{
doc: "plain-text error",
contentType: "text/plain",
response: `Some error occurred`,
expected: `Error response from daemon: Some error occurred`,
},
{
// TODO(thaJeztah): consider returning (partial) raw response for these
doc: "malformed JSON",
contentType: "application/json",
response: `{"message":"Some error occurred`,
expected: `error reading JSON: unexpected end of JSON input`,
},
{
			// Server response that's valid JSON, but not the expected [common.ErrorResponse] schema
			doc: "incorrect JSON schema",
contentType: "application/json",
response: `{"error":"Some error occurred"}`,
expected: `Error response from daemon: API returned a 400 (Bad Request) but provided no error-message`,
},
{
// TODO(thaJeztah): improve handling of such errors; we can return the generic "502 Bad Gateway" instead
doc: "html error",
contentType: "text/html",
response: `<!doctype html>
<html lang="en">
<head>
<title>502 Bad Gateway</title>
</head>
<body>
<h1>Bad Gateway</h1>
<p>The server was unable to complete your request. Please try again later.</p>
<p>If this problem persists, please <a href="https://example.com/support">contact support</a>.</p>
</body>
</html>`,
expected: `Error response from daemon: <!doctype html>
<html lang="en">
<head>
<title>502 Bad Gateway</title>
</head>
<body>
<h1>Bad Gateway</h1>
<p>The server was unable to complete your request. Please try again later.</p>
<p>If this problem persists, please <a href="https://example.com/support">contact support</a>.</p>
</body>
</html>`,
},
{
// TODO(thaJeztah): improve handling of these errors (JSON: invalid character '<' looking for beginning of value)
doc: "html error masquerading as JSON",
contentType: "application/json",
response: `<!doctype html>
<html lang="en">
<head>
<title>502 Bad Gateway</title>
</head>
<body>
<h1>Bad Gateway</h1>
<p>The server was unable to complete your request. Please try again later.</p>
<p>If this problem persists, please <a href="https://example.com/support">contact support</a>.</p>
</body>
</html>`,
expected: `error reading JSON: invalid character '<' looking for beginning of value`,
},
}
for _, tc := range tests {
t.Run(tc.doc, func(t *testing.T) {
client, err := New(WithBaseMockClient(func(req *http.Request) (*http.Response, error) {
return mockResponse(http.StatusBadRequest, http.Header{"Content-Type": []string{tc.contentType}}, tc.response)(req)
}))
if tc.apiVersion != "" {
client, err = New(WithHTTPClient(client.client), WithAPIVersion(tc.apiVersion))
}
assert.NilError(t, err)
_, err = client.Ping(t.Context(), PingOptions{})
assert.Check(t, is.Error(err, tc.expected))
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
})
}
}
func TestInfiniteError(t *testing.T) {
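	// An endless stream of pseudo-random bytes stands in for an unbounded
	// error body; the client has to stop reading it at some point and still
	// return a useful error.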
infinitR := rand.New(rand.NewSource(42))
client, err := New(WithBaseMockClient(func(req *http.Request) (*http.Response, error) {
resp := &http.Response{
StatusCode: http.StatusInternalServerError,
Header: http.Header{},
Body: io.NopCloser(infinitR),
}
return resp, nil
}))
assert.NilError(t, err)
_, err = client.Ping(t.Context(), PingOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
assert.Check(t, is.ErrorContains(err, "request returned Internal Server Error"))
}
func TestCanceledContext(t *testing.T) {
const testEndpoint = "/test"
client, err := New(WithMockClient(func(req *http.Request) (*http.Response, error) {
assert.Check(t, is.ErrorType(req.Context().Err(), context.Canceled))
return nil, context.Canceled
}))
assert.NilError(t, err)
ctx, cancel := context.WithCancel(t.Context())
cancel()
_, err = client.sendRequest(ctx, http.MethodGet, testEndpoint, nil, nil, nil)
assert.Check(t, is.ErrorIs(err, context.Canceled))
}
func TestDeadlineExceededContext(t *testing.T) {
const testEndpoint = "/test"
client, err := New(WithMockClient(func(req *http.Request) (*http.Response, error) {
assert.Check(t, is.ErrorType(req.Context().Err(), context.DeadlineExceeded))
return nil, context.DeadlineExceeded
}))
assert.NilError(t, err)
ctx, cancel := context.WithDeadline(t.Context(), time.Now())
defer cancel()
<-ctx.Done()
_, err = client.sendRequest(ctx, http.MethodGet, testEndpoint, nil, nil, nil)
assert.Check(t, is.ErrorIs(err, context.DeadlineExceeded))
}
func TestPrepareJSONRequest(t *testing.T) {
tests := []struct {
doc string
body any
headers http.Header
expBody string
expHeaders http.Header
}{
{
doc: "nil body",
body: nil,
headers: http.Header{"Something": []string{"something"}},
expBody: "",
expHeaders: http.Header{
// no content-type is set on empty requests.
"Something": []string{"something"},
},
},
{
doc: "nil interface body",
body: (*struct{})(nil),
headers: http.Header{"Something": []string{"something"}},
expBody: "",
expHeaders: http.Header{
// no content-type is set on empty requests.
"Something": []string{"something"},
},
},
{
doc: "empty struct body",
body: &struct{}{},
headers: http.Header{"Something": []string{"something"}},
expBody: `{}`,
expHeaders: http.Header{
"Content-Type": []string{"application/json"},
"Something": []string{"something"},
},
},
{
doc: "json raw message",
body: json.RawMessage("{}"),
expBody: `{}`,
expHeaders: http.Header{
"Content-Type": []string{"application/json"},
},
},
{
doc: "empty json raw message",
body: json.RawMessage(""),
expBody: "",
expHeaders: nil, // no content-type is set on empty requests.
},
{
doc: "empty body",
body: http.NoBody,
expBody: "",
expHeaders: nil, // no content-type is set on empty requests.
},
}
for _, tc := range tests {
t.Run(tc.doc, func(t *testing.T) {
req, hdr, err := prepareJSONRequest(tc.body, tc.headers)
assert.NilError(t, err)
resp, err := io.ReadAll(req)
assert.NilError(t, err)
body := string(resp)
assert.Check(t, is.Equal(body, tc.expBody))
assert.Check(t, is.DeepEqual(hdr, tc.expHeaders))
assert.Check(t, is.Equal(tc.headers.Get("Content-Type"), ""), "Should not have mutated original headers")
})
}
}
|
go
|
github
|
https://github.com/moby/moby
|
client/request_test.go
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
* The set of requests which have been sent or are being sent but haven't yet received a response
*/
final class InFlightRequests {
private final int maxInFlightRequestsPerConnection;
private final Map<String, Deque<NetworkClient.InFlightRequest>> requests = new HashMap<>();
/** Thread safe total number of in flight requests. */
private final AtomicInteger inFlightRequestCount = new AtomicInteger(0);
public InFlightRequests(int maxInFlightRequestsPerConnection) {
this.maxInFlightRequestsPerConnection = maxInFlightRequestsPerConnection;
}
/**
* Add the given request to the queue for the connection it was directed to
*/
public void add(NetworkClient.InFlightRequest request) {
String destination = request.destination;
Deque<NetworkClient.InFlightRequest> reqs = this.requests.computeIfAbsent(destination, k -> new ArrayDeque<>());
reqs.addFirst(request);
inFlightRequestCount.incrementAndGet();
}
/**
* Get the request queue for the given node
*/
private Deque<NetworkClient.InFlightRequest> requestQueue(String node) {
Deque<NetworkClient.InFlightRequest> reqs = requests.get(node);
if (reqs == null || reqs.isEmpty())
throw new IllegalStateException("There are no in-flight requests for node " + node);
return reqs;
}
/**
* Get the oldest request (the one that will be completed next) for the given node
*/
public NetworkClient.InFlightRequest completeNext(String node) {
NetworkClient.InFlightRequest inFlightRequest = requestQueue(node).pollLast();
inFlightRequestCount.decrementAndGet();
return inFlightRequest;
}
/**
* Get the last request we sent to the given node (but don't remove it from the queue)
* @param node The node id
*/
public NetworkClient.InFlightRequest lastSent(String node) {
return requestQueue(node).peekFirst();
}
/**
* Complete the last request that was sent to a particular node.
* @param node The node the request was sent to
* @return The request
*/
public NetworkClient.InFlightRequest completeLastSent(String node) {
NetworkClient.InFlightRequest inFlightRequest = requestQueue(node).pollFirst();
inFlightRequestCount.decrementAndGet();
return inFlightRequest;
}
/**
* Can we send more requests to this node?
*
* @param node Node in question
     * @return true if another request may be sent: the queue is empty, or the head request has finished sending and the queue size is below the per-connection limit
*/
public boolean canSendMore(String node) {
Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
return queue == null || queue.isEmpty() ||
(queue.peekFirst().send.completed() && queue.size() < this.maxInFlightRequestsPerConnection);
}
/**
* Return the number of in-flight requests directed at the given node
* @param node The node
* @return The request count.
*/
public int count(String node) {
Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
return queue == null ? 0 : queue.size();
}
/**
* Return true if there is no in-flight request directed at the given node and false otherwise
*/
public boolean isEmpty(String node) {
Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
return queue == null || queue.isEmpty();
}
/**
* Count all in-flight requests for all nodes. This method is thread safe, but may lag the actual count.
*/
public int count() {
return inFlightRequestCount.get();
}
/**
* Return true if there is no in-flight request and false otherwise
*/
public boolean isEmpty() {
for (Deque<NetworkClient.InFlightRequest> deque : this.requests.values()) {
if (!deque.isEmpty())
return false;
}
return true;
}
/**
* Clear out all the in-flight requests for the given node and return them
*
* @param node The node
* @return All the in-flight requests for that node that have been removed
*/
public Iterable<NetworkClient.InFlightRequest> clearAll(String node) {
Deque<NetworkClient.InFlightRequest> reqs = requests.get(node);
if (reqs == null) {
return Collections.emptyList();
} else {
final Deque<NetworkClient.InFlightRequest> clearedRequests = requests.remove(node);
inFlightRequestCount.getAndAdd(-clearedRequests.size());
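            // add() uses addFirst(), so the newest request sits at the head of the
            // deque; the descending iterator therefore yields the cleared requests
            // oldest-first, i.e. in the order they were originally sent.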
return clearedRequests::descendingIterator;
}
}
    private boolean hasExpiredRequest(long now, Deque<NetworkClient.InFlightRequest> deque) {
for (NetworkClient.InFlightRequest request : deque) {
// We exclude throttle time here because we want to ensure that we don't expire requests while
// they are throttled. The request timeout should take effect only after the throttle time has elapsed.
if (request.timeElapsedSinceSendMs(now) - request.throttleTimeMs() > request.requestTimeoutMs)
return true;
}
return false;
}
/**
     * Returns a list of nodes with pending in-flight requests that need to be timed out
*
* @param now current time in milliseconds
* @return list of nodes
*/
public List<String> nodesWithTimedOutRequests(long now) {
List<String> nodeIds = new ArrayList<>();
for (Map.Entry<String, Deque<NetworkClient.InFlightRequest>> requestEntry : requests.entrySet()) {
String nodeId = requestEntry.getKey();
Deque<NetworkClient.InFlightRequest> deque = requestEntry.getValue();
if (hasExpiredRequest(now, deque))
nodeIds.add(nodeId);
}
return nodeIds;
}
void incrementThrottleTime(String nodeId, long throttleTimeMs) {
requests.getOrDefault(nodeId, new ArrayDeque<>()).
forEach(request -> request.incrementThrottleTime(throttleTimeMs));
}
}
|
java
|
github
|
https://github.com/apache/kafka
|
clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java
|
# Copyright (C) 2011 Google Inc. All rights reserved.
# Copyright (C) 2011 Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.commands.commandtest import CommandsTest
from webkitpy.tool.commands.suggestnominations import SuggestNominations
from webkitpy.tool.mocktool import MockOptions, MockTool
class SuggestNominationsTest(CommandsTest):
mock_git_output = """commit 60831dde5beb22f35aef305a87fca7b5f284c698
Author: fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Date: 2011-09-15 19:56:21 +0000
Value profiles collect no information for global variables
https://bugs.webkit.org/show_bug.cgi?id=68143
Reviewed by Geoffrey Garen.
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@95219 268f45cc-cd09-0410-ab3c-d52691b4dbfc
"""
mock_same_author_commit_message = """Author: fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Date: 2011-09-15 19:56:21 +0000
Value profiles collect no information for global variables
https://bugs.webkit.org/show_bug.cgi?id=68143
Reviewed by Geoffrey Garen.
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@95219 268f45cc-cd09-0410-ab3c-d52691b4dbfc
"""
def _make_options(self, **kwargs):
defaults = {
'committer_minimum': 10,
'max_commit_age': 9,
'reviewer_minimum': 80,
'show_commits': False,
'verbose': False,
}
options = MockOptions(**defaults)
options.update(**kwargs)
return options
def test_recent_commit_messages(self):
tool = MockTool()
suggest_nominations = SuggestNominations()
suggest_nominations._init_options(options=self._make_options())
suggest_nominations.bind_to_tool(tool)
tool.executive.run_command = lambda command: self.mock_git_output
self.assertEqual(list(suggest_nominations._recent_commit_messages()), [self.mock_same_author_commit_message])
mock_non_committer_commit_message = """
Author: commit-queue@webkit.org <commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Date: 2009-09-15 14:08:42 +0000
Let TestWebKitAPI work for chromium
https://bugs.webkit.org/show_bug.cgi?id=67756
Patch by Xianzhu Wang <wangxianzhu@chromium.org> on 2011-09-15
Reviewed by Sam Weinig.
Source/WebKit/chromium:
* WebKit.gyp:
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@95188 268f45cc-cd09-0410-ab3c-d52691b4dbfc
"""
def test_basic(self):
expected_stdout = "REVIEWER: Xianzhu Wang (wangxianzhu@chromium.org) has 88 reviewed patches\n"
options = self._make_options()
suggest_nominations = SuggestNominations()
suggest_nominations._init_options(options=options)
suggest_nominations._recent_commit_messages = lambda: [self.mock_non_committer_commit_message for _ in range(88)]
self.assert_execute_outputs(suggest_nominations, [], expected_stdout=expected_stdout, options=options)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from designate.openstack.common import log as logging
from designate.openstack.common.rpc import service as rpc_service
from designate import backend
from designate.central import rpcapi as central_rpcapi
LOG = logging.getLogger(__name__)
central_api = central_rpcapi.CentralAPI()
class Service(rpc_service.Service):
def __init__(self, *args, **kwargs):
manager = backend.get_backend(cfg.CONF['service:agent'].backend_driver,
central_service=central_api)
kwargs.update(
host=cfg.CONF.host,
topic=cfg.CONF.agent_topic,
manager=manager
)
super(Service, self).__init__(*args, **kwargs)
def start(self):
self.manager.start()
super(Service, self).start()
def stop(self):
super(Service, self).stop()
self.manager.stop()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import tabs
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class GeneralTab(tabs.Tab):
name = _("General Info")
slug = "job_details_tab"
template_name = ("project/data_processing.jobs/_details.html")
def get_context_data(self, request):
job_id = self.tab_group.kwargs['job_id']
job = saharaclient.job_get(request, job_id)
return {"job": job}
class JobDetailsTabs(tabs.TabGroup):
slug = "job_details"
tabs = (GeneralTab,)
sticky = True
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Unit tests for the buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks module."""
import sys
import unittest
from unittest.mock import MagicMock, patch
from buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks import (
GENERATED_TASK_PREFIX,
BazelCoreAnalysisTaskGenerator,
CoreInfo,
ResmokeCoreAnalysisTaskGenerator,
_get_core_analyzer_commands,
get_core_pid,
)
class TestCorePidExtraction(unittest.TestCase):
"""Unit tests for get_core_pid function."""
def test_standard_core_dump_format(self):
core_file = "dump_mongod.429814.core"
pid = get_core_pid(core_file)
self.assertEqual(pid, "429814")
def test_multiversion_core_dump_format(self):
core_file = "dump_mongod-8.2.429814.core"
pid = get_core_pid(core_file)
self.assertEqual(pid, "429814")
def test_with_path(self):
core_file = "/path/to/dump_mongod.789012.core"
pid = get_core_pid(core_file)
self.assertEqual(pid, "789012")
def test_invalid_format_non_digit_pid(self):
"""Test that non-digit PID raises an assertion."""
with self.assertRaises(AssertionError):
get_core_pid("dump_mongod.notanumber.core")
@unittest.skipIf(
not sys.platform.startswith("linux"),
reason="Core analysis is only support on linux",
)
class TestGetCoreAnalyzerCommands(unittest.TestCase):
"""Unit tests for get_core_analyzer_commands function."""
def test_returns_list_of_function_calls(self):
"""Test that function returns a list."""
commands = _get_core_analyzer_commands("task123", "0", "s3://results", "on", True, set())
self.assertIsInstance(commands, list)
self.assertGreater(len(commands), 0)
def test_includes_task_id_in_subprocess_command(self):
"""Test that task ID is included in subprocess command."""
task_id = "task_abc_123"
commands = _get_core_analyzer_commands(task_id, "0", "s3://results", "on", True, set())
# Find the subprocess.exec command
subprocess_cmd = None
for cmd in commands:
if hasattr(cmd, "as_dict") and "subprocess.exec" in str(cmd.as_dict()):
subprocess_cmd = cmd
break
self.assertIsNotNone(subprocess_cmd)
cmd_dict = subprocess_cmd.as_dict()
args = cmd_dict["params"]["args"]
self.assertIn(f"--task-id={task_id}", args)
def test_includes_execution_in_subprocess_command(self):
"""Test that execution is included in subprocess command."""
execution = "3"
commands = _get_core_analyzer_commands(
"task123", execution, "s3://results", "on", True, set()
)
subprocess_cmd = None
for cmd in commands:
if hasattr(cmd, "as_dict") and "subprocess.exec" in str(cmd.as_dict()):
subprocess_cmd = cmd
break
cmd_dict = subprocess_cmd.as_dict()
args = cmd_dict["params"]["args"]
self.assertIn(f"--execution={execution}", args)
def test_includes_gdb_index_cache_setting(self):
"""Test that gdb index cache setting is included."""
for cache_setting in ["on", "off"]:
commands = _get_core_analyzer_commands(
"task123", "0", "s3://results", cache_setting, True, set()
)
subprocess_cmd = None
for cmd in commands:
if hasattr(cmd, "as_dict") and "subprocess.exec" in str(cmd.as_dict()):
subprocess_cmd = cmd
break
cmd_dict = subprocess_cmd.as_dict()
args = cmd_dict["params"]["args"]
self.assertIn(f"--gdb-index-cache={cache_setting}", args)
def test_includes_boring_core_dump_pids(self):
"""Test that boring core dump PIDs are included."""
boring_pids = {"12345", "67890", "11111"}
commands = _get_core_analyzer_commands(
"task123", "0", "s3://results", "on", True, boring_pids
)
subprocess_cmd = None
for cmd in commands:
if hasattr(cmd, "as_dict") and "subprocess.exec" in str(cmd.as_dict()):
subprocess_cmd = cmd
break
cmd_dict = subprocess_cmd.as_dict()
args = cmd_dict["params"]["args"]
# Find the boring PIDs argument
boring_arg = None
for arg in args:
if arg and arg.startswith("--boring-core-dump-pids="):
boring_arg = arg
break
self.assertIsNotNone(boring_arg)
# Check that all PIDs are in the argument
for pid in boring_pids:
self.assertIn(pid, boring_arg)
def test_empty_boring_pids(self):
"""Test handling of empty boring PIDs set."""
commands = _get_core_analyzer_commands("task123", "0", "s3://results", "on", True, set())
subprocess_cmd = None
for cmd in commands:
if hasattr(cmd, "as_dict") and "subprocess.exec" in str(cmd.as_dict()):
subprocess_cmd = cmd
break
cmd_dict = subprocess_cmd.as_dict()
args = cmd_dict["params"]["args"]
boring_arg = None
for arg in args:
if arg and arg.startswith("--boring-core-dump-pids="):
boring_arg = arg
break
self.assertIsNotNone(boring_arg)
self.assertEqual(boring_arg, "--boring-core-dump-pids=")
def test_bazel_task_flag(self):
"""Test that is_bazel_task flag is passed correctly."""
commands = _get_core_analyzer_commands(
"task123", "0", "s3://results", "on", True, set(), is_bazel_task=True
)
subprocess_cmd = None
for cmd in commands:
if hasattr(cmd, "as_dict") and "subprocess.exec" in str(cmd.as_dict()):
subprocess_cmd = cmd
break
cmd_dict = subprocess_cmd.as_dict()
args = cmd_dict["params"]["args"]
self.assertIn("--is-bazel-task", args)
def test_non_bazel_task_no_flag(self):
"""Test that non-bazel tasks don't include the bazel flag."""
commands = _get_core_analyzer_commands(
"task123", "0", "s3://results", "on", True, set(), is_bazel_task=False
)
subprocess_cmd = None
for cmd in commands:
if hasattr(cmd, "as_dict") and "subprocess.exec" in str(cmd.as_dict()):
subprocess_cmd = cmd
break
cmd_dict = subprocess_cmd.as_dict()
args = cmd_dict["params"]["args"]
# Filter out None values
args = [arg for arg in args if arg is not None]
self.assertNotIn("--is-bazel-task", args)
def test_includes_s3_put_with_results_url(self):
"""Test that S3 put command includes correct results URL."""
results_url = "s3://bucket/path/to/results.tgz"
commands = _get_core_analyzer_commands("task123", "0", results_url, "on", True, set())
s3_cmd = None
for cmd in commands:
if hasattr(cmd, "as_dict") and "s3.put" in str(cmd.as_dict()):
s3_cmd = cmd
break
self.assertIsNotNone(s3_cmd)
cmd_dict = s3_cmd.as_dict()
self.assertEqual(cmd_dict["params"]["remote_file"], results_url)
def test_includes_otel_extra_data(self):
"""Test that OTEL extra data includes has_interesting_core_dumps flag."""
for has_interesting in [True, False]:
commands = _get_core_analyzer_commands(
"task123", "0", "s3://results", "on", has_interesting, set()
)
subprocess_cmd = None
for cmd in commands:
if hasattr(cmd, "as_dict") and "subprocess.exec" in str(cmd.as_dict()):
subprocess_cmd = cmd
break
cmd_dict = subprocess_cmd.as_dict()
args = cmd_dict["params"]["args"]
expected_str = (
f"--otel-extra-data=has_interesting_core_dumps={str(has_interesting).lower()}"
)
self.assertIn(expected_str, args)
@unittest.skipIf(
not sys.platform.startswith("linux"),
reason="Core analysis is only support on linux",
)
class TestCoreAnalysisTaskGenerator(unittest.TestCase):
"""Unit tests for CoreAnalysisTaskGenerator base class."""
def setUp(self):
"""Set up test fixtures."""
self.expansions_file = "test_expansions.yml"
self.mock_expansions = {
"task_name": "resmoke_test",
"task_id": "test_task_123",
"execution": "0",
"build_variant": "ubuntu2204",
"distro_id": "ubuntu2204-large",
"core_analyzer_results_url": "s3://bucket/results.tgz",
"compile_variant": "ubuntu2204-compile",
"workdir": "/data/mci",
}
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.read_config_file")
def test_generate_creates_task_config_with_interesting_cores(self, mock_read_config):
"""Test that generate creates proper task config when interesting cores are found."""
mock_read_config.return_value = self.mock_expansions
mock_cores = [
CoreInfo(
path="/tmp/dump_mongod.123.core",
binary_name="mongod",
pid="123",
marked_boring=False,
),
CoreInfo(
path="/tmp/dump_mongos.456.core",
binary_name="mongos",
pid="456",
marked_boring=False,
),
]
with patch.object(ResmokeCoreAnalysisTaskGenerator, "find_cores", return_value=mock_cores):
generator = ResmokeCoreAnalysisTaskGenerator(self.expansions_file, use_mock_tasks=True)
result = generator.generate()
self.assertIsNotNone(result)
self.assertIn("buildvariants", result)
self.assertEqual(len(result["buildvariants"]), 1)
variant = result["buildvariants"][0]
self.assertEqual(variant["name"], "ubuntu2204")
self.assertEqual(len(variant["tasks"]), 1)
task = variant["tasks"][0]
self.assertTrue(task["activate"])
self.assertTrue(task["name"].startswith(GENERATED_TASK_PREFIX))
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.read_config_file")
def test_generate_does_not_activate_with_only_boring_cores(self, mock_read_config):
"""Test that task is not activated when only boring cores are found."""
mock_read_config.return_value = self.mock_expansions
mock_cores = [
CoreInfo(
path="/tmp/dump_mongod.123.core",
binary_name="mongod",
pid="123",
marked_boring=True,
),
CoreInfo(
path="/tmp/dump_mongos.456.core",
binary_name="mongos",
pid="456",
marked_boring=True,
),
]
with patch.object(ResmokeCoreAnalysisTaskGenerator, "find_cores", return_value=mock_cores):
generator = ResmokeCoreAnalysisTaskGenerator(self.expansions_file, use_mock_tasks=True)
result = generator.generate()
self.assertIsNotNone(result)
variant = result["buildvariants"][0]
task = variant["tasks"][0]
self.assertFalse(task["activate"])
def test_should_skip_task_for_hardcoded_task_names(self):
"""Test that hardcoded task names are skipped."""
with patch(
"buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.read_config_file"
) as mock_read:
mock_read.return_value = self.mock_expansions
generator = ResmokeCoreAnalysisTaskGenerator(self.expansions_file, use_mock_tasks=True)
# Test skipped task names
for task_name in ["no_passthrough_disagg_override", "disagg_repl_jscore_passthrough"]:
mock_task = MagicMock()
mock_task.display_name = task_name
mock_task.parent_task_id = None
mock_task.build_variant = "ubuntu2204"
self.assertTrue(generator._should_skip_task(mock_task))
def test_should_not_skip_normal_task(self):
"""Test that normal tasks are not skipped."""
with patch(
"buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.read_config_file"
) as mock_read:
mock_read.return_value = self.mock_expansions
generator = ResmokeCoreAnalysisTaskGenerator(self.expansions_file, use_mock_tasks=True)
mock_task = MagicMock()
mock_task.display_name = "normal_task"
mock_task.parent_task_id = None
mock_task.build_variant = "ubuntu2204"
self.assertFalse(generator._should_skip_task(mock_task))
@unittest.skipIf(
not sys.platform.startswith("linux"),
reason="Core analysis is only support on linux",
)
class TestResmokeCoreAnalysisTaskGenerator(unittest.TestCase):
"""Unit tests for ResmokeCoreAnalysisTaskGenerator."""
def setUp(self):
"""Set up test fixtures."""
self.expansions_file = "test_expansions.yml"
self.mock_expansions = {
"task_name": "resmoke_test",
"task_id": "test_task_123",
"execution": "0",
"build_variant": "ubuntu2204",
"distro_id": "ubuntu2204-large",
"core_analyzer_results_url": "s3://bucket/results.tgz",
"compile_variant": "ubuntu2204-compile",
"workdir": "/data/mci",
}
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.read_config_file")
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.os.path.exists")
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.os.listdir")
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.dumper.get_dumpers")
def test_find_cores_discovers_cores_from_artifacts(
self, mock_get_dumpers, mock_listdir, mock_exists, mock_read_config
):
"""Test that find_cores discovers cores from task artifacts."""
mock_read_config.return_value = self.mock_expansions
# Mock binary directory exists
def exists_side_effect(path):
if "dist-test/bin" in path or "boring_core_dumps.txt" in path:
return True
return False
mock_exists.side_effect = exists_side_effect
mock_listdir.return_value = ["mongod", "mongos"]
# Mock task artifacts
mock_artifact1 = MagicMock()
mock_artifact1.name = "Core Dump 1 (dump_mongod.12345.core.gz)"
mock_artifact2 = MagicMock()
mock_artifact2.name = "Core Dump 2 (dump_mongos.67890.core.gz)"
mock_task = MagicMock()
mock_task.artifacts = [mock_artifact1, mock_artifact2]
# Mock dumper
mock_dbg = MagicMock()
mock_dbg.get_binary_from_core_dump.side_effect = [
("mongod", None),
("mongos", None),
]
mock_dumpers = MagicMock()
mock_dumpers.dbg = mock_dbg
mock_get_dumpers.return_value = mock_dumpers
with patch("builtins.open", unittest.mock.mock_open(read_data="")):
generator = ResmokeCoreAnalysisTaskGenerator(self.expansions_file, use_mock_tasks=True)
generator.evg_api.task_by_id.return_value = mock_task
cores = generator.find_cores()
self.assertEqual(len(cores), 2)
self.assertEqual(cores[0].path, "dump_mongod.12345.core")
self.assertEqual(cores[0].binary_name, "mongod")
self.assertEqual(cores[0].pid, "12345")
self.assertEqual(cores[1].path, "dump_mongos.67890.core")
self.assertEqual(cores[1].binary_name, "mongos")
self.assertEqual(cores[1].pid, "67890")
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.read_config_file")
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.os.path.exists")
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.os.listdir")
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.dumper.get_dumpers")
def test_find_cores_marks_boring_cores(
self, mock_get_dumpers, mock_listdir, mock_exists, mock_read_config
):
"""Test that find_cores correctly marks boring cores."""
mock_read_config.return_value = self.mock_expansions
mock_exists.return_value = True
mock_listdir.return_value = ["mongod"]
# Mock artifact with boring core
mock_artifact = MagicMock()
mock_artifact.name = "Core Dump 1 (dump_mongod.12345.core.gz)"
mock_task = MagicMock()
mock_task.artifacts = [mock_artifact]
# Mock dumper
mock_dbg = MagicMock()
mock_dbg.get_binary_from_core_dump.return_value = ("mongod", None)
mock_dumpers = MagicMock()
mock_dumpers.dbg = mock_dbg
mock_get_dumpers.return_value = mock_dumpers
# Mock boring PIDs file with PID 12345
with patch("builtins.open", unittest.mock.mock_open(read_data="12345\n67890\n")):
generator = ResmokeCoreAnalysisTaskGenerator(self.expansions_file, use_mock_tasks=True)
generator.evg_api.task_by_id.return_value = mock_task
cores = generator.find_cores()
self.assertEqual(len(cores), 1)
self.assertTrue(cores[0].marked_boring)
@unittest.skipIf(
not sys.platform.startswith("linux"),
reason="Core analysis is only support on linux",
)
class TestBazelCoreAnalysisTaskGenerator(unittest.TestCase):
"""Unit tests for BazelCoreAnalysisTaskGenerator."""
def setUp(self):
"""Set up test fixtures."""
self.expansions_file = "test_expansions.yml"
self.mock_expansions = {
"task_name": "bazel_test",
"task_id": "test_task_123",
"execution": "0",
"build_variant": "ubuntu2204",
"distro_id": "ubuntu2204-large",
"core_analyzer_results_url": "s3://bucket/results.tgz",
"compile_variant": "ubuntu2204-compile",
"workdir": "/data/mci",
}
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.read_config_file")
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.os.path.exists")
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.glob.glob")
def test_find_cores_discovers_cores_in_test_outputs(
self, mock_glob, mock_exists, mock_read_config
):
"""Test that find_cores discovers cores in test.outputs directories."""
mock_read_config.return_value = self.mock_expansions
def exists_side_effect(path):
if "results" in path and "boring_core_dumps.txt" not in path:
return True
return False
mock_exists.side_effect = exists_side_effect
# Mock glob to return test.outputs directories
def glob_side_effect(pattern, **kwargs):
if ".core" in pattern:
if "test1" in pattern:
return ["/data/mci/results/test1/test.outputs/dump_mongod.12345.core"]
elif "test2" in pattern:
return ["/data/mci/results/test2/test.outputs/dump_mongos.67890.core"]
elif ".mdmp" in pattern:
return []
elif "test.outputs" in pattern and "recursive" in kwargs:
return [
"/data/mci/results/test1/test.outputs",
"/data/mci/results/test2/test.outputs",
]
return []
mock_glob.side_effect = glob_side_effect
generator = BazelCoreAnalysisTaskGenerator(self.expansions_file, use_mock_tasks=True)
cores = generator.find_cores()
self.assertEqual(len(cores), 2)
self.assertIn("dump_mongod.12345.core", cores[0].path)
self.assertEqual(cores[0].pid, "12345")
self.assertIn("dump_mongos.67890.core", cores[1].path)
self.assertEqual(cores[1].pid, "67890")
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.read_config_file")
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.os.path.exists")
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.glob.glob")
def test_find_cores_marks_boring_cores_from_test_outputs(
self, mock_glob, mock_exists, mock_read_config
):
"""Test that find_cores marks boring cores based on boring_core_dumps.txt."""
mock_read_config.return_value = self.mock_expansions
boring_file_path = None
def exists_side_effect(path):
if "results" in path and "test.outputs" not in path:
return True
if "boring_core_dumps.txt" in path:
nonlocal boring_file_path
boring_file_path = path
return True
return False
mock_exists.side_effect = exists_side_effect
def glob_side_effect(pattern, **kwargs):
if ".core" in pattern:
return ["/data/mci/results/test1/test.outputs/dump_mongod.12345.core"]
elif ".mdmp" in pattern:
return []
elif "test.outputs" in pattern and "recursive" in kwargs:
return ["/data/mci/results/test1/test.outputs"]
return []
mock_glob.side_effect = glob_side_effect
# Mock boring PIDs file
with patch("builtins.open", unittest.mock.mock_open(read_data="12345\n")):
generator = BazelCoreAnalysisTaskGenerator(self.expansions_file, use_mock_tasks=True)
cores = generator.find_cores()
self.assertEqual(len(cores), 1)
self.assertTrue(cores[0].marked_boring)
@patch("buildscripts.resmokelib.hang_analyzer.gen_hang_analyzer_tasks.read_config_file")
def test_get_core_analyzer_commands_includes_bazel_flag(self, mock_read_config):
"""Test that get_core_analyzer_commands includes bazel flag."""
mock_read_config.return_value = self.mock_expansions
generator = BazelCoreAnalysisTaskGenerator(self.expansions_file, use_mock_tasks=True)
commands = generator.get_core_analyzer_commands(
"task123", "0", "s3://results", "on", True, set()
)
# Find subprocess command and verify it has --is-bazel-task flag
subprocess_cmd = None
for cmd in commands:
if hasattr(cmd, "as_dict") and "subprocess.exec" in str(cmd.as_dict()):
subprocess_cmd = cmd
break
self.assertIsNotNone(subprocess_cmd)
cmd_dict = subprocess_cmd.as_dict()
args = cmd_dict["params"]["args"]
self.assertIn("--is-bazel-task", args)
if __name__ == "__main__":
unittest.main()
|
python
|
github
|
https://github.com/mongodb/mongo
|
buildscripts/tests/resmokelib/hang_analyzer/test_gen_hang_analyzer_tasks.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
idcviewtest.py
Created by QingFeng on 2008-03-17.
Copyright (c) 2007 xBayDNS Team. All rights reserved.
"""
import basetest
import logging.config
import os
import pwd
import shutil
import tempfile
import time
import unittest
log = logging.getLogger('xbaydns.tests.idcviewtest')
#logging.basicConfig(level=logging.DEBUG)
from xbaydns.tools import idcview
from decimal import Decimal
class LogToListTest(basetest.BaseTestCase):
def setUp(self):
"""初始化测试环境"""
self.basedir = os.path.realpath(tempfile.mkdtemp(suffix='xbaydns_test'))
basetest.BaseTestCase.setUp(self)
self.__initfile()
def tearDown(self):
"""清洁测试环境"""
shutil.rmtree(self.basedir)
basetest.BaseTestCase.tearDown(self)
self.__rmfile()
def __initfile(self):
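        # Each fixture line is: target_ip,check_name,metric_value,timestamp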
f=open('/tmp/10.10.10.10_20080317','w')
f.write('10.10.10.20,ping,0.905,2008-03-17 00:00:00\n')
f.write('10.10.10.21,pinggate,0.791,2008-03-17 00:00:00\n')
f.write('10.10.10.22,pinggate,0.985,2008-03-17 00:00:00\n')
f.write('10.10.10.20,nslookup,0.836,2008-03-17 00:05:00\n')
f.write('10.10.10.21,pinggate,0.715,2008-03-17 00:05:00\n')
f.write('10.10.10.22,ping,0.805,2008-03-17 00:05:00\n')
f.close()
f=open('/tmp/11.11.11.11_20080317','w')
f.write('20.10.10.20,ping,0.605,2008-03-17 00:00:00\n')
f.write('20.10.10.21,pinggate,0.991,2008-03-17 00:00:00\n')
f.write('20.10.10.22,pinggate,0.995,2008-03-17 00:00:00\n')
f.write('20.10.10.20,nslookup,0.896,2008-03-17 00:05:00\n')
f.write('20.10.10.21,pinggate,0.605,2008-03-17 00:05:00\n')
f.write('20.10.10.22,ping,0.405,2008-03-17 00:05:00\n')
f.close()
#agent 2008-03-18 data
f=open('/tmp/agent_result_20080318','w')
f.write('''202.108.35.50,PING_HOST,0.778,2008-03-17 17:51:38
10.210.12.10,PING_HOST,0.253,2008-03-17 17:51:38
202.106.182.153,PING_HOST,0.810,2008-03-17 17:51:38
127.0.0.1,PING_HOST,0.036,2008-03-17 17:51:38
10.210.12.10,PING_HOST,0.267,2008-03-17 17:52:49
202.108.35.50,PING_HOST,0.774,2008-03-17 17:52:49
202.106.182.153,PING_HOST,0.810,2008-03-17 17:52:49
127.0.0.1,PING_HOST,0.043,2008-03-17 17:52:49
10.210.12.10,PING_HOST,0.278,2008-03-17 17:53:48
202.108.35.50,PING_HOST,0.742,2008-03-17 17:53:48
202.106.182.153,PING_HOST,0.796,2008-03-17 17:53:48
127.0.0.1,PING_HOST,0.055,2008-03-17 17:53:52
127.0.0.1,PING_HOST,0.033,2008-03-18 16:43:29
202.108.35.50,PING_HOST,0.898,2008-03-18 16:43:29
10.210.12.10,PING_HOST,0.243,2008-03-18 16:43:29
202.106.182.153,PING_HOST,0.888,2008-03-18 16:43:29
202.106.0.20,NS_QUERY,39.8156,2008-03-18 16:44:05
22.12.231.1,OUT_OF_REACH,-1,2008-03-18 16:45:39''')
f.close()
def __rmfile(self):
os.remove('/tmp/10.10.10.10_20080317')
os.remove('/tmp/11.11.11.11_20080317')
os.remove('/tmp/agent_result_20080318')
def test_convfiles(self):
files=['/tmp/10.10.10.10_20080317','/tmp/11.11.11.11_20080317']
data=idcview.convfiles(files)
self.assertTrue(isinstance(data,list))
print data
def test_agent20080318(self):
files=['/tmp/agent_result_20080318',]
data=idcview.convfiles(files)
self.assertTrue(isinstance(data,list))
self.assertEqual(data[0][0],'22.12.231.1')
self.assertEqual(data[0][1],0)
self.assertEqual(data[5][0],'202.106.0.20')
self.assertEqual(data[5][1],Decimal('39.8156'))
def suite():
"""集合测试用例"""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(LogToListTest, 'test'))
return suite
"""
单独运行command的测试用例
"""
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, ProxyManager
from .packages.urllib3.response import HTTPResponse
from .compat import urlparse, basestring, urldefrag, unquote
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import TimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection should attempt.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter()
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
self.max_retries = max_retries
self.config = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK):
"""Initializes a urllib3 PoolManager. This method should not be called
from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block)
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
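            # cert may be a (cert_file, key_file) pair or a single path to a
            # combined certificate/key file.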
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <reqeusts.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url).scheme)
if proxy:
proxy = prepend_scheme_if_needed(proxy, urlparse(url).scheme)
conn = ProxyManager(self.poolmanager.connection_from_url(proxy))
else:
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this just closes the PoolManager, which closes pooled
connections.
"""
self.poolmanager.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a proxy, the full URL has to be
used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(request.url).scheme)
if proxy:
url, _ = urldefrag(request.url)
else:
url = request.path_url
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. Currently this adds a
Proxy-Authorization header.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
proxies = kwargs.get('proxies', {})
if proxies is None:
proxies = {}
proxy = proxies.get(urlparse(request.url).scheme)
username, password = get_auth_from_url(proxy)
if username and password:
# Proxy auth usernames and passwords will be urlencoded, we need
# to decode them.
username = unquote(username)
password = unquote(password)
request.headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) The timeout on the request.
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request, proxies=proxies)
chunked = not (request.body is None or 'Content-Length' in request.headers)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
else:
# Chunked request: stream the body manually over a low-level
# connection checked out from the pool.
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
low_conn.putrequest(request.method, url, skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except socket.error as sockerr:
raise ConnectionError(sockerr)
except MaxRetryError as e:
raise ConnectionError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e)
elif isinstance(e, TimeoutError):
raise Timeout(e)
else:
raise
r = self.build_response(request, resp)
if not stream:
r.content
return r
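# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). Assumes the public
# `requests` package is installed; HTTPAdapter is the class defined above.
def _example_mount_adapter():
    import requests
    session = requests.Session()
    # Cache up to 5 connection pools with 10 connections each, and block
    # rather than discard connections when a pool is exhausted.
    adapter = HTTPAdapter(pool_connections=5, pool_maxsize=10, pool_block=True)
    session.mount('https://', adapter)
    return session.get('https://example.org/')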
[language: unknown | source: codeparrot/codeparrot-clean]
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry.core import discover
from telemetry.core import util
class DiscoverTest(unittest.TestCase):
def setUp(self):
self._base_dir = util.GetUnittestDataDir()
self._start_dir = os.path.join(self._base_dir, 'discoverable_classes')
self._base_class = Exception
def testDiscoverClassesBasic(self):
classes = discover.DiscoverClasses(
self._start_dir, self._base_dir, self._base_class)
actual_classes = dict(
(name, cls.__name__) for name, cls in classes.iteritems())
expected_classes = {
'discover_dummyclass': 'DummyException',
'another_discover_dummyclass': 'DummyExceptionImpl2',
}
self.assertEqual(actual_classes, expected_classes)
def testDiscoverClassesWithPattern(self):
classes = discover.DiscoverClasses(
self._start_dir, self._base_dir, self._base_class,
pattern='another*')
actual_classes = dict(
(name, cls.__name__) for name, cls in classes.iteritems())
expected_classes = {
'another_discover_dummyclass': 'DummyExceptionImpl2',
}
self.assertEqual(actual_classes, expected_classes)
def testDiscoverClassesByClassName(self):
classes = discover.DiscoverClasses(
self._start_dir, self._base_dir, self._base_class,
index_by_class_name=True)
actual_classes = dict(
(name, cls.__name__) for name, cls in classes.iteritems())
expected_classes = {
'dummy_exception': 'DummyException',
'dummy_exception_impl1': 'DummyExceptionImpl1',
'dummy_exception_impl2': 'DummyExceptionImpl2',
}
self.assertEqual(actual_classes, expected_classes)
def testIsPageSetFile(self):
top_10_ps_dir = os.path.join(util.GetChromiumSrcDir(),
'tools/perf/page_sets/top_10.py')
top_10_json_data = os.path.join(util.GetChromiumSrcDir(),
'tools/perf/page_sets/data/top_10.json')
test_ps_dir = os.path.join(util.GetTelemetryDir(),
'unittest_data/test_page_set.py')
page_set_dir = os.path.join(util.GetTelemetryDir(),
'telemetry/page/page_set.py')
self.assertTrue(discover.IsPageSetFile(top_10_ps_dir))
self.assertFalse(discover.IsPageSetFile(top_10_json_data))
self.assertFalse(discover.IsPageSetFile(test_ps_dir))
self.assertFalse(discover.IsPageSetFile(page_set_dir))
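# Editor's sketch (not part of the original tests): the API under test maps
# module names to discovered subclasses of a base class, e.g.
#
#   classes = discover.DiscoverClasses(start_dir, base_dir, Exception)
#   # -> {'discover_dummyclass': DummyException, ...}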
[language: unknown | source: codeparrot/codeparrot-clean]
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Classes for encoding different types into SDRs for HTM input."""
from collections import namedtuple
import numpy
from nupic.encoders.utils import bitsToString
from nupic.serializable import Serializable
defaultDtype = numpy.uint8
EncoderResult = namedtuple("EncoderResult", ['value', 'scalar', 'encoding'])
""" Tuple to represent the results of computations in different forms.
.. py:attribute:: value
A representation of the encoded value in the same format as the input
(i.e. float for scalars, string for categories). This is the value for
the sub-field in a format that is consistent with the type specified by
:meth:`.getDecoderOutputFieldTypes`. Note that this value is not
necessarily numeric.
.. py:attribute:: scalar
The scalar representation of value (e.g. for categories, this is the
internal index used by the encoder). This number is consistent with what
is returned by :meth:`.getScalars`. This value is always an int or
float, and can be used for numeric comparisons.
.. py:attribute:: encoding
This is the encoded bit-array (numpy array) that represents ``value``.
That is, if ``value`` was passed to :meth:`.encode`, an identical
bit-array should be returned.
"""
def _isSequence(obj):
"""Helper function to determine if a function is a list or sequence."""
mType = type(obj)
return mType is list or mType is tuple
class Encoder(Serializable):
"""
An encoder converts a value to a sparse distributed representation.
This is the base class for encoders that are compatible with the OPF. The OPF
requires that values can be represented as a scalar value for use in places
like the SDR Classifier.
.. note:: The Encoder superclass implements:
- :func:`~nupic.encoders.base.Encoder.encode`
- :func:`~nupic.encoders.base.Encoder.pprintHeader`
- :func:`~nupic.encoders.base.Encoder.pprint`
.. warning:: The following methods and properties must be implemented by
subclasses:
- :func:`~nupic.encoders.base.Encoder.getDecoderOutputFieldTypes`
- :func:`~nupic.encoders.base.Encoder.getWidth`
- :func:`~nupic.encoders.base.Encoder.encodeIntoArray`
- :func:`~nupic.encoders.base.Encoder.getDescription`
"""
def getWidth(self):
"""Should return the output width, in bits.
:return: (int) output width in bits
"""
raise NotImplementedError()
def encodeIntoArray(self, inputData, output):
"""
Encodes inputData and puts the encoded value into the numpy output array,
which is a 1-D array of length returned by :meth:`.getWidth`.
.. note:: The numpy output array is reused, so clear it before updating it.
:param inputData: Data to encode. This should be validated by the encoder.
:param output: numpy 1-D array of same length returned by
:meth:`.getWidth`.
"""
raise NotImplementedError()
def setLearning(self, learningEnabled):
"""Set whether learning is enabled.
:param learningEnabled: (bool) whether learning should be enabled
"""
# TODO: (#1943) Make sure subclasses don't rely on this and remove it.
# Default behavior should be a noop.
if hasattr(self, "_learningEnabled"):
self._learningEnabled = learningEnabled
def setFieldStats(self, fieldName, fieldStatistics):
"""
This method is called by the model to set the statistics like min and
max for the underlying encoders if this information is available.
:param fieldName: name of the field this encoder is encoding, provided by
:class:`~.nupic.encoders.multi.MultiEncoder`.
:param fieldStatistics: dictionary of dictionaries with the first level being
the fieldname and the second level the statistic, e.g.:
``fieldStatistics['pounds']['min']``
"""
pass
def encode(self, inputData):
"""Convenience wrapper for :meth:`.encodeIntoArray`.
This may be less efficient because it allocates a new numpy array every
call.
:param inputData: input data to be encoded
:return: a numpy array with the encoded representation of inputData
"""
output = numpy.zeros((self.getWidth(),), dtype=defaultDtype)
self.encodeIntoArray(inputData, output)
return output
def getScalarNames(self, parentFieldName=''):
"""
Return the field names for each of the scalar values returned by
getScalars.
:param parentFieldName: The name of the encoder which is our parent. This
name is prefixed to each of the field names within this encoder to
form the keys of the dict() in the retval.
:return: array of field names
"""
names = []
if self.encoders is not None:
for (name, encoder, offset) in self.encoders:
subNames = encoder.getScalarNames(parentFieldName=name)
if parentFieldName != '':
subNames = ['%s.%s' % (parentFieldName, name) for name in subNames]
names.extend(subNames)
else:
if parentFieldName != '':
names.append(parentFieldName)
else:
names.append(self.name)
return names
def getDecoderOutputFieldTypes(self):
"""
Returns a sequence of field types corresponding to the elements in the
decoded output field array. The types are defined by
:class:`~nupic.data.field_meta.FieldMetaType`.
:return: list of :class:`~nupic.data.field_meta.FieldMetaType` objects
"""
if hasattr(self, '_flattenedFieldTypeList') and \
self._flattenedFieldTypeList is not None:
return self._flattenedFieldTypeList
fieldTypes = []
# NOTE: we take care of the composites, but leaf encoders must override
# this method and return a list of one field_meta.FieldMetaType.XXXX
# element corresponding to the encoder's decoder output field type
for (name, encoder, offset) in self.encoders:
subTypes = encoder.getDecoderOutputFieldTypes()
fieldTypes.extend(subTypes)
self._flattenedFieldTypeList = fieldTypes
return fieldTypes
def setStateLock(self,lock):
"""
Setting this to true freezes the state of the encoder
This is separate from the learning state which affects changing parameters.
Implemented in subclasses.
"""
pass
def _getInputValue(self, obj, fieldName):
"""
Gets the value of a given field from the input record
"""
if isinstance(obj, dict):
if not fieldName in obj:
knownFields = ", ".join(
key for key in obj.keys() if not key.startswith("_")
)
raise ValueError(
"Unknown field name '%s' in input record. Known fields are '%s'.\n"
"This could be because input headers are mislabeled, or because "
"input data rows do not contain a value for '%s'." % (
fieldName, knownFields, fieldName
)
)
return obj[fieldName]
else:
return getattr(obj, fieldName)
def getEncoderList(self):
"""
:return: a reference to each sub-encoder in this encoder. They are
returned in the same order as they are for :meth:`.getScalarNames`
and :meth:`.getScalars`.
"""
if hasattr(self, '_flattenedEncoderList') and \
self._flattenedEncoderList is not None:
return self._flattenedEncoderList
encoders = []
if self.encoders is not None:
for (name, encoder, offset) in self.encoders:
subEncoders = encoder.getEncoderList()
encoders.extend(subEncoders)
else:
encoders.append(self)
self._flattenedEncoderList = encoders
return encoders
def getScalars(self, inputData):
"""
Returns a numpy array containing the sub-field scalar value(s) for
each sub-field of the ``inputData``. To get the associated field names for
each of the scalar values, call :meth:`.getScalarNames()`.
For a simple scalar encoder, the scalar value is simply the input unmodified.
For category encoders, it is the scalar representing the category string
that is passed in. For the datetime encoder, the scalar value is the
number of seconds since the epoch.
The intent of the scalar representation of a sub-field is to provide a
baseline for measuring error differences. You can compare the scalar value
of the inputData with the scalar value returned from :meth:`.topDownCompute`
on a top-down representation to evaluate prediction accuracy, for example.
:param inputData: The data from the source. This is typically an object with
members
:return: array of scalar values
"""
retVals = numpy.array([])
if self.encoders is not None:
for (name, encoder, offset) in self.encoders:
values = encoder.getScalars(self._getInputValue(inputData, name))
retVals = numpy.hstack((retVals, values))
else:
retVals = numpy.hstack((retVals, inputData))
return retVals
def getEncodedValues(self, inputData):
"""
Returns the input in the same format as is returned by
:meth:`.topDownCompute`. For most encoder types, this is the same as the
input data. For instance, for scalar and category types, this corresponds to
the numeric and string values, respectively, from the inputs. For datetime
encoders, this returns the list of scalars for each of the sub-fields
(timeOfDay, dayOfWeek, etc.)
This method is essentially the same as :meth:`.getScalars` except that it
returns strings.
:param inputData: The input data in the format it is received from the data
source
:return: A list of values, in the same format and in the same order as they
are returned by :meth:`.topDownCompute`.
"""
retVals = []
if self.encoders is not None:
for name, encoders, offset in self.encoders:
values = encoders.getEncodedValues(self._getInputValue(inputData, name))
if _isSequence(values):
retVals.extend(values)
else:
retVals.append(values)
else:
if _isSequence(inputData):
retVals.extend(inputData)
else:
retVals.append(inputData)
return tuple(retVals)
def getBucketIndices(self, inputData):
"""
Returns an array containing the sub-field bucket indices for each sub-field
of the inputData. To get the associated field names for each of the buckets,
call :meth:`.getScalarNames`.
:param inputData: The data from the source. This is typically an object with
members.
:return: array of bucket indices
"""
retVals = []
if self.encoders is not None:
for (name, encoder, offset) in self.encoders:
values = encoder.getBucketIndices(self._getInputValue(inputData, name))
retVals.extend(values)
else:
assert False, "Should be implemented in base classes that are not " \
"containers for other encoders"
return retVals
def scalarsToStr(self, scalarValues, scalarNames=None):
"""
Return a pretty print string representing the return values from
:meth:`.getScalars` and :meth:`.getScalarNames`.
:param scalarValues: input values to encode to string
:param scalarNames: optional input of scalar names to convert. If None, gets
scalar names from :meth:`.getScalarNames`
:return: string representation of scalar values
"""
if scalarNames is None:
scalarNames = self.getScalarNames()
desc = ''
for (name, value) in zip(scalarNames, scalarValues):
if len(desc) > 0:
desc += ", %s:%.2f" % (name, value)
else:
desc += "%s:%.2f" % (name, value)
return desc
def getDescription(self):
"""
**Must be overridden by subclasses.**
This returns a list of tuples, each containing (``name``, ``offset``).
The ``name`` is a string description of each sub-field, and ``offset`` is
the bit offset of the sub-field for that encoder.
For now, only the 'multi' and 'date' encoders have multiple (name, offset)
pairs. All other encoders have a single pair, where the offset is 0.
:return: list of tuples containing (name, offset)
"""
raise Exception("getDescription must be implemented by all subclasses")
def getFieldDescription(self, fieldName):
"""
Return the offset and length of a given field within the encoded output.
:param fieldName: Name of the field
:return: tuple(``offset``, ``width``) of the field within the encoded output
"""
# Find which field it's in
description = self.getDescription() + [("end", self.getWidth())]
for i in xrange(len(description)):
(name, offset) = description[i]
if (name == fieldName):
break
if i >= len(description)-1:
raise RuntimeError("Field name %s not found in this encoder" % fieldName)
# Return the offset and width
return (offset, description[i+1][1] - offset)
def encodedBitDescription(self, bitOffset, formatted=False):
"""
Return a description of the given bit in the encoded output.
This will include the field name and the offset within the field.
:param bitOffset: Offset of the bit to get the description of
:param formatted: If True, the bitOffset is w.r.t. formatted output,
which includes separators
:return: tuple(``fieldName``, ``offsetWithinField``)
"""
# Find which field it's in
(prevFieldName, prevFieldOffset) = (None, None)
description = self.getDescription()
for i in xrange(len(description)):
(name, offset) = description[i]
if formatted:
offset = offset + i
if bitOffset == offset-1:
prevFieldName = "separator"
prevFieldOffset = bitOffset
break
if bitOffset < offset:
break
(prevFieldName, prevFieldOffset) = (name, offset)
# Return the field name and offset within the field
# return (fieldName, bitOffset - fieldOffset)
width = self.getDisplayWidth() if formatted else self.getWidth()
if prevFieldOffset is None or bitOffset > self.getWidth():
raise IndexError("Bit is outside of allowable range: [0 - %d]" % width)
return (prevFieldName, bitOffset - prevFieldOffset)
def pprintHeader(self, prefix=""):
"""
Pretty-print a header that labels the sub-fields of the encoded
output. This can be used in conjunction with :meth:`.pprint`.
:param prefix: printed before the header if specified
"""
print prefix,
description = self.getDescription() + [("end", self.getWidth())]
for i in xrange(len(description) - 1):
name = description[i][0]
width = description[i+1][1] - description[i][1]
formatStr = "%%-%ds |" % width
if len(name) > width:
pname = name[0:width]
else:
pname = name
print formatStr % pname,
print
print prefix, "-" * (self.getWidth() + (len(description) - 1)*3 - 1)
def pprint(self, output, prefix=""):
"""
Pretty-print the encoded output using ascii art.
:param output: to print
:param prefix: printed before the header if specified
"""
print prefix,
description = self.getDescription() + [("end", self.getWidth())]
for i in xrange(len(description) - 1):
offset = description[i][1]
nextoffset = description[i+1][1]
print "%s |" % bitsToString(output[offset:nextoffset]),
print
def decode(self, encoded, parentFieldName=''):
"""
Takes an encoded output and does its best to work backwards and generate
the input that would have generated it.
In cases where the encoded output contains more ON bits than an input
would have generated, this routine will return one or more ranges of inputs
which, if their encoded outputs were ORed together, would produce the
target output. This behavior makes this method suitable for doing things
like generating a description of a learned coincidence in the SP, which
in many cases might be a union of one or more inputs.
If, instead, you want to figure out the *most likely* single input scalar value
that would have generated a specific encoded output, use the
:meth:`.topDownCompute` method.
If you want to pretty print the return value from this method, use the
:meth:`.decodedToStr` method.
:param encoded: The encoded output that you want decode
:param parentFieldName: The name of the encoder which is our parent. This name
is prefixed to each of the field names within this encoder to form the
keys of the dict() in the retval.
:return: tuple(``fieldsDict``, ``fieldOrder``)
``fieldsDict`` is a dict() where the keys represent field names
(only 1 if this is a simple encoder, > 1 if this is a multi
or date encoder) and the values are the result of decoding each
field. If there are no bits in encoded that would have been
generated by a field, it won't be present in the dict. The
key of each entry in the dict is formed by joining the passed in
parentFieldName with the child encoder name using a '.'.
Each 'value' in ``fieldsDict`` consists of (ranges, desc), where
ranges is a list of one or more (minVal, maxVal) ranges of
input that would generate bits in the encoded output and 'desc'
is a pretty print description of the ranges. For encoders like
the category encoder, the 'desc' will contain the category
names that correspond to the scalar values included in the
ranges.
``fieldOrder`` is a list of the keys from ``fieldsDict``, in the
same order as the fields appear in the encoded output.
TODO: when we switch to Python 2.7 or 3.x, use OrderedDict
Example retvals for a scalar encoder:
.. code-block:: python
{'amount': ( [[1,3], [7,10]], '1-3, 7-10' )}
{'amount': ( [[2.5,2.5]], '2.5' )}
Example retval for a category encoder:
.. code-block:: python
{'country': ( [[1,1], [5,6]], 'US, GB, ES' )}
Example retval for a multi encoder:
.. code-block:: python
{'amount': ( [[2.5,2.5]], '2.5' ),
'country': ( [[1,1], [5,6]], 'US, GB, ES' )}
"""
fieldsDict = dict()
fieldsOrder = []
# What is the effective parent name?
if parentFieldName == '':
parentName = self.name
else:
parentName = "%s.%s" % (parentFieldName, self.name)
if self.encoders is not None:
# Merge decodings of all child encoders together
for i in xrange(len(self.encoders)):
# Get the encoder and the encoded output
(name, encoder, offset) = self.encoders[i]
if i < len(self.encoders)-1:
nextOffset = self.encoders[i+1][2]
else:
nextOffset = self.width
fieldOutput = encoded[offset:nextOffset]
(subFieldsDict, subFieldsOrder) = encoder.decode(fieldOutput,
parentFieldName=parentName)
fieldsDict.update(subFieldsDict)
fieldsOrder.extend(subFieldsOrder)
return (fieldsDict, fieldsOrder)
def decodedToStr(self, decodeResults):
"""
Return a pretty print string representing the return value from
:meth:`.decode`.
"""
(fieldsDict, fieldsOrder) = decodeResults
desc = ''
for fieldName in fieldsOrder:
(ranges, rangesStr) = fieldsDict[fieldName]
if len(desc) > 0:
desc += ", %s:" % (fieldName)
else:
desc += "%s:" % (fieldName)
desc += "[%s]" % (rangesStr)
return desc
def getBucketValues(self):
"""
**Must be overridden by subclasses.**
Returns a list of items, one for each bucket defined by this encoder.
Each item is the value assigned to that bucket; this is the same as the
:attr:`.EncoderResult.value` that would be returned by
:meth:`.getBucketInfo` for that bucket and is in the same format as the
input that would be passed to :meth:`.encode`.
This call is faster than calling :meth:`.getBucketInfo` on each bucket
individually if all you need are the bucket values.
:return: list of items, each item representing the bucket value for that
bucket.
"""
raise Exception("getBucketValues must be implemented by all subclasses")
def getBucketInfo(self, buckets):
"""
Returns a list of :class:`.EncoderResult` namedtuples describing the inputs
for each sub-field that correspond to the bucket indices passed in
``buckets``. To get the associated field names for each of the values, call
:meth:`.getScalarNames`.
:param buckets: The list of bucket indices, one for each sub-field encoder.
These bucket indices for example may have been retrieved
from the :meth:`.getBucketIndices` call.
:return: A list of :class:`.EncoderResult`.
"""
# Leaf encoders (no children) must implement this themselves
if self.encoders is None:
raise RuntimeError("Must be implemented in sub-class")
# Concatenate the results from bucketInfo on each child encoder
retVals = []
bucketOffset = 0
for i in xrange(len(self.encoders)):
(name, encoder, offset) = self.encoders[i]
if encoder.encoders is not None:
nextBucketOffset = bucketOffset + len(encoder.encoders)
else:
nextBucketOffset = bucketOffset + 1
bucketIndices = buckets[bucketOffset:nextBucketOffset]
values = encoder.getBucketInfo(bucketIndices)
retVals.extend(values)
bucketOffset = nextBucketOffset
return retVals
def topDownCompute(self, encoded):
"""
Returns a list of :class:`.EncoderResult` namedtuples describing the
top-down best guess inputs for each sub-field given the encoded output.
These are the values which are most likely to generate the given encoded
output. To get the associated field names for each of the values, call
:meth:`.getScalarNames`.
:param encoded: The encoded output. Typically received from the topDown
outputs from the spatial pooler just above us.
:return: A list of :class:`.EncoderResult`
"""
# Fallback topdown compute
if self.encoders is None:
raise RuntimeError("Must be implemented in sub-class")
# Concatenate the results from topDownCompute on each child encoder
retVals = []
for i in xrange(len(self.encoders)):
(name, encoder, offset) = self.encoders[i]
if i < len(self.encoders)-1:
nextOffset = self.encoders[i+1][2]
else:
nextOffset = self.width
fieldOutput = encoded[offset:nextOffset]
values = encoder.topDownCompute(fieldOutput)
if _isSequence(values):
retVals.extend(values)
else:
retVals.append(values)
return retVals
def closenessScores(self, expValues, actValues, fractional=True):
"""
Compute closeness scores between the expected scalar value(s) and actual
scalar value(s). The expected scalar values are typically those obtained
from the :meth:`.getScalars` method. The actual scalar values are typically
those returned from :meth:`.topDownCompute`.
This method returns one closeness score for each value in expValues (or
actValues which must be the same length). The closeness score ranges from
0 to 1.0, 1.0 being a perfect match and 0 being the worst possible match.
If this encoder is a simple, single field encoder, then it will expect
just 1 item in each of the ``expValues`` and ``actValues`` arrays.
Multi-encoders will expect 1 item per sub-encoder.
Each encoder type can define its own metric for closeness. For example,
a category encoder may return either 1 or 0, if the scalar matches exactly
or not. A scalar encoder might return a percentage match, etc.
:param expValues: Array of expected scalar values, typically obtained from
:meth:`.getScalars`
:param actValues: Array of actual values, typically obtained from
:meth:`.topDownCompute`
:return: Array of closeness scores, one per item in expValues (or
actValues).
"""
# Fallback closeness is a percentage match
if self.encoders is None:
err = abs(expValues[0] - actValues[0])
if fractional:
denom = max(expValues[0], actValues[0])
if denom == 0:
denom = 1.0
closeness = 1.0 - float(err)/denom
if closeness < 0:
closeness = 0
else:
closeness = err
return numpy.array([closeness])
# Concatenate the results from closeness scores on each child encoder
scalarIdx = 0
retVals = numpy.array([])
for (name, encoder, offset) in self.encoders:
values = encoder.closenessScores(expValues[scalarIdx:], actValues[scalarIdx:],
fractional=fractional)
scalarIdx += len(values)
retVals = numpy.hstack((retVals, values))
return retVals
def getDisplayWidth(self):
"""
Calculate width of display for bits plus blanks between fields.
:return: (int) width of display for bits plus blanks between fields
"""
width = self.getWidth() + len(self.getDescription()) - 1
return width
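# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal leaf encoder
# implementing just the pieces of the abstract interface above that encode()
# needs. It is illustrative only -- a one-hot encoding over a fixed number of
# buckets; names and behavior are assumptions, not nupic API.
class _ExampleBucketEncoder(Encoder):

  def __init__(self, numBuckets, name="example"):
    self.encoders = None      # leaf encoder: no child encoders
    self.name = name
    self._numBuckets = numBuckets

  def getWidth(self):
    return self._numBuckets

  def encodeIntoArray(self, inputData, output):
    # Clear the reused output array, then set the single active bit.
    output[:] = 0
    output[int(inputData) % self._numBuckets] = 1

  def getDescription(self):
    return [(self.name, 0)]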
[language: unknown | source: codeparrot/codeparrot-clean]
## -*- coding: utf-8 -*-
## (C) 2015 Muthiah Annamalai,
## This module is part of solthiruthi project under open-tamil umbrella.
## This code may be used/distributed under the MIT LICENSE.
from __future__ import print_function
import abc
import codecs
import copy
from tamil import utf8
# Suffix removal algorithm
class RemoveSuffix(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
self.possible_suffixes = None
self.replace_suffixes = {} #valid dictionary
self.reversed_suffixes = []
@abc.abstractmethod
def setSuffixes(self):
pass
@abc.abstractmethod
def apply(self,word):
pass
def prepareSuffixes(self):
assert self.possible_suffixes
# reverse the words in each letter.
for word in self.possible_suffixes:
self.reversed_suffixes.append( utf8.reverse_word(word) )
return
def removeSuffix(self,word):
removed = False
if not self.possible_suffixes:
# init once
self.setSuffixes()
self.prepareSuffixes()
word_lett = utf8.get_letters(word)
rword_lett = copy.copy(word_lett)
rword_lett.reverse()
#print('rev word ->',rword_lett)
rword = u"".join(rword_lett)
longest_match = ""
for itr in range(len(self.reversed_suffixes)):
suffix = self.reversed_suffixes[itr]
#print(itr,utf8.get_letters(suffix))
if rword.startswith(suffix):
if len(longest_match) <= len(suffix):
longest_match = suffix
#print('L-match-->',utf8.get_letters(longest_match))
continue
if len(longest_match) > 0:
removed = True
sfx = []
for itr in range(len(utf8.get_letters(longest_match))):
sfx.append( word_lett.pop() )
word = u"".join(word_lett)
sfx.reverse()
sfx= u"".join(sfx)
# rule to replace suffix
alt_suffix = self.replace_suffixes.get(sfx,None)
if alt_suffix:
word = word + alt_suffix
return word,removed
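# Editor's worked example (hedged): with suffix u"கள்" among the candidates,
# the word u"பள்ளிகள்" is reversed letter-wise, each reversed suffix is
# tested with startswith(), and the longest match is popped off letter by
# letter, so removeSuffix() returns (u"பள்ளி", True).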
# remove prefix using the suffix removal algorithm via reversal of word
class RemovePrefix(RemoveSuffix):
def __init__(self):
super(RemovePrefix,self).__init__()
def setSuffixes(self):
self.replace_suffixes = {u"மா":u"",u"பேர்":u"",u"அதி":u"",u"பெரிய":u"",u"பெரு":u"",u"சின்ன":u"",\
u"ஆதி":u"",u"சிறு":u"",u"அக்":u"",u"இக்":u"",u"எக்":u""}
self.possible_suffixes=[utf8.reverse_word(word) for word in self.replace_suffixes.keys()]
def apply(self,word):
return self.removePrefix(word)
def removePrefix(self,word):
word_lett = utf8.get_letters(word)
word_lett.reverse()
a,b = self.removeSuffix(u"".join(word_lett))
return [utf8.reverse_word(a),b]
class RemoveCaseSuffix(RemoveSuffix):
def __init__(self):
super(RemoveCaseSuffix,self).__init__()
def apply(self,word):
return self.removeSuffix(word)
def setSuffixes(self):
accusative = u"ை"
instrumental =u"ஆல்"
associative=u"ஓடு"
dative=u"க்கு"
genitive=u"இன்"
possessive=u"உடைய"
locative=u"இடம்"
ablative=u"இடமிருந்து"
self.possible_suffixes=[u"உக்கு",u"க்கு",u"ளை",u"கள்",
accusative,instrumental,associative,
dative,genitive,possessive,locative,ablative]
self.replace_suffixes = dict()
for w in self.possible_suffixes:
self.replace_suffixes[w] = u""
return
class RemoveHyphenatesNumberDate(RemoveCaseSuffix):
""" Done correctly (மேல்) 65536-மேல்,
ivan paritchayil இரண்டாவது, 2-வது """
pass
class RemoveVerbSuffixTense(RemoveCaseSuffix):
def __init__(self):
super(RemoveVerbSuffixTense,self).__init__()
self.tenses = { "present" :u"கிற்",
"past" : u"த",
"future" : u"வ" }
def setSuffixes(self):
"""
"""
tense_endings = [u"ஏன்",u"ஆய்",u"ஆர்",u"ஆன்",u"ஆள்",u"அது",u"ஓம்", u"அன"]
self.possible_suffixes=tense_endings
self.replace_suffixes = tense_endings
class RemovePluralSuffix(RemoveSuffix):
def __init__(self):
super(RemovePluralSuffix,self).__init__()
def apply(self,word):
return self.removeSuffix(word)
def setSuffixes(self):
self.replace_suffixes = {u"ற்கள்":u"ல்",u"கள்":u"",u"ல்":u"", u"ட்கள்": u"ள்", u"ங்கள்":u"ம்"}
self.possible_suffixes=list(self.replace_suffixes.keys())
class RemoveNegationSuffix(RemoveCaseSuffix):
def __init__(self):
super(RemoveNegationSuffix,self).__init__()
def setSuffixes(self):
self.replace_suffixes = {u"கே":u"",u"ல்லை":u"",u"ாதே":u"", u"ாமல்":u""}
self.possible_suffixes=list(self.replace_suffixes.keys())
class CaseFilter(object):
def __init__(self,*filter_obj_list):
object.__init__(self)
self.filters = filter_obj_list
def apply(self,word_in):
word = [word_in,None]
for filter_obj in self.filters:
word = filter_obj.apply( word[0] )
return word[0]
def xkcd():
obj = RemovePluralSuffix()
objf = CaseFilter(obj)
expected = [u"பதிவி",u"கட்டளை",u"அவர்",u"பள்ளி"]
words_list = [u"பதிவில்",u"கட்டளைகள்",u"அவர்கள்",u"பள்ளிகள்"]
for w,x in zip(words_list,expected):
rval = obj.removeSuffix(w)
trunc_word = objf.apply( w )
assert( trunc_word == rval[0] )
assert(rval[1])
print(utf8.get_letters(w),'->',rval[1])
assert(rval[0] == x)
return
if __name__ == "__main__":
xkcd()
[language: unknown | source: codeparrot/codeparrot-clean]
from __future__ import division, print_function
from pint import UnitRegistry
from pint.quantity import _Quantity
import sys
if sys.version_info[0] >= 3:
string_type = str
else:
string_type = basestring
'''
:copyright: 2016 by The Autoprotocol Development Team, see AUTHORS
for more details.
:license: BSD, see LICENSE for more details
'''
# Preload UnitRegistry (Use default Pints definition file as a base)
_UnitRegistry = UnitRegistry()
'''Map string representation of Pint units over to Autoprotocol format'''
# Map Temperature Unit names
_UnitRegistry._units["degC"]._name = "celsius"
_UnitRegistry._units["celsius"]._name = "celsius"
_UnitRegistry._units["degF"]._name = "fahrenheit"
_UnitRegistry._units["fahrenheit"]._name = "fahrenheit"
_UnitRegistry._units["degR"]._name = "rankine"
_UnitRegistry._units["rankine"]._name = "rankine"
# Map Speed Unit names
_UnitRegistry._units["revolutions_per_minute"]._name = "rpm"
'''Add support for Molarity Unit'''
_UnitRegistry.define('molar = mole/liter = M')
class UnitError(Exception):
"""
Exceptions from creating new Unit instances with bad inputs.
"""
message_text = "Unit error for %s"
def __init__(self, value):
super(UnitError, self).__init__(self.message_text % value)
self.value = value
class UnitStringError(UnitError):
message_text = "Invalid format for %s: when building a Unit from a "
"string argument, string must be in \'1:meter\' format."
class UnitValueError(UnitError):
message_text = "Invalid value for %s: when building a Unit from a "
"value argument, value must be numeric."
class Unit(_Quantity):
"""
A representation of a measure of physical quantities such as length,
mass, time and volume.
Uses Pint's Quantity as a base class for implementing units and
inherits functionalities such as conversions and proper unit
arithmetic.
Note that the magnitude is stored as a double-precision float, so
there are inherent issues when dealing with extremely large/small
numbers as well as numerical rounding for non-base 2 numbers.
Example
-------
.. code-block:: python
vol_1 = Unit(10, 'microliter')
vol_2 = Unit(10, 'liter')
print(vol_1 + vol_2)
time_1 = Unit(1, 'second')
speed_1 = vol_1/time_1
print (speed_1)
print (speed_1.to('liter/hour'))
Returns
-------
.. code-block:: json
10000010.0:microliter
10.0:microliter / second
0.036:liter / hour
"""
def __new__(cls, value, units=None):
cls._REGISTRY = _UnitRegistry
cls.force_ndarray = False
# Automatically return Unit if Unit is provided
if isinstance(value, Unit):
return value
# Automatically parse String if no units provided
if not units and isinstance(value, string_type):
try:
value, units = value.split(":")
except ValueError:
raise UnitStringError(value)
try:
return super(Unit, cls).__new__(cls, float(value), units)
except ValueError:
raise UnitValueError(value)
def __init__(self, value, units=None):
super(Unit, self).__init__()
self.unit = self.units.__str__()
@staticmethod
def fromstring(s):
"""
Convert a string representation of a unit into a Unit object.
Example
-------
.. code-block:: python
Unit.fromstring("10:microliter")
becomes
.. code-block:: python
Unit(10, "microliter")
Parameters
----------
s : str
String in the format of "value:unit"
"""
if isinstance(s, Unit):
return s
else:
return Unit(s)
def __str__(self):
return ":".join([str(self._magnitude), "^".join(self.unit.split("**"))]).replace(" ", "")
def __repr__(self):
return "Unit({0}, '{1}')".format(self._magnitude, self._units)
def _mul_div(self, other, magnitude_op, units_op=None):
'''
Extends Pint's base _Quantity multiplication/division
implementation by checking for dimensionality
'''
if isinstance(other, Unit):
if self.dimensionality == other.dimensionality:
other = other.to(self.unit)
return super(Unit, self)._mul_div(other, magnitude_op, units_op)
def _imul_div(self, other, magnitude_op, units_op=None):
'''
Extends Pint's base _Quantity multiplication/division
implementation by checking for dimensionality
'''
if isinstance(other, Unit):
if self.dimensionality == other.dimensionality:
other = other.to(self.unit)
return super(Unit, self)._imul_div(other, magnitude_op, units_op)
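def _example_unit_arithmetic():
    '''
    Editor's sketch (not part of the original module): constructing Units and
    exercising the dimensionality-aware arithmetic defined above.
    '''
    vol = Unit(10, "microliter")
    dur = Unit("2:second")   # "value:unit" strings are parsed in __new__
    # Same dimensionality: the right operand is converted before dividing.
    ratio = Unit(1, "liter") / Unit(500, "milliliter")   # -> 2.0 dimensionless
    # Different dimensionality: plain division, yielding a compound unit.
    rate = vol / dur
    return ratio, rate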
[language: unknown | source: codeparrot/codeparrot-clean]
from rackattack import api
import socket
import time
import subprocess
def defaultRequirement(imageHint="vanilla"):
labelRegex = "solvent__rootfs-vanilla__rootfs__.*__clean"
lines = subprocess.check_output(["osmosis", "listlabels", labelRegex]).strip()
if len(lines) == 0:
raise Exception("Local osmosis object store does not contain a label matching '%s'" % labelRegex)
imageLabel = lines.split("\n")[0]
return api.Requirement(imageLabel=imageLabel, imageHint=imageHint)
def getRequirement(imageLabel):
return api.Requirement(imageLabel=imageLabel, imageHint=imageLabel)
def whiteboxAllocationInfo():
return api.AllocationInfo(user="whitebox", purpose="whitebox", nice=0)
def waitForTCPServer(tcpEndpoint, timeout=3, interval=0.1):
before = time.time()
while time.time() - before < timeout:
if _connect(tcpEndpoint):
return
time.sleep(interval)
raise Exception("TCP Server '%(tcpEndpoint)s' did not respond within timeout" % dict(
tcpEndpoint=tcpEndpoint))
def _connect(tcpEndpoint):
s = socket.socket()
try:
s.connect(tcpEndpoint)
return True
except socket.error:
return False
finally:
s.close()
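def _exampleWaitForServer():
    # Editor's sketch (endpoint is illustrative): poll every 100ms for up to
    # 3 seconds until something accepts TCP connections on localhost:8080.
    waitForTCPServer(("localhost", 8080))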
[language: unknown | source: codeparrot/codeparrot-clean]
import { createUser, findUser, validatePassword } from "../lib/user";
import { setLoginSession, getLoginSession } from "../lib/auth";
import { removeTokenCookie } from "../lib/auth-cookies";
import { GraphQLError } from "graphql";
export const resolvers = {
Query: {
async viewer(_root, _args, context, _info) {
try {
const session = await getLoginSession(context.req);
if (session) {
return findUser({ email: session.email });
}
} catch (error) {
throw new GraphQLError(
"Authentication token is invalid, please log in",
{
extensions: {
code: "UNAUTHENTICATED",
},
},
);
}
},
},
Mutation: {
async signUp(_parent, args, _context, _info) {
const user = await createUser(args.input);
return { user };
},
async signIn(_parent, args, context, _info) {
const user = await findUser({ email: args.input.email });
if (user && (await validatePassword(user, args.input.password))) {
const session = {
id: user.id,
email: user.email,
};
await setLoginSession(context.res, session);
return { user };
}
throw new GraphQLError("Invalid email and password combination");
},
async signOut(_parent, _args, context, _info) {
removeTokenCookie(context.res);
return true;
},
},
};
[language: typescript | source: github | repo: https://github.com/vercel/next.js | path: examples/api-routes-apollo-server-and-client-auth/apollo/resolvers.ts]
# coding=utf-8
"""Help for batch runner dialog."""
from safe.utilities.i18n import tr
from safe import messaging as m
from safe.messaging import styles
INFO_STYLE = styles.INFO_STYLE
__author__ = 'ismailsunni'
def batch_help():
"""Help message for Batch Dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message
def heading():
"""Helper method that returns just the header.
This method was added so that the text could be reused in the
other contexts.
.. versionadded:: 3.2.2
:returns: A heading object.
:rtype: safe.messaging.heading.Heading
"""
message = m.Heading(tr('Batch Runner'), **INFO_STYLE)
return message
def content():
"""Helper method that returns just the content.
This method was added so that the text could be reused in the
other contexts.
.. versionadded:: 3.2.2
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message
"""
message = m.Message()
message.add(m.Paragraph(tr(
'With this tool you can set up numerous scenarios and run them all in '
'one go. A typical use case is defining a number of flood impact '
'scenarios, for example, all using a standard data set such as '
'flood.shp. As new flood data becomes available you replace flood.shp '
'and rerun the scenarios using the batch runner. Using this approach '
'you can quickly produce regional contingency plans as your '
'understanding of hazards changes. When you run the batch of '
'scenarios, pdf reports are generated automatically and all placed in '
'a single common directory making it easy for you to browse and '
'disseminate the reports produced.')))
message.add(m.Paragraph(tr(
'When the batch process completes, it will also produce a summary '
'report like this:')))
table = m.Table(style_class='table table-condensed table-striped')
row = m.Row(m.Cell(tr('InaSAFE Batch Report File')), header=True)
table.add(row)
table.add(m.Row(m.Cell('P: gempa bumi Sumatran fault (Mw7.8)')))
table.add(m.Row(m.Cell('P: gempa di Yogya tahun 2006')))
table.add(m.Row(m.Cell('P: banjir jakarta 2007')))
table.add(m.Row(m.Cell('P: Tsunami di Maumere (Mw 8.1)')))
table.add(m.Row(m.Cell('P: gempa Mw6.5 Palu-Koro Fault')))
table.add(m.Row(m.Cell('P: gunung merapi meletus')))
table.add(m.Row(m.Cell('-----------------------------')))
table.add(m.Row(m.Cell(tr('Total passed: 6'))))
table.add(m.Row(m.Cell(tr('Total failed: 0'))))
table.add(m.Row(m.Cell(tr('Total tasks: 6'))))
message.add(table)
# message.add(m.Paragraph(tr(
# 'For advanced users there is also the ability to batch run python '
# 'scripts using this tool, but this should be considered an '
# 'experimental</strong> feature still at this stage.')))
message.add(m.Paragraph(tr(
'Before running the Batch Runner you might want to use the \'save '
'scenario\' tool to first save some scenarios on which you '
'can let the batch runner do its work. This tool then lets you run '
'selected scenarios, or all saved scenarios, in one go.')))
return message
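# Editor's sketch (hedged): callers typically render the message object, e.g.
#
#   html = batch_help().to_html()
#
# (to_html() is assumed from safe.messaging conventions; the dialog wires the
# rendered result into its help pane.)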
[language: unknown | source: codeparrot/codeparrot-clean]
# Font Awesome 5.0.4
Thanks for downloading Font Awesome! We're so excited you're here.
Our documentation is available online. Just head here:
https://fontawesome.com
[language: unknown | source: github | repo: https://github.com/django/django | path: docs/_theme/djangodocs/static/fontawesome/README.md]
# -*- coding: utf-8 -*-
from openerp import models, fields, api, _
import openerp.addons.decimal_precision as dp
class ProductSetAd(models.TransientModel):
_name = 'product.set.add'
_rec_name = 'product_set_id'
_description = "Wizard model to add a product set into a quotation"
product_set_id = fields.Many2one(
'product.set', _('Product set'), required=True)
quantity = fields.Float(
string=_('Quantity'),
digits=dp.get_precision('Product Unit of Measure'), required=True,
default=1)
@api.multi
def add_set(self):
""" Add product set, multiplied by quantity in sale order line """
so_id = self._context.get('active_id')
if not so_id:
return
so = self.env['sale.order'].browse(so_id)
max_sequence = 0
if so.order_line:
max_sequence = max([line.sequence for line in so.order_line])
sale_order_line = self.env['sale.order.line']
for set_line in self.product_set_id.set_line_ids:
sale_order_line.create(
self.prepare_sale_order_line_data(
so_id, self.product_set_id, set_line,
max_sequence=max_sequence))
def prepare_sale_order_line_data(self, sale_order_id, set, set_line,
max_sequence=0):
return {
'order_id': sale_order_id,
'product_id': set_line.product_id.id,
'product_uom_qty': set_line.quantity * self.quantity,
'sequence': max_sequence + set_line.sequence,
}
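# Editor's worked example (values are illustrative): with wizard quantity=3,
# an existing order whose highest line sequence is 30, and a set line with
# product quantity 5 and sequence 2, prepare_sale_order_line_data() yields
# {'product_uom_qty': 15, 'sequence': 32, ...}.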
[language: unknown | source: codeparrot/codeparrot-clean]
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.bigip_gtm_wide_ip import ApiParameters
from library.bigip_gtm_wide_ip import ModuleParameters
from library.bigip_gtm_wide_ip import ModuleManager
from library.bigip_gtm_wide_ip import ArgumentSpec
from library.bigip_gtm_wide_ip import UntypedManager
from library.bigip_gtm_wide_ip import TypedManager
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_gtm_wide_ip import ApiParameters
from ansible.modules.network.f5.bigip_gtm_wide_ip import ModuleParameters
from ansible.modules.network.f5.bigip_gtm_wide_ip import ModuleManager
from ansible.modules.network.f5.bigip_gtm_wide_ip import ArgumentSpec
from ansible.modules.network.f5.bigip_gtm_wide_ip import UntypedManager
from ansible.modules.network.f5.bigip_gtm_wide_ip import TypedManager
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
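# Editor's note: load_fixture() memoizes parsed fixtures per path, so the
# repeated load_fixture('load_gtm_wide_ip_with_pools.json') calls in the test
# classes below are served from the in-memory dict after the first read.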
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo.baz.bar',
lb_method='round-robin',
)
p = ModuleParameters(params=args)
assert p.name == 'foo.baz.bar'
assert p.pool_lb_method == 'round-robin'
def test_module_pools(self):
args = dict(
pools=[
dict(
name='foo',
ratio='100'
)
]
)
p = ModuleParameters(params=args)
assert len(p.pools) == 1
def test_api_parameters(self):
args = dict(
name='foo.baz.bar',
poolLbMode='round-robin'
)
p = ApiParameters(params=args)
assert p.name == 'foo.baz.bar'
assert p.pool_lb_method == 'round-robin'
def test_api_pools(self):
args = load_fixture('load_gtm_wide_ip_with_pools.json')
p = ApiParameters(params=args)
assert len(p.pools) == 1
assert 'name' in p.pools[0]
assert 'ratio' in p.pools[0]
assert p.pools[0]['name'] == '/Common/baz'
assert p.pools[0]['ratio'] == 10
def test_module_not_fqdn_name(self):
args = dict(
name='foo.baz',
lb_method='round-robin'
)
with pytest.raises(F5ModuleError) as excinfo:
p = ModuleParameters(params=args)
assert p.name == 'foo.baz'
assert 'The provided name must be a valid FQDN' in str(excinfo)
class TestUntypedManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_wideip(self, *args):
set_module_args(dict(
name='foo.baz.bar',
lb_method='round-robin',
password='passsword',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = UntypedManager(module=module, params=module.params)
tm.exists = Mock(return_value=False)
tm.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=True)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'foo.baz.bar'
assert results['state'] == 'present'
assert results['lb_method'] == 'round-robin'
class TestTypedManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_wideip(self, *args):
set_module_args(dict(
name='foo.baz.bar',
lb_method='round-robin',
type='a',
password='passsword',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = TypedManager(module=module, params=module.params)
tm.exists = Mock(return_value=False)
tm.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=False)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'foo.baz.bar'
assert results['state'] == 'present'
assert results['lb_method'] == 'round-robin'
def test_create_wideip_deprecated_lb_method1(self, *args):
set_module_args(dict(
name='foo.baz.bar',
lb_method='round_robin',
type='a',
password='passsword',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = TypedManager(module=module, params=module.params)
tm.exists = Mock(return_value=False)
tm.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=False)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'foo.baz.bar'
assert results['state'] == 'present'
assert results['lb_method'] == 'round-robin'
def test_create_wideip_deprecated_lb_method2(self, *args):
set_module_args(dict(
name='foo.baz.bar',
lb_method='global_availability',
type='a',
password='passsword',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = TypedManager(module=module, params=module.params)
tm.exists = Mock(return_value=False)
tm.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=False)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'foo.baz.bar'
assert results['state'] == 'present'
assert results['lb_method'] == 'global-availability'
def test_create_wideip_with_pool(self, *args):
set_module_args(dict(
name='foo.baz.bar',
lb_method='round-robin',
type='a',
pools=[
dict(
name='foo',
ratio=10
)
],
password='passsword',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = TypedManager(module=module, params=module.params)
tm.exists = Mock(return_value=False)
tm.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=False)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'foo.baz.bar'
assert results['state'] == 'present'
assert results['lb_method'] == 'round-robin'
def test_create_wideip_with_pool_idempotent(self, *args):
set_module_args(dict(
name='foo.bar.com',
lb_method='round-robin',
type='a',
pools=[
dict(
name='baz',
ratio=10
)
],
password='passsword',
server='localhost',
user='admin'
))
current = ApiParameters(params=load_fixture('load_gtm_wide_ip_with_pools.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = TypedManager(module=module, params=module.params)
tm.exists = Mock(return_value=True)
tm.read_current_from_device = Mock(return_value=current)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=False)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['changed'] is False
def test_update_wideip_with_pool(self, *args):
set_module_args(dict(
name='foo.bar.com',
lb_method='round-robin',
type='a',
pools=[
dict(
name='baz',
ratio=10
),
dict(
name='alec',
ratio=100
)
],
password='passsword',
server='localhost',
user='admin'
))
current = ApiParameters(params=load_fixture('load_gtm_wide_ip_with_pools.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = TypedManager(module=module, params=module.params)
tm.exists = Mock(return_value=True)
tm.read_current_from_device = Mock(return_value=current)
tm.update_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=False)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['changed'] is True
assert 'pools' in results
[language: unknown | source: codeparrot/codeparrot-clean]
#! python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# This module implements a special URL handler that uses the port listing to
# find ports by searching the string descriptions.
#
# (C) 2011 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
#
# URL format: hwgrep://regexp
import serial
import serial.tools.list_ports
class Serial(serial.Serial):
"""Just inherit the native Serial port implementation and patch the open function."""
def setPort(self, value):
"""translate port name before storing it"""
if isinstance(value, basestring) and value.startswith('hwgrep://'):
serial.Serial.setPort(self, self.fromURL(value))
else:
serial.Serial.setPort(self, value)
def fromURL(self, url):
"""extract host and port from an URL string"""
if url.lower().startswith("hwgrep://"): url = url[9:]
# use a for loop to get the 1st element from the generator
for port, desc, hwid in serial.tools.list_ports.grep(url):
return port
else:
raise serial.SerialException('no ports found matching regexp %r' % (url,))
# override property
port = property(serial.Serial.getPort, setPort, doc="Port setting")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
#~ s = Serial('hwgrep://ttyS0')
s = Serial(None)
s.port = 'hwgrep://ttyS0'
print s
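    # Editor's note (hedged): list_ports.grep() matches the regexp against
    # the port name, description, and hardware ID, so e.g. 'hwgrep://USB'
    # would select the first enumerated USB serial adapter.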
[language: unknown | source: codeparrot/codeparrot-clean]
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package syscall
import _ "unsafe" // for linkname
// mmap should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - modernc.org/memory
// - github.com/ncruces/go-sqlite3
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
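//
// A pull-side declaration in an external package would look roughly like
// the following (hypothetical sketch; the real signature is declared
// elsewhere in this package):
//
//	//go:linkname mmap syscall.mmap
//	func mmap(addr uintptr, length uintptr, prot int, flag int,
//		fd int, pos int64) (xaddr uintptr, err error)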
//
//go:linkname mmap
|
go
|
github
|
https://github.com/golang/go
|
src/syscall/linkname_unix.go
|
"""
Django settings for daspi project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yqa&jfci=71f$m7ja%z#%5zi8r_=xc0#aefn5af^dydpv6k_g&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'daspi.apps.checkins',
'daspi.apps.api',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'daspi.urls'
WSGI_APPLICATION = 'daspi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
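# A deployment can drop a local_settings.py next to this file to override
# any of the values above; hypothetical example contents:
#     DEBUG = False
#     ALLOWED_HOSTS = ['daspi.example.com']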
try:
from local_settings import *
except ImportError:
pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""utilities methods and classes for reporters
Copyright (c) 2000-2003 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:contact@logilab.fr
"""
import sys, locale
CMPS = ['=', '-', '+']
def diff_string(old, new):
"""given a old and new int value, return a string representing the
difference
"""
diff = abs(old - new)
diff_str = "%s%s" % (CMPS[cmp(old, new)], diff and ('%.2f' % diff) or '')
return diff_str
class EmptyReport(Exception):
"""raised when a report is empty and so should not be displayed"""
class BaseReporter:
"""base class for reporters"""
extension = ''
def __init__(self, output=None):
self.linter = None
self.include_ids = None
self.section = 0
self.out = None
self.out_encoding = None
self.set_output(output)
def set_output(self, output=None):
"""set output stream"""
self.out = output or sys.stdout
        # py3k streams handle their encoding:
if sys.version_info >= (3, 0):
self.encode = lambda x: x
return
def encode(string):
if not isinstance(string, unicode):
return string
encoding = (getattr(self.out, 'encoding', None) or
locale.getdefaultlocale()[1] or
sys.getdefaultencoding())
return string.encode(encoding)
self.encode = encode
def writeln(self, string=''):
"""write a line in the output buffer"""
print >> self.out, self.encode(string)
def display_results(self, layout):
"""display results encapsulated in the layout tree"""
self.section = 0
if self.include_ids and hasattr(layout, 'report_id'):
layout.children[0].children[0].data += ' (%s)' % layout.report_id
self._display(layout)
def _display(self, layout):
"""display the layout"""
raise NotImplementedError()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import numpy
from pt import configs
import operator_benchmark as op_bench
import torch
import torch.ao.nn.qat as nnqat
from torch.ao.quantization import default_embedding_qat_qconfig
"""
Microbenchmarks for QAT Embedding + EmbeddingBag operators.
"""
class QATEmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(
self,
embeddingbags,
dim,
mode,
input_size,
offset,
sparse,
include_last_offset,
device,
):
qconfig = default_embedding_qat_qconfig
self.embedding = nnqat.EmbeddingBag(
num_embeddings=embeddingbags,
embedding_dim=dim,
mode=mode,
include_last_offset=include_last_offset,
sparse=sparse,
device=device,
qconfig=qconfig,
)
numpy.random.seed((1 << 32) - 1)
offsets = torch.LongTensor([offset], device=device)
input = torch.tensor(
numpy.random.randint(0, embeddingbags, input_size), device=device
).long()
self.inputs = {
"input": input,
"offset": torch.cat(
(offsets, torch.tensor([input.size(0)], dtype=torch.long)), 0
),
}
self.set_module_name("qatEmbeddingBag")
def forward(self, input, offset):
return self.embedding(input, offset)
# Currently, EmbeddingBag QAT does not support sparse embeddings.
embeddingbag_short_dense_configs = [
config
for config in configs.embeddingbag_short_configs
if {"sparse": True} not in config
]
op_bench.generate_pt_test(embeddingbag_short_dense_configs, QATEmbeddingBagBenchmark)
op_bench.generate_pt_gradient_test(
embeddingbag_short_dense_configs, QATEmbeddingBagBenchmark
)
class QATEmbeddingBenchmark(op_bench.TorchBenchmarkBase):
def init(self, num_embeddings, embedding_dim, input_size, device):
qconfig = default_embedding_qat_qconfig
self.embedding = nnqat.Embedding(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
qconfig=qconfig,
device=device,
)
self.embedding.qconfig = default_embedding_qat_qconfig
numpy.random.seed((1 << 32) - 1)
self.input = torch.tensor(
numpy.random.randint(0, num_embeddings, input_size), device=device
).long()
self.inputs = {"input": self.input}
self.set_module_name("qatEmbedding")
def forward(self, input):
return self.embedding(input)
op_bench.generate_pt_test(configs.embedding_short_configs, QATEmbeddingBenchmark)
op_bench.generate_pt_gradient_test(
configs.embedding_short_configs, QATEmbeddingBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
python
|
github
|
https://github.com/pytorch/pytorch
|
benchmarks/operator_benchmark/pt/qatembedding_ops_test.py
|
"""SPOJ Problem Set (classical)
2148. Candy III
Problem code: CANDY3
A class went on a school trip. And, as usual, all N kids had their backpacks stuffed with candy. But soon quarrels started all over the place, as some of the kids had more candies than others. Soon, the teacher realized that he had to step in: "Everybody, listen! Put all the candies you have on this table here!"
Soon, there was quite a large heap of candies on the teacher's table. "Now, I will divide the candies into N equal heaps and everyone will get one of them," announced the teacher.
"Wait, is this really possible?" wondered some of the smarter kids.
Problem specification
You are given the number of candies each child brought. Find out whether the teacher can divide the candies into N exactly equal heaps. (For the purpose of this task, all candies are of the same type.)
Input specification
The first line of the input file contains an integer T specifying the number of test cases. Each test case is preceded by a blank line.
Each test case looks as follows: The first line contains N : the number of children. Each of the next N lines contains the number of candies one child brought.
Output specification
For each of the test cases output a single line with a single word "YES" if the candies can be distributed equally, or "NO" otherwise.
Example
Input:
2
5
5
2
7
3
8
6
7
11
2
7
3
4
Output:
YES
NO
"""
tests = int(input())
for i in range(tests):
    input()  # consume the blank line that precedes each test case
kids = int(input())
candies = 0
for j in range(kids):
candies += int(input())
if candies % kids == 0:
print("YES")
else:
print("NO")
|
unknown
|
codeparrot/codeparrot-clean
| ||
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.gitlab import GitLabAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"GitLabAPIWrapper": "langchain_community.utilities.gitlab"}
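# For example, `from langchain_classic.utilities.gitlab import GitLabAPIWrapper`
# resolves through __getattr__ below: the importer emits a deprecation warning
# and returns the class from langchain_community (illustrative note).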
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GitLabAPIWrapper",
]
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/langchain/langchain_classic/utilities/gitlab.py
|
from __future__ import unicode_literals
import copy
import inspect
import warnings
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned,
ObjectDoesNotExist, ValidationError,
)
from django.db import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connections,
router, transaction,
)
from django.db.models import signals
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import CASCADE, Collector
from django.db.models.fields import AutoField
from django.db.models.fields.related import (
ForeignObjectRel, ManyToOneRel, OneToOneField, lazy_related_operation,
resolve_relation,
)
from django.db.models.manager import Manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.functional import curry
from django.utils.six.moves import zip
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_version
@python_2_unicode_compatible
class Deferred(object):
def __repr__(self):
return str('<Deferred field>')
def __str__(self):
return str('<Deferred field>')
DEFERRED = Deferred()
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
app_label = None
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
if not abstract:
raise RuntimeError(
"Model class %s.%s doesn't declare an explicit "
"app_label and isn't in an application in "
"INSTALLED_APPS." % (module, name)
)
else:
app_label = app_config.label
new_class.add_to_class('_meta', Options(meta, app_label))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
str('DoesNotExist'),
tuple(
x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
str('MultipleObjectsReturned'),
tuple(
x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (MultipleObjectsReturned,),
module,
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = chain(
new_class._meta.local_fields,
new_class._meta.local_many_to_many,
new_class._meta.private_fields
)
field_names = {f.name for f in new_fields}
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError(
"Abstract base class containing model fields not "
"permitted for proxy model '%s'." % name
)
else:
continue
if base is None:
base = parent
elif parent._meta.concrete_model is not base._meta.concrete_model:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, '_meta'):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField):
related = resolve_relation(new_class, field.remote_field.model)
parent_links[make_model_tuple(related)] = field
# Track fields inherited from base models.
inherited_attributes = set()
# Do the appropriate setup for any model parents.
for base in new_class.mro():
if base not in parents or not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
inherited_attributes |= set(base.__dict__.keys())
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
if not base._meta.abstract:
# Check for clashes between locally declared fields and those
# on the base classes.
for field in parent_fields:
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
inherited_attributes.add(field.name)
# Concrete classes...
base = base._meta.concrete_model
base_key = make_model_tuple(base)
if base_key in parent_links:
field = parent_links[base_key]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(
base,
on_delete=CASCADE,
name=attr_name,
auto_created=True,
parent_link=True,
)
if attr_name in field_names:
raise FieldError(
"Auto-generated field '%s' in class %r for "
"parent_link to base class %r clashes with "
"declared field of the same name." % (
attr_name,
name,
base.__name__,
)
)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
base_parents = base._meta.parents.copy()
# Add fields from abstract base class if it wasn't overridden.
for field in parent_fields:
if (field.name not in field_names and
field.name not in new_class.__dict__ and
field.name not in inherited_attributes):
new_field = copy.deepcopy(field)
new_class.add_to_class(field.name, new_field)
# Replace parent links defined on this base by the new
# field. It will be appropriately resolved if required.
if field.one_to_one:
for parent, parent_link in base_parents.items():
if field == parent_link:
base_parents[parent] = new_field
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base_parents)
# Inherit private fields (like GenericForeignKey) from the parent
# class
for field in base._meta.private_fields:
if field.name in field_names:
if not base._meta.abstract:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
def add_to_class(cls, name, value):
# We should call the contribute_to_class method only if it's bound
if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# Defer creating accessors on the foreign class until it has been
# created and registered. If remote_field is None, we're ordering
# with respect to a GenericForeignKey and don't know what the
# foreign class is - we'll add those accessors later in
# contribute_to_class().
if opts.order_with_respect_to.remote_field:
wrt = opts.order_with_respect_to
remote = wrt.remote_field.model
lazy_related_operation(make_foreign_order_accessors, cls, remote)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields))
get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower)
if get_absolute_url_override:
setattr(cls, 'get_absolute_url', get_absolute_url_override)
if not opts.managers or cls._requires_legacy_default_manager():
if any(f.name == 'objects' for f in opts.fields):
raise ValueError(
"Model %s must specify a custom Manager, because it has a "
"field named 'objects'." % cls.__name__
)
manager = Manager()
manager.auto_created = True
cls.add_to_class('objects', manager)
signals.class_prepared.send(sender=cls)
def _requires_legacy_default_manager(cls): # RemovedInDjango20Warning
opts = cls._meta
if opts.manager_inheritance_from_future:
return False
future_default_manager = opts.default_manager
# Step 1: Locate a manager that would have been promoted
# to default manager with the legacy system.
for manager in opts.managers:
originating_model = manager._originating_model
if (cls is originating_model or cls._meta.proxy or
originating_model._meta.abstract):
if manager is not cls._default_manager and not opts.default_manager_name:
warnings.warn(
"Managers from concrete parents will soon qualify as default "
"managers if they appear before any other managers in the "
"MRO. As a result, '{legacy_default_manager}' declared on "
"'{legacy_default_manager_model}' will no longer be the "
"default manager for '{model}' in favor of "
"'{future_default_manager}' declared on "
"'{future_default_manager_model}'. "
"You can redeclare '{legacy_default_manager}' on '{cls}' "
"to keep things the way they are or you can switch to the new "
"behavior right away by setting "
"`Meta.manager_inheritance_from_future` to `True`.".format(
cls=cls.__name__,
model=opts.label,
legacy_default_manager=manager.name,
legacy_default_manager_model=manager._originating_model._meta.label,
future_default_manager=future_default_manager.name,
future_default_manager_model=future_default_manager._originating_model._meta.label,
),
RemovedInDjango20Warning, 2
)
opts.default_manager_name = manager.name
opts._expire_cache()
break
# Step 2: Since there are managers but none of them qualified as
# default managers under the legacy system (meaning that there are
# managers from concrete parents that would be promoted under the
# new system), we need to create a new Manager instance for the
# 'objects' attribute as a deprecation shim.
else:
# If the "future" default manager was auto created there is no
# point warning the user since it's basically the same manager.
if not future_default_manager.auto_created:
warnings.warn(
"Managers from concrete parents will soon qualify as "
"default managers. As a result, the 'objects' manager "
"won't be created (or recreated) automatically "
"anymore on '{model}' and '{future_default_manager}' "
"declared on '{future_default_manager_model}' will be "
"promoted to default manager. You can declare "
"explicitly `objects = models.Manager()` on '{cls}' "
"to keep things the way they are or you can switch "
"to the new behavior right away by setting "
"`Meta.manager_inheritance_from_future` to `True`.".format(
cls=cls.__name__,
model=opts.label,
future_default_manager=future_default_manager.name,
future_default_manager_model=future_default_manager._originating_model._meta.label,
),
RemovedInDjango20Warning, 2
)
return True
@property
def _base_manager(cls):
return cls._meta.base_manager
@property
def _default_manager(cls):
return cls._meta.default_manager
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class Model(six.with_metaclass(ModelBase)):
def __init__(self, *args, **kwargs):
signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work.
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
args_len = len(args)
if args_len > len(self._meta.concrete_fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
if not kwargs:
fields_iter = iter(self._meta.concrete_fields)
            # The ordering of the zip calls matters - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
if val is DEFERRED:
continue
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(self._meta.fields)
for val, field in zip(args, fields_iter):
if val is DEFERRED:
continue
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Maintain compatibility with existing calls.
if isinstance(field.remote_field, ManyToOneRel):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# Virtual field
if field.attname not in kwargs and field.column is None:
continue
if kwargs:
if isinstance(field.remote_field, ForeignObjectRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
if rel_obj is not DEFERRED:
setattr(self, field.name, rel_obj)
else:
if val is not DEFERRED:
setattr(self, field.attname, val)
if kwargs:
for prop in list(kwargs):
try:
# Any remaining kwargs must correspond to properties or
# virtual fields.
if (isinstance(getattr(self.__class__, prop), property) or
self._meta.get_field(prop)):
if kwargs[prop] is not DEFERRED:
setattr(self, prop, kwargs[prop])
del kwargs[prop]
except (AttributeError, FieldDoesNotExist):
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
super(Model, self).__init__()
signals.post_init.send(sender=self.__class__, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if len(values) != len(cls._meta.concrete_fields):
values = list(values)
values.reverse()
values = [values.pop() if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields]
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return force_str('<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if six.PY2 and hasattr(self, '__unicode__'):
return force_text(self).encode('utf-8')
return str('%s object' % self.__class__.__name__)
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self._get_pk_val()
if my_pk is None:
return self is other
return my_pk == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
def __reduce__(self):
"""
Provides pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
data = self.__dict__
data[DJANGO_VERSION_PICKLE_KEY] = get_version()
class_id = self._meta.app_label, self._meta.object_name
return model_unpickle, (class_id,), data
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled model instance's Django version %s does not match "
"the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled model instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def get_deferred_fields(self):
"""
Returns a set containing names of deferred fields for this instance.
"""
return {
f.attname for f in self._meta.concrete_fields
if f.attname not in self.__dict__
}
def refresh_from_db(self, using=None, fields=None):
"""
Reloads field values from the database.
By default, the reloading happens from the database this instance was
loaded from, or by the read router if this instance wasn't loaded from
any database. The using parameter will override the default.
Fields can be used to specify which fields to reload. The fields
should be an iterable of field attnames. If fields is None, then
all non-deferred fields are reloaded.
When accessing deferred fields of an instance, the deferred loading
of the field will call this method.
"""
if fields is not None:
if len(fields) == 0:
return
if any(LOOKUP_SEP in f for f in fields):
raise ValueError(
'Found "%s" in fields argument. Relations and transforms '
'are not allowed in fields.' % LOOKUP_SEP)
db = using if using is not None else self._state.db
db_instance_qs = self.__class__._default_manager.using(db).filter(pk=self.pk)
# Use provided fields, if not set then reload all non-deferred fields.
deferred_fields = self.get_deferred_fields()
if fields is not None:
fields = list(fields)
db_instance_qs = db_instance_qs.only(*fields)
elif deferred_fields:
fields = [f.attname for f in self._meta.concrete_fields
if f.attname not in deferred_fields]
db_instance_qs = db_instance_qs.only(*fields)
db_instance = db_instance_qs.get()
non_loaded_fields = db_instance.get_deferred_fields()
for field in self._meta.concrete_fields:
if field.attname in non_loaded_fields:
# This field wasn't refreshed - skip ahead.
continue
setattr(self, field.attname, getattr(db_instance, field.attname))
# Throw away stale foreign key references.
if field.is_relation and field.get_cache_name() in self.__dict__:
rel_instance = getattr(self, field.get_cache_name())
local_val = getattr(db_instance, field.attname)
related_val = None if rel_instance is None else getattr(rel_instance, field.target_field.attname)
if local_val != related_val or (local_val is None and related_val is None):
del self.__dict__[field.get_cache_name()]
self._state.db = db_instance._state.db
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
# Ensure that a model instance without a PK hasn't been assigned to
# a ForeignKey or OneToOneField on this model. If the field is
# nullable, allowing the save() would result in silent data loss.
for field in self._meta.concrete_fields:
if field.is_relation:
# If the related field isn't cached, then an instance hasn't
# been assigned and there's no need to worry about this check.
try:
getattr(self, field.get_cache_name())
except AttributeError:
continue
obj = getattr(self, field.name, None)
# A pk may have been assigned manually to a model instance not
# saved to the database (or auto-generated in a case like
# UUIDField), but we allow the save to proceed and rely on the
# database to raise an IntegrityError if applicable. If
# constraints aren't supported by the database, there's the
# unavoidable risk of data corruption.
if obj and obj.pk is None:
# Remove the object from a related instance cache.
if not field.remote_field.multiple:
delattr(obj, field.remote_field.get_cache_name())
raise ValueError(
"save() prohibited to prevent data loss due to "
"unsaved related object '%s'." % field.name
)
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
deferred_fields = self.get_deferred_fields()
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if len(update_fields) == 0:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
# If saving to the same database, and this model is deferred, then
# automatically do a "update_fields" save on the loaded fields.
elif not force_insert and deferred_fields and using == self._state.db:
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
save.alters_data = True
def save_base(self, raw=False, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Handles the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
        The 'raw' argument tells save_base not to save any parent
models and not to do any changes to the values before save. This
is used by fixture loading.
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or len(update_fields) > 0
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
if cls._meta.proxy:
cls = cls._meta.concrete_model
meta = cls._meta
if not meta.auto_created:
signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields)
with transaction.atomic(using=using, savepoint=False):
if not raw:
self._save_parents(cls, using, update_fields)
updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if not meta.auto_created:
signals.post_save.send(sender=origin, instance=self, created=(not updated),
update_fields=update_fields, raw=raw, using=using)
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""
Saves all the parents of cls using values from self.
"""
meta = cls._meta
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
if (field and getattr(self, parent._meta.pk.attname) is None and
getattr(self, field.attname) is not None):
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self._save_parents(cls=parent, using=using, update_fields=update_fields)
self._save_table(cls=parent, using=using, update_fields=update_fields)
# Set the parent's PK value to self.
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
                # Since we didn't have an instance of the parent handy, we set
# attname directly, bypassing the descriptor. Invalidate
# the related object cache, in case it's been accidentally
# populated. A fresh instance will be re-built from the
# database if necessary.
cache_name = field.get_cache_name()
if hasattr(self, cache_name):
delattr(self, cache_name)
def _save_table(self, raw=False, cls=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Updates or inserts the data
for a single table.
"""
meta = cls._meta
non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
if update_fields:
non_pks = [f for f in non_pks
if f.name in update_fields or f.attname in update_fields]
pk_val = self._get_pk_val(meta)
if pk_val is None:
pk_val = meta.pk.get_pk_value_on_save(self)
setattr(self, meta.pk.attname, pk_val)
pk_set = pk_val is not None
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
if pk_set and not force_insert:
base_qs = cls._base_manager.using(using)
values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
for f in non_pks]
forced_update = update_fields or force_update
updated = self._do_update(base_qs, using, pk_val, values, update_fields,
forced_update)
if force_update and not updated:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not updated:
raise DatabaseError("Save with update_fields did not affect any rows.")
if not updated:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
filter_args = field.get_filter_kwargs_for_object(self)
order_value = cls._base_manager.using(using).filter(**filter_args).count()
self._order = order_value
fields = meta.local_concrete_fields
if not pk_set:
fields = [f for f in fields if not isinstance(f, AutoField)]
update_pk = bool(meta.has_auto_field and not pk_set)
result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
if update_pk:
setattr(self, meta.pk.attname, result)
return updated
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
This method will try to update the model. If the model was updated (in
the sense that an update query was done and a matching row was found
from the DB) the method will return True.
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
# We can end up here when saving a model in inheritance chain where
# update_fields doesn't target any field in current model. In that
# case we just say the update succeeded. Another case ending up here
# is a model with just PK - in that case check that the PK still
# exists.
return update_fields is not None or filtered.exists()
if self._meta.select_on_save and not forced_update:
if filtered.exists():
# It may happen that the object is deleted from the DB right after
# this check, causing the subsequent UPDATE to return zero matching
# rows. The same result can occur in some rare cases when the
# database returns zero despite the UPDATE being executed
# successfully (a row is matched and updated). In order to
# distinguish these two cases, the object's existence in the
# database is again checked for if the UPDATE query returns 0.
return filtered._update(values) > 0 or filtered.exists()
else:
return False
return filtered._update(values) > 0
def _do_insert(self, manager, using, fields, update_pk, raw):
"""
Do an INSERT. If update_pk is defined then this method should return
the new pk for the model.
"""
return manager._insert([self], fields=fields, return_id=update_pk,
using=using, raw=raw)
def delete(self, using=None, keep_parents=False):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, (
"%s object can't be deleted because its %s attribute is set to None." %
(self._meta.object_name, self._meta.pk.attname)
)
collector = Collector(using=using)
collector.collect([self], keep_parents=keep_parents)
return collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = 'gt' if is_next else 'lt'
order = '' if is_next else '-'
param = force_text(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by(
'%s%s' % (order, field.name), '%spk' % order
)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = 'gt' if is_next else 'lt'
order = '_order' if is_next else '-_order'
order_field = self._meta.order_with_respect_to
filter_args = order_field.get_filter_kwargs_for_object(self)
obj = self.__class__._default_manager.filter(**filter_args).filter(**{
'_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, field):
if self.pk is None:
raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
return getattr(self, field.remote_field.get_related_field().attname)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.get_parent_list():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.get_parent_list():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
            # object's values for all the unique fields.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
# allows single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
            # there's a ticket to add a date lookup, we can remove this special
            # case if that makes its way in
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages['unique_for_date'],
code='unique_for_date',
params={
'model': self,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'lookup_type': lookup_type,
'field': field_name,
'field_label': six.text_type(capfirst(field.verbose_name)),
'date_field': unique_for,
'date_field_label': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
}
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params['field_label'] = six.text_type(capfirst(field.verbose_name))
return ValidationError(
message=field.error_messages['unique'],
code='unique',
params=params,
)
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
params['field_labels'] = six.text_type(get_text_list(field_labels, _('and')))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code='unique_together',
params=params,
)
def full_clean(self, exclude=None, validate_unique=True):
"""
Calls clean_fields, clean, and validate_unique, on the model,
and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
else:
exclude = list(exclude)
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
if validate_unique:
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in f.empty_values:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.error_list
if errors:
raise ValidationError(errors)
@classmethod
def check(cls, **kwargs):
errors = []
errors.extend(cls._check_swappable())
errors.extend(cls._check_model())
errors.extend(cls._check_managers(**kwargs))
if not cls._meta.swapped:
errors.extend(cls._check_fields(**kwargs))
errors.extend(cls._check_m2m_through_same_relationship())
errors.extend(cls._check_long_column_names())
clash_errors = cls._check_id_field() + cls._check_field_name_clashes()
errors.extend(clash_errors)
# If there are field name clashes, hide consequent column name
# clashes.
if not clash_errors:
errors.extend(cls._check_column_name_clashes())
errors.extend(cls._check_index_together())
errors.extend(cls._check_unique_together())
errors.extend(cls._check_ordering())
return errors
@classmethod
def _check_swappable(cls):
""" Check if the swapped model exists. """
errors = []
if cls._meta.swapped:
try:
apps.get_model(cls._meta.swapped)
except ValueError:
errors.append(
checks.Error(
"'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable,
id='models.E001',
)
)
except LookupError:
app_label, model_name = cls._meta.swapped.split('.')
errors.append(
checks.Error(
"'%s' references '%s.%s', which has not been "
"installed, or is abstract." % (
cls._meta.swappable, app_label, model_name
),
id='models.E002',
)
)
return errors
@classmethod
def _check_model(cls):
errors = []
if cls._meta.proxy:
if cls._meta.local_fields or cls._meta.local_many_to_many:
errors.append(
checks.Error(
"Proxy model '%s' contains model fields." % cls.__name__,
id='models.E017',
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
""" Perform all manager checks. """
errors = []
for manager in cls._meta.managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
""" Perform all field checks. """
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
@classmethod
def _check_m2m_through_same_relationship(cls):
""" Check if no relationship model is used by more than one m2m field.
"""
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
for f in fields:
signature = (f.remote_field.model, cls, f.remote_field.through)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
"The model has two many-to-many relations through "
"the intermediate model '%s'." % f.remote_field.through._meta.label,
obj=cls,
id='models.E003',
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
@classmethod
def _check_id_field(cls):
""" Check if `id` field is a primary key. """
fields = list(f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk)
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
return [
checks.Error(
"'id' can only be used as a field name if the field also "
"sets 'primary_key=True'.",
obj=cls,
id='models.E004',
)
]
else:
return []
@classmethod
def _check_field_name_clashes(cls):
""" Ref #17673. """
errors = []
used_fields = {} # name or attname -> field
# Check that multi-inheritance doesn't cause field name shadowing.
for parent in cls._meta.get_parent_list():
for f in parent._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
if clash:
errors.append(
checks.Error(
"The field '%s' from parent model "
"'%s' clashes with the field '%s' "
"from parent model '%s'." % (
clash.name, clash.model._meta,
f.name, f.model._meta
),
obj=cls,
id='models.E005',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
# Check that fields defined in the model don't clash with fields from
# parents, including auto-generated fields like multi-table inheritance
# child accessors.
for parent in cls._meta.get_parent_list():
for f in parent._meta.get_fields():
if f not in used_fields:
used_fields[f.name] = f
for f in cls._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
# Note that we may detect clash between user-defined non-unique
# field "id" and automatically added unique field "id", both
# defined at the same model. This special case is considered in
# _check_id_field and here we ignore it.
id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls
if clash and not id_conflict:
errors.append(
checks.Error(
"The field '%s' clashes with the field '%s' "
"from model '%s'." % (
f.name, clash.name, clash.model._meta
),
obj=f,
id='models.E006',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by "
"another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id='models.E007'
)
)
else:
used_column_names.append(column_name)
return errors
@classmethod
def _check_index_together(cls):
""" Check the value of "index_together" option. """
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
"'index_together' must be a list or tuple.",
obj=cls,
id='models.E008',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together):
return [
checks.Error(
"All 'index_together' elements must be lists or tuples.",
obj=cls,
id='models.E009',
)
]
else:
errors = []
for fields in cls._meta.index_together:
errors.extend(cls._check_local_fields(fields, "index_together"))
return errors
@classmethod
def _check_unique_together(cls):
""" Check the value of "unique_together" option. """
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
"'unique_together' must be a list or tuple.",
obj=cls,
id='models.E010',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together):
return [
checks.Error(
"All 'unique_together' elements must be lists or tuples.",
obj=cls,
id='models.E011',
)
]
else:
errors = []
for fields in cls._meta.unique_together:
errors.extend(cls._check_local_fields(fields, "unique_together"))
return errors
@classmethod
def _check_local_fields(cls, fields, option):
from django.db import models
# In order to avoid hitting the relation tree prematurely, we use our
# own fields_map instead of using get_field()
forward_fields_map = {
field.name: field for field in cls._meta._get_fields(reverse=False)
}
errors = []
for field_name in fields:
try:
field = forward_fields_map[field_name]
except KeyError:
errors.append(
checks.Error(
"'%s' refers to the non-existent field '%s'." % (
option, field_name,
),
obj=cls,
id='models.E012',
)
)
else:
if isinstance(field.remote_field, models.ManyToManyRel):
errors.append(
checks.Error(
"'%s' refers to a ManyToManyField '%s', but "
"ManyToManyFields are not permitted in '%s'." % (
option, field_name, option,
),
obj=cls,
id='models.E013',
)
)
elif field not in cls._meta.local_fields:
errors.append(
checks.Error(
"'%s' refers to field '%s' which is not local to model '%s'."
% (option, field_name, cls._meta.object_name),
hint="This issue may be caused by multi-table inheritance.",
obj=cls,
id='models.E016',
)
)
return errors
@classmethod
def _check_ordering(cls):
""" Check "ordering" option -- is it a list of strings and do all fields
exist? """
if cls._meta._ordering_clash:
return [
checks.Error(
"'ordering' and 'order_with_respect_to' cannot be used together.",
obj=cls,
id='models.E021',
),
]
if cls._meta.order_with_respect_to or not cls._meta.ordering:
return []
if not isinstance(cls._meta.ordering, (list, tuple)):
return [
checks.Error(
"'ordering' must be a tuple or list (even if you want to order by only one field).",
obj=cls,
id='models.E014',
)
]
errors = []
fields = cls._meta.ordering
# Skip '?' fields.
fields = (f for f in fields if f != '?')
# Convert "-field" to "field".
fields = ((f[1:] if f.startswith('-') else f) for f in fields)
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
fields = (f for f in fields if '__' not in f)
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
fields = {f for f in fields if f != 'pk'}
# Check for invalid or non-existent fields in ordering.
invalid_fields = []
# Any field name that is not present in field_names does not exist.
# Also, ordering by m2m fields is not allowed.
opts = cls._meta
valid_fields = set(chain.from_iterable(
(f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),)
for f in chain(opts.fields, opts.related_objects)
))
invalid_fields.extend(fields - valid_fields)
for invalid_field in invalid_fields:
errors.append(
checks.Error(
"'ordering' refers to the non-existent field '%s'." % invalid_field,
obj=cls,
id='models.E015',
)
)
return errors
@classmethod
def _check_long_column_names(cls):
"""
Check that any auto-generated column names are shorter than the limits
for each database in which the model will be created.
"""
errors = []
allowed_len = None
db_alias = None
# Find the minimum max allowed length among all specified db_aliases.
for db in settings.DATABASES.keys():
# skip databases where the model won't be created
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
max_name_length = connection.ops.max_name_length()
            if max_name_length is None or connection.features.truncates_names:
                continue
            # Track the smallest limit seen so far and which database imposes it.
            if allowed_len is None or max_name_length < allowed_len:
                allowed_len = max_name_length
                db_alias = db
if allowed_len is None:
return errors
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Check if auto-generated name for the field is too long
# for the database.
if f.db_column is None and column_name is not None and len(column_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (column_name, allowed_len, db_alias),
hint="Set the column name manually using 'db_column'.",
obj=cls,
id='models.E018',
)
)
for f in cls._meta.local_many_to_many:
# Skip nonexistent models.
if isinstance(f.remote_field.through, six.string_types):
continue
# Check if auto-generated name for the M2M field is too long
# for the database.
for m2m in f.remote_field.through._meta.local_fields:
_, rel_name = m2m.get_attname_column()
if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for M2M field '
'"%s". Maximum length is "%s" for database "%s".'
% (rel_name, allowed_len, db_alias),
hint=(
"Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'."
),
obj=cls,
id='models.E019',
)
)
return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
with transaction.atomic(using=using, savepoint=False):
for i, j in enumerate(id_list):
ordered_obj.objects.filter(pk=j, **filter_args).update(_order=i)
def method_get_order(ordered_obj, self):
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
pk_name = ordered_obj._meta.pk.name
return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True)
def make_foreign_order_accessors(model, related_model):
setattr(
related_model,
'get_%s_order' % model.__name__.lower(),
curry(method_get_order, model)
)
setattr(
related_model,
'set_%s_order' % model.__name__.lower(),
curry(method_set_order, model)
)
########
# MISC #
########
def model_unpickle(model_id):
"""
Used to unpickle Model subclasses with deferred fields.
"""
if isinstance(model_id, tuple):
model = apps.get_model(*model_id)
else:
# Backwards compat - the model was cached directly in earlier versions.
model = model_id
return model.__new__(model)
model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
# Get the exception class from the class it is attached to:
exception = getattr(klass, exception_name)
return exception.__new__(exception)
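# Editor's sketch (illustrative, not part of the original module): pickle
# reaches model_unpickle() through Model.__reduce__, which returns roughly
#
#     (model_unpickle, (('app_label', 'modelname'),), state)
#
# so unpickling resolves the class through apps.get_model() and restores
# `state` onto the bare instance produced by model.__new__(model). The
# ('app_label', 'modelname') pair above is a hypothetical placeholder.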
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.integration.jms.jmstxparticipation;
import jakarta.jms.ConnectionFactory;
import jakarta.jms.Destination;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.docs.integration.jms.jmsreceivingasync.ExampleListener;
import org.springframework.jms.listener.DefaultMessageListenerContainer;
import org.springframework.transaction.jta.JtaTransactionManager;
@Configuration
public class ExternalTxJmsConfiguration {
// tag::transactionManagerSnippet[]
@Bean
JtaTransactionManager transactionManager() {
return new JtaTransactionManager();
}
// end::transactionManagerSnippet[]
// tag::jmsContainerSnippet[]
@Bean
DefaultMessageListenerContainer jmsContainer(ConnectionFactory connectionFactory, Destination destination,
ExampleListener messageListener) {
DefaultMessageListenerContainer jmsContainer = new DefaultMessageListenerContainer();
jmsContainer.setConnectionFactory(connectionFactory);
jmsContainer.setDestination(destination);
jmsContainer.setMessageListener(messageListener);
jmsContainer.setSessionTransacted(true);
return jmsContainer;
}
// end::jmsContainerSnippet[]
}
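// Editor's note (sketch, not part of the original example): because the
// container is session-transacted and a JtaTransactionManager bean is
// present, the two can be wired together so that message reception and any
// other XA resources commit or roll back as one unit, e.g. (hypothetical):
//
//   jmsContainer.setTransactionManager(transactionManager());
//
// setTransactionManager() accepts any Spring PlatformTransactionManager.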
|
java
|
github
|
https://github.com/spring-projects/spring-framework
|
framework-docs/src/main/java/org/springframework/docs/integration/jms/jmstxparticipation/ExternalTxJmsConfiguration.java
|
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_
#define ABSL_BASE_INTERNAL_HIDE_PTR_H_
#include <cstdint>
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
// Arbitrary value with high bits set. Xor'ing with it is unlikely
// to map one valid pointer to another valid pointer.
constexpr uintptr_t HideMask() {
return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU;
}
// Hide a pointer from the leak checker. For internal use only.
// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr
// and all objects reachable from ptr to be ignored by the leak checker.
template <class T>
inline uintptr_t HidePtr(T* ptr) {
return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
}
// Return a pointer that has been hidden from the leak checker.
// For internal use only.
template <class T>
inline T* UnhidePtr(uintptr_t hidden) {
return reinterpret_cast<T*>(hidden ^ HideMask());
}
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_BASE_INTERNAL_HIDE_PTR_H_
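// Editor's sketch (illustrative, not part of the Abseil source): hiding and
// unhiding are exact inverses, since XOR-ing twice with HideMask() is the
// identity:
//
//   int x = 42;
//   uintptr_t h = absl::base_internal::HidePtr(&x);
//   assert(absl::base_internal::UnhidePtr<int>(h) == &x);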
|
c
|
github
|
https://github.com/mysql/mysql-server
|
extra/abseil/abseil-cpp-20230802.1/absl/base/internal/hide_ptr.h
|
<?php
$container->loadFromExtension('framework', [
'workflows' => [
'my_workflow' => [
'type' => 'workflow',
'places' => [
'first',
'last',
],
'transitions' => [
'go' => [
'from' => [
'first',
],
'to' => [
'last',
],
],
],
],
],
]);
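// Editor's note (illustrative, not part of the fixture): the configuration
// above declares a workflow with places 'first' and 'last' and a single
// transition 'go'. Applying it could look like the following, where the
// conventional 'workflow.my_workflow' service id is an assumption:
//
//   $workflow = $container->get('workflow.my_workflow');
//   if ($workflow->can($subject, 'go')) {
//       $workflow->apply($subject, 'go');
//   }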
|
php
|
github
|
https://github.com/symfony/symfony
|
src/Symfony/Bundle/FrameworkBundle/Tests/DependencyInjection/Fixtures/php/workflow_without_support_and_support_strategy.php
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2008 Gautier Hayoun <gautier.hayoun@itaapy.com>
# Copyright (C) 2008 Juan David Ibáñez Palomar <jdavid@itaapy.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from itools
from itools.datatypes import MultiLinesTokens, String, URI, Email, Tokens
from itools.handlers import ConfigFile
class SetupConf(ConfigFile):
schema = {
'name': String,
'title': String,
'url': String,
'author_name': String,
'author_email': Email,
'license': String,
'description': String,
'classifiers': MultiLinesTokens(default=()),
'packages': Tokens,
'requires': Tokens,
'provides': Tokens,
'scripts': Tokens,
'source_language': String,
'target_languages': Tokens,
'repository': URI,
'username': String}
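# Editor's sketch (hypothetical file, not part of itools): a setup.conf that
# this handler could parse might look like:
#
#   name = my_package
#   title = My Package
#   author_email = me@example.com
#   packages = my_package my_package.sub
#
# Tokens-typed fields split their value on whitespace; MultiLinesTokens
# ('classifiers') additionally lets the value continue over several lines.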
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Authors:
# Rob Crittenden <rcritten@redhat.com>
# Pavel Zuna <pzuna@redhat.com>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipalib/plugins/permission.py` module with old API.
This ensures basic backwards compatibility for code before
http://www.freeipa.org/page/V3/Permissions_V2
"""
from ipalib import api, errors
from ipatests.test_xmlrpc import objectclasses
from ipatests.test_xmlrpc.xmlrpc_test import (Declarative, fuzzy_digits,
fuzzy_uuid)
from ipapython.dn import DN
permission1 = u'testperm'
permission1_dn = DN(('cn',permission1),
api.env.container_permission,api.env.basedn)
permission1_renamed = u'testperm1_rn'
permission1_renamed_dn = DN(('cn',permission1_renamed),
api.env.container_permission,api.env.basedn)
permission1_renamed_ucase = u'Testperm_RN'
permission1_renamed_ucase_dn = DN(('cn',permission1_renamed_ucase),
api.env.container_permission,api.env.basedn)
permission2 = u'testperm2'
permission2_dn = DN(('cn',permission2),
api.env.container_permission,api.env.basedn)
permission3 = u'testperm3'
permission3_dn = DN(('cn',permission3),
api.env.container_permission,api.env.basedn)
permission3_attributelevelrights = {
'member': u'rscwo',
'seealso': u'rscwo',
'ipapermissiontype': u'rscwo',
'cn': u'rscwo',
'businesscategory': u'rscwo',
'objectclass': u'rscwo',
'memberof': u'rscwo',
'aci': u'rscwo',
'o': u'rscwo',
'owner': u'rscwo',
'ou': u'rscwo',
'targetgroup': u'rscwo',
'type': u'rscwo',
'nsaccountlock': u'rscwo',
'description': u'rscwo',
'attrs': u'rscwo',
'ipapermincludedattr': u'rscwo',
'ipapermbindruletype': u'rscwo',
'ipapermdefaultattr': u'rscwo',
'ipapermexcludedattr': u'rscwo',
'ipapermlocation': u'rscwo',
'ipapermright': u'rscwo',
'ipapermtarget': u'rscwo',
'ipapermtargetfilter': u'rscwo',
'ipapermtargetto': u'rscwo',
'ipapermtargetfrom': u'rscwo',
}
privilege1 = u'testpriv1'
privilege1_dn = DN(('cn',privilege1),
api.env.container_privilege,api.env.basedn)
invalid_permission1 = u'bad;perm'
users_dn = DN(api.env.container_user, api.env.basedn)
groups_dn = DN(api.env.container_group, api.env.basedn)
hbac_dn = DN(api.env.container_hbac, api.env.basedn)
class test_old_permission(Declarative):
default_version = u'2.65'
cleanup_commands = [
('permission_del', [permission1], {}),
('permission_del', [permission2], {}),
('permission_del', [permission3], {}),
('privilege_del', [privilege1], {}),
]
tests = [
dict(
desc='Try to retrieve non-existent %r' % permission1,
command=('permission_show', [permission1], {}),
expected=errors.NotFound(
reason=u'%s: permission not found' % permission1),
),
dict(
desc='Try to update non-existent %r' % permission1,
command=('permission_mod', [permission1], dict(permissions=u'all')),
expected=errors.NotFound(
reason=u'%s: permission not found' % permission1),
),
dict(
desc='Try to delete non-existent %r' % permission1,
command=('permission_del', [permission1], {}),
expected=errors.NotFound(
reason=u'%s: permission not found' % permission1),
),
dict(
desc='Search for non-existent %r' % permission1,
command=('permission_find', [permission1], {}),
expected=dict(
count=0,
truncated=False,
summary=u'0 permissions matched',
result=[],
),
),
dict(
desc='Create %r' % permission1,
command=(
'permission_add', [permission1], dict(
type=u'user',
permissions=u'write',
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=u'user',
permissions=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'V2', u'SYSTEM'],
subtree=u'ldap:///%s' % users_dn,
),
),
),
dict(
desc='Try to create duplicate %r' % permission1,
command=(
'permission_add', [permission1], dict(
type=u'user',
permissions=u'write',
),
),
expected=errors.DuplicateEntry(
message='permission with name "%s" already exists' % permission1),
),
dict(
desc='Create %r' % privilege1,
command=('privilege_add', [privilege1],
dict(description=u'privilege desc. 1')
),
expected=dict(
value=privilege1,
summary=u'Added privilege "%s"' % privilege1,
result=dict(
dn=privilege1_dn,
cn=[privilege1],
description=[u'privilege desc. 1'],
objectclass=objectclasses.privilege,
),
),
),
dict(
desc='Add permission %r to privilege %r' % (permission1, privilege1),
command=('privilege_add_permission', [privilege1],
dict(permission=permission1)
),
expected=dict(
completed=1,
failed=dict(
member=dict(
permission=[],
),
),
result={
'dn': privilege1_dn,
'cn': [privilege1],
'description': [u'privilege desc. 1'],
'memberof_permission': [permission1],
'objectclass': objectclasses.privilege,
}
),
),
dict(
desc='Retrieve %r' % permission1,
command=('permission_show', [permission1], {}),
expected=dict(
value=permission1,
summary=None,
result={
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': u'user',
'permissions': [u'write'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'subtree': u'ldap:///%s' % users_dn,
},
),
),
dict(
desc='Retrieve %r with --raw' % permission1,
command=('permission_show', [permission1], {'raw' : True}),
expected=dict(
value=permission1,
summary=None,
result={
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member': [privilege1_dn],
'aci': [u'(targetfilter = "(objectclass=posixaccount)")'+
u'(version 3.0;acl "permission:testperm";' +
u'allow (write) ' +
u'groupdn = "ldap:///%s";)' % DN(
('cn', 'testperm'), ('cn', 'permissions'),
('cn', 'pbac'), api.env.basedn)],
'ipapermright': [u'write'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'ipapermtargetfilter': [u'(objectclass=posixaccount)'],
'ipapermlocation': [users_dn],
},
),
),
dict(
desc='Search for %r' % permission1,
command=('permission_find', [permission1], {}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': u'user',
'permissions': [u'write'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'subtree': u'ldap:///%s' % users_dn,
},
],
),
),
dict(
desc='Search for %r using --name' % permission1,
command=('permission_find', [], {'cn': permission1}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': u'user',
'permissions': [u'write'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'subtree': u'ldap:///%s' % users_dn,
},
],
),
),
dict(
            desc='Search for non-existent permission using --name',
command=('permission_find', [], {'cn': u'notfound'}),
expected=dict(
count=0,
truncated=False,
summary=u'0 permissions matched',
result=[],
),
),
dict(
desc='Search for %r' % privilege1,
command=('permission_find', [privilege1], {}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': u'user',
'permissions': [u'write'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'subtree': u'ldap:///%s' % users_dn,
},
],
),
),
dict(
desc='Search for %r with --raw' % permission1,
command=('permission_find', [permission1], {'raw' : True}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member': [privilege1_dn],
'aci': [u'(targetfilter = "(objectclass=posixaccount)")(version 3.0;acl "permission:testperm";allow (write) groupdn = "ldap:///%s";)' % \
DN(('cn', 'testperm'), ('cn', 'permissions'), ('cn', 'pbac'), api.env.basedn)],
'ipapermright': [u'write'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'ipapermtargetfilter': [u'(objectclass=posixaccount)'],
'ipapermlocation': [users_dn],
},
],
),
),
dict(
desc='Create %r' % permission2,
command=(
'permission_add', [permission2], dict(
type=u'user',
permissions=u'write',
setattr=u'owner=cn=test',
addattr=u'owner=cn=test2',
)
),
expected=dict(
value=permission2,
summary=u'Added permission "%s"' % permission2,
result=dict(
dn=permission2_dn,
cn=[permission2],
objectclass=objectclasses.permission,
type=u'user',
permissions=[u'write'],
owner=[u'cn=test', u'cn=test2'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'V2', u'SYSTEM'],
subtree=u'ldap:///%s' % users_dn,
),
),
),
dict(
desc='Search for %r' % permission1,
command=('permission_find', [permission1], {}),
expected=dict(
count=2,
truncated=False,
summary=u'2 permissions matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': u'user',
'permissions': [u'write'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'subtree': u'ldap:///%s' % users_dn,
},
{
'dn': permission2_dn,
'cn': [permission2],
'objectclass': objectclasses.permission,
'type': u'user',
'permissions': [u'write'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'subtree': u'ldap:///%s' % users_dn,
},
],
),
),
dict(
desc='Search for %r with --pkey-only' % permission1,
command=('permission_find', [permission1], {'pkey_only' : True}),
expected=dict(
count=2,
truncated=False,
summary=u'2 permissions matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
},
{
'dn': permission2_dn,
'cn': [permission2],
},
],
),
),
dict(
desc='Search by ACI attribute with --pkey-only',
command=('permission_find', [], {'pkey_only': True,
'attrs': [u'krbminpwdlife']}),
expected=dict(
count=2,
truncated=False,
summary=u'2 permissions matched',
result=[
{
'dn': DN(('cn', 'System: Modify Group Password Policy'),
api.env.container_permission, api.env.basedn),
'cn': [u'System: Modify Group Password Policy'],
},
{
'dn': DN(('cn', 'System: Read Group Password Policy'),
api.env.container_permission, api.env.basedn),
'cn': [u'System: Read Group Password Policy'],
},
],
),
),
dict(
desc='Search for %r' % privilege1,
command=('privilege_find', [privilege1], {}),
expected=dict(
count=1,
truncated=False,
summary=u'1 privilege matched',
result=[
{
'dn': privilege1_dn,
'cn': [privilege1],
'description': [u'privilege desc. 1'],
'memberof_permission': [permission1],
},
],
),
),
dict(
desc='Search for %r with a limit of 1 (truncated)' % permission1,
command=('permission_find', [permission1], dict(sizelimit=1)),
expected=dict(
count=1,
truncated=True,
summary=u'1 permission matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': u'user',
'permissions': [u'write'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'subtree': u'ldap:///%s' % users_dn,
},
],
),
),
dict(
desc='Search for %r with a limit of 2' % permission1,
command=('permission_find', [permission1], dict(sizelimit=2)),
expected=dict(
count=2,
truncated=False,
summary=u'2 permissions matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': u'user',
'permissions': [u'write'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'subtree': u'ldap:///%s' % users_dn,
},
{
'dn': permission2_dn,
'cn': [permission2],
'objectclass': objectclasses.permission,
'type': u'user',
'permissions': [u'write'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'subtree': u'ldap:///%s' % users_dn,
},
],
),
),
# This tests setting truncated to True in the post_callback of
# permission_find(). The return order in LDAP is not guaranteed
# so do not check the actual entry.
dict(
desc='Search for permissions by attr with a limit of 1 (truncated)',
command=('permission_find', [u'Modify'],
dict(attrs=u'ipaenabledflag', sizelimit=1)),
expected=dict(
count=1,
truncated=True,
summary=u'1 permission matched',
result=[lambda res:
DN(res['dn']).endswith(DN(api.env.container_permission,
api.env.basedn)) and
'ipapermission' in res['objectclass']],
),
),
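        # Editor's note: the Declarative machinery used here accepts callables
        # inside `result`; a callable entry appears to be applied to the
        # returned value as a predicate rather than compared for equality.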
dict(
desc='Update %r' % permission1,
command=(
'permission_mod', [permission1], dict(
permissions=u'read',
memberof=u'ipausers',
setattr=u'owner=cn=other-test',
addattr=u'owner=cn=other-test2',
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
member_privilege=[privilege1],
type=u'user',
permissions=[u'read'],
memberof=u'ipausers',
owner=[u'cn=other-test', u'cn=other-test2'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'V2', u'SYSTEM'],
subtree=u'ldap:///%s' % users_dn,
),
),
),
dict(
desc='Retrieve %r to verify update' % permission1,
command=('permission_show', [permission1], {}),
expected=dict(
value=permission1,
summary=None,
result={
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': u'user',
'permissions': [u'read'],
'memberof': u'ipausers',
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'subtree': u'ldap:///%s' % users_dn,
},
),
),
dict(
desc='Try to rename %r to existing permission %r' % (permission1,
permission2),
command=(
'permission_mod', [permission1], dict(rename=permission2,
permissions=u'all',)
),
expected=errors.DuplicateEntry(),
),
dict(
            desc='Try to rename %r to an empty name' % permission1,
command=(
'permission_mod', [permission1], dict(rename=u'',
permissions=u'all',)
),
expected=errors.ValidationError(name='rename',
error=u'New name can not be empty'),
),
dict(
desc='Check integrity of original permission %r' % permission1,
command=('permission_show', [permission1], {}),
expected=dict(
value=permission1,
summary=None,
result={
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': u'user',
'permissions': [u'read'],
'memberof': u'ipausers',
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'subtree': u'ldap:///%s' % users_dn,
},
),
),
dict(
desc='Rename %r to permission %r' % (permission1,
permission1_renamed),
command=(
'permission_mod', [permission1], dict(rename=permission1_renamed,
permissions= u'all',)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result={
'dn': permission1_renamed_dn,
'cn': [permission1_renamed],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': u'user',
'permissions': [u'all'],
'memberof': u'ipausers',
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'subtree': u'ldap:///%s' % users_dn,
},
),
),
dict(
desc='Rename %r to permission %r' % (permission1_renamed,
permission1_renamed_ucase),
command=(
'permission_mod', [permission1_renamed], dict(rename=permission1_renamed_ucase,
permissions= u'write',)
),
expected=dict(
value=permission1_renamed,
summary=u'Modified permission "%s"' % permission1_renamed,
result={
'dn': permission1_renamed_ucase_dn,
'cn': [permission1_renamed_ucase],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': u'user',
'permissions': [u'write'],
'memberof': u'ipausers',
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
'subtree': u'ldap:///%s' % users_dn,
},
),
),
dict(
desc='Change %r to a subtree type' % permission1_renamed_ucase,
command=(
'permission_mod', [permission1_renamed_ucase],
dict(subtree=u'ldap:///%s' % DN(('cn', 'accounts'), api.env.basedn),
type=None)
),
expected=dict(
value=permission1_renamed_ucase,
summary=u'Modified permission "%s"' % permission1_renamed_ucase,
result=dict(
dn=permission1_renamed_ucase_dn,
cn=[permission1_renamed_ucase],
objectclass=objectclasses.permission,
member_privilege=[privilege1],
subtree=u'ldap:///%s' % DN(('cn', 'accounts'), api.env.basedn),
permissions=[u'write'],
memberof=u'ipausers',
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'V2', u'SYSTEM'],
),
),
),
dict(
desc='Search for %r using --subtree' % permission1,
command=('permission_find', [],
{'subtree': u'ldap:///%s' % DN(('cn', 'accounts'), api.env.basedn)}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[
{
'dn':permission1_renamed_ucase_dn,
'cn':[permission1_renamed_ucase],
'objectclass': objectclasses.permission,
'member_privilege':[privilege1],
'subtree':u'ldap:///%s' % DN(('cn', 'accounts'), api.env.basedn),
'permissions':[u'write'],
'memberof':u'ipausers',
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'V2', u'SYSTEM'],
},
],
),
),
dict(
desc='Search using nonexistent --subtree',
command=('permission_find', [], {'subtree': u'ldap:///foo=bar'}),
expected=dict(
count=0,
truncated=False,
summary=u'0 permissions matched',
result=[],
),
),
dict(
desc='Search using --targetgroup',
command=('permission_find', [], {'targetgroup': u'ipausers'}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[
{
'dn': DN(('cn', 'System: Add User to default group'),
api.env.container_permission, api.env.basedn),
'cn': [u'System: Add User to default group'],
'objectclass': objectclasses.permission,
'member_privilege': [u'User Administrators'],
'attrs': [u'member'],
'targetgroup': u'ipausers',
'memberindirect_role': [u'User Administrator'],
'permissions': [u'write'],
'ipapermbindruletype': [u'permission'],
'ipapermtarget': [DN('cn=ipausers', groups_dn)],
'subtree': u'ldap:///%s' % groups_dn,
'ipapermdefaultattr': [u'member'],
'ipapermissiontype': [u'V2', u'MANAGED', u'SYSTEM'],
}
],
),
),
dict(
desc='Delete %r' % permission1_renamed_ucase,
command=('permission_del', [permission1_renamed_ucase], {}),
expected=dict(
result=dict(failed=u''),
value=permission1_renamed_ucase,
summary=u'Deleted permission "%s"' % permission1_renamed_ucase,
)
),
dict(
desc='Try to delete non-existent %r' % permission1,
command=('permission_del', [permission1], {}),
expected=errors.NotFound(
reason=u'%s: permission not found' % permission1),
),
dict(
desc='Try to retrieve non-existent %r' % permission1,
command=('permission_show', [permission1], {}),
expected=errors.NotFound(
reason=u'%s: permission not found' % permission1),
),
dict(
desc='Try to update non-existent %r' % permission1,
command=('permission_mod', [permission1], dict(rename=u'Foo')),
expected=errors.NotFound(
reason=u'%s: permission not found' % permission1),
),
dict(
desc='Delete %r' % permission2,
command=('permission_del', [permission2], {}),
expected=dict(
result=dict(failed=u''),
value=permission2,
summary=u'Deleted permission "%s"' % permission2,
)
),
dict(
desc='Search for %r' % permission1,
command=('permission_find', [permission1], {}),
expected=dict(
count=0,
truncated=False,
summary=u'0 permissions matched',
result=[],
),
),
dict(
desc='Delete %r' % privilege1,
command=('privilege_del', [privilege1], {}),
expected=dict(
result=dict(failed=u''),
value=privilege1,
summary=u'Deleted privilege "%s"' % privilege1,
)
),
dict(
desc='Try to create permission %r with non-existing memberof' % permission1,
command=(
'permission_add', [permission1], dict(
memberof=u'nonexisting',
permissions=u'write',
)
),
expected=errors.NotFound(reason=u'nonexisting: group not found'),
),
dict(
desc='Create memberof permission %r' % permission1,
command=(
'permission_add', [permission1], dict(
memberof=u'editors',
permissions=u'write',
type=u'user',
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
memberof=u'editors',
permissions=[u'write'],
type=u'user',
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'V2', u'SYSTEM'],
subtree=u'ldap:///%s' % users_dn,
),
),
),
dict(
desc='Try to update non-existent memberof of %r' % permission1,
command=('permission_mod', [permission1], dict(
memberof=u'nonexisting')),
expected=errors.NotFound(reason=u'nonexisting: group not found'),
),
dict(
desc='Update memberof permission %r' % permission1,
command=(
'permission_mod', [permission1], dict(
memberof=u'admins',
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
memberof=u'admins',
permissions=[u'write'],
type=u'user',
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'V2', u'SYSTEM'],
subtree=u'ldap:///%s' % users_dn,
),
),
),
dict(
desc='Unset memberof of permission %r' % permission1,
command=(
'permission_mod', [permission1], dict(
memberof=None,
)
),
expected=dict(
summary=u'Modified permission "%s"' % permission1,
value=permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
permissions=[u'write'],
type=u'user',
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'V2', u'SYSTEM'],
subtree=u'ldap:///%s' % users_dn,
),
),
),
dict(
desc='Delete %r' % permission1,
command=('permission_del', [permission1], {}),
expected=dict(
result=dict(failed=u''),
value=permission1,
summary=u'Deleted permission "%s"' % permission1,
)
),
dict(
desc='Create targetgroup permission %r' % permission1,
command=(
'permission_add', [permission1], dict(
targetgroup=u'editors',
permissions=u'write',
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
targetgroup=u'editors',
permissions=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'V2', u'SYSTEM'],
ipapermtarget=[DN('cn=editors', groups_dn)],
subtree=u'ldap:///%s' % api.env.basedn,
),
),
),
dict(
desc='Try to create invalid %r' % invalid_permission1,
command=('permission_add', [invalid_permission1], dict(
type=u'user',
permissions=u'write',
)),
expected=errors.ValidationError(name='name',
error='May only contain letters, numbers, -, _, ., and space'),
),
dict(
desc='Create %r' % permission3,
command=(
'permission_add', [permission3], dict(
type=u'user',
permissions=u'write',
attrs=[u'cn']
)
),
expected=dict(
value=permission3,
summary=u'Added permission "%s"' % permission3,
result=dict(
dn=permission3_dn,
cn=[permission3],
objectclass=objectclasses.permission,
type=u'user',
permissions=[u'write'],
attrs=(u'cn',),
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'V2', u'SYSTEM'],
subtree=u'ldap:///%s' % users_dn,
),
),
),
dict(
desc='Retrieve %r with --all --rights' % permission3,
command=('permission_show', [permission3], {'all' : True, 'rights' : True}),
expected=dict(
value=permission3,
summary=None,
result=dict(
dn=permission3_dn,
cn=[permission3],
objectclass=objectclasses.permission,
type=u'user',
attrs=(u'cn',),
ipapermincludedattr=[u'cn'],
permissions=[u'write'],
attributelevelrights=permission3_attributelevelrights,
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'V2', u'SYSTEM'],
ipapermtargetfilter=[u'(objectclass=posixaccount)'],
subtree=u'ldap:///%s' % users_dn,
),
),
),
dict(
desc='Modify %r with --all -rights' % permission3,
command=('permission_mod', [permission3], {'all' : True, 'rights': True, 'attrs':[u'cn',u'uid']}),
expected=dict(
value=permission3,
summary=u'Modified permission "%s"' % permission3,
result=dict(
dn=permission3_dn,
cn=[permission3],
objectclass=objectclasses.permission,
type=u'user',
attrs=(u'cn',u'uid'),
ipapermincludedattr=[u'cn', u'uid'],
permissions=[u'write'],
attributelevelrights=permission3_attributelevelrights,
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'V2', u'SYSTEM'],
ipapermtargetfilter=[u'(objectclass=posixaccount)'],
subtree=u'ldap:///%s' % users_dn,
),
),
),
]
|
unknown
|
codeparrot/codeparrot-clean
| ||
set -e
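# Run each in-tree module's image-fetch hook, if the module provides one.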
for dir in ./src/src/mongo/db/modules/*; do
    if test -f "$dir/evergreen/fetch_images.sh"; then
        bash "$dir/evergreen/fetch_images.sh"
    fi
done
|
unknown
|
github
|
https://github.com/mongodb/mongo
|
evergreen/fetch_module_images.sh
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import absolute_import
from __future__ import division
import os
from collections import namedtuple
from uuid import uuid4
import numpy as _np
import mxnet as mx
from mxnet import gluon, autograd, np, npx
from mxnet.test_utils import use_np, assert_almost_equal, check_gluon_hybridize_consistency, same, check_symbolic_backward
from common import assertRaises, setup_module, with_seed, teardown_module, \
xfail_when_nonstandard_decimal_separator
import random
from mxnet.base import MXNetError
from mxnet.gluon.data.vision import transforms
from mxnet import image
@with_seed()
@use_np
def test_to_tensor():
# 3D Input
data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8)
out_nd = transforms.ToTensor()(np.array(data_in, dtype='uint8'))
assert_almost_equal(out_nd.asnumpy(), np.transpose(
data_in.astype(dtype=np.float32) / 255.0, (2, 0, 1)))
# 4D Input
data_in = np.random.uniform(0, 255, (5, 300, 300, 3)).astype(dtype=np.uint8)
out_nd = transforms.ToTensor()(np.array(data_in, dtype='uint8'))
assert_almost_equal(out_nd.asnumpy(), np.transpose(
data_in.astype(dtype=np.float32) / 255.0, (0, 3, 1, 2)))
# Invalid Input
invalid_data_in = np.random.uniform(0, 255, (5, 5, 300, 300, 3)).astype(dtype=np.uint8)
transformer = transforms.ToTensor()
assertRaises(MXNetError, transformer, invalid_data_in)
# Bounds (0->0, 255->1)
data_in = np.zeros((10, 20, 3)).astype(dtype=np.uint8)
out_nd = transforms.ToTensor()(np.array(data_in, dtype='uint8'))
assert same(out_nd.asnumpy(), np.transpose(np.zeros(data_in.shape, dtype=np.float32), (2, 0, 1)))
data_in = np.full((10, 20, 3), 255).astype(dtype=np.uint8)
out_nd = transforms.ToTensor()(np.array(data_in, dtype='uint8'))
assert same(out_nd.asnumpy(), np.transpose(np.ones(data_in.shape, dtype=np.float32), (2, 0, 1)))
@with_seed()
@use_np
def test_normalize():
# 3D Input
data_in_3d = np.random.uniform(0, 1, (3, 300, 300))
out_nd_3d = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))(data_in_3d)
data_expected_3d = data_in_3d.asnumpy()
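    # Note: chained indexing like a[:][:][c] reduces to a[c] on a NumPy array,
    # so the lines below address channel c of the CHW tensor.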
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
assert_almost_equal(data_expected_3d, out_nd_3d.asnumpy())
# 4D Input
data_in_4d = np.random.uniform(0, 1, (2, 3, 300, 300))
out_nd_4d = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))(data_in_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
assert_almost_equal(data_expected_4d, out_nd_4d.asnumpy())
# Invalid Input - Neither 3D or 4D input
invalid_data_in = np.random.uniform(0, 1, (5, 5, 3, 300, 300))
normalize_transformer = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))
assertRaises(MXNetError, normalize_transformer, invalid_data_in)
# Invalid Input - Channel neither 1 or 3
invalid_data_in = np.random.uniform(0, 1, (5, 4, 300, 300))
normalize_transformer = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))
assertRaises(MXNetError, normalize_transformer, invalid_data_in)
@with_seed()
@use_np
def test_resize():
def _test_resize_with_diff_type(dtype):
# test normal case
data_in = np.random.uniform(0, 255, (300, 200, 3)).astype(dtype)
out_nd = transforms.Resize(200)(data_in)
data_expected = mx.image.imresize(data_in, 200, 200, 1)
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
# test 4D input
        data_batch_in = np.random.uniform(0, 255, (3, 300, 200, 3)).astype(dtype)
        out_batch_nd = transforms.Resize(200)(data_batch_in)
        for i in range(len(out_batch_nd)):
            assert_almost_equal(mx.image.imresize(data_batch_in[i], 200, 200, 1).asnumpy(),
                                out_batch_nd[i].asnumpy())
# test interp = 2
out_nd = transforms.Resize(200, interpolation=2)(data_in)
data_expected = mx.image.imresize(data_in, 200, 200, 2)
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
        # test height not equal to width
out_nd = transforms.Resize((200, 100))(data_in)
data_expected = mx.image.imresize(data_in, 200, 100, 1)
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
# test keep_ratio
out_nd = transforms.Resize(150, keep_ratio=True)(data_in)
data_expected = mx.image.imresize(data_in, 150, 225, 1)
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
# test size below zero
invalid_transform = transforms.Resize(-150, keep_ratio=True)
assertRaises(MXNetError, invalid_transform, data_in)
        # test a size tuple with more than 2 elements
invalid_transform = transforms.Resize((100, 100, 100), keep_ratio=True)
assertRaises(MXNetError, invalid_transform, data_in)
for dtype in ['uint8', 'float32', 'float64']:
_test_resize_with_diff_type(dtype)
@with_seed()
@use_np
def test_crop_resize():
def _test_crop_resize_with_diff_type(dtype):
# test normal case
data_in = np.arange(60).reshape((5, 4, 3)).astype(dtype)
out_nd = transforms.CropResize(0, 0, 3, 2)(data_in)
out_np = out_nd.asnumpy()
assert(out_np.sum() == 180)
assert((out_np[0:2,1,1].flatten() == [4, 16]).all())
# test 4D input
        data_batch_in = np.arange(180).reshape((2, 6, 5, 3)).astype(dtype)
        out_batch_nd = transforms.CropResize(1, 2, 3, 4)(data_batch_in)
out_batch_np = out_batch_nd.asnumpy()
assert(out_batch_np.sum() == 7524)
assert((out_batch_np[0:2,0:4,1,1].flatten() == [37, 52, 67, 82, 127, 142, 157, 172]).all())
# test normal case with resize
data_in = np.random.uniform(0, 255, (300, 200, 3)).astype(dtype)
out_nd = transforms.CropResize(0, 0, 100, 50, (25, 25), 1)(data_in)
        data_expected = transforms.Resize(size=25, interpolation=1)(data_in[:50, :100, :3])
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
# test 4D input with resize
        data_batch_in = np.random.uniform(0, 255, (3, 300, 200, 3)).astype(dtype)
        out_batch_nd = transforms.CropResize(0, 0, 100, 50, (25, 25), 1)(data_batch_in)
        for i in range(len(out_batch_nd)):
            actual = transforms.Resize(size=25, interpolation=1)(data_batch_in[i][:50, :100, :3]).asnumpy()
expected = out_batch_nd[i].asnumpy()
assert_almost_equal(expected, actual)
        # resize height and width must be greater than 0
        transformer = transforms.CropResize(0, 0, 100, 50, (-25, 25), 1)
        assertRaises(MXNetError, transformer, data_in)
        # crop height and width must be greater than 0
        transformer = transforms.CropResize(0, 0, -100, -50)
        assertRaises(MXNetError, transformer, data_in)
        # test a crop region extending beyond the input bounds
        transformer = transforms.CropResize(150, 200, 200, 500)
        assertRaises(MXNetError, transformer, data_in)
        assertRaises(MXNetError, transformer, data_batch_in)
for dtype in ['uint8', 'float32', 'float64']:
_test_crop_resize_with_diff_type(dtype)
# test npx.image.crop backward
def test_crop_backward(test_nd_arr, TestCase):
a_np = test_nd_arr.asnumpy()
b_np = a_np[(slice(TestCase.y, TestCase.y + TestCase.height), slice(TestCase.x, TestCase.x + TestCase.width), slice(0, 3))]
data = mx.sym.Variable('data')
crop_sym = mx.sym.image.crop(data, TestCase.x, TestCase.y, TestCase.width, TestCase.height)
expected_in_grad = np.zeros_like(np.array(a_np))
expected_in_grad[(slice(TestCase.y, TestCase.y + TestCase.height), slice(TestCase.x, TestCase.x + TestCase.width), slice(0, 3))] = b_np
check_symbolic_backward(crop_sym, [a_np], [b_np], [expected_in_grad])
TestCase = namedtuple('TestCase', ['x', 'y', 'width', 'height'])
test_list = [TestCase(0, 0, 3, 3), TestCase(2, 1, 1, 2), TestCase(0, 1, 3, 2)]
for dtype in ['uint8', 'float32', 'float64']:
data_in = np.arange(60).reshape((5, 4, 3)).astype(dtype)
for test_case in test_list:
test_crop_backward(data_in, test_case)
@with_seed()
@use_np
def test_flip_left_right():
data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8)
flip_in = data_in[:, ::-1, :]
data_trans = npx.image.flip_left_right(np.array(data_in, dtype='uint8'))
assert_almost_equal(flip_in, data_trans.asnumpy())
@with_seed()
@use_np
def test_flip_top_bottom():
data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8)
flip_in = data_in[::-1, :, :]
data_trans = npx.image.flip_top_bottom(np.array(data_in, dtype='uint8'))
assert_almost_equal(flip_in, data_trans.asnumpy())
@with_seed()
@use_np
def test_transformer():
from mxnet.gluon.data.vision import transforms
transform = transforms.Compose([
transforms.Resize(300),
transforms.Resize(300, keep_ratio=True),
transforms.CenterCrop(256),
transforms.RandomCrop(256, pad=16),
transforms.RandomResizedCrop(224),
transforms.RandomFlipLeftRight(),
transforms.RandomColorJitter(0.1, 0.1, 0.1, 0.1),
transforms.RandomBrightness(0.1),
transforms.RandomContrast(0.1),
transforms.RandomSaturation(0.1),
transforms.RandomHue(0.1),
transforms.RandomLighting(0.1),
transforms.ToTensor(),
transforms.RandomRotation([-10., 10.]),
transforms.Normalize([0, 0, 0], [1, 1, 1])])
transform(mx.np.ones((245, 480, 3), dtype='uint8')).wait_to_read()
@with_seed()
@use_np
def test_random_crop():
x = mx.np.ones((245, 480, 3), dtype='uint8')
y = mx.npx.image.random_crop(x, width=100, height=100)
assert y.shape == (100, 100, 3)
@with_seed()
@use_np
def test_random_resize_crop():
x = mx.np.ones((245, 480, 3), dtype='uint8')
y = mx.npx.image.random_resized_crop(x, width=100, height=100)
assert y.shape == (100, 100, 3)
@with_seed()
@use_np
def test_hybrid_transformer():
from mxnet.gluon.data.vision import transforms
transform = transforms.HybridCompose([
transforms.Resize(300),
transforms.Resize(300, keep_ratio=True),
transforms.CenterCrop(256),
transforms.RandomCrop(256, pad=16),
transforms.RandomFlipLeftRight(),
transforms.RandomColorJitter(0.1, 0.1, 0.1, 0.1),
transforms.RandomBrightness(0.1),
transforms.RandomContrast(0.1),
transforms.RandomSaturation(0.1),
transforms.RandomHue(0.1),
transforms.RandomLighting(0.1),
transforms.ToTensor(),
transforms.Normalize([0, 0, 0], [1, 1, 1])])
transform(mx.np.ones((245, 480, 3), dtype='uint8')).wait_to_read()
@xfail_when_nonstandard_decimal_separator
@with_seed()
@use_np
def test_rotate():
transformer = transforms.Rotate(10.)
assertRaises(TypeError, transformer, mx.np.ones((3, 30, 60), dtype='uint8'))
single_image = mx.np.ones((3, 30, 60), dtype='float32')
single_output = transformer(single_image)
assert same(single_output.shape, (3, 30, 60))
batch_image = mx.np.ones((3, 3, 30, 60), dtype='float32')
batch_output = transformer(batch_image)
assert same(batch_output.shape, (3, 3, 30, 60))
input_image = np.array([[[0., 0., 0.],
[0., 0., 1.],
[0., 0., 0.]]])
rotation_angles_expected_outs = [
(90., np.array([[[0., 1., 0.],
[0., 0., 0.],
[0., 0., 0.]]])),
(180., np.array([[[0., 0., 0.],
[1., 0., 0.],
[0., 0., 0.]]])),
(270., np.array([[[0., 0., 0.],
[0., 0., 0.],
[0., 1., 0.]]])),
(360., np.array([[[0., 0., 0.],
[0., 0., 1.],
[0., 0., 0.]]])),
]
for rot_angle, expected_result in rotation_angles_expected_outs:
transformer = transforms.Rotate(rot_angle)
ans = transformer(input_image)
print(type(ans), ans, type(expected_result), expected_result)
assert_almost_equal(ans.asnumpy(), expected_result.asnumpy(), atol=1e-6)
@with_seed()
@use_np
def test_random_rotation():
# test exceptions for probability input outside of [0,1]
assertRaises(ValueError, transforms.RandomRotation, [-10, 10.], rotate_with_proba=1.1)
assertRaises(ValueError, transforms.RandomRotation, [-10, 10.], rotate_with_proba=-0.3)
# test `forward`
transformer = transforms.RandomRotation([-10, 10.])
assertRaises(TypeError, transformer, mx.np.ones((3, 30, 60), dtype='uint8'))
single_image = mx.np.ones((3, 30, 60), dtype='float32')
single_output = transformer(single_image)
assert same(single_output.shape, (3, 30, 60))
batch_image = mx.np.ones((3, 3, 30, 60), dtype='float32')
batch_output = transformer(batch_image)
assert same(batch_output.shape, (3, 3, 30, 60))
# test identity (rotate_with_proba = 0)
transformer = transforms.RandomRotation([-100., 100.], rotate_with_proba=0.0)
data = mx.np.random.normal(size=(3, 30, 60))
assert_almost_equal(data.asnumpy(), transformer(data).asnumpy())
@with_seed()
@use_np
def test_random_transforms():
from mxnet.gluon.data.vision import transforms
tmp_t = transforms.Compose([transforms.Resize(300), transforms.RandomResizedCrop(224)])
transform = transforms.Compose([transforms.RandomApply(tmp_t, 0.5)])
img = mx.np.ones((10, 10, 3), dtype='uint8')
iteration = 1000
num_apply = 0
for _ in range(iteration):
out = transform(img)
if out.shape[0] == 224:
num_apply += 1
assert_almost_equal(num_apply/float(iteration), 0.5, 0.1)
@xfail_when_nonstandard_decimal_separator
@with_seed()
@use_np
def test_random_gray():
from mxnet.gluon.data.vision import transforms
transform = transforms.RandomGray(0.5)
img = mx.np.ones((4, 4, 3), dtype='uint8')
pixel = img[0, 0, 0].asnumpy()
iteration = 1000
num_apply = 0
for _ in range(iteration):
out = transform(img)
if out[0][0][0].asnumpy() != pixel:
num_apply += 1
assert_almost_equal(num_apply/float(iteration), 0.5, 0.1)
transform = transforms.RandomGray(0.5)
transform.hybridize()
img = mx.np.ones((4, 4, 3), dtype='uint8')
pixel = img[0, 0, 0].asnumpy()
iteration = 1000
num_apply = 0
for _ in range(iteration):
out = transform(img)
if out[0][0][0].asnumpy() != pixel:
num_apply += 1
assert_almost_equal(num_apply/float(iteration), 0.5, 0.1)
@with_seed()
@use_np
def test_bbox_random_flip():
from mxnet.gluon.contrib.data.vision.transforms.bbox import ImageBboxRandomFlipLeftRight
transform = ImageBboxRandomFlipLeftRight(0.5)
iteration = 200
num_apply = 0
for _ in range(iteration):
img = mx.np.ones((10, 10, 3), dtype='uint8')
img[0, 0, 0] = 10
bbox = mx.np.array([[1, 2, 3, 4, 0]])
im_out, im_bbox = transform(img, bbox)
if im_bbox[0][0].asnumpy() != 1 and im_out[0, 0, 0].asnumpy() != 10:
num_apply += 1
assert_almost_equal(np.array([num_apply])/float(iteration), 0.5, 0.5)
@with_seed()
@use_np
def test_bbox_crop():
from mxnet.gluon.contrib.data.vision.transforms.bbox import ImageBboxCrop
transform = ImageBboxCrop((0, 0, 3, 3))
img = mx.np.ones((10, 10, 3), dtype='uint8')
bbox = mx.np.array([[0, 1, 3, 4, 0]])
im_out, im_bbox = transform(img, bbox)
assert im_out.shape == (3, 3, 3)
assert im_bbox[0][2] == 3
|
unknown
|
codeparrot/codeparrot-clean
| ||
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
//-----------------------------------------------------------------------------
//
// class IntAttribute
//
//-----------------------------------------------------------------------------
#include <ImfIntAttribute.h>
OPENEXR_IMF_INTERNAL_NAMESPACE_SOURCE_ENTER
template <>
const char *
IntAttribute::staticTypeName ()
{
return "int";
}
OPENEXR_IMF_INTERNAL_NAMESPACE_SOURCE_EXIT
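// Editor's note (illustrative, not part of the OpenEXR source): IntAttribute
// is the TypedAttribute<int> instantiation, and staticTypeName() supplies the
// type tag written to the file header; a hypothetical use is
//
//   header.insert ("frameNumber", IntAttribute (42));
//
// which stores an attribute whose on-disk type name is "int".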
|
cpp
|
github
|
https://github.com/opencv/opencv
|
3rdparty/openexr/IlmImf/ImfIntAttribute.cpp
|
#!/usr/bin/python
# walk vips and generate member definitions for all operators
# sample member definition:
# VImage VImage::invert( VOption *options )
# throw( VError )
# {
# VImage out;
#
# call( "invert",
# (options ? options : VImage::option())->
# set( "in", *this )->
# set( "out", &out ) );
#
# return( out );
# }
import sys
import re
import logging
#logging.basicConfig(level = logging.DEBUG)
from gi.repository import Vips, GObject
vips_type_image = GObject.GType.from_name("VipsImage")
vips_type_operation = GObject.GType.from_name("VipsOperation")
param_enum = GObject.GType.from_name("GParamEnum")
# turn a GType into a C++ type
gtype_to_cpp = {
"VipsImage" : "VImage",
"gint" : "int",
"gdouble" : "double",
"gboolean" : "bool",
"gchararray" : "char *",
"VipsArrayDouble" : "std::vector<double>",
"VipsArrayImage" : "std::vector<VImage>",
"VipsBlob" : "VipsBlob *"
}
def get_ctype(prop):
# enum params use the C name as their name
if GObject.type_is_a(param_enum, prop):
return prop.value_type.name
return gtype_to_cpp[prop.value_type.name]
def find_required(op):
required = []
for prop in op.props:
flags = op.get_argument_flags(prop.name)
if not flags & Vips.ArgumentFlags.REQUIRED:
continue
if flags & Vips.ArgumentFlags.DEPRECATED:
continue
required.append(prop)
def priority_sort(a, b):
pa = op.get_argument_priority(a.name)
pb = op.get_argument_priority(b.name)
return pa - pb
required.sort(priority_sort)
return required
# find the first input image ... this will be used as "this"
def find_first_input_image(op, required):
found = False
for prop in required:
flags = op.get_argument_flags(prop.name)
if not flags & Vips.ArgumentFlags.INPUT:
continue
if GObject.type_is_a(vips_type_image, prop.value_type):
found = True
break
if not found:
return None
return prop
# find the first output arg ... this will be used as the result
def find_first_output(op, required):
found = False
for prop in required:
flags = op.get_argument_flags(prop.name)
if not flags & Vips.ArgumentFlags.OUTPUT:
continue
found = True
break
if not found:
return None
return prop
# swap any "-" for "_"
def cppize(name):
return re.sub('-', '_', name)
def gen_arg_list(op, required):
first = True
for prop in required:
if not first:
print ',',
else:
first = False
print get_ctype(prop),
# output params are passed by reference
flags = op.get_argument_flags(prop.name)
if flags & Vips.ArgumentFlags.OUTPUT:
print '*',
print cppize(prop.name),
if not first:
print ',',
print 'VOption *options',
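# As a sketch (argument names depend on the introspected operator): for an
# operator whose only remaining required argument is an input image "right",
# the function above emits roughly:  VImage right , VOption *options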
def gen_operation(cls):
op = Vips.Operation.new(cls.name)
gtype = Vips.type_find("VipsOperation", cls.name)
nickname = Vips.nickname_find(gtype)
all_required = find_required(op)
result = find_first_output(op, all_required)
this = find_first_input_image(op, all_required)
# shallow copy
required = all_required[:]
    if result is not None:
required.remove(result)
    if this is not None:
required.remove(this)
    if result is None:
print 'void',
else:
print '%s' % gtype_to_cpp[result.value_type.name],
print 'VImage::%s(' % nickname,
gen_arg_list(op, required)
print ')'
print ' throw( VError )'
print '{'
    if result is not None:
print ' %s %s;' % (get_ctype(result), cppize(result.name))
print ''
print ' call( "%s"' % nickname,
first = True
for prop in all_required:
if first:
print ','
print ' (options ? options : VImage::option())',
first = False
print '->'
print ' ',
if prop == this:
print 'set( "%s", *this )' % prop.name,
else:
flags = op.get_argument_flags(prop.name)
arg = cppize(prop.name)
if flags & Vips.ArgumentFlags.OUTPUT and prop == result:
arg = '&' + arg
print 'set( "%s", %s )' % (prop.name, arg),
print ');'
    if result is not None:
print ''
print ' return( %s );' % cppize(result.name)
print '}'
print ''
# we have a few synonyms ... don't generate twice
generated = {}
def find_class_methods(cls):
if not cls.is_abstract():
gtype = Vips.type_find("VipsOperation", cls.name)
nickname = Vips.nickname_find(gtype)
if not nickname in generated:
gen_operation(cls)
generated[nickname] = True
if len(cls.children) > 0:
for child in cls.children:
find_class_methods(child)
if __name__ == '__main__':
find_class_methods(vips_type_operation)
|
unknown
|
codeparrot/codeparrot-clean
| ||
use ignore::gitignore::GitignoreBuilder;
const IGNORE_FILE: &'static str = "tests/gitignore_skip_bom.gitignore";
/// Skip a Byte-Order Mark (BOM) at the beginning of the file, matching Git's
/// behavior.
///
/// Ref: <https://github.com/BurntSushi/ripgrep/issues/2177>
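/// A UTF-8 BOM is the three-byte sequence `EF BB BF`. The fixture's exact
/// contents are not shown here; as a sketch, the file would begin with a BOM
/// immediately followed by an ordinary pattern such as `ignore/**`, which the
/// matcher must handle as if the BOM were absent.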
#[test]
fn gitignore_skip_bom() {
let mut builder = GitignoreBuilder::new("ROOT");
let error = builder.add(IGNORE_FILE);
assert!(error.is_none(), "failed to open gitignore file");
let g = builder.build().unwrap();
assert!(g.matched("ignore/this/path", false).is_ignore());
}
|
rust
|
github
|
https://github.com/BurntSushi/ripgrep
|
crates/ignore/tests/gitignore_skip_bom.rs
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class FilterDatasetTest(test.TestCase):
def testFilterDataset(self):
components = (
np.arange(7, dtype=np.int64),
np.array([[1, 2, 3]], dtype=np.int64) * np.arange(
7, dtype=np.int64)[:, np.newaxis],
np.array(37.0, dtype=np.float64) * np.arange(7)
)
count = array_ops.placeholder(dtypes.int64, shape=[])
modulus = array_ops.placeholder(dtypes.int64)
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = (
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count)
.filter(lambda x, _y, _z: math_ops.equal(math_ops.mod(x, modulus), 0))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Test that we can dynamically feed a different modulus value for each
# iterator.
def do_test(count_val, modulus_val):
sess.run(init_op, feed_dict={count: count_val, modulus: modulus_val})
for _ in range(count_val):
for i in [x for x in range(7) if x**2 % modulus_val == 0]:
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
do_test(14, 2)
do_test(4, 18)
# Test an empty dataset.
do_test(0, 1)
def testFilterRange(self):
dataset = dataset_ops.Dataset.range(100).filter(
lambda x: math_ops.not_equal(math_ops.mod(x, 3), 2))
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
self.assertEqual(0, sess.run(get_next))
self.assertEqual(1, sess.run(get_next))
self.assertEqual(3, sess.run(get_next))
def testFilterDict(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.filter(lambda d: math_ops.equal(d["bar"] % 2, 0))
.map(lambda d: d["foo"] + d["bar"])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
if (i ** 2) % 2 == 0:
self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testUseStepContainerInFilter(self):
input_data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64)
# Define a predicate that returns true for the first element of
# the sequence and not the second, and uses `tf.map_fn()`.
def _predicate(xs):
squared_xs = functional_ops.map_fn(lambda x: x * x, xs)
summed = math_ops.reduce_sum(squared_xs)
return math_ops.equal(summed, 1 + 4 + 9)
iterator = (
dataset_ops.Dataset.from_tensor_slices([[1, 2, 3], [4, 5, 6]])
.filter(_predicate)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
self.assertAllEqual(input_data[0], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def assertSparseValuesEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1])), i
def _filter_fn(_, i):
return math_ops.equal(i % 2, 0)
iterator = (
dataset_ops.Dataset.range(10).map(_map_fn).filter(_filter_fn).map(
lambda x, i: x).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(5):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _map_fn(i * 2)[0])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
class FilterDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_filter_range_graph(self, div):
return dataset_ops.Dataset.range(100).filter(
lambda x: math_ops.not_equal(math_ops.mod(x, div), 2))
def testFilterCore(self):
div = 3
    num_outputs = np.sum([x % div != 2 for x in range(100)])
self.run_core_tests(lambda: self._build_filter_range_graph(div),
lambda: self._build_filter_range_graph(div * 2),
num_outputs)
def _build_filter_dict_graph(self):
return dataset_ops.Dataset.range(10).map(
lambda x: {"foo": x * 2, "bar": x ** 2}).filter(
lambda d: math_ops.equal(d["bar"] % 2, 0)).map(
lambda d: d["foo"] + d["bar"])
def testFilterDictCore(self):
num_outputs = np.sum([(x**2) % 2 == 0 for x in range(10)])
self.run_core_tests(self._build_filter_dict_graph, None, num_outputs)
def _build_sparse_filter(self):
def _map_fn(i):
return sparse_tensor.SparseTensor(
indices=[[0, 0]], values=(i * [1]), dense_shape=[1, 1]), i
def _filter_fn(_, i):
return math_ops.equal(i % 2, 0)
return dataset_ops.Dataset.range(10).map(_map_fn).filter(_filter_fn).map(
lambda x, i: x)
if __name__ == "__main__":
test.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import os.path, subprocess, sys
from build.project import Project
class AutotoolsProject(Project):
    def __init__(self, url, alternative_url, md5, installed,
                 configure_args=None,
autogen=False,
cppflags='',
ldflags='',
libs='',
shared=False,
install_prefix=None,
install_target='install',
use_destdir=False,
**kwargs):
Project.__init__(self, url, alternative_url, md5, installed, **kwargs)
        self.configure_args = configure_args or []  # avoid a shared mutable default
self.autogen = autogen
self.cppflags = cppflags
self.ldflags = ldflags
self.libs = libs
self.shared = shared
self.install_prefix = install_prefix
self.install_target = install_target
self.use_destdir = use_destdir
def _filter_cflags(self, flags):
if self.shared:
# filter out certain flags which are only useful with
# static linking
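            # e.g. '-O2 -fvisibility=hidden -g' becomes '-O2 -g'; note the
            # space-delimited replace below only matches flags that have
            # spaces on both sides.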
for f in ('-fvisibility=hidden', '-fdata-sections', '-ffunction-sections'):
flags = flags.replace(' ' + f + ' ', ' ')
return flags
def configure(self, toolchain):
src = self.unpack(toolchain)
if self.autogen:
if sys.platform == 'darwin':
subprocess.check_call(['glibtoolize', '--force'], cwd=src)
else:
subprocess.check_call(['libtoolize', '--force'], cwd=src)
subprocess.check_call(['aclocal'], cwd=src)
subprocess.check_call(['automake', '--add-missing', '--force-missing', '--foreign'], cwd=src)
subprocess.check_call(['autoconf'], cwd=src)
build = self.make_build_path(toolchain)
install_prefix = self.install_prefix
if install_prefix is None:
install_prefix = toolchain.install_prefix
configure = [
os.path.join(src, 'configure'),
'CC=' + toolchain.cc,
'CXX=' + toolchain.cxx,
'CFLAGS=' + self._filter_cflags(toolchain.cflags),
'CXXFLAGS=' + self._filter_cflags(toolchain.cxxflags),
'CPPFLAGS=' + toolchain.cppflags + ' ' + self.cppflags,
'LDFLAGS=' + toolchain.ldflags + ' ' + self.ldflags,
'LIBS=' + toolchain.libs + ' ' + self.libs,
'AR=' + toolchain.ar,
'ARFLAGS=' + toolchain.arflags,
'RANLIB=' + toolchain.ranlib,
'STRIP=' + toolchain.strip,
'--host=' + toolchain.arch,
'--prefix=' + install_prefix,
'--enable-silent-rules',
] + self.configure_args
subprocess.check_call(configure, cwd=build, env=toolchain.env)
return build
def build(self, toolchain):
build = self.configure(toolchain)
destdir = []
if self.use_destdir:
destdir = ['DESTDIR=' + toolchain.install_prefix]
subprocess.check_call(['/usr/bin/make', '--quiet', '-j12'],
cwd=build, env=toolchain.env)
subprocess.check_call(['/usr/bin/make', '--quiet', self.install_target] + destdir,
cwd=build, env=toolchain.env)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
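    # For example (illustrative values), the CharField entry
    # 'varchar(%(max_length)s)' interpolated against {'max_length': 100}
    # yields 'varchar(100)'.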
data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time',
}
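    # With TEST_CHARSET='utf8' and TEST_COLLATION='utf8_bin' (illustrative
    # settings), the method below returns 'CHARACTER SET utf8 COLLATE utf8_bin'.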
def sql_table_creation_suffix(self):
suffix = []
if self.connection.settings_dict['TEST_CHARSET']:
suffix.append('CHARACTER SET %s' % self.connection.settings_dict['TEST_CHARSET'])
if self.connection.settings_dict['TEST_COLLATION']:
suffix.append('COLLATE %s' % self.connection.settings_dict['TEST_COLLATION'])
return ' '.join(suffix)
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"All inline references are pending under MySQL"
return [], True
def sql_for_inline_many_to_many_references(self, model, field, style):
from django.db import models
opts = model._meta
qn = self.connection.ops.quote_name
table_output = [
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_column_name())),
style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL')),
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_reverse_name())),
style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL'))
]
deferred = [
(field.m2m_db_table(), field.m2m_column_name(), opts.db_table,
opts.pk.column),
(field.m2m_db_table(), field.m2m_reverse_name(),
field.rel.to._meta.db_table, field.rel.to._meta.pk.column)
]
return table_output, deferred
|
unknown
|
codeparrot/codeparrot-clean
| ||
{
"KILL": {
"summary": "Terminates a function during execution.",
"complexity": "O(1)",
"group": "scripting",
"since": "7.0.0",
"arity": 2,
"container": "FUNCTION",
"function": "functionKillCommand",
"command_flags": [
"NOSCRIPT",
"ALLOW_BUSY"
],
"acl_categories": [
"SCRIPTING"
],
"command_tips": [
"REQUEST_POLICY:ALL_SHARDS",
"RESPONSE_POLICY:ONE_SUCCEEDED"
],
"reply_schema": {
"const": "OK"
}
}
}
|
json
|
github
|
https://github.com/redis/redis
|
src/commands/function-kill.json
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
import pyauto_functional # Must come before pyauto (and thus, policy_base).
import policy_base
sys.path.append('/usr/local') # Required to import autotest libs.
from autotest.cros import constants
from autotest.cros import cryptohome
class ChromeosEphemeral(policy_base.PolicyTestBase):
"""Tests a policy that makes users ephemeral.
When this policy is enabled, no persistent information in the form of
cryptohome shadow directories or local state prefs should be created for
users. Additionally, any persistent information previously accumulated should
be cleared when a user first logs in after enabling the policy."""
_usernames = ('alice@example.com', 'bob@example.com')
def _SetEphemeralUsersEnabled(self, enabled):
"""Sets the ephemeral users device policy.
The show_user_names policy is set to False to ensure that even if the local
state is not being automatically cleared, the login screen never shows user
pods. This is required by the Login browser automation call.
"""
self.SetDevicePolicy({'ephemeral_users_enabled': enabled,
'show_user_names': False})
def _DoesVaultDirectoryExist(self, user_index):
user_hash = cryptohome.get_user_hash(self._usernames[user_index])
return os.path.exists(os.path.join(constants.SHADOW_ROOT, user_hash))
def _AssertLocalStatePrefsSet(self, user_indexes):
expected = sorted([self._usernames[index] for index in user_indexes])
# The OAuthTokenStatus pref is populated asynchronously. Checking whether it
# is set would lead to an ugly race.
for pref in ['LoggedInUsers', 'UserImages', 'UserDisplayEmail', ]:
actual = sorted(self.GetLocalStatePrefsInfo().Prefs(pref))
self.assertEqual(actual, expected,
msg='Expected to find prefs in local state for users.')
def _AssertLocalStatePrefsEmpty(self):
for pref in ['LoggedInUsers',
'UserImages',
'UserDisplayEmail',
'OAuthTokenStatus']:
self.assertFalse(self.GetLocalStatePrefsInfo().Prefs(pref),
msg='Expected to not find prefs in local state for any user.')
def _AssertVaultDirectoryExists(self, user_index):
self.assertTrue(self._DoesVaultDirectoryExist(user_index=user_index),
msg='Expected vault shadow directory to exist.')
def _AssertVaultDirectoryDoesNotExist(self, user_index):
self.assertFalse(self._DoesVaultDirectoryExist(user_index=user_index),
msg='Expected vault shadow directory to not exist.')
def _AssertVaultMounted(self, user_index, ephemeral):
if ephemeral:
device_regex = constants.CRYPTOHOME_DEV_REGEX_REGULAR_USER_EPHEMERAL
fs_regex = constants.CRYPTOHOME_FS_REGEX_TMPFS
else:
device_regex = constants.CRYPTOHOME_DEV_REGEX_REGULAR_USER_SHADOW
fs_regex = constants.CRYPTOHOME_FS_REGEX_ANY
self.assertTrue(
cryptohome.is_vault_mounted(device_regex=device_regex,
fs_regex=fs_regex,
user=self._usernames[user_index],
allow_fail=True),
        msg='Expected vault backed by %s to be mounted.' %
            ('tmpfs' if ephemeral else 'shadow directory'))
def _AssertNoVaultMounted(self):
self.assertFalse(cryptohome.is_vault_mounted(allow_fail=True),
msg='Did not expect any vault to be mounted.')
def Login(self, user_index):
"""Convenience method to login to the usr at the given index."""
self.assertFalse(self.GetLoginInfo()['is_logged_in'],
msg='Expected to be logged out.')
policy_base.PolicyTestBase.Login(self,
self._usernames[user_index],
'dummy_password')
self.assertTrue(self.GetLoginInfo()['is_logged_in'],
msg='Expected to be logged in.')
def testEnablingBeforeSession(self):
"""Checks that a new session can be made ephemeral."""
self.PrepareToWaitForLoginFormReload()
self._SetEphemeralUsersEnabled(True)
self.WaitForLoginFormReload()
self.Login(user_index=0)
self._AssertLocalStatePrefsEmpty()
self._AssertVaultMounted(user_index=0, ephemeral=True)
self.Logout()
self._AssertLocalStatePrefsEmpty()
self._AssertNoVaultMounted()
self._AssertVaultDirectoryDoesNotExist(user_index=0)
def testEnablingDuringSession(self):
"""Checks that an existing non-ephemeral session is not made ephemeral."""
self.PrepareToWaitForLoginFormReload()
self._SetEphemeralUsersEnabled(False)
self.WaitForLoginFormReload()
self.Login(user_index=0)
self._AssertLocalStatePrefsSet(user_indexes=[0])
self._AssertVaultMounted(user_index=0, ephemeral=False)
self._SetEphemeralUsersEnabled(True)
self._AssertLocalStatePrefsSet(user_indexes=[0])
self._AssertVaultMounted(user_index=0, ephemeral=False)
self.Logout()
self._AssertLocalStatePrefsEmpty()
self._AssertNoVaultMounted()
self._AssertVaultDirectoryDoesNotExist(user_index=0)
def testDisablingDuringSession(self):
"""Checks that an existing ephemeral session is not made non-ephemeral."""
self.PrepareToWaitForLoginFormReload()
self._SetEphemeralUsersEnabled(True)
self.WaitForLoginFormReload()
self.Login(user_index=0)
self._AssertVaultMounted(user_index=0, ephemeral=True)
self._SetEphemeralUsersEnabled(False)
self._AssertVaultMounted(user_index=0, ephemeral=True)
self.Logout()
self._AssertLocalStatePrefsEmpty()
self._AssertNoVaultMounted()
self._AssertVaultDirectoryDoesNotExist(user_index=0)
def testEnablingEphemeralUsersCleansUp(self):
"""Checks that persistent information is cleared."""
self.PrepareToWaitForLoginFormReload()
self._SetEphemeralUsersEnabled(False)
self.WaitForLoginFormReload()
self.Login(user_index=0)
self.Logout()
self._AssertLocalStatePrefsSet(user_indexes=[0])
self.Login(user_index=1)
self.Logout()
self._AssertLocalStatePrefsSet(user_indexes=[0, 1])
self._AssertVaultDirectoryExists(user_index=0)
self._AssertVaultDirectoryExists(user_index=1)
self._SetEphemeralUsersEnabled(True)
self.Login(user_index=0)
self._AssertVaultMounted(user_index=0, ephemeral=True)
self.Logout()
self._AssertVaultDirectoryDoesNotExist(user_index=0)
self._AssertVaultDirectoryDoesNotExist(user_index=1)
if __name__ == '__main__':
pyauto_functional.Main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import gdata.gauth
import atom.http_core
import gdata.test_config as conf
PRIVATE_TEST_KEY = """
-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
Lw03eHTNQghS0A==
-----END PRIVATE KEY-----"""
class AuthSubTest(unittest.TestCase):
def test_generate_request_url(self):
url = gdata.gauth.generate_auth_sub_url('http://example.com',
['http://example.net/scope1'])
self.assertTrue(isinstance(url, atom.http_core.Uri))
self.assertEqual(url.query['secure'], '0')
self.assertEqual(url.query['session'], '1')
self.assertEqual(url.query['scope'], 'http://example.net/scope1')
self.assertEqual(atom.http_core.Uri.parse_uri(
url.query['next']).query['auth_sub_scopes'],
'http://example.net/scope1')
self.assertEqual(atom.http_core.Uri.parse_uri(url.query['next']).path,
'/')
self.assertEqual(atom.http_core.Uri.parse_uri(url.query['next']).host,
'example.com')
def test_from_url(self):
token_str = gdata.gauth.auth_sub_string_from_url(
'http://example.com?token=123abc')[0]
self.assertEqual(token_str, '123abc')
def test_from_http_body(self):
token_str = gdata.gauth.auth_sub_string_from_body('Something\n'
'Token=DQAA...7DCTN\n'
'Expiration=20061004T123456Z\n')
self.assertEqual(token_str, 'DQAA...7DCTN')
def test_modify_request(self):
token = gdata.gauth.AuthSubToken('tval')
request = atom.http_core.HttpRequest()
token.modify_request(request)
self.assertEqual(request.headers['Authorization'], 'AuthSub token=tval')
def test_create_and_upgrade_tokens(self):
token = gdata.gauth.AuthSubToken.from_url(
'http://example.com?token=123abc')
self.assertTrue(isinstance(token, gdata.gauth.AuthSubToken))
self.assertEqual(token.token_string, '123abc')
self.assertEqual(token.scopes, [])
token._upgrade_token('Token=456def')
self.assertEqual(token.token_string, '456def')
self.assertEqual(token.scopes, [])
class SecureAuthSubTest(unittest.TestCase):
def test_build_data(self):
request = atom.http_core.HttpRequest(method='PUT')
request.uri = atom.http_core.Uri.parse_uri('http://example.com/foo?a=1')
data = gdata.gauth.build_auth_sub_data(request, 1234567890, 'mynonce')
self.assertEqual(data,
'PUT http://example.com/foo?a=1 1234567890 mynonce')
def test_generate_signature(self):
request = atom.http_core.HttpRequest(
method='GET', uri=atom.http_core.Uri(host='example.com', path='/foo',
query={'a': '1'}))
data = gdata.gauth.build_auth_sub_data(request, 1134567890, 'p234908')
self.assertEqual(data,
'GET http://example.com/foo?a=1 1134567890 p234908')
self.assertEqual(
gdata.gauth.generate_signature(data, PRIVATE_TEST_KEY),
'GeBfeIDnT41dvLquPgDB4U5D4hfxqaHk/5LX1kccNBnL4BjsHWU1djbEp7xp3BL9ab'
'QtLrK7oa/aHEHtGRUZGg87O+ND8iDPR76WFXAruuN8O8GCMqCDdPduNPY++LYO4MdJ'
'BZNY974Nn0m6Hc0/T4M1ElqvPhl61fkXMm+ElSM=')
class TokensToAndFromBlobsTest(unittest.TestCase):
def test_client_login_conversion(self):
token = gdata.gauth.ClientLoginToken('test|key')
copy = gdata.gauth.token_from_blob(gdata.gauth.token_to_blob(token))
self.assertEqual(token.token_string, copy.token_string)
self.assertTrue(isinstance(copy, gdata.gauth.ClientLoginToken))
def test_authsub_conversion(self):
token = gdata.gauth.AuthSubToken('test|key')
copy = gdata.gauth.token_from_blob(gdata.gauth.token_to_blob(token))
self.assertEqual(token.token_string, copy.token_string)
self.assertTrue(isinstance(copy, gdata.gauth.AuthSubToken))
scopes = ['http://example.com', 'http://other||test', 'thir|d']
token = gdata.gauth.AuthSubToken('key-=', scopes)
copy = gdata.gauth.token_from_blob(gdata.gauth.token_to_blob(token))
self.assertEqual(token.token_string, copy.token_string)
self.assertTrue(isinstance(copy, gdata.gauth.AuthSubToken))
self.assertEqual(token.scopes, scopes)
def test_join_and_split(self):
token_string = gdata.gauth._join_token_parts('1x', 'test|string', '%x%',
'', None)
self.assertEqual(token_string, '1x|test%7Cstring|%25x%25||')
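    # Note how '|' inside a part is escaped to %7C and '%' to %25, while
    # empty-string and None parts both serialize to empty trailing fields.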
token_type, a, b, c, d = gdata.gauth._split_token_parts(token_string)
self.assertEqual(token_type, '1x')
self.assertEqual(a, 'test|string')
self.assertEqual(b, '%x%')
self.assertTrue(c is None)
self.assertTrue(d is None)
def test_secure_authsub_conversion(self):
token = gdata.gauth.SecureAuthSubToken(
'%^%', 'myRsaKey', ['http://example.com', 'http://example.org'])
copy = gdata.gauth.token_from_blob(gdata.gauth.token_to_blob(token))
self.assertEqual(copy.token_string, '%^%')
self.assertEqual(copy.rsa_private_key, 'myRsaKey')
self.assertEqual(copy.scopes,
['http://example.com', 'http://example.org'])
token = gdata.gauth.SecureAuthSubToken(rsa_private_key='f',
token_string='b')
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(blob, '1s|b|f')
copy = gdata.gauth.token_from_blob(blob)
self.assertEqual(copy.token_string, 'b')
self.assertEqual(copy.rsa_private_key, 'f')
self.assertEqual(copy.scopes, [])
token = gdata.gauth.SecureAuthSubToken(None, '')
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(blob, '1s||')
copy = gdata.gauth.token_from_blob(blob)
self.assertEqual(copy.token_string, None)
self.assertEqual(copy.rsa_private_key, None)
self.assertEqual(copy.scopes, [])
token = gdata.gauth.SecureAuthSubToken('', None)
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(blob, '1s||')
copy = gdata.gauth.token_from_blob(blob)
self.assertEqual(copy.token_string, None)
self.assertEqual(copy.rsa_private_key, None)
self.assertEqual(copy.scopes, [])
token = gdata.gauth.SecureAuthSubToken(
None, None, ['http://example.net', 'http://google.com'])
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(
blob, '1s|||http%3A%2F%2Fexample.net|http%3A%2F%2Fgoogle.com')
copy = gdata.gauth.token_from_blob(blob)
self.assertTrue(copy.token_string is None)
self.assertTrue(copy.rsa_private_key is None)
self.assertEqual(copy.scopes, ['http://example.net', 'http://google.com'])
def test_oauth_rsa_conversion(self):
token = gdata.gauth.OAuthRsaToken(
'consumerKey', 'myRsa', 't', 'secret',
gdata.gauth.AUTHORIZED_REQUEST_TOKEN, 'http://example.com/next',
'verifier')
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(
blob, '1r|consumerKey|myRsa|t|secret|2|http%3A%2F%2Fexample.com'
'%2Fnext|verifier')
copy = gdata.gauth.token_from_blob(blob)
self.assertTrue(isinstance(copy, gdata.gauth.OAuthRsaToken))
self.assertEqual(copy.consumer_key, token.consumer_key)
self.assertEqual(copy.rsa_private_key, token.rsa_private_key)
self.assertEqual(copy.token, token.token)
self.assertEqual(copy.token_secret, token.token_secret)
self.assertEqual(copy.auth_state, token.auth_state)
self.assertEqual(copy.next, token.next)
self.assertEqual(copy.verifier, token.verifier)
token = gdata.gauth.OAuthRsaToken(
'', 'myRsa', 't', 'secret', 0)
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(blob, '1r||myRsa|t|secret|0||')
copy = gdata.gauth.token_from_blob(blob)
self.assertTrue(isinstance(copy, gdata.gauth.OAuthRsaToken))
self.assertFalse(copy.consumer_key == token.consumer_key)
self.assertTrue(copy.consumer_key is None)
self.assertEqual(copy.rsa_private_key, token.rsa_private_key)
self.assertEqual(copy.token, token.token)
self.assertEqual(copy.token_secret, token.token_secret)
self.assertEqual(copy.auth_state, token.auth_state)
self.assertEqual(copy.next, token.next)
self.assertTrue(copy.next is None)
self.assertEqual(copy.verifier, token.verifier)
self.assertTrue(copy.verifier is None)
token = gdata.gauth.OAuthRsaToken(
rsa_private_key='myRsa', token='t', token_secret='secret',
auth_state=gdata.gauth.ACCESS_TOKEN, verifier='v', consumer_key=None)
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(blob, '1r||myRsa|t|secret|3||v')
copy = gdata.gauth.token_from_blob(blob)
self.assertEqual(copy.consumer_key, token.consumer_key)
self.assertTrue(copy.consumer_key is None)
self.assertEqual(copy.rsa_private_key, token.rsa_private_key)
self.assertEqual(copy.token, token.token)
self.assertEqual(copy.token_secret, token.token_secret)
self.assertEqual(copy.auth_state, token.auth_state)
self.assertEqual(copy.next, token.next)
self.assertTrue(copy.next is None)
self.assertEqual(copy.verifier, token.verifier)
def test_oauth_hmac_conversion(self):
token = gdata.gauth.OAuthHmacToken(
'consumerKey', 'consumerSecret', 't', 'secret',
gdata.gauth.REQUEST_TOKEN, 'http://example.com/next', 'verifier')
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(
blob, '1h|consumerKey|consumerSecret|t|secret|1|http%3A%2F%2F'
'example.com%2Fnext|verifier')
copy = gdata.gauth.token_from_blob(blob)
self.assertTrue(isinstance(copy, gdata.gauth.OAuthHmacToken))
self.assertEqual(copy.consumer_key, token.consumer_key)
self.assertEqual(copy.consumer_secret, token.consumer_secret)
self.assertEqual(copy.token, token.token)
self.assertEqual(copy.token_secret, token.token_secret)
self.assertEqual(copy.auth_state, token.auth_state)
self.assertEqual(copy.next, token.next)
self.assertEqual(copy.verifier, token.verifier)
token = gdata.gauth.OAuthHmacToken(
consumer_secret='c,s', token='t', token_secret='secret',
auth_state=7, verifier='v', consumer_key=None)
blob = gdata.gauth.token_to_blob(token)
self.assertEqual(blob, '1h||c%2Cs|t|secret|7||v')
copy = gdata.gauth.token_from_blob(blob)
self.assertTrue(isinstance(copy, gdata.gauth.OAuthHmacToken))
self.assertEqual(copy.consumer_key, token.consumer_key)
self.assertTrue(copy.consumer_key is None)
self.assertEqual(copy.consumer_secret, token.consumer_secret)
self.assertEqual(copy.token, token.token)
self.assertEqual(copy.token_secret, token.token_secret)
self.assertEqual(copy.auth_state, token.auth_state)
self.assertEqual(copy.next, token.next)
self.assertTrue(copy.next is None)
self.assertEqual(copy.verifier, token.verifier)
def test_illegal_token_types(self):
class MyToken(object):
pass
token = MyToken()
self.assertRaises(gdata.gauth.UnsupportedTokenType,
gdata.gauth.token_to_blob, token)
blob = '~~z'
self.assertRaises(gdata.gauth.UnsupportedTokenType,
gdata.gauth.token_from_blob, blob)
class OAuthHmacTokenTests(unittest.TestCase):
def test_build_base_string(self):
request = atom.http_core.HttpRequest('http://example.com/', 'GET')
base_string = gdata.gauth.build_oauth_base_string(
request, 'example.org', '12345', gdata.gauth.HMAC_SHA1, 1246301653,
'1.0')
self.assertEqual(
base_string, 'GET&http%3A%2F%2Fexample.com%2F&oauth_callback%3Doob%2'
'6oauth_consumer_key%3Dexample.org%26oauth_nonce%3D12345%26oauth_sig'
'nature_method%3DHMAC-SHA1%26oauth_timestamp%3D1246301653%26oauth_ve'
'rsion%3D1.0')
# Test using example from documentation.
request = atom.http_core.HttpRequest(
'http://www.google.com/calendar/feeds/default/allcalendars/full'
'?orderby=starttime', 'GET')
base_string = gdata.gauth.build_oauth_base_string(
request, 'example.com', '4572616e48616d6d65724c61686176',
gdata.gauth.RSA_SHA1, 137131200, '1.0', token='1%2Fab3cd9j4ks73hf7g',
next='http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(
base_string, 'GET&http%3A%2F%2Fwww.google.com%2Fcalendar%2Ffeeds%2Fd'
'efault%2Fallcalendars%2Ffull&oauth_callback%3Dhttp%253A%252F%252Fgo'
'oglecodesamples.com%252Foauth_playground%252Findex.php%26oauth_cons'
'umer_key%3Dexample.com%26oauth_nonce%3D4572616e48616d6d65724c616861'
'76%26oauth_signature_method%3DRSA-SHA1%26oauth_timestamp%3D13713120'
'0%26oauth_token%3D1%25252Fab3cd9j4ks73hf7g%26oauth_version%3D1.0%26'
'orderby%3Dstarttime')
# Test various defaults.
request = atom.http_core.HttpRequest('http://eXample.COM', 'get')
base_string = gdata.gauth.build_oauth_base_string(
request, 'example.org', '12345', gdata.gauth.HMAC_SHA1, 1246301653,
'1.0')
self.assertEqual(
base_string, 'GET&http%3A%2F%2Fexample.com%2F&oauth_callback%3Doob%2'
'6oauth_consumer_key%3Dexample.org%26oauth_nonce%3D12345%26oauth_sig'
'nature_method%3DHMAC-SHA1%26oauth_timestamp%3D1246301653%26oauth_ve'
'rsion%3D1.0')
request = atom.http_core.HttpRequest('https://eXample.COM:443', 'get')
base_string = gdata.gauth.build_oauth_base_string(
request, 'example.org', '12345', gdata.gauth.HMAC_SHA1, 1246301653,
'1.0', 'http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(
base_string, 'GET&https%3A%2F%2Fexample.com%2F&oauth_callback%3Dhttp'
'%253A%252F%252Fgooglecodesamples.com%252Foauth_playground%252Findex'
'.php%26oauth_consumer_key%3Dexample.org%26oauth_nonce%3D12345%26oau'
'th_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1246301653%26oa'
'uth_version%3D1.0')
request = atom.http_core.HttpRequest('http://eXample.COM:443', 'get')
base_string = gdata.gauth.build_oauth_base_string(
request, 'example.org', '12345', gdata.gauth.HMAC_SHA1, 1246301653,
'1.0')
self.assertEqual(
base_string, 'GET&http%3A%2F%2Fexample.com%3A443%2F&oauth_callback%3'
'Doob%26oauth_consumer_key%3De'
'xample.org%26oauth_nonce%3D12345%26oauth_signature_method%3DHMAC-SH'
'A1%26oauth_timestamp%3D1246301653%26oauth_version%3D1.0')
request = atom.http_core.HttpRequest(
atom.http_core.Uri(host='eXample.COM'), 'GET')
base_string = gdata.gauth.build_oauth_base_string(
request, 'example.org', '12345', gdata.gauth.HMAC_SHA1, 1246301653,
'1.0', next='oob')
self.assertEqual(
base_string, 'GET&http%3A%2F%2Fexample.com%2F&oauth_callback%3Doob%2'
'6oauth_consumer_key%3Dexample.org%26oauth_nonce%3D12345%26oauth_sig'
'nature_method%3DHMAC-SHA1%26oauth_timestamp%3D1246301653%26oauth_ve'
'rsion%3D1.0')
request = atom.http_core.HttpRequest(
'https://www.google.com/accounts/OAuthGetRequestToken', 'GET')
request.uri.query['scope'] = ('https://docs.google.com/feeds/'
' http://docs.google.com/feeds/')
base_string = gdata.gauth.build_oauth_base_string(
request, 'anonymous', '48522759', gdata.gauth.HMAC_SHA1, 1246489532,
'1.0', 'http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(
base_string, 'GET&https%3A%2F%2Fwww.google.com%2Faccounts%2FOAuthGet'
'RequestToken&oauth_callback%3Dhttp%253A%252F%252Fgooglecodesamples.'
'com%252Foauth_playground%252Findex.php%26oauth_consumer_key%3Danony'
'mous%26oauth_nonce%3D4852275'
'9%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D12464895'
'32%26oauth_version%3D1.0%26scope%3Dhttps%253A%252F%252Fdocs.google.'
'com%252Ffeeds%252F%2520http%253A%252F%252Fdocs.google.com%252Ffeeds'
'%252F')
def test_generate_hmac_signature(self):
# Use the example from the OAuth playground:
# http://googlecodesamples.com/oauth_playground/
request = atom.http_core.HttpRequest(
'https://www.google.com/accounts/OAuthGetRequestToken?'
'scope=http%3A%2F%2Fwww.blogger.com%2Ffeeds%2F', 'GET')
signature = gdata.gauth.generate_hmac_signature(
request, 'anonymous', 'anonymous', '1246491360',
'c0155b3f28697c029e7a62efff44bd46', '1.0',
next='http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(signature, '5a2GPdtAY3LWYv8IdiT3wp1Coeg=')
# Try the same request but with a non escaped Uri object.
request = atom.http_core.HttpRequest(
'https://www.google.com/accounts/OAuthGetRequestToken', 'GET')
request.uri.query['scope'] = 'http://www.blogger.com/feeds/'
signature = gdata.gauth.generate_hmac_signature(
request, 'anonymous', 'anonymous', '1246491360',
'c0155b3f28697c029e7a62efff44bd46', '1.0',
'http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(signature, '5a2GPdtAY3LWYv8IdiT3wp1Coeg=')
# A different request also checked against the OAuth playground.
request = atom.http_core.HttpRequest(
'https://www.google.com/accounts/OAuthGetRequestToken', 'GET')
request.uri.query['scope'] = ('https://www.google.com/analytics/feeds/ '
'http://www.google.com/base/feeds/ '
'http://www.google.com/calendar/feeds/')
signature = gdata.gauth.generate_hmac_signature(
request, 'anonymous', 'anonymous', 1246491797,
'33209c4d7a09be4eb1d6ff18e00f8548', '1.0',
next='http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(signature, 'kFAgTTFDIWz4/xAabIlrcZZMTq8=')
class OAuthRsaTokenTests(unittest.TestCase):
def test_generate_rsa_signature(self):
request = atom.http_core.HttpRequest(
'https://www.google.com/accounts/OAuthGetRequestToken?'
'scope=http%3A%2F%2Fwww.blogger.com%2Ffeeds%2F', 'GET')
signature = gdata.gauth.generate_rsa_signature(
request, 'anonymous', PRIVATE_TEST_KEY, '1246491360',
'c0155b3f28697c029e7a62efff44bd46', '1.0',
next='http://googlecodesamples.com/oauth_playground/index.php')
self.assertEqual(
signature,
'bfMantdttKaTrwoxU87JiXmMeXhAiXPiq79a5XmLlOYwwlX06Pu7CafMp7hW1fPeZtL'
'4o9Sz3NvPI8GECCaZk7n5vi1EJ5/wfIQbddrC8j45joBG6gFSf4tRJct82dSyn6bd71'
'knwPZH1sKK46Y0ePJvEIDI3JDd7pRZuMM2sN8=')
class OAuthHeaderTest(unittest.TestCase):
def test_generate_auth_header(self):
header = gdata.gauth.generate_auth_header(
'consumerkey', 1234567890, 'mynonce', 'unknown_sig_type', 'sig')
self.assertTrue(header.startswith('OAuth'))
self.assertTrue(header.find('oauth_nonce="mynonce"') > -1)
self.assertTrue(header.find('oauth_timestamp="1234567890"') > -1)
self.assertTrue(header.find('oauth_consumer_key="consumerkey"') > -1)
self.assertTrue(
header.find('oauth_signature_method="unknown_sig_type"') > -1)
self.assertTrue(header.find('oauth_version="1.0"') > -1)
self.assertTrue(header.find('oauth_signature="sig"') > -1)
header = gdata.gauth.generate_auth_header(
'consumer/key', 1234567890, 'ab%&33', '', 'ab/+-_=')
self.assertTrue(header.find('oauth_nonce="ab%25%2633"') > -1)
self.assertTrue(header.find('oauth_consumer_key="consumer%2Fkey"') > -1)
self.assertTrue(header.find('oauth_signature_method=""') > -1)
self.assertTrue(header.find('oauth_signature="ab%2F%2B-_%3D"') > -1)
class OAuthGetRequestToken(unittest.TestCase):
def test_request_hmac_request_token(self):
request = gdata.gauth.generate_request_for_request_token(
'anonymous', gdata.gauth.HMAC_SHA1,
['http://www.blogger.com/feeds/',
'http://www.google.com/calendar/feeds/'],
consumer_secret='anonymous')
request_uri = str(request.uri)
self.assertTrue('http%3A%2F%2Fwww.blogger.com%2Ffeeds%2F' in request_uri)
self.assertTrue(
'http%3A%2F%2Fwww.google.com%2Fcalendar%2Ffeeds%2F' in request_uri)
auth_header = request.headers['Authorization']
self.assertTrue('oauth_consumer_key="anonymous"' in auth_header)
self.assertTrue('oauth_signature_method="HMAC-SHA1"' in auth_header)
self.assertTrue('oauth_version="1.0"' in auth_header)
self.assertTrue('oauth_signature="' in auth_header)
self.assertTrue('oauth_nonce="' in auth_header)
self.assertTrue('oauth_timestamp="' in auth_header)
def test_request_rsa_request_token(self):
request = gdata.gauth.generate_request_for_request_token(
'anonymous', gdata.gauth.RSA_SHA1,
['http://www.blogger.com/feeds/',
'http://www.google.com/calendar/feeds/'],
rsa_key=PRIVATE_TEST_KEY)
request_uri = str(request.uri)
self.assertTrue('http%3A%2F%2Fwww.blogger.com%2Ffeeds%2F' in request_uri)
self.assertTrue(
'http%3A%2F%2Fwww.google.com%2Fcalendar%2Ffeeds%2F' in request_uri)
auth_header = request.headers['Authorization']
self.assertTrue('oauth_consumer_key="anonymous"' in auth_header)
self.assertTrue('oauth_signature_method="RSA-SHA1"' in auth_header)
self.assertTrue('oauth_version="1.0"' in auth_header)
self.assertTrue('oauth_signature="' in auth_header)
self.assertTrue('oauth_nonce="' in auth_header)
self.assertTrue('oauth_timestamp="' in auth_header)
def test_extract_token_from_body(self):
body = ('oauth_token=4%2F5bNFM_efIu3yN-E9RrF1KfZzOAZG&oauth_token_secret='
'%2B4O49V9WUOkjXgpOobAtgYzy&oauth_callback_confirmed=true')
token, secret = gdata.gauth.oauth_token_info_from_body(body)
self.assertEqual(token, '4/5bNFM_efIu3yN-E9RrF1KfZzOAZG')
self.assertEqual(secret, '+4O49V9WUOkjXgpOobAtgYzy')
def test_hmac_request_token_from_body(self):
body = ('oauth_token=4%2F5bNFM_efIu3yN-E9RrF1KfZzOAZG&oauth_token_secret='
'%2B4O49V9WUOkjXgpOobAtgYzy&oauth_callback_confirmed=true')
request_token = gdata.gauth.hmac_token_from_body(body, 'myKey',
'mySecret', True)
self.assertEqual(request_token.consumer_key, 'myKey')
self.assertEqual(request_token.consumer_secret, 'mySecret')
self.assertEqual(request_token.token, '4/5bNFM_efIu3yN-E9RrF1KfZzOAZG')
self.assertEqual(request_token.token_secret, '+4O49V9WUOkjXgpOobAtgYzy')
self.assertEqual(request_token.auth_state, gdata.gauth.REQUEST_TOKEN)
def test_rsa_request_token_from_body(self):
body = ('oauth_token=4%2F5bNFM_efIu3yN-E9RrF1KfZzOAZG&oauth_token_secret='
'%2B4O49V9WUOkjXgpOobAtgYzy&oauth_callback_confirmed=true')
request_token = gdata.gauth.rsa_token_from_body(body, 'myKey',
'rsaKey', True)
self.assertEqual(request_token.consumer_key, 'myKey')
self.assertEqual(request_token.rsa_private_key, 'rsaKey')
self.assertEqual(request_token.token, '4/5bNFM_efIu3yN-E9RrF1KfZzOAZG')
self.assertEqual(request_token.token_secret, '+4O49V9WUOkjXgpOobAtgYzy')
self.assertEqual(request_token.auth_state, gdata.gauth.REQUEST_TOKEN)
class OAuthAuthorizeToken(unittest.TestCase):
def test_generate_authorization_url(self):
url = gdata.gauth.generate_oauth_authorization_url('/+=aosdpikk')
self.assertTrue(str(url).startswith(
'https://www.google.com/accounts/OAuthAuthorizeToken'))
self.assertTrue('oauth_token=%2F%2B%3Daosdpikk' in str(url))
def test_extract_auth_token(self):
url = ('http://www.example.com/test?oauth_token='
'CKF50YzIHxCT85KMAg&oauth_verifier=123zzz')
token = gdata.gauth.oauth_token_info_from_url(url)
self.assertEqual(token[0], 'CKF50YzIHxCT85KMAg')
self.assertEqual(token[1], '123zzz')
def suite():
  return conf.build_suite([AuthSubTest, SecureAuthSubTest,
                           TokensToAndFromBlobsTest, OAuthHmacTokenTests,
                           OAuthRsaTokenTests, OAuthHeaderTest,
                           OAuthGetRequestToken, OAuthAuthorizeToken])
if __name__ == '__main__':
unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_operationcreator.ui'
#
# Created: Sun Feb 9 00:21:14 2014
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_OperationCreator(object):
def setupUi(self, OperationCreator):
OperationCreator.setObjectName(_fromUtf8("OperationCreator"))
OperationCreator.resize(766, 496)
self.verticalLayout_2 = QtGui.QVBoxLayout(OperationCreator)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.formLayout = QtGui.QFormLayout()
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.ExpandingFieldsGrow)
self.formLayout.setHorizontalSpacing(1)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.label_5 = QtGui.QLabel(OperationCreator)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.label_5.setFont(font)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.label_5)
self.label = QtGui.QLabel(OperationCreator)
self.label.setObjectName(_fromUtf8("label"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label)
self.nameEdit = QtGui.QLineEdit(OperationCreator)
self.nameEdit.setObjectName(_fromUtf8("nameEdit"))
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.nameEdit)
self.label_2 = QtGui.QLabel(OperationCreator)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_2)
self.label_3 = QtGui.QLabel(OperationCreator)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_3)
self.typeComboBox = QtGui.QComboBox(OperationCreator)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.typeComboBox.sizePolicy().hasHeightForWidth())
self.typeComboBox.setSizePolicy(sizePolicy)
self.typeComboBox.setObjectName(_fromUtf8("typeComboBox"))
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.typeComboBox)
self.label_4 = QtGui.QLabel(OperationCreator)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_4)
self.descriptionTextEdit = QtGui.QPlainTextEdit(OperationCreator)
self.descriptionTextEdit.setObjectName(_fromUtf8("descriptionTextEdit"))
self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.descriptionTextEdit)
self.priceSpinBox = QtGui.QDoubleSpinBox(OperationCreator)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.priceSpinBox.sizePolicy().hasHeightForWidth())
self.priceSpinBox.setSizePolicy(sizePolicy)
self.priceSpinBox.setMaximum(999999999.99)
self.priceSpinBox.setObjectName(_fromUtf8("priceSpinBox"))
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.priceSpinBox)
self.verticalLayout.addLayout(self.formLayout)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.stackedWidget = QtGui.QStackedWidget(OperationCreator)
self.stackedWidget.setObjectName(_fromUtf8("stackedWidget"))
self.emptyPage = QtGui.QWidget()
self.emptyPage.setObjectName(_fromUtf8("emptyPage"))
self.stackedWidget.addWidget(self.emptyPage)
self.vaccinePage = QtGui.QWidget()
self.vaccinePage.setObjectName(_fromUtf8("vaccinePage"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.vaccinePage)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.label_7 = QtGui.QLabel(self.vaccinePage)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.verticalLayout_3.addWidget(self.label_7)
self.searchLineEditLayout = QtGui.QHBoxLayout()
self.searchLineEditLayout.setObjectName(_fromUtf8("searchLineEditLayout"))
self.editmedicine = QtGui.QPushButton(self.vaccinePage)
self.editmedicine.setMaximumSize(QtCore.QSize(150, 16777215))
self.editmedicine.setObjectName(_fromUtf8("editmedicine"))
self.searchLineEditLayout.addWidget(self.editmedicine)
self.verticalLayout_3.addLayout(self.searchLineEditLayout)
self.resitCheckBox = QtGui.QCheckBox(self.vaccinePage)
self.resitCheckBox.setObjectName(_fromUtf8("resitCheckBox"))
self.verticalLayout_3.addWidget(self.resitCheckBox)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_6 = QtGui.QLabel(self.vaccinePage)
self.label_6.setEnabled(False)
self.label_6.setMaximumSize(QtCore.QSize(80, 16777215))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.horizontalLayout.addWidget(self.label_6)
self.durationspinBox = QtGui.QSpinBox(self.vaccinePage)
self.durationspinBox.setEnabled(False)
self.durationspinBox.setMaximum(999999999)
self.durationspinBox.setObjectName(_fromUtf8("durationspinBox"))
self.horizontalLayout.addWidget(self.durationspinBox)
self.label_8 = QtGui.QLabel(self.vaccinePage)
self.label_8.setEnabled(False)
self.label_8.setMaximumSize(QtCore.QSize(20, 16777215))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.horizontalLayout.addWidget(self.label_8)
self.getFromMedicineButton = QtGui.QPushButton(self.vaccinePage)
self.getFromMedicineButton.setEnabled(False)
self.getFromMedicineButton.setObjectName(_fromUtf8("getFromMedicineButton"))
self.horizontalLayout.addWidget(self.getFromMedicineButton)
self.verticalLayout_3.addLayout(self.horizontalLayout)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem1)
self.stackedWidget.addWidget(self.vaccinePage)
self.itemPage = QtGui.QWidget()
self.itemPage.setObjectName(_fromUtf8("itemPage"))
self.itemPageLayout = QtGui.QHBoxLayout(self.itemPage)
self.itemPageLayout.setObjectName(_fromUtf8("itemPageLayout"))
self.stackedWidget.addWidget(self.itemPage)
self.horizontalLayout_2.addWidget(self.stackedWidget)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem2)
self.closeButton = QtGui.QPushButton(OperationCreator)
self.closeButton.setObjectName(_fromUtf8("closeButton"))
self.horizontalLayout_3.addWidget(self.closeButton)
self.saveButton = QtGui.QPushButton(OperationCreator)
self.saveButton.setObjectName(_fromUtf8("saveButton"))
self.horizontalLayout_3.addWidget(self.saveButton)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.retranslateUi(OperationCreator)
self.stackedWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(OperationCreator)
OperationCreator.setTabOrder(self.nameEdit, self.priceSpinBox)
OperationCreator.setTabOrder(self.priceSpinBox, self.typeComboBox)
OperationCreator.setTabOrder(self.typeComboBox, self.descriptionTextEdit)
OperationCreator.setTabOrder(self.descriptionTextEdit, self.editmedicine)
OperationCreator.setTabOrder(self.editmedicine, self.resitCheckBox)
OperationCreator.setTabOrder(self.resitCheckBox, self.durationspinBox)
OperationCreator.setTabOrder(self.durationspinBox, self.getFromMedicineButton)
OperationCreator.setTabOrder(self.getFromMedicineButton, self.saveButton)
OperationCreator.setTabOrder(self.saveButton, self.closeButton)
def retranslateUi(self, OperationCreator):
        OperationCreator.setWindowTitle(_translate("OperationCreator", "Operaatiopohjan luonti", None))  # "Create operation template"
        self.label_5.setText(_translate("OperationCreator", "Operaatiopohja", None))  # "Operation template"
        self.label.setText(_translate("OperationCreator", "Nimi", None))  # "Name"
        self.nameEdit.setPlaceholderText(_translate("OperationCreator", "Operaation nimi", None))  # "Name of the operation"
        self.label_2.setText(_translate("OperationCreator", "Hinta", None))  # "Price"
        self.label_3.setText(_translate("OperationCreator", "Tyyppi", None))  # "Type"
        self.label_4.setText(_translate("OperationCreator", "Kuvaus", None))  # "Description"
        self.label_7.setText(_translate("OperationCreator", "Lääkevalinta", None))  # "Medicine selection"
        self.editmedicine.setText(_translate("OperationCreator", "Muokkaa lääkettä", None))  # "Edit medicine"
        self.resitCheckBox.setText(_translate("OperationCreator", "Uusittava", None))  # "Renewable"
        self.label_6.setText(_translate("OperationCreator", "Uusintaväli", None))  # "Renewal interval"
        self.label_8.setText(_translate("OperationCreator", "vrk", None))  # "days"
        self.getFromMedicineButton.setText(_translate("OperationCreator", "Hae lääkkeestä", None))  # "Get from medicine"
        self.closeButton.setText(_translate("OperationCreator", "Hylkää", None))  # "Discard"
        self.saveButton.setText(_translate("OperationCreator", "Tallenna", None))  # "Save"
|
unknown
|
codeparrot/codeparrot-clean
| ||
#include <ATen/native/vulkan/ops/Common.h>
#include <torch/library.h>
namespace at {
namespace native {
namespace vulkan {
namespace ops {
namespace {
using namespace api::utils;
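// Implements aten::_local_scalar_dense (the op behind Tensor::item()) for
// Vulkan tensors: the input is copied to the CPU and its single element is
// returned as a Scalar. Only float tensors are supported.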
Scalar _local_scalar_dense(const Tensor& self) {
TORCH_CHECK(
self.dtype() == ScalarType::Float, "Only float dtype is supported");
return Scalar(self.cpu().item<float>());
}
#ifdef USE_VULKAN_API
TORCH_LIBRARY_IMPL(aten, Vulkan, m) {
m.impl(
TORCH_SELECTIVE_NAME("aten::_local_scalar_dense"),
TORCH_FN(_local_scalar_dense));
}
#endif /* USE_VULKAN_API */
} // namespace
} // namespace ops
} // namespace vulkan
} // namespace native
} // namespace at
|
cpp
|
github
|
https://github.com/pytorch/pytorch
|
aten/src/ATen/native/vulkan/ops/Scalar.cpp
|
package tarsum
import (
"archive/tar"
"errors"
"io"
"sort"
"strconv"
"strings"
)
// Version is used for versioning of the TarSum algorithm
// based on the prefix of the hash used
// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"
type Version int
// Known versions, each corresponding to a variant of the "tarsum" prefix.
const (
Version0 Version = iota
Version1
	// VersionDev is either the latest or an unsettled next version of the TarSum calculation.
VersionDev
)
// WriteV1Header writes a tar header to a writer in V1 tarsum format.
func WriteV1Header(h *tar.Header, w io.Writer) {
for _, elem := range v1TarHeaderSelect(h) {
w.Write([]byte(elem[0] + elem[1]))
}
}
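// For example (illustrative): a header with Name "foo" and Mode 0755 begins
// the stream with "namefoo" followed by "mode493" (0755 in decimal); keys and
// values are concatenated with no separators.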
// VersionLabelForChecksum returns the label for the given tarsum
// checksum, i.e., everything before the first `+` character in
// the string or an empty string if no label separator is found.
func VersionLabelForChecksum(checksum string) string {
// Checksums are in the form: {versionLabel}+{hashID}:{hex}
before, _, ok := strings.Cut(checksum, "+")
if !ok {
return ""
}
return before
}
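// For example (illustrative):
//
//	VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") // returns "tarsum.v1"
//	VersionLabelForChecksum("no-separator")              // returns ""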
// GetVersions gets a list of all known tarsum versions.
func GetVersions() []Version {
v := []Version{}
for k := range tarSumVersions {
v = append(v, k)
}
return v
}
var (
tarSumVersions = map[Version]string{
Version0: "tarsum",
Version1: "tarsum.v1",
VersionDev: "tarsum.dev",
}
tarSumVersionsByName = map[string]Version{
"tarsum": Version0,
"tarsum.v1": Version1,
"tarsum.dev": VersionDev,
}
)
func (tsv Version) String() string {
return tarSumVersions[tsv]
}
// GetVersionFromTarsum returns the Version from the provided string.
func GetVersionFromTarsum(tarsum string) (Version, error) {
versionName, _, _ := strings.Cut(tarsum, "+")
version, ok := tarSumVersionsByName[versionName]
if !ok {
return -1, ErrNotVersion
}
return version, nil
}
// Errors that may be returned by functions in this package
var (
ErrNotVersion = errors.New("string does not include a TarSum Version")
ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented")
)
// tarHeaderSelector is the interface which different versions
// of tarsum should use for selecting and ordering tar headers
// for each item in the archive.
type tarHeaderSelector interface {
selectHeaders(h *tar.Header) (orderedHeaders [][2]string)
}
type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string)
func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) {
return f(h)
}
func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
return [][2]string{
{"name", h.Name},
{"mode", strconv.FormatInt(h.Mode, 10)},
{"uid", strconv.Itoa(h.Uid)},
{"gid", strconv.Itoa(h.Gid)},
{"size", strconv.FormatInt(h.Size, 10)},
{"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
{"typeflag", string([]byte{h.Typeflag})},
{"linkname", h.Linkname},
{"uname", h.Uname},
{"gname", h.Gname},
{"devmajor", strconv.FormatInt(h.Devmajor, 10)},
{"devminor", strconv.FormatInt(h.Devminor, 10)},
}
}
func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
// Get extended attributes.
const paxSchilyXattr = "SCHILY.xattr."
var xattrs [][2]string
for k, v := range h.PAXRecords {
if xattr, ok := strings.CutPrefix(k, paxSchilyXattr); ok {
// h.Xattrs keys take precedence over h.PAXRecords keys, like
// archive/tar does when writing.
if vv, ok := h.Xattrs[xattr]; ok { //nolint:staticcheck // field deprecated in stdlib
v = vv
}
xattrs = append(xattrs, [2]string{xattr, v})
}
}
// Get extended attributes which are not in PAXRecords.
for k, v := range h.Xattrs { //nolint:staticcheck // field deprecated in stdlib
if _, ok := h.PAXRecords[paxSchilyXattr+k]; !ok {
xattrs = append(xattrs, [2]string{k, v})
}
}
sort.Slice(xattrs, func(i, j int) bool { return xattrs[i][0] < xattrs[j][0] })
// Make the slice with enough capacity to hold the 11 basic headers
// we want from the v0 selector plus however many xattrs we have.
orderedHeaders = make([][2]string, 0, 11+len(xattrs))
// Copy all headers from v0 excluding the 'mtime' header (index 5).
v0headers := v0TarHeaderSelect(h)
orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
orderedHeaders = append(orderedHeaders, v0headers[6:]...)
// Finally, append the sorted xattrs.
orderedHeaders = append(orderedHeaders, xattrs...)
return orderedHeaders
}
var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{
Version0: v0TarHeaderSelect,
Version1: v1TarHeaderSelect,
VersionDev: v1TarHeaderSelect,
}
func getTarHeaderSelector(v Version) (tarHeaderSelector, error) {
headerSelector, ok := registeredHeaderSelectors[v]
if !ok {
return nil, ErrVersionNotImplemented
}
return headerSelector, nil
}
|
go
|
github
|
https://github.com/moby/moby
|
daemon/builder/remotecontext/internal/tarsum/versioning.go
|
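The Go file above documents the tarsum checksum format `{versionLabel}+{hashID}:{hex}`. As a minimal sketch of parsing that format in Python (`split_tarsum_checksum` is a hypothetical helper, not part of the moby repository):

# Hypothetical sketch of the {versionLabel}+{hashID}:{hex} format parsed above.
def split_tarsum_checksum(checksum):
    """Return (version_label, hash_id, hex_digest), or None if malformed."""
    label, sep, rest = checksum.partition("+")
    if not sep:
        return None  # no "+" separator: mirrors VersionLabelForChecksum returning ""
    hash_id, sep, hex_digest = rest.partition(":")
    if not sep:
        return None
    return label, hash_id, hex_digest

print(split_tarsum_checksum(
    "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"))
# -> ('tarsum', 'sha256', 'e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b')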
import numpy as np
import pytest
from pandas.core.arrays import TimedeltaArray
class TestTimedeltaArrayConstructor:
def test_other_type_raises(self):
msg = r"dtype bool cannot be converted to timedelta64\[ns\]"
with pytest.raises(TypeError, match=msg):
TimedeltaArray._from_sequence(np.array([1, 2, 3], dtype="bool"))
def test_incorrect_dtype_raises(self):
msg = "dtype 'category' is invalid, should be np.timedelta64 dtype"
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence(
np.array([1, 2, 3], dtype="i8"), dtype="category"
)
msg = "dtype 'int64' is invalid, should be np.timedelta64 dtype"
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence(
np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("int64")
)
msg = r"dtype 'datetime64\[ns\]' is invalid, should be np.timedelta64 dtype"
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence(
np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("M8[ns]")
)
msg = (
r"dtype 'datetime64\[us, UTC\]' is invalid, should be np.timedelta64 dtype"
)
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence(
np.array([1, 2, 3], dtype="i8"), dtype="M8[us, UTC]"
)
msg = "Supported timedelta64 resolutions are 's', 'ms', 'us', 'ns'"
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence(
np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("m8[Y]")
)
def test_copy(self):
data = np.array([1, 2, 3], dtype="m8[ns]")
arr = TimedeltaArray._from_sequence(data, copy=False)
assert arr._ndarray is data
arr = TimedeltaArray._from_sequence(data, copy=True)
assert arr._ndarray is not data
assert arr._ndarray.base is not data
def test_from_sequence_dtype(self):
msg = "dtype 'object' is invalid, should be np.timedelta64 dtype"
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence([], dtype=object)
|
python
|
github
|
https://github.com/pandas-dev/pandas
|
pandas/tests/arrays/timedeltas/test_constructors.py
|
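The tests above pin down which dtypes `TimedeltaArray._from_sequence` rejects. For contrast, a minimal sketch of the accepted path; `_from_sequence` is a private pandas constructor, so the exact signature may differ between releases:

import numpy as np
from pandas.core.arrays import TimedeltaArray

# Integer data viewed as timedelta64[ns] is accepted, while bool, int64
# and datetime64 dtypes raise, as asserted in the tests above.
data = np.array([1, 2, 3], dtype="m8[ns]")
arr = TimedeltaArray._from_sequence(data, dtype=np.dtype("m8[ns]"))
print(arr)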
# hgweb/wsgicgi.py - CGI->WSGI translator
#
# Copyright 2006 Eric Hopper <hopper@omnifarious.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
#
# This was originally copied from the public domain code at
# http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side
import os, sys
from mercurial import util
from mercurial.hgweb import common
def launch(application):
util.setbinary(sys.stdin)
util.setbinary(sys.stdout)
environ = dict(os.environ.iteritems())
environ.setdefault('PATH_INFO', '')
if environ.get('SERVER_SOFTWARE', '').startswith('Microsoft-IIS'):
# IIS includes script_name in PATH_INFO
scriptname = environ['SCRIPT_NAME']
if environ['PATH_INFO'].startswith(scriptname):
environ['PATH_INFO'] = environ['PATH_INFO'][len(scriptname):]
stdin = sys.stdin
if environ.get('HTTP_EXPECT', '').lower() == '100-continue':
stdin = common.continuereader(stdin, sys.stdout.write)
environ['wsgi.input'] = stdin
environ['wsgi.errors'] = sys.stderr
environ['wsgi.version'] = (1, 0)
environ['wsgi.multithread'] = False
environ['wsgi.multiprocess'] = True
environ['wsgi.run_once'] = True
if environ.get('HTTPS', 'off').lower() in ('on', '1', 'yes'):
environ['wsgi.url_scheme'] = 'https'
else:
environ['wsgi.url_scheme'] = 'http'
headers_set = []
headers_sent = []
out = sys.stdout
def write(data):
if not headers_set:
raise AssertionError("write() before start_response()")
elif not headers_sent:
# Before the first output, send the stored headers
status, response_headers = headers_sent[:] = headers_set
out.write('Status: %s\r\n' % status)
for header in response_headers:
out.write('%s: %s\r\n' % header)
out.write('\r\n')
out.write(data)
out.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0](exc_info[1], exc_info[2])
finally:
exc_info = None # avoid dangling circular ref
elif headers_set:
raise AssertionError("Headers already set!")
headers_set[:] = [status, response_headers]
return write
content = application(environ, start_response)
try:
for chunk in content:
write(chunk)
if not headers_sent:
write('') # send headers now if body was empty
finally:
getattr(content, 'close', lambda : None)()
|
unknown
|
codeparrot/codeparrot-clean
| ||
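`launch()` above adapts a CGI environment to any WSGI application, following PEP 333. A hypothetical usage sketch (Python 2, since the snippet relies on Mercurial's Python 2 modules; `hello_app` is made up):

# A trivial WSGI app that a CGI script could hand to launch().
def hello_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['Hello from CGI->WSGI\n']

# Inside a real CGI context one would then call:
# launch(hello_app)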
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import hmac
import re
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from hashlib import sha1
except ImportError:
import sha as sha1
HASHED_KEY_MAGIC = "|1|"
def add_git_host_key(module, url, accept_hostkey=True, create_dir=True):
""" idempotently add a git url hostkey """
if is_ssh_url(url):
fqdn, port = get_fqdn_and_port(url)
if fqdn:
known_host = check_hostkey(module, fqdn)
if not known_host:
if accept_hostkey:
rc, out, err = add_host_key(module, fqdn, port=port, create_dir=create_dir)
if rc != 0:
module.fail_json(msg="failed to add %s hostkey: %s" % (fqdn, out + err))
else:
module.fail_json(msg="%s has an unknown hostkey. Set accept_hostkey to True "
"or manually add the hostkey prior to running the git module" % fqdn)
def is_ssh_url(url):
""" check if url is ssh """
if "@" in url and "://" not in url:
return True
for scheme in "ssh://", "git+ssh://", "ssh+git://":
if url.startswith(scheme):
return True
return False
def get_fqdn_and_port(repo_url):
""" chop the hostname and port out of a url """
fqdn = None
port = None
ipv6_re = re.compile('(\[[^]]*\])(?::([0-9]+))?')
if "@" in repo_url and "://" not in repo_url:
# most likely a user@host:path or user@host/path type URL
repo_url = repo_url.split("@", 1)[1]
match = ipv6_re.match(repo_url)
# For this type of URL, colon specifies the path, not the port
if match:
fqdn, path = match.groups()
elif ":" in repo_url:
fqdn = repo_url.split(":")[0]
elif "/" in repo_url:
fqdn = repo_url.split("/")[0]
elif "://" in repo_url:
# this should be something we can parse with urlparse
parts = urlparse.urlparse(repo_url)
# parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
# ensure we actually have a parts[1] before continuing.
if parts[1] != '':
fqdn = parts[1]
if "@" in fqdn:
fqdn = fqdn.split("@", 1)[1]
match = ipv6_re.match(fqdn)
if match:
fqdn, port = match.groups()
elif ":" in fqdn:
fqdn, port = fqdn.split(":")[0:2]
return fqdn, port
def check_hostkey(module, fqdn):
return not not_in_host_file(module, fqdn)
# this is a variant of code found in connection_plugins/paramiko.py and we should modify
# the paramiko code to import and use this.
def not_in_host_file(self, host):
if 'USER' in os.environ:
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_host_file = "~/.ssh/known_hosts"
user_host_file = os.path.expanduser(user_host_file)
host_file_list = []
host_file_list.append(user_host_file)
host_file_list.append("/etc/ssh/ssh_known_hosts")
host_file_list.append("/etc/ssh/ssh_known_hosts2")
host_file_list.append("/etc/openssh/ssh_known_hosts")
hfiles_not_found = 0
for hf in host_file_list:
if not os.path.exists(hf):
hfiles_not_found += 1
continue
try:
host_fh = open(hf)
except IOError:
hfiles_not_found += 1
continue
else:
data = host_fh.read()
host_fh.close()
for line in data.split("\n"):
if line is None or " " not in line:
continue
tokens = line.split()
if tokens[0].find(HASHED_KEY_MAGIC) == 0:
# this is a hashed known host entry
try:
(kn_salt,kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2)
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
hash.update(host)
if hash.digest() == kn_host.decode('base64'):
return False
except:
# invalid hashed host key, skip it
continue
else:
# standard host file entry
if host in tokens[0]:
return False
return True
def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
""" use ssh-keyscan to add the hostkey """
keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
if 'USER' in os.environ:
user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_ssh_dir = "~/.ssh/"
user_host_file = "~/.ssh/known_hosts"
user_ssh_dir = os.path.expanduser(user_ssh_dir)
if not os.path.exists(user_ssh_dir):
if create_dir:
try:
os.makedirs(user_ssh_dir, int('700', 8))
except:
module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
else:
module.fail_json(msg="%s does not exist" % user_ssh_dir)
elif not os.path.isdir(user_ssh_dir):
module.fail_json(msg="%s is not a directory" % user_ssh_dir)
if port:
this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn)
else:
this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
rc, out, err = module.run_command(this_cmd)
# ssh-keyscan gives a 0 exit code and prints nothing on timeout
if rc != 0 or not out:
module.fail_json(msg='failed to get the hostkey for %s' % fqdn)
module.append_to_file(user_host_file, out)
return rc, out, err
|
unknown
|
codeparrot/codeparrot-clean
| ||
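`get_fqdn_and_port()` above chops the host (and optional port) out of several git URL shapes. A standalone illustration of what its branches imply for common inputs (expected outputs derived by reading the function, not verified against a particular Ansible release):

from __future__ import print_function

# Paste below the snippet above to exercise get_fqdn_and_port():
for url in ("git@github.com:user/repo.git",
            "ssh://git@example.com:2222/repo",
            "git@[2001:db8::1]:repo"):
    print(url, get_fqdn_and_port(url))
# expected, branch by branch:
#   ('github.com', None)       scp-like URL, the colon separates the path
#   ('example.com', '2222')    parsed via urlparse, user part stripped
#   ('[2001:db8::1]', None)    ipv6 regex branch, colon starts the path here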
"""
django-guardian helper functions.
Functions defined within this module should be considered django-guardian's
internal functionality. They are **not** guaranteed to be stable - which means
their actual input parameters/output types may change in future releases.
"""
from __future__ import unicode_literals
import os
import logging
from itertools import chain
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.models import AnonymousUser, Group
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.db.models import Model
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext, TemplateDoesNotExist
from django.utils.http import urlquote
from guardian.compat import get_user_model
from guardian.conf import settings as guardian_settings
from guardian.exceptions import NotUserNorGroup
logger = logging.getLogger(__name__)
abspath = lambda *p: os.path.abspath(os.path.join(*p))
def get_anonymous_user():
"""
Returns ``User`` instance (not ``AnonymousUser``) depending on
``ANONYMOUS_USER_ID`` configuration.
"""
return get_user_model().objects.get(id=guardian_settings.ANONYMOUS_USER_ID)
def get_identity(identity):
"""
Returns (user_obj, None) or (None, group_obj) tuple depending on what is
given. Also accepts AnonymousUser instance but would return ``User``
instead - it is convenient and needed for authorization backend to support
anonymous users.
:param identity: either ``User`` or ``Group`` instance
:raises ``NotUserNorGroup``: if cannot return proper identity instance
**Examples**::
>>> from django.contrib.auth.models import User
>>> user = User.objects.create(username='joe')
>>> get_identity(user)
(<User: joe>, None)
>>> group = Group.objects.create(name='users')
>>> get_identity(group)
(None, <Group: users>)
>>> anon = AnonymousUser()
>>> get_identity(anon)
(<User: AnonymousUser>, None)
>>> get_identity("not instance")
...
NotUserNorGroup: User/AnonymousUser or Group instance is required (got )
"""
if isinstance(identity, AnonymousUser):
identity = get_anonymous_user()
if isinstance(identity, get_user_model()):
return identity, None
elif isinstance(identity, Group):
return None, identity
raise NotUserNorGroup("User/AnonymousUser or Group instance is required "
"(got %s)" % identity)
def get_403_or_None(request, perms, obj=None, login_url=None,
redirect_field_name=None, return_403=False, accept_global_perms=False):
login_url = login_url or settings.LOGIN_URL
redirect_field_name = redirect_field_name or REDIRECT_FIELD_NAME
# Handles both the global and the object-level permission check,
# as ``obj`` defaults to None
has_permissions = False
# global perms check first (if accept_global_perms)
if accept_global_perms:
has_permissions = all(request.user.has_perm(perm) for perm in perms)
# if still no permission granted, try obj perms
if not has_permissions:
has_permissions = all(request.user.has_perm(perm, obj) for perm in perms)
if not has_permissions:
if return_403:
if guardian_settings.RENDER_403:
try:
response = render_to_response(
guardian_settings.TEMPLATE_403, {},
RequestContext(request))
response.status_code = 403
return response
except TemplateDoesNotExist as e:
if settings.DEBUG:
raise e
elif guardian_settings.RAISE_403:
raise PermissionDenied
return HttpResponseForbidden()
else:
path = urlquote(request.get_full_path())
tup = login_url, redirect_field_name, path
return HttpResponseRedirect("%s?%s=%s" % tup)
def clean_orphan_obj_perms():
"""
Seeks and removes all object permissions entries pointing at non-existing
targets.
Returns number of removed objects.
"""
from guardian.models import UserObjectPermission
from guardian.models import GroupObjectPermission
deleted = 0
# TODO: optimise
for perm in chain(UserObjectPermission.objects.all(),
GroupObjectPermission.objects.all()):
if perm.content_object is None:
logger.debug("Removing %s (pk=%d)" % (perm, perm.pk))
perm.delete()
deleted += 1
logger.info("Total removed orphan object permissions instances: %d" %
deleted)
return deleted
# TODO: should raise error when multiple UserObjectPermission direct relations
# are defined
def get_obj_perms_model(obj, base_cls, generic_cls):
if isinstance(obj, Model):
obj = obj.__class__
ctype = ContentType.objects.get_for_model(obj)
for attr in obj._meta.get_all_related_objects():
model = getattr(attr, 'model', None)
if (model and issubclass(model, base_cls) and
model is not generic_cls):
# if model is the generic one it would be returned anyway
if not model.objects.is_generic():
# make sure that content_object's content_type is same as
# the one of given obj
fk = model._meta.get_field_by_name('content_object')[0]
if ctype == ContentType.objects.get_for_model(fk.rel.to):
return model
return generic_cls
def get_user_obj_perms_model(obj):
"""
Returns model class that connects given ``obj`` and User class.
"""
from guardian.models import UserObjectPermissionBase
from guardian.models import UserObjectPermission
return get_obj_perms_model(obj, UserObjectPermissionBase, UserObjectPermission)
def get_group_obj_perms_model(obj):
"""
Returns model class that connects given ``obj`` and Group class.
"""
from guardian.models import GroupObjectPermissionBase
from guardian.models import GroupObjectPermission
return get_obj_perms_model(obj, GroupObjectPermissionBase, GroupObjectPermission)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
import time
from openerp.report import report_sxw
from openerp import pooler
class account_voucher(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_voucher, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'getLines': self._lines_get,
})
self.context = context
def _lines_get(self, voucher):
voucherline_obj = pooler.get_pool(self.cr.dbname).get('account.voucher.line')
voucherlines = voucherline_obj.search(self.cr, self.uid,[('voucher_id','=',voucher.id)])
voucherlines = voucherline_obj.browse(self.cr, self.uid, voucherlines)
return voucherlines
report_sxw.report_sxw('report.account_cheque_bancolombia', 'account.voucher',
'addons/print_receipt/reports/account_cheque_bancolombia.rml',
parser=account_voucher)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_user
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule, load_fixture
class TestUserModule(TestNvosModule):
module = pn_user
def setUp(self):
self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_user.run_cli')
self.run_nvos_commands = self.mock_run_nvos_commands.start()
self.mock_run_check_cli = patch('ansible.modules.network.netvisor.pn_user.check_cli')
self.run_check_cli = self.mock_run_check_cli.start()
def tearDown(self):
self.mock_run_nvos_commands.stop()
self.mock_run_check_cli.stop()
def run_cli_patch(self, module, cli, state_map):
if state_map['present'] == 'user-create':
results = dict(
changed=True,
cli_cmd=cli
)
elif state_map['absent'] == 'user-delete':
results = dict(
changed=True,
cli_cmd=cli
)
elif state_map['update'] == 'user-modify':
results = dict(
changed=True,
cli_cmd=cli
)
module.exit_json(**results)
def load_fixtures(self, commands=None, state=None, transport='cli'):
self.run_nvos_commands.side_effect = self.run_cli_patch
if state == 'present':
self.run_check_cli.return_value = False
if state == 'absent':
self.run_check_cli.return_value = True
if state == 'update':
self.run_check_cli.return_value = True
def test_user_create(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_name': 'foo',
'pn_scope': 'local', 'pn_password': 'test123', 'state': 'present'})
result = self.execute_module(changed=True, state='present')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 user-create name foo scope local password test123'
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_user_delete(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_name': 'foo',
'state': 'absent'})
result = self.execute_module(changed=True, state='absent')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 user-delete name foo '
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_user_modify(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_name': 'foo',
'pn_password': 'test1234', 'state': 'update'})
result = self.execute_module(changed=True, state='update')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 user-modify name foo password test1234'
self.assertEqual(result['cli_cmd'], expected_cmd)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
wirexfers.protocols.ipizza
~~~~~~~~~~~~~~~~~~~~~~~~~~
IPizza protocol implementations.
:copyright: (c) 2012-2014, Priit Laes
:license: ISC, see LICENSE for more details.
"""
from time import time
from base64 import b64encode, b64decode
from Crypto import Random
from Crypto.Hash import SHA
from Crypto.Signature import PKCS1_v1_5
from . import KeyChainBase, ProviderBase
from .. import PaymentResponse
from ..exc import InvalidResponseError
class IPizzaProviderBase(ProviderBase):
"""Base class for IPizza protocol provider.
Protocol
IPizza
KeyChain
:class:`~.IPizzaKeyChain`
Supported return urls:
* ``return``
Supported protocol version:
* ``008``
"""
form_charset = 'UTF-8'
class KeyChain(KeyChainBase):
def __init__(self, private_key, public_key):
#: RSA private key (:py:class:`Crypto.PublicKey.RSA._RSAobj`) object.
#: See :func:`wirexfers.utils.load_key`.
self.private_key = private_key
#: RSA public key (:py:class:`Crypto.PublicKey.RSA._RSAobj`) object
#: See :func:`wirexfers.utils.load_key`.
self.public_key = public_key
def _sign_request(self, info, return_urls):
"""Create and sign payment request data."""
# Basic fields
fields = [('VK_SERVICE', u'1002'),
('VK_VERSION', u'008'),
('VK_SND_ID', self.user),
('VK_STAMP', '%d' % int(time())),
('VK_AMOUNT', info.amount),
('VK_CURR', u'EUR'),
('VK_REF', info.refnum),
('VK_MSG', info.message)]
# Check whether provider supplies extra fields
if hasattr(self, 'extra_fields'):
fields.extend(self.extra_fields)
## MAC calculation for request 1002
m = self._build_mac(('SERVICE', 'VERSION', 'SND_ID', 'STAMP', \
'AMOUNT', 'CURR', 'REF', 'MSG'), dict(fields))
# Append mac fields
fields.append(('VK_MAC', b64encode( \
PKCS1_v1_5.new(self.keychain.private_key)
.sign(SHA.new(m)))))
# Append return url field(s)
fields.append(('VK_RETURN', return_urls['return']))
return fields
def parse_response(self, form, success=True):
"""Parse and return payment response."""
fields = {
# Successful payment
'1101': ('SERVICE', 'VERSION', 'SND_ID', 'REC_ID', 'STAMP', # 1..5
'T_NO', 'AMOUNT', 'CURR', 'REC_ACC', 'REC_NAME', # 6..10
'SND_ACC', 'SND_NAME', 'REF', 'MSG', 'T_DATE'), # 11..15
# Unsuccessful payment
'1901': ('SERVICE', 'VERSION', 'SND_ID', 'REC_ID', 'STAMP', # 1..5
'REF', 'MSG') # 6..7
}
# See which response we got
resp = form.get('VK_SERVICE', None)
if not resp or resp not in fields:
raise InvalidResponseError
success = resp == '1101'
Random.atfork()
# Parse and validate MAC
m = self._build_mac(fields[resp], form)
f = lambda x: form.get('VK_%s' % x)
if not PKCS1_v1_5.new(self.keychain.public_key) \
.verify(SHA.new(m), b64decode(f('MAC'))):
raise InvalidResponseError
# Save payment data
data = {}
if success:
for item in ('T_NO', 'AMOUNT', 'CURR', 'REC_ACC', 'REC_NAME',
'SND_ACC', 'SND_NAME', 'REF', 'MSG', 'T_DATE'):
data[item] = f(item)
return PaymentResponse(self, data, success)
@staticmethod
def _build_mac(fields, data):
"""Build MAC string ('003one003two') for required fields."""
f = lambda x: data.get('VK_%s' % x)
return u''.join(map(lambda k: '%03d%s' % (len(f(k).encode('utf-8')), f(k)), fields)).encode('utf-8')
class EEDanskeProvider(IPizzaProviderBase):
"""
| Danske Bank A/S Eesti filiaal
| http://www.danskebank.ee
Protocol
IPizza
KeyChain
:class:`~.IPizzaProviderBase.KeyChain`
Supported return urls:
* ``return``
Supported protocol version:
* ``008``
"""
form_charset = 'ISO-8859-1'
@staticmethod
def _build_mac(fields, data):
"""Build MAC string. Length is in bytes instead of symbols."""
f = lambda x: data.get('VK_%s' % x).encode('latin', 'ignore')
return ''.join(map(lambda k: '%03d%s' % (len(f(k)), f(k)), fields))
class EEKrediidipankProvider(IPizzaProviderBase):
"""
| AS Eesti Krediidipank
| http://krediidipank.ee/
Protocol
IPizza
KeyChain
:class:`~.IPizzaProviderBase.KeyChain`
Supported return urls:
* ``return``
Supported protocol version:
* ``008``
"""
extra_fields = (('VK_CHARSET', 'UTF-8'),)
@staticmethod
def _build_mac(fields, data):
"""Build MAC string. Length is in bytes instead of symbols."""
f = lambda x: data.get('VK_%s' % x)
return u''.join(map(lambda k: '%03d%s' % (len(f(k)), f(k)), fields)).encode('utf-8')
class EELHVProvider(IPizzaProviderBase):
"""
| AS LHV Pank
| https://www.lhv.ee
Protocol
IPizza
KeyChain
:class:`~.IPizzaProviderBase.KeyChain`
Supported return urls:
* ``return``
Supported protocol version:
* ``008``
"""
extra_fields = (('VK_CHARSET', 'UTF-8'),)
class EESEBProvider(IPizzaProviderBase):
"""
| AS SEB Pank
| http://www.seb.ee
Protocol
IPizza
KeyChain
:class:`~.IPizzaProviderBase.KeyChain`
Supported return urls:
* ``return``
Supported protocol version:
* ``008``
"""
extra_fields = (('VK_CHARSET', 'UTF-8'),)
class EESwedBankProvider(IPizzaProviderBase):
"""
| SWEDBANK AS
| https://www.swedbank.ee
Protocol
IPizza
KeyChain
:class:`~.IPizzaProviderBase.KeyChain`
Supported return urls:
* ``return``
Supported protocol version:
* ``008``
"""
extra_fields = (('VK_ENCODING', 'UTF-8'),)
@staticmethod
def _build_mac(fields, data):
"""Build MAC string. Length is in bytes instead of symbols."""
f = lambda x: data.get('VK_%s' % x)
return u''.join(map(lambda k: '%03d%s' % (len(f(k)), f(k)), fields)).encode('utf-8')
|
unknown
|
codeparrot/codeparrot-clean
| ||
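The `_build_mac` helpers above build the IPizza MAC input as a length-prefixed concatenation ('003one003two'): each field value is prefixed with its length as a zero-padded three-digit number. A tiny self-contained sketch of just that encoding:

def length_prefixed(values):
    # '%03d' zero-pads the length to three digits, then the value follows.
    return ''.join('%03d%s' % (len(v), v) for v in values)

print(length_prefixed(['one', 'two']))   # 003one003two
print(length_prefixed(['1002', '008']))  # 0041002003008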
# TLS and basic authentication configuration example.
#
# Additionally, a certificate and a key file are needed.
tls_server_config:
cert_file: server.crt
key_file: server.key
# Usernames and passwords required to connect to Prometheus.
# Passwords are hashed with bcrypt: https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md#about-bcrypt
basic_auth_users:
alice: $2y$10$mDwo.lAisC94iLAyP81MCesa29IzH37oigHC/42V2pdJlUprsJPze
bob: $2y$10$hLqFl9jSjoAAy95Z/zw8Ye8wkdMBM8c5Bn1ptYqP/AXyV0.oy0S8m
|
unknown
|
github
|
https://github.com/prometheus/prometheus
|
documentation/examples/web-config.yml
|
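The config above expects bcrypt hashes in `basic_auth_users`. One way to generate such a hash is the Python `bcrypt` package (an assumption here; the linked exporter-toolkit docs use `htpasswd`). Cost factor 10 matches the `$2y$10$` prefixes above:

import bcrypt  # third-party package: pip install bcrypt

# Hash a password with cost factor 10. Note bcrypt emits a $2b$ prefix;
# it is assumed here to be accepted alongside the $2y$ hashes above.
hashed = bcrypt.hashpw(b"s3cret", bcrypt.gensalt(rounds=10))
print(hashed.decode())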
#!/usr/bin/env python
'''Example of simple text wrapping without using layout.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from pyglet.gl import *
from pyglet.window import Window
from pyglet.window import key
from pyglet import clock
from pyglet import font
from scene2d.textsprite import *
window = Window(visible=False, resizable=True)
arial = font.load('Arial', 24)
text = 'Type away... '
@window.event
def on_resize(width, height):
sprite.width = width
sprite.x = 10
@window.event
def on_text(text):
sprite.text += text.replace('\r', '\n')
@window.event
def on_key_press(symbol, modifiers):
if symbol == key.BACKSPACE:
sprite.text = sprite.text[:-1]
sprite = TextSprite(arial, text, color=(0, 0, 0, 1))
fps = clock.ClockDisplay()
window.push_handlers(fps)
glClearColor(1, 1, 1, 1)
window.set_visible()
while not window.has_exit:
window.dispatch_events()
clock.tick()
glClear(GL_COLOR_BUFFER_BIT)
sprite.y = sprite.height # TODO align on bottom
sprite.draw()
fps.draw()
window.flip()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import numpy as np
from numpy import dot, einsum
from numpy import tensordot as tdot
from scipy.optimize import minimize
import ctm
import gates
def _make_double_layer_tensor(a, D):
return einsum(a, [8,0,2,4,6], a.conj(), [8,1,3,5,7]).reshape([D**2]*4)
def _itebd_square_fu_singlebond(a, b, abg, env):
tdot(b, b.conj(), [0,0])
def _itebd_square_cost_fct(env, abg, p, D):
def cost_fct_impl(m):
m = m.reshape(D**6, p**2)
mH = m.conj().transpose()
return dot(env, dot(m,mH).reshape(D**12)) - 2.0 * np.real(dot(env, dot(abg,mH).reshape(D**12)))
return cost_fct_impl
def _itebd_square_fu_bond(p, D, a, b, g, env, err=1e-6, max_iterations=100):
"""
env6---+---+-----+
| | | |
| 0 1 |
+--5 2--+
| 4 3 |
| | | |
+-----+---+-----+
"""
idmat = np.identity(D, dtype=float)
envVec = env.reshape(D**12)
abg = einsum(a, [9,0,8,4,5], b, [10,1,2,3,8], g, [9,10,6,7])
b2 = b
d3 = None
for j in xrange(max_iterations):
d2 = None
for j2 in xrange(max_iterations):
S = tdot(
envVec,
einsum(
tdot(abg, b2.conj(), [7,0]), [0,2,4,6,8,10,12,3,5,7,14],
idmat, [1,13],
idmat, [9,15],
idmat, [11,16]
).reshape(D**12,p,D,D,D,D),
[0,0]
).reshape(p*D**4)
R = einsum(
tdot(_make_double_layer_tensor(b2,D), env, [[0,1,2],[1,2,3]]).reshape([D]*8), [2,7,1,6,3,8,4,9],
idmat, [0,5]).reshape([p*D**4]*2)
a2vec = np.linalg.lstsq(R, S)[0]
a2 = a2vec.reshape(p,D,D,D,D)
d = dot(a2vec.conj(), dot(R, a2vec)) - 2.0 * np.real(dot(a2vec.conj(), S))
if d2 is not None and np.abs(d-d2) < err:
break
d2 = d
d2 = None
for j2 in xrange(max_iterations):
S = tdot(
envVec,
einsum(
tdot(abg, a2.conj(), [6,0]), [0,2,4,6,8,10,12,1,16,9,11],
idmat, [3,13],
idmat, [5,14],
idmat, [7,15]
).reshape(D**12,p,D,D,D,D),
[0,0]
).reshape(p*D**4)
R = einsum(
tdot(_make_double_layer_tensor(a2,D), env, [[0,2,3],[0,4,5]]).reshape([D]*8), [4,9,1,6,2,7,3,8],
idmat, [0,5]).reshape([p*D**4]*2)
b2vec = np.linalg.lstsq(R, S)[0]
b2 = b2vec.reshape(p,D,D,D,D)
d = dot(b2vec.conj(), dot(R, b2vec)) - 2.0 * np.real(dot(b2vec.conj(), S))
if d2 is not None and np.abs(d-d2) < err:
break
d2 = d
if d3 is not None and np.abs(d-d3) < err:
break
d3 = d
return a2, b2
def itebd_square(a, b, gx, gy, chi, ctmrgerr=1e-6, ctmrg_max_iterations=1000000, tebd_max_iterations=1000000, tebd_update_err=1e-5, tebd_update_max_iterations=100, env=None):
p, D = a.shape[:2]
kronecker = np.fromfunction(np.vectorize(lambda j,k: 1. if j==k else 0), (p,p), dtype=int)
gx2 = gx.swapaxes(0,1).swapaxes(2,3)
gy2 = gy.swapaxes(0,1).swapaxes(2,3)
aDL = _make_double_layer_tensor(a, D)
bDL = _make_double_layer_tensor(b, D)
mz = None
for j in xrange(tebd_max_iterations):
#if j % 100 == 0:
#print "[itebd_square] {:d} iterations done".format(j)
env2 = env
mz2 = mz
env, env2, err, num_iterations = ctm.ctmrg_square_2x2(aDL, bDL, chi, err=ctmrgerr, env=env2, iteration_bunch=10)
xDL = einsum(einsum(a, [5,1,2,3,4], gates.sigmaz, [0,5]), [9,0,2,4,8], a.conj(), [9,1,3,5,7]).reshape(D**8)
e = env.toarray1x1a(aDL, bDL).reshape(D**8)
mz = dot(e, xDL) / dot(e, aDL.reshape(D**8))
#if j % 10 == 0:
if mz2 is not None:
#print "[itebd_square] mz estimate: {:.15e}; err: {:.15e}".format(mz, np.abs(mz-mz2))
if np.abs(mz-mz2) < 1e-6:
break
a, b = _itebd_square_fu_bond(p, D, a, b, gx, env.toarray1x2ab(aDL,bDL))
b, a = _itebd_square_fu_bond(p, D, np.rollaxis(b,1,5), np.rollaxis(a,1,5), gy2, np.rollaxis(env.toarray2x1ab(aDL,bDL),0,6))
a, b = np.rollaxis(a,4,1), np.rollaxis(b,4,1)
b, a = _itebd_square_fu_bond(p, D, b, a, gx2, env.toarray1x2ba(aDL,bDL))
a, b = _itebd_square_fu_bond(p, D, np.rollaxis(a,1,5), np.rollaxis(b,1,5), gy, np.rollaxis(env.toarray2x1ba(aDL,bDL),0,6))
a, b = np.rollaxis(a,4,1), np.rollaxis(b,4,1)
"""
abgx = tdot(einsum(a, [6,0,8,4,5], b, [7,1,2,3,8]), gx, [[6,7],[0,1]])
bagx = tdot(einsum(b, [6,0,8,4,5], a, [7,1,2,3,8]), gx, [[6,7],[0,1]])
abgy = tdot(einsum(a, [7,8,2,3,4], b, [6,0,1,8,5]), gy, [[6,7],[0,1]])
bagy = tdot(einsum(b, [7,8,2,3,4], a, [6,0,1,8,5]), gy, [[6,7],[0,1]])
d2 = None
e6 = env.toarray1x2ab(aDL, bDL)
e12 = e6.reshape([D]*12)
for k in xrange(tebd_update_max_iterations): # a-right b-left
bDL2 = _make_double_layer_tensor(b, D)
R = einsum(kronecker, [0,5], tdot(bDL2, e6, [[0,1,2],[1,2,3]]).swapaxes(0,1).reshape([D]*8), [1,6,2,7,3,8,4,9]).reshape([p*D**4]*2)
S = einsum(e12, [5,1,6,11,7,12,8,13,9,3,10,4], tdot(abgx, b.conj(), [7,0]), [5,6,7,8,9,10,0,11,12,13,2]).reshape(p*D**4)
a = np.linalg.lstsq(R, S)[0].reshape(p,D,D,D,D)
aDL2 = _make_double_layer_tensor(a, D)
R = einsum(kronecker, [0,5], tdot(e6, aDL2, [[0,4,5],[0,2,3]]).reshape([D]*8), [1,6,2,7,3,8,4,9]).reshape([p*D**4]*2)
S = einsum(e12, [5,11,6,1,7,2,8,3,9,12,10,13], tdot(abgx, a.conj(), [6,0]), [5,6,7,8,9,10,0,11,4,12,13]).reshape(p*D**4)
bvec = np.linalg.lstsq(R, S)[0]
b = bvec.reshape(p,D,D,D,D)
d = dot(bvec.conj(), dot(R, bvec)) - 2.0 * np.real(dot(bvec.conj(), S))
if d2 is not None:
if np.abs(d-d2) < tebd_update_err:
break
d2 = d
d2 = None
e6 = env.toarray2x1ab(aDL, bDL)
e12 = e6.reshape([D]*12)
for k in xrange(tebd_update_max_iterations): # update a-up b-down
bDL2 = _make_double_layer_tensor(b, D)
R = einsum(kronecker, [0,5], tdot(bDL2, e6, [[0,1,3],[0,1,5]]).reshape([D]*8), [1,6,2,7,3,8,4,9]).reshape([p*D**4]*2)
S = einsum(e12, [5,11,6,12,7,2,8,3,9,4,10,13], tdot(abgy, b.conj(), [6,0]), [5,6,7,8,9,10,0,11,12,1,13]).reshape(p*D**4)
a = np.linalg.lstsq(R, S)[0].reshape(p,D,D,D,D)
aDL2 = _make_double_layer_tensor(a, D)
R = einsum(kronecker, [0,5], tdot(e6, aDL2, [[2,3,4],[1,2,3]]).swapaxes(2,3).reshape([D]*8), [1,6,2,7,3,8,4,9]).reshape([p*D**4]*2)
S = einsum(e12, [5,1,6,2,7,11,8,12,9,13,10,4], tdot(abgy, a.conj(), [7,0]), [5,6,7,8,9,10,0,3,11,12,13]).reshape(p*D**4)
bvec = np.linalg.lstsq(R, S)[0]
b = bvec.reshape(p,D,D,D,D)
d = dot(bvec.conj(), dot(R, bvec)) - 2.0 * np.real(dot(bvec.conj(), S))
if d2 is not None:
if np.abs(d-d2) < tebd_update_err:
break
d2 = d
d2 = None
e6 = env.toarray1x2ba(aDL, bDL)
e12 = e6.reshape([D]*12)
for k in xrange(tebd_update_max_iterations): # b-right a-left
bDL2 = _make_double_layer_tensor(b, D)
R = einsum(kronecker, [0,5], tdot(e6, bDL2, [[0,4,5],[0,2,3]]).reshape([D]*8), [1,6,2,7,3,8,4,9]).reshape([p*D**4]*2)
S = einsum(e12, [5,11,6,1,7,2,8,3,9,12,10,13], tdot(bagx, b.conj(), [6,0]), [5,6,7,8,9,10,0,11,4,12,13]).reshape(p*D**4)
a = np.linalg.lstsq(R, S)[0].reshape(p,D,D,D,D)
aDL2 = _make_double_layer_tensor(a, D)
R = einsum(kronecker, [0,5], tdot(aDL2, e6, [[0,1,2],[1,2,3]]).swapaxes(0,1).reshape([D]*8), [1,6,2,7,3,8,4,9]).reshape([p*D**4]*2)
S = einsum(e12, [5,1,6,11,7,12,8,13,9,3,10,4], tdot(bagx, a.conj(), [7,0]), [5,6,7,8,9,10,0,11,12,13,2]).reshape(p*D**4)
bvec = np.linalg.lstsq(R, S)[0]
b = bvec.reshape(p,D,D,D,D)
d = dot(bvec.conj(), dot(R, bvec)) - 2.0 * np.real(dot(bvec.conj(), S))
if d2 is not None:
#print "[tebd_square] {:d} iterations done for b-right a-left; cost fct err: {:.15e}".format(k,np.abs(d-d2))
if np.abs(d-d2) < tebd_update_err:
break
d2 = d
d2 = None
e6 = env.toarray2x1ba(aDL, bDL)
e12 = e6.reshape([D]*12)
for k in xrange(tebd_update_max_iterations): # b-up a-down
bDL2 = _make_double_layer_tensor(b, D)
R = einsum(kronecker, [0,5], tdot(e6, bDL2, [[2,3,4],[1,2,3]]).reshape([D]*8).swapaxes(2,3), [1,6,2,7,3,8,4,9]).reshape([p*D**4]*2)
S = einsum(e12, [5,1,6,2,7,11,8,12,9,13,10,4], tdot(bagy, b.conj(), [7,0]), [5,6,7,8,9,10,0,3,11,12,13]).reshape(p*D**4)
a = np.linalg.lstsq(R, S)[0].reshape(p,D,D,D,D)
aDL2 = _make_double_layer_tensor(a, D)
R = einsum(kronecker, [0,5], tdot(aDL2, e6, [[0,1,3],[0,1,5]]).reshape([D]*8), [1,6,2,7,3,8,4,9]).reshape([p*D**4]*2)
S = einsum(e12, [5,11,6,12,7,2,8,3,9,4,10,13], tdot(bagy, a.conj(), [6,0]), [5,6,7,8,9,10,0,11,12,1,13]).reshape(p*D**4)
bvec = np.linalg.lstsq(R, S)[0]
b = bvec.reshape(p,D,D,D,D)
d = dot(bvec.conj(), dot(R, bvec)) - 2.0 * np.real(dot(bvec.conj(), S))
if d2 is not None:
#print "[tebd_square] {:d} iterations done for b-up a-down; cost fct err: {:.15e}".format(k,np.abs(d-d2))
if np.abs(d-d2) < tebd_update_err:
break
d2 = d
"""
a /= np.max(np.abs(a))
b /= np.max(np.abs(b))
aDL = _make_double_layer_tensor(a, D)
bDL = _make_double_layer_tensor(b, D)
return a, b, env
def _itebd_square_pepo_invsymm_cost(R, S, D, kappa):
def _itebd_square_pepo_invsymm_cost_impl(U):
U = U.reshape(kappa*D, D)
U2 = tdot(U, U, [1,1])
return einsum(R, [0,1,2,3], U2, [0,1], U2, [2,3]) - 2.0 * einsum(S, [0,1], U2, [0,1])
return _itebd_square_pepo_invsymm_cost_impl
def _init_downscaling_costfct(a, a2, D, kappa):
def _init_downscaling_costfct_impl(x):
U = x[:kappa*D**2].reshape(kappa*D, D)
V = x[kappa*D**2:].reshape(kappa*D, D)
aTest = einsum(a2, [0,5,6,7,8], V, [5,1], U, [6,2], V, [7,3], U, [8,4])
return np.sum(np.abs(a - aTest))
return _init_downscaling_costfct_impl
def itebd_square_pepo_invsymm(a, g, chi, env=None):
if np.sum(np.abs(a - a.swapaxes(1,3))) > 1e-15:
raise ValueError("given iPEPS is not invariant under spatial inversion")
if np.sum(np.abs(a - a.swapaxes(2,4))) > 1e-15:
raise ValueError("given iPEPS is not invariant under spatial inversion")
if np.sum(np.abs(g - g.swapaxes(2,4))) > 1e-15:
raise ValueError("given iPEPO is not invariant under spatial inversion")
if np.sum(np.abs(g - g.swapaxes(3,5))) > 1e-15:
raise ValueError("given iPEPO is not invariant under spatial inversion")
p, D = a.shape[:2]
kappa = g.shape[1]
mz = None
U2 = V2 = np.fromfunction(np.vectorize(lambda j,k: 1. if j==k else 0), (kappa*D,D), dtype=int)
a2 = einsum(a, [9,1,3,5,7], g, [9,0,2,4,6,8]).reshape([p] + [kappa*D]*4)
a3 = einsum(a2, [0,5,6,7,8], V2, [5,1], U2, [6,2], V2, [7,3], U2, [8,4])
print a-a3
print np.max(np.abs(a-a3))
exit()
x = minimize(_init_downscaling_costfct(a, a2, D, kappa), np.concatenate([U2.flatten(), V2.flatten()]))
U2 = x.x[:kappa*D**2].reshape(kappa*D, D)
V2 = x.x[kappa*D**2:].reshape(kappa*D, D)
print x
#print U
#print V
exit()
for j in xrange(5):
aDL = _make_double_layer_tensor(a, D)
env, env2, err, num_iterations = ctm.ctmrg_square_1x1_invsymm(aDL, chi, env=env, verbose=True)
xDL = einsum(einsum(a, [5,1,2,3,4], gates.sigmaz, [0,5]), [9,0,2,4,8], a.conj(), [9,1,3,5,7]).reshape(D**8)
e = env.toarray1x1().reshape(D**8)
mz, mz2 = dot(e, xDL) / dot(e, aDL.reshape(D**8)), mz
if mz2 is not None:
print "[itebd_square_pepo_invsymm] mz estimate: {:.15e}; err: {:.15e}".format(mz, np.abs(mz-mz2))
if np.abs(mz-mz2) < 1e-6:
break
a2 = einsum(a, [9,1,3,5,7], g, [9,0,2,4,6,8]).reshape([p] + [kappa*D]*4)
e = env.toarray1x2()
a2L = einsum(a2, [0,5,2,6,4], V2, [5,1], V2, [6,3])
a2R = einsum(a2L, [0,1,5,3,4], U2, [5,2]).reshape(p,D,kappa*D,D,D)
a2L = einsum(a2L, [0,1,2,3,5], U2, [5,4]).reshape(p,D,D,D,kappa*D)
a2L = einsum(a2L, [8,0,2,4,6], a2L, [8,1,3,5,7]).reshape(D**2,(kappa*D)**2,D**2,D**2)
a2R = einsum(a2R, [8,0,2,4,6], a2R, [8,1,3,5,7]).reshape(D**2,D**2,D**2,(kappa*D)**2)
R = einsum(einsum(e, [4,1,2,3,5,6], a2L, [4,0,5,6]), [0,2,3,4], a2R, [2,3,4,1]).reshape([kappa*D]*4).swapaxes(1,2)
S = einsum(R, [2,2,0,1])
U = minimize(_itebd_square_pepo_invsymm_cost(R,S,D,kappa), U2.reshape(kappa*D**2)).x.reshape(kappa*D, D)
#print U
e = env.toarray2x1()
a2U = einsum(a2, [0,1,5,3,6], U2, [5,2], U2, [6,4])
a2D = einsum(a2U, [0,1,2,5,4], V2, [5,3])
a2U = einsum(a2U, [0,5,2,3,4], V2, [5,1])
a2U = einsum(a2U, [8,0,2,4,6], a2U, [8,1,3,5,7]).reshape(D**2,D**2,(kappa*D)**2,D**2)
a2D = einsum(a2D, [8,0,2,4,6], a2D, [8,1,3,5,7]).reshape((kappa*D)**2,D**2,D**2,D**2)
R = einsum(e, [2,3,4,5,6,7], a2U, [2,3,0,7], a2D, [1,4,5,6]).reshape([kappa*D]*4).swapaxes(1,2)
S = einsum(R, [2,2,0,1])
V = minimize(_itebd_square_pepo_invsymm_cost(R,S,D,kappa), V2.reshape(kappa*D**2)).x.reshape(kappa*D, D)
#print V
a = einsum(a2, [0,5,6,7,8], V, [5,1], U, [6,2], V, [7,3], U, [8,4])
U2,V2 = U,V
print a
return a, env, j+1
|
unknown
|
codeparrot/codeparrot-clean
| ||
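`_make_double_layer_tensor` above contracts a rank-5 iPEPS tensor with its own conjugate over the physical index. A minimal shape check of that construction (written for Python 3 and current numpy, unlike the Python 2 snippet above):

import numpy as np

# a[p, D, D, D, D] contracted with conj(a) over the physical leg p gives a
# rank-4 "double layer" tensor with D**2 on every virtual leg.
p, D = 2, 3
a = np.random.rand(p, D, D, D, D)
dl = np.einsum(a, [8, 0, 2, 4, 6], a.conj(), [8, 1, 3, 5, 7]).reshape([D**2] * 4)
print(dl.shape)  # (9, 9, 9, 9)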
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on May 18, 2016
@author: riccardo
'''
from __future__ import print_function
import os
import sys # @UnusedImport
from gfzreport.templates.network.core.utils import relpath
from gfzreport.templates.network.core import get_noise_pdfs_content, gen_title,\
get_net_desc, geofonstations_df, otherstations_df, get_map_df, get_figdirective_vars
from gfzreport.sphinxbuild.core.extensions import mapfigure
# from gfzreport.templates.utils import makedirs, copyfiles, validate_outdir,\
# cleanup_onerr, setupdir, get_rst_template
from gfzreport.templates import utils
from collections import OrderedDict
def run(network, start_after, area_margins_in_deg, out_path, noise_pdf, inst_uptimes,
move_data_files, update_config_only, confirm,
network_station_marker, nonnetwork_station_marker, network_station_color,
nonnetwork_station_color):
templater = Templater(out_path, update_config_only, move_data_files, confirm)
return templater(network, start_after, area_margins_in_deg, noise_pdf, inst_uptimes,
network_station_marker, nonnetwork_station_marker, network_station_color,
nonnetwork_station_color)
class Templater(utils.Templater):
def getdestpath(self, out_path, network, start_after, area_margins_in_deg, noise_pdf, inst_uptimes,
network_station_marker, nonnetwork_station_marker, network_station_color,
nonnetwork_station_color):
'''This method must return the *real* destination directory of this object.
In the simplest scenario, it can also just return `out_path`
:param out_path: initial output path (passed in the `__init__` call)
:param args, kwargs: the arguments passed to this object when called as function and
forwarded to this method
'''
return os.path.abspath(os.path.join(out_path,
"%s_%s" % (str(network), str(start_after))))
def getdatafiles(self, destpath, destdatapath, network, start_after, area_margins_in_deg,
noise_pdf, inst_uptimes,
network_station_marker, nonnetwork_station_marker, network_station_color,
nonnetwork_station_color):
'''This method must return the data files to be copied into `destdatapath`. It must
return a dict of
`{destdir: files, ...}`
where:
* `destdir` is a string, usually `destdatapath` or a sub-directory of it,
denoting the destination directory where to copy the files
* `files`: a list of files to be copied in the corresponding `destdir`. It can
be a list of strings, each denoting a single file, a directory or a glob pattern.
If string, it will be converted to the 1-element list `[files]`
Use `collections.OrderedDict` to preserve the order of the keys
For each item `destdir, files`, and for each `filepath` in `files`, the function
will call:
:ref:`gfzreport.templates.utils.copyfiles(filepath, destdir, self._mv_data_files)`
Thus `filepath` can be a file (copy/move that file into `destdir`) a directory
(copy/move each file into `destdir`) or a glob expression (copy/move each matching
file into `destdir`)
:param destpath: the destination directory, as returned from `self.getdestpath`
:param destdatapath: the destination directory for the data files, currently
the subdirectory 'data' of `destpath` (do not rely on it as it might change in the
future)
:param args, kwargs: the arguments passed to this object when called as function and
forwarded to this method
:return: a dict of destination paths (usually sub-directories of `self.destdatapath`
mapped to lists of strings (files/ directories/ glob patterns). An empty dict or
None (or pass) are valid (don't copy anything into `destdatadir`)
This function can safely raise, as Exceptions will be caught and their
messages displayed
'''
noise_pdf_destdir = os.path.join(destdatapath, "noise_pdf")
inst_uptimes_destdir = os.path.join(destdatapath, "inst_uptimes")
return OrderedDict([[inst_uptimes_destdir, inst_uptimes], [noise_pdf_destdir, noise_pdf]])
def getrstkwargs(self, destpath, destdatapath, datafiles, network, start_after,
area_margins_in_deg, noise_pdf, inst_uptimes,
network_station_marker, nonnetwork_station_marker, network_station_color,
nonnetwork_station_color):
'''This method accepts all arguments passed to this object when called as function and
should return a dict of keyword arguments used to render the rst
template, if the latter has been implemented as a jinja template.
You can return an empty dict or None (or pass) if the rst in the current source folder
is "fixed" and not variable according to the arguments. Note that at this
point you can access `self.destpath`, `self.destdatapath` and `self.datafiles`
:param destpath: the destination directory, as returned from `self.getdestpath`
:param destdatapath: the destination directory for the data files, currently
the subdirectory 'data' of `destpath` (do not rely on it as it might change in the
future)
:param datafiles: a dict as returned from self.getdatafiles`, where each key
represents a data destination directory and each value is a list of files that have
been copied or moved in there. The keys of the dict are guaranteed to be existing folders and are
usually sub-directories of `destdatapath` (or equal to `destdatapath`)
:param args, kwargs: the arguments passed to this object when called as function and
forwarded to this method
:return: a dict of key-> values to be used for rendering the rst if the latter is a
jinja template.
This function can safely raise, as Exceptions will be caught and their
messages displayed
'''
# get the destination data paths. Use getdatafiles implemented for
# moving data files, and check that they are not empty
# the two paths will also be used later
inst_uptimes_dst, noise_pdf_dst = datafiles.keys()
assert len(os.listdir(inst_uptimes_dst)), "'%s' empty" % inst_uptimes_dst
assert len(os.listdir(noise_pdf_dst)), "'%s' empty" % noise_pdf_dst
try:
geofon_df = geofonstations_df(network, start_after)
except Exception as exc:
raise Exception(("error while fetching network stations ('%s')\n"
"check arguments and internet connection") % str(exc))
try:
others_df = otherstations_df(geofon_df, area_margins_in_deg)
except Exception as exc:
raise Exception(("error while fetching other stations within network "
"stations boundaries ('%s')\n"
"check arguments and internet connection") % str(exc))
map_df = get_map_df(geofon_df, others_df)
# convert area margins into plotmap map_margins arg:
mymapdefaults = dict(mapmargins=", ".join("%sdeg" % str(m)
for m in area_margins_in_deg),
sizes=50, fontsize=8, figmargins="1,2,9,0", legend_ncol=2)
# building template, see template.rst:
return dict(
title=gen_title(network, geofon_df),
network_description=get_net_desc(geofon_df),
stations_table={'content': geofon_df.to_csv(sep=" ", quotechar='"',
na_rep=" ", # this makes to_csv
# quoting it (otherwise it might
# result in row misalign)
index=False),
},
stations_map={'content': map_df.to_csv(sep=" ", quotechar='"', index=False),
'options': mapfigure.get_defargs(**mymapdefaults)
},
noise_pdfs={'dirpath': relpath(noise_pdf_dst, destpath),
'content': get_noise_pdfs_content(noise_pdf_dst, geofon_df)
},
inst_uptimes=get_figdirective_vars(inst_uptimes_dst, destpath)
)
# if __name__ == '__main__':
# main() # pylint:disable=no-value-for-parameter
|
unknown
|
codeparrot/codeparrot-clean
| ||
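`getdatafiles()` above is documented to return a `{destdir: files}` mapping. A hypothetical illustration of such a return value (all paths and glob patterns are made up):

from collections import OrderedDict
import os

destdatapath = "/tmp/report/data"  # made-up destination directory
datafiles = OrderedDict([
    (os.path.join(destdatapath, "inst_uptimes"), ["uptimes/*.png"]),
    (os.path.join(destdatapath, "noise_pdf"), ["noise/*.pdf"]),
])
print(list(datafiles.items()))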
import numpy as np
import pytest
import pandas as pd
from pandas import Timedelta
import pandas._testing as tm
from pandas.core import nanops
from pandas.core.arrays import TimedeltaArray
class TestReductions:
@pytest.mark.parametrize("name", ["std", "min", "max", "median", "mean"])
def test_reductions_empty(self, name, skipna):
tdi = pd.TimedeltaIndex([])
arr = tdi.array
result = getattr(tdi, name)(skipna=skipna)
assert result is pd.NaT
result = getattr(arr, name)(skipna=skipna)
assert result is pd.NaT
def test_sum_empty(self, skipna):
tdi = pd.TimedeltaIndex([])
arr = tdi.array
result = tdi.sum(skipna=skipna)
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = arr.sum(skipna=skipna)
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
def test_min_max(self, unit):
dtype = f"m8[{unit}]"
arr = TimedeltaArray._from_sequence(
["3h", "3h", "NaT", "2h", "5h", "4h"], dtype=dtype
)
result = arr.min()
expected = Timedelta("2h")
assert result == expected
result = arr.max()
expected = Timedelta("5h")
assert result == expected
result = arr.min(skipna=False)
assert result is pd.NaT
result = arr.max(skipna=False)
assert result is pd.NaT
def test_sum(self):
tdi = pd.TimedeltaIndex(["3h", "3h", "NaT", "2h", "5h", "4h"])
arr = tdi.array
result = arr.sum(skipna=True)
expected = Timedelta(hours=17)
assert isinstance(result, Timedelta)
assert result == expected
result = tdi.sum(skipna=True)
assert isinstance(result, Timedelta)
assert result == expected
result = arr.sum(skipna=False)
assert result is pd.NaT
result = tdi.sum(skipna=False)
assert result is pd.NaT
result = arr.sum(min_count=9)
assert result is pd.NaT
result = tdi.sum(min_count=9)
assert result is pd.NaT
result = arr.sum(min_count=1)
assert isinstance(result, Timedelta)
assert result == expected
result = tdi.sum(min_count=1)
assert isinstance(result, Timedelta)
assert result == expected
def test_npsum(self):
# GH#25282, GH#25335 np.sum should return a Timedelta, not timedelta64
tdi = pd.TimedeltaIndex(["3h", "3h", "2h", "5h", "4h"])
arr = tdi.array
result = np.sum(tdi)
expected = Timedelta(hours=17)
assert isinstance(result, Timedelta)
assert result == expected
result = np.sum(arr)
assert isinstance(result, Timedelta)
assert result == expected
def test_sum_2d_skipna_false(self):
arr = np.arange(8).astype(np.int64).view("m8[s]").astype("m8[ns]").reshape(4, 2)
arr[-1, -1] = "Nat"
tda = TimedeltaArray._from_sequence(arr)
result = tda.sum(skipna=False)
assert result is pd.NaT
result = tda.sum(axis=0, skipna=False)
expected = pd.TimedeltaIndex(
[Timedelta(seconds=12), pd.NaT], dtype="m8[ns]"
)._values
tm.assert_timedelta_array_equal(result, expected)
result = tda.sum(axis=1, skipna=False)
expected = pd.TimedeltaIndex(
[
Timedelta(seconds=1),
Timedelta(seconds=5),
Timedelta(seconds=9),
pd.NaT,
],
dtype="m8[ns]",
)._values
tm.assert_timedelta_array_equal(result, expected)
# Adding a Timestamp makes this a test for DatetimeArray.std
@pytest.mark.parametrize(
"add",
[
Timedelta(0).as_unit("us"),
pd.Timestamp("2021-01-01"),
pd.Timestamp("2021-01-01", tz="UTC"),
pd.Timestamp("2021-01-01", tz="Asia/Tokyo"),
],
)
def test_std(self, add):
tdi = pd.TimedeltaIndex(["0h", "4h", "NaT", "4h", "0h", "2h"]) + add
arr = tdi.array
result = arr.std(skipna=True)
expected = Timedelta(hours=2).as_unit("us")
assert isinstance(result, Timedelta)
assert result == expected
result = tdi.std(skipna=True)
assert isinstance(result, Timedelta)
assert result == expected
if getattr(arr, "tz", None) is None:
result = nanops.nanstd(np.asarray(arr), skipna=True)
assert isinstance(result, np.timedelta64)
assert result == expected
result = arr.std(skipna=False)
assert result is pd.NaT
result = tdi.std(skipna=False)
assert result is pd.NaT
if getattr(arr, "tz", None) is None:
result = nanops.nanstd(np.asarray(arr), skipna=False)
assert isinstance(result, np.timedelta64)
assert np.isnat(result)
def test_median(self):
tdi = pd.TimedeltaIndex(["0h", "3h", "NaT", "5h06m", "0h", "2h"])
arr = tdi.array
result = arr.median(skipna=True)
expected = Timedelta(hours=2)
assert isinstance(result, Timedelta)
assert result == expected
result = tdi.median(skipna=True)
assert isinstance(result, Timedelta)
assert result == expected
result = arr.median(skipna=False)
assert result is pd.NaT
result = tdi.median(skipna=False)
assert result is pd.NaT
def test_mean(self):
tdi = pd.TimedeltaIndex(["0h", "3h", "NaT", "5h06m", "0h", "2h"])
arr = tdi._data
# manually verified result
expected = Timedelta(arr.dropna()._ndarray.mean())
result = arr.mean()
assert result == expected
result = arr.mean(skipna=False)
assert result is pd.NaT
result = arr.dropna().mean(skipna=False)
assert result == expected
result = arr.mean(axis=0)
assert result == expected
def test_mean_2d(self):
tdi = pd.timedelta_range("14 days", periods=6)
tda = tdi._data.reshape(3, 2)
result = tda.mean(axis=0)
expected = tda[1]
tm.assert_timedelta_array_equal(result, expected)
result = tda.mean(axis=1)
expected = tda[:, 0] + Timedelta(hours=12).as_unit("us")
tm.assert_timedelta_array_equal(result, expected)
result = tda.mean(axis=None)
expected = tdi.mean()
assert result == expected
|
python
|
github
|
https://github.com/pandas-dev/pandas
|
pandas/tests/arrays/timedeltas/test_reductions.py
|
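The tests above encode the NaT-propagation rules for timedelta reductions: `skipna` defaults to True, `skipna=False` propagates NaT, and `sum()` additionally honours `min_count`. A short usage sketch (recent pandas assumed):

import pandas as pd

tdi = pd.TimedeltaIndex(["3h", "NaT", "2h"])
print(tdi.min())              # Timedelta('0 days 02:00:00'), NaT skipped
print(tdi.min(skipna=False))  # NaT propagates
print(tdi.sum(min_count=4))   # NaT: fewer than 4 valid values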
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CategoryOrg',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text="Type d'organisation", max_length=150, verbose_name=b"Type d'organisation")),
],
options={
'verbose_name': 'CategoryOrganization',
'verbose_name_plural': 'CategoryOrganizations',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('number', models.IntegerField(verbose_name=b'Numero')),
('date', models.DateField(default=datetime.datetime.today, verbose_name=b'Fait le')),
('subject', models.CharField(max_length=100, verbose_name=b'Objet', blank=True)),
],
options={
'verbose_name': 'Order',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('quantity', models.PositiveIntegerField(verbose_name=b'Quantite')),
('unit', models.CharField(max_length=1, verbose_name='unite', choices=[(b'C', 'Carton'), (b'P', 'Pi\xe8ce'), (b'K', 'Kg'), (b'L', 'Littre')])),
('description', models.CharField(max_length=50, verbose_name=b'Description')),
('price', models.PositiveIntegerField(verbose_name=b'Prix Unitiare')),
('order', models.ForeignKey(blank=True, to='sugu.Order', null=True)),
],
options={
'verbose_name': 'OrderItem',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('create_date', models.DateTimeField(default=datetime.datetime(2015, 4, 3, 14, 5, 35, 23893), verbose_name=b"Date d'enregistrement")),
('name', models.CharField(max_length=150, verbose_name=b'Nom de votre entreprise')),
('address', models.TextField(verbose_name=b'Adresse principale de votre soci\xc3\xa9t\xc3\xa9', blank=True)),
('address_extra', models.CharField(max_length=20, verbose_name=b'Numero de t\xc3\xa9l\xc3\xa9phone de votre entreprise', blank=True)),
('address_email', models.EmailField(max_length=75, verbose_name=b'Adresse \xc3\xa9lectronique de votre entreprise', blank=True)),
('legal_infos', models.TextField(verbose_name=b'Informations l\xc3\xa9gales', blank=True)),
('image', models.ImageField(upload_to=b'org_images/', verbose_name=b'image de la societe', blank=True)),
],
options={
'verbose_name': 'Organization',
'verbose_name_plural': 'Organizations',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Owner',
fields=[
('user_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('phone', models.CharField(max_length=30, verbose_name=b'Telephone', blank=True)),
('address_email', models.EmailField(max_length=75, verbose_name=b'Adresse \xc3\xa9lectronique', blank=True)),
],
options={
'verbose_name': 'Owner',
'verbose_name_plural': 'Owners',
},
bases=('auth.user',),
),
migrations.CreateModel(
name='TypeOrganization',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text="Type d'organisation", max_length=150, verbose_name=b"Type d'organisation")),
],
options={
'verbose_name': 'TypeOrganization',
'verbose_name_plural': 'TypeOrganizations',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='organization',
name='owner',
field=models.ForeignKey(related_name='owners', verbose_name=b'Proprietaire', to='sugu.Owner'),
preserve_default=True,
),
migrations.AddField(
model_name='organization',
name='type_org',
field=models.ForeignKey(related_name='types', verbose_name=b"Type d'organisation", to='sugu.TypeOrganization'),
preserve_default=True,
),
migrations.AddField(
model_name='order',
name='organization',
field=models.ForeignKey(related_name='Orders', verbose_name=b'Fournisseur', to='sugu.Organization'),
preserve_default=True,
),
]
|
unknown
|
codeparrot/codeparrot-clean
| ||
//
// registered_buffer.hpp
// ~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_ASIO_REGISTERED_BUFFER_HPP
#define BOOST_ASIO_REGISTERED_BUFFER_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/detail/config.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/detail/push_options.hpp>
namespace boost {
namespace asio {
namespace detail {
class buffer_registration_base;
} // namespace detail
class const_registered_buffer;
/// Type used to identify a registered buffer.
class registered_buffer_id
{
public:
/// The native buffer identifier type.
typedef int native_handle_type;
/// Default constructor creates an invalid registered buffer identifier.
registered_buffer_id() noexcept
: scope_(0),
index_(-1)
{
}
/// Get the native buffer identifier type.
native_handle_type native_handle() const noexcept
{
return index_;
}
/// Compare two IDs for equality.
friend bool operator==(const registered_buffer_id& lhs,
const registered_buffer_id& rhs) noexcept
{
return lhs.scope_ == rhs.scope_ && lhs.index_ == rhs.index_;
}
/// Compare two IDs for equality.
friend bool operator!=(const registered_buffer_id& lhs,
const registered_buffer_id& rhs) noexcept
{
return lhs.scope_ != rhs.scope_ || lhs.index_ != rhs.index_;
}
private:
friend class detail::buffer_registration_base;
// Hidden constructor used by buffer registration.
registered_buffer_id(const void* scope, int index) noexcept
: scope_(scope),
index_(index)
{
}
const void* scope_;
int index_;
};
/// Holds a registered buffer over modifiable data.
/**
* Satisfies the @c MutableBufferSequence type requirements.
*/
class mutable_registered_buffer
{
public:
/// Default constructor creates an invalid registered buffer.
mutable_registered_buffer() noexcept
: buffer_(),
id_()
{
}
/// Get the underlying mutable buffer.
const mutable_buffer& buffer() const noexcept
{
return buffer_;
}
/// Get a pointer to the beginning of the memory range.
/**
* @returns <tt>buffer().data()</tt>.
*/
void* data() const noexcept
{
return buffer_.data();
}
/// Get the size of the memory range.
/**
* @returns <tt>buffer().size()</tt>.
*/
std::size_t size() const noexcept
{
return buffer_.size();
}
/// Get the registered buffer identifier.
const registered_buffer_id& id() const noexcept
{
return id_;
}
/// Move the start of the buffer by the specified number of bytes.
mutable_registered_buffer& operator+=(std::size_t n) noexcept
{
buffer_ += n;
return *this;
}
private:
friend class detail::buffer_registration_base;
// Hidden constructor used by buffer registration and operators.
mutable_registered_buffer(const mutable_buffer& b,
const registered_buffer_id& i) noexcept
: buffer_(b),
id_(i)
{
}
#if !defined(GENERATING_DOCUMENTATION)
friend mutable_registered_buffer buffer(
const mutable_registered_buffer& b, std::size_t n) noexcept;
#endif // !defined(GENERATING_DOCUMENTATION)
mutable_buffer buffer_;
registered_buffer_id id_;
};
/// Holds a registered buffer over non-modifiable data.
/**
* Satisfies the @c ConstBufferSequence type requirements.
*/
class const_registered_buffer
{
public:
/// Default constructor creates an invalid registered buffer.
const_registered_buffer() noexcept
: buffer_(),
id_()
{
}
/// Construct a non-modifiable buffer from a modifiable one.
const_registered_buffer(
const mutable_registered_buffer& b) noexcept
: buffer_(b.buffer()),
id_(b.id())
{
}
/// Get the underlying constant buffer.
const const_buffer& buffer() const noexcept
{
return buffer_;
}
/// Get a pointer to the beginning of the memory range.
/**
* @returns <tt>buffer().data()</tt>.
*/
const void* data() const noexcept
{
return buffer_.data();
}
/// Get the size of the memory range.
/**
* @returns <tt>buffer().size()</tt>.
*/
std::size_t size() const noexcept
{
return buffer_.size();
}
/// Get the registered buffer identifier.
const registered_buffer_id& id() const noexcept
{
return id_;
}
/// Move the start of the buffer by the specified number of bytes.
const_registered_buffer& operator+=(std::size_t n) noexcept
{
buffer_ += n;
return *this;
}
private:
// Hidden constructor used by operators.
const_registered_buffer(const const_buffer& b,
const registered_buffer_id& i) noexcept
: buffer_(b),
id_(i)
{
}
#if !defined(GENERATING_DOCUMENTATION)
friend const_registered_buffer buffer(
const const_registered_buffer& b, std::size_t n) noexcept;
#endif // !defined(GENERATING_DOCUMENTATION)
const_buffer buffer_;
registered_buffer_id id_;
};
/** @addtogroup buffer_sequence_begin */
/// Get an iterator to the first element in a buffer sequence.
inline const mutable_buffer* buffer_sequence_begin(
const mutable_registered_buffer& b) noexcept
{
return &b.buffer();
}
/// Get an iterator to the first element in a buffer sequence.
inline const const_buffer* buffer_sequence_begin(
const const_registered_buffer& b) noexcept
{
return &b.buffer();
}
/** @} */
/** @addtogroup buffer_sequence_end */
/// Get an iterator to one past the end element in a buffer sequence.
inline const mutable_buffer* buffer_sequence_end(
const mutable_registered_buffer& b) noexcept
{
return &b.buffer() + 1;
}
/// Get an iterator to one past the end element in a buffer sequence.
inline const const_buffer* buffer_sequence_end(
const const_registered_buffer& b) noexcept
{
return &b.buffer() + 1;
}
/** @} */
/** @addtogroup buffer */
/// Obtain a buffer representing the entire registered buffer.
inline mutable_registered_buffer buffer(
const mutable_registered_buffer& b) noexcept
{
return b;
}
/// Obtain a buffer representing the entire registered buffer.
inline const_registered_buffer buffer(
const const_registered_buffer& b) noexcept
{
return b;
}
/// Obtain a buffer representing part of a registered buffer.
inline mutable_registered_buffer buffer(
const mutable_registered_buffer& b, std::size_t n) noexcept
{
return mutable_registered_buffer(buffer(b.buffer_, n), b.id_);
}
/// Obtain a buffer representing part of a registered buffer.
inline const_registered_buffer buffer(
const const_registered_buffer& b, std::size_t n) noexcept
{
return const_registered_buffer(buffer(b.buffer_, n), b.id_);
}
/** @} */
/// Create a new modifiable registered buffer that is offset from the start of
/// another.
/**
* @relates mutable_registered_buffer
*/
inline mutable_registered_buffer operator+(
const mutable_registered_buffer& b, std::size_t n) noexcept
{
mutable_registered_buffer tmp(b);
tmp += n;
return tmp;
}
/// Create a new modifiable buffer that is offset from the start of another.
/**
* @relates mutable_registered_buffer
*/
inline mutable_registered_buffer operator+(std::size_t n,
const mutable_registered_buffer& b) noexcept
{
return b + n;
}
/// Create a new non-modifiable registered buffer that is offset from the start
/// of another.
/**
* @relates const_registered_buffer
*/
inline const_registered_buffer operator+(const const_registered_buffer& b,
std::size_t n) noexcept
{
const_registered_buffer tmp(b);
tmp += n;
return tmp;
}
/// Create a new non-modifiable buffer that is offset from the start of another.
/**
* @relates const_registered_buffer
*/
inline const_registered_buffer operator+(std::size_t n,
const const_registered_buffer& b) noexcept
{
return b + n;
}
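// Usage sketch (illustrative only; `mrb` stands in for a
// mutable_registered_buffer obtained from a buffer registration):
//
//   mutable_registered_buffer head = buffer(mrb, 128); // first 128 bytes
//   mutable_registered_buffer tail = mrb + 128;        // the remainder
//   assert(head.id() == tail.id()); // slicing preserves the registration id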
} // namespace asio
} // namespace boost
#include <boost/asio/detail/pop_options.hpp>
#endif // BOOST_ASIO_REGISTERED_BUFFER_HPP
|
unknown
|
github
|
https://github.com/mysql/mysql-server
|
extra/boost/boost_1_87_0/boost/asio/registered_buffer.hpp
|
// eslint-disable-next-line @typescript-eslint/no-var-requires
const plugins = require("scripts/temp/config-plugins");
/**
* JSS configuration object
*/
export interface JssConfig extends Record<string, string | undefined> {
sitecoreApiKey?: string;
sitecoreApiHost?: string;
jssAppName?: string;
graphQLEndpointPath?: string;
defaultLanguage?: string;
graphQLEndpoint?: string;
}
export interface ConfigPlugin {
/**
   * Defines the order in which the plugin is called, e.g. 0 - called first (useful for a plugin whose data is required by other plugins)
*/
order: number;
/**
* A function which will be called during config generation
* @param {JssConfig} config Current (accumulated) config
*/
exec(config: JssConfig): Promise<JssConfig>;
}
export class JssConfigFactory {
public async create(defaultConfig: JssConfig = {}): Promise<JssConfig> {
return (Object.values(plugins) as ConfigPlugin[])
.sort((p1, p2) => p1.order - p2.order)
.reduce(
(promise, plugin) => promise.then((config) => plugin.exec(config)),
Promise.resolve(defaultConfig),
);
}
}
export const jssConfigFactory = new JssConfigFactory();
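// Usage sketch (illustrative only): a caller seeds the factory with defaults
// and lets the ordered plugins fill in the rest.
//
//   jssConfigFactory
//     .create({ defaultLanguage: "en" })
//     .then((config) => console.log(config.sitecoreApiHost));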
|
typescript
|
github
|
https://github.com/vercel/next.js
|
examples/cms-sitecore-xmcloud/scripts/config/index.ts
|
"""
WSGI config for testproject project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
PROJECT_PATH = os.path.abspath(os.path.split(__file__)[0])
PROJECT_PARENT = os.path.abspath(os.path.split(PROJECT_PATH)[0])
sys.path.append(PROJECT_PATH)
sys.path.append(PROJECT_PARENT)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testproject.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implement the realm for, and run on port 8800, a PB service which allows
both anonymous and username/password-based access.
Successful username/password-based login requests are given an instance of
MyPerspective with a name matching the username with which they
authenticated. Successful anonymous login requests are given an instance of
MyPerspective with the name "Anonymous".
"""
from sys import stdout
from zope.interface import implements
from twisted.python.log import startLogging
from twisted.cred.checkers import ANONYMOUS, AllowAnonymousAccess
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.portal import IRealm, Portal
from twisted.internet import reactor
from twisted.spread.pb import Avatar, IPerspective, PBServerFactory
class MyPerspective(Avatar):
"""
Trivial avatar exposing a single remote method for demonstrative
purposes. All successful login attempts in this example will result in
an avatar which is an instance of this class.
@type name: C{str}
@ivar name: The username which was used during login or C{"Anonymous"}
if the login was anonymous (a real service might want to avoid the
        collision this introduces between anonymous users and authenticated
users named "Anonymous").
"""
def __init__(self, name):
self.name = name
def perspective_foo(self, arg):
"""
Print a simple message which gives the argument this method was
called with and this avatar's name.
"""
print "I am %s. perspective_foo(%s) called on %s." % (
self.name, arg, self)
class MyRealm(object):
"""
Trivial realm which supports anonymous and named users by creating
avatars which are instances of MyPerspective for either.
"""
implements(IRealm)
def requestAvatar(self, avatarId, mind, *interfaces):
if IPerspective not in interfaces:
raise NotImplementedError("MyRealm only handles IPerspective")
if avatarId is ANONYMOUS:
avatarId = "Anonymous"
return IPerspective, MyPerspective(avatarId), lambda: None
def main():
"""
Create a PB server using MyRealm and run it on port 8800.
"""
startLogging(stdout)
p = Portal(MyRealm())
# Here the username/password checker is registered.
c1 = InMemoryUsernamePasswordDatabaseDontUse(user1="pass1", user2="pass2")
p.registerChecker(c1)
# Here the anonymous checker is registered.
c2 = AllowAnonymousAccess()
p.registerChecker(c2)
reactor.listenTCP(8800, PBServerFactory(p))
reactor.run()
if __name__ == '__main__':
main()
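# Client-side sketch (not part of this example; PBClientFactory and
# UsernamePassword are standard Twisted APIs):
#
#   from twisted.spread.pb import PBClientFactory
#   from twisted.cred.credentials import UsernamePassword, Anonymous
#
#   factory = PBClientFactory()
#   reactor.connectTCP("localhost", 8800, factory)
#   d = factory.login(UsernamePassword("user1", "pass1"))  # or Anonymous()
#   d.addCallback(lambda perspective: perspective.callRemote("foo", "hello"))
#   reactor.run()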
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* pngrio.c - functions for data input
*
* Copyright (c) 2018-2025 Cosmin Truta
* Copyright (c) 1998-2002,2004,2006-2016,2018 Glenn Randers-Pehrson
* Copyright (c) 1996-1997 Andreas Dilger
* Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
*
* This code is released under the libpng license.
* For conditions of distribution and use, see the disclaimer
* and license in png.h
*
* This file provides a location for all input. Users who need
* special handling are expected to write a function that has the same
* arguments as this and performs a similar function, but that possibly
* has a different input method. Note that you shouldn't change this
* function, but rather write a replacement function and then make
* libpng use it at run time with png_set_read_fn(...).
*/
#include "pngpriv.h"
#ifdef PNG_READ_SUPPORTED
/* Read the data from whatever input you are using. The default routine
* reads from a file pointer. Note that this routine sometimes gets called
* with very small lengths, so you should implement some kind of simple
* buffering if you are using unbuffered reads. This should never be asked
* to read more than 64K on a 16-bit machine.
*/
void /* PRIVATE */
png_read_data(png_structrp png_ptr, png_bytep data, size_t length)
{
png_debug1(4, "reading %d bytes", (int)length);
if (png_ptr->read_data_fn != NULL)
(*(png_ptr->read_data_fn))(png_ptr, data, length);
else
png_error(png_ptr, "Call to NULL read function");
}
#ifdef PNG_STDIO_SUPPORTED
/* This is the function that does the actual reading of data. If you are
* not reading from a standard C stream, you should create a replacement
* read_data function and use it at run time with png_set_read_fn(), rather
* than changing the library.
*/
void PNGCBAPI
png_default_read_data(png_structp png_ptr, png_bytep data, size_t length)
{
size_t check;
if (png_ptr == NULL)
return;
   /* fread() returns a size_t (the number of items actually read; here,
    * bytes), so a short read or an error shows up as a value smaller than
    * the requested length.
    */
check = fread(data, 1, length, png_voidcast(FILE *, png_ptr->io_ptr));
if (check != length)
png_error(png_ptr, "Read Error");
}
#endif
/* This function allows the application to supply a new input function
* for libpng if standard C streams aren't being used.
*
* This function takes as its arguments:
*
* png_ptr - pointer to a png input data structure
*
* io_ptr - pointer to user supplied structure containing info about
* the input functions. May be NULL.
*
* read_data_fn - pointer to a new input function that takes as its
* arguments a pointer to a png_struct, a pointer to
* a location where input data can be stored, and a 32-bit
* unsigned int that is the number of bytes to be read.
 * To exit and output any fatal error messages the new read
* function should call png_error(png_ptr, "Error msg").
* May be NULL, in which case libpng's default function will
* be used.
*/
void PNGAPI
png_set_read_fn(png_structrp png_ptr, png_voidp io_ptr,
png_rw_ptr read_data_fn)
{
if (png_ptr == NULL)
return;
png_ptr->io_ptr = io_ptr;
#ifdef PNG_STDIO_SUPPORTED
if (read_data_fn != NULL)
png_ptr->read_data_fn = read_data_fn;
else
png_ptr->read_data_fn = png_default_read_data;
#else
png_ptr->read_data_fn = read_data_fn;
#endif
#ifdef PNG_WRITE_SUPPORTED
/* It is an error to write to a read device */
if (png_ptr->write_data_fn != NULL)
{
png_ptr->write_data_fn = NULL;
png_warning(png_ptr,
"Can't set both read_data_fn and write_data_fn in the"
" same structure");
}
#endif
#ifdef PNG_WRITE_FLUSH_SUPPORTED
png_ptr->output_flush_fn = NULL;
#endif
}
#endif /* READ */
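/* Usage sketch (illustrative, not part of libpng): replacing the stdio
 * reader with a memory-backed callback via png_set_read_fn().  The struct
 * and names below are assumptions for the example only.
 *
 *    typedef struct { png_const_bytep data; size_t size, offset; } mem_state;
 *
 *    static void PNGCBAPI
 *    read_from_memory(png_structp png_ptr, png_bytep out, size_t length)
 *    {
 *       mem_state *m = (mem_state *)png_get_io_ptr(png_ptr);
 *       if (m->offset + length > m->size)
 *          png_error(png_ptr, "Read Error");
 *       memcpy(out, m->data + m->offset, length);
 *       m->offset += length;
 *    }
 *
 *    png_set_read_fn(png_ptr, &state, read_from_memory);
 */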
|
c
|
github
|
https://github.com/opencv/opencv
|
3rdparty/libpng/pngrio.c
|
use rustc_abi as abi;
use rustc_middle::mir::interpret::{ConstAllocation, Scalar};
use super::BackendTypes;
pub trait ConstCodegenMethods: BackendTypes {
// Constant constructors
fn const_null(&self, t: Self::Type) -> Self::Value;
/// Generate an uninitialized value (matching uninitialized memory in MIR).
/// Whether memory is initialized or not is tracked byte-for-byte.
fn const_undef(&self, t: Self::Type) -> Self::Value;
/// Generate a fake value. Poison always affects the entire value, even if just a single byte is
/// poison. This can only be used in codepaths that are already UB, i.e., UB-free Rust code
/// (including code that e.g. copies uninit memory with `MaybeUninit`) can never encounter a
/// poison value.
fn const_poison(&self, t: Self::Type) -> Self::Value;
fn const_bool(&self, val: bool) -> Self::Value;
fn const_i8(&self, i: i8) -> Self::Value;
fn const_i16(&self, i: i16) -> Self::Value;
fn const_i32(&self, i: i32) -> Self::Value;
fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
fn const_u8(&self, i: u8) -> Self::Value;
fn const_u32(&self, i: u32) -> Self::Value;
fn const_u64(&self, i: u64) -> Self::Value;
fn const_u128(&self, i: u128) -> Self::Value;
fn const_usize(&self, i: u64) -> Self::Value;
fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value;
fn const_real(&self, t: Self::Type, val: f64) -> Self::Value;
fn const_str(&self, s: &str) -> (Self::Value, Self::Value);
fn const_struct(&self, elts: &[Self::Value], packed: bool) -> Self::Value;
fn const_vector(&self, elts: &[Self::Value]) -> Self::Value;
fn const_to_opt_uint(&self, v: Self::Value) -> Option<u64>;
fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option<u128>;
fn const_data_from_alloc(&self, alloc: ConstAllocation<'_>) -> Self::Value;
fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value;
fn const_ptr_byte_offset(&self, val: Self::Value, offset: abi::Size) -> Self::Value;
}
|
rust
|
github
|
https://github.com/rust-lang/rust
|
compiler/rustc_codegen_ssa/src/traits/consts.rs
|
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys
import eventlet
eventlet.monkey_patch()
from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import config as common_cfg
from neutron.common import rpc
from neutron.common import utils as neutron_utils
from neutron.db import agents_db
from neutron.i18n import _LE, _LI
from neutron import manager
from neutron.plugins.ml2.drivers import type_vlan # noqa
from neutron import service
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import periodic_task
from oslo_service import service as svc
from networking_cisco.plugins.ml2.drivers.cisco.apic import (
mechanism_apic as ma)
ACI_PORT_DESCR_FORMATS = [
r'topology/pod-1/node-(\d+)/sys/conng/path-\[eth(\d+)/(\d+)\]',
r'topology/pod-1/paths-(\d+)/pathep-\[eth(\d+)/(\d+)\]',
]
AGENT_FORCE_UPDATE_COUNT = 100
BINARY_APIC_SERVICE_AGENT = 'neutron-cisco-apic-service-agent'
BINARY_APIC_HOST_AGENT = 'neutron-cisco-apic-host-agent'
TOPIC_APIC_SERVICE = 'apic-service'
TYPE_APIC_SERVICE_AGENT = 'cisco-apic-service-agent'
TYPE_APIC_HOST_AGENT = 'cisco-apic-host-agent'
LOG = logging.getLogger(__name__)
class ApicTopologyService(manager.Manager):
target = oslo_messaging.Target(version='1.1')
def __init__(self, host=None):
if host is None:
host = neutron_utils.get_hostname()
super(ApicTopologyService, self).__init__(host=host)
self.conf = cfg.CONF.ml2_cisco_apic
self.conn = None
self.peers = {}
self.invalid_peers = []
self.dispatcher = None
self.state = None
self.state_agent = None
self.topic = TOPIC_APIC_SERVICE
self.apic_manager = ma.APICMechanismDriver.get_apic_manager(False)
def init_host(self):
LOG.info(_LI("APIC service agent starting ..."))
self.state = {
'binary': BINARY_APIC_SERVICE_AGENT,
'host': self.host,
'topic': self.topic,
'configurations': {},
'start_flag': True,
'agent_type': TYPE_APIC_SERVICE_AGENT,
}
self.conn = rpc.create_connection(new=True)
self.dispatcher = [self, agents_db.AgentExtRpcCallback()]
self.conn.create_consumer(
self.topic, self.dispatcher, fanout=True)
self.conn.consume_in_threads()
def after_start(self):
LOG.info(_LI("APIC service agent started"))
def report_send(self, context):
if not self.state_agent:
return
LOG.debug("APIC service agent: sending report state")
try:
self.state_agent.report_state(context, self.state)
self.state.pop('start_flag', None)
except AttributeError:
# This means the server does not support report_state
# ignore it
return
except Exception:
LOG.exception(_LE("APIC service agent: failed in reporting state"))
@lockutils.synchronized('apic_service')
def update_link(self, context,
host, interface, mac,
switch, module, port):
LOG.debug("APIC service agent: received update_link: %s",
", ".join(map(str,
[host, interface, mac, switch, module, port])))
nlink = (host, interface, mac, switch, module, port)
clink = self.peers.get((host, interface), None)
if switch == 0:
# this is a link delete, remove it
if clink is not None:
self.apic_manager.remove_hostlink(*clink)
self.peers.pop((host, interface))
else:
if clink is None:
# add new link to database
self.apic_manager.add_hostlink(*nlink)
self.peers[(host, interface)] = nlink
elif clink != nlink:
# delete old link and add new one (don't update in place)
self.apic_manager.remove_hostlink(*clink)
self.peers.pop((host, interface))
self.apic_manager.add_hostlink(*nlink)
self.peers[(host, interface)] = nlink
class ApicTopologyServiceNotifierApi(object):
def __init__(self):
target = oslo_messaging.Target(topic=TOPIC_APIC_SERVICE, version='1.0')
self.client = rpc.get_client(target)
def update_link(self, context, host, interface, mac, switch, module, port):
cctxt = self.client.prepare(version='1.1', fanout=True)
cctxt.cast(context, 'update_link', host=host, interface=interface,
mac=mac, switch=switch, module=module, port=port)
def delete_link(self, context, host, interface):
cctxt = self.client.prepare(version='1.1', fanout=True)
cctxt.cast(context, 'delete_link', host=host, interface=interface,
mac=None, switch=0, module=0, port=0)
class ApicTopologyAgent(manager.Manager):
def __init__(self, host=None):
if host is None:
host = neutron_utils.get_hostname()
super(ApicTopologyAgent, self).__init__(host=host)
self.conf = cfg.CONF.ml2_cisco_apic
self.count_current = 0
self.count_force_send = AGENT_FORCE_UPDATE_COUNT
self.interfaces = {}
self.lldpcmd = None
self.peers = {}
self.port_desc_re = map(re.compile, ACI_PORT_DESCR_FORMATS)
self.service_agent = ApicTopologyServiceNotifierApi()
self.state = None
self.state_agent = None
self.topic = TOPIC_APIC_SERVICE
self.uplink_ports = []
self.invalid_peers = []
def init_host(self):
LOG.info(_LI("APIC host agent: agent starting on %s"), self.host)
self.state = {
'binary': BINARY_APIC_HOST_AGENT,
'host': self.host,
'topic': self.topic,
'configurations': {},
'start_flag': True,
'agent_type': TYPE_APIC_HOST_AGENT,
}
self.uplink_ports = []
for inf in self.conf.apic_host_uplink_ports:
if ip_lib.device_exists(inf):
self.uplink_ports.append(inf)
else:
# ignore unknown interfaces
LOG.error(_LE("No such interface (ignored): %s"), inf)
self.lldpcmd = ['lldpctl', '-f', 'keyvalue'] + self.uplink_ports
def after_start(self):
LOG.info(_LI("APIC host agent: started on %s"), self.host)
@periodic_task.periodic_task
def _check_for_new_peers(self, context):
LOG.debug("APIC host agent: _check_for_new_peers")
if not self.lldpcmd:
return
try:
# Check if we must send update even if there is no change
force_send = False
self.count_current += 1
if self.count_current >= self.count_force_send:
force_send = True
self.count_current = 0
# Check for new peers
new_peers = self._get_peers()
new_peers = self._valid_peers(new_peers)
# Make a copy of current interfaces
curr_peers = {}
for interface in self.peers:
curr_peers[interface] = self.peers[interface]
            # Based on curr -> new updates, add the new interfaces
self.peers = {}
for interface in new_peers:
peer = new_peers[interface]
self.peers[interface] = peer
if (interface in curr_peers and
curr_peers[interface] != peer):
self.service_agent.update_link(
context, peer[0], peer[1], None, 0, 0, 0)
if (interface not in curr_peers or
curr_peers[interface] != peer or
force_send):
self.service_agent.update_link(context, *peer)
if interface in curr_peers:
curr_peers.pop(interface)
# Any interface still in curr_peers need to be deleted
for peer in curr_peers.values():
self.service_agent.update_link(
context, peer[0], peer[1], None, 0, 0, 0)
except Exception:
LOG.exception(_LE("APIC service agent: exception in LLDP parsing"))
def _get_peers(self):
peers = {}
lldpkeys = utils.execute(self.lldpcmd, run_as_root=True)
for line in lldpkeys.splitlines():
if '=' not in line:
continue
fqkey, value = line.split('=', 1)
lldp, interface, key = fqkey.split('.', 2)
if key == 'port.descr':
for regexp in self.port_desc_re:
match = regexp.match(value)
if match:
mac = self._get_mac(interface)
switch, module, port = match.group(1, 2, 3)
peer = (self.host, interface, mac,
switch, module, port)
if interface not in peers:
peers[interface] = []
peers[interface].append(peer)
return peers
def _valid_peers(self, peers):
        # Reduce the peers dict to one valid peer per interface.
        # NOTE:
        # There is a bug in the lldpd daemon where it keeps reporting
        # old peers even after their updates have stopped; we keep
        # track of such reports and remove those peers from the result.
valid_peers = {}
invalid_peers = []
for interface in peers:
curr_peer = None
for peer in peers[interface]:
if peer in self.invalid_peers or curr_peer:
invalid_peers.append(peer)
else:
curr_peer = peer
if curr_peer is not None:
valid_peers[interface] = curr_peer
self.invalid_peers = invalid_peers
return valid_peers
def _get_mac(self, interface):
if interface in self.interfaces:
return self.interfaces[interface]
try:
mac = ip_lib.IPDevice(interface).link.address
self.interfaces[interface] = mac
return mac
except Exception:
# we can safely ignore it, it is only needed for debugging
            LOG.exception(
                _LE("APIC host agent: cannot get MAC address for %s"),
                interface)
def report_send(self, context):
if not self.state_agent:
return
LOG.debug("APIC host agent: sending report state")
try:
self.state_agent.report_state(context, self.state)
self.state.pop('start_flag', None)
except AttributeError:
# This means the server does not support report_state
# ignore it
return
except Exception:
LOG.exception(_LE("APIC host agent: failed in reporting state"))
def launch(binary, manager, topic=None):
cfg.CONF(project='neutron')
common_cfg.init(sys.argv[1:])
config.setup_logging()
report_period = cfg.CONF.ml2_cisco_apic.apic_agent_report_interval
poll_period = cfg.CONF.ml2_cisco_apic.apic_agent_poll_interval
server = service.Service.create(
binary=binary, manager=manager, topic=topic,
report_interval=report_period, periodic_interval=poll_period)
svc.launch(cfg.CONF, server).wait()
def service_main():
launch(
BINARY_APIC_SERVICE_AGENT,
'networking_cisco.plugins.ml2.drivers.' +
'cisco.apic.apic_topology.ApicTopologyService',
TOPIC_APIC_SERVICE)
def agent_main():
launch(
BINARY_APIC_HOST_AGENT,
'networking_cisco.plugins.ml2.drivers.' +
'cisco.apic.apic_topology.ApicTopologyAgent')
|
unknown
|
codeparrot/codeparrot-clean
| ||
kind: Bar
apiVersion: company.com/v1
metadata:
name: test
labels:
pruneGroup: "true"
someField: field1
otherField: field2
|
unknown
|
github
|
https://github.com/kubernetes/kubernetes
|
hack/testdata/CRD/bar.yaml
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import request
from indico.core import signals
from indico.modules.events.editing.controllers.base import RHEditingBase
from indico.modules.events.editing.models.editable import EditableType
from indico.modules.events.editing.models.file_types import EditingFileType
from indico.modules.events.editing.schemas import EditingFileTypeSchema, EditingMenuItemSchema, EditingTagSchema
from indico.util.signals import named_objects_from_signal
class RHEditingFileTypes(RHEditingBase):
"""Return all editing file types defined in the event for the editable type."""
def _process_args(self):
RHEditingBase._process_args(self)
self.editable_type = EditableType[request.view_args['type']]
self.editing_file_types = EditingFileType.query.with_parent(self.event).filter_by(type=self.editable_type).all()
def _process(self):
return EditingFileTypeSchema(many=True).jsonify(self.editing_file_types)
class RHEditingTags(RHEditingBase):
"""Return all editing tags defined in the event."""
def _process(self):
return EditingTagSchema(many=True).jsonify(self.event.editing_tags)
class RHMenuEntries(RHEditingBase):
"""Return the menu entries for the editing view."""
def _process(self):
menu_entries = named_objects_from_signal(signals.menu.items.send('event-editing-sidemenu', event=self.event))
return EditingMenuItemSchema(many=True).jsonify(menu_entries.values())
|
unknown
|
codeparrot/codeparrot-clean
| ||
import json
import os
import time
from threading import Thread
from libmproxy import controller, proxy
from netlib import http_auth
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from scrapy.utils.test import get_crawler
from scrapy.http import Request
from tests.spiders import SimpleSpider, SingleRequestSpider
from tests.mockserver import MockServer
class HTTPSProxy(controller.Master, Thread):
def __init__(self, port):
password_manager = http_auth.PassManSingleUser('scrapy', 'scrapy')
authenticator = http_auth.BasicProxyAuth(password_manager, "mitmproxy")
cert_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'keys', 'mitmproxy-ca.pem')
server = proxy.ProxyServer(proxy.ProxyConfig(
authenticator = authenticator,
cacert = cert_path),
port)
Thread.__init__(self)
controller.Master.__init__(self, server)
class ProxyConnectTestCase(TestCase):
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
self._oldenv = os.environ.copy()
self._proxy = HTTPSProxy(8888)
self._proxy.start()
# Wait for the proxy to start.
time.sleep(1.0)
os.environ['http_proxy'] = 'http://scrapy:scrapy@localhost:8888'
os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888'
def tearDown(self):
self.mockserver.__exit__(None, None, None)
self._proxy.shutdown()
os.environ = self._oldenv
@defer.inlineCallbacks
def test_https_connect_tunnel(self):
crawler = get_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("https://localhost:8999/status?n=200")
self._assert_got_response_code(200, l)
@defer.inlineCallbacks
def test_https_noconnect(self):
os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888?noconnect'
crawler = get_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("https://localhost:8999/status?n=200")
self._assert_got_response_code(200, l)
os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888'
@defer.inlineCallbacks
def test_https_connect_tunnel_error(self):
crawler = get_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("https://localhost:99999/status?n=200")
self._assert_got_tunnel_error(l)
@defer.inlineCallbacks
def test_https_tunnel_auth_error(self):
os.environ['https_proxy'] = 'http://wrong:wronger@localhost:8888'
crawler = get_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("https://localhost:8999/status?n=200")
        # The proxy returns a 407 error code, but it never reaches the
        # client; the client just sees a TunnelError.
self._assert_got_tunnel_error(l)
os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888'
@defer.inlineCallbacks
def test_https_tunnel_without_leak_proxy_authorization_header(self):
request = Request("https://localhost:8999/echo")
crawler = get_crawler(SingleRequestSpider)
with LogCapture() as l:
yield crawler.crawl(seed=request)
self._assert_got_response_code(200, l)
echo = json.loads(crawler.spider.meta['responses'][0].body)
self.assertTrue('Proxy-Authorization' not in echo['headers'])
@defer.inlineCallbacks
def test_https_noconnect_auth_error(self):
os.environ['https_proxy'] = 'http://wrong:wronger@localhost:8888?noconnect'
crawler = get_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("https://localhost:8999/status?n=200")
self._assert_got_response_code(407, l)
def _assert_got_response_code(self, code, log):
self.assertEqual(str(log).count('Crawled (%d)' % code), 1)
def _assert_got_tunnel_error(self, log):
self.assertEqual(str(log).count('TunnelError'), 1)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""PandaSurvey includes two unique datasets for testing purpuses: `People` and a sample study. The `People` file is from the 2010 US Census. The sample study is from a small survey performed at InContext Solutions in 2014 (specific survey details withheld)"""
import os
import pandas
def _path(name):
root, _ = os.path.split(__file__)
return os.path.join(root, 'data/' + name)
def load_people():
"""Returns the `People` dataset as a DataFrame. The data consists of 9999 individuals with age, disability status, marital status, race, and gender demographic information. Columns and their codes are described below:
- Age
- Non-negative integer
- May include zeros
- Disability
- 1: Disabled
- 2: Not disabled
- MarritalStatus
- 1: Married
- 2: Widowed
- 3: Divorced
- 4: Separated
- 5: Never married or under 15 years old
- Race
- 1: White alone
- 2: Black or African American alone
- 3: American Indian alone
- 4: Alaska Native alone
- 5: American Indian and Alaska Native tribes specified; or American Indian or Alaska native, not specified and no other races
- 6: Asian alone
- 7: Native Hawaiian and Other Pacific Islander alone
- 8: Some other race alone
- 9: Two or more major race groups
- Gender
- 1: Male
- 2: Female
"""
return pandas.read_csv(_path("People.csv"))
def load_sample_study():
"""Returns a sample dataset describing demographics in coded format from 2092 respondents. The study consists of 7 cells and demographics considered include age, gender, income, hispanic, and race."""
df = pandas.read_csv(_path("SampleStudy.csv"))
del df['Weight']
return df
def load_sample_weights():
"""Returns individual weights from the sample survey calculated via a raking method previously implemented in R."""
df = pandas.read_csv(_path("SampleStudy.csv"))
return df['Weight']
def load_sample_proportions():
"""Returns the target sample proportions that correspond to the sample survey.
+-------------+-------------+-------------------+
| Demographic | Coded Value | Target Proportion |
+=============+=============+===================+
| Age | 1 | 0.07 |
+-------------+-------------+-------------------+
| Age | 2 | 0.22 |
+-------------+-------------+-------------------+
| Age | 3 | 0.2 |
+-------------+-------------+-------------------+
| Age | 4 | 0.2 |
+-------------+-------------+-------------------+
| Age | 5 | 0.21 |
+-------------+-------------+-------------------+
| Gender | 1 | 0.5 |
+-------------+-------------+-------------------+
| Gender | 2 | 0.5 |
+-------------+-------------+-------------------+
| Income | 1 | 0.17 |
+-------------+-------------+-------------------+
| Income | 2 | 0.21 |
+-------------+-------------+-------------------+
| Income | 3 | 0.25 |
+-------------+-------------+-------------------+
| Income | 4 | 0.16 |
+-------------+-------------+-------------------+
| Income | 5 | 0.11 |
+-------------+-------------+-------------------+
| Hispanic | 1 | 0.09 |
+-------------+-------------+-------------------+
| Hispanic | 2 | 0.91 |
+-------------+-------------+-------------------+
| Race | 0 | 0.15 |
+-------------+-------------+-------------------+
| Race | 1 | 0.85 |
+-------------+-------------+-------------------+
"""
weights = {}
with open(_path("SampleWeights.csv")) as csv_in:
for line in csv_in:
demo, category, proportion = line.split(',')
if demo not in weights:
weights[demo] = {}
weights[demo][int(category)] = float(proportion)
return weights
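# Minimal usage sketch (assumes the bundled CSV data files are present):
if __name__ == "__main__":
    people = load_people()
    study = load_sample_study()
    weights = load_sample_weights()
    proportions = load_sample_proportions()
    print("People:", people.shape)
    print("Sample study:", study.shape)
    print("Sample weights:", len(weights))
    print("Demographics with targets:", sorted(proportions))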
|
unknown
|
codeparrot/codeparrot-clean
| ||
'''tzinfo timezone information for Australia/ACT.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class ACT(DstTzInfo):
'''Australia/ACT timezone definition. See datetime.tzinfo for details'''
zone = 'Australia/ACT'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1916,12,31,14,1,0),
d(1917,3,24,15,0,0),
d(1941,12,31,16,0,0),
d(1942,3,28,15,0,0),
d(1942,9,26,16,0,0),
d(1943,3,27,15,0,0),
d(1943,10,2,16,0,0),
d(1944,3,25,15,0,0),
d(1971,10,30,16,0,0),
d(1972,2,26,16,0,0),
d(1972,10,28,16,0,0),
d(1973,3,3,16,0,0),
d(1973,10,27,16,0,0),
d(1974,3,2,16,0,0),
d(1974,10,26,16,0,0),
d(1975,3,1,16,0,0),
d(1975,10,25,16,0,0),
d(1976,3,6,16,0,0),
d(1976,10,30,16,0,0),
d(1977,3,5,16,0,0),
d(1977,10,29,16,0,0),
d(1978,3,4,16,0,0),
d(1978,10,28,16,0,0),
d(1979,3,3,16,0,0),
d(1979,10,27,16,0,0),
d(1980,3,1,16,0,0),
d(1980,10,25,16,0,0),
d(1981,2,28,16,0,0),
d(1981,10,24,16,0,0),
d(1982,4,3,16,0,0),
d(1982,10,30,16,0,0),
d(1983,3,5,16,0,0),
d(1983,10,29,16,0,0),
d(1984,3,3,16,0,0),
d(1984,10,27,16,0,0),
d(1985,3,2,16,0,0),
d(1985,10,26,16,0,0),
d(1986,3,15,16,0,0),
d(1986,10,18,16,0,0),
d(1987,3,14,16,0,0),
d(1987,10,24,16,0,0),
d(1988,3,19,16,0,0),
d(1988,10,29,16,0,0),
d(1989,3,18,16,0,0),
d(1989,10,28,16,0,0),
d(1990,3,3,16,0,0),
d(1990,10,27,16,0,0),
d(1991,3,2,16,0,0),
d(1991,10,26,16,0,0),
d(1992,2,29,16,0,0),
d(1992,10,24,16,0,0),
d(1993,3,6,16,0,0),
d(1993,10,30,16,0,0),
d(1994,3,5,16,0,0),
d(1994,10,29,16,0,0),
d(1995,3,4,16,0,0),
d(1995,10,28,16,0,0),
d(1996,3,30,16,0,0),
d(1996,10,26,16,0,0),
d(1997,3,29,16,0,0),
d(1997,10,25,16,0,0),
d(1998,3,28,16,0,0),
d(1998,10,24,16,0,0),
d(1999,3,27,16,0,0),
d(1999,10,30,16,0,0),
d(2000,3,25,16,0,0),
d(2000,8,26,16,0,0),
d(2001,3,24,16,0,0),
d(2001,10,27,16,0,0),
d(2002,3,30,16,0,0),
d(2002,10,26,16,0,0),
d(2003,3,29,16,0,0),
d(2003,10,25,16,0,0),
d(2004,3,27,16,0,0),
d(2004,10,30,16,0,0),
d(2005,3,26,16,0,0),
d(2005,10,29,16,0,0),
d(2006,4,1,16,0,0),
d(2006,10,28,16,0,0),
d(2007,3,24,16,0,0),
d(2007,10,27,16,0,0),
d(2008,3,29,16,0,0),
d(2008,10,25,16,0,0),
d(2009,3,28,16,0,0),
d(2009,10,24,16,0,0),
d(2010,3,27,16,0,0),
d(2010,10,30,16,0,0),
d(2011,3,26,16,0,0),
d(2011,10,29,16,0,0),
d(2012,3,24,16,0,0),
d(2012,10,27,16,0,0),
d(2013,3,30,16,0,0),
d(2013,10,26,16,0,0),
d(2014,3,29,16,0,0),
d(2014,10,25,16,0,0),
d(2015,3,28,16,0,0),
d(2015,10,24,16,0,0),
d(2016,3,26,16,0,0),
d(2016,10,29,16,0,0),
d(2017,3,25,16,0,0),
d(2017,10,28,16,0,0),
d(2018,3,24,16,0,0),
d(2018,10,27,16,0,0),
d(2019,3,30,16,0,0),
d(2019,10,26,16,0,0),
d(2020,3,28,16,0,0),
d(2020,10,24,16,0,0),
d(2021,3,27,16,0,0),
d(2021,10,30,16,0,0),
d(2022,3,26,16,0,0),
d(2022,10,29,16,0,0),
d(2023,3,25,16,0,0),
d(2023,10,28,16,0,0),
d(2024,3,30,16,0,0),
d(2024,10,26,16,0,0),
d(2025,3,29,16,0,0),
d(2025,10,25,16,0,0),
d(2026,3,28,16,0,0),
d(2026,10,24,16,0,0),
d(2027,3,27,16,0,0),
d(2027,10,30,16,0,0),
d(2028,3,25,16,0,0),
d(2028,10,28,16,0,0),
d(2029,3,24,16,0,0),
d(2029,10,27,16,0,0),
d(2030,3,30,16,0,0),
d(2030,10,26,16,0,0),
d(2031,3,29,16,0,0),
d(2031,10,25,16,0,0),
d(2032,3,27,16,0,0),
d(2032,10,30,16,0,0),
d(2033,3,26,16,0,0),
d(2033,10,29,16,0,0),
d(2034,3,25,16,0,0),
d(2034,10,28,16,0,0),
d(2035,3,24,16,0,0),
d(2035,10,27,16,0,0),
d(2036,3,29,16,0,0),
d(2036,10,25,16,0,0),
d(2037,3,28,16,0,0),
d(2037,10,24,16,0,0),
]
_transition_info = [
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
]
ACT = ACT()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# How to use Composer behind a proxy
Composer, like many other tools, uses environment variables to control the use of a proxy server and
supports:
- `http_proxy` - the proxy to use for HTTP requests
- `https_proxy` - the proxy to use for HTTPS requests
- `CGI_HTTP_PROXY` - the proxy to use for HTTP requests in a non-CLI context
- `no_proxy` - domains that do not require a proxy
These named variables are a convention, rather than an official standard, and their evolution and
usage across different operating systems and tools are complex. Composer prefers the use of lowercase
names, but accepts uppercase names where appropriate.
## Usage
Composer requires specific environment variables for HTTP and HTTPS requests. For example:
```
http_proxy=http://proxy.com:80
https_proxy=http://proxy.com:80
```
Uppercase names can also be used.
### Non-CLI usage
Composer does not look for `http_proxy` or `HTTP_PROXY` in a non-CLI context. If you are running it
this way (i.e. integration into a CMS or similar use case) you must use `CGI_HTTP_PROXY` for HTTP
requests:
```
CGI_HTTP_PROXY=http://proxy.com:80
https_proxy=http://proxy.com:80
# cgi_http_proxy can also be used
```
> **Note:** CGI_HTTP_PROXY was introduced by Perl in 2001 to prevent request header manipulation and
was popularized in 2016 when this vulnerability was widely reported: https://httpoxy.org
## Syntax
Use `scheme://host:port` as in the examples above. Although a missing scheme defaults to http and a
missing port defaults to 80/443 for http/https schemes, other tools might require these values.
The host can be specified as an IP address using dotted quad notation for IPv4, or enclosed in
square brackets for IPv6.
### Authorization
Composer supports Basic authorization, using the `scheme://user:pass@host:port` syntax. Reserved url
characters in either the user name or password must be percent-encoded. For example:
```
user: me@company
pass: p@ssw$rd
proxy: http://proxy.com:80
# percent-encoded authorization
me%40company:p%40ssw%24rd
scheme://me%40company:p%40ssw%24rd@proxy.com:80
```
> **Note:** The user name and password components must be percent-encoded individually and then
combined with the colon separator. The user name cannot contain a colon (even if percent-encoded),
because the proxy will split the components on the first colon it finds.
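If you need to generate the encoded value, the URL-encoding helper of most languages will do; for
example, a short Python sketch (illustrative only):
```
from urllib.parse import quote

user = quote("me@company", safe="")
password = quote("p@ssw$rd", safe="")
print("http://%s:%s@proxy.com:80" % (user, password))
# http://me%40company:p%40ssw%24rd@proxy.com:80
```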
## HTTPS proxy servers
Composer supports HTTPS proxy servers, where HTTPS is the scheme used to connect to the proxy, but
only from PHP 7.3 with curl version 7.52.0 and above.
```
http_proxy=https://proxy.com:443
https_proxy=https://proxy.com:443
```
## Bypassing the proxy for specific domains
Use the `no_proxy` (or `NO_PROXY`) environment variable to set a comma-separated list of domains
that the proxy should **not** be used for.
```
no_proxy=example.com
# Bypasses the proxy for example.com and its sub-domains
no_proxy=www.example.com
# Bypasses the proxy for www.example.com and its sub-domains, but not for example.com
```
A domain can be restricted to a particular port (e.g. `:80`) and can also be specified as an IP
address or an IP address block in CIDR notation.
IPv6 addresses do not need to be enclosed in square brackets, like they are for
http_proxy/https_proxy values, although this format is accepted.
Setting the value to `*` will bypass the proxy for all requests.
> **Note:** A leading dot in the domain name has no significance and is removed prior to processing.
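For example, several of these forms can be combined in one value:
```
no_proxy=localhost,127.0.0.1,10.0.0.0/8,example.com:80
# Bypasses the proxy for localhost, the loopback address, the 10.0.0.0/8
# block, and port 80 of example.com
```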
## Deprecated environment variables
Composer originally provided `HTTP_PROXY_REQUEST_FULLURI` and `HTTPS_PROXY_REQUEST_FULLURI` to help
mitigate issues with misbehaving proxies. These are no longer required or used.
|
unknown
|
github
|
https://github.com/composer/composer
|
doc/faqs/how-to-use-composer-behind-a-proxy.md
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Catchcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Catchcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
        try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import statsmodels.sandbox.tsa.fftarma as fa
from statsmodels.tsa.descriptivestats import TsaDescriptive
from statsmodels.tsa.arma_mle import Arma
x = fa.ArmaFft([1, -0.5], [1., 0.4], 40).generate_sample(size=200, burnin=1000)
d = TsaDescriptive(x)
d.plot4()
#d.fit(order=(1,1))
d.fit((1,1), trend='nc')
print(d.res.params)
modc = Arma(x)
resls = modc.fit(order=(1,1))
print(resls[0])
rescm = modc.fit_mle(order=(1,1), start_params=[-0.4,0.4, 1.])
print(rescm.params)
#decimal 1 corresponds to threshold of 5% difference
assert_almost_equal(resls[0] / d.res.params, 1, decimal=1)
assert_almost_equal(rescm.params[:-1] / d.res.params, 1, decimal=1)
#copied to tsa.tests
plt.figure()
plt.plot(x, 'b-o')
plt.plot(modc.predicted(), 'r-')
plt.figure()
plt.plot(modc.error_estimate)
#plt.show()
from statsmodels.miscmodels.tmodel import TArma
modct = TArma(x)
reslst = modct.fit(order=(1,1))
print(reslst[0])
rescmt = modct.fit_mle(order=(1,1), start_params=[-0.4,0.4, 10, 1.],maxiter=500,
maxfun=500)
print(rescmt.params)
from statsmodels.tsa.arima_model import ARMA
mkf = ARMA(x)
##rkf = mkf.fit((1,1))
##rkf.params
rkf = mkf.fit((1,1), trend='nc')
print(rkf.params)
from statsmodels.tsa.arima_process import arma_generate_sample
np.random.seed(12345)
y_arma22 = arma_generate_sample([1.,-.85,.35, -0.1],[1,.25,-.7], nsample=1000)
##arma22 = ARMA(y_arma22)
##res22 = arma22.fit(trend = 'nc', order=(2,2))
##print 'kf ',res22.params
##res22css = arma22.fit(method='css',trend = 'nc', order=(2,2))
##print 'css', res22css.params
mod22 = Arma(y_arma22)
resls22 = mod22.fit(order=(2,2))
print('ls ', resls22[0])
resmle22 = mod22.fit_mle(order=(2,2), maxfun=2000)
print('mle', resmle22.params)
f = mod22.forecast()
f3 = mod22.forecast3(start=900)[-20:]
print(y_arma22[-10:])
print(f[-20:])
print(f3[-109:-90])
plt.show()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Support for exposing NX584 elements as sensors."""
import logging
import threading
import time
from nx584 import client as nx584_client
import requests
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_OPENING,
DEVICE_CLASSES,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import CONF_HOST, CONF_PORT
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_EXCLUDE_ZONES = "exclude_zones"
CONF_ZONE_TYPES = "zone_types"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = "5007"
DEFAULT_SSL = False
ZONE_TYPES_SCHEMA = vol.Schema({cv.positive_int: vol.In(DEVICE_CLASSES)})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_EXCLUDE_ZONES, default=[]): vol.All(
cv.ensure_list, [cv.positive_int]
),
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_ZONE_TYPES, default={}): ZONE_TYPES_SCHEMA,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NX584 binary sensor platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
exclude = config.get(CONF_EXCLUDE_ZONES)
zone_types = config.get(CONF_ZONE_TYPES)
try:
client = nx584_client.Client(f"http://{host}:{port}")
zones = client.list_zones()
except requests.exceptions.ConnectionError as ex:
_LOGGER.error("Unable to connect to NX584: %s", str(ex))
return False
version = [int(v) for v in client.get_version().split(".")]
if version < [1, 1]:
_LOGGER.error("NX584 is too old to use for sensors (>=0.2 required)")
return False
zone_sensors = {
zone["number"]: NX584ZoneSensor(
zone, zone_types.get(zone["number"], DEVICE_CLASS_OPENING)
)
for zone in zones
if zone["number"] not in exclude
}
if zone_sensors:
add_entities(zone_sensors.values())
watcher = NX584Watcher(client, zone_sensors)
watcher.start()
else:
_LOGGER.warning("No zones found on NX584")
return True
class NX584ZoneSensor(BinarySensorEntity):
"""Representation of a NX584 zone as a sensor."""
def __init__(self, zone, zone_type):
"""Initialize the nx594 binary sensor."""
self._zone = zone
self._zone_type = zone_type
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return self._zone_type
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the binary sensor."""
return self._zone["name"]
@property
def is_on(self):
"""Return true if the binary sensor is on."""
# True means "faulted" or "open" or "abnormal state"
return self._zone["state"]
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {"zone_number": self._zone["number"]}
class NX584Watcher(threading.Thread):
"""Event listener thread to process NX584 events."""
def __init__(self, client, zone_sensors):
"""Initialize NX584 watcher thread."""
super().__init__()
self.daemon = True
self._client = client
self._zone_sensors = zone_sensors
def _process_zone_event(self, event):
zone = event["zone"]
zone_sensor = self._zone_sensors.get(zone)
# pylint: disable=protected-access
if not zone_sensor:
return
zone_sensor._zone["state"] = event["zone_state"]
zone_sensor.schedule_update_ha_state()
def _process_events(self, events):
for event in events:
if event.get("type") == "zone_status":
self._process_zone_event(event)
def _run(self):
"""Throw away any existing events so we don't replay history."""
self._client.get_events()
while True:
events = self._client.get_events()
if events:
self._process_events(events)
def run(self):
"""Run the watcher."""
while True:
try:
self._run()
except requests.exceptions.ConnectionError:
_LOGGER.error("Failed to reach NX584 server")
time.sleep(10)
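# A hedged configuration sketch (not part of this module): wiring the platform
# up via configuration.yaml, using only the options defined in PLATFORM_SCHEMA
# above. Zone numbers and device classes here are purely illustrative.
#
# binary_sensor:
#   - platform: nx584
#     host: localhost
#     port: 5007
#     exclude_zones: [3]
#     zone_types:
#       1: opening
#       2: motion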
|
unknown
|
codeparrot/codeparrot-clean
| ||
// MODULE: context
// FILE: context1.kt
@file:JvmName("Context")
@file:JvmMultifileClass
class Outer {
class Inner {
fun test() {
fun call(): Int {
consume(this@Inner)
return 1
}
fun call(a: Int): Int {
consume(this@Inner)
return 1 + a
}
fun call(a: Int, f: (Int)->Int): Int {
consume(this@Inner)
return 1 + a + f(a)
}
fun call2(): Int {
fun call() = 1
consume(this@Inner)
return 2
}
fun call3(): Int {
fun call() = 1
fun call2() = 2
consume(this@Inner)
return 3
}
<caret_context>call() + call(4) + call(9) { 2 * it } + call2() + call3()
}
fun call() = 1
fun call2(): Int {
fun call() = 1
return 2
}
fun call3(): Int {
fun call() = 1
fun call2() = 2
return 3
}
}
fun call() = 1
fun call2(): Int {
fun call() = 1
return 2
}
fun call3(): Int {
fun call() = 1
fun call2() = 2
return 3
}
}
fun consume(obj: Outer.Inner) {}
// FILE: context2.kt
@file:JvmName("Context")
@file:JvmMultifileClass
fun call(): Int {
return 1
}
fun call(a: Int): Int {
return 1 + a
}
fun call(a: Int, f: (Int)->Int): Int {
return 1 + a + f(a)
}
fun call2(): Int {
fun call() = 1
return 2
}
fun call3(): Int {
fun call() = 1
fun call2() = 2
return 3
}
// MODULE: main
// MODULE_KIND: CodeFragment
// CONTEXT_MODULE: context
// FILE: fragment.kt
// CODE_FRAGMENT_KIND: EXPRESSION
call() + call(4) + call(9) { 2 * it } + call2() + call3()
|
kotlin
|
github
|
https://github.com/JetBrains/kotlin
|
analysis/analysis-api/testData/components/compilerFacility/compilation/codeFragments/capturing/localFunctionWithMultiFileClass.kt
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""PyAuto media test base. Handles PyAuto initialization and path setup.
Required to ensure each media test can load the appropriate libraries. Each
test must include this snippet:
# This should be at the top
import pyauto_media
<test code>
# This should be at the bottom.
if __name__ == '__main__':
pyauto_media.Main()
"""
import os
import sys
def _SetupPaths():
"""Add paths required for loading PyAuto and other utilities to sys.path."""
media_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(media_dir)
sys.path.append(os.path.normpath(os.path.join(media_dir, os.pardir)))
# Add psutil library path.
# TODO(dalecurtis): This should only be added for tests which use psutil.
sys.path.append(os.path.normpath(os.path.join(
media_dir, os.pardir, os.pardir, os.pardir, os.pardir,
'third_party', 'psutil')))
_SetupPaths()
import pyauto_functional
Main = pyauto_functional.Main
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import print_function, unicode_literals
from datetime import datetime
from celery.states import FAILURE
from django.core.management.base import BaseCommand, CommandError
from pytz import utc
from lms.djangoapps.instructor_task.models import PROGRESS, QUEUING, InstructorTask
class Command(BaseCommand):
"""
Command to manually fail old "QUEUING" or "PROGRESS" tasks in the
instructor task table.
Example:
./manage.py lms fail_old_tasks QUEUING --dry-run --after 2001-01-03 \
--before 2001-01-06 --task-type bulk_course_email
"""
def add_arguments(self, parser):
"""
Add arguments to the command parser.
"""
parser.add_argument(
"task_state",
type=str,
choices=[QUEUING, PROGRESS],
help="choose the current task_state of tasks you want to fail"
)
parser.add_argument(
'--before',
type=str,
dest='before',
help='Manually fail instructor tasks created before or on this date.',
)
parser.add_argument(
'--after',
type=str,
dest='after',
help='Manually fail instructor tasks created after or on this date.',
)
parser.add_argument(
'--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Return the records this command will update without updating them.',
)
parser.add_argument(
'--task-type',
dest='task_type',
type=str,
default=None,
help='Specify the type of task that you want to fail.',
)
@staticmethod
def parse_date(date_string):
"""
Converts an isoformat string into a python datetime object. Localizes
that datetime object to UTC.
"""
return utc.localize(datetime.strptime(date_string, "%Y-%m-%d"))
def handle(self, *args, **options):
if options['before'] is None:
raise CommandError("Must provide a 'before' date")
if options['after'] is None:
raise CommandError("Must provide an 'after' date")
before = self.parse_date(options['before'])
after = self.parse_date(options['after'])
filter_kwargs = {
"task_state": options['task_state'],
"created__lte": before,
"created__gte": after,
}
if options['task_type'] is not None:
filter_kwargs.update({"task_type": options['task_type']})
tasks = InstructorTask.objects.filter(**filter_kwargs)
for task in tasks:
print(
"{task_state} task '{task_id}', of type '{task_type}', created on '{created}', will be marked as 'FAILURE'".format(
task_state=task.task_state,
task_id=task.task_id,
task_type=task.task_type,
created=task.created,
)
)
if not options['dry_run']:
tasks_updated = tasks.update(
task_state=FAILURE,
)
print("{tasks_updated} records updated.".format(
tasks_updated=tasks_updated
))
else:
print(
"This was a dry run, so no records were updated. "
"If this command were run for real, {number} records would have been updated.".format(
number=tasks.count()
)
)
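# A hedged illustration (not part of the command itself): with the example
# invocation from the class docstring, handle() builds a queryset equivalent to
#
#   InstructorTask.objects.filter(
#       task_state=QUEUING,
#       created__lte=Command.parse_date("2001-01-06"),
#       created__gte=Command.parse_date("2001-01-03"),
#       task_type="bulk_course_email",
#   )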
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Given an encoded string, return its decoded string.
# The encoding rule is: k[encoded_string], where the encoded_string
# inside the square brackets is repeated exactly k times.
# Note that k is guaranteed to be a positive integer.
# You may assume that the input string is always valid; no extra white spaces,
# and the square brackets are well-formed.
# Furthermore, you may assume that the original data does not contain any
# digits and that digits are only used for the repeat numbers, k.
# For example, there won't be input like 3a or 2[4].
# Examples:
# s = "3[a]2[bc]", return "aaabcbc".
# s = "3[a2[c]]", return "accaccacc".
# s = "2[abc]3[cd]ef", return "abcabccdcdcdef".
def decode_string(s):
"""
:type s: str
:rtype: str
"""
    stack = []
    cur_num = 0
    cur_string = ''
for c in s:
if c == '[':
stack.append((cur_string, cur_num))
cur_string = ''
cur_num = 0
elif c == ']':
prev_string, num = stack.pop()
cur_string = prev_string + num * cur_string
elif c.isdigit():
cur_num = cur_num*10 + int(c)
else:
cur_string += c
return cur_string
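# A minimal usage sketch (assumed entry point, not part of the original
# snippet), exercising decode_string on the examples given above.
if __name__ == '__main__':
    assert decode_string("3[a]2[bc]") == "aaabcbc"
    assert decode_string("3[a2[c]]") == "accaccacc"
    assert decode_string("2[abc]3[cd]ef") == "abcabccdcdcdef"
    print(decode_string("3[a2[c]]"))  # -> accaccacc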
|
unknown
|
codeparrot/codeparrot-clean
| ||
#This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 2001 Al Riddoch (See the file COPYING for details).
from cyphesis.Thing import Thing
from atlas import *
from Vector3D import Vector3D
# bbox = 5,4,2.5
# bmedian = 4.5,4.5,2.5
# offset = SW corner = -0.5,0.5,0
class House_deco_1(Thing):
def setup_operation(self, op):
ret = Oplist()
# South wall
loc = Location(self, Vector3D(-0.5,0.5,0))
loc.bbox = Vector3D(6,0.5,5)
ret.append(Operation("create",Entity(name='wall',parents=['wall'],location=loc),to=self))
loc = Location(self, Vector3D(7.5,0.5,0))
loc.bbox = Vector3D(2,0.5,5)
ret.append(Operation("create",Entity(name='wall',parents=['wall'],location=loc),to=self))
# West wall
loc = Location(self, Vector3D(-0.5,0.5,0))
loc.bbox = Vector3D(0.5,8,5)
ret.append(Operation("create",Entity(name='wall',parents=['wall'],location=loc),to=self))
# North wall
loc = Location(self, Vector3D(-0.5,8,0))
loc.bbox = Vector3D(10,0.5,5)
ret.append(Operation("create",Entity(name='wall',parents=['wall'],location=loc),to=self))
# East wall
loc = Location(self, Vector3D(9,0.5,0))
loc.bbox = Vector3D(0.5,8,5)
ret.append(Operation("create",Entity(name='wall',parents=['wall'],location=loc),to=self))
return ret
|
unknown
|
codeparrot/codeparrot-clean
| ||
#! /usr/bin/env python
"""
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function context_diff(a, b):
For two lists of strings, return a delta in context diff format.
Function ndiff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function restore(delta, which):
Return one of the two sequences that generated an ndiff delta.
Function unified_diff(a, b):
For two lists of strings, return a delta in unified diff format.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
Class HtmlDiff:
For producing HTML side by side comparison with change highlights.
"""
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
'unified_diff', 'HtmlDiff', 'Match']
import heapq
from collections import namedtuple as _namedtuple
from functools import reduce
Match = _namedtuple('Match', 'a b size')
def _calculate_ratio(matches, length):
if length:
return 2.0 * matches / length
return 1.0
class SequenceMatcher:
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by Ratcliff and Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches peoples' eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to synching up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print round(s.ratio(), 3)
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print "a[%d] and b[%d] match for %d elements" % block
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 21 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print "%6s a[%d:%d] b[%d:%d]" % opcode
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:29] b[17:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
Methods:
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
set_seqs(a, b)
Set the two sequences to be compared.
set_seq1(a)
Set the first sequence to be compared.
set_seq2(b)
Set the second sequence to be compared.
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
Return list of triples describing matching subsequences.
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
real_quick_ratio()
Return an upper bound on ratio() very quickly.
"""
def __init__(self, isjunk=None, a='', b=''):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk elements do not appear
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use isbjunk.
# isbjunk
# for x in b, isbjunk(x) == isjunk(x) but much faster;
# it's really the __contains__ method of a hidden dict.
# DOES NOT WORK for x in a!
# isbpopular
# for x in b, isbpopular(x) is true iff b is reasonably long
# (at least 200 elements) and x accounts for more than 1% of
# its elements. DOES NOT WORK for x in a!
self.isjunk = isjunk
self.a = self.b = None
self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# also creates the fast isbjunk function ...
# b2j also does not contain entries for "popular" elements, meaning
# elements that account for more than 1% of the total elements, and
# when the sequence is reasonably large (>= 200 elements); this can
# be viewed as an adaptive notion of semi-junk, and yields an enormous
# speedup when, e.g., comparing program files with hundreds of
# instances of "return NULL;" ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# Jim Roskind, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
n = len(b)
self.b2j = b2j = {}
populardict = {}
for i, elt in enumerate(b):
if elt in b2j:
indices = b2j[elt]
if n >= 200 and len(indices) * 100 > n:
populardict[elt] = 1
del indices[:]
else:
indices.append(i)
else:
b2j[elt] = [i]
# Purge leftover indices for popular elements.
for elt in populardict:
del b2j[elt]
# Now b2j.keys() contains elements uniquely, and especially when
# the sequence is a string, that's usually a good deal smaller
# than len(string). The difference is the number of isjunk calls
# saved.
isjunk = self.isjunk
junkdict = {}
if isjunk:
for d in populardict, b2j:
for elt in d.keys():
if isjunk(elt):
junkdict[elt] = 1
del d[elt]
# Now for x in b, isjunk(x) == x in junkdict, but the
# latter is much faster. Note too that while there may be a
# lot of junk in the sequence, the number of *unique* junk
# elements is probably small. So the memory burden of keeping
# this dict alive is likely trivial compared to the size of b2j.
self.isbjunk = junkdict.__contains__
self.isbpopular = populardict.__contains__
def find_longest_match(self, alo, ahi, blo, bhi):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
i <= i'
and if i == i', j <= j'
In other words, of all maximal matching blocks, return one that
starts earliest in a, and of all those maximal matching blocks that
start earliest in a, return the one that starts earliest in b.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=0, b=4, size=5)
If isjunk is defined, first the longest matching block is
determined as above, but with the additional restriction that no
junk element appears in the block. Then that block is extended as
far as possible by matching (only) junk elements on both sides. So
the resulting block never matches on junk except as identical junk
happens to be adjacent to an "interesting" match.
Here's the same example as before, but considering blanks to be
junk. That prevents " abcd" from matching the " abcd" at the tail
end of the second sequence directly. Instead only the "abcd" can
match, and matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=1, b=0, size=4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
Match(a=0, b=0, size=0)
"""
# CAUTION: stripping common prefix or suffix would be incorrect.
# E.g.,
# ab
# acab
# Longest matching block is "ab", but if common prefix is
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
# strip, so ends up claiming that ab is changed to acab by
# inserting "ca" in the middle. That's minimal but unintuitive:
# "it's obvious" that someone inserted "ac" at the front.
# Windiff ends up at the same place as diff, but by pairing up
# the unique 'b's and then matching the first two 'a's.
a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
# junk-free match ending with a[i-1] and b[j]
j2len = {}
nothing = []
for i in xrange(alo, ahi):
# look at all instances of a[i] in b; note that because
# b2j has no junk keys, the loop is skipped if a[i] is junk
j2lenget = j2len.get
newj2len = {}
for j in b2j.get(a[i], nothing):
# a[i] matches b[j]
if j < blo:
continue
if j >= bhi:
break
k = newj2len[j] = j2lenget(j-1, 0) + 1
if k > bestsize:
besti, bestj, bestsize = i-k+1, j-k+1, k
j2len = newj2len
# Extend the best by non-junk elements on each end. In particular,
# "popular" non-junk elements aren't in b2j, which greatly speeds
# the inner loop above, but also means "the best" match so far
# doesn't contain any junk *or* popular non-junk elements.
while besti > alo and bestj > blo and \
not isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
not isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize += 1
# Now that we have a wholly interesting match (albeit possibly
# empty!), we may as well suck up the matching junk on each
# side of it too. Can't think of a good reason not to, and it
# saves post-processing the (possibly considerable) expense of
# figuring out what to do with it. In the case of an empty
# interesting match, this is clearly the right thing to do,
# because no other kind of match is possible in the regions.
while besti > alo and bestj > blo and \
isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize = bestsize + 1
return Match(besti, bestj, bestsize)
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j. New in Python 2.5, it's also guaranteed that if
(i, j, n) and (i', j', n') are adjacent triples in the list, and
the second is not the last triple in the list, then i+n != i' or
j+n != j'. IOW, adjacent triples never describe adjacent equal
blocks.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> s.get_matching_blocks()
[Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
la, lb = len(self.a), len(self.b)
# This is most naturally expressed as a recursive algorithm, but
# at least one user bumped into extreme use cases that exceeded
# the recursion limit on their box. So, now we maintain a list
        # (`queue`) of blocks we still need to look at, and append partial
# results to `matching_blocks` in a loop; the matches are sorted
# at the end.
queue = [(0, la, 0, lb)]
matching_blocks = []
while queue:
alo, ahi, blo, bhi = queue.pop()
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k: # if k is 0, there was no matching block
matching_blocks.append(x)
if alo < i and blo < j:
queue.append((alo, i, blo, j))
if i+k < ahi and j+k < bhi:
queue.append((i+k, ahi, j+k, bhi))
matching_blocks.sort()
# It's possible that we have adjacent equal blocks in the
# matching_blocks list now. Starting with 2.5, this code was added
# to collapse them.
i1 = j1 = k1 = 0
non_adjacent = []
for i2, j2, k2 in matching_blocks:
# Is this block adjacent to i1, j1, k1?
if i1 + k1 == i2 and j1 + k1 == j2:
# Yes, so collapse them -- this just increases the length of
# the first block by the length of the second, and the first
# block so lengthened remains the block to compare against.
k1 += k2
else:
# Not adjacent. Remember the first block (k1==0 means it's
# the dummy we started with), and make the second block the
# new block to compare against.
if k1:
non_adjacent.append((i1, j1, k1))
i1, j1, k1 = i2, j2, k2
if k1:
non_adjacent.append((i1, j1, k1))
non_adjacent.append( (la, lb, 0) )
self.matching_blocks = non_adjacent
return map(Match._make, self.matching_blocks)
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
"""
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = answer = []
for ai, bj, size in self.get_matching_blocks():
# invariant: we've pumped out correct diffs to change
# a[:i] into b[:j], and the next matching block is
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
# out a diff to change a[i:ai] into b[j:bj], pump out
# the matching block, and move (i,j) beyond the match
tag = ''
if i < ai and j < bj:
tag = 'replace'
elif i < ai:
tag = 'delete'
elif j < bj:
tag = 'insert'
if tag:
answer.append( (tag, i, ai, j, bj) )
i, j = ai+size, bj+size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
answer.append( ('equal', ai, i, bj, j) )
return answer
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
        Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = map(str, range(1,40))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == 'equal':
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
if codes[-1][0] == 'equal':
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == 'equal' and i2-i1 > nn:
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
yield group
group = []
i1, j1 = max(i1, i2-n), max(j1, j2-n)
            group.append((tag, i1, i2, j1, j2))
if group and not (len(group)==1 and group[0][0] == 'equal'):
yield group
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches = reduce(lambda sum, triple: sum + triple[-1],
self.get_matching_blocks(), 0)
return _calculate_ratio(matches, len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.__contains__, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return _calculate_ratio(matches, len(self.a) + len(self.b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return _calculate_ratio(min(la, lb), la + lb)
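# A hedged illustration (not in the original module) of how the three ratio
# methods relate on one concrete pair: each is an upper bound on the next.
#
# >>> s = SequenceMatcher(None, "abcd", "bcde")
# >>> s.real_quick_ratio()   # 2*min(4,4)/8 -- ignores content entirely
# 1.0
# >>> s.quick_ratio()        # multiset intersection {b,c,d}: 2*3/8
# 0.75
# >>> s.ratio()              # true matching blocks ("bcd"): 2*3/8
# 0.75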
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError("n must be > 0: %r" % (n,))
if not 0.0 <= cutoff <= 1.0:
raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and \
s.quick_ratio() >= cutoff and \
s.ratio() >= cutoff:
result.append((s.ratio(), x))
# Move the best scorers to head of list
result = heapq.nlargest(n, result)
# Strip scores for the best n matches
return [x for score, x in result]
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
class Differ:
r"""
Differ is a class for comparing sequences of lines of text, and
producing human-readable differences or deltas. Differ uses
SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
Each line of a Differ delta begins with a two-letter code:
'- ' line unique to sequence 1
'+ ' line unique to sequence 2
' ' line common to both sequences
'? ' line not present in either input sequence
Lines beginning with '? ' attempt to guide the eye to intraline
differences, and were not present in either input sequence. These lines
can be confusing if the sequences contain tab characters.
Note that Differ makes no claim to produce a *minimal* diff. To the
contrary, minimal diffs are often counter-intuitive, because they synch
up anywhere possible, sometimes accidental matches 100 pages apart.
Restricting synch points to contiguous matches preserves some notion of
locality, at the occasional cost of producing a longer diff.
Example: Comparing two texts.
First we set up the texts, sequences of individual single-line strings
ending with newlines (such sequences can also be obtained from the
`readlines()` method of file-like objects):
>>> text1 = ''' 1. Beautiful is better than ugly.
... 2. Explicit is better than implicit.
... 3. Simple is better than complex.
... 4. Complex is better than complicated.
... '''.splitlines(1)
>>> len(text1)
4
>>> text1[0][-1]
'\n'
>>> text2 = ''' 1. Beautiful is better than ugly.
... 3. Simple is better than complex.
... 4. Complicated is better than complex.
... 5. Flat is better than nested.
... '''.splitlines(1)
Next we instantiate a Differ object:
>>> d = Differ()
Note that when instantiating a Differ object we may pass functions to
filter out line and character 'junk'. See Differ.__init__ for details.
Finally, we compare the two:
>>> result = list(d.compare(text1, text2))
'result' is a list of strings, so let's pretty-print it:
>>> from pprint import pprint as _pprint
>>> _pprint(result)
[' 1. Beautiful is better than ugly.\n',
'- 2. Explicit is better than implicit.\n',
'- 3. Simple is better than complex.\n',
'+ 3. Simple is better than complex.\n',
'? ++\n',
'- 4. Complex is better than complicated.\n',
'? ^ ---- ^\n',
'+ 4. Complicated is better than complex.\n',
'? ++++ ^ ^\n',
'+ 5. Flat is better than nested.\n']
As a single multi-line string it looks like this:
>>> print ''.join(result),
1. Beautiful is better than ugly.
- 2. Explicit is better than implicit.
- 3. Simple is better than complex.
+ 3. Simple is better than complex.
? ++
- 4. Complex is better than complicated.
? ^ ---- ^
+ 4. Complicated is better than complex.
? ++++ ^ ^
+ 5. Flat is better than nested.
Methods:
__init__(linejunk=None, charjunk=None)
Construct a text differencer, with optional filters.
compare(a, b)
Compare two sequences of lines; generate the resulting delta.
"""
def __init__(self, linejunk=None, charjunk=None):
"""
Construct a text differencer, with optional filters.
The two optional keyword parameters are for filter functions:
- `linejunk`: A function that should accept a single string argument,
and return true iff the string is junk. The module-level function
`IS_LINE_JUNK` may be used to filter out lines without visible
characters, except for at most one splat ('#'). It is recommended
to leave linejunk None; as of Python 2.3, the underlying
SequenceMatcher class has grown an adaptive notion of "noise" lines
that's better than any static definition the author has ever been
able to craft.
- `charjunk`: A function that should accept a string of length 1. The
module-level function `IS_CHARACTER_JUNK` may be used to filter out
whitespace characters (a blank or tab; **note**: bad idea to include
newline in this!). Use of IS_CHARACTER_JUNK is recommended.
"""
self.linejunk = linejunk
self.charjunk = charjunk
def compare(self, a, b):
r"""
Compare two sequences of lines; generate the resulting delta.
Each sequence must contain individual single-line strings ending with
newlines. Such sequences can be obtained from the `readlines()` method
of file-like objects. The delta generated also consists of newline-
terminated strings, ready to be printed as-is via the writeline()
method of a file-like object.
Example:
>>> print ''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))),
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
cruncher = SequenceMatcher(self.linejunk, a, b)
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
if tag == 'replace':
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
elif tag == 'delete':
g = self._dump('-', a, alo, ahi)
elif tag == 'insert':
g = self._dump('+', b, blo, bhi)
elif tag == 'equal':
g = self._dump(' ', a, alo, ahi)
else:
                raise ValueError('unknown tag %r' % (tag,))
for line in g:
yield line
def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in xrange(lo, hi):
yield '%s %s' % (tag, x[i])
def _plain_replace(self, a, alo, ahi, b, blo, bhi):
assert alo < ahi and blo < bhi
# dump the shorter block first -- reduces the burden on short-term
# memory if the blocks are of very different sizes
if bhi - blo < ahi - alo:
first = self._dump('+', b, blo, bhi)
second = self._dump('-', a, alo, ahi)
else:
first = self._dump('-', a, alo, ahi)
second = self._dump('+', b, blo, bhi)
for g in first, second:
for line in g:
yield line
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
r"""
When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a
synch point, and intraline difference marking is done on the
similar pair. Lots of work, but often worth it.
Example:
>>> d = Differ()
>>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
... ['abcdefGhijkl\n'], 0, 1)
>>> print ''.join(results),
- abcDefghiJkl
? ^ ^ ^
+ abcdefGhijkl
? ^ ^ ^
"""
# don't synch up unless the lines have a similarity score of at
# least cutoff; best_ratio tracks the best score seen so far
best_ratio, cutoff = 0.74, 0.75
cruncher = SequenceMatcher(self.charjunk)
eqi, eqj = None, None # 1st indices of equal lines (if any)
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in xrange(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in xrange(alo, ahi):
ai = a[i]
if ai == bj:
if eqi is None:
eqi, eqj = i, j
continue
cruncher.set_seq1(ai)
# computing similarity is expensive, so use the quick
# upper bounds first -- have seen this speed up messy
# compares by a factor of 3.
# note that ratio() is only expensive to compute the first
# time it's called on a sequence pair; the expensive part
# of the computation is cached by cruncher
if cruncher.real_quick_ratio() > best_ratio and \
cruncher.quick_ratio() > best_ratio and \
cruncher.ratio() > best_ratio:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
if best_ratio < cutoff:
# no non-identical "pretty close" pair
if eqi is None:
# no identical pair either -- treat it as a straight replace
for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
yield line
return
# no close pair, but an identical pair -- synch up on that
best_i, best_j, best_ratio = eqi, eqj, 1.0
else:
# there's a close pair, so forget the identical pair (if any)
eqi = None
# a[best_i] very similar to b[best_j]; eqi is None iff they're not
# identical
# pump out diffs from before the synch point
for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
yield line
# do intraline marking on the synch pair
aelt, belt = a[best_i], b[best_j]
if eqi is None:
# pump out a '-', '?', '+', '?' quad for the synched lines
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
                    raise ValueError('unknown tag %r' % (tag,))
for line in self._qformat(aelt, belt, atags, btags):
yield line
else:
# the synch pair is identical
yield ' ' + aelt
# pump out diffs from after the synch point
for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
yield line
def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
g = []
if alo < ahi:
if blo < bhi:
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
else:
g = self._dump('-', a, alo, ahi)
elif blo < bhi:
g = self._dump('+', b, blo, bhi)
for line in g:
yield line
def _qformat(self, aline, bline, atags, btags):
r"""
Format "?" output and deal with leading tabs.
Example:
>>> d = Differ()
>>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
... ' ^ ^ ^ ', ' ^ ^ ^ ')
>>> for line in results: print repr(line)
...
'- \tabcDefghiJkl\n'
'? \t ^ ^ ^\n'
'+ \tabcdefGhijkl\n'
'? \t ^ ^ ^\n'
"""
# Can hurt, but will probably help most of the time.
common = min(_count_leading(aline, "\t"),
_count_leading(bline, "\t"))
common = min(common, _count_leading(atags[:common], " "))
common = min(common, _count_leading(btags[:common], " "))
atags = atags[common:].rstrip()
btags = btags[common:].rstrip()
yield "- " + aline
if atags:
yield "? %s%s\n" % ("\t" * common, atags)
yield "+ " + bline
if btags:
yield "? %s%s\n" % ("\t" * common, btags)
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
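# A hedged illustration (not in the original module) of the behavior just
# described, with whitespace treated as character junk (the ndiff default):
#
# >>> print ''.join(ndiff(["private Thread currentThread;\n"],
# ...                     ["private volatile Thread currentThread;\n"])),
# - private Thread currentThread;
# + private volatile Thread currentThread;
# ?         +++++++++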
import re
def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
r"""
Return 1 for ignorable line: iff `line` is blank or contains a single '#'.
Examples:
>>> IS_LINE_JUNK('\n')
True
>>> IS_LINE_JUNK(' # \n')
True
>>> IS_LINE_JUNK('hello\n')
False
"""
return pat(line) is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
r"""
Return 1 for ignorable character: iff `ch` is a space or tab.
Examples:
>>> IS_CHARACTER_JUNK(' ')
True
>>> IS_CHARACTER_JUNK('\t')
True
>>> IS_CHARACTER_JUNK('\n')
False
>>> IS_CHARACTER_JUNK('x')
False
"""
return ch in ws
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a unified diff.
Unified diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with ---, +++, or @@) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The unidiff format normally has a header for filenames and modification
times. Any or all of these may be specified using strings for
'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. The modification
times are normally expressed in the format returned by time.ctime().
Example:
>>> for line in unified_diff('one two three four'.split(),
... 'zero one tree four'.split(), 'Original', 'Current',
... 'Sat Jan 26 23:30:50 1991', 'Fri Jun 06 10:20:52 2003',
... lineterm=''):
... print line
--- Original Sat Jan 26 23:30:50 1991
+++ Current Fri Jun 06 10:20:52 2003
@@ -1,4 +1,4 @@
+zero
one
-two
-three
+tree
four
"""
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
yield '--- %s %s%s' % (fromfile, fromfiledate, lineterm)
yield '+++ %s %s%s' % (tofile, tofiledate, lineterm)
started = True
i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
yield "@@ -%d,%d +%d,%d @@%s" % (i1+1, i2-i1, j1+1, j2-j1, lineterm)
for tag, i1, i2, j1, j2 in group:
if tag == 'equal':
for line in a[i1:i2]:
yield ' ' + line
continue
if tag == 'replace' or tag == 'delete':
for line in a[i1:i2]:
yield '-' + line
if tag == 'replace' or tag == 'insert':
for line in b[j1:j2]:
yield '+' + line
# See http://www.unix.org/single_unix_specification/
def context_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a context diff.
Context diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with *** or ---) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The context diff format normally has a header for filenames and
modification times. Any or all of these may be specified using
strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the format returned
by time.ctime(). If not specified, the strings default to blanks.
Example:
>>> print ''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(1),
... 'zero\none\ntree\nfour\n'.splitlines(1), 'Original', 'Current',
... 'Sat Jan 26 23:30:50 1991', 'Fri Jun 06 10:22:46 2003')),
*** Original Sat Jan 26 23:30:50 1991
--- Current Fri Jun 06 10:22:46 2003
***************
*** 1,4 ****
one
! two
! three
four
--- 1,4 ----
+ zero
one
! tree
four
"""
started = False
prefixmap = {'insert':'+ ', 'delete':'- ', 'replace':'! ', 'equal':' '}
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
yield '*** %s %s%s' % (fromfile, fromfiledate, lineterm)
yield '--- %s %s%s' % (tofile, tofiledate, lineterm)
started = True
yield '***************%s' % (lineterm,)
if group[-1][2] - group[0][1] >= 2:
yield '*** %d,%d ****%s' % (group[0][1]+1, group[-1][2], lineterm)
else:
yield '*** %d ****%s' % (group[-1][2], lineterm)
visiblechanges = [e for e in group if e[0] in ('replace', 'delete')]
if visiblechanges:
for tag, i1, i2, _, _ in group:
if tag != 'insert':
for line in a[i1:i2]:
yield prefixmap[tag] + line
if group[-1][4] - group[0][3] >= 2:
yield '--- %d,%d ----%s' % (group[0][3]+1, group[-1][4], lineterm)
else:
yield '--- %d ----%s' % (group[-1][4], lineterm)
visiblechanges = [e for e in group if e[0] in ('replace', 'insert')]
if visiblechanges:
for tag, _, _, j1, j2 in group:
if tag != 'delete':
for line in b[j1:j2]:
yield prefixmap[tag] + line
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
r"""
Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
Optional keyword parameters `linejunk` and `charjunk` are for filter
functions (or None):
- linejunk: A function that should accept a single string argument, and
return true iff the string is junk. The default is None, and is
recommended; as of Python 2.3, an adaptive notion of "noise" lines is
used that does a good job on its own.
- charjunk: A function that should accept a string of length 1. The
default is module-level function IS_CHARACTER_JUNK, which filters out
whitespace characters (a blank or tab; note: bad idea to include newline
in this!).
Tools/scripts/ndiff.py is a command-line front-end to this function.
Example:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))
>>> print ''.join(diff),
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
return Differ(linejunk, charjunk).compare(a, b)
def _mdiff(fromlines, tolines, context=None, linejunk=None,
charjunk=IS_CHARACTER_JUNK):
r"""Returns generator yielding marked up from/to side by side differences.
Arguments:
    fromlines -- list of text lines to be compared to tolines
tolines -- list of text lines to be compared to fromlines
context -- number of context lines to display on each side of difference,
if None, all from/to text lines will be generated.
linejunk -- passed on to ndiff (see ndiff documentation)
charjunk -- passed on to ndiff (see ndiff documentation)
    This function returns an iterator which returns a tuple:
(from line tuple, to line tuple, boolean flag)
from/to line tuple -- (line num, line text)
line num -- integer or None (to indicate a context separation)
line text -- original line text with following markers inserted:
'\0+' -- marks start of added text
'\0-' -- marks start of deleted text
'\0^' -- marks start of changed text
'\1' -- marks end of added/deleted/changed text
boolean flag -- None indicates context separation, True indicates
either "from" or "to" line contains a change, otherwise False.
This function/iterator was originally developed to generate side by side
file difference for making HTML pages (see HtmlDiff class for example
usage).
Note, this function utilizes the ndiff function to generate the side by
side difference markup. Optional ndiff arguments may be passed to this
function and they in turn will be passed to ndiff.
"""
import re
# regular expression for finding intraline change indices
    change_re = re.compile(r'(\++|\-+|\^+)')
# create the difference iterator to generate the differences
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
def _make_line(lines, format_key, side, num_lines=[0,0]):
"""Returns line of text with user's change markup and line formatting.
lines -- list of lines from the ndiff generator to produce a line of
text from. When producing the line of text to return, the
lines used are removed from this list.
format_key -- '+' return first line in list with "add" markup around
the entire line.
'-' return first line in list with "delete" markup around
the entire line.
'?' return first line in list with add/delete/change
intraline markup (indices obtained from second line)
None return first line in list with no markup
        side -- index into the num_lines list (0=from,1=to)
num_lines -- from/to current line number. This is NOT intended to be a
passed parameter. It is present as a keyword argument to
maintain memory of the current line numbers between calls
of this function.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
num_lines[side] += 1
# Handle case where no user markup is to be added, just return line of
# text with user's line format to allow for usage of the line number.
if format_key is None:
return (num_lines[side],lines.pop(0)[2:])
# Handle case of intraline changes
if format_key == '?':
text, markers = lines.pop(0), lines.pop(0)
# find intraline changes (store change type and indices in tuples)
sub_info = []
def record_sub_info(match_object,sub_info=sub_info):
sub_info.append([match_object.group(1)[0],match_object.span()])
return match_object.group(1)
change_re.sub(record_sub_info,markers)
# process each tuple inserting our special marks that won't be
# noticed by an xml/html escaper.
for key,(begin,end) in sub_info[::-1]:
text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
text = text[2:]
# Handle case of add/delete entire line
else:
text = lines.pop(0)[2:]
# if line of text is just a newline, insert a space so there is
# something for the user to highlight and see.
if not text:
text = ' '
# insert marks that won't be noticed by an xml/html escaper.
text = '\0' + format_key + text + '\1'
# Return line of text, first allow user's line formatter to do its
# thing (such as adding the line number) then replace the special
        # marks with the user's change markup.
return (num_lines[side],text)
def _line_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from a
differencing iterator, processes them and yields them. When it can
it yields both a "from" and a "to" line, otherwise it will yield one
or the other. In addition to yielding the lines of from/to text, a
boolean flag is yielded to indicate if the text line(s) have
differences in them.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
lines = []
num_blanks_pending, num_blanks_to_yield = 0, 0
while True:
# Load up next 4 lines so we can look ahead, create strings which
# are a concatenation of the first character of each of the 4 lines
# so we can do some very readable comparisons.
while len(lines) < 4:
try:
lines.append(diff_lines_iterator.next())
except StopIteration:
lines.append('X')
s = ''.join([line[0] for line in lines])
if s.startswith('X'):
# When no more lines, pump out any remaining blank lines so the
# corresponding add/delete lines get a matching blank line so
# all line pairs get yielded at the next level.
num_blanks_to_yield = num_blanks_pending
elif s.startswith('-?+?'):
# simple intraline change
yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
continue
elif s.startswith('--++'):
# in delete block, add block coming: we do NOT want to get
# caught up on blank lines yet, just process the delete line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith(('--?+', '--+', '- ')):
# in delete block and see a intraline change or unchanged line
# coming: yield the delete line and then blanks
from_line,to_line = _make_line(lines,'-',0), None
num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
elif s.startswith('-+?'):
# intraline change
yield _make_line(lines,None,0), _make_line(lines,'?',1), True
continue
elif s.startswith('-?+'):
# intraline change
yield _make_line(lines,'?',0), _make_line(lines,None,1), True
continue
elif s.startswith('-'):
# delete FROM line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith('+--'):
# in add block, delete block coming: we do NOT want to get
# caught up on blank lines yet, just process the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(('+ ', '+-')):
# will be leaving an add block: yield blanks then add line
from_line, to_line = None, _make_line(lines,'+',1)
num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
elif s.startswith('+'):
# inside an add block, yield the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(' '):
# unchanged text, yield it to both sides
yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
continue
# Catch up on the blank lines so when we yield the next from/to
# pair, they are lined up.
while(num_blanks_to_yield < 0):
num_blanks_to_yield += 1
yield None,('','\n'),True
while(num_blanks_to_yield > 0):
num_blanks_to_yield -= 1
yield ('','\n'),None,True
if s.startswith('X'):
raise StopIteration
else:
yield from_line,to_line,True
def _line_pair_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from the line
iterator. Its difference from that iterator is that this function
always yields a pair of from/to text lines (with the change
indication). If necessary it will collect single from/to lines
until it has a matching pair from/to pair to yield.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
line_iterator = _line_iterator()
fromlines,tolines=[],[]
while True:
# Collecting lines of text until we have a from/to pair
while (len(fromlines)==0 or len(tolines)==0):
from_line, to_line, found_diff =line_iterator.next()
if from_line is not None:
fromlines.append((from_line,found_diff))
if to_line is not None:
tolines.append((to_line,found_diff))
# Once we have a pair, remove them from the collection and yield it
from_line, fromDiff = fromlines.pop(0)
to_line, to_diff = tolines.pop(0)
yield (from_line,to_line,fromDiff or to_diff)
# Handle case where user does not want context differencing, just yield
# them up without doing anything else with them.
line_pair_iterator = _line_pair_iterator()
if context is None:
while True:
yield line_pair_iterator.next()
# Handle case where user wants context differencing. We must do some
# storage of lines until we know for sure that they are to be yielded.
else:
context += 1
lines_to_write = 0
while True:
# Store lines up until we find a difference, note use of a
# circular queue because we only need to keep around what
# we need for context.
index, contextLines = 0, [None]*(context)
found_diff = False
while(found_diff is False):
from_line, to_line, found_diff = line_pair_iterator.next()
i = index % context
contextLines[i] = (from_line, to_line, found_diff)
index += 1
# Yield lines that we have collected so far, but first yield
# the user's separator.
if index > context:
yield None, None, None
lines_to_write = context
else:
lines_to_write = index
index = 0
while(lines_to_write):
i = index % context
index += 1
yield contextLines[i]
lines_to_write -= 1
# Now yield the context lines after the change
lines_to_write = context-1
while(lines_to_write):
from_line, to_line, found_diff = line_pair_iterator.next()
# If another change within the context, extend the context
if found_diff:
lines_to_write = context-1
else:
lines_to_write -= 1
yield from_line, to_line, found_diff
_file_template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=ISO-8859-1" />
<title></title>
<style type="text/css">%(styles)s
</style>
</head>
<body>
%(table)s%(legend)s
</body>
</html>"""
_styles = """
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""
_table_template = """
<table class="diff" id="difflib_chg_%(prefix)s_top"
cellspacing="0" cellpadding="0" rules="groups" >
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
%(header_row)s
<tbody>
%(data_rows)s </tbody>
</table>"""
_legend = """
<table class="diff" summary="Legends">
<tr> <th colspan="2"> Legends </th> </tr>
<tr> <td> <table border="" summary="Colors">
<tr><th> Colors </th> </tr>
<tr><td class="diff_add"> Added </td></tr>
<tr><td class="diff_chg">Changed</td> </tr>
<tr><td class="diff_sub">Deleted</td> </tr>
</table></td>
<td> <table border="" summary="Links">
<tr><th colspan="2"> Links </th> </tr>
<tr><td>(f)irst change</td> </tr>
<tr><td>(n)ext change</td> </tr>
<tr><td>(t)op</td> </tr>
</table></td> </tr>
</table>"""
class HtmlDiff(object):
"""For producing HTML side by side comparison with change highlights.
This class can be used to create an HTML table (or a complete HTML file
containing the table) showing a side by side, line by line comparison
of text with inter-line and intra-line change highlights. The table can
be generated in either full or contextual difference mode.
The following methods are provided for HTML generation:
make_table -- generates HTML for a single side by side table
make_file -- generates complete HTML file with a single side by side table
See tools/scripts/diff.py for an example usage of this class.
"""
_file_template = _file_template
_styles = _styles
_table_template = _table_template
_legend = _legend
_default_prefix = 0
def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
charjunk=IS_CHARACTER_JUNK):
"""HtmlDiff instance initializer
Arguments:
tabsize -- tab stop spacing, defaults to 8.
wrapcolumn -- column number where lines are broken and wrapped,
defaults to None where lines are not wrapped.
linejunk,charjunk -- keyword arguments passed into ndiff() (used by
HtmlDiff() to generate the side by side HTML differences). See
ndiff() documentation for argument default values and descriptions.
"""
self._tabsize = tabsize
self._wrapcolumn = wrapcolumn
self._linejunk = linejunk
self._charjunk = charjunk
def make_file(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML file of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
return self._file_template % dict(
styles = self._styles,
legend = self._legend,
table = self.make_table(fromlines,tolines,fromdesc,todesc,
context=context,numlines=numlines))
def _tab_newline_replace(self,fromlines,tolines):
"""Returns from/to line lists with tabs expanded and newlines removed.
Instead of tab characters being replaced by the number of spaces
needed to fill in to the next tab stop, this function will fill
the space with tab characters. This is done so that the difference
algorithms can identify changes in a file when tabs are replaced by
spaces and vice versa. At the end of the HTML generation, the tab
characters will be replaced with a nonbreakable space.
"""
def expand_tabs(line):
# hide real spaces
line = line.replace(' ','\0')
# expand tabs into spaces
line = line.expandtabs(self._tabsize)
# replace spaces from expanded tabs back into tab characters
# (we'll replace them with markup after we do differencing)
line = line.replace(' ','\t')
return line.replace('\0',' ').rstrip('\n')
fromlines = [expand_tabs(line) for line in fromlines]
tolines = [expand_tabs(line) for line in tolines]
return fromlines,tolines
def _split_line(self,data_list,line_num,text):
"""Builds list of text lines by splitting text lines at wrap point
This function will determine if the input text line needs to be
wrapped (split) into separate lines. If so, the first wrap point
will be determined and the first line appended to the output
text line list. This function is used recursively to handle
the second part of the split line to further split it.
"""
# if blank line or context separator, just add it to the output list
if not line_num:
data_list.append((line_num,text))
return
# if line text doesn't need wrapping, just add it to the output list
size = len(text)
max = self._wrapcolumn
if (size <= max) or ((size -(text.count('\0')*3)) <= max):
data_list.append((line_num,text))
return
# scan text looking for the wrap point, keeping track if the wrap
# point is inside markers
i = 0
n = 0
mark = ''
while n < max and i < size:
if text[i] == '\0':
i += 1
mark = text[i]
i += 1
elif text[i] == '\1':
i += 1
mark = ''
else:
i += 1
n += 1
# wrap point is inside text, break it up into separate lines
line1 = text[:i]
line2 = text[i:]
# if wrap point is inside markers, place end marker at end of first
# line and start marker at beginning of second line because each
# line will have its own table tag markup around it.
if mark:
line1 = line1 + '\1'
line2 = '\0' + mark + line2
# tack on first line onto the output list
data_list.append((line_num,line1))
# use this routine again to wrap the remaining text
self._split_line(data_list,'>',line2)
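# Illustrative sketch (not in the original): with _wrapcolumn=4, a marked-up
# line such as '\0+abcdef\1' is split into (line_num, '\0+abcd\1') followed
# by ('>', '\0+ef\1') -- the marker is closed at the wrap point and reopened
# on the continuation line so each table cell carries complete markup.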
def _line_wrapper(self,diffs):
"""Returns iterator that splits (wraps) mdiff text lines"""
# pull from/to data and flags from mdiff iterator
for fromdata,todata,flag in diffs:
# check for context separators and pass them through
if flag is None:
yield fromdata,todata,flag
continue
(fromline,fromtext),(toline,totext) = fromdata,todata
# for each from/to line split it at the wrap column to form
# list of text lines.
fromlist,tolist = [],[]
self._split_line(fromlist,fromline,fromtext)
self._split_line(tolist,toline,totext)
# yield from/to line in pairs inserting blank lines as
# necessary when one side has more wrapped lines
while fromlist or tolist:
if fromlist:
fromdata = fromlist.pop(0)
else:
fromdata = ('',' ')
if tolist:
todata = tolist.pop(0)
else:
todata = ('',' ')
yield fromdata,todata,flag
def _collect_lines(self,diffs):
"""Collects mdiff output into separate lists
Before storing the mdiff from/to data into a list, it is converted
into a single line of text with HTML markup.
"""
fromlist,tolist,flaglist = [],[],[]
# pull from/to data and flags from mdiff style iterator
for fromdata,todata,flag in diffs:
try:
# store HTML markup of the lines into the lists
fromlist.append(self._format_line(0,flag,*fromdata))
tolist.append(self._format_line(1,flag,*todata))
except TypeError:
# exceptions occur for lines where context separators go
fromlist.append(None)
tolist.append(None)
flaglist.append(flag)
return fromlist,tolist,flaglist
def _format_line(self,side,flag,linenum,text):
"""Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
"""
try:
linenum = '%d' % linenum
id = ' id="%s%s"' % (self._prefix[side],linenum)
except TypeError:
# handle blank lines where linenum is '>' or ''
id = ''
# replace those things that would get confused with HTML symbols
text=text.replace("&","&amp;").replace(">","&gt;").replace("<","&lt;")
# make spaces non-breakable so they don't get compressed or line wrapped
text = text.replace(' ','&nbsp;').rstrip()
return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
% (id,linenum,text)
def _make_prefix(self):
"""Create unique anchor prefixes"""
# Generate a unique anchor prefix so multiple tables
# can exist on the same HTML page without conflicts.
fromprefix = "from%d_" % HtmlDiff._default_prefix
toprefix = "to%d_" % HtmlDiff._default_prefix
HtmlDiff._default_prefix += 1
# store prefixes so line format method has access
self._prefix = [fromprefix,toprefix]
def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
"""Makes list of "next" links"""
# all anchor names will be generated using the unique "to" prefix
toprefix = self._prefix[1]
# process change flags, generating middle column of next anchors/links
next_id = ['']*len(flaglist)
next_href = ['']*len(flaglist)
num_chg, in_change = 0, False
last = 0
for i,flag in enumerate(flaglist):
if flag:
if not in_change:
in_change = True
last = i
# at the beginning of a change, drop an anchor a few lines
# (the context lines) before the change for the previous
# link
i = max([0,i-numlines])
next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
# at the beginning of a change, drop a link to the next
# change
num_chg += 1
next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
toprefix,num_chg)
else:
in_change = False
# check for cases where there is no content to avoid exceptions
if not flaglist:
flaglist = [False]
next_id = ['']
next_href = ['']
last = 0
if context:
fromlist = ['<td></td><td> No Differences Found </td>']
tolist = fromlist
else:
fromlist = tolist = ['<td></td><td> Empty File </td>']
# if not a change on first line, drop a link
if not flaglist[0]:
next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
# redo the last link to link to the top
next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
return fromlist,tolist,flaglist,next_href,next_id
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML table of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
# make unique anchor prefixes so that multiple tables may exist
# on the same page without conflict.
self._make_prefix()
# change tabs to spaces before it gets more difficult after we insert
# markup
fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
# create diffs iterator which generates side by side from/to data
if context:
context_lines = numlines
else:
context_lines = None
diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
charjunk=self._charjunk)
# set up iterator to wrap lines that exceed desired width
if self._wrapcolumn:
diffs = self._line_wrapper(diffs)
# collect up from/to lines and flags into lists (also format the lines)
fromlist,tolist,flaglist = self._collect_lines(diffs)
# process change flags, generating middle column of next anchors/links
fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
fromlist,tolist,flaglist,context,numlines)
s = []
fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
'<td class="diff_next">%s</td>%s</tr>\n'
for i in range(len(flaglist)):
if flaglist[i] is None:
# mdiff yields None on separator lines; skip the bogus ones
# generated for the first line
if i > 0:
s.append(' </tbody> \n <tbody>\n')
else:
s.append( fmt % (next_id[i],next_href[i],fromlist[i],
next_href[i],tolist[i]))
if fromdesc or todesc:
header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % fromdesc,
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % todesc)
else:
header_row = ''
table = self._table_template % dict(
data_rows=''.join(s),
header_row=header_row,
prefix=self._prefix[1])
return table.replace('\0+','<span class="diff_add">'). \
replace('\0-','<span class="diff_sub">'). \
replace('\0^','<span class="diff_chg">'). \
replace('\1','</span>'). \
replace('\t','&nbsp;')
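# Usage sketch (an assumption-laden example, not part of the module): with
# this file importable as difflib under Python 2,
#     d = HtmlDiff(wrapcolumn=70)
#     html = d.make_file(fromlines, tolines, 'before', 'after',
#                        context=True, numlines=3)
# returns a complete page; make_table() returns just the <table> element for
# embedding in an existing page. fromlines/tolines are plain lists of lines.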
del re
def restore(delta, which):
r"""
Generate one of the two sequences that generated a delta.
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
lines originating from file 1 or 2 (parameter `which`), stripping off line
prefixes.
Examples:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))
>>> diff = list(diff)
>>> print ''.join(restore(diff, 1)),
one
two
three
>>> print ''.join(restore(diff, 2)),
ore
tree
emu
"""
try:
tag = {1: "- ", 2: "+ "}[int(which)]
except KeyError:
raise ValueError, ('unknown delta choice (must be 1 or 2): %r'
% which)
prefixes = (" ", tag)
for line in delta:
if line[:2] in prefixes:
yield line[2:]
def _test():
import doctest, difflib
return doctest.testmod(difflib)
if __name__ == "__main__":
_test()
| language: unknown | source: codeparrot/codeparrot-clean |
/* Generated file to emulate the fakes namespace. */
export * from "../../harness/_namespaces/fakes.js";
| language: typescript | source: github | repo: https://github.com/microsoft/TypeScript | path: src/testRunner/_namespaces/fakes.ts |
#!/usr/bin/env python3
"""
This script reads the input from stdin, extracts all lines starting with
"# FDATA: " (or a given prefix instead of "FDATA"), parses the directives,
replaces symbol names ("#name#") with either symbol values or with offsets from
respective anchor symbols, and prints the resulting file to stdout.
"""
import argparse
import os
import platform
import shutil
import subprocess
import sys
import re
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("objfile", help="Object file to extract symbol values from")
parser.add_argument("output")
parser.add_argument("prefix", nargs="?", default="FDATA", help="Custom FDATA prefix")
parser.add_argument(
"--nmtool",
default="llvm-nm" if platform.system() == "Windows" else "nm",
help="Path to nm tool",
)
parser.add_argument("--no-lbr", action="store_true")
parser.add_argument("--no-redefine", action="store_true")
args = parser.parse_args()
# Regexes to extract FDATA lines from input and parse FDATA and pre-aggregated
# profile data
prefix_pat = re.compile(f"^(#|//) {args.prefix}: (.*)")
# FDATA records:
# <is symbol?> <closest elf symbol or DSO name> <relative FROM address>
# <is symbol?> <closest elf symbol or DSO name> <relative TO address>
# <number of mispredictions> <number of branches>
fdata_pat = re.compile(r"([01].*) (?P<mispred>\d+) (?P<exec>\d+)")
# Pre-aggregated profile:
# {T|R|S|E|B|F|f|r} <start> [<end>] [<ft_end>] <count> [<mispred_count>]
# <loc>: [<id>:]<offset>
preagg_pat = re.compile(r"(?P<type>[TRSBFfr]) (?P<offsets_count>.*)")
# No-LBR profile:
# <is symbol?> <closest elf symbol or DSO name> <relative address> <count>
nolbr_pat = re.compile(r"([01].*) (?P<count>\d+)")
# Replacement symbol: #symname#
replace_pat = re.compile(r"#(?P<symname>[^#]+)#")
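# Illustrative (hypothetical) input line this script accepts, where main,
# sym1 and sym2 stand for symbols defined in the object file:
#   # FDATA: 1 main #sym1# 1 main #sym2# 0 100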
# Read input and construct the representation of fdata expressions
# as (src_tuple, dst_tuple, mispred_count, exec_count) tuples, where src and dst
# are represented as (is_sym, anchor, offset) tuples
exprs = []
with open(args.input, "r") as f:
for line in f.readlines():
prefix_match = prefix_pat.match(line)
if not prefix_match:
continue
profile_line = prefix_match.group(2)
fdata_match = fdata_pat.match(profile_line)
preagg_match = preagg_pat.match(profile_line)
nolbr_match = nolbr_pat.match(profile_line)
if fdata_match:
src_dst, mispred, execnt = fdata_match.groups()
# Split by whitespaces not preceded by a backslash (negative lookbehind)
chunks = re.split(r"(?<!\\) +", src_dst)
# Check if the number of records separated by non-escaped whitespace
# exactly matches the format.
assert (
len(chunks) == 6
), f"ERROR: wrong format/whitespaces must be escaped:\n{line}"
exprs.append(("FDATA", (*chunks, mispred, execnt)))
elif nolbr_match:
loc, count = nolbr_match.groups()
# Split by whitespaces not preceded by a backslash (negative lookbehind)
chunks = re.split(r"(?<!\\) +", loc)
# Check if the number of records separated by non-escaped whitespace
# exactly matches the format.
assert (
len(chunks) == 3
), f"ERROR: wrong format/whitespaces must be escaped:\n{line}"
exprs.append(("NOLBR", (*chunks, count)))
elif preagg_match:
exprs.append(("PREAGG", preagg_match.groups()))
else:
exit("ERROR: unexpected input:\n%s" % line)
# Read nm output: <symbol value> <symbol type> <symbol name>
# Ignore .exe on Windows host.
is_llvm_nm = os.path.basename(os.path.realpath(shutil.which(args.nmtool))).startswith(
"llvm-nm"
)
nm_output = subprocess.run(
[
args.nmtool,
"--defined-only",
"--special-syms" if is_llvm_nm else "--synthetic",
args.objfile,
],
text=True,
capture_output=True,
).stdout
# Populate symbol map
symbols = {}
for symline in nm_output.splitlines():
symval, _, symname = symline.split(maxsplit=2)
if symname in symbols and args.no_redefine:
continue
symbols[symname] = symval
def evaluate_symbol(issym, anchor, offsym):
sym_match = replace_pat.match(offsym)
if not sym_match:
# No need to evaluate symbol value, return as is
return f"{issym} {anchor} {offsym}"
symname = sym_match.group("symname")
assert symname in symbols, f"ERROR: symbol {symname} is not defined in binary"
# Evaluate to an absolute offset if issym is false
if issym == "0":
return f"{issym} {anchor} {symbols[symname]}"
# Evaluate symbol against its anchor if issym is true
assert anchor in symbols, f"ERROR: symbol {anchor} is not defined in binary"
anchor_value = int(symbols[anchor], 16)
symbol_value = int(symbols[symname], 16)
sym_offset = symbol_value - anchor_value
return f'{issym} {anchor} {format(sym_offset, "x")}'
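# Worked example (assuming symbols == {"main": "400000", "foo": "400020"}):
#   evaluate_symbol("1", "main", "#foo#") -> "1 main 20"
# i.e. foo's address minus the anchor's address, rendered in hex.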
def replace_symbol(matchobj):
"""
Expects matchobj to only capture one group which contains the symbol name.
"""
symname = matchobj.group("symname")
assert symname in symbols, f"ERROR: symbol {symname} is not defined in binary"
return symbols[symname]
with open(args.output, "w", newline="\n") as f:
if args.no_lbr:
print("no_lbr", file=f)
for etype, expr in exprs:
if etype == "FDATA":
issym1, anchor1, offsym1, issym2, anchor2, offsym2, mispred, execnt = expr
print(
evaluate_symbol(issym1, anchor1, offsym1),
evaluate_symbol(issym2, anchor2, offsym2),
mispred,
execnt,
file=f,
)
elif etype == "NOLBR":
issym, anchor, offsym, count = expr
print(evaluate_symbol(issym, anchor, offsym), count, file=f)
elif etype == "PREAGG":
# Replace all symbols enclosed in ##
print(expr[0], re.sub(replace_pat, replace_symbol, expr[1]), file=f)
else:
exit("ERROR: unhandled expression type:\n%s" % etype)
| language: python | source: github | repo: https://github.com/llvm/llvm-project | path: bolt/test/link_fdata.py |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
import os
import shlex
import yaml
import copy
import optparse
import operator
from ansible import errors
from ansible import __version__
from ansible.utils.plugins import *
from ansible.utils import template
from ansible.callbacks import display
import ansible.constants as C
import time
import StringIO
import stat
import termios
import tty
import pipes
import random
import difflib
import warnings
import traceback
import getpass
import sys
import textwrap
VERBOSITY=0
# list of all deprecation messages to prevent duplicate display
deprecations = {}
warns = {}
MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
try:
import json
except ImportError:
import simplejson as json
try:
from hashlib import md5 as _md5
except ImportError:
from md5 import md5 as _md5
PASSLIB_AVAILABLE = False
try:
import passlib.hash
PASSLIB_AVAILABLE = True
except:
pass
KEYCZAR_AVAILABLE=False
try:
import keyczar.errors as key_errors
from keyczar.keys import AesKey
KEYCZAR_AVAILABLE=True
except ImportError:
pass
###############################################################
# Abstractions around keyczar
###############################################################
def key_for_hostname(hostname):
# fireball mode is an implementation of ansible firing up zeromq via SSH
# to use no persistent daemons or key management
if not KEYCZAR_AVAILABLE:
raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
key_path = os.path.expanduser("~/.fireball.keys")
if not os.path.exists(key_path):
os.makedirs(key_path)
key_path = os.path.expanduser("~/.fireball.keys/%s" % hostname)
# use new AES keys every 2 hours, which means fireball must not allow running for longer either
if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
key = AesKey.Generate()
fh = open(key_path, "w")
fh.write(str(key))
fh.close()
return key
else:
fh = open(key_path)
key = AesKey.Read(fh.read())
fh.close()
return key
def encrypt(key, msg):
return key.Encrypt(msg)
def decrypt(key, msg):
try:
return key.Decrypt(msg)
except key_errors.InvalidSignatureError:
raise errors.AnsibleError("decryption failed")
###############################################################
# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
###############################################################
def err(msg):
''' print an error message to stderr '''
print >> sys.stderr, msg
def exit(msg, rc=1):
''' quit with an error to stdout and a failure code '''
err(msg)
sys.exit(rc)
def jsonify(result, format=False):
''' format JSON output (compressed or uncompressed) '''
if result is None:
return "{}"
result2 = result.copy()
for key, value in result2.items():
if type(value) is str:
result2[key] = value.decode('utf-8', 'ignore')
if format:
return json.dumps(result2, sort_keys=True, indent=4)
else:
return json.dumps(result2, sort_keys=True)
def write_tree_file(tree, hostname, buf):
''' write something into treedir/hostname '''
# TODO: might be nice to append playbook runs per host in a similar way
# in which case, we'd want append mode.
path = os.path.join(tree, hostname)
fd = open(path, "w+")
fd.write(buf)
fd.close()
def is_failed(result):
''' is a given JSON result a failed result? '''
return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
def is_changed(result):
''' is a given JSON result a changed result? '''
return (result.get('changed', False) in [ True, 'True', 'true'])
def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
if conditional is None or conditional == '':
return True
if isinstance(conditional, list):
for x in conditional:
if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
return False
return True
if not isinstance(conditional, basestring):
return conditional
conditional = conditional.replace("jinja2_compare ","")
# allow variable names
if conditional in inject and str(inject[conditional]).find('-') == -1:
conditional = inject[conditional]
conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
original = str(conditional).replace("jinja2_compare ","")
# a Jinja2 evaluation that results in something Python can eval!
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
conditional = template.template(basedir, presented, inject)
val = conditional.strip()
if val == presented:
# the templating failed, meaning most likely a
# variable was undefined. If we happened to be
# looking for an undefined variable, return True,
# otherwise fail
if conditional.find("is undefined") != -1:
return True
elif conditional.find("is defined") != -1:
return False
else:
raise errors.AnsibleError("error while evaluating conditional: %s" % original)
elif val == "True":
return True
elif val == "False":
return False
else:
raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
def is_executable(path):
'''is the given path executable?'''
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
def unfrackpath(path):
'''
returns a path that is free of symlinks, environment
variables, relative path traversals and symbols (~)
example:
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
def prepare_writeable_dir(tree,mode=0777):
''' make sure a directory exists and is writeable '''
# modify the mode to ensure the owner at least
# has read/write access to this directory
mode |= 0700
# make sure the tree path is always expanded
# and normalized and free of symlinks
tree = unfrackpath(tree)
if not os.path.exists(tree):
try:
os.makedirs(tree, mode)
except (IOError, OSError), e:
raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
if not os.access(tree, os.W_OK):
raise errors.AnsibleError("Cannot write to path %s" % tree)
return tree
def path_dwim(basedir, given):
'''
make relative paths work like folks expect.
'''
if given.startswith("/"):
return os.path.abspath(given)
elif given.startswith("~"):
return os.path.abspath(os.path.expanduser(given))
else:
return os.path.abspath(os.path.join(basedir, given))
def path_dwim_relative(original, dirname, source, playbook_base, check=True):
''' find one file in a directory one level up in a dir named dirname relative to current '''
# (used by roles code)
basedir = os.path.dirname(original)
if os.path.islink(basedir):
basedir = unfrackpath(basedir)
template2 = os.path.join(basedir, dirname, source)
else:
template2 = os.path.join(basedir, '..', dirname, source)
source2 = path_dwim(basedir, template2)
if os.path.exists(source2):
return source2
obvious_local_path = path_dwim(playbook_base, source)
if os.path.exists(obvious_local_path):
return obvious_local_path
if check:
raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
return source2 # which does not exist
def json_loads(data):
''' parse a JSON string and return a data structure '''
return json.loads(data)
def parse_json(raw_data):
''' this version for module return data only '''
orig_data = raw_data
# ignore stuff like tcgetattr spewage or other warnings
data = filter_leading_non_json_lines(raw_data)
try:
return json.loads(data)
except:
# not JSON, but try "Baby JSON" which allows many of our modules to not
# require JSON and makes writing modules in bash much simpler
results = {}
try:
tokens = shlex.split(data)
except:
print "failed to parse json: "+ data
raise
for t in tokens:
if t.find("=") == -1:
raise errors.AnsibleError("failed to parse: %s" % orig_data)
(key,value) = t.split("=", 1)
if key in ('changed', 'failed'):
if value.lower() in [ 'true', '1' ]:
value = True
elif value.lower() in [ 'false', '0' ]:
value = False
if key == 'rc':
value = int(value)
results[key] = value
if len(results.keys()) == 0:
return { "failed" : True, "parsed" : False, "msg" : orig_data }
return results
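# "Baby JSON" example (illustrative, not in the original): for input that is
# not valid JSON, parse_json('changed=true rc=0') returns
# {'changed': True, 'rc': 0}.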
def smush_braces(data):
''' smush Jinja2 braces so unresolved templates like {{ foo }} don't get parsed weird by key=value code '''
while data.find('{{ ') != -1:
data = data.replace('{{ ', '{{')
while data.find(' }}') != -1:
data = data.replace(' }}', '}}')
return data
def smush_ds(data):
# things like key={{ foo }} are not handled by shlex.split well, so preprocess any YAML we load
# so we do not have to call smush elsewhere
if type(data) == list:
return [ smush_ds(x) for x in data ]
elif type(data) == dict:
for (k,v) in data.items():
data[k] = smush_ds(v)
return data
elif isinstance(data, basestring):
return smush_braces(data)
else:
return data
def parse_yaml(data):
''' convert a yaml string to a data structure '''
return smush_ds(yaml.safe_load(data))
def process_common_errors(msg, probline, column):
replaced = probline.replace(" ","")
if replaced.find(":{{") != -1 and replaced.find("}}") != -1:
msg = msg + """
This one looks easy to fix. YAML thought it was looking for the start of a
hash/dictionary and was confused to see a second "{". Most likely this was
meant to be an ansible template evaluation instead, so we have to give the
parser a small hint that we wanted a string instead. The solution here is to
just quote the entire value.
For instance, if the original line was:
app_path: {{ base_path }}/foo
It should be written as:
app_path: "{{ base_path }}/foo"
"""
return msg
elif len(probline) and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
msg = msg + """
This one looks easy to fix. There seems to be an extra unquoted colon in the line
and this is confusing the parser. It was only expecting to find one free
colon. The solution is just add some quotes around the colon, or quote the
entire line after the first colon.
For instance, if the original line was:
copy: src=file.txt dest=/path/filename:with_colon.txt
It can be written as:
copy: src=file.txt dest='/path/filename:with_colon.txt'
Or:
copy: 'src=file.txt dest=/path/filename:with_colon.txt'
"""
return msg
else:
parts = probline.split(":")
if len(parts) > 1:
middle = parts[1].strip()
match = False
unbalanced = False
if middle.startswith("'") and not middle.endswith("'"):
match = True
elif middle.startswith('"') and not middle.endswith('"'):
match = True
if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2:
unbalanced = True
if match:
msg = msg + """
This one looks easy to fix. It seems that there is a value started
with a quote, and the YAML parser is expecting to see the line ended
with the same kind of quote. For instance:
when: "ok" in result.stdout
Could be written as:
when: '"ok" in result.stdout'
or equivalently:
when: "'ok' in result.stdout"
"""
return msg
if unbalanced:
msg = msg + """
We could be wrong, but this one looks like it might be an issue with
unbalanced quotes. If starting a value with a quote, make sure the
line ends with the same set of quotes. For instance this arbitrary
example:
foo: "bad" "wolf"
Could be written as:
foo: '"bad" "wolf"'
"""
return msg
return msg
def process_yaml_error(exc, data, path=None):
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark
if mark.line -1 >= 0:
before_probline = data.split("\n")[mark.line-1]
else:
before_probline = ''
probline = data.split("\n")[mark.line]
arrow = " " * mark.column + "^"
msg = """Syntax Error while loading YAML script, %s
Note: The error may actually appear before this position: line %s, column %s
%s
%s
%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
unquoted_var = None
if '{{' in probline and '}}' in probline:
if '"{{' not in probline or "'{{" not in probline:
unquoted_var = True
if not unquoted_var:
msg = process_common_errors(msg, probline, mark.column)
else:
msg = msg + """
We could be wrong, but this one looks like it might be an issue with
missing quotes. Always quote template expression brackets when they
start a value. For instance:
with_items:
- {{ foo }}
Should be written as:
with_items:
- "{{ foo }}"
"""
else:
# No problem markers means we have to throw a generic
# "stuff messed up" type message. Sry bud.
if path:
msg = "Could not parse YAML. Check over %s again." % path
else:
msg = "Could not parse YAML."
raise errors.AnsibleYAMLValidationFailed(msg)
def parse_yaml_from_file(path):
''' convert a yaml file to a data structure '''
try:
data = file(path).read()
return parse_yaml(data)
except IOError:
raise errors.AnsibleError("file not found: %s" % path)
except yaml.YAMLError, exc:
process_yaml_error(exc, data, path)
def parse_kv(args):
''' convert a string of key/value items to a dict '''
options = {}
if args is not None:
# attempting to split a unicode here does bad things
args = args.encode('utf-8')
vargs = [x.decode('utf-8') for x in shlex.split(args, posix=True)]
#vargs = shlex.split(str(args), posix=True)
for x in vargs:
if x.find("=") != -1:
k, v = x.split("=",1)
options[k]=v
return options
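# For example: parse_kv("src=a.txt dest=/tmp mode=0644") returns
# {'src': 'a.txt', 'dest': '/tmp', 'mode': '0644'} (values stay strings).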
def merge_hash(a, b):
''' recursively merges hash b into a
keys from b take precedence over keys from a '''
result = copy.deepcopy(a)
# next, iterate over b keys and values
for k, v in b.iteritems():
# if there's already such key in a
# and that key contains dict
if k in result and isinstance(result[k], dict):
# merge those dicts recursively
result[k] = merge_hash(a[k], v)
else:
# otherwise, just copy a value from b to a
result[k] = v
return result
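# For example: merge_hash({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}, 'b': 4})
# returns {'a': {'x': 1, 'y': 3}, 'b': 4} -- nested dicts are merged key by
# key, while scalar values from b simply win.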
def md5s(data):
''' Return MD5 hex digest of data. '''
digest = _md5()
try:
digest.update(data)
except UnicodeEncodeError:
digest.update(data.encode('utf-8'))
return digest.hexdigest()
def md5(filename):
''' Return MD5 hex digest of local file, or None if file is not present. '''
if not os.path.exists(filename):
return None
digest = _md5()
blocksize = 64 * 1024
infile = open(filename, 'rb')
block = infile.read(blocksize)
while block:
digest.update(block)
block = infile.read(blocksize)
infile.close()
return digest.hexdigest()
def default(value, function):
''' syntactic sugar around lazy evaluation of defaults '''
if value is None:
return function()
return value
def _gitinfo():
''' returns a string containing git branch, commit id and commit date '''
result = None
repo_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', '.git')
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
# There is a possibility that the .git file holds an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
branch = f.readline().split('/')[-1].rstrip("\n")
f.close()
branch_path = os.path.join(repo_path, "refs", "heads", branch)
if os.path.exists(branch_path):
f = open(branch_path)
commit = f.readline()[:10]
f.close()
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
else:
result = ''
return result
def version(prog):
result = "{0} {1}".format(prog, __version__)
gitinfo = _gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
return result
def getch():
''' read in a single character '''
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
####################################################################
# option handling code for /usr/bin/ansible and ansible-playbook
# below this line
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
def format_help(self, formatter=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
def increment_debug(option, opt, value, parser):
global VERBOSITY
VERBOSITY += 1
def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
''' create an options parser for any ansible script '''
parser = SortedOptParser(usage, version=version("%prog"))
parser.add_option('-v','--verbose', default=False, action="callback",
callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
parser.add_option('-i', '--inventory-file', dest='inventory',
help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
default=constants.DEFAULT_HOST_LIST)
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
help='ask for SSH password')
parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password')
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-M', '--module-path', dest='module_path',
help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
default=None)
if subset_opts:
parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
dest='timeout',
help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if runas_opts:
parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true",
dest='sudo', help="run operations with sudo (nopasswd)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', help='desired sudo user (default=root)',
default=None) # Can't default to root because we need to detect when this option was given
parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER,
dest='remote_user',
help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
if connect_opts:
parser.add_option('-c', '--connection', dest='connection',
default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
if async_opts:
parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur"
)
if diff_opts:
parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check"
)
return parser
def ask_passwords(ask_pass=False, ask_sudo_pass=False):
sshpass = None
sudopass = None
sudo_prompt = "sudo password: "
if ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
sudo_prompt = "sudo password [defaults to SSH password]: "
if ask_sudo_pass:
sudopass = getpass.getpass(prompt=sudo_prompt)
if ask_pass and sudopass == '':
sudopass = sshpass
return (sshpass, sudopass)
def do_encrypt(result, encrypt, salt_size=None, salt=None):
if PASSLIB_AVAILABLE:
try:
crypt = getattr(passlib.hash, encrypt)
except:
raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)
if salt_size:
result = crypt.encrypt(result, salt_size=salt_size)
elif salt:
result = crypt.encrypt(result, salt=salt)
else:
result = crypt.encrypt(result)
else:
raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")
return result
def last_non_blank_line(buf):
all_lines = buf.splitlines()
all_lines.reverse()
for line in all_lines:
if (len(line) > 0):
return line
# shouldn't occur unless there's no output
return ""
def filter_leading_non_json_lines(buf):
'''
used to avoid random output from SSH at the top of JSON output, like messages from
tcgetattr, or where dropbear spews MOTD on every single command (which is nuts).
need to filter out leading lines until one contains '=' or starts with '{' or '['.
filter only leading lines since multiline JSON is valid.
'''
filtered_lines = StringIO.StringIO()
stop_filtering = False
for line in buf.splitlines():
if stop_filtering or "=" in line or line.startswith('{') or line.startswith('['):
stop_filtering = True
filtered_lines.write(line + '\n')
return filtered_lines.getvalue()
def boolean(value):
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
def make_sudo_cmd(sudo_user, executable, cmd):
"""
helper function for connection plugins to create sudo commands
"""
# Rather than detect if sudo wants a password this time, -k makes
# sudo always ask for a password if one is required.
# Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with pipes.quote()
# and pass the quoted string to the user's shell. We loop reading
# output until we see the randomly-generated sudo prompt set with
# the -p option.
randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
prompt = '[sudo via ansible, key=%s] password: ' % randbits
success_key = 'SUDO-SUCCESS-%s' % randbits
sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % (
C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_FLAGS,
prompt, sudo_user, executable or '$SHELL', pipes.quote('echo %s; %s' % (success_key, cmd)))
return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key)
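# Sketch of the returned triple (key shortened for illustration; the real
# key is 32 random lowercase letters):
#   cmd    -- "/bin/sh -c '<quoted sudo pipeline>'" as built above
#   prompt -- '[sudo via ansible, key=ab..] password: '
#   key    -- 'SUDO-SUCCESS-ab..'
# A caller reads output until it sees prompt (send the password) or key
# (sudo succeeded without needing a password).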
_TO_UNICODE_TYPES = (unicode, type(None))
def to_unicode(value):
if isinstance(value, _TO_UNICODE_TYPES):
return value
return value.decode("utf-8")
def get_diff(diff):
# called by --diff usage in playbook and runner via callbacks
# include names in diffs 'before' and 'after' and do diff -U 10
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
ret = []
if 'dst_binary' in diff:
ret.append("diff skipped: destination file appears to be binary\n")
if 'src_binary' in diff:
ret.append("diff skipped: source file appears to be binary\n")
if 'dst_larger' in diff:
ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
if 'src_larger' in diff:
ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
if 'before' in diff and 'after' in diff:
if 'before_header' in diff:
before_header = "before: %s" % diff['before_header']
else:
before_header = 'before'
if 'after_header' in diff:
after_header = "after: %s" % diff['after_header']
else:
after_header = 'after'
differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
for line in list(differ):
ret.append(line)
return u"".join(ret)
except UnicodeDecodeError:
return ">> the files are different, but the diff library cannot compare unicode strings"
def is_list_of_strings(items):
for x in items:
if not isinstance(x, basestring):
return False
return True
def safe_eval(str, locals=None, include_exceptions=False):
'''
this is intended for allowing things like:
with_items: a_list_variable
where Jinja2 would return a string
but we do not want to allow it to call functions (outside of Jinja2, where
the env is constrained)
'''
# FIXME: is there a more native way to do this?
def is_set(var):
return not var.startswith("$") and not '{{' in var
def is_unset(var):
return var.startswith("$") or '{{' in var
# do not allow method calls to modules
if not isinstance(str, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (str, None)
return str
if re.search(r'\w\.\w+\(', str):
if include_exceptions:
return (str, None)
return str
# do not allow imports
if re.search(r'import \w+', str):
if include_exceptions:
return (str, None)
return str
try:
result = None
if not locals:
result = eval(str)
else:
result = eval(str, None, locals)
if include_exceptions:
return (result, None)
else:
return result
except Exception, e:
if include_exceptions:
return (str, e)
return str
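# For example: safe_eval("[1, 2, 3]") evaluates to a real list, while inputs
# matching the guards above -- e.g. "os.system('x')" (method call) or
# "import os" -- are returned unevaluated as strings.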
def listify_lookup_plugin_terms(terms, basedir, inject):
if isinstance(terms, basestring):
# someone did:
# with_items: alist
# OR
# with_items: {{ alist }}
stripped = terms.strip()
if not (stripped.startswith('{') or stripped.startswith('[')) and not stripped.startswith("/"):
# if not already a list, get ready to evaluate with Jinja2
# not sure why the "/" is in above code :)
try:
new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
if isinstance(new_terms, basestring) and new_terms.find("{{") != -1:
pass
else:
terms = new_terms
except:
pass
if '{' in terms or '[' in terms:
# Jinja2 already evaluated a variable to a list.
# Jinja2-ified list needs to be converted back to a real type
# TODO: something a bit less heavy than eval
return safe_eval(terms)
if isinstance(terms, basestring):
terms = [ terms ]
return terms
def deprecated(msg, version, removed=False):
''' used to print out a deprecation message.'''
if not removed and not C.DEPRECATION_WARNINGS:
return
if not removed:
if version:
new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version)
else:
new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in a future release." % (msg)
new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
else:
raise errors.AnsibleError("[DEPRECATED]: %s. Please update your playbooks." % msg)
wrapped = textwrap.wrap(new_msg, 79)
new_msg = "\n".join(wrapped) + "\n"
if new_msg not in deprecations:
display(new_msg, color='purple', stderr=True)
deprecations[new_msg] = 1
def warning(msg):
new_msg = "\n[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, 79)
new_msg = "\n".join(wrapped) + "\n"
if new_msg not in warns:
display(new_msg, color='bright purple', stderr=True)
warns[new_msg] = 1
def combine_vars(a, b):
if C.DEFAULT_HASH_BEHAVIOUR == "merge":
return merge_hash(a, b)
else:
return dict(a.items() + b.items())
def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
'''Return a random password string of length containing only chars.'''
password = []
while len(password) < length:
new_char = os.urandom(1)
if new_char in chars:
password.append(new_char)
return ''.join(password)
| language: unknown | source: codeparrot/codeparrot-clean |
from south.db import db
from django.db import models
from nepal.web.models import *
class Migration:
def forwards(self, orm):
# Adding field 'PHPIni.vhost'
db.add_column('php_ini', 'vhost', orm['web.phpini:vhost'])
def backwards(self, orm):
# Deleting field 'PHPIni.vhost'
db.delete_column('php_ini', 'vhost_id')
models = {
'account.userprofile': {
'Meta': {'db_table': "'user_profile'"},
'account_holder': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'account_no': ('django.db.models.fields.IntegerField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'bank_number': ('django.db.models.fields.IntegerField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'canbeactivated': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['countries.Country']"}),
'customer_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'iban': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'jid': ('django.db.models.fields.EmailField', [], {'max_length': '128', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['account.UserProfile']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'swift_bic': ('django.db.models.fields.CharField', [], {'max_length': '11', 'null': 'True', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'vatin': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'countries.country': {
'Meta': {'db_table': "'country'"},
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'numcode': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'domain.domain': {
'Meta': {'unique_together': "(('name', 'parent'),)", 'db_table': "'domain'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ip.IP']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['domain.Domain']"}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['account.UserProfile']"})
},
'ip.ip': {
'Meta': {'db_table': "'ip'"},
'address': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'web.phpini': {
'Meta': {'db_table': "'php_ini'"},
'allow_url_fopen': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'display_errors': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['domain.Domain']", 'unique': 'True'}),
'error_reporting': ('django.db.models.fields.CharField', [], {'default': "'E_ALL & ~E_NOTICE'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log_errors': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'max_execution_time': ('django.db.models.fields.PositiveIntegerField', [], {'default': '30'}),
'max_input_time': ('django.db.models.fields.PositiveIntegerField', [], {'default': '60'}),
'memory_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '128'}),
'post_max_size': ('django.db.models.fields.PositiveIntegerField', [], {'default': '8'}),
'register_globals': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'register_long_arrays': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'session_only_cookies': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'vhost': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['web.VHost']", 'unique': 'True', 'null': 'True'})
},
'web.vhost': {
'Meta': {'db_table': "'vhost'"},
'domain': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['domain.Domain']", 'unique': 'True'}),
'forward_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'web.vhostalias': {
'Meta': {'db_table': "'vhost_alias'"},
'domain': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['domain.Domain']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'vhost': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web.VHost']"})
}
}
complete_apps = ['web']
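# A hedged illustration (not part of the migration itself): each frozen-ORM
# entry above is South's snapshot of a plain Django model. For example, the
# 'ip.ip' entry corresponds roughly to:
#
#     class IP(models.Model):
#         address = models.IPAddressField(unique=True, max_length=15)
#
#         class Meta:
#             db_table = 'ip'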
# --- source: codeparrot/codeparrot-clean (language: unknown) ---
import pytest
from cfme import login, test_requirements
from cfme.base.login import BaseLoggedInPage
from cfme.base.credential import Credential
from cfme.configure.access_control import User
from utils import conf, error
from utils.appliance.implementations.ui import navigate_to
pytestmark = pytest.mark.usefixtures('browser')
@test_requirements.drift
@pytest.mark.tier(1)
@pytest.mark.sauce
@pytest.mark.smoke
@pytest.mark.parametrize("method", login.LOGIN_METHODS)
def test_login(method, appliance):
""" Tests that the appliance can be logged into and shows dashboard page. """
login_page = navigate_to(appliance.server, 'LoginScreen')
assert login_page.is_displayed
login_page.login_admin(method=method)
logged_in_page = appliance.browser.create_view(BaseLoggedInPage)
assert logged_in_page.is_displayed
logged_in_page.logout()
login_page.flush_widget_cache()
assert login_page.is_displayed
@test_requirements.drift
@pytest.mark.tier(1)
@pytest.mark.sauce
@pytest.mark.smoke
def test_re_login(appliance):
"""
Tests that the appliance can be logged into and shows dashboard page after re-login to it.
"""
login_page = navigate_to(appliance.server, 'LoginScreen')
assert login_page.is_displayed
login_page.login_admin()
logged_in_page = appliance.browser.create_view(BaseLoggedInPage)
assert logged_in_page.is_displayed
logged_in_page.logout()
assert login_page.is_displayed
# Added re-login
login_page.login_admin()
logged_in_page = appliance.browser.create_view(BaseLoggedInPage)
assert logged_in_page.is_displayed
logged_in_page.logout()
login_page.flush_widget_cache()
assert login_page.is_displayed
@test_requirements.drift
@pytest.mark.tier(2)
@pytest.mark.sauce
def test_bad_password(request, appliance):
""" Tests logging in with a bad password. """
request.addfinalizer(lambda: navigate_to(appliance.server, 'LoginScreen'))
login_page = navigate_to(appliance.server, 'LoginScreen')
username = conf.credentials['default']['username']
password = "badpassword@#$"
cred = Credential(principal=username, secret=password)
user = User(credential=cred)
user.name = 'Administrator'
with error.expected("Sorry, the username or password you entered is incorrect."):
login_page.log_in(user)
assert login_page.is_displayed
# --- source: codeparrot/codeparrot-clean (language: unknown) ---
from __future__ import unicode_literals
import sys
import logging
import datetime
from modularodm import Q
from modularodm.storage.base import KeyExistsException
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
from website.files import models
from website.app import init_app
from website.addons.osfstorage import model as osfstorage_model
NOW = datetime.datetime.utcnow()
logger = logging.getLogger(__name__)
def paginated(model, query=None, increment=200):
last_id = ''
pages = (model.find(query).count() / increment) + 1
for i in xrange(pages):
q = Q('_id', 'gt', last_id)
if query:
q &= query
page = list(model.find(q).limit(increment))
for item in page:
yield item
if page:
last_id = item._id
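# A hedged usage sketch (model and query names assumed from this script's own
# imports): `paginated` does keyset pagination on `_id` rather than
# skip/offset, so it stays cheap on large collections, e.g.:
#
#     for filenode in paginated(models.FileNode, Q('is_file', 'eq', True)):
#         logger.debug(filenode._id)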
def do_migration():
logger.info('Migration: OsfStorageFileNode -> FileNode')
migrate_filenodes()
logger.info('Migration: OsfStorageTrashedFileNode -> TrashedFileNode')
migrate_trashedfilenodes()
logger.info('Checking that all Files have been migrated...')
diff = osfstorage_model.OsfStorageFileNode.find().count() - models.FileNode.find().count()
if diff > 0:
        logger.error('Missing {} FileNodes; canceling transaction'.format(diff))
raise Exception('{} unmigrated FileNodes'.format(diff))
logger.info('Checking that all File versions have been migrated...')
diff = osfstorage_model.OsfStorageFileVersion.find().count() - models.FileVersion.find().count()
if diff != 0:
logger.error('{} OsfStorageFileVersions did not get migrated'.format(diff))
logger.error('This is most likely because they are orphaned')
logger.error('This is not a show stopper; The migration was still successful')
else:
logger.info('Migration successful')
def migrate_trashedfilenodes():
for trashed in osfstorage_model.OsfStorageTrashedFileNode.find():
logger.debug('Migrating OsfStorageTrashedFileNode {}'.format(trashed._id))
if trashed.node_settings is None:
logger.warning('OsfStorageTrashedFileNode {} has no node_settings; skipping'.format(trashed._id))
continue
parent_id = trashed.to_storage()['parent']
parent = osfstorage_model.OsfStorageTrashedFileNode.load(parent_id) or osfstorage_model.OsfStorageFileNode.load(parent_id)
if parent:
if isinstance(parent, osfstorage_model.OsfStorageFileNode):
parent = (parent._id, 'storedfilenode')
else:
parent = (parent._id, 'trashedfilenode')
models.TrashedFileNode(
_id=trashed._id,
versions=translate_versions(trashed.versions),
node=trashed.node_settings.owner,
parent=parent,
is_file=trashed.kind == 'file',
provider='osfstorage',
name=trashed.name,
path='/' + trashed._id + ('' if trashed.kind == 'file' else '/'),
materialized_path=''
).save()
def migrate_filenodes():
for node_settings in paginated(osfstorage_model.OsfStorageNodeSettings):
if node_settings.owner is None:
logger.warning('OsfStorageNodeSettings {} has no parent; skipping'.format(node_settings._id))
continue
logger.info('Migrating files for {!r}'.format(node_settings.owner))
listing = []
for filenode in osfstorage_model.OsfStorageFileNode.find(Q('node_settings', 'eq', node_settings._id)):
logger.debug('Migrating OsfStorageFileNode {}'.format(filenode._id))
versions = translate_versions(filenode.versions)
if filenode.is_file and not filenode.node.is_deleted:
if not filenode.versions:
logger.warning('File {!r} has no versions'.format(filenode))
elif not versions:
logger.warning('{!r} is a file with no translatable versions'.format(filenode))
new_node = models.StoredFileNode(
_id=filenode._id,
versions=versions,
node=node_settings.owner,
parent=None if not filenode.parent else filenode.parent._id,
is_file=filenode.kind == 'file',
provider='osfstorage',
name=filenode.name,
last_touched=NOW
)
# Wrapped's save will populate path and materialized_path
new_node.wrapped().save()
listing.append(new_node)
assert node_settings.get_root()
for x in listing:
# Make sure everything transfered properly
if x.to_storage()['parent']:
assert x.parent, '{!r}\'s parent {} does not exist'.format(x.wrapped(), x.to_storage()['parent'])
def translate_versions(versions):
translated = []
for index, version in enumerate(versions):
        if version is None:
            raise Exception('Version at index {} missing from database'.format(index))
if not version.metadata or not version.location:
logger.error('Version {} missing metadata or location'.format(version))
continue
translated.append(translate_version(version, index))
return translated
def translate_version(version, index):
version = models.FileVersion(
_id=version._id,
creator=version.creator,
identifier=index + 1,
date_created=version.date_created,
location=version.location,
metadata=version.metadata,
size=version.size,
content_type=version.content_type,
date_modified=version.date_modified,
)
try:
version.save()
except KeyExistsException:
version = models.FileVersion.load(version._id)
return version
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
with TokuTransaction():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
main(dry=dry)
# --- source: codeparrot/codeparrot-clean (language: unknown) ---
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutronclient.common import exceptions as n_exc
from neutronclient.neutron import v2_0 as neutronv20
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
import six
from webob import exc
from nova.compute import api as compute_api
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.network.neutronv2 import api as neutronapi
from nova.network.security_group import security_group_base
from nova import objects
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# NOTE: Neutron client has a max URL length of 8192, so we have
# to limit the number of IDs we include in any single search. Really
# doesn't seem to be any point in making this a config value.
MAX_SEARCH_IDS = 150
class SecurityGroupAPI(security_group_base.SecurityGroupBase):
id_is_uuid = True
def create_security_group(self, context, name, description):
neutron = neutronapi.get_client(context)
body = self._make_neutron_security_group_dict(name, description)
try:
security_group = neutron.create_security_group(
body).get('security_group')
except n_exc.BadRequest as e:
raise exception.Invalid(six.text_type(e))
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
LOG.exception(_LE("Neutron Error creating security group %s"),
name)
if e.status_code == 401:
# TODO(arosen) Cannot raise generic response from neutron here
# as this error code could be related to bad input or over
# quota
raise exc.HTTPBadRequest()
elif e.status_code == 409:
self.raise_over_quota(six.text_type(e))
six.reraise(*exc_info)
return self._convert_to_nova_security_group_format(security_group)
def update_security_group(self, context, security_group,
name, description):
neutron = neutronapi.get_client(context)
body = self._make_neutron_security_group_dict(name, description)
try:
security_group = neutron.update_security_group(
security_group['id'], body).get('security_group')
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
LOG.exception(_LE("Neutron Error updating security group %s"),
name)
if e.status_code == 401:
# TODO(arosen) Cannot raise generic response from neutron here
# as this error code could be related to bad input or over
# quota
raise exc.HTTPBadRequest()
six.reraise(*exc_info)
return self._convert_to_nova_security_group_format(security_group)
def _convert_to_nova_security_group_format(self, security_group):
nova_group = {}
nova_group['id'] = security_group['id']
nova_group['description'] = security_group['description']
nova_group['name'] = security_group['name']
nova_group['project_id'] = security_group['tenant_id']
nova_group['rules'] = []
for rule in security_group.get('security_group_rules', []):
if rule['direction'] == 'ingress':
nova_group['rules'].append(
self._convert_to_nova_security_group_rule_format(rule))
return nova_group
def _convert_to_nova_security_group_rule_format(self, rule):
nova_rule = {}
nova_rule['id'] = rule['id']
nova_rule['parent_group_id'] = rule['security_group_id']
nova_rule['protocol'] = rule['protocol']
if (nova_rule['protocol'] and rule.get('port_range_min') is None and
rule.get('port_range_max') is None):
if rule['protocol'].upper() in ['TCP', 'UDP']:
nova_rule['from_port'] = 1
nova_rule['to_port'] = 65535
else:
nova_rule['from_port'] = -1
nova_rule['to_port'] = -1
else:
nova_rule['from_port'] = rule.get('port_range_min')
nova_rule['to_port'] = rule.get('port_range_max')
nova_rule['group_id'] = rule['remote_group_id']
nova_rule['cidr'] = self.parse_cidr(rule.get('remote_ip_prefix'))
return nova_rule
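    # A hedged illustration (values invented for the example): a neutron
    # ingress rule with no port range, such as
    #
    #     {'id': 'r1', 'security_group_id': 'sg1', 'direction': 'ingress',
    #      'protocol': 'tcp', 'port_range_min': None, 'port_range_max': None,
    #      'remote_group_id': None, 'remote_ip_prefix': '10.0.0.0/24'}
    #
    # converts to a nova rule spanning the full TCP port range:
    #
    #     {'id': 'r1', 'parent_group_id': 'sg1', 'protocol': 'tcp',
    #      'from_port': 1, 'to_port': 65535, 'group_id': None,
    #      'cidr': '10.0.0.0/24'}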
def get(self, context, name=None, id=None, map_exception=False):
neutron = neutronapi.get_client(context)
try:
if not id and name:
# NOTE(flwang): The project id should be honoured so as to get
# the correct security group id when user(with admin role but
# non-admin project) try to query by name, so as to avoid
# getting more than duplicated records with the same name.
id = neutronv20.find_resourceid_by_name_or_id(
neutron, 'security_group', name, context.project_id)
group = neutron.show_security_group(id).get('security_group')
return self._convert_to_nova_security_group_format(group)
except n_exc.NeutronClientNoUniqueMatch as e:
raise exception.NoUniqueMatch(six.text_type(e))
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
LOG.debug("Neutron security group %s not found", name)
raise exception.SecurityGroupNotFound(six.text_type(e))
else:
LOG.error(_LE("Neutron Error: %s"), e)
six.reraise(*exc_info)
except TypeError as e:
LOG.error(_LE("Neutron Error: %s"), e)
msg = _("Invalid security group name: %(name)s.") % {"name": name}
raise exception.SecurityGroupNotFound(six.text_type(msg))
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
"""Returns list of security group rules owned by tenant."""
neutron = neutronapi.get_client(context)
search_opts = {}
if names:
search_opts['name'] = names
if ids:
search_opts['id'] = ids
if project:
search_opts['tenant_id'] = project
try:
security_groups = neutron.list_security_groups(**search_opts).get(
'security_groups')
except n_exc.NeutronClientException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Neutron Error getting security groups"))
converted_rules = []
for security_group in security_groups:
converted_rules.append(
self._convert_to_nova_security_group_format(security_group))
return converted_rules
def validate_id(self, id):
if not uuidutils.is_uuid_like(id):
msg = _("Security group id should be uuid")
self.raise_invalid_property(msg)
return id
def destroy(self, context, security_group):
"""This function deletes a security group."""
neutron = neutronapi.get_client(context)
try:
neutron.delete_security_group(security_group['id'])
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
self.raise_not_found(six.text_type(e))
elif e.status_code == 409:
self.raise_invalid_property(six.text_type(e))
else:
LOG.error(_LE("Neutron Error: %s"), e)
six.reraise(*exc_info)
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both. Multiple rules are
installed to a security group in neutron using bulk support.
"""
neutron = neutronapi.get_client(context)
body = self._make_neutron_security_group_rules_list(vals)
try:
rules = neutron.create_security_group_rule(
body).get('security_group_rules')
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
LOG.exception(_LE("Neutron Error getting security group %s"),
name)
self.raise_not_found(six.text_type(e))
elif e.status_code == 409:
LOG.exception(_LE("Neutron Error adding rules to security "
"group %s"), name)
self.raise_over_quota(six.text_type(e))
elif e.status_code == 400:
LOG.exception(_LE("Neutron Error: %s"), six.text_type(e))
self.raise_invalid_property(six.text_type(e))
else:
LOG.exception(_LE("Neutron Error:"))
six.reraise(*exc_info)
converted_rules = []
for rule in rules:
converted_rules.append(
self._convert_to_nova_security_group_rule_format(rule))
return converted_rules
def _make_neutron_security_group_dict(self, name, description):
return {'security_group': {'name': name,
'description': description}}
def _make_neutron_security_group_rules_list(self, rules):
new_rules = []
for rule in rules:
new_rule = {}
# nova only supports ingress rules so all rules are ingress.
new_rule['direction'] = "ingress"
new_rule['protocol'] = rule.get('protocol')
# FIXME(arosen) Nova does not expose ethertype on security group
# rules. Therefore, in the case of self referential rules we
# should probably assume they want to allow both IPv4 and IPv6.
# Unfortunately, this would require adding two rules in neutron.
# The reason we do not do this is because when the user using the
# nova api wants to remove the rule we'd have to have some way to
# know that we should delete both of these rules in neutron.
# For now, self referential rules only support IPv4.
if not rule.get('cidr'):
new_rule['ethertype'] = 'IPv4'
else:
new_rule['ethertype'] = utils.get_ip_version(rule.get('cidr'))
new_rule['remote_ip_prefix'] = rule.get('cidr')
new_rule['security_group_id'] = rule.get('parent_group_id')
new_rule['remote_group_id'] = rule.get('group_id')
if 'from_port' in rule and rule['from_port'] != -1:
new_rule['port_range_min'] = rule['from_port']
if 'to_port' in rule and rule['to_port'] != -1:
new_rule['port_range_max'] = rule['to_port']
new_rules.append(new_rule)
return {'security_group_rules': new_rules}
def remove_rules(self, context, security_group, rule_ids):
neutron = neutronapi.get_client(context)
rule_ids = set(rule_ids)
try:
# The ec2 api allows one to delete multiple security group rules
# at once. Since there is no bulk delete for neutron the best
# thing we can do is delete the rules one by one and hope this
# works.... :/
            for rule_id in list(rule_ids):
                neutron.delete_security_group_rule(rule_id)
                rule_ids.discard(rule_id)
except n_exc.NeutronClientException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Neutron Error unable to delete %s"),
rule_ids)
def get_rule(self, context, id):
neutron = neutronapi.get_client(context)
try:
rule = neutron.show_security_group_rule(
id).get('security_group_rule')
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
LOG.debug("Neutron security group rule %s not found", id)
self.raise_not_found(six.text_type(e))
else:
LOG.error(_LE("Neutron Error: %s"), e)
six.reraise(*exc_info)
return self._convert_to_nova_security_group_rule_format(rule)
def _get_ports_from_server_list(self, servers, neutron):
"""Returns a list of ports used by the servers."""
def _chunk_by_ids(servers, limit):
ids = []
for server in servers:
ids.append(server['id'])
if len(ids) >= limit:
yield ids
ids = []
if ids:
yield ids
# Note: Have to split the query up as the search criteria
# form part of the URL, which has a fixed max size
ports = []
for ids in _chunk_by_ids(servers, MAX_SEARCH_IDS):
search_opts = {'device_id': ids}
ports.extend(neutron.list_ports(**search_opts).get('ports'))
return ports
def _get_secgroups_from_port_list(self, ports, neutron):
"""Returns a dict of security groups keyed by their ids."""
def _chunk_by_ids(sg_ids, limit):
sg_id_list = []
for sg_id in sg_ids:
sg_id_list.append(sg_id)
if len(sg_id_list) >= limit:
yield sg_id_list
sg_id_list = []
if sg_id_list:
yield sg_id_list
# Find the set of unique SecGroup IDs to search for
sg_ids = set()
for port in ports:
sg_ids.update(port.get('security_groups', []))
# Note: Have to split the query up as the search criteria
# form part of the URL, which has a fixed max size
security_groups = {}
for sg_id_list in _chunk_by_ids(sg_ids, MAX_SEARCH_IDS):
sg_search_opts = {'id': sg_id_list}
search_results = neutron.list_security_groups(**sg_search_opts)
for sg in search_results.get('security_groups'):
security_groups[sg['id']] = sg
return security_groups
def get_instances_security_groups_bindings(self, context, servers,
detailed=False):
"""Returns a dict(instance_id, [security_groups]) to allow obtaining
all of the instances and their security groups in one shot.
"""
neutron = neutronapi.get_client(context)
ports = self._get_ports_from_server_list(servers, neutron)
security_groups = self._get_secgroups_from_port_list(ports, neutron)
instances_security_group_bindings = {}
for port in ports:
for port_sg_id in port.get('security_groups', []):
# Note: have to check we found port_sg as its possible
# the port has an SG that this user doesn't have access to
port_sg = security_groups.get(port_sg_id)
if port_sg:
if detailed:
sg_entry = self._convert_to_nova_security_group_format(
port_sg)
instances_security_group_bindings.setdefault(
port['device_id'], []).append(sg_entry)
else:
# name is optional in neutron so if not specified
# return id
name = port_sg.get('name')
if not name:
name = port_sg.get('id')
sg_entry = {'name': name}
instances_security_group_bindings.setdefault(
port['device_id'], []).append(sg_entry)
return instances_security_group_bindings
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
"""Returns the security groups that are associated with an instance.
If detailed is True then it also returns the full details of the
security groups associated with an instance.
"""
servers = [{'id': instance_uuid}]
sg_bindings = self.get_instances_security_groups_bindings(
context, servers, detailed)
return sg_bindings.get(instance_uuid, [])
def _has_security_group_requirements(self, port):
port_security_enabled = port.get('port_security_enabled', True)
has_ip = port.get('fixed_ips')
if has_ip:
return port_security_enabled
return False
@compute_api.wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
neutron = neutronapi.get_client(context)
try:
security_group_id = neutronv20.find_resourceid_by_name_or_id(
neutron, 'security_group',
security_group_name,
context.project_id)
except n_exc.NeutronClientNoUniqueMatch as e:
raise exception.NoUniqueMatch(six.text_type(e))
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
msg = (_("Security group %(name)s is not found for "
"project %(project)s") %
{'name': security_group_name,
'project': context.project_id})
self.raise_not_found(msg)
else:
LOG.exception(_LE("Neutron Error:"))
six.reraise(*exc_info)
params = {'device_id': instance.uuid}
try:
ports = neutron.list_ports(**params).get('ports')
except n_exc.NeutronClientException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Neutron Error:"))
if not ports:
msg = (_("instance_id %s could not be found as device id on"
" any ports") % instance.uuid)
self.raise_not_found(msg)
for port in ports:
if not self._has_security_group_requirements(port):
LOG.warning(_LW("Cannot add security group %(name)s to "
"%(instance)s since the port %(port_id)s "
"does not meet security requirements"),
{'name': security_group_name,
'instance': instance.uuid,
'port_id': port['id']})
raise exception.SecurityGroupCannotBeApplied()
if 'security_groups' not in port:
port['security_groups'] = []
port['security_groups'].append(security_group_id)
updated_port = {'security_groups': port['security_groups']}
try:
LOG.info(_LI("Adding security group %(security_group_id)s to "
"port %(port_id)s"),
{'security_group_id': security_group_id,
'port_id': port['id']})
neutron.update_port(port['id'], {'port': updated_port})
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Neutron Error:"))
@compute_api.wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
neutron = neutronapi.get_client(context)
try:
security_group_id = neutronv20.find_resourceid_by_name_or_id(
neutron, 'security_group',
security_group_name,
context.project_id)
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
msg = (_("Security group %(name)s is not found for "
"project %(project)s") %
{'name': security_group_name,
'project': context.project_id})
self.raise_not_found(msg)
else:
LOG.exception(_LE("Neutron Error:"))
six.reraise(*exc_info)
params = {'device_id': instance.uuid}
try:
ports = neutron.list_ports(**params).get('ports')
except n_exc.NeutronClientException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Neutron Error:"))
if not ports:
msg = (_("instance_id %s could not be found as device id on"
" any ports") % instance.uuid)
self.raise_not_found(msg)
found_security_group = False
for port in ports:
try:
port.get('security_groups', []).remove(security_group_id)
except ValueError:
# When removing a security group from an instance the security
# group should be on both ports since it was added this way if
# done through the nova api. In case it is not a 404 is only
# raised if the security group is not found on any of the
# ports on the instance.
continue
updated_port = {'security_groups': port['security_groups']}
try:
LOG.info(_LI("Adding security group %(security_group_id)s to "
"port %(port_id)s"),
{'security_group_id': security_group_id,
'port_id': port['id']})
neutron.update_port(port['id'], {'port': updated_port})
found_security_group = True
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Neutron Error:"))
if not found_security_group:
msg = (_("Security group %(security_group_name)s not associated "
"with the instance %(instance)s") %
{'security_group_name': security_group_name,
'instance': instance.uuid})
self.raise_not_found(msg)
def populate_security_groups(self, instance, security_groups):
# Setting to empty list since we do not want to populate this field
# in the nova database if using the neutron driver
instance.security_groups = objects.SecurityGroupList()
def get_default_rule(self, context, id):
msg = _("Network driver does not support this function.")
raise exc.HTTPNotImplemented(explanation=msg)
def get_all_default_rules(self, context):
msg = _("Network driver does not support this function.")
raise exc.HTTPNotImplemented(explanation=msg)
def add_default_rules(self, context, vals):
msg = _("Network driver does not support this function.")
raise exc.HTTPNotImplemented(explanation=msg)
def remove_default_rules(self, context, rule_ids):
msg = _("Network driver does not support this function.")
raise exc.HTTPNotImplemented(explanation=msg)
def default_rule_exists(self, context, values):
msg = _("Network driver does not support this function.")
raise exc.HTTPNotImplemented(explanation=msg)
# --- source: codeparrot/codeparrot-clean (language: unknown) ---
# -*- coding: utf-8 -*-
# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{
"name": "Contact's nationality",
"version": "8.0.1.0.0",
"author": "Odoo Community Association (OCA)",
"category": "Customer Relationship Management",
"website": "https://odoo-community.org/",
"depends": [
"partner_contact_personal_information_page",
],
"data": [
"views/res_partner.xml",
],
"license": "AGPL-3",
}
# --- source: codeparrot/codeparrot-clean (language: unknown) ---
# -*- coding: utf-8 -*-
###############################################################################
#
# GetCustomerProfileIds
# Retrieves all existing customer profile IDs.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetCustomerProfileIds(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetCustomerProfileIds Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetCustomerProfileIds, self).__init__(temboo_session, '/Library/AuthorizeNet/CustomerInformationManager/GetCustomerProfileIds')
def new_input_set(self):
return GetCustomerProfileIdsInputSet()
def _make_result_set(self, result, path):
return GetCustomerProfileIdsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetCustomerProfileIdsChoreographyExecution(session, exec_id, path)
class GetCustomerProfileIdsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetCustomerProfileIds
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APILoginId(self, value):
"""
Set the value of the APILoginId input for this Choreo. ((required, string) The API Login Id provided by Authorize.net when signing up for a developer account.)
"""
super(GetCustomerProfileIdsInputSet, self)._set_input('APILoginId', value)
def set_Endpoint(self, value):
"""
Set the value of the Endpoint input for this Choreo. ((optional, string) Set to api.authorize.net when running in production. Defaults to apitest.authorize.net for sandbox testing.)
"""
super(GetCustomerProfileIdsInputSet, self)._set_input('Endpoint', value)
def set_TransactionKey(self, value):
"""
Set the value of the TransactionKey input for this Choreo. ((required, string) The TransactionKey provided by Authorize.net when signing up for a developer account.)
"""
super(GetCustomerProfileIdsInputSet, self)._set_input('TransactionKey', value)
class GetCustomerProfileIdsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetCustomerProfileIds Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Authorize.net.)
"""
return self._output.get('Response', None)
class GetCustomerProfileIdsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetCustomerProfileIdsResultSet(response, path)
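# A hedged usage sketch (account and key names are placeholders; the
# TembooSession import path and execute_with_results call are assumed from the
# Temboo Python SDK):
#
#     from temboo.core.session import TembooSession
#
#     session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#     choreo = GetCustomerProfileIds(session)
#     inputs = choreo.new_input_set()
#     inputs.set_APILoginId('YOUR_API_LOGIN_ID')
#     inputs.set_TransactionKey('YOUR_TRANSACTION_KEY')
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Response())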
# --- source: codeparrot/codeparrot-clean (language: unknown) ---
import unittest
from hamcrest import has_entries
from hamcrest.core import *
from lib.csv import tabular_map, dict_map
from lib.service import Service
from test.service import details
class TestCSV(unittest.TestCase):
def test_csv_generation(self):
services = [
Service(details({"Name of service": "test_name", "Abbr": "tn"})),
Service(details({"Name of service": "test_name_2", "Abbr": "tn2"}))
]
table = tabular_map([("name_column", lambda s: s.name),
("abbr", lambda s: s.abbr)],
services)
assert_that(table, is_([["name_column", "abbr"],
["test_name", "tn"],
["test_name_2", "tn2"]]))
def test_dict_generation(self):
services = [
Service(details({"Name of service": "test_name_2", "Abbr": "tn2", "2013-Q3 Vol.": "6,400,000"})),
Service(details({"Name of service": "test_name_3", "Abbr": "tn3", "2012-Q4 Vol.": "6,400,000"}))
]
dicts = dict_map([("name", lambda s: s.name),
("abbr", lambda s: s.abbr)],
services)
assert_that(dicts[0], has_entries({'name': 'test_name_2', 'abbr': 'tn2'}))
assert_that(dicts[1], has_entries({'name': 'test_name_3', 'abbr': 'tn3', 'historic': 'Apr 2011 to Mar 2012'}))
def test_strings_get_utf8_encoded(self):
services = [Service(details({"Name of service": u"\u2019"}))]
table = tabular_map([("column", lambda s: s.name)], services)
assert_that(table, is_([["column"], ["\xe2\x80\x99"]]))
# --- source: codeparrot/codeparrot-clean (language: unknown) ---
from collections.abc import Iterable
from django.apps import apps
from django.contrib import auth
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.hashers import make_password
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from .validators import UnicodeUsernameValidator
def update_last_login(sender, user, **kwargs):
"""
A signal receiver which updates the last_login date for
the user logging in.
"""
user.last_login = timezone.now()
user.save(update_fields=["last_login"])
class PermissionManager(models.Manager):
use_in_migrations = True
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.db_manager(self.db).get_by_natural_key(
app_label, model
),
)
class Permission(models.Model):
"""
The permissions system provides a way to assign permissions to specific
users and groups of users.
The permission system is used by the Django admin site, but may also be
useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add" form
and add an object.
- The "change" permission limits a user's ability to view the change
list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
- The "view" permission limits the ability to view an object.
Permissions are set globally per type of object, not per specific object
instance. It is possible to say "Mary may change news stories," but it's
not currently possible to say "Mary may change news stories, but only the
ones she created herself" or "Mary may only change news stories that have a
certain status or publication date."
The permissions listed above are automatically created for each model.
"""
name = models.CharField(_("name"), max_length=255)
content_type = models.ForeignKey(
ContentType,
models.CASCADE,
verbose_name=_("content type"),
)
codename = models.CharField(_("codename"), max_length=100)
objects = PermissionManager()
class Meta:
verbose_name = _("permission")
verbose_name_plural = _("permissions")
unique_together = [["content_type", "codename"]]
ordering = ["content_type__app_label", "content_type__model", "codename"]
def __str__(self):
return "%s | %s" % (self.content_type, self.name)
def natural_key(self):
return (self.codename, *self.content_type.natural_key())
natural_key.dependencies = ["contenttypes.contenttype"]
class GroupManager(models.Manager):
"""
The manager for the auth's Group model.
"""
use_in_migrations = True
def get_by_natural_key(self, name):
return self.get(name=name)
async def aget_by_natural_key(self, name):
return await self.aget(name=name)
class Group(models.Model):
"""
Groups are a generic way of categorizing users to apply permissions, or
some other label, to those users. A user can belong to any number of
groups.
A user in a group automatically has all the permissions granted to that
group. For example, if the group 'Site editors' has the permission
can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to
apply some label, or extended functionality, to them. For example, you
could create a group 'Special users', and you could write code that would
do special things to those users -- such as giving them access to a
members-only portion of your site, or sending them members-only email
messages.
"""
name = models.CharField(_("name"), max_length=150, unique=True)
permissions = models.ManyToManyField(
Permission,
verbose_name=_("permissions"),
blank=True,
)
objects = GroupManager()
class Meta:
verbose_name = _("group")
verbose_name_plural = _("groups")
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user_object(self, username, email, password, **extra_fields):
if not username:
raise ValueError("The given username must be set")
email = self.normalize_email(email)
# Lookup the real model class from the global app registry so this
# manager method can be used in migrations. This is fine because
# managers are by definition working on the real model.
GlobalUserModel = apps.get_model(
self.model._meta.app_label, self.model._meta.object_name
)
username = GlobalUserModel.normalize_username(username)
user = self.model(username=username, email=email, **extra_fields)
user.password = make_password(password)
return user
def _create_user(self, username, email, password, **extra_fields):
"""
Create and save a user with the given username, email, and password.
"""
user = self._create_user_object(username, email, password, **extra_fields)
user.save(using=self._db)
return user
async def _acreate_user(self, username, email, password, **extra_fields):
"""See _create_user()"""
user = self._create_user_object(username, email, password, **extra_fields)
await user.asave(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
extra_fields.setdefault("is_staff", False)
extra_fields.setdefault("is_superuser", False)
return self._create_user(username, email, password, **extra_fields)
create_user.alters_data = True
async def acreate_user(self, username, email=None, password=None, **extra_fields):
extra_fields.setdefault("is_staff", False)
extra_fields.setdefault("is_superuser", False)
return await self._acreate_user(username, email, password, **extra_fields)
acreate_user.alters_data = True
def create_superuser(self, username, email=None, password=None, **extra_fields):
extra_fields.setdefault("is_staff", True)
extra_fields.setdefault("is_superuser", True)
if extra_fields.get("is_staff") is not True:
raise ValueError("Superuser must have is_staff=True.")
if extra_fields.get("is_superuser") is not True:
raise ValueError("Superuser must have is_superuser=True.")
return self._create_user(username, email, password, **extra_fields)
create_superuser.alters_data = True
async def acreate_superuser(
self, username, email=None, password=None, **extra_fields
):
extra_fields.setdefault("is_staff", True)
extra_fields.setdefault("is_superuser", True)
if extra_fields.get("is_staff") is not True:
raise ValueError("Superuser must have is_staff=True.")
if extra_fields.get("is_superuser") is not True:
raise ValueError("Superuser must have is_superuser=True.")
return await self._acreate_user(username, email, password, **extra_fields)
acreate_superuser.alters_data = True
def with_perm(
self, perm, is_active=True, include_superusers=True, backend=None, obj=None
):
if backend is None:
backends = auth.get_backends()
if len(backends) == 1:
backend = backends[0]
else:
raise ValueError(
"You have multiple authentication backends configured and "
"therefore must provide the `backend` argument."
)
elif not isinstance(backend, str):
raise TypeError(
"backend must be a dotted import path string (got %r)." % backend
)
else:
backend = auth.load_backend(backend)
if hasattr(backend, "with_perm"):
return backend.with_perm(
perm,
is_active=is_active,
include_superusers=include_superusers,
obj=obj,
)
return self.none()
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_permissions(user, obj, from_name):
permissions = set()
name = "get_%s_permissions" % from_name
for backend in auth.get_backends():
if hasattr(backend, name):
permissions.update(getattr(backend, name)(user, obj))
return permissions
async def _auser_get_permissions(user, obj, from_name):
permissions = set()
name = "aget_%s_permissions" % from_name
for backend in auth.get_backends():
if hasattr(backend, name):
permissions.update(await getattr(backend, name)(user, obj))
return permissions
def _user_has_perm(user, perm, obj):
"""
A backend can raise `PermissionDenied` to short-circuit permission checks.
"""
for backend in auth.get_backends():
if not hasattr(backend, "has_perm"):
continue
try:
if backend.has_perm(user, perm, obj):
return True
except PermissionDenied:
return False
return False
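# A hedged illustration (hypothetical backend, not part of Django): a custom
# authentication backend can veto a permission check outright by raising
# PermissionDenied, which the loop above turns into an immediate False:
#
#     class DenyEverythingBackend:
#         def has_perm(self, user_obj, perm, obj=None):
#             raise PermissionDenied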
async def _auser_has_perm(user, perm, obj):
"""See _user_has_perm()"""
for backend in auth.get_backends():
if not hasattr(backend, "ahas_perm"):
continue
try:
if await backend.ahas_perm(user, perm, obj):
return True
except PermissionDenied:
return False
return False
def _user_has_module_perms(user, app_label):
"""
A backend can raise `PermissionDenied` to short-circuit permission checks.
"""
for backend in auth.get_backends():
if not hasattr(backend, "has_module_perms"):
continue
try:
if backend.has_module_perms(user, app_label):
return True
except PermissionDenied:
return False
return False
async def _auser_has_module_perms(user, app_label):
"""See _user_has_module_perms()"""
for backend in auth.get_backends():
if not hasattr(backend, "ahas_module_perms"):
continue
try:
if await backend.ahas_module_perms(user, app_label):
return True
except PermissionDenied:
return False
return False
class PermissionsMixin(models.Model):
"""
Add the fields and methods necessary to support the Group and Permission
models using the ModelBackend.
"""
is_superuser = models.BooleanField(
_("superuser status"),
default=False,
help_text=_(
"Designates that this user has all permissions without "
"explicitly assigning them."
),
)
groups = models.ManyToManyField(
Group,
verbose_name=_("groups"),
blank=True,
help_text=_(
"The groups this user belongs to. A user will get all permissions "
"granted to each of their groups."
),
related_name="user_set",
related_query_name="user",
)
user_permissions = models.ManyToManyField(
Permission,
verbose_name=_("user permissions"),
blank=True,
help_text=_("Specific permissions for this user."),
related_name="user_set",
related_query_name="user",
)
class Meta:
abstract = True
def get_user_permissions(self, obj=None):
"""
Return a list of permission strings that this user has directly.
Query all available auth backends. If an object is passed in,
return only permissions matching this object.
"""
return _user_get_permissions(self, obj, "user")
async def aget_user_permissions(self, obj=None):
"""See get_user_permissions()"""
return await _auser_get_permissions(self, obj, "user")
def get_group_permissions(self, obj=None):
"""
Return a list of permission strings that this user has through their
groups. Query all available auth backends. If an object is passed in,
return only permissions matching this object.
"""
return _user_get_permissions(self, obj, "group")
async def aget_group_permissions(self, obj=None):
"""See get_group_permissions()"""
return await _auser_get_permissions(self, obj, "group")
def get_all_permissions(self, obj=None):
return _user_get_permissions(self, obj, "all")
async def aget_all_permissions(self, obj=None):
return await _auser_get_permissions(self, obj, "all")
def has_perm(self, perm, obj=None):
"""
Return True if the user has the specified permission. Query all
available auth backends, but return immediately if any backend returns
True. Thus, a user who has permission from a single auth backend is
assumed to have permission in general. If an object is provided, check
permissions for that object.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
async def ahas_perm(self, perm, obj=None):
"""See has_perm()"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return await _auser_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Return True if the user has each of the specified permissions. If
object is passed, check if the user has all required perms for it.
"""
if not isinstance(perm_list, Iterable) or isinstance(perm_list, str):
raise ValueError("perm_list must be an iterable of permissions.")
return all(self.has_perm(perm, obj) for perm in perm_list)
async def ahas_perms(self, perm_list, obj=None):
"""See has_perms()"""
if not isinstance(perm_list, Iterable) or isinstance(perm_list, str):
raise ValueError("perm_list must be an iterable of permissions.")
for perm in perm_list:
if not await self.ahas_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Return True if the user has any permissions in the given app label.
Use similar logic as has_perm(), above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
async def ahas_module_perms(self, app_label):
"""See has_module_perms()"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return await _auser_has_module_perms(self, app_label)
class AbstractUser(AbstractBaseUser, PermissionsMixin):
"""
An abstract base class implementing a fully featured User model with
admin-compliant permissions.
Username and password are required. Other fields are optional.
"""
username_validator = UnicodeUsernameValidator()
username = models.CharField(
_("username"),
max_length=150,
unique=True,
help_text=_(
"Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only."
),
validators=[username_validator],
error_messages={
"unique": _("A user with that username already exists."),
},
)
first_name = models.CharField(_("first name"), max_length=150, blank=True)
last_name = models.CharField(_("last name"), max_length=150, blank=True)
email = models.EmailField(_("email address"), blank=True)
is_staff = models.BooleanField(
_("staff status"),
default=False,
help_text=_("Designates whether the user can log into this admin site."),
)
is_active = models.BooleanField(
_("active"),
default=True,
help_text=_(
"Designates whether this user should be treated as active. "
"Unselect this instead of deleting accounts."
),
)
date_joined = models.DateTimeField(_("date joined"), default=timezone.now)
objects = UserManager()
EMAIL_FIELD = "email"
USERNAME_FIELD = "username"
REQUIRED_FIELDS = ["email"]
class Meta:
verbose_name = _("user")
verbose_name_plural = _("users")
abstract = True
def clean(self):
super().clean()
self.email = self.__class__.objects.normalize_email(self.email)
def get_full_name(self):
"""
Return the first_name plus the last_name, with a space in between.
"""
full_name = "%s %s" % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"""Return the short name for the user."""
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""Send an email to this user."""
send_mail(subject, message, from_email, [self.email], **kwargs)
class User(AbstractUser):
"""
Users within the Django authentication system are represented by this
model.
Username and password are required. Other fields are optional.
"""
class Meta(AbstractUser.Meta):
swappable = "AUTH_USER_MODEL"
class AnonymousUser:
id = None
pk = None
username = ""
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager(Group)
_user_permissions = EmptyManager(Permission)
def __str__(self):
return "AnonymousUser"
def __eq__(self, other):
return isinstance(other, self.__class__)
def __hash__(self):
return 1 # instances always return the same hash value
def __int__(self):
raise TypeError(
"Cannot cast AnonymousUser to int. Are you trying to use it in place of "
"User?"
)
def save(self):
raise NotImplementedError(
"Django doesn't provide a DB representation for AnonymousUser."
)
def delete(self):
raise NotImplementedError(
"Django doesn't provide a DB representation for AnonymousUser."
)
def set_password(self, raw_password):
raise NotImplementedError(
"Django doesn't provide a DB representation for AnonymousUser."
)
def check_password(self, raw_password):
raise NotImplementedError(
"Django doesn't provide a DB representation for AnonymousUser."
)
@property
def groups(self):
return self._groups
@property
def user_permissions(self):
return self._user_permissions
def get_user_permissions(self, obj=None):
return _user_get_permissions(self, obj, "user")
async def aget_user_permissions(self, obj=None):
return await _auser_get_permissions(self, obj, "user")
def get_group_permissions(self, obj=None):
return set()
async def aget_group_permissions(self, obj=None):
return self.get_group_permissions(obj)
def get_all_permissions(self, obj=None):
return _user_get_permissions(self, obj, "all")
async def aget_all_permissions(self, obj=None):
return await _auser_get_permissions(self, obj, "all")
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
async def ahas_perm(self, perm, obj=None):
return await _auser_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
if not isinstance(perm_list, Iterable) or isinstance(perm_list, str):
raise ValueError("perm_list must be an iterable of permissions.")
return all(self.has_perm(perm, obj) for perm in perm_list)
async def ahas_perms(self, perm_list, obj=None):
if not isinstance(perm_list, Iterable) or isinstance(perm_list, str):
raise ValueError("perm_list must be an iterable of permissions.")
for perm in perm_list:
if not await self.ahas_perm(perm, obj):
return False
return True
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
async def ahas_module_perms(self, module):
return await _auser_has_module_perms(self, module)
@property
def is_anonymous(self):
return True
@property
def is_authenticated(self):
return False
def get_username(self):
return self.username
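# A hedged usage sketch (a configured Django project with the default
# ModelBackend is assumed; the permission string is illustrative):
#
#     from django.contrib.auth.models import User
#
#     user = User.objects.create_user('alice', 'alice@example.com', 's3cret')
#     user.has_perm('auth.change_user')            # queries all configured backends
#     User.objects.with_perm('auth.change_user')   # queryset of users holding the perm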
# --- source: github, https://github.com/django/django, django/contrib/auth/models.py (language: python) ---
#
# File : win32spawn.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-01-20 Bernard Add copyright information
#
import os
import threading
import Queue
# Windows import
import win32file
import win32pipe
import win32api
import win32con
import win32security
import win32process
import win32event
class Win32Spawn(object):
def __init__(self, cmd, shell=False):
self.queue = Queue.Queue()
self.is_terminated = False
self.wake_up_event = win32event.CreateEvent(None, 0, 0, None)
exec_dir = os.getcwd()
comspec = os.environ.get("COMSPEC", "cmd.exe")
cmd = comspec + ' /c ' + cmd
win32event.ResetEvent(self.wake_up_event)
currproc = win32api.GetCurrentProcess()
sa = win32security.SECURITY_ATTRIBUTES()
sa.bInheritHandle = 1
child_stdout_rd, child_stdout_wr = win32pipe.CreatePipe(sa, 0)
child_stdout_rd_dup = win32api.DuplicateHandle(currproc, child_stdout_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stdout_rd)
child_stderr_rd, child_stderr_wr = win32pipe.CreatePipe(sa, 0)
child_stderr_rd_dup = win32api.DuplicateHandle(currproc, child_stderr_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stderr_rd)
child_stdin_rd, child_stdin_wr = win32pipe.CreatePipe(sa, 0)
child_stdin_wr_dup = win32api.DuplicateHandle(currproc, child_stdin_wr, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stdin_wr)
startup_info = win32process.STARTUPINFO()
startup_info.hStdInput = child_stdin_rd
startup_info.hStdOutput = child_stdout_wr
startup_info.hStdError = child_stderr_wr
startup_info.dwFlags = win32process.STARTF_USESTDHANDLES
        cr_flags = win32process.CREATE_NEW_PROCESS_GROUP
env = os.environ.copy()
self.h_process, h_thread, dw_pid, dw_tid = win32process.CreateProcess(None, cmd, None, None, 1,
cr_flags, env, os.path.abspath(exec_dir),
startup_info)
win32api.CloseHandle(h_thread)
win32file.CloseHandle(child_stdin_rd)
win32file.CloseHandle(child_stdout_wr)
win32file.CloseHandle(child_stderr_wr)
self.__child_stdout = child_stdout_rd_dup
self.__child_stderr = child_stderr_rd_dup
self.__child_stdin = child_stdin_wr_dup
self.exit_code = -1
def close(self):
win32file.CloseHandle(self.__child_stdout)
win32file.CloseHandle(self.__child_stderr)
win32file.CloseHandle(self.__child_stdin)
win32api.CloseHandle(self.h_process)
win32api.CloseHandle(self.wake_up_event)
    def kill_subprocess(self):
win32event.SetEvent(self.wake_up_event)
    def sleep(self, secs):
win32event.ResetEvent(self.wake_up_event)
timeout = int(1000 * secs)
val = win32event.WaitForSingleObject(self.wake_up_event, timeout)
if val == win32event.WAIT_TIMEOUT:
return True
else:
# The wake_up_event must have been signalled
return False
def get(self, block=True, timeout=None):
return self.queue.get(block=block, timeout=timeout)
def qsize(self):
return self.queue.qsize()
def __wait_for_child(self):
# kick off threads to read from stdout and stderr of the child process
threading.Thread(target=self.__do_read, args=(self.__child_stdout, )).start()
threading.Thread(target=self.__do_read, args=(self.__child_stderr, )).start()
while True:
# block waiting for the process to finish or the interrupt to happen
handles = (self.wake_up_event, self.h_process)
val = win32event.WaitForMultipleObjects(handles, 0, win32event.INFINITE)
if val >= win32event.WAIT_OBJECT_0 and val < win32event.WAIT_OBJECT_0 + len(handles):
handle = handles[val - win32event.WAIT_OBJECT_0]
if handle == self.wake_up_event:
win32api.TerminateProcess(self.h_process, 1)
win32event.ResetEvent(self.wake_up_event)
return False
elif handle == self.h_process:
# the process has ended naturally
return True
else:
assert False, "Unknown handle fired"
else:
assert False, "Unexpected return from WaitForMultipleObjects"
    # Wait for the job to finish. Since this method blocks, it may need to be called from another thread.
# If the application wants to kill the process, it should call kill_subprocess().
def wait(self):
if not self.__wait_for_child():
# it's been killed
result = False
else:
# normal termination
self.exit_code = win32process.GetExitCodeProcess(self.h_process)
result = self.exit_code == 0
self.close()
self.is_terminated = True
return result
# This method gets called on a worker thread to read from either a stderr
# or stdout thread from the child process.
def __do_read(self, handle):
bytesToRead = 1024
while 1:
try:
finished = 0
hr, data = win32file.ReadFile(handle, bytesToRead, None)
if data:
self.queue.put_nowait(data)
except win32api.error:
finished = 1
if finished:
return
def start_pipe(self):
def worker(pipe):
return pipe.wait()
thrd = threading.Thread(target=worker, args=(self, ))
thrd.start()
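# A hedged usage sketch (Windows-only, Python 2, pywin32 assumed installed):
#
#     spawn = Win32Spawn('dir')
#     spawn.start_pipe()            # runs wait() on a worker thread
#     while not spawn.is_terminated or spawn.qsize():
#         try:
#             print spawn.get(timeout=0.1),
#         except Queue.Empty:
#             pass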
# --- source: codeparrot/codeparrot-clean (language: unknown) ---