repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
greyli/flask-avatars
|
flask_avatars/identicon.py
|
Identicon._create_matrix
|
python
|
def _create_matrix(self, byte_list):
    """Build the rows x cols boolean fill matrix for the identicon.

    True marks a foreground-coloured cell, False a background cell.
    Only about half of the cells are decided from hash bits; each set
    cell is mirrored horizontally so the avatar is symmetric.

    :param byte_list: byte values derived from the hash digest;
        byte 0 is skipped here (it picks the foreground colour).
    :return: list of per-row lists of bool.
    """
    # Number of rows * cols halfed and rounded
    # in order to fill opposite side
    cells = int(self.rows * self.cols / 2 + self.cols % 2)
    matrix = [[False] * self.cols for num in range(self.rows)]
    for cell_number in range(cells):
        # If the bit with index corresponding to this cell is 1
        # mark that cell as fg_colour
        # Skip byte 1, that's used in determining fg_colour
        if self._bit_is_one(cell_number, byte_list[1:]):
            # Find cell coordinates in matrix.
            # NOTE(review): the row uses % self.rows but the column
            # divides by self.cols -- consistent only when rows == cols;
            # confirm intent for non-square grids.
            x_row = cell_number % self.rows
            y_col = int(cell_number / self.cols)
            # Set coord True and its opposite side
            matrix[x_row][self.cols - y_col - 1] = True
            matrix[x_row][y_col] = True
    return matrix
|
This matrix decides which blocks should be filled fg/bg colour
True for fg_colour
False for bg_colour
hash_bytes - array of hash bytes values. RGB range values in each slot
Returns:
List representation of the matrix
[[True, True, True, True],
[False, True, True, False],
[True, True, True, True],
[False, False, False, False]]
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L169-L203
|
[
"def _bit_is_one(self, n, hash_bytes):\n \"\"\"\n Check if the n (index) of hash_bytes is 1 or 0.\n \"\"\"\n\n scale = 16 # hexadecimal\n\n if not hash_bytes[int(n / (scale / 2))] >> int(\n (scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1:\n return False\n return True\n"
] |
class Identicon(object):
    """Generate a symmetric identicon avatar from a text seed.

    The seed text is hashed (md5); hash bits decide which cells of a
    rows x cols grid are painted in the foreground colour, and the grid
    is rendered to PNG bytes with Pillow.
    """
    def __init__(self, rows=None, cols=None, bg_color=None):
        """Generate identicon image.
        :param rows: The row of pixels in avatar.
        :param columns: The column of pixels in avatar.
        :param bg_color: Background color, pass RGB tuple, for example: (125, 125, 125).
            Set it to ``None`` to use random color.
        """
        self.rows = rows or current_app.config['AVATARS_IDENTICON_ROWS']
        self.cols = cols or current_app.config['AVATARS_IDENTICON_COLS']
        self.bg_colour = bg_color or current_app.config['AVATARS_IDENTICON_BG']
        self._generate_colours()
        # Probe the digest size: md5's hexdigest is 32 hex chars,
        # i.e. 16 bytes -> 128 bits of entropy.
        m = hashlib.md5()
        m.update(b"hello world")
        entropy = len(m.hexdigest()) / 2 * 8
        if self.rows > 15 or self.cols > 15:
            raise ValueError("Rows and columns must be valued 15 or under")
        # Digest factory used by _string_to_byte_list.
        self.digest = hashlib.md5
        self.digest_entropy = entropy
    def _generate_colours(self):
        """Pick the foreground (and, if unset, background) pastel colours.

        Intended to retry until the fg/bg luminance contrast passes a
        threshold.
        NOTE(review): both branches below set ``colours_ok = True``, so
        the loop always exits after one iteration and the contrast check
        is effectively a no-op -- confirm whether the ``else`` branch
        should retry instead.
        """
        colours_ok = False
        while colours_ok is False:
            self.fg_colour = self._get_pastel_colour()
            if self.bg_colour is None:
                self.bg_colour = self._get_pastel_colour(lighten=80)
            # Get the luminance for each colour
            fg_lum = self._luminance(self.fg_colour) + 0.05
            bg_lum = self._luminance(self.bg_colour) + 0.05
            # Check the difference in luminance
            # meets the 1.25 threshold
            # NOTE(review): ``fg_lum / bg_lum`` is positive, hence always
            # truthy, so the conditional always takes the first branch.
            result = (fg_lum / bg_lum) \
                if (fg_lum / bg_lum) else (bg_lum / fg_lum)
            if result > 1.20:
                colours_ok = True
            else:
                colours_ok = True
    def get_image(self, string, width, height, pad=0):
        """Return PNG bytes of the identicon for ``string``.

        :param string: seed text; hashed to derive the pixel matrix.
        :param width: image width in pixels (excluding padding).
        :param height: image height in pixels (excluding padding).
        :param pad: padding in pixels added on every side.
        """
        hex_digest_byte_list = self._string_to_byte_list(string)
        matrix = self._create_matrix(hex_digest_byte_list)
        return self._create_image(matrix, width, height, pad)
    def save(self, image_byte_array=None, save_location=None):
        """Write image bytes to ``save_location``; return bytes written.

        :raises ValueError: if either argument is missing/empty.
        """
        if image_byte_array and save_location:
            with open(save_location, 'wb') as f:
                return f.write(image_byte_array)
        else:
            raise ValueError('image_byte_array and path must be provided')
    def _get_pastel_colour(self, lighten=127):
        """Return a random pastel colour as an (r, g, b) tuple.

        Each channel is ``random.randint(0, 128) + lighten``, so larger
        ``lighten`` values yield lighter colours.
        """
        def r():
            return random.randint(0, 128) + lighten
        return r(), r(), r()  # return rgb values as a tuple
    def _luminance(self, rgb):
        """Return the relative luminance of an RGB colour.

        Uses the piecewise sRGB-to-linear conversion and Rec. 709
        coefficients (the WCAG relative-luminance formula).
        """
        a = []
        for v in rgb:
            v = v / float(255)
            if v < 0.03928:
                result = v / 12.92
            else:
                result = math.pow(((v + 0.055) / 1.055), 2.4)
            a.append(result)
        return a[0] * 0.2126 + a[1] * 0.7152 + a[2] * 0.0722
    def _string_to_byte_list(self, data):
        """
        Creates a hex digest of the input string given to create the image,
        if it's not already hexadecimal
        Returns:
            Length 16 list of rgb value range integers
            (each representing a byte of the hex digest)
        """
        bytes_length = 16
        m = self.digest()
        m.update(str.encode(data))
        hex_digest = m.hexdigest()
        # Parse each pair of hex chars as one byte value (0-255).
        return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length)
                    for num in range(bytes_length))
    def _bit_is_one(self, n, hash_bytes):
        """
        Check if the n (index) of hash_bytes is 1 or 0.
        """
        scale = 16  # hexadecimal
        # Each byte contributes scale/2 == 8 bits: pick the byte holding
        # bit n, shift so that bit becomes the LSB, then mask it.
        if not hash_bytes[int(n / (scale / 2))] >> int(
                (scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1:
            return False
        return True
    def _create_image(self, matrix, width, height, pad):
        """Render the boolean matrix to PNG bytes using Pillow.

        :param matrix: rows x cols list of bools (True = foreground).
        :param width: drawing-area width in pixels (padding excluded).
        :param height: drawing-area height in pixels (padding excluded).
        :param pad: pixels of background added around the drawing area.
        :return: PNG-encoded bytes.
        """
        image = Image.new("RGB", (width + (pad * 2),
                                  height + (pad * 2)), self.bg_colour)
        image_draw = ImageDraw.Draw(image)
        # Calculate the block width and height.
        block_width = float(width) / self.cols
        block_height = float(height) / self.rows
        # Loop through blocks in matrix, draw rectangles.
        for row, cols in enumerate(matrix):
            for col, cell in enumerate(cols):
                if cell:
                    image_draw.rectangle((
                        pad + col * block_width,  # x1
                        pad + row * block_height,  # y1
                        pad + (col + 1) * block_width - 1,  # x2
                        pad + (row + 1) * block_height - 1  # y2
                    ), fill=self.fg_colour)
        stream = BytesIO()
        image.save(stream, format="png", optimize=True)
        # return the image byte data
        return stream.getvalue()
    def generate(self, text):
        """Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l].
        :param text: The text used to generate image.
        """
        sizes = current_app.config['AVATARS_SIZE_TUPLE']
        path = current_app.config['AVATARS_SAVE_PATH']
        # Map each configured size to its filename suffix.
        suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'}
        for size in sizes:
            image_byte_array = self.get_image(
                string=str(text),
                width=int(size),
                height=int(size),
                pad=int(size * 0.1))  # 10% padding per side
            self.save(image_byte_array, save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size])))
        return [text + '_s.png', text + '_m.png', text + '_l.png']
|
greyli/flask-avatars
|
flask_avatars/identicon.py
|
Identicon.generate
|
python
|
def generate(self, text):
sizes = current_app.config['AVATARS_SIZE_TUPLE']
path = current_app.config['AVATARS_SAVE_PATH']
suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'}
for size in sizes:
image_byte_array = self.get_image(
string=str(text),
width=int(size),
height=int(size),
pad=int(size * 0.1))
self.save(image_byte_array, save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size])))
return [text + '_s.png', text + '_m.png', text + '_l.png']
|
Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l].
:param text: The text used to generate image.
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L205-L221
|
[
"def get_image(self, string, width, height, pad=0):\n \"\"\"\n Byte representation of a PNG image\n \"\"\"\n hex_digest_byte_list = self._string_to_byte_list(string)\n matrix = self._create_matrix(hex_digest_byte_list)\n return self._create_image(matrix, width, height, pad)\n",
"def save(self, image_byte_array=None, save_location=None):\n if image_byte_array and save_location:\n with open(save_location, 'wb') as f:\n return f.write(image_byte_array)\n else:\n raise ValueError('image_byte_array and path must be provided')\n"
] |
class Identicon(object):
    """Generate a symmetric identicon avatar from a text seed.

    The seed text is hashed (md5); hash bits decide which cells of a
    rows x cols grid are painted in the foreground colour, and the grid
    is rendered to PNG bytes with Pillow.
    """
    def __init__(self, rows=None, cols=None, bg_color=None):
        """Generate identicon image.
        :param rows: The row of pixels in avatar.
        :param columns: The column of pixels in avatar.
        :param bg_color: Background color, pass RGB tuple, for example: (125, 125, 125).
            Set it to ``None`` to use random color.
        """
        self.rows = rows or current_app.config['AVATARS_IDENTICON_ROWS']
        self.cols = cols or current_app.config['AVATARS_IDENTICON_COLS']
        self.bg_colour = bg_color or current_app.config['AVATARS_IDENTICON_BG']
        self._generate_colours()
        # Probe the digest size: md5's hexdigest is 32 hex chars,
        # i.e. 16 bytes -> 128 bits of entropy.
        m = hashlib.md5()
        m.update(b"hello world")
        entropy = len(m.hexdigest()) / 2 * 8
        if self.rows > 15 or self.cols > 15:
            raise ValueError("Rows and columns must be valued 15 or under")
        # Digest factory used by _string_to_byte_list.
        self.digest = hashlib.md5
        self.digest_entropy = entropy
    def _generate_colours(self):
        """Pick the foreground (and, if unset, background) pastel colours.

        Intended to retry until the fg/bg luminance contrast passes a
        threshold.
        NOTE(review): both branches below set ``colours_ok = True``, so
        the loop always exits after one iteration and the contrast check
        is effectively a no-op -- confirm whether the ``else`` branch
        should retry instead.
        """
        colours_ok = False
        while colours_ok is False:
            self.fg_colour = self._get_pastel_colour()
            if self.bg_colour is None:
                self.bg_colour = self._get_pastel_colour(lighten=80)
            # Get the luminance for each colour
            fg_lum = self._luminance(self.fg_colour) + 0.05
            bg_lum = self._luminance(self.bg_colour) + 0.05
            # Check the difference in luminance
            # meets the 1.25 threshold
            # NOTE(review): ``fg_lum / bg_lum`` is positive, hence always
            # truthy, so the conditional always takes the first branch.
            result = (fg_lum / bg_lum) \
                if (fg_lum / bg_lum) else (bg_lum / fg_lum)
            if result > 1.20:
                colours_ok = True
            else:
                colours_ok = True
    def get_image(self, string, width, height, pad=0):
        """Return PNG bytes of the identicon for ``string``.

        :param string: seed text; hashed to derive the pixel matrix.
        :param width: image width in pixels (excluding padding).
        :param height: image height in pixels (excluding padding).
        :param pad: padding in pixels added on every side.
        """
        hex_digest_byte_list = self._string_to_byte_list(string)
        matrix = self._create_matrix(hex_digest_byte_list)
        return self._create_image(matrix, width, height, pad)
    def save(self, image_byte_array=None, save_location=None):
        """Write image bytes to ``save_location``; return bytes written.

        :raises ValueError: if either argument is missing/empty.
        """
        if image_byte_array and save_location:
            with open(save_location, 'wb') as f:
                return f.write(image_byte_array)
        else:
            raise ValueError('image_byte_array and path must be provided')
    def _get_pastel_colour(self, lighten=127):
        """Return a random pastel colour as an (r, g, b) tuple.

        Each channel is ``random.randint(0, 128) + lighten``, so larger
        ``lighten`` values yield lighter colours.
        """
        def r():
            return random.randint(0, 128) + lighten
        return r(), r(), r()  # return rgb values as a tuple
    def _luminance(self, rgb):
        """Return the relative luminance of an RGB colour.

        Uses the piecewise sRGB-to-linear conversion and Rec. 709
        coefficients (the WCAG relative-luminance formula).
        """
        a = []
        for v in rgb:
            v = v / float(255)
            if v < 0.03928:
                result = v / 12.92
            else:
                result = math.pow(((v + 0.055) / 1.055), 2.4)
            a.append(result)
        return a[0] * 0.2126 + a[1] * 0.7152 + a[2] * 0.0722
    def _string_to_byte_list(self, data):
        """
        Creates a hex digest of the input string given to create the image,
        if it's not already hexadecimal
        Returns:
            Length 16 list of rgb value range integers
            (each representing a byte of the hex digest)
        """
        bytes_length = 16
        m = self.digest()
        m.update(str.encode(data))
        hex_digest = m.hexdigest()
        # Parse each pair of hex chars as one byte value (0-255).
        return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length)
                    for num in range(bytes_length))
    def _bit_is_one(self, n, hash_bytes):
        """
        Check if the n (index) of hash_bytes is 1 or 0.
        """
        scale = 16  # hexadecimal
        # Each byte contributes scale/2 == 8 bits: pick the byte holding
        # bit n, shift so that bit becomes the LSB, then mask it.
        if not hash_bytes[int(n / (scale / 2))] >> int(
                (scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1:
            return False
        return True
    def _create_image(self, matrix, width, height, pad):
        """Render the boolean matrix to PNG bytes using Pillow.

        :param matrix: rows x cols list of bools (True = foreground).
        :param width: drawing-area width in pixels (padding excluded).
        :param height: drawing-area height in pixels (padding excluded).
        :param pad: pixels of background added around the drawing area.
        :return: PNG-encoded bytes.
        """
        image = Image.new("RGB", (width + (pad * 2),
                                  height + (pad * 2)), self.bg_colour)
        image_draw = ImageDraw.Draw(image)
        # Calculate the block width and height.
        block_width = float(width) / self.cols
        block_height = float(height) / self.rows
        # Loop through blocks in matrix, draw rectangles.
        for row, cols in enumerate(matrix):
            for col, cell in enumerate(cols):
                if cell:
                    image_draw.rectangle((
                        pad + col * block_width,  # x1
                        pad + row * block_height,  # y1
                        pad + (col + 1) * block_width - 1,  # x2
                        pad + (row + 1) * block_height - 1  # y2
                    ), fill=self.fg_colour)
        stream = BytesIO()
        image.save(stream, format="png", optimize=True)
        # return the image byte data
        return stream.getvalue()
    def _create_matrix(self, byte_list):
        """
        This matrix decides which blocks should be filled fg/bg colour
        True for fg_colour
        False for bg_colour
        hash_bytes - array of hash bytes values. RGB range values in each slot
        Returns:
            List representation of the matrix
            [[True, True, True, True],
            [False, True, True, False],
            [True, True, True, True],
            [False, False, False, False]]
        """
        # Number of rows * cols halfed and rounded
        # in order to fill opposite side
        cells = int(self.rows * self.cols / 2 + self.cols % 2)
        matrix = [[False] * self.cols for num in range(self.rows)]
        for cell_number in range(cells):
            # If the bit with index corresponding to this cell is 1
            # mark that cell as fg_colour
            # Skip byte 1, that's used in determining fg_colour
            if self._bit_is_one(cell_number, byte_list[1:]):
                # Find cell coordinates in matrix.
                # NOTE(review): the row uses % self.rows but the column
                # divides by self.cols -- consistent only when
                # rows == cols; confirm intent for non-square grids.
                x_row = cell_number % self.rows
                y_col = int(cell_number / self.cols)
                # Set coord True and its opposite side
                matrix[x_row][self.cols - y_col - 1] = True
                matrix[x_row][y_col] = True
        return matrix
|
rfarley3/Kibana
|
kibana/mapping.py
|
KibanaMapping.get_field_cache
|
python
|
def get_field_cache(self, cache_type='es'):
    """Return a list of fields' mappings.

    :param cache_type: 'kibana' reads the cached index-pattern doc from
        the .kibana index; 'es' (or anything starting with 'elastic')
        reads live field mappings from Elasticsearch and converts them
        to the .kibana field-cache format.
    :return: list of field-mapping dicts; [] when the kibana doc is
        missing (HTTP error); None for an unknown cache_type.
    """
    if cache_type == 'kibana':
        try:
            search_results = urlopen(self.get_url).read().decode('utf-8')
        except HTTPError:  # as e:
            # self.pr_err("get_field_cache(kibana), HTTPError: %s" % e)
            return []
        index_pattern = json.loads(search_results)
        # Results look like: {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":6,"found":true,"_source":{"title":"aaa*","fields":"<what we want>"}} # noqa
        # 'fields' is a JSON string embedded inside the doc.
        fields_str = index_pattern['_source']['fields']
        return json.loads(fields_str)
    elif cache_type == 'es' or cache_type.startswith('elastic'):
        search_results = urlopen(self.es_get_url).read().decode('utf-8')
        es_mappings = json.loads(search_results)
        # Results look like: {"<index_name>":{"mappings":{"<doc_type>":{"<field_name>":{"full_name":"<field_name>","mapping":{"<sub-field_name>":{"type":"date","index_name":"<sub-field_name>","boost":1.0,"index":"not_analyzed","store":false,"doc_values":false,"term_vector":"no","norms":{"enabled":false},"index_options":"docs","index_analyzer":"_date/16","search_analyzer":"_date/max","postings_format":"default","doc_values_format":"default","similarity":"default","fielddata":{},"ignore_malformed":false,"coerce":true,"precision_step":16,"format":"dateOptionalTime","null_value":null,"include_in_all":false,"numeric_resolution":"milliseconds","locale":""}}}, # noqa
        # now convert the mappings into the .kibana format
        field_cache = []
        for (index_name, val) in iteritems(es_mappings):
            if index_name != self.index:  # only get non-'.kibana' indices
                # self.pr_dbg("index: %s" % index_name)
                m_dict = es_mappings[index_name]['mappings']
                # self.pr_dbg('m_dict %s' % m_dict)
                mappings = self.get_index_mappings(m_dict)
                # self.pr_dbg('mappings %s' % mappings)
                field_cache.extend(mappings)
        field_cache = self.dedup_field_cache(field_cache)
        return field_cache
    self.pr_err("Unknown cache type: %s" % cache_type)
    return None
|
Return a list of fields' mappings
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/mapping.py#L96-L125
|
[
"def iteritems(d):\n if PY3:\n return d.items()\n else:\n return d.iteritems()\n",
"def pr_err(self, msg):\n print('[ERR] Mapping %s' % msg)\n",
"def dedup_field_cache(self, field_cache):\n deduped = []\n fields_found = {}\n for field in field_cache:\n name = field['name']\n if name not in fields_found:\n deduped.append(field)\n fields_found[name] = field\n elif fields_found[name] != field:\n self.pr_dbg(\"Dup field doesn't match\")\n self.pr_dbg(\"1st found: %s\" % fields_found[name])\n self.pr_dbg(\" Dup one: %s\" % field)\n # else ignore, pass\n return deduped\n",
"def get_index_mappings(self, index):\n \"\"\"Converts all index's doc_types to .kibana\"\"\"\n fields_arr = []\n for (key, val) in iteritems(index):\n # self.pr_dbg(\"\\tdoc_type: %s\" % key)\n doc_mapping = self.get_doc_type_mappings(index[key])\n # self.pr_dbg(\"\\tdoc_mapping: %s\" % doc_mapping)\n if doc_mapping is None:\n return None\n # keep adding to the fields array\n fields_arr.extend(doc_mapping)\n return fields_arr\n"
] |
class KibanaMapping():
    """Keep the .kibana index-pattern field cache in sync with the
    field mappings Elasticsearch reports for an index pattern.
    """

    def __init__(self, index, index_pattern, host, debug=False):
        """
        :param index: kibana settings index name (e.g. '.kibana').
        :param index_pattern: index pattern to manage (e.g. 'aaa*').
        :param host: (hostname, port) tuple of the HTTP endpoint.
        :param debug: enable pr_dbg output when True.
        """
        self.index = index
        self._index_pattern = index_pattern
        self._host = host
        self.update_urls()
        # from the js possible mappings are:
        # { type, indexed, analyzed, doc_values }
        # but indexed and analyzed are .kibana specific,
        # determined by the value within ES's 'index', which could be:
        # { analyzed, no, not_analyzed }
        self.mappings = ['type', 'doc_values']
        # ignore system fields:
        self.sys_mappings = ['_source', '_index', '_type', '_id']
        # .kibana has some fields to ignore too:
        self.mappings_ignore = ['count']
        self.debug = debug

    def pr_dbg(self, msg):
        """Print a debug message (only when self.debug is True)."""
        if self.debug:
            print('[DBG] Mapping %s' % msg)

    def pr_inf(self, msg):
        """Print an informational message."""
        print('[INF] Mapping %s' % msg)

    def pr_err(self, msg):
        """Print an error message."""
        print('[ERR] Mapping %s' % msg)

    def update_urls(self):
        """Recompute the ES mapping URL and .kibana GET/POST URLs.

        Must be called whenever host, index or index pattern change.
        """
        # 'http://localhost:5601/elasticsearch/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
        # 'http://localhost:9200/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
        self.es_get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
                           '%s/' % self._index_pattern +
                           '_mapping/field/' +
                           '*?ignore_unavailable=false&' +
                           'allow_no_indices=false&' +
                           'include_defaults=true')
        # 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
        # 'http://localhost:9200/.kibana/index-pattern/aaa*'
        self.post_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
                         '%s/' % self.index +
                         'index-pattern/%s' % self._index_pattern)
        # 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
        # 'http://localhost:9200/.kibana/index-pattern/_search/?id=aaa*'
        # 'http://localhost:9200/.kibana/index-pattern/aaa*/'
        self.get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
                        '%s/' % self.index +
                        'index-pattern/%s/' % self._index_pattern)

    @property
    def index_pattern(self):
        """Index pattern; assignment also rebuilds the request URLs."""
        return self._index_pattern

    @index_pattern.setter
    def index_pattern(self, index_pattern):
        # BUG FIX: the setter function must be def'd under the property's
        # own name.  It was previously named 'index_pattern_setter', which
        # created a *separate* property and left 'index_pattern' itself
        # read-only ('obj.index_pattern = x' raised AttributeError).
        self._index_pattern = index_pattern
        self.update_urls()

    @property
    def host(self):
        """(hostname, port) tuple; assignment also rebuilds the URLs."""
        return self._host

    @host.setter
    def host(self, host):
        # BUG FIX: renamed from 'host_setter' (same defect as the
        # index_pattern setter above).
        self._host = host
        self.update_urls()

    def dedup_field_cache(self, field_cache):
        """Drop duplicate field entries (by 'name'), keeping the first.

        Duplicates whose contents differ from the first occurrence are
        logged at debug level.
        """
        deduped = []
        fields_found = {}
        for field in field_cache:
            name = field['name']
            if name not in fields_found:
                deduped.append(field)
                fields_found[name] = field
            elif fields_found[name] != field:
                self.pr_dbg("Dup field doesn't match")
                self.pr_dbg("1st found: %s" % fields_found[name])
                self.pr_dbg(" Dup one: %s" % field)
            # else ignore, pass
        return deduped

    def post_field_cache(self, field_cache):
        """Where field_cache is a list of fields' mappings"""
        index_pattern = self.field_cache_to_index_pattern(field_cache)
        # self.pr_dbg("request/post: %s" % index_pattern)
        resp = requests.post(self.post_url, data=index_pattern).text
        # resp = {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":1,"created":true} # noqa
        resp = json.loads(resp)
        # TODO detect failure (return 1) -- 'resp' is parsed but never
        # inspected, so errors are currently reported as success.
        return 0

    def field_cache_to_index_pattern(self, field_cache):
        """Return a .kibana index-pattern doc_type"""
        mapping_dict = {}
        mapping_dict['customFormats'] = "{}"
        mapping_dict['title'] = self.index_pattern
        # now post the data into .kibana
        mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))
        # in order to post, we need to create the post string
        mapping_str = json.dumps(mapping_dict, separators=(',', ':'))
        return mapping_str

    def check_mapping(self, m):
        """Assert minimum set of fields in cache, does not validate contents

        May mutate m: system fields ('_'-prefixed) get a default
        doc_values=False when the key is absent/empty.
        """
        if 'name' not in m:
            self.pr_dbg("Missing %s" % "name")
            return False
        # self.pr_dbg("Checking %s" % m['name'])
        for x in ['analyzed', 'indexed', 'type', 'scripted', 'count']:
            if x not in m or m[x] == "":
                self.pr_dbg("Missing %s" % x)
                self.pr_dbg("Full %s" % m)
                return False
        if 'doc_values' not in m or m['doc_values'] == "":
            if not m['name'].startswith('_'):
                self.pr_dbg("Missing %s" % "doc_values")
                return False
            m['doc_values'] = False
        return True

    def get_index_mappings(self, index):
        """Converts all index's doc_types to .kibana"""
        fields_arr = []
        for (key, val) in iteritems(index):
            # self.pr_dbg("\tdoc_type: %s" % key)
            doc_mapping = self.get_doc_type_mappings(index[key])
            # self.pr_dbg("\tdoc_mapping: %s" % doc_mapping)
            if doc_mapping is None:
                return None
            # keep adding to the fields array
            fields_arr.extend(doc_mapping)
        return fields_arr

    def get_doc_type_mappings(self, doc_type):
        """Converts all doc_types' fields to .kibana"""
        doc_fields_arr = []
        found_score = False
        for (key, val) in iteritems(doc_type):
            # self.pr_dbg("\t\tfield: %s" % key)
            # self.pr_dbg("\tval: %s" % val)
            add_it = False
            retdict = {}
            # _ are system
            if not key.startswith('_'):
                if 'mapping' not in doc_type[key]:
                    self.pr_err("No mapping in doc_type[%s]" % key)
                    return None
                if key in doc_type[key]['mapping']:
                    subkey_name = key
                else:
                    # multi-field like 'a.b': mapping is keyed by the
                    # last path component
                    subkey_name = re.sub('.*\.', '', key)
                if subkey_name not in doc_type[key]['mapping']:
                    self.pr_err(
                        "Couldn't find subkey " +
                        "doc_type[%s]['mapping'][%s]" % (key, subkey_name))
                    return None
                # self.pr_dbg("\t\tsubkey_name: %s" % subkey_name)
                retdict = self.get_field_mappings(
                    doc_type[key]['mapping'][subkey_name])
                add_it = True
            # system mappings don't list a type,
            # but kibana makes them all strings
            if key in self.sys_mappings:
                retdict['analyzed'] = False
                retdict['indexed'] = False
                if key == '_source':
                    retdict = self.get_field_mappings(
                        doc_type[key]['mapping'][key])
                    retdict['type'] = "_source"
                elif key == '_score':
                    retdict['type'] = "number"
                elif 'type' not in retdict:
                    retdict['type'] = "string"
                add_it = True
            if add_it:
                retdict['name'] = key
                retdict['count'] = 0  # always init to 0
                retdict['scripted'] = False  # I haven't observed a True yet
                if not self.check_mapping(retdict):
                    self.pr_err("Error, invalid mapping")
                    return None
                # the fields element is an escaped array of json
                # make the array here, after all collected, then escape it
                doc_fields_arr.append(retdict)
        if not found_score:
            # _score never appears in the ES mapping; kibana lists it.
            doc_fields_arr.append(
                {"name": "_score",
                 "type": "number",
                 "count": 0,
                 "scripted": False,
                 "indexed": False,
                 "analyzed": False,
                 "doc_values": False})
        return doc_fields_arr

    def get_field_mappings(self, field):
        """Converts ES field mappings to .kibana field mappings"""
        retdict = {}
        retdict['indexed'] = False
        retdict['analyzed'] = False
        for (key, val) in iteritems(field):
            if key in self.mappings:
                # kibana folds all numeric ES types into "number"
                if (key == 'type' and
                        (val == "long" or
                         val == "integer" or
                         val == "double" or
                         val == "float")):
                    val = "number"
                # self.pr_dbg("\t\t\tkey: %s" % key)
                # self.pr_dbg("\t\t\t\tval: %s" % val)
                retdict[key] = val
            if key == 'index' and val != "no":
                retdict['indexed'] = True
                # self.pr_dbg("\t\t\tkey: %s" % key)
                # self.pr_dbg("\t\t\t\tval: %s" % val)
                if val == "analyzed":
                    retdict['analyzed'] = True
        return retdict

    def refresh_poll(self, period):
        """Refresh repeatedly, sleeping 'period' seconds between passes.

        Stops on KeyboardInterrupt.
        """
        self.poll_another = True
        while self.poll_another:
            self.do_refresh()
            self.pr_inf("Polling again in %s secs" % period)
            try:
                time.sleep(period)
            except KeyboardInterrupt:
                self.poll_another = False

    def needs_refresh(self):
        """Return True when ES has fields the kibana cache lacks."""
        es_cache = self.get_field_cache('es')
        k_cache = self.get_field_cache('kibana')
        if self.is_kibana_cache_incomplete(es_cache, k_cache):
            return True
        return False

    def do_refresh(self, force=False):
        """Push the ES field cache to .kibana when needed (or forced).

        :return: result of post_field_cache, or 0 when nothing to do.
        """
        es_cache = self.get_field_cache('es')
        if force:
            self.pr_inf("Forcing mapping update")
            # no need to get kibana if we are forcing it
            return self.post_field_cache(es_cache)
        k_cache = self.get_field_cache('kibana')
        if self.is_kibana_cache_incomplete(es_cache, k_cache):
            self.pr_inf("Mapping is incomplete, doing update")
            return self.post_field_cache(es_cache)
        self.pr_inf("Mapping is correct, no refresh needed")
        return 0

    def is_kibana_cache_incomplete(self, es_cache, k_cache):
        """Test if k_cache is incomplete
        Assume k_cache is always correct, but could be missing new
        fields that es_cache has

        Note: zeroes the ignored keys (e.g. 'count') in BOTH caches'
        entries in place before comparing names.
        """
        # convert list into dict, with each item's ['name'] as key
        k_dict = {}
        for field in k_cache:
            # self.pr_dbg("field: %s" % field)
            k_dict[field['name']] = field
            for ign_f in self.mappings_ignore:
                k_dict[field['name']][ign_f] = 0
        es_dict = {}
        for field in es_cache:
            es_dict[field['name']] = field
            for ign_f in self.mappings_ignore:
                es_dict[field['name']][ign_f] = 0
        es_set = set(es_dict.keys())
        k_set = set(k_dict.keys())
        # reasons why kibana cache could be incomplete:
        # k_dict is missing keys that are within es_dict
        # We don't care if k has keys that es doesn't
        # es {1,2} k {1,2,3}; intersection {1,2}; len(es-{}) 0
        # es {1,2} k {1,2}; intersection {1,2}; len(es-{}) 0
        # es {1,2} k {}; intersection {}; len(es-{}) 2
        # es {1,2} k {1}; intersection {1}; len(es-{}) 1
        # es {2,3} k {1}; intersection {}; len(es-{}) 2
        # es {2,3} k {1,2}; intersection {2}; len(es-{}) 1
        return len(es_set - k_set.intersection(es_set)) > 0

    def list_to_compare_dict(self, list_form):
        """Convert list into a data structure we can query easier

        :return: dict keyed by field name, or None when a duplicate
            name is found.
        """
        compare_dict = {}
        for field in list_form:
            if field['name'] in compare_dict:
                self.pr_dbg("List has duplicate field %s:\n%s" %
                            (field['name'], compare_dict[field['name']]))
                if compare_dict[field['name']] != field:
                    self.pr_dbg("And values are different:\n%s" % field)
                return None
            compare_dict[field['name']] = field
            for ign_f in self.mappings_ignore:
                compare_dict[field['name']][ign_f] = 0
        return compare_dict

    def compare_field_caches(self, replica, original):
        """Verify original is subset of replica

        :return: 0 when they match, 1 on any mismatch.
        """
        if original is None:
            original = []
        if replica is None:
            replica = []
        self.pr_dbg("Comparing orig with %s fields to replica with %s fields" %
                    (len(original), len(replica)))
        # convert list into dict, with each item's ['name'] as key
        orig = self.list_to_compare_dict(original)
        if orig is None:
            self.pr_dbg("Original has duplicate fields")
            return 1
        repl = self.list_to_compare_dict(replica)
        if repl is None:
            self.pr_dbg("Replica has duplicate fields")
            return 1
        # search orig for each item in repl
        # if any items in repl not within orig or vice versa, then complain
        # make sure contents of each item match
        orig_found = {}
        for (key, field) in iteritems(repl):
            field_name = field['name']
            if field_name not in orig:
                self.pr_dbg("Replica has field not found in orig %s: %s" %
                            (field_name, field))
                return 1
            orig_found[field_name] = True
            if orig[field_name] != field:
                self.pr_dbg("Field in replica doesn't match orig:")
                self.pr_dbg("orig:%s\nrepl:%s" % (orig[field_name], field))
                return 1
        unfound = set(orig_found.keys()) - set(repl.keys())
        if len(unfound) > 0:
            self.pr_dbg("Orig contains fields that were not in replica")
            self.pr_dbg('%s' % unfound)
            return 1
        # We don't care about case when replica has more fields than orig
        # unfound = set(repl.keys()) - set(orig_found.keys())
        # if len(unfound) > 0:
        #     self.pr_dbg("Replica contains fields that were not in orig")
        #     self.pr_dbg('%s' % unfound)
        #     return 1
        self.pr_dbg("Original matches replica")
        return 0

    def test_cache(self):
        """Test if this code is equiv to Kibana.refreshFields()
        Within Kibana GUI click refreshFields, then either:
        * self.test_cache()
        * vagrant ssh -c "python -c \"
          import kibana; kibana.DotKibana('aaa*').mapping.test_cache()\""
        """
        es_cache = self.get_field_cache(cache_type='es')
        # self.pr_dbg(json.dumps(es_cache))
        kibana_cache = self.get_field_cache(cache_type='kibana')
        # self.pr_dbg(json.dumps(kibana_cache))
        return self.compare_field_caches(es_cache, kibana_cache)
|
rfarley3/Kibana
|
kibana/mapping.py
|
KibanaMapping.post_field_cache
|
python
|
def post_field_cache(self, field_cache):
    """POST the field cache to .kibana as an index-pattern doc.

    :param field_cache: list of fields' mappings.
    :return: 0 (always; failures are not yet detected).
    """
    index_pattern = self.field_cache_to_index_pattern(field_cache)
    # self.pr_dbg("request/post: %s" % index_pattern)
    resp = requests.post(self.post_url, data=index_pattern).text
    # resp = {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":1,"created":true} # noqa
    # NOTE(review): the parsed response is never inspected, so an error
    # response is still reported as success (TODO: return 1 on failure).
    resp = json.loads(resp)
    return 0
|
Where field_cache is a list of fields' mappings
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/mapping.py#L142-L149
|
[
"def field_cache_to_index_pattern(self, field_cache):\n \"\"\"Return a .kibana index-pattern doc_type\"\"\"\n mapping_dict = {}\n mapping_dict['customFormats'] = \"{}\"\n mapping_dict['title'] = self.index_pattern\n # now post the data into .kibana\n mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))\n # in order to post, we need to create the post string\n mapping_str = json.dumps(mapping_dict, separators=(',', ':'))\n return mapping_str\n"
] |
class KibanaMapping():
    def __init__(self, index, index_pattern, host, debug=False):
        """Store connection/index settings and build the request URLs.

        :param index: kibana settings index name (e.g. '.kibana').
        :param index_pattern: index pattern to manage (e.g. 'aaa*').
        :param host: (hostname, port) tuple of the HTTP endpoint.
        :param debug: enable pr_dbg output when True.
        """
        self.index = index
        self._index_pattern = index_pattern
        self._host = host
        self.update_urls()
        # from the js possible mappings are:
        # { type, indexed, analyzed, doc_values }
        # but indexed and analyzed are .kibana specific,
        # determined by the value within ES's 'index', which could be:
        # { analyzed, no, not_analyzed }
        self.mappings = ['type', 'doc_values']
        # ignore system fields:
        self.sys_mappings = ['_source', '_index', '_type', '_id']
        # .kibana has some fields to ignore too:
        self.mappings_ignore = ['count']
        self.debug = debug
    def pr_dbg(self, msg):
        """Print a debug message (only when self.debug is True)."""
        if self.debug:
            print('[DBG] Mapping %s' % msg)
    def pr_inf(self, msg):
        """Print an informational message."""
        print('[INF] Mapping %s' % msg)
    def pr_err(self, msg):
        """Print an error message."""
        print('[ERR] Mapping %s' % msg)
    def update_urls(self):
        """Recompute the ES mapping URL and .kibana GET/POST URLs.

        Must be called whenever host, index or index pattern change.
        """
        # 'http://localhost:5601/elasticsearch/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
        # 'http://localhost:9200/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
        self.es_get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
                           '%s/' % self._index_pattern +
                           '_mapping/field/' +
                           '*?ignore_unavailable=false&' +
                           'allow_no_indices=false&' +
                           'include_defaults=true')
        # 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
        # 'http://localhost:9200/.kibana/index-pattern/aaa*'
        self.post_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
                         '%s/' % self.index +
                         'index-pattern/%s' % self._index_pattern)
        # 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
        # 'http://localhost:9200/.kibana/index-pattern/_search/?id=aaa*'
        # 'http://localhost:9200/.kibana/index-pattern/aaa*/'
        self.get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
                        '%s/' % self.index +
                        'index-pattern/%s/' % self._index_pattern)
@property
def index_pattern(self):
return self._index_pattern
@index_pattern.setter
def index_pattern_setter(self, index_pattern):
self._index_pattern = index_pattern
self.update_urls()
@property
def host(self):
return self._host
@host.setter
def host_setter(self, host):
self._host = host
self.update_urls()
    def get_field_cache(self, cache_type='es'):
        """Return a list of fields' mappings.

        :param cache_type: 'kibana' reads the cached index-pattern doc
            from the .kibana index; 'es' (or anything starting with
            'elastic') reads live field mappings from Elasticsearch and
            converts them to the .kibana field-cache format.
        :return: list of field-mapping dicts; [] when the kibana doc is
            missing (HTTP error); None for an unknown cache_type.
        """
        if cache_type == 'kibana':
            try:
                search_results = urlopen(self.get_url).read().decode('utf-8')
            except HTTPError:  # as e:
                # self.pr_err("get_field_cache(kibana), HTTPError: %s" % e)
                return []
            index_pattern = json.loads(search_results)
            # Results look like: {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":6,"found":true,"_source":{"title":"aaa*","fields":"<what we want>"}} # noqa
            # 'fields' is a JSON string embedded inside the doc.
            fields_str = index_pattern['_source']['fields']
            return json.loads(fields_str)
        elif cache_type == 'es' or cache_type.startswith('elastic'):
            search_results = urlopen(self.es_get_url).read().decode('utf-8')
            es_mappings = json.loads(search_results)
            # Results look like: {"<index_name>":{"mappings":{"<doc_type>":{"<field_name>":{"full_name":"<field_name>","mapping":{"<sub-field_name>":{"type":"date","index_name":"<sub-field_name>","boost":1.0,"index":"not_analyzed","store":false,"doc_values":false,"term_vector":"no","norms":{"enabled":false},"index_options":"docs","index_analyzer":"_date/16","search_analyzer":"_date/max","postings_format":"default","doc_values_format":"default","similarity":"default","fielddata":{},"ignore_malformed":false,"coerce":true,"precision_step":16,"format":"dateOptionalTime","null_value":null,"include_in_all":false,"numeric_resolution":"milliseconds","locale":""}}}, # noqa
            # now convert the mappings into the .kibana format
            field_cache = []
            for (index_name, val) in iteritems(es_mappings):
                if index_name != self.index:  # only get non-'.kibana' indices
                    # self.pr_dbg("index: %s" % index_name)
                    m_dict = es_mappings[index_name]['mappings']
                    # self.pr_dbg('m_dict %s' % m_dict)
                    mappings = self.get_index_mappings(m_dict)
                    # self.pr_dbg('mappings %s' % mappings)
                    field_cache.extend(mappings)
            field_cache = self.dedup_field_cache(field_cache)
            return field_cache
        self.pr_err("Unknown cache type: %s" % cache_type)
        return None
def dedup_field_cache(self, field_cache):
    """Drop repeated field entries, keeping the first occurrence of each name.

    Conflicting duplicates (same name, different attributes) are logged
    via pr_dbg and the first-seen entry wins.
    """
    unique = []
    seen = {}
    for entry in field_cache:
        entry_name = entry['name']
        if entry_name not in seen:
            unique.append(entry)
            seen[entry_name] = entry
        elif seen[entry_name] != entry:
            self.pr_dbg("Dup field doesn't match")
            self.pr_dbg("1st found: %s" % seen[entry_name])
            self.pr_dbg(" Dup one: %s" % entry)
        # identical duplicates are silently dropped
    return unique
# TODO detect failure (return 1)
def field_cache_to_index_pattern(self, field_cache):
    """Return a .kibana index-pattern doc_type

    Builds the POST body: the field list is serialized to a compact JSON
    string and embedded (escaped) inside the outer document.
    """
    document = {
        'customFormats': "{}",
        'title': self.index_pattern,
        # .kibana stores the field list as an escaped JSON string
        'fields': json.dumps(field_cache, separators=(',', ':')),
    }
    # serialize the whole document for the POST request
    return json.dumps(document, separators=(',', ':'))
def check_mapping(self, m):
    """Assert minimum set of fields in cache, does not validate contents

    Returns True when *m* has all required keys.  As a side effect,
    system fields (name starting with '_') get 'doc_values' defaulted
    to False when it is missing or empty.
    """
    if 'name' not in m:
        self.pr_dbg("Missing %s" % "name")
        return False
    # self.pr_dbg("Checking %s" % m['name'])
    for required in ('analyzed', 'indexed', 'type', 'scripted', 'count'):
        if required not in m or m[required] == "":
            self.pr_dbg("Missing %s" % required)
            self.pr_dbg("Full %s" % m)
            return False
    if 'doc_values' not in m or m['doc_values'] == "":
        if not m['name'].startswith('_'):
            # only system fields may omit doc_values
            self.pr_dbg("Missing %s" % "doc_values")
            return False
        m['doc_values'] = False
    return True
def get_index_mappings(self, index):
    """Converts all index's doc_types to .kibana

    Returns a flat list of field mappings for every doc_type in *index*,
    or None if any doc_type fails to convert.
    """
    collected = []
    for (doc_type_name, _unused) in iteritems(index):
        converted = self.get_doc_type_mappings(index[doc_type_name])
        if converted is None:
            # propagate conversion failure to the caller
            return None
        collected.extend(converted)
    return collected
def get_doc_type_mappings(self, doc_type):
    """Converts all doc_types' fields to .kibana

    Returns a list of .kibana field dicts, or None on a malformed
    ES mapping.  A synthetic ``_score`` field is appended when none
    was produced from the input.

    Fix: ``found_score`` was initialized but never set, so the
    synthetic ``_score`` entry could be duplicated if the ``_score``
    branch ever produced one (reachable only if '_score' is added to
    self.sys_mappings).  It is now set in that branch.
    """
    doc_fields_arr = []
    found_score = False
    for (key, val) in iteritems(doc_type):
        # self.pr_dbg("\t\tfield: %s" % key)
        # self.pr_dbg("\tval: %s" % val)
        add_it = False
        retdict = {}
        # names starting with '_' are system fields
        if not key.startswith('_'):
            if 'mapping' not in doc_type[key]:
                self.pr_err("No mapping in doc_type[%s]" % key)
                return None
            if key in doc_type[key]['mapping']:
                subkey_name = key
            else:
                # multi-fields are keyed by the last dotted component
                subkey_name = re.sub(r'.*\.', '', key)
            if subkey_name not in doc_type[key]['mapping']:
                self.pr_err(
                    "Couldn't find subkey " +
                    "doc_type[%s]['mapping'][%s]" % (key, subkey_name))
                return None
            # self.pr_dbg("\t\tsubkey_name: %s" % subkey_name)
            retdict = self.get_field_mappings(
                doc_type[key]['mapping'][subkey_name])
            add_it = True
        # system mappings don't list a type,
        # but kibana makes them all strings
        if key in self.sys_mappings:
            retdict['analyzed'] = False
            retdict['indexed'] = False
            if key == '_source':
                retdict = self.get_field_mappings(
                    doc_type[key]['mapping'][key])
                retdict['type'] = "_source"
            elif key == '_score':
                retdict['type'] = "number"
                # remember it so the synthetic _score below is skipped
                found_score = True
            elif 'type' not in retdict:
                retdict['type'] = "string"
            add_it = True
        if add_it:
            retdict['name'] = key
            retdict['count'] = 0  # always init to 0
            retdict['scripted'] = False  # I haven't observed a True yet
            if not self.check_mapping(retdict):
                self.pr_err("Error, invalid mapping")
                return None
            # the fields element is an escaped array of json
            # make the array here, after all collected, then escape it
            doc_fields_arr.append(retdict)
    if not found_score:
        # kibana always expects a _score entry; synthesize one
        doc_fields_arr.append(
            {"name": "_score",
             "type": "number",
             "count": 0,
             "scripted": False,
             "indexed": False,
             "analyzed": False,
             "doc_values": False})
    return doc_fields_arr
def get_field_mappings(self, field):
    """Converts ES field mappings to .kibana field mappings

    Copies over the attributes listed in self.mappings, folds every
    numeric ES type into "number", and derives 'indexed'/'analyzed'
    from the ES 'index' attribute.
    """
    converted = {'indexed': False, 'analyzed': False}
    for (prop, value) in iteritems(field):
        if prop in self.mappings:
            # kibana has a single "number" type for all numeric ES types
            if prop == 'type' and value in ("long", "integer",
                                            "double", "float"):
                value = "number"
            converted[prop] = value
        if prop == 'index' and value != "no":
            converted['indexed'] = True
            if value == "analyzed":
                converted['analyzed'] = True
    return converted
def refresh_poll(self, period):
    """Refresh the field cache forever, sleeping *period* seconds between runs.

    Stops cleanly when Ctrl-C (KeyboardInterrupt) arrives during the sleep;
    an interrupt during do_refresh() itself will propagate.
    """
    self.poll_another = True
    while self.poll_another:
        self.do_refresh()
        self.pr_inf("Polling again in %s secs" % period)
        try:
            time.sleep(period)
        except KeyboardInterrupt:
            self.poll_another = False
def needs_refresh(self):
    """Return True when .kibana is missing fields that ES reports."""
    es_fields = self.get_field_cache('es')
    kibana_fields = self.get_field_cache('kibana')
    return bool(self.is_kibana_cache_incomplete(es_fields, kibana_fields))
def do_refresh(self, force=False):
    """Push the ES field cache into .kibana when needed (or when forced).

    Returns post_field_cache()'s result when an update was posted,
    otherwise 0.
    """
    es_cache = self.get_field_cache('es')
    if force:
        self.pr_inf("Forcing mapping update")
        # no need to fetch the kibana side when forcing
        return self.post_field_cache(es_cache)
    kibana_cache = self.get_field_cache('kibana')
    if not self.is_kibana_cache_incomplete(es_cache, kibana_cache):
        self.pr_inf("Mapping is correct, no refresh needed")
        return 0
    self.pr_inf("Mapping is incomplete, doing update")
    return self.post_field_cache(es_cache)
def is_kibana_cache_incomplete(self, es_cache, k_cache):
    """Test if k_cache is incomplete

    Assume k_cache is always correct, but could be missing new
    fields that es_cache has.  Incomplete means ES reports at least
    one field name that kibana's cache lacks; extra kibana-only
    fields are ignored.

    NOTE: like the original, this zeroes the self.mappings_ignore
    attributes on the entries in place.
    """
    def field_names(cache):
        # index entries by name, zeroing ignored attrs (in-place)
        by_name = {}
        for entry in cache:
            by_name[entry['name']] = entry
            for ignored in self.mappings_ignore:
                entry[ignored] = 0
        return set(by_name)

    kibana_names = field_names(k_cache)
    es_names = field_names(es_cache)
    # incomplete iff some ES field name is absent from the kibana set:
    # es {1,2} k {1,2,3} -> 0 missing; es {2,3} k {1} -> 2 missing; etc.
    return len(es_names - kibana_names.intersection(es_names)) > 0
def list_to_compare_dict(self, list_form):
    """Convert list into a data structure we can query easier

    Indexes the entries by name and zeroes the self.mappings_ignore
    attributes in place.  Returns None when the list holds two entries
    with the same name but different contents.
    """
    indexed = {}
    for entry in list_form:
        entry_name = entry['name']
        if entry_name in indexed:
            self.pr_dbg("List has duplicate field %s:\n%s" %
                        (entry_name, indexed[entry_name]))
            if indexed[entry_name] != entry:
                self.pr_dbg("And values are different:\n%s" % entry)
                # conflicting duplicates make the list un-indexable
                return None
        indexed[entry_name] = entry
        for ignored in self.mappings_ignore:
            indexed[entry_name][ignored] = 0
    return indexed
def compare_field_caches(self, replica, original):
    """Verify original is subset of replica

    Returns 0 on match, 1 on any mismatch or duplicate.

    NOTE(review): despite the docstring, the loop below iterates the
    *replica* and returns 1 when a replica field is absent from
    *original*, while extra original-only fields are never detected
    (see second note) -- confirm intended direction before changing.
    """
    if original is None:
        original = []
    if replica is None:
        replica = []
    self.pr_dbg("Comparing orig with %s fields to replica with %s fields" %
                (len(original), len(replica)))
    # convert list into dict, with each item's ['name'] as key
    orig = self.list_to_compare_dict(original)
    if orig is None:
        self.pr_dbg("Original has duplicate fields")
        return 1
    repl = self.list_to_compare_dict(replica)
    if repl is None:
        self.pr_dbg("Replica has duplicate fields")
        return 1
    # search orig for each item in repl
    # if any items in repl not within orig or vice versa, then complain
    # make sure contents of each item match
    orig_found = {}
    for (key, field) in iteritems(repl):
        field_name = field['name']
        if field_name not in orig:
            self.pr_dbg("Replica has field not found in orig %s: %s" %
                        (field_name, field))
            return 1
        orig_found[field_name] = True
        if orig[field_name] != field:
            self.pr_dbg("Field in replica doesn't match orig:")
            self.pr_dbg("orig:%s\nrepl:%s" % (orig[field_name], field))
            return 1
    # NOTE(review): orig_found's keys are always a subset of repl's keys
    # (they are only added while iterating repl), so this set difference
    # is always empty; the check presumably meant
    # set(orig.keys()) - set(orig_found.keys()) -- verify before fixing.
    unfound = set(orig_found.keys()) - set(repl.keys())
    if len(unfound) > 0:
        self.pr_dbg("Orig contains fields that were not in replica")
        self.pr_dbg('%s' % unfound)
        return 1
    # We don't care about case when replica has more fields than orig
    # unfound = set(repl.keys()) - set(orig_found.keys())
    # if len(unfound) > 0:
    #     self.pr_dbg("Replica contains fields that were not in orig")
    #     self.pr_dbg('%s' % unfound)
    #     return 1
    self.pr_dbg("Original matches replica")
    return 0
def test_cache(self):
"""Test if this code is equiv to Kibana.refreshFields()
Within Kibana GUI click refreshFields, then either:
* self.test_cache()
* vagrant ssh -c "python -c \"
import kibana; kibana.DotKibana('aaa*').mapping.test_cache()\""
"""
es_cache = self.get_field_cache(cache_type='es')
# self.pr_dbg(json.dumps(es_cache))
kibana_cache = self.get_field_cache(cache_type='kibana')
# self.pr_dbg(json.dumps(kibana_cache))
return self.compare_field_caches(es_cache, kibana_cache)
|
rfarley3/Kibana
|
kibana/mapping.py
|
KibanaMapping.field_cache_to_index_pattern
|
python
|
def field_cache_to_index_pattern(self, field_cache):
mapping_dict = {}
mapping_dict['customFormats'] = "{}"
mapping_dict['title'] = self.index_pattern
# now post the data into .kibana
mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))
# in order to post, we need to create the post string
mapping_str = json.dumps(mapping_dict, separators=(',', ':'))
return mapping_str
|
Return a .kibana index-pattern doc_type
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/mapping.py#L152-L161
| null |
class KibanaMapping():
def __init__(self, index, index_pattern, host, debug=False):
    """Manage the .kibana index-pattern field cache for one pattern.

    index -- name of the kibana metadata index (e.g. '.kibana')
    index_pattern -- ES index pattern whose fields are cached (e.g. 'aaa*')
    host -- (hostname, port) tuple of the Elasticsearch endpoint
    debug -- when True, pr_dbg() messages are printed
    """
    self.index = index
    self._index_pattern = index_pattern
    self._host = host
    self.update_urls()
    # from the js possible mappings are:
    # { type, indexed, analyzed, doc_values }
    # but indexed and analyzed are .kibana specific,
    # determined by the value within ES's 'index', which could be:
    # { analyzed, no, not_analyzed }
    self.mappings = ['type', 'doc_values']
    # ignore system fields:
    self.sys_mappings = ['_source', '_index', '_type', '_id']
    # .kibana has some fields to ignore too:
    self.mappings_ignore = ['count']
    self.debug = debug
def pr_dbg(self, msg):
    """Print a debug-level message; suppressed unless self.debug is True."""
    if self.debug:
        print('[DBG] Mapping %s' % msg)

def pr_inf(self, msg):
    """Print an info-level message (always shown)."""
    print('[INF] Mapping %s' % msg)

def pr_err(self, msg):
    """Print an error-level message (always shown)."""
    print('[ERR] Mapping %s' % msg)
def update_urls(self):
    """Rebuild the three cached REST URLs from host, index and pattern.

    es_get_url -- ES field-mapping query for every index matching the
        pattern, e.g.
        http://localhost:9200/aaa*/_mapping/field/*?ignore_unavailable=...
    post_url -- POST target writing the index-pattern doc into .kibana,
        e.g. http://localhost:9200/.kibana/index-pattern/aaa*
    get_url -- GET source reading the stored doc (note trailing slash),
        e.g. http://localhost:9200/.kibana/index-pattern/aaa*/
    """
    base = 'http://%s:%s/' % (self._host[0], self._host[1])
    self.es_get_url = (base +
                       '%s/' % self._index_pattern +
                       '_mapping/field/' +
                       '*?ignore_unavailable=false&' +
                       'allow_no_indices=false&' +
                       'include_defaults=true')
    self.post_url = (base +
                     '%s/' % self.index +
                     'index-pattern/%s' % self._index_pattern)
    self.get_url = (base +
                    '%s/' % self.index +
                    'index-pattern/%s/' % self._index_pattern)
@property
def index_pattern(self):
    """The ES index pattern (e.g. 'aaa*') whose mapping is managed."""
    return self._index_pattern

@index_pattern.setter
def index_pattern(self, index_pattern):
    # The setter must reuse the property's name; the previous
    # ``index_pattern_setter`` name registered it under a separate
    # attribute and left ``obj.index_pattern = x`` broken.
    self._index_pattern = index_pattern
    self.update_urls()
@property
def host(self):
    """The Elasticsearch (hostname, port) pair used to build the URLs."""
    return self._host

@host.setter
def host(self, host):
    # The setter must share the property's name so ``obj.host = x``
    # works; the previous ``host_setter`` name registered it under a
    # separate attribute, leaving the ``host`` property read-only.
    self._host = host
    self.update_urls()
def get_field_cache(self, cache_type='es'):
"""Return a list of fields' mappings"""
if cache_type == 'kibana':
try:
search_results = urlopen(self.get_url).read().decode('utf-8')
except HTTPError: # as e:
# self.pr_err("get_field_cache(kibana), HTTPError: %s" % e)
return []
index_pattern = json.loads(search_results)
# Results look like: {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":6,"found":true,"_source":{"title":"aaa*","fields":"<what we want>"}} # noqa
fields_str = index_pattern['_source']['fields']
return json.loads(fields_str)
elif cache_type == 'es' or cache_type.startswith('elastic'):
search_results = urlopen(self.es_get_url).read().decode('utf-8')
es_mappings = json.loads(search_results)
# Results look like: {"<index_name>":{"mappings":{"<doc_type>":{"<field_name>":{"full_name":"<field_name>","mapping":{"<sub-field_name>":{"type":"date","index_name":"<sub-field_name>","boost":1.0,"index":"not_analyzed","store":false,"doc_values":false,"term_vector":"no","norms":{"enabled":false},"index_options":"docs","index_analyzer":"_date/16","search_analyzer":"_date/max","postings_format":"default","doc_values_format":"default","similarity":"default","fielddata":{},"ignore_malformed":false,"coerce":true,"precision_step":16,"format":"dateOptionalTime","null_value":null,"include_in_all":false,"numeric_resolution":"milliseconds","locale":""}}}, # noqa
# now convert the mappings into the .kibana format
field_cache = []
for (index_name, val) in iteritems(es_mappings):
if index_name != self.index: # only get non-'.kibana' indices
# self.pr_dbg("index: %s" % index_name)
m_dict = es_mappings[index_name]['mappings']
# self.pr_dbg('m_dict %s' % m_dict)
mappings = self.get_index_mappings(m_dict)
# self.pr_dbg('mappings %s' % mappings)
field_cache.extend(mappings)
field_cache = self.dedup_field_cache(field_cache)
return field_cache
self.pr_err("Unknown cache type: %s" % cache_type)
return None
def dedup_field_cache(self, field_cache):
deduped = []
fields_found = {}
for field in field_cache:
name = field['name']
if name not in fields_found:
deduped.append(field)
fields_found[name] = field
elif fields_found[name] != field:
self.pr_dbg("Dup field doesn't match")
self.pr_dbg("1st found: %s" % fields_found[name])
self.pr_dbg(" Dup one: %s" % field)
# else ignore, pass
return deduped
def post_field_cache(self, field_cache):
    """Where field_cache is a list of fields' mappings

    Serializes *field_cache* into a .kibana index-pattern document and
    POSTs it to self.post_url.  Always returns 0; the parsed response
    is currently discarded (see TODO below).
    """
    index_pattern = self.field_cache_to_index_pattern(field_cache)
    # self.pr_dbg("request/post: %s" % index_pattern)
    resp = requests.post(self.post_url, data=index_pattern).text
    # resp = {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":1,"created":true} # noqa
    resp = json.loads(resp)  # parsed but unused today; kept for debugging
    return 0
    # TODO detect failure (return 1)
def check_mapping(self, m):
"""Assert minimum set of fields in cache, does not validate contents"""
if 'name' not in m:
self.pr_dbg("Missing %s" % "name")
return False
# self.pr_dbg("Checking %s" % m['name'])
for x in ['analyzed', 'indexed', 'type', 'scripted', 'count']:
if x not in m or m[x] == "":
self.pr_dbg("Missing %s" % x)
self.pr_dbg("Full %s" % m)
return False
if 'doc_values' not in m or m['doc_values'] == "":
if not m['name'].startswith('_'):
self.pr_dbg("Missing %s" % "doc_values")
return False
m['doc_values'] = False
return True
def get_index_mappings(self, index):
"""Converts all index's doc_types to .kibana"""
fields_arr = []
for (key, val) in iteritems(index):
# self.pr_dbg("\tdoc_type: %s" % key)
doc_mapping = self.get_doc_type_mappings(index[key])
# self.pr_dbg("\tdoc_mapping: %s" % doc_mapping)
if doc_mapping is None:
return None
# keep adding to the fields array
fields_arr.extend(doc_mapping)
return fields_arr
def get_doc_type_mappings(self, doc_type):
"""Converts all doc_types' fields to .kibana"""
doc_fields_arr = []
found_score = False
for (key, val) in iteritems(doc_type):
# self.pr_dbg("\t\tfield: %s" % key)
# self.pr_dbg("\tval: %s" % val)
add_it = False
retdict = {}
# _ are system
if not key.startswith('_'):
if 'mapping' not in doc_type[key]:
self.pr_err("No mapping in doc_type[%s]" % key)
return None
if key in doc_type[key]['mapping']:
subkey_name = key
else:
subkey_name = re.sub('.*\.', '', key)
if subkey_name not in doc_type[key]['mapping']:
self.pr_err(
"Couldn't find subkey " +
"doc_type[%s]['mapping'][%s]" % (key, subkey_name))
return None
# self.pr_dbg("\t\tsubkey_name: %s" % subkey_name)
retdict = self.get_field_mappings(
doc_type[key]['mapping'][subkey_name])
add_it = True
# system mappings don't list a type,
# but kibana makes them all strings
if key in self.sys_mappings:
retdict['analyzed'] = False
retdict['indexed'] = False
if key == '_source':
retdict = self.get_field_mappings(
doc_type[key]['mapping'][key])
retdict['type'] = "_source"
elif key == '_score':
retdict['type'] = "number"
elif 'type' not in retdict:
retdict['type'] = "string"
add_it = True
if add_it:
retdict['name'] = key
retdict['count'] = 0 # always init to 0
retdict['scripted'] = False # I haven't observed a True yet
if not self.check_mapping(retdict):
self.pr_err("Error, invalid mapping")
return None
# the fields element is an escaped array of json
# make the array here, after all collected, then escape it
doc_fields_arr.append(retdict)
if not found_score:
doc_fields_arr.append(
{"name": "_score",
"type": "number",
"count": 0,
"scripted": False,
"indexed": False,
"analyzed": False,
"doc_values": False})
return doc_fields_arr
def get_field_mappings(self, field):
"""Converts ES field mappings to .kibana field mappings"""
retdict = {}
retdict['indexed'] = False
retdict['analyzed'] = False
for (key, val) in iteritems(field):
if key in self.mappings:
if (key == 'type' and
(val == "long" or
val == "integer" or
val == "double" or
val == "float")):
val = "number"
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
retdict[key] = val
if key == 'index' and val != "no":
retdict['indexed'] = True
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
if val == "analyzed":
retdict['analyzed'] = True
return retdict
def refresh_poll(self, period):
self.poll_another = True
while self.poll_another:
self.do_refresh()
self.pr_inf("Polling again in %s secs" % period)
try:
time.sleep(period)
except KeyboardInterrupt:
self.poll_another = False
def needs_refresh(self):
es_cache = self.get_field_cache('es')
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
return True
return False
def do_refresh(self, force=False):
es_cache = self.get_field_cache('es')
if force:
self.pr_inf("Forcing mapping update")
# no need to get kibana if we are forcing it
return self.post_field_cache(es_cache)
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
self.pr_inf("Mapping is incomplete, doing update")
return self.post_field_cache(es_cache)
self.pr_inf("Mapping is correct, no refresh needed")
return 0
def is_kibana_cache_incomplete(self, es_cache, k_cache):
"""Test if k_cache is incomplete
Assume k_cache is always correct, but could be missing new
fields that es_cache has
"""
# convert list into dict, with each item's ['name'] as key
k_dict = {}
for field in k_cache:
# self.pr_dbg("field: %s" % field)
k_dict[field['name']] = field
for ign_f in self.mappings_ignore:
k_dict[field['name']][ign_f] = 0
es_dict = {}
for field in es_cache:
es_dict[field['name']] = field
for ign_f in self.mappings_ignore:
es_dict[field['name']][ign_f] = 0
es_set = set(es_dict.keys())
k_set = set(k_dict.keys())
# reasons why kibana cache could be incomplete:
# k_dict is missing keys that are within es_dict
# We don't care if k has keys that es doesn't
# es {1,2} k {1,2,3}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {1,2}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {}; intersection {}; len(es-{}) 2
# es {1,2} k {1}; intersection {1}; len(es-{}) 1
# es {2,3} k {1}; intersection {}; len(es-{}) 2
# es {2,3} k {1,2}; intersection {2}; len(es-{}) 1
return len(es_set - k_set.intersection(es_set)) > 0
def list_to_compare_dict(self, list_form):
"""Convert list into a data structure we can query easier"""
compare_dict = {}
for field in list_form:
if field['name'] in compare_dict:
self.pr_dbg("List has duplicate field %s:\n%s" %
(field['name'], compare_dict[field['name']]))
if compare_dict[field['name']] != field:
self.pr_dbg("And values are different:\n%s" % field)
return None
compare_dict[field['name']] = field
for ign_f in self.mappings_ignore:
compare_dict[field['name']][ign_f] = 0
return compare_dict
def compare_field_caches(self, replica, original):
"""Verify original is subset of replica"""
if original is None:
original = []
if replica is None:
replica = []
self.pr_dbg("Comparing orig with %s fields to replica with %s fields" %
(len(original), len(replica)))
# convert list into dict, with each item's ['name'] as key
orig = self.list_to_compare_dict(original)
if orig is None:
self.pr_dbg("Original has duplicate fields")
return 1
repl = self.list_to_compare_dict(replica)
if repl is None:
self.pr_dbg("Replica has duplicate fields")
return 1
# search orig for each item in repl
# if any items in repl not within orig or vice versa, then complain
# make sure contents of each item match
orig_found = {}
for (key, field) in iteritems(repl):
field_name = field['name']
if field_name not in orig:
self.pr_dbg("Replica has field not found in orig %s: %s" %
(field_name, field))
return 1
orig_found[field_name] = True
if orig[field_name] != field:
self.pr_dbg("Field in replica doesn't match orig:")
self.pr_dbg("orig:%s\nrepl:%s" % (orig[field_name], field))
return 1
unfound = set(orig_found.keys()) - set(repl.keys())
if len(unfound) > 0:
self.pr_dbg("Orig contains fields that were not in replica")
self.pr_dbg('%s' % unfound)
return 1
# We don't care about case when replica has more fields than orig
# unfound = set(repl.keys()) - set(orig_found.keys())
# if len(unfound) > 0:
# self.pr_dbg("Replica contains fields that were not in orig")
# self.pr_dbg('%s' % unfound)
# return 1
self.pr_dbg("Original matches replica")
return 0
def test_cache(self):
"""Test if this code is equiv to Kibana.refreshFields()
Within Kibana GUI click refreshFields, then either:
* self.test_cache()
* vagrant ssh -c "python -c \"
import kibana; kibana.DotKibana('aaa*').mapping.test_cache()\""
"""
es_cache = self.get_field_cache(cache_type='es')
# self.pr_dbg(json.dumps(es_cache))
kibana_cache = self.get_field_cache(cache_type='kibana')
# self.pr_dbg(json.dumps(kibana_cache))
return self.compare_field_caches(es_cache, kibana_cache)
|
rfarley3/Kibana
|
kibana/mapping.py
|
KibanaMapping.check_mapping
|
python
|
def check_mapping(self, m):
if 'name' not in m:
self.pr_dbg("Missing %s" % "name")
return False
# self.pr_dbg("Checking %s" % m['name'])
for x in ['analyzed', 'indexed', 'type', 'scripted', 'count']:
if x not in m or m[x] == "":
self.pr_dbg("Missing %s" % x)
self.pr_dbg("Full %s" % m)
return False
if 'doc_values' not in m or m['doc_values'] == "":
if not m['name'].startswith('_'):
self.pr_dbg("Missing %s" % "doc_values")
return False
m['doc_values'] = False
return True
|
Assert minimum set of fields in cache, does not validate contents
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/mapping.py#L163-L179
|
[
"def pr_dbg(self, msg):\n if self.debug:\n print('[DBG] Mapping %s' % msg)\n"
] |
class KibanaMapping():
def __init__(self, index, index_pattern, host, debug=False):
self.index = index
self._index_pattern = index_pattern
self._host = host
self.update_urls()
# from the js possible mappings are:
# { type, indexed, analyzed, doc_values }
# but indexed and analyzed are .kibana specific,
# determined by the value within ES's 'index', which could be:
# { analyzed, no, not_analyzed }
self.mappings = ['type', 'doc_values']
# ignore system fields:
self.sys_mappings = ['_source', '_index', '_type', '_id']
# .kibana has some fields to ignore too:
self.mappings_ignore = ['count']
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Mapping %s' % msg)
def pr_inf(self, msg):
print('[INF] Mapping %s' % msg)
def pr_err(self, msg):
print('[ERR] Mapping %s' % msg)
def update_urls(self):
# 'http://localhost:5601/elasticsearch/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
# 'http://localhost:9200/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
self.es_get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self._index_pattern +
'_mapping/field/' +
'*?ignore_unavailable=false&' +
'allow_no_indices=false&' +
'include_defaults=true')
# 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
# 'http://localhost:9200/.kibana/index-pattern/aaa*'
self.post_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self.index +
'index-pattern/%s' % self._index_pattern)
# 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
# 'http://localhost:9200/.kibana/index-pattern/_search/?id=aaa*'
# 'http://localhost:9200/.kibana/index-pattern/aaa*/'
self.get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self.index +
'index-pattern/%s/' % self._index_pattern)
@property
def index_pattern(self):
return self._index_pattern
@index_pattern.setter
def index_pattern_setter(self, index_pattern):
self._index_pattern = index_pattern
self.update_urls()
@property
def host(self):
return self._host
@host.setter
def host_setter(self, host):
self._host = host
self.update_urls()
def get_field_cache(self, cache_type='es'):
"""Return a list of fields' mappings"""
if cache_type == 'kibana':
try:
search_results = urlopen(self.get_url).read().decode('utf-8')
except HTTPError: # as e:
# self.pr_err("get_field_cache(kibana), HTTPError: %s" % e)
return []
index_pattern = json.loads(search_results)
# Results look like: {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":6,"found":true,"_source":{"title":"aaa*","fields":"<what we want>"}} # noqa
fields_str = index_pattern['_source']['fields']
return json.loads(fields_str)
elif cache_type == 'es' or cache_type.startswith('elastic'):
search_results = urlopen(self.es_get_url).read().decode('utf-8')
es_mappings = json.loads(search_results)
# Results look like: {"<index_name>":{"mappings":{"<doc_type>":{"<field_name>":{"full_name":"<field_name>","mapping":{"<sub-field_name>":{"type":"date","index_name":"<sub-field_name>","boost":1.0,"index":"not_analyzed","store":false,"doc_values":false,"term_vector":"no","norms":{"enabled":false},"index_options":"docs","index_analyzer":"_date/16","search_analyzer":"_date/max","postings_format":"default","doc_values_format":"default","similarity":"default","fielddata":{},"ignore_malformed":false,"coerce":true,"precision_step":16,"format":"dateOptionalTime","null_value":null,"include_in_all":false,"numeric_resolution":"milliseconds","locale":""}}}, # noqa
# now convert the mappings into the .kibana format
field_cache = []
for (index_name, val) in iteritems(es_mappings):
if index_name != self.index: # only get non-'.kibana' indices
# self.pr_dbg("index: %s" % index_name)
m_dict = es_mappings[index_name]['mappings']
# self.pr_dbg('m_dict %s' % m_dict)
mappings = self.get_index_mappings(m_dict)
# self.pr_dbg('mappings %s' % mappings)
field_cache.extend(mappings)
field_cache = self.dedup_field_cache(field_cache)
return field_cache
self.pr_err("Unknown cache type: %s" % cache_type)
return None
def dedup_field_cache(self, field_cache):
deduped = []
fields_found = {}
for field in field_cache:
name = field['name']
if name not in fields_found:
deduped.append(field)
fields_found[name] = field
elif fields_found[name] != field:
self.pr_dbg("Dup field doesn't match")
self.pr_dbg("1st found: %s" % fields_found[name])
self.pr_dbg(" Dup one: %s" % field)
# else ignore, pass
return deduped
def post_field_cache(self, field_cache):
"""Where field_cache is a list of fields' mappings"""
index_pattern = self.field_cache_to_index_pattern(field_cache)
# self.pr_dbg("request/post: %s" % index_pattern)
resp = requests.post(self.post_url, data=index_pattern).text
# resp = {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":1,"created":true} # noqa
resp = json.loads(resp)
return 0
# TODO detect failure (return 1)
def field_cache_to_index_pattern(self, field_cache):
"""Return a .kibana index-pattern doc_type"""
mapping_dict = {}
mapping_dict['customFormats'] = "{}"
mapping_dict['title'] = self.index_pattern
# now post the data into .kibana
mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))
# in order to post, we need to create the post string
mapping_str = json.dumps(mapping_dict, separators=(',', ':'))
return mapping_str
def get_index_mappings(self, index):
"""Converts all index's doc_types to .kibana"""
fields_arr = []
for (key, val) in iteritems(index):
# self.pr_dbg("\tdoc_type: %s" % key)
doc_mapping = self.get_doc_type_mappings(index[key])
# self.pr_dbg("\tdoc_mapping: %s" % doc_mapping)
if doc_mapping is None:
return None
# keep adding to the fields array
fields_arr.extend(doc_mapping)
return fields_arr
def get_doc_type_mappings(self, doc_type):
"""Converts all doc_types' fields to .kibana"""
doc_fields_arr = []
found_score = False
for (key, val) in iteritems(doc_type):
# self.pr_dbg("\t\tfield: %s" % key)
# self.pr_dbg("\tval: %s" % val)
add_it = False
retdict = {}
# _ are system
if not key.startswith('_'):
if 'mapping' not in doc_type[key]:
self.pr_err("No mapping in doc_type[%s]" % key)
return None
if key in doc_type[key]['mapping']:
subkey_name = key
else:
subkey_name = re.sub('.*\.', '', key)
if subkey_name not in doc_type[key]['mapping']:
self.pr_err(
"Couldn't find subkey " +
"doc_type[%s]['mapping'][%s]" % (key, subkey_name))
return None
# self.pr_dbg("\t\tsubkey_name: %s" % subkey_name)
retdict = self.get_field_mappings(
doc_type[key]['mapping'][subkey_name])
add_it = True
# system mappings don't list a type,
# but kibana makes them all strings
if key in self.sys_mappings:
retdict['analyzed'] = False
retdict['indexed'] = False
if key == '_source':
retdict = self.get_field_mappings(
doc_type[key]['mapping'][key])
retdict['type'] = "_source"
elif key == '_score':
retdict['type'] = "number"
elif 'type' not in retdict:
retdict['type'] = "string"
add_it = True
if add_it:
retdict['name'] = key
retdict['count'] = 0 # always init to 0
retdict['scripted'] = False # I haven't observed a True yet
if not self.check_mapping(retdict):
self.pr_err("Error, invalid mapping")
return None
# the fields element is an escaped array of json
# make the array here, after all collected, then escape it
doc_fields_arr.append(retdict)
if not found_score:
doc_fields_arr.append(
{"name": "_score",
"type": "number",
"count": 0,
"scripted": False,
"indexed": False,
"analyzed": False,
"doc_values": False})
return doc_fields_arr
def get_field_mappings(self, field):
"""Converts ES field mappings to .kibana field mappings"""
retdict = {}
retdict['indexed'] = False
retdict['analyzed'] = False
for (key, val) in iteritems(field):
if key in self.mappings:
if (key == 'type' and
(val == "long" or
val == "integer" or
val == "double" or
val == "float")):
val = "number"
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
retdict[key] = val
if key == 'index' and val != "no":
retdict['indexed'] = True
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
if val == "analyzed":
retdict['analyzed'] = True
return retdict
def refresh_poll(self, period):
self.poll_another = True
while self.poll_another:
self.do_refresh()
self.pr_inf("Polling again in %s secs" % period)
try:
time.sleep(period)
except KeyboardInterrupt:
self.poll_another = False
def needs_refresh(self):
es_cache = self.get_field_cache('es')
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
return True
return False
def do_refresh(self, force=False):
es_cache = self.get_field_cache('es')
if force:
self.pr_inf("Forcing mapping update")
# no need to get kibana if we are forcing it
return self.post_field_cache(es_cache)
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
self.pr_inf("Mapping is incomplete, doing update")
return self.post_field_cache(es_cache)
self.pr_inf("Mapping is correct, no refresh needed")
return 0
def is_kibana_cache_incomplete(self, es_cache, k_cache):
"""Test if k_cache is incomplete
Assume k_cache is always correct, but could be missing new
fields that es_cache has
"""
# convert list into dict, with each item's ['name'] as key
k_dict = {}
for field in k_cache:
# self.pr_dbg("field: %s" % field)
k_dict[field['name']] = field
for ign_f in self.mappings_ignore:
k_dict[field['name']][ign_f] = 0
es_dict = {}
for field in es_cache:
es_dict[field['name']] = field
for ign_f in self.mappings_ignore:
es_dict[field['name']][ign_f] = 0
es_set = set(es_dict.keys())
k_set = set(k_dict.keys())
# reasons why kibana cache could be incomplete:
# k_dict is missing keys that are within es_dict
# We don't care if k has keys that es doesn't
# es {1,2} k {1,2,3}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {1,2}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {}; intersection {}; len(es-{}) 2
# es {1,2} k {1}; intersection {1}; len(es-{}) 1
# es {2,3} k {1}; intersection {}; len(es-{}) 2
# es {2,3} k {1,2}; intersection {2}; len(es-{}) 1
return len(es_set - k_set.intersection(es_set)) > 0
def list_to_compare_dict(self, list_form):
"""Convert list into a data structure we can query easier"""
compare_dict = {}
for field in list_form:
if field['name'] in compare_dict:
self.pr_dbg("List has duplicate field %s:\n%s" %
(field['name'], compare_dict[field['name']]))
if compare_dict[field['name']] != field:
self.pr_dbg("And values are different:\n%s" % field)
return None
compare_dict[field['name']] = field
for ign_f in self.mappings_ignore:
compare_dict[field['name']][ign_f] = 0
return compare_dict
def compare_field_caches(self, replica, original):
"""Verify original is subset of replica"""
if original is None:
original = []
if replica is None:
replica = []
self.pr_dbg("Comparing orig with %s fields to replica with %s fields" %
(len(original), len(replica)))
# convert list into dict, with each item's ['name'] as key
orig = self.list_to_compare_dict(original)
if orig is None:
self.pr_dbg("Original has duplicate fields")
return 1
repl = self.list_to_compare_dict(replica)
if repl is None:
self.pr_dbg("Replica has duplicate fields")
return 1
# search orig for each item in repl
# if any items in repl not within orig or vice versa, then complain
# make sure contents of each item match
orig_found = {}
for (key, field) in iteritems(repl):
field_name = field['name']
if field_name not in orig:
self.pr_dbg("Replica has field not found in orig %s: %s" %
(field_name, field))
return 1
orig_found[field_name] = True
if orig[field_name] != field:
self.pr_dbg("Field in replica doesn't match orig:")
self.pr_dbg("orig:%s\nrepl:%s" % (orig[field_name], field))
return 1
unfound = set(orig_found.keys()) - set(repl.keys())
if len(unfound) > 0:
self.pr_dbg("Orig contains fields that were not in replica")
self.pr_dbg('%s' % unfound)
return 1
# We don't care about case when replica has more fields than orig
# unfound = set(repl.keys()) - set(orig_found.keys())
# if len(unfound) > 0:
# self.pr_dbg("Replica contains fields that were not in orig")
# self.pr_dbg('%s' % unfound)
# return 1
self.pr_dbg("Original matches replica")
return 0
def test_cache(self):
"""Test if this code is equiv to Kibana.refreshFields()
Within Kibana GUI click refreshFields, then either:
* self.test_cache()
* vagrant ssh -c "python -c \"
import kibana; kibana.DotKibana('aaa*').mapping.test_cache()\""
"""
es_cache = self.get_field_cache(cache_type='es')
# self.pr_dbg(json.dumps(es_cache))
kibana_cache = self.get_field_cache(cache_type='kibana')
# self.pr_dbg(json.dumps(kibana_cache))
return self.compare_field_caches(es_cache, kibana_cache)
|
rfarley3/Kibana
|
kibana/mapping.py
|
KibanaMapping.get_index_mappings
|
python
|
def get_index_mappings(self, index):
fields_arr = []
for (key, val) in iteritems(index):
# self.pr_dbg("\tdoc_type: %s" % key)
doc_mapping = self.get_doc_type_mappings(index[key])
# self.pr_dbg("\tdoc_mapping: %s" % doc_mapping)
if doc_mapping is None:
return None
# keep adding to the fields array
fields_arr.extend(doc_mapping)
return fields_arr
|
Converts all index's doc_types to .kibana
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/mapping.py#L181-L192
|
[
"def iteritems(d):\n if PY3:\n return d.items()\n else:\n return d.iteritems()\n",
"def get_doc_type_mappings(self, doc_type):\n \"\"\"Converts all doc_types' fields to .kibana\"\"\"\n doc_fields_arr = []\n found_score = False\n for (key, val) in iteritems(doc_type):\n # self.pr_dbg(\"\\t\\tfield: %s\" % key)\n # self.pr_dbg(\"\\tval: %s\" % val)\n add_it = False\n retdict = {}\n # _ are system\n if not key.startswith('_'):\n if 'mapping' not in doc_type[key]:\n self.pr_err(\"No mapping in doc_type[%s]\" % key)\n return None\n if key in doc_type[key]['mapping']:\n subkey_name = key\n else:\n subkey_name = re.sub('.*\\.', '', key)\n if subkey_name not in doc_type[key]['mapping']:\n self.pr_err(\n \"Couldn't find subkey \" +\n \"doc_type[%s]['mapping'][%s]\" % (key, subkey_name))\n return None\n # self.pr_dbg(\"\\t\\tsubkey_name: %s\" % subkey_name)\n retdict = self.get_field_mappings(\n doc_type[key]['mapping'][subkey_name])\n add_it = True\n # system mappings don't list a type,\n # but kibana makes them all strings\n if key in self.sys_mappings:\n retdict['analyzed'] = False\n retdict['indexed'] = False\n if key == '_source':\n retdict = self.get_field_mappings(\n doc_type[key]['mapping'][key])\n retdict['type'] = \"_source\"\n elif key == '_score':\n retdict['type'] = \"number\"\n elif 'type' not in retdict:\n retdict['type'] = \"string\"\n add_it = True\n if add_it:\n retdict['name'] = key\n retdict['count'] = 0 # always init to 0\n retdict['scripted'] = False # I haven't observed a True yet\n if not self.check_mapping(retdict):\n self.pr_err(\"Error, invalid mapping\")\n return None\n # the fields element is an escaped array of json\n # make the array here, after all collected, then escape it\n doc_fields_arr.append(retdict)\n if not found_score:\n doc_fields_arr.append(\n {\"name\": \"_score\",\n \"type\": \"number\",\n \"count\": 0,\n \"scripted\": False,\n \"indexed\": False,\n \"analyzed\": False,\n \"doc_values\": False})\n return doc_fields_arr\n"
] |
class KibanaMapping():
def __init__(self, index, index_pattern, host, debug=False):
self.index = index
self._index_pattern = index_pattern
self._host = host
self.update_urls()
# from the js possible mappings are:
# { type, indexed, analyzed, doc_values }
# but indexed and analyzed are .kibana specific,
# determined by the value within ES's 'index', which could be:
# { analyzed, no, not_analyzed }
self.mappings = ['type', 'doc_values']
# ignore system fields:
self.sys_mappings = ['_source', '_index', '_type', '_id']
# .kibana has some fields to ignore too:
self.mappings_ignore = ['count']
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Mapping %s' % msg)
def pr_inf(self, msg):
print('[INF] Mapping %s' % msg)
def pr_err(self, msg):
print('[ERR] Mapping %s' % msg)
def update_urls(self):
# 'http://localhost:5601/elasticsearch/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
# 'http://localhost:9200/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
self.es_get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self._index_pattern +
'_mapping/field/' +
'*?ignore_unavailable=false&' +
'allow_no_indices=false&' +
'include_defaults=true')
# 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
# 'http://localhost:9200/.kibana/index-pattern/aaa*'
self.post_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self.index +
'index-pattern/%s' % self._index_pattern)
# 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
# 'http://localhost:9200/.kibana/index-pattern/_search/?id=aaa*'
# 'http://localhost:9200/.kibana/index-pattern/aaa*/'
self.get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self.index +
'index-pattern/%s/' % self._index_pattern)
@property
def index_pattern(self):
return self._index_pattern
@index_pattern.setter
def index_pattern_setter(self, index_pattern):
self._index_pattern = index_pattern
self.update_urls()
@property
def host(self):
return self._host
@host.setter
def host_setter(self, host):
self._host = host
self.update_urls()
def get_field_cache(self, cache_type='es'):
"""Return a list of fields' mappings"""
if cache_type == 'kibana':
try:
search_results = urlopen(self.get_url).read().decode('utf-8')
except HTTPError: # as e:
# self.pr_err("get_field_cache(kibana), HTTPError: %s" % e)
return []
index_pattern = json.loads(search_results)
# Results look like: {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":6,"found":true,"_source":{"title":"aaa*","fields":"<what we want>"}} # noqa
fields_str = index_pattern['_source']['fields']
return json.loads(fields_str)
elif cache_type == 'es' or cache_type.startswith('elastic'):
search_results = urlopen(self.es_get_url).read().decode('utf-8')
es_mappings = json.loads(search_results)
# Results look like: {"<index_name>":{"mappings":{"<doc_type>":{"<field_name>":{"full_name":"<field_name>","mapping":{"<sub-field_name>":{"type":"date","index_name":"<sub-field_name>","boost":1.0,"index":"not_analyzed","store":false,"doc_values":false,"term_vector":"no","norms":{"enabled":false},"index_options":"docs","index_analyzer":"_date/16","search_analyzer":"_date/max","postings_format":"default","doc_values_format":"default","similarity":"default","fielddata":{},"ignore_malformed":false,"coerce":true,"precision_step":16,"format":"dateOptionalTime","null_value":null,"include_in_all":false,"numeric_resolution":"milliseconds","locale":""}}}, # noqa
# now convert the mappings into the .kibana format
field_cache = []
for (index_name, val) in iteritems(es_mappings):
if index_name != self.index: # only get non-'.kibana' indices
# self.pr_dbg("index: %s" % index_name)
m_dict = es_mappings[index_name]['mappings']
# self.pr_dbg('m_dict %s' % m_dict)
mappings = self.get_index_mappings(m_dict)
# self.pr_dbg('mappings %s' % mappings)
field_cache.extend(mappings)
field_cache = self.dedup_field_cache(field_cache)
return field_cache
self.pr_err("Unknown cache type: %s" % cache_type)
return None
def dedup_field_cache(self, field_cache):
deduped = []
fields_found = {}
for field in field_cache:
name = field['name']
if name not in fields_found:
deduped.append(field)
fields_found[name] = field
elif fields_found[name] != field:
self.pr_dbg("Dup field doesn't match")
self.pr_dbg("1st found: %s" % fields_found[name])
self.pr_dbg(" Dup one: %s" % field)
# else ignore, pass
return deduped
def post_field_cache(self, field_cache):
"""Where field_cache is a list of fields' mappings"""
index_pattern = self.field_cache_to_index_pattern(field_cache)
# self.pr_dbg("request/post: %s" % index_pattern)
resp = requests.post(self.post_url, data=index_pattern).text
# resp = {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":1,"created":true} # noqa
resp = json.loads(resp)
return 0
# TODO detect failure (return 1)
def field_cache_to_index_pattern(self, field_cache):
"""Return a .kibana index-pattern doc_type"""
mapping_dict = {}
mapping_dict['customFormats'] = "{}"
mapping_dict['title'] = self.index_pattern
# now post the data into .kibana
mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))
# in order to post, we need to create the post string
mapping_str = json.dumps(mapping_dict, separators=(',', ':'))
return mapping_str
def check_mapping(self, m):
"""Assert minimum set of fields in cache, does not validate contents"""
if 'name' not in m:
self.pr_dbg("Missing %s" % "name")
return False
# self.pr_dbg("Checking %s" % m['name'])
for x in ['analyzed', 'indexed', 'type', 'scripted', 'count']:
if x not in m or m[x] == "":
self.pr_dbg("Missing %s" % x)
self.pr_dbg("Full %s" % m)
return False
if 'doc_values' not in m or m['doc_values'] == "":
if not m['name'].startswith('_'):
self.pr_dbg("Missing %s" % "doc_values")
return False
m['doc_values'] = False
return True
def get_doc_type_mappings(self, doc_type):
"""Converts all doc_types' fields to .kibana"""
doc_fields_arr = []
found_score = False
for (key, val) in iteritems(doc_type):
# self.pr_dbg("\t\tfield: %s" % key)
# self.pr_dbg("\tval: %s" % val)
add_it = False
retdict = {}
# _ are system
if not key.startswith('_'):
if 'mapping' not in doc_type[key]:
self.pr_err("No mapping in doc_type[%s]" % key)
return None
if key in doc_type[key]['mapping']:
subkey_name = key
else:
subkey_name = re.sub('.*\.', '', key)
if subkey_name not in doc_type[key]['mapping']:
self.pr_err(
"Couldn't find subkey " +
"doc_type[%s]['mapping'][%s]" % (key, subkey_name))
return None
# self.pr_dbg("\t\tsubkey_name: %s" % subkey_name)
retdict = self.get_field_mappings(
doc_type[key]['mapping'][subkey_name])
add_it = True
# system mappings don't list a type,
# but kibana makes them all strings
if key in self.sys_mappings:
retdict['analyzed'] = False
retdict['indexed'] = False
if key == '_source':
retdict = self.get_field_mappings(
doc_type[key]['mapping'][key])
retdict['type'] = "_source"
elif key == '_score':
retdict['type'] = "number"
elif 'type' not in retdict:
retdict['type'] = "string"
add_it = True
if add_it:
retdict['name'] = key
retdict['count'] = 0 # always init to 0
retdict['scripted'] = False # I haven't observed a True yet
if not self.check_mapping(retdict):
self.pr_err("Error, invalid mapping")
return None
# the fields element is an escaped array of json
# make the array here, after all collected, then escape it
doc_fields_arr.append(retdict)
if not found_score:
doc_fields_arr.append(
{"name": "_score",
"type": "number",
"count": 0,
"scripted": False,
"indexed": False,
"analyzed": False,
"doc_values": False})
return doc_fields_arr
def get_field_mappings(self, field):
"""Converts ES field mappings to .kibana field mappings"""
retdict = {}
retdict['indexed'] = False
retdict['analyzed'] = False
for (key, val) in iteritems(field):
if key in self.mappings:
if (key == 'type' and
(val == "long" or
val == "integer" or
val == "double" or
val == "float")):
val = "number"
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
retdict[key] = val
if key == 'index' and val != "no":
retdict['indexed'] = True
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
if val == "analyzed":
retdict['analyzed'] = True
return retdict
def refresh_poll(self, period):
self.poll_another = True
while self.poll_another:
self.do_refresh()
self.pr_inf("Polling again in %s secs" % period)
try:
time.sleep(period)
except KeyboardInterrupt:
self.poll_another = False
def needs_refresh(self):
es_cache = self.get_field_cache('es')
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
return True
return False
def do_refresh(self, force=False):
es_cache = self.get_field_cache('es')
if force:
self.pr_inf("Forcing mapping update")
# no need to get kibana if we are forcing it
return self.post_field_cache(es_cache)
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
self.pr_inf("Mapping is incomplete, doing update")
return self.post_field_cache(es_cache)
self.pr_inf("Mapping is correct, no refresh needed")
return 0
def is_kibana_cache_incomplete(self, es_cache, k_cache):
"""Test if k_cache is incomplete
Assume k_cache is always correct, but could be missing new
fields that es_cache has
"""
# convert list into dict, with each item's ['name'] as key
k_dict = {}
for field in k_cache:
# self.pr_dbg("field: %s" % field)
k_dict[field['name']] = field
for ign_f in self.mappings_ignore:
k_dict[field['name']][ign_f] = 0
es_dict = {}
for field in es_cache:
es_dict[field['name']] = field
for ign_f in self.mappings_ignore:
es_dict[field['name']][ign_f] = 0
es_set = set(es_dict.keys())
k_set = set(k_dict.keys())
# reasons why kibana cache could be incomplete:
# k_dict is missing keys that are within es_dict
# We don't care if k has keys that es doesn't
# es {1,2} k {1,2,3}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {1,2}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {}; intersection {}; len(es-{}) 2
# es {1,2} k {1}; intersection {1}; len(es-{}) 1
# es {2,3} k {1}; intersection {}; len(es-{}) 2
# es {2,3} k {1,2}; intersection {2}; len(es-{}) 1
return len(es_set - k_set.intersection(es_set)) > 0
def list_to_compare_dict(self, list_form):
"""Convert list into a data structure we can query easier"""
compare_dict = {}
for field in list_form:
if field['name'] in compare_dict:
self.pr_dbg("List has duplicate field %s:\n%s" %
(field['name'], compare_dict[field['name']]))
if compare_dict[field['name']] != field:
self.pr_dbg("And values are different:\n%s" % field)
return None
compare_dict[field['name']] = field
for ign_f in self.mappings_ignore:
compare_dict[field['name']][ign_f] = 0
return compare_dict
def compare_field_caches(self, replica, original):
"""Verify original is subset of replica"""
if original is None:
original = []
if replica is None:
replica = []
self.pr_dbg("Comparing orig with %s fields to replica with %s fields" %
(len(original), len(replica)))
# convert list into dict, with each item's ['name'] as key
orig = self.list_to_compare_dict(original)
if orig is None:
self.pr_dbg("Original has duplicate fields")
return 1
repl = self.list_to_compare_dict(replica)
if repl is None:
self.pr_dbg("Replica has duplicate fields")
return 1
# search orig for each item in repl
# if any items in repl not within orig or vice versa, then complain
# make sure contents of each item match
orig_found = {}
for (key, field) in iteritems(repl):
field_name = field['name']
if field_name not in orig:
self.pr_dbg("Replica has field not found in orig %s: %s" %
(field_name, field))
return 1
orig_found[field_name] = True
if orig[field_name] != field:
self.pr_dbg("Field in replica doesn't match orig:")
self.pr_dbg("orig:%s\nrepl:%s" % (orig[field_name], field))
return 1
unfound = set(orig_found.keys()) - set(repl.keys())
if len(unfound) > 0:
self.pr_dbg("Orig contains fields that were not in replica")
self.pr_dbg('%s' % unfound)
return 1
# We don't care about case when replica has more fields than orig
# unfound = set(repl.keys()) - set(orig_found.keys())
# if len(unfound) > 0:
# self.pr_dbg("Replica contains fields that were not in orig")
# self.pr_dbg('%s' % unfound)
# return 1
self.pr_dbg("Original matches replica")
return 0
def test_cache(self):
"""Test if this code is equiv to Kibana.refreshFields()
Within Kibana GUI click refreshFields, then either:
* self.test_cache()
* vagrant ssh -c "python -c \"
import kibana; kibana.DotKibana('aaa*').mapping.test_cache()\""
"""
es_cache = self.get_field_cache(cache_type='es')
# self.pr_dbg(json.dumps(es_cache))
kibana_cache = self.get_field_cache(cache_type='kibana')
# self.pr_dbg(json.dumps(kibana_cache))
return self.compare_field_caches(es_cache, kibana_cache)
|
rfarley3/Kibana
|
kibana/mapping.py
|
KibanaMapping.get_doc_type_mappings
|
python
|
def get_doc_type_mappings(self, doc_type):
doc_fields_arr = []
found_score = False
for (key, val) in iteritems(doc_type):
# self.pr_dbg("\t\tfield: %s" % key)
# self.pr_dbg("\tval: %s" % val)
add_it = False
retdict = {}
# _ are system
if not key.startswith('_'):
if 'mapping' not in doc_type[key]:
self.pr_err("No mapping in doc_type[%s]" % key)
return None
if key in doc_type[key]['mapping']:
subkey_name = key
else:
subkey_name = re.sub('.*\.', '', key)
if subkey_name not in doc_type[key]['mapping']:
self.pr_err(
"Couldn't find subkey " +
"doc_type[%s]['mapping'][%s]" % (key, subkey_name))
return None
# self.pr_dbg("\t\tsubkey_name: %s" % subkey_name)
retdict = self.get_field_mappings(
doc_type[key]['mapping'][subkey_name])
add_it = True
# system mappings don't list a type,
# but kibana makes them all strings
if key in self.sys_mappings:
retdict['analyzed'] = False
retdict['indexed'] = False
if key == '_source':
retdict = self.get_field_mappings(
doc_type[key]['mapping'][key])
retdict['type'] = "_source"
elif key == '_score':
retdict['type'] = "number"
elif 'type' not in retdict:
retdict['type'] = "string"
add_it = True
if add_it:
retdict['name'] = key
retdict['count'] = 0 # always init to 0
retdict['scripted'] = False # I haven't observed a True yet
if not self.check_mapping(retdict):
self.pr_err("Error, invalid mapping")
return None
# the fields element is an escaped array of json
# make the array here, after all collected, then escape it
doc_fields_arr.append(retdict)
if not found_score:
doc_fields_arr.append(
{"name": "_score",
"type": "number",
"count": 0,
"scripted": False,
"indexed": False,
"analyzed": False,
"doc_values": False})
return doc_fields_arr
|
Converts all doc_types' fields to .kibana
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/mapping.py#L194-L254
|
[
"def iteritems(d):\n if PY3:\n return d.items()\n else:\n return d.iteritems()\n",
"def pr_err(self, msg):\n print('[ERR] Mapping %s' % msg)\n",
"def check_mapping(self, m):\n \"\"\"Assert minimum set of fields in cache, does not validate contents\"\"\"\n if 'name' not in m:\n self.pr_dbg(\"Missing %s\" % \"name\")\n return False\n # self.pr_dbg(\"Checking %s\" % m['name'])\n for x in ['analyzed', 'indexed', 'type', 'scripted', 'count']:\n if x not in m or m[x] == \"\":\n self.pr_dbg(\"Missing %s\" % x)\n self.pr_dbg(\"Full %s\" % m)\n return False\n if 'doc_values' not in m or m['doc_values'] == \"\":\n if not m['name'].startswith('_'):\n self.pr_dbg(\"Missing %s\" % \"doc_values\")\n return False\n m['doc_values'] = False\n return True\n",
"def get_field_mappings(self, field):\n \"\"\"Converts ES field mappings to .kibana field mappings\"\"\"\n retdict = {}\n retdict['indexed'] = False\n retdict['analyzed'] = False\n for (key, val) in iteritems(field):\n if key in self.mappings:\n if (key == 'type' and\n (val == \"long\" or\n val == \"integer\" or\n val == \"double\" or\n val == \"float\")):\n val = \"number\"\n # self.pr_dbg(\"\\t\\t\\tkey: %s\" % key)\n # self.pr_dbg(\"\\t\\t\\t\\tval: %s\" % val)\n retdict[key] = val\n if key == 'index' and val != \"no\":\n retdict['indexed'] = True\n # self.pr_dbg(\"\\t\\t\\tkey: %s\" % key)\n # self.pr_dbg(\"\\t\\t\\t\\tval: %s\" % val)\n if val == \"analyzed\":\n retdict['analyzed'] = True\n return retdict\n"
] |
class KibanaMapping():
def __init__(self, index, index_pattern, host, debug=False):
self.index = index
self._index_pattern = index_pattern
self._host = host
self.update_urls()
# from the js possible mappings are:
# { type, indexed, analyzed, doc_values }
# but indexed and analyzed are .kibana specific,
# determined by the value within ES's 'index', which could be:
# { analyzed, no, not_analyzed }
self.mappings = ['type', 'doc_values']
# ignore system fields:
self.sys_mappings = ['_source', '_index', '_type', '_id']
# .kibana has some fields to ignore too:
self.mappings_ignore = ['count']
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Mapping %s' % msg)
def pr_inf(self, msg):
print('[INF] Mapping %s' % msg)
def pr_err(self, msg):
print('[ERR] Mapping %s' % msg)
def update_urls(self):
# 'http://localhost:5601/elasticsearch/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
# 'http://localhost:9200/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
self.es_get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self._index_pattern +
'_mapping/field/' +
'*?ignore_unavailable=false&' +
'allow_no_indices=false&' +
'include_defaults=true')
# 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
# 'http://localhost:9200/.kibana/index-pattern/aaa*'
self.post_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self.index +
'index-pattern/%s' % self._index_pattern)
# 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
# 'http://localhost:9200/.kibana/index-pattern/_search/?id=aaa*'
# 'http://localhost:9200/.kibana/index-pattern/aaa*/'
self.get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self.index +
'index-pattern/%s/' % self._index_pattern)
@property
def index_pattern(self):
return self._index_pattern
@index_pattern.setter
def index_pattern_setter(self, index_pattern):
self._index_pattern = index_pattern
self.update_urls()
@property
def host(self):
return self._host
@host.setter
def host_setter(self, host):
self._host = host
self.update_urls()
def get_field_cache(self, cache_type='es'):
"""Return a list of fields' mappings"""
if cache_type == 'kibana':
try:
search_results = urlopen(self.get_url).read().decode('utf-8')
except HTTPError: # as e:
# self.pr_err("get_field_cache(kibana), HTTPError: %s" % e)
return []
index_pattern = json.loads(search_results)
# Results look like: {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":6,"found":true,"_source":{"title":"aaa*","fields":"<what we want>"}} # noqa
fields_str = index_pattern['_source']['fields']
return json.loads(fields_str)
elif cache_type == 'es' or cache_type.startswith('elastic'):
search_results = urlopen(self.es_get_url).read().decode('utf-8')
es_mappings = json.loads(search_results)
# Results look like: {"<index_name>":{"mappings":{"<doc_type>":{"<field_name>":{"full_name":"<field_name>","mapping":{"<sub-field_name>":{"type":"date","index_name":"<sub-field_name>","boost":1.0,"index":"not_analyzed","store":false,"doc_values":false,"term_vector":"no","norms":{"enabled":false},"index_options":"docs","index_analyzer":"_date/16","search_analyzer":"_date/max","postings_format":"default","doc_values_format":"default","similarity":"default","fielddata":{},"ignore_malformed":false,"coerce":true,"precision_step":16,"format":"dateOptionalTime","null_value":null,"include_in_all":false,"numeric_resolution":"milliseconds","locale":""}}}, # noqa
# now convert the mappings into the .kibana format
field_cache = []
for (index_name, val) in iteritems(es_mappings):
if index_name != self.index: # only get non-'.kibana' indices
# self.pr_dbg("index: %s" % index_name)
m_dict = es_mappings[index_name]['mappings']
# self.pr_dbg('m_dict %s' % m_dict)
mappings = self.get_index_mappings(m_dict)
# self.pr_dbg('mappings %s' % mappings)
field_cache.extend(mappings)
field_cache = self.dedup_field_cache(field_cache)
return field_cache
self.pr_err("Unknown cache type: %s" % cache_type)
return None
def dedup_field_cache(self, field_cache):
deduped = []
fields_found = {}
for field in field_cache:
name = field['name']
if name not in fields_found:
deduped.append(field)
fields_found[name] = field
elif fields_found[name] != field:
self.pr_dbg("Dup field doesn't match")
self.pr_dbg("1st found: %s" % fields_found[name])
self.pr_dbg(" Dup one: %s" % field)
# else ignore, pass
return deduped
def post_field_cache(self, field_cache):
"""Where field_cache is a list of fields' mappings"""
index_pattern = self.field_cache_to_index_pattern(field_cache)
# self.pr_dbg("request/post: %s" % index_pattern)
resp = requests.post(self.post_url, data=index_pattern).text
# resp = {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":1,"created":true} # noqa
resp = json.loads(resp)
return 0
# TODO detect failure (return 1)
def field_cache_to_index_pattern(self, field_cache):
"""Return a .kibana index-pattern doc_type"""
mapping_dict = {}
mapping_dict['customFormats'] = "{}"
mapping_dict['title'] = self.index_pattern
# now post the data into .kibana
mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))
# in order to post, we need to create the post string
mapping_str = json.dumps(mapping_dict, separators=(',', ':'))
return mapping_str
def check_mapping(self, m):
"""Assert minimum set of fields in cache, does not validate contents"""
if 'name' not in m:
self.pr_dbg("Missing %s" % "name")
return False
# self.pr_dbg("Checking %s" % m['name'])
for x in ['analyzed', 'indexed', 'type', 'scripted', 'count']:
if x not in m or m[x] == "":
self.pr_dbg("Missing %s" % x)
self.pr_dbg("Full %s" % m)
return False
if 'doc_values' not in m or m['doc_values'] == "":
if not m['name'].startswith('_'):
self.pr_dbg("Missing %s" % "doc_values")
return False
m['doc_values'] = False
return True
def get_index_mappings(self, index):
"""Converts all index's doc_types to .kibana"""
fields_arr = []
for (key, val) in iteritems(index):
# self.pr_dbg("\tdoc_type: %s" % key)
doc_mapping = self.get_doc_type_mappings(index[key])
# self.pr_dbg("\tdoc_mapping: %s" % doc_mapping)
if doc_mapping is None:
return None
# keep adding to the fields array
fields_arr.extend(doc_mapping)
return fields_arr
def get_field_mappings(self, field):
"""Converts ES field mappings to .kibana field mappings"""
retdict = {}
retdict['indexed'] = False
retdict['analyzed'] = False
for (key, val) in iteritems(field):
if key in self.mappings:
if (key == 'type' and
(val == "long" or
val == "integer" or
val == "double" or
val == "float")):
val = "number"
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
retdict[key] = val
if key == 'index' and val != "no":
retdict['indexed'] = True
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
if val == "analyzed":
retdict['analyzed'] = True
return retdict
def refresh_poll(self, period):
self.poll_another = True
while self.poll_another:
self.do_refresh()
self.pr_inf("Polling again in %s secs" % period)
try:
time.sleep(period)
except KeyboardInterrupt:
self.poll_another = False
def needs_refresh(self):
es_cache = self.get_field_cache('es')
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
return True
return False
def do_refresh(self, force=False):
es_cache = self.get_field_cache('es')
if force:
self.pr_inf("Forcing mapping update")
# no need to get kibana if we are forcing it
return self.post_field_cache(es_cache)
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
self.pr_inf("Mapping is incomplete, doing update")
return self.post_field_cache(es_cache)
self.pr_inf("Mapping is correct, no refresh needed")
return 0
def is_kibana_cache_incomplete(self, es_cache, k_cache):
"""Test if k_cache is incomplete
Assume k_cache is always correct, but could be missing new
fields that es_cache has
"""
# convert list into dict, with each item's ['name'] as key
k_dict = {}
for field in k_cache:
# self.pr_dbg("field: %s" % field)
k_dict[field['name']] = field
for ign_f in self.mappings_ignore:
k_dict[field['name']][ign_f] = 0
es_dict = {}
for field in es_cache:
es_dict[field['name']] = field
for ign_f in self.mappings_ignore:
es_dict[field['name']][ign_f] = 0
es_set = set(es_dict.keys())
k_set = set(k_dict.keys())
# reasons why kibana cache could be incomplete:
# k_dict is missing keys that are within es_dict
# We don't care if k has keys that es doesn't
# es {1,2} k {1,2,3}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {1,2}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {}; intersection {}; len(es-{}) 2
# es {1,2} k {1}; intersection {1}; len(es-{}) 1
# es {2,3} k {1}; intersection {}; len(es-{}) 2
# es {2,3} k {1,2}; intersection {2}; len(es-{}) 1
return len(es_set - k_set.intersection(es_set)) > 0
def list_to_compare_dict(self, list_form):
"""Convert list into a data structure we can query easier"""
compare_dict = {}
for field in list_form:
if field['name'] in compare_dict:
self.pr_dbg("List has duplicate field %s:\n%s" %
(field['name'], compare_dict[field['name']]))
if compare_dict[field['name']] != field:
self.pr_dbg("And values are different:\n%s" % field)
return None
compare_dict[field['name']] = field
for ign_f in self.mappings_ignore:
compare_dict[field['name']][ign_f] = 0
return compare_dict
def compare_field_caches(self, replica, original):
    """Verify original is subset of replica

    Returns 0 when the two caches hold the same fields with
    identical mappings, 1 on any mismatch: duplicate names within
    one cache, a field present on only one side, or differing
    mapping contents for a shared field.
    """
    if original is None:
        original = []
    if replica is None:
        replica = []
    self.pr_dbg("Comparing orig with %s fields to replica with %s fields" %
                (len(original), len(replica)))
    # convert list into dict, with each item's ['name'] as key
    orig = self.list_to_compare_dict(original)
    if orig is None:
        self.pr_dbg("Original has duplicate fields")
        return 1
    repl = self.list_to_compare_dict(replica)
    if repl is None:
        self.pr_dbg("Replica has duplicate fields")
        return 1
    # Every replica field must exist in orig with identical content.
    orig_found = {}
    for (key, field) in iteritems(repl):
        field_name = field['name']
        if field_name not in orig:
            self.pr_dbg("Replica has field not found in orig %s: %s" %
                        (field_name, field))
            return 1
        orig_found[field_name] = True
        if orig[field_name] != field:
            self.pr_dbg("Field in replica doesn't match orig:")
            self.pr_dbg("orig:%s\nrepl:%s" % (orig[field_name], field))
            return 1
    # BUG FIX: this used to be set(orig_found) - set(repl.keys()), but
    # orig_found is built from repl's own names, so the difference was
    # always empty and orig-only fields were silently accepted.
    # Compare orig's names against the ones actually matched instead.
    unfound = set(orig.keys()) - set(orig_found.keys())
    if len(unfound) > 0:
        self.pr_dbg("Orig contains fields that were not in replica")
        self.pr_dbg('%s' % unfound)
        return 1
    self.pr_dbg("Original matches replica")
    return 0
def test_cache(self):
    """Check that this code is equivalent to Kibana.refreshFields()

    Within the Kibana GUI click refreshFields, then run this method
    (directly, or via
    vagrant ssh -c "python -c \"import kibana;
    kibana.DotKibana('aaa*').mapping.test_cache()\"")
    to compare the cache we would build against what Kibana stored.
    """
    built_from_es = self.get_field_cache(cache_type='es')
    stored_in_kibana = self.get_field_cache(cache_type='kibana')
    return self.compare_field_caches(built_from_es, stored_in_kibana)
|
rfarley3/Kibana
|
kibana/mapping.py
|
KibanaMapping.get_field_mappings
|
python
|
def get_field_mappings(self, field):
    """Convert one ES field mapping into the .kibana field format.

    Copies the properties listed in self.mappings, folds every
    numeric ES type into Kibana's single "number" type, and derives
    the .kibana-specific 'indexed'/'analyzed' booleans from the ES
    'index' property.
    """
    converted = {}
    converted['indexed'] = False
    converted['analyzed'] = False
    for (prop, value) in field.items():
        if prop in self.mappings:
            # Kibana lumps all numeric ES types under "number".
            if (prop == 'type' and
                    value in ("long", "integer", "double", "float")):
                value = "number"
            converted[prop] = value
        if prop == 'index' and value != "no":
            converted['indexed'] = True
            if value == "analyzed":
                converted['analyzed'] = True
    return converted
|
Converts ES field mappings to .kibana field mappings
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/mapping.py#L256-L278
|
[
"def iteritems(d):\n if PY3:\n return d.items()\n else:\n return d.iteritems()\n"
] |
class KibanaMapping():
def __init__(self, index, index_pattern, host, debug=False):
    """Manage the .kibana field cache for one index pattern.

    index -- name of the Kibana state index (normally '.kibana')
    index_pattern -- ES index pattern whose fields are cached (e.g. 'aaa*')
    host -- indexable pair; only host[0] (hostname) and host[1] (port)
            are read, in update_urls() -- presumably (host, port); confirm
    debug -- when True, pr_dbg() messages are printed
    """
    self.index = index
    self._index_pattern = index_pattern
    self._host = host
    # Build es_get_url / post_url / get_url from the values above.
    self.update_urls()
    # from the js possible mappings are:
    # { type, indexed, analyzed, doc_values }
    # but indexed and analyzed are .kibana specific,
    # determined by the value within ES's 'index', which could be:
    # { analyzed, no, not_analyzed }
    self.mappings = ['type', 'doc_values']
    # ignore system fields:
    self.sys_mappings = ['_source', '_index', '_type', '_id']
    # .kibana has some fields to ignore too:
    self.mappings_ignore = ['count']
    self.debug = debug
def pr_dbg(self, msg):
    """Print a debug-level message, but only when self.debug is set."""
    if not self.debug:
        return
    print('[DBG] Mapping %s' % msg)
def pr_inf(self, msg):
    """Print an info-level message (always shown)."""
    print('[INF] Mapping %s' % msg)
def pr_err(self, msg):
    """Print an error-level message (always shown)."""
    print('[ERR] Mapping %s' % msg)
def update_urls(self):
    """Rebuild the three cached URLs from the current host/index settings.

    es_get_url -- ES field-mapping dump for every index matching the
                  pattern (includes defaults, fails on missing indices)
    post_url   -- where the .kibana index-pattern doc is POSTed
    get_url    -- where the .kibana index-pattern doc is fetched from
    """
    base = 'http://%s:%s/' % (self._host[0], self._host[1])
    self.es_get_url = (base +
                       '%s/' % self._index_pattern +
                       '_mapping/field/' +
                       '*?ignore_unavailable=false&' +
                       'allow_no_indices=false&' +
                       'include_defaults=true')
    self.post_url = (base +
                     '%s/' % self.index +
                     'index-pattern/%s' % self._index_pattern)
    self.get_url = (base +
                    '%s/' % self.index +
                    'index-pattern/%s/' % self._index_pattern)
@property
def index_pattern(self):
    """The ES index pattern (e.g. 'aaa*') whose mapping is managed."""
    return self._index_pattern

@index_pattern.setter
def index_pattern(self, index_pattern):
    # BUG FIX: the setter function was named `index_pattern_setter`,
    # which binds a *separate* descriptor under that name and leaves
    # the `index_pattern` property read-only.  A property setter must
    # reuse the property's own name for `obj.index_pattern = x` to work.
    self._index_pattern = index_pattern
    self.update_urls()
@property
def host(self):
    """The (hostname, port) pair used to build the ES/.kibana URLs."""
    return self._host

@host.setter
def host(self, host):
    # BUG FIX: the setter function was named `host_setter`, which binds
    # a *separate* descriptor and leaves the `host` property read-only.
    # A property setter must reuse the property's own name so that
    # `obj.host = x` works and the cached URLs get refreshed.
    self._host = host
    self.update_urls()
def get_field_cache(self, cache_type='es'):
    """Return a list of fields' mappings

    cache_type selects the source:
      'kibana'            -- the cache stored in the .kibana
                             index-pattern document
      'es' / 'elastic...' -- built fresh from the live ES field mappings
    Returns [] when the .kibana doc does not exist yet (HTTP error on
    fetch), and None for an unknown cache_type.
    """
    if cache_type == 'kibana':
        try:
            search_results = urlopen(self.get_url).read().decode('utf-8')
        except HTTPError:  # as e:
            # No index-pattern doc yet (or fetch failed): empty cache.
            return []
        index_pattern = json.loads(search_results)
        # The doc looks like {"_source": {"title": ..., "fields": "<json>"}}
        # -- the fields list is an escaped JSON string inside _source
        # (see field_cache_to_index_pattern for the writer side).
        fields_str = index_pattern['_source']['fields']
        return json.loads(fields_str)
    elif cache_type == 'es' or cache_type.startswith('elastic'):
        search_results = urlopen(self.es_get_url).read().decode('utf-8')
        es_mappings = json.loads(search_results)
        # now convert the mappings into the .kibana format
        field_cache = []
        for (index_name, val) in iteritems(es_mappings):
            if index_name != self.index:  # only get non-'.kibana' indices
                m_dict = es_mappings[index_name]['mappings']
                mappings = self.get_index_mappings(m_dict)
                field_cache.extend(mappings)
        # Several indices can declare the same field; keep one copy.
        field_cache = self.dedup_field_cache(field_cache)
        return field_cache
    self.pr_err("Unknown cache type: %s" % cache_type)
    return None
def dedup_field_cache(self, field_cache):
    """Drop repeated field entries, keeping the first of each name.

    Exact duplicates are silently removed; a repeated name with a
    *different* mapping keeps the first entry and logs the conflict.
    """
    unique = []
    seen = {}
    for entry in field_cache:
        label = entry['name']
        if label not in seen:
            seen[label] = entry
            unique.append(entry)
        elif seen[label] != entry:
            self.pr_dbg("Dup field doesn't match")
            self.pr_dbg("1st found: %s" % seen[label])
            self.pr_dbg(" Dup one: %s" % entry)
        # else: identical duplicate, drop it quietly
    return unique
def post_field_cache(self, field_cache):
    """Where field_cache is a list of fields' mappings

    Serializes the cache into a .kibana index-pattern document and
    POSTs it.  Always returns 0; the response body is parsed but its
    outcome is not yet checked (see TODO below).
    """
    index_pattern = self.field_cache_to_index_pattern(field_cache)
    resp = requests.post(self.post_url, data=index_pattern).text
    # resp = {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":1,"created":true} # noqa
    resp = json.loads(resp)
    return 0
    # TODO detect failure (return 1)
def field_cache_to_index_pattern(self, field_cache):
    """Return a .kibana index-pattern doc_type

    Serializes field_cache into the compact JSON string that .kibana
    stores: the fields list is itself JSON-encoded and embedded as an
    escaped string inside the outer document.
    """
    fields_json = json.dumps(field_cache, separators=(',', ':'))
    doc = {
        'customFormats': "{}",
        'title': self.index_pattern,
        'fields': fields_json,
    }
    return json.dumps(doc, separators=(',', ':'))
def check_mapping(self, m):
    """Assert minimum set of fields in cache, does not validate contents

    Returns True when mapping dict m carries every key .kibana needs.
    For internal (underscore-prefixed) fields a missing/empty
    'doc_values' is defaulted to False in place instead of failing.
    """
    if 'name' not in m:
        self.pr_dbg("Missing %s" % "name")
        return False
    for required in ('analyzed', 'indexed', 'type', 'scripted', 'count'):
        if required not in m or m[required] == "":
            self.pr_dbg("Missing %s" % required)
            self.pr_dbg("Full %s" % m)
            return False
    if 'doc_values' in m and m['doc_values'] != "":
        return True
    # doc_values is optional only for system fields like '_id'.
    if not m['name'].startswith('_'):
        self.pr_dbg("Missing %s" % "doc_values")
        return False
    m['doc_values'] = False
    return True
def get_index_mappings(self, index):
    """Converts all index's doc_types to .kibana

    Flattens every doc_type of one index into a single list of
    .kibana field descriptors; propagates None on conversion failure.
    """
    collected = []
    for (doc_type_name, _unused) in iteritems(index):
        converted = self.get_doc_type_mappings(index[doc_type_name])
        if converted is None:
            return None
        collected.extend(converted)
    return collected
def get_doc_type_mappings(self, doc_type):
    """Converts all doc_types' fields to .kibana

    Builds one .kibana field descriptor per ES field in doc_type,
    appending a default '_score' entry at the end.  Returns None when
    the input shape is malformed or a descriptor fails check_mapping.
    """
    doc_fields_arr = []
    found_score = False
    # NOTE(review): found_score is never set to True -- the
    # `elif key == '_score'` branch below is unreachable because
    # '_score' is not listed in self.sys_mappings -- so the default
    # _score entry is always appended.  Harmless today (duplicates are
    # collapsed later by dedup_field_cache), but confirm the intent.
    for (key, val) in iteritems(doc_type):
        add_it = False
        retdict = {}
        # _ are system
        if not key.startswith('_'):
            if 'mapping' not in doc_type[key]:
                self.pr_err("No mapping in doc_type[%s]" % key)
                return None
            if key in doc_type[key]['mapping']:
                subkey_name = key
            else:
                # multi-fields ("a.b") are keyed by their leaf name
                subkey_name = re.sub('.*\.', '', key)
            if subkey_name not in doc_type[key]['mapping']:
                self.pr_err(
                    "Couldn't find subkey " +
                    "doc_type[%s]['mapping'][%s]" % (key, subkey_name))
                return None
            retdict = self.get_field_mappings(
                doc_type[key]['mapping'][subkey_name])
            add_it = True
        # system mappings don't list a type,
        # but kibana makes them all strings
        if key in self.sys_mappings:
            retdict['analyzed'] = False
            retdict['indexed'] = False
            if key == '_source':
                retdict = self.get_field_mappings(
                    doc_type[key]['mapping'][key])
                retdict['type'] = "_source"
            elif key == '_score':
                retdict['type'] = "number"
            elif 'type' not in retdict:
                retdict['type'] = "string"
            add_it = True
        if add_it:
            retdict['name'] = key
            retdict['count'] = 0  # always init to 0
            retdict['scripted'] = False  # I haven't observed a True yet
            if not self.check_mapping(retdict):
                self.pr_err("Error, invalid mapping")
                return None
            # the fields element is an escaped array of json
            # make the array here, after all collected, then escape it
            doc_fields_arr.append(retdict)
    if not found_score:
        # ES mappings never expose _score; Kibana still expects it.
        doc_fields_arr.append(
            {"name": "_score",
             "type": "number",
             "count": 0,
             "scripted": False,
             "indexed": False,
             "analyzed": False,
             "doc_values": False})
    return doc_fields_arr
def refresh_poll(self, period):
    """Refresh the mapping forever, sleeping `period` seconds between runs.

    A KeyboardInterrupt (Ctrl-C) during the sleep ends the loop
    cleanly by clearing self.poll_another.
    """
    self.poll_another = True
    while self.poll_another:
        self.do_refresh()
        self.pr_inf("Polling again in %s secs" % period)
        try:
            time.sleep(period)
        except KeyboardInterrupt:
            self.poll_another = False
def needs_refresh(self):
    """Return True when ES has fields the .kibana cache is missing."""
    # Fetch ES first, then .kibana (same order as before).
    return self.is_kibana_cache_incomplete(self.get_field_cache('es'),
                                           self.get_field_cache('kibana'))
def do_refresh(self, force=False):
    """Push the ES field cache into .kibana when needed (or forced).

    Returns post_field_cache()'s result when an update is posted,
    0 when the existing cache was already complete.
    """
    es_cache = self.get_field_cache('es')
    if force:
        self.pr_inf("Forcing mapping update")
        # no need to get kibana if we are forcing it
        return self.post_field_cache(es_cache)
    kibana_cache = self.get_field_cache('kibana')
    if not self.is_kibana_cache_incomplete(es_cache, kibana_cache):
        self.pr_inf("Mapping is correct, no refresh needed")
        return 0
    self.pr_inf("Mapping is incomplete, doing update")
    return self.post_field_cache(es_cache)
def is_kibana_cache_incomplete(self, es_cache, k_cache):
"""Test if k_cache is incomplete
Assume k_cache is always correct, but could be missing new
fields that es_cache has
"""
# convert list into dict, with each item's ['name'] as key
k_dict = {}
for field in k_cache:
# self.pr_dbg("field: %s" % field)
k_dict[field['name']] = field
for ign_f in self.mappings_ignore:
k_dict[field['name']][ign_f] = 0
es_dict = {}
for field in es_cache:
es_dict[field['name']] = field
for ign_f in self.mappings_ignore:
es_dict[field['name']][ign_f] = 0
es_set = set(es_dict.keys())
k_set = set(k_dict.keys())
# reasons why kibana cache could be incomplete:
# k_dict is missing keys that are within es_dict
# We don't care if k has keys that es doesn't
# es {1,2} k {1,2,3}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {1,2}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {}; intersection {}; len(es-{}) 2
# es {1,2} k {1}; intersection {1}; len(es-{}) 1
# es {2,3} k {1}; intersection {}; len(es-{}) 2
# es {2,3} k {1,2}; intersection {2}; len(es-{}) 1
return len(es_set - k_set.intersection(es_set)) > 0
def list_to_compare_dict(self, list_form):
"""Convert list into a data structure we can query easier"""
compare_dict = {}
for field in list_form:
if field['name'] in compare_dict:
self.pr_dbg("List has duplicate field %s:\n%s" %
(field['name'], compare_dict[field['name']]))
if compare_dict[field['name']] != field:
self.pr_dbg("And values are different:\n%s" % field)
return None
compare_dict[field['name']] = field
for ign_f in self.mappings_ignore:
compare_dict[field['name']][ign_f] = 0
return compare_dict
def compare_field_caches(self, replica, original):
"""Verify original is subset of replica"""
if original is None:
original = []
if replica is None:
replica = []
self.pr_dbg("Comparing orig with %s fields to replica with %s fields" %
(len(original), len(replica)))
# convert list into dict, with each item's ['name'] as key
orig = self.list_to_compare_dict(original)
if orig is None:
self.pr_dbg("Original has duplicate fields")
return 1
repl = self.list_to_compare_dict(replica)
if repl is None:
self.pr_dbg("Replica has duplicate fields")
return 1
# search orig for each item in repl
# if any items in repl not within orig or vice versa, then complain
# make sure contents of each item match
orig_found = {}
for (key, field) in iteritems(repl):
field_name = field['name']
if field_name not in orig:
self.pr_dbg("Replica has field not found in orig %s: %s" %
(field_name, field))
return 1
orig_found[field_name] = True
if orig[field_name] != field:
self.pr_dbg("Field in replica doesn't match orig:")
self.pr_dbg("orig:%s\nrepl:%s" % (orig[field_name], field))
return 1
unfound = set(orig_found.keys()) - set(repl.keys())
if len(unfound) > 0:
self.pr_dbg("Orig contains fields that were not in replica")
self.pr_dbg('%s' % unfound)
return 1
# We don't care about case when replica has more fields than orig
# unfound = set(repl.keys()) - set(orig_found.keys())
# if len(unfound) > 0:
# self.pr_dbg("Replica contains fields that were not in orig")
# self.pr_dbg('%s' % unfound)
# return 1
self.pr_dbg("Original matches replica")
return 0
def test_cache(self):
"""Test if this code is equiv to Kibana.refreshFields()
Within Kibana GUI click refreshFields, then either:
* self.test_cache()
* vagrant ssh -c "python -c \"
import kibana; kibana.DotKibana('aaa*').mapping.test_cache()\""
"""
es_cache = self.get_field_cache(cache_type='es')
# self.pr_dbg(json.dumps(es_cache))
kibana_cache = self.get_field_cache(cache_type='kibana')
# self.pr_dbg(json.dumps(kibana_cache))
return self.compare_field_caches(es_cache, kibana_cache)
|
rfarley3/Kibana
|
kibana/mapping.py
|
KibanaMapping.is_kibana_cache_incomplete
|
python
|
def is_kibana_cache_incomplete(self, es_cache, k_cache):
# convert list into dict, with each item's ['name'] as key
k_dict = {}
for field in k_cache:
# self.pr_dbg("field: %s" % field)
k_dict[field['name']] = field
for ign_f in self.mappings_ignore:
k_dict[field['name']][ign_f] = 0
es_dict = {}
for field in es_cache:
es_dict[field['name']] = field
for ign_f in self.mappings_ignore:
es_dict[field['name']][ign_f] = 0
es_set = set(es_dict.keys())
k_set = set(k_dict.keys())
# reasons why kibana cache could be incomplete:
# k_dict is missing keys that are within es_dict
# We don't care if k has keys that es doesn't
# es {1,2} k {1,2,3}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {1,2}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {}; intersection {}; len(es-{}) 2
# es {1,2} k {1}; intersection {1}; len(es-{}) 1
# es {2,3} k {1}; intersection {}; len(es-{}) 2
# es {2,3} k {1,2}; intersection {2}; len(es-{}) 1
return len(es_set - k_set.intersection(es_set)) > 0
|
Test if k_cache is incomplete
Assume k_cache is always correct, but could be missing new
fields that es_cache has
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/mapping.py#L310-L339
| null |
class KibanaMapping():
def __init__(self, index, index_pattern, host, debug=False):
self.index = index
self._index_pattern = index_pattern
self._host = host
self.update_urls()
# from the js possible mappings are:
# { type, indexed, analyzed, doc_values }
# but indexed and analyzed are .kibana specific,
# determined by the value within ES's 'index', which could be:
# { analyzed, no, not_analyzed }
self.mappings = ['type', 'doc_values']
# ignore system fields:
self.sys_mappings = ['_source', '_index', '_type', '_id']
# .kibana has some fields to ignore too:
self.mappings_ignore = ['count']
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Mapping %s' % msg)
def pr_inf(self, msg):
print('[INF] Mapping %s' % msg)
def pr_err(self, msg):
print('[ERR] Mapping %s' % msg)
def update_urls(self):
# 'http://localhost:5601/elasticsearch/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
# 'http://localhost:9200/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
self.es_get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self._index_pattern +
'_mapping/field/' +
'*?ignore_unavailable=false&' +
'allow_no_indices=false&' +
'include_defaults=true')
# 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
# 'http://localhost:9200/.kibana/index-pattern/aaa*'
self.post_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self.index +
'index-pattern/%s' % self._index_pattern)
# 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
# 'http://localhost:9200/.kibana/index-pattern/_search/?id=aaa*'
# 'http://localhost:9200/.kibana/index-pattern/aaa*/'
self.get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self.index +
'index-pattern/%s/' % self._index_pattern)
@property
def index_pattern(self):
return self._index_pattern
@index_pattern.setter
def index_pattern_setter(self, index_pattern):
self._index_pattern = index_pattern
self.update_urls()
@property
def host(self):
return self._host
@host.setter
def host_setter(self, host):
self._host = host
self.update_urls()
def get_field_cache(self, cache_type='es'):
"""Return a list of fields' mappings"""
if cache_type == 'kibana':
try:
search_results = urlopen(self.get_url).read().decode('utf-8')
except HTTPError: # as e:
# self.pr_err("get_field_cache(kibana), HTTPError: %s" % e)
return []
index_pattern = json.loads(search_results)
# Results look like: {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":6,"found":true,"_source":{"title":"aaa*","fields":"<what we want>"}} # noqa
fields_str = index_pattern['_source']['fields']
return json.loads(fields_str)
elif cache_type == 'es' or cache_type.startswith('elastic'):
search_results = urlopen(self.es_get_url).read().decode('utf-8')
es_mappings = json.loads(search_results)
# Results look like: {"<index_name>":{"mappings":{"<doc_type>":{"<field_name>":{"full_name":"<field_name>","mapping":{"<sub-field_name>":{"type":"date","index_name":"<sub-field_name>","boost":1.0,"index":"not_analyzed","store":false,"doc_values":false,"term_vector":"no","norms":{"enabled":false},"index_options":"docs","index_analyzer":"_date/16","search_analyzer":"_date/max","postings_format":"default","doc_values_format":"default","similarity":"default","fielddata":{},"ignore_malformed":false,"coerce":true,"precision_step":16,"format":"dateOptionalTime","null_value":null,"include_in_all":false,"numeric_resolution":"milliseconds","locale":""}}}, # noqa
# now convert the mappings into the .kibana format
field_cache = []
for (index_name, val) in iteritems(es_mappings):
if index_name != self.index: # only get non-'.kibana' indices
# self.pr_dbg("index: %s" % index_name)
m_dict = es_mappings[index_name]['mappings']
# self.pr_dbg('m_dict %s' % m_dict)
mappings = self.get_index_mappings(m_dict)
# self.pr_dbg('mappings %s' % mappings)
field_cache.extend(mappings)
field_cache = self.dedup_field_cache(field_cache)
return field_cache
self.pr_err("Unknown cache type: %s" % cache_type)
return None
def dedup_field_cache(self, field_cache):
deduped = []
fields_found = {}
for field in field_cache:
name = field['name']
if name not in fields_found:
deduped.append(field)
fields_found[name] = field
elif fields_found[name] != field:
self.pr_dbg("Dup field doesn't match")
self.pr_dbg("1st found: %s" % fields_found[name])
self.pr_dbg(" Dup one: %s" % field)
# else ignore, pass
return deduped
def post_field_cache(self, field_cache):
"""Where field_cache is a list of fields' mappings"""
index_pattern = self.field_cache_to_index_pattern(field_cache)
# self.pr_dbg("request/post: %s" % index_pattern)
resp = requests.post(self.post_url, data=index_pattern).text
# resp = {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":1,"created":true} # noqa
resp = json.loads(resp)
return 0
# TODO detect failure (return 1)
def field_cache_to_index_pattern(self, field_cache):
"""Return a .kibana index-pattern doc_type"""
mapping_dict = {}
mapping_dict['customFormats'] = "{}"
mapping_dict['title'] = self.index_pattern
# now post the data into .kibana
mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))
# in order to post, we need to create the post string
mapping_str = json.dumps(mapping_dict, separators=(',', ':'))
return mapping_str
def check_mapping(self, m):
"""Assert minimum set of fields in cache, does not validate contents"""
if 'name' not in m:
self.pr_dbg("Missing %s" % "name")
return False
# self.pr_dbg("Checking %s" % m['name'])
for x in ['analyzed', 'indexed', 'type', 'scripted', 'count']:
if x not in m or m[x] == "":
self.pr_dbg("Missing %s" % x)
self.pr_dbg("Full %s" % m)
return False
if 'doc_values' not in m or m['doc_values'] == "":
if not m['name'].startswith('_'):
self.pr_dbg("Missing %s" % "doc_values")
return False
m['doc_values'] = False
return True
def get_index_mappings(self, index):
"""Converts all index's doc_types to .kibana"""
fields_arr = []
for (key, val) in iteritems(index):
# self.pr_dbg("\tdoc_type: %s" % key)
doc_mapping = self.get_doc_type_mappings(index[key])
# self.pr_dbg("\tdoc_mapping: %s" % doc_mapping)
if doc_mapping is None:
return None
# keep adding to the fields array
fields_arr.extend(doc_mapping)
return fields_arr
def get_doc_type_mappings(self, doc_type):
"""Converts all doc_types' fields to .kibana"""
doc_fields_arr = []
found_score = False
for (key, val) in iteritems(doc_type):
# self.pr_dbg("\t\tfield: %s" % key)
# self.pr_dbg("\tval: %s" % val)
add_it = False
retdict = {}
# _ are system
if not key.startswith('_'):
if 'mapping' not in doc_type[key]:
self.pr_err("No mapping in doc_type[%s]" % key)
return None
if key in doc_type[key]['mapping']:
subkey_name = key
else:
subkey_name = re.sub('.*\.', '', key)
if subkey_name not in doc_type[key]['mapping']:
self.pr_err(
"Couldn't find subkey " +
"doc_type[%s]['mapping'][%s]" % (key, subkey_name))
return None
# self.pr_dbg("\t\tsubkey_name: %s" % subkey_name)
retdict = self.get_field_mappings(
doc_type[key]['mapping'][subkey_name])
add_it = True
# system mappings don't list a type,
# but kibana makes them all strings
if key in self.sys_mappings:
retdict['analyzed'] = False
retdict['indexed'] = False
if key == '_source':
retdict = self.get_field_mappings(
doc_type[key]['mapping'][key])
retdict['type'] = "_source"
elif key == '_score':
retdict['type'] = "number"
elif 'type' not in retdict:
retdict['type'] = "string"
add_it = True
if add_it:
retdict['name'] = key
retdict['count'] = 0 # always init to 0
retdict['scripted'] = False # I haven't observed a True yet
if not self.check_mapping(retdict):
self.pr_err("Error, invalid mapping")
return None
# the fields element is an escaped array of json
# make the array here, after all collected, then escape it
doc_fields_arr.append(retdict)
if not found_score:
doc_fields_arr.append(
{"name": "_score",
"type": "number",
"count": 0,
"scripted": False,
"indexed": False,
"analyzed": False,
"doc_values": False})
return doc_fields_arr
def get_field_mappings(self, field):
"""Converts ES field mappings to .kibana field mappings"""
retdict = {}
retdict['indexed'] = False
retdict['analyzed'] = False
for (key, val) in iteritems(field):
if key in self.mappings:
if (key == 'type' and
(val == "long" or
val == "integer" or
val == "double" or
val == "float")):
val = "number"
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
retdict[key] = val
if key == 'index' and val != "no":
retdict['indexed'] = True
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
if val == "analyzed":
retdict['analyzed'] = True
return retdict
def refresh_poll(self, period):
self.poll_another = True
while self.poll_another:
self.do_refresh()
self.pr_inf("Polling again in %s secs" % period)
try:
time.sleep(period)
except KeyboardInterrupt:
self.poll_another = False
def needs_refresh(self):
es_cache = self.get_field_cache('es')
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
return True
return False
def do_refresh(self, force=False):
es_cache = self.get_field_cache('es')
if force:
self.pr_inf("Forcing mapping update")
# no need to get kibana if we are forcing it
return self.post_field_cache(es_cache)
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
self.pr_inf("Mapping is incomplete, doing update")
return self.post_field_cache(es_cache)
self.pr_inf("Mapping is correct, no refresh needed")
return 0
def list_to_compare_dict(self, list_form):
"""Convert list into a data structure we can query easier"""
compare_dict = {}
for field in list_form:
if field['name'] in compare_dict:
self.pr_dbg("List has duplicate field %s:\n%s" %
(field['name'], compare_dict[field['name']]))
if compare_dict[field['name']] != field:
self.pr_dbg("And values are different:\n%s" % field)
return None
compare_dict[field['name']] = field
for ign_f in self.mappings_ignore:
compare_dict[field['name']][ign_f] = 0
return compare_dict
def compare_field_caches(self, replica, original):
"""Verify original is subset of replica"""
if original is None:
original = []
if replica is None:
replica = []
self.pr_dbg("Comparing orig with %s fields to replica with %s fields" %
(len(original), len(replica)))
# convert list into dict, with each item's ['name'] as key
orig = self.list_to_compare_dict(original)
if orig is None:
self.pr_dbg("Original has duplicate fields")
return 1
repl = self.list_to_compare_dict(replica)
if repl is None:
self.pr_dbg("Replica has duplicate fields")
return 1
# search orig for each item in repl
# if any items in repl not within orig or vice versa, then complain
# make sure contents of each item match
orig_found = {}
for (key, field) in iteritems(repl):
field_name = field['name']
if field_name not in orig:
self.pr_dbg("Replica has field not found in orig %s: %s" %
(field_name, field))
return 1
orig_found[field_name] = True
if orig[field_name] != field:
self.pr_dbg("Field in replica doesn't match orig:")
self.pr_dbg("orig:%s\nrepl:%s" % (orig[field_name], field))
return 1
unfound = set(orig_found.keys()) - set(repl.keys())
if len(unfound) > 0:
self.pr_dbg("Orig contains fields that were not in replica")
self.pr_dbg('%s' % unfound)
return 1
# We don't care about case when replica has more fields than orig
# unfound = set(repl.keys()) - set(orig_found.keys())
# if len(unfound) > 0:
# self.pr_dbg("Replica contains fields that were not in orig")
# self.pr_dbg('%s' % unfound)
# return 1
self.pr_dbg("Original matches replica")
return 0
def test_cache(self):
"""Test if this code is equiv to Kibana.refreshFields()
Within Kibana GUI click refreshFields, then either:
* self.test_cache()
* vagrant ssh -c "python -c \"
import kibana; kibana.DotKibana('aaa*').mapping.test_cache()\""
"""
es_cache = self.get_field_cache(cache_type='es')
# self.pr_dbg(json.dumps(es_cache))
kibana_cache = self.get_field_cache(cache_type='kibana')
# self.pr_dbg(json.dumps(kibana_cache))
return self.compare_field_caches(es_cache, kibana_cache)
|
rfarley3/Kibana
|
kibana/mapping.py
|
KibanaMapping.list_to_compare_dict
|
python
|
def list_to_compare_dict(self, list_form):
compare_dict = {}
for field in list_form:
if field['name'] in compare_dict:
self.pr_dbg("List has duplicate field %s:\n%s" %
(field['name'], compare_dict[field['name']]))
if compare_dict[field['name']] != field:
self.pr_dbg("And values are different:\n%s" % field)
return None
compare_dict[field['name']] = field
for ign_f in self.mappings_ignore:
compare_dict[field['name']][ign_f] = 0
return compare_dict
|
Convert list into a data structure we can query easier
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/mapping.py#L341-L354
|
[
"def pr_dbg(self, msg):\n if self.debug:\n print('[DBG] Mapping %s' % msg)\n"
] |
class KibanaMapping():
def __init__(self, index, index_pattern, host, debug=False):
self.index = index
self._index_pattern = index_pattern
self._host = host
self.update_urls()
# from the js possible mappings are:
# { type, indexed, analyzed, doc_values }
# but indexed and analyzed are .kibana specific,
# determined by the value within ES's 'index', which could be:
# { analyzed, no, not_analyzed }
self.mappings = ['type', 'doc_values']
# ignore system fields:
self.sys_mappings = ['_source', '_index', '_type', '_id']
# .kibana has some fields to ignore too:
self.mappings_ignore = ['count']
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Mapping %s' % msg)
def pr_inf(self, msg):
print('[INF] Mapping %s' % msg)
def pr_err(self, msg):
print('[ERR] Mapping %s' % msg)
def update_urls(self):
# 'http://localhost:5601/elasticsearch/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
# 'http://localhost:9200/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
self.es_get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self._index_pattern +
'_mapping/field/' +
'*?ignore_unavailable=false&' +
'allow_no_indices=false&' +
'include_defaults=true')
# 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
# 'http://localhost:9200/.kibana/index-pattern/aaa*'
self.post_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self.index +
'index-pattern/%s' % self._index_pattern)
# 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
# 'http://localhost:9200/.kibana/index-pattern/_search/?id=aaa*'
# 'http://localhost:9200/.kibana/index-pattern/aaa*/'
self.get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self.index +
'index-pattern/%s/' % self._index_pattern)
@property
def index_pattern(self):
return self._index_pattern
@index_pattern.setter
def index_pattern_setter(self, index_pattern):
self._index_pattern = index_pattern
self.update_urls()
@property
def host(self):
return self._host
@host.setter
def host_setter(self, host):
self._host = host
self.update_urls()
def get_field_cache(self, cache_type='es'):
"""Return a list of fields' mappings"""
if cache_type == 'kibana':
try:
search_results = urlopen(self.get_url).read().decode('utf-8')
except HTTPError: # as e:
# self.pr_err("get_field_cache(kibana), HTTPError: %s" % e)
return []
index_pattern = json.loads(search_results)
# Results look like: {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":6,"found":true,"_source":{"title":"aaa*","fields":"<what we want>"}} # noqa
fields_str = index_pattern['_source']['fields']
return json.loads(fields_str)
elif cache_type == 'es' or cache_type.startswith('elastic'):
search_results = urlopen(self.es_get_url).read().decode('utf-8')
es_mappings = json.loads(search_results)
# Results look like: {"<index_name>":{"mappings":{"<doc_type>":{"<field_name>":{"full_name":"<field_name>","mapping":{"<sub-field_name>":{"type":"date","index_name":"<sub-field_name>","boost":1.0,"index":"not_analyzed","store":false,"doc_values":false,"term_vector":"no","norms":{"enabled":false},"index_options":"docs","index_analyzer":"_date/16","search_analyzer":"_date/max","postings_format":"default","doc_values_format":"default","similarity":"default","fielddata":{},"ignore_malformed":false,"coerce":true,"precision_step":16,"format":"dateOptionalTime","null_value":null,"include_in_all":false,"numeric_resolution":"milliseconds","locale":""}}}, # noqa
# now convert the mappings into the .kibana format
field_cache = []
for (index_name, val) in iteritems(es_mappings):
if index_name != self.index: # only get non-'.kibana' indices
# self.pr_dbg("index: %s" % index_name)
m_dict = es_mappings[index_name]['mappings']
# self.pr_dbg('m_dict %s' % m_dict)
mappings = self.get_index_mappings(m_dict)
# self.pr_dbg('mappings %s' % mappings)
field_cache.extend(mappings)
field_cache = self.dedup_field_cache(field_cache)
return field_cache
self.pr_err("Unknown cache type: %s" % cache_type)
return None
def dedup_field_cache(self, field_cache):
deduped = []
fields_found = {}
for field in field_cache:
name = field['name']
if name not in fields_found:
deduped.append(field)
fields_found[name] = field
elif fields_found[name] != field:
self.pr_dbg("Dup field doesn't match")
self.pr_dbg("1st found: %s" % fields_found[name])
self.pr_dbg(" Dup one: %s" % field)
# else ignore, pass
return deduped
def post_field_cache(self, field_cache):
"""Where field_cache is a list of fields' mappings"""
index_pattern = self.field_cache_to_index_pattern(field_cache)
# self.pr_dbg("request/post: %s" % index_pattern)
resp = requests.post(self.post_url, data=index_pattern).text
# resp = {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":1,"created":true} # noqa
resp = json.loads(resp)
return 0
# TODO detect failure (return 1)
def field_cache_to_index_pattern(self, field_cache):
"""Return a .kibana index-pattern doc_type"""
mapping_dict = {}
mapping_dict['customFormats'] = "{}"
mapping_dict['title'] = self.index_pattern
# now post the data into .kibana
mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))
# in order to post, we need to create the post string
mapping_str = json.dumps(mapping_dict, separators=(',', ':'))
return mapping_str
def check_mapping(self, m):
"""Assert minimum set of fields in cache, does not validate contents"""
if 'name' not in m:
self.pr_dbg("Missing %s" % "name")
return False
# self.pr_dbg("Checking %s" % m['name'])
for x in ['analyzed', 'indexed', 'type', 'scripted', 'count']:
if x not in m or m[x] == "":
self.pr_dbg("Missing %s" % x)
self.pr_dbg("Full %s" % m)
return False
if 'doc_values' not in m or m['doc_values'] == "":
if not m['name'].startswith('_'):
self.pr_dbg("Missing %s" % "doc_values")
return False
m['doc_values'] = False
return True
def get_index_mappings(self, index):
"""Converts all index's doc_types to .kibana"""
fields_arr = []
for (key, val) in iteritems(index):
# self.pr_dbg("\tdoc_type: %s" % key)
doc_mapping = self.get_doc_type_mappings(index[key])
# self.pr_dbg("\tdoc_mapping: %s" % doc_mapping)
if doc_mapping is None:
return None
# keep adding to the fields array
fields_arr.extend(doc_mapping)
return fields_arr
def get_doc_type_mappings(self, doc_type):
"""Converts all doc_types' fields to .kibana"""
doc_fields_arr = []
found_score = False
for (key, val) in iteritems(doc_type):
# self.pr_dbg("\t\tfield: %s" % key)
# self.pr_dbg("\tval: %s" % val)
add_it = False
retdict = {}
# _ are system
if not key.startswith('_'):
if 'mapping' not in doc_type[key]:
self.pr_err("No mapping in doc_type[%s]" % key)
return None
if key in doc_type[key]['mapping']:
subkey_name = key
else:
subkey_name = re.sub('.*\.', '', key)
if subkey_name not in doc_type[key]['mapping']:
self.pr_err(
"Couldn't find subkey " +
"doc_type[%s]['mapping'][%s]" % (key, subkey_name))
return None
# self.pr_dbg("\t\tsubkey_name: %s" % subkey_name)
retdict = self.get_field_mappings(
doc_type[key]['mapping'][subkey_name])
add_it = True
# system mappings don't list a type,
# but kibana makes them all strings
if key in self.sys_mappings:
retdict['analyzed'] = False
retdict['indexed'] = False
if key == '_source':
retdict = self.get_field_mappings(
doc_type[key]['mapping'][key])
retdict['type'] = "_source"
elif key == '_score':
retdict['type'] = "number"
elif 'type' not in retdict:
retdict['type'] = "string"
add_it = True
if add_it:
retdict['name'] = key
retdict['count'] = 0 # always init to 0
retdict['scripted'] = False # I haven't observed a True yet
if not self.check_mapping(retdict):
self.pr_err("Error, invalid mapping")
return None
# the fields element is an escaped array of json
# make the array here, after all collected, then escape it
doc_fields_arr.append(retdict)
if not found_score:
doc_fields_arr.append(
{"name": "_score",
"type": "number",
"count": 0,
"scripted": False,
"indexed": False,
"analyzed": False,
"doc_values": False})
return doc_fields_arr
def get_field_mappings(self, field):
"""Converts ES field mappings to .kibana field mappings"""
retdict = {}
retdict['indexed'] = False
retdict['analyzed'] = False
for (key, val) in iteritems(field):
if key in self.mappings:
if (key == 'type' and
(val == "long" or
val == "integer" or
val == "double" or
val == "float")):
val = "number"
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
retdict[key] = val
if key == 'index' and val != "no":
retdict['indexed'] = True
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
if val == "analyzed":
retdict['analyzed'] = True
return retdict
def refresh_poll(self, period):
self.poll_another = True
while self.poll_another:
self.do_refresh()
self.pr_inf("Polling again in %s secs" % period)
try:
time.sleep(period)
except KeyboardInterrupt:
self.poll_another = False
def needs_refresh(self):
es_cache = self.get_field_cache('es')
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
return True
return False
def do_refresh(self, force=False):
es_cache = self.get_field_cache('es')
if force:
self.pr_inf("Forcing mapping update")
# no need to get kibana if we are forcing it
return self.post_field_cache(es_cache)
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
self.pr_inf("Mapping is incomplete, doing update")
return self.post_field_cache(es_cache)
self.pr_inf("Mapping is correct, no refresh needed")
return 0
def is_kibana_cache_incomplete(self, es_cache, k_cache):
"""Test if k_cache is incomplete
Assume k_cache is always correct, but could be missing new
fields that es_cache has
"""
# convert list into dict, with each item's ['name'] as key
k_dict = {}
for field in k_cache:
# self.pr_dbg("field: %s" % field)
k_dict[field['name']] = field
for ign_f in self.mappings_ignore:
k_dict[field['name']][ign_f] = 0
es_dict = {}
for field in es_cache:
es_dict[field['name']] = field
for ign_f in self.mappings_ignore:
es_dict[field['name']][ign_f] = 0
es_set = set(es_dict.keys())
k_set = set(k_dict.keys())
# reasons why kibana cache could be incomplete:
# k_dict is missing keys that are within es_dict
# We don't care if k has keys that es doesn't
# es {1,2} k {1,2,3}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {1,2}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {}; intersection {}; len(es-{}) 2
# es {1,2} k {1}; intersection {1}; len(es-{}) 1
# es {2,3} k {1}; intersection {}; len(es-{}) 2
# es {2,3} k {1,2}; intersection {2}; len(es-{}) 1
return len(es_set - k_set.intersection(es_set)) > 0
def compare_field_caches(self, replica, original):
"""Verify original is subset of replica"""
if original is None:
original = []
if replica is None:
replica = []
self.pr_dbg("Comparing orig with %s fields to replica with %s fields" %
(len(original), len(replica)))
# convert list into dict, with each item's ['name'] as key
orig = self.list_to_compare_dict(original)
if orig is None:
self.pr_dbg("Original has duplicate fields")
return 1
repl = self.list_to_compare_dict(replica)
if repl is None:
self.pr_dbg("Replica has duplicate fields")
return 1
# search orig for each item in repl
# if any items in repl not within orig or vice versa, then complain
# make sure contents of each item match
orig_found = {}
for (key, field) in iteritems(repl):
field_name = field['name']
if field_name not in orig:
self.pr_dbg("Replica has field not found in orig %s: %s" %
(field_name, field))
return 1
orig_found[field_name] = True
if orig[field_name] != field:
self.pr_dbg("Field in replica doesn't match orig:")
self.pr_dbg("orig:%s\nrepl:%s" % (orig[field_name], field))
return 1
unfound = set(orig_found.keys()) - set(repl.keys())
if len(unfound) > 0:
self.pr_dbg("Orig contains fields that were not in replica")
self.pr_dbg('%s' % unfound)
return 1
# We don't care about case when replica has more fields than orig
# unfound = set(repl.keys()) - set(orig_found.keys())
# if len(unfound) > 0:
# self.pr_dbg("Replica contains fields that were not in orig")
# self.pr_dbg('%s' % unfound)
# return 1
self.pr_dbg("Original matches replica")
return 0
def test_cache(self):
"""Test if this code is equiv to Kibana.refreshFields()
Within Kibana GUI click refreshFields, then either:
* self.test_cache()
* vagrant ssh -c "python -c \"
import kibana; kibana.DotKibana('aaa*').mapping.test_cache()\""
"""
es_cache = self.get_field_cache(cache_type='es')
# self.pr_dbg(json.dumps(es_cache))
kibana_cache = self.get_field_cache(cache_type='kibana')
# self.pr_dbg(json.dumps(kibana_cache))
return self.compare_field_caches(es_cache, kibana_cache)
|
rfarley3/Kibana
|
kibana/mapping.py
|
KibanaMapping.compare_field_caches
|
python
|
def compare_field_caches(self, replica, original):
if original is None:
original = []
if replica is None:
replica = []
self.pr_dbg("Comparing orig with %s fields to replica with %s fields" %
(len(original), len(replica)))
# convert list into dict, with each item's ['name'] as key
orig = self.list_to_compare_dict(original)
if orig is None:
self.pr_dbg("Original has duplicate fields")
return 1
repl = self.list_to_compare_dict(replica)
if repl is None:
self.pr_dbg("Replica has duplicate fields")
return 1
# search orig for each item in repl
# if any items in repl not within orig or vice versa, then complain
# make sure contents of each item match
orig_found = {}
for (key, field) in iteritems(repl):
field_name = field['name']
if field_name not in orig:
self.pr_dbg("Replica has field not found in orig %s: %s" %
(field_name, field))
return 1
orig_found[field_name] = True
if orig[field_name] != field:
self.pr_dbg("Field in replica doesn't match orig:")
self.pr_dbg("orig:%s\nrepl:%s" % (orig[field_name], field))
return 1
unfound = set(orig_found.keys()) - set(repl.keys())
if len(unfound) > 0:
self.pr_dbg("Orig contains fields that were not in replica")
self.pr_dbg('%s' % unfound)
return 1
# We don't care about case when replica has more fields than orig
# unfound = set(repl.keys()) - set(orig_found.keys())
# if len(unfound) > 0:
# self.pr_dbg("Replica contains fields that were not in orig")
# self.pr_dbg('%s' % unfound)
# return 1
self.pr_dbg("Original matches replica")
return 0
|
Verify original is subset of replica
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/mapping.py#L356-L400
|
[
"def iteritems(d):\n if PY3:\n return d.items()\n else:\n return d.iteritems()\n",
"def pr_dbg(self, msg):\n if self.debug:\n print('[DBG] Mapping %s' % msg)\n",
"def list_to_compare_dict(self, list_form):\n \"\"\"Convert list into a data structure we can query easier\"\"\"\n compare_dict = {}\n for field in list_form:\n if field['name'] in compare_dict:\n self.pr_dbg(\"List has duplicate field %s:\\n%s\" %\n (field['name'], compare_dict[field['name']]))\n if compare_dict[field['name']] != field:\n self.pr_dbg(\"And values are different:\\n%s\" % field)\n return None\n compare_dict[field['name']] = field\n for ign_f in self.mappings_ignore:\n compare_dict[field['name']][ign_f] = 0\n return compare_dict\n"
] |
class KibanaMapping():
def __init__(self, index, index_pattern, host, debug=False):
self.index = index
self._index_pattern = index_pattern
self._host = host
self.update_urls()
# from the js possible mappings are:
# { type, indexed, analyzed, doc_values }
# but indexed and analyzed are .kibana specific,
# determined by the value within ES's 'index', which could be:
# { analyzed, no, not_analyzed }
self.mappings = ['type', 'doc_values']
# ignore system fields:
self.sys_mappings = ['_source', '_index', '_type', '_id']
# .kibana has some fields to ignore too:
self.mappings_ignore = ['count']
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Mapping %s' % msg)
def pr_inf(self, msg):
print('[INF] Mapping %s' % msg)
def pr_err(self, msg):
print('[ERR] Mapping %s' % msg)
def update_urls(self):
# 'http://localhost:5601/elasticsearch/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
# 'http://localhost:9200/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
self.es_get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self._index_pattern +
'_mapping/field/' +
'*?ignore_unavailable=false&' +
'allow_no_indices=false&' +
'include_defaults=true')
# 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
# 'http://localhost:9200/.kibana/index-pattern/aaa*'
self.post_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self.index +
'index-pattern/%s' % self._index_pattern)
# 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
# 'http://localhost:9200/.kibana/index-pattern/_search/?id=aaa*'
# 'http://localhost:9200/.kibana/index-pattern/aaa*/'
self.get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
'%s/' % self.index +
'index-pattern/%s/' % self._index_pattern)
@property
def index_pattern(self):
return self._index_pattern
@index_pattern.setter
def index_pattern_setter(self, index_pattern):
self._index_pattern = index_pattern
self.update_urls()
@property
def host(self):
return self._host
@host.setter
def host_setter(self, host):
self._host = host
self.update_urls()
def get_field_cache(self, cache_type='es'):
"""Return a list of fields' mappings"""
if cache_type == 'kibana':
try:
search_results = urlopen(self.get_url).read().decode('utf-8')
except HTTPError: # as e:
# self.pr_err("get_field_cache(kibana), HTTPError: %s" % e)
return []
index_pattern = json.loads(search_results)
# Results look like: {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":6,"found":true,"_source":{"title":"aaa*","fields":"<what we want>"}} # noqa
fields_str = index_pattern['_source']['fields']
return json.loads(fields_str)
elif cache_type == 'es' or cache_type.startswith('elastic'):
search_results = urlopen(self.es_get_url).read().decode('utf-8')
es_mappings = json.loads(search_results)
# Results look like: {"<index_name>":{"mappings":{"<doc_type>":{"<field_name>":{"full_name":"<field_name>","mapping":{"<sub-field_name>":{"type":"date","index_name":"<sub-field_name>","boost":1.0,"index":"not_analyzed","store":false,"doc_values":false,"term_vector":"no","norms":{"enabled":false},"index_options":"docs","index_analyzer":"_date/16","search_analyzer":"_date/max","postings_format":"default","doc_values_format":"default","similarity":"default","fielddata":{},"ignore_malformed":false,"coerce":true,"precision_step":16,"format":"dateOptionalTime","null_value":null,"include_in_all":false,"numeric_resolution":"milliseconds","locale":""}}}, # noqa
# now convert the mappings into the .kibana format
field_cache = []
for (index_name, val) in iteritems(es_mappings):
if index_name != self.index: # only get non-'.kibana' indices
# self.pr_dbg("index: %s" % index_name)
m_dict = es_mappings[index_name]['mappings']
# self.pr_dbg('m_dict %s' % m_dict)
mappings = self.get_index_mappings(m_dict)
# self.pr_dbg('mappings %s' % mappings)
field_cache.extend(mappings)
field_cache = self.dedup_field_cache(field_cache)
return field_cache
self.pr_err("Unknown cache type: %s" % cache_type)
return None
def dedup_field_cache(self, field_cache):
deduped = []
fields_found = {}
for field in field_cache:
name = field['name']
if name not in fields_found:
deduped.append(field)
fields_found[name] = field
elif fields_found[name] != field:
self.pr_dbg("Dup field doesn't match")
self.pr_dbg("1st found: %s" % fields_found[name])
self.pr_dbg(" Dup one: %s" % field)
# else ignore, pass
return deduped
def post_field_cache(self, field_cache):
"""Where field_cache is a list of fields' mappings"""
index_pattern = self.field_cache_to_index_pattern(field_cache)
# self.pr_dbg("request/post: %s" % index_pattern)
resp = requests.post(self.post_url, data=index_pattern).text
# resp = {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":1,"created":true} # noqa
resp = json.loads(resp)
return 0
# TODO detect failure (return 1)
def field_cache_to_index_pattern(self, field_cache):
"""Return a .kibana index-pattern doc_type"""
mapping_dict = {}
mapping_dict['customFormats'] = "{}"
mapping_dict['title'] = self.index_pattern
# now post the data into .kibana
mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))
# in order to post, we need to create the post string
mapping_str = json.dumps(mapping_dict, separators=(',', ':'))
return mapping_str
def check_mapping(self, m):
"""Assert minimum set of fields in cache, does not validate contents"""
if 'name' not in m:
self.pr_dbg("Missing %s" % "name")
return False
# self.pr_dbg("Checking %s" % m['name'])
for x in ['analyzed', 'indexed', 'type', 'scripted', 'count']:
if x not in m or m[x] == "":
self.pr_dbg("Missing %s" % x)
self.pr_dbg("Full %s" % m)
return False
if 'doc_values' not in m or m['doc_values'] == "":
if not m['name'].startswith('_'):
self.pr_dbg("Missing %s" % "doc_values")
return False
m['doc_values'] = False
return True
def get_index_mappings(self, index):
"""Converts all index's doc_types to .kibana"""
fields_arr = []
for (key, val) in iteritems(index):
# self.pr_dbg("\tdoc_type: %s" % key)
doc_mapping = self.get_doc_type_mappings(index[key])
# self.pr_dbg("\tdoc_mapping: %s" % doc_mapping)
if doc_mapping is None:
return None
# keep adding to the fields array
fields_arr.extend(doc_mapping)
return fields_arr
def get_doc_type_mappings(self, doc_type):
"""Converts all doc_types' fields to .kibana"""
doc_fields_arr = []
found_score = False
for (key, val) in iteritems(doc_type):
# self.pr_dbg("\t\tfield: %s" % key)
# self.pr_dbg("\tval: %s" % val)
add_it = False
retdict = {}
# _ are system
if not key.startswith('_'):
if 'mapping' not in doc_type[key]:
self.pr_err("No mapping in doc_type[%s]" % key)
return None
if key in doc_type[key]['mapping']:
subkey_name = key
else:
subkey_name = re.sub('.*\.', '', key)
if subkey_name not in doc_type[key]['mapping']:
self.pr_err(
"Couldn't find subkey " +
"doc_type[%s]['mapping'][%s]" % (key, subkey_name))
return None
# self.pr_dbg("\t\tsubkey_name: %s" % subkey_name)
retdict = self.get_field_mappings(
doc_type[key]['mapping'][subkey_name])
add_it = True
# system mappings don't list a type,
# but kibana makes them all strings
if key in self.sys_mappings:
retdict['analyzed'] = False
retdict['indexed'] = False
if key == '_source':
retdict = self.get_field_mappings(
doc_type[key]['mapping'][key])
retdict['type'] = "_source"
elif key == '_score':
retdict['type'] = "number"
elif 'type' not in retdict:
retdict['type'] = "string"
add_it = True
if add_it:
retdict['name'] = key
retdict['count'] = 0 # always init to 0
retdict['scripted'] = False # I haven't observed a True yet
if not self.check_mapping(retdict):
self.pr_err("Error, invalid mapping")
return None
# the fields element is an escaped array of json
# make the array here, after all collected, then escape it
doc_fields_arr.append(retdict)
if not found_score:
doc_fields_arr.append(
{"name": "_score",
"type": "number",
"count": 0,
"scripted": False,
"indexed": False,
"analyzed": False,
"doc_values": False})
return doc_fields_arr
def get_field_mappings(self, field):
"""Converts ES field mappings to .kibana field mappings"""
retdict = {}
retdict['indexed'] = False
retdict['analyzed'] = False
for (key, val) in iteritems(field):
if key in self.mappings:
if (key == 'type' and
(val == "long" or
val == "integer" or
val == "double" or
val == "float")):
val = "number"
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
retdict[key] = val
if key == 'index' and val != "no":
retdict['indexed'] = True
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
if val == "analyzed":
retdict['analyzed'] = True
return retdict
def refresh_poll(self, period):
self.poll_another = True
while self.poll_another:
self.do_refresh()
self.pr_inf("Polling again in %s secs" % period)
try:
time.sleep(period)
except KeyboardInterrupt:
self.poll_another = False
def needs_refresh(self):
es_cache = self.get_field_cache('es')
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
return True
return False
def do_refresh(self, force=False):
es_cache = self.get_field_cache('es')
if force:
self.pr_inf("Forcing mapping update")
# no need to get kibana if we are forcing it
return self.post_field_cache(es_cache)
k_cache = self.get_field_cache('kibana')
if self.is_kibana_cache_incomplete(es_cache, k_cache):
self.pr_inf("Mapping is incomplete, doing update")
return self.post_field_cache(es_cache)
self.pr_inf("Mapping is correct, no refresh needed")
return 0
def is_kibana_cache_incomplete(self, es_cache, k_cache):
"""Test if k_cache is incomplete
Assume k_cache is always correct, but could be missing new
fields that es_cache has
"""
# convert list into dict, with each item's ['name'] as key
k_dict = {}
for field in k_cache:
# self.pr_dbg("field: %s" % field)
k_dict[field['name']] = field
for ign_f in self.mappings_ignore:
k_dict[field['name']][ign_f] = 0
es_dict = {}
for field in es_cache:
es_dict[field['name']] = field
for ign_f in self.mappings_ignore:
es_dict[field['name']][ign_f] = 0
es_set = set(es_dict.keys())
k_set = set(k_dict.keys())
# reasons why kibana cache could be incomplete:
# k_dict is missing keys that are within es_dict
# We don't care if k has keys that es doesn't
# es {1,2} k {1,2,3}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {1,2}; intersection {1,2}; len(es-{}) 0
# es {1,2} k {}; intersection {}; len(es-{}) 2
# es {1,2} k {1}; intersection {1}; len(es-{}) 1
# es {2,3} k {1}; intersection {}; len(es-{}) 2
# es {2,3} k {1,2}; intersection {2}; len(es-{}) 1
return len(es_set - k_set.intersection(es_set)) > 0
def list_to_compare_dict(self, list_form):
"""Convert list into a data structure we can query easier"""
compare_dict = {}
for field in list_form:
if field['name'] in compare_dict:
self.pr_dbg("List has duplicate field %s:\n%s" %
(field['name'], compare_dict[field['name']]))
if compare_dict[field['name']] != field:
self.pr_dbg("And values are different:\n%s" % field)
return None
compare_dict[field['name']] = field
for ign_f in self.mappings_ignore:
compare_dict[field['name']][ign_f] = 0
return compare_dict
def test_cache(self):
"""Test if this code is equiv to Kibana.refreshFields()
Within Kibana GUI click refreshFields, then either:
* self.test_cache()
* vagrant ssh -c "python -c \"
import kibana; kibana.DotKibana('aaa*').mapping.test_cache()\""
"""
es_cache = self.get_field_cache(cache_type='es')
# self.pr_dbg(json.dumps(es_cache))
kibana_cache = self.get_field_cache(cache_type='kibana')
# self.pr_dbg(json.dumps(kibana_cache))
return self.compare_field_caches(es_cache, kibana_cache)
|
rfarley3/Kibana
|
kibana/manager.py
|
KibanaManager.put_object
|
python
|
def put_object(self, obj):
# TODO consider putting into a ES class
self.pr_dbg('put_obj: %s' % self.json_dumps(obj))
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object, no index")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object, no _id")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object, no _type")
if obj['_source'] is None or obj['_source'] == "":
raise Exception("Invalid Object, no _source")
self.connect_es()
self.es.indices.create(index=obj['_index'], ignore=400, timeout="2m")
try:
resp = self.es.index(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'],
body=obj['_source'], timeout="2m")
except RequestError as e:
self.pr_err('RequestError: %s, info: %s' % (e.error, e.info))
raise
return resp
|
Wrapper for es.index, determines metadata needed to index from obj.
If you have a raw object json string you can hard code these:
index is .kibana (as of kibana4);
id can be A-Za-z0-9\- and must be unique;
doc_type is either visualization, dashboard, search
or for settings docs: config, or index-pattern.
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/manager.py#L105-L134
|
[
"def pr_dbg(self, msg):\n if self.debug:\n print('[DBG] Manager %s' % msg)\n",
"def pr_err(self, msg):\n print('[ERR] Manager %s' % msg)\n",
"def connect_es(self):\n if self.es is not None:\n return\n self.es = Elasticsearch(\n [{'host': self._host_ip, 'port': self._host_port}])\n",
"def json_dumps(self, obj):\n \"\"\"Serializer for consistency\"\"\"\n return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))\n"
] |
class KibanaManager():
"""Import/Export Kibana objects"""
def __init__(self, index, host, debug=False):
self._host_ip = host[0]
self._host_port = host[1]
self.index = index
self.es = None
self.max_hits = 9999
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Manager %s' % msg)
def pr_inf(self, msg):
print('[INF] Manager %s' % msg)
def pr_err(self, msg):
print('[ERR] Manager %s' % msg)
@property
def host(self):
return (self._host_ip, self._host_port)
@host.setter
def host_setter(self, host):
self._host_ip = host[0]
self._host_port = host[1]
def connect_es(self):
if self.es is not None:
return
self.es = Elasticsearch(
[{'host': self._host_ip, 'port': self._host_port}])
def read_object_from_file(self, filename):
self.pr_inf("Reading object from file: " + filename)
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def read_pkg_from_file(self, filename):
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def put_pkg(self, objs):
for obj in objs:
self.put_object(obj)
def put_objects(self, objects):
for name, obj in iteritems(objects):
self.put_object(obj)
def del_object(self, obj):
"""Debug deletes obj of obj[_type] with id of obj['_id']"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object")
self.connect_es()
self.es.delete(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'])
def del_objects(self, objects):
for name, obj in iteritems(objects):
self.del_object(obj)
def json_dumps(self, obj):
"""Serializer for consistency"""
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
def safe_filename(self, otype, oid):
"""Santize obj name into fname and verify doesn't already exist"""
permitted = set(['_', '-', '(', ')'])
oid = ''.join([c for c in oid if c.isalnum() or c in permitted])
while oid.find('--') != -1:
oid = oid.replace('--', '-')
ext = 'json'
ts = datetime.now().strftime("%Y%m%dT%H%M%S")
fname = ''
is_new = False
while not is_new:
oid_len = 255 - len('%s--%s.%s' % (otype, ts, ext))
fname = '%s-%s-%s.%s' % (otype, oid[:oid_len], ts, ext)
is_new = True
if os.path.exists(fname):
is_new = False
ts += '-bck'
return fname
def write_object_to_file(self, obj, path='.', filename=None):
"""Convert obj (dict) to json string and write to file"""
output = self.json_dumps(obj) + '\n'
if filename is None:
filename = self.safe_filename(obj['_type'], obj['_id'])
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
# self.pr_dbg("Contents: " + output)
return filename
def write_objects_to_file(self, objects, path='.'):
for name, obj in iteritems(objects):
self.write_object_to_file(obj, path)
def write_pkg_to_file(self, name, objects, path='.', filename=None):
"""Write a list of related objs to file"""
# Kibana uses an array of docs, do the same
# as opposed to a dict of docs
pkg_objs = []
for _, obj in iteritems(objects):
pkg_objs.append(obj)
sorted_pkg = sorted(pkg_objs, key=lambda k: k['_id'])
output = self.json_dumps(sorted_pkg) + '\n'
if filename is None:
filename = self.safe_filename('Pkg', name)
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
return filename
def get_objects(self, search_field, search_val):
"""Return all objects of type (assumes < MAX_HITS)"""
query = ("{ size: " + str(self.max_hits) + ", " +
"query: { filtered: { filter: { " +
search_field + ": { value: \"" + search_val + "\"" +
" } } } } } }")
self.connect_es()
res = self.es.search(index=self.index, body=query)
# self.pr_dbg("%d Hits:" % res['hits']['total'])
objects = {}
for doc in res['hits']['hits']:
objects[doc['_id']] = {}
# To make uploading easier in the future:
# Record all those bits into the backup.
# Mimics how ES returns the result.
# Prevents having to store this in some external, contrived, format
objects[doc['_id']]['_index'] = self.index # also in doc['_index']
objects[doc['_id']]['_type'] = doc['_type']
objects[doc['_id']]['_id'] = doc['_id']
objects[doc['_id']]['_source'] = doc['_source'] # the actual result
return objects
def get_config(self):
""" Wrapper for get_objects to collect config; skips index-pattern"""
return self.get_objects("type", "config")
def get_visualizations(self):
"""Wrapper for get_objects to collect all visualizations"""
return self.get_objects("type", "visualization")
def get_dashboards(self):
"""Wrapper for get_objects to collect all dashboards"""
return self.get_objects("type", "dashboard")
def get_searches(self):
"""Wrapper for get_objects to collect all saved searches"""
return self.get_objects("type", "search")
def get_dashboard_full(self, db_name):
"""Get DB and all objs needed to duplicate it"""
objects = {}
dashboards = self.get_objects("type", "dashboard")
vizs = self.get_objects("type", "visualization")
searches = self.get_objects("type", "search")
if db_name not in dashboards:
return None
self.pr_inf("Found dashboard: " + db_name)
objects[db_name] = dashboards[db_name]
panels = json.loads(dashboards[db_name]['_source']['panelsJSON'])
for panel in panels:
if 'id' not in panel:
continue
pid = panel['id']
if pid in searches:
self.pr_inf("Found search: " + pid)
objects[pid] = searches[pid]
elif pid in vizs:
self.pr_inf("Found vis: " + pid)
objects[pid] = vizs[pid]
emb = vizs[pid].get('_source', {}).get('savedSearchId', None)
if emb is not None and emb not in objects:
if emb not in searches:
self.pr_err('Missing search %s' % emb)
return objects
objects[emb] = searches[emb]
return objects
|
rfarley3/Kibana
|
kibana/manager.py
|
KibanaManager.del_object
|
python
|
def del_object(self, obj):
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object")
self.connect_es()
self.es.delete(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'])
|
Debug deletes obj of obj[_type] with id of obj['_id']
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/manager.py#L144-L155
|
[
"def connect_es(self):\n if self.es is not None:\n return\n self.es = Elasticsearch(\n [{'host': self._host_ip, 'port': self._host_port}])\n"
] |
class KibanaManager():
"""Import/Export Kibana objects"""
def __init__(self, index, host, debug=False):
self._host_ip = host[0]
self._host_port = host[1]
self.index = index
self.es = None
self.max_hits = 9999
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Manager %s' % msg)
def pr_inf(self, msg):
print('[INF] Manager %s' % msg)
def pr_err(self, msg):
print('[ERR] Manager %s' % msg)
@property
def host(self):
return (self._host_ip, self._host_port)
@host.setter
def host_setter(self, host):
self._host_ip = host[0]
self._host_port = host[1]
def connect_es(self):
if self.es is not None:
return
self.es = Elasticsearch(
[{'host': self._host_ip, 'port': self._host_port}])
def read_object_from_file(self, filename):
self.pr_inf("Reading object from file: " + filename)
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def read_pkg_from_file(self, filename):
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def put_object(self, obj):
# TODO consider putting into a ES class
self.pr_dbg('put_obj: %s' % self.json_dumps(obj))
"""
Wrapper for es.index, determines metadata needed to index from obj.
If you have a raw object json string you can hard code these:
index is .kibana (as of kibana4);
id can be A-Za-z0-9\- and must be unique;
doc_type is either visualization, dashboard, search
or for settings docs: config, or index-pattern.
"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object, no index")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object, no _id")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object, no _type")
if obj['_source'] is None or obj['_source'] == "":
raise Exception("Invalid Object, no _source")
self.connect_es()
self.es.indices.create(index=obj['_index'], ignore=400, timeout="2m")
try:
resp = self.es.index(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'],
body=obj['_source'], timeout="2m")
except RequestError as e:
self.pr_err('RequestError: %s, info: %s' % (e.error, e.info))
raise
return resp
def put_pkg(self, objs):
for obj in objs:
self.put_object(obj)
def put_objects(self, objects):
for name, obj in iteritems(objects):
self.put_object(obj)
def del_objects(self, objects):
for name, obj in iteritems(objects):
self.del_object(obj)
def json_dumps(self, obj):
"""Serializer for consistency"""
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
def safe_filename(self, otype, oid):
"""Santize obj name into fname and verify doesn't already exist"""
permitted = set(['_', '-', '(', ')'])
oid = ''.join([c for c in oid if c.isalnum() or c in permitted])
while oid.find('--') != -1:
oid = oid.replace('--', '-')
ext = 'json'
ts = datetime.now().strftime("%Y%m%dT%H%M%S")
fname = ''
is_new = False
while not is_new:
oid_len = 255 - len('%s--%s.%s' % (otype, ts, ext))
fname = '%s-%s-%s.%s' % (otype, oid[:oid_len], ts, ext)
is_new = True
if os.path.exists(fname):
is_new = False
ts += '-bck'
return fname
def write_object_to_file(self, obj, path='.', filename=None):
"""Convert obj (dict) to json string and write to file"""
output = self.json_dumps(obj) + '\n'
if filename is None:
filename = self.safe_filename(obj['_type'], obj['_id'])
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
# self.pr_dbg("Contents: " + output)
return filename
def write_objects_to_file(self, objects, path='.'):
for name, obj in iteritems(objects):
self.write_object_to_file(obj, path)
def write_pkg_to_file(self, name, objects, path='.', filename=None):
"""Write a list of related objs to file"""
# Kibana uses an array of docs, do the same
# as opposed to a dict of docs
pkg_objs = []
for _, obj in iteritems(objects):
pkg_objs.append(obj)
sorted_pkg = sorted(pkg_objs, key=lambda k: k['_id'])
output = self.json_dumps(sorted_pkg) + '\n'
if filename is None:
filename = self.safe_filename('Pkg', name)
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
return filename
def get_objects(self, search_field, search_val):
"""Return all objects of type (assumes < MAX_HITS)"""
query = ("{ size: " + str(self.max_hits) + ", " +
"query: { filtered: { filter: { " +
search_field + ": { value: \"" + search_val + "\"" +
" } } } } } }")
self.connect_es()
res = self.es.search(index=self.index, body=query)
# self.pr_dbg("%d Hits:" % res['hits']['total'])
objects = {}
for doc in res['hits']['hits']:
objects[doc['_id']] = {}
# To make uploading easier in the future:
# Record all those bits into the backup.
# Mimics how ES returns the result.
# Prevents having to store this in some external, contrived, format
objects[doc['_id']]['_index'] = self.index # also in doc['_index']
objects[doc['_id']]['_type'] = doc['_type']
objects[doc['_id']]['_id'] = doc['_id']
objects[doc['_id']]['_source'] = doc['_source'] # the actual result
return objects
def get_config(self):
""" Wrapper for get_objects to collect config; skips index-pattern"""
return self.get_objects("type", "config")
def get_visualizations(self):
"""Wrapper for get_objects to collect all visualizations"""
return self.get_objects("type", "visualization")
def get_dashboards(self):
"""Wrapper for get_objects to collect all dashboards"""
return self.get_objects("type", "dashboard")
def get_searches(self):
"""Wrapper for get_objects to collect all saved searches"""
return self.get_objects("type", "search")
def get_dashboard_full(self, db_name):
"""Get DB and all objs needed to duplicate it"""
objects = {}
dashboards = self.get_objects("type", "dashboard")
vizs = self.get_objects("type", "visualization")
searches = self.get_objects("type", "search")
if db_name not in dashboards:
return None
self.pr_inf("Found dashboard: " + db_name)
objects[db_name] = dashboards[db_name]
panels = json.loads(dashboards[db_name]['_source']['panelsJSON'])
for panel in panels:
if 'id' not in panel:
continue
pid = panel['id']
if pid in searches:
self.pr_inf("Found search: " + pid)
objects[pid] = searches[pid]
elif pid in vizs:
self.pr_inf("Found vis: " + pid)
objects[pid] = vizs[pid]
emb = vizs[pid].get('_source', {}).get('savedSearchId', None)
if emb is not None and emb not in objects:
if emb not in searches:
self.pr_err('Missing search %s' % emb)
return objects
objects[emb] = searches[emb]
return objects
|
rfarley3/Kibana
|
kibana/manager.py
|
KibanaManager.json_dumps
|
python
|
def json_dumps(self, obj):
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
|
Serializer for consistency
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/manager.py#L161-L163
| null |
class KibanaManager():
"""Import/Export Kibana objects"""
def __init__(self, index, host, debug=False):
self._host_ip = host[0]
self._host_port = host[1]
self.index = index
self.es = None
self.max_hits = 9999
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Manager %s' % msg)
def pr_inf(self, msg):
print('[INF] Manager %s' % msg)
def pr_err(self, msg):
print('[ERR] Manager %s' % msg)
@property
def host(self):
return (self._host_ip, self._host_port)
@host.setter
def host_setter(self, host):
self._host_ip = host[0]
self._host_port = host[1]
def connect_es(self):
if self.es is not None:
return
self.es = Elasticsearch(
[{'host': self._host_ip, 'port': self._host_port}])
def read_object_from_file(self, filename):
self.pr_inf("Reading object from file: " + filename)
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def read_pkg_from_file(self, filename):
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def put_object(self, obj):
# TODO consider putting into a ES class
self.pr_dbg('put_obj: %s' % self.json_dumps(obj))
"""
Wrapper for es.index, determines metadata needed to index from obj.
If you have a raw object json string you can hard code these:
index is .kibana (as of kibana4);
id can be A-Za-z0-9\- and must be unique;
doc_type is either visualization, dashboard, search
or for settings docs: config, or index-pattern.
"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object, no index")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object, no _id")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object, no _type")
if obj['_source'] is None or obj['_source'] == "":
raise Exception("Invalid Object, no _source")
self.connect_es()
self.es.indices.create(index=obj['_index'], ignore=400, timeout="2m")
try:
resp = self.es.index(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'],
body=obj['_source'], timeout="2m")
except RequestError as e:
self.pr_err('RequestError: %s, info: %s' % (e.error, e.info))
raise
return resp
def put_pkg(self, objs):
for obj in objs:
self.put_object(obj)
def put_objects(self, objects):
for name, obj in iteritems(objects):
self.put_object(obj)
def del_object(self, obj):
"""Debug deletes obj of obj[_type] with id of obj['_id']"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object")
self.connect_es()
self.es.delete(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'])
def del_objects(self, objects):
for name, obj in iteritems(objects):
self.del_object(obj)
def safe_filename(self, otype, oid):
"""Santize obj name into fname and verify doesn't already exist"""
permitted = set(['_', '-', '(', ')'])
oid = ''.join([c for c in oid if c.isalnum() or c in permitted])
while oid.find('--') != -1:
oid = oid.replace('--', '-')
ext = 'json'
ts = datetime.now().strftime("%Y%m%dT%H%M%S")
fname = ''
is_new = False
while not is_new:
oid_len = 255 - len('%s--%s.%s' % (otype, ts, ext))
fname = '%s-%s-%s.%s' % (otype, oid[:oid_len], ts, ext)
is_new = True
if os.path.exists(fname):
is_new = False
ts += '-bck'
return fname
def write_object_to_file(self, obj, path='.', filename=None):
"""Convert obj (dict) to json string and write to file"""
output = self.json_dumps(obj) + '\n'
if filename is None:
filename = self.safe_filename(obj['_type'], obj['_id'])
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
# self.pr_dbg("Contents: " + output)
return filename
def write_objects_to_file(self, objects, path='.'):
for name, obj in iteritems(objects):
self.write_object_to_file(obj, path)
def write_pkg_to_file(self, name, objects, path='.', filename=None):
"""Write a list of related objs to file"""
# Kibana uses an array of docs, do the same
# as opposed to a dict of docs
pkg_objs = []
for _, obj in iteritems(objects):
pkg_objs.append(obj)
sorted_pkg = sorted(pkg_objs, key=lambda k: k['_id'])
output = self.json_dumps(sorted_pkg) + '\n'
if filename is None:
filename = self.safe_filename('Pkg', name)
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
return filename
def get_objects(self, search_field, search_val):
"""Return all objects of type (assumes < MAX_HITS)"""
query = ("{ size: " + str(self.max_hits) + ", " +
"query: { filtered: { filter: { " +
search_field + ": { value: \"" + search_val + "\"" +
" } } } } } }")
self.connect_es()
res = self.es.search(index=self.index, body=query)
# self.pr_dbg("%d Hits:" % res['hits']['total'])
objects = {}
for doc in res['hits']['hits']:
objects[doc['_id']] = {}
# To make uploading easier in the future:
# Record all those bits into the backup.
# Mimics how ES returns the result.
# Prevents having to store this in some external, contrived, format
objects[doc['_id']]['_index'] = self.index # also in doc['_index']
objects[doc['_id']]['_type'] = doc['_type']
objects[doc['_id']]['_id'] = doc['_id']
objects[doc['_id']]['_source'] = doc['_source'] # the actual result
return objects
def get_config(self):
""" Wrapper for get_objects to collect config; skips index-pattern"""
return self.get_objects("type", "config")
def get_visualizations(self):
"""Wrapper for get_objects to collect all visualizations"""
return self.get_objects("type", "visualization")
def get_dashboards(self):
"""Wrapper for get_objects to collect all dashboards"""
return self.get_objects("type", "dashboard")
def get_searches(self):
"""Wrapper for get_objects to collect all saved searches"""
return self.get_objects("type", "search")
def get_dashboard_full(self, db_name):
"""Get DB and all objs needed to duplicate it"""
objects = {}
dashboards = self.get_objects("type", "dashboard")
vizs = self.get_objects("type", "visualization")
searches = self.get_objects("type", "search")
if db_name not in dashboards:
return None
self.pr_inf("Found dashboard: " + db_name)
objects[db_name] = dashboards[db_name]
panels = json.loads(dashboards[db_name]['_source']['panelsJSON'])
for panel in panels:
if 'id' not in panel:
continue
pid = panel['id']
if pid in searches:
self.pr_inf("Found search: " + pid)
objects[pid] = searches[pid]
elif pid in vizs:
self.pr_inf("Found vis: " + pid)
objects[pid] = vizs[pid]
emb = vizs[pid].get('_source', {}).get('savedSearchId', None)
if emb is not None and emb not in objects:
if emb not in searches:
self.pr_err('Missing search %s' % emb)
return objects
objects[emb] = searches[emb]
return objects
|
rfarley3/Kibana
|
kibana/manager.py
|
KibanaManager.safe_filename
|
python
|
def safe_filename(self, otype, oid):
permitted = set(['_', '-', '(', ')'])
oid = ''.join([c for c in oid if c.isalnum() or c in permitted])
while oid.find('--') != -1:
oid = oid.replace('--', '-')
ext = 'json'
ts = datetime.now().strftime("%Y%m%dT%H%M%S")
fname = ''
is_new = False
while not is_new:
oid_len = 255 - len('%s--%s.%s' % (otype, ts, ext))
fname = '%s-%s-%s.%s' % (otype, oid[:oid_len], ts, ext)
is_new = True
if os.path.exists(fname):
is_new = False
ts += '-bck'
return fname
|
Santize obj name into fname and verify doesn't already exist
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/manager.py#L165-L182
| null |
class KibanaManager():
"""Import/Export Kibana objects"""
def __init__(self, index, host, debug=False):
self._host_ip = host[0]
self._host_port = host[1]
self.index = index
self.es = None
self.max_hits = 9999
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Manager %s' % msg)
def pr_inf(self, msg):
print('[INF] Manager %s' % msg)
def pr_err(self, msg):
print('[ERR] Manager %s' % msg)
@property
def host(self):
return (self._host_ip, self._host_port)
@host.setter
def host_setter(self, host):
self._host_ip = host[0]
self._host_port = host[1]
def connect_es(self):
if self.es is not None:
return
self.es = Elasticsearch(
[{'host': self._host_ip, 'port': self._host_port}])
def read_object_from_file(self, filename):
self.pr_inf("Reading object from file: " + filename)
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def read_pkg_from_file(self, filename):
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def put_object(self, obj):
# TODO consider putting into a ES class
self.pr_dbg('put_obj: %s' % self.json_dumps(obj))
"""
Wrapper for es.index, determines metadata needed to index from obj.
If you have a raw object json string you can hard code these:
index is .kibana (as of kibana4);
id can be A-Za-z0-9\- and must be unique;
doc_type is either visualization, dashboard, search
or for settings docs: config, or index-pattern.
"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object, no index")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object, no _id")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object, no _type")
if obj['_source'] is None or obj['_source'] == "":
raise Exception("Invalid Object, no _source")
self.connect_es()
self.es.indices.create(index=obj['_index'], ignore=400, timeout="2m")
try:
resp = self.es.index(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'],
body=obj['_source'], timeout="2m")
except RequestError as e:
self.pr_err('RequestError: %s, info: %s' % (e.error, e.info))
raise
return resp
def put_pkg(self, objs):
for obj in objs:
self.put_object(obj)
def put_objects(self, objects):
for name, obj in iteritems(objects):
self.put_object(obj)
def del_object(self, obj):
"""Debug deletes obj of obj[_type] with id of obj['_id']"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object")
self.connect_es()
self.es.delete(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'])
def del_objects(self, objects):
for name, obj in iteritems(objects):
self.del_object(obj)
def json_dumps(self, obj):
"""Serializer for consistency"""
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
def write_object_to_file(self, obj, path='.', filename=None):
"""Convert obj (dict) to json string and write to file"""
output = self.json_dumps(obj) + '\n'
if filename is None:
filename = self.safe_filename(obj['_type'], obj['_id'])
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
# self.pr_dbg("Contents: " + output)
return filename
def write_objects_to_file(self, objects, path='.'):
for name, obj in iteritems(objects):
self.write_object_to_file(obj, path)
def write_pkg_to_file(self, name, objects, path='.', filename=None):
"""Write a list of related objs to file"""
# Kibana uses an array of docs, do the same
# as opposed to a dict of docs
pkg_objs = []
for _, obj in iteritems(objects):
pkg_objs.append(obj)
sorted_pkg = sorted(pkg_objs, key=lambda k: k['_id'])
output = self.json_dumps(sorted_pkg) + '\n'
if filename is None:
filename = self.safe_filename('Pkg', name)
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
return filename
def get_objects(self, search_field, search_val):
"""Return all objects of type (assumes < MAX_HITS)"""
query = ("{ size: " + str(self.max_hits) + ", " +
"query: { filtered: { filter: { " +
search_field + ": { value: \"" + search_val + "\"" +
" } } } } } }")
self.connect_es()
res = self.es.search(index=self.index, body=query)
# self.pr_dbg("%d Hits:" % res['hits']['total'])
objects = {}
for doc in res['hits']['hits']:
objects[doc['_id']] = {}
# To make uploading easier in the future:
# Record all those bits into the backup.
# Mimics how ES returns the result.
# Prevents having to store this in some external, contrived, format
objects[doc['_id']]['_index'] = self.index # also in doc['_index']
objects[doc['_id']]['_type'] = doc['_type']
objects[doc['_id']]['_id'] = doc['_id']
objects[doc['_id']]['_source'] = doc['_source'] # the actual result
return objects
def get_config(self):
""" Wrapper for get_objects to collect config; skips index-pattern"""
return self.get_objects("type", "config")
def get_visualizations(self):
"""Wrapper for get_objects to collect all visualizations"""
return self.get_objects("type", "visualization")
def get_dashboards(self):
"""Wrapper for get_objects to collect all dashboards"""
return self.get_objects("type", "dashboard")
def get_searches(self):
"""Wrapper for get_objects to collect all saved searches"""
return self.get_objects("type", "search")
def get_dashboard_full(self, db_name):
"""Get DB and all objs needed to duplicate it"""
objects = {}
dashboards = self.get_objects("type", "dashboard")
vizs = self.get_objects("type", "visualization")
searches = self.get_objects("type", "search")
if db_name not in dashboards:
return None
self.pr_inf("Found dashboard: " + db_name)
objects[db_name] = dashboards[db_name]
panels = json.loads(dashboards[db_name]['_source']['panelsJSON'])
for panel in panels:
if 'id' not in panel:
continue
pid = panel['id']
if pid in searches:
self.pr_inf("Found search: " + pid)
objects[pid] = searches[pid]
elif pid in vizs:
self.pr_inf("Found vis: " + pid)
objects[pid] = vizs[pid]
emb = vizs[pid].get('_source', {}).get('savedSearchId', None)
if emb is not None and emb not in objects:
if emb not in searches:
self.pr_err('Missing search %s' % emb)
return objects
objects[emb] = searches[emb]
return objects
|
rfarley3/Kibana
|
kibana/manager.py
|
KibanaManager.write_object_to_file
|
python
|
def write_object_to_file(self, obj, path='.', filename=None):
output = self.json_dumps(obj) + '\n'
if filename is None:
filename = self.safe_filename(obj['_type'], obj['_id'])
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
# self.pr_dbg("Contents: " + output)
return filename
|
Convert obj (dict) to json string and write to file
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/manager.py#L184-L194
|
[
"def pr_inf(self, msg):\n print('[INF] Manager %s' % msg)\n",
"def json_dumps(self, obj):\n \"\"\"Serializer for consistency\"\"\"\n return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))\n",
"def safe_filename(self, otype, oid):\n \"\"\"Santize obj name into fname and verify doesn't already exist\"\"\"\n permitted = set(['_', '-', '(', ')'])\n oid = ''.join([c for c in oid if c.isalnum() or c in permitted])\n while oid.find('--') != -1:\n oid = oid.replace('--', '-')\n ext = 'json'\n ts = datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n fname = ''\n is_new = False\n while not is_new:\n oid_len = 255 - len('%s--%s.%s' % (otype, ts, ext))\n fname = '%s-%s-%s.%s' % (otype, oid[:oid_len], ts, ext)\n is_new = True\n if os.path.exists(fname):\n is_new = False\n ts += '-bck'\n return fname\n"
] |
class KibanaManager():
"""Import/Export Kibana objects"""
def __init__(self, index, host, debug=False):
self._host_ip = host[0]
self._host_port = host[1]
self.index = index
self.es = None
self.max_hits = 9999
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Manager %s' % msg)
def pr_inf(self, msg):
print('[INF] Manager %s' % msg)
def pr_err(self, msg):
print('[ERR] Manager %s' % msg)
@property
def host(self):
return (self._host_ip, self._host_port)
@host.setter
def host_setter(self, host):
self._host_ip = host[0]
self._host_port = host[1]
def connect_es(self):
if self.es is not None:
return
self.es = Elasticsearch(
[{'host': self._host_ip, 'port': self._host_port}])
def read_object_from_file(self, filename):
self.pr_inf("Reading object from file: " + filename)
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def read_pkg_from_file(self, filename):
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def put_object(self, obj):
# TODO consider putting into a ES class
self.pr_dbg('put_obj: %s' % self.json_dumps(obj))
"""
Wrapper for es.index, determines metadata needed to index from obj.
If you have a raw object json string you can hard code these:
index is .kibana (as of kibana4);
id can be A-Za-z0-9\- and must be unique;
doc_type is either visualization, dashboard, search
or for settings docs: config, or index-pattern.
"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object, no index")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object, no _id")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object, no _type")
if obj['_source'] is None or obj['_source'] == "":
raise Exception("Invalid Object, no _source")
self.connect_es()
self.es.indices.create(index=obj['_index'], ignore=400, timeout="2m")
try:
resp = self.es.index(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'],
body=obj['_source'], timeout="2m")
except RequestError as e:
self.pr_err('RequestError: %s, info: %s' % (e.error, e.info))
raise
return resp
def put_pkg(self, objs):
for obj in objs:
self.put_object(obj)
def put_objects(self, objects):
for name, obj in iteritems(objects):
self.put_object(obj)
def del_object(self, obj):
"""Debug deletes obj of obj[_type] with id of obj['_id']"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object")
self.connect_es()
self.es.delete(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'])
def del_objects(self, objects):
for name, obj in iteritems(objects):
self.del_object(obj)
def json_dumps(self, obj):
"""Serializer for consistency"""
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
def safe_filename(self, otype, oid):
"""Santize obj name into fname and verify doesn't already exist"""
permitted = set(['_', '-', '(', ')'])
oid = ''.join([c for c in oid if c.isalnum() or c in permitted])
while oid.find('--') != -1:
oid = oid.replace('--', '-')
ext = 'json'
ts = datetime.now().strftime("%Y%m%dT%H%M%S")
fname = ''
is_new = False
while not is_new:
oid_len = 255 - len('%s--%s.%s' % (otype, ts, ext))
fname = '%s-%s-%s.%s' % (otype, oid[:oid_len], ts, ext)
is_new = True
if os.path.exists(fname):
is_new = False
ts += '-bck'
return fname
def write_objects_to_file(self, objects, path='.'):
for name, obj in iteritems(objects):
self.write_object_to_file(obj, path)
def write_pkg_to_file(self, name, objects, path='.', filename=None):
"""Write a list of related objs to file"""
# Kibana uses an array of docs, do the same
# as opposed to a dict of docs
pkg_objs = []
for _, obj in iteritems(objects):
pkg_objs.append(obj)
sorted_pkg = sorted(pkg_objs, key=lambda k: k['_id'])
output = self.json_dumps(sorted_pkg) + '\n'
if filename is None:
filename = self.safe_filename('Pkg', name)
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
return filename
def get_objects(self, search_field, search_val):
"""Return all objects of type (assumes < MAX_HITS)"""
query = ("{ size: " + str(self.max_hits) + ", " +
"query: { filtered: { filter: { " +
search_field + ": { value: \"" + search_val + "\"" +
" } } } } } }")
self.connect_es()
res = self.es.search(index=self.index, body=query)
# self.pr_dbg("%d Hits:" % res['hits']['total'])
objects = {}
for doc in res['hits']['hits']:
objects[doc['_id']] = {}
# To make uploading easier in the future:
# Record all those bits into the backup.
# Mimics how ES returns the result.
# Prevents having to store this in some external, contrived, format
objects[doc['_id']]['_index'] = self.index # also in doc['_index']
objects[doc['_id']]['_type'] = doc['_type']
objects[doc['_id']]['_id'] = doc['_id']
objects[doc['_id']]['_source'] = doc['_source'] # the actual result
return objects
def get_config(self):
""" Wrapper for get_objects to collect config; skips index-pattern"""
return self.get_objects("type", "config")
def get_visualizations(self):
"""Wrapper for get_objects to collect all visualizations"""
return self.get_objects("type", "visualization")
def get_dashboards(self):
"""Wrapper for get_objects to collect all dashboards"""
return self.get_objects("type", "dashboard")
def get_searches(self):
"""Wrapper for get_objects to collect all saved searches"""
return self.get_objects("type", "search")
def get_dashboard_full(self, db_name):
"""Get DB and all objs needed to duplicate it"""
objects = {}
dashboards = self.get_objects("type", "dashboard")
vizs = self.get_objects("type", "visualization")
searches = self.get_objects("type", "search")
if db_name not in dashboards:
return None
self.pr_inf("Found dashboard: " + db_name)
objects[db_name] = dashboards[db_name]
panels = json.loads(dashboards[db_name]['_source']['panelsJSON'])
for panel in panels:
if 'id' not in panel:
continue
pid = panel['id']
if pid in searches:
self.pr_inf("Found search: " + pid)
objects[pid] = searches[pid]
elif pid in vizs:
self.pr_inf("Found vis: " + pid)
objects[pid] = vizs[pid]
emb = vizs[pid].get('_source', {}).get('savedSearchId', None)
if emb is not None and emb not in objects:
if emb not in searches:
self.pr_err('Missing search %s' % emb)
return objects
objects[emb] = searches[emb]
return objects
|
rfarley3/Kibana
|
kibana/manager.py
|
KibanaManager.write_pkg_to_file
|
python
|
def write_pkg_to_file(self, name, objects, path='.', filename=None):
# Kibana uses an array of docs, do the same
# as opposed to a dict of docs
pkg_objs = []
for _, obj in iteritems(objects):
pkg_objs.append(obj)
sorted_pkg = sorted(pkg_objs, key=lambda k: k['_id'])
output = self.json_dumps(sorted_pkg) + '\n'
if filename is None:
filename = self.safe_filename('Pkg', name)
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
return filename
|
Write a list of related objs to file
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/manager.py#L200-L215
|
[
"def iteritems(d):\n if PY3:\n return d.items()\n else:\n return d.iteritems()\n",
"def pr_inf(self, msg):\n print('[INF] Manager %s' % msg)\n",
"def json_dumps(self, obj):\n \"\"\"Serializer for consistency\"\"\"\n return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))\n",
"def safe_filename(self, otype, oid):\n \"\"\"Santize obj name into fname and verify doesn't already exist\"\"\"\n permitted = set(['_', '-', '(', ')'])\n oid = ''.join([c for c in oid if c.isalnum() or c in permitted])\n while oid.find('--') != -1:\n oid = oid.replace('--', '-')\n ext = 'json'\n ts = datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n fname = ''\n is_new = False\n while not is_new:\n oid_len = 255 - len('%s--%s.%s' % (otype, ts, ext))\n fname = '%s-%s-%s.%s' % (otype, oid[:oid_len], ts, ext)\n is_new = True\n if os.path.exists(fname):\n is_new = False\n ts += '-bck'\n return fname\n"
] |
class KibanaManager():
"""Import/Export Kibana objects"""
def __init__(self, index, host, debug=False):
self._host_ip = host[0]
self._host_port = host[1]
self.index = index
self.es = None
self.max_hits = 9999
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Manager %s' % msg)
def pr_inf(self, msg):
print('[INF] Manager %s' % msg)
def pr_err(self, msg):
print('[ERR] Manager %s' % msg)
@property
def host(self):
return (self._host_ip, self._host_port)
@host.setter
def host_setter(self, host):
self._host_ip = host[0]
self._host_port = host[1]
def connect_es(self):
if self.es is not None:
return
self.es = Elasticsearch(
[{'host': self._host_ip, 'port': self._host_port}])
def read_object_from_file(self, filename):
self.pr_inf("Reading object from file: " + filename)
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def read_pkg_from_file(self, filename):
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def put_object(self, obj):
# TODO consider putting into a ES class
self.pr_dbg('put_obj: %s' % self.json_dumps(obj))
"""
Wrapper for es.index, determines metadata needed to index from obj.
If you have a raw object json string you can hard code these:
index is .kibana (as of kibana4);
id can be A-Za-z0-9\- and must be unique;
doc_type is either visualization, dashboard, search
or for settings docs: config, or index-pattern.
"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object, no index")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object, no _id")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object, no _type")
if obj['_source'] is None or obj['_source'] == "":
raise Exception("Invalid Object, no _source")
self.connect_es()
self.es.indices.create(index=obj['_index'], ignore=400, timeout="2m")
try:
resp = self.es.index(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'],
body=obj['_source'], timeout="2m")
except RequestError as e:
self.pr_err('RequestError: %s, info: %s' % (e.error, e.info))
raise
return resp
def put_pkg(self, objs):
for obj in objs:
self.put_object(obj)
def put_objects(self, objects):
for name, obj in iteritems(objects):
self.put_object(obj)
def del_object(self, obj):
"""Debug deletes obj of obj[_type] with id of obj['_id']"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object")
self.connect_es()
self.es.delete(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'])
def del_objects(self, objects):
for name, obj in iteritems(objects):
self.del_object(obj)
def json_dumps(self, obj):
"""Serializer for consistency"""
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
def safe_filename(self, otype, oid):
"""Santize obj name into fname and verify doesn't already exist"""
permitted = set(['_', '-', '(', ')'])
oid = ''.join([c for c in oid if c.isalnum() or c in permitted])
while oid.find('--') != -1:
oid = oid.replace('--', '-')
ext = 'json'
ts = datetime.now().strftime("%Y%m%dT%H%M%S")
fname = ''
is_new = False
while not is_new:
oid_len = 255 - len('%s--%s.%s' % (otype, ts, ext))
fname = '%s-%s-%s.%s' % (otype, oid[:oid_len], ts, ext)
is_new = True
if os.path.exists(fname):
is_new = False
ts += '-bck'
return fname
def write_object_to_file(self, obj, path='.', filename=None):
"""Convert obj (dict) to json string and write to file"""
output = self.json_dumps(obj) + '\n'
if filename is None:
filename = self.safe_filename(obj['_type'], obj['_id'])
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
# self.pr_dbg("Contents: " + output)
return filename
def write_objects_to_file(self, objects, path='.'):
for name, obj in iteritems(objects):
self.write_object_to_file(obj, path)
def get_objects(self, search_field, search_val):
"""Return all objects of type (assumes < MAX_HITS)"""
query = ("{ size: " + str(self.max_hits) + ", " +
"query: { filtered: { filter: { " +
search_field + ": { value: \"" + search_val + "\"" +
" } } } } } }")
self.connect_es()
res = self.es.search(index=self.index, body=query)
# self.pr_dbg("%d Hits:" % res['hits']['total'])
objects = {}
for doc in res['hits']['hits']:
objects[doc['_id']] = {}
# To make uploading easier in the future:
# Record all those bits into the backup.
# Mimics how ES returns the result.
# Prevents having to store this in some external, contrived, format
objects[doc['_id']]['_index'] = self.index # also in doc['_index']
objects[doc['_id']]['_type'] = doc['_type']
objects[doc['_id']]['_id'] = doc['_id']
objects[doc['_id']]['_source'] = doc['_source'] # the actual result
return objects
def get_config(self):
""" Wrapper for get_objects to collect config; skips index-pattern"""
return self.get_objects("type", "config")
def get_visualizations(self):
"""Wrapper for get_objects to collect all visualizations"""
return self.get_objects("type", "visualization")
def get_dashboards(self):
"""Wrapper for get_objects to collect all dashboards"""
return self.get_objects("type", "dashboard")
def get_searches(self):
"""Wrapper for get_objects to collect all saved searches"""
return self.get_objects("type", "search")
def get_dashboard_full(self, db_name):
"""Get DB and all objs needed to duplicate it"""
objects = {}
dashboards = self.get_objects("type", "dashboard")
vizs = self.get_objects("type", "visualization")
searches = self.get_objects("type", "search")
if db_name not in dashboards:
return None
self.pr_inf("Found dashboard: " + db_name)
objects[db_name] = dashboards[db_name]
panels = json.loads(dashboards[db_name]['_source']['panelsJSON'])
for panel in panels:
if 'id' not in panel:
continue
pid = panel['id']
if pid in searches:
self.pr_inf("Found search: " + pid)
objects[pid] = searches[pid]
elif pid in vizs:
self.pr_inf("Found vis: " + pid)
objects[pid] = vizs[pid]
emb = vizs[pid].get('_source', {}).get('savedSearchId', None)
if emb is not None and emb not in objects:
if emb not in searches:
self.pr_err('Missing search %s' % emb)
return objects
objects[emb] = searches[emb]
return objects
|
rfarley3/Kibana
|
kibana/manager.py
|
KibanaManager.get_objects
|
python
|
def get_objects(self, search_field, search_val):
query = ("{ size: " + str(self.max_hits) + ", " +
"query: { filtered: { filter: { " +
search_field + ": { value: \"" + search_val + "\"" +
" } } } } } }")
self.connect_es()
res = self.es.search(index=self.index, body=query)
# self.pr_dbg("%d Hits:" % res['hits']['total'])
objects = {}
for doc in res['hits']['hits']:
objects[doc['_id']] = {}
# To make uploading easier in the future:
# Record all those bits into the backup.
# Mimics how ES returns the result.
# Prevents having to store this in some external, contrived, format
objects[doc['_id']]['_index'] = self.index # also in doc['_index']
objects[doc['_id']]['_type'] = doc['_type']
objects[doc['_id']]['_id'] = doc['_id']
objects[doc['_id']]['_source'] = doc['_source'] # the actual result
return objects
|
Return all objects of type (assumes < MAX_HITS)
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/manager.py#L217-L237
|
[
"def connect_es(self):\n if self.es is not None:\n return\n self.es = Elasticsearch(\n [{'host': self._host_ip, 'port': self._host_port}])\n"
] |
class KibanaManager():
"""Import/Export Kibana objects"""
def __init__(self, index, host, debug=False):
self._host_ip = host[0]
self._host_port = host[1]
self.index = index
self.es = None
self.max_hits = 9999
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Manager %s' % msg)
def pr_inf(self, msg):
print('[INF] Manager %s' % msg)
def pr_err(self, msg):
print('[ERR] Manager %s' % msg)
@property
def host(self):
return (self._host_ip, self._host_port)
@host.setter
def host_setter(self, host):
self._host_ip = host[0]
self._host_port = host[1]
def connect_es(self):
if self.es is not None:
return
self.es = Elasticsearch(
[{'host': self._host_ip, 'port': self._host_port}])
def read_object_from_file(self, filename):
self.pr_inf("Reading object from file: " + filename)
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def read_pkg_from_file(self, filename):
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def put_object(self, obj):
# TODO consider putting into a ES class
self.pr_dbg('put_obj: %s' % self.json_dumps(obj))
"""
Wrapper for es.index, determines metadata needed to index from obj.
If you have a raw object json string you can hard code these:
index is .kibana (as of kibana4);
id can be A-Za-z0-9\- and must be unique;
doc_type is either visualization, dashboard, search
or for settings docs: config, or index-pattern.
"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object, no index")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object, no _id")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object, no _type")
if obj['_source'] is None or obj['_source'] == "":
raise Exception("Invalid Object, no _source")
self.connect_es()
self.es.indices.create(index=obj['_index'], ignore=400, timeout="2m")
try:
resp = self.es.index(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'],
body=obj['_source'], timeout="2m")
except RequestError as e:
self.pr_err('RequestError: %s, info: %s' % (e.error, e.info))
raise
return resp
def put_pkg(self, objs):
for obj in objs:
self.put_object(obj)
def put_objects(self, objects):
for name, obj in iteritems(objects):
self.put_object(obj)
def del_object(self, obj):
"""Debug deletes obj of obj[_type] with id of obj['_id']"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object")
self.connect_es()
self.es.delete(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'])
def del_objects(self, objects):
for name, obj in iteritems(objects):
self.del_object(obj)
def json_dumps(self, obj):
"""Serializer for consistency"""
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
def safe_filename(self, otype, oid):
"""Santize obj name into fname and verify doesn't already exist"""
permitted = set(['_', '-', '(', ')'])
oid = ''.join([c for c in oid if c.isalnum() or c in permitted])
while oid.find('--') != -1:
oid = oid.replace('--', '-')
ext = 'json'
ts = datetime.now().strftime("%Y%m%dT%H%M%S")
fname = ''
is_new = False
while not is_new:
oid_len = 255 - len('%s--%s.%s' % (otype, ts, ext))
fname = '%s-%s-%s.%s' % (otype, oid[:oid_len], ts, ext)
is_new = True
if os.path.exists(fname):
is_new = False
ts += '-bck'
return fname
def write_object_to_file(self, obj, path='.', filename=None):
"""Convert obj (dict) to json string and write to file"""
output = self.json_dumps(obj) + '\n'
if filename is None:
filename = self.safe_filename(obj['_type'], obj['_id'])
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
# self.pr_dbg("Contents: " + output)
return filename
def write_objects_to_file(self, objects, path='.'):
for name, obj in iteritems(objects):
self.write_object_to_file(obj, path)
def write_pkg_to_file(self, name, objects, path='.', filename=None):
"""Write a list of related objs to file"""
# Kibana uses an array of docs, do the same
# as opposed to a dict of docs
pkg_objs = []
for _, obj in iteritems(objects):
pkg_objs.append(obj)
sorted_pkg = sorted(pkg_objs, key=lambda k: k['_id'])
output = self.json_dumps(sorted_pkg) + '\n'
if filename is None:
filename = self.safe_filename('Pkg', name)
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
return filename
def get_config(self):
""" Wrapper for get_objects to collect config; skips index-pattern"""
return self.get_objects("type", "config")
def get_visualizations(self):
"""Wrapper for get_objects to collect all visualizations"""
return self.get_objects("type", "visualization")
def get_dashboards(self):
"""Wrapper for get_objects to collect all dashboards"""
return self.get_objects("type", "dashboard")
def get_searches(self):
"""Wrapper for get_objects to collect all saved searches"""
return self.get_objects("type", "search")
def get_dashboard_full(self, db_name):
"""Get DB and all objs needed to duplicate it"""
objects = {}
dashboards = self.get_objects("type", "dashboard")
vizs = self.get_objects("type", "visualization")
searches = self.get_objects("type", "search")
if db_name not in dashboards:
return None
self.pr_inf("Found dashboard: " + db_name)
objects[db_name] = dashboards[db_name]
panels = json.loads(dashboards[db_name]['_source']['panelsJSON'])
for panel in panels:
if 'id' not in panel:
continue
pid = panel['id']
if pid in searches:
self.pr_inf("Found search: " + pid)
objects[pid] = searches[pid]
elif pid in vizs:
self.pr_inf("Found vis: " + pid)
objects[pid] = vizs[pid]
emb = vizs[pid].get('_source', {}).get('savedSearchId', None)
if emb is not None and emb not in objects:
if emb not in searches:
self.pr_err('Missing search %s' % emb)
return objects
objects[emb] = searches[emb]
return objects
|
rfarley3/Kibana
|
kibana/manager.py
|
KibanaManager.get_dashboard_full
|
python
|
def get_dashboard_full(self, db_name):
objects = {}
dashboards = self.get_objects("type", "dashboard")
vizs = self.get_objects("type", "visualization")
searches = self.get_objects("type", "search")
if db_name not in dashboards:
return None
self.pr_inf("Found dashboard: " + db_name)
objects[db_name] = dashboards[db_name]
panels = json.loads(dashboards[db_name]['_source']['panelsJSON'])
for panel in panels:
if 'id' not in panel:
continue
pid = panel['id']
if pid in searches:
self.pr_inf("Found search: " + pid)
objects[pid] = searches[pid]
elif pid in vizs:
self.pr_inf("Found vis: " + pid)
objects[pid] = vizs[pid]
emb = vizs[pid].get('_source', {}).get('savedSearchId', None)
if emb is not None and emb not in objects:
if emb not in searches:
self.pr_err('Missing search %s' % emb)
return objects
objects[emb] = searches[emb]
return objects
|
Get DB and all objs needed to duplicate it
|
train
|
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/manager.py#L255-L282
|
[
"def pr_inf(self, msg):\n print('[INF] Manager %s' % msg)\n",
"def pr_err(self, msg):\n print('[ERR] Manager %s' % msg)\n",
"def get_objects(self, search_field, search_val):\n \"\"\"Return all objects of type (assumes < MAX_HITS)\"\"\"\n query = (\"{ size: \" + str(self.max_hits) + \", \" +\n \"query: { filtered: { filter: { \" +\n search_field + \": { value: \\\"\" + search_val + \"\\\"\" +\n \" } } } } } }\")\n self.connect_es()\n res = self.es.search(index=self.index, body=query)\n # self.pr_dbg(\"%d Hits:\" % res['hits']['total'])\n objects = {}\n for doc in res['hits']['hits']:\n objects[doc['_id']] = {}\n # To make uploading easier in the future:\n # Record all those bits into the backup.\n # Mimics how ES returns the result.\n # Prevents having to store this in some external, contrived, format\n objects[doc['_id']]['_index'] = self.index # also in doc['_index']\n objects[doc['_id']]['_type'] = doc['_type']\n objects[doc['_id']]['_id'] = doc['_id']\n objects[doc['_id']]['_source'] = doc['_source'] # the actual result\n return objects\n"
] |
class KibanaManager():
"""Import/Export Kibana objects"""
def __init__(self, index, host, debug=False):
self._host_ip = host[0]
self._host_port = host[1]
self.index = index
self.es = None
self.max_hits = 9999
self.debug = debug
def pr_dbg(self, msg):
if self.debug:
print('[DBG] Manager %s' % msg)
def pr_inf(self, msg):
print('[INF] Manager %s' % msg)
def pr_err(self, msg):
print('[ERR] Manager %s' % msg)
@property
def host(self):
return (self._host_ip, self._host_port)
@host.setter
def host_setter(self, host):
self._host_ip = host[0]
self._host_port = host[1]
def connect_es(self):
if self.es is not None:
return
self.es = Elasticsearch(
[{'host': self._host_ip, 'port': self._host_port}])
def read_object_from_file(self, filename):
self.pr_inf("Reading object from file: " + filename)
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def read_pkg_from_file(self, filename):
obj = {}
with open(filename, 'rb') as f:
obj = json.loads(f.read().decode('utf-8'))
return obj
def put_object(self, obj):
# TODO consider putting into a ES class
self.pr_dbg('put_obj: %s' % self.json_dumps(obj))
"""
Wrapper for es.index, determines metadata needed to index from obj.
If you have a raw object json string you can hard code these:
index is .kibana (as of kibana4);
id can be A-Za-z0-9\- and must be unique;
doc_type is either visualization, dashboard, search
or for settings docs: config, or index-pattern.
"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object, no index")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object, no _id")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object, no _type")
if obj['_source'] is None or obj['_source'] == "":
raise Exception("Invalid Object, no _source")
self.connect_es()
self.es.indices.create(index=obj['_index'], ignore=400, timeout="2m")
try:
resp = self.es.index(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'],
body=obj['_source'], timeout="2m")
except RequestError as e:
self.pr_err('RequestError: %s, info: %s' % (e.error, e.info))
raise
return resp
def put_pkg(self, objs):
for obj in objs:
self.put_object(obj)
def put_objects(self, objects):
for name, obj in iteritems(objects):
self.put_object(obj)
def del_object(self, obj):
"""Debug deletes obj of obj[_type] with id of obj['_id']"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object")
self.connect_es()
self.es.delete(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type'])
def del_objects(self, objects):
for name, obj in iteritems(objects):
self.del_object(obj)
def json_dumps(self, obj):
"""Serializer for consistency"""
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
def safe_filename(self, otype, oid):
"""Santize obj name into fname and verify doesn't already exist"""
permitted = set(['_', '-', '(', ')'])
oid = ''.join([c for c in oid if c.isalnum() or c in permitted])
while oid.find('--') != -1:
oid = oid.replace('--', '-')
ext = 'json'
ts = datetime.now().strftime("%Y%m%dT%H%M%S")
fname = ''
is_new = False
while not is_new:
oid_len = 255 - len('%s--%s.%s' % (otype, ts, ext))
fname = '%s-%s-%s.%s' % (otype, oid[:oid_len], ts, ext)
is_new = True
if os.path.exists(fname):
is_new = False
ts += '-bck'
return fname
def write_object_to_file(self, obj, path='.', filename=None):
"""Convert obj (dict) to json string and write to file"""
output = self.json_dumps(obj) + '\n'
if filename is None:
filename = self.safe_filename(obj['_type'], obj['_id'])
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
# self.pr_dbg("Contents: " + output)
return filename
def write_objects_to_file(self, objects, path='.'):
for name, obj in iteritems(objects):
self.write_object_to_file(obj, path)
def write_pkg_to_file(self, name, objects, path='.', filename=None):
"""Write a list of related objs to file"""
# Kibana uses an array of docs, do the same
# as opposed to a dict of docs
pkg_objs = []
for _, obj in iteritems(objects):
pkg_objs.append(obj)
sorted_pkg = sorted(pkg_objs, key=lambda k: k['_id'])
output = self.json_dumps(sorted_pkg) + '\n'
if filename is None:
filename = self.safe_filename('Pkg', name)
filename = os.path.join(path, filename)
self.pr_inf("Writing to file: " + filename)
with open(filename, 'w') as f:
f.write(output)
return filename
def get_objects(self, search_field, search_val):
"""Return all objects of type (assumes < MAX_HITS)"""
query = ("{ size: " + str(self.max_hits) + ", " +
"query: { filtered: { filter: { " +
search_field + ": { value: \"" + search_val + "\"" +
" } } } } } }")
self.connect_es()
res = self.es.search(index=self.index, body=query)
# self.pr_dbg("%d Hits:" % res['hits']['total'])
objects = {}
for doc in res['hits']['hits']:
objects[doc['_id']] = {}
# To make uploading easier in the future:
# Record all those bits into the backup.
# Mimics how ES returns the result.
# Prevents having to store this in some external, contrived, format
objects[doc['_id']]['_index'] = self.index # also in doc['_index']
objects[doc['_id']]['_type'] = doc['_type']
objects[doc['_id']]['_id'] = doc['_id']
objects[doc['_id']]['_source'] = doc['_source'] # the actual result
return objects
def get_config(self):
""" Wrapper for get_objects to collect config; skips index-pattern"""
return self.get_objects("type", "config")
def get_visualizations(self):
"""Wrapper for get_objects to collect all visualizations"""
return self.get_objects("type", "visualization")
def get_dashboards(self):
"""Wrapper for get_objects to collect all dashboards"""
return self.get_objects("type", "dashboard")
def get_searches(self):
"""Wrapper for get_objects to collect all saved searches"""
return self.get_objects("type", "search")
|
deep-compute/logagg
|
logagg/util.py
|
start_daemon_thread
|
python
|
def start_daemon_thread(target, args=()):
th = Thread(target=target, args=args)
th.daemon = True
th.start()
return th
|
starts a deamon thread for a given target function and arguments.
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/util.py#L45-L50
| null |
import collections
from deeputil import Dummy
from operator import attrgetter
DUMMY = Dummy()
def memoize(f):
# from: https://goo.gl/aXt4Qy
class memodict(dict):
__slots__ = ()
def __missing__(self, key):
self[key] = ret = f(key)
return ret
return memodict().__getitem__
@memoize
def load_object(imp_path):
"""Given a path (python import path), load the object.
eg of path: logagg.formatters.nginx_access
: logagg.forwarders.mongodb
"""
module_name, obj_name = imp_path.split('.', 1)
module = __import__(module_name)
obj = attrgetter(obj_name)(module)
return obj
import traceback
def log_exception(self, __fn__):
self.log.exception('error_during_run_Continuing', fn=__fn__.func_name,
tb=repr(traceback.format_exc()))
from threading import Thread
def serialize_dict_keys(d, prefix=""):
"""returns all the keys in a dictionary.
>>> serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } })
['a', 'a.b', 'a.b.c', 'a.b.b']
"""
keys = []
for k, v in d.iteritems():
fqk = '%s%s' % (prefix, k)
keys.append(fqk)
if isinstance(v, dict):
keys.extend(serialize_dict_keys(v, prefix="%s." % fqk))
return keys
class MarkValue(str): pass
def flatten_dict(d, parent_key='', sep='.',
ignore_under_prefixed=True, mark_value=True):
'''
>>> flatten_dict({"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } })
{'a.b.b': 2, 'a.b.c': 1, 'a.b._e': "'mark'"}
'''
items = {}
for k in d:
if ignore_under_prefixed and k.startswith('__'):
continue
v = d[k]
if mark_value and k.startswith('_') and not k.startswith('__'):
v = MarkValue(repr(v))
new_key = sep.join((parent_key, k)) if parent_key else k
if isinstance(v, collections.MutableMapping):
items.update(flatten_dict(v, new_key, sep=sep,
ignore_under_prefixed=True,
mark_value=True)
)
else:
items[new_key] = v
return items
import numbers
def is_number(x): return isinstance(x, numbers.Number)
from re import match
spaces = (' ', '\t', '\n')
def ispartial(x):
'''
If log line starts with a space it is recognized as a partial line
>>> ispartial('<time> <event> <some_log_line>')
False
>>> ispartial(' <space> <traceback:> <some_line>')
True
>>> ispartial(' <tab> <traceback:> <some_line>')
True
>>> ispartial(' <white_space> <traceback:> <some_line>')
True
>>> ispartial('')
False
'''
try:
if x[0] in spaces:
return True
except IndexError:
return False
else:
return False
|
deep-compute/logagg
|
logagg/util.py
|
serialize_dict_keys
|
python
|
def serialize_dict_keys(d, prefix=""):
keys = []
for k, v in d.iteritems():
fqk = '%s%s' % (prefix, k)
keys.append(fqk)
if isinstance(v, dict):
keys.extend(serialize_dict_keys(v, prefix="%s." % fqk))
return keys
|
returns all the keys in a dictionary.
>>> serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } })
['a', 'a.b', 'a.b.c', 'a.b.b']
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/util.py#L53-L66
|
[
"def serialize_dict_keys(d, prefix=\"\"):\n \"\"\"returns all the keys in a dictionary.\n\n >>> serialize_dict_keys({\"a\": {\"b\": {\"c\": 1, \"b\": 2} } })\n ['a', 'a.b', 'a.b.c', 'a.b.b']\n \"\"\"\n keys = []\n for k, v in d.iteritems():\n fqk = '%s%s' % (prefix, k)\n keys.append(fqk)\n if isinstance(v, dict):\n keys.extend(serialize_dict_keys(v, prefix=\"%s.\" % fqk))\n\n return keys\n"
] |
import collections
from deeputil import Dummy
from operator import attrgetter
DUMMY = Dummy()
def memoize(f):
# from: https://goo.gl/aXt4Qy
class memodict(dict):
__slots__ = ()
def __missing__(self, key):
self[key] = ret = f(key)
return ret
return memodict().__getitem__
@memoize
def load_object(imp_path):
"""Given a path (python import path), load the object.
eg of path: logagg.formatters.nginx_access
: logagg.forwarders.mongodb
"""
module_name, obj_name = imp_path.split('.', 1)
module = __import__(module_name)
obj = attrgetter(obj_name)(module)
return obj
import traceback
def log_exception(self, __fn__):
self.log.exception('error_during_run_Continuing', fn=__fn__.func_name,
tb=repr(traceback.format_exc()))
from threading import Thread
def start_daemon_thread(target, args=()):
"""starts a deamon thread for a given target function and arguments."""
th = Thread(target=target, args=args)
th.daemon = True
th.start()
return th
class MarkValue(str): pass
def flatten_dict(d, parent_key='', sep='.',
ignore_under_prefixed=True, mark_value=True):
'''
>>> flatten_dict({"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } })
{'a.b.b': 2, 'a.b.c': 1, 'a.b._e': "'mark'"}
'''
items = {}
for k in d:
if ignore_under_prefixed and k.startswith('__'):
continue
v = d[k]
if mark_value and k.startswith('_') and not k.startswith('__'):
v = MarkValue(repr(v))
new_key = sep.join((parent_key, k)) if parent_key else k
if isinstance(v, collections.MutableMapping):
items.update(flatten_dict(v, new_key, sep=sep,
ignore_under_prefixed=True,
mark_value=True)
)
else:
items[new_key] = v
return items
import numbers
def is_number(x): return isinstance(x, numbers.Number)
from re import match
spaces = (' ', '\t', '\n')
def ispartial(x):
'''
If log line starts with a space it is recognized as a partial line
>>> ispartial('<time> <event> <some_log_line>')
False
>>> ispartial(' <space> <traceback:> <some_line>')
True
>>> ispartial(' <tab> <traceback:> <some_line>')
True
>>> ispartial(' <white_space> <traceback:> <some_line>')
True
>>> ispartial('')
False
'''
try:
if x[0] in spaces:
return True
except IndexError:
return False
else:
return False
|
deep-compute/logagg
|
logagg/formatters.py
|
haproxy
|
python
|
def haproxy(line):
#TODO Handle all message formats
'''
>>> import pprint
>>> input_line1 = 'Apr 24 00:00:02 node haproxy[12298]: 1.1.1.1:48660 [24/Apr/2019:00:00:02.358] pre-staging~ pre-staging_doc/pre-staging_active 261/0/2/8/271 200 2406 - - ---- 4/4/0/1/0 0/0 {AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018} "GET /doc/api/get?call=apple HTTP/1.1"'
>>> output_line1 = haproxy(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'Tc': 2.0,
'Tq': 261.0,
'Tr': 8.0,
'Tw': 0.0,
'_api': '/doc/api/get?call=apple',
'_headers': ['AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018'],
'actconn': 4,
'backend': 'pre-staging_doc/pre-staging_active',
'backend_queue': 0,
'beconn': 1,
'bytes_read': 2406.0,
'client_port': '48660',
'client_server': '1.1.1.1',
'feconn': 4,
'front_end': 'pre-staging~',
'haproxy_server': 'node',
'method': 'GET',
'resp_time': 271.0,
'retries': 0,
'srv_conn': 0,
'srv_queue': 0,
'status': '200',
'timestamp': '2019-04-24T00:00:02.358000'},
'event': 'haproxy_event',
'timestamp': '2019-04-24T00:00:02.358000',
'type': 'metric'}
'''
_line = line.strip().split()
log = {}
log['client_server'] = _line[5].split(':')[0].strip()
log['client_port'] = _line[5].split(':')[1].strip()
_timestamp = re.findall(r'\[(.*?)\]', _line[6])[0]
log['timestamp'] = datetime.datetime.strptime(_timestamp, '%d/%b/%Y:%H:%M:%S.%f').isoformat()
log['front_end'] = _line[7].strip()
log['backend'] = _line[8].strip()
log['Tq'] = float(_line[9].split('/')[0].strip())
log['Tw'] = float(_line[9].split('/')[1].strip())
log['Tc'] = float(_line[9].split('/')[2].strip())
log['Tr'] = float(_line[9].split('/')[3].strip())
log['resp_time'] = float(_line[9].split('/')[-1].strip())
log['status'] = _line[10].strip()
log['bytes_read'] = float(_line[11].strip())
log['_headers'] = re.findall(r'{(.*)}', line)
log['haproxy_server'] = _line[3].strip()
log['method'] = _line[-3].strip('"').strip()
log['_api'] = _line[-2].strip()
log['retries'] = int(_line[15].split('/')[-1].strip())
log['actconn'] = int(_line[15].split('/')[0].strip())
log['feconn'] = int(_line[15].split('/')[1].strip())
log['beconn'] = int(_line[15].split('/')[-2].strip())
log['srv_conn'] = int(_line[15].split('/')[-3].strip())
log['srv_queue'] = int(_line[16].split('/')[0].strip())
log['backend_queue'] = int(_line[16].split('/')[1].strip())
return dict(
data=log,
event='haproxy_event',
timestamp=log.get('timestamp'),
type='metric'
)
|
>>> import pprint
>>> input_line1 = 'Apr 24 00:00:02 node haproxy[12298]: 1.1.1.1:48660 [24/Apr/2019:00:00:02.358] pre-staging~ pre-staging_doc/pre-staging_active 261/0/2/8/271 200 2406 - - ---- 4/4/0/1/0 0/0 {AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018} "GET /doc/api/get?call=apple HTTP/1.1"'
>>> output_line1 = haproxy(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'Tc': 2.0,
'Tq': 261.0,
'Tr': 8.0,
'Tw': 0.0,
'_api': '/doc/api/get?call=apple',
'_headers': ['AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018'],
'actconn': 4,
'backend': 'pre-staging_doc/pre-staging_active',
'backend_queue': 0,
'beconn': 1,
'bytes_read': 2406.0,
'client_port': '48660',
'client_server': '1.1.1.1',
'feconn': 4,
'front_end': 'pre-staging~',
'haproxy_server': 'node',
'method': 'GET',
'resp_time': 271.0,
'retries': 0,
'srv_conn': 0,
'srv_queue': 0,
'status': '200',
'timestamp': '2019-04-24T00:00:02.358000'},
'event': 'haproxy_event',
'timestamp': '2019-04-24T00:00:02.358000',
'type': 'metric'}
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/formatters.py#L19-L94
| null |
import re
import ujson as json
import datetime
class RawLog(dict): pass
#FIXME: cannot do both returns .. should it?
def docker_file_log_driver(line):
log = json.loads(json.loads(line)['msg'])
if 'formatter' in log.get('extra'):
return RawLog(dict(formatter=log.get('extra').get('formatter'),
raw=log.get('message'),
host=log.get('host'),
timestamp=log.get('timestamp'),
)
)
return dict(timestamp=log.get('timestamp'), data=log, type='log')
def nginx_access(line):
    """Parse one JSON-formatted nginx access-log line into a metric dict.

    The epoch-seconds ``timestamp`` field is converted to an ISO-8601 UTC
    string, the numeric fields are coerced to float, and a missing upstream
    response time (reported as ``"-"`` by nginx) is treated as 0.0.
    """
    # TODO Handle nginx error logs
    record = json.loads(line)

    # Replace the epoch-seconds timestamp with an ISO-8601 UTC string.
    epoch = float(record['timestamp'])
    record['timestamp'] = datetime.datetime.utcfromtimestamp(epoch).isoformat()

    # nginx emits "-" when no upstream served the request.
    if '-' in record.get('upstream_response_time'):
        record['upstream_response_time'] = 0.0

    for key in ('body_bytes_sent', 'request_time', 'upstream_response_time'):
        record[key] = float(record[key])

    return dict(
        timestamp=record.get('timestamp', ' '),
        data=record,
        type='metric',
        event='nginx_event',
    )
def mongodb(line):
    """Split a mongod log line into its standard fields.

    A mongod line is ``<timestamp> <severity> <component> <context> <message>``;
    the message (everything after the fourth field) is kept intact.
    """
    parts = re.split(r'\s+', line, maxsplit=4)
    fields = ('timestamp', 'severity', 'component', 'context', 'message')
    return dict(
        timestamp=parts[0],
        data=dict(zip(fields, parts)),
        type='log',
    )
def django(line):
    """Parse a Django log line of the form
    ``[DD/Mon/YYYY HH:MM:SS] LEVEL [logger:lineno] message``.

    The message is decoded as JSON when it contains a ``{...}`` span and
    parses cleanly; otherwise it is kept as plain text.  Lines that do not
    start with a bracketed timestamp (e.g. traceback continuation lines)
    are returned raw, stamped with the current UTC time.
    """
    # TODO handle multi-line traceback logs (tracebacks spanning several lines)
    data = {}
    log = re.findall(r'^(\[\d+/\w+/\d+ \d+:\d+:\d+\].*)', line)
    if len(log) == 1:
        data['timestamp'] = datetime.datetime.strptime(
            re.findall(r'(\d+/\w+/\d+ \d+:\d+:\d+)', log[0])[0],
            "%d/%b/%Y %H:%M:%S").isoformat()
        # The second run of capitals is the level (the first is the month
        # abbreviation inside the timestamp, e.g. 'A' from 'Aug').
        data['loglevel'] = re.findall(r'[A-Z]+', log[0])[1]
        data['logname'] = re.findall(r'\[\D+.\w+:\d+\]', log[0])[0]
        message = re.findall(r'\{.+\}', log[0])
        try:
            if len(message) > 0:
                message = json.loads(message[0])
            else:
                message = ''.join(re.split(']', log[0])[2:])
        except ValueError:
            # Braces present but not valid JSON: fall back to the raw text
            # after the '[logger:lineno]' prefix.
            message = ''.join(re.split(']', log[0])[2:])
        data['message'] = message
        return dict(
            timestamp=data['timestamp'],
            level=data['loglevel'],
            data=data,
        )
    else:
        # Continuation / unparsable line: wrap it untouched.
        # BUG FIX: the original wrote ``data={raw:line}`` — ``raw`` is an
        # undefined name, so this branch raised NameError instead of
        # returning the raw line.
        return dict(
            timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow()),
            data={'raw': line},
        )
def basescript(line):
    """Parse a basescript structured-log line (one JSON object per line).

    Promotes the ``timestamp``, ``id``, ``type``, ``level`` and ``event``
    fields to the top level and keeps the full record under ``data``.
    Raises ``KeyError`` if any of those fields is absent.
    """
    record = json.loads(line)
    result = {key: record[key]
              for key in ('timestamp', 'id', 'type', 'level', 'event')}
    result['data'] = record
    return result
def elasticsearch(line):
    '''
    >>> import pprint
    >>> input_line = '[2017-08-30T06:27:19,158] [WARN ][o.e.m.j.JvmGcMonitorService] [Glsuj_2] [gc][296816] overhead, spent [1.2s] collecting in the last [1.3s]'
    >>> output_line = elasticsearch(input_line)
    >>> pprint.pprint(output_line)
    {'data': {'garbage_collector': 'gc',
    'gc_count': 296816.0,
    'level': 'WARN',
    'message': 'o.e.m.j.JvmGcMonitorService',
    'plugin': 'Glsuj_2',
    'query_time_ms': 1200.0,
    'resp_time_ms': 1300.0,
    'timestamp': '2017-08-30T06:27:19,158'},
    'event': 'o.e.m.j.JvmGcMonitorService',
    'level': 'WARN ',
    'timestamp': '2017-08-30T06:27:19,158',
    'type': 'metric'}
    Case 2:
    [2017-09-13T23:15:00,415][WARN ][o.e.i.e.Engine ] [Glsuj_2] [filebeat-2017.09.09][3] failed engine [index]
    java.nio.file.FileSystemException: /home/user/elasticsearch/data/nodes/0/indices/jsVSO6f3Rl-wwBpQyNRCbQ/3/index/_0.fdx: Too many open files
    at sun.nio.fs.UnixException.translateToIOException(UnixException.java:91) ~[?:?]
    '''
    # TODO we need to handle case2 logs
    elasticsearch_log = line
    # A record-start line begins with a bracketed timestamp like
    # [2017-08-30T06:27:19,158]; anything else (tracebacks etc.) falls
    # through to the raw branch below.
    actuallog = re.findall(r'(\[\d+\-+\d+\d+\-+\d+\w+\d+:\d+:\d+,+\d\d\d+\].*)', elasticsearch_log)
    if len(actuallog) == 1:
        # Bracketed groups map positionally onto these keys; zip truncates
        # when the line has fewer groups than keys.
        keys = ['timestamp','level','message','plugin','garbage_collector','gc_count','query_time_ms', 'resp_time_ms']
        values = re.findall(r'\[(.*?)\]', actuallog[0])
        # Normalise bracketed durations to milliseconds in place:
        # '433ms' -> 433.0, '1.2s' -> 1200.0.  Non-duration strings pass
        # through unchanged.
        for index, i in enumerate(values):
            if not isinstance(i, str):
                continue
            if len(re.findall(r'.*ms$', i)) > 0 and 'ms' in re.findall(r'.*ms$', i)[0]:
                num = re.split('ms', i)[0]
                values[index] = float(num)
                continue
            if len(re.findall(r'.*s$', i)) > 0 and 's' in re.findall(r'.*s$', i)[0]:
                num = re.split('s', i)[0]
                values[index] = float(num) * 1000
                continue
        data = dict(zip(keys,values))
        # The level group is fixed-width in ES output ('WARN '); trim one
        # trailing space if present.
        if 'level' in data and data['level'][-1] == ' ':
            data['level'] = data['level'][:-1]
        if 'gc_count' in data:
            data['gc_count'] = float(data['gc_count'])
        event = data['message']
        # NOTE: top-level 'level' keeps the untrimmed value (see doctest:
        # 'WARN ' at top level vs 'WARN' inside data).
        level=values[1]
        timestamp=values[0]
        return dict(
            timestamp=timestamp,
            level=level,
            type='metric',
            data=data,
            event=event
        )
    else:
        # Not a record-start line: keep it verbatim, stamped with local time.
        return dict(
            timestamp=datetime.datetime.isoformat(datetime.datetime.now()),
            data={'raw': line}
        )
LOG_BEGIN_PATTERN = [re.compile(r'^\s+\['), re.compile(r'^\[')]


def elasticsearch_ispartial_log(line):
    '''
    Return True when *line* does not start a new Elasticsearch log record.

    A new record begins with a (possibly indented) bracketed timestamp;
    traceback and exception lines are continuations of the previous record.

    >>> elasticsearch_ispartial_log('  [2018-04-03T00:22:38,048][DEBUG][x] msg')
    False
    >>> elasticsearch_ispartial_log('org.elasticsearch.SomeException: boom')
    True
    '''
    # Cleanup: the original built an unused ``match_result`` list; any()
    # states the intent directly with identical behavior.
    return not any(p.match(line) for p in LOG_BEGIN_PATTERN)
elasticsearch.ispartial = elasticsearch_ispartial_log
|
deep-compute/logagg
|
logagg/formatters.py
|
nginx_access
|
python
|
def nginx_access(line):
'''
>>> import pprint
>>> input_line1 = '{ \
"remote_addr": "127.0.0.1","remote_user": "-","timestamp": "1515144699.201", \
"request": "GET / HTTP/1.1","status": "200","request_time": "0.000", \
"body_bytes_sent": "396","http_referer": "-","http_user_agent": "python-requests/2.18.4", \
"http_x_forwarded_for": "-","upstream_response_time": "-" \
}'
>>> output_line1 = nginx_access(input_line1)
>>> pprint.pprint(output_line1)
{'data': {u'body_bytes_sent': 396.0,
u'http_referer': u'-',
u'http_user_agent': u'python-requests/2.18.4',
u'http_x_forwarded_for': u'-',
u'remote_addr': u'127.0.0.1',
u'remote_user': u'-',
u'request': u'GET / HTTP/1.1',
u'request_time': 0.0,
u'status': u'200',
u'timestamp': '2018-01-05T09:31:39.201000',
u'upstream_response_time': 0.0},
'event': 'nginx_event',
'timestamp': '2018-01-05T09:31:39.201000',
'type': 'metric'}
>>> input_line2 = '{ \
"remote_addr": "192.158.0.51","remote_user": "-","timestamp": "1515143686.415", \
"request": "POST /mpub?topic=heartbeat HTTP/1.1","status": "404","request_time": "0.000", \
"body_bytes_sent": "152","http_referer": "-","http_user_agent": "python-requests/2.18.4", \
"http_x_forwarded_for": "-","upstream_response_time": "-" \
}'
>>> output_line2 = nginx_access(input_line2)
>>> pprint.pprint(output_line2)
{'data': {u'body_bytes_sent': 152.0,
u'http_referer': u'-',
u'http_user_agent': u'python-requests/2.18.4',
u'http_x_forwarded_for': u'-',
u'remote_addr': u'192.158.0.51',
u'remote_user': u'-',
u'request': u'POST /mpub?topic=heartbeat HTTP/1.1',
u'request_time': 0.0,
u'status': u'404',
u'timestamp': '2018-01-05T09:14:46.415000',
u'upstream_response_time': 0.0},
'event': 'nginx_event',
'timestamp': '2018-01-05T09:14:46.415000',
'type': 'metric'}
'''
#TODO Handle nginx error logs
log = json.loads(line)
timestamp_iso = datetime.datetime.utcfromtimestamp(float(log['timestamp'])).isoformat()
log.update({'timestamp':timestamp_iso})
if '-' in log.get('upstream_response_time'):
log['upstream_response_time'] = 0.0
log['body_bytes_sent'] = float(log['body_bytes_sent'])
log['request_time'] = float(log['request_time'])
log['upstream_response_time'] = float(log['upstream_response_time'])
return dict(
timestamp=log.get('timestamp',' '),
data=log,
type='metric',
event='nginx_event',
)
|
>>> import pprint
>>> input_line1 = '{ \
"remote_addr": "127.0.0.1","remote_user": "-","timestamp": "1515144699.201", \
"request": "GET / HTTP/1.1","status": "200","request_time": "0.000", \
"body_bytes_sent": "396","http_referer": "-","http_user_agent": "python-requests/2.18.4", \
"http_x_forwarded_for": "-","upstream_response_time": "-" \
}'
>>> output_line1 = nginx_access(input_line1)
>>> pprint.pprint(output_line1)
{'data': {u'body_bytes_sent': 396.0,
u'http_referer': u'-',
u'http_user_agent': u'python-requests/2.18.4',
u'http_x_forwarded_for': u'-',
u'remote_addr': u'127.0.0.1',
u'remote_user': u'-',
u'request': u'GET / HTTP/1.1',
u'request_time': 0.0,
u'status': u'200',
u'timestamp': '2018-01-05T09:31:39.201000',
u'upstream_response_time': 0.0},
'event': 'nginx_event',
'timestamp': '2018-01-05T09:31:39.201000',
'type': 'metric'}
>>> input_line2 = '{ \
"remote_addr": "192.158.0.51","remote_user": "-","timestamp": "1515143686.415", \
"request": "POST /mpub?topic=heartbeat HTTP/1.1","status": "404","request_time": "0.000", \
"body_bytes_sent": "152","http_referer": "-","http_user_agent": "python-requests/2.18.4", \
"http_x_forwarded_for": "-","upstream_response_time": "-" \
}'
>>> output_line2 = nginx_access(input_line2)
>>> pprint.pprint(output_line2)
{'data': {u'body_bytes_sent': 152.0,
u'http_referer': u'-',
u'http_user_agent': u'python-requests/2.18.4',
u'http_x_forwarded_for': u'-',
u'remote_addr': u'192.158.0.51',
u'remote_user': u'-',
u'request': u'POST /mpub?topic=heartbeat HTTP/1.1',
u'request_time': 0.0,
u'status': u'404',
u'timestamp': '2018-01-05T09:14:46.415000',
u'upstream_response_time': 0.0},
'event': 'nginx_event',
'timestamp': '2018-01-05T09:14:46.415000',
'type': 'metric'}
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/formatters.py#L96-L160
| null |
import re
import ujson as json
import datetime
class RawLog(dict): pass  # marker type: payload still needs formatting; carries formatter/raw/host/timestamp keys (see docker_file_log_driver)
#FIXME: cannot do both returns .. should it?
def docker_file_log_driver(line):
    """Decode a docker file-log-driver line (JSON wrapping a JSON ``msg``).

    When the inner record names a formatter under ``extra``, a ``RawLog``
    is returned so the caller can run a second formatting pass; otherwise
    the record is wrapped as an ordinary log entry.
    """
    outer = json.loads(line)
    record = json.loads(outer['msg'])
    if 'formatter' in record.get('extra'):
        # Hand the raw message back for re-formatting by the named formatter.
        return RawLog(dict(
            formatter=record.get('extra').get('formatter'),
            raw=record.get('message'),
            host=record.get('host'),
            timestamp=record.get('timestamp'),
        ))
    return dict(timestamp=record.get('timestamp'), data=record, type='log')
def haproxy(line):
    #TODO Handle all message formats
    '''
    >>> import pprint
    >>> input_line1 = 'Apr 24 00:00:02 node haproxy[12298]: 1.1.1.1:48660 [24/Apr/2019:00:00:02.358] pre-staging~ pre-staging_doc/pre-staging_active 261/0/2/8/271 200 2406 - - ---- 4/4/0/1/0 0/0 {AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018} "GET /doc/api/get?call=apple HTTP/1.1"'
    >>> output_line1 = haproxy(input_line1)
    >>> pprint.pprint(output_line1)
    {'data': {'Tc': 2.0,
    'Tq': 261.0,
    'Tr': 8.0,
    'Tw': 0.0,
    '_api': '/doc/api/get?call=apple',
    '_headers': ['AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018'],
    'actconn': 4,
    'backend': 'pre-staging_doc/pre-staging_active',
    'backend_queue': 0,
    'beconn': 1,
    'bytes_read': 2406.0,
    'client_port': '48660',
    'client_server': '1.1.1.1',
    'feconn': 4,
    'front_end': 'pre-staging~',
    'haproxy_server': 'node',
    'method': 'GET',
    'resp_time': 271.0,
    'retries': 0,
    'srv_conn': 0,
    'srv_queue': 0,
    'status': '200',
    'timestamp': '2019-04-24T00:00:02.358000'},
    'event': 'haproxy_event',
    'timestamp': '2019-04-24T00:00:02.358000',
    'type': 'metric'}
    '''
    # Syslog-prefixed haproxy HTTP log line; all fields are positional
    # after a whitespace split, so this parser is sensitive to format drift.
    _line = line.strip().split()
    log = {}
    # _line[5] is "client_ip:client_port".
    log['client_server'] = _line[5].split(':')[0].strip()
    log['client_port'] = _line[5].split(':')[1].strip()
    # _line[6] is the bracketed accept date, e.g. [24/Apr/2019:00:00:02.358].
    _timestamp = re.findall(r'\[(.*?)\]', _line[6])[0]
    log['timestamp'] = datetime.datetime.strptime(_timestamp, '%d/%b/%Y:%H:%M:%S.%f').isoformat()
    log['front_end'] = _line[7].strip()
    log['backend'] = _line[8].strip()
    # _line[9] is the slash-separated timing tuple; the last element is the
    # total response time.
    log['Tq'] = float(_line[9].split('/')[0].strip())
    log['Tw'] = float(_line[9].split('/')[1].strip())
    log['Tc'] = float(_line[9].split('/')[2].strip())
    log['Tr'] = float(_line[9].split('/')[3].strip())
    log['resp_time'] = float(_line[9].split('/')[-1].strip())
    log['status'] = _line[10].strip()
    log['bytes_read'] = float(_line[11].strip())
    # The captured-headers block is everything between the outermost braces.
    log['_headers'] = re.findall(r'{(.*)}', line)
    log['haproxy_server'] = _line[3].strip()
    # The quoted request trails the line: '"METHOD URI HTTP/x.y"'.
    log['method'] = _line[-3].strip('"').strip()
    log['_api'] = _line[-2].strip()
    # _line[15] is the connection-counter quintuple.
    # NOTE(review): haproxy docs order it actconn/feconn/beconn/srv_conn/retries,
    # which would make beconn index 2 and srv_conn index 3 — the [-2]/[-3]
    # picks below look swapped relative to that; confirm against real logs.
    log['retries'] = int(_line[15].split('/')[-1].strip())
    log['actconn'] = int(_line[15].split('/')[0].strip())
    log['feconn'] = int(_line[15].split('/')[1].strip())
    log['beconn'] = int(_line[15].split('/')[-2].strip())
    log['srv_conn'] = int(_line[15].split('/')[-3].strip())
    # _line[16] is "srv_queue/backend_queue".
    log['srv_queue'] = int(_line[16].split('/')[0].strip())
    log['backend_queue'] = int(_line[16].split('/')[1].strip())
    return dict(
        data=log,
        event='haproxy_event',
        timestamp=log.get('timestamp'),
        type='metric'
    )
def mongodb(line):
    """Split a mongod log line into its standard fields.

    A mongod line is ``<timestamp> <severity> <component> <context> <message>``;
    the message (everything after the fourth field) is kept intact.
    """
    parts = re.split(r'\s+', line, maxsplit=4)
    fields = ('timestamp', 'severity', 'component', 'context', 'message')
    return dict(
        timestamp=parts[0],
        data=dict(zip(fields, parts)),
        type='log',
    )
def django(line):
    """Parse a Django log line of the form
    ``[DD/Mon/YYYY HH:MM:SS] LEVEL [logger:lineno] message``.

    The message is decoded as JSON when it contains a ``{...}`` span and
    parses cleanly; otherwise it is kept as plain text.  Lines that do not
    start with a bracketed timestamp (e.g. traceback continuation lines)
    are returned raw, stamped with the current UTC time.
    """
    # TODO handle multi-line traceback logs (tracebacks spanning several lines)
    data = {}
    log = re.findall(r'^(\[\d+/\w+/\d+ \d+:\d+:\d+\].*)', line)
    if len(log) == 1:
        data['timestamp'] = datetime.datetime.strptime(
            re.findall(r'(\d+/\w+/\d+ \d+:\d+:\d+)', log[0])[0],
            "%d/%b/%Y %H:%M:%S").isoformat()
        # The second run of capitals is the level (the first is the month
        # abbreviation inside the timestamp, e.g. 'A' from 'Aug').
        data['loglevel'] = re.findall(r'[A-Z]+', log[0])[1]
        data['logname'] = re.findall(r'\[\D+.\w+:\d+\]', log[0])[0]
        message = re.findall(r'\{.+\}', log[0])
        try:
            if len(message) > 0:
                message = json.loads(message[0])
            else:
                message = ''.join(re.split(']', log[0])[2:])
        except ValueError:
            # Braces present but not valid JSON: fall back to the raw text
            # after the '[logger:lineno]' prefix.
            message = ''.join(re.split(']', log[0])[2:])
        data['message'] = message
        return dict(
            timestamp=data['timestamp'],
            level=data['loglevel'],
            data=data,
        )
    else:
        # Continuation / unparsable line: wrap it untouched.
        # BUG FIX: the original wrote ``data={raw:line}`` — ``raw`` is an
        # undefined name, so this branch raised NameError instead of
        # returning the raw line.
        return dict(
            timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow()),
            data={'raw': line},
        )
def basescript(line):
    """Parse a basescript structured-log line (one JSON object per line).

    Promotes the ``timestamp``, ``id``, ``type``, ``level`` and ``event``
    fields to the top level and keeps the full record under ``data``.
    Raises ``KeyError`` if any of those fields is absent.
    """
    record = json.loads(line)
    result = {key: record[key]
              for key in ('timestamp', 'id', 'type', 'level', 'event')}
    result['data'] = record
    return result
def elasticsearch(line):
    '''
    >>> import pprint
    >>> input_line = '[2017-08-30T06:27:19,158] [WARN ][o.e.m.j.JvmGcMonitorService] [Glsuj_2] [gc][296816] overhead, spent [1.2s] collecting in the last [1.3s]'
    >>> output_line = elasticsearch(input_line)
    >>> pprint.pprint(output_line)
    {'data': {'garbage_collector': 'gc',
    'gc_count': 296816.0,
    'level': 'WARN',
    'message': 'o.e.m.j.JvmGcMonitorService',
    'plugin': 'Glsuj_2',
    'query_time_ms': 1200.0,
    'resp_time_ms': 1300.0,
    'timestamp': '2017-08-30T06:27:19,158'},
    'event': 'o.e.m.j.JvmGcMonitorService',
    'level': 'WARN ',
    'timestamp': '2017-08-30T06:27:19,158',
    'type': 'metric'}
    Case 2:
    [2017-09-13T23:15:00,415][WARN ][o.e.i.e.Engine ] [Glsuj_2] [filebeat-2017.09.09][3] failed engine [index]
    java.nio.file.FileSystemException: /home/user/elasticsearch/data/nodes/0/indices/jsVSO6f3Rl-wwBpQyNRCbQ/3/index/_0.fdx: Too many open files
    at sun.nio.fs.UnixException.translateToIOException(UnixException.java:91) ~[?:?]
    '''
    # TODO we need to handle case2 logs
    elasticsearch_log = line
    # A record-start line begins with a bracketed timestamp like
    # [2017-08-30T06:27:19,158]; anything else (tracebacks etc.) falls
    # through to the raw branch below.
    actuallog = re.findall(r'(\[\d+\-+\d+\d+\-+\d+\w+\d+:\d+:\d+,+\d\d\d+\].*)', elasticsearch_log)
    if len(actuallog) == 1:
        # Bracketed groups map positionally onto these keys; zip truncates
        # when the line has fewer groups than keys.
        keys = ['timestamp','level','message','plugin','garbage_collector','gc_count','query_time_ms', 'resp_time_ms']
        values = re.findall(r'\[(.*?)\]', actuallog[0])
        # Normalise bracketed durations to milliseconds in place:
        # '433ms' -> 433.0, '1.2s' -> 1200.0.  Non-duration strings pass
        # through unchanged.
        for index, i in enumerate(values):
            if not isinstance(i, str):
                continue
            if len(re.findall(r'.*ms$', i)) > 0 and 'ms' in re.findall(r'.*ms$', i)[0]:
                num = re.split('ms', i)[0]
                values[index] = float(num)
                continue
            if len(re.findall(r'.*s$', i)) > 0 and 's' in re.findall(r'.*s$', i)[0]:
                num = re.split('s', i)[0]
                values[index] = float(num) * 1000
                continue
        data = dict(zip(keys,values))
        # The level group is fixed-width in ES output ('WARN '); trim one
        # trailing space if present.
        if 'level' in data and data['level'][-1] == ' ':
            data['level'] = data['level'][:-1]
        if 'gc_count' in data:
            data['gc_count'] = float(data['gc_count'])
        event = data['message']
        # NOTE: top-level 'level' keeps the untrimmed value (see doctest:
        # 'WARN ' at top level vs 'WARN' inside data).
        level=values[1]
        timestamp=values[0]
        return dict(
            timestamp=timestamp,
            level=level,
            type='metric',
            data=data,
            event=event
        )
    else:
        # Not a record-start line: keep it verbatim, stamped with local time.
        return dict(
            timestamp=datetime.datetime.isoformat(datetime.datetime.now()),
            data={'raw': line}
        )
LOG_BEGIN_PATTERN = [re.compile(r'^\s+\['), re.compile(r'^\[')]


def elasticsearch_ispartial_log(line):
    '''
    Return True when *line* does not start a new Elasticsearch log record.

    A new record begins with a (possibly indented) bracketed timestamp;
    traceback and exception lines are continuations of the previous record.

    >>> elasticsearch_ispartial_log('  [2018-04-03T00:22:38,048][DEBUG][x] msg')
    False
    >>> elasticsearch_ispartial_log('org.elasticsearch.SomeException: boom')
    True
    '''
    # Cleanup: the original built an unused ``match_result`` list; any()
    # states the intent directly with identical behavior.
    return not any(p.match(line) for p in LOG_BEGIN_PATTERN)
elasticsearch.ispartial = elasticsearch_ispartial_log
|
deep-compute/logagg
|
logagg/formatters.py
|
mongodb
|
python
|
def mongodb(line):
'''
>>> import pprint
>>> input_line1 = '2017-08-17T07:56:33.489+0200 I REPL [signalProcessingThread] shutting down replication subsystems'
>>> output_line1 = mongodb(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'component': 'REPL',
'context': '[signalProcessingThread]',
'message': 'shutting down replication subsystems',
'severity': 'I',
'timestamp': '2017-08-17T07:56:33.489+0200'},
'timestamp': '2017-08-17T07:56:33.489+0200',
'type': 'log'}
>>> input_line2 = '2017-08-17T07:56:33.515+0200 W NETWORK [initandlisten] No primary detected for set confsvr_repl1'
>>> output_line2 = mongodb(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'component': 'NETWORK',
'context': '[initandlisten]',
'message': 'No primary detected for set confsvr_repl1',
'severity': 'W',
'timestamp': '2017-08-17T07:56:33.515+0200'},
'timestamp': '2017-08-17T07:56:33.515+0200',
'type': 'log'}
'''
keys = ['timestamp', 'severity', 'component', 'context', 'message']
values = re.split(r'\s+', line, maxsplit=4)
mongodb_log = dict(zip(keys,values))
return dict(
timestamp=values[0],
data=mongodb_log,
type='log',
)
|
>>> import pprint
>>> input_line1 = '2017-08-17T07:56:33.489+0200 I REPL [signalProcessingThread] shutting down replication subsystems'
>>> output_line1 = mongodb(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'component': 'REPL',
'context': '[signalProcessingThread]',
'message': 'shutting down replication subsystems',
'severity': 'I',
'timestamp': '2017-08-17T07:56:33.489+0200'},
'timestamp': '2017-08-17T07:56:33.489+0200',
'type': 'log'}
>>> input_line2 = '2017-08-17T07:56:33.515+0200 W NETWORK [initandlisten] No primary detected for set confsvr_repl1'
>>> output_line2 = mongodb(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'component': 'NETWORK',
'context': '[initandlisten]',
'message': 'No primary detected for set confsvr_repl1',
'severity': 'W',
'timestamp': '2017-08-17T07:56:33.515+0200'},
'timestamp': '2017-08-17T07:56:33.515+0200',
'type': 'log'}
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/formatters.py#L162-L196
| null |
import re
import ujson as json
import datetime
class RawLog(dict): pass  # marker type: payload still needs formatting; carries formatter/raw/host/timestamp keys (see docker_file_log_driver)
#FIXME: cannot do both returns .. should it?
def docker_file_log_driver(line):
    """Decode a docker file-log-driver line (JSON wrapping a JSON ``msg``).

    When the inner record names a formatter under ``extra``, a ``RawLog``
    is returned so the caller can run a second formatting pass; otherwise
    the record is wrapped as an ordinary log entry.
    """
    outer = json.loads(line)
    record = json.loads(outer['msg'])
    if 'formatter' in record.get('extra'):
        # Hand the raw message back for re-formatting by the named formatter.
        return RawLog(dict(
            formatter=record.get('extra').get('formatter'),
            raw=record.get('message'),
            host=record.get('host'),
            timestamp=record.get('timestamp'),
        ))
    return dict(timestamp=record.get('timestamp'), data=record, type='log')
def haproxy(line):
    #TODO Handle all message formats
    '''
    >>> import pprint
    >>> input_line1 = 'Apr 24 00:00:02 node haproxy[12298]: 1.1.1.1:48660 [24/Apr/2019:00:00:02.358] pre-staging~ pre-staging_doc/pre-staging_active 261/0/2/8/271 200 2406 - - ---- 4/4/0/1/0 0/0 {AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018} "GET /doc/api/get?call=apple HTTP/1.1"'
    >>> output_line1 = haproxy(input_line1)
    >>> pprint.pprint(output_line1)
    {'data': {'Tc': 2.0,
    'Tq': 261.0,
    'Tr': 8.0,
    'Tw': 0.0,
    '_api': '/doc/api/get?call=apple',
    '_headers': ['AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018'],
    'actconn': 4,
    'backend': 'pre-staging_doc/pre-staging_active',
    'backend_queue': 0,
    'beconn': 1,
    'bytes_read': 2406.0,
    'client_port': '48660',
    'client_server': '1.1.1.1',
    'feconn': 4,
    'front_end': 'pre-staging~',
    'haproxy_server': 'node',
    'method': 'GET',
    'resp_time': 271.0,
    'retries': 0,
    'srv_conn': 0,
    'srv_queue': 0,
    'status': '200',
    'timestamp': '2019-04-24T00:00:02.358000'},
    'event': 'haproxy_event',
    'timestamp': '2019-04-24T00:00:02.358000',
    'type': 'metric'}
    '''
    # Syslog-prefixed haproxy HTTP log line; all fields are positional
    # after a whitespace split, so this parser is sensitive to format drift.
    _line = line.strip().split()
    log = {}
    # _line[5] is "client_ip:client_port".
    log['client_server'] = _line[5].split(':')[0].strip()
    log['client_port'] = _line[5].split(':')[1].strip()
    # _line[6] is the bracketed accept date, e.g. [24/Apr/2019:00:00:02.358].
    _timestamp = re.findall(r'\[(.*?)\]', _line[6])[0]
    log['timestamp'] = datetime.datetime.strptime(_timestamp, '%d/%b/%Y:%H:%M:%S.%f').isoformat()
    log['front_end'] = _line[7].strip()
    log['backend'] = _line[8].strip()
    # _line[9] is the slash-separated timing tuple; the last element is the
    # total response time.
    log['Tq'] = float(_line[9].split('/')[0].strip())
    log['Tw'] = float(_line[9].split('/')[1].strip())
    log['Tc'] = float(_line[9].split('/')[2].strip())
    log['Tr'] = float(_line[9].split('/')[3].strip())
    log['resp_time'] = float(_line[9].split('/')[-1].strip())
    log['status'] = _line[10].strip()
    log['bytes_read'] = float(_line[11].strip())
    # The captured-headers block is everything between the outermost braces.
    log['_headers'] = re.findall(r'{(.*)}', line)
    log['haproxy_server'] = _line[3].strip()
    # The quoted request trails the line: '"METHOD URI HTTP/x.y"'.
    log['method'] = _line[-3].strip('"').strip()
    log['_api'] = _line[-2].strip()
    # _line[15] is the connection-counter quintuple.
    # NOTE(review): haproxy docs order it actconn/feconn/beconn/srv_conn/retries,
    # which would make beconn index 2 and srv_conn index 3 — the [-2]/[-3]
    # picks below look swapped relative to that; confirm against real logs.
    log['retries'] = int(_line[15].split('/')[-1].strip())
    log['actconn'] = int(_line[15].split('/')[0].strip())
    log['feconn'] = int(_line[15].split('/')[1].strip())
    log['beconn'] = int(_line[15].split('/')[-2].strip())
    log['srv_conn'] = int(_line[15].split('/')[-3].strip())
    # _line[16] is "srv_queue/backend_queue".
    log['srv_queue'] = int(_line[16].split('/')[0].strip())
    log['backend_queue'] = int(_line[16].split('/')[1].strip())
    return dict(
        data=log,
        event='haproxy_event',
        timestamp=log.get('timestamp'),
        type='metric'
    )
def nginx_access(line):
    """Parse one JSON-formatted nginx access-log line into a metric dict.

    The epoch-seconds ``timestamp`` field is converted to an ISO-8601 UTC
    string, the numeric fields are coerced to float, and a missing upstream
    response time (reported as ``"-"`` by nginx) is treated as 0.0.
    """
    # TODO Handle nginx error logs
    record = json.loads(line)

    # Replace the epoch-seconds timestamp with an ISO-8601 UTC string.
    epoch = float(record['timestamp'])
    record['timestamp'] = datetime.datetime.utcfromtimestamp(epoch).isoformat()

    # nginx emits "-" when no upstream served the request.
    if '-' in record.get('upstream_response_time'):
        record['upstream_response_time'] = 0.0

    for key in ('body_bytes_sent', 'request_time', 'upstream_response_time'):
        record[key] = float(record[key])

    return dict(
        timestamp=record.get('timestamp', ' '),
        data=record,
        type='metric',
        event='nginx_event',
    )
def django(line):
    '''
    Parse one Django log line into {timestamp, level, data}.

    Lines shaped like ``[DD/Mon/YYYY HH:MM:SS] LEVEL [logger:lineno] message``
    are parsed; the message is decoded as JSON when it contains a valid JSON
    object, otherwise kept as the raw text that follows the logger name.
    Lines that do not match (e.g. traceback continuations) fall through to a
    minimal record carrying the raw line under ``data['raw']``.

    >>> out = django('[23/Aug/2017 11:35:25] INFO [app.middleware_log_req:50]View func called: hello')
    >>> out['level'], out['timestamp']
    ('INFO', '2017-08-23T11:35:25')
    '''
    #TODO we need to handle multi-line traceback logs as a single record
    data = {}
    log = re.findall(r'^(\[\d+/\w+/\d+ \d+:\d+:\d+\].*)', line)
    if len(log) == 1:
        data['timestamp'] = datetime.datetime.strptime(
            re.findall(r'(\d+/\w+/\d+ \d+:\d+:\d+)', log[0])[0],
            "%d/%b/%Y %H:%M:%S").isoformat()
        # The first [A-Z]+ run is the capital of the month abbreviation
        # inside the date, so the level is the second match.
        data['loglevel'] = re.findall(r'[A-Z]+', log[0])[1]
        data['logname'] = re.findall(r'\[\D+.\w+:\d+\]', log[0])[0]
        message = re.findall(r'\{.+\}', log[0])
        try:
            if len(message) > 0:
                message = json.loads(message[0])
            else:
                # No braces at all: keep the text after "] LEVEL [logger]".
                message = ''.join(re.split(']', log[0])[2:])
        except ValueError:
            # Braces present but not valid JSON: fall back to the raw text.
            message = ''.join(re.split(']', log[0])[2:])
        data['message'] = message
        return dict(
            timestamp=data['timestamp'],
            level=data['loglevel'],
            data=data,
        )
    else:
        # Unrecognised line (e.g. traceback continuation): wrap it raw.
        # BUG FIX: 'raw' was previously an undefined name ({raw: line}),
        # raising NameError whenever this branch was reached.
        return dict(
            timestamp=datetime.datetime.utcnow().isoformat(),
            data={'raw': line},
        )
def basescript(line):
    """Parse one basescript structured-JSON log line.

    basescript already emits fully structured JSON per line, so parsing is a
    plain ``json.loads``; the interesting fields are lifted to the top level
    and the whole decoded record is kept under ``data``.

    Raises KeyError if any of timestamp/id/type/level/event is missing.
    """
    record = json.loads(line)
    return {
        'timestamp': record['timestamp'],
        'data': record,
        'id': record['id'],
        'type': record['type'],
        'level': record['level'],
        'event': record['event'],
    }
def elasticsearch(line):
    '''
    Parse one Elasticsearch server log line into a metric event.

    Expected shape (bracketed fields, positional)::

        [2017-08-30T06:27:19,158] [WARN ][o.e.m.j.JvmGcMonitorService] [Glsuj_2] [gc][296816] overhead, spent [1.2s] collecting in the last [1.3s]

    Each "[...]" group is mapped positionally onto timestamp / level /
    message / plugin / garbage_collector / gc_count / query_time_ms /
    resp_time_ms.  Duration values suffixed "s" or "ms" are normalised to
    float milliseconds.  Lines that do not begin with a bracketed
    timestamp (Java stack traces, exception text — "Case 2" logs) fall
    through to a minimal {'raw': line} record.
    '''
    # TODO we need to handle case2 logs
    elasticsearch_log = line
    # Accept only lines starting with "[YYYY-MM-DDThh:mm:ss,mmm]".
    actuallog = re.findall(r'(\[\d+\-+\d+\d+\-+\d+\w+\d+:\d+:\d+,+\d\d\d+\].*)', elasticsearch_log)
    if len(actuallog) == 1:
        # Positional mapping of each bracket's contents onto these keys.
        keys = ['timestamp','level','message','plugin','garbage_collector','gc_count','query_time_ms', 'resp_time_ms']
        values = re.findall(r'\[(.*?)\]', actuallog[0])
        for index, i in enumerate(values):
            if not isinstance(i, str):
                continue
            # "433ms" / "1.2s" style durations -> float milliseconds.
            # NOTE: the 'ms' check must run before the plain 's' check.
            if len(re.findall(r'.*ms$', i)) > 0 and 'ms' in re.findall(r'.*ms$', i)[0]:
                num = re.split('ms', i)[0]
                values[index] = float(num)
                continue
            if len(re.findall(r'.*s$', i)) > 0 and 's' in re.findall(r'.*s$', i)[0]:
                num = re.split('s', i)[0]
                values[index] = float(num) * 1000
                continue
        data = dict(zip(keys,values))
        # ES pads the level to fixed width ("WARN "): strip one trailing
        # space inside data, but note the top-level 'level' returned below
        # is taken from values[1] and keeps the padding (see doctored
        # expected output in the original repo docstring).
        if 'level' in data and data['level'][-1] == ' ':
            data['level'] = data['level'][:-1]
        if 'gc_count' in data:
            # assumes the sixth bracketed field is numeric when present —
            # TODO confirm; a non-numeric value would raise ValueError here.
            data['gc_count'] = float(data['gc_count'])
        event = data['message']
        level=values[1]
        timestamp=values[0]
        return dict(
            timestamp=timestamp,
            level=level,
            type='metric',
            data=data,
            event=event
        )
    else:
        # Unparseable line: wrap it raw with a local-now timestamp.
        return dict(
            timestamp=datetime.datetime.isoformat(datetime.datetime.now()),
            data={'raw': line}
        )
# A new Elasticsearch log record starts with "[" (optionally preceded by
# whitespace); anything else is a continuation of the previous record.
LOG_BEGIN_PATTERN = [re.compile(r'^\s+\['), re.compile(r'^\[')]

def elasticsearch_ispartial_log(line):
    '''
    Return True when `line` is a continuation of a previous log record
    (Java exception text, "at ..." stack frames), False when it begins a
    new record.

    >>> elasticsearch_ispartial_log(' [2018-04-03T00:22:38,048][DEBUG][x] msg')
    False
    >>> elasticsearch_ispartial_log(' org.elasticsearch.ResourceAlreadyExistsException: exists')
    True
    >>> elasticsearch_ispartial_log(' at org.elasticsearch.cluster.metadata.Foo(Bar.java:151)')
    True
    '''
    # Fixes: removed the unused `match_result` accumulator and the
    # `!= None` comparison; compiled patterns are matched directly.
    return not any(p.match(line) for p in LOG_BEGIN_PATTERN)
elasticsearch.ispartial = elasticsearch_ispartial_log
|
deep-compute/logagg
|
logagg/formatters.py
|
django
|
python
|
def django(line):
    '''
    Parse one Django log line into {timestamp, level, data}.

    Lines shaped like ``[DD/Mon/YYYY HH:MM:SS] LEVEL [logger:lineno] message``
    are parsed; the message is decoded as JSON when it contains a valid JSON
    object, otherwise kept as the raw text that follows the logger name.
    Lines that do not match (e.g. traceback continuations) fall through to a
    minimal record carrying the raw line under ``data['raw']``.

    >>> out = django('[23/Aug/2017 11:35:25] INFO [app.middleware_log_req:50]View func called: hello')
    >>> out['level'], out['timestamp']
    ('INFO', '2017-08-23T11:35:25')
    '''
    #TODO we need to handle multi-line traceback logs as a single record
    data = {}
    log = re.findall(r'^(\[\d+/\w+/\d+ \d+:\d+:\d+\].*)', line)
    if len(log) == 1:
        data['timestamp'] = datetime.datetime.strptime(
            re.findall(r'(\d+/\w+/\d+ \d+:\d+:\d+)', log[0])[0],
            "%d/%b/%Y %H:%M:%S").isoformat()
        # The first [A-Z]+ run is the capital of the month abbreviation
        # inside the date, so the level is the second match.
        data['loglevel'] = re.findall(r'[A-Z]+', log[0])[1]
        data['logname'] = re.findall(r'\[\D+.\w+:\d+\]', log[0])[0]
        message = re.findall(r'\{.+\}', log[0])
        try:
            if len(message) > 0:
                message = json.loads(message[0])
            else:
                # No braces at all: keep the text after "] LEVEL [logger]".
                message = ''.join(re.split(']', log[0])[2:])
        except ValueError:
            # Braces present but not valid JSON: fall back to the raw text.
            message = ''.join(re.split(']', log[0])[2:])
        data['message'] = message
        return dict(
            timestamp=data['timestamp'],
            level=data['loglevel'],
            data=data,
        )
    else:
        # Unrecognised line (e.g. traceback continuation): wrap it raw.
        # BUG FIX: 'raw' was previously an undefined name ({raw: line}),
        # raising NameError whenever this branch was reached.
        return dict(
            timestamp=datetime.datetime.utcnow().isoformat(),
            data={'raw': line},
        )
|
>>> import pprint
>>> input_line1 = '[23/Aug/2017 11:35:25] INFO [app.middleware_log_req:50]View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }'
>>> output_line1 = django(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'loglevel': 'INFO',
'logname': '[app.middleware_log_req:50]',
'message': 'View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }',
'timestamp': '2017-08-23T11:35:25'},
'level': 'INFO',
'timestamp': '2017-08-23T11:35:25'}
>>> input_line2 = '[22/Sep/2017 06:32:15] INFO [app.function:6022] {"UUID": "c47f3530-9f5f-11e7-a559-917d011459f7", "timestamp":1506061932546, "misc": {"status": 200, "ready_state": 4, "end_time_ms": 1506061932546, "url": "/api/function?", "start_time_ms": 1506061932113, "response_length": 31, "status_message": "OK", "request_time_ms": 433}, "user": "root", "host_url": "localhost:8888", "message": "ajax success"}'
>>> output_line2 = django(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'loglevel': 'INFO',
'logname': '[app.function:6022]',
'message': {u'UUID': u'c47f3530-9f5f-11e7-a559-917d011459f7',
u'host_url': u'localhost:8888',
u'message': u'ajax success',
u'misc': {u'end_time_ms': 1506061932546L,
u'ready_state': 4,
u'request_time_ms': 433,
u'response_length': 31,
u'start_time_ms': 1506061932113L,
u'status': 200,
u'status_message': u'OK',
u'url': u'/api/function?'},
u'timestamp': 1506061932546L,
u'user': u'root'},
'timestamp': '2017-09-22T06:32:15'},
'level': 'INFO',
'timestamp': '2017-09-22T06:32:15'}
Case2:
[18/Sep/2017 05:40:36] ERROR [app.apps:78] failed to get the record, collection = Collection(Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True, serverselectiontimeoutms=3000), u'collection_cache'), u'function_dummy_version')
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/mongo_cache/mongocache.py", line 70, in __getitem__
result = self.collection.find_one({"_id": key})
OperationFailure: not authorized on collection_cache to execute command { find: "function", filter: { _id: "zydelig-cosine-20" }, limit: 1, singleBatch: true }
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/formatters.py#L199-L271
| null |
import re
import ujson as json
import datetime
class RawLog(dict): pass  # marker type: dict payload that still needs another formatter pass
#FIXME: cannot do both returns .. should it?
def docker_file_log_driver(line):
    """Parse one docker json-file log-driver line.

    The outer JSON envelope carries the original record JSON-encoded in its
    ``msg`` field.  When the inner record's ``extra`` dict names a
    ``formatter``, the message still needs another formatting pass, so a
    ``RawLog`` marker is returned instead of a final log dict.
    """
    log = json.loads(json.loads(line)['msg'])
    # Robustness fix: 'extra' may be absent or null; the original
    # `'formatter' in log.get('extra')` raised TypeError in that case.
    if 'formatter' in (log.get('extra') or {}):
        return RawLog(dict(formatter=log.get('extra').get('formatter'),
                           raw=log.get('message'),
                           host=log.get('host'),
                           timestamp=log.get('timestamp'),
                           )
                      )
    return dict(timestamp=log.get('timestamp'), data=log, type='log')
def haproxy(line):
    #TODO Handle all message formats
    '''
    Parse one syslog-wrapped HAProxy HTTP log line into a metric event.

    Example input::

        Apr 24 00:00:02 node haproxy[12298]: 1.1.1.1:48660 [24/Apr/2019:00:00:02.358] frontend~ backend/server 261/0/2/8/271 200 2406 - - ---- 4/4/0/1/0 0/0 {captured|headers} "GET /uri HTTP/1.1"

    The parse is purely positional on whitespace-split fields, so any
    deviation from this layout (see TODO above) raises IndexError.
    '''
    _line = line.strip().split()
    log = {}
    # _line[5] is "client_ip:client_port".
    log['client_server'] = _line[5].split(':')[0].strip()
    log['client_port'] = _line[5].split(':')[1].strip()
    # _line[6] is the bracketed accept date, e.g. [24/Apr/2019:00:00:02.358].
    _timestamp = re.findall(r'\[(.*?)\]', _line[6])[0]
    log['timestamp'] = datetime.datetime.strptime(_timestamp, '%d/%b/%Y:%H:%M:%S.%f').isoformat()
    log['front_end'] = _line[7].strip()
    log['backend'] = _line[8].strip()
    # _line[9] = "Tq/Tw/Tc/Tr/Tt": HAProxy timer fields in milliseconds;
    # the last component (total session time) is kept as resp_time.
    log['Tq'] = float(_line[9].split('/')[0].strip())
    log['Tw'] = float(_line[9].split('/')[1].strip())
    log['Tc'] = float(_line[9].split('/')[2].strip())
    log['Tr'] = float(_line[9].split('/')[3].strip())
    log['resp_time'] = float(_line[9].split('/')[-1].strip())
    log['status'] = _line[10].strip()
    log['bytes_read'] = float(_line[11].strip())
    # Captured request headers, if any, appear between { and }.
    log['_headers'] = re.findall(r'{(.*)}', line)
    log['haproxy_server'] = _line[3].strip()
    # The quoted request splits by whitespace: [-3]=method (leading quote
    # stripped), [-2]=URI; the trailing HTTP-version token is dropped.
    log['method'] = _line[-3].strip('"').strip()
    log['_api'] = _line[-2].strip()
    # _line[15] holds five '/'-separated connection counters.  NOTE(review):
    # this reads beconn from index 3 and srv_conn from index 2, which is the
    # reverse of the documented HAProxy actconn/feconn/beconn/srv_conn/retries
    # order — verify against the HAProxy log-format manual.
    log['retries'] = int(_line[15].split('/')[-1].strip())
    log['actconn'] = int(_line[15].split('/')[0].strip())
    log['feconn'] = int(_line[15].split('/')[1].strip())
    log['beconn'] = int(_line[15].split('/')[-2].strip())
    log['srv_conn'] = int(_line[15].split('/')[-3].strip())
    # _line[16] = "srv_queue/backend_queue".
    log['srv_queue'] = int(_line[16].split('/')[0].strip())
    log['backend_queue'] = int(_line[16].split('/')[1].strip())
    return dict(
        data=log,
        event='haproxy_event',
        timestamp=log.get('timestamp'),
        type='metric'
    )
def nginx_access(line):
    """Parse one nginx JSON-formatted access-log line into a metric event.

    The nginx log_format is assumed to emit one JSON object per line with a
    unix-epoch ``timestamp`` plus request fields.  The timestamp is rewritten
    as an ISO-8601 UTC string, a missing ("-") upstream response time becomes
    0.0, and the numeric fields are coerced to float.
    """
    #TODO Handle nginx error logs
    record = json.loads(line)
    epoch = float(record['timestamp'])
    record['timestamp'] = datetime.datetime.utcfromtimestamp(epoch).isoformat()
    # nginx logs "-" when there was no upstream; treat that as zero time.
    if '-' in record.get('upstream_response_time'):
        record['upstream_response_time'] = 0.0
    for numeric_field in ('body_bytes_sent', 'request_time', 'upstream_response_time'):
        record[numeric_field] = float(record[numeric_field])
    return {
        'timestamp': record.get('timestamp', ' '),
        'data': record,
        'type': 'metric',
        'event': 'nginx_event',
    }
def mongodb(line):
    """Parse one MongoDB server log line into {timestamp, data, type}.

    MongoDB log lines look like::

        2017-08-17T07:56:33.489+0200 I REPL [signalProcessingThread] shutting down ...

    The first four whitespace-separated tokens are timestamp, severity,
    component and context; everything after them is the free-text message.
    """
    field_names = ('timestamp', 'severity', 'component', 'context', 'message')
    fields = re.split(r'\s+', line, maxsplit=4)
    parsed = dict(zip(field_names, fields))
    return {
        'timestamp': fields[0],
        'data': parsed,
        'type': 'log',
    }
def basescript(line):
    """Parse one basescript structured-JSON log line.

    basescript already emits fully structured JSON per line, so parsing is a
    plain ``json.loads``; the interesting fields are lifted to the top level
    and the whole decoded record is kept under ``data``.

    Raises KeyError if any of timestamp/id/type/level/event is missing.
    """
    record = json.loads(line)
    return {
        'timestamp': record['timestamp'],
        'data': record,
        'id': record['id'],
        'type': record['type'],
        'level': record['level'],
        'event': record['event'],
    }
def elasticsearch(line):
    '''
    Parse one Elasticsearch server log line into a metric event.

    Expected shape (bracketed fields, positional)::

        [2017-08-30T06:27:19,158] [WARN ][o.e.m.j.JvmGcMonitorService] [Glsuj_2] [gc][296816] overhead, spent [1.2s] collecting in the last [1.3s]

    Each "[...]" group is mapped positionally onto timestamp / level /
    message / plugin / garbage_collector / gc_count / query_time_ms /
    resp_time_ms.  Duration values suffixed "s" or "ms" are normalised to
    float milliseconds.  Lines that do not begin with a bracketed
    timestamp (Java stack traces, exception text — "Case 2" logs) fall
    through to a minimal {'raw': line} record.
    '''
    # TODO we need to handle case2 logs
    elasticsearch_log = line
    # Accept only lines starting with "[YYYY-MM-DDThh:mm:ss,mmm]".
    actuallog = re.findall(r'(\[\d+\-+\d+\d+\-+\d+\w+\d+:\d+:\d+,+\d\d\d+\].*)', elasticsearch_log)
    if len(actuallog) == 1:
        # Positional mapping of each bracket's contents onto these keys.
        keys = ['timestamp','level','message','plugin','garbage_collector','gc_count','query_time_ms', 'resp_time_ms']
        values = re.findall(r'\[(.*?)\]', actuallog[0])
        for index, i in enumerate(values):
            if not isinstance(i, str):
                continue
            # "433ms" / "1.2s" style durations -> float milliseconds.
            # NOTE: the 'ms' check must run before the plain 's' check.
            if len(re.findall(r'.*ms$', i)) > 0 and 'ms' in re.findall(r'.*ms$', i)[0]:
                num = re.split('ms', i)[0]
                values[index] = float(num)
                continue
            if len(re.findall(r'.*s$', i)) > 0 and 's' in re.findall(r'.*s$', i)[0]:
                num = re.split('s', i)[0]
                values[index] = float(num) * 1000
                continue
        data = dict(zip(keys,values))
        # ES pads the level to fixed width ("WARN "): strip one trailing
        # space inside data, but note the top-level 'level' returned below
        # is taken from values[1] and keeps the padding.
        if 'level' in data and data['level'][-1] == ' ':
            data['level'] = data['level'][:-1]
        if 'gc_count' in data:
            # assumes the sixth bracketed field is numeric when present —
            # TODO confirm; a non-numeric value would raise ValueError here.
            data['gc_count'] = float(data['gc_count'])
        event = data['message']
        level=values[1]
        timestamp=values[0]
        return dict(
            timestamp=timestamp,
            level=level,
            type='metric',
            data=data,
            event=event
        )
    else:
        # Unparseable line: wrap it raw with a local-now timestamp.
        return dict(
            timestamp=datetime.datetime.isoformat(datetime.datetime.now()),
            data={'raw': line}
        )
# A new Elasticsearch log record starts with "[" (optionally preceded by
# whitespace); anything else is a continuation of the previous record.
LOG_BEGIN_PATTERN = [re.compile(r'^\s+\['), re.compile(r'^\[')]

def elasticsearch_ispartial_log(line):
    '''
    Return True when `line` is a continuation of a previous log record
    (Java exception text, "at ..." stack frames), False when it begins a
    new record.

    >>> elasticsearch_ispartial_log(' [2018-04-03T00:22:38,048][DEBUG][x] msg')
    False
    >>> elasticsearch_ispartial_log(' org.elasticsearch.ResourceAlreadyExistsException: exists')
    True
    >>> elasticsearch_ispartial_log(' at org.elasticsearch.cluster.metadata.Foo(Bar.java:151)')
    True
    '''
    # Fixes: removed the unused `match_result` accumulator and the
    # `!= None` comparison; compiled patterns are matched directly.
    return not any(p.match(line) for p in LOG_BEGIN_PATTERN)
elasticsearch.ispartial = elasticsearch_ispartial_log
|
deep-compute/logagg
|
logagg/formatters.py
|
basescript
|
python
|
def basescript(line):
'''
>>> import pprint
>>> input_line = '{"level": "warning", "timestamp": "2018-02-07T06:37:00.297610Z", "event": "exited via keyboard interrupt", "type": "log", "id": "20180207T063700_4d03fe800bd111e89ecb96000007bc65", "_": {"ln": 58, "file": "/usr/local/lib/python2.7/dist-packages/basescript/basescript.py", "name": "basescript.basescript", "fn": "start"}}'
>>> output_line1 = basescript(input_line)
>>> pprint.pprint(output_line1)
{'data': {u'_': {u'file': u'/usr/local/lib/python2.7/dist-packages/basescript/basescript.py',
u'fn': u'start',
u'ln': 58,
u'name': u'basescript.basescript'},
u'event': u'exited via keyboard interrupt',
u'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65',
u'level': u'warning',
u'timestamp': u'2018-02-07T06:37:00.297610Z',
u'type': u'log'},
'event': u'exited via keyboard interrupt',
'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65',
'level': u'warning',
'timestamp': u'2018-02-07T06:37:00.297610Z',
'type': u'log'}
'''
log = json.loads(line)
return dict(
timestamp=log['timestamp'],
data=log,
id=log['id'],
type=log['type'],
level=log['level'],
event=log['event']
)
|
>>> import pprint
>>> input_line = '{"level": "warning", "timestamp": "2018-02-07T06:37:00.297610Z", "event": "exited via keyboard interrupt", "type": "log", "id": "20180207T063700_4d03fe800bd111e89ecb96000007bc65", "_": {"ln": 58, "file": "/usr/local/lib/python2.7/dist-packages/basescript/basescript.py", "name": "basescript.basescript", "fn": "start"}}'
>>> output_line1 = basescript(input_line)
>>> pprint.pprint(output_line1)
{'data': {u'_': {u'file': u'/usr/local/lib/python2.7/dist-packages/basescript/basescript.py',
u'fn': u'start',
u'ln': 58,
u'name': u'basescript.basescript'},
u'event': u'exited via keyboard interrupt',
u'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65',
u'level': u'warning',
u'timestamp': u'2018-02-07T06:37:00.297610Z',
u'type': u'log'},
'event': u'exited via keyboard interrupt',
'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65',
'level': u'warning',
'timestamp': u'2018-02-07T06:37:00.297610Z',
'type': u'log'}
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/formatters.py#L273-L304
| null |
import re
import ujson as json
import datetime
class RawLog(dict): pass  # marker type: dict payload that still needs another formatter pass
#FIXME: cannot do both returns .. should it?
def docker_file_log_driver(line):
    """Parse one docker json-file log-driver line.

    The outer JSON envelope carries the original record JSON-encoded in its
    ``msg`` field.  When the inner record's ``extra`` dict names a
    ``formatter``, the message still needs another formatting pass, so a
    ``RawLog`` marker is returned instead of a final log dict.
    """
    log = json.loads(json.loads(line)['msg'])
    # Robustness fix: 'extra' may be absent or null; the original
    # `'formatter' in log.get('extra')` raised TypeError in that case.
    if 'formatter' in (log.get('extra') or {}):
        return RawLog(dict(formatter=log.get('extra').get('formatter'),
                           raw=log.get('message'),
                           host=log.get('host'),
                           timestamp=log.get('timestamp'),
                           )
                      )
    return dict(timestamp=log.get('timestamp'), data=log, type='log')
def haproxy(line):
    #TODO Handle all message formats
    '''
    Parse one syslog-wrapped HAProxy HTTP log line into a metric event.

    Example input::

        Apr 24 00:00:02 node haproxy[12298]: 1.1.1.1:48660 [24/Apr/2019:00:00:02.358] frontend~ backend/server 261/0/2/8/271 200 2406 - - ---- 4/4/0/1/0 0/0 {captured|headers} "GET /uri HTTP/1.1"

    The parse is purely positional on whitespace-split fields, so any
    deviation from this layout (see TODO above) raises IndexError.
    '''
    _line = line.strip().split()
    log = {}
    # _line[5] is "client_ip:client_port".
    log['client_server'] = _line[5].split(':')[0].strip()
    log['client_port'] = _line[5].split(':')[1].strip()
    # _line[6] is the bracketed accept date, e.g. [24/Apr/2019:00:00:02.358].
    _timestamp = re.findall(r'\[(.*?)\]', _line[6])[0]
    log['timestamp'] = datetime.datetime.strptime(_timestamp, '%d/%b/%Y:%H:%M:%S.%f').isoformat()
    log['front_end'] = _line[7].strip()
    log['backend'] = _line[8].strip()
    # _line[9] = "Tq/Tw/Tc/Tr/Tt": HAProxy timer fields in milliseconds;
    # the last component (total session time) is kept as resp_time.
    log['Tq'] = float(_line[9].split('/')[0].strip())
    log['Tw'] = float(_line[9].split('/')[1].strip())
    log['Tc'] = float(_line[9].split('/')[2].strip())
    log['Tr'] = float(_line[9].split('/')[3].strip())
    log['resp_time'] = float(_line[9].split('/')[-1].strip())
    log['status'] = _line[10].strip()
    log['bytes_read'] = float(_line[11].strip())
    # Captured request headers, if any, appear between { and }.
    log['_headers'] = re.findall(r'{(.*)}', line)
    log['haproxy_server'] = _line[3].strip()
    # The quoted request splits by whitespace: [-3]=method (leading quote
    # stripped), [-2]=URI; the trailing HTTP-version token is dropped.
    log['method'] = _line[-3].strip('"').strip()
    log['_api'] = _line[-2].strip()
    # _line[15] holds five '/'-separated connection counters.  NOTE(review):
    # this reads beconn from index 3 and srv_conn from index 2, which is the
    # reverse of the documented HAProxy actconn/feconn/beconn/srv_conn/retries
    # order — verify against the HAProxy log-format manual.
    log['retries'] = int(_line[15].split('/')[-1].strip())
    log['actconn'] = int(_line[15].split('/')[0].strip())
    log['feconn'] = int(_line[15].split('/')[1].strip())
    log['beconn'] = int(_line[15].split('/')[-2].strip())
    log['srv_conn'] = int(_line[15].split('/')[-3].strip())
    # _line[16] = "srv_queue/backend_queue".
    log['srv_queue'] = int(_line[16].split('/')[0].strip())
    log['backend_queue'] = int(_line[16].split('/')[1].strip())
    return dict(
        data=log,
        event='haproxy_event',
        timestamp=log.get('timestamp'),
        type='metric'
    )
def nginx_access(line):
    """Parse one nginx JSON-formatted access-log line into a metric event.

    The nginx log_format is assumed to emit one JSON object per line with a
    unix-epoch ``timestamp`` plus request fields.  The timestamp is rewritten
    as an ISO-8601 UTC string, a missing ("-") upstream response time becomes
    0.0, and the numeric fields are coerced to float.
    """
    #TODO Handle nginx error logs
    record = json.loads(line)
    epoch = float(record['timestamp'])
    record['timestamp'] = datetime.datetime.utcfromtimestamp(epoch).isoformat()
    # nginx logs "-" when there was no upstream; treat that as zero time.
    if '-' in record.get('upstream_response_time'):
        record['upstream_response_time'] = 0.0
    for numeric_field in ('body_bytes_sent', 'request_time', 'upstream_response_time'):
        record[numeric_field] = float(record[numeric_field])
    return {
        'timestamp': record.get('timestamp', ' '),
        'data': record,
        'type': 'metric',
        'event': 'nginx_event',
    }
def mongodb(line):
    """Parse one MongoDB server log line into {timestamp, data, type}.

    MongoDB log lines look like::

        2017-08-17T07:56:33.489+0200 I REPL [signalProcessingThread] shutting down ...

    The first four whitespace-separated tokens are timestamp, severity,
    component and context; everything after them is the free-text message.
    """
    field_names = ('timestamp', 'severity', 'component', 'context', 'message')
    fields = re.split(r'\s+', line, maxsplit=4)
    parsed = dict(zip(field_names, fields))
    return {
        'timestamp': fields[0],
        'data': parsed,
        'type': 'log',
    }
def django(line):
    """Parse one django log line into a structured record.

    Recognised lines look like::

        [DD/Mon/YYYY HH:MM:SS] LEVEL [logger.name:lineno] message

    If the message embeds a JSON object it is decoded; otherwise the raw
    text after the logger name is kept.  Anything that does not start with
    the bracketed timestamp (e.g. traceback continuation lines, "case2" in
    the TODO below) is returned as a raw record stamped with the current
    UTC time.

    Returns:
        dict with ``timestamp`` (ISO-8601), ``level`` and ``data`` for a
        recognised line, else ``{'timestamp': ..., 'data': {'raw': line}}``.
    """
    # TODO we need to handle multi-line traceback (case2) logs
    data = {}
    log = re.findall(r'^(\[\d+/\w+/\d+ \d+:\d+:\d+\].*)', line)
    if len(log) == 1:
        data['timestamp'] = datetime.datetime.strptime(
            re.findall(r'(\d+/\w+/\d+ \d+:\d+:\d+)', log[0])[0],
            "%d/%b/%Y %H:%M:%S").isoformat()
        # The first run of capitals is the month abbreviation's leading
        # letter, so the log level is the second run.
        data['loglevel'] = re.findall('[A-Z]+', log[0])[1]
        data['logname'] = re.findall(r'\[\D+.\w+:\d+\]', log[0])[0]
        message = re.findall(r'\{.+\}', log[0])
        try:
            if len(message) > 0:
                message = json.loads(message[0])
            else:
                message = ''.join(re.split(']', log[0])[2:])
        except ValueError:
            # Looked like JSON but was not decodable; fall back to raw text.
            message = ''.join(re.split(']', log[0])[2:])
        data['message'] = message
        return dict(
            timestamp=data['timestamp'],
            level=data['loglevel'],
            data=data,
        )
    else:
        return dict(
            timestamp=datetime.datetime.utcnow().isoformat(),
            # Fixed: `raw` was an undefined name (NameError on this path);
            # the key must be the string 'raw'.
            data={'raw': line},
        )
def elasticsearch(line):
    '''
    Parse one elasticsearch server log line into a metric record.

    The line is a sequence of ``[...]`` bracketed fields: timestamp,
    level, logger name, node/plugin name, then (for GC overhead lines)
    collector name, collection count and two durations.  Durations such
    as ``1.2s`` / ``800ms`` are normalised to float milliseconds.

    >>> import pprint
    >>> input_line = '[2017-08-30T06:27:19,158] [WARN ][o.e.m.j.JvmGcMonitorService] [Glsuj_2] [gc][296816] overhead, spent [1.2s] collecting in the last [1.3s]'
    >>> output_line = elasticsearch(input_line)
    >>> pprint.pprint(output_line)
    {'data': {'garbage_collector': 'gc',
              'gc_count': 296816.0,
              'level': 'WARN',
              'message': 'o.e.m.j.JvmGcMonitorService',
              'plugin': 'Glsuj_2',
              'query_time_ms': 1200.0,
              'resp_time_ms': 1300.0,
              'timestamp': '2017-08-30T06:27:19,158'},
     'event': 'o.e.m.j.JvmGcMonitorService',
     'level': 'WARN ',
     'timestamp': '2017-08-30T06:27:19,158',
     'type': 'metric'}
    Case 2:
    [2017-09-13T23:15:00,415][WARN ][o.e.i.e.Engine ] [Glsuj_2] [filebeat-2017.09.09][3] failed engine [index]
    java.nio.file.FileSystemException: /home/user/elasticsearch/data/nodes/0/indices/jsVSO6f3Rl-wwBpQyNRCbQ/3/index/_0.fdx: Too many open files
    at sun.nio.fs.UnixException.translateToIOException(UnixException.java:91) ~[?:?]
    '''
    # TODO we need to handle case2 logs
    elasticsearch_log = line
    # A real entry begins with a bracketed "[YYYY-MM-DDThh:mm:ss,mmm]" stamp;
    # anything else (e.g. a traceback continuation) takes the raw branch.
    actuallog = re.findall(r'(\[\d+\-+\d+\d+\-+\d+\w+\d+:\d+:\d+,+\d\d\d+\].*)', elasticsearch_log)
    if len(actuallog) == 1:
        # Positional meaning of the bracketed fields on a GC overhead line.
        keys = ['timestamp','level','message','plugin','garbage_collector','gc_count','query_time_ms', 'resp_time_ms']
        values = re.findall(r'\[(.*?)\]', actuallog[0])
        # Normalise duration strings: "...ms" -> float ms; plain "...s"
        # -> float ms.  NOTE(review): any bracketed value that merely ends
        # in 's' (not just a duration) would be converted too -- verify.
        for index, i in enumerate(values):
            if not isinstance(i, str):
                continue
            if len(re.findall(r'.*ms$', i)) > 0 and 'ms' in re.findall(r'.*ms$', i)[0]:
                num = re.split('ms', i)[0]
                values[index] = float(num)
                continue
            if len(re.findall(r'.*s$', i)) > 0 and 's' in re.findall(r'.*s$', i)[0]:
                num = re.split('s', i)[0]
                values[index] = float(num) * 1000
                continue
        data = dict(zip(keys,values))
        # The level is space-padded inside its brackets (e.g. "WARN ");
        # trim one trailing space in `data`, but note the top-level
        # `level` below deliberately keeps the padding (values[1]).
        if 'level' in data and data['level'][-1] == ' ':
            data['level'] = data['level'][:-1]
        if 'gc_count' in data:
            data['gc_count'] = float(data['gc_count'])
        event = data['message']
        level=values[1]
        timestamp=values[0]
        return dict(
            timestamp=timestamp,
            level=level,
            type='metric',
            data=data,
            event=event
        )
    else:
        # Unrecognised line: stamp it with the current time, keep raw text.
        return dict(
            timestamp=datetime.datetime.isoformat(datetime.datetime.now()),
            data={'raw': line}
        )
# A fresh elasticsearch entry starts with a bracketed timestamp,
# optionally preceded by whitespace.
LOG_BEGIN_PATTERN = [re.compile(r'^\s+\['), re.compile(r'^\[')]


def elasticsearch_ispartial_log(line):
    '''
    Return True when *line* continues a previous log entry
    (e.g. a traceback line), False when it starts a new one.

    >>> line1 = ' [2018-04-03T00:22:38,048][DEBUG][o.e.c.u.c.QueueResizingEsThreadPoolExecutor] [search17/search]: there were [2000] tasks in [809ms], avg task time [28.4micros], EWMA task execution [790nanos], [35165.36 tasks/s], optimal queue is [35165], current capacity [1000]'
    >>> line2 = ' org.elasticsearch.ResourceAlreadyExistsException: index [media_corpus_refresh/6_3sRAMsRr2r63J6gbOjQw] already exists'
    >>> line3 = ' at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.validateIndexName(MetaDataCreateIndexService.java:151) ~[elasticsearch-6.2.0.jar:6.2.0]'
    >>> elasticsearch_ispartial_log(line1)
    False
    >>> elasticsearch_ispartial_log(line2)
    True
    >>> elasticsearch_ispartial_log(line3)
    True
    '''
    # Fixed: dropped the unused `match_result` accumulator and the
    # non-idiomatic `!= None` comparison.
    for pattern in LOG_BEGIN_PATTERN:
        if pattern.match(line) is not None:
            return False
    return True
elasticsearch.ispartial = elasticsearch_ispartial_log
|
deep-compute/logagg
|
logagg/formatters.py
|
elasticsearch
|
python
|
def elasticsearch(line):
'''
>>> import pprint
>>> input_line = '[2017-08-30T06:27:19,158] [WARN ][o.e.m.j.JvmGcMonitorService] [Glsuj_2] [gc][296816] overhead, spent [1.2s] collecting in the last [1.3s]'
>>> output_line = elasticsearch(input_line)
>>> pprint.pprint(output_line)
{'data': {'garbage_collector': 'gc',
'gc_count': 296816.0,
'level': 'WARN',
'message': 'o.e.m.j.JvmGcMonitorService',
'plugin': 'Glsuj_2',
'query_time_ms': 1200.0,
'resp_time_ms': 1300.0,
'timestamp': '2017-08-30T06:27:19,158'},
'event': 'o.e.m.j.JvmGcMonitorService',
'level': 'WARN ',
'timestamp': '2017-08-30T06:27:19,158',
'type': 'metric'}
Case 2:
[2017-09-13T23:15:00,415][WARN ][o.e.i.e.Engine ] [Glsuj_2] [filebeat-2017.09.09][3] failed engine [index]
java.nio.file.FileSystemException: /home/user/elasticsearch/data/nodes/0/indices/jsVSO6f3Rl-wwBpQyNRCbQ/3/index/_0.fdx: Too many open files
at sun.nio.fs.UnixException.translateToIOException(UnixException.java:91) ~[?:?]
'''
# TODO we need to handle case2 logs
elasticsearch_log = line
actuallog = re.findall(r'(\[\d+\-+\d+\d+\-+\d+\w+\d+:\d+:\d+,+\d\d\d+\].*)', elasticsearch_log)
if len(actuallog) == 1:
keys = ['timestamp','level','message','plugin','garbage_collector','gc_count','query_time_ms', 'resp_time_ms']
values = re.findall(r'\[(.*?)\]', actuallog[0])
for index, i in enumerate(values):
if not isinstance(i, str):
continue
if len(re.findall(r'.*ms$', i)) > 0 and 'ms' in re.findall(r'.*ms$', i)[0]:
num = re.split('ms', i)[0]
values[index] = float(num)
continue
if len(re.findall(r'.*s$', i)) > 0 and 's' in re.findall(r'.*s$', i)[0]:
num = re.split('s', i)[0]
values[index] = float(num) * 1000
continue
data = dict(zip(keys,values))
if 'level' in data and data['level'][-1] == ' ':
data['level'] = data['level'][:-1]
if 'gc_count' in data:
data['gc_count'] = float(data['gc_count'])
event = data['message']
level=values[1]
timestamp=values[0]
return dict(
timestamp=timestamp,
level=level,
type='metric',
data=data,
event=event
)
else:
return dict(
timestamp=datetime.datetime.isoformat(datetime.datetime.now()),
data={'raw': line}
)
|
>>> import pprint
>>> input_line = '[2017-08-30T06:27:19,158] [WARN ][o.e.m.j.JvmGcMonitorService] [Glsuj_2] [gc][296816] overhead, spent [1.2s] collecting in the last [1.3s]'
>>> output_line = elasticsearch(input_line)
>>> pprint.pprint(output_line)
{'data': {'garbage_collector': 'gc',
'gc_count': 296816.0,
'level': 'WARN',
'message': 'o.e.m.j.JvmGcMonitorService',
'plugin': 'Glsuj_2',
'query_time_ms': 1200.0,
'resp_time_ms': 1300.0,
'timestamp': '2017-08-30T06:27:19,158'},
'event': 'o.e.m.j.JvmGcMonitorService',
'level': 'WARN ',
'timestamp': '2017-08-30T06:27:19,158',
'type': 'metric'}
Case 2:
[2017-09-13T23:15:00,415][WARN ][o.e.i.e.Engine ] [Glsuj_2] [filebeat-2017.09.09][3] failed engine [index]
java.nio.file.FileSystemException: /home/user/elasticsearch/data/nodes/0/indices/jsVSO6f3Rl-wwBpQyNRCbQ/3/index/_0.fdx: Too many open files
at sun.nio.fs.UnixException.translateToIOException(UnixException.java:91) ~[?:?]
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/formatters.py#L306-L370
| null |
import re
import ujson as json
import datetime
# Marker type: a RawLog tells the caller the payload still needs to be
# run through the formatter named inside it.
class RawLog(dict): pass


#FIXME: cannot do both returns .. should it?
def docker_file_log_driver(line):
    """Decode one docker file-log-driver line (a JSON envelope whose
    "msg" field is itself a JSON-encoded application log record)."""
    log = json.loads(json.loads(line)['msg'])
    # NOTE(review): assumes the record always carries an "extra" mapping;
    # a record without it would raise TypeError here -- confirm with callers.
    if 'formatter' in log.get('extra'):
        # The record names its own formatter: hand back a RawLog so the
        # caller re-formats `raw` with that formatter.
        return RawLog(dict(formatter=log.get('extra').get('formatter'),
                           raw=log.get('message'),
                           host=log.get('host'),
                           timestamp=log.get('timestamp'),
                           )
                      )
    return dict(timestamp=log.get('timestamp'), data=log, type='log')
def haproxy(line):
    #TODO Handle all message formats
    """Parse one haproxy HTTP access-log line into a metric record.

    Fields are extracted positionally from the whitespace-split line:
    client address, accept timestamp, frontend/backend names, the
    Tq/Tw/Tc/Tr timer group (total time in its last slot), status, byte
    count, the connection-counter group, queue sizes, captured headers
    and the quoted request line.
    """
    tokens = line.strip().split()
    parsed = {}

    host_part, port_part = tokens[5].split(':')[0], tokens[5].split(':')[1]
    parsed['client_server'] = host_part.strip()
    parsed['client_port'] = port_part.strip()

    accept_ts = re.findall(r'\[(.*?)\]', tokens[6])[0]
    parsed['timestamp'] = datetime.datetime.strptime(
        accept_ts, '%d/%b/%Y:%H:%M:%S.%f').isoformat()

    parsed['front_end'] = tokens[7].strip()
    parsed['backend'] = tokens[8].strip()

    timers = tokens[9].split('/')
    parsed['Tq'] = float(timers[0].strip())
    parsed['Tw'] = float(timers[1].strip())
    parsed['Tc'] = float(timers[2].strip())
    parsed['Tr'] = float(timers[3].strip())
    parsed['resp_time'] = float(timers[-1].strip())

    parsed['status'] = tokens[10].strip()
    parsed['bytes_read'] = float(tokens[11].strip())
    # captured headers are logged between braces in the raw line
    parsed['_headers'] = re.findall(r'{(.*)}', line)
    parsed['haproxy_server'] = tokens[3].strip()
    # request line is quoted at the end: "METHOD /path HTTP/x.y"
    parsed['method'] = tokens[-3].strip('"').strip()
    parsed['_api'] = tokens[-2].strip()

    counters = tokens[15].split('/')
    parsed['retries'] = int(counters[-1].strip())
    parsed['actconn'] = int(counters[0].strip())
    parsed['feconn'] = int(counters[1].strip())
    parsed['beconn'] = int(counters[-2].strip())
    parsed['srv_conn'] = int(counters[-3].strip())

    queues = tokens[16].split('/')
    parsed['srv_queue'] = int(queues[0].strip())
    parsed['backend_queue'] = int(queues[1].strip())

    return dict(
        data=parsed,
        event='haproxy_event',
        timestamp=parsed.get('timestamp'),
        type='metric'
    )
def nginx_access(line):
    """Parse one JSON-formatted nginx access-log line into a metric record.

    The epoch `timestamp` is replaced with its ISO-8601 (UTC) form, a
    missing upstream time ("-") becomes 0.0, and the numeric fields are
    coerced to float.
    """
    #TODO Handle nginx error logs
    record = json.loads(line)

    epoch = float(record['timestamp'])
    record['timestamp'] = datetime.datetime.utcfromtimestamp(epoch).isoformat()

    # nginx logs "-" when the request never reached an upstream server
    if '-' in record.get('upstream_response_time'):
        record['upstream_response_time'] = 0.0

    for numeric_field in ('body_bytes_sent', 'request_time',
                          'upstream_response_time'):
        record[numeric_field] = float(record[numeric_field])

    return dict(
        timestamp=record.get('timestamp', ' '),
        data=record,
        type='metric',
        event='nginx_event',
    )
def mongodb(line):
    '''
    Parse one mongodb server log line into a structured record.

    A line reads "timestamp severity component context message"; only the
    first four whitespace runs split fields, so the free-form message
    stays intact.

    >>> import pprint
    >>> input_line1 = '2017-08-17T07:56:33.489+0200 I REPL [signalProcessingThread] shutting down replication subsystems'
    >>> output_line1 = mongodb(input_line1)
    >>> pprint.pprint(output_line1)
    {'data': {'component': 'REPL',
              'context': '[signalProcessingThread]',
              'message': 'shutting down replication subsystems',
              'severity': 'I',
              'timestamp': '2017-08-17T07:56:33.489+0200'},
     'timestamp': '2017-08-17T07:56:33.489+0200',
     'type': 'log'}
    >>> input_line2 = '2017-08-17T07:56:33.515+0200 W NETWORK [initandlisten] No primary detected for set confsvr_repl1'
    >>> output_line2 = mongodb(input_line2)
    >>> pprint.pprint(output_line2)
    {'data': {'component': 'NETWORK',
              'context': '[initandlisten]',
              'message': 'No primary detected for set confsvr_repl1',
              'severity': 'W',
              'timestamp': '2017-08-17T07:56:33.515+0200'},
     'timestamp': '2017-08-17T07:56:33.515+0200',
     'type': 'log'}
    '''
    # maxsplit=4 keeps everything after the context as one message field.
    keys = ['timestamp', 'severity', 'component', 'context', 'message']
    values = re.split(r'\s+', line, maxsplit=4)
    mongodb_log = dict(zip(keys,values))
    return dict(
        timestamp=values[0],
        data=mongodb_log,
        type='log',
    )
def django(line):
    """Parse one django log line into a structured record.

    Recognised lines look like::

        [DD/Mon/YYYY HH:MM:SS] LEVEL [logger.name:lineno] message

    If the message embeds a JSON object it is decoded; otherwise the raw
    text after the logger name is kept.  Anything that does not start with
    the bracketed timestamp (e.g. traceback continuation lines, "case2" in
    the TODO below) is returned as a raw record stamped with the current
    UTC time.

    Returns:
        dict with ``timestamp`` (ISO-8601), ``level`` and ``data`` for a
        recognised line, else ``{'timestamp': ..., 'data': {'raw': line}}``.
    """
    # TODO we need to handle multi-line traceback (case2) logs
    data = {}
    log = re.findall(r'^(\[\d+/\w+/\d+ \d+:\d+:\d+\].*)', line)
    if len(log) == 1:
        data['timestamp'] = datetime.datetime.strptime(
            re.findall(r'(\d+/\w+/\d+ \d+:\d+:\d+)', log[0])[0],
            "%d/%b/%Y %H:%M:%S").isoformat()
        # The first run of capitals is the month abbreviation's leading
        # letter, so the log level is the second run.
        data['loglevel'] = re.findall('[A-Z]+', log[0])[1]
        data['logname'] = re.findall(r'\[\D+.\w+:\d+\]', log[0])[0]
        message = re.findall(r'\{.+\}', log[0])
        try:
            if len(message) > 0:
                message = json.loads(message[0])
            else:
                message = ''.join(re.split(']', log[0])[2:])
        except ValueError:
            # Looked like JSON but was not decodable; fall back to raw text.
            message = ''.join(re.split(']', log[0])[2:])
        data['message'] = message
        return dict(
            timestamp=data['timestamp'],
            level=data['loglevel'],
            data=data,
        )
    else:
        return dict(
            timestamp=datetime.datetime.utcnow().isoformat(),
            # Fixed: `raw` was an undefined name (NameError on this path);
            # the key must be the string 'raw'.
            data={'raw': line},
        )
def basescript(line):
    """Parse one basescript structured-JSON log line.

    basescript already emits fully structured records, so this simply
    decodes the JSON and lifts the common envelope fields (timestamp,
    id, type, level, event) to the top level, keeping the whole record
    under ``data``.
    """
    record = json.loads(line)
    return {
        'timestamp': record['timestamp'],
        'data': record,
        'id': record['id'],
        'type': record['type'],
        'level': record['level'],
        'event': record['event'],
    }
# A fresh elasticsearch entry starts with a bracketed timestamp,
# optionally preceded by whitespace.
LOG_BEGIN_PATTERN = [re.compile(r'^\s+\['), re.compile(r'^\[')]


def elasticsearch_ispartial_log(line):
    '''
    Return True when *line* continues a previous log entry
    (e.g. a traceback line), False when it starts a new one.

    >>> line1 = ' [2018-04-03T00:22:38,048][DEBUG][o.e.c.u.c.QueueResizingEsThreadPoolExecutor] [search17/search]: there were [2000] tasks in [809ms], avg task time [28.4micros], EWMA task execution [790nanos], [35165.36 tasks/s], optimal queue is [35165], current capacity [1000]'
    >>> line2 = ' org.elasticsearch.ResourceAlreadyExistsException: index [media_corpus_refresh/6_3sRAMsRr2r63J6gbOjQw] already exists'
    >>> line3 = ' at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.validateIndexName(MetaDataCreateIndexService.java:151) ~[elasticsearch-6.2.0.jar:6.2.0]'
    >>> elasticsearch_ispartial_log(line1)
    False
    >>> elasticsearch_ispartial_log(line2)
    True
    >>> elasticsearch_ispartial_log(line3)
    True
    '''
    # Fixed: dropped the unused `match_result` accumulator and the
    # non-idiomatic `!= None` comparison.
    for pattern in LOG_BEGIN_PATTERN:
        if pattern.match(line) is not None:
            return False
    return True
elasticsearch.ispartial = elasticsearch_ispartial_log
|
deep-compute/logagg
|
logagg/formatters.py
|
elasticsearch_ispartial_log
|
python
|
def elasticsearch_ispartial_log(line):
'''
>>> line1 = ' [2018-04-03T00:22:38,048][DEBUG][o.e.c.u.c.QueueResizingEsThreadPoolExecutor] [search17/search]: there were [2000] tasks in [809ms], avg task time [28.4micros], EWMA task execution [790nanos], [35165.36 tasks/s], optimal queue is [35165], current capacity [1000]'
>>> line2 = ' org.elasticsearch.ResourceAlreadyExistsException: index [media_corpus_refresh/6_3sRAMsRr2r63J6gbOjQw] already exists'
>>> line3 = ' at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.validateIndexName(MetaDataCreateIndexService.java:151) ~[elasticsearch-6.2.0.jar:6.2.0]'
>>> elasticsearch_ispartial_log(line1)
False
>>> elasticsearch_ispartial_log(line2)
True
>>> elasticsearch_ispartial_log(line3)
True
'''
match_result = []
for p in LOG_BEGIN_PATTERN:
if re.match(p, line) != None:
return False
return True
|
>>> line1 = ' [2018-04-03T00:22:38,048][DEBUG][o.e.c.u.c.QueueResizingEsThreadPoolExecutor] [search17/search]: there were [2000] tasks in [809ms], avg task time [28.4micros], EWMA task execution [790nanos], [35165.36 tasks/s], optimal queue is [35165], current capacity [1000]'
>>> line2 = ' org.elasticsearch.ResourceAlreadyExistsException: index [media_corpus_refresh/6_3sRAMsRr2r63J6gbOjQw] already exists'
>>> line3 = ' at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.validateIndexName(MetaDataCreateIndexService.java:151) ~[elasticsearch-6.2.0.jar:6.2.0]'
>>> elasticsearch_ispartial_log(line1)
False
>>> elasticsearch_ispartial_log(line2)
True
>>> elasticsearch_ispartial_log(line3)
True
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/formatters.py#L374-L391
| null |
import re
import ujson as json
import datetime
# Marker type: a RawLog tells the caller the payload still needs to be
# run through the formatter named inside it.
class RawLog(dict): pass


#FIXME: cannot do both returns .. should it?
def docker_file_log_driver(line):
    """Decode one docker file-log-driver line.

    *line* is a JSON envelope whose "msg" field is itself a JSON-encoded
    application log record.  When that record names its own formatter
    (under ``extra.formatter``), a RawLog is returned so the caller can
    re-format the raw message; otherwise the decoded record is wrapped
    as a plain log entry.
    """
    envelope = json.loads(line)
    record = json.loads(envelope['msg'])
    extra = record.get('extra')
    if 'formatter' in extra:
        raw_entry = {
            'formatter': record.get('extra').get('formatter'),
            'raw': record.get('message'),
            'host': record.get('host'),
            'timestamp': record.get('timestamp'),
        }
        return RawLog(raw_entry)
    return dict(timestamp=record.get('timestamp'), data=record, type='log')
def haproxy(line):
    #TODO Handle all message formats
    '''
    Parse one haproxy HTTP access-log line into a metric record.
    Fields are taken positionally from the whitespace-split line.

    >>> import pprint
    >>> input_line1 = 'Apr 24 00:00:02 node haproxy[12298]: 1.1.1.1:48660 [24/Apr/2019:00:00:02.358] pre-staging~ pre-staging_doc/pre-staging_active 261/0/2/8/271 200 2406 - - ---- 4/4/0/1/0 0/0 {AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018} "GET /doc/api/get?call=apple HTTP/1.1"'
    >>> output_line1 = haproxy(input_line1)
    >>> pprint.pprint(output_line1)
    {'data': {'Tc': 2.0,
              'Tq': 261.0,
              'Tr': 8.0,
              'Tw': 0.0,
              '_api': '/doc/api/get?call=apple',
              '_headers': ['AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018'],
              'actconn': 4,
              'backend': 'pre-staging_doc/pre-staging_active',
              'backend_queue': 0,
              'beconn': 1,
              'bytes_read': 2406.0,
              'client_port': '48660',
              'client_server': '1.1.1.1',
              'feconn': 4,
              'front_end': 'pre-staging~',
              'haproxy_server': 'node',
              'method': 'GET',
              'resp_time': 271.0,
              'retries': 0,
              'srv_conn': 0,
              'srv_queue': 0,
              'status': '200',
              'timestamp': '2019-04-24T00:00:02.358000'},
     'event': 'haproxy_event',
     'timestamp': '2019-04-24T00:00:02.358000',
     'type': 'metric'}
    '''
    _line = line.strip().split()
    log = {}
    # token 5: "client_ip:client_port"
    log['client_server'] = _line[5].split(':')[0].strip()
    log['client_port'] = _line[5].split(':')[1].strip()
    # token 6: "[accept date]" -> ISO-8601
    _timestamp = re.findall(r'\[(.*?)\]', _line[6])[0]
    log['timestamp'] = datetime.datetime.strptime(_timestamp, '%d/%b/%Y:%H:%M:%S.%f').isoformat()
    log['front_end'] = _line[7].strip()
    log['backend'] = _line[8].strip()
    # token 9: "Tq/Tw/Tc/Tr/Tt" timers (ms); the last slot is total time
    log['Tq'] = float(_line[9].split('/')[0].strip())
    log['Tw'] = float(_line[9].split('/')[1].strip())
    log['Tc'] = float(_line[9].split('/')[2].strip())
    log['Tr'] = float(_line[9].split('/')[3].strip())
    log['resp_time'] = float(_line[9].split('/')[-1].strip())
    log['status'] = _line[10].strip()
    log['bytes_read'] = float(_line[11].strip())
    # captured headers are logged between braces in the raw line
    log['_headers'] = re.findall(r'{(.*)}', line)
    log['haproxy_server'] = _line[3].strip()
    # request line is quoted at the end: "METHOD /path HTTP/x.y"
    log['method'] = _line[-3].strip('"').strip()
    log['_api'] = _line[-2].strip()
    # token 15: connection counters.  NOTE(review): haproxy documents the
    # order actconn/feconn/beconn/srv_conn/retries, but this code reads
    # beconn from slot -2 and srv_conn from slot -3 -- verify the mapping.
    log['retries'] = int(_line[15].split('/')[-1].strip())
    log['actconn'] = int(_line[15].split('/')[0].strip())
    log['feconn'] = int(_line[15].split('/')[1].strip())
    log['beconn'] = int(_line[15].split('/')[-2].strip())
    log['srv_conn'] = int(_line[15].split('/')[-3].strip())
    # token 16: "srv_queue/backend_queue"
    log['srv_queue'] = int(_line[16].split('/')[0].strip())
    log['backend_queue'] = int(_line[16].split('/')[1].strip())
    return dict(
        data=log,
        event='haproxy_event',
        timestamp=log.get('timestamp'),
        type='metric'
    )
def nginx_access(line):
    '''
    Parse one JSON-formatted nginx access-log line into a metric record.

    The epoch `timestamp` is replaced with its ISO-8601 (UTC) form, a
    missing upstream time ("-") becomes 0.0, and the numeric fields are
    coerced to float.

    >>> import pprint
    >>> input_line1 = '{ \
    "remote_addr": "127.0.0.1","remote_user": "-","timestamp": "1515144699.201", \
    "request": "GET / HTTP/1.1","status": "200","request_time": "0.000", \
    "body_bytes_sent": "396","http_referer": "-","http_user_agent": "python-requests/2.18.4", \
    "http_x_forwarded_for": "-","upstream_response_time": "-" \
    }'
    >>> output_line1 = nginx_access(input_line1)
    >>> pprint.pprint(output_line1)
    {'data': {u'body_bytes_sent': 396.0,
              u'http_referer': u'-',
              u'http_user_agent': u'python-requests/2.18.4',
              u'http_x_forwarded_for': u'-',
              u'remote_addr': u'127.0.0.1',
              u'remote_user': u'-',
              u'request': u'GET / HTTP/1.1',
              u'request_time': 0.0,
              u'status': u'200',
              u'timestamp': '2018-01-05T09:31:39.201000',
              u'upstream_response_time': 0.0},
     'event': 'nginx_event',
     'timestamp': '2018-01-05T09:31:39.201000',
     'type': 'metric'}
    '''
    #TODO Handle nginx error logs
    log = json.loads(line)
    # nginx logs the epoch time as a string; convert to ISO-8601 (UTC)
    timestamp_iso = datetime.datetime.utcfromtimestamp(float(log['timestamp'])).isoformat()
    log.update({'timestamp':timestamp_iso})
    # "-" means the request never reached an upstream server
    if '-' in log.get('upstream_response_time'):
        log['upstream_response_time'] = 0.0
    log['body_bytes_sent'] = float(log['body_bytes_sent'])
    log['request_time'] = float(log['request_time'])
    log['upstream_response_time'] = float(log['upstream_response_time'])
    return dict(
        timestamp=log.get('timestamp',' '),
        data=log,
        type='metric',
        event='nginx_event',
    )
def mongodb(line):
    """Parse one mongodb server log line into a structured record.

    A line reads ``timestamp severity component context message``.  Only
    the first four whitespace runs split fields, so the free-form message
    (which may itself contain whitespace) survives intact.

    Returns a dict with ``timestamp``, ``type`` ('log') and the parsed
    fields under ``data``.
    """
    field_names = ('timestamp', 'severity', 'component', 'context', 'message')
    parts = re.split(r'\s+', line, maxsplit=4)
    record = {name: value for name, value in zip(field_names, parts)}
    return {
        'timestamp': parts[0],
        'data': record,
        'type': 'log',
    }
def django(line):
    """Parse one django log line into a structured record.

    Recognised lines look like::

        [DD/Mon/YYYY HH:MM:SS] LEVEL [logger.name:lineno] message

    If the message embeds a JSON object it is decoded; otherwise the raw
    text after the logger name is kept.  Anything that does not start with
    the bracketed timestamp (e.g. traceback continuation lines, "case2" in
    the TODO below) is returned as a raw record stamped with the current
    UTC time.

    Returns:
        dict with ``timestamp`` (ISO-8601), ``level`` and ``data`` for a
        recognised line, else ``{'timestamp': ..., 'data': {'raw': line}}``.
    """
    # TODO we need to handle multi-line traceback (case2) logs
    data = {}
    log = re.findall(r'^(\[\d+/\w+/\d+ \d+:\d+:\d+\].*)', line)
    if len(log) == 1:
        data['timestamp'] = datetime.datetime.strptime(
            re.findall(r'(\d+/\w+/\d+ \d+:\d+:\d+)', log[0])[0],
            "%d/%b/%Y %H:%M:%S").isoformat()
        # The first run of capitals is the month abbreviation's leading
        # letter, so the log level is the second run.
        data['loglevel'] = re.findall('[A-Z]+', log[0])[1]
        data['logname'] = re.findall(r'\[\D+.\w+:\d+\]', log[0])[0]
        message = re.findall(r'\{.+\}', log[0])
        try:
            if len(message) > 0:
                message = json.loads(message[0])
            else:
                message = ''.join(re.split(']', log[0])[2:])
        except ValueError:
            # Looked like JSON but was not decodable; fall back to raw text.
            message = ''.join(re.split(']', log[0])[2:])
        data['message'] = message
        return dict(
            timestamp=data['timestamp'],
            level=data['loglevel'],
            data=data,
        )
    else:
        return dict(
            timestamp=datetime.datetime.utcnow().isoformat(),
            # Fixed: `raw` was an undefined name (NameError on this path);
            # the key must be the string 'raw'.
            data={'raw': line},
        )
def basescript(line):
    '''
    Parse one JSON log line emitted by `basescript` into the standard
    logagg record shape (the whole decoded object is kept under 'data').

    >>> import pprint
    >>> input_line = '{"level": "warning", "timestamp": "2018-02-07T06:37:00.297610Z", "event": "exited via keyboard interrupt", "type": "log", "id": "20180207T063700_4d03fe800bd111e89ecb96000007bc65", "_": {"ln": 58, "file": "/usr/local/lib/python2.7/dist-packages/basescript/basescript.py", "name": "basescript.basescript", "fn": "start"}}'
    >>> output_line1 = basescript(input_line)
    >>> pprint.pprint(output_line1)
    {'data': {u'_': {u'file': u'/usr/local/lib/python2.7/dist-packages/basescript/basescript.py',
                     u'fn': u'start',
                     u'ln': 58,
                     u'name': u'basescript.basescript'},
              u'event': u'exited via keyboard interrupt',
              u'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65',
              u'level': u'warning',
              u'timestamp': u'2018-02-07T06:37:00.297610Z',
              u'type': u'log'},
     'event': u'exited via keyboard interrupt',
     'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65',
     'level': u'warning',
     'timestamp': u'2018-02-07T06:37:00.297610Z',
     'type': u'log'}
    '''
    record = json.loads(line)
    # Promote the standard envelope fields to the top level; the full
    # decoded record is preserved verbatim under 'data'.
    parsed = {'data': record}
    for field in ('timestamp', 'id', 'type', 'level', 'event'):
        parsed[field] = record[field]
    return parsed
def elasticsearch(line):
    '''
    Structure a single elasticsearch server-log line.

    >>> import pprint
    >>> input_line = '[2017-08-30T06:27:19,158] [WARN ][o.e.m.j.JvmGcMonitorService] [Glsuj_2] [gc][296816] overhead, spent [1.2s] collecting in the last [1.3s]'
    >>> output_line = elasticsearch(input_line)
    >>> pprint.pprint(output_line)
    {'data': {'garbage_collector': 'gc',
              'gc_count': 296816.0,
              'level': 'WARN',
              'message': 'o.e.m.j.JvmGcMonitorService',
              'plugin': 'Glsuj_2',
              'query_time_ms': 1200.0,
              'resp_time_ms': 1300.0,
              'timestamp': '2017-08-30T06:27:19,158'},
     'event': 'o.e.m.j.JvmGcMonitorService',
     'level': 'WARN ',
     'timestamp': '2017-08-30T06:27:19,158',
     'type': 'metric'}

    Case 2:
    [2017-09-13T23:15:00,415][WARN ][o.e.i.e.Engine   ] [Glsuj_2] [filebeat-2017.09.09][3] failed engine [index]
    java.nio.file.FileSystemException: /home/user/elasticsearch/data/nodes/0/indices/jsVSO6f3Rl-wwBpQyNRCbQ/3/index/_0.fdx: Too many open files
        at sun.nio.fs.UnixException.translateToIOException(UnixException.java:91) ~[?:?]
    '''
    # TODO we need to handle case2 logs (multi-line java tracebacks)
    matched = re.findall(r'(\[\d+\-+\d+\d+\-+\d+\w+\d+:\d+:\d+,+\d\d\d+\].*)', line)

    if len(matched) != 1:
        # Not a recognizable ES record: pass the raw line through untouched.
        return dict(
            timestamp=datetime.datetime.isoformat(datetime.datetime.now()),
            data={'raw': line}
        )

    field_names = ['timestamp', 'level', 'message', 'plugin',
                   'garbage_collector', 'gc_count', 'query_time_ms', 'resp_time_ms']
    # Each bracketed segment of the line is a positional field.
    field_values = re.findall(r'\[(.*?)\]', matched[0])

    # Normalize duration fields: "433ms" -> 433.0, "1.2s" -> 1200.0 (ms).
    for pos, raw in enumerate(field_values):
        if not isinstance(raw, str):
            continue
        if len(re.findall(r'.*ms$', raw)) > 0 and 'ms' in re.findall(r'.*ms$', raw)[0]:
            field_values[pos] = float(re.split('ms', raw)[0])
        elif len(re.findall(r'.*s$', raw)) > 0 and 's' in re.findall(r'.*s$', raw)[0]:
            field_values[pos] = float(re.split('s', raw)[0]) * 1000

    data = dict(zip(field_names, field_values))
    # ES pads the level inside brackets ("WARN "); strip one trailing blank.
    if 'level' in data and data['level'][-1] == ' ':
        data['level'] = data['level'][:-1]
    if 'gc_count' in data:
        data['gc_count'] = float(data['gc_count'])

    return dict(
        timestamp=field_values[0],
        level=field_values[1],
        type='metric',
        data=data,
        event=data['message']
    )
# Regexes that mark the beginning of a fresh elasticsearch record: a real
# record starts with '[' (optionally indented); continuation lines such as
# java traceback frames do not.
LOG_BEGIN_PATTERN = [re.compile(r'^\s+\['), re.compile(r'^\[')]
# NOTE(review): elasticsearch_ispartial_log is defined elsewhere in this
# module (outside this chunk); presumably it uses LOG_BEGIN_PATTERN to merge
# continuation lines into one record -- confirm against the full file.
elasticsearch.ispartial = elasticsearch_ispartial_log
|
deep-compute/logagg
|
logagg/collector.py
|
load_formatter_fn
|
python
|
def load_formatter_fn(formatter):
    '''
    Resolve a dotted-path formatter name to the callable it names.

    >>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS
    <function basescript at 0x...>
    '''
    fmt_fn = util.load_object(formatter)
    # Every formatter must expose an ``ispartial`` predicate; attach the
    # generic fallback when the formatter does not define its own.
    if not hasattr(fmt_fn, 'ispartial'):
        fmt_fn.ispartial = util.ispartial
    return fmt_fn
|
>>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS
<function basescript at 0x...>
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/collector.py#L23-L31
| null |
import os
import sys
import time
import ujson as json
import glob
import uuid
import Queue
import socket
import datetime
from operator import attrgetter
import traceback
from deeputil import AttrDict, keeprunning
from pygtail import Pygtail
from logagg import util
from logagg.formatters import RawLog
# TODO
'''
After a downtime of collector, pygtail is missing logs from rotational files
'''
class LogCollector(object):
    """Tail log files, normalise each line through a formatter function and
    forward the structured records -- plus periodic heartbeats -- to NSQ.

    One daemon thread scans the filesystem for files matching ``fpaths``,
    one reader thread is started per discovered file, and a pusher thread
    drains the shared in-memory queue towards ``nsq_sender`` (see start()).
    """

    DESC = 'Collects the log information and sends to NSQTopic'

    QUEUE_MAX_SIZE = 2000                 # Maximum number of messages in in-mem queue
    MAX_NBYTES_TO_SEND = 4.5 * (1024**2)  # Number of bytes from in-mem queue minimally required to push
    MIN_NBYTES_TO_SEND = 512 * 1024       # Minimum number of bytes to send to nsq in mpub
    MAX_SECONDS_TO_PUSH = 1               # Wait till this much time elapses before pushing
    LOG_FILE_POLL_INTERVAL = 0.25         # Wait time to pull log file for new lines added
    QUEUE_READ_TIMEOUT = 1                # Wait time when doing blocking read on the in-mem q
    PYGTAIL_ACK_WAIT_TIME = 0.05          # TODO: Document this
    SCAN_FPATTERNS_INTERVAL = 30          # How often to scan filesystem for files matching fpatterns
    HOST = socket.gethostname()
    HEARTBEAT_RESTART_INTERVAL = 30       # Wait time if heartbeat sending stops

    # Schema every formatted record must satisfy: key -> required type.
    LOG_STRUCTURE = {
        'id': basestring,
        'timestamp': basestring,
        'file': basestring,
        'host': basestring,
        'formatter': basestring,
        'raw': basestring,
        'type': basestring,
        'level': basestring,
        'event': basestring,
        'data': dict,
        'error': bool,
        'error_tb': basestring,
    }

    def __init__(self,
                 fpaths,
                 heartbeat_interval,
                 nsq_sender=util.DUMMY,
                 log=util.DUMMY):
        # fpaths: "file=<glob>:formatter=<dotted.name>" specs (parsed in
        # _scan_fpatterns).
        self.fpaths = fpaths
        self.nsq_sender = nsq_sender
        self.heartbeat_interval = heartbeat_interval
        self.log = log

        # Log fpath to thread mapping
        self.log_reader_threads = {}

        # Handle name to formatter fn obj map
        self.formatters = {}
        self.queue = Queue.Queue(maxsize=self.QUEUE_MAX_SIZE)

    def _remove_redundancy(self, log):
        """Removes duplicate data from 'data' inside log dict and brings it
        out.

        >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
        >>> log = {'id' : 46846876, 'type' : 'log',
        ...         'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
        >>> lc._remove_redundancy(log)
        {'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
        """
        for key in log:
            # When a key exists both at the top level and inside 'data',
            # the value inside 'data' wins and is popped out.
            if key in log and key in log['data']:
                log[key] = log['data'].pop(key)
        return log

    def validate_log_format(self, log):
        '''
        Check that ``log`` has exactly the LOG_STRUCTURE keys with the
        expected types; returns the string 'passed' or 'failed'.

        >>> lc = LogCollector('file=/path/to/file.log:formatter=logagg.formatters.basescript', 30)
        >>> incomplete_log = {'data' : {'x' : 1, 'y' : 2},
        ...                     'raw' : 'Not all keys present'}
        >>> lc.validate_log_format(incomplete_log)
        'failed'
        >>> redundant_log = {'one_invalid_key' : 'Extra information',
        ...                     'data': {'x' : 1, 'y' : 2},
        ...                     'error': False,
        ...                     'error_tb': '',
        ...                     'event': 'event',
        ...                     'file': '/path/to/file.log',
        ...                     'formatter': 'logagg.formatters.mongodb',
        ...                     'host': 'deepcompute-ThinkPad-E470',
        ...                     'id': '0112358',
        ...                     'level': 'debug',
        ...                     'raw': 'some log line here',
        ...                     'timestamp': '2018-04-07T14:06:17.404818',
        ...                     'type': 'log'}
        >>> lc.validate_log_format(redundant_log)
        'failed'
        >>> correct_log = {'data': {'x' : 1, 'y' : 2},
        ...                 'error': False,
        ...                 'error_tb': '',
        ...                 'event': 'event',
        ...                 'file': '/path/to/file.log',
        ...                 'formatter': 'logagg.formatters.mongodb',
        ...                 'host': 'deepcompute-ThinkPad-E470',
        ...                 'id': '0112358',
        ...                 'level': 'debug',
        ...                 'raw': 'some log line here',
        ...                 'timestamp': '2018-04-07T14:06:17.404818',
        ...                 'type': 'log'}
        >>> lc.validate_log_format(correct_log)
        'passed'
        '''
        keys_in_log = set(log)
        keys_in_log_structure = set(self.LOG_STRUCTURE)

        # Key-set check: the record must have exactly the expected keys.
        try:
            assert (keys_in_log == keys_in_log_structure)
        except AssertionError as e:
            self.log.warning('formatted_log_structure_rejected',
                             key_not_found=list(keys_in_log_structure - keys_in_log),
                             extra_keys_found=list(keys_in_log - keys_in_log_structure),
                             num_logs=1,
                             type='metric')
            return 'failed'

        # Type check for every value against LOG_STRUCTURE.
        for key in log:
            try:
                assert isinstance(log[key], self.LOG_STRUCTURE[key])
            except AssertionError as e:
                self.log.warning('formatted_log_structure_rejected',
                                 key_datatype_not_matched=key,
                                 datatype_expected=type(self.LOG_STRUCTURE[key]),
                                 datatype_got=type(log[key]),
                                 num_logs=1,
                                 type='metric')
                return 'failed'

        return 'passed'

    def _full_from_frags(self, frags):
        # Stitch (line, line_info) fragments into one multi-line log entry;
        # the line_info of the last fragment stands for the whole entry.
        full_line = '\n'.join([l for l, _ in frags])
        line_info = frags[-1][-1]
        return full_line, line_info

    def _iter_logs(self, freader, fmtfn):
        # Group physical lines from Pygtail into logical entries using the
        # formatter's ispartial() predicate (continuation lines are merged).
        # FIXME: does not handle partial lines
        # at the start of a file properly
        frags = []

        for line_info in freader:
            line = line_info['line'][:-1]  # remove new line char at the end

            if not fmtfn.ispartial(line) and frags:
                yield self._full_from_frags(frags)
                frags = []

            frags.append((line, line_info))

        if frags:
            yield self._full_from_frags(frags)

    def assign_default_log_values(self, fpath, line, formatter):
        '''
        >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
        >>> from pprint import pprint
        >>> formatter = 'logagg.formatters.mongodb'
        >>> fpath = '/var/log/mongodb/mongodb.log'
        >>> line = 'some log line here'
        >>> default_log = lc.assign_default_log_values(fpath, line, formatter)
        >>> pprint(default_log) #doctest: +ELLIPSIS
        {'data': {},
         'error': False,
         'error_tb': '',
         'event': 'event',
         'file': '/var/log/mongodb/mongodb.log',
         'formatter': 'logagg.formatters.mongodb',
         'host': '...',
         'id': None,
         'level': 'debug',
         'raw': 'some log line here',
         'timestamp': '...',
         'type': 'log'}
        '''
        return dict(
            id=None,
            file=fpath,
            host=self.HOST,
            formatter=formatter,
            event='event',
            data={},
            raw=line,
            timestamp=datetime.datetime.utcnow().isoformat(),
            type='log',
            level='debug',
            error=False,
            error_tb='',
        )

    @keeprunning(LOG_FILE_POLL_INTERVAL, on_error=util.log_exception)
    def collect_log_lines(self, log_file):
        # Per-file reader loop: tail the file with Pygtail, format every
        # logical entry and enqueue the JSON-encoded record.
        L = log_file
        fpath = L['fpath']
        fmtfn = L['formatter_fn']
        formatter = L['formatter']

        freader = Pygtail(fpath)
        for line, line_info in self._iter_logs(freader, fmtfn):
            log = self.assign_default_log_values(fpath, line, formatter)

            try:
                _log = fmtfn(line)

                # A RawLog result delegates to another (nested) formatter.
                if isinstance(_log, RawLog):
                    formatter, raw_log = _log['formatter'], _log['raw']
                    log.update(_log)
                    _log = load_formatter_fn(formatter)(raw_log)

                log.update(_log)
            except (SystemExit, KeyboardInterrupt) as e: raise
            except:
                # A broken formatter must not kill the reader: keep the raw
                # line and record the traceback on the log itself.
                log['error'] = True
                log['error_tb'] = traceback.format_exc()
                self.log.exception('error_during_handling_log_line', log=log['raw'])

            if log['id'] == None:
                log['id'] = uuid.uuid1().hex

            log = self._remove_redundancy(log)
            if self.validate_log_format(log) == 'failed': continue

            self.queue.put(dict(log=json.dumps(log),
                                freader=freader, line_info=line_info))
            self.log.debug('tally:put_into_self.queue', size=self.queue.qsize())

        # Block until the NSQ pusher has acknowledged everything read so
        # far; only then may Pygtail advance its offset file.
        while not freader.is_fully_acknowledged():
            t = self.PYGTAIL_ACK_WAIT_TIME
            self.log.debug('waiting_for_pygtail_to_fully_ack', wait_time=t)
            time.sleep(t)

        time.sleep(self.LOG_FILE_POLL_INTERVAL)

    def _get_msgs_from_queue(self, msgs, timeout):
        # Drain the in-mem queue into ``msgs`` until the byte budget is hit,
        # the timeout elapses, or the queue stays empty with msgs on hand.
        # Returns (overflow messages, total bytes collected, read-anything flag).
        msgs_pending = []
        read_from_q = False
        ts = time.time()

        msgs_nbytes = sum(len(m['log']) for m in msgs)

        while 1:
            try:
                msg = self.queue.get(block=True, timeout=self.QUEUE_READ_TIMEOUT)
                read_from_q = True
                self.log.debug("tally:get_from_self.queue")

                _msgs_nbytes = msgs_nbytes + len(msg['log'])
                _msgs_nbytes += 1  # for newline char

                if _msgs_nbytes > self.MAX_NBYTES_TO_SEND:
                    # This message would blow the budget: hold it for the
                    # next batch instead of including it in this one.
                    msgs_pending.append(msg)
                    self.log.debug('msg_bytes_read_mem_queue_exceeded')
                    break

                msgs.append(msg)
                msgs_nbytes = _msgs_nbytes

                #FIXME condition never met
                if time.time() - ts >= timeout and msgs:
                    self.log.debug('msg_reading_timeout_from_mem_queue_got_exceeded')
                    break
                # TODO: What if a single log message itself is bigger than max bytes limit?

            except Queue.Empty:
                self.log.debug('queue_empty')
                time.sleep(self.QUEUE_READ_TIMEOUT)
                if not msgs:
                    continue
                else:
                    return msgs_pending, msgs_nbytes, read_from_q

        self.log.debug('got_msgs_from_mem_queue')
        return msgs_pending, msgs_nbytes, read_from_q

    @keeprunning(0, on_error=util.log_exception)  # FIXME: what wait time var here?
    def send_to_nsq(self, state):
        # Batch messages from the queue and push once there is enough data
        # or enough time has passed since the last push.
        msgs = []
        should_push = False

        while not should_push:
            cur_ts = time.time()
            self.log.debug('should_push', should_push=should_push)
            time_since_last_push = cur_ts - state.last_push_ts

            msgs_pending, msgs_nbytes, read_from_q = self._get_msgs_from_queue(msgs,
                                                                              self.MAX_SECONDS_TO_PUSH)

            have_enough_msgs = msgs_nbytes >= self.MIN_NBYTES_TO_SEND
            is_max_time_elapsed = time_since_last_push >= self.MAX_SECONDS_TO_PUSH

            should_push = len(msgs) > 0 and (is_max_time_elapsed or have_enough_msgs)
            self.log.debug('deciding_to_push', should_push=should_push,
                           time_since_last_push=time_since_last_push,
                           msgs_nbytes=msgs_nbytes)

        try:
            if isinstance(self.nsq_sender, type(util.DUMMY)):
                # No real sender configured: just log the final records.
                for m in msgs:
                    self.log.info('final_log_format', log=m['log'])
            else:
                self.log.debug('trying_to_push_to_nsq', msgs_length=len(msgs))
                self.nsq_sender.handle_logs(msgs)
                self.log.debug('pushed_to_nsq', msgs_length=len(msgs))
            self.confirm_success(msgs)
            # Overflow from this batch seeds the next one.
            msgs = msgs_pending
            state.last_push_ts = time.time()
        except (SystemExit, KeyboardInterrupt): raise
        finally:
            if read_from_q: self.queue.task_done()

    def confirm_success(self, msgs):
        # Acknowledge delivered messages back to their Pygtail readers --
        # once per file, using the newest message to advance the offset.
        ack_fnames = set()
        for msg in reversed(msgs):
            freader = msg['freader']
            fname = freader.filename
            if fname in ack_fnames:
                continue
            ack_fnames.add(fname)
            freader.update_offset_file(msg['line_info'])

    @keeprunning(SCAN_FPATTERNS_INTERVAL, on_error=util.log_exception)
    def _scan_fpatterns(self, state):
        '''
        For a list of given fpatterns, this starts a thread
        collecting log lines from file

        >>> os.path.isfile = lambda path: path == '/path/to/log_file.log'
        >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)

        >>> print(lc.fpaths)
        file=/path/to/log_file.log:formatter=logagg.formatters.basescript
        >>> print('formatters loaded:', lc.formatters)
        {}
        >>> print('log file reader threads started:', lc.log_reader_threads)
        {}
        >>> state = AttrDict(files_tracked=list())
        >>> print('files bieng tracked:', state.files_tracked)
        []

        >>> if not state.files_tracked:
        >>>     lc._scan_fpatterns(state)
        >>>     print('formatters loaded:', lc.formatters)
        >>>     print('log file reader threads started:', lc.log_reader_threads)
        >>>     print('files bieng tracked:', state.files_tracked)
        '''
        for f in self.fpaths:
            # Spec format: "file=<glob>:formatter=<dotted.name>".
            fpattern, formatter = (a.split('=')[1] for a in f.split(':', 1))
            self.log.debug('scan_fpatterns', fpattern=fpattern, formatter=formatter)
            # TODO code for scanning fpatterns for the files not yet present goes here
            fpaths = glob.glob(fpattern)
            # Load formatter_fn if not in list
            fpaths = list(set(fpaths) - set(state.files_tracked))
            for fpath in fpaths:
                try:
                    formatter_fn = self.formatters.get(formatter,
                                                       load_formatter_fn(formatter))
                    self.log.info('found_formatter_fn', fn=formatter)
                    self.formatters[formatter] = formatter_fn
                except (SystemExit, KeyboardInterrupt): raise
                except (ImportError, AttributeError):
                    # A bad formatter spec is unrecoverable: bail out.
                    self.log.exception('formatter_fn_not_found', fn=formatter)
                    sys.exit(-1)
                # Start a thread for every file
                self.log.info('found_log_file', log_file=fpath)
                log_f = dict(fpath=fpath, fpattern=fpattern,
                             formatter=formatter, formatter_fn=formatter_fn)
                log_key = (fpath, fpattern, formatter)
                if log_key not in self.log_reader_threads:
                    self.log.info('starting_collect_log_lines_thread', log_key=log_key)
                    # There is no existing thread tracking this log file. Start one
                    log_reader_thread = util.start_daemon_thread(self.collect_log_lines, (log_f,))
                    self.log_reader_threads[log_key] = log_reader_thread
                state.files_tracked.append(fpath)
        time.sleep(self.SCAN_FPATTERNS_INTERVAL)

    @keeprunning(HEARTBEAT_RESTART_INTERVAL, on_error=util.log_exception)
    def send_heartbeat(self, state):
        # Sends continuous heartbeats to a seperate topic in nsq
        if self.log_reader_threads:
            for f in self.log_reader_threads:
                files_tracked = self.log_reader_threads.keys()
        else:
            files_tracked = ''

        heartbeat_payload = {'host': self.HOST,
                             'heartbeat_number': state.heartbeat_number,
                             'timestamp': time.time(),
                             'nsq_topic': self.nsq_sender.topic_name,
                             'files_tracked': files_tracked
                             }
        self.nsq_sender.handle_heartbeat(heartbeat_payload)
        state.heartbeat_number += 1
        time.sleep(self.heartbeat_interval)

    def start(self):
        # Wire up the three daemon threads (scanner, pusher, heartbeat) and
        # block on the heartbeat thread as the liveness sentinel.
        state = AttrDict(files_tracked=list())
        util.start_daemon_thread(self._scan_fpatterns, (state,))

        state = AttrDict(last_push_ts=time.time())
        util.start_daemon_thread(self.send_to_nsq, (state,))

        state = AttrDict(heartbeat_number=0)
        th_heartbeat = util.start_daemon_thread(self.send_heartbeat, (state,))

        while True:
            th_heartbeat.join(1)
            if not th_heartbeat.isAlive(): break
|
deep-compute/logagg
|
logagg/collector.py
|
LogCollector._remove_redundancy
|
python
|
def _remove_redundancy(self, log):
    # Hoist keys that appear both at the top level of ``log`` and inside
    # log['data'] out of 'data'; the nested value wins and is popped out.
    for key in log:
        # NOTE: the first 'key in log' test is always true here; kept as-is.
        if key in log and key in log['data']:
            log[key] = log['data'].pop(key)
    return log
|
Removes duplicate data from 'data' inside log dict and brings it
out.
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> log = {'id' : 46846876, 'type' : 'log',
... 'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
>>> lc._remove_redundancy(log)
{'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/collector.py#L78-L92
| null |
class LogCollector(object):
    """Tail log files, normalise each line through a formatter function and
    forward the structured records (plus heartbeats) to NSQ."""

    DESC = 'Collects the log information and sends to NSQTopic'

    QUEUE_MAX_SIZE = 2000                 # Maximum number of messages in in-mem queue
    MAX_NBYTES_TO_SEND = 4.5 * (1024**2)  # Number of bytes from in-mem queue minimally required to push
    MIN_NBYTES_TO_SEND = 512 * 1024       # Minimum number of bytes to send to nsq in mpub
    MAX_SECONDS_TO_PUSH = 1               # Wait till this much time elapses before pushing
    LOG_FILE_POLL_INTERVAL = 0.25         # Wait time to pull log file for new lines added
    QUEUE_READ_TIMEOUT = 1                # Wait time when doing blocking read on the in-mem q
    PYGTAIL_ACK_WAIT_TIME = 0.05          # TODO: Document this
    SCAN_FPATTERNS_INTERVAL = 30          # How often to scan filesystem for files matching fpatterns
    HOST = socket.gethostname()
    HEARTBEAT_RESTART_INTERVAL = 30       # Wait time if heartbeat sending stops

    # Schema every formatted record must satisfy: key -> required type.
    LOG_STRUCTURE = {
        'id': basestring,
        'timestamp': basestring,
        'file': basestring,
        'host': basestring,
        'formatter': basestring,
        'raw': basestring,
        'type': basestring,
        'level': basestring,
        'event': basestring,
        'data': dict,
        'error': bool,
        'error_tb': basestring,
    }

    def __init__(self,
                 fpaths,
                 heartbeat_interval,
                 nsq_sender=util.DUMMY,
                 log=util.DUMMY):
        # fpaths: "file=<glob>:formatter=<dotted.name>" specs (parsed in
        # _scan_fpatterns).
        self.fpaths = fpaths
        self.nsq_sender = nsq_sender
        self.heartbeat_interval = heartbeat_interval
        self.log = log

        # Log fpath to thread mapping
        self.log_reader_threads = {}

        # Handle name to formatter fn obj map
        self.formatters = {}
        self.queue = Queue.Queue(maxsize=self.QUEUE_MAX_SIZE)

    def validate_log_format(self, log):
        '''
        Check that ``log`` has exactly the LOG_STRUCTURE keys with the
        expected types; returns the string 'passed' or 'failed'.

        >>> lc = LogCollector('file=/path/to/file.log:formatter=logagg.formatters.basescript', 30)
        >>> incomplete_log = {'data' : {'x' : 1, 'y' : 2},
        ...                     'raw' : 'Not all keys present'}
        >>> lc.validate_log_format(incomplete_log)
        'failed'
        >>> redundant_log = {'one_invalid_key' : 'Extra information',
        ...                     'data': {'x' : 1, 'y' : 2},
        ...                     'error': False,
        ...                     'error_tb': '',
        ...                     'event': 'event',
        ...                     'file': '/path/to/file.log',
        ...                     'formatter': 'logagg.formatters.mongodb',
        ...                     'host': 'deepcompute-ThinkPad-E470',
        ...                     'id': '0112358',
        ...                     'level': 'debug',
        ...                     'raw': 'some log line here',
        ...                     'timestamp': '2018-04-07T14:06:17.404818',
        ...                     'type': 'log'}
        >>> lc.validate_log_format(redundant_log)
        'failed'
        >>> correct_log = {'data': {'x' : 1, 'y' : 2},
        ...                 'error': False,
        ...                 'error_tb': '',
        ...                 'event': 'event',
        ...                 'file': '/path/to/file.log',
        ...                 'formatter': 'logagg.formatters.mongodb',
        ...                 'host': 'deepcompute-ThinkPad-E470',
        ...                 'id': '0112358',
        ...                 'level': 'debug',
        ...                 'raw': 'some log line here',
        ...                 'timestamp': '2018-04-07T14:06:17.404818',
        ...                 'type': 'log'}
        >>> lc.validate_log_format(correct_log)
        'passed'
        '''
        keys_in_log = set(log)
        keys_in_log_structure = set(self.LOG_STRUCTURE)

        # Key-set check: the record must have exactly the expected keys.
        try:
            assert (keys_in_log == keys_in_log_structure)
        except AssertionError as e:
            self.log.warning('formatted_log_structure_rejected',
                             key_not_found=list(keys_in_log_structure - keys_in_log),
                             extra_keys_found=list(keys_in_log - keys_in_log_structure),
                             num_logs=1,
                             type='metric')
            return 'failed'

        # Type check for every value against LOG_STRUCTURE.
        for key in log:
            try:
                assert isinstance(log[key], self.LOG_STRUCTURE[key])
            except AssertionError as e:
                self.log.warning('formatted_log_structure_rejected',
                                 key_datatype_not_matched=key,
                                 datatype_expected=type(self.LOG_STRUCTURE[key]),
                                 datatype_got=type(log[key]),
                                 num_logs=1,
                                 type='metric')
                return 'failed'

        return 'passed'

    def _full_from_frags(self, frags):
        # Stitch (line, line_info) fragments into one multi-line log entry;
        # the line_info of the last fragment stands for the whole entry.
        full_line = '\n'.join([l for l, _ in frags])
        line_info = frags[-1][-1]
        return full_line, line_info

    def _iter_logs(self, freader, fmtfn):
        # Group physical lines from Pygtail into logical entries using the
        # formatter's ispartial() predicate (continuation lines are merged).
        # FIXME: does not handle partial lines
        # at the start of a file properly
        frags = []

        for line_info in freader:
            line = line_info['line'][:-1]  # remove new line char at the end

            if not fmtfn.ispartial(line) and frags:
                yield self._full_from_frags(frags)
                frags = []

            frags.append((line, line_info))

        if frags:
            yield self._full_from_frags(frags)

    def assign_default_log_values(self, fpath, line, formatter):
        '''
        >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
        >>> from pprint import pprint
        >>> formatter = 'logagg.formatters.mongodb'
        >>> fpath = '/var/log/mongodb/mongodb.log'
        >>> line = 'some log line here'
        >>> default_log = lc.assign_default_log_values(fpath, line, formatter)
        >>> pprint(default_log) #doctest: +ELLIPSIS
        {'data': {},
         'error': False,
         'error_tb': '',
         'event': 'event',
         'file': '/var/log/mongodb/mongodb.log',
         'formatter': 'logagg.formatters.mongodb',
         'host': '...',
         'id': None,
         'level': 'debug',
         'raw': 'some log line here',
         'timestamp': '...',
         'type': 'log'}
        '''
        return dict(
            id=None,
            file=fpath,
            host=self.HOST,
            formatter=formatter,
            event='event',
            data={},
            raw=line,
            timestamp=datetime.datetime.utcnow().isoformat(),
            type='log',
            level='debug',
            error=False,
            error_tb='',
        )

    @keeprunning(LOG_FILE_POLL_INTERVAL, on_error=util.log_exception)
    def collect_log_lines(self, log_file):
        # Per-file reader loop: tail the file with Pygtail, format every
        # logical entry and enqueue the JSON-encoded record.
        L = log_file
        fpath = L['fpath']
        fmtfn = L['formatter_fn']
        formatter = L['formatter']

        freader = Pygtail(fpath)
        for line, line_info in self._iter_logs(freader, fmtfn):
            log = self.assign_default_log_values(fpath, line, formatter)

            try:
                _log = fmtfn(line)

                # A RawLog result delegates to another (nested) formatter.
                if isinstance(_log, RawLog):
                    formatter, raw_log = _log['formatter'], _log['raw']
                    log.update(_log)
                    _log = load_formatter_fn(formatter)(raw_log)

                log.update(_log)
            except (SystemExit, KeyboardInterrupt) as e: raise
            except:
                # A broken formatter must not kill the reader: keep the raw
                # line and record the traceback on the log itself.
                log['error'] = True
                log['error_tb'] = traceback.format_exc()
                self.log.exception('error_during_handling_log_line', log=log['raw'])

            if log['id'] == None:
                log['id'] = uuid.uuid1().hex

            log = self._remove_redundancy(log)
            if self.validate_log_format(log) == 'failed': continue

            self.queue.put(dict(log=json.dumps(log),
                                freader=freader, line_info=line_info))
            self.log.debug('tally:put_into_self.queue', size=self.queue.qsize())

        # Block until the NSQ pusher has acknowledged everything read so
        # far; only then may Pygtail advance its offset file.
        while not freader.is_fully_acknowledged():
            t = self.PYGTAIL_ACK_WAIT_TIME
            self.log.debug('waiting_for_pygtail_to_fully_ack', wait_time=t)
            time.sleep(t)

        time.sleep(self.LOG_FILE_POLL_INTERVAL)

    def _get_msgs_from_queue(self, msgs, timeout):
        # Drain the in-mem queue into ``msgs`` until the byte budget is hit,
        # the timeout elapses, or the queue stays empty with msgs on hand.
        # Returns (overflow messages, total bytes collected, read-anything flag).
        msgs_pending = []
        read_from_q = False
        ts = time.time()

        msgs_nbytes = sum(len(m['log']) for m in msgs)

        while 1:
            try:
                msg = self.queue.get(block=True, timeout=self.QUEUE_READ_TIMEOUT)
                read_from_q = True
                self.log.debug("tally:get_from_self.queue")

                _msgs_nbytes = msgs_nbytes + len(msg['log'])
                _msgs_nbytes += 1  # for newline char

                if _msgs_nbytes > self.MAX_NBYTES_TO_SEND:
                    # This message would blow the budget: hold it for the
                    # next batch instead of including it in this one.
                    msgs_pending.append(msg)
                    self.log.debug('msg_bytes_read_mem_queue_exceeded')
                    break

                msgs.append(msg)
                msgs_nbytes = _msgs_nbytes

                #FIXME condition never met
                if time.time() - ts >= timeout and msgs:
                    self.log.debug('msg_reading_timeout_from_mem_queue_got_exceeded')
                    break
                # TODO: What if a single log message itself is bigger than max bytes limit?

            except Queue.Empty:
                self.log.debug('queue_empty')
                time.sleep(self.QUEUE_READ_TIMEOUT)
                if not msgs:
                    continue
                else:
                    return msgs_pending, msgs_nbytes, read_from_q

        self.log.debug('got_msgs_from_mem_queue')
        return msgs_pending, msgs_nbytes, read_from_q

    @keeprunning(0, on_error=util.log_exception)  # FIXME: what wait time var here?
    def send_to_nsq(self, state):
        # Batch messages from the queue and push once there is enough data
        # or enough time has passed since the last push.
        msgs = []
        should_push = False

        while not should_push:
            cur_ts = time.time()
            self.log.debug('should_push', should_push=should_push)
            time_since_last_push = cur_ts - state.last_push_ts

            msgs_pending, msgs_nbytes, read_from_q = self._get_msgs_from_queue(msgs,
                                                                              self.MAX_SECONDS_TO_PUSH)

            have_enough_msgs = msgs_nbytes >= self.MIN_NBYTES_TO_SEND
            is_max_time_elapsed = time_since_last_push >= self.MAX_SECONDS_TO_PUSH

            should_push = len(msgs) > 0 and (is_max_time_elapsed or have_enough_msgs)
            self.log.debug('deciding_to_push', should_push=should_push,
                           time_since_last_push=time_since_last_push,
                           msgs_nbytes=msgs_nbytes)

        try:
            if isinstance(self.nsq_sender, type(util.DUMMY)):
                # No real sender configured: just log the final records.
                for m in msgs:
                    self.log.info('final_log_format', log=m['log'])
            else:
                self.log.debug('trying_to_push_to_nsq', msgs_length=len(msgs))
                self.nsq_sender.handle_logs(msgs)
                self.log.debug('pushed_to_nsq', msgs_length=len(msgs))
            self.confirm_success(msgs)
            # Overflow from this batch seeds the next one.
            msgs = msgs_pending
            state.last_push_ts = time.time()
        except (SystemExit, KeyboardInterrupt): raise
        finally:
            if read_from_q: self.queue.task_done()

    def confirm_success(self, msgs):
        # Acknowledge delivered messages back to their Pygtail readers --
        # once per file, using the newest message to advance the offset.
        ack_fnames = set()
        for msg in reversed(msgs):
            freader = msg['freader']
            fname = freader.filename
            if fname in ack_fnames:
                continue
            ack_fnames.add(fname)
            freader.update_offset_file(msg['line_info'])

    @keeprunning(SCAN_FPATTERNS_INTERVAL, on_error=util.log_exception)
    def _scan_fpatterns(self, state):
        '''
        For a list of given fpatterns, this starts a thread
        collecting log lines from file

        >>> os.path.isfile = lambda path: path == '/path/to/log_file.log'
        >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)

        >>> print(lc.fpaths)
        file=/path/to/log_file.log:formatter=logagg.formatters.basescript
        >>> print('formatters loaded:', lc.formatters)
        {}
        >>> print('log file reader threads started:', lc.log_reader_threads)
        {}
        >>> state = AttrDict(files_tracked=list())
        >>> print('files bieng tracked:', state.files_tracked)
        []

        >>> if not state.files_tracked:
        >>>     lc._scan_fpatterns(state)
        >>>     print('formatters loaded:', lc.formatters)
        >>>     print('log file reader threads started:', lc.log_reader_threads)
        >>>     print('files bieng tracked:', state.files_tracked)
        '''
        for f in self.fpaths:
            # Spec format: "file=<glob>:formatter=<dotted.name>".
            fpattern, formatter = (a.split('=')[1] for a in f.split(':', 1))
            self.log.debug('scan_fpatterns', fpattern=fpattern, formatter=formatter)
            # TODO code for scanning fpatterns for the files not yet present goes here
            fpaths = glob.glob(fpattern)
            # Load formatter_fn if not in list
            fpaths = list(set(fpaths) - set(state.files_tracked))
            for fpath in fpaths:
                try:
                    formatter_fn = self.formatters.get(formatter,
                                                       load_formatter_fn(formatter))
                    self.log.info('found_formatter_fn', fn=formatter)
                    self.formatters[formatter] = formatter_fn
                except (SystemExit, KeyboardInterrupt): raise
                except (ImportError, AttributeError):
                    # A bad formatter spec is unrecoverable: bail out.
                    self.log.exception('formatter_fn_not_found', fn=formatter)
                    sys.exit(-1)
                # Start a thread for every file
                self.log.info('found_log_file', log_file=fpath)
                log_f = dict(fpath=fpath, fpattern=fpattern,
                             formatter=formatter, formatter_fn=formatter_fn)
                log_key = (fpath, fpattern, formatter)
                if log_key not in self.log_reader_threads:
                    self.log.info('starting_collect_log_lines_thread', log_key=log_key)
                    # There is no existing thread tracking this log file. Start one
                    log_reader_thread = util.start_daemon_thread(self.collect_log_lines, (log_f,))
                    self.log_reader_threads[log_key] = log_reader_thread
                state.files_tracked.append(fpath)
        time.sleep(self.SCAN_FPATTERNS_INTERVAL)

    @keeprunning(HEARTBEAT_RESTART_INTERVAL, on_error=util.log_exception)
    def send_heartbeat(self, state):
        # Sends continuous heartbeats to a seperate topic in nsq
        if self.log_reader_threads:
            for f in self.log_reader_threads:
                files_tracked = self.log_reader_threads.keys()
        else:
            files_tracked = ''

        heartbeat_payload = {'host': self.HOST,
                             'heartbeat_number': state.heartbeat_number,
                             'timestamp': time.time(),
                             'nsq_topic': self.nsq_sender.topic_name,
                             'files_tracked': files_tracked
                             }
        self.nsq_sender.handle_heartbeat(heartbeat_payload)
        state.heartbeat_number += 1
        time.sleep(self.heartbeat_interval)

    def start(self):
        # Wire up the three daemon threads (scanner, pusher, heartbeat) and
        # block on the heartbeat thread as the liveness sentinel.
        state = AttrDict(files_tracked=list())
        util.start_daemon_thread(self._scan_fpatterns, (state,))

        state = AttrDict(last_push_ts=time.time())
        util.start_daemon_thread(self.send_to_nsq, (state,))

        state = AttrDict(heartbeat_number=0)
        th_heartbeat = util.start_daemon_thread(self.send_heartbeat, (state,))

        while True:
            th_heartbeat.join(1)
            if not th_heartbeat.isAlive(): break
|
deep-compute/logagg
|
logagg/collector.py
|
LogCollector.validate_log_format
|
python
|
def validate_log_format(self, log):
    '''
    Check that ``log`` has exactly the LOG_STRUCTURE keys with the
    expected types; returns the string 'passed' or 'failed'.

    >>> lc = LogCollector('file=/path/to/file.log:formatter=logagg.formatters.basescript', 30)
    >>> incomplete_log = {'data' : {'x' : 1, 'y' : 2},
    ...                     'raw' : 'Not all keys present'}
    >>> lc.validate_log_format(incomplete_log)
    'failed'
    >>> redundant_log = {'one_invalid_key' : 'Extra information',
    ...                     'data': {'x' : 1, 'y' : 2},
    ...                     'error': False,
    ...                     'error_tb': '',
    ...                     'event': 'event',
    ...                     'file': '/path/to/file.log',
    ...                     'formatter': 'logagg.formatters.mongodb',
    ...                     'host': 'deepcompute-ThinkPad-E470',
    ...                     'id': '0112358',
    ...                     'level': 'debug',
    ...                     'raw': 'some log line here',
    ...                     'timestamp': '2018-04-07T14:06:17.404818',
    ...                     'type': 'log'}
    >>> lc.validate_log_format(redundant_log)
    'failed'
    >>> correct_log = {'data': {'x' : 1, 'y' : 2},
    ...                 'error': False,
    ...                 'error_tb': '',
    ...                 'event': 'event',
    ...                 'file': '/path/to/file.log',
    ...                 'formatter': 'logagg.formatters.mongodb',
    ...                 'host': 'deepcompute-ThinkPad-E470',
    ...                 'id': '0112358',
    ...                 'level': 'debug',
    ...                 'raw': 'some log line here',
    ...                 'timestamp': '2018-04-07T14:06:17.404818',
    ...                 'type': 'log'}
    >>> lc.validate_log_format(correct_log)
    'passed'
    '''
    keys_in_log = set(log)
    keys_in_log_structure = set(self.LOG_STRUCTURE)

    # Key-set check: the record must have exactly the expected keys.
    try:
        assert (keys_in_log == keys_in_log_structure)
    except AssertionError as e:
        self.log.warning('formatted_log_structure_rejected',
                         key_not_found=list(keys_in_log_structure - keys_in_log),
                         extra_keys_found=list(keys_in_log - keys_in_log_structure),
                         num_logs=1,
                         type='metric')
        return 'failed'

    # Type check for every value against LOG_STRUCTURE.
    for key in log:
        try:
            assert isinstance(log[key], self.LOG_STRUCTURE[key])
        except AssertionError as e:
            self.log.warning('formatted_log_structure_rejected',
                             key_datatype_not_matched=key,
                             datatype_expected=type(self.LOG_STRUCTURE[key]),
                             datatype_got=type(log[key]),
                             num_logs=1,
                             type='metric')
            return 'failed'

    return 'passed'
|
>>> lc = LogCollector('file=/path/to/file.log:formatter=logagg.formatters.basescript', 30)
>>> incomplete_log = {'data' : {'x' : 1, 'y' : 2},
... 'raw' : 'Not all keys present'}
>>> lc.validate_log_format(incomplete_log)
'failed'
>>> redundant_log = {'one_invalid_key' : 'Extra information',
... 'data': {'x' : 1, 'y' : 2},
... 'error': False,
... 'error_tb': '',
... 'event': 'event',
... 'file': '/path/to/file.log',
... 'formatter': 'logagg.formatters.mongodb',
... 'host': 'deepcompute-ThinkPad-E470',
... 'id': '0112358',
... 'level': 'debug',
... 'raw': 'some log line here',
... 'timestamp': '2018-04-07T14:06:17.404818',
... 'type': 'log'}
>>> lc.validate_log_format(redundant_log)
'failed'
>>> correct_log = {'data': {'x' : 1, 'y' : 2},
... 'error': False,
... 'error_tb': '',
... 'event': 'event',
... 'file': '/path/to/file.log',
... 'formatter': 'logagg.formatters.mongodb',
... 'host': 'deepcompute-ThinkPad-E470',
... 'id': '0112358',
... 'level': 'debug',
... 'raw': 'some log line here',
... 'timestamp': '2018-04-07T14:06:17.404818',
... 'type': 'log'}
>>> lc.validate_log_format(correct_log)
'passed'
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/collector.py#L94-L159
| null |
class LogCollector(object):
DESC = 'Collects the log information and sends to NSQTopic'
QUEUE_MAX_SIZE = 2000 # Maximum number of messages in in-mem queue
MAX_NBYTES_TO_SEND = 4.5 * (1024**2) # Number of bytes from in-mem queue minimally required to push
MIN_NBYTES_TO_SEND = 512 * 1024 # Minimum number of bytes to send to nsq in mpub
MAX_SECONDS_TO_PUSH = 1 # Wait till this much time elapses before pushing
LOG_FILE_POLL_INTERVAL = 0.25 # Wait time to pull log file for new lines added
QUEUE_READ_TIMEOUT = 1 # Wait time when doing blocking read on the in-mem q
PYGTAIL_ACK_WAIT_TIME = 0.05 # TODO: Document this
SCAN_FPATTERNS_INTERVAL = 30 # How often to scan filesystem for files matching fpatterns
HOST = socket.gethostname()
HEARTBEAT_RESTART_INTERVAL = 30 # Wait time if heartbeat sending stops
LOG_STRUCTURE = {
'id': basestring,
'timestamp': basestring,
'file' : basestring,
'host': basestring,
'formatter' : basestring,
'raw' : basestring,
'type' : basestring,
'level' : basestring,
'event' : basestring,
'data' : dict,
'error' : bool,
'error_tb' : basestring,
}
def __init__(self,
fpaths,
heartbeat_interval,
nsq_sender=util.DUMMY,
log=util.DUMMY):
self.fpaths = fpaths
self.nsq_sender = nsq_sender
self.heartbeat_interval = heartbeat_interval
self.log = log
# Log fpath to thread mapping
self.log_reader_threads = {}
# Handle name to formatter fn obj map
self.formatters = {}
self.queue = Queue.Queue(maxsize=self.QUEUE_MAX_SIZE)
def _remove_redundancy(self, log):
"""Removes duplicate data from 'data' inside log dict and brings it
out.
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> log = {'id' : 46846876, 'type' : 'log',
... 'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
>>> lc._remove_redundancy(log)
{'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
"""
for key in log:
if key in log and key in log['data']:
log[key] = log['data'].pop(key)
return log
def _full_from_frags(self, frags):
full_line = '\n'.join([l for l, _ in frags])
line_info = frags[-1][-1]
return full_line, line_info
def _iter_logs(self, freader, fmtfn):
# FIXME: does not handle partial lines
# at the start of a file properly
frags = []
for line_info in freader:
line = line_info['line'][:-1] # remove new line char at the end
if not fmtfn.ispartial(line) and frags:
yield self._full_from_frags(frags)
frags = []
frags.append((line, line_info))
if frags:
yield self._full_from_frags(frags)
def assign_default_log_values(self, fpath, line, formatter):
'''
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> from pprint import pprint
>>> formatter = 'logagg.formatters.mongodb'
>>> fpath = '/var/log/mongodb/mongodb.log'
>>> line = 'some log line here'
>>> default_log = lc.assign_default_log_values(fpath, line, formatter)
>>> pprint(default_log) #doctest: +ELLIPSIS
{'data': {},
'error': False,
'error_tb': '',
'event': 'event',
'file': '/var/log/mongodb/mongodb.log',
'formatter': 'logagg.formatters.mongodb',
'host': '...',
'id': None,
'level': 'debug',
'raw': 'some log line here',
'timestamp': '...',
'type': 'log'}
'''
return dict(
id=None,
file=fpath,
host=self.HOST,
formatter=formatter,
event='event',
data={},
raw=line,
timestamp=datetime.datetime.utcnow().isoformat(),
type='log',
level='debug',
error= False,
error_tb='',
)
@keeprunning(LOG_FILE_POLL_INTERVAL, on_error=util.log_exception)
def collect_log_lines(self, log_file):
L = log_file
fpath = L['fpath']
fmtfn = L['formatter_fn']
formatter = L['formatter']
freader = Pygtail(fpath)
for line, line_info in self._iter_logs(freader, fmtfn):
log = self.assign_default_log_values(fpath, line, formatter)
try:
_log = fmtfn(line)
if isinstance(_log, RawLog):
formatter, raw_log = _log['formatter'], _log['raw']
log.update(_log)
_log = load_formatter_fn(formatter)(raw_log)
log.update(_log)
except (SystemExit, KeyboardInterrupt) as e: raise
except:
log['error'] = True
log['error_tb'] = traceback.format_exc()
self.log.exception('error_during_handling_log_line', log=log['raw'])
if log['id'] == None:
log['id'] = uuid.uuid1().hex
log = self._remove_redundancy(log)
if self.validate_log_format(log) == 'failed': continue
self.queue.put(dict(log=json.dumps(log),
freader=freader, line_info=line_info))
self.log.debug('tally:put_into_self.queue', size=self.queue.qsize())
while not freader.is_fully_acknowledged():
t = self.PYGTAIL_ACK_WAIT_TIME
self.log.debug('waiting_for_pygtail_to_fully_ack', wait_time=t)
time.sleep(t)
time.sleep(self.LOG_FILE_POLL_INTERVAL)
def _get_msgs_from_queue(self, msgs, timeout):
msgs_pending = []
read_from_q = False
ts = time.time()
msgs_nbytes = sum(len(m['log']) for m in msgs)
while 1:
try:
msg = self.queue.get(block=True, timeout=self.QUEUE_READ_TIMEOUT)
read_from_q = True
self.log.debug("tally:get_from_self.queue")
_msgs_nbytes = msgs_nbytes + len(msg['log'])
_msgs_nbytes += 1 # for newline char
if _msgs_nbytes > self.MAX_NBYTES_TO_SEND:
msgs_pending.append(msg)
self.log.debug('msg_bytes_read_mem_queue_exceeded')
break
msgs.append(msg)
msgs_nbytes = _msgs_nbytes
#FIXME condition never met
if time.time() - ts >= timeout and msgs:
self.log.debug('msg_reading_timeout_from_mem_queue_got_exceeded')
break
# TODO: What if a single log message itself is bigger than max bytes limit?
except Queue.Empty:
self.log.debug('queue_empty')
time.sleep(self.QUEUE_READ_TIMEOUT)
if not msgs:
continue
else:
return msgs_pending, msgs_nbytes, read_from_q
self.log.debug('got_msgs_from_mem_queue')
return msgs_pending, msgs_nbytes, read_from_q
@keeprunning(0, on_error=util.log_exception) # FIXME: what wait time var here?
def send_to_nsq(self, state):
msgs = []
should_push = False
while not should_push:
cur_ts = time.time()
self.log.debug('should_push', should_push=should_push)
time_since_last_push = cur_ts - state.last_push_ts
msgs_pending, msgs_nbytes, read_from_q = self._get_msgs_from_queue(msgs,
self.MAX_SECONDS_TO_PUSH)
have_enough_msgs = msgs_nbytes >= self.MIN_NBYTES_TO_SEND
is_max_time_elapsed = time_since_last_push >= self.MAX_SECONDS_TO_PUSH
should_push = len(msgs) > 0 and (is_max_time_elapsed or have_enough_msgs)
self.log.debug('deciding_to_push', should_push=should_push,
time_since_last_push=time_since_last_push,
msgs_nbytes=msgs_nbytes)
try:
if isinstance(self.nsq_sender, type(util.DUMMY)):
for m in msgs:
self.log.info('final_log_format', log=m['log'])
else:
self.log.debug('trying_to_push_to_nsq', msgs_length=len(msgs))
self.nsq_sender.handle_logs(msgs)
self.log.debug('pushed_to_nsq', msgs_length=len(msgs))
self.confirm_success(msgs)
msgs = msgs_pending
state.last_push_ts = time.time()
except (SystemExit, KeyboardInterrupt): raise
finally:
if read_from_q: self.queue.task_done()
def confirm_success(self, msgs):
ack_fnames = set()
for msg in reversed(msgs):
freader = msg['freader']
fname = freader.filename
if fname in ack_fnames:
continue
ack_fnames.add(fname)
freader.update_offset_file(msg['line_info'])
@keeprunning(SCAN_FPATTERNS_INTERVAL, on_error=util.log_exception)
def _scan_fpatterns(self, state):
'''
For a list of given fpatterns, this starts a thread
collecting log lines from file
>>> os.path.isfile = lambda path: path == '/path/to/log_file.log'
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> print(lc.fpaths)
file=/path/to/log_file.log:formatter=logagg.formatters.basescript
>>> print('formatters loaded:', lc.formatters)
{}
>>> print('log file reader threads started:', lc.log_reader_threads)
{}
>>> state = AttrDict(files_tracked=list())
>>> print('files bieng tracked:', state.files_tracked)
[]
>>> if not state.files_tracked:
>>> lc._scan_fpatterns(state)
>>> print('formatters loaded:', lc.formatters)
>>> print('log file reader threads started:', lc.log_reader_threads)
>>> print('files bieng tracked:', state.files_tracked)
'''
for f in self.fpaths:
fpattern, formatter =(a.split('=')[1] for a in f.split(':', 1))
self.log.debug('scan_fpatterns', fpattern=fpattern, formatter=formatter)
# TODO code for scanning fpatterns for the files not yet present goes here
fpaths = glob.glob(fpattern)
# Load formatter_fn if not in list
fpaths = list(set(fpaths) - set(state.files_tracked))
for fpath in fpaths:
try:
formatter_fn = self.formatters.get(formatter,
load_formatter_fn(formatter))
self.log.info('found_formatter_fn', fn=formatter)
self.formatters[formatter] = formatter_fn
except (SystemExit, KeyboardInterrupt): raise
except (ImportError, AttributeError):
self.log.exception('formatter_fn_not_found', fn=formatter)
sys.exit(-1)
# Start a thread for every file
self.log.info('found_log_file', log_file=fpath)
log_f = dict(fpath=fpath, fpattern=fpattern,
formatter=formatter, formatter_fn=formatter_fn)
log_key = (fpath, fpattern, formatter)
if log_key not in self.log_reader_threads:
self.log.info('starting_collect_log_lines_thread', log_key=log_key)
# There is no existing thread tracking this log file. Start one
log_reader_thread = util.start_daemon_thread(self.collect_log_lines, (log_f,))
self.log_reader_threads[log_key] = log_reader_thread
state.files_tracked.append(fpath)
time.sleep(self.SCAN_FPATTERNS_INTERVAL)
@keeprunning(HEARTBEAT_RESTART_INTERVAL, on_error=util.log_exception)
def send_heartbeat(self, state):
# Sends continuous heartbeats to a seperate topic in nsq
if self.log_reader_threads:
for f in self.log_reader_threads:
files_tracked = self.log_reader_threads.keys()
else:
files_tracked = ''
heartbeat_payload = {'host': self.HOST,
'heartbeat_number': state.heartbeat_number,
'timestamp': time.time(),
'nsq_topic': self.nsq_sender.topic_name,
'files_tracked': files_tracked
}
self.nsq_sender.handle_heartbeat(heartbeat_payload)
state.heartbeat_number += 1
time.sleep(self.heartbeat_interval)
def start(self):
state = AttrDict(files_tracked=list())
util.start_daemon_thread(self._scan_fpatterns, (state,))
state = AttrDict(last_push_ts=time.time())
util.start_daemon_thread(self.send_to_nsq, (state,))
state = AttrDict(heartbeat_number=0)
th_heartbeat = util.start_daemon_thread(self.send_heartbeat, (state,))
while True:
th_heartbeat.join(1)
if not th_heartbeat.isAlive(): break
|
deep-compute/logagg
|
logagg/collector.py
|
LogCollector.assign_default_log_values
|
python
|
def assign_default_log_values(self, fpath, line, formatter):
'''
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> from pprint import pprint
>>> formatter = 'logagg.formatters.mongodb'
>>> fpath = '/var/log/mongodb/mongodb.log'
>>> line = 'some log line here'
>>> default_log = lc.assign_default_log_values(fpath, line, formatter)
>>> pprint(default_log) #doctest: +ELLIPSIS
{'data': {},
'error': False,
'error_tb': '',
'event': 'event',
'file': '/var/log/mongodb/mongodb.log',
'formatter': 'logagg.formatters.mongodb',
'host': '...',
'id': None,
'level': 'debug',
'raw': 'some log line here',
'timestamp': '...',
'type': 'log'}
'''
return dict(
id=None,
file=fpath,
host=self.HOST,
formatter=formatter,
event='event',
data={},
raw=line,
timestamp=datetime.datetime.utcnow().isoformat(),
type='log',
level='debug',
error= False,
error_tb='',
)
|
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> from pprint import pprint
>>> formatter = 'logagg.formatters.mongodb'
>>> fpath = '/var/log/mongodb/mongodb.log'
>>> line = 'some log line here'
>>> default_log = lc.assign_default_log_values(fpath, line, formatter)
>>> pprint(default_log) #doctest: +ELLIPSIS
{'data': {},
'error': False,
'error_tb': '',
'event': 'event',
'file': '/var/log/mongodb/mongodb.log',
'formatter': 'logagg.formatters.mongodb',
'host': '...',
'id': None,
'level': 'debug',
'raw': 'some log line here',
'timestamp': '...',
'type': 'log'}
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/collector.py#L184-L221
| null |
class LogCollector(object):
DESC = 'Collects the log information and sends to NSQTopic'
QUEUE_MAX_SIZE = 2000 # Maximum number of messages in in-mem queue
MAX_NBYTES_TO_SEND = 4.5 * (1024**2) # Number of bytes from in-mem queue minimally required to push
MIN_NBYTES_TO_SEND = 512 * 1024 # Minimum number of bytes to send to nsq in mpub
MAX_SECONDS_TO_PUSH = 1 # Wait till this much time elapses before pushing
LOG_FILE_POLL_INTERVAL = 0.25 # Wait time to pull log file for new lines added
QUEUE_READ_TIMEOUT = 1 # Wait time when doing blocking read on the in-mem q
PYGTAIL_ACK_WAIT_TIME = 0.05 # TODO: Document this
SCAN_FPATTERNS_INTERVAL = 30 # How often to scan filesystem for files matching fpatterns
HOST = socket.gethostname()
HEARTBEAT_RESTART_INTERVAL = 30 # Wait time if heartbeat sending stops
LOG_STRUCTURE = {
'id': basestring,
'timestamp': basestring,
'file' : basestring,
'host': basestring,
'formatter' : basestring,
'raw' : basestring,
'type' : basestring,
'level' : basestring,
'event' : basestring,
'data' : dict,
'error' : bool,
'error_tb' : basestring,
}
def __init__(self,
fpaths,
heartbeat_interval,
nsq_sender=util.DUMMY,
log=util.DUMMY):
self.fpaths = fpaths
self.nsq_sender = nsq_sender
self.heartbeat_interval = heartbeat_interval
self.log = log
# Log fpath to thread mapping
self.log_reader_threads = {}
# Handle name to formatter fn obj map
self.formatters = {}
self.queue = Queue.Queue(maxsize=self.QUEUE_MAX_SIZE)
def _remove_redundancy(self, log):
"""Removes duplicate data from 'data' inside log dict and brings it
out.
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> log = {'id' : 46846876, 'type' : 'log',
... 'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
>>> lc._remove_redundancy(log)
{'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
"""
for key in log:
if key in log and key in log['data']:
log[key] = log['data'].pop(key)
return log
def validate_log_format(self, log):
'''
>>> lc = LogCollector('file=/path/to/file.log:formatter=logagg.formatters.basescript', 30)
>>> incomplete_log = {'data' : {'x' : 1, 'y' : 2},
... 'raw' : 'Not all keys present'}
>>> lc.validate_log_format(incomplete_log)
'failed'
>>> redundant_log = {'one_invalid_key' : 'Extra information',
... 'data': {'x' : 1, 'y' : 2},
... 'error': False,
... 'error_tb': '',
... 'event': 'event',
... 'file': '/path/to/file.log',
... 'formatter': 'logagg.formatters.mongodb',
... 'host': 'deepcompute-ThinkPad-E470',
... 'id': '0112358',
... 'level': 'debug',
... 'raw': 'some log line here',
... 'timestamp': '2018-04-07T14:06:17.404818',
... 'type': 'log'}
>>> lc.validate_log_format(redundant_log)
'failed'
>>> correct_log = {'data': {'x' : 1, 'y' : 2},
... 'error': False,
... 'error_tb': '',
... 'event': 'event',
... 'file': '/path/to/file.log',
... 'formatter': 'logagg.formatters.mongodb',
... 'host': 'deepcompute-ThinkPad-E470',
... 'id': '0112358',
... 'level': 'debug',
... 'raw': 'some log line here',
... 'timestamp': '2018-04-07T14:06:17.404818',
... 'type': 'log'}
>>> lc.validate_log_format(correct_log)
'passed'
'''
keys_in_log = set(log)
keys_in_log_structure = set(self.LOG_STRUCTURE)
try:
assert (keys_in_log == keys_in_log_structure)
except AssertionError as e:
self.log.warning('formatted_log_structure_rejected' ,
key_not_found = list(keys_in_log_structure-keys_in_log),
extra_keys_found = list(keys_in_log-keys_in_log_structure),
num_logs=1,
type='metric')
return 'failed'
for key in log:
try:
assert isinstance(log[key], self.LOG_STRUCTURE[key])
except AssertionError as e:
self.log.warning('formatted_log_structure_rejected' ,
key_datatype_not_matched = key,
datatype_expected = type(self.LOG_STRUCTURE[key]),
datatype_got = type(log[key]),
num_logs=1,
type='metric')
return 'failed'
return 'passed'
def _full_from_frags(self, frags):
full_line = '\n'.join([l for l, _ in frags])
line_info = frags[-1][-1]
return full_line, line_info
def _iter_logs(self, freader, fmtfn):
# FIXME: does not handle partial lines
# at the start of a file properly
frags = []
for line_info in freader:
line = line_info['line'][:-1] # remove new line char at the end
if not fmtfn.ispartial(line) and frags:
yield self._full_from_frags(frags)
frags = []
frags.append((line, line_info))
if frags:
yield self._full_from_frags(frags)
@keeprunning(LOG_FILE_POLL_INTERVAL, on_error=util.log_exception)
def collect_log_lines(self, log_file):
L = log_file
fpath = L['fpath']
fmtfn = L['formatter_fn']
formatter = L['formatter']
freader = Pygtail(fpath)
for line, line_info in self._iter_logs(freader, fmtfn):
log = self.assign_default_log_values(fpath, line, formatter)
try:
_log = fmtfn(line)
if isinstance(_log, RawLog):
formatter, raw_log = _log['formatter'], _log['raw']
log.update(_log)
_log = load_formatter_fn(formatter)(raw_log)
log.update(_log)
except (SystemExit, KeyboardInterrupt) as e: raise
except:
log['error'] = True
log['error_tb'] = traceback.format_exc()
self.log.exception('error_during_handling_log_line', log=log['raw'])
if log['id'] == None:
log['id'] = uuid.uuid1().hex
log = self._remove_redundancy(log)
if self.validate_log_format(log) == 'failed': continue
self.queue.put(dict(log=json.dumps(log),
freader=freader, line_info=line_info))
self.log.debug('tally:put_into_self.queue', size=self.queue.qsize())
while not freader.is_fully_acknowledged():
t = self.PYGTAIL_ACK_WAIT_TIME
self.log.debug('waiting_for_pygtail_to_fully_ack', wait_time=t)
time.sleep(t)
time.sleep(self.LOG_FILE_POLL_INTERVAL)
def _get_msgs_from_queue(self, msgs, timeout):
msgs_pending = []
read_from_q = False
ts = time.time()
msgs_nbytes = sum(len(m['log']) for m in msgs)
while 1:
try:
msg = self.queue.get(block=True, timeout=self.QUEUE_READ_TIMEOUT)
read_from_q = True
self.log.debug("tally:get_from_self.queue")
_msgs_nbytes = msgs_nbytes + len(msg['log'])
_msgs_nbytes += 1 # for newline char
if _msgs_nbytes > self.MAX_NBYTES_TO_SEND:
msgs_pending.append(msg)
self.log.debug('msg_bytes_read_mem_queue_exceeded')
break
msgs.append(msg)
msgs_nbytes = _msgs_nbytes
#FIXME condition never met
if time.time() - ts >= timeout and msgs:
self.log.debug('msg_reading_timeout_from_mem_queue_got_exceeded')
break
# TODO: What if a single log message itself is bigger than max bytes limit?
except Queue.Empty:
self.log.debug('queue_empty')
time.sleep(self.QUEUE_READ_TIMEOUT)
if not msgs:
continue
else:
return msgs_pending, msgs_nbytes, read_from_q
self.log.debug('got_msgs_from_mem_queue')
return msgs_pending, msgs_nbytes, read_from_q
@keeprunning(0, on_error=util.log_exception) # FIXME: what wait time var here?
def send_to_nsq(self, state):
msgs = []
should_push = False
while not should_push:
cur_ts = time.time()
self.log.debug('should_push', should_push=should_push)
time_since_last_push = cur_ts - state.last_push_ts
msgs_pending, msgs_nbytes, read_from_q = self._get_msgs_from_queue(msgs,
self.MAX_SECONDS_TO_PUSH)
have_enough_msgs = msgs_nbytes >= self.MIN_NBYTES_TO_SEND
is_max_time_elapsed = time_since_last_push >= self.MAX_SECONDS_TO_PUSH
should_push = len(msgs) > 0 and (is_max_time_elapsed or have_enough_msgs)
self.log.debug('deciding_to_push', should_push=should_push,
time_since_last_push=time_since_last_push,
msgs_nbytes=msgs_nbytes)
try:
if isinstance(self.nsq_sender, type(util.DUMMY)):
for m in msgs:
self.log.info('final_log_format', log=m['log'])
else:
self.log.debug('trying_to_push_to_nsq', msgs_length=len(msgs))
self.nsq_sender.handle_logs(msgs)
self.log.debug('pushed_to_nsq', msgs_length=len(msgs))
self.confirm_success(msgs)
msgs = msgs_pending
state.last_push_ts = time.time()
except (SystemExit, KeyboardInterrupt): raise
finally:
if read_from_q: self.queue.task_done()
def confirm_success(self, msgs):
ack_fnames = set()
for msg in reversed(msgs):
freader = msg['freader']
fname = freader.filename
if fname in ack_fnames:
continue
ack_fnames.add(fname)
freader.update_offset_file(msg['line_info'])
@keeprunning(SCAN_FPATTERNS_INTERVAL, on_error=util.log_exception)
def _scan_fpatterns(self, state):
'''
For a list of given fpatterns, this starts a thread
collecting log lines from file
>>> os.path.isfile = lambda path: path == '/path/to/log_file.log'
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> print(lc.fpaths)
file=/path/to/log_file.log:formatter=logagg.formatters.basescript
>>> print('formatters loaded:', lc.formatters)
{}
>>> print('log file reader threads started:', lc.log_reader_threads)
{}
>>> state = AttrDict(files_tracked=list())
>>> print('files bieng tracked:', state.files_tracked)
[]
>>> if not state.files_tracked:
>>> lc._scan_fpatterns(state)
>>> print('formatters loaded:', lc.formatters)
>>> print('log file reader threads started:', lc.log_reader_threads)
>>> print('files bieng tracked:', state.files_tracked)
'''
for f in self.fpaths:
fpattern, formatter =(a.split('=')[1] for a in f.split(':', 1))
self.log.debug('scan_fpatterns', fpattern=fpattern, formatter=formatter)
# TODO code for scanning fpatterns for the files not yet present goes here
fpaths = glob.glob(fpattern)
# Load formatter_fn if not in list
fpaths = list(set(fpaths) - set(state.files_tracked))
for fpath in fpaths:
try:
formatter_fn = self.formatters.get(formatter,
load_formatter_fn(formatter))
self.log.info('found_formatter_fn', fn=formatter)
self.formatters[formatter] = formatter_fn
except (SystemExit, KeyboardInterrupt): raise
except (ImportError, AttributeError):
self.log.exception('formatter_fn_not_found', fn=formatter)
sys.exit(-1)
# Start a thread for every file
self.log.info('found_log_file', log_file=fpath)
log_f = dict(fpath=fpath, fpattern=fpattern,
formatter=formatter, formatter_fn=formatter_fn)
log_key = (fpath, fpattern, formatter)
if log_key not in self.log_reader_threads:
self.log.info('starting_collect_log_lines_thread', log_key=log_key)
# There is no existing thread tracking this log file. Start one
log_reader_thread = util.start_daemon_thread(self.collect_log_lines, (log_f,))
self.log_reader_threads[log_key] = log_reader_thread
state.files_tracked.append(fpath)
time.sleep(self.SCAN_FPATTERNS_INTERVAL)
@keeprunning(HEARTBEAT_RESTART_INTERVAL, on_error=util.log_exception)
def send_heartbeat(self, state):
# Sends continuous heartbeats to a seperate topic in nsq
if self.log_reader_threads:
for f in self.log_reader_threads:
files_tracked = self.log_reader_threads.keys()
else:
files_tracked = ''
heartbeat_payload = {'host': self.HOST,
'heartbeat_number': state.heartbeat_number,
'timestamp': time.time(),
'nsq_topic': self.nsq_sender.topic_name,
'files_tracked': files_tracked
}
self.nsq_sender.handle_heartbeat(heartbeat_payload)
state.heartbeat_number += 1
time.sleep(self.heartbeat_interval)
def start(self):
state = AttrDict(files_tracked=list())
util.start_daemon_thread(self._scan_fpatterns, (state,))
state = AttrDict(last_push_ts=time.time())
util.start_daemon_thread(self.send_to_nsq, (state,))
state = AttrDict(heartbeat_number=0)
th_heartbeat = util.start_daemon_thread(self.send_heartbeat, (state,))
while True:
th_heartbeat.join(1)
if not th_heartbeat.isAlive(): break
|
deep-compute/logagg
|
logagg/collector.py
|
LogCollector._scan_fpatterns
|
python
|
def _scan_fpatterns(self, state):
'''
For a list of given fpatterns, this starts a thread
collecting log lines from file
>>> os.path.isfile = lambda path: path == '/path/to/log_file.log'
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> print(lc.fpaths)
file=/path/to/log_file.log:formatter=logagg.formatters.basescript
>>> print('formatters loaded:', lc.formatters)
{}
>>> print('log file reader threads started:', lc.log_reader_threads)
{}
>>> state = AttrDict(files_tracked=list())
>>> print('files bieng tracked:', state.files_tracked)
[]
>>> if not state.files_tracked:
>>> lc._scan_fpatterns(state)
>>> print('formatters loaded:', lc.formatters)
>>> print('log file reader threads started:', lc.log_reader_threads)
>>> print('files bieng tracked:', state.files_tracked)
'''
for f in self.fpaths:
fpattern, formatter =(a.split('=')[1] for a in f.split(':', 1))
self.log.debug('scan_fpatterns', fpattern=fpattern, formatter=formatter)
# TODO code for scanning fpatterns for the files not yet present goes here
fpaths = glob.glob(fpattern)
# Load formatter_fn if not in list
fpaths = list(set(fpaths) - set(state.files_tracked))
for fpath in fpaths:
try:
formatter_fn = self.formatters.get(formatter,
load_formatter_fn(formatter))
self.log.info('found_formatter_fn', fn=formatter)
self.formatters[formatter] = formatter_fn
except (SystemExit, KeyboardInterrupt): raise
except (ImportError, AttributeError):
self.log.exception('formatter_fn_not_found', fn=formatter)
sys.exit(-1)
# Start a thread for every file
self.log.info('found_log_file', log_file=fpath)
log_f = dict(fpath=fpath, fpattern=fpattern,
formatter=formatter, formatter_fn=formatter_fn)
log_key = (fpath, fpattern, formatter)
if log_key not in self.log_reader_threads:
self.log.info('starting_collect_log_lines_thread', log_key=log_key)
# There is no existing thread tracking this log file. Start one
log_reader_thread = util.start_daemon_thread(self.collect_log_lines, (log_f,))
self.log_reader_threads[log_key] = log_reader_thread
state.files_tracked.append(fpath)
time.sleep(self.SCAN_FPATTERNS_INTERVAL)
|
For a list of given fpatterns, this starts a thread
collecting log lines from file
>>> os.path.isfile = lambda path: path == '/path/to/log_file.log'
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> print(lc.fpaths)
file=/path/to/log_file.log:formatter=logagg.formatters.basescript
>>> print('formatters loaded:', lc.formatters)
{}
>>> print('log file reader threads started:', lc.log_reader_threads)
{}
>>> state = AttrDict(files_tracked=list())
>>> print('files bieng tracked:', state.files_tracked)
[]
>>> if not state.files_tracked:
>>> lc._scan_fpatterns(state)
>>> print('formatters loaded:', lc.formatters)
>>> print('log file reader threads started:', lc.log_reader_threads)
>>> print('files bieng tracked:', state.files_tracked)
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/collector.py#L356-L412
| null |
class LogCollector(object):
DESC = 'Collects the log information and sends to NSQTopic'
QUEUE_MAX_SIZE = 2000 # Maximum number of messages in in-mem queue
MAX_NBYTES_TO_SEND = 4.5 * (1024**2) # Number of bytes from in-mem queue minimally required to push
MIN_NBYTES_TO_SEND = 512 * 1024 # Minimum number of bytes to send to nsq in mpub
MAX_SECONDS_TO_PUSH = 1 # Wait till this much time elapses before pushing
LOG_FILE_POLL_INTERVAL = 0.25 # Wait time to pull log file for new lines added
QUEUE_READ_TIMEOUT = 1 # Wait time when doing blocking read on the in-mem q
PYGTAIL_ACK_WAIT_TIME = 0.05 # TODO: Document this
SCAN_FPATTERNS_INTERVAL = 30 # How often to scan filesystem for files matching fpatterns
HOST = socket.gethostname()
HEARTBEAT_RESTART_INTERVAL = 30 # Wait time if heartbeat sending stops
LOG_STRUCTURE = {
'id': basestring,
'timestamp': basestring,
'file' : basestring,
'host': basestring,
'formatter' : basestring,
'raw' : basestring,
'type' : basestring,
'level' : basestring,
'event' : basestring,
'data' : dict,
'error' : bool,
'error_tb' : basestring,
}
def __init__(self,
fpaths,
heartbeat_interval,
nsq_sender=util.DUMMY,
log=util.DUMMY):
self.fpaths = fpaths
self.nsq_sender = nsq_sender
self.heartbeat_interval = heartbeat_interval
self.log = log
# Log fpath to thread mapping
self.log_reader_threads = {}
# Handle name to formatter fn obj map
self.formatters = {}
self.queue = Queue.Queue(maxsize=self.QUEUE_MAX_SIZE)
def _remove_redundancy(self, log):
"""Removes duplicate data from 'data' inside log dict and brings it
out.
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> log = {'id' : 46846876, 'type' : 'log',
... 'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
>>> lc._remove_redundancy(log)
{'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
"""
for key in log:
if key in log and key in log['data']:
log[key] = log['data'].pop(key)
return log
def validate_log_format(self, log):
'''
>>> lc = LogCollector('file=/path/to/file.log:formatter=logagg.formatters.basescript', 30)
>>> incomplete_log = {'data' : {'x' : 1, 'y' : 2},
... 'raw' : 'Not all keys present'}
>>> lc.validate_log_format(incomplete_log)
'failed'
>>> redundant_log = {'one_invalid_key' : 'Extra information',
... 'data': {'x' : 1, 'y' : 2},
... 'error': False,
... 'error_tb': '',
... 'event': 'event',
... 'file': '/path/to/file.log',
... 'formatter': 'logagg.formatters.mongodb',
... 'host': 'deepcompute-ThinkPad-E470',
... 'id': '0112358',
... 'level': 'debug',
... 'raw': 'some log line here',
... 'timestamp': '2018-04-07T14:06:17.404818',
... 'type': 'log'}
>>> lc.validate_log_format(redundant_log)
'failed'
>>> correct_log = {'data': {'x' : 1, 'y' : 2},
... 'error': False,
... 'error_tb': '',
... 'event': 'event',
... 'file': '/path/to/file.log',
... 'formatter': 'logagg.formatters.mongodb',
... 'host': 'deepcompute-ThinkPad-E470',
... 'id': '0112358',
... 'level': 'debug',
... 'raw': 'some log line here',
... 'timestamp': '2018-04-07T14:06:17.404818',
... 'type': 'log'}
>>> lc.validate_log_format(correct_log)
'passed'
'''
keys_in_log = set(log)
keys_in_log_structure = set(self.LOG_STRUCTURE)
try:
assert (keys_in_log == keys_in_log_structure)
except AssertionError as e:
self.log.warning('formatted_log_structure_rejected' ,
key_not_found = list(keys_in_log_structure-keys_in_log),
extra_keys_found = list(keys_in_log-keys_in_log_structure),
num_logs=1,
type='metric')
return 'failed'
for key in log:
try:
assert isinstance(log[key], self.LOG_STRUCTURE[key])
except AssertionError as e:
self.log.warning('formatted_log_structure_rejected' ,
key_datatype_not_matched = key,
datatype_expected = type(self.LOG_STRUCTURE[key]),
datatype_got = type(log[key]),
num_logs=1,
type='metric')
return 'failed'
return 'passed'
def _full_from_frags(self, frags):
full_line = '\n'.join([l for l, _ in frags])
line_info = frags[-1][-1]
return full_line, line_info
def _iter_logs(self, freader, fmtfn):
# FIXME: does not handle partial lines
# at the start of a file properly
frags = []
for line_info in freader:
line = line_info['line'][:-1] # remove new line char at the end
if not fmtfn.ispartial(line) and frags:
yield self._full_from_frags(frags)
frags = []
frags.append((line, line_info))
if frags:
yield self._full_from_frags(frags)
def assign_default_log_values(self, fpath, line, formatter):
'''
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> from pprint import pprint
>>> formatter = 'logagg.formatters.mongodb'
>>> fpath = '/var/log/mongodb/mongodb.log'
>>> line = 'some log line here'
>>> default_log = lc.assign_default_log_values(fpath, line, formatter)
>>> pprint(default_log) #doctest: +ELLIPSIS
{'data': {},
'error': False,
'error_tb': '',
'event': 'event',
'file': '/var/log/mongodb/mongodb.log',
'formatter': 'logagg.formatters.mongodb',
'host': '...',
'id': None,
'level': 'debug',
'raw': 'some log line here',
'timestamp': '...',
'type': 'log'}
'''
return dict(
id=None,
file=fpath,
host=self.HOST,
formatter=formatter,
event='event',
data={},
raw=line,
timestamp=datetime.datetime.utcnow().isoformat(),
type='log',
level='debug',
error= False,
error_tb='',
)
@keeprunning(LOG_FILE_POLL_INTERVAL, on_error=util.log_exception)
def collect_log_lines(self, log_file):
L = log_file
fpath = L['fpath']
fmtfn = L['formatter_fn']
formatter = L['formatter']
freader = Pygtail(fpath)
for line, line_info in self._iter_logs(freader, fmtfn):
log = self.assign_default_log_values(fpath, line, formatter)
try:
_log = fmtfn(line)
if isinstance(_log, RawLog):
formatter, raw_log = _log['formatter'], _log['raw']
log.update(_log)
_log = load_formatter_fn(formatter)(raw_log)
log.update(_log)
except (SystemExit, KeyboardInterrupt) as e: raise
except:
log['error'] = True
log['error_tb'] = traceback.format_exc()
self.log.exception('error_during_handling_log_line', log=log['raw'])
if log['id'] == None:
log['id'] = uuid.uuid1().hex
log = self._remove_redundancy(log)
if self.validate_log_format(log) == 'failed': continue
self.queue.put(dict(log=json.dumps(log),
freader=freader, line_info=line_info))
self.log.debug('tally:put_into_self.queue', size=self.queue.qsize())
while not freader.is_fully_acknowledged():
t = self.PYGTAIL_ACK_WAIT_TIME
self.log.debug('waiting_for_pygtail_to_fully_ack', wait_time=t)
time.sleep(t)
time.sleep(self.LOG_FILE_POLL_INTERVAL)
def _get_msgs_from_queue(self, msgs, timeout):
msgs_pending = []
read_from_q = False
ts = time.time()
msgs_nbytes = sum(len(m['log']) for m in msgs)
while 1:
try:
msg = self.queue.get(block=True, timeout=self.QUEUE_READ_TIMEOUT)
read_from_q = True
self.log.debug("tally:get_from_self.queue")
_msgs_nbytes = msgs_nbytes + len(msg['log'])
_msgs_nbytes += 1 # for newline char
if _msgs_nbytes > self.MAX_NBYTES_TO_SEND:
msgs_pending.append(msg)
self.log.debug('msg_bytes_read_mem_queue_exceeded')
break
msgs.append(msg)
msgs_nbytes = _msgs_nbytes
#FIXME condition never met
if time.time() - ts >= timeout and msgs:
self.log.debug('msg_reading_timeout_from_mem_queue_got_exceeded')
break
# TODO: What if a single log message itself is bigger than max bytes limit?
except Queue.Empty:
self.log.debug('queue_empty')
time.sleep(self.QUEUE_READ_TIMEOUT)
if not msgs:
continue
else:
return msgs_pending, msgs_nbytes, read_from_q
self.log.debug('got_msgs_from_mem_queue')
return msgs_pending, msgs_nbytes, read_from_q
@keeprunning(0, on_error=util.log_exception) # FIXME: what wait time var here?
def send_to_nsq(self, state):
msgs = []
should_push = False
while not should_push:
cur_ts = time.time()
self.log.debug('should_push', should_push=should_push)
time_since_last_push = cur_ts - state.last_push_ts
msgs_pending, msgs_nbytes, read_from_q = self._get_msgs_from_queue(msgs,
self.MAX_SECONDS_TO_PUSH)
have_enough_msgs = msgs_nbytes >= self.MIN_NBYTES_TO_SEND
is_max_time_elapsed = time_since_last_push >= self.MAX_SECONDS_TO_PUSH
should_push = len(msgs) > 0 and (is_max_time_elapsed or have_enough_msgs)
self.log.debug('deciding_to_push', should_push=should_push,
time_since_last_push=time_since_last_push,
msgs_nbytes=msgs_nbytes)
try:
if isinstance(self.nsq_sender, type(util.DUMMY)):
for m in msgs:
self.log.info('final_log_format', log=m['log'])
else:
self.log.debug('trying_to_push_to_nsq', msgs_length=len(msgs))
self.nsq_sender.handle_logs(msgs)
self.log.debug('pushed_to_nsq', msgs_length=len(msgs))
self.confirm_success(msgs)
msgs = msgs_pending
state.last_push_ts = time.time()
except (SystemExit, KeyboardInterrupt): raise
finally:
if read_from_q: self.queue.task_done()
def confirm_success(self, msgs):
ack_fnames = set()
for msg in reversed(msgs):
freader = msg['freader']
fname = freader.filename
if fname in ack_fnames:
continue
ack_fnames.add(fname)
freader.update_offset_file(msg['line_info'])
@keeprunning(SCAN_FPATTERNS_INTERVAL, on_error=util.log_exception)
@keeprunning(HEARTBEAT_RESTART_INTERVAL, on_error=util.log_exception)
def send_heartbeat(self, state):
# Sends continuous heartbeats to a seperate topic in nsq
if self.log_reader_threads:
for f in self.log_reader_threads:
files_tracked = self.log_reader_threads.keys()
else:
files_tracked = ''
heartbeat_payload = {'host': self.HOST,
'heartbeat_number': state.heartbeat_number,
'timestamp': time.time(),
'nsq_topic': self.nsq_sender.topic_name,
'files_tracked': files_tracked
}
self.nsq_sender.handle_heartbeat(heartbeat_payload)
state.heartbeat_number += 1
time.sleep(self.heartbeat_interval)
def start(self):
state = AttrDict(files_tracked=list())
util.start_daemon_thread(self._scan_fpatterns, (state,))
state = AttrDict(last_push_ts=time.time())
util.start_daemon_thread(self.send_to_nsq, (state,))
state = AttrDict(heartbeat_number=0)
th_heartbeat = util.start_daemon_thread(self.send_heartbeat, (state,))
while True:
th_heartbeat.join(1)
if not th_heartbeat.isAlive(): break
|
deep-compute/logagg
|
logagg/forwarders.py
|
MongoDBForwarder._parse_msg_for_mongodb
|
python
|
def _parse_msg_for_mongodb(self, msgs):
'''
>>> mdbf = MongoDBForwarder('no_host', '27017', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> log = [{u'data': {u'_': {u'file': u'log.py',
... u'fn': u'start',
... u'ln': 8,
... u'name': u'__main__'},
... u'a': 1,
... u'b': 2,
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}]
>>> records = mdbf._parse_msg_for_mongodb(log)
>>> from pprint import pprint
>>> pprint(records)
[{'_id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
u'data': {u'_': {u'file': u'log.py',
u'fn': u'start',
u'ln': 8,
u'name': u'__main__'},
u'a': 1,
u'b': 2,
u'msg': u'this is a dummy log'},
u'error': False,
u'error_tb': u'',
u'event': u'some_log',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info',
u'raw': u'{...}',
u'timestamp': u'2018-04-09T09:59:24.733945Z',
u'type': u'metric'}]
'''
msgs_list = []
for msg in msgs:
try:
msg['_id'] = msg.pop('id')
except KeyError:
self.log.exception('collector_failure_id_not_found', log=msg)
msgs_list.append(msg)
return msgs_list
|
>>> mdbf = MongoDBForwarder('no_host', '27017', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> log = [{u'data': {u'_': {u'file': u'log.py',
... u'fn': u'start',
... u'ln': 8,
... u'name': u'__main__'},
... u'a': 1,
... u'b': 2,
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}]
>>> records = mdbf._parse_msg_for_mongodb(log)
>>> from pprint import pprint
>>> pprint(records)
[{'_id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
u'data': {u'_': {u'file': u'log.py',
u'fn': u'start',
u'ln': 8,
u'name': u'__main__'},
u'a': 1,
u'b': 2,
u'msg': u'this is a dummy log'},
u'error': False,
u'error_tb': u'',
u'event': u'some_log',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info',
u'raw': u'{...}',
u'timestamp': u'2018-04-09T09:59:24.733945Z',
u'type': u'metric'}]
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/forwarders.py#L64-L116
| null |
class MongoDBForwarder(BaseForwarder):
SERVER_SELECTION_TIMEOUT = 500 # MongoDB server selection timeout
# FIXME: normalize all var names
def __init__(self,
host, port,
user, password,
db, collection, log=DUMMY):
self.host = host
self.port = port
self.user = user
self.passwd = password
self.db_name = db
self.coll = collection
self.log = log
if host != 'no_host':
self._ensure_connection()
# FIXME: clean up the logs
@keeprunning(wait_secs=SERVER_SELECTION_TIMEOUT, exit_on_success=True)
def _ensure_connection(self):
# Establish connection to MongoDB to store the nsq messages
url = 'mongodb://%s:%s@%s:%s' % (self.user,
self.passwd,
self.host,
self.port)
client = MongoClient(
url, serverSelectionTimeoutMS=self.SERVER_SELECTION_TIMEOUT)
self.log.info('mongodb_server_connection_established', host=self.host)
self.database = client[self.db_name]
self.log.info('mongodb_database_created', db=self.db_name)
self.collection = self.database[self.coll]
self.log.info('mongodb_collection_created',
collection=self.collection, db=self.db_name)
def handle_logs(self, msgs):
msgs_list = self._parse_msg_for_mongodb(msgs)
try:
self.log.debug('inserting_msgs_mongodb')
self.collection.insert_many(msgs_list, ordered=False)
self.log.info('logs_inserted_into_mongodb',
num_records=len(msgs), type='metric')
except pymongo.errors.AutoReconnect(message='connection_to_mongodb_failed'):
self._ensure_connection()
except pymongo.errors.BulkWriteError as bwe:
self.log.info('logs_inserted_into_mongodb',
num_records=bwe.details['nInserted'], type='metric',
records_not_inserted=bwe.details['writeErrors'],
num_records_missed=len(bwe.details['writeErrors']))
|
deep-compute/logagg
|
logagg/forwarders.py
|
InfluxDBForwarder._tag_and_field_maker
|
python
|
def _tag_and_field_maker(self, event):
'''
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> log = {u'data': {u'_': {u'file': u'log.py',
... u'fn': u'start',
... u'ln': 8,
... u'name': u'__main__'},
... u'a': 1,
... u'b': 2,
... u'__ignore_this': 'some_string',
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}
>>> tags, fields = idbf._tag_and_field_maker(log)
>>> from pprint import pprint
>>> pprint(tags)
{u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'}
>>> pprint(fields)
{u'data._': "{u'ln': 8, u'fn': u'start', u'file': u'log.py', u'name': u'__main__'}",
u'data.a': 1,
u'data.b': 2}
'''
data = event.pop('data')
data = flatten_dict({'data': data})
t = dict((k, event[k]) for k in event if k not in self.EXCLUDE_TAGS)
f = dict()
for k in data:
v = data[k]
if is_number(v) or isinstance(v, MarkValue):
f[k] = v
else:
#if v.startswith('_'): f[k] = eval(v.split('_', 1)[1])
t[k] = v
return t, f
|
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> log = {u'data': {u'_': {u'file': u'log.py',
... u'fn': u'start',
... u'ln': 8,
... u'name': u'__main__'},
... u'a': 1,
... u'b': 2,
... u'__ignore_this': 'some_string',
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}
>>> tags, fields = idbf._tag_and_field_maker(log)
>>> from pprint import pprint
>>> pprint(tags)
{u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'}
>>> pprint(fields)
{u'data._': "{u'ln': 8, u'fn': u'start', u'file': u'log.py', u'name': u'__main__'}",
u'data.a': 1,
u'data.b': 2}
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/forwarders.py#L167-L221
| null |
class InfluxDBForwarder(BaseForwarder):
EXCLUDE_TAGS = set(["id", "raw", "timestamp", "type", "event", "error"])
def __init__(self,
host, port,
user, password,
db, collection, log=DUMMY):
self.host = host
self.port = port
self.user = user
self.passwd = password
self.db_name = db
self.log = log
if host != 'no_host':
self._ensure_connection()
def _ensure_connection(self):
# Establish connection to influxDB to store metrics
self.influxdb_client = InfluxDBClient(self.host, self.port, self.user,
self.passwd, self.db_name)
self.log.info('influxdb_server_connection_established', host=self.host)
self.influxdb_database = self.influxdb_client.create_database(
self.db_name)
self.log.info('influxdb_database_created', dbname=self.db_name)
def _parse_msg_for_influxdb(self, msgs):
'''
>>> from logagg.forwarders import InfluxDBForwarder
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> valid_log = [{u'data': {u'_force_this_as_field': 'CXNS CNS nbkbsd',
... u'a': 1,
... u'b': 2,
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}]
>>> pointvalues = idbf._parse_msg_for_influxdb(valid_log)
>>> from pprint import pprint
>>> pprint(pointvalues)
[{'fields': {u'data._force_this_as_field': "'CXNS CNS nbkbsd'",
u'data.a': 1,
u'data.b': 2},
'measurement': u'some_log',
'tags': {u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'},
'time': u'2018-04-09T09:59:24.733945Z'}]
>>> invalid_log = valid_log
>>> invalid_log[0]['error'] = True
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
>>> invalid_log = valid_log
>>> invalid_log[0]['type'] = 'log'
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
'''
series = []
for msg in msgs:
if msg.get('error'):
continue
if msg.get('type').lower() == 'metric':
time = msg.get('timestamp')
measurement = msg.get('event')
tags, fields = self._tag_and_field_maker(msg)
pointvalues = {
"time": time,
"measurement": measurement,
"fields": fields,
"tags": tags}
series.append(pointvalues)
return series
def handle_logs(self, msgs):
self.log.debug('parsing_of_metrics_started')
records = self._parse_msg_for_influxdb(msgs)
self.log.debug('parsing_of_metrics_completed')
try:
self.log.debug('inserting_the_metrics_into_influxdb')
self.influxdb_client.write_points(records)
self.log.info('metrics_inserted_into_influxdb',
num_records=len(records),
type='metric')
except (InfluxDBClientError, InfluxDBServerError) as e:
self.log.exception('failed_to_insert metric',
record=records,
num_records=len(records),
type='metric')
|
deep-compute/logagg
|
logagg/forwarders.py
|
InfluxDBForwarder._parse_msg_for_influxdb
|
python
|
def _parse_msg_for_influxdb(self, msgs):
'''
>>> from logagg.forwarders import InfluxDBForwarder
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> valid_log = [{u'data': {u'_force_this_as_field': 'CXNS CNS nbkbsd',
... u'a': 1,
... u'b': 2,
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}]
>>> pointvalues = idbf._parse_msg_for_influxdb(valid_log)
>>> from pprint import pprint
>>> pprint(pointvalues)
[{'fields': {u'data._force_this_as_field': "'CXNS CNS nbkbsd'",
u'data.a': 1,
u'data.b': 2},
'measurement': u'some_log',
'tags': {u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'},
'time': u'2018-04-09T09:59:24.733945Z'}]
>>> invalid_log = valid_log
>>> invalid_log[0]['error'] = True
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
>>> invalid_log = valid_log
>>> invalid_log[0]['type'] = 'log'
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
'''
series = []
for msg in msgs:
if msg.get('error'):
continue
if msg.get('type').lower() == 'metric':
time = msg.get('timestamp')
measurement = msg.get('event')
tags, fields = self._tag_and_field_maker(msg)
pointvalues = {
"time": time,
"measurement": measurement,
"fields": fields,
"tags": tags}
series.append(pointvalues)
return series
|
>>> from logagg.forwarders import InfluxDBForwarder
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> valid_log = [{u'data': {u'_force_this_as_field': 'CXNS CNS nbkbsd',
... u'a': 1,
... u'b': 2,
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}]
>>> pointvalues = idbf._parse_msg_for_influxdb(valid_log)
>>> from pprint import pprint
>>> pprint(pointvalues)
[{'fields': {u'data._force_this_as_field': "'CXNS CNS nbkbsd'",
u'data.a': 1,
u'data.b': 2},
'measurement': u'some_log',
'tags': {u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'},
'time': u'2018-04-09T09:59:24.733945Z'}]
>>> invalid_log = valid_log
>>> invalid_log[0]['error'] = True
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
>>> invalid_log = valid_log
>>> invalid_log[0]['type'] = 'log'
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/forwarders.py#L223-L290
| null |
class InfluxDBForwarder(BaseForwarder):
EXCLUDE_TAGS = set(["id", "raw", "timestamp", "type", "event", "error"])
def __init__(self,
host, port,
user, password,
db, collection, log=DUMMY):
self.host = host
self.port = port
self.user = user
self.passwd = password
self.db_name = db
self.log = log
if host != 'no_host':
self._ensure_connection()
def _ensure_connection(self):
# Establish connection to influxDB to store metrics
self.influxdb_client = InfluxDBClient(self.host, self.port, self.user,
self.passwd, self.db_name)
self.log.info('influxdb_server_connection_established', host=self.host)
self.influxdb_database = self.influxdb_client.create_database(
self.db_name)
self.log.info('influxdb_database_created', dbname=self.db_name)
def _tag_and_field_maker(self, event):
'''
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> log = {u'data': {u'_': {u'file': u'log.py',
... u'fn': u'start',
... u'ln': 8,
... u'name': u'__main__'},
... u'a': 1,
... u'b': 2,
... u'__ignore_this': 'some_string',
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}
>>> tags, fields = idbf._tag_and_field_maker(log)
>>> from pprint import pprint
>>> pprint(tags)
{u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'}
>>> pprint(fields)
{u'data._': "{u'ln': 8, u'fn': u'start', u'file': u'log.py', u'name': u'__main__'}",
u'data.a': 1,
u'data.b': 2}
'''
data = event.pop('data')
data = flatten_dict({'data': data})
t = dict((k, event[k]) for k in event if k not in self.EXCLUDE_TAGS)
f = dict()
for k in data:
v = data[k]
if is_number(v) or isinstance(v, MarkValue):
f[k] = v
else:
#if v.startswith('_'): f[k] = eval(v.split('_', 1)[1])
t[k] = v
return t, f
def handle_logs(self, msgs):
self.log.debug('parsing_of_metrics_started')
records = self._parse_msg_for_influxdb(msgs)
self.log.debug('parsing_of_metrics_completed')
try:
self.log.debug('inserting_the_metrics_into_influxdb')
self.influxdb_client.write_points(records)
self.log.info('metrics_inserted_into_influxdb',
num_records=len(records),
type='metric')
except (InfluxDBClientError, InfluxDBServerError) as e:
self.log.exception('failed_to_insert metric',
record=records,
num_records=len(records),
type='metric')
|
deep-compute/logagg
|
logagg/nsqsender.py
|
NSQSender._is_ready
|
python
|
def _is_ready(self, topic_name):
'''
Is NSQ running and have space to receive messages?
'''
url = 'http://%s/stats?format=json&topic=%s' % (self.nsqd_http_address, topic_name)
#Cheacking for ephmeral channels
if '#' in topic_name:
topic_name, tag =topic_name.split("#", 1)
try:
data = self.session.get(url).json()
'''
data = {u'start_time': 1516164866, u'version': u'1.0.0-compat', \
u'health': u'OK', u'topics': [{u'message_count': 19019, \
u'paused': False, u'topic_name': u'test_topic', u'channels': [], \
u'depth': 19019, u'backend_depth': 9019, u'e2e_processing_latency': {u'count': 0, \
u'percentiles': None}}]}
'''
topics = data.get('topics', [])
topics = [t for t in topics if t['topic_name'] == topic_name]
if not topics:
raise Exception('topic_missing_at_nsq')
topic = topics[0]
depth = topic['depth']
depth += sum(c.get('depth', 0) for c in topic['channels'])
self.log.debug('nsq_depth_check', topic=topic_name,
depth=depth, max_depth=self.nsq_max_depth)
if depth < self.nsq_max_depth:
return
else:
raise Exception('nsq_is_full_waiting_to_clear')
except:
raise
|
Is NSQ running and have space to receive messages?
|
train
|
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/nsqsender.py#L39-L74
| null |
class NSQSender(object):
NSQ_READY_CHECK_INTERVAL = 1 # Wait time to check nsq readiness (alive and not full)
HEARTBEAT_TOPIC = 'Heartbeat#ephemeral' # Topic name at which heartbeat is to be sent
MPUB_URL = 'http://%s/mpub?topic=%s' # Url to post msgs to NSQ
def __init__(self, http_loc, nsq_topic, nsq_max_depth, log=util.DUMMY):
self.nsqd_http_address = http_loc
self.topic_name = nsq_topic
self.nsq_max_depth = nsq_max_depth
self.log = log
self.session = requests
self._ensure_topic(self.topic_name)
self._ensure_topic(self.HEARTBEAT_TOPIC)
@keeprunning(NSQ_READY_CHECK_INTERVAL,
exit_on_success=True,
on_error=util.log_exception)
def _ensure_topic(self, topic_name):
u = 'http://%s/topic/create?topic=%s' % (self.nsqd_http_address, topic_name)
try:
self.session.post(u, timeout=1)
except requests.exceptions.RequestException as e:
self.log.exception('could_not_create_topic,retrying....', topic=topic_name)
raise
self.log.info('created_topic ', topic=topic_name)
@keeprunning(NSQ_READY_CHECK_INTERVAL,
exit_on_success=True,
on_error=util.log_exception)
@keeprunning(NSQ_READY_CHECK_INTERVAL,
exit_on_success=True,
on_error=util.log_exception)
def _send_messages(self, msgs, topic_name):
url = self.MPUB_URL % (self.nsqd_http_address, topic_name)
try:
self.session.post(url, data=msgs, timeout=5) # TODO What if session expires?
except (SystemExit, KeyboardInterrupt): raise
except requests.exceptions.RequestException as e:
raise
self.log.debug('nsq push done ', nmsgs=len(msgs), nbytes=len(msgs))
def handle_logs(self, msgs):
self._is_ready(topic_name=self.topic_name)
msgs = '\n'.join(m['log'] for m in msgs)
self._send_messages(msgs, topic_name=self.topic_name)
def handle_heartbeat(self, heartbeat):
msgs = json.dumps(heartbeat)
self._is_ready(topic_name=self.HEARTBEAT_TOPIC)
self._send_messages(msgs, topic_name=self.HEARTBEAT_TOPIC)
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
defaultCrawlId
|
python
|
def defaultCrawlId():
timestamp = datetime.now().isoformat().replace(':', '_')
user = getuser()
return '_'.join(('crawl', user, timestamp))
|
Provide a reasonable default crawl name using the user name and date
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L91-L98
| null |
#!/usr/bin/env python2.7
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from __future__ import division
USAGE = """
A simple python client for Nutch using the Nutch server REST API.
Most commands return results in JSON format by default, or plain text.
To control Nutch, please see wiki:
https://github.com/chrismattmann/nutch-python/wiki#get-your-nutch-python-script-going
To get/set the configuration of the Nutch server, use:
-- nt.configGetList() # get list of named configurations
-- nt.configGetInfo(id) # get parameters in named config.
-- nt.configCreate(id, parameterDict) # create a new named config.
To see the status of jobs, use:
-- nt.jobGetList() # get list of running jobs
-- nt.jobGetInfo(id) # get metadata for a job id
-- nt.jobStop(id) # stop a job, DANGEROUS!!, may corrupt segment files
"""
import collections
from datetime import datetime
import getopt
from getpass import getuser
import requests
import sys
from time import sleep
DefaultServerHost = "localhost"
DefaultPort = "8081"
DefaultServerEndpoint = 'http://' + DefaultServerHost + ':' + DefaultPort
DefaultConfig = 'default'
DefaultUserAgent = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
LegalJobs = ['INJECT', 'GENERATE', 'FETCH', 'PARSE', 'UPDATEDB',
'CRAWL', 'DEDUP', 'INVERTLINKS', 'INDEX']
RequestVerbs = {'get': requests.get, 'put': requests.put, 'post': requests.post, 'delete': requests.delete}
TextSendHeader = {'Content-Type': 'text/plain'}
TextAcceptHeader = {'Accept': 'text/plain'}
JsonAcceptHeader = {'Accept': 'application/json'}
class NutchException(Exception):
status_code = None
class NutchCrawlException(NutchException):
current_job = None
completed_jobs = []
# TODO: Replace with Python logger
Verbose = True
def echo2(*s):
sys.stderr.write('nutch.py: ' + ' '.join(map(str, s)) + '\n')
def warn(*s):
echo2('Warn:', *s)
def die(*s):
echo2('Error:', *s)
echo2(USAGE)
sys.exit()
class Server:
"""
Implements basic interactions with a Nutch RESTful Server
"""
def __init__(self, serverEndpoint, raiseErrors=True):
"""
Create a Server object for low-level interactions with a Nutch RESTful Server
:param serverEndpoint: URL of the server
:param raiseErrors: Raise an exception for non-200 status codes
"""
self.serverEndpoint = serverEndpoint
self.raiseErrors = raiseErrors
def call(self, verb, servicePath, data=None, headers=None, forceText=False, sendJson=True):
"""Call the Nutch Server, do some error checking, and return the response.
:param verb: One of nutch.RequestVerbs
:param servicePath: path component of URL to append to endpoint, e.g. '/config'
:param data: Data to attach to this request
:param headers: headers to attach to this request, default are JsonAcceptHeader
:param forceText: don't trust the response headers and just get the text
:param sendJson: Whether to treat attached data as JSON or not
"""
default_data = {} if sendJson else ""
data = data if data else default_data
headers = headers if headers else JsonAcceptHeader.copy()
if not sendJson:
headers.update(TextSendHeader)
if verb not in RequestVerbs:
die('Server call verb must be one of %s' % str(RequestVerbs.keys()))
if Verbose:
echo2("%s Endpoint:" % verb.upper(), servicePath)
echo2("%s Request data:" % verb.upper(), data)
echo2("%s Request headers:" % verb.upper(), headers)
verbFn = RequestVerbs[verb]
if sendJson:
resp = verbFn(self.serverEndpoint + servicePath, json=data, headers=headers)
else:
resp = verbFn(self.serverEndpoint + servicePath, data=data, headers=headers)
if Verbose:
echo2("Response headers:", resp.headers)
echo2("Response status:", resp.status_code)
if resp.status_code != 200:
if self.raiseErrors:
error = NutchException("Unexpected server response: %d" % resp.status_code)
error.status_code = resp.status_code
raise error
else:
warn('Nutch server returned status:', resp.status_code)
if forceText or 'content-type' not in resp.headers or resp.headers['content-type'] == 'text/plain':
if Verbose:
echo2("Response text:", resp.text)
return resp.text
content_type = resp.headers['content-type']
if content_type == 'application/json' and not forceText:
if Verbose:
echo2("Response JSON:", resp.json())
return resp.json()
else:
die('Did not understand server response: %s' % resp.headers)
defaultServer = lambda: Server(DefaultServerEndpoint)
class IdEqualityMixin(object):
"""
Mix-in class to use self.id == other.id to check for equality
"""
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.id == other.id)
def __ne__(self, other):
return not self.__eq__(other)
class Job(IdEqualityMixin):
"""
Representation of a running Nutch job, use JobClient to get a list of running jobs or to create one
"""
def __init__(self, jid, server):
self.id = jid
self.server = server
def info(self):
"""Get current information about this job"""
return self.server.call('get', '/job/' + self.id)
def stop(self):
return self.server.call('get', '/job/%s/stop' % self.id)
def abort(self):
return self.server.call('get', '/job/%s/abort' % self.id)
class Config(IdEqualityMixin):
"""
Representation of an active Nutch configuration
Use ConfigClient to get a list of configurations or create a new one
"""
def __init__(self, cid, server):
self.id = cid
self.server = server
def __str__(self):
return "Config(id:%s, ...)" %self.id
def delete(self):
return self.server.call('delete', '/config/' + self.id)
def info(self):
return self.server.call('get', '/config/' + self.id)
def parameter(self, parameterId):
return self.server.call('get', '/config/%s/%s' % (self.id, parameterId))
def __getitem__(self, item):
"""
Overload [] to provide get access to parameters
:param item: the name of a parameter
:return: the parameter if the name is valid, otherwise raise NutchException
"""
return self.server.call('get', '/config/%s/%s' % (self.id, item), forceText=True)
def __setitem__(self, key, value):
"""
Overload [] to provide set access to configurations
:param key: the name of the parameter to set
:param value: the data associated with this parameter
:return: the set value
"""
self.server.call('put', '/config/%s/%s' % (self.id, key), value, sendJson=False)
return value
class Seed(IdEqualityMixin):
"""
Representation of an active Nutch seed list
Use SeedClient to get a list of seed lists or create a new one
"""
def __init__(self, sid, seedPath, server):
self.id = sid
self.seedPath = seedPath
self.server = server
class ConfigClient:
    def __init__(self, server):
        """Nutch Config client

        List named configurations, create new ones, or delete them with methods to get the list of named
        configurations, get parameters for a named configuration, get an individual parameter of a named
        configuration, create a new named configuration using a parameter dictionary, and delete a named configuration.

        :param server: Server instance used for the REST calls
        """
        self.server = server

    def list(self):
        """Return a Config object for every named configuration on the server."""
        configs = self.server.call('get', '/config')
        return [Config(cid, self.server) for cid in configs]

    def create(self, cid, configData):
        """
        Create a new named (cid) configuration from a parameter dictionary (configData).

        :param cid: name for the new configuration
        :param configData: dict of configuration parameters
        :return: the created Config object
        """
        configArgs = {'configId': cid, 'params': configData, 'force': True}
        cid = self.server.call('post', "/config/create", configArgs, forceText=True, headers=TextAcceptHeader)
        new_config = Config(cid, self.server)
        return new_config

    def __getitem__(self, item):
        """
        Overload [] to provide get access to configurations

        :param item: the name of a configuration
        :return: the Config object if the name is valid, otherwise raise KeyError
        """
        # let's be optimistic...
        config = Config(item, self.server)
        if config.info():
            return config
        # not found! (include the key so the error is self-describing)
        raise KeyError(item)

    def __setitem__(self, key, value):
        """
        Overload [] to provide set access to configurations

        :param key: the name of the configuration to create
        :param value: the dict-like data associated with this configuration
        :return: the created Config object
        """
        # ``collections.Mapping`` was removed in Python 3.10; the ABC has
        # lived in ``collections.abc`` since Python 3.3.
        from collections.abc import Mapping
        if not isinstance(value, Mapping):
            raise TypeError(repr(value) + "is not a dict-like object")
        return self.create(key, value)
class JobClient:
    def __init__(self, server, crawlId, confId, parameters=None):
        """
        Nutch Job client with methods to list, create jobs.

        When the client is created, a crawlID and confID are associated.
        The client will automatically filter out jobs that do not match the associated crawlId or confId.

        :param server: Server instance used for the REST calls
        :param crawlId: crawl id stamped onto every job this client creates
        :param confId: configuration id stamped onto every job
        :param parameters: optional base parameter dict; must contain an 'args' dict
        :return:
        """
        self.server = server
        self.crawlId = crawlId
        self.confId = confId
        # Base parameter template; 'args' is merged with per-job kwargs in create().
        self.parameters=parameters if parameters else {'args': dict()}

    def _job_owned(self, job):
        # True when the job dict belongs to this client's crawl/config pair.
        return job['crawlId'] == self.crawlId and job['confId'] == self.confId

    def list(self, allJobs=False):
        """
        Return list of jobs at this endpoint.
        Call get(allJobs=True) to see all jobs, not just the ones managed by this Client
        """
        jobs = self.server.call('get', '/job')
        return [Job(job['id'], self.server) for job in jobs if allJobs or self._job_owned(job)]

    def create(self, command, **args):
        """
        Create a job given a command

        :param command: Nutch command, one of nutch.LegalJobs
        :param args: Additional arguments to pass to the job
        :return: The created Job
        """
        command = command.upper()
        # NOTE(review): an unrecognized command only warns and then still
        # posts the job to the server -- confirm whether it should abort.
        if command not in LegalJobs:
            warn('Nutch command must be one of: %s' % ', '.join(LegalJobs))
        else:
            echo2('Starting %s job with args %s' % (command, str(args)))
        parameters = self.parameters.copy()
        parameters['type'] = command
        parameters['crawlId'] = self.crawlId
        parameters['confId'] = self.confId
        # NOTE(review): dict.copy() is shallow, so this update mutates the
        # shared self.parameters['args'] dict across calls -- verify intended.
        parameters['args'].update(args)
        job_info = self.server.call('post', "/job/create", parameters, JsonAcceptHeader)
        job = Job(job_info['id'], self.server)
        return job

    # some short-hand functions
    def inject(self, seed=None, urlDir=None, **args):
        """
        :param seed: A Seed object (this or urlDir must be specified)
        :param urlDir: The directory on the server containing the seed list (this or urlDir must be specified)
        :param args: Extra arguments for the job
        :return: a created Job object
        """
        if seed:
            # A Seed and a conflicting urlDir are mutually exclusive.
            if urlDir and urlDir != seed.seedPath:
                raise NutchException("Can't specify both seed and urlDir")
            urlDir = seed.seedPath
        elif urlDir:
            pass
        else:
            raise NutchException("Must specify seed or urlDir")
        args['url_dir'] = urlDir
        return self.create('INJECT', **args)

    def generate(self, **args):
        return self.create('GENERATE', **args)

    def fetch(self, **args):
        return self.create('FETCH', **args)

    def parse(self, **args):
        return self.create('PARSE', **args)

    def updatedb(self, **args):
        return self.create('UPDATEDB', **args)

    def stats(self):
        # Ask the crawldb endpoint for statistics for this crawl/config pair.
        statsArgs = {'confId': self.confId, 'crawlId': self.crawlId, 'type': 'stats', 'args': {}}
        return self.server.call('post', '/db/crawldb', statsArgs)
class SeedClient():
    def __init__(self, server):
        """Nutch Seed client

        Client for uploading seed lists to Nutch.

        :param server: Server instance used for the REST calls
        """
        self.server = server

    def create(self, sid, seedList):
        """
        Create a new named (sid) Seed from a list of seed URLs

        :param sid: the name to assign to the new seed list
        :param seedList: a single URL string or any iterable of URL strings
        :return: the created Seed object
        """
        seedUrl = lambda uid, url: {"id": uid, "url": url}
        # Accept a bare URL string as well as any iterable of URLs.  The old
        # check only special-cased tuples, so a plain *list* of URLs was
        # wrapped into a one-element tuple and sent as a single bogus URL.
        if isinstance(seedList, str):
            seedList = (seedList,)
        seedListData = {
            "id": "12345",
            "name": sid,
            "seedUrls": [seedUrl(uid, url) for uid, url in enumerate(seedList)]
        }
        # As per resolution of https://issues.apache.org/jira/browse/NUTCH-2123
        seedPath = self.server.call('post', "/seed/create", seedListData, TextAcceptHeader)
        new_seed = Seed(sid, seedPath, self.server)
        return new_seed

    def createFromFile(self, sid, filename):
        """
        Create a new named (sid) Seed from a file containing URLs

        It's assumed URLs are whitespace separated.

        :param sid: the name to assign to the new seed list
        :param filename: the name of the file that contains URLs
        :return: the created Seed object
        """
        # Flatten whitespace-separated URLs from every line of the file.
        urls = []
        with open(filename) as f:
            for line in f:
                urls.extend(line.split())
        return self.create(sid, tuple(urls))
class CrawlClient():
    def __init__(self, server, seed, jobClient, rounds, index):
        """Nutch Crawl manager

        High-level Nutch client for managing crawls.
        When this client is initialized, the seed list will automatically be injected.

        There are several ways to proceed from here:

        progress() - check the status of the current job, enqueue the next job if the
         current job is finished, and return immediately
        nextRound() - wait and enqueue jobs until the current round is finished and return
        waitAll() - wait and enqueue jobs until all rounds are finished and return

        It is recommended to use progress() in a while loop for any applications that need to remain interactive.

        :param server: Server used for REST calls
        :param seed: Seed object handed to the initial INJECT job
        :param jobClient: JobClient used to create the individual crawl jobs
        :param rounds: number of generate/fetch/parse/update rounds to run
        :param index: whether to run an INDEX job at the end of each round
        """
        self.server = server
        self.jobClient = jobClient
        self.crawlId = jobClient.crawlId
        self.currentRound = 1
        self.totalRounds = rounds
        self.currentJob = None
        self.sleepTime = 1  # seconds between status polls in nextRound()
        self.enable_index = index
        # dispatch injection
        self.currentJob = self.jobClient.inject(seed)

    def _nextJob(self, job, nextRound=True):
        """
        Given a completed job, start the next job in the round, or return None

        :param job: the just-finished Job
        :param nextRound: whether to start jobs from the next round if the current round is completed.
        :return: the newly started Job, or None if no job was started
        """
        jobInfo = job.info()
        assert jobInfo['state'] == 'FINISHED'
        roundEnd = False
        # Fixed pipeline: INJECT -> GENERATE -> FETCH -> PARSE -> UPDATEDB
        # -> INVERTLINKS -> DEDUP [-> INDEX] -> end of round.
        if jobInfo['type'] == 'INJECT':
            nextCommand = 'GENERATE'
        elif jobInfo['type'] == 'GENERATE':
            nextCommand = 'FETCH'
        elif jobInfo['type'] == 'FETCH':
            nextCommand = 'PARSE'
        elif jobInfo['type'] == 'PARSE':
            nextCommand = 'UPDATEDB'
        elif jobInfo['type'] == 'UPDATEDB':
            nextCommand = 'INVERTLINKS'
        elif jobInfo['type'] == 'INVERTLINKS':
            nextCommand = 'DEDUP'
        elif jobInfo['type'] == 'DEDUP':
            if self.enable_index:
                nextCommand = 'INDEX'
            else:
                roundEnd = True
        elif jobInfo['type'] == 'INDEX':
            roundEnd = True
        else:
            raise NutchException("Unrecognized job type {}".format(jobInfo['type']))
        if roundEnd:
            if nextRound and self.currentRound < self.totalRounds:
                nextCommand = 'GENERATE'
                self.currentRound += 1
            else:
                return None
        return self.jobClient.create(nextCommand)

    def progress(self, nextRound=True):
        """
        Check the status of the current job, activate the next job if it's finished, and return the active job

        If the current job has failed, a NutchCrawlException will be raised with the failed job attached.

        :param nextRound: whether to start jobs from the next round if the current job/round is completed.
        :return: the currently running Job, or None if no jobs are running.
        """
        currentJob = self.currentJob
        if currentJob is None:
            return currentJob
        jobInfo = currentJob.info()
        if jobInfo['state'] == 'RUNNING':
            return currentJob
        elif jobInfo['state'] == 'FINISHED':
            nextJob = self._nextJob(currentJob, nextRound)
            self.currentJob = nextJob
            return nextJob
        else:
            error = NutchCrawlException("Unexpected job state: {}".format(jobInfo['state']))
            error.current_job = currentJob
            # Bug fix: raise the populated exception instance; the old code
            # raised the bare class, discarding the message and current_job.
            raise error

    def addRounds(self, numRounds=1):
        """
        Add more rounds to the crawl. This command does not start execution.

        :param numRounds: the number of rounds to add to the crawl
        :return: the total number of rounds scheduled for execution
        """
        self.totalRounds += numRounds
        return self.totalRounds

    def nextRound(self):
        """
        Execute all jobs in the current round and return when they have finished.

        :return: a list of all completed Jobs
        """
        finishedJobs = []
        if self.currentJob is None:
            self.currentJob = self.jobClient.create('GENERATE')
        activeJob = self.progress(nextRound=False)
        while activeJob:
            oldJob = activeJob
            activeJob = self.progress(nextRound=False)  # updates self.currentJob
            if oldJob and oldJob != activeJob:
                finishedJobs.append(oldJob)
            sleep(self.sleepTime)
        self.currentRound += 1
        return finishedJobs

    def waitAll(self):
        """
        Execute all queued rounds and return when they have finished.

        :return: a list of jobs completed for each round, organized by round (list-of-lists)
        """
        finishedRounds = [self.nextRound()]
        while self.currentRound < self.totalRounds:
            finishedRounds.append(self.nextRound())
        return finishedRounds
class Nutch:
    def __init__(self, confId=DefaultConfig, serverEndpoint=DefaultServerEndpoint, raiseErrors=True, **args):
        '''
        Nutch client for interacting with a Nutch instance over its REST API.

        Constructor:

        nt = Nutch()

        Optional arguments:

        confID - The name of the default configuration file to use, by default: nutch.DefaultConfig
        serverEndpoint - The location of the Nutch server, by default: nutch.DefaultServerEndpoint
        raiseErrors - raise exceptions if server response is not 200

        Provides functions:
            server - getServerStatus, stopServer
            config - get and set parameters for this configuration
            job - get list of running jobs, get job metadata, stop/abort a job by id, and create a new job

        To start a crawl job, use:
            Crawl() - or use the methods inject, generate, fetch, parse, updatedb in that order.

        To run a crawl in one method, use:
        -- nt = Nutch()
        -- response, status = nt.crawl()

        Methods return a tuple of two items, the response content (JSON or text) and the response status.
        '''
        self.confId = confId
        self.server = Server(serverEndpoint, raiseErrors)
        # ConfigClient.__getitem__ raises KeyError if confId is unknown.
        self.config = ConfigClient(self.server)[self.confId]
        self.job_parameters = dict()
        self.job_parameters['confId'] = confId
        self.job_parameters['args'] = args # additional config. args as a dictionary
        # if the configuration doesn't contain a user agent, set a default one.
        if 'http.agent.name' not in self.config.info():
            self.config['http.agent.name'] = DefaultUserAgent

    def Jobs(self, crawlId=None):
        """
        Create a JobClient for listing and creating jobs.
        The JobClient inherits the confId from the Nutch client.

        :param crawlId: crawlIds to use for this client. If not provided, will be generated
         by nutch.defaultCrawlId()
        :return: a JobClient
        """
        crawlId = crawlId if crawlId else defaultCrawlId()
        return JobClient(self.server, crawlId, self.confId)

    def Config(self):
        # Active Config chosen at construction time.
        return self.config

    def Configs(self):
        # Fresh ConfigClient bound to this client's server.
        return ConfigClient(self.server)

    def Seeds(self):
        # Fresh SeedClient bound to this client's server.
        return SeedClient(self.server)

    def Crawl(self, seed, seedClient=None, jobClient=None, rounds=1, index=True):
        """
        Launch a crawl using the given seed

        :param seed: Type (Seed or SeedList) - used for crawl
        :param seedClient: if a SeedList is given, the SeedClient to upload, if None a default will be created
        :param jobClient: the JobClient to be used, if None a default will be created
        :param rounds: the number of rounds in the crawl
        :param index: whether to run an INDEX job at the end of each round
        :return: a CrawlClient to monitor and control the crawl
        """
        if seedClient is None:
            seedClient = self.Seeds()
        if jobClient is None:
            jobClient = self.Jobs()
        # NOTE(review): type() rejects Seed subclasses; isinstance() would
        # be the usual check -- confirm exact-type matching is intended.
        if type(seed) != Seed:
            seed = seedClient.create(jobClient.crawlId + '_seeds', seed)
        return CrawlClient(self.server, seed, jobClient, rounds, index)

    ## convenience functions
    ## TODO: Decide if any of these should be deprecated.
    def getServerStatus(self):
        """Return the server's /admin status document."""
        return self.server.call('get', '/admin')

    def stopServer(self):
        """Ask the Nutch server process to shut down."""
        return self.server.call('post', '/admin/stop', headers=TextAcceptHeader)

    def configGetList(self):
        """Shorthand for Configs().list()."""
        return self.Configs().list()

    def configGetInfo(self, cid):
        """Shorthand for Configs()[cid].info()."""
        return self.Configs()[cid].info()

    def configGetParameter(self, cid, parameterId):
        """Shorthand for Configs()[cid][parameterId]."""
        return self.Configs()[cid][parameterId]

    def configCreate(self, cid, config_data):
        """Shorthand for Configs().create(cid, config_data)."""
        return self.Configs().create(cid, config_data)
def main(argv=None):
    """Run Nutch command using REST API.

    Usage: nutch.py [options] <cmd> <crawlId> <confId> <urlDir> [argDict]

    :param argv: argument vector; defaults to sys.argv
    :return: the created Job
    """
    global Verbose, Mock
    if argv is None:
        argv = sys.argv
    if len(argv) < 5: die('Bad args')
    try:
        opts, argv = getopt.getopt(argv[1:], 'hs:p:mv',
                                   ['help', 'server=', 'port=', 'mock', 'verbose'])
    except getopt.GetoptError as err:
        # print help information and exit:
        print(err)  # will print something like "option -a not recognized"
        die()

    serverEndpoint = DefaultServerEndpoint
    for opt, val in opts:
        if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
        elif opt in ('-s', '--server'): serverEndpoint = val
        elif opt in ('-p', '--port'): serverEndpoint = 'http://localhost:%s' % val
        elif opt in ('-m', '--mock'): Mock = 1
        elif opt in ('-v', '--verbose'): Verbose = 1
        else: die(USAGE)

    cmd = argv[0]
    crawlId = argv[1]
    confId = argv[2]
    urlDir = argv[3]
    args = {}
    if len(argv) > 4:
        # Parse the optional job-argument dict safely; eval() would execute
        # arbitrary code taken from the command line.
        import ast
        args = ast.literal_eval(argv[4])
    # Bug fix: Nutch() takes (confId, serverEndpoint, raiseErrors, **args);
    # the old call passed (crawlId, confId, serverEndpoint, urlDir).  The
    # crawl id belongs to the JobClient and the seed directory is a job arg.
    nt = Nutch(confId, serverEndpoint)
    args.setdefault('url_dir', urlDir)
    return nt.Jobs(crawlId).create(cmd, **args)
if __name__ == '__main__':
    # Bug fix: main() previously returned None, so indexing the result with
    # resp[0] always crashed.  main() reports via stderr/exceptions itself.
    main(sys.argv)
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
main
|
python
|
def main(argv=None):
global Verbose, Mock
if argv is None:
argv = sys.argv
if len(argv) < 5: die('Bad args')
try:
opts, argv = getopt.getopt(argv[1:], 'hs:p:mv',
['help', 'server=', 'port=', 'mock', 'verbose'])
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
die()
serverEndpoint = DefaultServerEndpoint
# TODO: Fix this
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-s', '--server'): serverEndpoint = val
elif opt in ('-p', '--port'): serverEndpoint = 'http://localhost:%s' % val
elif opt in ('-m', '--mock'): Mock = 1
elif opt in ('-v', '--verbose'): Verbose = 1
else: die(USAGE)
cmd = argv[0]
crawlId = argv[1]
confId = argv[2]
urlDir = argv[3]
args = {}
if len(argv) > 4: args = eval(argv[4])
nt = Nutch(crawlId, confId, serverEndpoint, urlDir)
nt.Jobs().create(cmd, **args)
|
Run Nutch command using REST API.
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L716-L749
|
[
"def echo2(*s):\n sys.stderr.write('nutch.py: ' + ' '.join(map(str, s)) + '\\n')\n",
"def die(*s):\n echo2('Error:', *s)\n echo2(USAGE)\n sys.exit()\n",
"def create(self, command, **args):\n \"\"\"\n Create a job given a command\n :param command: Nutch command, one of nutch.LegalJobs\n :param args: Additional arguments to pass to the job\n :return: The created Job\n \"\"\"\n\n command = command.upper()\n if command not in LegalJobs:\n warn('Nutch command must be one of: %s' % ', '.join(LegalJobs))\n else:\n echo2('Starting %s job with args %s' % (command, str(args)))\n parameters = self.parameters.copy()\n parameters['type'] = command\n parameters['crawlId'] = self.crawlId\n parameters['confId'] = self.confId\n parameters['args'].update(args)\n\n job_info = self.server.call('post', \"/job/create\", parameters, JsonAcceptHeader)\n\n job = Job(job_info['id'], self.server)\n return job\n",
"def Jobs(self, crawlId=None):\n \"\"\"\n Create a JobClient for listing and creating jobs.\n The JobClient inherits the confId from the Nutch client.\n\n :param crawlId: crawlIds to use for this client. If not provided, will be generated\n by nutch.defaultCrawlId()\n :return: a JobClient\n \"\"\"\n crawlId = crawlId if crawlId else defaultCrawlId()\n return JobClient(self.server, crawlId, self.confId)\n"
] |
#!/usr/bin/env python2.7
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from __future__ import division
# Help text printed for -h/--help and appended to every die() message.
USAGE = """
A simple python client for Nutch using the Nutch server REST API.
Most commands return results in JSON format by default, or plain text.
To control Nutch, please see wiki:
https://github.com/chrismattmann/nutch-python/wiki#get-your-nutch-python-script-going
To get/set the configuration of the Nutch server, use:
-- nt.configGetList() # get list of named configurations
-- nt.configGetInfo(id) # get parameters in named config.
-- nt.configCreate(id, parameterDict) # create a new named config.
To see the status of jobs, use:
-- nt.jobGetList() # get list of running jobs
-- nt.jobGetInfo(id) # get metadata for a job id
-- nt.jobStop(id) # stop a job, DANGEROUS!!, may corrupt segment files
"""
import collections
from datetime import datetime
import getopt
from getpass import getuser
import requests
import sys
from time import sleep
# Default location of the Nutch REST server.
DefaultServerHost = "localhost"
DefaultPort = "8081"
DefaultServerEndpoint = 'http://' + DefaultServerHost + ':' + DefaultPort

# Configuration name used when none is specified.
DefaultConfig = 'default'
# User agent installed when the active configuration does not define one.
DefaultUserAgent = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'

# Job types accepted by JobClient.create().
LegalJobs = ['INJECT', 'GENERATE', 'FETCH', 'PARSE', 'UPDATEDB',
             'CRAWL', 'DEDUP', 'INVERTLINKS', 'INDEX']
# Maps HTTP verb names to the corresponding requests functions.
RequestVerbs = {'get': requests.get, 'put': requests.put, 'post': requests.post, 'delete': requests.delete}

# Common header dictionaries for REST calls.
TextSendHeader = {'Content-Type': 'text/plain'}
TextAcceptHeader = {'Accept': 'text/plain'}
JsonAcceptHeader = {'Accept': 'application/json'}
class NutchException(Exception):
    """Base exception for Nutch REST errors."""
    # HTTP status code of the failed request; set by Server.call().
    status_code = None
class NutchCrawlException(NutchException):
    """Raised when a crawl job fails or reaches an unexpected state."""
    # Job that was active when the failure occurred (set before raising).
    current_job = None
    # NOTE(review): mutable class-level default is shared across instances --
    # confirm raisers always assign a fresh list instead of appending.
    completed_jobs = []
# TODO: Replace with Python logger
# Module-wide switch: when true, Server.call() echoes requests/responses.
Verbose = True
def echo2(*s):
    """Write the given items to stderr, prefixed with the script name."""
    message = ' '.join(map(str, s))
    sys.stderr.write('nutch.py: ' + message + '\n')
def warn(*s):
    """Print a warning message to stderr."""
    echo2('Warn:', *s)
def die(*s):
    """Print an error plus the usage text to stderr and exit the process."""
    echo2('Error:', *s)
    echo2(USAGE)
    sys.exit()
def defaultCrawlId():
    """
    Provide a reasonable default crawl name using the user name and date
    """
    # Colons are replaced so the id is safe to use in file and path names.
    stamp = datetime.now().isoformat().replace(':', '_')
    return 'crawl_{0}_{1}'.format(getuser(), stamp)
class Server:
    """
    Implements basic interactions with a Nutch RESTful Server
    """

    def __init__(self, serverEndpoint, raiseErrors=True):
        """
        Create a Server object for low-level interactions with a Nutch RESTful Server

        :param serverEndpoint: URL of the server
        :param raiseErrors: Raise an exception for non-200 status codes
        """
        self.serverEndpoint = serverEndpoint
        self.raiseErrors = raiseErrors

    def call(self, verb, servicePath, data=None, headers=None, forceText=False, sendJson=True):
        """Call the Nutch Server, do some error checking, and return the response.

        :param verb: One of nutch.RequestVerbs
        :param servicePath: path component of URL to append to endpoint, e.g. '/config'
        :param data: Data to attach to this request
        :param headers: headers to attach to this request, default are JsonAcceptHeader
        :param forceText: don't trust the response headers and just get the text
        :param sendJson: Whether to treat attached data as JSON or not
        """
        default_data = {} if sendJson else ""
        data = data if data else default_data
        # Copy so the update with TextSendHeader below never mutates the
        # module-level JsonAcceptHeader dict.
        headers = headers if headers else JsonAcceptHeader.copy()
        if not sendJson:
            # NOTE(review): a caller-supplied headers dict is mutated here --
            # confirm callers never reuse a shared dict.
            headers.update(TextSendHeader)
        if verb not in RequestVerbs:
            # die() prints usage and exits the whole process.
            die('Server call verb must be one of %s' % str(RequestVerbs.keys()))
        if Verbose:
            echo2("%s Endpoint:" % verb.upper(), servicePath)
            echo2("%s Request data:" % verb.upper(), data)
            echo2("%s Request headers:" % verb.upper(), headers)
        verbFn = RequestVerbs[verb]
        if sendJson:
            resp = verbFn(self.serverEndpoint + servicePath, json=data, headers=headers)
        else:
            resp = verbFn(self.serverEndpoint + servicePath, data=data, headers=headers)
        if Verbose:
            echo2("Response headers:", resp.headers)
            echo2("Response status:", resp.status_code)
        if resp.status_code != 200:
            if self.raiseErrors:
                error = NutchException("Unexpected server response: %d" % resp.status_code)
                error.status_code = resp.status_code
                raise error
            else:
                warn('Nutch server returned status:', resp.status_code)
        if forceText or 'content-type' not in resp.headers or resp.headers['content-type'] == 'text/plain':
            if Verbose:
                echo2("Response text:", resp.text)
            return resp.text
        content_type = resp.headers['content-type']
        if content_type == 'application/json' and not forceText:
            if Verbose:
                echo2("Response JSON:", resp.json())
            return resp.json()
        else:
            # NOTE(review): the exact string match above means a header like
            # 'application/json; charset=utf-8' falls through here to die(),
            # killing the process -- confirm a prefix check is not wanted.
            die('Did not understand server response: %s' % resp.headers)
def defaultServer():
    """Return a Server bound to the default endpoint."""
    return Server(DefaultServerEndpoint)
class IdEqualityMixin(object):
    """
    Mix-in class to use self.id == other.id to check for equality
    """

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.id == other.id

    def __ne__(self, other):
        return not self.__eq__(other)
class Job(IdEqualityMixin):
    """
    Representation of a running Nutch job, use JobClient to get a list of running jobs or to create one
    """

    def __init__(self, jid, server):
        self.id = jid
        self.server = server

    def info(self):
        """Get current information about this job"""
        return self.server.call('get', '/job/' + self.id)

    def stop(self):
        """Request that the server stop this job."""
        path = '/job/%s/stop' % self.id
        return self.server.call('get', path)

    def abort(self):
        """Request that the server abort this job."""
        path = '/job/%s/abort' % self.id
        return self.server.call('get', path)
class Config(IdEqualityMixin):
    """
    Representation of an active Nutch configuration

    Use ConfigClient to get a list of configurations or create a new one
    """

    def __init__(self, cid, server):
        self.id = cid
        self.server = server

    def __str__(self):
        return "Config(id:%s, ...)" % self.id

    def delete(self):
        """Remove this configuration from the server."""
        return self.server.call('delete', '/config/' + self.id)

    def info(self):
        """Return the full parameter dictionary for this configuration."""
        return self.server.call('get', '/config/' + self.id)

    def parameter(self, parameterId):
        """Return the value of a single named parameter."""
        path = '/config/%s/%s' % (self.id, parameterId)
        return self.server.call('get', path)

    def __getitem__(self, item):
        """
        Overload [] to provide get access to parameters

        :param item: the name of a parameter
        :return: the parameter if the name is valid, otherwise raise NutchException
        """
        path = '/config/%s/%s' % (self.id, item)
        return self.server.call('get', path, forceText=True)

    def __setitem__(self, key, value):
        """
        Overload [] to provide set access to configurations

        :param key: the name of the parameter to set
        :param value: the data associated with this parameter
        :return: the set value
        """
        path = '/config/%s/%s' % (self.id, key)
        self.server.call('put', path, value, sendJson=False)
        return value
class Seed(IdEqualityMixin):
    """
    Representation of an active Nutch seed list.

    Use SeedClient to get a list of seed lists or create a new one.
    """

    def __init__(self, sid, seedPath, server):
        # ``id`` drives equality via IdEqualityMixin.
        self.id = sid
        self.seedPath = seedPath
        self.server = server
class ConfigClient:
    def __init__(self, server):
        """Nutch Config client

        List named configurations, create new ones, or delete them with methods to get the list of named
        configurations, get parameters for a named configuration, get an individual parameter of a named
        configuration, create a new named configuration using a parameter dictionary, and delete a named configuration.

        :param server: Server instance used for the REST calls
        """
        self.server = server

    def list(self):
        """Return a Config object for every named configuration on the server."""
        configs = self.server.call('get', '/config')
        return [Config(cid, self.server) for cid in configs]

    def create(self, cid, configData):
        """
        Create a new named (cid) configuration from a parameter dictionary (configData).

        :param cid: name for the new configuration
        :param configData: dict of configuration parameters
        :return: the created Config object
        """
        configArgs = {'configId': cid, 'params': configData, 'force': True}
        cid = self.server.call('post', "/config/create", configArgs, forceText=True, headers=TextAcceptHeader)
        new_config = Config(cid, self.server)
        return new_config

    def __getitem__(self, item):
        """
        Overload [] to provide get access to configurations

        :param item: the name of a configuration
        :return: the Config object if the name is valid, otherwise raise KeyError
        """
        # let's be optimistic...
        config = Config(item, self.server)
        if config.info():
            return config
        # not found! (include the key so the error is self-describing)
        raise KeyError(item)

    def __setitem__(self, key, value):
        """
        Overload [] to provide set access to configurations

        :param key: the name of the configuration to create
        :param value: the dict-like data associated with this configuration
        :return: the created Config object
        """
        # ``collections.Mapping`` was removed in Python 3.10; the ABC has
        # lived in ``collections.abc`` since Python 3.3.
        from collections.abc import Mapping
        if not isinstance(value, Mapping):
            raise TypeError(repr(value) + "is not a dict-like object")
        return self.create(key, value)
class JobClient:
    def __init__(self, server, crawlId, confId, parameters=None):
        """
        Nutch Job client with methods to list, create jobs.

        When the client is created, a crawlID and confID are associated.
        The client will automatically filter out jobs that do not match the associated crawlId or confId.

        :param server: Server instance used for the REST calls
        :param crawlId: crawl id stamped onto every job this client creates
        :param confId: configuration id stamped onto every job
        :param parameters: optional base parameter dict; must contain an 'args' dict
        :return:
        """
        self.server = server
        self.crawlId = crawlId
        self.confId = confId
        # Base parameter template; 'args' is merged with per-job kwargs in create().
        self.parameters=parameters if parameters else {'args': dict()}

    def _job_owned(self, job):
        # True when the job dict belongs to this client's crawl/config pair.
        return job['crawlId'] == self.crawlId and job['confId'] == self.confId

    def list(self, allJobs=False):
        """
        Return list of jobs at this endpoint.
        Call get(allJobs=True) to see all jobs, not just the ones managed by this Client
        """
        jobs = self.server.call('get', '/job')
        return [Job(job['id'], self.server) for job in jobs if allJobs or self._job_owned(job)]

    def create(self, command, **args):
        """
        Create a job given a command

        :param command: Nutch command, one of nutch.LegalJobs
        :param args: Additional arguments to pass to the job
        :return: The created Job
        """
        command = command.upper()
        # NOTE(review): an unrecognized command only warns and then still
        # posts the job to the server -- confirm whether it should abort.
        if command not in LegalJobs:
            warn('Nutch command must be one of: %s' % ', '.join(LegalJobs))
        else:
            echo2('Starting %s job with args %s' % (command, str(args)))
        parameters = self.parameters.copy()
        parameters['type'] = command
        parameters['crawlId'] = self.crawlId
        parameters['confId'] = self.confId
        # NOTE(review): dict.copy() is shallow, so this update mutates the
        # shared self.parameters['args'] dict across calls -- verify intended.
        parameters['args'].update(args)
        job_info = self.server.call('post', "/job/create", parameters, JsonAcceptHeader)
        job = Job(job_info['id'], self.server)
        return job

    # some short-hand functions
    def inject(self, seed=None, urlDir=None, **args):
        """
        :param seed: A Seed object (this or urlDir must be specified)
        :param urlDir: The directory on the server containing the seed list (this or urlDir must be specified)
        :param args: Extra arguments for the job
        :return: a created Job object
        """
        if seed:
            # A Seed and a conflicting urlDir are mutually exclusive.
            if urlDir and urlDir != seed.seedPath:
                raise NutchException("Can't specify both seed and urlDir")
            urlDir = seed.seedPath
        elif urlDir:
            pass
        else:
            raise NutchException("Must specify seed or urlDir")
        args['url_dir'] = urlDir
        return self.create('INJECT', **args)

    def generate(self, **args):
        return self.create('GENERATE', **args)

    def fetch(self, **args):
        return self.create('FETCH', **args)

    def parse(self, **args):
        return self.create('PARSE', **args)

    def updatedb(self, **args):
        return self.create('UPDATEDB', **args)

    def stats(self):
        # Ask the crawldb endpoint for statistics for this crawl/config pair.
        statsArgs = {'confId': self.confId, 'crawlId': self.crawlId, 'type': 'stats', 'args': {}}
        return self.server.call('post', '/db/crawldb', statsArgs)
class SeedClient():
    def __init__(self, server):
        """Nutch Seed client

        Client for uploading seed lists to Nutch.

        :param server: Server instance used for the REST calls
        """
        self.server = server

    def create(self, sid, seedList):
        """
        Create a new named (sid) Seed from a list of seed URLs

        :param sid: the name to assign to the new seed list
        :param seedList: a single URL string or any iterable of URL strings
        :return: the created Seed object
        """
        seedUrl = lambda uid, url: {"id": uid, "url": url}
        # Accept a bare URL string as well as any iterable of URLs.  The old
        # check only special-cased tuples, so a plain *list* of URLs was
        # wrapped into a one-element tuple and sent as a single bogus URL.
        if isinstance(seedList, str):
            seedList = (seedList,)
        seedListData = {
            "id": "12345",
            "name": sid,
            "seedUrls": [seedUrl(uid, url) for uid, url in enumerate(seedList)]
        }
        # As per resolution of https://issues.apache.org/jira/browse/NUTCH-2123
        seedPath = self.server.call('post', "/seed/create", seedListData, TextAcceptHeader)
        new_seed = Seed(sid, seedPath, self.server)
        return new_seed

    def createFromFile(self, sid, filename):
        """
        Create a new named (sid) Seed from a file containing URLs

        It's assumed URLs are whitespace separated.

        :param sid: the name to assign to the new seed list
        :param filename: the name of the file that contains URLs
        :return: the created Seed object
        """
        # Flatten whitespace-separated URLs from every line of the file.
        urls = []
        with open(filename) as f:
            for line in f:
                urls.extend(line.split())
        return self.create(sid, tuple(urls))
class CrawlClient():
    def __init__(self, server, seed, jobClient, rounds, index):
        """Nutch Crawl manager

        High-level Nutch client for managing crawls.
        When this client is initialized, the seed list will automatically be injected.

        There are several ways to proceed from here:

        progress() - check the status of the current job, enqueue the next job if the
         current job is finished, and return immediately
        nextRound() - wait and enqueue jobs until the current round is finished and return
        waitAll() - wait and enqueue jobs until all rounds are finished and return

        It is recommended to use progress() in a while loop for any applications that need to remain interactive.

        :param server: Server used for REST calls
        :param seed: Seed object handed to the initial INJECT job
        :param jobClient: JobClient used to create the individual crawl jobs
        :param rounds: number of generate/fetch/parse/update rounds to run
        :param index: whether to run an INDEX job at the end of each round
        """
        self.server = server
        self.jobClient = jobClient
        self.crawlId = jobClient.crawlId
        self.currentRound = 1
        self.totalRounds = rounds
        self.currentJob = None
        self.sleepTime = 1  # seconds between status polls in nextRound()
        self.enable_index = index
        # dispatch injection
        self.currentJob = self.jobClient.inject(seed)

    def _nextJob(self, job, nextRound=True):
        """
        Given a completed job, start the next job in the round, or return None

        :param job: the just-finished Job
        :param nextRound: whether to start jobs from the next round if the current round is completed.
        :return: the newly started Job, or None if no job was started
        """
        jobInfo = job.info()
        assert jobInfo['state'] == 'FINISHED'
        roundEnd = False
        # Fixed pipeline: INJECT -> GENERATE -> FETCH -> PARSE -> UPDATEDB
        # -> INVERTLINKS -> DEDUP [-> INDEX] -> end of round.
        if jobInfo['type'] == 'INJECT':
            nextCommand = 'GENERATE'
        elif jobInfo['type'] == 'GENERATE':
            nextCommand = 'FETCH'
        elif jobInfo['type'] == 'FETCH':
            nextCommand = 'PARSE'
        elif jobInfo['type'] == 'PARSE':
            nextCommand = 'UPDATEDB'
        elif jobInfo['type'] == 'UPDATEDB':
            nextCommand = 'INVERTLINKS'
        elif jobInfo['type'] == 'INVERTLINKS':
            nextCommand = 'DEDUP'
        elif jobInfo['type'] == 'DEDUP':
            if self.enable_index:
                nextCommand = 'INDEX'
            else:
                roundEnd = True
        elif jobInfo['type'] == 'INDEX':
            roundEnd = True
        else:
            raise NutchException("Unrecognized job type {}".format(jobInfo['type']))
        if roundEnd:
            if nextRound and self.currentRound < self.totalRounds:
                nextCommand = 'GENERATE'
                self.currentRound += 1
            else:
                return None
        return self.jobClient.create(nextCommand)

    def progress(self, nextRound=True):
        """
        Check the status of the current job, activate the next job if it's finished, and return the active job

        If the current job has failed, a NutchCrawlException will be raised with the failed job attached.

        :param nextRound: whether to start jobs from the next round if the current job/round is completed.
        :return: the currently running Job, or None if no jobs are running.
        """
        currentJob = self.currentJob
        if currentJob is None:
            return currentJob
        jobInfo = currentJob.info()
        if jobInfo['state'] == 'RUNNING':
            return currentJob
        elif jobInfo['state'] == 'FINISHED':
            nextJob = self._nextJob(currentJob, nextRound)
            self.currentJob = nextJob
            return nextJob
        else:
            error = NutchCrawlException("Unexpected job state: {}".format(jobInfo['state']))
            error.current_job = currentJob
            # Bug fix: raise the populated exception instance; the old code
            # raised the bare class, discarding the message and current_job.
            raise error

    def addRounds(self, numRounds=1):
        """
        Add more rounds to the crawl. This command does not start execution.

        :param numRounds: the number of rounds to add to the crawl
        :return: the total number of rounds scheduled for execution
        """
        self.totalRounds += numRounds
        return self.totalRounds

    def nextRound(self):
        """
        Execute all jobs in the current round and return when they have finished.

        :return: a list of all completed Jobs
        """
        finishedJobs = []
        if self.currentJob is None:
            self.currentJob = self.jobClient.create('GENERATE')
        activeJob = self.progress(nextRound=False)
        while activeJob:
            oldJob = activeJob
            activeJob = self.progress(nextRound=False)  # updates self.currentJob
            if oldJob and oldJob != activeJob:
                finishedJobs.append(oldJob)
            sleep(self.sleepTime)
        self.currentRound += 1
        return finishedJobs

    def waitAll(self):
        """
        Execute all queued rounds and return when they have finished.

        :return: a list of jobs completed for each round, organized by round (list-of-lists)
        """
        finishedRounds = [self.nextRound()]
        while self.currentRound < self.totalRounds:
            finishedRounds.append(self.nextRound())
        return finishedRounds
class Nutch:
    def __init__(self, confId=DefaultConfig, serverEndpoint=DefaultServerEndpoint, raiseErrors=True, **args):
        '''
        Nutch client for interacting with a Nutch instance over its REST API.
        Constructor:
        nt = Nutch()
        Optional arguments:
        confID - The name of the default configuration file to use, by default: nutch.DefaultConfig
        serverEndpoint - The location of the Nutch server, by default: nutch.DefaultServerEndpoint
        raiseErrors - raise exceptions if server response is not 200
        Provides functions:
        server - getServerStatus, stopServer
        config - get and set parameters for this configuration
        job - get list of running jobs, get job metadata, stop/abort a job by id, and create a new job
        To start a crawl job, use:
        Crawl() - or use the methods inject, generate, fetch, parse, updatedb in that order.
        To run a crawl in one method, use:
        -- nt = Nutch()
        -- response, status = nt.crawl()
        Methods return a tuple of two items, the response content (JSON or text) and the response status.
        '''
        self.confId = confId
        self.server = Server(serverEndpoint, raiseErrors)
        # Handle to the named configuration on the server (KeyError if absent).
        self.config = ConfigClient(self.server)[self.confId]
        self.job_parameters = dict()
        self.job_parameters['confId'] = confId
        self.job_parameters['args'] = args  # additional config. args as a dictionary
        # if the configuration doesn't contain a user agent, set a default one.
        if 'http.agent.name' not in self.config.info():
            self.config['http.agent.name'] = DefaultUserAgent

    def Jobs(self, crawlId=None):
        """
        Create a JobClient for listing and creating jobs.
        The JobClient inherits the confId from the Nutch client.
        :param crawlId: crawlIds to use for this client. If not provided, will be generated
        by nutch.defaultCrawlId()
        :return: a JobClient
        """
        crawlId = crawlId if crawlId else defaultCrawlId()
        return JobClient(self.server, crawlId, self.confId)

    def Config(self):
        return self.config

    def Configs(self):
        return ConfigClient(self.server)

    def Seeds(self):
        return SeedClient(self.server)

    def Crawl(self, seed, seedClient=None, jobClient=None, rounds=1, index=True):
        """
        Launch a crawl using the given seed
        :param seed: Type (Seed or SeedList) - used for crawl
        :param seedClient: if a SeedList is given, the SeedClient to upload, if None a default will be created
        :param jobClient: the JobClient to be used, if None a default will be created
        :param rounds: the number of rounds in the crawl
        :return: a CrawlClient to monitor and control the crawl
        """
        if seedClient is None:
            seedClient = self.Seeds()
        if jobClient is None:
            jobClient = self.Jobs()
        # IDIOM FIX: isinstance instead of type comparison, so Seed
        # subclasses are accepted as-is rather than re-uploaded.
        if not isinstance(seed, Seed):
            seed = seedClient.create(jobClient.crawlId + '_seeds', seed)
        return CrawlClient(self.server, seed, jobClient, rounds, index)

    ## convenience functions
    ## TODO: Decide if any of these should be deprecated.
    def getServerStatus(self):
        return self.server.call('get', '/admin')

    def stopServer(self):
        return self.server.call('post', '/admin/stop', headers=TextAcceptHeader)

    def configGetList(self):
        return self.Configs().list()

    def configGetInfo(self, cid):
        return self.Configs()[cid].info()

    def configGetParameter(self, cid, parameterId):
        return self.Configs()[cid][parameterId]

    def configCreate(self, cid, config_data):
        return self.Configs().create(cid, config_data)
if __name__ == '__main__':
    # Script entry point: main() (defined elsewhere in this module) is
    # presumed to return a (content, status) pair; only the content is
    # printed.  TODO confirm the return shape of main().
    resp = main(sys.argv)
    print(resp[0])
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
Server.call
|
python
|
def call(self, verb, servicePath, data=None, headers=None, forceText=False, sendJson=True):
    """
    Call the Nutch Server, do some error checking, and return the response.

    :param verb: One of nutch.RequestVerbs
    :param servicePath: path component of URL to append to endpoint, e.g. '/config'
    :param data: Data to attach to this request
    :param headers: headers to attach to this request, default are JsonAcceptHeader
    :param forceText: don't trust the response headers and just get the text
    :param sendJson: Whether to treat attached data as JSON or not
    """
    # Default payload must match the send mode: JSON dict vs raw text.
    default_data = {} if sendJson else ""
    data = data if data else default_data
    headers = headers if headers else JsonAcceptHeader.copy()
    if not sendJson:
        headers.update(TextSendHeader)
    if verb not in RequestVerbs:
        # die() reports the error and exits; no request is attempted.
        die('Server call verb must be one of %s' % str(RequestVerbs.keys()))
    if Verbose:
        echo2("%s Endpoint:" % verb.upper(), servicePath)
        echo2("%s Request data:" % verb.upper(), data)
        echo2("%s Request headers:" % verb.upper(), headers)
    # RequestVerbs maps the verb name to the corresponding HTTP function.
    verbFn = RequestVerbs[verb]
    if sendJson:
        resp = verbFn(self.serverEndpoint + servicePath, json=data, headers=headers)
    else:
        resp = verbFn(self.serverEndpoint + servicePath, data=data, headers=headers)
    if Verbose:
        echo2("Response headers:", resp.headers)
        echo2("Response status:", resp.status_code)
    if resp.status_code != 200:
        if self.raiseErrors:
            # Attach the status code so callers can inspect it.
            error = NutchException("Unexpected server response: %d" % resp.status_code)
            error.status_code = resp.status_code
            raise error
        else:
            warn('Nutch server returned status:', resp.status_code)
    # Plain-text (or content-type-less) responses come back as raw text.
    if forceText or 'content-type' not in resp.headers or resp.headers['content-type'] == 'text/plain':
        if Verbose:
            echo2("Response text:", resp.text)
        return resp.text
    content_type = resp.headers['content-type']
    if content_type == 'application/json' and not forceText:
        if Verbose:
            echo2("Response JSON:", resp.json())
        return resp.json()
    else:
        die('Did not understand server response: %s' % resp.headers)
|
Call the Nutch Server, do some error checking, and return the response.
:param verb: One of nutch.RequestVerbs
:param servicePath: path component of URL to append to endpoint, e.g. '/config'
:param data: Data to attach to this request
:param headers: headers to attach to this request, default are JsonAcceptHeader
:param forceText: don't trust the response headers and just get the text
:param sendJson: Whether to treat attached data as JSON or not
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L117-L170
|
[
"def warn(*s):\n echo2('Warn:', *s)\n",
"def echo2(*s):\n sys.stderr.write('nutch.py: ' + ' '.join(map(str, s)) + '\\n')\n",
"def die(*s):\n echo2('Error:', *s)\n echo2(USAGE)\n sys.exit()\n"
] |
class Server:
    """Low-level wrapper around a single Nutch RESTful server endpoint."""

    def __init__(self, serverEndpoint, raiseErrors=True):
        """Remember the endpoint URL and the error-handling policy.

        :param serverEndpoint: URL of the server
        :param raiseErrors: Raise an exception for non-200 status codes
        """
        self.serverEndpoint = serverEndpoint
        self.raiseErrors = raiseErrors
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
ConfigClient.create
|
python
|
def create(self, cid, configData):
    """Create a new named (cid) configuration from a parameter dictionary (config_data)."""
    # force=True asks the server to overwrite any existing config of that id.
    configArgs = {'configId': cid, 'params': configData, 'force': True}
    # The server answers with the configuration id as plain text.
    cid = self.server.call('post', "/config/create", configArgs, forceText=True, headers=TextAcceptHeader)
    new_config = Config(cid, self.server)
    return new_config
|
Create a new named (cid) configuration from a parameter dictionary (config_data).
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L278-L285
| null |
class ConfigClient:
    def __init__(self, server):
        """Nutch Config client
        List named configurations, create new ones, or delete them with methods to get the list of named
        configurations, get parameters for a named configuration, get an individual parameter of a named
        configuration, create a new named configuration using a parameter dictionary, and delete a named configuration.
        """
        self.server = server

    def list(self):
        # One Config handle per configuration id reported by the server.
        configs = self.server.call('get', '/config')
        return [Config(cid, self.server) for cid in configs]

    def __getitem__(self, item):
        """
        Overload [] to provide get access to configurations
        :param item: the name of a configuration
        :return: the Config object if the name is valid, otherwise raise KeyError
        """
        # let's be optimistic...
        config = Config(item, self.server)
        if config.info():
            return config
        # not found!
        raise KeyError

    def __setitem__(self, key, value):
        """
        Overload [] to provide set access to configurations
        :param key: the name of the configuration to create
        :param value: the dict-like data associated with this configuration
        :return: the created Config object
        """
        # BUG FIX: collections.Mapping was removed in Python 3.10; the ABC
        # lives in collections.abc on every supported Python 3 version.
        from collections.abc import Mapping
        if not isinstance(value, Mapping):
            raise TypeError(repr(value) + "is not a dict-like object")
        # NOTE(review): create() is defined on the full class (not shown in
        # this excerpt).
        return self.create(key, value)
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
JobClient.list
|
python
|
def list(self, allJobs=False):
    """
    Return list of jobs at this endpoint.
    Call get(allJobs=True) to see all jobs, not just the ones managed by this Client
    """
    jobs = self.server.call('get', '/job')
    # Keep only jobs whose crawlId/confId match this client unless allJobs is set.
    return [Job(job['id'], self.server) for job in jobs if allJobs or self._job_owned(job)]
|
Return list of jobs at this endpoint.
Call get(allJobs=True) to see all jobs, not just the ones managed by this Client
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L337-L346
| null |
class JobClient:
    def __init__(self, server, crawlId, confId, parameters=None):
        """
        Nutch Job client with methods to list, create jobs.
        When the client is created, a crawlID and confID are associated.
        The client will automatically filter out jobs that do not match the associated crawlId or confId.
        :param server: Server used to issue REST calls
        :param crawlId: crawl id stamped on every job created by this client
        :param confId: configuration id stamped on every job
        :param parameters: optional default job parameters; must contain an 'args' dict
        """
        self.server = server
        self.crawlId = crawlId
        self.confId = confId
        self.parameters = parameters if parameters else {'args': dict()}

    def _job_owned(self, job):
        # A job belongs to this client when both its crawlId and confId match.
        return job['crawlId'] == self.crawlId and job['confId'] == self.confId

    def create(self, command, **args):
        """
        Create a job given a command
        :param command: Nutch command, one of nutch.LegalJobs
        :param args: Additional arguments to pass to the job
        :return: The created Job
        """
        command = command.upper()
        if command not in LegalJobs:
            # NOTE(review): only warns; the request is still sent -- confirm intended.
            warn('Nutch command must be one of: %s' % ', '.join(LegalJobs))
        else:
            echo2('Starting %s job with args %s' % (command, str(args)))
        parameters = self.parameters.copy()
        parameters['type'] = command
        parameters['crawlId'] = self.crawlId
        parameters['confId'] = self.confId
        # BUG FIX: dict.copy() is shallow, so updating parameters['args'] in
        # place mutated self.parameters['args'] and leaked args between
        # create() calls.  Merge into a fresh dict instead.
        parameters['args'] = dict(self.parameters['args'], **args)
        job_info = self.server.call('post', "/job/create", parameters, JsonAcceptHeader)
        job = Job(job_info['id'], self.server)
        return job

    # some short-hand functions
    def inject(self, seed=None, urlDir=None, **args):
        """
        :param seed: A Seed object (this or urlDir must be specified)
        :param urlDir: The directory on the server containing the seed list (this or urlDir must be specified)
        :param args: Extra arguments for the job
        :return: a created Job object
        """
        if seed:
            # Both may be given only when they agree on the seed path.
            if urlDir and urlDir != seed.seedPath:
                raise NutchException("Can't specify both seed and urlDir")
            urlDir = seed.seedPath
        elif urlDir:
            pass
        else:
            raise NutchException("Must specify seed or urlDir")
        args['url_dir'] = urlDir
        return self.create('INJECT', **args)

    def generate(self, **args):
        return self.create('GENERATE', **args)

    def fetch(self, **args):
        return self.create('FETCH', **args)

    def parse(self, **args):
        return self.create('PARSE', **args)

    def updatedb(self, **args):
        return self.create('UPDATEDB', **args)

    def stats(self):
        # Ask the crawldb endpoint for statistics about this crawl.
        statsArgs = {'confId': self.confId, 'crawlId': self.crawlId, 'type': 'stats', 'args': {}}
        return self.server.call('post', '/db/crawldb', statsArgs)
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
JobClient.create
|
python
|
def create(self, command, **args):
    """
    Create a job given a command
    :param command: Nutch command, one of nutch.LegalJobs
    :param args: Additional arguments to pass to the job
    :return: The created Job
    """
    command = command.upper()
    if command not in LegalJobs:
        # NOTE(review): only warns; the request is still sent -- confirm intended.
        warn('Nutch command must be one of: %s' % ', '.join(LegalJobs))
    else:
        echo2('Starting %s job with args %s' % (command, str(args)))
    parameters = self.parameters.copy()
    parameters['type'] = command
    parameters['crawlId'] = self.crawlId
    parameters['confId'] = self.confId
    # BUG FIX: dict.copy() is shallow, so mutating parameters['args'] in
    # place leaked each call's args into self.parameters['args'].  Merge
    # into a fresh dict instead.
    parameters['args'] = dict(self.parameters['args'], **args)
    job_info = self.server.call('post', "/job/create", parameters, JsonAcceptHeader)
    job = Job(job_info['id'], self.server)
    return job
|
Create a job given a command
:param command: Nutch command, one of nutch.LegalJobs
:param args: Additional arguments to pass to the job
:return: The created Job
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L348-L370
|
[
"def warn(*s):\n echo2('Warn:', *s)\n",
"def echo2(*s):\n sys.stderr.write('nutch.py: ' + ' '.join(map(str, s)) + '\\n')\n"
] |
class JobClient:
    def __init__(self, server, crawlId, confId, parameters=None):
        """
        Nutch Job client with methods to list, create jobs.
        When the client is created, a crawlID and confID are associated.
        The client will automatically filter out jobs that do not match the associated crawlId or confId.
        :param server:
        :param crawlId:
        :param confId:
        :param parameters:
        :return:
        """
        self.server = server
        self.crawlId = crawlId
        self.confId = confId
        # Default job parameters; 'args' holds per-job keyword arguments.
        self.parameters=parameters if parameters else {'args': dict()}
    def _job_owned(self, job):
        # A job belongs to this client when both its crawlId and confId match.
        return job['crawlId'] == self.crawlId and job['confId'] == self.confId
    def list(self, allJobs=False):
        """
        Return list of jobs at this endpoint.
        Call get(allJobs=True) to see all jobs, not just the ones managed by this Client
        """
        jobs = self.server.call('get', '/job')
        return [Job(job['id'], self.server) for job in jobs if allJobs or self._job_owned(job)]
    # some short-hand functions
    def inject(self, seed=None, urlDir=None, **args):
        """
        :param seed: A Seed object (this or urlDir must be specified)
        :param urlDir: The directory on the server containing the seed list (this or urlDir must be specified)
        :param args: Extra arguments for the job
        :return: a created Job object
        """
        if seed:
            # Both may be given only when they agree on the seed path.
            if urlDir and urlDir != seed.seedPath:
                raise NutchException("Can't specify both seed and urlDir")
            urlDir = seed.seedPath
        elif urlDir:
            pass
        else:
            raise NutchException("Must specify seed or urlDir")
        args['url_dir'] = urlDir
        # NOTE(review): self.create is referenced but not defined in this
        # excerpt -- presumably supplied by the full class definition.
        return self.create('INJECT', **args)
    def generate(self, **args):
        return self.create('GENERATE', **args)
    def fetch(self, **args):
        return self.create('FETCH', **args)
    def parse(self, **args):
        return self.create('PARSE', **args)
    def updatedb(self, **args):
        return self.create('UPDATEDB', **args)
    def stats(self):
        # Ask the crawldb endpoint for statistics about this crawl.
        statsArgs = {'confId': self.confId, 'crawlId': self.crawlId, 'type': 'stats', 'args': {}}
        return self.server.call('post', '/db/crawldb', statsArgs)
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
JobClient.inject
|
python
|
def inject(self, seed=None, urlDir=None, **args):
    """
    :param seed: A Seed object (this or urlDir must be specified)
    :param urlDir: The directory on the server containing the seed list (this or urlDir must be specified)
    :param args: Extra arguments for the job
    :return: a created Job object
    """
    if seed:
        # Both may be given only when they agree on the seed path.
        if urlDir and urlDir != seed.seedPath:
            raise NutchException("Can't specify both seed and urlDir")
        urlDir = seed.seedPath
    elif urlDir:
        pass
    else:
        raise NutchException("Must specify seed or urlDir")
    args['url_dir'] = urlDir
    return self.create('INJECT', **args)
|
:param seed: A Seed object (this or urlDir must be specified)
:param urlDir: The directory on the server containing the seed list (this or urlDir must be specified)
:param args: Extra arguments for the job
:return: a created Job object
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L374-L391
|
[
"def create(self, command, **args):\n \"\"\"\n Create a job given a command\n :param command: Nutch command, one of nutch.LegalJobs\n :param args: Additional arguments to pass to the job\n :return: The created Job\n \"\"\"\n\n command = command.upper()\n if command not in LegalJobs:\n warn('Nutch command must be one of: %s' % ', '.join(LegalJobs))\n else:\n echo2('Starting %s job with args %s' % (command, str(args)))\n parameters = self.parameters.copy()\n parameters['type'] = command\n parameters['crawlId'] = self.crawlId\n parameters['confId'] = self.confId\n parameters['args'].update(args)\n\n job_info = self.server.call('post', \"/job/create\", parameters, JsonAcceptHeader)\n\n job = Job(job_info['id'], self.server)\n return job\n"
] |
class JobClient:
    def __init__(self, server, crawlId, confId, parameters=None):
        """
        Nutch Job client with methods to list, create jobs.
        When the client is created, a crawlID and confID are associated.
        The client will automatically filter out jobs that do not match the associated crawlId or confId.
        :param server: Server used to issue REST calls
        :param crawlId: crawl id stamped on every job created by this client
        :param confId: configuration id stamped on every job
        :param parameters: optional default job parameters; must contain an 'args' dict
        """
        self.server = server
        self.crawlId = crawlId
        self.confId = confId
        self.parameters = parameters if parameters else {'args': dict()}

    def _job_owned(self, job):
        # A job belongs to this client when both its crawlId and confId match.
        return job['crawlId'] == self.crawlId and job['confId'] == self.confId

    def list(self, allJobs=False):
        """
        Return list of jobs at this endpoint.
        Call get(allJobs=True) to see all jobs, not just the ones managed by this Client
        """
        jobs = self.server.call('get', '/job')
        return [Job(job['id'], self.server) for job in jobs if allJobs or self._job_owned(job)]

    def create(self, command, **args):
        """
        Create a job given a command
        :param command: Nutch command, one of nutch.LegalJobs
        :param args: Additional arguments to pass to the job
        :return: The created Job
        """
        command = command.upper()
        if command not in LegalJobs:
            # NOTE(review): only warns; the request is still sent -- confirm intended.
            warn('Nutch command must be one of: %s' % ', '.join(LegalJobs))
        else:
            echo2('Starting %s job with args %s' % (command, str(args)))
        parameters = self.parameters.copy()
        parameters['type'] = command
        parameters['crawlId'] = self.crawlId
        parameters['confId'] = self.confId
        # BUG FIX: dict.copy() is shallow, so updating parameters['args'] in
        # place mutated self.parameters['args'] and leaked args between
        # create() calls.  Merge into a fresh dict instead.
        parameters['args'] = dict(self.parameters['args'], **args)
        job_info = self.server.call('post', "/job/create", parameters, JsonAcceptHeader)
        job = Job(job_info['id'], self.server)
        return job

    # some short-hand functions
    def generate(self, **args):
        return self.create('GENERATE', **args)

    def fetch(self, **args):
        return self.create('FETCH', **args)

    def parse(self, **args):
        return self.create('PARSE', **args)

    def updatedb(self, **args):
        return self.create('UPDATEDB', **args)

    def stats(self):
        # Ask the crawldb endpoint for statistics about this crawl.
        statsArgs = {'confId': self.confId, 'crawlId': self.crawlId, 'type': 'stats', 'args': {}}
        return self.server.call('post', '/db/crawldb', statsArgs)
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
SeedClient.create
|
python
|
def create(self, sid, seedList):
    """
    Create a new named (sid) Seed from a list of seed URLs
    :param sid: the name to assign to the new seed list
    :param seedList: a single URL string or a sequence of URL strings
    :return: the created Seed object
    """
    # BUG FIX: previously any non-tuple (including a list of URLs) was
    # wrapped whole, producing one bogus seed entry whose "url" was the
    # entire list.  Only a bare string needs wrapping.
    if isinstance(seedList, str):
        seedList = (seedList,)
    seedListData = {
        "id": "12345",  # NOTE(review): hard-coded list id -- confirm the server ignores it
        "name": sid,
        "seedUrls": [{"id": uid, "url": url} for uid, url in enumerate(seedList)]
    }
    # As per resolution of https://issues.apache.org/jira/browse/NUTCH-2123
    seedPath = self.server.call('post', "/seed/create", seedListData, TextAcceptHeader)
    new_seed = Seed(sid, seedPath, self.server)
    return new_seed
|
Create a new named (sid) Seed from a list of seed URLs
:param sid: the name to assign to the new seed list
:param seedList: the list of seeds to use
:return: the created Seed object
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L419-L442
| null |
class SeedClient():
    def __init__(self, server):
        """Nutch Seed client
        Client for uploading seed lists to Nutch
        """
        self.server = server

    def createFromFile(self, sid, filename):
        """
        Create a new named (sid) Seed from a file containing URLs
        It's assumed URLs are whitespace separated.
        :param sid: the name to assign to the new seed list
        :param filename: the name of the file that contains URLs
        :return: the created Seed object
        """
        # Whitespace-splitting the whole file yields the same tokens as
        # splitting each line individually (newlines are whitespace).
        with open(filename) as handle:
            urls = handle.read().split()
        return self.create(sid, tuple(urls))
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
SeedClient.createFromFile
|
python
|
def createFromFile(self, sid, filename):
    """
    Create a new named (sid) Seed from a file containing URLs
    It's assumed URLs are whitespace separated.
    :param sid: the name to assign to the new seed list
    :param filename: the name of the file that contains URLs
    :return: the created Seed object
    """
    # Splitting the whole file on whitespace produces the same token
    # sequence as splitting line by line (newlines are whitespace).
    with open(filename) as handle:
        tokens = handle.read().split()
    return self.create(sid, tuple(tokens))
|
Create a new named (sid) Seed from a file containing URLs
It's assumed URLs are whitespace separated.
:param sid: the name to assign to the new seed list
:param filename: the name of the file that contains URLs
:return: the created Seed object
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L444-L460
|
[
"def create(self, sid, seedList):\n \"\"\"\n Create a new named (sid) Seed from a list of seed URLs\n\n :param sid: the name to assign to the new seed list\n :param seedList: the list of seeds to use\n :return: the created Seed object\n \"\"\"\n\n seedUrl = lambda uid, url: {\"id\": uid, \"url\": url}\n\n if not isinstance(seedList,tuple):\n seedList = (seedList,)\n\n seedListData = {\n \"id\": \"12345\",\n \"name\": sid,\n \"seedUrls\": [seedUrl(uid, url) for uid, url in enumerate(seedList)]\n }\n\n # As per resolution of https://issues.apache.org/jira/browse/NUTCH-2123\n seedPath = self.server.call('post', \"/seed/create\", seedListData, TextAcceptHeader)\n new_seed = Seed(sid, seedPath, self.server)\n return new_seed\n"
] |
class SeedClient():
    def __init__(self, server):
        """Nutch Seed client
        Client for uploading seed lists to Nutch
        """
        self.server = server

    def create(self, sid, seedList):
        """
        Create a new named (sid) Seed from a list of seed URLs
        :param sid: the name to assign to the new seed list
        :param seedList: a single URL string or a sequence of URL strings
        :return: the created Seed object
        """
        # BUG FIX: previously any non-tuple (including a list of URLs) was
        # wrapped whole, producing one bogus seed entry whose "url" was the
        # entire list.  Only a bare string needs wrapping.
        if isinstance(seedList, str):
            seedList = (seedList,)
        seedListData = {
            "id": "12345",  # NOTE(review): hard-coded list id -- confirm the server ignores it
            "name": sid,
            "seedUrls": [{"id": uid, "url": url} for uid, url in enumerate(seedList)]
        }
        # As per resolution of https://issues.apache.org/jira/browse/NUTCH-2123
        seedPath = self.server.call('post', "/seed/create", seedListData, TextAcceptHeader)
        new_seed = Seed(sid, seedPath, self.server)
        return new_seed
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
CrawlClient._nextJob
|
python
|
def _nextJob(self, job, nextRound=True):
jobInfo = job.info()
assert jobInfo['state'] == 'FINISHED'
roundEnd = False
if jobInfo['type'] == 'INJECT':
nextCommand = 'GENERATE'
elif jobInfo['type'] == 'GENERATE':
nextCommand = 'FETCH'
elif jobInfo['type'] == 'FETCH':
nextCommand = 'PARSE'
elif jobInfo['type'] == 'PARSE':
nextCommand = 'UPDATEDB'
elif jobInfo['type'] == 'UPDATEDB':
nextCommand = 'INVERTLINKS'
elif jobInfo['type'] == 'INVERTLINKS':
nextCommand = 'DEDUP'
elif jobInfo['type'] == 'DEDUP':
if self.enable_index:
nextCommand = 'INDEX'
else:
roundEnd = True
elif jobInfo['type'] == 'INDEX':
roundEnd = True
else:
raise NutchException("Unrecognized job type {}".format(jobInfo['type']))
if roundEnd:
if nextRound and self.currentRound < self.totalRounds:
nextCommand = 'GENERATE'
self.currentRound += 1
else:
return None
return self.jobClient.create(nextCommand)
|
Given a completed job, start the next job in the round, or return None
:param nextRound: whether to start jobs from the next round if the current round is completed.
:return: the newly started Job, or None if no job was started
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L492-L533
| null |
class CrawlClient():
    def __init__(self, server, seed, jobClient, rounds, index):
        """Nutch Crawl manager
        High-level Nutch client for managing crawls.
        When this client is initialized, the seedList will automatically be injected.
        There are four ways to proceed from here.
        progress() - checks the status of the current job, enqueue the next job if the current job is finished,
          and return immediately
        waitJob() - wait until the current job is finished and return
        waitRound() - wait and enqueue jobs until the current round is finished and return
        waitAll() - wait and enqueue jobs until all rounds are finished and return
        It is recommended to use progress() in a while loop for any applications that need to remain interactive.
        """
        self.server = server
        self.jobClient = jobClient
        self.crawlId = jobClient.crawlId
        self.currentRound = 1
        self.totalRounds = rounds
        self.currentJob = None
        self.sleepTime = 1  # seconds between polls in nextRound()
        self.enable_index = index
        # dispatch injection
        self.currentJob = self.jobClient.inject(seed)

    def progress(self, nextRound=True):
        """
        Check the status of the current job, activate the next job if it's finished, and return the active job
        If the current job has failed, a NutchCrawlException will be raised with no jobs attached.
        :param nextRound: whether to start jobs from the next round if the current job/round is completed.
        :return: the currently running Job, or None if no jobs are running.
        """
        currentJob = self.currentJob
        if currentJob is None:
            return currentJob
        jobInfo = currentJob.info()
        if jobInfo['state'] == 'RUNNING':
            return currentJob
        elif jobInfo['state'] == 'FINISHED':
            # NOTE(review): _nextJob is defined on the full class (not shown
            # in this excerpt).
            nextJob = self._nextJob(currentJob, nextRound)
            self.currentJob = nextJob
            return nextJob
        else:
            error = NutchCrawlException("Unexpected job state: {}".format(jobInfo['state']))
            error.current_job = currentJob
            # BUG FIX: previously `raise NutchCrawlException` raised the bare
            # class, discarding the message and the attached current_job.
            raise error

    def addRounds(self, numRounds=1):
        """
        Add more rounds to the crawl. This command does not start execution.
        :param numRounds: the number of rounds to add to the crawl
        :return: the total number of rounds scheduled for execution
        """
        self.totalRounds += numRounds
        return self.totalRounds

    def nextRound(self):
        """
        Execute all jobs in the current round and return when they have finished.
        If a job fails, a NutchCrawlException will be raised, with all completed jobs from this round attached
        to the exception.
        :return: a list of all completed Jobs
        """
        finishedJobs = []
        if self.currentJob is None:
            # Nothing in flight: kick this round off with a GENERATE job.
            self.currentJob = self.jobClient.create('GENERATE')
        activeJob = self.progress(nextRound=False)
        # Poll until progress() reports no active job for this round.
        while activeJob:
            oldJob = activeJob
            activeJob = self.progress(nextRound=False)  # updates self.currentJob
            if oldJob and oldJob != activeJob:
                finishedJobs.append(oldJob)
            sleep(self.sleepTime)
        self.currentRound += 1
        return finishedJobs

    def waitAll(self):
        """
        Execute all queued rounds and return when they have finished.
        If a job fails, a NutchCrawlException will be raised, with all completed jobs attached
        to the exception
        :return: a list of jobs completed for each round, organized by round (list-of-lists)
        """
        finishedRounds = [self.nextRound()]
        while self.currentRound < self.totalRounds:
            finishedRounds.append(self.nextRound())
        return finishedRounds
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
CrawlClient.progress
|
python
|
def progress(self, nextRound=True):
    """
    Check the status of the current job, activate the next job if it's finished, and return the active job
    If the current job has failed, a NutchCrawlException will be raised with no jobs attached.
    :param nextRound: whether to start jobs from the next round if the current job/round is completed.
    :return: the currently running Job, or None if no jobs are running.
    """
    currentJob = self.currentJob
    if currentJob is None:
        return currentJob
    jobInfo = currentJob.info()
    if jobInfo['state'] == 'RUNNING':
        return currentJob
    elif jobInfo['state'] == 'FINISHED':
        nextJob = self._nextJob(currentJob, nextRound)
        self.currentJob = nextJob
        return nextJob
    else:
        error = NutchCrawlException("Unexpected job state: {}".format(jobInfo['state']))
        error.current_job = currentJob
        # BUG FIX: previously `raise NutchCrawlException` raised the bare
        # class, discarding the message and the attached current_job.
        raise error
|
Check the status of the current job, activate the next job if it's finished, and return the active job
If the current job has failed, a NutchCrawlException will be raised with no jobs attached.
:param nextRound: whether to start jobs from the next round if the current job/round is completed.
:return: the currently running Job, or None if no jobs are running.
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L535-L560
|
[
"def _nextJob(self, job, nextRound=True):\n \"\"\"\n Given a completed job, start the next job in the round, or return None\n\n :param nextRound: whether to start jobs from the next round if the current round is completed.\n :return: the newly started Job, or None if no job was started\n \"\"\"\n\n jobInfo = job.info()\n assert jobInfo['state'] == 'FINISHED'\n\n roundEnd = False\n if jobInfo['type'] == 'INJECT':\n nextCommand = 'GENERATE'\n elif jobInfo['type'] == 'GENERATE':\n nextCommand = 'FETCH'\n elif jobInfo['type'] == 'FETCH':\n nextCommand = 'PARSE'\n elif jobInfo['type'] == 'PARSE':\n nextCommand = 'UPDATEDB'\n elif jobInfo['type'] == 'UPDATEDB':\n nextCommand = 'INVERTLINKS'\n elif jobInfo['type'] == 'INVERTLINKS':\n nextCommand = 'DEDUP'\n elif jobInfo['type'] == 'DEDUP':\n if self.enable_index:\n nextCommand = 'INDEX'\n else:\n roundEnd = True\n elif jobInfo['type'] == 'INDEX':\n roundEnd = True\n else:\n raise NutchException(\"Unrecognized job type {}\".format(jobInfo['type']))\n\n if roundEnd:\n if nextRound and self.currentRound < self.totalRounds:\n nextCommand = 'GENERATE'\n self.currentRound += 1\n else:\n return None\n\n return self.jobClient.create(nextCommand)\n"
] |
class CrawlClient():
    def __init__(self, server, seed, jobClient, rounds, index):
        """Nutch Crawl manager
        High-level Nutch client for managing crawls.
        When this client is initialized, the seedList will automatically be injected.
        There are four ways to proceed from here.
        progress() - checks the status of the current job, enqueue the next job if the current job is finished,
          and return immediately
        waitJob() - wait until the current job is finished and return
        waitRound() - wait and enqueue jobs until the current round is finished and return
        waitAll() - wait and enqueue jobs until all rounds are finished and return
        It is recommended to use progress() in a while loop for any applications that need to remain interactive.
        """
        self.server = server
        self.jobClient = jobClient
        self.crawlId = jobClient.crawlId
        self.currentRound = 1
        self.totalRounds = rounds
        self.currentJob = None
        self.sleepTime = 1  # seconds between polls in nextRound()
        self.enable_index = index
        # dispatch injection
        self.currentJob = self.jobClient.inject(seed)
    def _nextJob(self, job, nextRound=True):
        """
        Given a completed job, start the next job in the round, or return None
        :param nextRound: whether to start jobs from the next round if the current round is completed.
        :return: the newly started Job, or None if no job was started
        """
        jobInfo = job.info()
        assert jobInfo['state'] == 'FINISHED'
        # Walk the fixed pipeline: INJECT -> GENERATE -> FETCH -> PARSE ->
        # UPDATEDB -> INVERTLINKS -> DEDUP [-> INDEX].
        roundEnd = False
        if jobInfo['type'] == 'INJECT':
            nextCommand = 'GENERATE'
        elif jobInfo['type'] == 'GENERATE':
            nextCommand = 'FETCH'
        elif jobInfo['type'] == 'FETCH':
            nextCommand = 'PARSE'
        elif jobInfo['type'] == 'PARSE':
            nextCommand = 'UPDATEDB'
        elif jobInfo['type'] == 'UPDATEDB':
            nextCommand = 'INVERTLINKS'
        elif jobInfo['type'] == 'INVERTLINKS':
            nextCommand = 'DEDUP'
        elif jobInfo['type'] == 'DEDUP':
            if self.enable_index:
                nextCommand = 'INDEX'
            else:
                roundEnd = True
        elif jobInfo['type'] == 'INDEX':
            roundEnd = True
        else:
            raise NutchException("Unrecognized job type {}".format(jobInfo['type']))
        if roundEnd:
            # End of a round: either start the next one or stop here.
            if nextRound and self.currentRound < self.totalRounds:
                nextCommand = 'GENERATE'
                self.currentRound += 1
            else:
                return None
        return self.jobClient.create(nextCommand)
    def addRounds(self, numRounds=1):
        """
        Add more rounds to the crawl. This command does not start execution.
        :param numRounds: the number of rounds to add to the crawl
        :return: the total number of rounds scheduled for execution
        """
        self.totalRounds += numRounds
        return self.totalRounds
    def nextRound(self):
        """
        Execute all jobs in the current round and return when they have finished.
        If a job fails, a NutchCrawlException will be raised, with all completed jobs from this round attached
        to the exception.
        :return: a list of all completed Jobs
        """
        finishedJobs = []
        if self.currentJob is None:
            # Nothing in flight: kick this round off with a GENERATE job.
            self.currentJob = self.jobClient.create('GENERATE')
        # NOTE(review): progress() is defined on the full class (not shown in
        # this excerpt).
        activeJob = self.progress(nextRound=False)
        while activeJob:
            oldJob = activeJob
            activeJob = self.progress(nextRound=False)  # updates self.currentJob
            if oldJob and oldJob != activeJob:
                finishedJobs.append(oldJob)
            sleep(self.sleepTime)
        self.currentRound += 1
        return finishedJobs
    def waitAll(self):
        """
        Execute all queued rounds and return when they have finished.
        If a job fails, a NutchCrawlException will be raised, with all completed jobs attached
        to the exception
        :return: a list of jobs completed for each round, organized by round (list-of-lists)
        """
        finishedRounds = [self.nextRound()]
        while self.currentRound < self.totalRounds:
            finishedRounds.append(self.nextRound())
        return finishedRounds
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
CrawlClient.nextRound
|
python
|
def nextRound(self):
    # Execute every job in the current round to completion and collect them.
    finishedJobs = []
    if self.currentJob is None:
        # Nothing in flight: kick this round off with a GENERATE job.
        self.currentJob = self.jobClient.create('GENERATE')
    activeJob = self.progress(nextRound=False)
    # Poll until progress() reports no active job for this round.
    while activeJob:
        oldJob = activeJob
        activeJob = self.progress(nextRound=False)  # updates self.currentJob
        if oldJob and oldJob != activeJob:
            finishedJobs.append(oldJob)
        sleep(self.sleepTime)
    self.currentRound += 1
    return finishedJobs
|
Execute all jobs in the current round and return when they have finished.
If a job fails, a NutchCrawlException will be raised, with all completed jobs from this round attached
to the exception.
:return: a list of all completed Jobs
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L573-L595
|
[
"def progress(self, nextRound=True):\n \"\"\"\n Check the status of the current job, activate the next job if it's finished, and return the active job\n\n If the current job has failed, a NutchCrawlException will be raised with no jobs attached.\n\n :param nextRound: whether to start jobs from the next round if the current job/round is completed.\n :return: the currently running Job, or None if no jobs are running.\n \"\"\"\n\n currentJob = self.currentJob\n if currentJob is None:\n return currentJob\n\n jobInfo = currentJob.info()\n\n if jobInfo['state'] == 'RUNNING':\n return currentJob\n elif jobInfo['state'] == 'FINISHED':\n nextJob = self._nextJob(currentJob, nextRound)\n self.currentJob = nextJob\n return nextJob\n else:\n error = NutchCrawlException(\"Unexpected job state: {}\".format(jobInfo['state']))\n error.current_job = currentJob\n raise NutchCrawlException\n"
] |
class CrawlClient():
def __init__(self, server, seed, jobClient, rounds, index):
"""Nutch Crawl manager
High-level Nutch client for managing crawls.
When this client is initialized, the seedList will automatically be injected.
There are four ways to proceed from here.
progress() - checks the status of the current job, enqueue the next job if the current job is finished,
and return immediately
waitJob() - wait until the current job is finished and return
waitRound() - wait and enqueue jobs until the current round is finished and return
waitAll() - wait and enqueue jobs until all rounds are finished and return
It is recommended to use progress() in a while loop for any applications that need to remain interactive.
"""
self.server = server
self.jobClient = jobClient
self.crawlId = jobClient.crawlId
self.currentRound = 1
self.totalRounds = rounds
self.currentJob = None
self.sleepTime = 1
self.enable_index = index
# dispatch injection
self.currentJob = self.jobClient.inject(seed)
def _nextJob(self, job, nextRound=True):
"""
Given a completed job, start the next job in the round, or return None
:param nextRound: whether to start jobs from the next round if the current round is completed.
:return: the newly started Job, or None if no job was started
"""
jobInfo = job.info()
assert jobInfo['state'] == 'FINISHED'
roundEnd = False
if jobInfo['type'] == 'INJECT':
nextCommand = 'GENERATE'
elif jobInfo['type'] == 'GENERATE':
nextCommand = 'FETCH'
elif jobInfo['type'] == 'FETCH':
nextCommand = 'PARSE'
elif jobInfo['type'] == 'PARSE':
nextCommand = 'UPDATEDB'
elif jobInfo['type'] == 'UPDATEDB':
nextCommand = 'INVERTLINKS'
elif jobInfo['type'] == 'INVERTLINKS':
nextCommand = 'DEDUP'
elif jobInfo['type'] == 'DEDUP':
if self.enable_index:
nextCommand = 'INDEX'
else:
roundEnd = True
elif jobInfo['type'] == 'INDEX':
roundEnd = True
else:
raise NutchException("Unrecognized job type {}".format(jobInfo['type']))
if roundEnd:
if nextRound and self.currentRound < self.totalRounds:
nextCommand = 'GENERATE'
self.currentRound += 1
else:
return None
return self.jobClient.create(nextCommand)
def progress(self, nextRound=True):
"""
Check the status of the current job, activate the next job if it's finished, and return the active job
If the current job has failed, a NutchCrawlException will be raised with no jobs attached.
:param nextRound: whether to start jobs from the next round if the current job/round is completed.
:return: the currently running Job, or None if no jobs are running.
"""
currentJob = self.currentJob
if currentJob is None:
return currentJob
jobInfo = currentJob.info()
if jobInfo['state'] == 'RUNNING':
return currentJob
elif jobInfo['state'] == 'FINISHED':
nextJob = self._nextJob(currentJob, nextRound)
self.currentJob = nextJob
return nextJob
else:
error = NutchCrawlException("Unexpected job state: {}".format(jobInfo['state']))
error.current_job = currentJob
raise NutchCrawlException
def addRounds(self, numRounds=1):
"""
Add more rounds to the crawl. This command does not start execution.
:param numRounds: the number of rounds to add to the crawl
:return: the total number of rounds scheduled for execution
"""
self.totalRounds += numRounds
return self.totalRounds
def waitAll(self):
"""
Execute all queued rounds and return when they have finished.
If a job fails, a NutchCrawlException will be raised, with all completed jobs attached
to the exception
:return: a list of jobs completed for each round, organized by round (list-of-lists)
"""
finishedRounds = [self.nextRound()]
while self.currentRound < self.totalRounds:
finishedRounds.append(self.nextRound())
return finishedRounds
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
CrawlClient.waitAll
|
python
|
def waitAll(self):
finishedRounds = [self.nextRound()]
while self.currentRound < self.totalRounds:
finishedRounds.append(self.nextRound())
return finishedRounds
|
Execute all queued rounds and return when they have finished.
If a job fails, a NutchCrawlException will be raised, with all completed jobs attached
to the exception
:return: a list of jobs completed for each round, organized by round (list-of-lists)
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L597-L612
|
[
"def nextRound(self):\n \"\"\"\n Execute all jobs in the current round and return when they have finished.\n\n If a job fails, a NutchCrawlException will be raised, with all completed jobs from this round attached\n to the exception.\n\n :return: a list of all completed Jobs\n \"\"\"\n\n finishedJobs = []\n if self.currentJob is None:\n self.currentJob = self.jobClient.create('GENERATE')\n\n activeJob = self.progress(nextRound=False)\n while activeJob:\n oldJob = activeJob\n activeJob = self.progress(nextRound=False) # updates self.currentJob\n if oldJob and oldJob != activeJob:\n finishedJobs.append(oldJob)\n sleep(self.sleepTime)\n self.currentRound += 1\n return finishedJobs\n"
] |
class CrawlClient():
def __init__(self, server, seed, jobClient, rounds, index):
"""Nutch Crawl manager
High-level Nutch client for managing crawls.
When this client is initialized, the seedList will automatically be injected.
There are four ways to proceed from here.
progress() - checks the status of the current job, enqueue the next job if the current job is finished,
and return immediately
waitJob() - wait until the current job is finished and return
waitRound() - wait and enqueue jobs until the current round is finished and return
waitAll() - wait and enqueue jobs until all rounds are finished and return
It is recommended to use progress() in a while loop for any applications that need to remain interactive.
"""
self.server = server
self.jobClient = jobClient
self.crawlId = jobClient.crawlId
self.currentRound = 1
self.totalRounds = rounds
self.currentJob = None
self.sleepTime = 1
self.enable_index = index
# dispatch injection
self.currentJob = self.jobClient.inject(seed)
def _nextJob(self, job, nextRound=True):
"""
Given a completed job, start the next job in the round, or return None
:param nextRound: whether to start jobs from the next round if the current round is completed.
:return: the newly started Job, or None if no job was started
"""
jobInfo = job.info()
assert jobInfo['state'] == 'FINISHED'
roundEnd = False
if jobInfo['type'] == 'INJECT':
nextCommand = 'GENERATE'
elif jobInfo['type'] == 'GENERATE':
nextCommand = 'FETCH'
elif jobInfo['type'] == 'FETCH':
nextCommand = 'PARSE'
elif jobInfo['type'] == 'PARSE':
nextCommand = 'UPDATEDB'
elif jobInfo['type'] == 'UPDATEDB':
nextCommand = 'INVERTLINKS'
elif jobInfo['type'] == 'INVERTLINKS':
nextCommand = 'DEDUP'
elif jobInfo['type'] == 'DEDUP':
if self.enable_index:
nextCommand = 'INDEX'
else:
roundEnd = True
elif jobInfo['type'] == 'INDEX':
roundEnd = True
else:
raise NutchException("Unrecognized job type {}".format(jobInfo['type']))
if roundEnd:
if nextRound and self.currentRound < self.totalRounds:
nextCommand = 'GENERATE'
self.currentRound += 1
else:
return None
return self.jobClient.create(nextCommand)
def progress(self, nextRound=True):
"""
Check the status of the current job, activate the next job if it's finished, and return the active job
If the current job has failed, a NutchCrawlException will be raised with no jobs attached.
:param nextRound: whether to start jobs from the next round if the current job/round is completed.
:return: the currently running Job, or None if no jobs are running.
"""
currentJob = self.currentJob
if currentJob is None:
return currentJob
jobInfo = currentJob.info()
if jobInfo['state'] == 'RUNNING':
return currentJob
elif jobInfo['state'] == 'FINISHED':
nextJob = self._nextJob(currentJob, nextRound)
self.currentJob = nextJob
return nextJob
else:
error = NutchCrawlException("Unexpected job state: {}".format(jobInfo['state']))
error.current_job = currentJob
raise NutchCrawlException
def addRounds(self, numRounds=1):
"""
Add more rounds to the crawl. This command does not start execution.
:param numRounds: the number of rounds to add to the crawl
:return: the total number of rounds scheduled for execution
"""
self.totalRounds += numRounds
return self.totalRounds
def nextRound(self):
"""
Execute all jobs in the current round and return when they have finished.
If a job fails, a NutchCrawlException will be raised, with all completed jobs from this round attached
to the exception.
:return: a list of all completed Jobs
"""
finishedJobs = []
if self.currentJob is None:
self.currentJob = self.jobClient.create('GENERATE')
activeJob = self.progress(nextRound=False)
while activeJob:
oldJob = activeJob
activeJob = self.progress(nextRound=False) # updates self.currentJob
if oldJob and oldJob != activeJob:
finishedJobs.append(oldJob)
sleep(self.sleepTime)
self.currentRound += 1
return finishedJobs
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
Nutch.Jobs
|
python
|
def Jobs(self, crawlId=None):
crawlId = crawlId if crawlId else defaultCrawlId()
return JobClient(self.server, crawlId, self.confId)
|
Create a JobClient for listing and creating jobs.
The JobClient inherits the confId from the Nutch client.
:param crawlId: crawlIds to use for this client. If not provided, will be generated
by nutch.defaultCrawlId()
:return: a JobClient
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L656-L666
|
[
"def defaultCrawlId():\n \"\"\"\n Provide a reasonable default crawl name using the user name and date\n \"\"\"\n\n timestamp = datetime.now().isoformat().replace(':', '_')\n user = getuser()\n return '_'.join(('crawl', user, timestamp))\n"
] |
class Nutch:
def __init__(self, confId=DefaultConfig, serverEndpoint=DefaultServerEndpoint, raiseErrors=True, **args):
'''
Nutch client for interacting with a Nutch instance over its REST API.
Constructor:
nt = Nutch()
Optional arguments:
confID - The name of the default configuration file to use, by default: nutch.DefaultConfig
serverEndpoint - The location of the Nutch server, by default: nutch.DefaultServerEndpoint
raiseErrors - raise exceptions if server response is not 200
Provides functions:
server - getServerStatus, stopServer
config - get and set parameters for this configuration
job - get list of running jobs, get job metadata, stop/abort a job by id, and create a new job
To start a crawl job, use:
Crawl() - or use the methods inject, generate, fetch, parse, updatedb in that order.
To run a crawl in one method, use:
-- nt = Nutch()
-- response, status = nt.crawl()
Methods return a tuple of two items, the response content (JSON or text) and the response status.
'''
self.confId = confId
self.server = Server(serverEndpoint, raiseErrors)
self.config = ConfigClient(self.server)[self.confId]
self.job_parameters = dict()
self.job_parameters['confId'] = confId
self.job_parameters['args'] = args # additional config. args as a dictionary
# if the configuration doesn't contain a user agent, set a default one.
if 'http.agent.name' not in self.config.info():
self.config['http.agent.name'] = DefaultUserAgent
def Config(self):
return self.config
def Configs(self):
return ConfigClient(self.server)
def Seeds(self):
return SeedClient(self.server)
def Crawl(self, seed, seedClient=None, jobClient=None, rounds=1, index=True):
"""
Launch a crawl using the given seed
:param seed: Type (Seed or SeedList) - used for crawl
:param seedClient: if a SeedList is given, the SeedClient to upload, if None a default will be created
:param jobClient: the JobClient to be used, if None a default will be created
:param rounds: the number of rounds in the crawl
:return: a CrawlClient to monitor and control the crawl
"""
if seedClient is None:
seedClient = self.Seeds()
if jobClient is None:
jobClient = self.Jobs()
if type(seed) != Seed:
seed = seedClient.create(jobClient.crawlId + '_seeds', seed)
return CrawlClient(self.server, seed, jobClient, rounds, index)
## convenience functions
## TODO: Decide if any of these should be deprecated.
def getServerStatus(self):
return self.server.call('get', '/admin')
def stopServer(self):
return self.server.call('post', '/admin/stop', headers=TextAcceptHeader)
def configGetList(self):
return self.Configs().list()
def configGetInfo(self, cid):
return self.Configs()[cid].info()
def configGetParameter(self, cid, parameterId):
return self.Configs()[cid][parameterId]
def configCreate(self, cid, config_data):
return self.Configs().create(cid, config_data)
|
chrismattmann/nutch-python
|
nutch/nutch.py
|
Nutch.Crawl
|
python
|
def Crawl(self, seed, seedClient=None, jobClient=None, rounds=1, index=True):
if seedClient is None:
seedClient = self.Seeds()
if jobClient is None:
jobClient = self.Jobs()
if type(seed) != Seed:
seed = seedClient.create(jobClient.crawlId + '_seeds', seed)
return CrawlClient(self.server, seed, jobClient, rounds, index)
|
Launch a crawl using the given seed
:param seed: Type (Seed or SeedList) - used for crawl
:param seedClient: if a SeedList is given, the SeedClient to upload, if None a default will be created
:param jobClient: the JobClient to be used, if None a default will be created
:param rounds: the number of rounds in the crawl
:return: a CrawlClient to monitor and control the crawl
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/nutch.py#L677-L693
|
[
"def create(self, sid, seedList):\n \"\"\"\n Create a new named (sid) Seed from a list of seed URLs\n\n :param sid: the name to assign to the new seed list\n :param seedList: the list of seeds to use\n :return: the created Seed object\n \"\"\"\n\n seedUrl = lambda uid, url: {\"id\": uid, \"url\": url}\n\n if not isinstance(seedList,tuple):\n seedList = (seedList,)\n\n seedListData = {\n \"id\": \"12345\",\n \"name\": sid,\n \"seedUrls\": [seedUrl(uid, url) for uid, url in enumerate(seedList)]\n }\n\n # As per resolution of https://issues.apache.org/jira/browse/NUTCH-2123\n seedPath = self.server.call('post', \"/seed/create\", seedListData, TextAcceptHeader)\n new_seed = Seed(sid, seedPath, self.server)\n return new_seed\n",
"def Jobs(self, crawlId=None):\n \"\"\"\n Create a JobClient for listing and creating jobs.\n The JobClient inherits the confId from the Nutch client.\n\n :param crawlId: crawlIds to use for this client. If not provided, will be generated\n by nutch.defaultCrawlId()\n :return: a JobClient\n \"\"\"\n crawlId = crawlId if crawlId else defaultCrawlId()\n return JobClient(self.server, crawlId, self.confId)\n",
"def Seeds(self):\n return SeedClient(self.server)\n"
] |
class Nutch:
def __init__(self, confId=DefaultConfig, serverEndpoint=DefaultServerEndpoint, raiseErrors=True, **args):
'''
Nutch client for interacting with a Nutch instance over its REST API.
Constructor:
nt = Nutch()
Optional arguments:
confID - The name of the default configuration file to use, by default: nutch.DefaultConfig
serverEndpoint - The location of the Nutch server, by default: nutch.DefaultServerEndpoint
raiseErrors - raise exceptions if server response is not 200
Provides functions:
server - getServerStatus, stopServer
config - get and set parameters for this configuration
job - get list of running jobs, get job metadata, stop/abort a job by id, and create a new job
To start a crawl job, use:
Crawl() - or use the methods inject, generate, fetch, parse, updatedb in that order.
To run a crawl in one method, use:
-- nt = Nutch()
-- response, status = nt.crawl()
Methods return a tuple of two items, the response content (JSON or text) and the response status.
'''
self.confId = confId
self.server = Server(serverEndpoint, raiseErrors)
self.config = ConfigClient(self.server)[self.confId]
self.job_parameters = dict()
self.job_parameters['confId'] = confId
self.job_parameters['args'] = args # additional config. args as a dictionary
# if the configuration doesn't contain a user agent, set a default one.
if 'http.agent.name' not in self.config.info():
self.config['http.agent.name'] = DefaultUserAgent
def Jobs(self, crawlId=None):
"""
Create a JobClient for listing and creating jobs.
The JobClient inherits the confId from the Nutch client.
:param crawlId: crawlIds to use for this client. If not provided, will be generated
by nutch.defaultCrawlId()
:return: a JobClient
"""
crawlId = crawlId if crawlId else defaultCrawlId()
return JobClient(self.server, crawlId, self.confId)
def Config(self):
return self.config
def Configs(self):
return ConfigClient(self.server)
def Seeds(self):
return SeedClient(self.server)
## convenience functions
## TODO: Decide if any of these should be deprecated.
def getServerStatus(self):
return self.server.call('get', '/admin')
def stopServer(self):
return self.server.call('post', '/admin/stop', headers=TextAcceptHeader)
def configGetList(self):
return self.Configs().list()
def configGetInfo(self, cid):
return self.Configs()[cid].info()
def configGetParameter(self, cid, parameterId):
return self.Configs()[cid][parameterId]
def configCreate(self, cid, config_data):
return self.Configs().create(cid, config_data)
|
chrismattmann/nutch-python
|
nutch/crawl.py
|
Crawler.crawl_cmd
|
python
|
def crawl_cmd(self, seed_list, n):
'''
Runs the crawl job for n rounds
:param seed_list: lines of seed URLs
:param n: number of rounds
:return: number of successful rounds
'''
print("Num Rounds "+str(n))
cc = self.proxy.Crawl(seed=seed_list, rounds=n)
rounds = cc.waitAll()
print("Completed %d rounds" % len(rounds))
return len(rounds)
|
Runs the crawl job for n rounds
:param seed_list: lines of seed URLs
:param n: number of rounds
:return: number of successful rounds
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/crawl.py#L38-L51
|
[
"def Crawl(self, seed, seedClient=None, jobClient=None, rounds=1, index=True):\n \"\"\"\n Launch a crawl using the given seed\n :param seed: Type (Seed or SeedList) - used for crawl\n :param seedClient: if a SeedList is given, the SeedClient to upload, if None a default will be created\n :param jobClient: the JobClient to be used, if None a default will be created\n :param rounds: the number of rounds in the crawl\n :return: a CrawlClient to monitor and control the crawl\n \"\"\"\n if seedClient is None:\n seedClient = self.Seeds()\n if jobClient is None:\n jobClient = self.Jobs()\n\n if type(seed) != Seed:\n seed = seedClient.create(jobClient.crawlId + '_seeds', seed)\n return CrawlClient(self.server, seed, jobClient, rounds, index)\n"
] |
class Crawler(object):
def __init__(self, args):
self.args = args
self.server_url = args['url'] if 'url' in args else nutch.DefaultServerEndpoint
self.conf_id = args['conf_id'] if 'conf_id' in args else nutch.DefaultConfig
self.proxy = nutch.Nutch(self.conf_id, self.server_url)
def load_xml_conf(self, xml_file, id):
'''
Creates a new config from xml file.
:param xml_file: path to xml file. Format : nutch-site.xml or nutch-default.xml
:param id:
:return: config object
'''
# converting nutch-site.xml to key:value pairs
import xml.etree.ElementTree as ET
tree = ET.parse(xml_file)
params = {}
for prop in tree.getroot().findall(".//property"):
params[prop.find('./name').text.strip()] = prop.find('./value').text.strip()
return self.proxy.Configs().create(id, configData=params)
def create_cmd(self, args):
'''
'create' sub-command
:param args: cli arguments
:return:
'''
cmd = args.get('cmd_create')
if cmd == 'conf':
conf_file = args['conf_file']
conf_id = args['id']
return self.load_xml_conf(conf_file, conf_id)
else:
print("Error: Create %s is invalid or not implemented" % cmd)
|
chrismattmann/nutch-python
|
nutch/crawl.py
|
Crawler.load_xml_conf
|
python
|
def load_xml_conf(self, xml_file, id):
'''
Creates a new config from xml file.
:param xml_file: path to xml file. Format : nutch-site.xml or nutch-default.xml
:param id:
:return: config object
'''
# converting nutch-site.xml to key:value pairs
import xml.etree.ElementTree as ET
tree = ET.parse(xml_file)
params = {}
for prop in tree.getroot().findall(".//property"):
params[prop.find('./name').text.strip()] = prop.find('./value').text.strip()
return self.proxy.Configs().create(id, configData=params)
|
Creates a new config from xml file.
:param xml_file: path to xml file. Format : nutch-site.xml or nutch-default.xml
:param id:
:return: config object
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/crawl.py#L53-L67
| null |
class Crawler(object):
def __init__(self, args):
self.args = args
self.server_url = args['url'] if 'url' in args else nutch.DefaultServerEndpoint
self.conf_id = args['conf_id'] if 'conf_id' in args else nutch.DefaultConfig
self.proxy = nutch.Nutch(self.conf_id, self.server_url)
def crawl_cmd(self, seed_list, n):
'''
Runs the crawl job for n rounds
:param seed_list: lines of seed URLs
:param n: number of rounds
:return: number of successful rounds
'''
print("Num Rounds "+str(n))
cc = self.proxy.Crawl(seed=seed_list, rounds=n)
rounds = cc.waitAll()
print("Completed %d rounds" % len(rounds))
return len(rounds)
def create_cmd(self, args):
'''
'create' sub-command
:param args: cli arguments
:return:
'''
cmd = args.get('cmd_create')
if cmd == 'conf':
conf_file = args['conf_file']
conf_id = args['id']
return self.load_xml_conf(conf_file, conf_id)
else:
print("Error: Create %s is invalid or not implemented" % cmd)
|
chrismattmann/nutch-python
|
nutch/crawl.py
|
Crawler.create_cmd
|
python
|
def create_cmd(self, args):
'''
'create' sub-command
:param args: cli arguments
:return:
'''
cmd = args.get('cmd_create')
if cmd == 'conf':
conf_file = args['conf_file']
conf_id = args['id']
return self.load_xml_conf(conf_file, conf_id)
else:
print("Error: Create %s is invalid or not implemented" % cmd)
|
'create' sub-command
:param args: cli arguments
:return:
|
train
|
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/crawl.py#L70-L82
|
[
"def load_xml_conf(self, xml_file, id):\n '''\n Creates a new config from xml file.\n :param xml_file: path to xml file. Format : nutch-site.xml or nutch-default.xml\n :param id:\n :return: config object\n '''\n\n # converting nutch-site.xml to key:value pairs\n import xml.etree.ElementTree as ET\n tree = ET.parse(xml_file)\n params = {}\n for prop in tree.getroot().findall(\".//property\"):\n params[prop.find('./name').text.strip()] = prop.find('./value').text.strip()\n return self.proxy.Configs().create(id, configData=params)\n"
] |
class Crawler(object):
def __init__(self, args):
self.args = args
self.server_url = args['url'] if 'url' in args else nutch.DefaultServerEndpoint
self.conf_id = args['conf_id'] if 'conf_id' in args else nutch.DefaultConfig
self.proxy = nutch.Nutch(self.conf_id, self.server_url)
def crawl_cmd(self, seed_list, n):
'''
Runs the crawl job for n rounds
:param seed_list: lines of seed URLs
:param n: number of rounds
:return: number of successful rounds
'''
print("Num Rounds "+str(n))
cc = self.proxy.Crawl(seed=seed_list, rounds=n)
rounds = cc.waitAll()
print("Completed %d rounds" % len(rounds))
return len(rounds)
def load_xml_conf(self, xml_file, id):
'''
Creates a new config from xml file.
:param xml_file: path to xml file. Format : nutch-site.xml or nutch-default.xml
:param id:
:return: config object
'''
# converting nutch-site.xml to key:value pairs
import xml.etree.ElementTree as ET
tree = ET.parse(xml_file)
params = {}
for prop in tree.getroot().findall(".//property"):
params[prop.find('./name').text.strip()] = prop.find('./value').text.strip()
return self.proxy.Configs().create(id, configData=params)
|
tbielawa/bitmath
|
bitmath/integrations.py
|
BitmathType
|
python
|
def BitmathType(bmstring):
try:
argvalue = bitmath.parse_string(bmstring)
except ValueError:
raise argparse.ArgumentTypeError("'%s' can not be parsed into a valid bitmath object" %
bmstring)
else:
return argvalue
|
An 'argument type' for integrations with the argparse module.
For more information, see
https://docs.python.org/2/library/argparse.html#type Of particular
interest to us is this bit:
``type=`` can take any callable that takes a single string
argument and returns the converted value
I.e., ``type`` can be a function (such as this function) or a class
which implements the ``__call__`` method.
Example usage of the bitmath.BitmathType argparser type:
>>> import bitmath
>>> import argparse
>>> parser = argparse.ArgumentParser()
>>> parser.add_argument("--file-size", type=bitmath.BitmathType)
>>> parser.parse_args("--file-size 1337MiB".split())
Namespace(file_size=MiB(1337.0))
Invalid usage includes any input that the bitmath.parse_string
function already rejects. Additionally, **UNQUOTED** arguments with
spaces in them are rejected (shlex.split used in the following
examples to conserve single quotes in the parse_args call):
>>> parser = argparse.ArgumentParser()
>>> parser.add_argument("--file-size", type=bitmath.BitmathType)
>>> import shlex
>>> # The following is ACCEPTABLE USAGE:
...
>>> parser.parse_args(shlex.split("--file-size '1337 MiB'"))
Namespace(file_size=MiB(1337.0))
>>> # The following is INCORRECT USAGE because the string "1337 MiB" is not quoted!
...
>>> parser.parse_args(shlex.split("--file-size 1337 MiB"))
error: argument --file-size: 1337 can not be parsed into a valid bitmath object
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/integrations.py#L33-L80
|
[
"def parse_string(s):\n \"\"\"Parse a string with units and try to make a bitmath object out of\nit.\n\nString inputs may include whitespace characters between the value and\nthe unit.\n \"\"\"\n # Strings only please\n if not isinstance(s, (str, unicode)):\n raise ValueError(\"parse_string only accepts string inputs but a %s was given\" %\n type(s))\n\n # get the index of the first alphabetic character\n try:\n index = list([i.isalpha() for i in s]).index(True)\n except ValueError:\n # If there's no alphabetic characters we won't be able to .index(True)\n raise ValueError(\"No unit detected, can not parse string '%s' into a bitmath object\" % s)\n\n # split the string into the value and the unit\n val, unit = s[:index], s[index:]\n\n # see if the unit exists as a type in our namespace\n\n if unit == \"b\":\n unit_class = Bit\n elif unit == \"B\":\n unit_class = Byte\n else:\n if not (hasattr(sys.modules[__name__], unit) and isinstance(getattr(sys.modules[__name__], unit), type)):\n raise ValueError(\"The unit %s is not a valid bitmath unit\" % unit)\n unit_class = globals()[unit]\n\n try:\n val = float(val)\n except ValueError:\n raise\n try:\n return unit_class(val)\n except: # pragma: no cover\n raise ValueError(\"Can't parse string %s into a bitmath object\" % s)\n"
] |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright © 2014-2016 Tim Bielawa <timbielawa@gmail.com>
# See GitHub Contributors Graph for more information
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sub-license, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import bitmath
import argparse
import progressbar.widgets
######################################################################
# Integrations with 3rd party modules
######################################################################
# Speed widget for integration with the Progress bar module
class BitmathFileTransferSpeed(progressbar.widgets.Widget):
"""Widget for showing the transfer speed (useful for file transfers)."""
__slots__ = ('system', 'format')
def __init__(self, system=bitmath.NIST, format="{value:.2f} {unit}/s"):
self.system = system
self.format = format
def update(self, pbar):
"""Updates the widget with the current NIST/SI speed.
Basically, this calculates the average rate of update and figures out
how to make a "pretty" prefix unit"""
if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6:
scaled = bitmath.Byte()
else:
speed = pbar.currval / pbar.seconds_elapsed
scaled = bitmath.Byte(speed).best_prefix(system=self.system)
return scaled.format(self.format)
|
tbielawa/bitmath
|
bitmath/integrations.py
|
BitmathFileTransferSpeed.update
|
python
|
def update(self, pbar):
if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6:
scaled = bitmath.Byte()
else:
speed = pbar.currval / pbar.seconds_elapsed
scaled = bitmath.Byte(speed).best_prefix(system=self.system)
return scaled.format(self.format)
|
Updates the widget with the current NIST/SI speed.
Basically, this calculates the average rate of update and figures out
how to make a "pretty" prefix unit
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/integrations.py#L92-L104
|
[
" def format(self, fmt):\n \"\"\"Return a representation of this instance formatted with user\nsupplied syntax\"\"\"\n _fmt_params = {\n 'base': self.base,\n 'bin': self.bin,\n 'binary': self.binary,\n 'bits': self.bits,\n 'bytes': self.bytes,\n 'power': self.power,\n 'system': self.system,\n 'unit': self.unit,\n 'unit_plural': self.unit_plural,\n 'unit_singular': self.unit_singular,\n 'value': self.value\n }\n\n return fmt.format(**_fmt_params)\n",
" def best_prefix(self, system=None):\n \"\"\"Optional parameter, `system`, allows you to prefer NIST or SI in\nthe results. By default, the current system is used (Bit/Byte default\nto NIST).\n\nLogic discussion/notes:\n\nBase-case, does it need converting?\n\nIf the instance is less than one Byte, return the instance as a Bit\ninstance.\n\nElse, begin by recording the unit system the instance is defined\nby. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.\n\nIf the instance is not already a ``Byte`` instance, convert it to one.\n\nNIST units step up by powers of 1024, SI units step up by powers of\n1000.\n\nTake integer value of the log(base=STEP_POWER) of the instance's byte\nvalue. E.g.:\n\n >>> int(math.log(Gb(100).bytes, 1000))\n 3\n\nThis will return a value >= 0. The following determines the 'best\nprefix unit' for representation:\n\n* result == 0, best represented as a Byte\n* result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte\n* 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]\n\n \"\"\"\n\n # Use absolute value so we don't return Bit's for *everything*\n # less than Byte(1). From github issue #55\n if abs(self) < Byte(1):\n return Bit.from_other(self)\n else:\n if type(self) is Byte: # pylint: disable=unidiomatic-typecheck\n _inst = self\n else:\n _inst = Byte.from_other(self)\n\n # Which table to consult? Was a preferred system provided?\n if system is None:\n # No preference. 
Use existing system\n if self.system == 'NIST':\n _STEPS = NIST_PREFIXES\n _BASE = 1024\n elif self.system == 'SI':\n _STEPS = SI_PREFIXES\n _BASE = 1000\n # Anything else would have raised by now\n else:\n # Preferred system provided.\n if system == NIST:\n _STEPS = NIST_PREFIXES\n _BASE = 1024\n elif system == SI:\n _STEPS = SI_PREFIXES\n _BASE = 1000\n else:\n raise ValueError(\"Invalid value given for 'system' parameter.\"\n \" Must be one of NIST or SI\")\n\n # Index of the string of the best prefix in the STEPS list\n _index = int(math.log(abs(_inst.bytes), _BASE))\n\n # Recall that the log() function returns >= 0. This doesn't\n # map to the STEPS list 1:1. That is to say, 0 is handled with\n # special care. So if the _index is 1, we actually want item 0\n # in the list.\n\n if _index == 0:\n # Already a Byte() type, so return it.\n return _inst\n elif _index >= len(_STEPS):\n # This is a really big number. Use the biggest prefix we've got\n _best_prefix = _STEPS[-1]\n elif 0 < _index < len(_STEPS):\n # There is an appropriate prefix unit to represent this\n _best_prefix = _STEPS[_index - 1]\n\n _conversion_method = getattr(\n self,\n 'to_%sB' % _best_prefix)\n\n return _conversion_method()\n"
] |
class BitmathFileTransferSpeed(progressbar.widgets.Widget):
"""Widget for showing the transfer speed (useful for file transfers)."""
__slots__ = ('system', 'format')
def __init__(self, system=bitmath.NIST, format="{value:.2f} {unit}/s"):
self.system = system
self.format = format
|
tbielawa/bitmath
|
bitmath/__init__.py
|
best_prefix
|
python
|
def best_prefix(bytes, system=NIST):
if isinstance(bytes, Bitmath):
value = bytes.bytes
else:
value = bytes
return Byte(value).best_prefix(system=system)
|
Return a bitmath instance representing the best human-readable
representation of the number of bytes given by ``bytes``. In addition
to a numeric type, the ``bytes`` parameter may also be a bitmath type.
Optionally select a preferred unit system by specifying the ``system``
keyword. Choices for ``system`` are ``bitmath.NIST`` (default) and
``bitmath.SI``.
Basically a shortcut for:
>>> import bitmath
>>> b = bitmath.Byte(12345)
>>> best = b.best_prefix()
Or:
>>> import bitmath
>>> best = (bitmath.KiB(12345) * 4201).best_prefix()
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L1174-L1198
|
[
" def best_prefix(self, system=None):\n \"\"\"Optional parameter, `system`, allows you to prefer NIST or SI in\nthe results. By default, the current system is used (Bit/Byte default\nto NIST).\n\nLogic discussion/notes:\n\nBase-case, does it need converting?\n\nIf the instance is less than one Byte, return the instance as a Bit\ninstance.\n\nElse, begin by recording the unit system the instance is defined\nby. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.\n\nIf the instance is not already a ``Byte`` instance, convert it to one.\n\nNIST units step up by powers of 1024, SI units step up by powers of\n1000.\n\nTake integer value of the log(base=STEP_POWER) of the instance's byte\nvalue. E.g.:\n\n >>> int(math.log(Gb(100).bytes, 1000))\n 3\n\nThis will return a value >= 0. The following determines the 'best\nprefix unit' for representation:\n\n* result == 0, best represented as a Byte\n* result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte\n* 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]\n\n \"\"\"\n\n # Use absolute value so we don't return Bit's for *everything*\n # less than Byte(1). From github issue #55\n if abs(self) < Byte(1):\n return Bit.from_other(self)\n else:\n if type(self) is Byte: # pylint: disable=unidiomatic-typecheck\n _inst = self\n else:\n _inst = Byte.from_other(self)\n\n # Which table to consult? Was a preferred system provided?\n if system is None:\n # No preference. 
Use existing system\n if self.system == 'NIST':\n _STEPS = NIST_PREFIXES\n _BASE = 1024\n elif self.system == 'SI':\n _STEPS = SI_PREFIXES\n _BASE = 1000\n # Anything else would have raised by now\n else:\n # Preferred system provided.\n if system == NIST:\n _STEPS = NIST_PREFIXES\n _BASE = 1024\n elif system == SI:\n _STEPS = SI_PREFIXES\n _BASE = 1000\n else:\n raise ValueError(\"Invalid value given for 'system' parameter.\"\n \" Must be one of NIST or SI\")\n\n # Index of the string of the best prefix in the STEPS list\n _index = int(math.log(abs(_inst.bytes), _BASE))\n\n # Recall that the log() function returns >= 0. This doesn't\n # map to the STEPS list 1:1. That is to say, 0 is handled with\n # special care. So if the _index is 1, we actually want item 0\n # in the list.\n\n if _index == 0:\n # Already a Byte() type, so return it.\n return _inst\n elif _index >= len(_STEPS):\n # This is a really big number. Use the biggest prefix we've got\n _best_prefix = _STEPS[-1]\n elif 0 < _index < len(_STEPS):\n # There is an appropriate prefix unit to represent this\n _best_prefix = _STEPS[_index - 1]\n\n _conversion_method = getattr(\n self,\n 'to_%sB' % _best_prefix)\n\n return _conversion_method()\n"
] |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright © 2014-2016 Tim Bielawa <timbielawa@gmail.com>
# See GitHub Contributors Graph for more information
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sub-license, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=bad-continuation,missing-docstring,invalid-name,line-too-long
"""Reference material:
The bitmath homepage is located at:
* http://bitmath.readthedocs.io/en/latest/
Prefixes for binary multiples:
http://physics.nist.gov/cuu/Units/binary.html
decimal and binary prefixes:
man 7 units (from the Linux Documentation Project 'man-pages' package)
BEFORE YOU GET HASTY WITH EXCLUDING CODE FROM COVERAGE: If you
absolutely need to skip code coverage because of a strange Python 2.x
vs 3.x thing, use the fancy environment substitution stuff from the
.coverage RC file. In review:
* If you *NEED* to skip a statement because of Python 2.x issues add the following::
# pragma: PY2X no cover
* If you *NEED* to skip a statement because of Python 3.x issues add the following::
# pragma: PY3X no cover
In this configuration, statements which are skipped in 2.x are still
covered in 3.x, and the reverse holds true for tests skipped in 3.x.
"""
from __future__ import print_function
import argparse
import contextlib
import fnmatch
import math
import numbers
import os
import os.path
import platform
import sys
# For device capacity reading in query_device_capacity(). Only supported
# on posix systems for now. Will be addressed in issue #52 on GitHub.
if os.name == 'posix':
import stat
import fcntl
import struct
__all__ = ['Bit', 'Byte', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB',
'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'Kib',
'Mib', 'Gib', 'Tib', 'Pib', 'Eib', 'kb', 'Mb', 'Gb', 'Tb',
'Pb', 'Eb', 'Zb', 'Yb', 'getsize', 'listdir', 'format',
'format_string', 'format_plural', 'parse_string', 'parse_string_unsafe',
'ALL_UNIT_TYPES', 'NIST', 'NIST_PREFIXES', 'NIST_STEPS',
'SI', 'SI_PREFIXES', 'SI_STEPS']
# Python 3.x compat
if sys.version > '3':
long = int # pragma: PY2X no cover
unicode = str # pragma: PY2X no cover
#: A list of all the valid prefix unit types. Mostly for reference,
#: also used by the CLI tool as valid types
ALL_UNIT_TYPES = ['Bit', 'Byte', 'kb', 'kB', 'Mb', 'MB', 'Gb', 'GB', 'Tb',
'TB', 'Pb', 'PB', 'Eb', 'EB', 'Zb', 'ZB', 'Yb',
'YB', 'Kib', 'KiB', 'Mib', 'MiB', 'Gib', 'GiB',
'Tib', 'TiB', 'Pib', 'PiB', 'Eib', 'EiB']
# #####################################################################
# Set up our module variables/constants
###################################
# Internal:
# Console repr(), ex: MiB(13.37), or kB(42.0)
_FORMAT_REPR = '{unit_singular}({value})'
# ##################################
# Exposed:
#: Constants for referring to NIST prefix system
NIST = int(2)
#: Constants for referring to SI prefix system
SI = int(10)
# ##################################
#: All of the SI prefixes
SI_PREFIXES = ['k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
#: Byte values represented by each SI prefix unit
SI_STEPS = {
'Bit': 1 / 8.0,
'Byte': 1,
'k': 1000,
'M': 1000000,
'G': 1000000000,
'T': 1000000000000,
'P': 1000000000000000,
'E': 1000000000000000000,
'Z': 1000000000000000000000,
'Y': 1000000000000000000000000
}
#: All of the NIST prefixes
NIST_PREFIXES = ['Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei']
#: Byte values represented by each NIST prefix unit
NIST_STEPS = {
'Bit': 1 / 8.0,
'Byte': 1,
'Ki': 1024,
'Mi': 1048576,
'Gi': 1073741824,
'Ti': 1099511627776,
'Pi': 1125899906842624,
'Ei': 1152921504606846976
}
#: String representation, ex: ``13.37 MiB``, or ``42.0 kB``
format_string = "{value} {unit}"
#: Pluralization behavior
format_plural = False
def os_name():
    """Return the host OS name (thin wrapper over ``os.name`` so unit
    tests can monkeypatch platform-specific code paths)."""
    return os.name
def capitalize_first(s):
    """Return a copy of `s` with ONLY its first character upper-cased.

    Unlike ``str.capitalize`` the remainder of the string is left
    completely untouched (no lower-casing of the tail).
    """
    head, tail = s[0].upper(), s[1:]
    return head + tail
######################################################################
# Base class for everything else
class Bitmath(object):
"""The base class for all the other prefix classes"""
# All the allowed input types
valid_types = (int, float, long)
def __init__(self, value=0, bytes=None, bits=None):
    """Instantiate with `value` by the unit, in plain bytes, or
    bits. Don't supply more than one keyword.

    default behavior: initialize with value of 0
    only setting value: assert bytes is None and bits is None
    only setting bytes: assert value == 0 and bits is None
    only setting bits: assert value == 0 and bytes is None
    """
    # Validate that at most one initialization channel was used;
    # anything else is ambiguous and rejected below.
    _raise = False
    if (value == 0) and (bytes is None) and (bits is None):
        # Default: zero-valued instance.
        pass
    # Setting by bytes
    elif bytes is not None:
        if (value == 0) and (bits is None):
            pass
        else:
            _raise = True
    # setting by bits
    elif bits is not None:
        if (value == 0) and (bytes is None):
            pass
        else:
            _raise = True
    if _raise:
        raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
    # Subclass hook: populates self._base, self._power, the unit
    # names, and self._unit_value (see _do_setup/_setup).
    self._do_setup()
    if bytes:
        # We were provided with the fundamental base unit, no need
        # to normalize.
        # NOTE(review): truthiness test — bytes=0 falls through to
        # _norm(value) below; harmless since value defaults to 0.
        self._byte_value = bytes
        self._bit_value = bytes * 8.0
    elif bits:
        # We were *ALMOST* given the fundamental base
        # unit. Translate it into the fundamental unit then
        # normalize.
        self._byte_value = bits / 8.0
        self._bit_value = bits
    else:
        # We were given a value representative of this *prefix
        # unit*. We need to normalize it into the number of bytes
        # it represents.
        self._norm(value)
    # We have the fundamental unit figured out. Set the 'pretty' unit
    self._set_prefix_value()
def _set_prefix_value(self):
self.prefix_value = self._to_prefix_value(self._byte_value)
def _to_prefix_value(self, value):
"""Return the number of bits/bytes as they would look like if we
converted *to* this unit"""
return value / float(self._unit_value)
def _setup(self):
raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")
def _do_setup(self):
"""Setup basic parameters for this class.
`base` is the numeric base which when raised to `power` is equivalent
to 1 unit of the corresponding prefix. I.e., base=2, power=10
represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.
Likewise, for the SI prefix classes `base` will be 10, and the `power`
for the Kilobyte is 3.
"""
(self._base, self._power, self._name_singular, self._name_plural) = self._setup()
self._unit_value = self._base ** self._power
def _norm(self, value):
    """Normalize the input value into the fundamental unit for this prefix
    type.

    :param number value: The input value to be normalized
    :raises ValueError: if the input value is not a type of real number
    """
    if isinstance(value, self.valid_types):
        # bytes = prefix value * (base ** power); bits follow as * 8.
        self._byte_value = value * self._unit_value
        self._bit_value = self._byte_value * 8.0
    else:
        raise ValueError("Initialization value '%s' is of an invalid type: %s. "
                         "Must be one of %s" % (
                             value,
                             type(value),
                             ", ".join(str(x) for x in self.valid_types)))
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
def system(self):
"""The system of units used to measure an instance"""
if self._base == 2:
return "NIST"
elif self._base == 10:
return "SI"
else:
# I don't expect to ever encounter this logic branch, but
# hey, it's better to have extra test coverage than
# insufficient test coverage.
raise ValueError("Instances mathematical base is an unsupported value: %s" % (
str(self._base)))
@property
def unit(self):
"""The string that is this instances prefix unit name in agreement
with this instance value (singular or plural). Following the
convention that only 1 is singular. This will always be the singular
form when :attr:`bitmath.format_plural` is ``False`` (default value).
For example:
>>> KiB(1).unit == 'KiB'
>>> Byte(0).unit == 'Bytes'
>>> Byte(1).unit == 'Byte'
>>> Byte(1.1).unit == 'Bytes'
>>> Gb(2).unit == 'Gbs'
"""
global format_plural
if self.prefix_value == 1:
# If it's a '1', return it singular, no matter what
return self._name_singular
elif format_plural:
# Pluralization requested
return self._name_plural
else:
# Pluralization NOT requested, and the value is not 1
return self._name_singular
@property
def unit_plural(self):
"""The string that is an instances prefix unit name in the plural
form.
For example:
>>> KiB(1).unit_plural == 'KiB'
>>> Byte(1024).unit_plural == 'Bytes'
>>> Gb(1).unit_plural == 'Gb'
"""
return self._name_plural
@property
def unit_singular(self):
"""The string that is an instances prefix unit name in the singular
form.
For example:
>>> KiB(1).unit_singular == 'KiB'
>>> Byte(1024).unit == 'B'
>>> Gb(1).unit_singular == 'Gb'
"""
return self._name_singular
#: The "prefix" value of an instance
value = property(lambda s: s.prefix_value)
@classmethod
def from_other(cls, item):
"""Factory function to return instances of `item` converted into a new
instance of ``cls``. Because this is a class method, it may be called
from any bitmath class object without the need to explicitly
instantiate the class ahead of time.
*Implicit Parameter:*
* ``cls`` A bitmath class, implicitly set to the class of the
instance object it is called on
*User Supplied Parameter:*
* ``item`` A :class:`bitmath.Bitmath` subclass instance
*Example:*
>>> import bitmath
>>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
>>> print kib
KiB(1024.0)
"""
if isinstance(item, Bitmath):
return cls(bits=item.bits)
else:
raise ValueError("The provided items must be a valid bitmath class: %s" %
str(item.__class__))
######################################################################
# The following implement the Python datamodel customization methods
#
# Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
def __repr__(self):
"""Representation of this object as you would expect to see in an
interpreter"""
global _FORMAT_REPR
return self.format(_FORMAT_REPR)
def __str__(self):
"""String representation of this object"""
global format_string
return self.format(format_string)
def format(self, fmt):
    """Return a representation of this instance formatted with user
    supplied syntax.

    `fmt` is a ``str.format()`` template; any of the keys built below
    may be referenced, e.g. ``"{value:.2f} {unit}"``.
    """
    # Expose every public attribute/property as a format keyword.
    _fmt_params = {
        'base': self.base,
        'bin': self.bin,
        'binary': self.binary,
        'bits': self.bits,
        'bytes': self.bytes,
        'power': self.power,
        'system': self.system,
        'unit': self.unit,
        'unit_plural': self.unit_plural,
        'unit_singular': self.unit_singular,
        'value': self.value
    }
    return fmt.format(**_fmt_params)
##################################################################
# Guess the best human-readable prefix unit for representation
##################################################################
def best_prefix(self, system=None):
    """Return this quantity converted into the best human-readable unit.

    Optional parameter `system` allows you to prefer NIST or SI in the
    results. By default, the current system is used (Bit/Byte default
    to NIST).

    Logic: quantities smaller than one Byte come back as a ``Bit``
    instance. Otherwise the instance is converted to bytes and
    ``int(math.log(bytes, base))`` picks an index into the chosen
    system's prefix list (base is 1024 for NIST, 1000 for SI):

    * result == 0 -> best represented as a Byte
    * result >= len(prefix list) -> use the largest prefix available
    * otherwise -> prefix list[result - 1]
    """
    # Use absolute value so we don't return Bit's for *everything*
    # less than Byte(1). From github issue #55
    if abs(self) < Byte(1):
        return Bit.from_other(self)
    else:
        if type(self) is Byte:  # pylint: disable=unidiomatic-typecheck
            _inst = self
        else:
            # Normalize to bytes first so the log math below is uniform.
            _inst = Byte.from_other(self)
        # Which table to consult? Was a preferred system provided?
        if system is None:
            # No preference. Use existing system
            if self.system == 'NIST':
                _STEPS = NIST_PREFIXES
                _BASE = 1024
            elif self.system == 'SI':
                _STEPS = SI_PREFIXES
                _BASE = 1000
            # Anything else would have raised by now
        else:
            # Preferred system provided.
            if system == NIST:
                _STEPS = NIST_PREFIXES
                _BASE = 1024
            elif system == SI:
                _STEPS = SI_PREFIXES
                _BASE = 1000
            else:
                raise ValueError("Invalid value given for 'system' parameter."
                                 " Must be one of NIST or SI")
        # Index of the string of the best prefix in the STEPS list
        _index = int(math.log(abs(_inst.bytes), _BASE))
        # Recall that the log() function returns >= 0. This doesn't
        # map to the STEPS list 1:1. That is to say, 0 is handled with
        # special care. So if the _index is 1, we actually want item 0
        # in the list.
        if _index == 0:
            # Already a Byte() type, so return it.
            return _inst
        elif _index >= len(_STEPS):
            # This is a really big number. Use the biggest prefix we've got
            _best_prefix = _STEPS[-1]
        elif 0 < _index < len(_STEPS):
            # There is an appropriate prefix unit to represent this
            _best_prefix = _STEPS[_index - 1]
        # Dispatch to the matching to_<prefix>B() conversion method.
        _conversion_method = getattr(
            self,
            'to_%sB' % _best_prefix)
        return _conversion_method()
##################################################################
def to_Bit(self):
return Bit(self._bit_value)
def to_Byte(self):
return Byte(self._byte_value / float(NIST_STEPS['Byte']))
# Properties
Bit = property(lambda s: s.to_Bit())
Byte = property(lambda s: s.to_Byte())
##################################################################
def to_KiB(self):
return KiB(bits=self._bit_value)
def to_Kib(self):
return Kib(bits=self._bit_value)
def to_kB(self):
return kB(bits=self._bit_value)
def to_kb(self):
return kb(bits=self._bit_value)
# Properties
KiB = property(lambda s: s.to_KiB())
Kib = property(lambda s: s.to_Kib())
kB = property(lambda s: s.to_kB())
kb = property(lambda s: s.to_kb())
##################################################################
def to_MiB(self):
return MiB(bits=self._bit_value)
def to_Mib(self):
return Mib(bits=self._bit_value)
def to_MB(self):
return MB(bits=self._bit_value)
def to_Mb(self):
return Mb(bits=self._bit_value)
# Properties
MiB = property(lambda s: s.to_MiB())
Mib = property(lambda s: s.to_Mib())
MB = property(lambda s: s.to_MB())
Mb = property(lambda s: s.to_Mb())
##################################################################
def to_GiB(self):
return GiB(bits=self._bit_value)
def to_Gib(self):
return Gib(bits=self._bit_value)
def to_GB(self):
return GB(bits=self._bit_value)
def to_Gb(self):
return Gb(bits=self._bit_value)
# Properties
GiB = property(lambda s: s.to_GiB())
Gib = property(lambda s: s.to_Gib())
GB = property(lambda s: s.to_GB())
Gb = property(lambda s: s.to_Gb())
##################################################################
def to_TiB(self):
return TiB(bits=self._bit_value)
def to_Tib(self):
return Tib(bits=self._bit_value)
def to_TB(self):
return TB(bits=self._bit_value)
def to_Tb(self):
return Tb(bits=self._bit_value)
# Properties
TiB = property(lambda s: s.to_TiB())
Tib = property(lambda s: s.to_Tib())
TB = property(lambda s: s.to_TB())
Tb = property(lambda s: s.to_Tb())
##################################################################
def to_PiB(self):
return PiB(bits=self._bit_value)
def to_Pib(self):
return Pib(bits=self._bit_value)
def to_PB(self):
return PB(bits=self._bit_value)
def to_Pb(self):
return Pb(bits=self._bit_value)
# Properties
PiB = property(lambda s: s.to_PiB())
Pib = property(lambda s: s.to_Pib())
PB = property(lambda s: s.to_PB())
Pb = property(lambda s: s.to_Pb())
##################################################################
def to_EiB(self):
return EiB(bits=self._bit_value)
def to_Eib(self):
return Eib(bits=self._bit_value)
def to_EB(self):
return EB(bits=self._bit_value)
def to_Eb(self):
return Eb(bits=self._bit_value)
# Properties
EiB = property(lambda s: s.to_EiB())
Eib = property(lambda s: s.to_Eib())
EB = property(lambda s: s.to_EB())
Eb = property(lambda s: s.to_Eb())
##################################################################
# The SI units go beyond the NIST units. They also have the Zetta
# and Yotta prefixes.
def to_ZB(self):
return ZB(bits=self._bit_value)
def to_Zb(self):
return Zb(bits=self._bit_value)
# Properties
ZB = property(lambda s: s.to_ZB())
Zb = property(lambda s: s.to_Zb())
##################################################################
def to_YB(self):
return YB(bits=self._bit_value)
def to_Yb(self):
return Yb(bits=self._bit_value)
#: A new object representing this instance as a Yottabyte
YB = property(lambda s: s.to_YB())
Yb = property(lambda s: s.to_Yb())
##################################################################
# Rich comparison operations
##################################################################
def __lt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value < other
else:
return self._byte_value < other.bytes
def __le__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value <= other
else:
return self._byte_value <= other.bytes
def __eq__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value == other
else:
return self._byte_value == other.bytes
def __ne__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value != other
else:
return self._byte_value != other.bytes
def __gt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value > other
else:
return self._byte_value > other.bytes
def __ge__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value >= other
else:
return self._byte_value >= other.bytes
##################################################################
# Basic math operations
##################################################################
# Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
"""These methods are called to implement the binary arithmetic
operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
instance, to evaluate the expression x + y, where x is an instance of
a class that has an __add__() method, x.__add__(y) is called. The
__divmod__() method should be the equivalent to using __floordiv__()
and __mod__(); it should not be related to __truediv__() (described
below). Note that __pow__() should be defined to accept an optional
third argument if the ternary version of the built-in pow() function
is to be supported.object.__complex__(self)
"""
def __add__(self, other):
"""Supported operations with result types:
- bm + bm = bm
- bm + num = num
- num + bm = num (see radd)
"""
if isinstance(other, numbers.Number):
# bm + num
return other + self.value
else:
# bm + bm
total_bytes = self._byte_value + other.bytes
return (type(self))(bytes=total_bytes)
def __sub__(self, other):
"""Subtraction: Supported operations with result types:
- bm - bm = bm
- bm - num = num
- num - bm = num (see rsub)
"""
if isinstance(other, numbers.Number):
# bm - num
return self.value - other
else:
# bm - bm
total_bytes = self._byte_value - other.bytes
return (type(self))(bytes=total_bytes)
def __mul__(self, other):
    """Multiplication: Supported operations with result types:

    - bm1 * bm2 = bm1
    - bm * num = bm
    - num * bm = num (see rmul)
    """
    if isinstance(other, numbers.Number):
        # bm * num: scale the byte value, keep this prefix type.
        result = self._byte_value * other
        return (type(self))(bytes=result)
    else:
        # bm1 * bm2: multiply the prefix magnitudes of both operands
        # (value * base**power for each, i.e. each operand's byte
        # count) and build a new instance of self's type from the
        # product. NOTE(review): the product is bytes-squared in unit
        # terms — this matches the documented "bm1 * bm2 = bm1"
        # contract above, not physical dimensional analysis.
        _other = other.value * other.base ** other.power
        _self = self.prefix_value * self._base ** self._power
        return (type(self))(bytes=_other * _self)
"""The division operator (/) is implemented by these methods. The
__truediv__() method is used when __future__.division is in effect,
otherwise __div__() is used. If only one of these two methods is
defined, the object will not support division in the alternate
context; TypeError will be raised instead."""
def __div__(self, other):
    """Division: Supported operations with result types:

    - bm1 / bm2 = num
    - bm / num = bm
    - num / bm = num (see rdiv)
    """
    if isinstance(other, numbers.Number):
        # bm / num: scale the byte value down, keep this prefix type.
        result = self._byte_value / other
        return (type(self))(bytes=result)
    else:
        # bm1 / bm2: dimensionless ratio of the two quantities,
        # returned as a plain float (the units cancel).
        return self._byte_value / float(other.bytes)
def __truediv__(self, other):
    # Python 3 / ``from __future__ import division`` entry point:
    # identical semantics to __div__ (bm / num = bm, bm1 / bm2 = num).
    return self.__div__(other)
# def __floordiv__(self, other):
# return NotImplemented
# def __mod__(self, other):
# return NotImplemented
# def __divmod__(self, other):
# return NotImplemented
# def __pow__(self, other, modulo=None):
# return NotImplemented
##################################################################
"""These methods are called to implement the binary arithmetic
operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
reflected (swapped) operands. These functions are only called if the
left operand does not support the corresponding operation and the
operands are of different types. [2] For instance, to evaluate the
expression x - y, where y is an instance of a class that has an
__rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
NotImplemented.
These are the add/sub/mul/div methods for syntax where a number type
is given for the LTYPE and a bitmath object is given for the
RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
"""
def __radd__(self, other):
# num + bm = num
return other + self.value
def __rsub__(self, other):
# num - bm = num
return other - self.value
def __rmul__(self, other):
# num * bm = bm
return self * other
def __rdiv__(self, other):
# num / bm = num
return other / float(self.value)
def __rtruediv__(self, other):
# num / bm = num
return other / float(self.value)
"""Called to implement the built-in functions complex(), int(),
long(), and float(). Should return a value of the appropriate type.
If one of those methods does not support the operation with the
supplied arguments, it should return NotImplemented.
For bitmath purposes, these methods return the int/long/float
equivalent of the this instances prefix Unix value. That is to say:
- int(KiB(3.336)) would return 3
- long(KiB(3.336)) would return 3L
- float(KiB(3.336)) would return 3.336
"""
def __int__(self):
"""Return this instances prefix unit as an integer"""
return int(self.prefix_value)
def __long__(self):
"""Return this instances prefix unit as a long integer"""
return long(self.prefix_value) # pragma: PY3X no cover
def __float__(self):
"""Return this instances prefix unit as a floating point number"""
return float(self.prefix_value)
##################################################################
# Bitwise operations
##################################################################
def __lshift__(self, other):
"""Left shift, ex: 100 << 2
A left shift by n bits is equivalent to multiplication by pow(2,
n). A long integer is returned if the result exceeds the range of
plain integers."""
shifted = int(self.bits) << other
return type(self)(bits=shifted)
def __rshift__(self, other):
"""Right shift, ex: 100 >> 2
A right shift by n bits is equivalent to division by pow(2, n)."""
shifted = int(self.bits) >> other
return type(self)(bits=shifted)
def __and__(self, other):
""""Bitwise and, ex: 100 & 2
bitwise and". Each bit of the output is 1 if the corresponding bit
of x AND of y is 1, otherwise it's 0."""
andd = int(self.bits) & other
return type(self)(bits=andd)
def __xor__(self, other):
"""Bitwise xor, ex: 100 ^ 2
Does a "bitwise exclusive or". Each bit of the output is the same
as the corresponding bit in x if that bit in y is 0, and it's the
complement of the bit in x if that bit in y is 1."""
xord = int(self.bits) ^ other
return type(self)(bits=xord)
def __or__(self, other):
"""Bitwise or, ex: 100 | 2
Does a "bitwise or". Each bit of the output is 0 if the corresponding
bit of x AND of y is 0, otherwise it's 1."""
ord = int(self.bits) | other
return type(self)(bits=ord)
##################################################################
def __neg__(self):
"""The negative version of this instance"""
return (type(self))(-abs(self.prefix_value))
def __pos__(self):
return (type(self))(abs(self.prefix_value))
def __abs__(self):
return (type(self))(abs(self.prefix_value))
# def __invert__(self):
# """Called to implement the unary arithmetic operations (-, +, abs()
# and ~)."""
# return NotImplemented
######################################################################
# First, the bytes...
class Byte(Bitmath):
    """The fundamental Byte type; all byte-prefixed units subclass this.

    Byte based types normalize through ``self._byte_value`` — input
    values are multiplied into bytes and bits derived as bytes * 8
    (see ``Bitmath._norm``).
    """
    def _setup(self):
        # (base, power, singular name, plural name): 2**0 == 1 Byte.
        return (2, 0, 'Byte', 'Bytes')
######################################################################
# NIST Prefixes for Byte based types
class KiB(Byte):
def _setup(self):
return (2, 10, 'KiB', 'KiBs')
Kio = KiB
class MiB(Byte):
def _setup(self):
return (2, 20, 'MiB', 'MiBs')
Mio = MiB
class GiB(Byte):
def _setup(self):
return (2, 30, 'GiB', 'GiBs')
Gio = GiB
class TiB(Byte):
def _setup(self):
return (2, 40, 'TiB', 'TiBs')
Tio = TiB
class PiB(Byte):
def _setup(self):
return (2, 50, 'PiB', 'PiBs')
Pio = PiB
class EiB(Byte):
def _setup(self):
return (2, 60, 'EiB', 'EiBs')
Eio = EiB
######################################################################
# SI Prefixes for Byte based types
class kB(Byte):
def _setup(self):
return (10, 3, 'kB', 'kBs')
ko = kB
class MB(Byte):
def _setup(self):
return (10, 6, 'MB', 'MBs')
Mo = MB
class GB(Byte):
def _setup(self):
return (10, 9, 'GB', 'GBs')
Go = GB
class TB(Byte):
def _setup(self):
return (10, 12, 'TB', 'TBs')
To = TB
class PB(Byte):
def _setup(self):
return (10, 15, 'PB', 'PBs')
Po = PB
class EB(Byte):
def _setup(self):
return (10, 18, 'EB', 'EBs')
Eo = EB
class ZB(Byte):
def _setup(self):
return (10, 21, 'ZB', 'ZBs')
Zo = ZB
class YB(Byte):
def _setup(self):
return (10, 24, 'YB', 'YBs')
Yo = YB
######################################################################
# And now the bit types
class Bit(Bitmath):
    """Bit based types fundamentally operate on self._bit_value"""
    def _set_prefix_value(self):
        # The 'pretty' prefix value is derived from bits (not bytes)
        # for bit-based units — overrides the byte-based default.
        self.prefix_value = self._to_prefix_value(self._bit_value)
    def _setup(self):
        # (base, power, singular name, plural name): 2**0 == 1 Bit.
        return (2, 0, 'Bit', 'Bits')
    def _norm(self, value):
        """Normalize the input value into the fundamental unit for this prefix
        type"""
        # For bit units the input is measured in bits; bytes follow as
        # bits / 8. NOTE(review): unlike Bitmath._norm, no type check
        # against self.valid_types is performed here — confirm whether
        # that asymmetry is intentional.
        self._bit_value = value * self._unit_value
        self._byte_value = self._bit_value / 8.0
######################################################################
# NIST Prefixes for Bit based types
class Kib(Bit):
    """NIST kibibit: 2 ** 10 Bits."""
    def _setup(self):
        return (2, 10, 'Kib', 'Kibs')


class Mib(Bit):
    """NIST mebibit: 2 ** 20 Bits."""
    def _setup(self):
        return (2, 20, 'Mib', 'Mibs')


class Gib(Bit):
    """NIST gibibit: 2 ** 30 Bits."""
    def _setup(self):
        return (2, 30, 'Gib', 'Gibs')


class Tib(Bit):
    """NIST tebibit: 2 ** 40 Bits."""
    def _setup(self):
        return (2, 40, 'Tib', 'Tibs')


class Pib(Bit):
    """NIST pebibit: 2 ** 50 Bits."""
    def _setup(self):
        return (2, 50, 'Pib', 'Pibs')


class Eib(Bit):
    """NIST exbibit: 2 ** 60 Bits."""
    def _setup(self):
        return (2, 60, 'Eib', 'Eibs')
######################################################################
# SI Prefixes for Bit based types
class kb(Bit):
    """SI kilobit: 10 ** 3 Bits."""
    def _setup(self):
        return (10, 3, 'kb', 'kbs')


class Mb(Bit):
    """SI megabit: 10 ** 6 Bits."""
    def _setup(self):
        return (10, 6, 'Mb', 'Mbs')


class Gb(Bit):
    """SI gigabit: 10 ** 9 Bits."""
    def _setup(self):
        return (10, 9, 'Gb', 'Gbs')


class Tb(Bit):
    """SI terabit: 10 ** 12 Bits."""
    def _setup(self):
        return (10, 12, 'Tb', 'Tbs')


class Pb(Bit):
    """SI petabit: 10 ** 15 Bits."""
    def _setup(self):
        return (10, 15, 'Pb', 'Pbs')


class Eb(Bit):
    """SI exabit: 10 ** 18 Bits."""
    def _setup(self):
        return (10, 18, 'Eb', 'Ebs')


class Zb(Bit):
    """SI zettabit: 10 ** 21 Bits."""
    def _setup(self):
        return (10, 21, 'Zb', 'Zbs')


class Yb(Bit):
    """SI yottabit: 10 ** 24 Bits."""
    def _setup(self):
        return (10, 24, 'Yb', 'Ybs')
######################################################################
# Utility functions
def query_device_capacity(device_fd):
    """Create bitmath instances of the capacity of a system block device

    Make one or more ioctl request to query the capacity of a block
    device. Perform any processing required to compute the final capacity
    value. Return the device capacity in bytes as a :class:`bitmath.Byte`
    instance.

    Thanks to the following resources for help figuring this out Linux/Mac
    ioctl's for querying block device sizes:

    * http://stackoverflow.com/a/12925285/263969
    * http://stackoverflow.com/a/9764508/263969

    :param file device_fd: A ``file`` object of the device to query the
        capacity of (as in ``get_device_capacity(open("/dev/sda"))``).
    :return: a bitmath :class:`bitmath.Byte` instance equivalent to the
        capacity of the target device in bytes.
    :raises NotImplementedError: on non-posix platforms.
    :raises ValueError: if ``device_fd`` does not refer to a block device.
    """
    if os_name() != 'posix':
        raise NotImplementedError("'bitmath.query_device_capacity' is not supported on this platform: %s" % os_name())

    s = os.stat(device_fd.name).st_mode
    if not stat.S_ISBLK(s):
        raise ValueError("The file descriptor provided is not of a device type")
    # The keys of the ``ioctl_map`` dictionary correlate to possible
    # values from the ``platform.system`` function.
    ioctl_map = {
        # ioctls for the "Linux" platform
        "Linux": {
            "request_params": [
                # A list of parameters to calculate the block size.
                #
                # ( PARAM_NAME , FORMAT_CHAR , REQUEST_CODE )
                ("BLKGETSIZE64", "L", 0x80081272)
                # Per <linux/fs.h>, the BLKGETSIZE64 request returns a
                # 'u64' sized value. This is an unsigned 64 bit
                # integer C type. This means to correctly "buffer" the
                # result we need 64 bits, or 8 bytes, of memory.
                #
                # The struct module documentation include a reference
                # chart relating formatting characters to native C
                # Types. In this case, using the "native size", the
                # table tells us:
                #
                # * Character 'L' - Unsigned Long C Type (u64) - Loads into a Python int type
                #
                # Confirm this character is right by running (on Linux):
                #
                # >>> import struct
                # >>> print 8 == struct.calcsize('L')
                #
                # The result should be true as long as your kernel
                # headers define BLKGETSIZE64 as a u64 type (please
                # file a bug report at
                # https://github.com/tbielawa/bitmath/issues/new if
                # this does *not* work for you)
            ],
            # func is how the final result is decided. Because the
            # Linux BLKGETSIZE64 call returns the block device
            # capacity in bytes as an integer value, no extra
            # calculations are required. Simply return the value of
            # BLKGETSIZE64.
            "func": lambda x: x["BLKGETSIZE64"]
        },
        # ioctls for the "Darwin" (Mac OS X) platform
        "Darwin": {
            "request_params": [
                # A list of parameters to calculate the block size.
                #
                # ( PARAM_NAME , FORMAT_CHAR , REQUEST_CODE )
                ("DKIOCGETBLOCKCOUNT", "L", 0x40086419),
                # Per <sys/disk.h>: get media's block count - uint64_t
                #
                # As in the BLKGETSIZE64 example, an unsigned 64 bit
                # integer will use the 'L' formatting character
                ("DKIOCGETBLOCKSIZE", "I", 0x40046418)
                # Per <sys/disk.h>: get media's block size - uint32_t
                #
                # This request returns an unsigned 32 bit integer, or
                # in other words: just a normal integer (or 'int' c
                # type). That should require 4 bytes of space for
                # buffering. According to the struct modules
                # 'Formatting Characters' chart:
                #
                # * Character 'I' - Unsigned Int C Type (uint32_t) - Loads into a Python int type
            ],
            # OS X doesn't have a direct equivalent to the Linux
            # BLKGETSIZE64 request. Instead, we must request how many
            # blocks (or "sectors") are on the disk, and the size (in
            # bytes) of each block. Finally, multiply the two together
            # to obtain capacity:
            #
            #                   n Block * y Byte
            # capacity (bytes) =        -------
            #                           1 Block
            "func": lambda x: x["DKIOCGETBLOCKCOUNT"] * x["DKIOCGETBLOCKSIZE"]
            # This expression simply accepts a dictionary ``x`` as a
            # parameter, and then returns the result of multiplying
            # the two named dictionary items together. In this case,
            # that means multiplying ``DKIOCGETBLOCKCOUNT``, the total
            # number of blocks, by ``DKIOCGETBLOCKSIZE``, the size of
            # each block in bytes.
        }
    }

    platform_params = ioctl_map[platform.system()]
    results = {}

    for req_name, fmt, request_code in platform_params['request_params']:
        # Read the systems native size (in bytes) of this format type.
        buffer_size = struct.calcsize(fmt)
        # Construct a buffer to store the ioctl result in.
        # NOTE(review): on Python 3 ``fcntl.ioctl`` expects a bytes-like
        # mutable/immutable buffer; a ``str`` of spaces may need to be
        # ``b' ' * buffer_size`` -- confirm against supported versions.
        buffer = ' ' * buffer_size
        # This code has been run on only a few test systems. If it's
        # appropriate, maybe in the future we'll add try/except
        # conditions for some possible errors. Really only for cases
        # where it would add value to override the default exception
        # message string.
        buffer = fcntl.ioctl(device_fd.fileno(), request_code, buffer)
        # Unpack the raw result from the ioctl call into a familiar
        # python data type according to the ``fmt`` rules.
        result = struct.unpack(fmt, buffer)[0]
        # Add the new result to our collection
        results[req_name] = result

    return Byte(platform_params['func'](results))
def getsize(path, bestprefix=True, system=NIST):
    """Return the size of the file at ``path`` as a bitmath instance.

    By default the result is converted into the best human-readable
    prefix unit; pass ``bestprefix=False`` to get a plain
    :class:`bitmath.Byte` back instead. ``system`` selects the preferred
    unit system, either ``bitmath.NIST`` (default) or ``bitmath.SI``.
    """
    real_path = os.path.realpath(path)
    raw_size = Byte(os.path.getsize(real_path))
    if not bestprefix:
        return raw_size
    return raw_size.best_prefix(system=system)
def listdir(search_base, followlinks=False, filter='*',
            relpath=False, bestprefix=False, system=NIST):
    """Recurse the directory tree under ``search_base``, yielding 2-tuples:

    * The absolute/relative path to a discovered file
    * A bitmath instance representing the "apparent size" of the file.

    - ``search_base`` - The directory to begin walking down.
    - ``followlinks`` - Whether or not to follow symbolic links to directories
    - ``filter`` - A glob (see :py:mod:`fnmatch`) to filter results with
      (default: ``*``, everything)
    - ``relpath`` - ``True`` to return the relative path from `pwd` or
      ``False`` (default) to return the fully qualified path
    - ``bestprefix`` - set to ``False`` to get ``bitmath.Byte``
      instances back instead.
    - ``system`` - Provide a preferred unit system by setting ``system``
      to either ``bitmath.NIST`` (default) or ``bitmath.SI``.

    .. note:: This function does NOT return tuples for directory entities.

    .. note:: Symlinks to **files** are followed automatically
    """
    for root, dirs, files in os.walk(search_base, followlinks=followlinks):
        for name in fnmatch.filter(files, filter):
            discovered = os.path.join(root, name)
            # Choose the representation the caller asked for
            if relpath:
                reported = os.path.relpath(discovered, '.')
            else:
                reported = os.path.realpath(discovered)
            # When not following links, skip directories and symlinks
            if not followlinks and (os.path.isdir(discovered) or
                                    os.path.islink(discovered)):
                continue
            yield (reported, getsize(discovered, bestprefix=bestprefix, system=system))
def parse_string(s):
    """Build a bitmath object from a string such as ``"1024 KiB"``.

    Whitespace between the numeric value and the unit is tolerated.

    :raises ValueError: for non-string input, a missing/unknown unit, or
        an unparseable numeric value.
    """
    # Strings only please
    if not isinstance(s, (str, unicode)):
        raise ValueError("parse_string only accepts string inputs but a %s was given" %
                         type(s))

    # Locate the first alphabetic character: everything before it is the
    # numeric value, everything from it onward is the unit.
    try:
        index = next(pos for pos, char in enumerate(s) if char.isalpha())
    except StopIteration:
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)

    val, unit = s[:index], s[index:]

    # Map the unit string onto a bitmath class
    if unit == "b":
        unit_class = Bit
    elif unit == "B":
        unit_class = Byte
    else:
        if not (hasattr(sys.modules[__name__], unit) and isinstance(getattr(sys.modules[__name__], unit), type)):
            raise ValueError("The unit %s is not a valid bitmath unit" % unit)
        unit_class = globals()[unit]

    # ValueError from an unparseable number propagates to the caller
    val = float(val)

    try:
        return unit_class(val)
    except:  # pragma: no cover
        raise ValueError("Can't parse string %s into a bitmath object" % s)
def parse_string_unsafe(s, system=SI):
    """Attempt to parse a string with ambiguous units and try to make a
    bitmath object out of it.

    This may produce inaccurate results if parsing shell output. For
    example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
    ~= 2.666 KiB. See the documentation for all of the important details.

    Note the following caveats:

    * All inputs are assumed to be byte-based (as opposed to bit based)
    * Numerical inputs (those without any units) are assumed to be a
      number of bytes
    * Inputs with single letter units (k, M, G, etc) are assumed to be SI
      units (base-10). Set the `system` parameter to `bitmath.NIST` to
      change this behavior.
    * Inputs with an `i` character following the leading letter (Ki, Mi,
      Gi) are assumed to be NIST units (base 2)
    * Capitalization does not matter

    :param s: a string (or plain number) to interpret
    :param system: preferred unit system for single-letter prefixes,
        ``bitmath.SI`` (default) or ``bitmath.NIST``
    :raises ValueError: for non-string/non-number input or an
        unrecognizable unit
    """
    if not isinstance(s, (str, unicode)) and \
            not isinstance(s, numbers.Number):
        raise ValueError("parse_string_unsafe only accepts string/number inputs but a %s was given" %
                         type(s))

    ######################################################################
    # Is the input simple to parse? Just a number, or a number
    # masquerading as a string perhaps?

    # Test case: raw number input (easy!)
    if isinstance(s, numbers.Number):
        # It's just a number. Assume bytes
        return Byte(s)

    # Test case: a number pretending to be a string
    if isinstance(s, (str, unicode)):
        try:
            # Can we turn it directly into a number?
            return Byte(float(s))
        except ValueError:
            # Nope, this is not a plain number
            pass

    ######################################################################
    # At this point:
    # - the input is not just a number wrapped in a string
    # - nor is it just a plain number type
    #
    # We need to do some more digging around now to figure out exactly
    # what we were given and possibly normalize the input into a
    # format we can recognize.

    # First we'll separate the number and the unit.
    #
    # Get the index of the first alphabetic character
    try:
        index = list([i.isalpha() for i in s]).index(True)
    except ValueError:  # pragma: no cover
        # If there's no alphabetic characters we won't be able to .index(True)
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)

    # Split the string into the value and the unit
    val, unit = s[:index], s[index:]

    # Don't trust anything. We'll make sure the correct 'B' is in place.
    unit = unit.rstrip('Bb')
    unit += 'B'

    # At this point we can expect `unit` to be either:
    #
    # - 2 Characters (for SI, ex: kB or GB)
    # - 3 Characters (so NIST, ex: KiB, or GiB)
    #
    # A unit with any other number of chars is not a valid unit

    # SI
    if len(unit) == 2:
        # Has NIST parsing been requested?
        if system == NIST:
            # NIST units requested. Ensure the unit begins with a
            # capital letter and is followed by an 'i' character.
            unit = capitalize_first(unit)
            # Insert an 'i' char after the first letter
            _unit = list(unit)
            _unit.insert(1, 'i')
            # Collapse the list back into a 3 letter string
            unit = ''.join(_unit)
            unit_class = globals()[unit]
        else:
            # Default parsing (SI format)
            #
            # Edge-case checking: SI 'thousand' is a lower-case K
            if unit.startswith('K'):
                unit = unit.replace('K', 'k')
            elif not unit.startswith('k'):
                # Otherwise, ensure the first char is capitalized
                unit = capitalize_first(unit)

            # This is an SI-type unit.
            # NOTE: if the prefix is unrecognized, unit_class stays
            # unbound here; the try/except below converts that into
            # a ValueError.
            if unit[0] in SI_PREFIXES:
                unit_class = globals()[unit]
    # NIST
    elif len(unit) == 3:
        unit = capitalize_first(unit)

        # This is a NIST-type unit
        if unit[:2] in NIST_PREFIXES:
            unit_class = globals()[unit]
    else:
        # This is not a unit we recognize
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)

    # Deliberate UnboundLocalError probe: unit_class is only bound when
    # a known prefix matched above.
    try:
        unit_class
    except UnboundLocalError:
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)

    return unit_class(float(val))
######################################################################
# Context Managers
@contextlib.contextmanager
def format(fmt_str=None, plural=False, bestprefix=False):
    """Context manager for printing bitmath instances.

    ``fmt_str`` - a formatting mini-language compat formatting string. See
    the @properties (above) for a list of available items.

    ``plural`` - True enables printing instances with 's's if they're
    plural. False (default) prints them as singular (no trailing 's').

    ``bestprefix`` - True enables printing instances in their best
    human-readable representation. False, the default, prints instances
    using their current prefix unit.

    .. note:: ``bestprefix`` is accepted but not referenced by this
       implementation -- TODO confirm intended behavior.
    """
    if 'bitmath' not in globals():
        import bitmath

    # Save and override the module-level formatting state
    if plural:
        orig_fmt_plural = bitmath.format_plural
        bitmath.format_plural = True
    if fmt_str:
        orig_fmt_str = bitmath.format_string
        bitmath.format_string = fmt_str

    try:
        yield
    finally:
        # Restore the saved state even when the managed block raises;
        # previously an exception inside the ``with`` body would leak
        # the temporary format settings to the rest of the process.
        if plural:
            bitmath.format_plural = orig_fmt_plural
        if fmt_str:
            bitmath.format_string = orig_fmt_str
def cli_script_main(cli_args):
    """
    A command line interface to basic bitmath operations.

    :param list cli_args: argv-style argument list (excluding the program
        name), e.g. ``sys.argv[1:]``.
    :return: a list of bitmath instances, one conversion result per
        input size.
    """
    choices = ALL_UNIT_TYPES

    parser = argparse.ArgumentParser(
        description='Converts from one type of size to another.')
    parser.add_argument('--from-stdin', default=False, action='store_true',
                        help='Reads number from stdin rather than the cli')
    parser.add_argument(
        '-f', '--from', choices=choices, nargs=1,
        type=str, dest='fromunit', default=['Byte'],
        # Typo fix: was "Defaultes to Byte."
        help='Input type you are converting from. Defaults to Byte.')
    parser.add_argument(
        '-t', '--to', choices=choices, required=False, nargs=1, type=str,
        help=('Input type you are converting to. '
              'Attempts to detect best result if omitted.'), dest='tounit')
    parser.add_argument(
        'size', nargs='*', type=float,
        help='The number to convert.')

    args = parser.parse_args(cli_args)

    # Not sure how to cover this with tests, or if the functionality
    # will remain in this form long enough for it to make writing a
    # test worth the effort.
    if args.from_stdin:  # pragma: no cover
        args.size = [float(sys.stdin.readline()[:-1])]

    results = []

    for size in args.size:
        # Dynamically look up the source unit class (e.g. bitmath.MiB)
        # and instantiate it with the requested size
        instance = getattr(__import__(
            'bitmath', fromlist=['True']), args.fromunit[0])(size)

        # If we have a unit provided then use it
        if args.tounit:
            result = getattr(instance, args.tounit[0])
        # Otherwise use the best_prefix call
        else:
            result = instance.best_prefix()
        results.append(result)

    return results
def cli_script():  # pragma: no cover
    # Wrapper around cli_script_main so we can unittest the command
    # line functionality. Prints one conversion result per line.
    for result in cli_script_main(sys.argv[1:]):
        print(result)


if __name__ == '__main__':
    cli_script()
|
tbielawa/bitmath
|
bitmath/__init__.py
|
query_device_capacity
|
python
|
def query_device_capacity(device_fd):
if os_name() != 'posix':
raise NotImplementedError("'bitmath.query_device_capacity' is not supported on this platform: %s" % os_name())
s = os.stat(device_fd.name).st_mode
if not stat.S_ISBLK(s):
raise ValueError("The file descriptor provided is not of a device type")
# The keys of the ``ioctl_map`` dictionary correlate to possible
# values from the ``platform.system`` function.
ioctl_map = {
# ioctls for the "Linux" platform
"Linux": {
"request_params": [
# A list of parameters to calculate the block size.
#
# ( PARAM_NAME , FORMAT_CHAR , REQUEST_CODE )
("BLKGETSIZE64", "L", 0x80081272)
# Per <linux/fs.h>, the BLKGETSIZE64 request returns a
# 'u64' sized value. This is an unsigned 64 bit
# integer C type. This means to correctly "buffer" the
# result we need 64 bits, or 8 bytes, of memory.
#
# The struct module documentation include a reference
# chart relating formatting characters to native C
# Types. In this case, using the "native size", the
# table tells us:
#
# * Character 'L' - Unsigned Long C Type (u64) - Loads into a Python int type
#
# Confirm this character is right by running (on Linux):
#
# >>> import struct
# >>> print 8 == struct.calcsize('L')
#
# The result should be true as long as your kernel
# headers define BLKGETSIZE64 as a u64 type (please
# file a bug report at
# https://github.com/tbielawa/bitmath/issues/new if
# this does *not* work for you)
],
# func is how the final result is decided. Because the
# Linux BLKGETSIZE64 call returns the block device
# capacity in bytes as an integer value, no extra
# calculations are required. Simply return the value of
# BLKGETSIZE64.
"func": lambda x: x["BLKGETSIZE64"]
},
# ioctls for the "Darwin" (Mac OS X) platform
"Darwin": {
"request_params": [
# A list of parameters to calculate the block size.
#
# ( PARAM_NAME , FORMAT_CHAR , REQUEST_CODE )
("DKIOCGETBLOCKCOUNT", "L", 0x40086419),
# Per <sys/disk.h>: get media's block count - uint64_t
#
# As in the BLKGETSIZE64 example, an unsigned 64 bit
# integer will use the 'L' formatting character
("DKIOCGETBLOCKSIZE", "I", 0x40046418)
# Per <sys/disk.h>: get media's block size - uint32_t
#
# This request returns an unsigned 32 bit integer, or
# in other words: just a normal integer (or 'int' c
# type). That should require 4 bytes of space for
# buffering. According to the struct modules
# 'Formatting Characters' chart:
#
# * Character 'I' - Unsigned Int C Type (uint32_t) - Loads into a Python int type
],
# OS X doesn't have a direct equivalent to the Linux
# BLKGETSIZE64 request. Instead, we must request how many
# blocks (or "sectors") are on the disk, and the size (in
# bytes) of each block. Finally, multiply the two together
# to obtain capacity:
#
# n Block * y Byte
# capacity (bytes) = -------
# 1 Block
"func": lambda x: x["DKIOCGETBLOCKCOUNT"] * x["DKIOCGETBLOCKSIZE"]
# This expression simply accepts a dictionary ``x`` as a
# parameter, and then returns the result of multiplying
# the two named dictionary items together. In this case,
# that means multiplying ``DKIOCGETBLOCKCOUNT``, the total
# number of blocks, by ``DKIOCGETBLOCKSIZE``, the size of
# each block in bytes.
}
}
platform_params = ioctl_map[platform.system()]
results = {}
for req_name, fmt, request_code in platform_params['request_params']:
# Read the systems native size (in bytes) of this format type.
buffer_size = struct.calcsize(fmt)
# Construct a buffer to store the ioctl result in
buffer = ' ' * buffer_size
# This code has been ran on only a few test systems. If it's
# appropriate, maybe in the future we'll add try/except
# conditions for some possible errors. Really only for cases
# where it would add value to override the default exception
# message string.
buffer = fcntl.ioctl(device_fd.fileno(), request_code, buffer)
# Unpack the raw result from the ioctl call into a familiar
# python data type according to the ``fmt`` rules.
result = struct.unpack(fmt, buffer)[0]
# Add the new result to our collection
results[req_name] = result
return Byte(platform_params['func'](results))
|
Create bitmath instances of the capacity of a system block device
Make one or more ioctl request to query the capacity of a block
device. Perform any processing required to compute the final capacity
value. Return the device capacity in bytes as a :class:`bitmath.Byte`
instance.
Thanks to the following resources for help figuring this out Linux/Mac
ioctl's for querying block device sizes:
* http://stackoverflow.com/a/12925285/263969
* http://stackoverflow.com/a/9764508/263969
:param file device_fd: A ``file`` object of the device to query the
capacity of (as in ``get_device_capacity(open("/dev/sda"))``).
:return: a bitmath :class:`bitmath.Byte` instance equivalent to the
capacity of the target device in bytes.
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L1201-L1331
|
[
"def os_name():\n # makes unittesting platform specific code easier\n return os.name\n"
] |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright © 2014-2016 Tim Bielawa <timbielawa@gmail.com>
# See GitHub Contributors Graph for more information
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sub-license, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=bad-continuation,missing-docstring,invalid-name,line-too-long
"""Reference material:
The bitmath homepage is located at:
* http://bitmath.readthedocs.io/en/latest/
Prefixes for binary multiples:
http://physics.nist.gov/cuu/Units/binary.html
decimal and binary prefixes:
man 7 units (from the Linux Documentation Project 'man-pages' package)
BEFORE YOU GET HASTY WITH EXCLUDING CODE FROM COVERAGE: If you
absolutely need to skip code coverage because of a strange Python 2.x
vs 3.x thing, use the fancy environment substitution stuff from the
.coverage RC file. In review:
* If you *NEED* to skip a statement because of Python 2.x issues add the following::
# pragma: PY2X no cover
* If you *NEED* to skip a statement because of Python 3.x issues add the following::
# pragma: PY3X no cover
In this configuration, statements which are skipped in 2.x are still
covered in 3.x, and the reverse holds true for tests skipped in 3.x.
"""
from __future__ import print_function
import argparse
import contextlib
import fnmatch
import math
import numbers
import os
import os.path
import platform
import sys
# For device capacity reading in query_device_capacity(). Only supported
# on posix systems for now. Will be addressed in issue #52 on GitHub.
if os.name == 'posix':
import stat
import fcntl
import struct
# Names exported by ``from bitmath import *``; also the module's
# declared public API surface.
__all__ = ['Bit', 'Byte', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB',
           'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'Kib',
           'Mib', 'Gib', 'Tib', 'Pib', 'Eib', 'kb', 'Mb', 'Gb', 'Tb',
           'Pb', 'Eb', 'Zb', 'Yb', 'getsize', 'listdir', 'format',
           'format_string', 'format_plural', 'parse_string', 'parse_string_unsafe',
           'ALL_UNIT_TYPES', 'NIST', 'NIST_PREFIXES', 'NIST_STEPS',
           'SI', 'SI_PREFIXES', 'SI_STEPS']
# Python 3.x compat: provide the Python 2 builtin aliases removed in 3.
# Use a version_info tuple comparison instead of the original string
# comparison ``sys.version > '3'``, which orders lexicographically and
# would misbehave for future versions such as "10.0".
if sys.version_info[0] >= 3:
    long = int  # pragma: PY2X no cover
    unicode = str  # pragma: PY2X no cover
#: A list of all the valid prefix unit types. Mostly for reference,
#: also used by the CLI tool as valid types
ALL_UNIT_TYPES = ['Bit', 'Byte', 'kb', 'kB', 'Mb', 'MB', 'Gb', 'GB', 'Tb',
                  'TB', 'Pb', 'PB', 'Eb', 'EB', 'Zb', 'ZB', 'Yb',
                  'YB', 'Kib', 'KiB', 'Mib', 'MiB', 'Gib', 'GiB',
                  'Tib', 'TiB', 'Pib', 'PiB', 'Eib', 'EiB']

# #####################################################################
# Set up our module variables/constants

###################################
# Internal:

# Console repr(), ex: MiB(13.37), or kB(42.0)
_FORMAT_REPR = '{unit_singular}({value})'

# ##################################
# Exposed:

#: Constant for referring to the NIST (base-2) prefix system
NIST = int(2)
#: Constant for referring to the SI (base-10) prefix system
SI = int(10)

# ##################################

#: All of the SI prefixes
SI_PREFIXES = ['k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']

#: Byte values represented by each SI prefix unit
SI_STEPS = {
    'Bit': 1 / 8.0,
    'Byte': 1,
    'k': 1000,
    'M': 1000000,
    'G': 1000000000,
    'T': 1000000000000,
    'P': 1000000000000000,
    'E': 1000000000000000000,
    'Z': 1000000000000000000000,
    'Y': 1000000000000000000000000
}

#: All of the NIST prefixes
NIST_PREFIXES = ['Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei']

#: Byte values represented by each NIST prefix unit
NIST_STEPS = {
    'Bit': 1 / 8.0,
    'Byte': 1,
    'Ki': 1024,
    'Mi': 1048576,
    'Gi': 1073741824,
    'Ti': 1099511627776,
    'Pi': 1125899906842624,
    'Ei': 1152921504606846976
}

#: String representation, ex: ``13.37 MiB``, or ``42.0 kB``
format_string = "{value} {unit}"

#: Pluralization behavior: when True, string formatting may use the
#: plural unit name for values other than 1
format_plural = False
def os_name():
    """Return :data:`os.name`.

    Isolated into a helper so platform-specific code paths are easier
    to unit test (the helper can be monkeypatched).
    """
    return os.name
def capitalize_first(s):
    """Return a copy of ``s`` with ONLY its first character upper-cased.

    Unlike :meth:`str.capitalize`, the remainder of the string is left
    exactly as given (important for unit names like ``kB`` -> ``KB``).
    """
    return s[0].upper() + s[1:]
######################################################################
# Base class for everything else
class Bitmath(object):
    """The base class for all the other prefix classes"""

    # All the allowed input types for initialization values
    valid_types = (int, float, long)
def __init__(self, value=0, bytes=None, bits=None):
    """Instantiate with `value` by the unit, in plain bytes, or
    bits. Don't supply more than one keyword.

    The accepted combinations are:

    default behavior: initialize with value of 0
    only setting value: assert bytes is None and bits is None
    only setting bytes: assert value == 0 and bits is None
    only setting bits: assert value == 0 and bytes is None

    :raises ValueError: if more than one of value/bytes/bits is supplied
    """
    _raise = False
    # Validate that at most one initialization source was provided
    if (value == 0) and (bytes is None) and (bits is None):
        pass
    # Setting by bytes
    elif bytes is not None:
        if (value == 0) and (bits is None):
            pass
        else:
            _raise = True
    # setting by bits
    elif bits is not None:
        if (value == 0) and (bytes is None):
            pass
        else:
            _raise = True

    if _raise:
        raise ValueError("Only one parameter of: value, bytes, or bits is allowed")

    self._do_setup()
    if bytes:
        # We were provided with the fundamental base unit, no need
        # to normalize
        self._byte_value = bytes
        self._bit_value = bytes * 8.0
    elif bits:
        # We were *ALMOST* given the fundamental base
        # unit. Translate it into the fundamental unit then
        # normalize.
        self._byte_value = bits / 8.0
        self._bit_value = bits
    else:
        # We were given a value representative of this *prefix
        # unit*. We need to normalize it into the number of bytes
        # it represents.
        self._norm(value)

    # We have the fundamental unit figured out. Set the 'pretty' unit
    self._set_prefix_value()
def _set_prefix_value(self):
    # Cache the value expressed in this instance's own prefix unit
    self.prefix_value = self._to_prefix_value(self._byte_value)

def _to_prefix_value(self, value):
    """Return the number of bits/bytes as they would look like if we
    converted *to* this unit"""
    return value / float(self._unit_value)

def _setup(self):
    # Subclasses must override and return the 4-tuple
    # (base, power, name_singular, name_plural)
    raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")

def _do_setup(self):
    """Setup basic parameters for this class.

    `base` is the numeric base which when raised to `power` is equivalent
    to 1 unit of the corresponding prefix. I.e., base=2, power=10
    represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.

    Likewise, for the SI prefix classes `base` will be 10, and the `power`
    for the Kilobyte is 3.
    """
    (self._base, self._power, self._name_singular, self._name_plural) = self._setup()
    # e.g. 2 ** 10 == 1024 for a KiB
    self._unit_value = self._base ** self._power
def _norm(self, value):
    """Normalize the input value into the fundamental unit for this prefix
    type.

    :param number value: The input value to be normalized
    :raises ValueError: if the input value is not a type of real number
    """
    # Guard clause: reject anything that isn't a plain real number
    if not isinstance(value, self.valid_types):
        raise ValueError("Initialization value '%s' is of an invalid type: %s. "
                         "Must be one of %s" % (
                             value,
                             type(value),
                             ", ".join(str(x) for x in self.valid_types)))
    self._byte_value = value * self._unit_value
    self._bit_value = self._byte_value * 8.0
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
def system(self):
    """The system of units used to measure an instance"""
    if self._base == 2:
        return "NIST"
    if self._base == 10:
        return "SI"
    # Defensive branch: no supported prefix system uses another base,
    # but extra coverage beats a silent fall-through.
    raise ValueError("Instances mathematical base is an unsupported value: %s" % (
        str(self._base)))
@property
def unit(self):
    """The string that is this instances prefix unit name in agreement
    with this instance value (singular or plural). Following the
    convention that only 1 is singular. This will always be the singular
    form when :attr:`bitmath.format_plural` is ``False`` (default value).

    For example:

    >>> KiB(1).unit == 'KiB'
    >>> Byte(0).unit == 'Bytes'
    >>> Byte(1).unit == 'Byte'
    >>> Byte(1.1).unit == 'Bytes'
    >>> Gb(2).unit == 'Gbs'
    """
    global format_plural

    if self.prefix_value == 1:
        # If it's a '1', return it singular, no matter what
        return self._name_singular
    elif format_plural:
        # Pluralization requested
        return self._name_plural
    else:
        # Pluralization NOT requested, and the value is not 1
        return self._name_singular

@property
def unit_plural(self):
    """The string that is an instances prefix unit name in the plural
    form.

    For example:

    >>> KiB(1).unit_plural == 'KiB'
    >>> Byte(1024).unit_plural == 'Bytes'
    >>> Gb(1).unit_plural == 'Gb'
    """
    return self._name_plural

@property
def unit_singular(self):
    """The string that is an instances prefix unit name in the singular
    form.

    For example:

    >>> KiB(1).unit_singular == 'KiB'
    >>> Byte(1024).unit == 'B'
    >>> Gb(1).unit_singular == 'Gb'
    """
    return self._name_singular

#: The "prefix" value of an instance (the value expressed in this unit)
value = property(lambda s: s.prefix_value)
@classmethod
def from_other(cls, item):
    """Factory: convert any bitmath instance into an instance of ``cls``.

    Being a class method, this may be called on any bitmath class
    without instantiating it first.

    *Implicit Parameter:*

    * ``cls`` A bitmath class, implicitly set to the class of the
      instance object it is called on

    *User Supplied Parameter:*

    * ``item`` A :class:`bitmath.Bitmath` subclass instance

    *Example:*

    >>> import bitmath
    >>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
    >>> print kib
    KiB(1024.0)
    """
    # Guard clause: only other bitmath instances can be converted
    if not isinstance(item, Bitmath):
        raise ValueError("The provided items must be a valid bitmath class: %s" %
                         str(item.__class__))
    # Bits are the common currency between all prefix classes
    return cls(bits=item.bits)
######################################################################
# The following implement the Python datamodel customization methods
#
# Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization

def __repr__(self):
    """Representation of this object as you would expect to see in an
    interpreter, e.g. ``MiB(13.37)``"""
    global _FORMAT_REPR
    return self.format(_FORMAT_REPR)

def __str__(self):
    """String representation of this object, controlled by the
    module-level ``format_string`` template"""
    global format_string
    return self.format(format_string)
def format(self, fmt):
    """Render this instance using a user supplied format string.

    ``fmt`` is a str.format()-style template; the available
    substitution names mirror the instance properties (``value``,
    ``unit``, ``bits``, ``bytes``, ``base``, ``power``, ...).
    """
    return fmt.format(
        base=self.base,
        bin=self.bin,
        binary=self.binary,
        bits=self.bits,
        bytes=self.bytes,
        power=self.power,
        system=self.system,
        unit=self.unit,
        unit_plural=self.unit_plural,
        unit_singular=self.unit_singular,
        value=self.value)
##################################################################
# Guess the best human-readable prefix unit for representation
##################################################################
def best_prefix(self, system=None):
    """Optional parameter, `system`, allows you to prefer NIST or SI in
    the results. By default, the current system is used (Bit/Byte default
    to NIST).

    Logic discussion/notes:

    Base-case, does it need converting?

    If the instance is less than one Byte, return the instance as a Bit
    instance.

    Else, begin by recording the unit system the instance is defined
    by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.

    If the instance is not already a ``Byte`` instance, convert it to one.

    NIST units step up by powers of 1024, SI units step up by powers of
    1000.

    Take integer value of the log(base=STEP_POWER) of the instance's byte
    value. E.g.:

        >>> int(math.log(Gb(100).bytes, 1000))
        3

    This will return a value >= 0. The following determines the 'best
    prefix unit' for representation:

    * result == 0, best represented as a Byte
    * result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
    * 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
    """
    # Use absolute value so we don't return Bit's for *everything*
    # less than Byte(1). From github issue #55
    if abs(self) < Byte(1):
        return Bit.from_other(self)
    else:
        if type(self) is Byte:  # pylint: disable=unidiomatic-typecheck
            _inst = self
        else:
            # Normalize to bytes so the log math below is uniform.
            _inst = Byte.from_other(self)
        # Which table to consult? Was a preferred system provided?
        if system is None:
            # No preference. Use existing system
            if self.system == 'NIST':
                _STEPS = NIST_PREFIXES
                _BASE = 1024
            elif self.system == 'SI':
                _STEPS = SI_PREFIXES
                _BASE = 1000
            # Anything else would have raised by now
        else:
            # Preferred system provided.
            if system == NIST:
                _STEPS = NIST_PREFIXES
                _BASE = 1024
            elif system == SI:
                _STEPS = SI_PREFIXES
                _BASE = 1000
            else:
                raise ValueError("Invalid value given for 'system' parameter."
                                 " Must be one of NIST or SI")
        # Index of the string of the best prefix in the STEPS list
        _index = int(math.log(abs(_inst.bytes), _BASE))
        # Recall that the log() function returns >= 0. This doesn't
        # map to the STEPS list 1:1. That is to say, 0 is handled with
        # special care. So if the _index is 1, we actually want item 0
        # in the list.
        if _index == 0:
            # Already a Byte() type, so return it.
            return _inst
        elif _index >= len(_STEPS):
            # This is a really big number. Use the biggest prefix we've got
            _best_prefix = _STEPS[-1]
        elif 0 < _index < len(_STEPS):
            # There is an appropriate prefix unit to represent this
            _best_prefix = _STEPS[_index - 1]
        # Dispatch by name: e.g. 'Ki' -> self.to_KiB()
        _conversion_method = getattr(
            self,
            'to_%sB' % _best_prefix)
        return _conversion_method()
##################################################################
# Unit conversion methods. Each ``to_X()`` returns a *new* instance
# of unit X carrying the same number of bits as this instance; the
# matching read-only properties are shorthand for those calls.
def to_Bit(self):
    return Bit(self._bit_value)

def to_Byte(self):
    return Byte(self._byte_value / float(NIST_STEPS['Byte']))

# Properties
Bit = property(lambda s: s.to_Bit())
Byte = property(lambda s: s.to_Byte())

##################################################################
# Kilo-scale units (NIST Ki = 2**10, SI k = 10**3)
def to_KiB(self):
    return KiB(bits=self._bit_value)

def to_Kib(self):
    return Kib(bits=self._bit_value)

def to_kB(self):
    return kB(bits=self._bit_value)

def to_kb(self):
    return kb(bits=self._bit_value)

# Properties
KiB = property(lambda s: s.to_KiB())
Kib = property(lambda s: s.to_Kib())
kB = property(lambda s: s.to_kB())
kb = property(lambda s: s.to_kb())

##################################################################
# Mega-scale units
def to_MiB(self):
    return MiB(bits=self._bit_value)

def to_Mib(self):
    return Mib(bits=self._bit_value)

def to_MB(self):
    return MB(bits=self._bit_value)

def to_Mb(self):
    return Mb(bits=self._bit_value)

# Properties
MiB = property(lambda s: s.to_MiB())
Mib = property(lambda s: s.to_Mib())
MB = property(lambda s: s.to_MB())
Mb = property(lambda s: s.to_Mb())

##################################################################
# Giga-scale units
def to_GiB(self):
    return GiB(bits=self._bit_value)

def to_Gib(self):
    return Gib(bits=self._bit_value)

def to_GB(self):
    return GB(bits=self._bit_value)

def to_Gb(self):
    return Gb(bits=self._bit_value)

# Properties
GiB = property(lambda s: s.to_GiB())
Gib = property(lambda s: s.to_Gib())
GB = property(lambda s: s.to_GB())
Gb = property(lambda s: s.to_Gb())

##################################################################
# Tera-scale units
def to_TiB(self):
    return TiB(bits=self._bit_value)

def to_Tib(self):
    return Tib(bits=self._bit_value)

def to_TB(self):
    return TB(bits=self._bit_value)

def to_Tb(self):
    return Tb(bits=self._bit_value)

# Properties
TiB = property(lambda s: s.to_TiB())
Tib = property(lambda s: s.to_Tib())
TB = property(lambda s: s.to_TB())
Tb = property(lambda s: s.to_Tb())

##################################################################
# Peta-scale units
def to_PiB(self):
    return PiB(bits=self._bit_value)

def to_Pib(self):
    return Pib(bits=self._bit_value)

def to_PB(self):
    return PB(bits=self._bit_value)

def to_Pb(self):
    return Pb(bits=self._bit_value)

# Properties
PiB = property(lambda s: s.to_PiB())
Pib = property(lambda s: s.to_Pib())
PB = property(lambda s: s.to_PB())
Pb = property(lambda s: s.to_Pb())

##################################################################
# Exa-scale units
def to_EiB(self):
    return EiB(bits=self._bit_value)

def to_Eib(self):
    return Eib(bits=self._bit_value)

def to_EB(self):
    return EB(bits=self._bit_value)

def to_Eb(self):
    return Eb(bits=self._bit_value)

# Properties
EiB = property(lambda s: s.to_EiB())
Eib = property(lambda s: s.to_Eib())
EB = property(lambda s: s.to_EB())
Eb = property(lambda s: s.to_Eb())

##################################################################
# The SI units go beyond the NIST units. They also have the Zetta
# and Yotta prefixes.
def to_ZB(self):
    return ZB(bits=self._bit_value)

def to_Zb(self):
    return Zb(bits=self._bit_value)

# Properties
ZB = property(lambda s: s.to_ZB())
Zb = property(lambda s: s.to_Zb())

##################################################################
def to_YB(self):
    return YB(bits=self._bit_value)

def to_Yb(self):
    return Yb(bits=self._bit_value)

#: A new object representing this instance as a Yottabyte
YB = property(lambda s: s.to_YB())
Yb = property(lambda s: s.to_Yb())
##################################################################
# Rich comparison operations
##################################################################
# Plain numbers are compared against this instance's *prefix* value
# (e.g. KiB(1) == 1); other bitmath instances are compared by their
# absolute byte counts, so KiB(1) == kB(1.024).
def __lt__(self, other):
    if isinstance(other, numbers.Number):
        return self.prefix_value < other
    else:
        return self._byte_value < other.bytes

def __le__(self, other):
    if isinstance(other, numbers.Number):
        return self.prefix_value <= other
    else:
        return self._byte_value <= other.bytes

def __eq__(self, other):
    if isinstance(other, numbers.Number):
        return self.prefix_value == other
    else:
        return self._byte_value == other.bytes

def __ne__(self, other):
    if isinstance(other, numbers.Number):
        return self.prefix_value != other
    else:
        return self._byte_value != other.bytes

def __gt__(self, other):
    if isinstance(other, numbers.Number):
        return self.prefix_value > other
    else:
        return self._byte_value > other.bytes

def __ge__(self, other):
    if isinstance(other, numbers.Number):
        return self.prefix_value >= other
    else:
        return self._byte_value >= other.bytes
##################################################################
# Basic math operations
##################################################################
# Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
"""These methods are called to implement the binary arithmetic
operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
instance, to evaluate the expression x + y, where x is an instance of
a class that has an __add__() method, x.__add__(y) is called. The
__divmod__() method should be the equivalent to using __floordiv__()
and __mod__(); it should not be related to __truediv__() (described
below). Note that __pow__() should be defined to accept an optional
third argument if the ternary version of the built-in pow() function
is to be supported.object.__complex__(self)
"""
def __add__(self, other):
    """Addition. Supported operations with result types:

    - bm + bm = bm
    - bm + num = num
    - num + bm = num (see radd)
    """
    if isinstance(other, numbers.Number):
        # bm + num: result is a plain number in this instance's prefix unit
        return other + self.value
    else:
        # bm + bm: result keeps the left operand's type
        total_bytes = self._byte_value + other.bytes
        return (type(self))(bytes=total_bytes)

def __sub__(self, other):
    """Subtraction: Supported operations with result types:

    - bm - bm = bm
    - bm - num = num
    - num - bm = num (see rsub)
    """
    if isinstance(other, numbers.Number):
        # bm - num
        return self.value - other
    else:
        # bm - bm
        total_bytes = self._byte_value - other.bytes
        return (type(self))(bytes=total_bytes)

def __mul__(self, other):
    """Multiplication: Supported operations with result types:

    - bm1 * bm2 = bm1
    - bm * num = bm
    - num * bm = num (see rmul)
    """
    if isinstance(other, numbers.Number):
        # bm * num: scale this instance
        result = self._byte_value * other
        return (type(self))(bytes=result)
    else:
        # bm1 * bm2: multiply the two absolute magnitudes
        _other = other.value * other.base ** other.power
        _self = self.prefix_value * self._base ** self._power
        return (type(self))(bytes=_other * _self)
"""The division operator (/) is implemented by these methods. The
__truediv__() method is used when __future__.division is in effect,
otherwise __div__() is used. If only one of these two methods is
defined, the object will not support division in the alternate
context; TypeError will be raised instead."""
def __div__(self, other):
    """Division: Supported operations with result types:

    - bm1 / bm2 = num
    - bm / num = bm
    - num / bm = num (see rdiv)
    """
    if isinstance(other, numbers.Number):
        # bm / num: scale down, result keeps this type
        result = self._byte_value / other
        return (type(self))(bytes=result)
    else:
        # bm1 / bm2: a unitless ratio of byte counts
        return self._byte_value / float(other.bytes)

def __truediv__(self, other):
    # Python 3 (and ``from __future__ import division``) route through
    # __truediv__; delegate to the shared __div__ implementation.
    return self.__div__(other)
# def __floordiv__(self, other):
# return NotImplemented
# def __mod__(self, other):
# return NotImplemented
# def __divmod__(self, other):
# return NotImplemented
# def __pow__(self, other, modulo=None):
# return NotImplemented
##################################################################
"""These methods are called to implement the binary arithmetic
operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
reflected (swapped) operands. These functions are only called if the
left operand does not support the corresponding operation and the
operands are of different types. [2] For instance, to evaluate the
expression x - y, where y is an instance of a class that has an
__rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
NotImplemented.
These are the add/sub/mul/div methods for syntax where a number type
is given for the LTYPE and a bitmath object is given for the
RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
"""
# Reflected operators: used when the *left* operand is a plain number,
# e.g. ``3 * MiB(3)`` or ``10 / GB(42)``.
def __radd__(self, other):
    # num + bm = num
    return other + self.value

def __rsub__(self, other):
    # num - bm = num
    return other - self.value

def __rmul__(self, other):
    # num * bm = bm (multiplication commutes; reuse __mul__)
    return self * other

def __rdiv__(self, other):
    # num / bm = num
    return other / float(self.value)

def __rtruediv__(self, other):
    # num / bm = num (Python 3 spelling of __rdiv__)
    return other / float(self.value)
"""Called to implement the built-in functions complex(), int(),
long(), and float(). Should return a value of the appropriate type.
If one of those methods does not support the operation with the
supplied arguments, it should return NotImplemented.
For bitmath purposes, these methods return the int/long/float
equivalent of the this instances prefix Unix value. That is to say:
- int(KiB(3.336)) would return 3
- long(KiB(3.336)) would return 3L
- float(KiB(3.336)) would return 3.336
"""
def __int__(self):
    """Return this instances prefix unit as an integer (truncated)"""
    return int(self.prefix_value)

def __long__(self):
    """Return this instances prefix unit as a long integer (Python 2 only;
    on Python 3 ``long`` is aliased to ``int`` at module load)"""
    return long(self.prefix_value)  # pragma: PY3X no cover

def __float__(self):
    """Return this instances prefix unit as a floating point number"""
    return float(self.prefix_value)
##################################################################
# Bitwise operations
#
# All bitwise operators act on the integer *bit* count of this
# instance combined with a plain integer ``other``, and return a new
# instance of the same type.
##################################################################
def __lshift__(self, other):
    """Left shift, ex: 100 << 2

    A left shift by n bits is equivalent to multiplication by pow(2,
    n). A long integer is returned if the result exceeds the range of
    plain integers."""
    shifted = int(self.bits) << other
    return type(self)(bits=shifted)

def __rshift__(self, other):
    """Right shift, ex: 100 >> 2

    A right shift by n bits is equivalent to division by pow(2, n)."""
    shifted = int(self.bits) >> other
    return type(self)(bits=shifted)

def __and__(self, other):
    """Bitwise and, ex: 100 & 2

    Each bit of the output is 1 if the corresponding bit of x AND of
    y is 1, otherwise it's 0."""
    # (fixed: the original docstring opened with a stray fourth quote)
    anded = int(self.bits) & other
    return type(self)(bits=anded)

def __xor__(self, other):
    """Bitwise xor, ex: 100 ^ 2

    Does a "bitwise exclusive or". Each bit of the output is the same
    as the corresponding bit in x if that bit in y is 0, and it's the
    complement of the bit in x if that bit in y is 1."""
    xored = int(self.bits) ^ other
    return type(self)(bits=xored)

def __or__(self, other):
    """Bitwise or, ex: 100 | 2

    Does a "bitwise or". Each bit of the output is 0 if the corresponding
    bit of x AND of y is 0, otherwise it's 1."""
    # Renamed local from ``ord`` -- it shadowed the ``ord()`` builtin.
    ored = int(self.bits) | other
    return type(self)(bits=ored)
##################################################################
def __neg__(self):
    """The negative version of this instance"""
    # NOTE(review): this always yields a negative result (uses -abs),
    # even when the instance is already negative -- so --x != x here.
    # Presumably intentional ("the negative version"); confirm.
    return (type(self))(-abs(self.prefix_value))

def __pos__(self):
    """The positive (absolute) version of this instance"""
    return (type(self))(abs(self.prefix_value))

def __abs__(self):
    """The absolute value of this instance"""
    return (type(self))(abs(self.prefix_value))

# def __invert__(self):
#     """Called to implement the unary arithmetic operations (-, +, abs()
#     and ~)."""
#     return NotImplemented
######################################################################
# First, the bytes...
class Byte(Bitmath):
    """Byte based types fundamentally operate on self._bit_value"""

    def _setup(self):
        # (base, power, singular name, plural name)
        return (2, 0, 'Byte', 'Bytes')


######################################################################
# NIST Prefixes for Byte based types (base 2, powers of 1024)
class KiB(Byte):
    def _setup(self):
        return (2, 10, 'KiB', 'KiBs')


# French (octet) alias
Kio = KiB


class MiB(Byte):
    def _setup(self):
        return (2, 20, 'MiB', 'MiBs')


Mio = MiB


class GiB(Byte):
    def _setup(self):
        return (2, 30, 'GiB', 'GiBs')


Gio = GiB


class TiB(Byte):
    def _setup(self):
        return (2, 40, 'TiB', 'TiBs')


Tio = TiB


class PiB(Byte):
    def _setup(self):
        return (2, 50, 'PiB', 'PiBs')


Pio = PiB


class EiB(Byte):
    def _setup(self):
        return (2, 60, 'EiB', 'EiBs')


Eio = EiB


######################################################################
# SI Prefixes for Byte based types (base 10, powers of 1000)
class kB(Byte):
    def _setup(self):
        return (10, 3, 'kB', 'kBs')


ko = kB


class MB(Byte):
    def _setup(self):
        return (10, 6, 'MB', 'MBs')


Mo = MB


class GB(Byte):
    def _setup(self):
        return (10, 9, 'GB', 'GBs')


Go = GB


class TB(Byte):
    def _setup(self):
        return (10, 12, 'TB', 'TBs')


To = TB


class PB(Byte):
    def _setup(self):
        return (10, 15, 'PB', 'PBs')


Po = PB


class EB(Byte):
    def _setup(self):
        return (10, 18, 'EB', 'EBs')


Eo = EB


class ZB(Byte):
    def _setup(self):
        return (10, 21, 'ZB', 'ZBs')


Zo = ZB


class YB(Byte):
    def _setup(self):
        return (10, 24, 'YB', 'YBs')


Yo = YB
######################################################################
# And now the bit types
class Bit(Bitmath):
    """Bit based types fundamentally operate on self._bit_value"""

    def _set_prefix_value(self):
        # Bit types derive their prefix value from the bit count rather
        # than the byte count (contrast with byte-based types).
        self.prefix_value = self._to_prefix_value(self._bit_value)

    def _setup(self):
        # (base, power, singular name, plural name)
        return (2, 0, 'Bit', 'Bits')

    def _norm(self, value):
        """Normalize the input value into the fundamental unit for this prefix
        type"""
        self._bit_value = value * self._unit_value
        # 8 bits per byte
        self._byte_value = self._bit_value / 8.0
######################################################################
# NIST Prefixes for Bit based types (base 2, powers of 1024)
class Kib(Bit):
    def _setup(self):
        return (2, 10, 'Kib', 'Kibs')


class Mib(Bit):
    def _setup(self):
        return (2, 20, 'Mib', 'Mibs')


class Gib(Bit):
    def _setup(self):
        return (2, 30, 'Gib', 'Gibs')


class Tib(Bit):
    def _setup(self):
        return (2, 40, 'Tib', 'Tibs')


class Pib(Bit):
    def _setup(self):
        return (2, 50, 'Pib', 'Pibs')


class Eib(Bit):
    def _setup(self):
        return (2, 60, 'Eib', 'Eibs')


######################################################################
# SI Prefixes for Bit based types (base 10, powers of 1000)
class kb(Bit):
    def _setup(self):
        return (10, 3, 'kb', 'kbs')


class Mb(Bit):
    def _setup(self):
        return (10, 6, 'Mb', 'Mbs')


class Gb(Bit):
    def _setup(self):
        return (10, 9, 'Gb', 'Gbs')


class Tb(Bit):
    def _setup(self):
        return (10, 12, 'Tb', 'Tbs')


class Pb(Bit):
    def _setup(self):
        return (10, 15, 'Pb', 'Pbs')


class Eb(Bit):
    def _setup(self):
        return (10, 18, 'Eb', 'Ebs')


class Zb(Bit):
    def _setup(self):
        return (10, 21, 'Zb', 'Zbs')


class Yb(Bit):
    def _setup(self):
        return (10, 24, 'Yb', 'Ybs')
######################################################################
# Utility functions
def best_prefix(bytes, system=NIST):
    """Return a bitmath instance representing the best human-readable
    representation of the number of bytes given by ``bytes``.

    ``bytes`` may be a plain number (a count of bytes) or any bitmath
    instance.  Select a preferred unit system with ``system``, one of
    ``bitmath.NIST`` (default) or ``bitmath.SI``.

    Shortcut for::

        >>> import bitmath
        >>> best = bitmath.Byte(12345).best_prefix()
    """
    # Normalize either input form down to a raw byte count.
    _value = bytes.bytes if isinstance(bytes, Bitmath) else bytes
    return Byte(_value).best_prefix(system=system)
def getsize(path, bestprefix=True, system=NIST):
    """Return the apparent size of the file at `path` as a bitmath object.

    With ``bestprefix`` True (the default) the result is converted to
    its best human-readable prefix unit under ``system`` — either
    ``bitmath.NIST`` (default) or ``bitmath.SI``.  With ``bestprefix``
    False a plain ``bitmath.Byte`` instance is returned.
    """
    # Resolve symlinks before stat'ing so we measure the real file.
    size = Byte(os.path.getsize(os.path.realpath(path)))
    if not bestprefix:
        return size
    return size.best_prefix(system=system)
def listdir(search_base, followlinks=False, filter='*',
            relpath=False, bestprefix=False, system=NIST):
    """Generator recursing the directory tree under `search_base`,
    yielding 2-tuples of:

    * The absolute/relative path to a discovered file
    * A bitmath instance representing the "apparent size" of the file

    - `search_base` - The directory to begin walking down.
    - `followlinks` - Whether or not to follow symbolic links to directories
    - `filter` - A glob (see :py:mod:`fnmatch`) to filter results with
      (default: ``*``, everything)
    - `relpath` - ``True`` to return the relative path from `pwd` or
      ``False`` (default) to return the fully qualified path
    - ``bestprefix`` - set to ``False`` to get ``bitmath.Byte``
      instances back instead.
    - `system` - Provide a preferred unit system by setting `system`
      to either ``bitmath.NIST`` (default) or ``bitmath.SI``.

    .. note:: This function does NOT yield tuples for directory entities.

    .. note:: Symlinks to **files** are followed automatically
    """
    for root, _dirs, names in os.walk(search_base, followlinks=followlinks):
        for fname in fnmatch.filter(names, filter):
            _path = os.path.join(root, fname)
            if relpath:
                # RELATIVE path
                _result_path = os.path.relpath(_path, '.')
            else:
                # REAL path
                _result_path = os.path.realpath(_path)
            # Without followlinks, skip symlinked entries (and anything
            # resolving to a directory) just as the original did.
            if not followlinks and (os.path.isdir(_path) or os.path.islink(_path)):
                continue
            yield (_result_path, getsize(_path, bestprefix=bestprefix, system=system))
def parse_string(s):
    """Parse a string with units (e.g. ``'1024 KiB'``) into an
    equivalent bitmath object.

    String inputs may include whitespace characters between the value
    and the unit.

    :raises ValueError: for non-string input, a missing or unknown
        unit, or a non-numeric value part.
    """
    # Strings only please
    if not isinstance(s, (str, unicode)):
        raise ValueError("parse_string only accepts string inputs but a %s was given" %
                         type(s))
    # Index of the first alphabetic character marks the value/unit split.
    try:
        index = list([i.isalpha() for i in s]).index(True)
    except ValueError:
        # If there's no alphabetic characters we won't be able to .index(True)
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
    # split the string into the value and the unit
    val, unit = s[:index], s[index:]
    # Map the unit onto a bitmath class in this module's namespace.
    if unit == "b":
        unit_class = Bit
    elif unit == "B":
        unit_class = Byte
    else:
        if not (hasattr(sys.modules[__name__], unit) and isinstance(getattr(sys.modules[__name__], unit), type)):
            raise ValueError("The unit %s is not a valid bitmath unit" % unit)
        unit_class = globals()[unit]
    # float() already raises ValueError on a bad numeric part; the
    # original wrapped it in a redundant try/raise which is removed.
    val = float(val)
    try:
        return unit_class(val)
    except Exception:  # pragma: no cover
        # Narrowed from a bare ``except:`` so KeyboardInterrupt et al.
        # are not swallowed into a ValueError.
        raise ValueError("Can't parse string %s into a bitmath object" % s)
def parse_string_unsafe(s, system=SI):
    """Attempt to parse a string with ambiguous units and try to make a
    bitmath object out of it.

    This may produce inaccurate results if parsing shell output. For
    example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
    ~= 2.666 KiB. See the documentation for all of the important details.

    Note the following caveats:

    * All inputs are assumed to be byte-based (as opposed to bit based)
    * Numerical inputs (those without any units) are assumed to be a
      number of bytes
    * Inputs with single letter units (k, M, G, etc) are assumed to be SI
      units (base-10). Set the `system` parameter to `bitmath.NIST` to
      change this behavior.
    * Inputs with an `i` character following the leading letter (Ki, Mi,
      Gi) are assumed to be NIST units (base 2)
    * Capitalization does not matter
    """
    if not isinstance(s, (str, unicode)) and \
            not isinstance(s, numbers.Number):
        raise ValueError("parse_string_unsafe only accepts string/number inputs but a %s was given" %
                         type(s))
    ######################################################################
    # Is the input simple to parse? Just a number, or a number
    # masquerading as a string perhaps?
    # Test case: raw number input (easy!)
    if isinstance(s, numbers.Number):
        # It's just a number. Assume bytes
        return Byte(s)
    # Test case: a number pretending to be a string
    if isinstance(s, (str, unicode)):
        try:
            # Can we turn it directly into a number?
            return Byte(float(s))
        except ValueError:
            # Nope, this is not a plain number
            pass
    ######################################################################
    # At this point:
    # - the input is also not just a number wrapped in a string
    # - nor is is just a plain number type
    #
    # We need to do some more digging around now to figure out exactly
    # what we were given and possibly normalize the input into a
    # format we can recognize.
    # First we'll separate the number and the unit.
    #
    # Get the index of the first alphabetic character
    try:
        index = list([i.isalpha() for i in s]).index(True)
    except ValueError:  # pragma: no cover
        # If there's no alphabetic characters we won't be able to .index(True)
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
    # Split the string into the value and the unit
    val, unit = s[:index], s[index:]
    # Don't trust anything. We'll make sure the correct 'b' is in place.
    # NOTE(review): rstrip removes *all* trailing b/B characters, not
    # just one -- e.g. 'KBB' and 'KB' normalize identically.
    unit = unit.rstrip('Bb')
    unit += 'B'
    # At this point we can expect `unit` to be either:
    #
    # - 2 Characters (for SI, ex: kB or GB)
    # - 3 Caracters (so NIST, ex: KiB, or GiB)
    #
    # A unit with any other number of chars is not a valid unit
    # SI
    if len(unit) == 2:
        # Has NIST parsing been requested?
        if system == NIST:
            # NIST units requested. Ensure the unit begins with a
            # capital letter and is followed by an 'i' character.
            # (capitalize_first is a module helper defined elsewhere)
            unit = capitalize_first(unit)
            # Insert an 'i' char after the first letter
            _unit = list(unit)
            _unit.insert(1, 'i')
            # Collapse the list back into a 3 letter string
            unit = ''.join(_unit)
            unit_class = globals()[unit]
        else:
            # Default parsing (SI format)
            #
            # Edge-case checking: SI 'thousand' is a lower-case K
            if unit.startswith('K'):
                unit = unit.replace('K', 'k')
            elif not unit.startswith('k'):
                # Otherwise, ensure the first char is capitalized
                unit = capitalize_first(unit)
            # This is an SI-type unit
            if unit[0] in SI_PREFIXES:
                unit_class = globals()[unit]
    # NIST
    elif len(unit) == 3:
        unit = capitalize_first(unit)
        # This is a NIST-type unit
        if unit[:2] in NIST_PREFIXES:
            unit_class = globals()[unit]
    else:
        # This is not a unit we recognize
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    # If no branch above assigned unit_class, the unit was unknown.
    try:
        unit_class
    except UnboundLocalError:
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    return unit_class(float(val))
######################################################################
# Context Managers
@contextlib.contextmanager
def format(fmt_str=None, plural=False, bestprefix=False):
    """Context manager for printing bitmath instances.

    ``fmt_str`` - a formatting mini-language compat formatting string. See
    the @properties (above) for a list of available items.

    ``plural`` - True enables printing instances with 's's if they're
    plural. False (default) prints them as singular (no trailing 's').

    ``bestprefix`` - True enables printing instances in their best
    human-readable representation. False, the default, prints instances
    using their current prefix unit.
    (NOTE(review): ``bestprefix`` is accepted but never used in this
    body -- confirm whether it is implemented elsewhere.)
    """
    if 'bitmath' not in globals():
        import bitmath
    if plural:
        orig_fmt_plural = bitmath.format_plural
        bitmath.format_plural = True
    if fmt_str:
        orig_fmt_str = bitmath.format_string
        bitmath.format_string = fmt_str
    try:
        yield
    finally:
        # Restore module-level formatting state even if the with-body
        # raised. The original skipped restoration on exception, leaving
        # the global format settings permanently changed.
        if plural:
            bitmath.format_plural = orig_fmt_plural
        if fmt_str:
            bitmath.format_string = orig_fmt_str
def cli_script_main(cli_args):
    """
    A command line interface to basic bitmath operations.

    Parses ``cli_args`` (a list, usually ``sys.argv[1:]``) and returns
    a list of bitmath instances, one per input size, converted to the
    requested unit (or to their best prefix when ``--to`` is omitted).
    """
    choices = ALL_UNIT_TYPES
    parser = argparse.ArgumentParser(
        description='Converts from one type of size to another.')
    parser.add_argument('--from-stdin', default=False, action='store_true',
                        help='Reads number from stdin rather than the cli')
    parser.add_argument(
        '-f', '--from', choices=choices, nargs=1,
        type=str, dest='fromunit', default=['Byte'],
        help='Input type you are converting from. Defaultes to Byte.')
    parser.add_argument(
        '-t', '--to', choices=choices, required=False, nargs=1, type=str,
        help=('Input type you are converting to. '
              'Attempts to detect best result if omitted.'), dest='tounit')
    parser.add_argument(
        'size', nargs='*', type=float,
        help='The number to convert.')
    args = parser.parse_args(cli_args)
    # Not sure how to cover this with tests, or if the functionality
    # will remain in this form long enough for it to make writing a
    # test worth the effort.
    if args.from_stdin:  # pragma: no cover
        # Read a single size from stdin, stripping the trailing newline
        args.size = [float(sys.stdin.readline()[:-1])]
    results = []
    for size in args.size:
        # Look up the source unit class by name on the bitmath module
        instance = getattr(__import__(
            'bitmath', fromlist=['True']), args.fromunit[0])(size)
        # If we have a unit provided then use it
        if args.tounit:
            result = getattr(instance, args.tounit[0])
        # Otherwise use the best_prefix call
        else:
            result = instance.best_prefix()
        results.append(result)
    return results


def cli_script():  # pragma: no cover
    # Wrapper around cli_script_main so we can unittest the command
    # line functionality
    for result in cli_script_main(sys.argv[1:]):
        print(result)


if __name__ == '__main__':
    cli_script()
|
tbielawa/bitmath
|
bitmath/__init__.py
|
getsize
|
python
|
def getsize(path, bestprefix=True, system=NIST):
_path = os.path.realpath(path)
size_bytes = os.path.getsize(_path)
if bestprefix:
return Byte(size_bytes).best_prefix(system=system)
else:
return Byte(size_bytes)
|
Return a bitmath instance in the best human-readable representation
of the file size at `path`. Optionally, provide a preferred unit
system by setting `system` to either `bitmath.NIST` (default) or
`bitmath.SI`.
Optionally, set ``bestprefix`` to ``False`` to get ``bitmath.Byte``
instances back.
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L1334-L1348
|
[
" def best_prefix(self, system=None):\n \"\"\"Optional parameter, `system`, allows you to prefer NIST or SI in\nthe results. By default, the current system is used (Bit/Byte default\nto NIST).\n\nLogic discussion/notes:\n\nBase-case, does it need converting?\n\nIf the instance is less than one Byte, return the instance as a Bit\ninstance.\n\nElse, begin by recording the unit system the instance is defined\nby. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.\n\nIf the instance is not already a ``Byte`` instance, convert it to one.\n\nNIST units step up by powers of 1024, SI units step up by powers of\n1000.\n\nTake integer value of the log(base=STEP_POWER) of the instance's byte\nvalue. E.g.:\n\n >>> int(math.log(Gb(100).bytes, 1000))\n 3\n\nThis will return a value >= 0. The following determines the 'best\nprefix unit' for representation:\n\n* result == 0, best represented as a Byte\n* result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte\n* 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]\n\n \"\"\"\n\n # Use absolute value so we don't return Bit's for *everything*\n # less than Byte(1). From github issue #55\n if abs(self) < Byte(1):\n return Bit.from_other(self)\n else:\n if type(self) is Byte: # pylint: disable=unidiomatic-typecheck\n _inst = self\n else:\n _inst = Byte.from_other(self)\n\n # Which table to consult? Was a preferred system provided?\n if system is None:\n # No preference. 
Use existing system\n if self.system == 'NIST':\n _STEPS = NIST_PREFIXES\n _BASE = 1024\n elif self.system == 'SI':\n _STEPS = SI_PREFIXES\n _BASE = 1000\n # Anything else would have raised by now\n else:\n # Preferred system provided.\n if system == NIST:\n _STEPS = NIST_PREFIXES\n _BASE = 1024\n elif system == SI:\n _STEPS = SI_PREFIXES\n _BASE = 1000\n else:\n raise ValueError(\"Invalid value given for 'system' parameter.\"\n \" Must be one of NIST or SI\")\n\n # Index of the string of the best prefix in the STEPS list\n _index = int(math.log(abs(_inst.bytes), _BASE))\n\n # Recall that the log() function returns >= 0. This doesn't\n # map to the STEPS list 1:1. That is to say, 0 is handled with\n # special care. So if the _index is 1, we actually want item 0\n # in the list.\n\n if _index == 0:\n # Already a Byte() type, so return it.\n return _inst\n elif _index >= len(_STEPS):\n # This is a really big number. Use the biggest prefix we've got\n _best_prefix = _STEPS[-1]\n elif 0 < _index < len(_STEPS):\n # There is an appropriate prefix unit to represent this\n _best_prefix = _STEPS[_index - 1]\n\n _conversion_method = getattr(\n self,\n 'to_%sB' % _best_prefix)\n\n return _conversion_method()\n"
] |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright © 2014-2016 Tim Bielawa <timbielawa@gmail.com>
# See GitHub Contributors Graph for more information
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sub-license, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=bad-continuation,missing-docstring,invalid-name,line-too-long
"""Reference material:
The bitmath homepage is located at:
* http://bitmath.readthedocs.io/en/latest/
Prefixes for binary multiples:
http://physics.nist.gov/cuu/Units/binary.html
decimal and binary prefixes:
man 7 units (from the Linux Documentation Project 'man-pages' package)
BEFORE YOU GET HASTY WITH EXCLUDING CODE FROM COVERAGE: If you
absolutely need to skip code coverage because of a strange Python 2.x
vs 3.x thing, use the fancy environment substitution stuff from the
.coverage RC file. In review:
* If you *NEED* to skip a statement because of Python 2.x issues add the following::
# pragma: PY2X no cover
* If you *NEED* to skip a statement because of Python 3.x issues add the following::
# pragma: PY3X no cover
In this configuration, statements which are skipped in 2.x are still
covered in 3.x, and the reverse holds true for tests skipped in 3.x.
"""
from __future__ import print_function
import argparse
import contextlib
import fnmatch
import math
import numbers
import os
import os.path
import platform
import sys
# For device capacity reading in query_device_capacity(). Only supported
# on posix systems for now. Will be addressed in issue #52 on GitHub.
if os.name == 'posix':
import stat
import fcntl
import struct
#: Names exported by ``from bitmath import *``. Includes every prefix
#: class plus the public utility functions and module constants.
__all__ = ['Bit', 'Byte', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB',
           'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'Kib',
           'Mib', 'Gib', 'Tib', 'Pib', 'Eib', 'kb', 'Mb', 'Gb', 'Tb',
           'Pb', 'Eb', 'Zb', 'Yb', 'getsize', 'listdir', 'format',
           'format_string', 'format_plural', 'parse_string', 'parse_string_unsafe',
           'ALL_UNIT_TYPES', 'NIST', 'NIST_PREFIXES', 'NIST_STEPS',
           'SI', 'SI_PREFIXES', 'SI_STEPS']
# Python 3.x compat: provide the Python 2 ``long``/``unicode`` names.
# Compare interpreter versions numerically via sys.version_info; the old
# lexicographic test (sys.version > '3') is fragile string ordering.
if sys.version_info[0] >= 3:
    long = int  # pragma: PY2X no cover
    unicode = str  # pragma: PY2X no cover
#: A list of all the valid prefix unit types. Mostly for reference,
#: also used by the CLI tool as valid types
ALL_UNIT_TYPES = ['Bit', 'Byte', 'kb', 'kB', 'Mb', 'MB', 'Gb', 'GB', 'Tb',
                  'TB', 'Pb', 'PB', 'Eb', 'EB', 'Zb', 'ZB', 'Yb',
                  'YB', 'Kib', 'KiB', 'Mib', 'MiB', 'Gib', 'GiB',
                  'Tib', 'TiB', 'Pib', 'PiB', 'Eib', 'EiB']
# #####################################################################
# Set up our module variables/constants
###################################
# Internal:
# Console repr(), ex: MiB(13.37), or kB(42.0)
_FORMAT_REPR = '{unit_singular}({value})'
# ##################################
# Exposed:
#: Constants for referring to NIST prefix system
NIST = int(2)
#: Constants for referring to SI prefix system
SI = int(10)
# ##################################
#: All of the SI prefixes
SI_PREFIXES = ['k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
#: Byte values represented by each SI prefix unit
SI_STEPS = {
    'Bit': 1 / 8.0,
    'Byte': 1,
    'k': 1000,
    'M': 1000000,
    'G': 1000000000,
    'T': 1000000000000,
    'P': 1000000000000000,
    'E': 1000000000000000000,
    'Z': 1000000000000000000000,
    'Y': 1000000000000000000000000
}
#: All of the NIST prefixes
NIST_PREFIXES = ['Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei']
#: Byte values represented by each NIST prefix unit
NIST_STEPS = {
    'Bit': 1 / 8.0,
    'Byte': 1,
    'Ki': 1024,
    'Mi': 1048576,
    'Gi': 1073741824,
    'Ti': 1099511627776,
    'Pi': 1125899906842624,
    'Ei': 1152921504606846976
}
# The following two are *mutable* module-level settings; Bitmath methods
# read them via ``global`` at call time, so users can change them.
#: String representation, ex: ``13.37 MiB``, or ``42.0 kB``
format_string = "{value} {unit}"
#: Pluralization behavior
format_plural = False
def os_name():
    """Return :data:`os.name` (e.g. ``'posix'`` or ``'nt'``).

    Thin wrapper over the module attribute so platform-specific code can
    be exercised in unit tests by patching this one function.
    """
    # makes unittesting platform specific code easier
    return os.name
def capitalize_first(s):
    """Capitalize ONLY the first letter of the input `s`

    * returns a copy of input `s` with the first letter capitalized

    Unlike :meth:`str.capitalize`, the remainder of the string is left
    completely untouched. An empty string is returned unchanged rather
    than raising ``IndexError`` (which ``s[0]`` would do).
    """
    # s[:1] yields '' for an empty input where s[0] raises IndexError.
    return s[:1].upper() + s[1:]
######################################################################
# Base class for everything else
class Bitmath(object):
    """The base class for all the other prefix classes"""
    # All the allowed input types
    valid_types = (int, float, long)
    def __init__(self, value=0, bytes=None, bits=None):
        """Instantiate with `value` by the unit, in plain bytes, or
        bits. Don't supply more than one keyword.
        default behavior: initialize with value of 0
        only setting value: assert bytes is None and bits is None
        only setting bytes: assert value == 0 and bits is None
        only setting bits: assert value == 0 and bytes is None
        """
        _raise = False
        if (value == 0) and (bytes is None) and (bits is None):
            pass
        # Setting by bytes
        elif bytes is not None:
            if (value == 0) and (bits is None):
                pass
            else:
                _raise = True
        # setting by bits
        elif bits is not None:
            if (value == 0) and (bytes is None):
                pass
            else:
                _raise = True
        if _raise:
            raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
        self._do_setup()
        # NOTE(review): truthiness tests below mean bytes=0 or bits=0
        # falls through to _norm(0), which yields the same zero result.
        if bytes:
            # We were provided with the fundamental base unit, no need
            # to normalize
            self._byte_value = bytes
            self._bit_value = bytes * 8.0
        elif bits:
            # We were *ALMOST* given the fundamental base
            # unit. Translate it into the fundamental unit then
            # normalize.
            self._byte_value = bits / 8.0
            self._bit_value = bits
        else:
            # We were given a value representative of this *prefix
            # unit*. We need to normalize it into the number of bytes
            # it represents.
            self._norm(value)
        # We have the fundamental unit figured out. Set the 'pretty' unit
        self._set_prefix_value()
    def _set_prefix_value(self):
        # Cache this instance's magnitude expressed in its own prefix
        # unit (e.g. 1024 bytes -> 1.0 for a KiB instance).
        self.prefix_value = self._to_prefix_value(self._byte_value)
    def _to_prefix_value(self, value):
        """Return the number of bits/bytes as they would look like if we
        converted *to* this unit"""
        return value / float(self._unit_value)
    def _setup(self):
        # Subclasses return (base, power, singular name, plural name).
        raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")
    def _do_setup(self):
        """Setup basic parameters for this class.
        `base` is the numeric base which when raised to `power` is equivalent
        to 1 unit of the corresponding prefix. I.e., base=2, power=10
        represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.
        Likewise, for the SI prefix classes `base` will be 10, and the `power`
        for the Kilobyte is 3.
        """
        (self._base, self._power, self._name_singular, self._name_plural) = self._setup()
        self._unit_value = self._base ** self._power
    def _norm(self, value):
        """Normalize the input value into the fundamental unit for this prefix
        type.
        :param number value: The input value to be normalized
        :raises ValueError: if the input value is not a type of real number
        """
        if isinstance(value, self.valid_types):
            self._byte_value = value * self._unit_value
            self._bit_value = self._byte_value * 8.0
        else:
            raise ValueError("Initialization value '%s' is of an invalid type: %s. "
                             "Must be one of %s" % (
                                 value,
                                 type(value),
                                 ", ".join(str(x) for x in self.valid_types)))
    ##################################################################
    # Properties
    #: The mathematical base of an instance
    base = property(lambda s: s._base)
    binary = property(lambda s: bin(int(s.bits)))
    """The binary representation of an instance in binary 1s and 0s. Note
    that for very large numbers this will mean a lot of 1s and 0s. For
    example, GiB(100) would be represented as::
    0b1100100000000000000000000000000000000000
    That leading ``0b`` is normal. That's how Python represents binary.
    """
    #: Alias for :attr:`binary`
    bin = property(lambda s: s.binary)
    #: The number of bits in an instance
    bits = property(lambda s: s._bit_value)
    #: The number of bytes in an instance
    bytes = property(lambda s: s._byte_value)
    #: The mathematical power of an instance
    power = property(lambda s: s._power)
    @property
    def system(self):
        """The system of units used to measure an instance"""
        if self._base == 2:
            return "NIST"
        elif self._base == 10:
            return "SI"
        else:
            # I don't expect to ever encounter this logic branch, but
            # hey, it's better to have extra test coverage than
            # insufficient test coverage.
            raise ValueError("Instances mathematical base is an unsupported value: %s" % (
                str(self._base)))
    @property
    def unit(self):
        """The string that is this instances prefix unit name in agreement
        with this instance value (singular or plural). Following the
        convention that only 1 is singular. This will always be the singular
        form when :attr:`bitmath.format_plural` is ``False`` (default value).
        For example:
        >>> KiB(1).unit == 'KiB'
        >>> Byte(0).unit == 'Bytes'
        >>> Byte(1).unit == 'Byte'
        >>> Byte(1.1).unit == 'Bytes'
        >>> Gb(2).unit == 'Gbs'
        """
        global format_plural
        if self.prefix_value == 1:
            # If it's a '1', return it singular, no matter what
            return self._name_singular
        elif format_plural:
            # Pluralization requested
            return self._name_plural
        else:
            # Pluralization NOT requested, and the value is not 1
            return self._name_singular
    @property
    def unit_plural(self):
        """The string that is an instances prefix unit name in the plural
        form.
        For example:
        >>> KiB(1).unit_plural == 'KiB'
        >>> Byte(1024).unit_plural == 'Bytes'
        >>> Gb(1).unit_plural == 'Gb'
        """
        return self._name_plural
    @property
    def unit_singular(self):
        """The string that is an instances prefix unit name in the singular
        form.
        For example:
        >>> KiB(1).unit_singular == 'KiB'
        >>> Byte(1024).unit == 'B'
        >>> Gb(1).unit_singular == 'Gb'
        """
        return self._name_singular
    #: The "prefix" value of an instance
    value = property(lambda s: s.prefix_value)
    @classmethod
    def from_other(cls, item):
        """Factory function to return instances of `item` converted into a new
        instance of ``cls``. Because this is a class method, it may be called
        from any bitmath class object without the need to explicitly
        instantiate the class ahead of time.
        *Implicit Parameter:*
        * ``cls`` A bitmath class, implicitly set to the class of the
        instance object it is called on
        *User Supplied Parameter:*
        * ``item`` A :class:`bitmath.Bitmath` subclass instance
        *Example:*
        >>> import bitmath
        >>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
        >>> print kib
        KiB(1024.0)
        """
        if isinstance(item, Bitmath):
            return cls(bits=item.bits)
        else:
            raise ValueError("The provided items must be a valid bitmath class: %s" %
                             str(item.__class__))
    ######################################################################
    # The following implement the Python datamodel customization methods
    #
    # Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
    def __repr__(self):
        """Representation of this object as you would expect to see in an
        interpreter"""
        global _FORMAT_REPR
        return self.format(_FORMAT_REPR)
    def __str__(self):
        """String representation of this object"""
        global format_string
        return self.format(format_string)
    def format(self, fmt):
        """Return a representation of this instance formatted with user
        supplied syntax"""
        # Every public attribute is made available as a str.format() key.
        _fmt_params = {
            'base': self.base,
            'bin': self.bin,
            'binary': self.binary,
            'bits': self.bits,
            'bytes': self.bytes,
            'power': self.power,
            'system': self.system,
            'unit': self.unit,
            'unit_plural': self.unit_plural,
            'unit_singular': self.unit_singular,
            'value': self.value
        }
        return fmt.format(**_fmt_params)
    ##################################################################
    # Guess the best human-readable prefix unit for representation
    ##################################################################
    def best_prefix(self, system=None):
        """Optional parameter, `system`, allows you to prefer NIST or SI in
        the results. By default, the current system is used (Bit/Byte default
        to NIST).
        Logic discussion/notes:
        Base-case, does it need converting?
        If the instance is less than one Byte, return the instance as a Bit
        instance.
        Else, begin by recording the unit system the instance is defined
        by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.
        If the instance is not already a ``Byte`` instance, convert it to one.
        NIST units step up by powers of 1024, SI units step up by powers of
        1000.
        Take integer value of the log(base=STEP_POWER) of the instance's byte
        value. E.g.:
        >>> int(math.log(Gb(100).bytes, 1000))
        3
        This will return a value >= 0. The following determines the 'best
        prefix unit' for representation:
        * result == 0, best represented as a Byte
        * result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
        * 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
        """
        # Use absolute value so we don't return Bit's for *everything*
        # less than Byte(1). From github issue #55
        if abs(self) < Byte(1):
            return Bit.from_other(self)
        else:
            if type(self) is Byte:  # pylint: disable=unidiomatic-typecheck
                _inst = self
            else:
                _inst = Byte.from_other(self)
            # Which table to consult? Was a preferred system provided?
            if system is None:
                # No preference. Use existing system
                if self.system == 'NIST':
                    _STEPS = NIST_PREFIXES
                    _BASE = 1024
                elif self.system == 'SI':
                    _STEPS = SI_PREFIXES
                    _BASE = 1000
                # Anything else would have raised by now
            else:
                # Preferred system provided.
                if system == NIST:
                    _STEPS = NIST_PREFIXES
                    _BASE = 1024
                elif system == SI:
                    _STEPS = SI_PREFIXES
                    _BASE = 1000
                else:
                    raise ValueError("Invalid value given for 'system' parameter."
                                     " Must be one of NIST or SI")
            # Index of the string of the best prefix in the STEPS list
            _index = int(math.log(abs(_inst.bytes), _BASE))
            # Recall that the log() function returns >= 0. This doesn't
            # map to the STEPS list 1:1. That is to say, 0 is handled with
            # special care. So if the _index is 1, we actually want item 0
            # in the list.
            if _index == 0:
                # Already a Byte() type, so return it.
                return _inst
            elif _index >= len(_STEPS):
                # This is a really big number. Use the biggest prefix we've got
                _best_prefix = _STEPS[-1]
            elif 0 < _index < len(_STEPS):
                # There is an appropriate prefix unit to represent this
                _best_prefix = _STEPS[_index - 1]
            _conversion_method = getattr(
                self,
                'to_%sB' % _best_prefix)
            return _conversion_method()
    ##################################################################
    # Conversion methods: each to_X() returns a NEW instance of unit X
    # holding the same number of bits; the matching property is sugar
    # for calling it.
    def to_Bit(self):
        return Bit(self._bit_value)
    def to_Byte(self):
        return Byte(self._byte_value / float(NIST_STEPS['Byte']))
    # Properties
    Bit = property(lambda s: s.to_Bit())
    Byte = property(lambda s: s.to_Byte())
    ##################################################################
    def to_KiB(self):
        return KiB(bits=self._bit_value)
    def to_Kib(self):
        return Kib(bits=self._bit_value)
    def to_kB(self):
        return kB(bits=self._bit_value)
    def to_kb(self):
        return kb(bits=self._bit_value)
    # Properties
    KiB = property(lambda s: s.to_KiB())
    Kib = property(lambda s: s.to_Kib())
    kB = property(lambda s: s.to_kB())
    kb = property(lambda s: s.to_kb())
    ##################################################################
    def to_MiB(self):
        return MiB(bits=self._bit_value)
    def to_Mib(self):
        return Mib(bits=self._bit_value)
    def to_MB(self):
        return MB(bits=self._bit_value)
    def to_Mb(self):
        return Mb(bits=self._bit_value)
    # Properties
    MiB = property(lambda s: s.to_MiB())
    Mib = property(lambda s: s.to_Mib())
    MB = property(lambda s: s.to_MB())
    Mb = property(lambda s: s.to_Mb())
    ##################################################################
    def to_GiB(self):
        return GiB(bits=self._bit_value)
    def to_Gib(self):
        return Gib(bits=self._bit_value)
    def to_GB(self):
        return GB(bits=self._bit_value)
    def to_Gb(self):
        return Gb(bits=self._bit_value)
    # Properties
    GiB = property(lambda s: s.to_GiB())
    Gib = property(lambda s: s.to_Gib())
    GB = property(lambda s: s.to_GB())
    Gb = property(lambda s: s.to_Gb())
    ##################################################################
    def to_TiB(self):
        return TiB(bits=self._bit_value)
    def to_Tib(self):
        return Tib(bits=self._bit_value)
    def to_TB(self):
        return TB(bits=self._bit_value)
    def to_Tb(self):
        return Tb(bits=self._bit_value)
    # Properties
    TiB = property(lambda s: s.to_TiB())
    Tib = property(lambda s: s.to_Tib())
    TB = property(lambda s: s.to_TB())
    Tb = property(lambda s: s.to_Tb())
    ##################################################################
    def to_PiB(self):
        return PiB(bits=self._bit_value)
    def to_Pib(self):
        return Pib(bits=self._bit_value)
    def to_PB(self):
        return PB(bits=self._bit_value)
    def to_Pb(self):
        return Pb(bits=self._bit_value)
    # Properties
    PiB = property(lambda s: s.to_PiB())
    Pib = property(lambda s: s.to_Pib())
    PB = property(lambda s: s.to_PB())
    Pb = property(lambda s: s.to_Pb())
    ##################################################################
    def to_EiB(self):
        return EiB(bits=self._bit_value)
    def to_Eib(self):
        return Eib(bits=self._bit_value)
    def to_EB(self):
        return EB(bits=self._bit_value)
    def to_Eb(self):
        return Eb(bits=self._bit_value)
    # Properties
    EiB = property(lambda s: s.to_EiB())
    Eib = property(lambda s: s.to_Eib())
    EB = property(lambda s: s.to_EB())
    Eb = property(lambda s: s.to_Eb())
    ##################################################################
    # The SI units go beyond the NIST units. They also have the Zetta
    # and Yotta prefixes.
    def to_ZB(self):
        return ZB(bits=self._bit_value)
    def to_Zb(self):
        return Zb(bits=self._bit_value)
    # Properties
    ZB = property(lambda s: s.to_ZB())
    Zb = property(lambda s: s.to_Zb())
    ##################################################################
    def to_YB(self):
        return YB(bits=self._bit_value)
    def to_Yb(self):
        return Yb(bits=self._bit_value)
    #: A new object representing this instance as a Yottabyte
    YB = property(lambda s: s.to_YB())
    Yb = property(lambda s: s.to_Yb())
    ##################################################################
    # Rich comparison operations
    ##################################################################
    # NOTE(review): plain numbers compare against the PREFIX value
    # (KiB(1) == 1) while bitmath operands compare against the BYTE
    # value (KiB(1) == Byte(1024)) -- both comparisons hold at once.
    def __lt__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value < other
        else:
            return self._byte_value < other.bytes
    def __le__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value <= other
        else:
            return self._byte_value <= other.bytes
    def __eq__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value == other
        else:
            return self._byte_value == other.bytes
    def __ne__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value != other
        else:
            return self._byte_value != other.bytes
    def __gt__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value > other
        else:
            return self._byte_value > other.bytes
    def __ge__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value >= other
        else:
            return self._byte_value >= other.bytes
    ##################################################################
    # Basic math operations
    ##################################################################
    # Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
    """These methods are called to implement the binary arithmetic
    operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
    instance, to evaluate the expression x + y, where x is an instance of
    a class that has an __add__() method, x.__add__(y) is called. The
    __divmod__() method should be the equivalent to using __floordiv__()
    and __mod__(); it should not be related to __truediv__() (described
    below). Note that __pow__() should be defined to accept an optional
    third argument if the ternary version of the built-in pow() function
    is to be supported.object.__complex__(self)
    """
    def __add__(self, other):
        """Supported operations with result types:
        - bm + bm = bm
        - bm + num = num
        - num + bm = num (see radd)
        """
        if isinstance(other, numbers.Number):
            # bm + num
            return other + self.value
        else:
            # bm + bm; the result takes the LEFT operand's unit type
            total_bytes = self._byte_value + other.bytes
            return (type(self))(bytes=total_bytes)
    def __sub__(self, other):
        """Subtraction: Supported operations with result types:
        - bm - bm = bm
        - bm - num = num
        - num - bm = num (see rsub)
        """
        if isinstance(other, numbers.Number):
            # bm - num
            return self.value - other
        else:
            # bm - bm
            total_bytes = self._byte_value - other.bytes
            return (type(self))(bytes=total_bytes)
    def __mul__(self, other):
        """Multiplication: Supported operations with result types:
        - bm1 * bm2 = bm1
        - bm * num = bm
        - num * bm = num (see rmul)
        """
        if isinstance(other, numbers.Number):
            # bm * num
            result = self._byte_value * other
            return (type(self))(bytes=result)
        else:
            # bm1 * bm2
            _other = other.value * other.base ** other.power
            _self = self.prefix_value * self._base ** self._power
            return (type(self))(bytes=_other * _self)
    """The division operator (/) is implemented by these methods. The
    __truediv__() method is used when __future__.division is in effect,
    otherwise __div__() is used. If only one of these two methods is
    defined, the object will not support division in the alternate
    context; TypeError will be raised instead."""
    def __div__(self, other):
        """Division: Supported operations with result types:
        - bm1 / bm2 = num
        - bm / num = bm
        - num / bm = num (see rdiv)
        """
        if isinstance(other, numbers.Number):
            # bm / num
            result = self._byte_value / other
            return (type(self))(bytes=result)
        else:
            # bm1 / bm2
            return self._byte_value / float(other.bytes)
    def __truediv__(self, other):
        # num / bm
        return self.__div__(other)
    # def __floordiv__(self, other):
    #     return NotImplemented
    # def __mod__(self, other):
    #     return NotImplemented
    # def __divmod__(self, other):
    #     return NotImplemented
    # def __pow__(self, other, modulo=None):
    #     return NotImplemented
    ##################################################################
    """These methods are called to implement the binary arithmetic
    operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
    reflected (swapped) operands. These functions are only called if the
    left operand does not support the corresponding operation and the
    operands are of different types. [2] For instance, to evaluate the
    expression x - y, where y is an instance of a class that has an
    __rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
    NotImplemented.
    These are the add/sub/mul/div methods for syntax where a number type
    is given for the LTYPE and a bitmath object is given for the
    RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
    """
    def __radd__(self, other):
        # num + bm = num
        return other + self.value
    def __rsub__(self, other):
        # num - bm = num
        return other - self.value
    def __rmul__(self, other):
        # num * bm = bm
        return self * other
    def __rdiv__(self, other):
        # num / bm = num
        return other / float(self.value)
    def __rtruediv__(self, other):
        # num / bm = num
        return other / float(self.value)
    """Called to implement the built-in functions complex(), int(),
    long(), and float(). Should return a value of the appropriate type.
    If one of those methods does not support the operation with the
    supplied arguments, it should return NotImplemented.
    For bitmath purposes, these methods return the int/long/float
    equivalent of the this instances prefix Unix value. That is to say:
    - int(KiB(3.336)) would return 3
    - long(KiB(3.336)) would return 3L
    - float(KiB(3.336)) would return 3.336
    """
    def __int__(self):
        """Return this instances prefix unit as an integer"""
        return int(self.prefix_value)
    def __long__(self):
        """Return this instances prefix unit as a long integer"""
        return long(self.prefix_value)  # pragma: PY3X no cover
    def __float__(self):
        """Return this instances prefix unit as a floating point number"""
        return float(self.prefix_value)
    ##################################################################
    # Bitwise operations
    ##################################################################
    # NOTE(review): bitwise operators act on int(self.bits) and expect a
    # plain-integer right operand; a bitmath right operand is not handled.
    def __lshift__(self, other):
        """Left shift, ex: 100 << 2
        A left shift by n bits is equivalent to multiplication by pow(2,
        n). A long integer is returned if the result exceeds the range of
        plain integers."""
        shifted = int(self.bits) << other
        return type(self)(bits=shifted)
    def __rshift__(self, other):
        """Right shift, ex: 100 >> 2
        A right shift by n bits is equivalent to division by pow(2, n)."""
        shifted = int(self.bits) >> other
        return type(self)(bits=shifted)
    def __and__(self, other):
        """"Bitwise and, ex: 100 & 2
        bitwise and". Each bit of the output is 1 if the corresponding bit
        of x AND of y is 1, otherwise it's 0."""
        andd = int(self.bits) & other
        return type(self)(bits=andd)
    def __xor__(self, other):
        """Bitwise xor, ex: 100 ^ 2
        Does a "bitwise exclusive or". Each bit of the output is the same
        as the corresponding bit in x if that bit in y is 0, and it's the
        complement of the bit in x if that bit in y is 1."""
        xord = int(self.bits) ^ other
        return type(self)(bits=xord)
    def __or__(self, other):
        """Bitwise or, ex: 100 | 2
        Does a "bitwise or". Each bit of the output is 0 if the corresponding
        bit of x AND of y is 0, otherwise it's 1."""
        # NOTE(review): local name shadows the builtin ord(); harmless
        # here but worth renaming if this method is ever touched.
        ord = int(self.bits) | other
        return type(self)(bits=ord)
    ##################################################################
    def __neg__(self):
        """The negative version of this instance"""
        return (type(self))(-abs(self.prefix_value))
    def __pos__(self):
        return (type(self))(abs(self.prefix_value))
    def __abs__(self):
        return (type(self))(abs(self.prefix_value))
    # def __invert__(self):
    #     """Called to implement the unary arithmetic operations (-, +, abs()
    #     and ~)."""
    #     return NotImplemented
######################################################################
# First, the bytes...
class Byte(Bitmath):
    """Byte based types fundamentally operate on self._bit_value"""
    def _setup(self):
        # (base, power, singular name, plural name); 2**0 == 1 Byte
        return (2, 0, 'Byte', 'Bytes')
######################################################################
# NIST Prefixes for Byte based types
class KiB(Byte):
    """NIST kibibyte: 2**10 (1024) Bytes."""
    def _setup(self):
        # (base, power, singular, plural)
        return 2, 10, 'KiB', 'KiBs'
Kio = KiB  # "o" (octet) spelling alias
class MiB(Byte):
    """NIST mebibyte: 2**20 Bytes."""
    def _setup(self):
        return 2, 20, 'MiB', 'MiBs'
Mio = MiB  # "o" (octet) spelling alias
class GiB(Byte):
    """NIST gibibyte: 2**30 Bytes."""
    def _setup(self):
        return 2, 30, 'GiB', 'GiBs'
Gio = GiB  # "o" (octet) spelling alias
class TiB(Byte):
    """NIST tebibyte: 2**40 Bytes."""
    def _setup(self):
        return 2, 40, 'TiB', 'TiBs'
Tio = TiB  # "o" (octet) spelling alias
class PiB(Byte):
    """NIST pebibyte: 2**50 Bytes."""
    def _setup(self):
        return 2, 50, 'PiB', 'PiBs'
Pio = PiB  # "o" (octet) spelling alias
class EiB(Byte):
    """NIST exbibyte: 2**60 Bytes."""
    def _setup(self):
        return 2, 60, 'EiB', 'EiBs'
Eio = EiB  # "o" (octet) spelling alias
######################################################################
# SI Prefixes for Byte based types
class kB(Byte):
    """SI kilobyte: 10**3 Bytes."""
    def _setup(self):
        return 10, 3, 'kB', 'kBs'
ko = kB  # "o" (octet) spelling alias
class MB(Byte):
    """SI megabyte: 10**6 Bytes."""
    def _setup(self):
        return 10, 6, 'MB', 'MBs'
Mo = MB  # "o" (octet) spelling alias
class GB(Byte):
    """SI gigabyte: 10**9 Bytes."""
    def _setup(self):
        return 10, 9, 'GB', 'GBs'
Go = GB  # "o" (octet) spelling alias
class TB(Byte):
    """SI terabyte: 10**12 Bytes."""
    def _setup(self):
        return 10, 12, 'TB', 'TBs'
To = TB  # "o" (octet) spelling alias
class PB(Byte):
    """SI petabyte: 10**15 Bytes."""
    def _setup(self):
        return 10, 15, 'PB', 'PBs'
Po = PB  # "o" (octet) spelling alias
class EB(Byte):
    """SI exabyte: 10**18 Bytes."""
    def _setup(self):
        return 10, 18, 'EB', 'EBs'
Eo = EB  # "o" (octet) spelling alias
class ZB(Byte):
    """SI zettabyte: 10**21 Bytes (beyond the NIST range)."""
    def _setup(self):
        return 10, 21, 'ZB', 'ZBs'
Zo = ZB  # "o" (octet) spelling alias
class YB(Byte):
    """SI yottabyte: 10**24 Bytes (beyond the NIST range)."""
    def _setup(self):
        return 10, 24, 'YB', 'YBs'
Yo = YB  # "o" (octet) spelling alias
######################################################################
# And now the bit types
class Bit(Bitmath):
    """Bit based types fundamentally operate on self._bit_value"""
    def _set_prefix_value(self):
        # Unlike Byte-based types, the display value derives from the
        # BIT count, not the byte count.
        self.prefix_value = self._to_prefix_value(self._bit_value)
    def _setup(self):
        # (base, power, singular name, plural name); 2**0 == 1 Bit
        return (2, 0, 'Bit', 'Bits')
    def _norm(self, value):
        """Normalize the input value into the fundamental unit for this prefix
        type"""
        # NOTE(review): unlike Bitmath._norm, no isinstance() validation
        # of `value` happens here -- non-numeric input fails later.
        self._bit_value = value * self._unit_value
        self._byte_value = self._bit_value / 8.0
######################################################################
# NIST Prefixes for Bit based types
class Kib(Bit):
    """NIST kibibit: 2**10 Bits."""
    def _setup(self):
        return 2, 10, 'Kib', 'Kibs'
class Mib(Bit):
    """NIST mebibit: 2**20 Bits."""
    def _setup(self):
        return 2, 20, 'Mib', 'Mibs'
class Gib(Bit):
    """NIST gibibit: 2**30 Bits."""
    def _setup(self):
        return 2, 30, 'Gib', 'Gibs'
class Tib(Bit):
    """NIST tebibit: 2**40 Bits."""
    def _setup(self):
        return 2, 40, 'Tib', 'Tibs'
class Pib(Bit):
    """NIST pebibit: 2**50 Bits."""
    def _setup(self):
        return 2, 50, 'Pib', 'Pibs'
class Eib(Bit):
    """NIST exbibit: 2**60 Bits."""
    def _setup(self):
        return 2, 60, 'Eib', 'Eibs'
######################################################################
# SI Prefixes for Bit based types
class kb(Bit):
    """SI kilobit: 10**3 Bits."""
    def _setup(self):
        return 10, 3, 'kb', 'kbs'
class Mb(Bit):
    """SI megabit: 10**6 Bits."""
    def _setup(self):
        return 10, 6, 'Mb', 'Mbs'
class Gb(Bit):
    """SI gigabit: 10**9 Bits."""
    def _setup(self):
        return 10, 9, 'Gb', 'Gbs'
class Tb(Bit):
    """SI terabit: 10**12 Bits."""
    def _setup(self):
        return 10, 12, 'Tb', 'Tbs'
class Pb(Bit):
    """SI petabit: 10**15 Bits."""
    def _setup(self):
        return 10, 15, 'Pb', 'Pbs'
class Eb(Bit):
    """SI exabit: 10**18 Bits."""
    def _setup(self):
        return 10, 18, 'Eb', 'Ebs'
class Zb(Bit):
    """SI zettabit: 10**21 Bits (beyond the NIST range)."""
    def _setup(self):
        return 10, 21, 'Zb', 'Zbs'
class Yb(Bit):
    """SI yottabit: 10**24 Bits (beyond the NIST range)."""
    def _setup(self):
        return 10, 24, 'Yb', 'Ybs'
######################################################################
# Utility functions
def best_prefix(bytes, system=NIST):
    """Return a bitmath instance giving the best human-readable
    representation of ``bytes``.

    ``bytes`` may be a plain number (interpreted as a count of bytes) or
    any bitmath instance. Optionally select a preferred unit system by
    setting ``system`` to ``bitmath.NIST`` (default) or ``bitmath.SI``.

    Shortcut for building a :class:`Byte` and calling its
    :meth:`Bitmath.best_prefix` method:

    >>> import bitmath
    >>> best = bitmath.Byte(12345).best_prefix()

    Or:

    >>> best = (bitmath.KiB(12345) * 4201).best_prefix()
    """
    byte_count = bytes.bytes if isinstance(bytes, Bitmath) else bytes
    return Byte(byte_count).best_prefix(system=system)
def query_device_capacity(device_fd):
    """Create bitmath instances of the capacity of a system block device
    Make one or more ioctl request to query the capacity of a block
    device. Perform any processing required to compute the final capacity
    value. Return the device capacity in bytes as a :class:`bitmath.Byte`
    instance.
    Thanks to the following resources for help figuring this out Linux/Mac
    ioctl's for querying block device sizes:
    * http://stackoverflow.com/a/12925285/263969
    * http://stackoverflow.com/a/9764508/263969
    :param file device_fd: A ``file`` object of the device to query the
        capacity of (as in ``get_device_capacity(open("/dev/sda"))``).
    :return: a bitmath :class:`bitmath.Byte` instance equivalent to the
        capacity of the target device in bytes.
    :raises NotImplementedError: on non-posix platforms (the fcntl/stat
        machinery is only available there).
    :raises ValueError: if ``device_fd`` does not refer to a block device.
    """
    if os_name() != 'posix':
        raise NotImplementedError("'bitmath.query_device_capacity' is not supported on this platform: %s" % os_name())
    s = os.stat(device_fd.name).st_mode
    if not stat.S_ISBLK(s):
        raise ValueError("The file descriptor provided is not of a device type")
    # The keys of the ``ioctl_map`` dictionary correlate to possible
    # values from the ``platform.system`` function.
    ioctl_map = {
        # ioctls for the "Linux" platform
        "Linux": {
            "request_params": [
                # A list of parameters to calculate the block size.
                #
                # ( PARAM_NAME , FORMAT_CHAR , REQUEST_CODE )
                #
                # Per <linux/fs.h>, the BLKGETSIZE64 request returns a
                # 'u64' (unsigned 64 bit integer), so we need 8 bytes
                # of buffer. struct's native-size 'L' (unsigned long)
                # provides that on the platforms this supports; verify
                # with ``struct.calcsize('L') == 8``. Please file a bug
                # at https://github.com/tbielawa/bitmath/issues/new if
                # your kernel headers disagree.
                ("BLKGETSIZE64", "L", 0x80081272)
            ],
            # BLKGETSIZE64 already reports the capacity in bytes as an
            # integer, so no extra math is needed: return it directly.
            "func": lambda x: x["BLKGETSIZE64"]
        },
        # ioctls for the "Darwin" (Mac OS X) platform
        "Darwin": {
            "request_params": [
                # A list of parameters to calculate the block size.
                #
                # ( PARAM_NAME , FORMAT_CHAR , REQUEST_CODE )
                #
                # Per <sys/disk.h>: get media's block count - uint64_t,
                # so format character 'L' as with BLKGETSIZE64 above.
                ("DKIOCGETBLOCKCOUNT", "L", 0x40086419),
                # Per <sys/disk.h>: get media's block size - uint32_t,
                # i.e. a plain 4-byte unsigned int: format char 'I'.
                ("DKIOCGETBLOCKSIZE", "I", 0x40046418)
            ],
            # OS X has no direct equivalent to Linux's BLKGETSIZE64, so
            # request the number of blocks on the disk and the size (in
            # bytes) of each block, then multiply the two:
            #
            #                              n Block * y Byte
            #         capacity (bytes) =   ----------------
            #                                  1 Block
            "func": lambda x: x["DKIOCGETBLOCKCOUNT"] * x["DKIOCGETBLOCKSIZE"]
        }
    }
    platform_params = ioctl_map[platform.system()]
    results = {}
    for req_name, fmt, request_code in platform_params['request_params']:
        # Read the systems native size (in bytes) of this format type.
        buffer_size = struct.calcsize(fmt)
        # Construct a buffer to store the ioctl result in. NOTE: this
        # must be bytes, not str -- on Python 3 fcntl.ioctl rejects a
        # str argument (it is not a bytes-like object), and
        # struct.unpack below requires bytes as well. b'\0' * n works
        # identically on Python 2.
        buffer = b'\0' * buffer_size
        # This code has been run on only a few test systems. If it's
        # appropriate, maybe in the future we'll add try/except
        # conditions for some possible errors. Really only for cases
        # where it would add value to override the default exception
        # message string.
        buffer = fcntl.ioctl(device_fd.fileno(), request_code, buffer)
        # Unpack the raw result from the ioctl call into a familiar
        # python data type according to the ``fmt`` rules.
        result = struct.unpack(fmt, buffer)[0]
        # Add the new result to our collection
        results[req_name] = result
    return Byte(platform_params['func'](results))
def listdir(search_base, followlinks=False, filter='*',
            relpath=False, bestprefix=False, system=NIST):
    """Recursively walk the directory tree at `search_base`, yielding
    2-tuples of:

    * The absolute/relative path to a discovered file
    * A bitmath instance representing the "apparent size" of the file.

    - `search_base` - The directory to begin walking down.
    - `followlinks` - Whether or not to follow symbolic links to directories
    - `filter` - A glob (see :py:mod:`fnmatch`) to filter results with
      (default: ``*``, everything)
    - `relpath` - ``True`` to return the relative path from `pwd` or
      ``False`` (default) to return the fully qualified path
    - ``bestprefix`` - set to ``False`` to get ``bitmath.Byte``
      instances back instead.
    - `system` - Provide a preferred unit system by setting `system`
      to either ``bitmath.NIST`` (default) or ``bitmath.SI``.

    .. note:: This function does NOT return tuples for directory entities.

    .. note:: Symlinks to **files** are followed automatically
    """
    for base_dir, _subdirs, entries in os.walk(search_base, followlinks=followlinks):
        for entry in fnmatch.filter(entries, filter):
            discovered = os.path.join(base_dir, entry)
            # Report either relative to the working directory, or the
            # fully resolved path.
            if relpath:
                reported = os.path.relpath(discovered, '.')
            else:
                reported = os.path.realpath(discovered)
            # When not following links, skip symlinks (and any stray
            # directory entries) entirely.
            if not followlinks and (os.path.isdir(discovered) or os.path.islink(discovered)):
                continue
            yield (reported, getsize(discovered, bestprefix=bestprefix, system=system))
def parse_string(s):
    """Parse a string with units and try to make a bitmath object out of
    it.

    String inputs may include whitespace characters between the value and
    the unit.

    :param s: a string such as ``"1024 KiB"`` or ``"42b"``
    :returns: an instance of the bitmath class named by the unit suffix
    :raises ValueError: if `s` is not a string, contains no unit, names
        an unknown unit, or its numeric part does not parse as a float
    """
    # Strings only please
    if not isinstance(s, (str, unicode)):
        raise ValueError("parse_string only accepts string inputs but a %s was given" %
                         type(s))
    # get the index of the first alphabetic character
    try:
        index = list([i.isalpha() for i in s]).index(True)
    except ValueError:
        # If there's no alphabetic characters we won't be able to .index(True)
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
    # split the string into the value and the unit
    val, unit = s[:index], s[index:]
    # see if the unit exists as a type in our namespace
    if unit == "b":
        unit_class = Bit
    elif unit == "B":
        unit_class = Byte
    else:
        # Multi-character units must name a class defined in this
        # module (e.g. KiB, MB, Gb, ...)
        if not (hasattr(sys.modules[__name__], unit) and isinstance(getattr(sys.modules[__name__], unit), type)):
            raise ValueError("The unit %s is not a valid bitmath unit" % unit)
        unit_class = globals()[unit]
    try:
        val = float(val)
    except ValueError:
        # Surface float()'s own parse failure unchanged
        raise
    try:
        return unit_class(val)
    except: # pragma: no cover
        raise ValueError("Can't parse string %s into a bitmath object" % s)
def parse_string_unsafe(s, system=SI):
    """Attempt to parse a string with ambiguous units and try to make a
    bitmath object out of it.

    This may produce inaccurate results if parsing shell output. For
    example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
    ~= 2.666 KiB. See the documentation for all of the important details.

    Note the following caveats:

    * All inputs are assumed to be byte-based (as opposed to bit based)
    * Numerical inputs (those without any units) are assumed to be a
      number of bytes
    * Inputs with single letter units (k, M, G, etc) are assumed to be SI
      units (base-10). Set the `system` parameter to `bitmath.NIST` to
      change this behavior.
    * Inputs with an `i` character following the leading letter (Ki, Mi,
      Gi) are assumed to be NIST units (base 2)
    * Capitalization does not matter
    """
    if not isinstance(s, (str, unicode)) and \
       not isinstance(s, numbers.Number):
        raise ValueError("parse_string_unsafe only accepts string/number inputs but a %s was given" %
                         type(s))
    ######################################################################
    # Is the input simple to parse? Just a number, or a number
    # masquerading as a string perhaps?
    # Test case: raw number input (easy!)
    if isinstance(s, numbers.Number):
        # It's just a number. Assume bytes
        return Byte(s)
    # Test case: a number pretending to be a string
    if isinstance(s, (str, unicode)):
        try:
            # Can we turn it directly into a number?
            return Byte(float(s))
        except ValueError:
            # Nope, this is not a plain number
            pass
    ######################################################################
    # At this point:
    # - the input is also not just a number wrapped in a string
    # - nor is it just a plain number type
    #
    # We need to do some more digging around now to figure out exactly
    # what we were given and possibly normalize the input into a
    # format we can recognize.
    # First we'll separate the number and the unit.
    #
    # Get the index of the first alphabetic character
    try:
        index = list([i.isalpha() for i in s]).index(True)
    except ValueError: # pragma: no cover
        # If there's no alphabetic characters we won't be able to .index(True)
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
    # Split the string into the value and the unit
    val, unit = s[:index], s[index:]
    # Don't trust anything. We'll make sure the correct 'b' is in place.
    unit = unit.rstrip('Bb')
    unit += 'B'
    # At this point we can expect `unit` to be either:
    #
    # - 2 Characters (for SI, ex: kB or GB)
    # - 3 Characters (so NIST, ex: KiB, or GiB)
    #
    # A unit with any other number of chars is not a valid unit
    # SI
    if len(unit) == 2:
        # Has NIST parsing been requested?
        if system == NIST:
            # NIST units requested. Ensure the unit begins with a
            # capital letter and is followed by an 'i' character.
            unit = capitalize_first(unit)
            # Insert an 'i' char after the first letter
            _unit = list(unit)
            _unit.insert(1, 'i')
            # Collapse the list back into a 3 letter string
            unit = ''.join(_unit)
            unit_class = globals()[unit]
        else:
            # Default parsing (SI format)
            #
            # Edge-case checking: SI 'thousand' is a lower-case K
            if unit.startswith('K'):
                unit = unit.replace('K', 'k')
            elif not unit.startswith('k'):
                # Otherwise, ensure the first char is capitalized
                unit = capitalize_first(unit)
            # This is an SI-type unit
            if unit[0] in SI_PREFIXES:
                unit_class = globals()[unit]
    # NIST
    elif len(unit) == 3:
        unit = capitalize_first(unit)
        # This is a NIST-type unit
        if unit[:2] in NIST_PREFIXES:
            unit_class = globals()[unit]
    else:
        # This is not a unit we recognize
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    try:
        unit_class
    except UnboundLocalError:
        # A 2/3-char unit whose prefix wasn't in the lookup tables never
        # assigned unit_class; translate that into the user-facing error.
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    return unit_class(float(val))
######################################################################
# Contxt Managers
@contextlib.contextmanager
def format(fmt_str=None, plural=False, bestprefix=False):
    """Context manager for printing bitmath instances.

    ``fmt_str`` - a formatting mini-language compat formatting string. See
    the @properties (above) for a list of available items.

    ``plural`` - True enables printing instances with 's's if they're
    plural. False (default) prints them as singular (no trailing 's').

    ``bestprefix`` - True enables printing instances in their best
    human-readable representation. False, the default, prints instances
    using their current prefix unit.

    .. note:: ``bestprefix`` is accepted but not currently acted upon by
       this context manager.
    """
    if 'bitmath' not in globals():
        import bitmath
    if plural:
        orig_fmt_plural = bitmath.format_plural
        bitmath.format_plural = True
    if fmt_str:
        orig_fmt_str = bitmath.format_string
        bitmath.format_string = fmt_str
    try:
        yield
    finally:
        # BUGFIX: restore the module-level formatting globals even when
        # the with-body raises; previously an exception permanently
        # leaked the temporary settings into the module state.
        if plural:
            bitmath.format_plural = orig_fmt_plural
        if fmt_str:
            bitmath.format_string = orig_fmt_str
def cli_script_main(cli_args):
    """
    A command line interface to basic bitmath operations.

    :param list cli_args: argv-style argument list (program name excluded)
    :returns: a list of bitmath instances, one conversion per input size
    """
    choices = ALL_UNIT_TYPES
    parser = argparse.ArgumentParser(
        description='Converts from one type of size to another.')
    parser.add_argument('--from-stdin', default=False, action='store_true',
                        help='Reads number from stdin rather than the cli')
    parser.add_argument(
        '-f', '--from', choices=choices, nargs=1,
        type=str, dest='fromunit', default=['Byte'],
        # BUGFIX: corrected "Defaultes" typo in user-facing help text
        help='Input type you are converting from. Defaults to Byte.')
    parser.add_argument(
        '-t', '--to', choices=choices, required=False, nargs=1, type=str,
        help=('Input type you are converting to. '
              'Attempts to detect best result if omitted.'), dest='tounit')
    parser.add_argument(
        'size', nargs='*', type=float,
        help='The number to convert.')
    args = parser.parse_args(cli_args)
    # Not sure how to cover this with tests, or if the functionality
    # will remain in this form long enough for it to make writing a
    # test worth the effort.
    if args.from_stdin:  # pragma: no cover
        # Read a single newline-terminated number from stdin
        args.size = [float(sys.stdin.readline()[:-1])]
    results = []
    for size in args.size:
        # Look up the source unit class on the bitmath module itself
        instance = getattr(__import__(
            'bitmath', fromlist=['True']), args.fromunit[0])(size)
        # If we have a unit provided then use it
        if args.tounit:
            result = getattr(instance, args.tounit[0])
        # Otherwise use the best_prefix call
        else:
            result = instance.best_prefix()
        results.append(result)
    return results
def cli_script(): # pragma: no cover
    # Wrapper around cli_script_main so we can unittest the command
    # line functionality. Prints one converted result per input value.
    for result in cli_script_main(sys.argv[1:]):
        print(result)
# Allow running this module directly as a conversion tool.
if __name__ == '__main__':
    cli_script()
|
tbielawa/bitmath
|
bitmath/__init__.py
|
listdir
|
python
|
def listdir(search_base, followlinks=False, filter='*',
relpath=False, bestprefix=False, system=NIST):
for root, dirs, files in os.walk(search_base, followlinks=followlinks):
for name in fnmatch.filter(files, filter):
_path = os.path.join(root, name)
if relpath:
# RELATIVE path
_return_path = os.path.relpath(_path, '.')
else:
# REAL path
_return_path = os.path.realpath(_path)
if followlinks:
yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
else:
if os.path.isdir(_path) or os.path.islink(_path):
pass
else:
yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
|
This is a generator which recurses the directory tree
`search_base`, yielding 2-tuples of:
* The absolute/relative path to a discovered file
* A bitmath instance representing the "apparent size" of the file.
- `search_base` - The directory to begin walking down.
- `followlinks` - Whether or not to follow symbolic links to directories
- `filter` - A glob (see :py:mod:`fnmatch`) to filter results with
(default: ``*``, everything)
- `relpath` - ``True`` to return the relative path from `pwd` or
``False`` (default) to return the fully qualified path
- ``bestprefix`` - set to ``False`` to get ``bitmath.Byte``
instances back instead.
- `system` - Provide a preferred unit system by setting `system`
to either ``bitmath.NIST`` (default) or ``bitmath.SI``.
.. note:: This function does NOT return tuples for directory entities.
.. note:: Symlinks to **files** are followed automatically
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L1351-L1391
|
[
"def getsize(path, bestprefix=True, system=NIST):\n \"\"\"Return a bitmath instance in the best human-readable representation\nof the file size at `path`. Optionally, provide a preferred unit\nsystem by setting `system` to either `bitmath.NIST` (default) or\n`bitmath.SI`.\n\nOptionally, set ``bestprefix`` to ``False`` to get ``bitmath.Byte``\ninstances back.\n \"\"\"\n _path = os.path.realpath(path)\n size_bytes = os.path.getsize(_path)\n if bestprefix:\n return Byte(size_bytes).best_prefix(system=system)\n else:\n return Byte(size_bytes)\n"
] |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright © 2014-2016 Tim Bielawa <timbielawa@gmail.com>
# See GitHub Contributors Graph for more information
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sub-license, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=bad-continuation,missing-docstring,invalid-name,line-too-long
"""Reference material:
The bitmath homepage is located at:
* http://bitmath.readthedocs.io/en/latest/
Prefixes for binary multiples:
http://physics.nist.gov/cuu/Units/binary.html
decimal and binary prefixes:
man 7 units (from the Linux Documentation Project 'man-pages' package)
BEFORE YOU GET HASTY WITH EXCLUDING CODE FROM COVERAGE: If you
absolutely need to skip code coverage because of a strange Python 2.x
vs 3.x thing, use the fancy environment substitution stuff from the
.coverage RC file. In review:
* If you *NEED* to skip a statement because of Python 2.x issues add the following::
# pragma: PY2X no cover
* If you *NEED* to skip a statement because of Python 3.x issues add the following::
# pragma: PY3X no cover
In this configuration, statements which are skipped in 2.x are still
covered in 3.x, and the reverse holds true for tests skipped in 3.x.
"""
from __future__ import print_function
import argparse
import contextlib
import fnmatch
import math
import numbers
import os
import os.path
import platform
import sys
# For device capacity reading in query_device_capacity(). Only supported
# on posix systems for now. Will be addressed in issue #52 on GitHub.
if os.name == 'posix':
import stat
import fcntl
import struct
__all__ = ['Bit', 'Byte', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB',
'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'Kib',
'Mib', 'Gib', 'Tib', 'Pib', 'Eib', 'kb', 'Mb', 'Gb', 'Tb',
'Pb', 'Eb', 'Zb', 'Yb', 'getsize', 'listdir', 'format',
'format_string', 'format_plural', 'parse_string', 'parse_string_unsafe',
'ALL_UNIT_TYPES', 'NIST', 'NIST_PREFIXES', 'NIST_STEPS',
'SI', 'SI_PREFIXES', 'SI_STEPS']
# Python 3.x compat
if sys.version > '3':
long = int # pragma: PY2X no cover
unicode = str # pragma: PY2X no cover
#: A list of all the valid prefix unit types. Mostly for reference,
#: also used by the CLI tool as valid types
ALL_UNIT_TYPES = ['Bit', 'Byte', 'kb', 'kB', 'Mb', 'MB', 'Gb', 'GB', 'Tb',
'TB', 'Pb', 'PB', 'Eb', 'EB', 'Zb', 'ZB', 'Yb',
'YB', 'Kib', 'KiB', 'Mib', 'MiB', 'Gib', 'GiB',
'Tib', 'TiB', 'Pib', 'PiB', 'Eib', 'EiB']
# #####################################################################
# Set up our module variables/constants
###################################
# Internal:
# Console repr(), ex: MiB(13.37), or kB(42.0)
_FORMAT_REPR = '{unit_singular}({value})'
# ##################################
# Exposed:
#: Constants for referring to NIST prefix system
NIST = int(2)
#: Constants for referring to SI prefix system
SI = int(10)
# ##################################
#: All of the SI prefixes
SI_PREFIXES = ['k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
#: Byte values represented by each SI prefix unit
SI_STEPS = {
'Bit': 1 / 8.0,
'Byte': 1,
'k': 1000,
'M': 1000000,
'G': 1000000000,
'T': 1000000000000,
'P': 1000000000000000,
'E': 1000000000000000000,
'Z': 1000000000000000000000,
'Y': 1000000000000000000000000
}
#: All of the NIST prefixes
NIST_PREFIXES = ['Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei']
#: Byte values represented by each NIST prefix unit
NIST_STEPS = {
'Bit': 1 / 8.0,
'Byte': 1,
'Ki': 1024,
'Mi': 1048576,
'Gi': 1073741824,
'Ti': 1099511627776,
'Pi': 1125899906842624,
'Ei': 1152921504606846976
}
#: String representation, ex: ``13.37 MiB``, or ``42.0 kB``
format_string = "{value} {unit}"
#: Pluralization behavior
format_plural = False
def os_name():
    """Return ``os.name`` (e.g. ``'posix'`` or ``'nt'``).

    Kept as a separate wrapper so platform-specific code paths can be
    unit tested by patching this single call site.
    """
    return os.name
def capitalize_first(s):
    """Capitalize ONLY the first letter of the input `s`.

    Unlike ``str.capitalize`` the remainder of the string is left
    untouched (e.g. ``'kB' -> 'KB'``).

    * returns a copy of input `s` with the first letter capitalized

    An empty input is returned unchanged instead of raising
    ``IndexError`` (robustness fix; previously ``s[0]`` raised on '').
    """
    return s[:1].upper() + s[1:]
######################################################################
# Base class for everything else
class Bitmath(object):
"""The base class for all the other prefix classes"""
# All the allowed input types
valid_types = (int, float, long)
def __init__(self, value=0, bytes=None, bits=None):
    """Instantiate with `value` by the unit, in plain bytes, or
    bits. Don't supply more than one keyword.

    default behavior: initialize with value of 0
    only setting value: assert bytes is None and bits is None
    only setting bytes: assert value == 0 and bits is None
    only setting bits: assert value == 0 and bytes is None

    :raises ValueError: if more than one of value/bytes/bits is supplied
    """
    # Exactly one of value/bytes/bits may be set; detect mixing.
    _raise = False
    if (value == 0) and (bytes is None) and (bits is None):
        pass
    # Setting by bytes
    elif bytes is not None:
        if (value == 0) and (bits is None):
            pass
        else:
            _raise = True
    # setting by bits
    elif bits is not None:
        if (value == 0) and (bytes is None):
            pass
        else:
            _raise = True
    if _raise:
        raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
    # Fetch base/power/names from the concrete subclass.
    self._do_setup()
    if bytes:
        # We were provided with the fundamental base unit, no need
        # to normalize
        self._byte_value = bytes
        self._bit_value = bytes * 8.0
    elif bits:
        # We were *ALMOST* given the fundamental base
        # unit. Translate it into the fundamental unit then
        # normalize.
        self._byte_value = bits / 8.0
        self._bit_value = bits
    else:
        # We were given a value representative of this *prefix
        # unit*. We need to normalize it into the number of bytes
        # it represents.
        self._norm(value)
    # We have the fundamental unit figured out. Set the 'pretty' unit
    self._set_prefix_value()
def _set_prefix_value(self):
    # Cache the quantity expressed in this instance's own prefix unit.
    self.prefix_value = self._to_prefix_value(self._byte_value)
def _to_prefix_value(self, value):
    """Return the number of bits/bytes as they would look like if we
    converted *to* this unit"""
    return value / float(self._unit_value)
def _setup(self):
    # Subclasses must override and return the 4-tuple
    # (base, power, name_singular, name_plural).
    raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")
def _do_setup(self):
    """Setup basic parameters for this class.

    `base` is the numeric base which when raised to `power` is equivalent
    to 1 unit of the corresponding prefix. I.e., base=2, power=10
    represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.

    Likewise, for the SI prefix classes `base` will be 10, and the `power`
    for the Kilobyte is 3.
    """
    (self._base, self._power, self._name_singular, self._name_plural) = self._setup()
    # Bytes represented by one unit of this prefix.
    self._unit_value = self._base ** self._power
def _norm(self, value):
    """Normalize the input value into the fundamental unit for this prefix
    type.

    :param number value: The input value to be normalized
    :raises ValueError: if the input value is not a type of real number
    """
    if isinstance(value, self.valid_types):
        self._byte_value = value * self._unit_value
        self._bit_value = self._byte_value * 8.0
    else:
        raise ValueError("Initialization value '%s' is of an invalid type: %s. "
                         "Must be one of %s" % (
                             value,
                             type(value),
                             ", ".join(str(x) for x in self.valid_types)))
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
def system(self):
    """The system of units used to measure an instance"""
    if self._base == 2:
        return "NIST"
    if self._base == 10:
        return "SI"
    # Unreachable with the shipped unit classes (base is always 2 or
    # 10); retained as a guard for hypothetical subclasses.
    raise ValueError("Instances mathematical base is an unsupported value: %s" % (
        str(self._base)))
@property
def unit(self):
    """The string that is this instance's prefix unit name in agreement
    with this instance value (singular or plural). Following the
    convention that only 1 is singular. This will always be the singular
    form when :attr:`bitmath.format_plural` is ``False`` (default value).

    For example:

    >>> KiB(1).unit == 'KiB'
    >>> Byte(0).unit == 'Bytes'
    >>> Byte(1).unit == 'Byte'
    >>> Byte(1.1).unit == 'Bytes'
    >>> Gb(2).unit == 'Gbs'
    """
    global format_plural
    if self.prefix_value == 1:
        # If it's a '1', return it singular, no matter what
        return self._name_singular
    elif format_plural:
        # Pluralization requested
        return self._name_plural
    else:
        # Pluralization NOT requested, and the value is not 1
        return self._name_singular
@property
def unit_plural(self):
    """The string that is an instance's prefix unit name in the plural
    form.

    For example:

    >>> KiB(1).unit_plural == 'KiB'
    >>> Byte(1024).unit_plural == 'Bytes'
    >>> Gb(1).unit_plural == 'Gb'
    """
    return self._name_plural
@property
def unit_singular(self):
    """The string that is an instance's prefix unit name in the singular
    form.

    For example:

    >>> KiB(1).unit_singular == 'KiB'
    >>> Byte(1024).unit_singular == 'Byte'
    >>> Gb(1).unit_singular == 'Gb'
    """
    return self._name_singular
#: The "prefix" value of an instance
value = property(lambda s: s.prefix_value)
@classmethod
def from_other(cls, item):
    """Factory function to return instances of `item` converted into a new
    instance of ``cls``. Because this is a class method, it may be called
    from any bitmath class object without the need to explicitly
    instantiate the class ahead of time.

    *Implicit Parameter:*

    * ``cls`` A bitmath class, implicitly set to the class of the
      instance object it is called on

    *User Supplied Parameter:*

    * ``item`` A :class:`bitmath.Bitmath` subclass instance

    *Example:*

    >>> import bitmath
    >>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
    >>> print kib
    KiB(1024.0)

    :raises ValueError: if `item` is not a bitmath instance
    """
    if isinstance(item, Bitmath):
        # Conversion is lossless: route through the fundamental bit count.
        return cls(bits=item.bits)
    else:
        raise ValueError("The provided items must be a valid bitmath class: %s" %
                         str(item.__class__))
######################################################################
# The following implement the Python datamodel customization methods
#
# Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
def __repr__(self):
    """Representation of this object as you would expect to see in an
    interpreter (module template ``_FORMAT_REPR``, e.g. ``MiB(13.37)``)"""
    global _FORMAT_REPR
    return self.format(_FORMAT_REPR)
def __str__(self):
    """String representation of this object, honoring the module-level
    ``format_string`` template"""
    global format_string
    return self.format(format_string)
def format(self, fmt):
    """Return a representation of this instance formatted with user
    supplied syntax"""
    # Expose every formatting attribute by name; ``fmt`` may reference
    # any subset of these in str.format mini-language style.
    _attrs = ('base', 'bin', 'binary', 'bits', 'bytes', 'power',
              'system', 'unit', 'unit_plural', 'unit_singular', 'value')
    return fmt.format(**{name: getattr(self, name) for name in _attrs})
##################################################################
# Guess the best human-readable prefix unit for representation
##################################################################
def best_prefix(self, system=None):
    """Optional parameter, `system`, allows you to prefer NIST or SI in
    the results. By default, the current system is used (Bit/Byte default
    to NIST).

    Logic discussion/notes:

    Base-case, does it need converting?

    If the instance is less than one Byte, return the instance as a Bit
    instance.

    Else, begin by recording the unit system the instance is defined
    by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.

    If the instance is not already a ``Byte`` instance, convert it to one.

    NIST units step up by powers of 1024, SI units step up by powers of
    1000.

    Take integer value of the log(base=STEP_POWER) of the instance's byte
    value. E.g.:

    >>> int(math.log(Gb(100).bytes, 1000))
    3

    This will return a value >= 0. The following determines the 'best
    prefix unit' for representation:

    * result == 0, best represented as a Byte
    * result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
    * 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]

    :raises ValueError: if `system` is given but is neither NIST nor SI
    """
    # Use absolute value so we don't return Bit's for *everything*
    # less than Byte(1). From github issue #55
    if abs(self) < Byte(1):
        return Bit.from_other(self)
    else:
        # Normalize to Byte so the log() below operates on bytes.
        if type(self) is Byte: # pylint: disable=unidiomatic-typecheck
            _inst = self
        else:
            _inst = Byte.from_other(self)
        # Which table to consult? Was a preferred system provided?
        if system is None:
            # No preference. Use existing system
            if self.system == 'NIST':
                _STEPS = NIST_PREFIXES
                _BASE = 1024
            elif self.system == 'SI':
                _STEPS = SI_PREFIXES
                _BASE = 1000
            # Anything else would have raised by now
        else:
            # Preferred system provided.
            if system == NIST:
                _STEPS = NIST_PREFIXES
                _BASE = 1024
            elif system == SI:
                _STEPS = SI_PREFIXES
                _BASE = 1000
            else:
                raise ValueError("Invalid value given for 'system' parameter."
                                 " Must be one of NIST or SI")
        # Index of the string of the best prefix in the STEPS list
        _index = int(math.log(abs(_inst.bytes), _BASE))
        # Recall that the log() function returns >= 0. This doesn't
        # map to the STEPS list 1:1. That is to say, 0 is handled with
        # special care. So if the _index is 1, we actually want item 0
        # in the list.
        if _index == 0:
            # Already a Byte() type, so return it.
            return _inst
        elif _index >= len(_STEPS):
            # This is a really big number. Use the biggest prefix we've got
            _best_prefix = _STEPS[-1]
        elif 0 < _index < len(_STEPS):
            # There is an appropriate prefix unit to represent this
            _best_prefix = _STEPS[_index - 1]
        # Dispatch to the matching to_XB conversion method.
        _conversion_method = getattr(
            self,
            'to_%sB' % _best_prefix)
        return _conversion_method()
##################################################################
def to_Bit(self):
    # New Bit instance carrying the same quantity.
    return Bit(self._bit_value)
def to_Byte(self):
    # NIST_STEPS['Byte'] is 1; the division normalizes to a float.
    return Byte(self._byte_value / float(NIST_STEPS['Byte']))
# Properties
Bit = property(lambda s: s.to_Bit())
Byte = property(lambda s: s.to_Byte())
##################################################################
# Kibi/kilo conversions. Every to_X method below returns a *new*
# instance in the target unit (built from the shared bit count); the
# receiving instance is never modified. Each property is sugar for
# calling the matching method.
def to_KiB(self):
    return KiB(bits=self._bit_value)
def to_Kib(self):
    return Kib(bits=self._bit_value)
def to_kB(self):
    return kB(bits=self._bit_value)
def to_kb(self):
    return kb(bits=self._bit_value)
# Properties
KiB = property(lambda s: s.to_KiB())
Kib = property(lambda s: s.to_Kib())
kB = property(lambda s: s.to_kB())
kb = property(lambda s: s.to_kb())
##################################################################
# Mebi/mega conversions
def to_MiB(self):
    return MiB(bits=self._bit_value)
def to_Mib(self):
    return Mib(bits=self._bit_value)
def to_MB(self):
    return MB(bits=self._bit_value)
def to_Mb(self):
    return Mb(bits=self._bit_value)
# Properties
MiB = property(lambda s: s.to_MiB())
Mib = property(lambda s: s.to_Mib())
MB = property(lambda s: s.to_MB())
Mb = property(lambda s: s.to_Mb())
##################################################################
# Gibi/giga conversions
def to_GiB(self):
    return GiB(bits=self._bit_value)
def to_Gib(self):
    return Gib(bits=self._bit_value)
def to_GB(self):
    return GB(bits=self._bit_value)
def to_Gb(self):
    return Gb(bits=self._bit_value)
# Properties
GiB = property(lambda s: s.to_GiB())
Gib = property(lambda s: s.to_Gib())
GB = property(lambda s: s.to_GB())
Gb = property(lambda s: s.to_Gb())
##################################################################
# Tebi/tera conversions
def to_TiB(self):
    return TiB(bits=self._bit_value)
def to_Tib(self):
    return Tib(bits=self._bit_value)
def to_TB(self):
    return TB(bits=self._bit_value)
def to_Tb(self):
    return Tb(bits=self._bit_value)
# Properties
TiB = property(lambda s: s.to_TiB())
Tib = property(lambda s: s.to_Tib())
TB = property(lambda s: s.to_TB())
Tb = property(lambda s: s.to_Tb())
##################################################################
# Pebi/peta conversions
def to_PiB(self):
    return PiB(bits=self._bit_value)
def to_Pib(self):
    return Pib(bits=self._bit_value)
def to_PB(self):
    return PB(bits=self._bit_value)
def to_Pb(self):
    return Pb(bits=self._bit_value)
# Properties
PiB = property(lambda s: s.to_PiB())
Pib = property(lambda s: s.to_Pib())
PB = property(lambda s: s.to_PB())
Pb = property(lambda s: s.to_Pb())
##################################################################
# Exbi/exa conversions
def to_EiB(self):
    return EiB(bits=self._bit_value)
def to_Eib(self):
    return Eib(bits=self._bit_value)
def to_EB(self):
    return EB(bits=self._bit_value)
def to_Eb(self):
    return Eb(bits=self._bit_value)
# Properties
EiB = property(lambda s: s.to_EiB())
Eib = property(lambda s: s.to_Eib())
EB = property(lambda s: s.to_EB())
Eb = property(lambda s: s.to_Eb())
##################################################################
# The SI units go beyond the NIST units. They also have the Zetta
# and Yotta prefixes.
def to_ZB(self):
    return ZB(bits=self._bit_value)
def to_Zb(self):
    return Zb(bits=self._bit_value)
# Properties
ZB = property(lambda s: s.to_ZB())
Zb = property(lambda s: s.to_Zb())
##################################################################
def to_YB(self):
    return YB(bits=self._bit_value)
def to_Yb(self):
    return Yb(bits=self._bit_value)
#: A new object representing this instance as a Yottabyte
YB = property(lambda s: s.to_YB())
#: A new object representing this instance as a Yottabit
Yb = property(lambda s: s.to_Yb())
##################################################################
# Rich comparison operations
##################################################################
# NOTE: when `other` is a plain number the comparison is against
# ``prefix_value`` (i.e. measured in *this instance's own unit*),
# whereas two bitmath instances are compared by their normalized
# byte counts.
def __lt__(self, other):
    if isinstance(other, numbers.Number):
        return self.prefix_value < other
    else:
        return self._byte_value < other.bytes
def __le__(self, other):
    if isinstance(other, numbers.Number):
        return self.prefix_value <= other
    else:
        return self._byte_value <= other.bytes
def __eq__(self, other):
    if isinstance(other, numbers.Number):
        return self.prefix_value == other
    else:
        return self._byte_value == other.bytes
def __ne__(self, other):
    if isinstance(other, numbers.Number):
        return self.prefix_value != other
    else:
        return self._byte_value != other.bytes
def __gt__(self, other):
    if isinstance(other, numbers.Number):
        return self.prefix_value > other
    else:
        return self._byte_value > other.bytes
def __ge__(self, other):
    if isinstance(other, numbers.Number):
        return self.prefix_value >= other
    else:
        return self._byte_value >= other.bytes
##################################################################
# Basic math operations
##################################################################
# Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
"""These methods are called to implement the binary arithmetic
operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
instance, to evaluate the expression x + y, where x is an instance of
a class that has an __add__() method, x.__add__(y) is called. The
__divmod__() method should be the equivalent to using __floordiv__()
and __mod__(); it should not be related to __truediv__() (described
below). Note that __pow__() should be defined to accept an optional
third argument if the ternary version of the built-in pow() function
is to be supported.object.__complex__(self)
"""
def __add__(self, other):
"""Supported operations with result types:
- bm + bm = bm
- bm + num = num
- num + bm = num (see radd)
"""
if isinstance(other, numbers.Number):
# bm + num
return other + self.value
else:
# bm + bm
total_bytes = self._byte_value + other.bytes
return (type(self))(bytes=total_bytes)
def __sub__(self, other):
"""Subtraction: Supported operations with result types:
- bm - bm = bm
- bm - num = num
- num - bm = num (see rsub)
"""
if isinstance(other, numbers.Number):
# bm - num
return self.value - other
else:
# bm - bm
total_bytes = self._byte_value - other.bytes
return (type(self))(bytes=total_bytes)
def __mul__(self, other):
    """Multiplication. Result types:

    - bm1 * bm2 -> bm1 (byte values multiplied together)
    - bm * num  -> bm
    - num * bm  -> num (handled by __rmul__)
    """
    if isinstance(other, numbers.Number):
        # bm * num -> scale the byte value, keep this type
        return (type(self))(bytes=self._byte_value * other)
    # bm1 * bm2 -> product of both operands' byte counts, as this type
    rhs_bytes = other.value * other.base ** other.power
    lhs_bytes = self.prefix_value * self._base ** self._power
    return (type(self))(bytes=rhs_bytes * lhs_bytes)
"""The division operator (/) is implemented by these methods. The
__truediv__() method is used when __future__.division is in effect,
otherwise __div__() is used. If only one of these two methods is
defined, the object will not support division in the alternate
context; TypeError will be raised instead."""
def __div__(self, other):
    """Division. Result types:

    - bm1 / bm2 -> num (ratio of byte values)
    - bm / num  -> bm
    - num / bm  -> num (handled by __rdiv__)
    """
    if isinstance(other, numbers.Number):
        # bm / num -> scale the byte value down, keep this type
        return (type(self))(bytes=self._byte_value / other)
    # bm1 / bm2 -> plain (float) ratio
    return self._byte_value / float(other.bytes)
def __truediv__(self, other):
    """True division (Python 3 / ``__future__.division``) — identical
    semantics to :meth:`__div__`."""
    return self.__div__(other)
# def __floordiv__(self, other):
# return NotImplemented
# def __mod__(self, other):
# return NotImplemented
# def __divmod__(self, other):
# return NotImplemented
# def __pow__(self, other, modulo=None):
# return NotImplemented
##################################################################
"""These methods are called to implement the binary arithmetic
operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
reflected (swapped) operands. These functions are only called if the
left operand does not support the corresponding operation and the
operands are of different types. [2] For instance, to evaluate the
expression x - y, where y is an instance of a class that has an
__rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
NotImplemented.
These are the add/sub/mul/div methods for syntax where a number type
is given for the LTYPE and a bitmath object is given for the
RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
"""
def __radd__(self, other):
    """Reflected addition: num + bm -> num."""
    return other + self.value
def __rsub__(self, other):
    """Reflected subtraction: num - bm -> num."""
    return other - self.value
def __rmul__(self, other):
    """Reflected multiplication: num * bm -> bm (delegates to __mul__)."""
    return self * other
def __rdiv__(self, other):
    """Reflected division: num / bm -> num."""
    return other / float(self.value)
def __rtruediv__(self, other):
    """Reflected true division: num / bm -> num."""
    return other / float(self.value)
"""Called to implement the built-in functions complex(), int(),
long(), and float(). Should return a value of the appropriate type.
If one of those methods does not support the operation with the
supplied arguments, it should return NotImplemented.
For bitmath purposes, these methods return the int/long/float
equivalent of the this instances prefix Unix value. That is to say:
- int(KiB(3.336)) would return 3
- long(KiB(3.336)) would return 3L
- float(KiB(3.336)) would return 3.336
"""
def __int__(self):
    """This instance's prefix unit value truncated to an integer,
    e.g. int(KiB(3.336)) == 3."""
    return int(self.prefix_value)
def __long__(self):
    """This instance's prefix unit value as a long (Python 2 only)."""
    return long(self.prefix_value)  # pragma: PY3X no cover
def __float__(self):
    """This instance's prefix unit value as a float,
    e.g. float(KiB(3.336)) == 3.336."""
    return float(self.prefix_value)
##################################################################
# Bitwise operations
##################################################################
def __lshift__(self, other):
    """Left shift, ex: 100 << 2

    Shifting left by n bits multiplies the bit count by pow(2, n);
    returns a new instance of this type.
    """
    return type(self)(bits=int(self.bits) << other)
def __rshift__(self, other):
    """Right shift, ex: 100 >> 2

    Shifting right by n bits divides the bit count by pow(2, n);
    returns a new instance of this type.
    """
    return type(self)(bits=int(self.bits) >> other)
def __and__(self, other):
    """Bitwise AND, ex: 100 & 2

    Each output bit is 1 only where the corresponding bits of both
    operands are 1. Returns a new instance of this type.
    """
    return type(self)(bits=int(self.bits) & other)
def __xor__(self, other):
    """Bitwise exclusive-or, ex: 100 ^ 2

    Each output bit copies the bit of x where the bit of y is 0, and
    is its complement where the bit of y is 1. Returns a new instance
    of this type.
    """
    return type(self)(bits=int(self.bits) ^ other)
def __or__(self, other):
    """Bitwise or, ex: 100 | 2

    Does a "bitwise or". Each bit of the output is 0 if the corresponding
    bit of x AND of y is 0, otherwise it's 1. Returns a new instance of
    this type.
    """
    # Renamed the local from ``ord`` — it shadowed the ``ord`` builtin.
    ored = int(self.bits) | other
    return type(self)(bits=ored)
##################################################################
def __neg__(self):
    """The negative version of this instance (always -abs(value))."""
    return (type(self))(-abs(self.prefix_value))
def __pos__(self):
    """The positive (absolute-valued) version of this instance."""
    return (type(self))(abs(self.prefix_value))
def __abs__(self):
    """The absolute value of this instance, as a new instance."""
    return (type(self))(abs(self.prefix_value))
# def __invert__(self):
# """Called to implement the unary arithmetic operations (-, +, abs()
# and ~)."""
# return NotImplemented
######################################################################
# First, the bytes...
class Byte(Bitmath):
    """The fundamental byte unit.

    Byte-based types normalize their input into ``self._byte_value``
    via the inherited ``Bitmath._norm``. (The old docstring claimed
    ``_bit_value``; that is the ``Bit`` family's fundamental unit.)
    """
    def _setup(self):
        # (base, power, singular name, plural name): 2**0 == 1 Byte
        return (2, 0, 'Byte', 'Bytes')
######################################################################
# NIST Prefixes for Byte based types
#
# Binary (base-2) byte units. Each ``_setup`` returns
# (base, power, singular name, plural name), so KiB == 2**10 Bytes,
# MiB == 2**20 Bytes, ... EiB == 2**60 Bytes.
# The *io names (Kio, Mio, ...) are octet-spelling aliases.
class KiB(Byte):
    def _setup(self):
        return (2, 10, 'KiB', 'KiBs')
Kio = KiB
class MiB(Byte):
    def _setup(self):
        return (2, 20, 'MiB', 'MiBs')
Mio = MiB
class GiB(Byte):
    def _setup(self):
        return (2, 30, 'GiB', 'GiBs')
Gio = GiB
class TiB(Byte):
    def _setup(self):
        return (2, 40, 'TiB', 'TiBs')
Tio = TiB
class PiB(Byte):
    def _setup(self):
        return (2, 50, 'PiB', 'PiBs')
Pio = PiB
class EiB(Byte):
    def _setup(self):
        return (2, 60, 'EiB', 'EiBs')
Eio = EiB
######################################################################
# SI Prefixes for Byte based types
#
# Decimal (base-10) byte units. Each ``_setup`` returns
# (base, power, singular name, plural name), so kB == 10**3 Bytes,
# MB == 10**6 Bytes, ... YB == 10**24 Bytes.
# The *o names (ko, Mo, ...) are octet-spelling aliases.
class kB(Byte):
    def _setup(self):
        return (10, 3, 'kB', 'kBs')
ko = kB
class MB(Byte):
    def _setup(self):
        return (10, 6, 'MB', 'MBs')
Mo = MB
class GB(Byte):
    def _setup(self):
        return (10, 9, 'GB', 'GBs')
Go = GB
class TB(Byte):
    def _setup(self):
        return (10, 12, 'TB', 'TBs')
To = TB
class PB(Byte):
    def _setup(self):
        return (10, 15, 'PB', 'PBs')
Po = PB
class EB(Byte):
    def _setup(self):
        return (10, 18, 'EB', 'EBs')
Eo = EB
class ZB(Byte):
    def _setup(self):
        return (10, 21, 'ZB', 'ZBs')
Zo = ZB
class YB(Byte):
    def _setup(self):
        return (10, 24, 'YB', 'YBs')
Yo = YB
######################################################################
# And now the bit types
class Bit(Bitmath):
    """Bit based types fundamentally operate on self._bit_value"""
    def _set_prefix_value(self):
        # Display/value is derived from the bit count, not the byte count.
        self.prefix_value = self._to_prefix_value(self._bit_value)
    def _setup(self):
        # (base, power, singular name, plural name): 2**0 == 1 Bit
        return (2, 0, 'Bit', 'Bits')
    def _norm(self, value):
        """Normalize the input value into the fundamental unit for this prefix
        type"""
        # Inputs count bits here; bytes are derived by dividing by 8.
        self._bit_value = value * self._unit_value
        self._byte_value = self._bit_value / 8.0
######################################################################
# NIST Prefixes for Bit based types
#
# Binary (base-2) bit units: Kib == 2**10 Bits, ... Eib == 2**60 Bits.
class Kib(Bit):
    def _setup(self):
        return (2, 10, 'Kib', 'Kibs')
class Mib(Bit):
    def _setup(self):
        return (2, 20, 'Mib', 'Mibs')
class Gib(Bit):
    def _setup(self):
        return (2, 30, 'Gib', 'Gibs')
class Tib(Bit):
    def _setup(self):
        return (2, 40, 'Tib', 'Tibs')
class Pib(Bit):
    def _setup(self):
        return (2, 50, 'Pib', 'Pibs')
class Eib(Bit):
    def _setup(self):
        return (2, 60, 'Eib', 'Eibs')
######################################################################
# SI Prefixes for Bit based types
#
# Decimal (base-10) bit units: kb == 10**3 Bits, ... Yb == 10**24 Bits.
class kb(Bit):
    def _setup(self):
        return (10, 3, 'kb', 'kbs')
class Mb(Bit):
    def _setup(self):
        return (10, 6, 'Mb', 'Mbs')
class Gb(Bit):
    def _setup(self):
        return (10, 9, 'Gb', 'Gbs')
class Tb(Bit):
    def _setup(self):
        return (10, 12, 'Tb', 'Tbs')
class Pb(Bit):
    def _setup(self):
        return (10, 15, 'Pb', 'Pbs')
class Eb(Bit):
    def _setup(self):
        return (10, 18, 'Eb', 'Ebs')
class Zb(Bit):
    def _setup(self):
        return (10, 21, 'Zb', 'Zbs')
class Yb(Bit):
    def _setup(self):
        return (10, 24, 'Yb', 'Ybs')
######################################################################
# Utility functions
def best_prefix(bytes, system=NIST):
    """Return ``bytes`` converted into its most human-readable prefix unit.

    ``bytes`` may be a plain number of bytes or any bitmath instance.
    Choose a preferred unit system with the ``system`` keyword:
    ``bitmath.NIST`` (default) or ``bitmath.SI``.

    Basically a shortcut for:

    >>> import bitmath
    >>> best = bitmath.Byte(12345).best_prefix()
    """
    byte_count = bytes.bytes if isinstance(bytes, Bitmath) else bytes
    return Byte(byte_count).best_prefix(system=system)
def query_device_capacity(device_fd):
    """Return the capacity of a system block device as a :class:`bitmath.Byte`.

    Issues the platform-appropriate ioctl request(s) against the open
    device file ``device_fd`` (as in
    ``query_device_capacity(open("/dev/sda"))``) and combines the raw
    results into a final byte count.

    Only POSIX platforms are supported: raises ``NotImplementedError``
    elsewhere, and ``ValueError`` if ``device_fd`` does not refer to a
    block device.

    Thanks to the following resources for the Linux/Mac ioctls:

    * http://stackoverflow.com/a/12925285/263969
    * http://stackoverflow.com/a/9764508/263969
    """
    if os_name() != 'posix':
        raise NotImplementedError("'bitmath.query_device_capacity' is not supported on this platform: %s" % os_name())
    mode = os.stat(device_fd.name).st_mode
    if not stat.S_ISBLK(mode):
        raise ValueError("The file descriptor provided is not of a device type")
    # Keys correlate to possible ``platform.system()`` values. Each
    # entry lists the ioctl requests to issue as
    # (PARAM_NAME, struct format char, request code) tuples, plus a
    # 'func' that folds the unpacked results into a byte count.
    ioctl_map = {
        "Linux": {
            # BLKGETSIZE64 (per <linux/fs.h>) returns the capacity in
            # bytes directly as a u64 — native format char 'L'
            # (8 == struct.calcsize('L') on conforming kernels; please
            # file a bug at https://github.com/tbielawa/bitmath/issues/new
            # if this does *not* hold for you).
            "request_params": [
                ("BLKGETSIZE64", "L", 0x80081272)
            ],
            # The request already yields bytes; no further math needed.
            "func": lambda x: x["BLKGETSIZE64"]
        },
        "Darwin": {
            # OS X has no direct BLKGETSIZE64 equivalent. Per
            # <sys/disk.h> we fetch the media's block count (uint64_t,
            # format 'L') and block size (uint32_t, format 'I') and
            # multiply:  capacity (bytes) = n Blocks * y Bytes/Block
            "request_params": [
                ("DKIOCGETBLOCKCOUNT", "L", 0x40086419),
                ("DKIOCGETBLOCKSIZE", "I", 0x40046418)
            ],
            "func": lambda x: x["DKIOCGETBLOCKCOUNT"] * x["DKIOCGETBLOCKSIZE"]
        }
    }
    platform_params = ioctl_map[platform.system()]
    results = {}
    for param_name, fmt, request_code in platform_params['request_params']:
        # Buffer sized to the native size of this format type; the
        # ioctl writes its result into (a copy of) it.
        buf = ' ' * struct.calcsize(fmt)
        buf = fcntl.ioctl(device_fd.fileno(), request_code, buf)
        # Unpack the raw result into a familiar Python number.
        results[param_name] = struct.unpack(fmt, buf)[0]
    return Byte(platform_params['func'](results))
def getsize(path, bestprefix=True, system=NIST):
    """Return the size of the file at ``path`` as a bitmath instance.

    With ``bestprefix`` True (the default) the result is converted to
    its best human-readable prefix unit under ``system``
    (``bitmath.NIST`` by default, or ``bitmath.SI``). With
    ``bestprefix`` False a plain ``bitmath.Byte`` instance is returned.
    """
    size = Byte(os.path.getsize(os.path.realpath(path)))
    return size.best_prefix(system=system) if bestprefix else size
def parse_string(s):
    """Parse a string such as ``"1024 KiB"`` into a bitmath object.

    Whitespace between the value and the unit is permitted. The unit
    must be a type defined by this module; ``b`` and ``B`` are accepted
    as shorthands for ``Bit`` and ``Byte``.

    :param str s: the string to parse
    :return: a bitmath instance of the detected unit type
    :raises ValueError: if ``s`` is not a string, contains no unit,
        names an unknown unit, or its numeric portion is malformed
    """
    # Strings only please
    if not isinstance(s, (str, unicode)):
        raise ValueError("parse_string only accepts string inputs but a %s was given" %
                         type(s))
    # Locate the first alphabetic character: everything before it is
    # the numeric value, everything from it onward is the unit name.
    try:
        index = list([i.isalpha() for i in s]).index(True)
    except ValueError:
        # No alphabetic characters at all -> no unit to parse
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
    # split the string into the value and the unit
    val, unit = s[:index], s[index:]
    # Resolve the unit name to a class in this module's namespace
    if unit == "b":
        unit_class = Bit
    elif unit == "B":
        unit_class = Byte
    else:
        if not (hasattr(sys.modules[__name__], unit) and isinstance(getattr(sys.modules[__name__], unit), type)):
            raise ValueError("The unit %s is not a valid bitmath unit" % unit)
        unit_class = globals()[unit]
    # float() raises ValueError on a malformed number by itself — the
    # previous try/except re-raised the very same exception.
    val = float(val)
    try:
        return unit_class(val)
    except Exception:  # pragma: no cover
        # Narrowed from a bare ``except:`` so KeyboardInterrupt et al.
        # are no longer swallowed into a ValueError.
        raise ValueError("Can't parse string %s into a bitmath object" % s)
def parse_string_unsafe(s, system=SI):
    """Best-effort parse of a string with ambiguous units into a bitmath
    object.

    This may produce inaccurate results when parsing shell output. For
    example ``ls`` may call a 2730 Byte file '2.7K', yet 2730 Bytes ==
    2.73 kB ~= 2.666 KiB. See the documentation for the important
    details.

    Caveats:

    * All inputs are assumed to be byte-based (as opposed to bit based)
    * Bare numbers (no unit) are taken as a count of bytes
    * Single-letter units (k, M, G, etc) are treated as SI (base-10);
      set ``system`` to ``bitmath.NIST`` to change this
    * Units with an 'i' after the leading letter (Ki, Mi, Gi) are
      treated as NIST (base 2)
    * Capitalization does not matter
    """
    if not isinstance(s, (str, unicode)) and \
            not isinstance(s, numbers.Number):
        raise ValueError("parse_string_unsafe only accepts string/number inputs but a %s was given" %
                         type(s))
    # Easy cases first: a raw number, or a number merely masquerading
    # as a string, is simply a count of bytes.
    if isinstance(s, numbers.Number):
        return Byte(s)
    try:
        return Byte(float(s))
    except ValueError:
        # Not a plain number; keep digging.
        pass
    # Split the input into its numeric part and its unit part at the
    # first alphabetic character.
    try:
        split_at = list([c.isalpha() for c in s]).index(True)
    except ValueError:  # pragma: no cover
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
    number, unit = s[:split_at], s[split_at:]
    # Normalize the trailing byte marker: strip any b/B characters and
    # append a single capital 'B' (all inputs are assumed byte-based).
    unit = unit.rstrip('Bb') + 'B'
    # A normalized unit is either 2 characters (SI: kB, GB) or 3
    # characters (NIST: KiB, GiB); anything else is invalid.
    if len(unit) == 2:
        if system == NIST:
            # NIST requested: capitalize the first letter and inject
            # the 'i' marker, e.g. 'kB' -> 'KiB'.
            unit = capitalize_first(unit)
            unit = unit[0] + 'i' + unit[1:]
            unit_class = globals()[unit]
        else:
            # Default SI parsing. Edge case: SI 'thousand' is a
            # lower-case k; every other prefix starts capitalized.
            if unit.startswith('K'):
                unit = unit.replace('K', 'k')
            elif not unit.startswith('k'):
                unit = capitalize_first(unit)
            if unit[0] in SI_PREFIXES:
                unit_class = globals()[unit]
    elif len(unit) == 3:
        unit = capitalize_first(unit)
        if unit[:2] in NIST_PREFIXES:
            unit_class = globals()[unit]
    else:
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    # If no branch above assigned unit_class, the unit was unknown.
    try:
        unit_class
    except UnboundLocalError:
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    return unit_class(float(number))
######################################################################
# Context Managers
@contextlib.contextmanager
def format(fmt_str=None, plural=False, bestprefix=False):
    """Context manager for printing bitmath instances.

    ``fmt_str`` - a formatting mini-language compat formatting string. See
    the @properties (above) for a list of available items.

    ``plural`` - True enables printing instances with 's's if they're
    plural. False (default) prints them as singular (no trailing 's').

    ``bestprefix`` - True enables printing instances in their best
    human-readable representation. False, the default, prints instances
    using their current prefix unit.
    """
    if 'bitmath' not in globals():
        import bitmath
    if plural:
        orig_fmt_plural = bitmath.format_plural
        bitmath.format_plural = True
    if fmt_str:
        orig_fmt_str = bitmath.format_string
        bitmath.format_string = fmt_str
    try:
        yield
    finally:
        # Restore the module-level settings even when the managed body
        # raises; previously an exception here permanently leaked the
        # temporary format settings.
        if plural:
            bitmath.format_plural = orig_fmt_plural
        if fmt_str:
            bitmath.format_string = orig_fmt_str
def cli_script_main(cli_args):
    """
    A command line interface to basic bitmath operations.

    Parses ``cli_args`` (argv minus the program name) and returns a
    list of converted bitmath instances, one per input size.
    """
    parser = argparse.ArgumentParser(
        description='Converts from one type of size to another.')
    parser.add_argument('--from-stdin', default=False, action='store_true',
                        help='Reads number from stdin rather than the cli')
    parser.add_argument(
        '-f', '--from', choices=ALL_UNIT_TYPES, nargs=1,
        type=str, dest='fromunit', default=['Byte'],
        help='Input type you are converting from. Defaultes to Byte.')
    parser.add_argument(
        '-t', '--to', choices=ALL_UNIT_TYPES, required=False, nargs=1, type=str,
        help=('Input type you are converting to. '
              'Attempts to detect best result if omitted.'), dest='tounit')
    parser.add_argument(
        'size', nargs='*', type=float,
        help='The number to convert.')
    args = parser.parse_args(cli_args)
    # Not sure how to cover this with tests, or if the functionality
    # will remain in this form long enough for it to make writing a
    # test worth the effort.
    if args.from_stdin:  # pragma: no cover
        args.size = [float(sys.stdin.readline()[:-1])]
    results = []
    for size in args.size:
        source_type = getattr(__import__(
            'bitmath', fromlist=['True']), args.fromunit[0])
        instance = source_type(size)
        # Convert to the requested unit, or fall back to best_prefix()
        if args.tounit:
            converted = getattr(instance, args.tounit[0])
        else:
            converted = instance.best_prefix()
        results.append(converted)
    return results
def cli_script():  # pragma: no cover
    # Wrapper around cli_script_main so we can unittest the command
    # line functionality
    #
    # Prints one converted result per size given on the command line.
    for result in cli_script_main(sys.argv[1:]):
        print(result)

if __name__ == '__main__':
    cli_script()
|
tbielawa/bitmath
|
bitmath/__init__.py
|
parse_string
|
python
|
def parse_string(s):
# Strings only please
if not isinstance(s, (str, unicode)):
raise ValueError("parse_string only accepts string inputs but a %s was given" %
type(s))
# get the index of the first alphabetic character
try:
index = list([i.isalpha() for i in s]).index(True)
except ValueError:
# If there's no alphabetic characters we won't be able to .index(True)
raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
# split the string into the value and the unit
val, unit = s[:index], s[index:]
# see if the unit exists as a type in our namespace
if unit == "b":
unit_class = Bit
elif unit == "B":
unit_class = Byte
else:
if not (hasattr(sys.modules[__name__], unit) and isinstance(getattr(sys.modules[__name__], unit), type)):
raise ValueError("The unit %s is not a valid bitmath unit" % unit)
unit_class = globals()[unit]
try:
val = float(val)
except ValueError:
raise
try:
return unit_class(val)
except: # pragma: no cover
raise ValueError("Can't parse string %s into a bitmath object" % s)
|
Parse a string with units and try to make a bitmath object out of
it.
String inputs may include whitespace characters between the value and
the unit.
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L1394-L1434
| null |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright © 2014-2016 Tim Bielawa <timbielawa@gmail.com>
# See GitHub Contributors Graph for more information
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sub-license, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=bad-continuation,missing-docstring,invalid-name,line-too-long
"""Reference material:
The bitmath homepage is located at:
* http://bitmath.readthedocs.io/en/latest/
Prefixes for binary multiples:
http://physics.nist.gov/cuu/Units/binary.html
decimal and binary prefixes:
man 7 units (from the Linux Documentation Project 'man-pages' package)
BEFORE YOU GET HASTY WITH EXCLUDING CODE FROM COVERAGE: If you
absolutely need to skip code coverage because of a strange Python 2.x
vs 3.x thing, use the fancy environment substitution stuff from the
.coverage RC file. In review:
* If you *NEED* to skip a statement because of Python 2.x issues add the following::
# pragma: PY2X no cover
* If you *NEED* to skip a statement because of Python 3.x issues add the following::
# pragma: PY3X no cover
In this configuration, statements which are skipped in 2.x are still
covered in 3.x, and the reverse holds true for tests skipped in 3.x.
"""
from __future__ import print_function
import argparse
import contextlib
import fnmatch
import math
import numbers
import os
import os.path
import platform
import sys
# For device capacity reading in query_device_capacity(). Only supported
# on posix systems for now. Will be addressed in issue #52 on GitHub.
if os.name == 'posix':
import stat
import fcntl
import struct
__all__ = ['Bit', 'Byte', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB',
'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'Kib',
'Mib', 'Gib', 'Tib', 'Pib', 'Eib', 'kb', 'Mb', 'Gb', 'Tb',
'Pb', 'Eb', 'Zb', 'Yb', 'getsize', 'listdir', 'format',
'format_string', 'format_plural', 'parse_string', 'parse_string_unsafe',
'ALL_UNIT_TYPES', 'NIST', 'NIST_PREFIXES', 'NIST_STEPS',
'SI', 'SI_PREFIXES', 'SI_STEPS']
# Python 3.x compat
# Python 2's ``long`` and ``unicode`` builtins vanished in Python 3;
# alias them so the rest of the module can reference them unconditionally.
if sys.version > '3':
    long = int  # pragma: PY2X no cover
    unicode = str  # pragma: PY2X no cover
#: A list of all the valid prefix unit types. Mostly for reference,
#: also used by the CLI tool as valid types
ALL_UNIT_TYPES = ['Bit', 'Byte', 'kb', 'kB', 'Mb', 'MB', 'Gb', 'GB', 'Tb',
                  'TB', 'Pb', 'PB', 'Eb', 'EB', 'Zb', 'ZB', 'Yb',
                  'YB', 'Kib', 'KiB', 'Mib', 'MiB', 'Gib', 'GiB',
                  'Tib', 'TiB', 'Pib', 'PiB', 'Eib', 'EiB']
# #####################################################################
# Set up our module variables/constants
###################################
# Internal:
# Console repr(), ex: MiB(13.37), or kB(42.0)
_FORMAT_REPR = '{unit_singular}({value})'
# ##################################
# Exposed:
#: Constants for referring to NIST prefix system
NIST = int(2)
#: Constants for referring to SI prefix system
SI = int(10)
# ##################################
#: All of the SI prefixes
SI_PREFIXES = ['k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
#: Byte values represented by each SI prefix unit
SI_STEPS = {
    'Bit': 1 / 8.0,
    'Byte': 1,
    'k': 1000,
    'M': 1000000,
    'G': 1000000000,
    'T': 1000000000000,
    'P': 1000000000000000,
    'E': 1000000000000000000,
    'Z': 1000000000000000000000,
    'Y': 1000000000000000000000000
}
#: All of the NIST prefixes
NIST_PREFIXES = ['Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei']
#: Byte values represented by each NIST prefix unit
NIST_STEPS = {
    'Bit': 1 / 8.0,
    'Byte': 1,
    'Ki': 1024,
    'Mi': 1048576,
    'Gi': 1073741824,
    'Ti': 1099511627776,
    'Pi': 1125899906842624,
    'Ei': 1152921504606846976
}
#: String representation, ex: ``13.37 MiB``, or ``42.0 kB``
format_string = "{value} {unit}"
#: Pluralization behavior
format_plural = False
def os_name():
    """Return ``os.name``; the indirection makes platform-specific code
    easy to stub out in unit tests."""
    return os.name
def capitalize_first(s):
    """Capitalize ONLY the first letter of the input `s`.

    Unlike ``str.capitalize``, the remainder of the string is left
    untouched.

    :param str s: the string to transform
    :return: a copy of input `s` with the first letter capitalized; an
        empty string is returned unchanged (previously this raised
        ``IndexError``)
    """
    if not s:
        return s
    return s[0].upper() + s[1:]
######################################################################
# Base class for everything else
class Bitmath(object):
"""The base class for all the other prefix classes"""
# All the allowed input types
valid_types = (int, float, long)
def __init__(self, value=0, bytes=None, bits=None):
"""Instantiate with `value` by the unit, in plain bytes, or
bits. Don't supply more than one keyword.
default behavior: initialize with value of 0
only setting value: assert bytes is None and bits is None
only setting bytes: assert value == 0 and bits is None
only setting bits: assert value == 0 and bytes is None
"""
_raise = False
if (value == 0) and (bytes is None) and (bits is None):
pass
# Setting by bytes
elif bytes is not None:
if (value == 0) and (bits is None):
pass
else:
_raise = True
# setting by bits
elif bits is not None:
if (value == 0) and (bytes is None):
pass
else:
_raise = True
if _raise:
raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
self._do_setup()
if bytes:
# We were provided with the fundamental base unit, no need
# to normalize
self._byte_value = bytes
self._bit_value = bytes * 8.0
elif bits:
# We were *ALMOST* given the fundamental base
# unit. Translate it into the fundamental unit then
# normalize.
self._byte_value = bits / 8.0
self._bit_value = bits
else:
# We were given a value representative of this *prefix
# unit*. We need to normalize it into the number of bytes
# it represents.
self._norm(value)
# We have the fundamental unit figured out. Set the 'pretty' unit
self._set_prefix_value()
def _set_prefix_value(self):
self.prefix_value = self._to_prefix_value(self._byte_value)
def _to_prefix_value(self, value):
"""Return the number of bits/bytes as they would look like if we
converted *to* this unit"""
return value / float(self._unit_value)
def _setup(self):
raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")
def _do_setup(self):
"""Setup basic parameters for this class.
`base` is the numeric base which when raised to `power` is equivalent
to 1 unit of the corresponding prefix. I.e., base=2, power=10
represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.
Likewise, for the SI prefix classes `base` will be 10, and the `power`
for the Kilobyte is 3.
"""
(self._base, self._power, self._name_singular, self._name_plural) = self._setup()
self._unit_value = self._base ** self._power
def _norm(self, value):
"""Normalize the input value into the fundamental unit for this prefix
type.
:param number value: The input value to be normalized
:raises ValueError: if the input value is not a type of real number
"""
if isinstance(value, self.valid_types):
self._byte_value = value * self._unit_value
self._bit_value = self._byte_value * 8.0
else:
raise ValueError("Initialization value '%s' is of an invalid type: %s. "
"Must be one of %s" % (
value,
type(value),
", ".join(str(x) for x in self.valid_types)))
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
def system(self):
"""The system of units used to measure an instance"""
if self._base == 2:
return "NIST"
elif self._base == 10:
return "SI"
else:
# I don't expect to ever encounter this logic branch, but
# hey, it's better to have extra test coverage than
# insufficient test coverage.
raise ValueError("Instances mathematical base is an unsupported value: %s" % (
str(self._base)))
    @property
    def unit(self):
        """The string that is this instances prefix unit name in agreement
        with this instance value (singular or plural). Following the
        convention that only 1 is singular. This will always be the singular
        form when :attr:`bitmath.format_plural` is ``False`` (default value).
        For example:
        >>> KiB(1).unit == 'KiB'
        >>> Byte(0).unit == 'Bytes'
        >>> Byte(1).unit == 'Byte'
        >>> Byte(1.1).unit == 'Bytes'
        >>> Gb(2).unit == 'Gbs'
        """
        # Depends on the module-level ``format_plural`` toggle.
        global format_plural
        if self.prefix_value == 1:
            # If it's a '1', return it singular, no matter what
            return self._name_singular
        elif format_plural:
            # Pluralization requested
            return self._name_plural
        else:
            # Pluralization NOT requested, and the value is not 1
            return self._name_singular
    @property
    def unit_plural(self):
        """The string that is an instances prefix unit name in the plural
        form.
        For example:
        >>> KiB(1).unit_plural == 'KiB'
        >>> Byte(1024).unit_plural == 'Bytes'
        >>> Gb(1).unit_plural == 'Gb'
        """
        return self._name_plural
    @property
    def unit_singular(self):
        """The string that is an instances prefix unit name in the singular
        form.
        For example:
        >>> KiB(1).unit_singular == 'KiB'
        >>> Byte(1024).unit == 'B'
        >>> Gb(1).unit_singular == 'Gb'
        """
        return self._name_singular
    #: The "prefix" value of an instance (e.g. the 42 in KiB(42))
    value = property(lambda s: s.prefix_value)
    @classmethod
    def from_other(cls, item):
        """Factory function to return instances of `item` converted into a new
        instance of ``cls``. Because this is a class method, it may be called
        from any bitmath class object without the need to explicitly
        instantiate the class ahead of time.
        *Implicit Parameter:*
        * ``cls`` A bitmath class, implicitly set to the class of the
        instance object it is called on
        *User Supplied Parameter:*
        * ``item`` A :class:`bitmath.Bitmath` subclass instance
        *Example:*
        >>> import bitmath
        >>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
        >>> print kib
        KiB(1024.0)
        :raises ValueError: if ``item`` is not a bitmath instance
        """
        # Conversion goes through the bit count, the common denominator
        # of every bitmath type.
        if isinstance(item, Bitmath):
            return cls(bits=item.bits)
        else:
            raise ValueError("The provided items must be a valid bitmath class: %s" %
                             str(item.__class__))
    ######################################################################
    # The following implement the Python datamodel customization methods
    #
    # Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
    def __repr__(self):
        """Representation of this object as you would expect to see in an
        interpreter, rendered via the module-level _FORMAT_REPR template."""
        global _FORMAT_REPR
        return self.format(_FORMAT_REPR)
    def __str__(self):
        """String representation of this object, rendered via the
        module-level ``format_string`` template."""
        global format_string
        return self.format(format_string)
    def format(self, fmt):
        """Return a representation of this instance formatted with user
        supplied syntax.
        ``fmt`` is a str.format() mini-language template; the available
        replacement fields are the keys of ``_fmt_params`` below.
        """
        _fmt_params = {
            'base': self.base,
            'bin': self.bin,
            'binary': self.binary,
            'bits': self.bits,
            'bytes': self.bytes,
            'power': self.power,
            'system': self.system,
            'unit': self.unit,
            'unit_plural': self.unit_plural,
            'unit_singular': self.unit_singular,
            'value': self.value
        }
        return fmt.format(**_fmt_params)
    ##################################################################
    # Guess the best human-readable prefix unit for representation
    ##################################################################
    def best_prefix(self, system=None):
        """Return this instance converted to its best human-readable prefix
        unit.
        Optional parameter, `system`, allows you to prefer NIST or SI in
        the results. By default, the current system is used (Bit/Byte default
        to NIST).
        Logic discussion/notes:
        Base-case, does it need converting?
        If the instance is less than one Byte, return the instance as a Bit
        instance.
        Else, begin by recording the unit system the instance is defined
        by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.
        If the instance is not already a ``Byte`` instance, convert it to one.
        NIST units step up by powers of 1024, SI units step up by powers of
        1000.
        Take integer value of the log(base=STEP_POWER) of the instance's byte
        value. E.g.:
        >>> int(math.log(Gb(100).bytes, 1000))
        3
        This will return a value >= 0. The following determines the 'best
        prefix unit' for representation:
        * result == 0, best represented as a Byte
        * result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
        * 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
        :raises ValueError: if ``system`` is given but is neither NIST nor SI
        """
        # Use absolute value so we don't return Bit's for *everything*
        # less than Byte(1). From github issue #55
        if abs(self) < Byte(1):
            return Bit.from_other(self)
        else:
            if type(self) is Byte:  # pylint: disable=unidiomatic-typecheck
                _inst = self
            else:
                _inst = Byte.from_other(self)
            # Which table to consult? Was a preferred system provided?
            if system is None:
                # No preference. Use existing system
                if self.system == 'NIST':
                    _STEPS = NIST_PREFIXES
                    _BASE = 1024
                elif self.system == 'SI':
                    _STEPS = SI_PREFIXES
                    _BASE = 1000
                # Anything else would have raised by now
            else:
                # Preferred system provided.
                if system == NIST:
                    _STEPS = NIST_PREFIXES
                    _BASE = 1024
                elif system == SI:
                    _STEPS = SI_PREFIXES
                    _BASE = 1000
                else:
                    raise ValueError("Invalid value given for 'system' parameter."
                                     " Must be one of NIST or SI")
            # Index of the string of the best prefix in the STEPS list
            _index = int(math.log(abs(_inst.bytes), _BASE))
            # Recall that the log() function returns >= 0. This doesn't
            # map to the STEPS list 1:1. That is to say, 0 is handled with
            # special care. So if the _index is 1, we actually want item 0
            # in the list.
            if _index == 0:
                # Already a Byte() type, so return it.
                return _inst
            elif _index >= len(_STEPS):
                # This is a really big number. Use the biggest prefix we've got
                _best_prefix = _STEPS[-1]
            elif 0 < _index < len(_STEPS):
                # There is an appropriate prefix unit to represent this
                _best_prefix = _STEPS[_index - 1]
            # Dispatch to the matching to_<prefix>B() conversion method.
            _conversion_method = getattr(
                self,
                'to_%sB' % _best_prefix)
            return _conversion_method()
    ##################################################################
    # Unit conversion methods. Each to_X() returns a NEW instance of the
    # target class constructed from this instance's bit count; the
    # matching properties are thin wrappers over the to_X() methods.
    def to_Bit(self):
        return Bit(self._bit_value)
    def to_Byte(self):
        # NIST_STEPS['Byte'] is the byte step factor for the base unit.
        return Byte(self._byte_value / float(NIST_STEPS['Byte']))
    # Properties
    Bit = property(lambda s: s.to_Bit())
    Byte = property(lambda s: s.to_Byte())
    ##################################################################
    # Kilo-scale units
    def to_KiB(self):
        return KiB(bits=self._bit_value)
    def to_Kib(self):
        return Kib(bits=self._bit_value)
    def to_kB(self):
        return kB(bits=self._bit_value)
    def to_kb(self):
        return kb(bits=self._bit_value)
    # Properties
    KiB = property(lambda s: s.to_KiB())
    Kib = property(lambda s: s.to_Kib())
    kB = property(lambda s: s.to_kB())
    kb = property(lambda s: s.to_kb())
    ##################################################################
    # Mega-scale units
    def to_MiB(self):
        return MiB(bits=self._bit_value)
    def to_Mib(self):
        return Mib(bits=self._bit_value)
    def to_MB(self):
        return MB(bits=self._bit_value)
    def to_Mb(self):
        return Mb(bits=self._bit_value)
    # Properties
    MiB = property(lambda s: s.to_MiB())
    Mib = property(lambda s: s.to_Mib())
    MB = property(lambda s: s.to_MB())
    Mb = property(lambda s: s.to_Mb())
    ##################################################################
    # Giga-scale units
    def to_GiB(self):
        return GiB(bits=self._bit_value)
    def to_Gib(self):
        return Gib(bits=self._bit_value)
    def to_GB(self):
        return GB(bits=self._bit_value)
    def to_Gb(self):
        return Gb(bits=self._bit_value)
    # Properties
    GiB = property(lambda s: s.to_GiB())
    Gib = property(lambda s: s.to_Gib())
    GB = property(lambda s: s.to_GB())
    Gb = property(lambda s: s.to_Gb())
    ##################################################################
    # Tera-scale units
    def to_TiB(self):
        return TiB(bits=self._bit_value)
    def to_Tib(self):
        return Tib(bits=self._bit_value)
    def to_TB(self):
        return TB(bits=self._bit_value)
    def to_Tb(self):
        return Tb(bits=self._bit_value)
    # Properties
    TiB = property(lambda s: s.to_TiB())
    Tib = property(lambda s: s.to_Tib())
    TB = property(lambda s: s.to_TB())
    Tb = property(lambda s: s.to_Tb())
    ##################################################################
    # Peta-scale units
    def to_PiB(self):
        return PiB(bits=self._bit_value)
    def to_Pib(self):
        return Pib(bits=self._bit_value)
    def to_PB(self):
        return PB(bits=self._bit_value)
    def to_Pb(self):
        return Pb(bits=self._bit_value)
    # Properties
    PiB = property(lambda s: s.to_PiB())
    Pib = property(lambda s: s.to_Pib())
    PB = property(lambda s: s.to_PB())
    Pb = property(lambda s: s.to_Pb())
    ##################################################################
    # Exa-scale units
    def to_EiB(self):
        return EiB(bits=self._bit_value)
    def to_Eib(self):
        return Eib(bits=self._bit_value)
    def to_EB(self):
        return EB(bits=self._bit_value)
    def to_Eb(self):
        return Eb(bits=self._bit_value)
    # Properties
    EiB = property(lambda s: s.to_EiB())
    Eib = property(lambda s: s.to_Eib())
    EB = property(lambda s: s.to_EB())
    Eb = property(lambda s: s.to_Eb())
    ##################################################################
    # The SI units go beyond the NIST units. They also have the Zetta
    # and Yotta prefixes.
    def to_ZB(self):
        return ZB(bits=self._bit_value)
    def to_Zb(self):
        return Zb(bits=self._bit_value)
    # Properties
    ZB = property(lambda s: s.to_ZB())
    Zb = property(lambda s: s.to_Zb())
    ##################################################################
    def to_YB(self):
        return YB(bits=self._bit_value)
    def to_Yb(self):
        return Yb(bits=self._bit_value)
    #: A new object representing this instance as a Yottabyte
    YB = property(lambda s: s.to_YB())
    Yb = property(lambda s: s.to_Yb())
    ##################################################################
    # Rich comparison operations
    ##################################################################
    # Comparing against a plain number compares the *prefix* value
    # (e.g. KiB(1) == 1); comparing against another bitmath instance
    # compares the underlying byte counts, so cross-unit comparisons
    # (KiB(1) == Byte(1024)) work as expected.
    def __lt__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value < other
        else:
            return self._byte_value < other.bytes
    def __le__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value <= other
        else:
            return self._byte_value <= other.bytes
    def __eq__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value == other
        else:
            return self._byte_value == other.bytes
    def __ne__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value != other
        else:
            return self._byte_value != other.bytes
    def __gt__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value > other
        else:
            return self._byte_value > other.bytes
    def __ge__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value >= other
        else:
            return self._byte_value >= other.bytes
    ##################################################################
    # Basic math operations
    ##################################################################
    # Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
    """These methods are called to implement the binary arithmetic
    operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
    instance, to evaluate the expression x + y, where x is an instance of
    a class that has an __add__() method, x.__add__(y) is called. The
    __divmod__() method should be the equivalent to using __floordiv__()
    and __mod__(); it should not be related to __truediv__() (described
    below). Note that __pow__() should be defined to accept an optional
    third argument if the ternary version of the built-in pow() function
    is to be supported.object.__complex__(self)
    """
    def __add__(self, other):
        """Supported operations with result types:
        - bm + bm = bm
        - bm + num = num
        - num + bm = num (see radd)
        """
        if isinstance(other, numbers.Number):
            # bm + num
            return other + self.value
        else:
            # bm + bm: add byte counts and rebuild as this instance's type
            total_bytes = self._byte_value + other.bytes
            return (type(self))(bytes=total_bytes)
    def __sub__(self, other):
        """Subtraction: Supported operations with result types:
        - bm - bm = bm
        - bm - num = num
        - num - bm = num (see rsub)
        """
        if isinstance(other, numbers.Number):
            # bm - num
            return self.value - other
        else:
            # bm - bm
            total_bytes = self._byte_value - other.bytes
            return (type(self))(bytes=total_bytes)
    def __mul__(self, other):
        """Multiplication: Supported operations with result types:
        - bm1 * bm2 = bm1
        - bm * num = bm
        - num * bm = num (see rmul)
        """
        if isinstance(other, numbers.Number):
            # bm * num
            result = self._byte_value * other
            return (type(self))(bytes=result)
        else:
            # bm1 * bm2
            # NOTE(review): this multiplies the two absolute byte counts,
            # so the result is effectively bytes-squared expressed as bm1's
            # type. Preserved as-is; confirm against upstream semantics.
            _other = other.value * other.base ** other.power
            _self = self.prefix_value * self._base ** self._power
            return (type(self))(bytes=_other * _self)
    """The division operator (/) is implemented by these methods. The
    __truediv__() method is used when __future__.division is in effect,
    otherwise __div__() is used. If only one of these two methods is
    defined, the object will not support division in the alternate
    context; TypeError will be raised instead."""
    def __div__(self, other):
        """Division: Supported operations with result types:
        - bm1 / bm2 = num
        - bm / num = bm
        - num / bm = num (see rdiv)
        """
        if isinstance(other, numbers.Number):
            # bm / num
            result = self._byte_value / other
            return (type(self))(bytes=result)
        else:
            # bm1 / bm2: a dimensionless ratio of the two byte counts
            return self._byte_value / float(other.bytes)
    def __truediv__(self, other):
        # Python 3 (and __future__.division) entry point; same semantics.
        return self.__div__(other)
    # def __floordiv__(self, other):
    #     return NotImplemented
    # def __mod__(self, other):
    #     return NotImplemented
    # def __divmod__(self, other):
    #     return NotImplemented
    # def __pow__(self, other, modulo=None):
    #     return NotImplemented
    ##################################################################
    """These methods are called to implement the binary arithmetic
    operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
    reflected (swapped) operands. These functions are only called if the
    left operand does not support the corresponding operation and the
    operands are of different types. [2] For instance, to evaluate the
    expression x - y, where y is an instance of a class that has an
    __rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
    NotImplemented.
    These are the add/sub/mul/div methods for syntax where a number type
    is given for the LTYPE and a bitmath object is given for the
    RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
    """
    def __radd__(self, other):
        # num + bm = num
        return other + self.value
    def __rsub__(self, other):
        # num - bm = num
        return other - self.value
    def __rmul__(self, other):
        # num * bm = bm (delegates to __mul__)
        return self * other
    def __rdiv__(self, other):
        # num / bm = num
        return other / float(self.value)
    def __rtruediv__(self, other):
        # num / bm = num (Python 3 / __future__.division path)
        return other / float(self.value)
    """Called to implement the built-in functions complex(), int(),
    long(), and float(). Should return a value of the appropriate type.
    If one of those methods does not support the operation with the
    supplied arguments, it should return NotImplemented.
    For bitmath purposes, these methods return the int/long/float
    equivalent of the this instances prefix Unix value. That is to say:
    - int(KiB(3.336)) would return 3
    - long(KiB(3.336)) would return 3L
    - float(KiB(3.336)) would return 3.336
    """
    def __int__(self):
        """Return this instances prefix unit as an integer"""
        return int(self.prefix_value)
    def __long__(self):
        """Return this instances prefix unit as a long integer (Python 2 only)"""
        return long(self.prefix_value)  # pragma: PY3X no cover
    def __float__(self):
        """Return this instances prefix unit as a floating point number"""
        return float(self.prefix_value)
    ##################################################################
    # Bitwise operations
    ##################################################################
    # All bitwise operators act on the integer *bit* count and return a
    # new instance of the same type built from the resulting bit count.
    def __lshift__(self, other):
        """Left shift, ex: 100 << 2
        A left shift by n bits is equivalent to multiplication by pow(2,
        n). A long integer is returned if the result exceeds the range of
        plain integers."""
        shifted = int(self.bits) << other
        return type(self)(bits=shifted)
    def __rshift__(self, other):
        """Right shift, ex: 100 >> 2
        A right shift by n bits is equivalent to division by pow(2, n)."""
        shifted = int(self.bits) >> other
        return type(self)(bits=shifted)
    def __and__(self, other):
        """Bitwise and, ex: 100 & 2
        bitwise and". Each bit of the output is 1 if the corresponding bit
        of x AND of y is 1, otherwise it's 0."""
        andd = int(self.bits) & other
        return type(self)(bits=andd)
    def __xor__(self, other):
        """Bitwise xor, ex: 100 ^ 2
        Does a "bitwise exclusive or". Each bit of the output is the same
        as the corresponding bit in x if that bit in y is 0, and it's the
        complement of the bit in x if that bit in y is 1."""
        xord = int(self.bits) ^ other
        return type(self)(bits=xord)
    def __or__(self, other):
        """Bitwise or, ex: 100 | 2
        Does a "bitwise or". Each bit of the output is 0 if the corresponding
        bit of x AND of y is 0, otherwise it's 1."""
        # NOTE(review): local name 'ord' shadows the builtin within this
        # method body only; harmless here but worth renaming eventually.
        ord = int(self.bits) | other
        return type(self)(bits=ord)
    ##################################################################
    def __neg__(self):
        """The negative version of this instance"""
        return (type(self))(-abs(self.prefix_value))
    def __pos__(self):
        """The positive (absolute) version of this instance"""
        return (type(self))(abs(self.prefix_value))
    def __abs__(self):
        """The absolute value of this instance"""
        return (type(self))(abs(self.prefix_value))
    # def __invert__(self):
    #     """Called to implement the unary arithmetic operations (-, +, abs()
    #     and ~)."""
    #     return NotImplemented
######################################################################
# First, the bytes...
class Byte(Bitmath):
    """Byte based types fundamentally operate on self._bit_value"""
    def _setup(self):
        # (base, power, singular name, plural name); base 2, power 0 -> 1 Byte
        return (2, 0, 'Byte', 'Bytes')
######################################################################
# NIST Prefixes for Byte based types
# Each subclass only overrides _setup() to supply its
# (base, power, singular, plural) tuple; base 2 marks NIST (binary) units.
# The *o names (Kio, Mio, ...) are aliases, presumably for octet-style
# (e.g. French) notation -- TODO confirm intent.
class KiB(Byte):
    def _setup(self):
        return (2, 10, 'KiB', 'KiBs')
Kio = KiB
class MiB(Byte):
    def _setup(self):
        return (2, 20, 'MiB', 'MiBs')
Mio = MiB
class GiB(Byte):
    def _setup(self):
        return (2, 30, 'GiB', 'GiBs')
Gio = GiB
class TiB(Byte):
    def _setup(self):
        return (2, 40, 'TiB', 'TiBs')
Tio = TiB
class PiB(Byte):
    def _setup(self):
        return (2, 50, 'PiB', 'PiBs')
Pio = PiB
class EiB(Byte):
    def _setup(self):
        return (2, 60, 'EiB', 'EiBs')
Eio = EiB
######################################################################
# SI Prefixes for Byte based types
# Base 10 marks SI (decimal) units; powers step by 3 (kB=10^3 .. YB=10^24).
class kB(Byte):
    def _setup(self):
        return (10, 3, 'kB', 'kBs')
ko = kB
class MB(Byte):
    def _setup(self):
        return (10, 6, 'MB', 'MBs')
Mo = MB
class GB(Byte):
    def _setup(self):
        return (10, 9, 'GB', 'GBs')
Go = GB
class TB(Byte):
    def _setup(self):
        return (10, 12, 'TB', 'TBs')
To = TB
class PB(Byte):
    def _setup(self):
        return (10, 15, 'PB', 'PBs')
Po = PB
class EB(Byte):
    def _setup(self):
        return (10, 18, 'EB', 'EBs')
Eo = EB
class ZB(Byte):
    def _setup(self):
        return (10, 21, 'ZB', 'ZBs')
Zo = ZB
class YB(Byte):
    def _setup(self):
        return (10, 24, 'YB', 'YBs')
Yo = YB
######################################################################
# And now the bit types
class Bit(Bitmath):
    """Bit based types fundamentally operate on self._bit_value"""
    def _set_prefix_value(self):
        # Bit types derive the displayed prefix value from the *bit*
        # count, overriding the base-class behavior (defined above this
        # chunk) which presumably scales from bytes -- TODO confirm.
        self.prefix_value = self._to_prefix_value(self._bit_value)
    def _setup(self):
        return (2, 0, 'Bit', 'Bits')
    def _norm(self, value):
        """Normalize the input value into the fundamental unit for this prefix
        type.
        For bit types the input counts bits, so bytes are derived by
        dividing by 8 (the inverse of Byte._norm).
        """
        self._bit_value = value * self._unit_value
        self._byte_value = self._bit_value / 8.0
######################################################################
# NIST Prefixes for Bit based types (base 2, powers of 10 bits apart)
class Kib(Bit):
    def _setup(self):
        return (2, 10, 'Kib', 'Kibs')
class Mib(Bit):
    def _setup(self):
        return (2, 20, 'Mib', 'Mibs')
class Gib(Bit):
    def _setup(self):
        return (2, 30, 'Gib', 'Gibs')
class Tib(Bit):
    def _setup(self):
        return (2, 40, 'Tib', 'Tibs')
class Pib(Bit):
    def _setup(self):
        return (2, 50, 'Pib', 'Pibs')
class Eib(Bit):
    def _setup(self):
        return (2, 60, 'Eib', 'Eibs')
######################################################################
# SI Prefixes for Bit based types (base 10, powers of 3 apart)
class kb(Bit):
    def _setup(self):
        return (10, 3, 'kb', 'kbs')
class Mb(Bit):
    def _setup(self):
        return (10, 6, 'Mb', 'Mbs')
class Gb(Bit):
    def _setup(self):
        return (10, 9, 'Gb', 'Gbs')
class Tb(Bit):
    def _setup(self):
        return (10, 12, 'Tb', 'Tbs')
class Pb(Bit):
    def _setup(self):
        return (10, 15, 'Pb', 'Pbs')
class Eb(Bit):
    def _setup(self):
        return (10, 18, 'Eb', 'Ebs')
class Zb(Bit):
    def _setup(self):
        return (10, 21, 'Zb', 'Zbs')
class Yb(Bit):
    def _setup(self):
        return (10, 24, 'Yb', 'Ybs')
######################################################################
# Utility functions
def best_prefix(bytes, system=NIST):
    """Return ``bytes`` converted into its best human-readable prefix unit.

    ``bytes`` may be a plain number (interpreted as a count of bytes) or
    any bitmath instance. The ``system`` keyword selects the preferred
    unit system: ``bitmath.NIST`` (default) or ``bitmath.SI``.

    Equivalent to ``Byte(...).best_prefix()``:

    >>> import bitmath
    >>> best = bitmath.Byte(12345).best_prefix()
    """
    # Unwrap bitmath instances to their raw byte count; numbers pass through.
    value = bytes.bytes if isinstance(bytes, Bitmath) else bytes
    return Byte(value).best_prefix(system=system)
def query_device_capacity(device_fd):
    """Create a bitmath instance of the capacity of a system block device.

    Makes one or more ioctl requests to query the capacity of a block
    device, combines the results as required for the host platform, and
    returns the capacity as a :class:`bitmath.Byte` instance.

    Thanks to the following resources for help figuring out the Linux/Mac
    ioctl's for querying block device sizes:

    * http://stackoverflow.com/a/12925285/263969
    * http://stackoverflow.com/a/9764508/263969

    :param file device_fd: A ``file`` object of the device to query the
        capacity of (as in ``query_device_capacity(open("/dev/sda"))``).
    :return: a bitmath :class:`bitmath.Byte` instance equivalent to the
        capacity of the target device in bytes.
    :raises NotImplementedError: on non-POSIX platforms
    :raises ValueError: if ``device_fd`` does not refer to a block device
    """
    if os_name() != 'posix':
        raise NotImplementedError("'bitmath.query_device_capacity' is not supported on this platform: %s" % os_name())
    s = os.stat(device_fd.name).st_mode
    if not stat.S_ISBLK(s):
        raise ValueError("The file descriptor provided is not of a device type")
    # Keys of ``ioctl_map`` correlate to possible values returned by
    # ``platform.system()``. Each entry lists the ioctl requests to issue
    # as (PARAM_NAME, struct FORMAT_CHAR, REQUEST_CODE) triples, plus a
    # ``func`` that combines the raw results into a byte count.
    ioctl_map = {
        "Linux": {
            "request_params": [
                # Per <linux/fs.h>, BLKGETSIZE64 returns the device size
                # in bytes as a 'u64' (unsigned 64 bit integer), which
                # maps to the native struct format character 'L'.
                ("BLKGETSIZE64", "L", 0x80081272)
            ],
            # BLKGETSIZE64 already reports bytes, so no post-processing.
            "func": lambda x: x["BLKGETSIZE64"]
        },
        "Darwin": {
            "request_params": [
                # Per <sys/disk.h>: get media's block count - uint64_t
                ("DKIOCGETBLOCKCOUNT", "L", 0x40086419),
                # Per <sys/disk.h>: get media's block size - uint32_t,
                # i.e. a plain unsigned int ('I' format character).
                ("DKIOCGETBLOCKSIZE", "I", 0x40046418)
            ],
            # OS X has no direct BLKGETSIZE64 equivalent: capacity is
            # block count times block size.
            "func": lambda x: x["DKIOCGETBLOCKCOUNT"] * x["DKIOCGETBLOCKSIZE"]
        }
    }
    platform_params = ioctl_map[platform.system()]
    results = {}
    for req_name, fmt, request_code in platform_params['request_params']:
        # The system's native size (in bytes) of this format type.
        buffer_size = struct.calcsize(fmt)
        # BUGFIX: the buffer must be a *bytes* object. The original
        # ``' ' * buffer_size`` was a Python 2-ism; on Python 3 both
        # fcntl.ioctl() and struct.unpack() raise TypeError for str
        # buffers. ``b' '`` behaves identically on Python 2.
        buffer = b' ' * buffer_size
        buffer = fcntl.ioctl(device_fd.fileno(), request_code, buffer)
        # Unpack the raw ioctl result into a Python int per ``fmt``.
        results[req_name] = struct.unpack(fmt, buffer)[0]
    return Byte(platform_params['func'](results))
def getsize(path, bestprefix=True, system=NIST):
    """Return the size of the file at `path` as a bitmath instance.

    With ``bestprefix`` True (the default) the result is expressed in the
    best human-readable prefix unit of the preferred ``system`` — either
    `bitmath.NIST` (default) or `bitmath.SI`. With ``bestprefix`` set to
    ``False`` a plain ``bitmath.Byte`` instance is returned instead.
    """
    # Resolve symlinks first so we measure the real target file.
    num_bytes = os.path.getsize(os.path.realpath(path))
    result = Byte(num_bytes)
    if bestprefix:
        result = result.best_prefix(system=system)
    return result
def listdir(search_base, followlinks=False, filter='*',
            relpath=False, bestprefix=False, system=NIST):
    """This is a generator which recurses the directory tree
    `search_base`, yielding 2-tuples of:
    * The absolute/relative path to a discovered file
    * A bitmath instance representing the "apparent size" of the file.
    - `search_base` - The directory to begin walking down.
    - `followlinks` - Whether or not to follow symbolic links to directories
    - `filter` - A glob (see :py:mod:`fnmatch`) to filter results with
      (default: ``*``, everything)
    - `relpath` - ``True`` to return the relative path from `pwd` or
      ``False`` (default) to return the fully qualified path
    - ``bestprefix`` - set to ``False`` to get ``bitmath.Byte``
      instances back instead.
    - `system` - Provide a preferred unit system by setting `system`
      to either ``bitmath.NIST`` (default) or ``bitmath.SI``.
    .. note:: This function does NOT return tuples for directory entities.
    .. note:: Symlinks to **files** are followed automatically
    """
    # NOTE(review): the parameter name 'filter' shadows the builtin, but
    # renaming it would break callers using the keyword form.
    for root, dirs, files in os.walk(search_base, followlinks=followlinks):
        for name in fnmatch.filter(files, filter):
            _path = os.path.join(root, name)
            if relpath:
                # RELATIVE path
                _return_path = os.path.relpath(_path, '.')
            else:
                # REAL path
                _return_path = os.path.realpath(_path)
            if followlinks:
                yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
            else:
                # NOTE(review): when not following links this skips any
                # symlink entries entirely, which appears to contradict the
                # "symlinks to files are followed automatically" note
                # above -- confirm intended behavior before changing.
                if os.path.isdir(_path) or os.path.islink(_path):
                    pass
                else:
                    yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
def parse_string_unsafe(s, system=SI):
    """Attempt to parse a string with ambiguous units and try to make a
    bitmath object out of it.

    This may produce inaccurate results if parsing shell output. For
    example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
    ~= 2.666 KiB. See the documentation for all of the important details.

    Note the following caveats:

    * All inputs are assumed to be byte-based (as opposed to bit based)
    * Numerical inputs (those without any units) are assumed to be a
      number of bytes
    * Inputs with single letter units (k, M, G, etc) are assumed to be SI
      units (base-10). Set the `system` parameter to `bitmath.NIST` to
      change this behavior.
    * Inputs with an `i` character following the leading letter (Ki, Mi,
      Gi) are assumed to be NIST units (base 2)
    * Capitalization does not matter

    :raises ValueError: for non string/number input or unrecognized units
    """
    if not isinstance(s, (str, unicode)) and \
            not isinstance(s, numbers.Number):
        raise ValueError("parse_string_unsafe only accepts string/number inputs but a %s was given" %
                         type(s))
    ######################################################################
    # Is the input simple to parse? Just a number, or a number
    # masquerading as a string perhaps?
    # Test case: raw number input (easy!)
    if isinstance(s, numbers.Number):
        # It's just a number. Assume bytes
        return Byte(s)
    # Test case: a number pretending to be a string
    if isinstance(s, (str, unicode)):
        try:
            # Can we turn it directly into a number?
            return Byte(float(s))
        except ValueError:
            # Nope, this is not a plain number
            pass
    ######################################################################
    # The input is neither a plain number nor a number wrapped in a
    # string. Separate the value from the unit by locating the first
    # alphabetic character.
    try:
        index = list([i.isalpha() for i in s]).index(True)
    except ValueError:  # pragma: no cover
        # If there's no alphabetic characters we won't be able to .index(True)
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
    # Split the string into the value and the unit
    val, unit = s[:index], s[index:]
    # Don't trust anything. We'll make sure the correct 'b' is in place.
    unit = unit.rstrip('Bb')
    unit += 'B'
    # BUGFIX: a bare byte unit (e.g. "100B" or "100b") normalizes to the
    # one-character unit 'B', which previously fell through both branches
    # below and raised ValueError even though the caveats above promise
    # byte-based parsing. Handle it explicitly.
    if unit == 'B':
        return Byte(float(val))
    # At this point we can expect `unit` to be either:
    #
    # - 2 Characters (for SI, ex: kB or GB)
    # - 3 Caracters (so NIST, ex: KiB, or GiB)
    #
    # A unit with any other number of chars is not a valid unit
    # SI
    if len(unit) == 2:
        # Has NIST parsing been requested?
        if system == NIST:
            # NIST units requested. Ensure the unit begins with a
            # capital letter and is followed by an 'i' character.
            unit = capitalize_first(unit)
            # Insert an 'i' char after the first letter
            _unit = list(unit)
            _unit.insert(1, 'i')
            # Collapse the list back into a 3 letter string
            unit = ''.join(_unit)
            unit_class = globals()[unit]
        else:
            # Default parsing (SI format)
            #
            # Edge-case checking: SI 'thousand' is a lower-case K
            if unit.startswith('K'):
                unit = unit.replace('K', 'k')
            elif not unit.startswith('k'):
                # Otherwise, ensure the first char is capitalized
                unit = capitalize_first(unit)
            # This is an SI-type unit
            if unit[0] in SI_PREFIXES:
                unit_class = globals()[unit]
    # NIST
    elif len(unit) == 3:
        unit = capitalize_first(unit)
        # This is a NIST-type unit
        if unit[:2] in NIST_PREFIXES:
            unit_class = globals()[unit]
    else:
        # This is not a unit we recognize
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    # If no branch above bound unit_class, the unit was unrecognized.
    try:
        unit_class
    except UnboundLocalError:
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    return unit_class(float(val))
######################################################################
# Contxt Managers
@contextlib.contextmanager
def format(fmt_str=None, plural=False, bestprefix=False):
    """Context manager for printing bitmath instances.

    ``fmt_str`` - a formatting mini-language compat formatting string. See
    the @properties (above) for a list of available items.

    ``plural`` - True enables printing instances with 's's if they're
    plural. False (default) prints them as singular (no trailing 's').

    ``bestprefix`` - accepted for API compatibility but currently unused
    here; instances print with their current prefix unit regardless.

    The module-level formatting globals are restored on exit even if the
    managed block raises (the original implementation leaked the modified
    globals on exceptions).
    """
    if 'bitmath' not in globals():
        import bitmath
    if plural:
        orig_fmt_plural = bitmath.format_plural
        bitmath.format_plural = True
    if fmt_str:
        orig_fmt_str = bitmath.format_string
        bitmath.format_string = fmt_str
    try:
        yield
    finally:
        # BUGFIX: restore prior settings even when the ``with`` body
        # raises; without try/finally an exception skipped the resets.
        if plural:
            bitmath.format_plural = orig_fmt_plural
        if fmt_str:
            bitmath.format_string = orig_fmt_str
def cli_script_main(cli_args):
    """
    A command line interface to basic bitmath operations.
    """
    # Unit choices offered on the CLI mirror the library's full unit list.
    valid_units = ALL_UNIT_TYPES
    parser = argparse.ArgumentParser(
        description='Converts from one type of size to another.')
    parser.add_argument('--from-stdin', default=False, action='store_true',
                        help='Reads number from stdin rather than the cli')
    parser.add_argument(
        '-f', '--from', choices=valid_units, nargs=1,
        type=str, dest='fromunit', default=['Byte'],
        help='Input type you are converting from. Defaultes to Byte.')
    parser.add_argument(
        '-t', '--to', choices=valid_units, required=False, nargs=1, type=str,
        help=('Input type you are converting to. '
              'Attempts to detect best result if omitted.'), dest='tounit')
    parser.add_argument(
        'size', nargs='*', type=float,
        help='The number to convert.')
    args = parser.parse_args(cli_args)
    # Not sure how to cover this with tests, or if the functionality
    # will remain in this form long enough for it to make writing a
    # test worth the effort.
    if args.from_stdin:  # pragma: no cover
        args.size = [float(sys.stdin.readline()[:-1])]
    converted = []
    for quantity in args.size:
        # Resolve the source unit class off the bitmath module by name.
        bitmath_module = __import__('bitmath', fromlist=['True'])
        source = getattr(bitmath_module, args.fromunit[0])(quantity)
        if args.tounit:
            # An explicit target unit was requested; use it.
            converted.append(getattr(source, args.tounit[0]))
        else:
            # No target given; pick the best human-readable prefix.
            converted.append(source.best_prefix())
    return converted
def cli_script():  # pragma: no cover
    # Thin console entry point: delegate all argument handling to
    # cli_script_main so that function can be unit tested directly.
    for converted in cli_script_main(sys.argv[1:]):
        print(converted)
if __name__ == '__main__':
cli_script()
|
tbielawa/bitmath
|
bitmath/__init__.py
|
parse_string_unsafe
|
python
|
def parse_string_unsafe(s, system=SI):
    """Attempt to parse a string with ambiguous units and make a bitmath
    object out of it.

    This may produce inaccurate results when parsing shell output. For
    example ``ls`` may say a 2730 Byte file is '2.7K'; 2730 Bytes ==
    2.73 kB ~= 2.666 KiB.

    Caveats:

    * All inputs are assumed to be byte-based (as opposed to bit based)
    * Numerical inputs (those without any units) are assumed to be a
      number of bytes
    * Inputs with single letter units (k, M, G, etc) are assumed to be
      SI units (base-10). Set the ``system`` parameter to
      ``bitmath.NIST`` to change this behavior.
    * Inputs with an 'i' character following the leading letter (Ki, Mi,
      Gi) are assumed to be NIST units (base 2)
    * Capitalization does not matter

    :param s: a string or plain number to parse
    :param int system: preferred prefix system for single-letter units,
        one of ``bitmath.SI`` (default) or ``bitmath.NIST``
    :return: a bitmath instance representing ``s``
    :raises ValueError: if ``s`` is neither a string nor a number, or if
        it names a unit that is not a valid bitmath unit
    """
    if not isinstance(s, (str, unicode)) and \
            not isinstance(s, numbers.Number):
        raise ValueError("parse_string_unsafe only accepts string/number inputs but a %s was given" %
                         type(s))
    ######################################################################
    # Is the input simple to parse? Just a number, or a number
    # masquerading as a string perhaps?
    # Test case: raw number input (easy!)
    if isinstance(s, numbers.Number):
        # It's just a number. Assume bytes
        return Byte(s)
    # Test case: a number pretending to be a string. (By this point `s`
    # is guaranteed to be a str/unicode by the checks above.)
    try:
        # Can we turn it directly into a number?
        return Byte(float(s))
    except ValueError:
        # Nope, this is not a plain number
        pass
    ######################################################################
    # Separate the number from the unit.
    #
    # Get the index of the first alphabetic character
    try:
        index = list([i.isalpha() for i in s]).index(True)
    except ValueError:  # pragma: no cover
        # If there's no alphabetic characters we won't be able to .index(True)
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
    # Split the string into the value and the unit
    val, unit = s[:index], s[index:]
    # Don't trust anything. Normalize the trailing byte marker: strip
    # any 'b'/'B' characters and append exactly one capital 'B'.
    unit = unit.rstrip('Bb') + 'B'
    # At this point we can expect `unit` to be either:
    #
    # - 2 Characters (for SI, ex: kB or GB)
    # - 3 Characters (so NIST, ex: KiB, or GiB)
    #
    # A unit with any other number of chars is not a valid unit.
    unit_class = None
    if len(unit) == 2:
        if system == NIST:
            # NIST parsing requested for a single-letter prefix: ensure
            # the unit begins with a capital letter and splice in an 'i'
            # after it (e.g. kB -> KiB).
            unit = capitalize_first(unit)
            unit = unit[0] + 'i' + unit[1:]
            unit_class = globals().get(unit)
        else:
            # Default parsing (SI format)
            #
            # Edge-case checking: SI 'thousand' is a lower-case k
            if unit.startswith('K'):
                unit = unit.replace('K', 'k')
            elif not unit.startswith('k'):
                # Otherwise, ensure the first char is capitalized
                unit = capitalize_first(unit)
            if unit[0] in SI_PREFIXES:
                unit_class = globals().get(unit)
    elif len(unit) == 3:
        unit = capitalize_first(unit)
        # This is a NIST-type unit
        if unit[:2] in NIST_PREFIXES:
            unit_class = globals().get(unit)
    if unit_class is None:
        # Single failure path: covers unknown prefixes, wrong-length
        # units, and recognized-looking prefixes with no corresponding
        # class (e.g. NIST 'ZiB', which does not exist -- the previous
        # implementation leaked a bare KeyError for that case).
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    return unit_class(float(val))
|
Attempt to parse a string with ambiguous units and try to make a
bitmath object out of it.
This may produce inaccurate results if parsing shell output. For
example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
~= 2.666 KiB. See the documentation for all of the important details.
Note the following caveats:
* All inputs are assumed to be byte-based (as opposed to bit based)
* Numerical inputs (those without any units) are assumed to be a
number of bytes
* Inputs with single letter units (k, M, G, etc) are assumed to be SI
units (base-10). Set the `system` parameter to `bitmath.NIST` to
change this behavior.
* Inputs with an `i` character following the leading letter (Ki, Mi,
Gi) are assumed to be NIST units (base 2)
* Capitalization does not matter
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L1437-L1559
|
[
"def capitalize_first(s):\n \"\"\"Capitalize ONLY the first letter of the input `s`\n\n* returns a copy of input `s` with the first letter capitalized\n \"\"\"\n pfx = s[0].upper()\n _s = s[1:]\n return pfx + _s\n"
] |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright © 2014-2016 Tim Bielawa <timbielawa@gmail.com>
# See GitHub Contributors Graph for more information
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sub-license, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=bad-continuation,missing-docstring,invalid-name,line-too-long
"""Reference material:
The bitmath homepage is located at:
* http://bitmath.readthedocs.io/en/latest/
Prefixes for binary multiples:
http://physics.nist.gov/cuu/Units/binary.html
decimal and binary prefixes:
man 7 units (from the Linux Documentation Project 'man-pages' package)
BEFORE YOU GET HASTY WITH EXCLUDING CODE FROM COVERAGE: If you
absolutely need to skip code coverage because of a strange Python 2.x
vs 3.x thing, use the fancy environment substitution stuff from the
.coverage RC file. In review:
* If you *NEED* to skip a statement because of Python 2.x issues add the following::
# pragma: PY2X no cover
* If you *NEED* to skip a statement because of Python 3.x issues add the following::
# pragma: PY3X no cover
In this configuration, statements which are skipped in 2.x are still
covered in 3.x, and the reverse holds true for tests skipped in 3.x.
"""
from __future__ import print_function
import argparse
import contextlib
import fnmatch
import math
import numbers
import os
import os.path
import platform
import sys
# For device capacity reading in query_device_capacity(). Only supported
# on posix systems for now. Will be addressed in issue #52 on GitHub.
if os.name == 'posix':
import stat
import fcntl
import struct
__all__ = ['Bit', 'Byte', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB',
'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'Kib',
'Mib', 'Gib', 'Tib', 'Pib', 'Eib', 'kb', 'Mb', 'Gb', 'Tb',
'Pb', 'Eb', 'Zb', 'Yb', 'getsize', 'listdir', 'format',
'format_string', 'format_plural', 'parse_string', 'parse_string_unsafe',
'ALL_UNIT_TYPES', 'NIST', 'NIST_PREFIXES', 'NIST_STEPS',
'SI', 'SI_PREFIXES', 'SI_STEPS']
# Python 3.x compat
if sys.version > '3':
long = int # pragma: PY2X no cover
unicode = str # pragma: PY2X no cover
#: A list of all the valid prefix unit types. Mostly for reference,
#: also used by the CLI tool as valid types
ALL_UNIT_TYPES = ['Bit', 'Byte', 'kb', 'kB', 'Mb', 'MB', 'Gb', 'GB', 'Tb',
'TB', 'Pb', 'PB', 'Eb', 'EB', 'Zb', 'ZB', 'Yb',
'YB', 'Kib', 'KiB', 'Mib', 'MiB', 'Gib', 'GiB',
'Tib', 'TiB', 'Pib', 'PiB', 'Eib', 'EiB']
# #####################################################################
# Set up our module variables/constants
###################################
# Internal:
# Console repr(), ex: MiB(13.37), or kB(42.0)
_FORMAT_REPR = '{unit_singular}({value})'
# ##################################
# Exposed:
#: Constants for referring to NIST prefix system
NIST = int(2)
#: Constants for referring to SI prefix system
SI = int(10)
# ##################################
#: All of the SI prefixes
SI_PREFIXES = ['k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
#: Byte values represented by each SI prefix unit
SI_STEPS = {
'Bit': 1 / 8.0,
'Byte': 1,
'k': 1000,
'M': 1000000,
'G': 1000000000,
'T': 1000000000000,
'P': 1000000000000000,
'E': 1000000000000000000,
'Z': 1000000000000000000000,
'Y': 1000000000000000000000000
}
#: All of the NIST prefixes
NIST_PREFIXES = ['Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei']
#: Byte values represented by each NIST prefix unit
NIST_STEPS = {
'Bit': 1 / 8.0,
'Byte': 1,
'Ki': 1024,
'Mi': 1048576,
'Gi': 1073741824,
'Ti': 1099511627776,
'Pi': 1125899906842624,
'Ei': 1152921504606846976
}
#: String representation, ex: ``13.37 MiB``, or ``42.0 kB``
format_string = "{value} {unit}"
#: Pluralization behavior
format_plural = False
def os_name():
    """Return :data:`os.name`.

    Exists purely as an indirection point so unit tests can patch it to
    exercise platform-specific code paths.
    """
    return os.name
def capitalize_first(s):
    """Capitalize ONLY the first letter of the input `s`.

    Unlike ``str.capitalize()``, the remainder of the string is left
    untouched (so 'kiB' becomes 'KiB', not 'Kib').

    :param str s: the string to transform
    :return: a copy of `s` with the first letter capitalized; an empty
        string is returned unchanged (previously raised IndexError)
    """
    if not s:
        # Guard: s[0] on an empty string would raise IndexError
        return s
    return s[0].upper() + s[1:]
######################################################################
# Base class for everything else
class Bitmath(object):
"""The base class for all the other prefix classes"""
# All the allowed input types
valid_types = (int, float, long)
def __init__(self, value=0, bytes=None, bits=None):
"""Instantiate with `value` by the unit, in plain bytes, or
bits. Don't supply more than one keyword.
default behavior: initialize with value of 0
only setting value: assert bytes is None and bits is None
only setting bytes: assert value == 0 and bits is None
only setting bits: assert value == 0 and bytes is None
"""
_raise = False
if (value == 0) and (bytes is None) and (bits is None):
pass
# Setting by bytes
elif bytes is not None:
if (value == 0) and (bits is None):
pass
else:
_raise = True
# setting by bits
elif bits is not None:
if (value == 0) and (bytes is None):
pass
else:
_raise = True
if _raise:
raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
self._do_setup()
if bytes:
# We were provided with the fundamental base unit, no need
# to normalize
self._byte_value = bytes
self._bit_value = bytes * 8.0
elif bits:
# We were *ALMOST* given the fundamental base
# unit. Translate it into the fundamental unit then
# normalize.
self._byte_value = bits / 8.0
self._bit_value = bits
else:
# We were given a value representative of this *prefix
# unit*. We need to normalize it into the number of bytes
# it represents.
self._norm(value)
# We have the fundamental unit figured out. Set the 'pretty' unit
self._set_prefix_value()
def _set_prefix_value(self):
self.prefix_value = self._to_prefix_value(self._byte_value)
def _to_prefix_value(self, value):
"""Return the number of bits/bytes as they would look like if we
converted *to* this unit"""
return value / float(self._unit_value)
def _setup(self):
raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")
def _do_setup(self):
"""Setup basic parameters for this class.
`base` is the numeric base which when raised to `power` is equivalent
to 1 unit of the corresponding prefix. I.e., base=2, power=10
represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.
Likewise, for the SI prefix classes `base` will be 10, and the `power`
for the Kilobyte is 3.
"""
(self._base, self._power, self._name_singular, self._name_plural) = self._setup()
self._unit_value = self._base ** self._power
def _norm(self, value):
"""Normalize the input value into the fundamental unit for this prefix
type.
:param number value: The input value to be normalized
:raises ValueError: if the input value is not a type of real number
"""
if isinstance(value, self.valid_types):
self._byte_value = value * self._unit_value
self._bit_value = self._byte_value * 8.0
else:
raise ValueError("Initialization value '%s' is of an invalid type: %s. "
"Must be one of %s" % (
value,
type(value),
", ".join(str(x) for x in self.valid_types)))
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
def system(self):
"""The system of units used to measure an instance"""
if self._base == 2:
return "NIST"
elif self._base == 10:
return "SI"
else:
# I don't expect to ever encounter this logic branch, but
# hey, it's better to have extra test coverage than
# insufficient test coverage.
raise ValueError("Instances mathematical base is an unsupported value: %s" % (
str(self._base)))
@property
def unit(self):
"""The string that is this instances prefix unit name in agreement
with this instance value (singular or plural). Following the
convention that only 1 is singular. This will always be the singular
form when :attr:`bitmath.format_plural` is ``False`` (default value).
For example:
>>> KiB(1).unit == 'KiB'
>>> Byte(0).unit == 'Bytes'
>>> Byte(1).unit == 'Byte'
>>> Byte(1.1).unit == 'Bytes'
>>> Gb(2).unit == 'Gbs'
"""
global format_plural
if self.prefix_value == 1:
# If it's a '1', return it singular, no matter what
return self._name_singular
elif format_plural:
# Pluralization requested
return self._name_plural
else:
# Pluralization NOT requested, and the value is not 1
return self._name_singular
@property
def unit_plural(self):
"""The string that is an instances prefix unit name in the plural
form.
For example:
>>> KiB(1).unit_plural == 'KiB'
>>> Byte(1024).unit_plural == 'Bytes'
>>> Gb(1).unit_plural == 'Gb'
"""
return self._name_plural
@property
def unit_singular(self):
"""The string that is an instances prefix unit name in the singular
form.
For example:
>>> KiB(1).unit_singular == 'KiB'
>>> Byte(1024).unit == 'B'
>>> Gb(1).unit_singular == 'Gb'
"""
return self._name_singular
#: The "prefix" value of an instance
value = property(lambda s: s.prefix_value)
@classmethod
def from_other(cls, item):
"""Factory function to return instances of `item` converted into a new
instance of ``cls``. Because this is a class method, it may be called
from any bitmath class object without the need to explicitly
instantiate the class ahead of time.
*Implicit Parameter:*
* ``cls`` A bitmath class, implicitly set to the class of the
instance object it is called on
*User Supplied Parameter:*
* ``item`` A :class:`bitmath.Bitmath` subclass instance
*Example:*
>>> import bitmath
>>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
>>> print kib
KiB(1024.0)
"""
if isinstance(item, Bitmath):
return cls(bits=item.bits)
else:
raise ValueError("The provided items must be a valid bitmath class: %s" %
str(item.__class__))
######################################################################
# The following implement the Python datamodel customization methods
#
# Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
def __repr__(self):
"""Representation of this object as you would expect to see in an
interpreter"""
global _FORMAT_REPR
return self.format(_FORMAT_REPR)
def __str__(self):
"""String representation of this object"""
global format_string
return self.format(format_string)
def format(self, fmt):
"""Return a representation of this instance formatted with user
supplied syntax"""
_fmt_params = {
'base': self.base,
'bin': self.bin,
'binary': self.binary,
'bits': self.bits,
'bytes': self.bytes,
'power': self.power,
'system': self.system,
'unit': self.unit,
'unit_plural': self.unit_plural,
'unit_singular': self.unit_singular,
'value': self.value
}
return fmt.format(**_fmt_params)
##################################################################
# Guess the best human-readable prefix unit for representation
##################################################################
def best_prefix(self, system=None):
"""Optional parameter, `system`, allows you to prefer NIST or SI in
the results. By default, the current system is used (Bit/Byte default
to NIST).
Logic discussion/notes:
Base-case, does it need converting?
If the instance is less than one Byte, return the instance as a Bit
instance.
Else, begin by recording the unit system the instance is defined
by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.
If the instance is not already a ``Byte`` instance, convert it to one.
NIST units step up by powers of 1024, SI units step up by powers of
1000.
Take integer value of the log(base=STEP_POWER) of the instance's byte
value. E.g.:
>>> int(math.log(Gb(100).bytes, 1000))
3
This will return a value >= 0. The following determines the 'best
prefix unit' for representation:
* result == 0, best represented as a Byte
* result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
* 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
"""
# Use absolute value so we don't return Bit's for *everything*
# less than Byte(1). From github issue #55
if abs(self) < Byte(1):
return Bit.from_other(self)
else:
if type(self) is Byte: # pylint: disable=unidiomatic-typecheck
_inst = self
else:
_inst = Byte.from_other(self)
# Which table to consult? Was a preferred system provided?
if system is None:
# No preference. Use existing system
if self.system == 'NIST':
_STEPS = NIST_PREFIXES
_BASE = 1024
elif self.system == 'SI':
_STEPS = SI_PREFIXES
_BASE = 1000
# Anything else would have raised by now
else:
# Preferred system provided.
if system == NIST:
_STEPS = NIST_PREFIXES
_BASE = 1024
elif system == SI:
_STEPS = SI_PREFIXES
_BASE = 1000
else:
raise ValueError("Invalid value given for 'system' parameter."
" Must be one of NIST or SI")
# Index of the string of the best prefix in the STEPS list
_index = int(math.log(abs(_inst.bytes), _BASE))
# Recall that the log() function returns >= 0. This doesn't
# map to the STEPS list 1:1. That is to say, 0 is handled with
# special care. So if the _index is 1, we actually want item 0
# in the list.
if _index == 0:
# Already a Byte() type, so return it.
return _inst
elif _index >= len(_STEPS):
# This is a really big number. Use the biggest prefix we've got
_best_prefix = _STEPS[-1]
elif 0 < _index < len(_STEPS):
# There is an appropriate prefix unit to represent this
_best_prefix = _STEPS[_index - 1]
_conversion_method = getattr(
self,
'to_%sB' % _best_prefix)
return _conversion_method()
##################################################################
def to_Bit(self):
return Bit(self._bit_value)
def to_Byte(self):
return Byte(self._byte_value / float(NIST_STEPS['Byte']))
# Properties
Bit = property(lambda s: s.to_Bit())
Byte = property(lambda s: s.to_Byte())
##################################################################
def to_KiB(self):
return KiB(bits=self._bit_value)
def to_Kib(self):
return Kib(bits=self._bit_value)
def to_kB(self):
return kB(bits=self._bit_value)
def to_kb(self):
return kb(bits=self._bit_value)
# Properties
KiB = property(lambda s: s.to_KiB())
Kib = property(lambda s: s.to_Kib())
kB = property(lambda s: s.to_kB())
kb = property(lambda s: s.to_kb())
##################################################################
def to_MiB(self):
return MiB(bits=self._bit_value)
def to_Mib(self):
return Mib(bits=self._bit_value)
def to_MB(self):
return MB(bits=self._bit_value)
def to_Mb(self):
return Mb(bits=self._bit_value)
# Properties
MiB = property(lambda s: s.to_MiB())
Mib = property(lambda s: s.to_Mib())
MB = property(lambda s: s.to_MB())
Mb = property(lambda s: s.to_Mb())
##################################################################
def to_GiB(self):
return GiB(bits=self._bit_value)
def to_Gib(self):
return Gib(bits=self._bit_value)
def to_GB(self):
return GB(bits=self._bit_value)
def to_Gb(self):
return Gb(bits=self._bit_value)
# Properties
GiB = property(lambda s: s.to_GiB())
Gib = property(lambda s: s.to_Gib())
GB = property(lambda s: s.to_GB())
Gb = property(lambda s: s.to_Gb())
##################################################################
def to_TiB(self):
return TiB(bits=self._bit_value)
def to_Tib(self):
return Tib(bits=self._bit_value)
def to_TB(self):
return TB(bits=self._bit_value)
def to_Tb(self):
return Tb(bits=self._bit_value)
# Properties
TiB = property(lambda s: s.to_TiB())
Tib = property(lambda s: s.to_Tib())
TB = property(lambda s: s.to_TB())
Tb = property(lambda s: s.to_Tb())
##################################################################
def to_PiB(self):
return PiB(bits=self._bit_value)
def to_Pib(self):
return Pib(bits=self._bit_value)
def to_PB(self):
return PB(bits=self._bit_value)
def to_Pb(self):
return Pb(bits=self._bit_value)
# Properties
PiB = property(lambda s: s.to_PiB())
Pib = property(lambda s: s.to_Pib())
PB = property(lambda s: s.to_PB())
Pb = property(lambda s: s.to_Pb())
##################################################################
def to_EiB(self):
return EiB(bits=self._bit_value)
def to_Eib(self):
return Eib(bits=self._bit_value)
def to_EB(self):
return EB(bits=self._bit_value)
def to_Eb(self):
return Eb(bits=self._bit_value)
# Properties
EiB = property(lambda s: s.to_EiB())
Eib = property(lambda s: s.to_Eib())
EB = property(lambda s: s.to_EB())
Eb = property(lambda s: s.to_Eb())
##################################################################
# The SI units go beyond the NIST units. They also have the Zetta
# and Yotta prefixes.
def to_ZB(self):
return ZB(bits=self._bit_value)
def to_Zb(self):
return Zb(bits=self._bit_value)
# Properties
ZB = property(lambda s: s.to_ZB())
Zb = property(lambda s: s.to_Zb())
##################################################################
def to_YB(self):
return YB(bits=self._bit_value)
def to_Yb(self):
return Yb(bits=self._bit_value)
#: A new object representing this instance as a Yottabyte
YB = property(lambda s: s.to_YB())
Yb = property(lambda s: s.to_Yb())
##################################################################
# Rich comparison operations
##################################################################
def __lt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value < other
else:
return self._byte_value < other.bytes
def __le__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value <= other
else:
return self._byte_value <= other.bytes
def __eq__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value == other
else:
return self._byte_value == other.bytes
def __ne__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value != other
else:
return self._byte_value != other.bytes
def __gt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value > other
else:
return self._byte_value > other.bytes
def __ge__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value >= other
else:
return self._byte_value >= other.bytes
##################################################################
# Basic math operations
##################################################################
# Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
"""These methods are called to implement the binary arithmetic
operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
instance, to evaluate the expression x + y, where x is an instance of
a class that has an __add__() method, x.__add__(y) is called. The
__divmod__() method should be the equivalent to using __floordiv__()
and __mod__(); it should not be related to __truediv__() (described
below). Note that __pow__() should be defined to accept an optional
third argument if the ternary version of the built-in pow() function
is to be supported.object.__complex__(self)
"""
def __add__(self, other):
"""Supported operations with result types:
- bm + bm = bm
- bm + num = num
- num + bm = num (see radd)
"""
if isinstance(other, numbers.Number):
# bm + num
return other + self.value
else:
# bm + bm
total_bytes = self._byte_value + other.bytes
return (type(self))(bytes=total_bytes)
def __sub__(self, other):
"""Subtraction: Supported operations with result types:
- bm - bm = bm
- bm - num = num
- num - bm = num (see rsub)
"""
if isinstance(other, numbers.Number):
# bm - num
return self.value - other
else:
# bm - bm
total_bytes = self._byte_value - other.bytes
return (type(self))(bytes=total_bytes)
def __mul__(self, other):
"""Multiplication: Supported operations with result types:
- bm1 * bm2 = bm1
- bm * num = bm
- num * bm = num (see rmul)
"""
if isinstance(other, numbers.Number):
# bm * num
result = self._byte_value * other
return (type(self))(bytes=result)
else:
# bm1 * bm2
_other = other.value * other.base ** other.power
_self = self.prefix_value * self._base ** self._power
return (type(self))(bytes=_other * _self)
"""The division operator (/) is implemented by these methods. The
__truediv__() method is used when __future__.division is in effect,
otherwise __div__() is used. If only one of these two methods is
defined, the object will not support division in the alternate
context; TypeError will be raised instead."""
def __div__(self, other):
"""Division: Supported operations with result types:
- bm1 / bm2 = num
- bm / num = bm
- num / bm = num (see rdiv)
"""
if isinstance(other, numbers.Number):
# bm / num
result = self._byte_value / other
return (type(self))(bytes=result)
else:
# bm1 / bm2
return self._byte_value / float(other.bytes)
def __truediv__(self, other):
# num / bm
return self.__div__(other)
# def __floordiv__(self, other):
# return NotImplemented
# def __mod__(self, other):
# return NotImplemented
# def __divmod__(self, other):
# return NotImplemented
# def __pow__(self, other, modulo=None):
# return NotImplemented
##################################################################
"""These methods are called to implement the binary arithmetic
operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
reflected (swapped) operands. These functions are only called if the
left operand does not support the corresponding operation and the
operands are of different types. [2] For instance, to evaluate the
expression x - y, where y is an instance of a class that has an
__rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
NotImplemented.
These are the add/sub/mul/div methods for syntax where a number type
is given for the LTYPE and a bitmath object is given for the
RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
"""
def __radd__(self, other):
# num + bm = num
return other + self.value
def __rsub__(self, other):
# num - bm = num
return other - self.value
def __rmul__(self, other):
# num * bm = bm
return self * other
def __rdiv__(self, other):
# num / bm = num
return other / float(self.value)
def __rtruediv__(self, other):
# num / bm = num
return other / float(self.value)
"""Called to implement the built-in functions complex(), int(),
long(), and float(). Should return a value of the appropriate type.
If one of those methods does not support the operation with the
supplied arguments, it should return NotImplemented.
For bitmath purposes, these methods return the int/long/float
equivalent of the this instances prefix Unix value. That is to say:
- int(KiB(3.336)) would return 3
- long(KiB(3.336)) would return 3L
- float(KiB(3.336)) would return 3.336
"""
def __int__(self):
"""Return this instances prefix unit as an integer"""
return int(self.prefix_value)
def __long__(self):
"""Return this instances prefix unit as a long integer"""
return long(self.prefix_value) # pragma: PY3X no cover
def __float__(self):
"""Return this instances prefix unit as a floating point number"""
return float(self.prefix_value)
##################################################################
# Bitwise operations
##################################################################
def __lshift__(self, other):
"""Left shift, ex: 100 << 2
A left shift by n bits is equivalent to multiplication by pow(2,
n). A long integer is returned if the result exceeds the range of
plain integers."""
shifted = int(self.bits) << other
return type(self)(bits=shifted)
def __rshift__(self, other):
"""Right shift, ex: 100 >> 2
A right shift by n bits is equivalent to division by pow(2, n)."""
shifted = int(self.bits) >> other
return type(self)(bits=shifted)
def __and__(self, other):
""""Bitwise and, ex: 100 & 2
bitwise and". Each bit of the output is 1 if the corresponding bit
of x AND of y is 1, otherwise it's 0."""
andd = int(self.bits) & other
return type(self)(bits=andd)
def __xor__(self, other):
"""Bitwise xor, ex: 100 ^ 2
Does a "bitwise exclusive or". Each bit of the output is the same
as the corresponding bit in x if that bit in y is 0, and it's the
complement of the bit in x if that bit in y is 1."""
xord = int(self.bits) ^ other
return type(self)(bits=xord)
def __or__(self, other):
"""Bitwise or, ex: 100 | 2
Does a "bitwise or". Each bit of the output is 0 if the corresponding
bit of x AND of y is 0, otherwise it's 1."""
ord = int(self.bits) | other
return type(self)(bits=ord)
##################################################################
def __neg__(self):
"""The negative version of this instance"""
return (type(self))(-abs(self.prefix_value))
def __pos__(self):
return (type(self))(abs(self.prefix_value))
def __abs__(self):
return (type(self))(abs(self.prefix_value))
# def __invert__(self):
# """Called to implement the unary arithmetic operations (-, +, abs()
# and ~)."""
# return NotImplemented
######################################################################
# First, the bytes...
class Byte(Bitmath):
    """Byte based types fundamentally operate on self._bit_value"""
    def _setup(self):
        # (base, power, singular name, plural name): 2**0 == 1, the
        # fundamental unit for every byte-based prefix class below.
        return (2, 0, 'Byte', 'Bytes')
######################################################################
# NIST Prefixes for Byte based types
class KiB(Byte):
    """NIST kibibyte: 2**10 Bytes."""
    def _setup(self):
        return (2, 10, 'KiB', 'KiBs')
# Alias: same unit under the octet-style name.
Kio = KiB
class MiB(Byte):
    """NIST mebibyte: 2**20 Bytes."""
    def _setup(self):
        return (2, 20, 'MiB', 'MiBs')
Mio = MiB
class GiB(Byte):
    """NIST gibibyte: 2**30 Bytes."""
    def _setup(self):
        return (2, 30, 'GiB', 'GiBs')
Gio = GiB
class TiB(Byte):
    """NIST tebibyte: 2**40 Bytes."""
    def _setup(self):
        return (2, 40, 'TiB', 'TiBs')
Tio = TiB
class PiB(Byte):
    """NIST pebibyte: 2**50 Bytes."""
    def _setup(self):
        return (2, 50, 'PiB', 'PiBs')
Pio = PiB
class EiB(Byte):
    """NIST exbibyte: 2**60 Bytes."""
    def _setup(self):
        return (2, 60, 'EiB', 'EiBs')
Eio = EiB
######################################################################
# SI Prefixes for Byte based types
class kB(Byte):
    """SI kilobyte: 10**3 Bytes."""
    def _setup(self):
        return (10, 3, 'kB', 'kBs')
# Alias: same unit under the octet-style name.
ko = kB
class MB(Byte):
    """SI megabyte: 10**6 Bytes."""
    def _setup(self):
        return (10, 6, 'MB', 'MBs')
Mo = MB
class GB(Byte):
    """SI gigabyte: 10**9 Bytes."""
    def _setup(self):
        return (10, 9, 'GB', 'GBs')
Go = GB
class TB(Byte):
    """SI terabyte: 10**12 Bytes."""
    def _setup(self):
        return (10, 12, 'TB', 'TBs')
To = TB
class PB(Byte):
    """SI petabyte: 10**15 Bytes."""
    def _setup(self):
        return (10, 15, 'PB', 'PBs')
Po = PB
class EB(Byte):
    """SI exabyte: 10**18 Bytes."""
    def _setup(self):
        return (10, 18, 'EB', 'EBs')
Eo = EB
class ZB(Byte):
    """SI zettabyte: 10**21 Bytes."""
    def _setup(self):
        return (10, 21, 'ZB', 'ZBs')
Zo = ZB
class YB(Byte):
    """SI yottabyte: 10**24 Bytes."""
    def _setup(self):
        return (10, 24, 'YB', 'YBs')
Yo = YB
######################################################################
# And now the bit types
class Bit(Bitmath):
    """Bit based types fundamentally operate on self._bit_value"""
    def _set_prefix_value(self):
        # Override: bit types derive their display value from the bit
        # count, not the byte count used by byte types.
        self.prefix_value = self._to_prefix_value(self._bit_value)
    def _setup(self):
        # (base, power, singular name, plural name): 2**0 == 1 Bit
        return (2, 0, 'Bit', 'Bits')
    def _norm(self, value):
        """Normalize the input value into the fundamental unit for this prefix
        type"""
        # NOTE(review): unlike Bitmath._norm, no input-type validation is
        # performed here -- presumably callers always pass a real number;
        # confirm whether that is intentional.
        self._bit_value = value * self._unit_value
        self._byte_value = self._bit_value / 8.0
######################################################################
# NIST Prefixes for Bit based types
class Kib(Bit):
    """NIST kibibit: 2**10 bits."""
    def _setup(self):
        return (2, 10, 'Kib', 'Kibs')
class Mib(Bit):
    """NIST mebibit: 2**20 bits."""
    def _setup(self):
        return (2, 20, 'Mib', 'Mibs')
class Gib(Bit):
    """NIST gibibit: 2**30 bits."""
    def _setup(self):
        return (2, 30, 'Gib', 'Gibs')
class Tib(Bit):
    """NIST tebibit: 2**40 bits."""
    def _setup(self):
        return (2, 40, 'Tib', 'Tibs')
class Pib(Bit):
    """NIST pebibit: 2**50 bits."""
    def _setup(self):
        return (2, 50, 'Pib', 'Pibs')
class Eib(Bit):
    """NIST exbibit: 2**60 bits."""
    def _setup(self):
        return (2, 60, 'Eib', 'Eibs')
######################################################################
# SI Prefixes for Bit based types
class kb(Bit):
    """SI kilobit: 10**3 bits."""
    def _setup(self):
        return (10, 3, 'kb', 'kbs')
class Mb(Bit):
    """SI megabit: 10**6 bits."""
    def _setup(self):
        return (10, 6, 'Mb', 'Mbs')
class Gb(Bit):
    """SI gigabit: 10**9 bits."""
    def _setup(self):
        return (10, 9, 'Gb', 'Gbs')
class Tb(Bit):
    """SI terabit: 10**12 bits."""
    def _setup(self):
        return (10, 12, 'Tb', 'Tbs')
class Pb(Bit):
    """SI petabit: 10**15 bits."""
    def _setup(self):
        return (10, 15, 'Pb', 'Pbs')
class Eb(Bit):
    """SI exabit: 10**18 bits."""
    def _setup(self):
        return (10, 18, 'Eb', 'Ebs')
class Zb(Bit):
    """SI zettabit: 10**21 bits."""
    def _setup(self):
        return (10, 21, 'Zb', 'Zbs')
class Yb(Bit):
    """SI yottabit: 10**24 bits."""
    def _setup(self):
        return (10, 24, 'Yb', 'Ybs')
######################################################################
# Utility functions
def best_prefix(bytes, system=NIST):
    """Return a bitmath instance representing the best human-readable
    representation of the number of bytes given by ``bytes``. In addition
    to a numeric type, the ``bytes`` parameter may also be a bitmath type.
    Optionally select a preferred unit system by specifying the ``system``
    keyword. Choices for ``system`` are ``bitmath.NIST`` (default) and
    ``bitmath.SI``.
    Basically a shortcut for:
    >>> import bitmath
    >>> b = bitmath.Byte(12345)
    >>> best = b.best_prefix()
    Or:
    >>> import bitmath
    >>> best = (bitmath.KiB(12345) * 4201).best_prefix()
    """
    # Accept either a raw number or any bitmath instance; reduce both to
    # a plain byte count, then delegate to the instance method.
    raw_bytes = bytes.bytes if isinstance(bytes, Bitmath) else bytes
    return Byte(raw_bytes).best_prefix(system=system)
def query_device_capacity(device_fd):
    """Create bitmath instances of the capacity of a system block device
    Make one or more ioctl request to query the capacity of a block
    device. Perform any processing required to compute the final capacity
    value. Return the device capacity in bytes as a :class:`bitmath.Byte`
    instance.
    Thanks to the following resources for help figuring this out Linux/Mac
    ioctl's for querying block device sizes:
    * http://stackoverflow.com/a/12925285/263969
    * http://stackoverflow.com/a/9764508/263969
    :param file device_fd: A ``file`` object of the device to query the
    capacity of (as in ``get_device_capacity(open("/dev/sda"))``).
    :return: a bitmath :class:`bitmath.Byte` instance equivalent to the
    capacity of the target device in bytes.
    :raises NotImplementedError: on non-posix platforms
    :raises ValueError: if ``device_fd`` does not refer to a block device
    """
    if os_name() != 'posix':
        raise NotImplementedError("'bitmath.query_device_capacity' is not supported on this platform: %s" % os_name())
    s = os.stat(device_fd.name).st_mode
    if not stat.S_ISBLK(s):
        raise ValueError("The file descriptor provided is not of a device type")
    # The keys of ``ioctl_map`` correlate to possible values from
    # ``platform.system()``. Each entry lists the ioctl requests needed
    # on that platform ("request_params") and how to combine their
    # results into the final byte count ("func").
    ioctl_map = {
        # ioctls for the "Linux" platform
        "Linux": {
            "request_params": [
                # ( PARAM_NAME , FORMAT_CHAR , REQUEST_CODE )
                #
                # Per <linux/fs.h>, the BLKGETSIZE64 request returns a
                # 'u64' (unsigned 64 bit integer). The struct format
                # character 'L' (native unsigned long) reserves the 8
                # bytes of buffer space that result requires.
                ("BLKGETSIZE64", "L", 0x80081272)
            ],
            # BLKGETSIZE64 already reports the capacity in bytes, so no
            # extra calculation is required -- return it as-is.
            "func": lambda x: x["BLKGETSIZE64"]
        },
        # ioctls for the "Darwin" (Mac OS X) platform
        "Darwin": {
            "request_params": [
                # Per <sys/disk.h>: get media's block count - uint64_t.
                # As with BLKGETSIZE64, 'L' buffers an unsigned 64 bit int.
                ("DKIOCGETBLOCKCOUNT", "L", 0x40086419),
                # Per <sys/disk.h>: get media's block size - uint32_t.
                # 'I' (unsigned int) buffers the 4 byte result.
                ("DKIOCGETBLOCKSIZE", "I", 0x40046418)
            ],
            # OS X has no direct BLKGETSIZE64 equivalent. Instead,
            # capacity (bytes) = block count * block size.
            "func": lambda x: x["DKIOCGETBLOCKCOUNT"] * x["DKIOCGETBLOCKSIZE"]
        }
    }
    platform_params = ioctl_map[platform.system()]
    results = {}
    for req_name, fmt, request_code in platform_params['request_params']:
        # The native size (in bytes) of this format type
        buffer_size = struct.calcsize(fmt)
        # Allocate a *bytes* buffer for the ioctl result. The previous
        # implementation used a str (' ' * buffer_size), which Python 3's
        # fcntl.ioctl rejects: the mutable/immutable buffer argument must
        # be bytes-like. A zero-filled bytes object works on 2 and 3.
        buffer = b'\x00' * buffer_size
        buffer = fcntl.ioctl(device_fd.fileno(), request_code, buffer)
        # Unpack the raw ioctl result into a familiar Python data type
        # according to the ``fmt`` rules.
        result = struct.unpack(fmt, buffer)[0]
        # Add the new result to our collection
        results[req_name] = result
    return Byte(platform_params['func'](results))
def getsize(path, bestprefix=True, system=NIST):
    """Return a bitmath instance in the best human-readable representation
    of the file size at `path`. Optionally, provide a preferred unit
    system by setting `system` to either `bitmath.NIST` (default) or
    `bitmath.SI`.
    Optionally, set ``bestprefix`` to ``False`` to get ``bitmath.Byte``
    instances back.
    """
    # Resolve symlinks first so we measure the real file's size.
    size = Byte(os.path.getsize(os.path.realpath(path)))
    return size.best_prefix(system=system) if bestprefix else size
def listdir(search_base, followlinks=False, filter='*',
            relpath=False, bestprefix=False, system=NIST):
    """This is a generator which recurses the directory tree
    `search_base`, yielding 2-tuples of:
    * The absolute/relative path to a discovered file
    * A bitmath instance representing the "apparent size" of the file.
    - `search_base` - The directory to begin walking down.
    - `followlinks` - Whether or not to follow symbolic links to directories
    - `filter` - A glob (see :py:mod:`fnmatch`) to filter results with
    (default: ``*``, everything)
    - `relpath` - ``True`` to return the relative path from `pwd` or
    ``False`` (default) to return the fully qualified path
    - ``bestprefix`` - set to ``False`` to get ``bitmath.Byte``
    instances back instead.
    - `system` - Provide a preferred unit system by setting `system`
    to either ``bitmath.NIST`` (default) or ``bitmath.SI``.
    .. note:: This function does NOT return tuples for directory entities.
    .. note:: Symlinks to **files** are followed automatically
    """
    for root, dirs, files in os.walk(search_base, followlinks=followlinks):
        # Only file names matching the glob are measured.
        for name in fnmatch.filter(files, filter):
            _path = os.path.join(root, name)
            if relpath:
                # RELATIVE path
                _return_path = os.path.relpath(_path, '.')
            else:
                # REAL path
                _return_path = os.path.realpath(_path)
            if followlinks:
                yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
            else:
                # NOTE(review): when not following links this skips any
                # entry that is itself a symlink (or a directory), which
                # appears to contradict the "symlinks to files are
                # followed automatically" note above -- confirm intent.
                if os.path.isdir(_path) or os.path.islink(_path):
                    pass
                else:
                    yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
def parse_string(s):
    """Parse a string with units and try to make a bitmath object out of
    it.
    String inputs may include whitespace characters between the value and
    the unit.
    :param s: the string to parse, e.g. ``"1024 KiB"``
    :returns: a bitmath instance of the detected unit type
    :raises ValueError: if ``s`` is not a string, contains no unit, names
        an unknown unit, or its numeric portion can not be parsed
    """
    # Strings only please
    if not isinstance(s, (str, unicode)):
        raise ValueError("parse_string only accepts string inputs but a %s was given" %
                         type(s))
    # get the index of the first alphabetic character
    try:
        index = list([i.isalpha() for i in s]).index(True)
    except ValueError:
        # If there's no alphabetic characters we won't be able to .index(True)
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
    # split the string into the value and the unit
    val, unit = s[:index], s[index:]
    # see if the unit exists as a type in our namespace
    if unit == "b":
        unit_class = Bit
    elif unit == "B":
        unit_class = Byte
    else:
        if not (hasattr(sys.modules[__name__], unit) and isinstance(getattr(sys.modules[__name__], unit), type)):
            raise ValueError("The unit %s is not a valid bitmath unit" % unit)
        unit_class = globals()[unit]
    try:
        val = float(val)
    except ValueError:
        raise
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # are no longer swallowed and re-labelled as parse failures.
    try:
        return unit_class(val)
    except Exception:  # pragma: no cover
        raise ValueError("Can't parse string %s into a bitmath object" % s)
######################################################################
# Contxt Managers
@contextlib.contextmanager
def format(fmt_str=None, plural=False, bestprefix=False):
    """Context manager for printing bitmath instances.
    ``fmt_str`` - a formatting mini-language compat formatting string. See
    the @properties (above) for a list of available items.
    ``plural`` - True enables printing instances with 's's if they're
    plural. False (default) prints them as singular (no trailing 's').
    ``bestprefix`` - True enables printing instances in their best
    human-readable representation. False, the default, prints instances
    using their current prefix unit.
    .. note:: ``bestprefix`` is accepted for API compatibility but is not
    currently acted upon inside this context manager.
    """
    if 'bitmath' not in globals():
        import bitmath
    if plural:
        orig_fmt_plural = bitmath.format_plural
        bitmath.format_plural = True
    if fmt_str:
        orig_fmt_str = bitmath.format_string
        bitmath.format_string = fmt_str
    try:
        yield
    finally:
        # Restore the module-wide settings even when the managed block
        # raises. Previously the restore ran only after a clean yield,
        # so an exception inside the ``with`` body leaked the overrides.
        if plural:
            bitmath.format_plural = orig_fmt_plural
        if fmt_str:
            bitmath.format_string = orig_fmt_str
def cli_script_main(cli_args):
    """
    A command line interface to basic bitmath operations.
    :param list cli_args: argv-style argument list (program name excluded)
    :returns: a list of bitmath instances, one conversion per input size
    """
    parser = argparse.ArgumentParser(
        description='Converts from one type of size to another.')
    parser.add_argument('--from-stdin', default=False, action='store_true',
                        help='Reads number from stdin rather than the cli')
    parser.add_argument(
        '-f', '--from', choices=ALL_UNIT_TYPES, nargs=1,
        type=str, dest='fromunit', default=['Byte'],
        # Help-text typo fixed: "Defaultes" -> "Defaults".
        help='Input type you are converting from. Defaults to Byte.')
    parser.add_argument(
        '-t', '--to', choices=ALL_UNIT_TYPES, required=False, nargs=1, type=str,
        help=('Input type you are converting to. '
              'Attempts to detect best result if omitted.'), dest='tounit')
    parser.add_argument(
        'size', nargs='*', type=float,
        help='The number to convert.')
    args = parser.parse_args(cli_args)
    # Not sure how to cover this with tests, or if the functionality
    # will remain in this form long enough for it to make writing a
    # test worth the effort.
    if args.from_stdin:  # pragma: no cover
        args.size = [float(sys.stdin.readline()[:-1])]
    # Resolve the source unit class once; it is loop-invariant.
    source_unit = getattr(
        __import__('bitmath', fromlist=['True']), args.fromunit[0])
    results = []
    for size in args.size:
        instance = source_unit(size)
        # If we have a unit provided then use it
        if args.tounit:
            result = getattr(instance, args.tounit[0])
        # Otherwise use the best_prefix call
        else:
            result = instance.best_prefix()
        results.append(result)
    return results
def cli_script():  # pragma: no cover
    # Wrapper around cli_script_main so we can unittest the command
    # line functionality
    for result in cli_script_main(sys.argv[1:]):
        print(result)
# Allow direct execution: convert argv and print one result per line.
if __name__ == '__main__':
    cli_script()
|
tbielawa/bitmath
|
bitmath/__init__.py
|
format
|
python
|
def format(fmt_str=None, plural=False, bestprefix=False):
if 'bitmath' not in globals():
import bitmath
if plural:
orig_fmt_plural = bitmath.format_plural
bitmath.format_plural = True
if fmt_str:
orig_fmt_str = bitmath.format_string
bitmath.format_string = fmt_str
yield
if plural:
bitmath.format_plural = orig_fmt_plural
if fmt_str:
bitmath.format_string = orig_fmt_str
|
Context manager for printing bitmath instances.
``fmt_str`` - a formatting mini-language compat formatting string. See
the @properties (above) for a list of available items.
``plural`` - True enables printing instances with 's's if they're
plural. False (default) prints them as singular (no trailing 's').
``bestprefix`` - True enables printing instances in their best
human-readable representation. False, the default, prints instances
using their current prefix unit.
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L1565-L1595
| null |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright © 2014-2016 Tim Bielawa <timbielawa@gmail.com>
# See GitHub Contributors Graph for more information
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sub-license, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=bad-continuation,missing-docstring,invalid-name,line-too-long
"""Reference material:
The bitmath homepage is located at:
* http://bitmath.readthedocs.io/en/latest/
Prefixes for binary multiples:
http://physics.nist.gov/cuu/Units/binary.html
decimal and binary prefixes:
man 7 units (from the Linux Documentation Project 'man-pages' package)
BEFORE YOU GET HASTY WITH EXCLUDING CODE FROM COVERAGE: If you
absolutely need to skip code coverage because of a strange Python 2.x
vs 3.x thing, use the fancy environment substitution stuff from the
.coverage RC file. In review:
* If you *NEED* to skip a statement because of Python 2.x issues add the following::
# pragma: PY2X no cover
* If you *NEED* to skip a statement because of Python 3.x issues add the following::
# pragma: PY3X no cover
In this configuration, statements which are skipped in 2.x are still
covered in 3.x, and the reverse holds true for tests skipped in 3.x.
"""
from __future__ import print_function
import argparse
import contextlib
import fnmatch
import math
import numbers
import os
import os.path
import platform
import sys
# For device capacity reading in query_device_capacity(). Only supported
# on posix systems for now. Will be addressed in issue #52 on GitHub.
if os.name == 'posix':
import stat
import fcntl
import struct
# Explicit public API for ``from bitmath import *``.
__all__ = ['Bit', 'Byte', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB',
           'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'Kib',
           'Mib', 'Gib', 'Tib', 'Pib', 'Eib', 'kb', 'Mb', 'Gb', 'Tb',
           'Pb', 'Eb', 'Zb', 'Yb', 'getsize', 'listdir', 'format',
           'format_string', 'format_plural', 'parse_string', 'parse_string_unsafe',
           'ALL_UNIT_TYPES', 'NIST', 'NIST_PREFIXES', 'NIST_STEPS',
           'SI', 'SI_PREFIXES', 'SI_STEPS']
# Python 3.x compat: provide the Python 2 ``long`` and ``unicode`` names.
# Uses ``sys.version_info`` -- the old ``sys.version > '3'`` string
# comparison is lexicographic and would misbehave for a future
# double-digit major version.
if sys.version_info[0] >= 3:
    long = int  # pragma: PY2X no cover
    unicode = str  # pragma: PY2X no cover
#: A list of all the valid prefix unit types. Mostly for reference,
#: also used by the CLI tool as valid types
# (SI decimal units listed first, then the NIST binary units.)
ALL_UNIT_TYPES = ['Bit', 'Byte', 'kb', 'kB', 'Mb', 'MB', 'Gb', 'GB', 'Tb',
                  'TB', 'Pb', 'PB', 'Eb', 'EB', 'Zb', 'ZB', 'Yb',
                  'YB', 'Kib', 'KiB', 'Mib', 'MiB', 'Gib', 'GiB',
                  'Tib', 'TiB', 'Pib', 'PiB', 'Eib', 'EiB']
# #####################################################################
# Set up our module variables/constants
###################################
# Internal:
# Console repr(), ex: MiB(13.37), or kB(42.0)
# (consumed by Bitmath.__repr__; not part of the public API)
_FORMAT_REPR = '{unit_singular}({value})'
# ##################################
# Exposed:
#: Constants for referring to NIST prefix system
NIST = int(2)
#: Constants for referring to SI prefix system
SI = int(10)
# ##################################
#: All of the SI prefixes
SI_PREFIXES = ['k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
#: Byte values represented by each SI prefix unit
SI_STEPS = {
    'Bit': 1 / 8.0,
    'Byte': 1,
    'k': 1000,
    'M': 1000000,
    'G': 1000000000,
    'T': 1000000000000,
    'P': 1000000000000000,
    'E': 1000000000000000000,
    'Z': 1000000000000000000000,
    'Y': 1000000000000000000000000
}
#: All of the NIST prefixes
NIST_PREFIXES = ['Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei']
#: Byte values represented by each NIST prefix unit
NIST_STEPS = {
    'Bit': 1 / 8.0,
    'Byte': 1,
    'Ki': 1024,
    'Mi': 1048576,
    'Gi': 1073741824,
    'Ti': 1099511627776,
    'Pi': 1125899906842624,
    'Ei': 1152921504606846976
}
#: String representation, ex: ``13.37 MiB``, or ``42.0 kB``
format_string = "{value} {unit}"
#: Pluralization behavior
format_plural = False
def os_name():
    """Return :data:`os.name` (e.g. ``'posix'``).
    Wrapped in a function so platform-specific code paths are easy to
    stub out in unit tests.
    """
    # makes unittesting platform specific code easier
    return os.name
def capitalize_first(s):
    """Capitalize ONLY the first letter of the input `s`
    * returns a copy of input `s` with the first letter capitalized
    An empty string is returned unchanged instead of raising
    ``IndexError``.
    """
    if not s:
        # Guard: ``s[0]`` on an empty string would raise IndexError.
        return s
    pfx = s[0].upper()
    _s = s[1:]
    return pfx + _s
######################################################################
# Base class for everything else
class Bitmath(object):
"""The base class for all the other prefix classes"""
# All the allowed input types
valid_types = (int, float, long)
    def __init__(self, value=0, bytes=None, bits=None):
        """Instantiate with `value` by the unit, in plain bytes, or
        bits. Don't supply more than one keyword.
        default behavior: initialize with value of 0
        only setting value: assert bytes is None and bits is None
        only setting bytes: assert value == 0 and bits is None
        only setting bits: assert value == 0 and bytes is None
        """
        # Validate that at most one initialization source was supplied.
        _raise = False
        if (value == 0) and (bytes is None) and (bits is None):
            pass
        # Setting by bytes
        elif bytes is not None:
            if (value == 0) and (bits is None):
                pass
            else:
                _raise = True
        # setting by bits
        elif bits is not None:
            if (value == 0) and (bytes is None):
                pass
            else:
                _raise = True
        if _raise:
            raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
        self._do_setup()
        # NOTE: ``bytes=0``/``bits=0`` are falsy and fall through to
        # ``_norm(0)`` below, which produces the same zero-valued result.
        if bytes:
            # We were provided with the fundamental base unit, no need
            # to normalize
            self._byte_value = bytes
            self._bit_value = bytes * 8.0
        elif bits:
            # We were *ALMOST* given the fundamental base
            # unit. Translate it into the fundamental unit then
            # normalize.
            self._byte_value = bits / 8.0
            self._bit_value = bits
        else:
            # We were given a value representative of this *prefix
            # unit*. We need to normalize it into the number of bytes
            # it represents.
            self._norm(value)
        # We have the fundamental unit figured out. Set the 'pretty' unit
        self._set_prefix_value()
    def _set_prefix_value(self):
        # Derive the human-facing value from the fundamental byte count.
        # (The ``Bit`` type overrides this to use ``_bit_value``.)
        self.prefix_value = self._to_prefix_value(self._byte_value)
    def _to_prefix_value(self, value):
        """Return the number of bits/bytes as they would look like if we
        converted *to* this unit"""
        # float() forces true division on Python 2 as well.
        return value / float(self._unit_value)
    def _setup(self):
        # Subclasses must return (base, power, singular name, plural name).
        raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")
    def _do_setup(self):
        """Setup basic parameters for this class.
        `base` is the numeric base which when raised to `power` is equivalent
        to 1 unit of the corresponding prefix. I.e., base=2, power=10
        represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.
        Likewise, for the SI prefix classes `base` will be 10, and the `power`
        for the Kilobyte is 3.
        """
        (self._base, self._power, self._name_singular, self._name_plural) = self._setup()
        # One unit of this prefix expressed in the fundamental unit.
        self._unit_value = self._base ** self._power
    def _norm(self, value):
        """Normalize the input value into the fundamental unit for this prefix
        type.
        :param number value: The input value to be normalized
        :raises ValueError: if the input value is not a type of real number
        """
        if isinstance(value, self.valid_types):
            # Scale the prefix-unit value down to plain bytes, then bits.
            self._byte_value = value * self._unit_value
            self._bit_value = self._byte_value * 8.0
        else:
            raise ValueError("Initialization value '%s' is of an invalid type: %s. "
                             "Must be one of %s" % (
                                 value,
                                 type(value),
                                 ", ".join(str(x) for x in self.valid_types)))
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
def system(self):
"""The system of units used to measure an instance"""
if self._base == 2:
return "NIST"
elif self._base == 10:
return "SI"
else:
# I don't expect to ever encounter this logic branch, but
# hey, it's better to have extra test coverage than
# insufficient test coverage.
raise ValueError("Instances mathematical base is an unsupported value: %s" % (
str(self._base)))
@property
def unit(self):
"""The string that is this instances prefix unit name in agreement
with this instance value (singular or plural). Following the
convention that only 1 is singular. This will always be the singular
form when :attr:`bitmath.format_plural` is ``False`` (default value).
For example:
>>> KiB(1).unit == 'KiB'
>>> Byte(0).unit == 'Bytes'
>>> Byte(1).unit == 'Byte'
>>> Byte(1.1).unit == 'Bytes'
>>> Gb(2).unit == 'Gbs'
"""
global format_plural
if self.prefix_value == 1:
# If it's a '1', return it singular, no matter what
return self._name_singular
elif format_plural:
# Pluralization requested
return self._name_plural
else:
# Pluralization NOT requested, and the value is not 1
return self._name_singular
    @property
    def unit_plural(self):
        """The string that is an instances prefix unit name in the plural
        form.
        For example:
        >>> KiB(1).unit_plural == 'KiB'
        >>> Byte(1024).unit_plural == 'Bytes'
        >>> Gb(1).unit_plural == 'Gb'
        """
        return self._name_plural
    @property
    def unit_singular(self):
        """The string that is an instances prefix unit name in the singular
        form.
        For example:
        >>> KiB(1).unit_singular == 'KiB'
        >>> Byte(1024).unit_singular == 'Byte'
        >>> Gb(1).unit_singular == 'Gb'
        """
        return self._name_singular
    #: The "prefix" value of an instance
    #: (the count in this unit, e.g. ``KiB(1).value == 1.0`` -- not bytes)
    value = property(lambda s: s.prefix_value)
@classmethod
def from_other(cls, item):
"""Factory function to return instances of `item` converted into a new
instance of ``cls``. Because this is a class method, it may be called
from any bitmath class object without the need to explicitly
instantiate the class ahead of time.
*Implicit Parameter:*
* ``cls`` A bitmath class, implicitly set to the class of the
instance object it is called on
*User Supplied Parameter:*
* ``item`` A :class:`bitmath.Bitmath` subclass instance
*Example:*
>>> import bitmath
>>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
>>> print kib
KiB(1024.0)
"""
if isinstance(item, Bitmath):
return cls(bits=item.bits)
else:
raise ValueError("The provided items must be a valid bitmath class: %s" %
str(item.__class__))
######################################################################
# The following implement the Python datamodel customization methods
#
# Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
def __repr__(self):
"""Representation of this object as you would expect to see in an
interpreter"""
global _FORMAT_REPR
return self.format(_FORMAT_REPR)
def __str__(self):
"""String representation of this object"""
global format_string
return self.format(format_string)
def format(self, fmt):
"""Return a representation of this instance formatted with user
supplied syntax"""
_fmt_params = {
'base': self.base,
'bin': self.bin,
'binary': self.binary,
'bits': self.bits,
'bytes': self.bytes,
'power': self.power,
'system': self.system,
'unit': self.unit,
'unit_plural': self.unit_plural,
'unit_singular': self.unit_singular,
'value': self.value
}
return fmt.format(**_fmt_params)
##################################################################
# Guess the best human-readable prefix unit for representation
##################################################################
    def best_prefix(self, system=None):
        """Optional parameter, `system`, allows you to prefer NIST or SI in
        the results. By default, the current system is used (Bit/Byte default
        to NIST).
        Logic discussion/notes:
        Base-case, does it need converting?
        If the instance is less than one Byte, return the instance as a Bit
        instance.
        Else, begin by recording the unit system the instance is defined
        by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.
        If the instance is not already a ``Byte`` instance, convert it to one.
        NIST units step up by powers of 1024, SI units step up by powers of
        1000.
        Take integer value of the log(base=STEP_POWER) of the instance's byte
        value. E.g.:
        >>> int(math.log(Gb(100).bytes, 1000))
        3
        This will return a value >= 0. The following determines the 'best
        prefix unit' for representation:
        * result == 0, best represented as a Byte
        * result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
        * 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
        """
        # Use absolute value so we don't return Bit's for *everything*
        # less than Byte(1). From github issue #55
        if abs(self) < Byte(1):
            return Bit.from_other(self)
        else:
            # Normalize to a Byte instance before computing the index.
            if type(self) is Byte:  # pylint: disable=unidiomatic-typecheck
                _inst = self
            else:
                _inst = Byte.from_other(self)
            # Which table to consult? Was a preferred system provided?
            if system is None:
                # No preference. Use existing system
                if self.system == 'NIST':
                    _STEPS = NIST_PREFIXES
                    _BASE = 1024
                elif self.system == 'SI':
                    _STEPS = SI_PREFIXES
                    _BASE = 1000
                # Anything else would have raised by now
            else:
                # Preferred system provided.
                if system == NIST:
                    _STEPS = NIST_PREFIXES
                    _BASE = 1024
                elif system == SI:
                    _STEPS = SI_PREFIXES
                    _BASE = 1000
                else:
                    raise ValueError("Invalid value given for 'system' parameter."
                                     " Must be one of NIST or SI")
            # Index of the string of the best prefix in the STEPS list
            _index = int(math.log(abs(_inst.bytes), _BASE))
            # Recall that the log() function returns >= 0. This doesn't
            # map to the STEPS list 1:1. That is to say, 0 is handled with
            # special care. So if the _index is 1, we actually want item 0
            # in the list.
            if _index == 0:
                # Already a Byte() type, so return it.
                return _inst
            elif _index >= len(_STEPS):
                # This is a really big number. Use the biggest prefix we've got
                _best_prefix = _STEPS[-1]
            elif 0 < _index < len(_STEPS):
                # There is an appropriate prefix unit to represent this
                _best_prefix = _STEPS[_index - 1]
            # Prefix strings omit the trailing 'B'; re-append it to pick
            # the matching to_XxB() conversion method.
            _conversion_method = getattr(
                self,
                'to_%sB' % _best_prefix)
            return _conversion_method()
##################################################################
    # Unit-conversion helpers. Each to_X() builds a new instance of unit
    # X from this instance's bit count; the matching ``property`` lines
    # expose them as attributes (e.g. ``KiB(4).MB``).
    def to_Bit(self):
        return Bit(self._bit_value)
    def to_Byte(self):
        # NOTE: NIST_STEPS['Byte'] is 1, so this division is a no-op kept
        # for symmetry with the other step tables.
        return Byte(self._byte_value / float(NIST_STEPS['Byte']))
    # Properties
    Bit = property(lambda s: s.to_Bit())
    Byte = property(lambda s: s.to_Byte())
    ##################################################################
    def to_KiB(self):
        return KiB(bits=self._bit_value)
    def to_Kib(self):
        return Kib(bits=self._bit_value)
    def to_kB(self):
        return kB(bits=self._bit_value)
    def to_kb(self):
        return kb(bits=self._bit_value)
    # Properties
    KiB = property(lambda s: s.to_KiB())
    Kib = property(lambda s: s.to_Kib())
    kB = property(lambda s: s.to_kB())
    kb = property(lambda s: s.to_kb())
    ##################################################################
    def to_MiB(self):
        return MiB(bits=self._bit_value)
    def to_Mib(self):
        return Mib(bits=self._bit_value)
    def to_MB(self):
        return MB(bits=self._bit_value)
    def to_Mb(self):
        return Mb(bits=self._bit_value)
    # Properties
    MiB = property(lambda s: s.to_MiB())
    Mib = property(lambda s: s.to_Mib())
    MB = property(lambda s: s.to_MB())
    Mb = property(lambda s: s.to_Mb())
    ##################################################################
    def to_GiB(self):
        return GiB(bits=self._bit_value)
    def to_Gib(self):
        return Gib(bits=self._bit_value)
    def to_GB(self):
        return GB(bits=self._bit_value)
    def to_Gb(self):
        return Gb(bits=self._bit_value)
    # Properties
    GiB = property(lambda s: s.to_GiB())
    Gib = property(lambda s: s.to_Gib())
    GB = property(lambda s: s.to_GB())
    Gb = property(lambda s: s.to_Gb())
    ##################################################################
    def to_TiB(self):
        return TiB(bits=self._bit_value)
    def to_Tib(self):
        return Tib(bits=self._bit_value)
    def to_TB(self):
        return TB(bits=self._bit_value)
    def to_Tb(self):
        return Tb(bits=self._bit_value)
    # Properties
    TiB = property(lambda s: s.to_TiB())
    Tib = property(lambda s: s.to_Tib())
    TB = property(lambda s: s.to_TB())
    Tb = property(lambda s: s.to_Tb())
    ##################################################################
    def to_PiB(self):
        return PiB(bits=self._bit_value)
    def to_Pib(self):
        return Pib(bits=self._bit_value)
    def to_PB(self):
        return PB(bits=self._bit_value)
    def to_Pb(self):
        return Pb(bits=self._bit_value)
    # Properties
    PiB = property(lambda s: s.to_PiB())
    Pib = property(lambda s: s.to_Pib())
    PB = property(lambda s: s.to_PB())
    Pb = property(lambda s: s.to_Pb())
    ##################################################################
    def to_EiB(self):
        return EiB(bits=self._bit_value)
    def to_Eib(self):
        return Eib(bits=self._bit_value)
    def to_EB(self):
        return EB(bits=self._bit_value)
    def to_Eb(self):
        return Eb(bits=self._bit_value)
    # Properties
    EiB = property(lambda s: s.to_EiB())
    Eib = property(lambda s: s.to_Eib())
    EB = property(lambda s: s.to_EB())
    Eb = property(lambda s: s.to_Eb())
    ##################################################################
    # The SI units go beyond the NIST units. They also have the Zetta
    # and Yotta prefixes.
    def to_ZB(self):
        return ZB(bits=self._bit_value)
    def to_Zb(self):
        return Zb(bits=self._bit_value)
    # Properties
    ZB = property(lambda s: s.to_ZB())
    Zb = property(lambda s: s.to_Zb())
    ##################################################################
    def to_YB(self):
        return YB(bits=self._bit_value)
    def to_Yb(self):
        return Yb(bits=self._bit_value)
    #: A new object representing this instance as a Yottabyte
    YB = property(lambda s: s.to_YB())
    Yb = property(lambda s: s.to_Yb())
##################################################################
# Rich comparison operations
##################################################################
def __lt__(self, other):
    """Less-than: numbers compare against the prefix value, bitmath
    instances compare via the common byte count."""
    numeric = isinstance(other, numbers.Number)
    return self.prefix_value < other if numeric else self._byte_value < other.bytes
def __le__(self, other):
    """Less-than-or-equal; same comparison rules as ``__lt__``."""
    numeric = isinstance(other, numbers.Number)
    return self.prefix_value <= other if numeric else self._byte_value <= other.bytes
def __eq__(self, other):
    """Equality; same comparison rules as ``__lt__``."""
    numeric = isinstance(other, numbers.Number)
    return self.prefix_value == other if numeric else self._byte_value == other.bytes
def __ne__(self, other):
    """Inequality; same comparison rules as ``__lt__``."""
    numeric = isinstance(other, numbers.Number)
    return self.prefix_value != other if numeric else self._byte_value != other.bytes
def __gt__(self, other):
    """Greater-than; same comparison rules as ``__lt__``."""
    numeric = isinstance(other, numbers.Number)
    return self.prefix_value > other if numeric else self._byte_value > other.bytes
def __ge__(self, other):
    """Greater-than-or-equal; same comparison rules as ``__lt__``."""
    numeric = isinstance(other, numbers.Number)
    return self.prefix_value >= other if numeric else self._byte_value >= other.bytes
##################################################################
# Basic math operations
##################################################################
# Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
"""These methods are called to implement the binary arithmetic
operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
instance, to evaluate the expression x + y, where x is an instance of
a class that has an __add__() method, x.__add__(y) is called. The
__divmod__() method should be the equivalent to using __floordiv__()
and __mod__(); it should not be related to __truediv__() (described
below). Note that __pow__() should be defined to accept an optional
third argument if the ternary version of the built-in pow() function
is to be supported.object.__complex__(self)
"""
def __add__(self, other):
    """Supported operations with result types:
    - bm + bm = bm
    - bm + num = num
    - num + bm = num (see radd)
    """
    if isinstance(other, numbers.Number):
        # bm + num: degrade to a plain number in this prefix unit
        return other + self.value
    else:
        # bm + bm: sum the byte counts, keep this instance's prefix type
        total_bytes = self._byte_value + other.bytes
        return (type(self))(bytes=total_bytes)
def __sub__(self, other):
    """Subtraction: Supported operations with result types:
    - bm - bm = bm
    - bm - num = num
    - num - bm = num (see rsub)
    """
    if isinstance(other, numbers.Number):
        # bm - num: degrade to a plain number in this prefix unit
        return self.value - other
    else:
        # bm - bm: difference of byte counts, keep this prefix type
        total_bytes = self._byte_value - other.bytes
        return (type(self))(bytes=total_bytes)
def __mul__(self, other):
    """Multiplication: Supported operations with result types:
    - bm1 * bm2 = bm1
    - bm * num = bm
    - num * bm = num (see rmul)
    """
    if isinstance(other, numbers.Number):
        # bm * num: scale the byte count
        result = self._byte_value * other
        return (type(self))(bytes=result)
    else:
        # bm1 * bm2: multiply the two prefix magnitudes, each scaled by
        # its own unit factor (base ** power); result takes bm1's type.
        _other = other.value * other.base ** other.power
        _self = self.prefix_value * self._base ** self._power
        return (type(self))(bytes=_other * _self)
"""The division operator (/) is implemented by these methods. The
__truediv__() method is used when __future__.division is in effect,
otherwise __div__() is used. If only one of these two methods is
defined, the object will not support division in the alternate
context; TypeError will be raised instead."""
def __div__(self, other):
    """Division: Supported operations with result types:
    - bm1 / bm2 = num
    - bm / num = bm
    - num / bm = num (see rdiv)
    """
    if isinstance(other, numbers.Number):
        # bm / num: scale the byte count down
        result = self._byte_value / other
        return (type(self))(bytes=result)
    else:
        # bm1 / bm2: ratio of byte counts as a plain float
        return self._byte_value / float(other.bytes)
def __truediv__(self, other):
    # Python 3 (and __future__.division) routes here; same semantics
    return self.__div__(other)
# def __floordiv__(self, other):
# return NotImplemented
# def __mod__(self, other):
# return NotImplemented
# def __divmod__(self, other):
# return NotImplemented
# def __pow__(self, other, modulo=None):
# return NotImplemented
##################################################################
"""These methods are called to implement the binary arithmetic
operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
reflected (swapped) operands. These functions are only called if the
left operand does not support the corresponding operation and the
operands are of different types. [2] For instance, to evaluate the
expression x - y, where y is an instance of a class that has an
__rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
NotImplemented.
These are the add/sub/mul/div methods for syntax where a number type
is given for the LTYPE and a bitmath object is given for the
RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
"""
def __radd__(self, other):
    # num + bm = num (plain number in this prefix unit)
    return other + self.value
def __rsub__(self, other):
    # num - bm = num
    return other - self.value
def __rmul__(self, other):
    # num * bm = bm (delegates to __mul__, which returns a bitmath type)
    return self * other
def __rdiv__(self, other):
    # num / bm = num; float() forces true division under Python 2
    return other / float(self.value)
def __rtruediv__(self, other):
    # num / bm = num (Python 3 reflected true division)
    return other / float(self.value)
"""Called to implement the built-in functions complex(), int(),
long(), and float(). Should return a value of the appropriate type.
If one of those methods does not support the operation with the
supplied arguments, it should return NotImplemented.
For bitmath purposes, these methods return the int/long/float
equivalent of the this instances prefix Unix value. That is to say:
- int(KiB(3.336)) would return 3
- long(KiB(3.336)) would return 3L
- float(KiB(3.336)) would return 3.336
"""
def __int__(self):
    """Return this instances prefix unit as an integer (truncated)"""
    return int(self.prefix_value)
def __long__(self):
    """Return this instances prefix unit as a long integer (Python 2 only;
    ``long`` is aliased to ``int`` on Python 3 at module import time)"""
    return long(self.prefix_value)  # pragma: PY3X no cover
def __float__(self):
    """Return this instances prefix unit as a floating point number"""
    return float(self.prefix_value)
##################################################################
# Bitwise operations
##################################################################
def __lshift__(self, other):
    """Left shift, ex: 100 << 2

    A left shift by n bits is equivalent to multiplication by pow(2,
    n). Returns a new instance of this prefix type."""
    shifted = int(self.bits) << other
    return type(self)(bits=shifted)
def __rshift__(self, other):
    """Right shift, ex: 100 >> 2

    A right shift by n bits is equivalent to (floor) division by
    pow(2, n). Returns a new instance of this prefix type."""
    shifted = int(self.bits) >> other
    return type(self)(bits=shifted)
def __and__(self, other):
    """Bitwise and, ex: 100 & 2

    Each bit of the output is 1 if the corresponding bit of x AND of
    y is 1, otherwise it's 0. ``other`` is a plain integer."""
    # local renamed from ``andd``; docstring opener fixed (was "\"\"\"\"")
    result = int(self.bits) & other
    return type(self)(bits=result)
def __xor__(self, other):
    """Bitwise xor, ex: 100 ^ 2

    Does a "bitwise exclusive or". Each bit of the output is the same
    as the corresponding bit in x if that bit in y is 0, and it's the
    complement of the bit in x if that bit in y is 1."""
    result = int(self.bits) ^ other
    return type(self)(bits=result)
def __or__(self, other):
    """Bitwise or, ex: 100 | 2

    Does a "bitwise or". Each bit of the output is 0 if the
    corresponding bit of x AND of y is 0, otherwise it's 1."""
    # local renamed from ``ord``, which shadowed the builtin ord()
    result = int(self.bits) | other
    return type(self)(bits=result)
##################################################################
def __neg__(self):
    """The negative version of this instance.

    Note: implemented as -abs(), so the result is always non-positive,
    even when this instance already holds a negative value."""
    return (type(self))(-abs(self.prefix_value))
def __pos__(self):
    # Unary plus returns the magnitude (absolute value), not self
    return (type(self))(abs(self.prefix_value))
def __abs__(self):
    # abs(bm) -> new instance holding the absolute prefix value
    return (type(self))(abs(self.prefix_value))
# def __invert__(self):
#     """Called to implement the unary arithmetic operations (-, +, abs()
#     and ~)."""
#     return NotImplemented
######################################################################
# First, the bytes...
class Byte(Bitmath):
    """Byte based types fundamentally operate on self._bit_value"""
    def _setup(self):
        # (base, power, singular name, plural name)
        return (2, 0, 'Byte', 'Bytes')
######################################################################
# NIST Prefixes for Byte based types (base 2).
# The *io aliases are the French "octet" spellings of the same units.
class KiB(Byte):
    def _setup(self):
        return (2, 10, 'KiB', 'KiBs')
Kio = KiB
class MiB(Byte):
    def _setup(self):
        return (2, 20, 'MiB', 'MiBs')
Mio = MiB
class GiB(Byte):
    def _setup(self):
        return (2, 30, 'GiB', 'GiBs')
Gio = GiB
class TiB(Byte):
    def _setup(self):
        return (2, 40, 'TiB', 'TiBs')
Tio = TiB
class PiB(Byte):
    def _setup(self):
        return (2, 50, 'PiB', 'PiBs')
Pio = PiB
class EiB(Byte):
    def _setup(self):
        return (2, 60, 'EiB', 'EiBs')
Eio = EiB
######################################################################
# SI Prefixes for Byte based types (base 10).
# The *o aliases are the French "octet" spellings of the same units.
class kB(Byte):
    def _setup(self):
        return (10, 3, 'kB', 'kBs')
ko = kB
class MB(Byte):
    def _setup(self):
        return (10, 6, 'MB', 'MBs')
Mo = MB
class GB(Byte):
    def _setup(self):
        return (10, 9, 'GB', 'GBs')
Go = GB
class TB(Byte):
    def _setup(self):
        return (10, 12, 'TB', 'TBs')
To = TB
class PB(Byte):
    def _setup(self):
        return (10, 15, 'PB', 'PBs')
Po = PB
class EB(Byte):
    def _setup(self):
        return (10, 18, 'EB', 'EBs')
Eo = EB
class ZB(Byte):
    def _setup(self):
        return (10, 21, 'ZB', 'ZBs')
Zo = ZB
class YB(Byte):
    def _setup(self):
        return (10, 24, 'YB', 'YBs')
Yo = YB
######################################################################
# And now the bit types
class Bit(Bitmath):
    """Bit based types fundamentally operate on self._bit_value"""
    def _set_prefix_value(self):
        # For bit types the "pretty" prefix value derives from the bit
        # count, not the byte count (contrast with Bitmath's version).
        self.prefix_value = self._to_prefix_value(self._bit_value)
    def _setup(self):
        # (base, power, singular name, plural name)
        return (2, 0, 'Bit', 'Bits')
    def _norm(self, value):
        """Normalize the input value into the fundamental unit for this prefix
        type"""
        self._bit_value = value * self._unit_value
        self._byte_value = self._bit_value / 8.0
######################################################################
# NIST Prefixes for Bit based types (base 2)
class Kib(Bit):
    def _setup(self):
        return (2, 10, 'Kib', 'Kibs')
class Mib(Bit):
    def _setup(self):
        return (2, 20, 'Mib', 'Mibs')
class Gib(Bit):
    def _setup(self):
        return (2, 30, 'Gib', 'Gibs')
class Tib(Bit):
    def _setup(self):
        return (2, 40, 'Tib', 'Tibs')
class Pib(Bit):
    def _setup(self):
        return (2, 50, 'Pib', 'Pibs')
class Eib(Bit):
    def _setup(self):
        return (2, 60, 'Eib', 'Eibs')
######################################################################
# SI Prefixes for Bit based types (base 10)
class kb(Bit):
    def _setup(self):
        return (10, 3, 'kb', 'kbs')
class Mb(Bit):
    def _setup(self):
        return (10, 6, 'Mb', 'Mbs')
class Gb(Bit):
    def _setup(self):
        return (10, 9, 'Gb', 'Gbs')
class Tb(Bit):
    def _setup(self):
        return (10, 12, 'Tb', 'Tbs')
class Pb(Bit):
    def _setup(self):
        return (10, 15, 'Pb', 'Pbs')
class Eb(Bit):
    def _setup(self):
        return (10, 18, 'Eb', 'Ebs')
class Zb(Bit):
    def _setup(self):
        return (10, 21, 'Zb', 'Zbs')
class Yb(Bit):
    def _setup(self):
        return (10, 24, 'Yb', 'Ybs')
######################################################################
# Utility functions
def best_prefix(bytes, system=NIST):
    """Return a bitmath instance representing the best human-readable
    representation of the number of bytes given by ``bytes``.

    ``bytes`` may be a plain number or any bitmath instance. Select a
    preferred unit system with the ``system`` keyword: ``bitmath.NIST``
    (default) or ``bitmath.SI``.

    Basically a shortcut for:

       >>> import bitmath
       >>> best = bitmath.Byte(12345).best_prefix()
    """
    # Accept either a bitmath instance or a raw number of bytes.
    byte_count = bytes.bytes if isinstance(bytes, Bitmath) else bytes
    return Byte(byte_count).best_prefix(system=system)
def query_device_capacity(device_fd):
    """Create bitmath instances of the capacity of a system block device

    Make one or more ioctl request to query the capacity of a block
    device. Perform any processing required to compute the final capacity
    value. Return the device capacity in bytes as a :class:`bitmath.Byte`
    instance.

    Thanks to the following resources for help figuring this out Linux/Mac
    ioctl's for querying block device sizes:

    * http://stackoverflow.com/a/12925285/263969
    * http://stackoverflow.com/a/9764508/263969

    :param file device_fd: A ``file`` object of the device to query the
        capacity of (as in ``get_device_capacity(open("/dev/sda"))``).
    :return: a bitmath :class:`bitmath.Byte` instance equivalent to the
        capacity of the target device in bytes.
    :raises NotImplementedError: on non-posix platforms
    :raises ValueError: if ``device_fd`` is not a block device
    """
    if os_name() != 'posix':
        raise NotImplementedError("'bitmath.query_device_capacity' is not supported on this platform: %s" % os_name())
    s = os.stat(device_fd.name).st_mode
    if not stat.S_ISBLK(s):
        raise ValueError("The file descriptor provided is not of a device type")
    # The keys of the ``ioctl_map`` dictionary correlate to possible
    # values from the ``platform.system`` function.
    ioctl_map = {
        # ioctls for the "Linux" platform
        "Linux": {
            "request_params": [
                # Parameters used to calculate the device size:
                # ( PARAM_NAME , FORMAT_CHAR , REQUEST_CODE )
                ("BLKGETSIZE64", "L", 0x80081272)
                # Per <linux/fs.h>, BLKGETSIZE64 returns a 'u64' (an
                # unsigned 64 bit integer), so we need an 8 byte buffer.
                # Per the struct module's format chart, character 'L'
                # (native unsigned long) is 8 bytes on the platforms
                # where this is expected to run. Verify with:
                #
                #    >>> import struct
                #    >>> print 8 == struct.calcsize('L')
                #
                # (please file a bug report at
                # https://github.com/tbielawa/bitmath/issues/new if
                # this does *not* hold for you)
            ],
            # func computes the final result from the collected ioctl
            # values. BLKGETSIZE64 already IS the capacity in bytes, so
            # no extra math is needed here.
            "func": lambda x: x["BLKGETSIZE64"]
        },
        # ioctls for the "Darwin" (Mac OS X) platform
        "Darwin": {
            "request_params": [
                # ( PARAM_NAME , FORMAT_CHAR , REQUEST_CODE )
                ("DKIOCGETBLOCKCOUNT", "L", 0x40086419),
                # Per <sys/disk.h>: get media's block count - uint64_t,
                # so format character 'L' again (8 byte buffer)
                ("DKIOCGETBLOCKSIZE", "I", 0x40046418)
                # Per <sys/disk.h>: get media's block size - uint32_t,
                # i.e. a plain 4 byte unsigned int: format character 'I'
            ],
            # OS X has no direct BLKGETSIZE64 equivalent. Instead query
            # the number of blocks and the size of each block, then:
            #
            #                      n Block * y Byte
            #  capacity (bytes) =  ----------------
            #                          1 Block
            "func": lambda x: x["DKIOCGETBLOCKCOUNT"] * x["DKIOCGETBLOCKSIZE"]
        }
    }
    # KeyError here means an unsupported posix platform (not Linux/Darwin)
    platform_params = ioctl_map[platform.system()]
    results = {}
    for req_name, fmt, request_code in platform_params['request_params']:
        # Native size (in bytes) of this format type
        buffer_size = struct.calcsize(fmt)
        # Construct a mutable-by-ioctl buffer to receive the result
        buffer = ' ' * buffer_size
        # NOTE(review): only exercised on a few test systems; no
        # targeted try/except yet -- ioctl errors propagate as-is.
        buffer = fcntl.ioctl(device_fd.fileno(), request_code, buffer)
        # Decode the raw ioctl result into a Python int per ``fmt``
        result = struct.unpack(fmt, buffer)[0]
        results[req_name] = result
    return Byte(platform_params['func'](results))
def getsize(path, bestprefix=True, system=NIST):
    """Return a bitmath instance for the size of the file at ``path``.

    With ``bestprefix=True`` (default) the result uses the most
    human-readable prefix in the chosen unit ``system`` (``bitmath.NIST``
    by default, or ``bitmath.SI``); set ``bestprefix=False`` to always
    get a plain ``bitmath.Byte`` back.
    """
    # Resolve symlinks first, then stat the real target.
    size = Byte(os.path.getsize(os.path.realpath(path)))
    return size.best_prefix(system=system) if bestprefix else size
def listdir(search_base, followlinks=False, filter='*',
            relpath=False, bestprefix=False, system=NIST):
    """This is a generator which recurses the directory tree
    `search_base`, yielding 2-tuples of:

    * The absolute/relative path to a discovered file
    * A bitmath instance representing the "apparent size" of the file.

    - `search_base` - The directory to begin walking down.
    - `followlinks` - Whether or not to follow symbolic links to directories
    - `filter` - A glob (see :py:mod:`fnmatch`) to filter results with
      (default: ``*``, everything)
    - `relpath` - ``True`` to return the relative path from `pwd` or
      ``False`` (default) to return the fully qualified path
    - ``bestprefix`` - set to ``False`` to get ``bitmath.Byte``
      instances back instead.
    - `system` - Provide a preferred unit system by setting `system`
      to either ``bitmath.NIST`` (default) or ``bitmath.SI``.

    .. note:: This function does NOT return tuples for directory entities.
    .. note:: Symlinks to **files** are followed automatically
    """
    for root, dirs, files in os.walk(search_base, followlinks=followlinks):
        for name in fnmatch.filter(files, filter):
            _path = os.path.join(root, name)
            if relpath:
                # RELATIVE path, from the current working directory
                _return_path = os.path.relpath(_path, '.')
            else:
                # REAL (fully-resolved absolute) path
                _return_path = os.path.realpath(_path)
            if followlinks:
                yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
            else:
                # Skip entries that resolve to directories or are
                # symlinks themselves when not following links
                if os.path.isdir(_path) or os.path.islink(_path):
                    pass
                else:
                    yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
def parse_string(s):
    """Parse a string with units and try to make a bitmath object out of
    it.

    String inputs may include whitespace characters between the value and
    the unit.

    :param s: input such as ``"1024 KiB"`` or ``"42b"``
    :return: an instance of the matching bitmath unit class
    :raises ValueError: for non-string input, a missing/unknown unit,
        or a malformed numeric part
    """
    # Strings only please
    if not isinstance(s, (str, unicode)):
        raise ValueError("parse_string only accepts string inputs but a %s was given" %
                         type(s))
    # get the index of the first alphabetic character (= unit start)
    try:
        index = list([i.isalpha() for i in s]).index(True)
    except ValueError:
        # If there's no alphabetic characters we won't be able to .index(True)
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
    # split the string into the value and the unit
    val, unit = s[:index], s[index:]
    # see if the unit exists as a type in our namespace
    if unit == "b":
        unit_class = Bit
    elif unit == "B":
        unit_class = Byte
    else:
        if not (hasattr(sys.modules[__name__], unit) and isinstance(getattr(sys.modules[__name__], unit), type)):
            raise ValueError("The unit %s is not a valid bitmath unit" % unit)
        unit_class = globals()[unit]
    # float() raises ValueError itself on a malformed number; let it
    # propagate (the old ``try: ... except ValueError: raise`` was a no-op)
    val = float(val)
    try:
        return unit_class(val)
    except Exception:  # pragma: no cover
        # narrowed from a bare ``except:`` which also trapped
        # KeyboardInterrupt/SystemExit
        raise ValueError("Can't parse string %s into a bitmath object" % s)
def parse_string_unsafe(s, system=SI):
    """Attempt to parse a string with ambiguous units and try to make a
    bitmath object out of it.

    This may produce inaccurate results if parsing shell output. For
    example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
    ~= 2.666 KiB. See the documentation for all of the important details.

    Note the following caveats:

    * All inputs are assumed to be byte-based (as opposed to bit based)
    * Numerical inputs (those without any units) are assumed to be a
      number of bytes
    * Inputs with single letter units (k, M, G, etc) are assumed to be SI
      units (base-10). Set the `system` parameter to `bitmath.NIST` to
      change this behavior.
    * Inputs with an `i` character following the leading letter (Ki, Mi,
      Gi) are assumed to be NIST units (base 2)
    * Capitalization does not matter
    """
    if not isinstance(s, (str, unicode)) and \
            not isinstance(s, numbers.Number):
        raise ValueError("parse_string_unsafe only accepts string/number inputs but a %s was given" %
                         type(s))
    ######################################################################
    # Easy cases first: a raw number, or a number hiding in a string
    if isinstance(s, numbers.Number):
        # It's just a number. Assume bytes
        return Byte(s)
    if isinstance(s, (str, unicode)):
        try:
            # Can we turn it directly into a number?
            return Byte(float(s))
        except ValueError:
            # Nope, this is not a plain number
            pass
    ######################################################################
    # At this point the input is a string carrying some unit suffix.
    # Separate the number from the unit: the unit starts at the first
    # alphabetic character.
    try:
        index = list([i.isalpha() for i in s]).index(True)
    except ValueError:  # pragma: no cover
        # If there's no alphabetic characters we won't be able to .index(True)
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
    # Split the string into the value and the unit
    val, unit = s[:index], s[index:]
    # Don't trust anything. We'll make sure the correct 'B' is in place
    # (everything is treated as byte-based; see the caveats above).
    unit = unit.rstrip('Bb')
    unit += 'B'
    # At this point ``unit`` should be either:
    #  - 2 characters (SI, ex: kB or GB)
    #  - 3 characters (NIST, ex: KiB or GiB)
    # Any other length is not a valid unit.
    # SI
    if len(unit) == 2:
        # Has NIST parsing been requested?
        if system == NIST:
            # NIST units requested. Ensure the unit begins with a
            # capital letter and is followed by an 'i' character.
            unit = capitalize_first(unit)
            # Insert an 'i' char after the first letter
            _unit = list(unit)
            _unit.insert(1, 'i')
            # Collapse the list back into a 3 letter string
            unit = ''.join(_unit)
            unit_class = globals()[unit]
        else:
            # Default parsing (SI format)
            #
            # Edge-case checking: SI 'thousand' is a lower-case k
            if unit.startswith('K'):
                unit = unit.replace('K', 'k')
            elif not unit.startswith('k'):
                # Otherwise, ensure the first char is capitalized
                unit = capitalize_first(unit)
            # This is an SI-type unit
            if unit[0] in SI_PREFIXES:
                unit_class = globals()[unit]
    # NIST
    elif len(unit) == 3:
        unit = capitalize_first(unit)
        # This is a NIST-type unit
        if unit[:2] in NIST_PREFIXES:
            unit_class = globals()[unit]
    else:
        # This is not a unit we recognize
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    # The SI/NIST branches above only bind ``unit_class`` when the
    # prefix letter(s) were recognized; detect the fall-through case.
    try:
        unit_class
    except UnboundLocalError:
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    return unit_class(float(val))
######################################################################
# Contxt Managers
# NOTE: the stray ``@contextlib.contextmanager`` decorator that sat here
# was orphaned (this function is not a generator; wrapping it broke
# ``cli_script``, which iterates over the returned list). Removed.
def cli_script_main(cli_args):
    """
    A command line interface to basic bitmath operations.

    :param list cli_args: argv-style argument list (excluding argv[0])
    :return: list of bitmath instances, one per input size
    """
    choices = ALL_UNIT_TYPES
    parser = argparse.ArgumentParser(
        description='Converts from one type of size to another.')
    parser.add_argument('--from-stdin', default=False, action='store_true',
                        help='Reads number from stdin rather than the cli')
    parser.add_argument(
        '-f', '--from', choices=choices, nargs=1,
        type=str, dest='fromunit', default=['Byte'],
        help='Input type you are converting from. Defaults to Byte.')
    parser.add_argument(
        '-t', '--to', choices=choices, required=False, nargs=1, type=str,
        help=('Input type you are converting to. '
              'Attempts to detect best result if omitted.'), dest='tounit')
    parser.add_argument(
        'size', nargs='*', type=float,
        help='The number to convert.')
    args = parser.parse_args(cli_args)
    # Not sure how to cover this with tests, or if the functionality
    # will remain in this form long enough for it to make writing a
    # test worth the effort.
    if args.from_stdin:  # pragma: no cover
        args.size = [float(sys.stdin.readline()[:-1])]
    results = []
    for size in args.size:
        # Look the source unit class up on the bitmath module itself
        instance = getattr(__import__(
            'bitmath', fromlist=['True']), args.fromunit[0])(size)
        # If we have a unit provided then use it
        if args.tounit:
            result = getattr(instance, args.tounit[0])
        # Otherwise use the best_prefix call
        else:
            result = instance.best_prefix()
        results.append(result)
    return results
def cli_script():  # pragma: no cover
    """Console entry point: convert ``sys.argv`` sizes and print each result.

    Thin shell around :func:`cli_script_main` so the conversion logic
    itself stays unit-testable.
    """
    results = cli_script_main(sys.argv[1:])
    for converted in results:
        print(converted)


if __name__ == '__main__':
    cli_script()
|
tbielawa/bitmath
|
bitmath/__init__.py
|
cli_script_main
|
python
|
def cli_script_main(cli_args):
choices = ALL_UNIT_TYPES
parser = argparse.ArgumentParser(
description='Converts from one type of size to another.')
parser.add_argument('--from-stdin', default=False, action='store_true',
help='Reads number from stdin rather than the cli')
parser.add_argument(
'-f', '--from', choices=choices, nargs=1,
type=str, dest='fromunit', default=['Byte'],
help='Input type you are converting from. Defaultes to Byte.')
parser.add_argument(
'-t', '--to', choices=choices, required=False, nargs=1, type=str,
help=('Input type you are converting to. '
'Attempts to detect best result if omitted.'), dest='tounit')
parser.add_argument(
'size', nargs='*', type=float,
help='The number to convert.')
args = parser.parse_args(cli_args)
# Not sure how to cover this with tests, or if the functionality
# will remain in this form long enough for it to make writing a
# test worth the effort.
if args.from_stdin: # pragma: no cover
args.size = [float(sys.stdin.readline()[:-1])]
results = []
for size in args.size:
instance = getattr(__import__(
'bitmath', fromlist=['True']), args.fromunit[0])(size)
# If we have a unit provided then use it
if args.tounit:
result = getattr(instance, args.tounit[0])
# Otherwise use the best_prefix call
else:
result = instance.best_prefix()
results.append(result)
return results
|
A command line interface to basic bitmath operations.
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L1598-L1643
| null |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright © 2014-2016 Tim Bielawa <timbielawa@gmail.com>
# See GitHub Contributors Graph for more information
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sub-license, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=bad-continuation,missing-docstring,invalid-name,line-too-long
"""Reference material:
The bitmath homepage is located at:
* http://bitmath.readthedocs.io/en/latest/
Prefixes for binary multiples:
http://physics.nist.gov/cuu/Units/binary.html
decimal and binary prefixes:
man 7 units (from the Linux Documentation Project 'man-pages' package)
BEFORE YOU GET HASTY WITH EXCLUDING CODE FROM COVERAGE: If you
absolutely need to skip code coverage because of a strange Python 2.x
vs 3.x thing, use the fancy environment substitution stuff from the
.coverage RC file. In review:
* If you *NEED* to skip a statement because of Python 2.x issues add the following::
# pragma: PY2X no cover
* If you *NEED* to skip a statement because of Python 3.x issues add the following::
# pragma: PY3X no cover
In this configuration, statements which are skipped in 2.x are still
covered in 3.x, and the reverse holds true for tests skipped in 3.x.
"""
from __future__ import print_function
import argparse
import contextlib
import fnmatch
import math
import numbers
import os
import os.path
import platform
import sys
# For device capacity reading in query_device_capacity(). Only supported
# on posix systems for now. Will be addressed in issue #52 on GitHub.
if os.name == 'posix':
import stat
import fcntl
import struct
__all__ = ['Bit', 'Byte', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB',
'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'Kib',
'Mib', 'Gib', 'Tib', 'Pib', 'Eib', 'kb', 'Mb', 'Gb', 'Tb',
'Pb', 'Eb', 'Zb', 'Yb', 'getsize', 'listdir', 'format',
'format_string', 'format_plural', 'parse_string', 'parse_string_unsafe',
'ALL_UNIT_TYPES', 'NIST', 'NIST_PREFIXES', 'NIST_STEPS',
'SI', 'SI_PREFIXES', 'SI_STEPS']
# Python 3.x compat
if sys.version > '3':
long = int # pragma: PY2X no cover
unicode = str # pragma: PY2X no cover
#: A list of all the valid prefix unit types. Mostly for reference,
#: also used by the CLI tool as valid types
ALL_UNIT_TYPES = ['Bit', 'Byte', 'kb', 'kB', 'Mb', 'MB', 'Gb', 'GB', 'Tb',
'TB', 'Pb', 'PB', 'Eb', 'EB', 'Zb', 'ZB', 'Yb',
'YB', 'Kib', 'KiB', 'Mib', 'MiB', 'Gib', 'GiB',
'Tib', 'TiB', 'Pib', 'PiB', 'Eib', 'EiB']
# #####################################################################
# Set up our module variables/constants
###################################
# Internal:
# Console repr(), ex: MiB(13.37), or kB(42.0)
_FORMAT_REPR = '{unit_singular}({value})'
# ##################################
# Exposed:
#: Constants for referring to NIST prefix system
NIST = int(2)
#: Constants for referring to SI prefix system
SI = int(10)
# ##################################
#: All of the SI prefixes
SI_PREFIXES = ['k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
#: Byte values represented by each SI prefix unit
SI_STEPS = {
'Bit': 1 / 8.0,
'Byte': 1,
'k': 1000,
'M': 1000000,
'G': 1000000000,
'T': 1000000000000,
'P': 1000000000000000,
'E': 1000000000000000000,
'Z': 1000000000000000000000,
'Y': 1000000000000000000000000
}
#: All of the NIST prefixes
NIST_PREFIXES = ['Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei']
#: Byte values represented by each NIST prefix unit
NIST_STEPS = {
'Bit': 1 / 8.0,
'Byte': 1,
'Ki': 1024,
'Mi': 1048576,
'Gi': 1073741824,
'Ti': 1099511627776,
'Pi': 1125899906842624,
'Ei': 1152921504606846976
}
#: String representation, ex: ``13.37 MiB``, or ``42.0 kB``
format_string = "{value} {unit}"
#: Pluralization behavior
format_plural = False
def os_name():
    """Thin wrapper around ``os.name``.

    Exists so platform-specific code paths can be stubbed out easily in
    unit tests.
    """
    return os.name
def capitalize_first(s):
    """Capitalize ONLY the first letter of the input `s`.

    Unlike ``str.capitalize`` the rest of the string is left untouched.

    :param s: the input string
    :returns: a copy of `s` with the first letter capitalized; an empty
        string is returned unchanged (previously this raised
        ``IndexError`` on empty input)
    """
    # Slicing (``s[:1]``) instead of indexing (``s[0]``) makes this safe
    # for the empty string.
    return s[:1].upper() + s[1:]
######################################################################
# Base class for everything else
class Bitmath(object):
"""The base class for all the other prefix classes"""
# All the allowed input types
valid_types = (int, float, long)
def __init__(self, value=0, bytes=None, bits=None):
"""Instantiate with `value` by the unit, in plain bytes, or
bits. Don't supply more than one keyword.
default behavior: initialize with value of 0
only setting value: assert bytes is None and bits is None
only setting bytes: assert value == 0 and bits is None
only setting bits: assert value == 0 and bytes is None
"""
_raise = False
if (value == 0) and (bytes is None) and (bits is None):
pass
# Setting by bytes
elif bytes is not None:
if (value == 0) and (bits is None):
pass
else:
_raise = True
# setting by bits
elif bits is not None:
if (value == 0) and (bytes is None):
pass
else:
_raise = True
if _raise:
raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
self._do_setup()
if bytes:
# We were provided with the fundamental base unit, no need
# to normalize
self._byte_value = bytes
self._bit_value = bytes * 8.0
elif bits:
# We were *ALMOST* given the fundamental base
# unit. Translate it into the fundamental unit then
# normalize.
self._byte_value = bits / 8.0
self._bit_value = bits
else:
# We were given a value representative of this *prefix
# unit*. We need to normalize it into the number of bytes
# it represents.
self._norm(value)
# We have the fundamental unit figured out. Set the 'pretty' unit
self._set_prefix_value()
    def _set_prefix_value(self):
        # Cache the human-facing value: the byte count expressed in this
        # instance's own prefix unit.
        self.prefix_value = self._to_prefix_value(self._byte_value)

    def _to_prefix_value(self, value):
        """Return the number of bits/bytes as they would look like if we
        converted *to* this unit"""
        # float() guards against Python 2 integer (floor) division.
        return value / float(self._unit_value)
    def _setup(self):
        """Subclass hook: return (base, power, singular name, plural name)."""
        raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")

    def _do_setup(self):
        """Setup basic parameters for this class.

        `base` is the numeric base which when raised to `power` is equivalent
        to 1 unit of the corresponding prefix. I.e., base=2, power=10
        represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.

        Likewise, for the SI prefix classes `base` will be 10, and the `power`
        for the Kilobyte is 3.
        """
        (self._base, self._power, self._name_singular, self._name_plural) = self._setup()
        # One unit of this prefix, measured in the fundamental unit.
        self._unit_value = self._base ** self._power
def _norm(self, value):
"""Normalize the input value into the fundamental unit for this prefix
type.
:param number value: The input value to be normalized
:raises ValueError: if the input value is not a type of real number
"""
if isinstance(value, self.valid_types):
self._byte_value = value * self._unit_value
self._bit_value = self._byte_value * 8.0
else:
raise ValueError("Initialization value '%s' is of an invalid type: %s. "
"Must be one of %s" % (
value,
type(value),
", ".join(str(x) for x in self.valid_types)))
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
def system(self):
"""The system of units used to measure an instance"""
if self._base == 2:
return "NIST"
elif self._base == 10:
return "SI"
else:
# I don't expect to ever encounter this logic branch, but
# hey, it's better to have extra test coverage than
# insufficient test coverage.
raise ValueError("Instances mathematical base is an unsupported value: %s" % (
str(self._base)))
@property
def unit(self):
"""The string that is this instances prefix unit name in agreement
with this instance value (singular or plural). Following the
convention that only 1 is singular. This will always be the singular
form when :attr:`bitmath.format_plural` is ``False`` (default value).
For example:
>>> KiB(1).unit == 'KiB'
>>> Byte(0).unit == 'Bytes'
>>> Byte(1).unit == 'Byte'
>>> Byte(1.1).unit == 'Bytes'
>>> Gb(2).unit == 'Gbs'
"""
global format_plural
if self.prefix_value == 1:
# If it's a '1', return it singular, no matter what
return self._name_singular
elif format_plural:
# Pluralization requested
return self._name_plural
else:
# Pluralization NOT requested, and the value is not 1
return self._name_singular
    @property
    def unit_plural(self):
        """The string that is an instances prefix unit name in the plural
        form.

        For example:

        >>> KiB(1).unit_plural == 'KiBs'
        >>> Byte(1024).unit_plural == 'Bytes'
        >>> Gb(1).unit_plural == 'Gbs'
        """
        return self._name_plural

    @property
    def unit_singular(self):
        """The string that is an instances prefix unit name in the singular
        form.

        For example:

        >>> KiB(1).unit_singular == 'KiB'
        >>> Byte(1024).unit_singular == 'Byte'
        >>> Gb(1).unit_singular == 'Gb'
        """
        return self._name_singular

    #: The "prefix" value of an instance: the size expressed in this
    #: instance's own unit, e.g. ``KiB(1).value == 1.0``
    value = property(lambda s: s.prefix_value)
@classmethod
def from_other(cls, item):
"""Factory function to return instances of `item` converted into a new
instance of ``cls``. Because this is a class method, it may be called
from any bitmath class object without the need to explicitly
instantiate the class ahead of time.
*Implicit Parameter:*
* ``cls`` A bitmath class, implicitly set to the class of the
instance object it is called on
*User Supplied Parameter:*
* ``item`` A :class:`bitmath.Bitmath` subclass instance
*Example:*
>>> import bitmath
>>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
>>> print kib
KiB(1024.0)
"""
if isinstance(item, Bitmath):
return cls(bits=item.bits)
else:
raise ValueError("The provided items must be a valid bitmath class: %s" %
str(item.__class__))
    ######################################################################
    # The following implement the Python datamodel customization methods
    #
    # Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
    def __repr__(self):
        """Representation of this object as you would expect to see in an
        interpreter"""
        # ``global`` is unnecessary for read-only access but kept as-is.
        global _FORMAT_REPR
        return self.format(_FORMAT_REPR)

    def __str__(self):
        """String representation of this object"""
        global format_string
        return self.format(format_string)
def format(self, fmt):
"""Return a representation of this instance formatted with user
supplied syntax"""
_fmt_params = {
'base': self.base,
'bin': self.bin,
'binary': self.binary,
'bits': self.bits,
'bytes': self.bytes,
'power': self.power,
'system': self.system,
'unit': self.unit,
'unit_plural': self.unit_plural,
'unit_singular': self.unit_singular,
'value': self.value
}
return fmt.format(**_fmt_params)
    ##################################################################
    # Guess the best human-readable prefix unit for representation
    ##################################################################
    def best_prefix(self, system=None):
        """Optional parameter, `system`, allows you to prefer NIST or SI in
        the results. By default, the current system is used (Bit/Byte default
        to NIST).

        Logic discussion/notes:

        Base-case, does it need converting?

        If the instance is less than one Byte, return the instance as a Bit
        instance.

        Else, begin by recording the unit system the instance is defined
        by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.

        If the instance is not already a ``Byte`` instance, convert it to one.

        NIST units step up by powers of 1024, SI units step up by powers of
        1000.

        Take integer value of the log(base=STEP_POWER) of the instance's byte
        value. E.g.:

        >>> int(math.log(Gb(100).bytes, 1000))
        3

        This will return a value >= 0. The following determines the 'best
        prefix unit' for representation:

        * result == 0, best represented as a Byte
        * result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
        * 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
        """
        # Use absolute value so we don't return Bit's for *everything*
        # less than Byte(1). From github issue #55
        if abs(self) < Byte(1):
            return Bit.from_other(self)
        else:
            if type(self) is Byte:  # pylint: disable=unidiomatic-typecheck
                _inst = self
            else:
                _inst = Byte.from_other(self)
            # Which table to consult? Was a preferred system provided?
            if system is None:
                # No preference. Use existing system
                if self.system == 'NIST':
                    _STEPS = NIST_PREFIXES
                    _BASE = 1024
                elif self.system == 'SI':
                    _STEPS = SI_PREFIXES
                    _BASE = 1000
                # Anything else would have raised by now
            else:
                # Preferred system provided.
                if system == NIST:
                    _STEPS = NIST_PREFIXES
                    _BASE = 1024
                elif system == SI:
                    _STEPS = SI_PREFIXES
                    _BASE = 1000
                else:
                    raise ValueError("Invalid value given for 'system' parameter."
                                     " Must be one of NIST or SI")
            # Index of the string of the best prefix in the STEPS list
            _index = int(math.log(abs(_inst.bytes), _BASE))
            # Recall that the log() function returns >= 0. This doesn't
            # map to the STEPS list 1:1. That is to say, 0 is handled with
            # special care. So if the _index is 1, we actually want item 0
            # in the list.
            # Since _index is a non-negative int, exactly one of the
            # three branches below runs; _best_prefix is always bound
            # when _index != 0.
            if _index == 0:
                # Already a Byte() type, so return it.
                return _inst
            elif _index >= len(_STEPS):
                # This is a really big number. Use the biggest prefix we've got
                _best_prefix = _STEPS[-1]
            elif 0 < _index < len(_STEPS):
                # There is an appropriate prefix unit to represent this
                _best_prefix = _STEPS[_index - 1]
            _conversion_method = getattr(
                self,
                'to_%sB' % _best_prefix)
            return _conversion_method()
    ##################################################################
    # Unit conversion methods. Each returns a *new* instance of the
    # target prefix class representing the same quantity of data; the
    # paired properties give attribute-style access (e.g. ``x.MiB``).
    def to_Bit(self):
        return Bit(self._bit_value)

    def to_Byte(self):
        # NIST_STEPS['Byte'] is 1; the division only forces a float.
        return Byte(self._byte_value / float(NIST_STEPS['Byte']))

    # Properties
    Bit = property(lambda s: s.to_Bit())
    Byte = property(lambda s: s.to_Byte())

    ##################################################################
    def to_KiB(self):
        return KiB(bits=self._bit_value)

    def to_Kib(self):
        return Kib(bits=self._bit_value)

    def to_kB(self):
        return kB(bits=self._bit_value)

    def to_kb(self):
        return kb(bits=self._bit_value)

    # Properties
    KiB = property(lambda s: s.to_KiB())
    Kib = property(lambda s: s.to_Kib())
    kB = property(lambda s: s.to_kB())
    kb = property(lambda s: s.to_kb())

    ##################################################################
    def to_MiB(self):
        return MiB(bits=self._bit_value)

    def to_Mib(self):
        return Mib(bits=self._bit_value)

    def to_MB(self):
        return MB(bits=self._bit_value)

    def to_Mb(self):
        return Mb(bits=self._bit_value)

    # Properties
    MiB = property(lambda s: s.to_MiB())
    Mib = property(lambda s: s.to_Mib())
    MB = property(lambda s: s.to_MB())
    Mb = property(lambda s: s.to_Mb())

    ##################################################################
    def to_GiB(self):
        return GiB(bits=self._bit_value)

    def to_Gib(self):
        return Gib(bits=self._bit_value)

    def to_GB(self):
        return GB(bits=self._bit_value)

    def to_Gb(self):
        return Gb(bits=self._bit_value)

    # Properties
    GiB = property(lambda s: s.to_GiB())
    Gib = property(lambda s: s.to_Gib())
    GB = property(lambda s: s.to_GB())
    Gb = property(lambda s: s.to_Gb())

    ##################################################################
    def to_TiB(self):
        return TiB(bits=self._bit_value)

    def to_Tib(self):
        return Tib(bits=self._bit_value)

    def to_TB(self):
        return TB(bits=self._bit_value)

    def to_Tb(self):
        return Tb(bits=self._bit_value)

    # Properties
    TiB = property(lambda s: s.to_TiB())
    Tib = property(lambda s: s.to_Tib())
    TB = property(lambda s: s.to_TB())
    Tb = property(lambda s: s.to_Tb())

    ##################################################################
    def to_PiB(self):
        return PiB(bits=self._bit_value)

    def to_Pib(self):
        return Pib(bits=self._bit_value)

    def to_PB(self):
        return PB(bits=self._bit_value)

    def to_Pb(self):
        return Pb(bits=self._bit_value)

    # Properties
    PiB = property(lambda s: s.to_PiB())
    Pib = property(lambda s: s.to_Pib())
    PB = property(lambda s: s.to_PB())
    Pb = property(lambda s: s.to_Pb())

    ##################################################################
    def to_EiB(self):
        return EiB(bits=self._bit_value)

    def to_Eib(self):
        return Eib(bits=self._bit_value)

    def to_EB(self):
        return EB(bits=self._bit_value)

    def to_Eb(self):
        return Eb(bits=self._bit_value)

    # Properties
    EiB = property(lambda s: s.to_EiB())
    Eib = property(lambda s: s.to_Eib())
    EB = property(lambda s: s.to_EB())
    Eb = property(lambda s: s.to_Eb())

    ##################################################################
    # The SI units go beyond the NIST units. They also have the Zetta
    # and Yotta prefixes.
    def to_ZB(self):
        return ZB(bits=self._bit_value)

    def to_Zb(self):
        return Zb(bits=self._bit_value)

    # Properties
    ZB = property(lambda s: s.to_ZB())
    Zb = property(lambda s: s.to_Zb())

    ##################################################################
    def to_YB(self):
        return YB(bits=self._bit_value)

    def to_Yb(self):
        return Yb(bits=self._bit_value)

    #: A new object representing this instance as a Yottabyte
    YB = property(lambda s: s.to_YB())
    Yb = property(lambda s: s.to_Yb())
    ##################################################################
    # Rich comparison operations
    #
    # Comparing against a plain number compares this instance's
    # *prefix* value (and is therefore unit dependent); comparing
    # against another bitmath instance compares underlying byte counts.
    #
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable on Python 3 -- confirm whether that is intended.
    ##################################################################
    def __lt__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value < other
        else:
            return self._byte_value < other.bytes

    def __le__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value <= other
        else:
            return self._byte_value <= other.bytes

    def __eq__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value == other
        else:
            return self._byte_value == other.bytes

    def __ne__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value != other
        else:
            return self._byte_value != other.bytes

    def __gt__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value > other
        else:
            return self._byte_value > other.bytes

    def __ge__(self, other):
        if isinstance(other, numbers.Number):
            return self.prefix_value >= other
        else:
            return self._byte_value >= other.bytes
    ##################################################################
    # Basic math operations
    ##################################################################

    # Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types

    """These methods are called to implement the binary arithmetic
    operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
    instance, to evaluate the expression x + y, where x is an instance of
    a class that has an __add__() method, x.__add__(y) is called. The
    __divmod__() method should be the equivalent to using __floordiv__()
    and __mod__(); it should not be related to __truediv__() (described
    below). Note that __pow__() should be defined to accept an optional
    third argument if the ternary version of the built-in pow() function
    is to be supported.object.__complex__(self)
    """

    def __add__(self, other):
        """Supported operations with result types:

        - bm + bm = bm
        - bm + num = num
        - num + bm = num (see radd)
        """
        if isinstance(other, numbers.Number):
            # bm + num
            return other + self.value
        else:
            # bm + bm
            total_bytes = self._byte_value + other.bytes
            return (type(self))(bytes=total_bytes)

    def __sub__(self, other):
        """Subtraction: Supported operations with result types:

        - bm - bm = bm
        - bm - num = num
        - num - bm = num (see rsub)
        """
        if isinstance(other, numbers.Number):
            # bm - num
            return self.value - other
        else:
            # bm - bm
            total_bytes = self._byte_value - other.bytes
            return (type(self))(bytes=total_bytes)

    def __mul__(self, other):
        """Multiplication: Supported operations with result types:

        - bm1 * bm2 = bm1
        - bm * num = bm
        - num * bm = num (see rmul)
        """
        if isinstance(other, numbers.Number):
            # bm * num
            result = self._byte_value * other
            return (type(self))(bytes=result)
        else:
            # bm1 * bm2: both operands are expanded into byte counts
            # before multiplying.
            _other = other.value * other.base ** other.power
            _self = self.prefix_value * self._base ** self._power
            return (type(self))(bytes=_other * _self)

    """The division operator (/) is implemented by these methods. The
    __truediv__() method is used when __future__.division is in effect,
    otherwise __div__() is used. If only one of these two methods is
    defined, the object will not support division in the alternate
    context; TypeError will be raised instead."""

    def __div__(self, other):
        """Division: Supported operations with result types:

        - bm1 / bm2 = num
        - bm / num = bm
        - num / bm = num (see rdiv)
        """
        if isinstance(other, numbers.Number):
            # bm / num
            result = self._byte_value / other
            return (type(self))(bytes=result)
        else:
            # bm1 / bm2
            return self._byte_value / float(other.bytes)

    def __truediv__(self, other):
        # num / bm
        return self.__div__(other)

    # def __floordiv__(self, other):
    #     return NotImplemented

    # def __mod__(self, other):
    #     return NotImplemented

    # def __divmod__(self, other):
    #     return NotImplemented

    # def __pow__(self, other, modulo=None):
    #     return NotImplemented
    ##################################################################

    """These methods are called to implement the binary arithmetic
    operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
    reflected (swapped) operands. These functions are only called if the
    left operand does not support the corresponding operation and the
    operands are of different types. [2] For instance, to evaluate the
    expression x - y, where y is an instance of a class that has an
    __rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
    NotImplemented.

    These are the add/sub/mul/div methods for syntax where a number type
    is given for the LTYPE and a bitmath object is given for the
    RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
    """

    def __radd__(self, other):
        # num + bm = num
        return other + self.value

    def __rsub__(self, other):
        # num - bm = num
        return other - self.value

    def __rmul__(self, other):
        # num * bm = bm
        return self * other

    def __rdiv__(self, other):
        # num / bm = num
        return other / float(self.value)

    def __rtruediv__(self, other):
        # num / bm = num
        return other / float(self.value)

    """Called to implement the built-in functions complex(), int(),
    long(), and float(). Should return a value of the appropriate type.

    If one of those methods does not support the operation with the
    supplied arguments, it should return NotImplemented.

    For bitmath purposes, these methods return the int/long/float
    equivalent of the this instances prefix Unix value. That is to say:

    - int(KiB(3.336)) would return 3
    - long(KiB(3.336)) would return 3L
    - float(KiB(3.336)) would return 3.336
    """

    def __int__(self):
        """Return this instances prefix unit as an integer"""
        return int(self.prefix_value)

    def __long__(self):
        """Return this instances prefix unit as a long integer"""
        # Only reachable on Python 2; Python 3 never calls __long__.
        return long(self.prefix_value)  # pragma: PY3X no cover

    def __float__(self):
        """Return this instances prefix unit as a floating point number"""
        return float(self.prefix_value)
    ##################################################################
    # Bitwise operations (each operates on the truncated *bit* count
    # and returns a new instance of the same prefix class)
    ##################################################################
    def __lshift__(self, other):
        """Left shift, ex: 100 << 2

        A left shift by n bits is equivalent to multiplication by pow(2,
        n). A long integer is returned if the result exceeds the range of
        plain integers."""
        shifted = int(self.bits) << other
        return type(self)(bits=shifted)

    def __rshift__(self, other):
        """Right shift, ex: 100 >> 2

        A right shift by n bits is equivalent to division by pow(2, n)."""
        shifted = int(self.bits) >> other
        return type(self)(bits=shifted)

    def __and__(self, other):
        """Bitwise and, ex: 100 & 2

        Each bit of the output is 1 if the corresponding bit
        of x AND of y is 1, otherwise it's 0."""
        andd = int(self.bits) & other
        return type(self)(bits=andd)

    def __xor__(self, other):
        """Bitwise xor, ex: 100 ^ 2

        Does a "bitwise exclusive or". Each bit of the output is the same
        as the corresponding bit in x if that bit in y is 0, and it's the
        complement of the bit in x if that bit in y is 1."""
        xord = int(self.bits) ^ other
        return type(self)(bits=xord)

    def __or__(self, other):
        """Bitwise or, ex: 100 | 2

        Does a "bitwise or". Each bit of the output is 0 if the corresponding
        bit of x AND of y is 0, otherwise it's 1."""
        # NOTE(review): local name shadows the builtin ``ord``; harmless
        # here but worth renaming in a future cleanup.
        ord = int(self.bits) | other
        return type(self)(bits=ord)

    ##################################################################
    def __neg__(self):
        """The negative version of this instance"""
        # Uses -abs(...), so the result is negative even when the
        # instance is already negative (documented as "the negative
        # version", not strict arithmetic negation).
        return (type(self))(-abs(self.prefix_value))

    def __pos__(self):
        # Returns the absolute value, unlike the conventional identity
        # behavior of unary plus.
        return (type(self))(abs(self.prefix_value))

    def __abs__(self):
        return (type(self))(abs(self.prefix_value))

    # def __invert__(self):
    #     """Called to implement the unary arithmetic operations (-, +, abs()
    #     and ~)."""
    #     return NotImplemented
######################################################################
# First, the bytes...
class Byte(Bitmath):
    """Byte based types fundamentally operate on self._bit_value"""
    def _setup(self):
        return (2, 0, 'Byte', 'Bytes')


######################################################################
# NIST Prefixes for Byte based types (powers of 1024).
# The ``*o`` names below are aliases -- presumably the French "octet"
# spellings (Kio = kibioctet, etc.); confirm before documenting.
class KiB(Byte):
    def _setup(self):
        return (2, 10, 'KiB', 'KiBs')


Kio = KiB


class MiB(Byte):
    def _setup(self):
        return (2, 20, 'MiB', 'MiBs')


Mio = MiB


class GiB(Byte):
    def _setup(self):
        return (2, 30, 'GiB', 'GiBs')


Gio = GiB


class TiB(Byte):
    def _setup(self):
        return (2, 40, 'TiB', 'TiBs')


Tio = TiB


class PiB(Byte):
    def _setup(self):
        return (2, 50, 'PiB', 'PiBs')


Pio = PiB


class EiB(Byte):
    def _setup(self):
        return (2, 60, 'EiB', 'EiBs')


Eio = EiB
######################################################################
# SI Prefixes for Byte based types (powers of 1000).
# The ``*o`` names are octet-spelling aliases, as for the NIST classes.
class kB(Byte):
    def _setup(self):
        return (10, 3, 'kB', 'kBs')


ko = kB


class MB(Byte):
    def _setup(self):
        return (10, 6, 'MB', 'MBs')


Mo = MB


class GB(Byte):
    def _setup(self):
        return (10, 9, 'GB', 'GBs')


Go = GB


class TB(Byte):
    def _setup(self):
        return (10, 12, 'TB', 'TBs')


To = TB


class PB(Byte):
    def _setup(self):
        return (10, 15, 'PB', 'PBs')


Po = PB


class EB(Byte):
    def _setup(self):
        return (10, 18, 'EB', 'EBs')


Eo = EB


class ZB(Byte):
    def _setup(self):
        return (10, 21, 'ZB', 'ZBs')


Zo = ZB


class YB(Byte):
    def _setup(self):
        return (10, 24, 'YB', 'YBs')


Yo = YB
######################################################################
# And now the bit types
class Bit(Bitmath):
    """Bit based types fundamentally operate on self._bit_value"""
    def _set_prefix_value(self):
        # Bits (not bytes) are the fundamental quantity for Bit types.
        self.prefix_value = self._to_prefix_value(self._bit_value)

    def _setup(self):
        return (2, 0, 'Bit', 'Bits')

    def _norm(self, value):
        """Normalize the input value into the fundamental unit for this prefix
        type"""
        self._bit_value = value * self._unit_value
        self._byte_value = self._bit_value / 8.0


######################################################################
# NIST Prefixes for Bit based types (powers of 1024)
class Kib(Bit):
    def _setup(self):
        return (2, 10, 'Kib', 'Kibs')


class Mib(Bit):
    def _setup(self):
        return (2, 20, 'Mib', 'Mibs')


class Gib(Bit):
    def _setup(self):
        return (2, 30, 'Gib', 'Gibs')


class Tib(Bit):
    def _setup(self):
        return (2, 40, 'Tib', 'Tibs')


class Pib(Bit):
    def _setup(self):
        return (2, 50, 'Pib', 'Pibs')


class Eib(Bit):
    def _setup(self):
        return (2, 60, 'Eib', 'Eibs')
######################################################################
# SI Prefixes for Bit based types (powers of 1000)
class kb(Bit):
    def _setup(self):
        return (10, 3, 'kb', 'kbs')


class Mb(Bit):
    def _setup(self):
        return (10, 6, 'Mb', 'Mbs')


class Gb(Bit):
    def _setup(self):
        return (10, 9, 'Gb', 'Gbs')


class Tb(Bit):
    def _setup(self):
        return (10, 12, 'Tb', 'Tbs')


class Pb(Bit):
    def _setup(self):
        return (10, 15, 'Pb', 'Pbs')


class Eb(Bit):
    def _setup(self):
        return (10, 18, 'Eb', 'Ebs')


class Zb(Bit):
    def _setup(self):
        return (10, 21, 'Zb', 'Zbs')


class Yb(Bit):
    def _setup(self):
        return (10, 24, 'Yb', 'Ybs')
######################################################################
# Utility functions
def best_prefix(bytes, system=NIST):
    """Return ``bytes`` re-expressed in its best human-readable prefix unit.

    ``bytes`` may be a plain number (a count of bytes) or any bitmath
    instance. Choose a preferred unit system with ``system`` --
    ``bitmath.NIST`` (default) or ``bitmath.SI``.

    Shortcut for:

    >>> import bitmath
    >>> best = bitmath.Byte(12345).best_prefix()
    """
    value = bytes.bytes if isinstance(bytes, Bitmath) else bytes
    return Byte(value).best_prefix(system=system)
def query_device_capacity(device_fd):
    """Create bitmath instances of the capacity of a system block device

    Make one or more ioctl request to query the capacity of a block
    device. Perform any processing required to compute the final capacity
    value. Return the device capacity in bytes as a :class:`bitmath.Byte`
    instance.

    Thanks to the following resources for help figuring this out Linux/Mac
    ioctl's for querying block device sizes:

    * http://stackoverflow.com/a/12925285/263969
    * http://stackoverflow.com/a/9764508/263969

    :param file device_fd: A ``file`` object of the device to query the
        capacity of (as in ``get_device_capacity(open("/dev/sda"))``).
    :return: a bitmath :class:`bitmath.Byte` instance equivalent to the
        capacity of the target device in bytes.
    :raises NotImplementedError: on non-posix platforms
    :raises ValueError: if ``device_fd`` does not refer to a block device
    """
    if os_name() != 'posix':
        raise NotImplementedError("'bitmath.query_device_capacity' is not supported on this platform: %s" % os_name())

    s = os.stat(device_fd.name).st_mode
    if not stat.S_ISBLK(s):
        raise ValueError("The file descriptor provided is not of a device type")

    # The keys of ``ioctl_map`` correlate to values returned by
    # ``platform.system``. Each entry lists the ioctl requests needed on
    # that platform as (PARAM_NAME, STRUCT_FORMAT_CHAR, REQUEST_CODE)
    # tuples, plus a "func" that folds the raw results into a byte count.
    ioctl_map = {
        "Linux": {
            "request_params": [
                # Per <linux/fs.h>, BLKGETSIZE64 returns the device size
                # in bytes as a u64, hence the native unsigned-long 'L'
                # struct format character (8 bytes on LP64 systems).
                ("BLKGETSIZE64", "L", 0x80081272)
            ],
            # BLKGETSIZE64 already reports bytes; no extra math needed.
            "func": lambda x: x["BLKGETSIZE64"]
        },
        "Darwin": {
            "request_params": [
                # Per <sys/disk.h>: get media's block count - uint64_t
                ("DKIOCGETBLOCKCOUNT", "L", 0x40086419),
                # Per <sys/disk.h>: get media's block size - uint32_t,
                # i.e. a plain 4-byte unsigned int ('I')
                ("DKIOCGETBLOCKSIZE", "I", 0x40046418)
            ],
            # OS X has no direct BLKGETSIZE64 equivalent, so capacity is
            # (number of blocks) * (bytes per block).
            "func": lambda x: x["DKIOCGETBLOCKCOUNT"] * x["DKIOCGETBLOCKSIZE"]
        }
    }

    platform_params = ioctl_map[platform.system()]
    results = {}

    for req_name, fmt, request_code in platform_params['request_params']:
        # Allocate a result buffer matching the native size of this
        # format. BUGFIX: this must be a *bytes* object -- the previous
        # ``' ' * size`` str buffer raises TypeError under Python 3 in
        # both fcntl.ioctl and struct.unpack.
        buffer = b' ' * struct.calcsize(fmt)
        buffer = fcntl.ioctl(device_fd.fileno(), request_code, buffer)
        # Unpack the raw ioctl result into a Python integer.
        results[req_name] = struct.unpack(fmt, buffer)[0]

    return Byte(platform_params['func'](results))
def getsize(path, bestprefix=True, system=NIST):
    """Return a bitmath instance for the size of the file at ``path``.

    With ``bestprefix`` (the default) the result uses the most
    human-readable prefix unit in the chosen ``system``
    (``bitmath.NIST``, the default, or ``bitmath.SI``). Set
    ``bestprefix=False`` to get a plain ``bitmath.Byte`` back instead.
    """
    # Resolve symlinks before stat'ing, exactly like os.path.realpath.
    size = Byte(os.path.getsize(os.path.realpath(path)))
    if bestprefix:
        return size.best_prefix(system=system)
    return size
def listdir(search_base, followlinks=False, filter='*',
            relpath=False, bestprefix=False, system=NIST):
    """This is a generator which recurses the directory tree
    `search_base`, yielding 2-tuples of:

    * The absolute/relative path to a discovered file
    * A bitmath instance representing the "apparent size" of the file.

    - `search_base` - The directory to begin walking down.
    - `followlinks` - Whether or not to follow symbolic links to directories
    - `filter` - A glob (see :py:mod:`fnmatch`) to filter results with
      (default: ``*``, everything). NOTE: the parameter name shadows the
      ``filter`` builtin within this function.
    - `relpath` - ``True`` to return the relative path from `pwd` or
      ``False`` (default) to return the fully qualified path
    - ``bestprefix`` - set to ``False`` to get ``bitmath.Byte``
      instances back instead.
    - `system` - Provide a preferred unit system by setting `system`
      to either ``bitmath.NIST`` (default) or ``bitmath.SI``.

    .. note:: This function does NOT return tuples for directory entities.

    .. note:: Symlinks to **files** are followed automatically
    """
    for root, dirs, files in os.walk(search_base, followlinks=followlinks):
        for name in fnmatch.filter(files, filter):
            _path = os.path.join(root, name)
            if relpath:
                # RELATIVE path
                _return_path = os.path.relpath(_path, '.')
            else:
                # REAL path
                _return_path = os.path.realpath(_path)
            if followlinks:
                yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
            else:
                # NOTE(review): with followlinks=False this skips
                # symlinked *files* as well, which appears to contradict
                # the docstring note above -- confirm intended behavior.
                if os.path.isdir(_path) or os.path.islink(_path):
                    pass
                else:
                    yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
def parse_string(s):
    """Parse a string with units and try to make a bitmath object out of
    it.

    String inputs may include whitespace characters between the value and
    the unit.

    :param str s: the string to parse, e.g. ``"1024 KiB"`` or ``"42GB"``
    :return: a bitmath instance of the detected unit class
    :raises ValueError: if ``s`` is not a string, contains no unit,
        names an unrecognized unit, or has a non-numeric value
    """
    # Strings only please
    if not isinstance(s, (str, unicode)):
        raise ValueError("parse_string only accepts string inputs but a %s was given" %
                         type(s))
    # get the index of the first alphabetic character
    try:
        index = list([i.isalpha() for i in s]).index(True)
    except ValueError:
        # If there's no alphabetic characters we won't be able to .index(True)
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
    # split the string into the value and the unit
    val, unit = s[:index], s[index:]
    # see if the unit exists as a type in our namespace
    if unit == "b":
        unit_class = Bit
    elif unit == "B":
        unit_class = Byte
    else:
        # The unit must name a class defined in this module
        if not (hasattr(sys.modules[__name__], unit) and isinstance(getattr(sys.modules[__name__], unit), type)):
            raise ValueError("The unit %s is not a valid bitmath unit" % unit)
        unit_class = globals()[unit]
    # float() raises ValueError itself for non-numeric input, which is
    # exactly the error we want callers to see; no wrapper needed.
    val = float(val)
    try:
        return unit_class(val)
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
    # no longer swallowed and re-raised as ValueError.
    except Exception:  # pragma: no cover
        raise ValueError("Can't parse string %s into a bitmath object" % s)
def parse_string_unsafe(s, system=SI):
    """Attempt to parse a string with ambiguous units and try to make a
    bitmath object out of it.

    This may produce inaccurate results if parsing shell output. For
    example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
    ~= 2.666 KiB. See the documentation for all of the important details.

    Note the following caveats:

    * All inputs are assumed to be byte-based (as opposed to bit based)
    * Numerical inputs (those without any units) are assumed to be a
      number of bytes
    * Inputs with single letter units (k, M, G, etc) are assumed to be SI
      units (base-10). Set the `system` parameter to `bitmath.NIST` to
      change this behavior.
    * Inputs with an `i` character following the leading letter (Ki, Mi,
      Gi) are assumed to be NIST units (base 2)
    * Capitalization does not matter

    :param s: a string (or plain number) to parse
    :param system: unit system assumed for single-letter prefixes,
        ``bitmath.SI`` (default) or ``bitmath.NIST``
    :return: a bitmath instance
    :raises ValueError: if ``s`` is not a string/number or the unit
        cannot be recognized
    """
    if not isinstance(s, (str, unicode)) and \
            not isinstance(s, numbers.Number):
        raise ValueError("parse_string_unsafe only accepts string/number inputs but a %s was given" %
                         type(s))

    ######################################################################
    # Is the input simple to parse? Just a number, or a number
    # masquerading as a string perhaps?

    # Test case: raw number input (easy!)
    if isinstance(s, numbers.Number):
        # It's just a number. Assume bytes
        return Byte(s)

    # Test case: a number pretending to be a string
    if isinstance(s, (str, unicode)):
        try:
            # Can we turn it directly into a number?
            return Byte(float(s))
        except ValueError:
            # Nope, this is not a plain number
            pass

    ######################################################################
    # At this point:
    # - the input is also not just a number wrapped in a string
    # - nor is is just a plain number type
    #
    # We need to do some more digging around now to figure out exactly
    # what we were given and possibly normalize the input into a
    # format we can recognize.

    # First we'll separate the number and the unit.
    #
    # Get the index of the first alphabetic character
    try:
        index = list([i.isalpha() for i in s]).index(True)
    except ValueError:  # pragma: no cover
        # If there's no alphabetic characters we won't be able to .index(True)
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)

    # Split the string into the value and the unit
    val, unit = s[:index], s[index:]

    # Don't trust anything. We'll make sure the correct 'b' is in place.
    # Everything here is byte-based, so strip any trailing b/B and
    # re-append a capital 'B'.
    unit = unit.rstrip('Bb')
    unit += 'B'

    # At this point we can expect `unit` to be either:
    #
    # - 2 Characters (for SI, ex: kB or GB)
    # - 3 Caracters (so NIST, ex: KiB, or GiB)
    #
    # A unit with any other number of chars is not a valid unit

    # SI
    if len(unit) == 2:
        # Has NIST parsing been requested?
        if system == NIST:
            # NIST units requested. Ensure the unit begins with a
            # capital letter and is followed by an 'i' character.
            unit = capitalize_first(unit)
            # Insert an 'i' char after the first letter
            _unit = list(unit)
            _unit.insert(1, 'i')
            # Collapse the list back into a 3 letter string
            unit = ''.join(_unit)
            unit_class = globals()[unit]
        else:
            # Default parsing (SI format)
            #
            # Edge-case checking: SI 'thousand' is a lower-case K
            if unit.startswith('K'):
                unit = unit.replace('K', 'k')
            elif not unit.startswith('k'):
                # Otherwise, ensure the first char is capitalized
                unit = capitalize_first(unit)

            # This is an SI-type unit
            if unit[0] in SI_PREFIXES:
                unit_class = globals()[unit]
    # NIST
    elif len(unit) == 3:
        unit = capitalize_first(unit)

        # This is a NIST-type unit
        if unit[:2] in NIST_PREFIXES:
            unit_class = globals()[unit]
    else:
        # This is not a unit we recognize
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)

    # unit_class is only bound on the recognized-prefix paths above;
    # an unknown prefix falls through to here unbound.
    try:
        unit_class
    except UnboundLocalError:
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)

    return unit_class(float(val))
######################################################################
# Context Managers
@contextlib.contextmanager
def format(fmt_str=None, plural=False, bestprefix=False):
    """Context manager for printing bitmath instances.

    ``fmt_str`` - a formatting mini-language compat formatting string. See
    the @properties (above) for a list of available items.

    ``plural`` - True enables printing instances with 's's if they're
    plural. False (default) prints them as singular (no trailing 's').

    ``bestprefix`` - True enables printing instances in their best
    human-readable representation. False, the default, prints instances
    using their current prefix unit.

    .. note:: ``bestprefix`` is currently accepted but not acted on
       here; it is kept for interface compatibility.
    """
    if 'bitmath' not in globals():
        import bitmath
    # Stash the current module-level settings so they can be restored
    if plural:
        orig_fmt_plural = bitmath.format_plural
        bitmath.format_plural = True
    if fmt_str:
        orig_fmt_str = bitmath.format_string
        bitmath.format_string = fmt_str
    # try/finally guarantees the globals are restored even if the body
    # of the 'with' block raises; previously an exception would leave
    # the module-level format settings permanently changed.
    try:
        yield
    finally:
        if plural:
            bitmath.format_plural = orig_fmt_plural
        if fmt_str:
            bitmath.format_string = orig_fmt_str
def cli_script():  # pragma: no cover
    """Console entry point.

    Thin wrapper around ``cli_script_main`` so the command line
    functionality stays unit-testable: the real work (and its iterable
    of result lines) lives in ``cli_script_main``.
    """
    results = cli_script_main(sys.argv[1:])
    for line in results:
        print(line)
# Allow the module to be executed directly as the CLI tool.
if __name__ == '__main__':
    cli_script()
|
tbielawa/bitmath
|
bitmath/__init__.py
|
Bitmath._do_setup
|
python
|
def _do_setup(self):
(self._base, self._power, self._name_singular, self._name_plural) = self._setup()
self._unit_value = self._base ** self._power
|
Setup basic parameters for this class.
`base` is the numeric base which when raised to `power` is equivalent
to 1 unit of the corresponding prefix. I.e., base=2, power=10
represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.
Likewise, for the SI prefix classes `base` will be 10, and the `power`
for the Kilobyte is 3.
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L239-L250
|
[
"def _setup(self):\n raise NotImplementedError(\"The base 'bitmath.Bitmath' class can not be used directly\")\n"
] |
class Bitmath(object):
"""The base class for all the other prefix classes"""
# All the allowed input types
valid_types = (int, float, long)
def __init__(self, value=0, bytes=None, bits=None):
"""Instantiate with `value` by the unit, in plain bytes, or
bits. Don't supply more than one keyword.
default behavior: initialize with value of 0
only setting value: assert bytes is None and bits is None
only setting bytes: assert value == 0 and bits is None
only setting bits: assert value == 0 and bytes is None
"""
_raise = False
if (value == 0) and (bytes is None) and (bits is None):
pass
# Setting by bytes
elif bytes is not None:
if (value == 0) and (bits is None):
pass
else:
_raise = True
# setting by bits
elif bits is not None:
if (value == 0) and (bytes is None):
pass
else:
_raise = True
if _raise:
raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
self._do_setup()
if bytes:
# We were provided with the fundamental base unit, no need
# to normalize
self._byte_value = bytes
self._bit_value = bytes * 8.0
elif bits:
# We were *ALMOST* given the fundamental base
# unit. Translate it into the fundamental unit then
# normalize.
self._byte_value = bits / 8.0
self._bit_value = bits
else:
# We were given a value representative of this *prefix
# unit*. We need to normalize it into the number of bytes
# it represents.
self._norm(value)
# We have the fundamental unit figured out. Set the 'pretty' unit
self._set_prefix_value()
def _set_prefix_value(self):
self.prefix_value = self._to_prefix_value(self._byte_value)
def _to_prefix_value(self, value):
"""Return the number of bits/bytes as they would look like if we
converted *to* this unit"""
return value / float(self._unit_value)
def _setup(self):
raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")
def _norm(self, value):
"""Normalize the input value into the fundamental unit for this prefix
type.
:param number value: The input value to be normalized
:raises ValueError: if the input value is not a type of real number
"""
if isinstance(value, self.valid_types):
self._byte_value = value * self._unit_value
self._bit_value = self._byte_value * 8.0
else:
raise ValueError("Initialization value '%s' is of an invalid type: %s. "
"Must be one of %s" % (
value,
type(value),
", ".join(str(x) for x in self.valid_types)))
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
def system(self):
"""The system of units used to measure an instance"""
if self._base == 2:
return "NIST"
elif self._base == 10:
return "SI"
else:
# I don't expect to ever encounter this logic branch, but
# hey, it's better to have extra test coverage than
# insufficient test coverage.
raise ValueError("Instances mathematical base is an unsupported value: %s" % (
str(self._base)))
@property
def unit(self):
"""The string that is this instances prefix unit name in agreement
with this instance value (singular or plural). Following the
convention that only 1 is singular. This will always be the singular
form when :attr:`bitmath.format_plural` is ``False`` (default value).
For example:
>>> KiB(1).unit == 'KiB'
>>> Byte(0).unit == 'Bytes'
>>> Byte(1).unit == 'Byte'
>>> Byte(1.1).unit == 'Bytes'
>>> Gb(2).unit == 'Gbs'
"""
global format_plural
if self.prefix_value == 1:
# If it's a '1', return it singular, no matter what
return self._name_singular
elif format_plural:
# Pluralization requested
return self._name_plural
else:
# Pluralization NOT requested, and the value is not 1
return self._name_singular
@property
def unit_plural(self):
"""The string that is an instances prefix unit name in the plural
form.
For example:
>>> KiB(1).unit_plural == 'KiB'
>>> Byte(1024).unit_plural == 'Bytes'
>>> Gb(1).unit_plural == 'Gb'
"""
return self._name_plural
@property
def unit_singular(self):
"""The string that is an instances prefix unit name in the singular
form.
For example:
>>> KiB(1).unit_singular == 'KiB'
>>> Byte(1024).unit == 'B'
>>> Gb(1).unit_singular == 'Gb'
"""
return self._name_singular
#: The "prefix" value of an instance
value = property(lambda s: s.prefix_value)
@classmethod
def from_other(cls, item):
"""Factory function to return instances of `item` converted into a new
instance of ``cls``. Because this is a class method, it may be called
from any bitmath class object without the need to explicitly
instantiate the class ahead of time.
*Implicit Parameter:*
* ``cls`` A bitmath class, implicitly set to the class of the
instance object it is called on
*User Supplied Parameter:*
* ``item`` A :class:`bitmath.Bitmath` subclass instance
*Example:*
>>> import bitmath
>>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
>>> print kib
KiB(1024.0)
"""
if isinstance(item, Bitmath):
return cls(bits=item.bits)
else:
raise ValueError("The provided items must be a valid bitmath class: %s" %
str(item.__class__))
######################################################################
# The following implement the Python datamodel customization methods
#
# Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
def __repr__(self):
"""Representation of this object as you would expect to see in an
interpreter"""
global _FORMAT_REPR
return self.format(_FORMAT_REPR)
def __str__(self):
"""String representation of this object"""
global format_string
return self.format(format_string)
def format(self, fmt):
"""Return a representation of this instance formatted with user
supplied syntax"""
_fmt_params = {
'base': self.base,
'bin': self.bin,
'binary': self.binary,
'bits': self.bits,
'bytes': self.bytes,
'power': self.power,
'system': self.system,
'unit': self.unit,
'unit_plural': self.unit_plural,
'unit_singular': self.unit_singular,
'value': self.value
}
return fmt.format(**_fmt_params)
##################################################################
# Guess the best human-readable prefix unit for representation
##################################################################
def best_prefix(self, system=None):
"""Optional parameter, `system`, allows you to prefer NIST or SI in
the results. By default, the current system is used (Bit/Byte default
to NIST).
Logic discussion/notes:
Base-case, does it need converting?
If the instance is less than one Byte, return the instance as a Bit
instance.
Else, begin by recording the unit system the instance is defined
by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.
If the instance is not already a ``Byte`` instance, convert it to one.
NIST units step up by powers of 1024, SI units step up by powers of
1000.
Take integer value of the log(base=STEP_POWER) of the instance's byte
value. E.g.:
>>> int(math.log(Gb(100).bytes, 1000))
3
This will return a value >= 0. The following determines the 'best
prefix unit' for representation:
* result == 0, best represented as a Byte
* result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
* 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
"""
# Use absolute value so we don't return Bit's for *everything*
# less than Byte(1). From github issue #55
if abs(self) < Byte(1):
return Bit.from_other(self)
else:
if type(self) is Byte: # pylint: disable=unidiomatic-typecheck
_inst = self
else:
_inst = Byte.from_other(self)
# Which table to consult? Was a preferred system provided?
if system is None:
# No preference. Use existing system
if self.system == 'NIST':
_STEPS = NIST_PREFIXES
_BASE = 1024
elif self.system == 'SI':
_STEPS = SI_PREFIXES
_BASE = 1000
# Anything else would have raised by now
else:
# Preferred system provided.
if system == NIST:
_STEPS = NIST_PREFIXES
_BASE = 1024
elif system == SI:
_STEPS = SI_PREFIXES
_BASE = 1000
else:
raise ValueError("Invalid value given for 'system' parameter."
" Must be one of NIST or SI")
# Index of the string of the best prefix in the STEPS list
_index = int(math.log(abs(_inst.bytes), _BASE))
# Recall that the log() function returns >= 0. This doesn't
# map to the STEPS list 1:1. That is to say, 0 is handled with
# special care. So if the _index is 1, we actually want item 0
# in the list.
if _index == 0:
# Already a Byte() type, so return it.
return _inst
elif _index >= len(_STEPS):
# This is a really big number. Use the biggest prefix we've got
_best_prefix = _STEPS[-1]
elif 0 < _index < len(_STEPS):
# There is an appropriate prefix unit to represent this
_best_prefix = _STEPS[_index - 1]
_conversion_method = getattr(
self,
'to_%sB' % _best_prefix)
return _conversion_method()
##################################################################
def to_Bit(self):
return Bit(self._bit_value)
def to_Byte(self):
return Byte(self._byte_value / float(NIST_STEPS['Byte']))
# Properties
Bit = property(lambda s: s.to_Bit())
Byte = property(lambda s: s.to_Byte())
##################################################################
def to_KiB(self):
return KiB(bits=self._bit_value)
def to_Kib(self):
return Kib(bits=self._bit_value)
def to_kB(self):
return kB(bits=self._bit_value)
def to_kb(self):
return kb(bits=self._bit_value)
# Properties
KiB = property(lambda s: s.to_KiB())
Kib = property(lambda s: s.to_Kib())
kB = property(lambda s: s.to_kB())
kb = property(lambda s: s.to_kb())
##################################################################
def to_MiB(self):
return MiB(bits=self._bit_value)
def to_Mib(self):
return Mib(bits=self._bit_value)
def to_MB(self):
return MB(bits=self._bit_value)
def to_Mb(self):
return Mb(bits=self._bit_value)
# Properties
MiB = property(lambda s: s.to_MiB())
Mib = property(lambda s: s.to_Mib())
MB = property(lambda s: s.to_MB())
Mb = property(lambda s: s.to_Mb())
##################################################################
def to_GiB(self):
return GiB(bits=self._bit_value)
def to_Gib(self):
return Gib(bits=self._bit_value)
def to_GB(self):
return GB(bits=self._bit_value)
def to_Gb(self):
return Gb(bits=self._bit_value)
# Properties
GiB = property(lambda s: s.to_GiB())
Gib = property(lambda s: s.to_Gib())
GB = property(lambda s: s.to_GB())
Gb = property(lambda s: s.to_Gb())
##################################################################
def to_TiB(self):
return TiB(bits=self._bit_value)
def to_Tib(self):
return Tib(bits=self._bit_value)
def to_TB(self):
return TB(bits=self._bit_value)
def to_Tb(self):
return Tb(bits=self._bit_value)
# Properties
TiB = property(lambda s: s.to_TiB())
Tib = property(lambda s: s.to_Tib())
TB = property(lambda s: s.to_TB())
Tb = property(lambda s: s.to_Tb())
##################################################################
def to_PiB(self):
return PiB(bits=self._bit_value)
def to_Pib(self):
return Pib(bits=self._bit_value)
def to_PB(self):
return PB(bits=self._bit_value)
def to_Pb(self):
return Pb(bits=self._bit_value)
# Properties
PiB = property(lambda s: s.to_PiB())
Pib = property(lambda s: s.to_Pib())
PB = property(lambda s: s.to_PB())
Pb = property(lambda s: s.to_Pb())
##################################################################
def to_EiB(self):
return EiB(bits=self._bit_value)
def to_Eib(self):
return Eib(bits=self._bit_value)
def to_EB(self):
return EB(bits=self._bit_value)
def to_Eb(self):
return Eb(bits=self._bit_value)
# Properties
EiB = property(lambda s: s.to_EiB())
Eib = property(lambda s: s.to_Eib())
EB = property(lambda s: s.to_EB())
Eb = property(lambda s: s.to_Eb())
##################################################################
# The SI units go beyond the NIST units. They also have the Zetta
# and Yotta prefixes.
def to_ZB(self):
return ZB(bits=self._bit_value)
def to_Zb(self):
return Zb(bits=self._bit_value)
# Properties
ZB = property(lambda s: s.to_ZB())
Zb = property(lambda s: s.to_Zb())
##################################################################
def to_YB(self):
return YB(bits=self._bit_value)
def to_Yb(self):
return Yb(bits=self._bit_value)
#: A new object representing this instance as a Yottabyte
YB = property(lambda s: s.to_YB())
Yb = property(lambda s: s.to_Yb())
##################################################################
# Rich comparison operations
##################################################################
def __lt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value < other
else:
return self._byte_value < other.bytes
def __le__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value <= other
else:
return self._byte_value <= other.bytes
def __eq__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value == other
else:
return self._byte_value == other.bytes
def __ne__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value != other
else:
return self._byte_value != other.bytes
def __gt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value > other
else:
return self._byte_value > other.bytes
def __ge__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value >= other
else:
return self._byte_value >= other.bytes
##################################################################
# Basic math operations
##################################################################
# Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
"""These methods are called to implement the binary arithmetic
operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
instance, to evaluate the expression x + y, where x is an instance of
a class that has an __add__() method, x.__add__(y) is called. The
__divmod__() method should be the equivalent to using __floordiv__()
and __mod__(); it should not be related to __truediv__() (described
below). Note that __pow__() should be defined to accept an optional
third argument if the ternary version of the built-in pow() function
is to be supported.object.__complex__(self)
"""
def __add__(self, other):
"""Supported operations with result types:
- bm + bm = bm
- bm + num = num
- num + bm = num (see radd)
"""
if isinstance(other, numbers.Number):
# bm + num
return other + self.value
else:
# bm + bm
total_bytes = self._byte_value + other.bytes
return (type(self))(bytes=total_bytes)
def __sub__(self, other):
"""Subtraction: Supported operations with result types:
- bm - bm = bm
- bm - num = num
- num - bm = num (see rsub)
"""
if isinstance(other, numbers.Number):
# bm - num
return self.value - other
else:
# bm - bm
total_bytes = self._byte_value - other.bytes
return (type(self))(bytes=total_bytes)
def __mul__(self, other):
"""Multiplication: Supported operations with result types:
- bm1 * bm2 = bm1
- bm * num = bm
- num * bm = num (see rmul)
"""
if isinstance(other, numbers.Number):
# bm * num
result = self._byte_value * other
return (type(self))(bytes=result)
else:
# bm1 * bm2
_other = other.value * other.base ** other.power
_self = self.prefix_value * self._base ** self._power
return (type(self))(bytes=_other * _self)
"""The division operator (/) is implemented by these methods. The
__truediv__() method is used when __future__.division is in effect,
otherwise __div__() is used. If only one of these two methods is
defined, the object will not support division in the alternate
context; TypeError will be raised instead."""
def __div__(self, other):
"""Division: Supported operations with result types:
- bm1 / bm2 = num
- bm / num = bm
- num / bm = num (see rdiv)
"""
if isinstance(other, numbers.Number):
# bm / num
result = self._byte_value / other
return (type(self))(bytes=result)
else:
# bm1 / bm2
return self._byte_value / float(other.bytes)
def __truediv__(self, other):
# num / bm
return self.__div__(other)
# def __floordiv__(self, other):
# return NotImplemented
# def __mod__(self, other):
# return NotImplemented
# def __divmod__(self, other):
# return NotImplemented
# def __pow__(self, other, modulo=None):
# return NotImplemented
##################################################################
"""These methods are called to implement the binary arithmetic
operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
reflected (swapped) operands. These functions are only called if the
left operand does not support the corresponding operation and the
operands are of different types. [2] For instance, to evaluate the
expression x - y, where y is an instance of a class that has an
__rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
NotImplemented.
These are the add/sub/mul/div methods for syntax where a number type
is given for the LTYPE and a bitmath object is given for the
RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
"""
def __radd__(self, other):
# num + bm = num
return other + self.value
def __rsub__(self, other):
# num - bm = num
return other - self.value
def __rmul__(self, other):
# num * bm = bm
return self * other
def __rdiv__(self, other):
# num / bm = num
return other / float(self.value)
def __rtruediv__(self, other):
# num / bm = num
return other / float(self.value)
"""Called to implement the built-in functions complex(), int(),
long(), and float(). Should return a value of the appropriate type.
If one of those methods does not support the operation with the
supplied arguments, it should return NotImplemented.
For bitmath purposes, these methods return the int/long/float
equivalent of the this instances prefix Unix value. That is to say:
- int(KiB(3.336)) would return 3
- long(KiB(3.336)) would return 3L
- float(KiB(3.336)) would return 3.336
"""
def __int__(self):
"""Return this instances prefix unit as an integer"""
return int(self.prefix_value)
def __long__(self):
"""Return this instances prefix unit as a long integer"""
return long(self.prefix_value) # pragma: PY3X no cover
def __float__(self):
"""Return this instances prefix unit as a floating point number"""
return float(self.prefix_value)
##################################################################
# Bitwise operations
##################################################################
def __lshift__(self, other):
"""Left shift, ex: 100 << 2
A left shift by n bits is equivalent to multiplication by pow(2,
n). A long integer is returned if the result exceeds the range of
plain integers."""
shifted = int(self.bits) << other
return type(self)(bits=shifted)
def __rshift__(self, other):
"""Right shift, ex: 100 >> 2
A right shift by n bits is equivalent to division by pow(2, n)."""
shifted = int(self.bits) >> other
return type(self)(bits=shifted)
def __and__(self, other):
""""Bitwise and, ex: 100 & 2
bitwise and". Each bit of the output is 1 if the corresponding bit
of x AND of y is 1, otherwise it's 0."""
andd = int(self.bits) & other
return type(self)(bits=andd)
def __xor__(self, other):
"""Bitwise xor, ex: 100 ^ 2
Does a "bitwise exclusive or". Each bit of the output is the same
as the corresponding bit in x if that bit in y is 0, and it's the
complement of the bit in x if that bit in y is 1."""
xord = int(self.bits) ^ other
return type(self)(bits=xord)
def __or__(self, other):
"""Bitwise or, ex: 100 | 2
Does a "bitwise or". Each bit of the output is 0 if the corresponding
bit of x AND of y is 0, otherwise it's 1."""
ord = int(self.bits) | other
return type(self)(bits=ord)
##################################################################
def __neg__(self):
"""The negative version of this instance"""
return (type(self))(-abs(self.prefix_value))
def __pos__(self):
return (type(self))(abs(self.prefix_value))
def __abs__(self):
return (type(self))(abs(self.prefix_value))
|
tbielawa/bitmath
|
bitmath/__init__.py
|
Bitmath._norm
|
python
|
def _norm(self, value):
if isinstance(value, self.valid_types):
self._byte_value = value * self._unit_value
self._bit_value = self._byte_value * 8.0
else:
raise ValueError("Initialization value '%s' is of an invalid type: %s. "
"Must be one of %s" % (
value,
type(value),
", ".join(str(x) for x in self.valid_types)))
|
Normalize the input value into the fundamental unit for this prefix
type.
:param number value: The input value to be normalized
:raises ValueError: if the input value is not a type of real number
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L252-L267
| null |
class Bitmath(object):
"""The base class for all the other prefix classes"""
# All the allowed input types
valid_types = (int, float, long)
def __init__(self, value=0, bytes=None, bits=None):
"""Instantiate with `value` by the unit, in plain bytes, or
bits. Don't supply more than one keyword.
default behavior: initialize with value of 0
only setting value: assert bytes is None and bits is None
only setting bytes: assert value == 0 and bits is None
only setting bits: assert value == 0 and bytes is None
"""
_raise = False
if (value == 0) and (bytes is None) and (bits is None):
pass
# Setting by bytes
elif bytes is not None:
if (value == 0) and (bits is None):
pass
else:
_raise = True
# setting by bits
elif bits is not None:
if (value == 0) and (bytes is None):
pass
else:
_raise = True
if _raise:
raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
self._do_setup()
if bytes:
# We were provided with the fundamental base unit, no need
# to normalize
self._byte_value = bytes
self._bit_value = bytes * 8.0
elif bits:
# We were *ALMOST* given the fundamental base
# unit. Translate it into the fundamental unit then
# normalize.
self._byte_value = bits / 8.0
self._bit_value = bits
else:
# We were given a value representative of this *prefix
# unit*. We need to normalize it into the number of bytes
# it represents.
self._norm(value)
# We have the fundamental unit figured out. Set the 'pretty' unit
self._set_prefix_value()
def _set_prefix_value(self):
self.prefix_value = self._to_prefix_value(self._byte_value)
def _to_prefix_value(self, value):
"""Return the number of bits/bytes as they would look like if we
converted *to* this unit"""
return value / float(self._unit_value)
def _setup(self):
raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")
def _do_setup(self):
"""Setup basic parameters for this class.
`base` is the numeric base which when raised to `power` is equivalent
to 1 unit of the corresponding prefix. I.e., base=2, power=10
represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.
Likewise, for the SI prefix classes `base` will be 10, and the `power`
for the Kilobyte is 3.
"""
(self._base, self._power, self._name_singular, self._name_plural) = self._setup()
self._unit_value = self._base ** self._power
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
def system(self):
"""The system of units used to measure an instance"""
if self._base == 2:
return "NIST"
elif self._base == 10:
return "SI"
else:
# I don't expect to ever encounter this logic branch, but
# hey, it's better to have extra test coverage than
# insufficient test coverage.
raise ValueError("Instances mathematical base is an unsupported value: %s" % (
str(self._base)))
@property
def unit(self):
"""The string that is this instances prefix unit name in agreement
with this instance value (singular or plural). Following the
convention that only 1 is singular. This will always be the singular
form when :attr:`bitmath.format_plural` is ``False`` (default value).
For example:
>>> KiB(1).unit == 'KiB'
>>> Byte(0).unit == 'Bytes'
>>> Byte(1).unit == 'Byte'
>>> Byte(1.1).unit == 'Bytes'
>>> Gb(2).unit == 'Gbs'
"""
global format_plural
if self.prefix_value == 1:
# If it's a '1', return it singular, no matter what
return self._name_singular
elif format_plural:
# Pluralization requested
return self._name_plural
else:
# Pluralization NOT requested, and the value is not 1
return self._name_singular
@property
def unit_plural(self):
    """The string that is an instances prefix unit name in the plural
    form.

    For example:
    >>> KiB(1).unit_plural == 'KiB'
    >>> Byte(1024).unit_plural == 'Bytes'
    >>> Gb(1).unit_plural == 'Gb'
    """
    # Always plural, regardless of the current value or the
    # module-level format_plural flag.
    return self._name_plural
@property
def unit_singular(self):
    """The string that is an instances prefix unit name in the singular
    form.

    For example:
    >>> KiB(1).unit_singular == 'KiB'
    >>> Byte(1024).unit == 'B'
    >>> Gb(1).unit_singular == 'Gb'
    """
    # Always singular, regardless of the current value or the
    # module-level format_plural flag.
    return self._name_singular

#: The "prefix" value of an instance
value = property(lambda s: s.prefix_value)
@classmethod
def from_other(cls, item):
    """Build a new ``cls`` instance equal in size to *item*.

    Because this is a class method it may be called on any bitmath
    class without instantiating it first.

    :param item: a :class:`bitmath.Bitmath` subclass instance
    :return: a new ``cls`` representing the same quantity
    :raises ValueError: if *item* is not a bitmath instance

    Example:
    >>> import bitmath
    >>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
    >>> print kib
    KiB(1024.0)
    """
    if not isinstance(item, Bitmath):
        raise ValueError("The provided items must be a valid bitmath class: %s" %
                         str(item.__class__))
    # Bits are the fundamental unit, so this conversion is exact.
    return cls(bits=item.bits)
######################################################################
# Python datamodel customization methods.
# Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
def __repr__(self):
    """Interpreter-style representation, e.g. ``MiB(13.37)``.

    Driven by the module-level ``_FORMAT_REPR`` template.
    """
    global _FORMAT_REPR
    return self.format(_FORMAT_REPR)

def __str__(self):
    """Human-readable form, driven by the module-level ``format_string``."""
    global format_string
    return self.format(format_string)
def format(self, fmt):
    """Render this instance through a user-supplied ``str.format`` template.

    :param str fmt: a format string; it may reference any of the field
        names exposed below (``value``, ``unit``, ``bits`` and so on).
    :return: *fmt* with every referenced field substituted.
    """
    # Expose every public property as a named substitution field.
    fields = dict(
        base=self.base,
        bin=self.bin,
        binary=self.binary,
        bits=self.bits,
        bytes=self.bytes,
        power=self.power,
        system=self.system,
        unit=self.unit,
        unit_plural=self.unit_plural,
        unit_singular=self.unit_singular,
        value=self.value,
    )
    return fmt.format(**fields)
##################################################################
# Guess the best human-readable prefix unit for representation
##################################################################
def best_prefix(self, system=None):
    """Return this quantity converted to the prefix unit that fits it best.

    Optional parameter, `system`, allows you to prefer NIST or SI in
    the results. By default, the current system is used (Bit/Byte default
    to NIST).

    Logic discussion/notes:

    Base-case, does it need converting?

    If the instance is less than one Byte, return the instance as a Bit
    instance.

    Else, begin by recording the unit system the instance is defined
    by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.

    If the instance is not already a ``Byte`` instance, convert it to one.

    NIST units step up by powers of 1024, SI units step up by powers of
    1000.

    Take integer value of the log(base=STEP_POWER) of the instance's byte
    value. E.g.:

    >>> int(math.log(Gb(100).bytes, 1000))
    3

    This will return a value >= 0. The following determines the 'best
    prefix unit' for representation:

    * result == 0, best represented as a Byte
    * result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
    * 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
    """
    # Use absolute value so we don't return Bit's for *everything*
    # less than Byte(1). From github issue #55
    if abs(self) < Byte(1):
        return Bit.from_other(self)
    else:
        if type(self) is Byte:  # pylint: disable=unidiomatic-typecheck
            _inst = self
        else:
            # Normalize to Byte so the log-based index math below is
            # uniform for every input unit.
            _inst = Byte.from_other(self)
        # Which table to consult? Was a preferred system provided?
        if system is None:
            # No preference. Use existing system
            if self.system == 'NIST':
                _STEPS = NIST_PREFIXES
                _BASE = 1024
            elif self.system == 'SI':
                _STEPS = SI_PREFIXES
                _BASE = 1000
            # Anything else would have raised by now
        else:
            # Preferred system provided.
            if system == NIST:
                _STEPS = NIST_PREFIXES
                _BASE = 1024
            elif system == SI:
                _STEPS = SI_PREFIXES
                _BASE = 1000
            else:
                raise ValueError("Invalid value given for 'system' parameter."
                                 " Must be one of NIST or SI")
        # Index of the string of the best prefix in the STEPS list.
        # abs(_inst.bytes) >= 1 here (guarded above), so log() >= 0.
        _index = int(math.log(abs(_inst.bytes), _BASE))
        # Recall that the log() function returns >= 0. This doesn't
        # map to the STEPS list 1:1. That is to say, 0 is handled with
        # special care. So if the _index is 1, we actually want item 0
        # in the list.
        if _index == 0:
            # Already a Byte() type, so return it.
            return _inst
        elif _index >= len(_STEPS):
            # This is a really big number. Use the biggest prefix we've got
            _best_prefix = _STEPS[-1]
        elif 0 < _index < len(_STEPS):
            # There is an appropriate prefix unit to represent this
            _best_prefix = _STEPS[_index - 1]
        # Build a conversion method name such as 'to_KiB' or 'to_kB'
        # from the chosen prefix string and invoke it on self.
        _conversion_method = getattr(
            self,
            'to_%sB' % _best_prefix)
        return _conversion_method()
##################################################################
def to_Bit(self):
return Bit(self._bit_value)
def to_Byte(self):
return Byte(self._byte_value / float(NIST_STEPS['Byte']))
# Properties
Bit = property(lambda s: s.to_Bit())
Byte = property(lambda s: s.to_Byte())
##################################################################
# Prefix-unit conversion helpers. Every method rebuilds the target
# class from the fundamental bit count, so conversions never chain
# rounding error. Each family also exposes matching read-only
# properties (e.g. ``x.KiB`` is shorthand for ``x.to_KiB()``).
def to_KiB(self):
    return KiB(bits=self._bit_value)
def to_Kib(self):
    return Kib(bits=self._bit_value)
def to_kB(self):
    return kB(bits=self._bit_value)
def to_kb(self):
    return kb(bits=self._bit_value)
# Properties
KiB = property(lambda s: s.to_KiB())
Kib = property(lambda s: s.to_Kib())
kB = property(lambda s: s.to_kB())
kb = property(lambda s: s.to_kb())
##################################################################
def to_MiB(self):
    return MiB(bits=self._bit_value)
def to_Mib(self):
    return Mib(bits=self._bit_value)
def to_MB(self):
    return MB(bits=self._bit_value)
def to_Mb(self):
    return Mb(bits=self._bit_value)
# Properties
MiB = property(lambda s: s.to_MiB())
Mib = property(lambda s: s.to_Mib())
MB = property(lambda s: s.to_MB())
Mb = property(lambda s: s.to_Mb())
##################################################################
def to_GiB(self):
    return GiB(bits=self._bit_value)
def to_Gib(self):
    return Gib(bits=self._bit_value)
def to_GB(self):
    return GB(bits=self._bit_value)
def to_Gb(self):
    return Gb(bits=self._bit_value)
# Properties
GiB = property(lambda s: s.to_GiB())
Gib = property(lambda s: s.to_Gib())
GB = property(lambda s: s.to_GB())
Gb = property(lambda s: s.to_Gb())
##################################################################
def to_TiB(self):
    return TiB(bits=self._bit_value)
def to_Tib(self):
    return Tib(bits=self._bit_value)
def to_TB(self):
    return TB(bits=self._bit_value)
def to_Tb(self):
    return Tb(bits=self._bit_value)
# Properties
TiB = property(lambda s: s.to_TiB())
Tib = property(lambda s: s.to_Tib())
TB = property(lambda s: s.to_TB())
Tb = property(lambda s: s.to_Tb())
##################################################################
def to_PiB(self):
    return PiB(bits=self._bit_value)
def to_Pib(self):
    return Pib(bits=self._bit_value)
def to_PB(self):
    return PB(bits=self._bit_value)
def to_Pb(self):
    return Pb(bits=self._bit_value)
# Properties
PiB = property(lambda s: s.to_PiB())
Pib = property(lambda s: s.to_Pib())
PB = property(lambda s: s.to_PB())
Pb = property(lambda s: s.to_Pb())
##################################################################
def to_EiB(self):
    return EiB(bits=self._bit_value)
def to_Eib(self):
    return Eib(bits=self._bit_value)
def to_EB(self):
    return EB(bits=self._bit_value)
def to_Eb(self):
    return Eb(bits=self._bit_value)
# Properties
EiB = property(lambda s: s.to_EiB())
Eib = property(lambda s: s.to_Eib())
EB = property(lambda s: s.to_EB())
Eb = property(lambda s: s.to_Eb())
##################################################################
# The SI units go beyond the NIST units. They also have the Zetta
# and Yotta prefixes.
def to_ZB(self):
    return ZB(bits=self._bit_value)
def to_Zb(self):
    return Zb(bits=self._bit_value)
# Properties
ZB = property(lambda s: s.to_ZB())
Zb = property(lambda s: s.to_Zb())
##################################################################
def to_YB(self):
    return YB(bits=self._bit_value)
def to_Yb(self):
    return Yb(bits=self._bit_value)
#: A new object representing this instance as a Yottabyte
YB = property(lambda s: s.to_YB())
#: A new object representing this instance as a Yottabit
Yb = property(lambda s: s.to_Yb())
##################################################################
# Rich comparison operations
##################################################################
def _compare_pair(self, other):
    """Return the (lhs, rhs) values to compare: the prefix value
    against plain numbers, raw byte counts against other bitmath
    instances."""
    if isinstance(other, numbers.Number):
        return self.prefix_value, other
    return self._byte_value, other.bytes

def __lt__(self, other):
    lhs, rhs = self._compare_pair(other)
    return lhs < rhs

def __le__(self, other):
    lhs, rhs = self._compare_pair(other)
    return lhs <= rhs

def __eq__(self, other):
    lhs, rhs = self._compare_pair(other)
    return lhs == rhs

def __ne__(self, other):
    lhs, rhs = self._compare_pair(other)
    return lhs != rhs

def __gt__(self, other):
    lhs, rhs = self._compare_pair(other)
    return lhs > rhs

def __ge__(self, other):
    lhs, rhs = self._compare_pair(other)
    return lhs >= rhs
##################################################################
# Basic math operations
##################################################################
# Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
# The binary arithmetic hooks below implement mixed arithmetic between
# bitmath instances and plain numbers. When the left operand is a plain
# number Python falls back to the reflected (__r*__) variants instead.
def __add__(self, other):
    """Addition. Supported operations with result types:
    - bm + bm = bm
    - bm + num = num
    """
    if isinstance(other, numbers.Number):
        # Adding a bare number yields a bare number in prefix units.
        return other + self.value
    # bm + bm: sum the fundamental byte counts and rebuild as our type.
    combined = self._byte_value + other.bytes
    return type(self)(bytes=combined)
def __sub__(self, other):
    """Subtraction. Supported operations with result types:
    - bm - bm = bm
    - bm - num = num
    """
    if isinstance(other, numbers.Number):
        # Subtracting a bare number yields a bare number in prefix units.
        return self.value - other
    # bm - bm: difference of the fundamental byte counts, as our type.
    remaining = self._byte_value - other.bytes
    return type(self)(bytes=remaining)
def __mul__(self, other):
    """Multiplication. Supported operations with result types:
    - bm1 * bm2 = bm1
    - bm * num = bm
    """
    if isinstance(other, numbers.Number):
        # Scaling by a plain number keeps our type.
        return type(self)(bytes=self._byte_value * other)
    # bm1 * bm2: multiply the two quantities expressed in bytes.
    other_bytes = other.value * other.base ** other.power
    self_bytes = self.prefix_value * self._base ** self._power
    return type(self)(bytes=other_bytes * self_bytes)
"""The division operator (/) is implemented by these methods. The
__truediv__() method is used when __future__.division is in effect,
otherwise __div__() is used. If only one of these two methods is
defined, the object will not support division in the alternate
context; TypeError will be raised instead."""
def __div__(self, other):
"""Division: Supported operations with result types:
- bm1 / bm2 = num
- bm / num = bm
- num / bm = num (see rdiv)
"""
if isinstance(other, numbers.Number):
# bm / num
result = self._byte_value / other
return (type(self))(bytes=result)
else:
# bm1 / bm2
return self._byte_value / float(other.bytes)
def __truediv__(self, other):
# num / bm
return self.__div__(other)
# def __floordiv__(self, other):
# return NotImplemented
# def __mod__(self, other):
# return NotImplemented
# def __divmod__(self, other):
# return NotImplemented
# def __pow__(self, other, modulo=None):
# return NotImplemented
##################################################################
# Reflected (swapped-operand) arithmetic. Python calls these when the
# left operand is a plain number and the right operand is a bitmath
# instance and the left operand's own hook returned NotImplemented,
# e.g. ``3 * MiB(3)`` or ``10 / GB(42)``.
def __radd__(self, other):
    """num + bm = num"""
    return other + self.value

def __rsub__(self, other):
    """num - bm = num"""
    return other - self.value

def __rmul__(self, other):
    """num * bm = bm"""
    return self * other

def __rdiv__(self, other):
    """num / bm = num"""
    return other / float(self.value)

def __rtruediv__(self, other):
    """num / bm = num"""
    return other / float(self.value)
"""Called to implement the built-in functions complex(), int(),
long(), and float(). Should return a value of the appropriate type.
If one of those methods does not support the operation with the
supplied arguments, it should return NotImplemented.
For bitmath purposes, these methods return the int/long/float
equivalent of the this instances prefix Unix value. That is to say:
- int(KiB(3.336)) would return 3
- long(KiB(3.336)) would return 3L
- float(KiB(3.336)) would return 3.336
"""
def __int__(self):
"""Return this instances prefix unit as an integer"""
return int(self.prefix_value)
def __long__(self):
"""Return this instances prefix unit as a long integer"""
return long(self.prefix_value) # pragma: PY3X no cover
def __float__(self):
"""Return this instances prefix unit as a floating point number"""
return float(self.prefix_value)
##################################################################
# Bitwise operations. Each hook operates on the integer *bit* count
# and rebuilds an instance of the same prefix class from the result.
##################################################################
def __lshift__(self, other):
    """Left shift, ex: 100 << 2.

    Shifting left by n bits is equivalent to multiplying the bit
    count by pow(2, n)."""
    return type(self)(bits=int(self.bits) << other)

def __rshift__(self, other):
    """Right shift, ex: 100 >> 2.

    Shifting right by n bits is equivalent to dividing the bit count
    by pow(2, n)."""
    return type(self)(bits=int(self.bits) >> other)

def __and__(self, other):
    """Bitwise and, ex: 100 & 2.

    Each output bit is 1 only where the corresponding bits of both
    operands are 1."""
    return type(self)(bits=int(self.bits) & other)

def __xor__(self, other):
    """Bitwise exclusive or, ex: 100 ^ 2.

    Each output bit copies the bit of x where y's bit is 0, and is
    the complement of x's bit where y's bit is 1."""
    return type(self)(bits=int(self.bits) ^ other)

def __or__(self, other):
    """Bitwise or, ex: 100 | 2.

    Each output bit is 0 only where the corresponding bits of both
    operands are 0."""
    # Local renamed from ``ord``, which shadowed the builtin.
    return type(self)(bits=int(self.bits) | other)
##################################################################
# Unary sign operators.
def __neg__(self):
    """The negative version of this instance"""
    # NOTE(review): uses -abs(...), so negating an already-negative
    # instance stays negative instead of flipping sign — confirm this
    # asymmetry is intentional before relying on it.
    return (type(self))(-abs(self.prefix_value))

def __pos__(self):
    # NOTE(review): unary plus normalizes to the absolute value rather
    # than acting as identity; verify callers expect this.
    return (type(self))(abs(self.prefix_value))

def __abs__(self):
    # Absolute value of this quantity, as the same prefix class.
    return (type(self))(abs(self.prefix_value))
|
tbielawa/bitmath
|
bitmath/__init__.py
|
Bitmath.system
|
python
|
def system(self):
if self._base == 2:
return "NIST"
elif self._base == 10:
return "SI"
else:
# I don't expect to ever encounter this logic branch, but
# hey, it's better to have extra test coverage than
# insufficient test coverage.
raise ValueError("Instances mathematical base is an unsupported value: %s" % (
str(self._base)))
|
The system of units used to measure an instance
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L299-L310
| null |
class Bitmath(object):
"""The base class for all the other prefix classes"""
# All the allowed input types
valid_types = (int, float, long)
def __init__(self, value=0, bytes=None, bits=None):
"""Instantiate with `value` by the unit, in plain bytes, or
bits. Don't supply more than one keyword.
default behavior: initialize with value of 0
only setting value: assert bytes is None and bits is None
only setting bytes: assert value == 0 and bits is None
only setting bits: assert value == 0 and bytes is None
"""
_raise = False
if (value == 0) and (bytes is None) and (bits is None):
pass
# Setting by bytes
elif bytes is not None:
if (value == 0) and (bits is None):
pass
else:
_raise = True
# setting by bits
elif bits is not None:
if (value == 0) and (bytes is None):
pass
else:
_raise = True
if _raise:
raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
self._do_setup()
if bytes:
# We were provided with the fundamental base unit, no need
# to normalize
self._byte_value = bytes
self._bit_value = bytes * 8.0
elif bits:
# We were *ALMOST* given the fundamental base
# unit. Translate it into the fundamental unit then
# normalize.
self._byte_value = bits / 8.0
self._bit_value = bits
else:
# We were given a value representative of this *prefix
# unit*. We need to normalize it into the number of bytes
# it represents.
self._norm(value)
# We have the fundamental unit figured out. Set the 'pretty' unit
self._set_prefix_value()
def _set_prefix_value(self):
self.prefix_value = self._to_prefix_value(self._byte_value)
def _to_prefix_value(self, value):
"""Return the number of bits/bytes as they would look like if we
converted *to* this unit"""
return value / float(self._unit_value)
def _setup(self):
raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")
def _do_setup(self):
"""Setup basic parameters for this class.
`base` is the numeric base which when raised to `power` is equivalent
to 1 unit of the corresponding prefix. I.e., base=2, power=10
represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.
Likewise, for the SI prefix classes `base` will be 10, and the `power`
for the Kilobyte is 3.
"""
(self._base, self._power, self._name_singular, self._name_plural) = self._setup()
self._unit_value = self._base ** self._power
def _norm(self, value):
"""Normalize the input value into the fundamental unit for this prefix
type.
:param number value: The input value to be normalized
:raises ValueError: if the input value is not a type of real number
"""
if isinstance(value, self.valid_types):
self._byte_value = value * self._unit_value
self._bit_value = self._byte_value * 8.0
else:
raise ValueError("Initialization value '%s' is of an invalid type: %s. "
"Must be one of %s" % (
value,
type(value),
", ".join(str(x) for x in self.valid_types)))
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
@property
def unit(self):
"""The string that is this instances prefix unit name in agreement
with this instance value (singular or plural). Following the
convention that only 1 is singular. This will always be the singular
form when :attr:`bitmath.format_plural` is ``False`` (default value).
For example:
>>> KiB(1).unit == 'KiB'
>>> Byte(0).unit == 'Bytes'
>>> Byte(1).unit == 'Byte'
>>> Byte(1.1).unit == 'Bytes'
>>> Gb(2).unit == 'Gbs'
"""
global format_plural
if self.prefix_value == 1:
# If it's a '1', return it singular, no matter what
return self._name_singular
elif format_plural:
# Pluralization requested
return self._name_plural
else:
# Pluralization NOT requested, and the value is not 1
return self._name_singular
@property
def unit_plural(self):
"""The string that is an instances prefix unit name in the plural
form.
For example:
>>> KiB(1).unit_plural == 'KiB'
>>> Byte(1024).unit_plural == 'Bytes'
>>> Gb(1).unit_plural == 'Gb'
"""
return self._name_plural
@property
def unit_singular(self):
"""The string that is an instances prefix unit name in the singular
form.
For example:
>>> KiB(1).unit_singular == 'KiB'
>>> Byte(1024).unit == 'B'
>>> Gb(1).unit_singular == 'Gb'
"""
return self._name_singular
#: The "prefix" value of an instance
value = property(lambda s: s.prefix_value)
@classmethod
def from_other(cls, item):
"""Factory function to return instances of `item` converted into a new
instance of ``cls``. Because this is a class method, it may be called
from any bitmath class object without the need to explicitly
instantiate the class ahead of time.
*Implicit Parameter:*
* ``cls`` A bitmath class, implicitly set to the class of the
instance object it is called on
*User Supplied Parameter:*
* ``item`` A :class:`bitmath.Bitmath` subclass instance
*Example:*
>>> import bitmath
>>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
>>> print kib
KiB(1024.0)
"""
if isinstance(item, Bitmath):
return cls(bits=item.bits)
else:
raise ValueError("The provided items must be a valid bitmath class: %s" %
str(item.__class__))
######################################################################
# The following implement the Python datamodel customization methods
#
# Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
def __repr__(self):
"""Representation of this object as you would expect to see in an
interpreter"""
global _FORMAT_REPR
return self.format(_FORMAT_REPR)
def __str__(self):
"""String representation of this object"""
global format_string
return self.format(format_string)
def format(self, fmt):
"""Return a representation of this instance formatted with user
supplied syntax"""
_fmt_params = {
'base': self.base,
'bin': self.bin,
'binary': self.binary,
'bits': self.bits,
'bytes': self.bytes,
'power': self.power,
'system': self.system,
'unit': self.unit,
'unit_plural': self.unit_plural,
'unit_singular': self.unit_singular,
'value': self.value
}
return fmt.format(**_fmt_params)
##################################################################
# Guess the best human-readable prefix unit for representation
##################################################################
def best_prefix(self, system=None):
"""Optional parameter, `system`, allows you to prefer NIST or SI in
the results. By default, the current system is used (Bit/Byte default
to NIST).
Logic discussion/notes:
Base-case, does it need converting?
If the instance is less than one Byte, return the instance as a Bit
instance.
Else, begin by recording the unit system the instance is defined
by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.
If the instance is not already a ``Byte`` instance, convert it to one.
NIST units step up by powers of 1024, SI units step up by powers of
1000.
Take integer value of the log(base=STEP_POWER) of the instance's byte
value. E.g.:
>>> int(math.log(Gb(100).bytes, 1000))
3
This will return a value >= 0. The following determines the 'best
prefix unit' for representation:
* result == 0, best represented as a Byte
* result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
* 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
"""
# Use absolute value so we don't return Bit's for *everything*
# less than Byte(1). From github issue #55
if abs(self) < Byte(1):
return Bit.from_other(self)
else:
if type(self) is Byte: # pylint: disable=unidiomatic-typecheck
_inst = self
else:
_inst = Byte.from_other(self)
# Which table to consult? Was a preferred system provided?
if system is None:
# No preference. Use existing system
if self.system == 'NIST':
_STEPS = NIST_PREFIXES
_BASE = 1024
elif self.system == 'SI':
_STEPS = SI_PREFIXES
_BASE = 1000
# Anything else would have raised by now
else:
# Preferred system provided.
if system == NIST:
_STEPS = NIST_PREFIXES
_BASE = 1024
elif system == SI:
_STEPS = SI_PREFIXES
_BASE = 1000
else:
raise ValueError("Invalid value given for 'system' parameter."
" Must be one of NIST or SI")
# Index of the string of the best prefix in the STEPS list
_index = int(math.log(abs(_inst.bytes), _BASE))
# Recall that the log() function returns >= 0. This doesn't
# map to the STEPS list 1:1. That is to say, 0 is handled with
# special care. So if the _index is 1, we actually want item 0
# in the list.
if _index == 0:
# Already a Byte() type, so return it.
return _inst
elif _index >= len(_STEPS):
# This is a really big number. Use the biggest prefix we've got
_best_prefix = _STEPS[-1]
elif 0 < _index < len(_STEPS):
# There is an appropriate prefix unit to represent this
_best_prefix = _STEPS[_index - 1]
_conversion_method = getattr(
self,
'to_%sB' % _best_prefix)
return _conversion_method()
##################################################################
def to_Bit(self):
return Bit(self._bit_value)
def to_Byte(self):
return Byte(self._byte_value / float(NIST_STEPS['Byte']))
# Properties
Bit = property(lambda s: s.to_Bit())
Byte = property(lambda s: s.to_Byte())
##################################################################
def to_KiB(self):
return KiB(bits=self._bit_value)
def to_Kib(self):
return Kib(bits=self._bit_value)
def to_kB(self):
return kB(bits=self._bit_value)
def to_kb(self):
return kb(bits=self._bit_value)
# Properties
KiB = property(lambda s: s.to_KiB())
Kib = property(lambda s: s.to_Kib())
kB = property(lambda s: s.to_kB())
kb = property(lambda s: s.to_kb())
##################################################################
def to_MiB(self):
return MiB(bits=self._bit_value)
def to_Mib(self):
return Mib(bits=self._bit_value)
def to_MB(self):
return MB(bits=self._bit_value)
def to_Mb(self):
return Mb(bits=self._bit_value)
# Properties
MiB = property(lambda s: s.to_MiB())
Mib = property(lambda s: s.to_Mib())
MB = property(lambda s: s.to_MB())
Mb = property(lambda s: s.to_Mb())
##################################################################
def to_GiB(self):
return GiB(bits=self._bit_value)
def to_Gib(self):
return Gib(bits=self._bit_value)
def to_GB(self):
return GB(bits=self._bit_value)
def to_Gb(self):
return Gb(bits=self._bit_value)
# Properties
GiB = property(lambda s: s.to_GiB())
Gib = property(lambda s: s.to_Gib())
GB = property(lambda s: s.to_GB())
Gb = property(lambda s: s.to_Gb())
##################################################################
def to_TiB(self):
return TiB(bits=self._bit_value)
def to_Tib(self):
return Tib(bits=self._bit_value)
def to_TB(self):
return TB(bits=self._bit_value)
def to_Tb(self):
return Tb(bits=self._bit_value)
# Properties
TiB = property(lambda s: s.to_TiB())
Tib = property(lambda s: s.to_Tib())
TB = property(lambda s: s.to_TB())
Tb = property(lambda s: s.to_Tb())
##################################################################
def to_PiB(self):
return PiB(bits=self._bit_value)
def to_Pib(self):
return Pib(bits=self._bit_value)
def to_PB(self):
return PB(bits=self._bit_value)
def to_Pb(self):
return Pb(bits=self._bit_value)
# Properties
PiB = property(lambda s: s.to_PiB())
Pib = property(lambda s: s.to_Pib())
PB = property(lambda s: s.to_PB())
Pb = property(lambda s: s.to_Pb())
##################################################################
def to_EiB(self):
return EiB(bits=self._bit_value)
def to_Eib(self):
return Eib(bits=self._bit_value)
def to_EB(self):
return EB(bits=self._bit_value)
def to_Eb(self):
return Eb(bits=self._bit_value)
# Properties
EiB = property(lambda s: s.to_EiB())
Eib = property(lambda s: s.to_Eib())
EB = property(lambda s: s.to_EB())
Eb = property(lambda s: s.to_Eb())
##################################################################
# The SI units go beyond the NIST units. They also have the Zetta
# and Yotta prefixes.
def to_ZB(self):
return ZB(bits=self._bit_value)
def to_Zb(self):
return Zb(bits=self._bit_value)
# Properties
ZB = property(lambda s: s.to_ZB())
Zb = property(lambda s: s.to_Zb())
##################################################################
def to_YB(self):
return YB(bits=self._bit_value)
def to_Yb(self):
return Yb(bits=self._bit_value)
#: A new object representing this instance as a Yottabyte
YB = property(lambda s: s.to_YB())
Yb = property(lambda s: s.to_Yb())
##################################################################
# Rich comparison operations
##################################################################
def __lt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value < other
else:
return self._byte_value < other.bytes
def __le__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value <= other
else:
return self._byte_value <= other.bytes
def __eq__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value == other
else:
return self._byte_value == other.bytes
def __ne__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value != other
else:
return self._byte_value != other.bytes
def __gt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value > other
else:
return self._byte_value > other.bytes
def __ge__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value >= other
else:
return self._byte_value >= other.bytes
##################################################################
# Basic math operations
##################################################################
# Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
"""These methods are called to implement the binary arithmetic
operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
instance, to evaluate the expression x + y, where x is an instance of
a class that has an __add__() method, x.__add__(y) is called. The
__divmod__() method should be the equivalent to using __floordiv__()
and __mod__(); it should not be related to __truediv__() (described
below). Note that __pow__() should be defined to accept an optional
third argument if the ternary version of the built-in pow() function
is to be supported.object.__complex__(self)
"""
def __add__(self, other):
"""Supported operations with result types:
- bm + bm = bm
- bm + num = num
- num + bm = num (see radd)
"""
if isinstance(other, numbers.Number):
# bm + num
return other + self.value
else:
# bm + bm
total_bytes = self._byte_value + other.bytes
return (type(self))(bytes=total_bytes)
def __sub__(self, other):
"""Subtraction: Supported operations with result types:
- bm - bm = bm
- bm - num = num
- num - bm = num (see rsub)
"""
if isinstance(other, numbers.Number):
# bm - num
return self.value - other
else:
# bm - bm
total_bytes = self._byte_value - other.bytes
return (type(self))(bytes=total_bytes)
def __mul__(self, other):
"""Multiplication: Supported operations with result types:
- bm1 * bm2 = bm1
- bm * num = bm
- num * bm = num (see rmul)
"""
if isinstance(other, numbers.Number):
# bm * num
result = self._byte_value * other
return (type(self))(bytes=result)
else:
# bm1 * bm2
_other = other.value * other.base ** other.power
_self = self.prefix_value * self._base ** self._power
return (type(self))(bytes=_other * _self)
"""The division operator (/) is implemented by these methods. The
__truediv__() method is used when __future__.division is in effect,
otherwise __div__() is used. If only one of these two methods is
defined, the object will not support division in the alternate
context; TypeError will be raised instead."""
def __div__(self, other):
"""Division: Supported operations with result types:
- bm1 / bm2 = num
- bm / num = bm
- num / bm = num (see rdiv)
"""
if isinstance(other, numbers.Number):
# bm / num
result = self._byte_value / other
return (type(self))(bytes=result)
else:
# bm1 / bm2
return self._byte_value / float(other.bytes)
def __truediv__(self, other):
# num / bm
return self.__div__(other)
# def __floordiv__(self, other):
# return NotImplemented
# def __mod__(self, other):
# return NotImplemented
# def __divmod__(self, other):
# return NotImplemented
# def __pow__(self, other, modulo=None):
# return NotImplemented
##################################################################
"""These methods are called to implement the binary arithmetic
operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
reflected (swapped) operands. These functions are only called if the
left operand does not support the corresponding operation and the
operands are of different types. [2] For instance, to evaluate the
expression x - y, where y is an instance of a class that has an
__rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
NotImplemented.
These are the add/sub/mul/div methods for syntax where a number type
is given for the LTYPE and a bitmath object is given for the
RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
"""
def __radd__(self, other):
# num + bm = num
return other + self.value
def __rsub__(self, other):
# num - bm = num
return other - self.value
def __rmul__(self, other):
# num * bm = bm
return self * other
def __rdiv__(self, other):
# num / bm = num
return other / float(self.value)
def __rtruediv__(self, other):
# num / bm = num
return other / float(self.value)
"""Called to implement the built-in functions complex(), int(),
long(), and float(). Should return a value of the appropriate type.
If one of those methods does not support the operation with the
supplied arguments, it should return NotImplemented.
For bitmath purposes, these methods return the int/long/float
equivalent of the this instances prefix Unix value. That is to say:
- int(KiB(3.336)) would return 3
- long(KiB(3.336)) would return 3L
- float(KiB(3.336)) would return 3.336
"""
def __int__(self):
"""Return this instances prefix unit as an integer"""
return int(self.prefix_value)
def __long__(self):
"""Return this instances prefix unit as a long integer"""
return long(self.prefix_value) # pragma: PY3X no cover
def __float__(self):
"""Return this instances prefix unit as a floating point number"""
return float(self.prefix_value)
##################################################################
# Bitwise operations
##################################################################
def __lshift__(self, other):
"""Left shift, ex: 100 << 2
A left shift by n bits is equivalent to multiplication by pow(2,
n). A long integer is returned if the result exceeds the range of
plain integers."""
shifted = int(self.bits) << other
return type(self)(bits=shifted)
def __rshift__(self, other):
"""Right shift, ex: 100 >> 2
A right shift by n bits is equivalent to division by pow(2, n)."""
shifted = int(self.bits) >> other
return type(self)(bits=shifted)
def __and__(self, other):
""""Bitwise and, ex: 100 & 2
bitwise and". Each bit of the output is 1 if the corresponding bit
of x AND of y is 1, otherwise it's 0."""
andd = int(self.bits) & other
return type(self)(bits=andd)
def __xor__(self, other):
"""Bitwise xor, ex: 100 ^ 2
Does a "bitwise exclusive or". Each bit of the output is the same
as the corresponding bit in x if that bit in y is 0, and it's the
complement of the bit in x if that bit in y is 1."""
xord = int(self.bits) ^ other
return type(self)(bits=xord)
def __or__(self, other):
"""Bitwise or, ex: 100 | 2
Does a "bitwise or". Each bit of the output is 0 if the corresponding
bit of x AND of y is 0, otherwise it's 1."""
ord = int(self.bits) | other
return type(self)(bits=ord)
##################################################################
def __neg__(self):
"""The negative version of this instance"""
return (type(self))(-abs(self.prefix_value))
def __pos__(self):
return (type(self))(abs(self.prefix_value))
def __abs__(self):
return (type(self))(abs(self.prefix_value))
|
tbielawa/bitmath
|
bitmath/__init__.py
|
Bitmath.unit
|
python
|
def unit(self):
global format_plural
if self.prefix_value == 1:
# If it's a '1', return it singular, no matter what
return self._name_singular
elif format_plural:
# Pluralization requested
return self._name_plural
else:
# Pluralization NOT requested, and the value is not 1
return self._name_singular
|
The string that is this instances prefix unit name in agreement
with this instance value (singular or plural). Following the
convention that only 1 is singular. This will always be the singular
form when :attr:`bitmath.format_plural` is ``False`` (default value).
For example:
>>> KiB(1).unit == 'KiB'
>>> Byte(0).unit == 'Bytes'
>>> Byte(1).unit == 'Byte'
>>> Byte(1.1).unit == 'Bytes'
>>> Gb(2).unit == 'Gbs'
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L313-L338
| null |
class Bitmath(object):
"""The base class for all the other prefix classes"""
# All the allowed input types
valid_types = (int, float, long)
def __init__(self, value=0, bytes=None, bits=None):
"""Instantiate with `value` by the unit, in plain bytes, or
bits. Don't supply more than one keyword.
default behavior: initialize with value of 0
only setting value: assert bytes is None and bits is None
only setting bytes: assert value == 0 and bits is None
only setting bits: assert value == 0 and bytes is None
"""
_raise = False
if (value == 0) and (bytes is None) and (bits is None):
pass
# Setting by bytes
elif bytes is not None:
if (value == 0) and (bits is None):
pass
else:
_raise = True
# setting by bits
elif bits is not None:
if (value == 0) and (bytes is None):
pass
else:
_raise = True
if _raise:
raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
self._do_setup()
if bytes:
# We were provided with the fundamental base unit, no need
# to normalize
self._byte_value = bytes
self._bit_value = bytes * 8.0
elif bits:
# We were *ALMOST* given the fundamental base
# unit. Translate it into the fundamental unit then
# normalize.
self._byte_value = bits / 8.0
self._bit_value = bits
else:
# We were given a value representative of this *prefix
# unit*. We need to normalize it into the number of bytes
# it represents.
self._norm(value)
# We have the fundamental unit figured out. Set the 'pretty' unit
self._set_prefix_value()
def _set_prefix_value(self):
self.prefix_value = self._to_prefix_value(self._byte_value)
def _to_prefix_value(self, value):
"""Return the number of bits/bytes as they would look like if we
converted *to* this unit"""
return value / float(self._unit_value)
def _setup(self):
raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")
def _do_setup(self):
"""Setup basic parameters for this class.
`base` is the numeric base which when raised to `power` is equivalent
to 1 unit of the corresponding prefix. I.e., base=2, power=10
represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.
Likewise, for the SI prefix classes `base` will be 10, and the `power`
for the Kilobyte is 3.
"""
(self._base, self._power, self._name_singular, self._name_plural) = self._setup()
self._unit_value = self._base ** self._power
def _norm(self, value):
"""Normalize the input value into the fundamental unit for this prefix
type.
:param number value: The input value to be normalized
:raises ValueError: if the input value is not a type of real number
"""
if isinstance(value, self.valid_types):
self._byte_value = value * self._unit_value
self._bit_value = self._byte_value * 8.0
else:
raise ValueError("Initialization value '%s' is of an invalid type: %s. "
"Must be one of %s" % (
value,
type(value),
", ".join(str(x) for x in self.valid_types)))
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
def system(self):
"""The system of units used to measure an instance"""
if self._base == 2:
return "NIST"
elif self._base == 10:
return "SI"
else:
# I don't expect to ever encounter this logic branch, but
# hey, it's better to have extra test coverage than
# insufficient test coverage.
raise ValueError("Instances mathematical base is an unsupported value: %s" % (
str(self._base)))
@property
@property
def unit_plural(self):
"""The string that is an instances prefix unit name in the plural
form.
For example:
>>> KiB(1).unit_plural == 'KiB'
>>> Byte(1024).unit_plural == 'Bytes'
>>> Gb(1).unit_plural == 'Gb'
"""
return self._name_plural
@property
def unit_singular(self):
"""The string that is an instances prefix unit name in the singular
form.
For example:
>>> KiB(1).unit_singular == 'KiB'
>>> Byte(1024).unit == 'B'
>>> Gb(1).unit_singular == 'Gb'
"""
return self._name_singular
#: The "prefix" value of an instance
value = property(lambda s: s.prefix_value)
@classmethod
def from_other(cls, item):
"""Factory function to return instances of `item` converted into a new
instance of ``cls``. Because this is a class method, it may be called
from any bitmath class object without the need to explicitly
instantiate the class ahead of time.
*Implicit Parameter:*
* ``cls`` A bitmath class, implicitly set to the class of the
instance object it is called on
*User Supplied Parameter:*
* ``item`` A :class:`bitmath.Bitmath` subclass instance
*Example:*
>>> import bitmath
>>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
>>> print kib
KiB(1024.0)
"""
if isinstance(item, Bitmath):
return cls(bits=item.bits)
else:
raise ValueError("The provided items must be a valid bitmath class: %s" %
str(item.__class__))
######################################################################
# The following implement the Python datamodel customization methods
#
# Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
def __repr__(self):
"""Representation of this object as you would expect to see in an
interpreter"""
global _FORMAT_REPR
return self.format(_FORMAT_REPR)
def __str__(self):
"""String representation of this object"""
global format_string
return self.format(format_string)
def format(self, fmt):
"""Return a representation of this instance formatted with user
supplied syntax"""
_fmt_params = {
'base': self.base,
'bin': self.bin,
'binary': self.binary,
'bits': self.bits,
'bytes': self.bytes,
'power': self.power,
'system': self.system,
'unit': self.unit,
'unit_plural': self.unit_plural,
'unit_singular': self.unit_singular,
'value': self.value
}
return fmt.format(**_fmt_params)
##################################################################
# Guess the best human-readable prefix unit for representation
##################################################################
def best_prefix(self, system=None):
"""Optional parameter, `system`, allows you to prefer NIST or SI in
the results. By default, the current system is used (Bit/Byte default
to NIST).
Logic discussion/notes:
Base-case, does it need converting?
If the instance is less than one Byte, return the instance as a Bit
instance.
Else, begin by recording the unit system the instance is defined
by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.
If the instance is not already a ``Byte`` instance, convert it to one.
NIST units step up by powers of 1024, SI units step up by powers of
1000.
Take integer value of the log(base=STEP_POWER) of the instance's byte
value. E.g.:
>>> int(math.log(Gb(100).bytes, 1000))
3
This will return a value >= 0. The following determines the 'best
prefix unit' for representation:
* result == 0, best represented as a Byte
* result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
* 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
"""
# Use absolute value so we don't return Bit's for *everything*
# less than Byte(1). From github issue #55
if abs(self) < Byte(1):
return Bit.from_other(self)
else:
if type(self) is Byte: # pylint: disable=unidiomatic-typecheck
_inst = self
else:
_inst = Byte.from_other(self)
# Which table to consult? Was a preferred system provided?
if system is None:
# No preference. Use existing system
if self.system == 'NIST':
_STEPS = NIST_PREFIXES
_BASE = 1024
elif self.system == 'SI':
_STEPS = SI_PREFIXES
_BASE = 1000
# Anything else would have raised by now
else:
# Preferred system provided.
if system == NIST:
_STEPS = NIST_PREFIXES
_BASE = 1024
elif system == SI:
_STEPS = SI_PREFIXES
_BASE = 1000
else:
raise ValueError("Invalid value given for 'system' parameter."
" Must be one of NIST or SI")
# Index of the string of the best prefix in the STEPS list
_index = int(math.log(abs(_inst.bytes), _BASE))
# Recall that the log() function returns >= 0. This doesn't
# map to the STEPS list 1:1. That is to say, 0 is handled with
# special care. So if the _index is 1, we actually want item 0
# in the list.
if _index == 0:
# Already a Byte() type, so return it.
return _inst
elif _index >= len(_STEPS):
# This is a really big number. Use the biggest prefix we've got
_best_prefix = _STEPS[-1]
elif 0 < _index < len(_STEPS):
# There is an appropriate prefix unit to represent this
_best_prefix = _STEPS[_index - 1]
_conversion_method = getattr(
self,
'to_%sB' % _best_prefix)
return _conversion_method()
##################################################################
def to_Bit(self):
return Bit(self._bit_value)
def to_Byte(self):
return Byte(self._byte_value / float(NIST_STEPS['Byte']))
# Properties
Bit = property(lambda s: s.to_Bit())
Byte = property(lambda s: s.to_Byte())
##################################################################
def to_KiB(self):
return KiB(bits=self._bit_value)
def to_Kib(self):
return Kib(bits=self._bit_value)
def to_kB(self):
return kB(bits=self._bit_value)
def to_kb(self):
return kb(bits=self._bit_value)
# Properties
KiB = property(lambda s: s.to_KiB())
Kib = property(lambda s: s.to_Kib())
kB = property(lambda s: s.to_kB())
kb = property(lambda s: s.to_kb())
##################################################################
def to_MiB(self):
return MiB(bits=self._bit_value)
def to_Mib(self):
return Mib(bits=self._bit_value)
def to_MB(self):
return MB(bits=self._bit_value)
def to_Mb(self):
return Mb(bits=self._bit_value)
# Properties
MiB = property(lambda s: s.to_MiB())
Mib = property(lambda s: s.to_Mib())
MB = property(lambda s: s.to_MB())
Mb = property(lambda s: s.to_Mb())
##################################################################
def to_GiB(self):
return GiB(bits=self._bit_value)
def to_Gib(self):
return Gib(bits=self._bit_value)
def to_GB(self):
return GB(bits=self._bit_value)
def to_Gb(self):
return Gb(bits=self._bit_value)
# Properties
GiB = property(lambda s: s.to_GiB())
Gib = property(lambda s: s.to_Gib())
GB = property(lambda s: s.to_GB())
Gb = property(lambda s: s.to_Gb())
##################################################################
def to_TiB(self):
return TiB(bits=self._bit_value)
def to_Tib(self):
return Tib(bits=self._bit_value)
def to_TB(self):
return TB(bits=self._bit_value)
def to_Tb(self):
return Tb(bits=self._bit_value)
# Properties
TiB = property(lambda s: s.to_TiB())
Tib = property(lambda s: s.to_Tib())
TB = property(lambda s: s.to_TB())
Tb = property(lambda s: s.to_Tb())
##################################################################
def to_PiB(self):
return PiB(bits=self._bit_value)
def to_Pib(self):
return Pib(bits=self._bit_value)
def to_PB(self):
return PB(bits=self._bit_value)
def to_Pb(self):
return Pb(bits=self._bit_value)
# Properties
PiB = property(lambda s: s.to_PiB())
Pib = property(lambda s: s.to_Pib())
PB = property(lambda s: s.to_PB())
Pb = property(lambda s: s.to_Pb())
##################################################################
def to_EiB(self):
return EiB(bits=self._bit_value)
def to_Eib(self):
return Eib(bits=self._bit_value)
def to_EB(self):
return EB(bits=self._bit_value)
def to_Eb(self):
return Eb(bits=self._bit_value)
# Properties
EiB = property(lambda s: s.to_EiB())
Eib = property(lambda s: s.to_Eib())
EB = property(lambda s: s.to_EB())
Eb = property(lambda s: s.to_Eb())
##################################################################
# The SI units go beyond the NIST units. They also have the Zetta
# and Yotta prefixes.
def to_ZB(self):
return ZB(bits=self._bit_value)
def to_Zb(self):
return Zb(bits=self._bit_value)
# Properties
ZB = property(lambda s: s.to_ZB())
Zb = property(lambda s: s.to_Zb())
##################################################################
def to_YB(self):
return YB(bits=self._bit_value)
def to_Yb(self):
return Yb(bits=self._bit_value)
#: A new object representing this instance as a Yottabyte
YB = property(lambda s: s.to_YB())
Yb = property(lambda s: s.to_Yb())
##################################################################
# Rich comparison operations
##################################################################
def __lt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value < other
else:
return self._byte_value < other.bytes
def __le__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value <= other
else:
return self._byte_value <= other.bytes
def __eq__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value == other
else:
return self._byte_value == other.bytes
def __ne__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value != other
else:
return self._byte_value != other.bytes
def __gt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value > other
else:
return self._byte_value > other.bytes
def __ge__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value >= other
else:
return self._byte_value >= other.bytes
##################################################################
# Basic math operations
##################################################################
# Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
"""These methods are called to implement the binary arithmetic
operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
instance, to evaluate the expression x + y, where x is an instance of
a class that has an __add__() method, x.__add__(y) is called. The
__divmod__() method should be the equivalent to using __floordiv__()
and __mod__(); it should not be related to __truediv__() (described
below). Note that __pow__() should be defined to accept an optional
third argument if the ternary version of the built-in pow() function
is to be supported.object.__complex__(self)
"""
def __add__(self, other):
"""Supported operations with result types:
- bm + bm = bm
- bm + num = num
- num + bm = num (see radd)
"""
if isinstance(other, numbers.Number):
# bm + num
return other + self.value
else:
# bm + bm
total_bytes = self._byte_value + other.bytes
return (type(self))(bytes=total_bytes)
def __sub__(self, other):
"""Subtraction: Supported operations with result types:
- bm - bm = bm
- bm - num = num
- num - bm = num (see rsub)
"""
if isinstance(other, numbers.Number):
# bm - num
return self.value - other
else:
# bm - bm
total_bytes = self._byte_value - other.bytes
return (type(self))(bytes=total_bytes)
def __mul__(self, other):
"""Multiplication: Supported operations with result types:
- bm1 * bm2 = bm1
- bm * num = bm
- num * bm = num (see rmul)
"""
if isinstance(other, numbers.Number):
# bm * num
result = self._byte_value * other
return (type(self))(bytes=result)
else:
# bm1 * bm2
_other = other.value * other.base ** other.power
_self = self.prefix_value * self._base ** self._power
return (type(self))(bytes=_other * _self)
"""The division operator (/) is implemented by these methods. The
__truediv__() method is used when __future__.division is in effect,
otherwise __div__() is used. If only one of these two methods is
defined, the object will not support division in the alternate
context; TypeError will be raised instead."""
def __div__(self, other):
"""Division: Supported operations with result types:
- bm1 / bm2 = num
- bm / num = bm
- num / bm = num (see rdiv)
"""
if isinstance(other, numbers.Number):
# bm / num
result = self._byte_value / other
return (type(self))(bytes=result)
else:
# bm1 / bm2
return self._byte_value / float(other.bytes)
def __truediv__(self, other):
# num / bm
return self.__div__(other)
# def __floordiv__(self, other):
# return NotImplemented
# def __mod__(self, other):
# return NotImplemented
# def __divmod__(self, other):
# return NotImplemented
# def __pow__(self, other, modulo=None):
# return NotImplemented
##################################################################
"""These methods are called to implement the binary arithmetic
operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
reflected (swapped) operands. These functions are only called if the
left operand does not support the corresponding operation and the
operands are of different types. [2] For instance, to evaluate the
expression x - y, where y is an instance of a class that has an
__rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
NotImplemented.
These are the add/sub/mul/div methods for syntax where a number type
is given for the LTYPE and a bitmath object is given for the
RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
"""
def __radd__(self, other):
# num + bm = num
return other + self.value
def __rsub__(self, other):
# num - bm = num
return other - self.value
def __rmul__(self, other):
# num * bm = bm
return self * other
def __rdiv__(self, other):
# num / bm = num
return other / float(self.value)
def __rtruediv__(self, other):
# num / bm = num
return other / float(self.value)
"""Called to implement the built-in functions complex(), int(),
long(), and float(). Should return a value of the appropriate type.
If one of those methods does not support the operation with the
supplied arguments, it should return NotImplemented.
For bitmath purposes, these methods return the int/long/float
equivalent of the this instances prefix Unix value. That is to say:
- int(KiB(3.336)) would return 3
- long(KiB(3.336)) would return 3L
- float(KiB(3.336)) would return 3.336
"""
def __int__(self):
"""Return this instances prefix unit as an integer"""
return int(self.prefix_value)
def __long__(self):
"""Return this instances prefix unit as a long integer"""
return long(self.prefix_value) # pragma: PY3X no cover
def __float__(self):
"""Return this instances prefix unit as a floating point number"""
return float(self.prefix_value)
##################################################################
# Bitwise operations
##################################################################
def __lshift__(self, other):
"""Left shift, ex: 100 << 2
A left shift by n bits is equivalent to multiplication by pow(2,
n). A long integer is returned if the result exceeds the range of
plain integers."""
shifted = int(self.bits) << other
return type(self)(bits=shifted)
def __rshift__(self, other):
"""Right shift, ex: 100 >> 2
A right shift by n bits is equivalent to division by pow(2, n)."""
shifted = int(self.bits) >> other
return type(self)(bits=shifted)
def __and__(self, other):
""""Bitwise and, ex: 100 & 2
bitwise and". Each bit of the output is 1 if the corresponding bit
of x AND of y is 1, otherwise it's 0."""
andd = int(self.bits) & other
return type(self)(bits=andd)
def __xor__(self, other):
"""Bitwise xor, ex: 100 ^ 2
Does a "bitwise exclusive or". Each bit of the output is the same
as the corresponding bit in x if that bit in y is 0, and it's the
complement of the bit in x if that bit in y is 1."""
xord = int(self.bits) ^ other
return type(self)(bits=xord)
def __or__(self, other):
"""Bitwise or, ex: 100 | 2
Does a "bitwise or". Each bit of the output is 0 if the corresponding
bit of x AND of y is 0, otherwise it's 1."""
ord = int(self.bits) | other
return type(self)(bits=ord)
##################################################################
def __neg__(self):
"""The negative version of this instance"""
return (type(self))(-abs(self.prefix_value))
def __pos__(self):
return (type(self))(abs(self.prefix_value))
def __abs__(self):
return (type(self))(abs(self.prefix_value))
|
tbielawa/bitmath
|
bitmath/__init__.py
|
Bitmath.from_other
|
python
|
def from_other(cls, item):
if isinstance(item, Bitmath):
return cls(bits=item.bits)
else:
raise ValueError("The provided items must be a valid bitmath class: %s" %
str(item.__class__))
|
Factory function to return instances of `item` converted into a new
instance of ``cls``. Because this is a class method, it may be called
from any bitmath class object without the need to explicitly
instantiate the class ahead of time.
*Implicit Parameter:*
* ``cls`` A bitmath class, implicitly set to the class of the
instance object it is called on
*User Supplied Parameter:*
* ``item`` A :class:`bitmath.Bitmath` subclass instance
*Example:*
>>> import bitmath
>>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
>>> print kib
KiB(1024.0)
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L371-L398
| null |
class Bitmath(object):
"""The base class for all the other prefix classes"""
# All the allowed input types
valid_types = (int, float, long)
def __init__(self, value=0, bytes=None, bits=None):
"""Instantiate with `value` by the unit, in plain bytes, or
bits. Don't supply more than one keyword.
default behavior: initialize with value of 0
only setting value: assert bytes is None and bits is None
only setting bytes: assert value == 0 and bits is None
only setting bits: assert value == 0 and bytes is None
"""
_raise = False
if (value == 0) and (bytes is None) and (bits is None):
pass
# Setting by bytes
elif bytes is not None:
if (value == 0) and (bits is None):
pass
else:
_raise = True
# setting by bits
elif bits is not None:
if (value == 0) and (bytes is None):
pass
else:
_raise = True
if _raise:
raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
self._do_setup()
if bytes:
# We were provided with the fundamental base unit, no need
# to normalize
self._byte_value = bytes
self._bit_value = bytes * 8.0
elif bits:
# We were *ALMOST* given the fundamental base
# unit. Translate it into the fundamental unit then
# normalize.
self._byte_value = bits / 8.0
self._bit_value = bits
else:
# We were given a value representative of this *prefix
# unit*. We need to normalize it into the number of bytes
# it represents.
self._norm(value)
# We have the fundamental unit figured out. Set the 'pretty' unit
self._set_prefix_value()
def _set_prefix_value(self):
self.prefix_value = self._to_prefix_value(self._byte_value)
def _to_prefix_value(self, value):
"""Return the number of bits/bytes as they would look like if we
converted *to* this unit"""
return value / float(self._unit_value)
def _setup(self):
raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")
def _do_setup(self):
"""Setup basic parameters for this class.
`base` is the numeric base which when raised to `power` is equivalent
to 1 unit of the corresponding prefix. I.e., base=2, power=10
represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.
Likewise, for the SI prefix classes `base` will be 10, and the `power`
for the Kilobyte is 3.
"""
(self._base, self._power, self._name_singular, self._name_plural) = self._setup()
self._unit_value = self._base ** self._power
def _norm(self, value):
"""Normalize the input value into the fundamental unit for this prefix
type.
:param number value: The input value to be normalized
:raises ValueError: if the input value is not a type of real number
"""
if isinstance(value, self.valid_types):
self._byte_value = value * self._unit_value
self._bit_value = self._byte_value * 8.0
else:
raise ValueError("Initialization value '%s' is of an invalid type: %s. "
"Must be one of %s" % (
value,
type(value),
", ".join(str(x) for x in self.valid_types)))
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
def system(self):
"""The system of units used to measure an instance"""
if self._base == 2:
return "NIST"
elif self._base == 10:
return "SI"
else:
# I don't expect to ever encounter this logic branch, but
# hey, it's better to have extra test coverage than
# insufficient test coverage.
raise ValueError("Instances mathematical base is an unsupported value: %s" % (
str(self._base)))
@property
def unit(self):
"""The string that is this instances prefix unit name in agreement
with this instance value (singular or plural). Following the
convention that only 1 is singular. This will always be the singular
form when :attr:`bitmath.format_plural` is ``False`` (default value).
For example:
>>> KiB(1).unit == 'KiB'
>>> Byte(0).unit == 'Bytes'
>>> Byte(1).unit == 'Byte'
>>> Byte(1.1).unit == 'Bytes'
>>> Gb(2).unit == 'Gbs'
"""
global format_plural
if self.prefix_value == 1:
# If it's a '1', return it singular, no matter what
return self._name_singular
elif format_plural:
# Pluralization requested
return self._name_plural
else:
# Pluralization NOT requested, and the value is not 1
return self._name_singular
@property
def unit_plural(self):
"""The string that is an instances prefix unit name in the plural
form.
For example:
>>> KiB(1).unit_plural == 'KiB'
>>> Byte(1024).unit_plural == 'Bytes'
>>> Gb(1).unit_plural == 'Gb'
"""
return self._name_plural
@property
def unit_singular(self):
"""The string that is an instances prefix unit name in the singular
form.
For example:
>>> KiB(1).unit_singular == 'KiB'
>>> Byte(1024).unit == 'B'
>>> Gb(1).unit_singular == 'Gb'
"""
return self._name_singular
#: The "prefix" value of an instance
value = property(lambda s: s.prefix_value)
@classmethod
######################################################################
# The following implement the Python datamodel customization methods
#
# Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
def __repr__(self):
"""Representation of this object as you would expect to see in an
interpreter"""
global _FORMAT_REPR
return self.format(_FORMAT_REPR)
def __str__(self):
"""String representation of this object"""
global format_string
return self.format(format_string)
def format(self, fmt):
"""Return a representation of this instance formatted with user
supplied syntax"""
_fmt_params = {
'base': self.base,
'bin': self.bin,
'binary': self.binary,
'bits': self.bits,
'bytes': self.bytes,
'power': self.power,
'system': self.system,
'unit': self.unit,
'unit_plural': self.unit_plural,
'unit_singular': self.unit_singular,
'value': self.value
}
return fmt.format(**_fmt_params)
##################################################################
# Guess the best human-readable prefix unit for representation
##################################################################
def best_prefix(self, system=None):
"""Optional parameter, `system`, allows you to prefer NIST or SI in
the results. By default, the current system is used (Bit/Byte default
to NIST).
Logic discussion/notes:
Base-case, does it need converting?
If the instance is less than one Byte, return the instance as a Bit
instance.
Else, begin by recording the unit system the instance is defined
by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.
If the instance is not already a ``Byte`` instance, convert it to one.
NIST units step up by powers of 1024, SI units step up by powers of
1000.
Take integer value of the log(base=STEP_POWER) of the instance's byte
value. E.g.:
>>> int(math.log(Gb(100).bytes, 1000))
3
This will return a value >= 0. The following determines the 'best
prefix unit' for representation:
* result == 0, best represented as a Byte
* result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
* 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
"""
# Use absolute value so we don't return Bit's for *everything*
# less than Byte(1). From github issue #55
if abs(self) < Byte(1):
return Bit.from_other(self)
else:
if type(self) is Byte: # pylint: disable=unidiomatic-typecheck
_inst = self
else:
_inst = Byte.from_other(self)
# Which table to consult? Was a preferred system provided?
if system is None:
# No preference. Use existing system
if self.system == 'NIST':
_STEPS = NIST_PREFIXES
_BASE = 1024
elif self.system == 'SI':
_STEPS = SI_PREFIXES
_BASE = 1000
# Anything else would have raised by now
else:
# Preferred system provided.
if system == NIST:
_STEPS = NIST_PREFIXES
_BASE = 1024
elif system == SI:
_STEPS = SI_PREFIXES
_BASE = 1000
else:
raise ValueError("Invalid value given for 'system' parameter."
" Must be one of NIST or SI")
# Index of the string of the best prefix in the STEPS list
_index = int(math.log(abs(_inst.bytes), _BASE))
# Recall that the log() function returns >= 0. This doesn't
# map to the STEPS list 1:1. That is to say, 0 is handled with
# special care. So if the _index is 1, we actually want item 0
# in the list.
if _index == 0:
# Already a Byte() type, so return it.
return _inst
elif _index >= len(_STEPS):
# This is a really big number. Use the biggest prefix we've got
_best_prefix = _STEPS[-1]
elif 0 < _index < len(_STEPS):
# There is an appropriate prefix unit to represent this
_best_prefix = _STEPS[_index - 1]
_conversion_method = getattr(
self,
'to_%sB' % _best_prefix)
return _conversion_method()
##################################################################
def to_Bit(self):
return Bit(self._bit_value)
def to_Byte(self):
return Byte(self._byte_value / float(NIST_STEPS['Byte']))
# Properties
Bit = property(lambda s: s.to_Bit())
Byte = property(lambda s: s.to_Byte())
##################################################################
def to_KiB(self):
return KiB(bits=self._bit_value)
def to_Kib(self):
return Kib(bits=self._bit_value)
def to_kB(self):
return kB(bits=self._bit_value)
def to_kb(self):
return kb(bits=self._bit_value)
# Properties
KiB = property(lambda s: s.to_KiB())
Kib = property(lambda s: s.to_Kib())
kB = property(lambda s: s.to_kB())
kb = property(lambda s: s.to_kb())
##################################################################
def to_MiB(self):
return MiB(bits=self._bit_value)
def to_Mib(self):
return Mib(bits=self._bit_value)
def to_MB(self):
return MB(bits=self._bit_value)
def to_Mb(self):
return Mb(bits=self._bit_value)
# Properties
MiB = property(lambda s: s.to_MiB())
Mib = property(lambda s: s.to_Mib())
MB = property(lambda s: s.to_MB())
Mb = property(lambda s: s.to_Mb())
##################################################################
def to_GiB(self):
return GiB(bits=self._bit_value)
def to_Gib(self):
return Gib(bits=self._bit_value)
def to_GB(self):
return GB(bits=self._bit_value)
def to_Gb(self):
return Gb(bits=self._bit_value)
# Properties
GiB = property(lambda s: s.to_GiB())
Gib = property(lambda s: s.to_Gib())
GB = property(lambda s: s.to_GB())
Gb = property(lambda s: s.to_Gb())
##################################################################
def to_TiB(self):
return TiB(bits=self._bit_value)
def to_Tib(self):
return Tib(bits=self._bit_value)
def to_TB(self):
return TB(bits=self._bit_value)
def to_Tb(self):
return Tb(bits=self._bit_value)
# Properties
TiB = property(lambda s: s.to_TiB())
Tib = property(lambda s: s.to_Tib())
TB = property(lambda s: s.to_TB())
Tb = property(lambda s: s.to_Tb())
##################################################################
def to_PiB(self):
return PiB(bits=self._bit_value)
def to_Pib(self):
return Pib(bits=self._bit_value)
def to_PB(self):
return PB(bits=self._bit_value)
def to_Pb(self):
return Pb(bits=self._bit_value)
# Properties
PiB = property(lambda s: s.to_PiB())
Pib = property(lambda s: s.to_Pib())
PB = property(lambda s: s.to_PB())
Pb = property(lambda s: s.to_Pb())
##################################################################
def to_EiB(self):
return EiB(bits=self._bit_value)
def to_Eib(self):
return Eib(bits=self._bit_value)
def to_EB(self):
return EB(bits=self._bit_value)
def to_Eb(self):
return Eb(bits=self._bit_value)
# Properties
EiB = property(lambda s: s.to_EiB())
Eib = property(lambda s: s.to_Eib())
EB = property(lambda s: s.to_EB())
Eb = property(lambda s: s.to_Eb())
##################################################################
# The SI units go beyond the NIST units. They also have the Zetta
# and Yotta prefixes.
def to_ZB(self):
return ZB(bits=self._bit_value)
def to_Zb(self):
return Zb(bits=self._bit_value)
# Properties
ZB = property(lambda s: s.to_ZB())
Zb = property(lambda s: s.to_Zb())
##################################################################
def to_YB(self):
return YB(bits=self._bit_value)
def to_Yb(self):
return Yb(bits=self._bit_value)
#: A new object representing this instance as a Yottabyte
YB = property(lambda s: s.to_YB())
Yb = property(lambda s: s.to_Yb())
##################################################################
# Rich comparison operations
##################################################################
def __lt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value < other
else:
return self._byte_value < other.bytes
def __le__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value <= other
else:
return self._byte_value <= other.bytes
def __eq__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value == other
else:
return self._byte_value == other.bytes
def __ne__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value != other
else:
return self._byte_value != other.bytes
def __gt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value > other
else:
return self._byte_value > other.bytes
def __ge__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value >= other
else:
return self._byte_value >= other.bytes
##################################################################
# Basic math operations
##################################################################
# Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
"""These methods are called to implement the binary arithmetic
operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
instance, to evaluate the expression x + y, where x is an instance of
a class that has an __add__() method, x.__add__(y) is called. The
__divmod__() method should be the equivalent to using __floordiv__()
and __mod__(); it should not be related to __truediv__() (described
below). Note that __pow__() should be defined to accept an optional
third argument if the ternary version of the built-in pow() function
is to be supported.object.__complex__(self)
"""
def __add__(self, other):
"""Supported operations with result types:
- bm + bm = bm
- bm + num = num
- num + bm = num (see radd)
"""
if isinstance(other, numbers.Number):
# bm + num
return other + self.value
else:
# bm + bm
total_bytes = self._byte_value + other.bytes
return (type(self))(bytes=total_bytes)
def __sub__(self, other):
"""Subtraction: Supported operations with result types:
- bm - bm = bm
- bm - num = num
- num - bm = num (see rsub)
"""
if isinstance(other, numbers.Number):
# bm - num
return self.value - other
else:
# bm - bm
total_bytes = self._byte_value - other.bytes
return (type(self))(bytes=total_bytes)
def __mul__(self, other):
"""Multiplication: Supported operations with result types:
- bm1 * bm2 = bm1
- bm * num = bm
- num * bm = num (see rmul)
"""
if isinstance(other, numbers.Number):
# bm * num
result = self._byte_value * other
return (type(self))(bytes=result)
else:
# bm1 * bm2
_other = other.value * other.base ** other.power
_self = self.prefix_value * self._base ** self._power
return (type(self))(bytes=_other * _self)
"""The division operator (/) is implemented by these methods. The
__truediv__() method is used when __future__.division is in effect,
otherwise __div__() is used. If only one of these two methods is
defined, the object will not support division in the alternate
context; TypeError will be raised instead."""
def __div__(self, other):
"""Division: Supported operations with result types:
- bm1 / bm2 = num
- bm / num = bm
- num / bm = num (see rdiv)
"""
if isinstance(other, numbers.Number):
# bm / num
result = self._byte_value / other
return (type(self))(bytes=result)
else:
# bm1 / bm2
return self._byte_value / float(other.bytes)
def __truediv__(self, other):
# num / bm
return self.__div__(other)
# def __floordiv__(self, other):
# return NotImplemented
# def __mod__(self, other):
# return NotImplemented
# def __divmod__(self, other):
# return NotImplemented
# def __pow__(self, other, modulo=None):
# return NotImplemented
##################################################################
"""These methods are called to implement the binary arithmetic
operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
reflected (swapped) operands. These functions are only called if the
left operand does not support the corresponding operation and the
operands are of different types. [2] For instance, to evaluate the
expression x - y, where y is an instance of a class that has an
__rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
NotImplemented.
These are the add/sub/mul/div methods for syntax where a number type
is given for the LTYPE and a bitmath object is given for the
RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
"""
def __radd__(self, other):
# num + bm = num
return other + self.value
def __rsub__(self, other):
# num - bm = num
return other - self.value
def __rmul__(self, other):
# num * bm = bm
return self * other
def __rdiv__(self, other):
# num / bm = num
return other / float(self.value)
def __rtruediv__(self, other):
# num / bm = num
return other / float(self.value)
"""Called to implement the built-in functions complex(), int(),
long(), and float(). Should return a value of the appropriate type.
If one of those methods does not support the operation with the
supplied arguments, it should return NotImplemented.
For bitmath purposes, these methods return the int/long/float
equivalent of the this instances prefix Unix value. That is to say:
- int(KiB(3.336)) would return 3
- long(KiB(3.336)) would return 3L
- float(KiB(3.336)) would return 3.336
"""
def __int__(self):
"""Return this instances prefix unit as an integer"""
return int(self.prefix_value)
def __long__(self):
"""Return this instances prefix unit as a long integer"""
return long(self.prefix_value) # pragma: PY3X no cover
def __float__(self):
"""Return this instances prefix unit as a floating point number"""
return float(self.prefix_value)
##################################################################
# Bitwise operations
##################################################################
def __lshift__(self, other):
"""Left shift, ex: 100 << 2
A left shift by n bits is equivalent to multiplication by pow(2,
n). A long integer is returned if the result exceeds the range of
plain integers."""
shifted = int(self.bits) << other
return type(self)(bits=shifted)
def __rshift__(self, other):
"""Right shift, ex: 100 >> 2
A right shift by n bits is equivalent to division by pow(2, n)."""
shifted = int(self.bits) >> other
return type(self)(bits=shifted)
    def __and__(self, other):
        """Bitwise and, ex: 100 & 2

        Performs a "bitwise and". Each bit of the output is 1 if the
        corresponding bit of x AND of y is 1, otherwise it's 0.

        ``other`` is combined with the integer bit count of this instance;
        the result is returned as a new instance of the same prefix unit.
        """
        andd = int(self.bits) & other
        return type(self)(bits=andd)
def __xor__(self, other):
"""Bitwise xor, ex: 100 ^ 2
Does a "bitwise exclusive or". Each bit of the output is the same
as the corresponding bit in x if that bit in y is 0, and it's the
complement of the bit in x if that bit in y is 1."""
xord = int(self.bits) ^ other
return type(self)(bits=xord)
def __or__(self, other):
"""Bitwise or, ex: 100 | 2
Does a "bitwise or". Each bit of the output is 0 if the corresponding
bit of x AND of y is 0, otherwise it's 1."""
ord = int(self.bits) | other
return type(self)(bits=ord)
##################################################################
def __neg__(self):
"""The negative version of this instance"""
return (type(self))(-abs(self.prefix_value))
def __pos__(self):
return (type(self))(abs(self.prefix_value))
def __abs__(self):
return (type(self))(abs(self.prefix_value))
|
tbielawa/bitmath
|
bitmath/__init__.py
|
Bitmath.format
|
python
|
def format(self, fmt):
_fmt_params = {
'base': self.base,
'bin': self.bin,
'binary': self.binary,
'bits': self.bits,
'bytes': self.bytes,
'power': self.power,
'system': self.system,
'unit': self.unit,
'unit_plural': self.unit_plural,
'unit_singular': self.unit_singular,
'value': self.value
}
return fmt.format(**_fmt_params)
|
Return a representation of this instance formatted with user
supplied syntax
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L416-L433
| null |
class Bitmath(object):
"""The base class for all the other prefix classes"""
# All the allowed input types
valid_types = (int, float, long)
    def __init__(self, value=0, bytes=None, bits=None):
        """Instantiate with `value` by the unit, in plain bytes, or
        bits. Don't supply more than one keyword.

        default behavior: initialize with value of 0
        only setting value: assert bytes is None and bits is None
        only setting bytes: assert value == 0 and bits is None
        only setting bits: assert value == 0 and bytes is None

        :param value: quantity expressed in this class's own prefix unit
        :param bytes: quantity expressed directly in bytes
        :param bits: quantity expressed directly in bits
        :raises ValueError: if more than one of value/bytes/bits is supplied
        """
        # Mutual-exclusion check: exactly one of the three inputs may be
        # non-default. The branches only record the violation; the raise
        # happens once, below.
        _raise = False
        if (value == 0) and (bytes is None) and (bits is None):
            pass
        # Setting by bytes
        elif bytes is not None:
            if (value == 0) and (bits is None):
                pass
            else:
                _raise = True
        # setting by bits
        elif bits is not None:
            if (value == 0) and (bytes is None):
                pass
            else:
                _raise = True
        if _raise:
            raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
        self._do_setup()
        # NOTE: truthiness tests below mean bytes=0 / bits=0 fall through
        # to _norm(0); validation already guaranteed value == 0 in that
        # case, so the resulting instance is the same zero value.
        if bytes:
            # We were provided with the fundamental base unit, no need
            # to normalize
            self._byte_value = bytes
            self._bit_value = bytes * 8.0
        elif bits:
            # We were *ALMOST* given the fundamental base
            # unit. Translate it into the fundamental unit then
            # normalize.
            self._byte_value = bits / 8.0
            self._bit_value = bits
        else:
            # We were given a value representative of this *prefix
            # unit*. We need to normalize it into the number of bytes
            # it represents.
            self._norm(value)
        # We have the fundamental unit figured out. Set the 'pretty' unit
        self._set_prefix_value()
def _set_prefix_value(self):
self.prefix_value = self._to_prefix_value(self._byte_value)
def _to_prefix_value(self, value):
"""Return the number of bits/bytes as they would look like if we
converted *to* this unit"""
return value / float(self._unit_value)
def _setup(self):
raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")
def _do_setup(self):
"""Setup basic parameters for this class.
`base` is the numeric base which when raised to `power` is equivalent
to 1 unit of the corresponding prefix. I.e., base=2, power=10
represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.
Likewise, for the SI prefix classes `base` will be 10, and the `power`
for the Kilobyte is 3.
"""
(self._base, self._power, self._name_singular, self._name_plural) = self._setup()
self._unit_value = self._base ** self._power
def _norm(self, value):
"""Normalize the input value into the fundamental unit for this prefix
type.
:param number value: The input value to be normalized
:raises ValueError: if the input value is not a type of real number
"""
if isinstance(value, self.valid_types):
self._byte_value = value * self._unit_value
self._bit_value = self._byte_value * 8.0
else:
raise ValueError("Initialization value '%s' is of an invalid type: %s. "
"Must be one of %s" % (
value,
type(value),
", ".join(str(x) for x in self.valid_types)))
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
def system(self):
"""The system of units used to measure an instance"""
if self._base == 2:
return "NIST"
elif self._base == 10:
return "SI"
else:
# I don't expect to ever encounter this logic branch, but
# hey, it's better to have extra test coverage than
# insufficient test coverage.
raise ValueError("Instances mathematical base is an unsupported value: %s" % (
str(self._base)))
@property
def unit(self):
"""The string that is this instances prefix unit name in agreement
with this instance value (singular or plural). Following the
convention that only 1 is singular. This will always be the singular
form when :attr:`bitmath.format_plural` is ``False`` (default value).
For example:
>>> KiB(1).unit == 'KiB'
>>> Byte(0).unit == 'Bytes'
>>> Byte(1).unit == 'Byte'
>>> Byte(1.1).unit == 'Bytes'
>>> Gb(2).unit == 'Gbs'
"""
global format_plural
if self.prefix_value == 1:
# If it's a '1', return it singular, no matter what
return self._name_singular
elif format_plural:
# Pluralization requested
return self._name_plural
else:
# Pluralization NOT requested, and the value is not 1
return self._name_singular
@property
def unit_plural(self):
"""The string that is an instances prefix unit name in the plural
form.
For example:
>>> KiB(1).unit_plural == 'KiB'
>>> Byte(1024).unit_plural == 'Bytes'
>>> Gb(1).unit_plural == 'Gb'
"""
return self._name_plural
@property
def unit_singular(self):
"""The string that is an instances prefix unit name in the singular
form.
For example:
>>> KiB(1).unit_singular == 'KiB'
>>> Byte(1024).unit == 'B'
>>> Gb(1).unit_singular == 'Gb'
"""
return self._name_singular
#: The "prefix" value of an instance
value = property(lambda s: s.prefix_value)
@classmethod
def from_other(cls, item):
"""Factory function to return instances of `item` converted into a new
instance of ``cls``. Because this is a class method, it may be called
from any bitmath class object without the need to explicitly
instantiate the class ahead of time.
*Implicit Parameter:*
* ``cls`` A bitmath class, implicitly set to the class of the
instance object it is called on
*User Supplied Parameter:*
* ``item`` A :class:`bitmath.Bitmath` subclass instance
*Example:*
>>> import bitmath
>>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
>>> print kib
KiB(1024.0)
"""
if isinstance(item, Bitmath):
return cls(bits=item.bits)
else:
raise ValueError("The provided items must be a valid bitmath class: %s" %
str(item.__class__))
######################################################################
# The following implement the Python datamodel customization methods
#
# Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
def __repr__(self):
"""Representation of this object as you would expect to see in an
interpreter"""
global _FORMAT_REPR
return self.format(_FORMAT_REPR)
def __str__(self):
"""String representation of this object"""
global format_string
return self.format(format_string)
##################################################################
# Guess the best human-readable prefix unit for representation
##################################################################
    def best_prefix(self, system=None):
        """Return this quantity converted to its most human-readable prefix unit.

        Optional parameter, `system`, allows you to prefer NIST or SI in
        the results. By default, the current system is used (Bit/Byte default
        to NIST).

        Logic discussion/notes:

        Base-case, does it need converting?

        If the instance is less than one Byte, return the instance as a Bit
        instance.

        Else, begin by recording the unit system the instance is defined
        by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.

        If the instance is not already a ``Byte`` instance, convert it to one.

        NIST units step up by powers of 1024, SI units step up by powers of
        1000.

        Take integer value of the log(base=STEP_POWER) of the instance's byte
        value. E.g.:

        >>> int(math.log(Gb(100).bytes, 1000))
        3

        This will return a value >= 0. The following determines the 'best
        prefix unit' for representation:

        * result == 0, best represented as a Byte
        * result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
        * 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]

        :raises ValueError: if `system` is given and is neither NIST nor SI
        """
        # Use absolute value so we don't return Bit's for *everything*
        # less than Byte(1). From github issue #55
        if abs(self) < Byte(1):
            return Bit.from_other(self)
        else:
            if type(self) is Byte:  # pylint: disable=unidiomatic-typecheck
                _inst = self
            else:
                # Normalize into bytes first so the log below is computed
                # against the fundamental unit.
                _inst = Byte.from_other(self)
            # Which table to consult? Was a preferred system provided?
            if system is None:
                # No preference. Use existing system
                if self.system == 'NIST':
                    _STEPS = NIST_PREFIXES
                    _BASE = 1024
                elif self.system == 'SI':
                    _STEPS = SI_PREFIXES
                    _BASE = 1000
                # Anything else would have raised by now
            else:
                # Preferred system provided.
                if system == NIST:
                    _STEPS = NIST_PREFIXES
                    _BASE = 1024
                elif system == SI:
                    _STEPS = SI_PREFIXES
                    _BASE = 1000
                else:
                    raise ValueError("Invalid value given for 'system' parameter."
                                     " Must be one of NIST or SI")
            # Index of the string of the best prefix in the STEPS list.
            # NOTE(review): math.log is floating point; values sitting
            # exactly on a power boundary may land a hair below the integer
            # and truncate to the previous prefix — TODO confirm acceptable.
            _index = int(math.log(abs(_inst.bytes), _BASE))
            # Recall that the log() function returns >= 0. This doesn't
            # map to the STEPS list 1:1. That is to say, 0 is handled with
            # special care. So if the _index is 1, we actually want item 0
            # in the list.
            if _index == 0:
                # Already a Byte() type, so return it.
                return _inst
            elif _index >= len(_STEPS):
                # This is a really big number. Use the biggest prefix we've got
                _best_prefix = _STEPS[-1]
            elif 0 < _index < len(_STEPS):
                # There is an appropriate prefix unit to represent this
                _best_prefix = _STEPS[_index - 1]
            # Dispatch by name, e.g. prefix 'Ki' -> self.to_KiB()
            _conversion_method = getattr(
                self,
                'to_%sB' % _best_prefix)
            return _conversion_method()
##################################################################
def to_Bit(self):
return Bit(self._bit_value)
def to_Byte(self):
return Byte(self._byte_value / float(NIST_STEPS['Byte']))
# Properties
Bit = property(lambda s: s.to_Bit())
Byte = property(lambda s: s.to_Byte())
##################################################################
def to_KiB(self):
return KiB(bits=self._bit_value)
def to_Kib(self):
return Kib(bits=self._bit_value)
def to_kB(self):
return kB(bits=self._bit_value)
def to_kb(self):
return kb(bits=self._bit_value)
# Properties
KiB = property(lambda s: s.to_KiB())
Kib = property(lambda s: s.to_Kib())
kB = property(lambda s: s.to_kB())
kb = property(lambda s: s.to_kb())
##################################################################
def to_MiB(self):
return MiB(bits=self._bit_value)
def to_Mib(self):
return Mib(bits=self._bit_value)
def to_MB(self):
return MB(bits=self._bit_value)
def to_Mb(self):
return Mb(bits=self._bit_value)
# Properties
MiB = property(lambda s: s.to_MiB())
Mib = property(lambda s: s.to_Mib())
MB = property(lambda s: s.to_MB())
Mb = property(lambda s: s.to_Mb())
##################################################################
def to_GiB(self):
return GiB(bits=self._bit_value)
def to_Gib(self):
return Gib(bits=self._bit_value)
def to_GB(self):
return GB(bits=self._bit_value)
def to_Gb(self):
return Gb(bits=self._bit_value)
# Properties
GiB = property(lambda s: s.to_GiB())
Gib = property(lambda s: s.to_Gib())
GB = property(lambda s: s.to_GB())
Gb = property(lambda s: s.to_Gb())
##################################################################
def to_TiB(self):
return TiB(bits=self._bit_value)
def to_Tib(self):
return Tib(bits=self._bit_value)
def to_TB(self):
return TB(bits=self._bit_value)
def to_Tb(self):
return Tb(bits=self._bit_value)
# Properties
TiB = property(lambda s: s.to_TiB())
Tib = property(lambda s: s.to_Tib())
TB = property(lambda s: s.to_TB())
Tb = property(lambda s: s.to_Tb())
##################################################################
def to_PiB(self):
return PiB(bits=self._bit_value)
def to_Pib(self):
return Pib(bits=self._bit_value)
def to_PB(self):
return PB(bits=self._bit_value)
def to_Pb(self):
return Pb(bits=self._bit_value)
# Properties
PiB = property(lambda s: s.to_PiB())
Pib = property(lambda s: s.to_Pib())
PB = property(lambda s: s.to_PB())
Pb = property(lambda s: s.to_Pb())
##################################################################
def to_EiB(self):
return EiB(bits=self._bit_value)
def to_Eib(self):
return Eib(bits=self._bit_value)
def to_EB(self):
return EB(bits=self._bit_value)
def to_Eb(self):
return Eb(bits=self._bit_value)
# Properties
EiB = property(lambda s: s.to_EiB())
Eib = property(lambda s: s.to_Eib())
EB = property(lambda s: s.to_EB())
Eb = property(lambda s: s.to_Eb())
##################################################################
# The SI units go beyond the NIST units. They also have the Zetta
# and Yotta prefixes.
def to_ZB(self):
return ZB(bits=self._bit_value)
def to_Zb(self):
return Zb(bits=self._bit_value)
# Properties
ZB = property(lambda s: s.to_ZB())
Zb = property(lambda s: s.to_Zb())
##################################################################
def to_YB(self):
return YB(bits=self._bit_value)
def to_Yb(self):
return Yb(bits=self._bit_value)
#: A new object representing this instance as a Yottabyte
YB = property(lambda s: s.to_YB())
Yb = property(lambda s: s.to_Yb())
##################################################################
# Rich comparison operations
##################################################################
def __lt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value < other
else:
return self._byte_value < other.bytes
def __le__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value <= other
else:
return self._byte_value <= other.bytes
def __eq__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value == other
else:
return self._byte_value == other.bytes
def __ne__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value != other
else:
return self._byte_value != other.bytes
def __gt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value > other
else:
return self._byte_value > other.bytes
def __ge__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value >= other
else:
return self._byte_value >= other.bytes
##################################################################
# Basic math operations
##################################################################
# Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
"""These methods are called to implement the binary arithmetic
operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
instance, to evaluate the expression x + y, where x is an instance of
a class that has an __add__() method, x.__add__(y) is called. The
__divmod__() method should be the equivalent to using __floordiv__()
and __mod__(); it should not be related to __truediv__() (described
below). Note that __pow__() should be defined to accept an optional
third argument if the ternary version of the built-in pow() function
is to be supported.object.__complex__(self)
"""
def __add__(self, other):
"""Supported operations with result types:
- bm + bm = bm
- bm + num = num
- num + bm = num (see radd)
"""
if isinstance(other, numbers.Number):
# bm + num
return other + self.value
else:
# bm + bm
total_bytes = self._byte_value + other.bytes
return (type(self))(bytes=total_bytes)
def __sub__(self, other):
"""Subtraction: Supported operations with result types:
- bm - bm = bm
- bm - num = num
- num - bm = num (see rsub)
"""
if isinstance(other, numbers.Number):
# bm - num
return self.value - other
else:
# bm - bm
total_bytes = self._byte_value - other.bytes
return (type(self))(bytes=total_bytes)
def __mul__(self, other):
"""Multiplication: Supported operations with result types:
- bm1 * bm2 = bm1
- bm * num = bm
- num * bm = num (see rmul)
"""
if isinstance(other, numbers.Number):
# bm * num
result = self._byte_value * other
return (type(self))(bytes=result)
else:
# bm1 * bm2
_other = other.value * other.base ** other.power
_self = self.prefix_value * self._base ** self._power
return (type(self))(bytes=_other * _self)
"""The division operator (/) is implemented by these methods. The
__truediv__() method is used when __future__.division is in effect,
otherwise __div__() is used. If only one of these two methods is
defined, the object will not support division in the alternate
context; TypeError will be raised instead."""
def __div__(self, other):
"""Division: Supported operations with result types:
- bm1 / bm2 = num
- bm / num = bm
- num / bm = num (see rdiv)
"""
if isinstance(other, numbers.Number):
# bm / num
result = self._byte_value / other
return (type(self))(bytes=result)
else:
# bm1 / bm2
return self._byte_value / float(other.bytes)
def __truediv__(self, other):
# num / bm
return self.__div__(other)
# def __floordiv__(self, other):
# return NotImplemented
# def __mod__(self, other):
# return NotImplemented
# def __divmod__(self, other):
# return NotImplemented
# def __pow__(self, other, modulo=None):
# return NotImplemented
##################################################################
"""These methods are called to implement the binary arithmetic
operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
reflected (swapped) operands. These functions are only called if the
left operand does not support the corresponding operation and the
operands are of different types. [2] For instance, to evaluate the
expression x - y, where y is an instance of a class that has an
__rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
NotImplemented.
These are the add/sub/mul/div methods for syntax where a number type
is given for the LTYPE and a bitmath object is given for the
RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
"""
def __radd__(self, other):
# num + bm = num
return other + self.value
def __rsub__(self, other):
# num - bm = num
return other - self.value
def __rmul__(self, other):
# num * bm = bm
return self * other
def __rdiv__(self, other):
# num / bm = num
return other / float(self.value)
def __rtruediv__(self, other):
# num / bm = num
return other / float(self.value)
"""Called to implement the built-in functions complex(), int(),
long(), and float(). Should return a value of the appropriate type.
If one of those methods does not support the operation with the
supplied arguments, it should return NotImplemented.
For bitmath purposes, these methods return the int/long/float
equivalent of the this instances prefix Unix value. That is to say:
- int(KiB(3.336)) would return 3
- long(KiB(3.336)) would return 3L
- float(KiB(3.336)) would return 3.336
"""
def __int__(self):
"""Return this instances prefix unit as an integer"""
return int(self.prefix_value)
def __long__(self):
"""Return this instances prefix unit as a long integer"""
return long(self.prefix_value) # pragma: PY3X no cover
def __float__(self):
"""Return this instances prefix unit as a floating point number"""
return float(self.prefix_value)
##################################################################
# Bitwise operations
##################################################################
def __lshift__(self, other):
"""Left shift, ex: 100 << 2
A left shift by n bits is equivalent to multiplication by pow(2,
n). A long integer is returned if the result exceeds the range of
plain integers."""
shifted = int(self.bits) << other
return type(self)(bits=shifted)
def __rshift__(self, other):
"""Right shift, ex: 100 >> 2
A right shift by n bits is equivalent to division by pow(2, n)."""
shifted = int(self.bits) >> other
return type(self)(bits=shifted)
def __and__(self, other):
""""Bitwise and, ex: 100 & 2
bitwise and". Each bit of the output is 1 if the corresponding bit
of x AND of y is 1, otherwise it's 0."""
andd = int(self.bits) & other
return type(self)(bits=andd)
def __xor__(self, other):
"""Bitwise xor, ex: 100 ^ 2
Does a "bitwise exclusive or". Each bit of the output is the same
as the corresponding bit in x if that bit in y is 0, and it's the
complement of the bit in x if that bit in y is 1."""
xord = int(self.bits) ^ other
return type(self)(bits=xord)
def __or__(self, other):
"""Bitwise or, ex: 100 | 2
Does a "bitwise or". Each bit of the output is 0 if the corresponding
bit of x AND of y is 0, otherwise it's 1."""
ord = int(self.bits) | other
return type(self)(bits=ord)
##################################################################
def __neg__(self):
"""The negative version of this instance"""
return (type(self))(-abs(self.prefix_value))
def __pos__(self):
return (type(self))(abs(self.prefix_value))
def __abs__(self):
return (type(self))(abs(self.prefix_value))
|
tbielawa/bitmath
|
bitmath/__init__.py
|
Bitmath.best_prefix
|
python
|
def best_prefix(self, system=None):
# Use absolute value so we don't return Bit's for *everything*
# less than Byte(1). From github issue #55
if abs(self) < Byte(1):
return Bit.from_other(self)
else:
if type(self) is Byte: # pylint: disable=unidiomatic-typecheck
_inst = self
else:
_inst = Byte.from_other(self)
# Which table to consult? Was a preferred system provided?
if system is None:
# No preference. Use existing system
if self.system == 'NIST':
_STEPS = NIST_PREFIXES
_BASE = 1024
elif self.system == 'SI':
_STEPS = SI_PREFIXES
_BASE = 1000
# Anything else would have raised by now
else:
# Preferred system provided.
if system == NIST:
_STEPS = NIST_PREFIXES
_BASE = 1024
elif system == SI:
_STEPS = SI_PREFIXES
_BASE = 1000
else:
raise ValueError("Invalid value given for 'system' parameter."
" Must be one of NIST or SI")
# Index of the string of the best prefix in the STEPS list
_index = int(math.log(abs(_inst.bytes), _BASE))
# Recall that the log() function returns >= 0. This doesn't
# map to the STEPS list 1:1. That is to say, 0 is handled with
# special care. So if the _index is 1, we actually want item 0
# in the list.
if _index == 0:
# Already a Byte() type, so return it.
return _inst
elif _index >= len(_STEPS):
# This is a really big number. Use the biggest prefix we've got
_best_prefix = _STEPS[-1]
elif 0 < _index < len(_STEPS):
# There is an appropriate prefix unit to represent this
_best_prefix = _STEPS[_index - 1]
_conversion_method = getattr(
self,
'to_%sB' % _best_prefix)
return _conversion_method()
|
Optional parameter, `system`, allows you to prefer NIST or SI in
the results. By default, the current system is used (Bit/Byte default
to NIST).
Logic discussion/notes:
Base-case, does it need converting?
If the instance is less than one Byte, return the instance as a Bit
instance.
Else, begin by recording the unit system the instance is defined
by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.
If the instance is not already a ``Byte`` instance, convert it to one.
NIST units step up by powers of 1024, SI units step up by powers of
1000.
Take integer value of the log(base=STEP_POWER) of the instance's byte
value. E.g.:
>>> int(math.log(Gb(100).bytes, 1000))
3
This will return a value >= 0. The following determines the 'best
prefix unit' for representation:
* result == 0, best represented as a Byte
* result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
* 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L439-L528
|
[
" def from_other(cls, item):\n \"\"\"Factory function to return instances of `item` converted into a new\ninstance of ``cls``. Because this is a class method, it may be called\nfrom any bitmath class object without the need to explicitly\ninstantiate the class ahead of time.\n\n*Implicit Parameter:*\n\n* ``cls`` A bitmath class, implicitly set to the class of the\n instance object it is called on\n\n*User Supplied Parameter:*\n\n* ``item`` A :class:`bitmath.Bitmath` subclass instance\n\n*Example:*\n\n >>> import bitmath\n >>> kib = bitmath.KiB.from_other(bitmath.MiB(1))\n >>> print kib\n KiB(1024.0)\n\n \"\"\"\n if isinstance(item, Bitmath):\n return cls(bits=item.bits)\n else:\n raise ValueError(\"The provided items must be a valid bitmath class: %s\" %\n str(item.__class__))\n"
] |
class Bitmath(object):
"""The base class for all the other prefix classes"""
# All the allowed input types
valid_types = (int, float, long)
def __init__(self, value=0, bytes=None, bits=None):
"""Instantiate with `value` by the unit, in plain bytes, or
bits. Don't supply more than one keyword.
default behavior: initialize with value of 0
only setting value: assert bytes is None and bits is None
only setting bytes: assert value == 0 and bits is None
only setting bits: assert value == 0 and bytes is None
"""
_raise = False
if (value == 0) and (bytes is None) and (bits is None):
pass
# Setting by bytes
elif bytes is not None:
if (value == 0) and (bits is None):
pass
else:
_raise = True
# setting by bits
elif bits is not None:
if (value == 0) and (bytes is None):
pass
else:
_raise = True
if _raise:
raise ValueError("Only one parameter of: value, bytes, or bits is allowed")
self._do_setup()
if bytes:
# We were provided with the fundamental base unit, no need
# to normalize
self._byte_value = bytes
self._bit_value = bytes * 8.0
elif bits:
# We were *ALMOST* given the fundamental base
# unit. Translate it into the fundamental unit then
# normalize.
self._byte_value = bits / 8.0
self._bit_value = bits
else:
# We were given a value representative of this *prefix
# unit*. We need to normalize it into the number of bytes
# it represents.
self._norm(value)
# We have the fundamental unit figured out. Set the 'pretty' unit
self._set_prefix_value()
def _set_prefix_value(self):
self.prefix_value = self._to_prefix_value(self._byte_value)
def _to_prefix_value(self, value):
"""Return the number of bits/bytes as they would look like if we
converted *to* this unit"""
return value / float(self._unit_value)
def _setup(self):
raise NotImplementedError("The base 'bitmath.Bitmath' class can not be used directly")
def _do_setup(self):
"""Setup basic parameters for this class.
`base` is the numeric base which when raised to `power` is equivalent
to 1 unit of the corresponding prefix. I.e., base=2, power=10
represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte.
Likewise, for the SI prefix classes `base` will be 10, and the `power`
for the Kilobyte is 3.
"""
(self._base, self._power, self._name_singular, self._name_plural) = self._setup()
self._unit_value = self._base ** self._power
def _norm(self, value):
"""Normalize the input value into the fundamental unit for this prefix
type.
:param number value: The input value to be normalized
:raises ValueError: if the input value is not a type of real number
"""
if isinstance(value, self.valid_types):
self._byte_value = value * self._unit_value
self._bit_value = self._byte_value * 8.0
else:
raise ValueError("Initialization value '%s' is of an invalid type: %s. "
"Must be one of %s" % (
value,
type(value),
", ".join(str(x) for x in self.valid_types)))
##################################################################
# Properties
#: The mathematical base of an instance
base = property(lambda s: s._base)
binary = property(lambda s: bin(int(s.bits)))
"""The binary representation of an instance in binary 1s and 0s. Note
that for very large numbers this will mean a lot of 1s and 0s. For
example, GiB(100) would be represented as::
0b1100100000000000000000000000000000000000
That leading ``0b`` is normal. That's how Python represents binary.
"""
#: Alias for :attr:`binary`
bin = property(lambda s: s.binary)
#: The number of bits in an instance
bits = property(lambda s: s._bit_value)
#: The number of bytes in an instance
bytes = property(lambda s: s._byte_value)
#: The mathematical power of an instance
power = property(lambda s: s._power)
@property
def system(self):
"""The system of units used to measure an instance"""
if self._base == 2:
return "NIST"
elif self._base == 10:
return "SI"
else:
# I don't expect to ever encounter this logic branch, but
# hey, it's better to have extra test coverage than
# insufficient test coverage.
raise ValueError("Instances mathematical base is an unsupported value: %s" % (
str(self._base)))
@property
def unit(self):
"""The string that is this instances prefix unit name in agreement
with this instance value (singular or plural). Following the
convention that only 1 is singular. This will always be the singular
form when :attr:`bitmath.format_plural` is ``False`` (default value).
For example:
>>> KiB(1).unit == 'KiB'
>>> Byte(0).unit == 'Bytes'
>>> Byte(1).unit == 'Byte'
>>> Byte(1.1).unit == 'Bytes'
>>> Gb(2).unit == 'Gbs'
"""
global format_plural
if self.prefix_value == 1:
# If it's a '1', return it singular, no matter what
return self._name_singular
elif format_plural:
# Pluralization requested
return self._name_plural
else:
# Pluralization NOT requested, and the value is not 1
return self._name_singular
@property
def unit_plural(self):
"""The string that is an instances prefix unit name in the plural
form.
For example:
>>> KiB(1).unit_plural == 'KiB'
>>> Byte(1024).unit_plural == 'Bytes'
>>> Gb(1).unit_plural == 'Gb'
"""
return self._name_plural
@property
def unit_singular(self):
"""The string that is an instances prefix unit name in the singular
form.
For example:
>>> KiB(1).unit_singular == 'KiB'
>>> Byte(1024).unit == 'B'
>>> Gb(1).unit_singular == 'Gb'
"""
return self._name_singular
#: The "prefix" value of an instance
value = property(lambda s: s.prefix_value)
@classmethod
def from_other(cls, item):
"""Factory function to return instances of `item` converted into a new
instance of ``cls``. Because this is a class method, it may be called
from any bitmath class object without the need to explicitly
instantiate the class ahead of time.
*Implicit Parameter:*
* ``cls`` A bitmath class, implicitly set to the class of the
instance object it is called on
*User Supplied Parameter:*
* ``item`` A :class:`bitmath.Bitmath` subclass instance
*Example:*
>>> import bitmath
>>> kib = bitmath.KiB.from_other(bitmath.MiB(1))
>>> print kib
KiB(1024.0)
"""
if isinstance(item, Bitmath):
return cls(bits=item.bits)
else:
raise ValueError("The provided items must be a valid bitmath class: %s" %
str(item.__class__))
######################################################################
# The following implement the Python datamodel customization methods
#
# Reference: http://docs.python.org/2.7/reference/datamodel.html#basic-customization
def __repr__(self):
"""Representation of this object as you would expect to see in an
interpreter"""
global _FORMAT_REPR
return self.format(_FORMAT_REPR)
def __str__(self):
"""String representation of this object"""
global format_string
return self.format(format_string)
def format(self, fmt):
"""Return a representation of this instance formatted with user
supplied syntax"""
_fmt_params = {
'base': self.base,
'bin': self.bin,
'binary': self.binary,
'bits': self.bits,
'bytes': self.bytes,
'power': self.power,
'system': self.system,
'unit': self.unit,
'unit_plural': self.unit_plural,
'unit_singular': self.unit_singular,
'value': self.value
}
return fmt.format(**_fmt_params)
##################################################################
# Guess the best human-readable prefix unit for representation
##################################################################
##################################################################
def to_Bit(self):
return Bit(self._bit_value)
def to_Byte(self):
return Byte(self._byte_value / float(NIST_STEPS['Byte']))
# Properties
Bit = property(lambda s: s.to_Bit())
Byte = property(lambda s: s.to_Byte())
##################################################################
def to_KiB(self):
return KiB(bits=self._bit_value)
def to_Kib(self):
return Kib(bits=self._bit_value)
def to_kB(self):
return kB(bits=self._bit_value)
def to_kb(self):
return kb(bits=self._bit_value)
# Properties
KiB = property(lambda s: s.to_KiB())
Kib = property(lambda s: s.to_Kib())
kB = property(lambda s: s.to_kB())
kb = property(lambda s: s.to_kb())
##################################################################
def to_MiB(self):
return MiB(bits=self._bit_value)
def to_Mib(self):
return Mib(bits=self._bit_value)
def to_MB(self):
return MB(bits=self._bit_value)
def to_Mb(self):
return Mb(bits=self._bit_value)
# Properties
MiB = property(lambda s: s.to_MiB())
Mib = property(lambda s: s.to_Mib())
MB = property(lambda s: s.to_MB())
Mb = property(lambda s: s.to_Mb())
##################################################################
def to_GiB(self):
return GiB(bits=self._bit_value)
def to_Gib(self):
return Gib(bits=self._bit_value)
def to_GB(self):
return GB(bits=self._bit_value)
def to_Gb(self):
return Gb(bits=self._bit_value)
# Properties
GiB = property(lambda s: s.to_GiB())
Gib = property(lambda s: s.to_Gib())
GB = property(lambda s: s.to_GB())
Gb = property(lambda s: s.to_Gb())
##################################################################
def to_TiB(self):
return TiB(bits=self._bit_value)
def to_Tib(self):
return Tib(bits=self._bit_value)
def to_TB(self):
return TB(bits=self._bit_value)
def to_Tb(self):
return Tb(bits=self._bit_value)
# Properties
TiB = property(lambda s: s.to_TiB())
Tib = property(lambda s: s.to_Tib())
TB = property(lambda s: s.to_TB())
Tb = property(lambda s: s.to_Tb())
##################################################################
def to_PiB(self):
return PiB(bits=self._bit_value)
def to_Pib(self):
return Pib(bits=self._bit_value)
def to_PB(self):
return PB(bits=self._bit_value)
def to_Pb(self):
return Pb(bits=self._bit_value)
# Properties
PiB = property(lambda s: s.to_PiB())
Pib = property(lambda s: s.to_Pib())
PB = property(lambda s: s.to_PB())
Pb = property(lambda s: s.to_Pb())
##################################################################
def to_EiB(self):
return EiB(bits=self._bit_value)
def to_Eib(self):
return Eib(bits=self._bit_value)
def to_EB(self):
return EB(bits=self._bit_value)
def to_Eb(self):
return Eb(bits=self._bit_value)
# Properties
EiB = property(lambda s: s.to_EiB())
Eib = property(lambda s: s.to_Eib())
EB = property(lambda s: s.to_EB())
Eb = property(lambda s: s.to_Eb())
##################################################################
# The SI units go beyond the NIST units. They also have the Zetta
# and Yotta prefixes.
def to_ZB(self):
return ZB(bits=self._bit_value)
def to_Zb(self):
return Zb(bits=self._bit_value)
# Properties
ZB = property(lambda s: s.to_ZB())
Zb = property(lambda s: s.to_Zb())
##################################################################
def to_YB(self):
return YB(bits=self._bit_value)
def to_Yb(self):
return Yb(bits=self._bit_value)
#: A new object representing this instance as a Yottabyte
YB = property(lambda s: s.to_YB())
Yb = property(lambda s: s.to_Yb())
##################################################################
# Rich comparison operations
##################################################################
def __lt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value < other
else:
return self._byte_value < other.bytes
def __le__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value <= other
else:
return self._byte_value <= other.bytes
def __eq__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value == other
else:
return self._byte_value == other.bytes
def __ne__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value != other
else:
return self._byte_value != other.bytes
def __gt__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value > other
else:
return self._byte_value > other.bytes
def __ge__(self, other):
if isinstance(other, numbers.Number):
return self.prefix_value >= other
else:
return self._byte_value >= other.bytes
##################################################################
# Basic math operations
##################################################################
# Reference: http://docs.python.org/2.7/reference/datamodel.html#emulating-numeric-types
"""These methods are called to implement the binary arithmetic
operations (+, -, *, //, %, divmod(), pow(), **, <<, >>, &, ^, |). For
instance, to evaluate the expression x + y, where x is an instance of
a class that has an __add__() method, x.__add__(y) is called. The
__divmod__() method should be the equivalent to using __floordiv__()
and __mod__(); it should not be related to __truediv__() (described
below). Note that __pow__() should be defined to accept an optional
third argument if the ternary version of the built-in pow() function
is to be supported.object.__complex__(self)
"""
def __add__(self, other):
"""Supported operations with result types:
- bm + bm = bm
- bm + num = num
- num + bm = num (see radd)
"""
if isinstance(other, numbers.Number):
# bm + num
return other + self.value
else:
# bm + bm
total_bytes = self._byte_value + other.bytes
return (type(self))(bytes=total_bytes)
def __sub__(self, other):
"""Subtraction: Supported operations with result types:
- bm - bm = bm
- bm - num = num
- num - bm = num (see rsub)
"""
if isinstance(other, numbers.Number):
# bm - num
return self.value - other
else:
# bm - bm
total_bytes = self._byte_value - other.bytes
return (type(self))(bytes=total_bytes)
def __mul__(self, other):
"""Multiplication: Supported operations with result types:
- bm1 * bm2 = bm1
- bm * num = bm
- num * bm = num (see rmul)
"""
if isinstance(other, numbers.Number):
# bm * num
result = self._byte_value * other
return (type(self))(bytes=result)
else:
# bm1 * bm2
_other = other.value * other.base ** other.power
_self = self.prefix_value * self._base ** self._power
return (type(self))(bytes=_other * _self)
"""The division operator (/) is implemented by these methods. The
__truediv__() method is used when __future__.division is in effect,
otherwise __div__() is used. If only one of these two methods is
defined, the object will not support division in the alternate
context; TypeError will be raised instead."""
def __div__(self, other):
"""Division: Supported operations with result types:
- bm1 / bm2 = num
- bm / num = bm
- num / bm = num (see rdiv)
"""
if isinstance(other, numbers.Number):
# bm / num
result = self._byte_value / other
return (type(self))(bytes=result)
else:
# bm1 / bm2
return self._byte_value / float(other.bytes)
def __truediv__(self, other):
# num / bm
return self.__div__(other)
# def __floordiv__(self, other):
# return NotImplemented
# def __mod__(self, other):
# return NotImplemented
# def __divmod__(self, other):
# return NotImplemented
# def __pow__(self, other, modulo=None):
# return NotImplemented
##################################################################
"""These methods are called to implement the binary arithmetic
operations (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with
reflected (swapped) operands. These functions are only called if the
left operand does not support the corresponding operation and the
operands are of different types. [2] For instance, to evaluate the
expression x - y, where y is an instance of a class that has an
__rsub__() method, y.__rsub__(x) is called if x.__sub__(y) returns
NotImplemented.
These are the add/sub/mul/div methods for syntax where a number type
is given for the LTYPE and a bitmath object is given for the
RTYPE. E.g., 3 * MiB(3), or 10 / GB(42)
"""
def __radd__(self, other):
# num + bm = num
return other + self.value
def __rsub__(self, other):
# num - bm = num
return other - self.value
def __rmul__(self, other):
# num * bm = bm
return self * other
def __rdiv__(self, other):
# num / bm = num
return other / float(self.value)
def __rtruediv__(self, other):
# num / bm = num
return other / float(self.value)
"""Called to implement the built-in functions complex(), int(),
long(), and float(). Should return a value of the appropriate type.
If one of those methods does not support the operation with the
supplied arguments, it should return NotImplemented.
For bitmath purposes, these methods return the int/long/float
equivalent of the this instances prefix Unix value. That is to say:
- int(KiB(3.336)) would return 3
- long(KiB(3.336)) would return 3L
- float(KiB(3.336)) would return 3.336
"""
def __int__(self):
"""Return this instances prefix unit as an integer"""
return int(self.prefix_value)
def __long__(self):
"""Return this instances prefix unit as a long integer"""
return long(self.prefix_value) # pragma: PY3X no cover
def __float__(self):
"""Return this instances prefix unit as a floating point number"""
return float(self.prefix_value)
##################################################################
# Bitwise operations
##################################################################
def __lshift__(self, other):
"""Left shift, ex: 100 << 2
A left shift by n bits is equivalent to multiplication by pow(2,
n). A long integer is returned if the result exceeds the range of
plain integers."""
shifted = int(self.bits) << other
return type(self)(bits=shifted)
def __rshift__(self, other):
"""Right shift, ex: 100 >> 2
A right shift by n bits is equivalent to division by pow(2, n)."""
shifted = int(self.bits) >> other
return type(self)(bits=shifted)
def __and__(self, other):
""""Bitwise and, ex: 100 & 2
bitwise and". Each bit of the output is 1 if the corresponding bit
of x AND of y is 1, otherwise it's 0."""
andd = int(self.bits) & other
return type(self)(bits=andd)
def __xor__(self, other):
"""Bitwise xor, ex: 100 ^ 2
Does a "bitwise exclusive or". Each bit of the output is the same
as the corresponding bit in x if that bit in y is 0, and it's the
complement of the bit in x if that bit in y is 1."""
xord = int(self.bits) ^ other
return type(self)(bits=xord)
def __or__(self, other):
"""Bitwise or, ex: 100 | 2
Does a "bitwise or". Each bit of the output is 0 if the corresponding
bit of x AND of y is 0, otherwise it's 1."""
ord = int(self.bits) | other
return type(self)(bits=ord)
##################################################################
def __neg__(self):
"""The negative version of this instance"""
return (type(self))(-abs(self.prefix_value))
def __pos__(self):
return (type(self))(abs(self.prefix_value))
def __abs__(self):
return (type(self))(abs(self.prefix_value))
|
tbielawa/bitmath
|
bitmath/__init__.py
|
Bit._norm
|
python
|
def _norm(self, value):
self._bit_value = value * self._unit_value
self._byte_value = self._bit_value / 8.0
|
Normalize the input value into the fundamental unit for this prefix
type
|
train
|
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L1091-L1095
| null |
class Bit(Bitmath):
"""Bit based types fundamentally operate on self._bit_value"""
def _set_prefix_value(self):
self.prefix_value = self._to_prefix_value(self._bit_value)
def _setup(self):
return (2, 0, 'Bit', 'Bits')
|
disqus/nydus
|
nydus/db/routers/keyvalue.py
|
ConsistentHashingRouter._route
|
python
|
def _route(self, attr, args, kwargs, **fkwargs):
key = get_key(args, kwargs)
found = self._hash.get_node(key)
if not found and len(self._down_connections) > 0:
raise self.HostListExhausted()
return [i for i, h in self.cluster.hosts.iteritems()
if h.identifier == found]
|
The first argument is assumed to be the ``key`` for routing.
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/keyvalue.py#L66-L79
| null |
class ConsistentHashingRouter(RoundRobinRouter):
"""
Router that returns host number based on a consistent hashing algorithm.
The consistent hashing algorithm only works if a key argument is provided.
If a key is not provided, then all hosts are returned.
The first argument is assumed to be the ``key`` for routing. Keyword arguments
are not supported.
"""
def __init__(self, *args, **kwargs):
self._db_num_id_map = {}
super(ConsistentHashingRouter, self).__init__(*args, **kwargs)
def mark_connection_down(self, db_num):
db_num = self.ensure_db_num(db_num)
self._hash.remove_node(self._db_num_id_map[db_num])
super(ConsistentHashingRouter, self).mark_connection_down(db_num)
def mark_connection_up(self, db_num):
db_num = self.ensure_db_num(db_num)
self._hash.add_node(self._db_num_id_map[db_num])
super(ConsistentHashingRouter, self).mark_connection_up(db_num)
@routing_params
def _setup_router(self, args, kwargs, **fkwargs):
self._db_num_id_map = dict([(db_num, host.identifier) for db_num, host in self.cluster.hosts.iteritems()])
self._hash = Ketama(self._db_num_id_map.values())
return True
@routing_params
def _pre_routing(self, *args, **kwargs):
self.check_down_connections()
return super(ConsistentHashingRouter, self)._pre_routing(*args, **kwargs)
@routing_params
|
disqus/nydus
|
nydus/db/routers/keyvalue.py
|
PartitionRouter._route
|
python
|
def _route(self, attr, args, kwargs, **fkwargs):
key = get_key(args, kwargs)
return [crc32(str(key)) % len(self.cluster)]
|
The first argument is assumed to be the ``key`` for routing.
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/keyvalue.py#L84-L90
| null |
class PartitionRouter(BaseRouter):
@routing_params
|
disqus/nydus
|
nydus/db/promise.py
|
promise_method
|
python
|
def promise_method(func):
name = func.__name__
@wraps(func)
def wrapped(self, *args, **kwargs):
cls_name = type(self).__name__
if getattr(self, '_%s__resolved' % (cls_name,)):
return getattr(getattr(self, '_%s__wrapped' % (cls_name,)), name)(*args, **kwargs)
return func(self, *args, **kwargs)
return wrapped
|
A decorator which ensures that once a method has been marked as resolved
(via Class.__resolved)) will then propagate the attribute (function) call
upstream.
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/promise.py#L13-L27
| null |
"""
nydus.db.promise
~~~~~~~~~~~~~~~~
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from nydus.db.exceptions import CommandError
from functools import wraps
def change_resolution(command, value):
"""
Public API to change the resolution of an already resolved EventualCommand result value.
"""
command._EventualCommand__wrapped = value
command._EventualCommand__resolved = True
class EventualCommand(object):
# introspection support:
__members__ = property(lambda self: self.__dir__())
def __init__(self, attr, args=None, kwargs=None):
self.__attr = attr
self.__called = False
self.__wrapped = None
self.__resolved = False
self.__args = args or []
self.__kwargs = kwargs or {}
self.__ident = ':'.join(map(lambda x: str(hash(str(x))), [self.__attr, self.__args, self.__kwargs]))
def __call__(self, *args, **kwargs):
self.__called = True
self.__args = args
self.__kwargs = kwargs
self.__ident = ':'.join(map(lambda x: str(hash(str(x))), [self.__attr, self.__args, self.__kwargs]))
return self
def __hash__(self):
# We return our ident
return hash(self.__ident)
def __repr__(self):
if self.__resolved:
return repr(self.__wrapped)
return u'<EventualCommand: %s args=%s kwargs=%s>' % (self.__attr, self.__args, self.__kwargs)
def __str__(self):
if self.__resolved:
return str(self.__wrapped)
return repr(self)
def __unicode__(self):
if self.__resolved:
return unicode(self.__wrapped)
return unicode(repr(self))
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def __setattr__(self, name, value):
if name.startswith('_%s' % (type(self).__name__,)):
return object.__setattr__(self, name, value)
return setattr(self.__wrapped, name, value)
def __delattr__(self, name):
if name.startswith('_%s' % (type(self).__name__,)):
raise TypeError("can't delete %s." % name)
delattr(self.__wrapped, name)
def __deepcopy__(self, memo):
from copy import deepcopy
return deepcopy(self.__wrapped, memo)
# Need to pretend to be the wrapped class, for the sake of objects that care
# about this (especially in equality tests)
def __get_class(self):
return self.__wrapped.__class__
__class__ = property(__get_class)
def __dict__(self):
try:
return vars(self.__wrapped)
except RuntimeError:
return AttributeError('__dict__')
__dict__ = property(__dict__)
def __setitem__(self, key, value):
self.__wrapped[key] = value
def __delitem__(self, key):
del self.__wrapped[key]
def __setslice__(self, i, j, seq):
self.__wrapped[i:j] = seq
def __delslice__(self, i, j):
del self.__wrapped[i:j]
def __instancecheck__(self, cls):
if self._wrapped is None:
return False
return isinstance(self._wrapped, cls)
__lt__ = lambda x, o: x.__wrapped < o
__le__ = lambda x, o: x.__wrapped <= o
__eq__ = lambda x, o: x.__wrapped == o
__ne__ = lambda x, o: x.__wrapped != o
__gt__ = lambda x, o: x.__wrapped > o
__ge__ = lambda x, o: x.__wrapped >= o
__cmp__ = lambda x, o: cmp(x.__wrapped, o)
# attributes are currently not callable
# __call__ = lambda x, *a, **kw: x.__wrapped(*a, **kw)
__nonzero__ = lambda x: bool(x.__wrapped)
__len__ = lambda x: len(x.__wrapped)
__getitem__ = lambda x, i: x.__wrapped[i]
__iter__ = lambda x: iter(x.__wrapped)
__contains__ = lambda x, i: i in x.__wrapped
__getslice__ = lambda x, i, j: x.__wrapped[i:j]
__add__ = lambda x, o: x.__wrapped + o
__sub__ = lambda x, o: x.__wrapped - o
__mul__ = lambda x, o: x.__wrapped * o
__floordiv__ = lambda x, o: x.__wrapped // o
__mod__ = lambda x, o: x.__wrapped % o
__divmod__ = lambda x, o: x.__wrapped.__divmod__(o)
__pow__ = lambda x, o: x.__wrapped ** o
__lshift__ = lambda x, o: x.__wrapped << o
__rshift__ = lambda x, o: x.__wrapped >> o
__and__ = lambda x, o: x.__wrapped & o
__xor__ = lambda x, o: x.__wrapped ^ o
__or__ = lambda x, o: x.__wrapped | o
__div__ = lambda x, o: x.__wrapped.__div__(o)
__truediv__ = lambda x, o: x.__wrapped.__truediv__(o)
__neg__ = lambda x: -(x.__wrapped)
__pos__ = lambda x: +(x.__wrapped)
__abs__ = lambda x: abs(x.__wrapped)
__invert__ = lambda x: ~(x.__wrapped)
__complex__ = lambda x: complex(x.__wrapped)
__int__ = lambda x: int(x.__wrapped)
__long__ = lambda x: long(x.__wrapped)
__float__ = lambda x: float(x.__wrapped)
__oct__ = lambda x: oct(x.__wrapped)
__hex__ = lambda x: hex(x.__wrapped)
__index__ = lambda x: x.__wrapped.__index__()
__coerce__ = lambda x, o: x.__coerce__(x, o)
__enter__ = lambda x: x.__enter__()
__exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
@property
def is_error(self):
return isinstance(self.__wrapped, CommandError)
@promise_method
def was_called(self):
return self.__called
@promise_method
def resolve(self, conn):
value = getattr(conn, self.__attr)(*self.__args, **self.__kwargs)
return self.resolve_as(value)
@promise_method
def resolve_as(self, value):
self.__wrapped = value
self.__resolved = True
return value
@promise_method
def get_command(self):
return (self.__attr, self.__args, self.__kwargs)
@promise_method
def get_name(self):
return self.__attr
@promise_method
def get_args(self):
return self.__args
@promise_method
def get_kwargs(self):
return self.__kwargs
@promise_method
def set_args(self, args):
self.__args = args
@promise_method
def set_kwargs(self, kwargs):
self.__kwargs = kwargs
@promise_method
def clone(self):
return EventualCommand(self.__attr, self.__args, self.__kwargs)
|
disqus/nydus
|
nydus/db/base.py
|
BaseCluster.get_conn
|
python
|
def get_conn(self, *args, **kwargs):
connections = self.__connections_for('get_conn', args=args, kwargs=kwargs)
if len(connections) is 1:
return connections[0]
else:
return connections
|
Returns a connection object from the router given ``args``.
Useful in cases where a connection cannot be automatically determined
during all steps of the process. An example of this would be
Redis pipelines.
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/base.py#L100-L113
| null |
class BaseCluster(object):
"""
Holds a cluster of connections.
"""
class MaxRetriesExceededError(Exception):
pass
def __init__(self, hosts, backend, router=BaseRouter, max_connection_retries=20, defaults=None):
self.hosts = dict(
(conn_number, create_connection(backend, conn_number, host_settings, defaults))
for conn_number, host_settings
in iter_hosts(hosts)
)
self.max_connection_retries = max_connection_retries
self.install_router(router)
def __len__(self):
return len(self.hosts)
def __getitem__(self, name):
return self.hosts[name]
def __getattr__(self, name):
return CallProxy(self, name)
def __iter__(self):
for name in self.hosts.iterkeys():
yield name
def install_router(self, router):
self.router = router(self)
def execute(self, path, args, kwargs):
connections = self.__connections_for(path, args=args, kwargs=kwargs)
results = []
for conn in connections:
for retry in xrange(self.max_connection_retries):
func = conn
for piece in path.split('.'):
func = getattr(func, piece)
try:
results.append(func(*args, **kwargs))
except tuple(conn.retryable_exceptions), e:
if not self.router.retryable:
raise e
elif retry == self.max_connection_retries - 1:
raise self.MaxRetriesExceededError(e)
else:
conn = self.__connections_for(path, retry_for=conn.num, args=args, kwargs=kwargs)[0]
else:
break
# If we only had one db to query, we simply return that res
if len(results) == 1:
return results[0]
else:
return results
def disconnect(self):
"""Disconnects all connections in cluster"""
for connection in self.hosts.itervalues():
connection.disconnect()
def map(self, workers=None, **kwargs):
return DistributedContextManager(self, workers, **kwargs)
@routing_params
def __connections_for(self, attr, args, kwargs, **fkwargs):
return [self[n] for n in self.router.get_dbs(attr=attr, args=args, kwargs=kwargs, **fkwargs)]
|
disqus/nydus
|
nydus/db/routers/base.py
|
BaseRouter.get_dbs
|
python
|
def get_dbs(self, attr, args, kwargs, **fkwargs):
if not self._ready:
if not self.setup_router(args=args, kwargs=kwargs, **fkwargs):
raise self.UnableToSetupRouter()
retval = self._pre_routing(attr=attr, args=args, kwargs=kwargs, **fkwargs)
if retval is not None:
args, kwargs = retval
if not (args or kwargs):
return self.cluster.hosts.keys()
try:
db_nums = self._route(attr=attr, args=args, kwargs=kwargs, **fkwargs)
except Exception as e:
self._handle_exception(e)
db_nums = []
return self._post_routing(attr=attr, db_nums=db_nums, args=args, kwargs=kwargs, **fkwargs)
|
Returns a list of db keys to route the given call to.
:param attr: Name of attribute being called on the connection.
:param args: List of arguments being passed to ``attr``.
:param kwargs: Dictionary of keyword arguments being passed to ``attr``.
>>> redis = Cluster(router=BaseRouter)
>>> router = redis.router
>>> router.get_dbs('incr', args=('key name', 1))
[0,1,2]
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L50-L81
|
[
"def _handle_exception(self, e):\n \"\"\"\n Handle/transform exceptions and return it\n \"\"\"\n raise e\n"
] |
class BaseRouter(object):
"""
Handles routing requests to a specific connection in a single cluster.
For the most part, all public functions will receive arguments as ``key=value``
pairs and should expect as much. Functions which receive ``args`` and ``kwargs``
from the calling function will receive default values for those, and need not
worry about handling missing arguments.
"""
retryable = False
class UnableToSetupRouter(Exception):
pass
def __init__(self, cluster=None, *args, **kwargs):
self._ready = False
self.cluster = cluster
@routing_params
# Backwards compatibilty
get_db = get_dbs
@routing_params
def setup_router(self, args, kwargs, **fkwargs):
"""
Call method to perform any setup
"""
self._ready = self._setup_router(args=args, kwargs=kwargs, **fkwargs)
return self._ready
@routing_params
def _setup_router(self, args, kwargs, **fkwargs):
"""
Perform any initialization for the router
Returns False if setup could not be completed
"""
return True
@routing_params
def _pre_routing(self, attr, args, kwargs, **fkwargs):
"""
Perform any prerouting with this method and return the key
"""
return args, kwargs
@routing_params
def _route(self, attr, args, kwargs, **fkwargs):
"""
Perform routing and return db_nums
"""
return self.cluster.hosts.keys()
@routing_params
def _post_routing(self, attr, db_nums, args, kwargs, **fkwargs):
"""
Perform any postrouting actions and return db_nums
"""
return db_nums
def _handle_exception(self, e):
"""
Handle/transform exceptions and return it
"""
raise e
|
disqus/nydus
|
nydus/db/routers/base.py
|
BaseRouter.setup_router
|
python
|
def setup_router(self, args, kwargs, **fkwargs):
self._ready = self._setup_router(args=args, kwargs=kwargs, **fkwargs)
return self._ready
|
Call method to perform any setup
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L87-L93
| null |
class BaseRouter(object):
"""
Handles routing requests to a specific connection in a single cluster.
For the most part, all public functions will receive arguments as ``key=value``
pairs and should expect as much. Functions which receive ``args`` and ``kwargs``
from the calling function will receive default values for those, and need not
worry about handling missing arguments.
"""
retryable = False
class UnableToSetupRouter(Exception):
pass
def __init__(self, cluster=None, *args, **kwargs):
self._ready = False
self.cluster = cluster
@routing_params
def get_dbs(self, attr, args, kwargs, **fkwargs):
"""
Returns a list of db keys to route the given call to.
:param attr: Name of attribute being called on the connection.
:param args: List of arguments being passed to ``attr``.
:param kwargs: Dictionary of keyword arguments being passed to ``attr``.
>>> redis = Cluster(router=BaseRouter)
>>> router = redis.router
>>> router.get_dbs('incr', args=('key name', 1))
[0,1,2]
"""
if not self._ready:
if not self.setup_router(args=args, kwargs=kwargs, **fkwargs):
raise self.UnableToSetupRouter()
retval = self._pre_routing(attr=attr, args=args, kwargs=kwargs, **fkwargs)
if retval is not None:
args, kwargs = retval
if not (args or kwargs):
return self.cluster.hosts.keys()
try:
db_nums = self._route(attr=attr, args=args, kwargs=kwargs, **fkwargs)
except Exception as e:
self._handle_exception(e)
db_nums = []
return self._post_routing(attr=attr, db_nums=db_nums, args=args, kwargs=kwargs, **fkwargs)
# Backwards compatibilty
get_db = get_dbs
@routing_params
@routing_params
def _setup_router(self, args, kwargs, **fkwargs):
"""
Perform any initialization for the router
Returns False if setup could not be completed
"""
return True
@routing_params
def _pre_routing(self, attr, args, kwargs, **fkwargs):
"""
Perform any prerouting with this method and return the key
"""
return args, kwargs
@routing_params
def _route(self, attr, args, kwargs, **fkwargs):
"""
Perform routing and return db_nums
"""
return self.cluster.hosts.keys()
@routing_params
def _post_routing(self, attr, db_nums, args, kwargs, **fkwargs):
"""
Perform any postrouting actions and return db_nums
"""
return db_nums
def _handle_exception(self, e):
"""
Handle/transform exceptions and return it
"""
raise e
|
disqus/nydus
|
nydus/db/routers/base.py
|
BaseRouter._route
|
python
|
def _route(self, attr, args, kwargs, **fkwargs):
return self.cluster.hosts.keys()
|
Perform routing and return db_nums
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L111-L115
| null |
class BaseRouter(object):
"""
Handles routing requests to a specific connection in a single cluster.
For the most part, all public functions will receive arguments as ``key=value``
pairs and should expect as much. Functions which receive ``args`` and ``kwargs``
from the calling function will receive default values for those, and need not
worry about handling missing arguments.
"""
retryable = False
class UnableToSetupRouter(Exception):
pass
def __init__(self, cluster=None, *args, **kwargs):
self._ready = False
self.cluster = cluster
@routing_params
def get_dbs(self, attr, args, kwargs, **fkwargs):
"""
Returns a list of db keys to route the given call to.
:param attr: Name of attribute being called on the connection.
:param args: List of arguments being passed to ``attr``.
:param kwargs: Dictionary of keyword arguments being passed to ``attr``.
>>> redis = Cluster(router=BaseRouter)
>>> router = redis.router
>>> router.get_dbs('incr', args=('key name', 1))
[0,1,2]
"""
if not self._ready:
if not self.setup_router(args=args, kwargs=kwargs, **fkwargs):
raise self.UnableToSetupRouter()
retval = self._pre_routing(attr=attr, args=args, kwargs=kwargs, **fkwargs)
if retval is not None:
args, kwargs = retval
if not (args or kwargs):
return self.cluster.hosts.keys()
try:
db_nums = self._route(attr=attr, args=args, kwargs=kwargs, **fkwargs)
except Exception as e:
self._handle_exception(e)
db_nums = []
return self._post_routing(attr=attr, db_nums=db_nums, args=args, kwargs=kwargs, **fkwargs)
# Backwards compatibilty
get_db = get_dbs
@routing_params
def setup_router(self, args, kwargs, **fkwargs):
"""
Call method to perform any setup
"""
self._ready = self._setup_router(args=args, kwargs=kwargs, **fkwargs)
return self._ready
@routing_params
def _setup_router(self, args, kwargs, **fkwargs):
"""
Perform any initialization for the router
Returns False if setup could not be completed
"""
return True
@routing_params
def _pre_routing(self, attr, args, kwargs, **fkwargs):
"""
Perform any prerouting with this method and return the key
"""
return args, kwargs
@routing_params
@routing_params
def _post_routing(self, attr, db_nums, args, kwargs, **fkwargs):
"""
Perform any postrouting actions and return db_nums
"""
return db_nums
def _handle_exception(self, e):
"""
Handle/transform exceptions and return it
"""
raise e
|
disqus/nydus
|
nydus/db/routers/base.py
|
RoundRobinRouter.check_down_connections
|
python
|
def check_down_connections(self):
now = time.time()
for db_num, marked_down_at in self._down_connections.items():
if marked_down_at + self.retry_timeout <= now:
self.mark_connection_up(db_num)
|
Iterates through all connections which were previously listed as unavailable
and marks any that have expired their retry_timeout as being up.
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L175-L184
| null |
class RoundRobinRouter(BaseRouter):
"""
Basic retry router that performs round robin
"""
# Raised if all hosts in the hash have been marked as down
class HostListExhausted(Exception):
pass
class InvalidDBNum(Exception):
pass
# If this router can be retried on if a particular db index it gave out did
# not work
retryable = True
# How many requests to serve in a situation when a host is down before
# the down hosts are assesed for readmittance back into the pool of serving
# requests.
#
# If the attempt_reconnect_threshold is hit, it does not guarantee that the
# down hosts will be put back - only that the router will CHECK to see if
# the hosts CAN be put back. The elegibility of a host being put back is
# handlede in the check_down_connections method, which by default will
# readmit a host if it was marked down more than retry_timeout seconds ago.
attempt_reconnect_threshold = 100000
# Number of seconds a host must be marked down before it is elligable to be
# put back in the pool and retried.
retry_timeout = 30
def __init__(self, *args, **kwargs):
self._get_db_attempts = 0
self._down_connections = {}
super(RoundRobinRouter, self).__init__(*args, **kwargs)
@classmethod
def ensure_db_num(cls, db_num):
try:
return int(db_num)
except ValueError:
raise cls.InvalidDBNum()
def flush_down_connections(self):
"""
Marks all connections which were previously listed as unavailable as being up.
"""
self._get_db_attempts = 0
for db_num in self._down_connections.keys():
self.mark_connection_up(db_num)
def mark_connection_down(self, db_num):
db_num = self.ensure_db_num(db_num)
self._down_connections[db_num] = time.time()
def mark_connection_up(self, db_num):
db_num = self.ensure_db_num(db_num)
self._down_connections.pop(db_num, None)
@routing_params
def _setup_router(self, args, kwargs, **fkwargs):
self._hosts_cycler = cycle(self.cluster.hosts.keys())
return True
@routing_params
def _pre_routing(self, attr, args, kwargs, retry_for=None, **fkwargs):
self._get_db_attempts += 1
if self._get_db_attempts > self.attempt_reconnect_threshold:
self.check_down_connections()
if retry_for is not None:
self.mark_connection_down(retry_for)
return args, kwargs
@routing_params
def _route(self, attr, args, kwargs, **fkwargs):
now = time.time()
for i in xrange(len(self.cluster)):
db_num = self._hosts_cycler.next()
marked_down_at = self._down_connections.get(db_num, False)
if not marked_down_at or (marked_down_at + self.retry_timeout <= now):
return [db_num]
else:
raise self.HostListExhausted()
@routing_params
def _post_routing(self, attr, db_nums, args, kwargs, **fkwargs):
if db_nums and db_nums[0] in self._down_connections:
self.mark_connection_up(db_nums[0])
return db_nums
|
disqus/nydus
|
nydus/db/routers/base.py
|
RoundRobinRouter.flush_down_connections
|
python
|
def flush_down_connections(self):
self._get_db_attempts = 0
for db_num in self._down_connections.keys():
self.mark_connection_up(db_num)
|
Marks all connections which were previously listed as unavailable as being up.
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L186-L192
|
[
"def mark_connection_up(self, db_num):\n db_num = self.ensure_db_num(db_num)\n self._down_connections.pop(db_num, None)\n"
] |
class RoundRobinRouter(BaseRouter):
"""
Basic retry router that performs round robin
"""
# Raised if all hosts in the hash have been marked as down
class HostListExhausted(Exception):
pass
class InvalidDBNum(Exception):
pass
# If this router can be retried on if a particular db index it gave out did
# not work
retryable = True
# How many requests to serve in a situation when a host is down before
# the down hosts are assesed for readmittance back into the pool of serving
# requests.
#
# If the attempt_reconnect_threshold is hit, it does not guarantee that the
# down hosts will be put back - only that the router will CHECK to see if
# the hosts CAN be put back. The elegibility of a host being put back is
# handlede in the check_down_connections method, which by default will
# readmit a host if it was marked down more than retry_timeout seconds ago.
attempt_reconnect_threshold = 100000
# Number of seconds a host must be marked down before it is elligable to be
# put back in the pool and retried.
retry_timeout = 30
def __init__(self, *args, **kwargs):
self._get_db_attempts = 0
self._down_connections = {}
super(RoundRobinRouter, self).__init__(*args, **kwargs)
@classmethod
def ensure_db_num(cls, db_num):
try:
return int(db_num)
except ValueError:
raise cls.InvalidDBNum()
def check_down_connections(self):
"""
Iterates through all connections which were previously listed as unavailable
and marks any that have expired their retry_timeout as being up.
"""
now = time.time()
for db_num, marked_down_at in self._down_connections.items():
if marked_down_at + self.retry_timeout <= now:
self.mark_connection_up(db_num)
def mark_connection_down(self, db_num):
db_num = self.ensure_db_num(db_num)
self._down_connections[db_num] = time.time()
def mark_connection_up(self, db_num):
db_num = self.ensure_db_num(db_num)
self._down_connections.pop(db_num, None)
@routing_params
def _setup_router(self, args, kwargs, **fkwargs):
self._hosts_cycler = cycle(self.cluster.hosts.keys())
return True
@routing_params
def _pre_routing(self, attr, args, kwargs, retry_for=None, **fkwargs):
self._get_db_attempts += 1
if self._get_db_attempts > self.attempt_reconnect_threshold:
self.check_down_connections()
if retry_for is not None:
self.mark_connection_down(retry_for)
return args, kwargs
@routing_params
def _route(self, attr, args, kwargs, **fkwargs):
now = time.time()
for i in xrange(len(self.cluster)):
db_num = self._hosts_cycler.next()
marked_down_at = self._down_connections.get(db_num, False)
if not marked_down_at or (marked_down_at + self.retry_timeout <= now):
return [db_num]
else:
raise self.HostListExhausted()
@routing_params
def _post_routing(self, attr, db_nums, args, kwargs, **fkwargs):
if db_nums and db_nums[0] in self._down_connections:
self.mark_connection_up(db_nums[0])
return db_nums
|
disqus/nydus
|
nydus/contrib/ketama.py
|
Ketama._build_circle
|
python
|
def _build_circle(self):
total_weight = 0
for node in self._nodes:
total_weight += self._weights.get(node, 1)
for node in self._nodes:
weight = self._weights.get(node, 1)
ks = math.floor((40 * len(self._nodes) * weight) / total_weight)
for i in xrange(0, int(ks)):
b_key = self._md5_digest('%s-%s-salt' % (node, i))
for l in xrange(0, 4):
key = ((b_key[3 + l * 4] << 24)
| (b_key[2 + l * 4] << 16)
| (b_key[1 + l * 4] << 8)
| b_key[l * 4])
self._hashring[key] = node
self._sorted_keys.append(key)
self._sorted_keys.sort()
|
Creates hash ring.
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/contrib/ketama.py#L35-L60
|
[
"def _md5_digest(self, key):\n return map(ord, hashlib.md5(key).digest())\n"
] |
class Ketama(object):
def __init__(self, nodes=None, weights=None):
"""
nodes - List of nodes(strings)
weights - Dictionary of node wheights where keys are nodes names.
if not set, all nodes will be equal.
"""
self._hashring = dict()
self._sorted_keys = []
self._nodes = set(nodes or [])
self._weights = weights if weights else {}
self._build_circle()
def _get_node_pos(self, key):
"""
Return node position(integer) for a given key. Else return None
"""
if not self._hashring:
return None
key = self._gen_key(key)
nodes = self._sorted_keys
pos = bisect(nodes, key)
if pos == len(nodes):
return 0
return pos
def _gen_key(self, key):
"""
Return long integer for a given key, that represent it place on
the hash ring.
"""
b_key = self._md5_digest(key)
return self._hashi(b_key, lambda x: x)
def _hashi(self, b_key, fn):
return ((b_key[fn(3)] << 24)
| (b_key[fn(2)] << 16)
| (b_key[fn(1)] << 8)
| b_key[fn(0)])
def _md5_digest(self, key):
return map(ord, hashlib.md5(key).digest())
def remove_node(self, node):
"""
Removes node from circle and rebuild it.
"""
try:
self._nodes.remove(node)
del self._weights[node]
except (KeyError, ValueError):
pass
self._hashring = dict()
self._sorted_keys = []
self._build_circle()
def add_node(self, node, weight=1):
"""
Adds node to circle and rebuild it.
"""
self._nodes.add(node)
self._weights[node] = weight
self._hashring = dict()
self._sorted_keys = []
self._build_circle()
def get_node(self, key):
"""
Return node for a given key. Else return None.
"""
pos = self._get_node_pos(key)
if pos is None:
return None
return self._hashring[self._sorted_keys[pos]]
|
disqus/nydus
|
nydus/contrib/ketama.py
|
Ketama._get_node_pos
|
python
|
def _get_node_pos(self, key):
if not self._hashring:
return None
key = self._gen_key(key)
nodes = self._sorted_keys
pos = bisect(nodes, key)
if pos == len(nodes):
return 0
return pos
|
Return node position(integer) for a given key. Else return None
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/contrib/ketama.py#L62-L76
|
[
"def _gen_key(self, key):\n \"\"\"\n Return long integer for a given key, that represent it place on\n the hash ring.\n \"\"\"\n b_key = self._md5_digest(key)\n return self._hashi(b_key, lambda x: x)\n"
] |
class Ketama(object):
def __init__(self, nodes=None, weights=None):
"""
nodes - List of nodes(strings)
weights - Dictionary of node wheights where keys are nodes names.
if not set, all nodes will be equal.
"""
self._hashring = dict()
self._sorted_keys = []
self._nodes = set(nodes or [])
self._weights = weights if weights else {}
self._build_circle()
def _build_circle(self):
"""
Creates hash ring.
"""
total_weight = 0
for node in self._nodes:
total_weight += self._weights.get(node, 1)
for node in self._nodes:
weight = self._weights.get(node, 1)
ks = math.floor((40 * len(self._nodes) * weight) / total_weight)
for i in xrange(0, int(ks)):
b_key = self._md5_digest('%s-%s-salt' % (node, i))
for l in xrange(0, 4):
key = ((b_key[3 + l * 4] << 24)
| (b_key[2 + l * 4] << 16)
| (b_key[1 + l * 4] << 8)
| b_key[l * 4])
self._hashring[key] = node
self._sorted_keys.append(key)
self._sorted_keys.sort()
def _gen_key(self, key):
"""
Return long integer for a given key, that represent it place on
the hash ring.
"""
b_key = self._md5_digest(key)
return self._hashi(b_key, lambda x: x)
def _hashi(self, b_key, fn):
return ((b_key[fn(3)] << 24)
| (b_key[fn(2)] << 16)
| (b_key[fn(1)] << 8)
| b_key[fn(0)])
def _md5_digest(self, key):
return map(ord, hashlib.md5(key).digest())
def remove_node(self, node):
"""
Removes node from circle and rebuild it.
"""
try:
self._nodes.remove(node)
del self._weights[node]
except (KeyError, ValueError):
pass
self._hashring = dict()
self._sorted_keys = []
self._build_circle()
def add_node(self, node, weight=1):
"""
Adds node to circle and rebuild it.
"""
self._nodes.add(node)
self._weights[node] = weight
self._hashring = dict()
self._sorted_keys = []
self._build_circle()
def get_node(self, key):
"""
Return node for a given key. Else return None.
"""
pos = self._get_node_pos(key)
if pos is None:
return None
return self._hashring[self._sorted_keys[pos]]
|
disqus/nydus
|
nydus/contrib/ketama.py
|
Ketama._gen_key
|
python
|
def _gen_key(self, key):
b_key = self._md5_digest(key)
return self._hashi(b_key, lambda x: x)
|
Return long integer for a given key, that represent it place on
the hash ring.
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/contrib/ketama.py#L78-L84
|
[
"def _hashi(self, b_key, fn):\n return ((b_key[fn(3)] << 24)\n | (b_key[fn(2)] << 16)\n | (b_key[fn(1)] << 8)\n | b_key[fn(0)])\n",
"def _md5_digest(self, key):\n return map(ord, hashlib.md5(key).digest())\n"
] |
class Ketama(object):
def __init__(self, nodes=None, weights=None):
"""
nodes - List of nodes(strings)
weights - Dictionary of node wheights where keys are nodes names.
if not set, all nodes will be equal.
"""
self._hashring = dict()
self._sorted_keys = []
self._nodes = set(nodes or [])
self._weights = weights if weights else {}
self._build_circle()
def _build_circle(self):
"""
Creates hash ring.
"""
total_weight = 0
for node in self._nodes:
total_weight += self._weights.get(node, 1)
for node in self._nodes:
weight = self._weights.get(node, 1)
ks = math.floor((40 * len(self._nodes) * weight) / total_weight)
for i in xrange(0, int(ks)):
b_key = self._md5_digest('%s-%s-salt' % (node, i))
for l in xrange(0, 4):
key = ((b_key[3 + l * 4] << 24)
| (b_key[2 + l * 4] << 16)
| (b_key[1 + l * 4] << 8)
| b_key[l * 4])
self._hashring[key] = node
self._sorted_keys.append(key)
self._sorted_keys.sort()
def _get_node_pos(self, key):
"""
Return node position(integer) for a given key. Else return None
"""
if not self._hashring:
return None
key = self._gen_key(key)
nodes = self._sorted_keys
pos = bisect(nodes, key)
if pos == len(nodes):
return 0
return pos
def _hashi(self, b_key, fn):
return ((b_key[fn(3)] << 24)
| (b_key[fn(2)] << 16)
| (b_key[fn(1)] << 8)
| b_key[fn(0)])
def _md5_digest(self, key):
return map(ord, hashlib.md5(key).digest())
def remove_node(self, node):
"""
Removes node from circle and rebuild it.
"""
try:
self._nodes.remove(node)
del self._weights[node]
except (KeyError, ValueError):
pass
self._hashring = dict()
self._sorted_keys = []
self._build_circle()
def add_node(self, node, weight=1):
"""
Adds node to circle and rebuild it.
"""
self._nodes.add(node)
self._weights[node] = weight
self._hashring = dict()
self._sorted_keys = []
self._build_circle()
def get_node(self, key):
"""
Return node for a given key. Else return None.
"""
pos = self._get_node_pos(key)
if pos is None:
return None
return self._hashring[self._sorted_keys[pos]]
|
disqus/nydus
|
nydus/contrib/ketama.py
|
Ketama.remove_node
|
python
|
def remove_node(self, node):
try:
self._nodes.remove(node)
del self._weights[node]
except (KeyError, ValueError):
pass
self._hashring = dict()
self._sorted_keys = []
self._build_circle()
|
Removes node from circle and rebuild it.
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/contrib/ketama.py#L95-L107
|
[
"def _build_circle(self):\n \"\"\"\n Creates hash ring.\n \"\"\"\n total_weight = 0\n for node in self._nodes:\n total_weight += self._weights.get(node, 1)\n\n for node in self._nodes:\n weight = self._weights.get(node, 1)\n\n ks = math.floor((40 * len(self._nodes) * weight) / total_weight)\n\n for i in xrange(0, int(ks)):\n b_key = self._md5_digest('%s-%s-salt' % (node, i))\n\n for l in xrange(0, 4):\n key = ((b_key[3 + l * 4] << 24)\n | (b_key[2 + l * 4] << 16)\n | (b_key[1 + l * 4] << 8)\n | b_key[l * 4])\n\n self._hashring[key] = node\n self._sorted_keys.append(key)\n\n self._sorted_keys.sort()\n"
] |
class Ketama(object):
def __init__(self, nodes=None, weights=None):
"""
nodes - List of nodes(strings)
weights - Dictionary of node wheights where keys are nodes names.
if not set, all nodes will be equal.
"""
self._hashring = dict()
self._sorted_keys = []
self._nodes = set(nodes or [])
self._weights = weights if weights else {}
self._build_circle()
def _build_circle(self):
"""
Creates hash ring.
"""
total_weight = 0
for node in self._nodes:
total_weight += self._weights.get(node, 1)
for node in self._nodes:
weight = self._weights.get(node, 1)
ks = math.floor((40 * len(self._nodes) * weight) / total_weight)
for i in xrange(0, int(ks)):
b_key = self._md5_digest('%s-%s-salt' % (node, i))
for l in xrange(0, 4):
key = ((b_key[3 + l * 4] << 24)
| (b_key[2 + l * 4] << 16)
| (b_key[1 + l * 4] << 8)
| b_key[l * 4])
self._hashring[key] = node
self._sorted_keys.append(key)
self._sorted_keys.sort()
def _get_node_pos(self, key):
"""
Return node position(integer) for a given key. Else return None
"""
if not self._hashring:
return None
key = self._gen_key(key)
nodes = self._sorted_keys
pos = bisect(nodes, key)
if pos == len(nodes):
return 0
return pos
def _gen_key(self, key):
"""
Return long integer for a given key, that represent it place on
the hash ring.
"""
b_key = self._md5_digest(key)
return self._hashi(b_key, lambda x: x)
def _hashi(self, b_key, fn):
return ((b_key[fn(3)] << 24)
| (b_key[fn(2)] << 16)
| (b_key[fn(1)] << 8)
| b_key[fn(0)])
def _md5_digest(self, key):
return map(ord, hashlib.md5(key).digest())
def add_node(self, node, weight=1):
"""
Adds node to circle and rebuild it.
"""
self._nodes.add(node)
self._weights[node] = weight
self._hashring = dict()
self._sorted_keys = []
self._build_circle()
def get_node(self, key):
"""
Return node for a given key. Else return None.
"""
pos = self._get_node_pos(key)
if pos is None:
return None
return self._hashring[self._sorted_keys[pos]]
|
disqus/nydus
|
nydus/contrib/ketama.py
|
Ketama.add_node
|
python
|
def add_node(self, node, weight=1):
self._nodes.add(node)
self._weights[node] = weight
self._hashring = dict()
self._sorted_keys = []
self._build_circle()
|
Adds node to circle and rebuild it.
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/contrib/ketama.py#L109-L118
|
[
"def _build_circle(self):\n \"\"\"\n Creates hash ring.\n \"\"\"\n total_weight = 0\n for node in self._nodes:\n total_weight += self._weights.get(node, 1)\n\n for node in self._nodes:\n weight = self._weights.get(node, 1)\n\n ks = math.floor((40 * len(self._nodes) * weight) / total_weight)\n\n for i in xrange(0, int(ks)):\n b_key = self._md5_digest('%s-%s-salt' % (node, i))\n\n for l in xrange(0, 4):\n key = ((b_key[3 + l * 4] << 24)\n | (b_key[2 + l * 4] << 16)\n | (b_key[1 + l * 4] << 8)\n | b_key[l * 4])\n\n self._hashring[key] = node\n self._sorted_keys.append(key)\n\n self._sorted_keys.sort()\n"
] |
class Ketama(object):
def __init__(self, nodes=None, weights=None):
"""
nodes - List of nodes(strings)
weights - Dictionary of node wheights where keys are nodes names.
if not set, all nodes will be equal.
"""
self._hashring = dict()
self._sorted_keys = []
self._nodes = set(nodes or [])
self._weights = weights if weights else {}
self._build_circle()
def _build_circle(self):
"""
Creates hash ring.
"""
total_weight = 0
for node in self._nodes:
total_weight += self._weights.get(node, 1)
for node in self._nodes:
weight = self._weights.get(node, 1)
ks = math.floor((40 * len(self._nodes) * weight) / total_weight)
for i in xrange(0, int(ks)):
b_key = self._md5_digest('%s-%s-salt' % (node, i))
for l in xrange(0, 4):
key = ((b_key[3 + l * 4] << 24)
| (b_key[2 + l * 4] << 16)
| (b_key[1 + l * 4] << 8)
| b_key[l * 4])
self._hashring[key] = node
self._sorted_keys.append(key)
self._sorted_keys.sort()
def _get_node_pos(self, key):
"""
Return node position(integer) for a given key. Else return None
"""
if not self._hashring:
return None
key = self._gen_key(key)
nodes = self._sorted_keys
pos = bisect(nodes, key)
if pos == len(nodes):
return 0
return pos
def _gen_key(self, key):
"""
Return long integer for a given key, that represent it place on
the hash ring.
"""
b_key = self._md5_digest(key)
return self._hashi(b_key, lambda x: x)
def _hashi(self, b_key, fn):
return ((b_key[fn(3)] << 24)
| (b_key[fn(2)] << 16)
| (b_key[fn(1)] << 8)
| b_key[fn(0)])
def _md5_digest(self, key):
return map(ord, hashlib.md5(key).digest())
def remove_node(self, node):
"""
Removes node from circle and rebuild it.
"""
try:
self._nodes.remove(node)
del self._weights[node]
except (KeyError, ValueError):
pass
self._hashring = dict()
self._sorted_keys = []
self._build_circle()
def get_node(self, key):
"""
Return node for a given key. Else return None.
"""
pos = self._get_node_pos(key)
if pos is None:
return None
return self._hashring[self._sorted_keys[pos]]
|
disqus/nydus
|
nydus/db/__init__.py
|
create_cluster
|
python
|
def create_cluster(settings):
# Pull in our client
settings = copy.deepcopy(settings)
backend = settings.pop('engine', settings.pop('backend', None))
if isinstance(backend, basestring):
Conn = import_string(backend)
elif backend:
Conn = backend
else:
raise KeyError('backend')
# Pull in our cluster
cluster = settings.pop('cluster', None)
if not cluster:
Cluster = Conn.get_cluster()
elif isinstance(cluster, basestring):
Cluster = import_string(cluster)
else:
Cluster = cluster
# Pull in our router
router = settings.pop('router', None)
if not router:
Router = BaseRouter
elif isinstance(router, basestring):
Router = import_string(router)
else:
Router = router
# Build the connection cluster
return Cluster(
router=Router,
backend=Conn,
**settings
)
|
Creates a new Nydus cluster from the given settings.
:param settings: Dictionary of the cluster settings.
:returns: Configured instance of ``nydus.db.base.Cluster``.
>>> redis = create_cluster({
>>> 'backend': 'nydus.db.backends.redis.Redis',
>>> 'router': 'nydus.db.routers.redis.PartitionRouter',
>>> 'defaults': {
>>> 'host': 'localhost',
>>> 'port': 6379,
>>> },
>>> 'hosts': {
>>> 0: {'db': 0},
>>> 1: {'db': 1},
>>> 2: {'db': 2},
>>> }
>>> })
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/__init__.py#L28-L82
|
[
"def import_string(import_name, silent=False):\n \"\"\"Imports an object based on a string. If *silent* is True the return\n value will be None if the import fails.\n\n Simplified version of the function with same name from `Werkzeug`_.\n\n :param import_name:\n The dotted name for the object to import.\n :param silent:\n If True, import errors are ignored and None is returned instead.\n :returns:\n The imported object.\n \"\"\"\n import_name = str(import_name)\n try:\n if '.' in import_name:\n module, obj = import_name.rsplit('.', 1)\n return getattr(__import__(module, None, None, [obj]), obj)\n else:\n return __import__(import_name)\n except (ImportError, AttributeError):\n if not silent:\n raise\n"
] |
"""
nydus.db
~~~~~~~~
Disqus generic connections wrappers.
>>> from nydus.db import create_cluster
>>> redis = create_cluster({
>>> 'backend': 'nydus.db.backends.redis.Redis',
>>> })
>>> res = conn.incr('foo')
>>> assert res == 1
:copyright: (c) 2011-2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
__all__ = ('create_cluster', 'connections', 'Cluster')
import copy
from nydus import conf
from nydus.db.base import LazyConnectionHandler
from nydus.db.routers.base import BaseRouter
from nydus.utils import import_string, apply_defaults
connections = LazyConnectionHandler(lambda: conf.CONNECTIONS)
|
disqus/nydus
|
nydus/db/backends/memcache.py
|
grouped_command
|
python
|
def grouped_command(commands):
base = commands[0]
name = base.get_name()
multi_command = EventualCommand('%s_multi' % name)
if name in ('get', 'delete'):
args = [c.get_args()[0] for c in commands]
elif base.get_name() == 'set':
args = dict(c.get_args()[0:2] for c in commands)
else:
raise ValueError('Command not supported: %r' % (base.get_name(),))
multi_command(args, *grouped_args_for_command(base), **base.get_kwargs())
return multi_command
|
Given a list of commands (which are assumed groupable), return
a new command which is a batch (multi) command.
For ``set`` commands the outcome will be::
set_multi({key: value}, **kwargs)
For ``get`` and ``delete`` commands, the outcome will be::
get_multi(list_of_keys, **kwargs)
(Or respectively ``delete_multi``)
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/backends/memcache.py#L67-L94
|
[
"def grouped_args_for_command(command):\n \"\"\"\n Returns a list of arguments that are shared for this command.\n\n When comparing similar commands, these arguments represent the\n groupable signature for said commands.\n \"\"\"\n if command.get_name() == 'set':\n return command.get_args()[2:]\n return command.get_args()[1:]\n"
] |
"""
nydus.db.backends.memcache
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import
import pylibmc
from itertools import izip
from nydus.db.backends import BaseConnection, BasePipeline
from nydus.db.promise import EventualCommand
from nydus.utils import peek
class Memcache(BaseConnection):
retryable_exceptions = frozenset([pylibmc.Error])
supports_pipelines = True
def __init__(self, num, host='localhost', port=11211, binary=True,
behaviors=None, **options):
self.host = host
self.port = port
self.binary = binary
self.behaviors = behaviors
super(Memcache, self).__init__(num)
@property
def identifier(self):
mapping = vars(self)
return "memcache://%(host)s:%(port)s/" % mapping
def connect(self):
host = "%s:%i" % (self.host, self.port)
return pylibmc.Client([host], binary=self.binary, behaviors=self.behaviors)
def disconnect(self):
self.connection.disconnect_all()
def get_pipeline(self, *args, **kwargs):
return MemcachePipeline(self)
class MemcachePipeline(BasePipeline):
def execute(self):
grouped = regroup_commands(self.pending)
results = resolve_grouped_commands(grouped, self.connection)
return results
def grouped_args_for_command(command):
"""
Returns a list of arguments that are shared for this command.
When comparing similar commands, these arguments represent the
groupable signature for said commands.
"""
if command.get_name() == 'set':
return command.get_args()[2:]
return command.get_args()[1:]
def can_group_commands(command, next_command):
"""
Returns a boolean representing whether these commands can be
grouped together or not.
A few things are taken into account for this decision:
For ``set`` commands:
- Are all arguments other than the key/value the same?
For ``delete`` and ``get`` commands:
- Are all arguments other than the key the same?
"""
multi_capable_commands = ('get', 'set', 'delete')
if next_command is None:
return False
name = command.get_name()
# TODO: support multi commands
if name not in multi_capable_commands:
return False
if name != next_command.get_name():
return False
# if the shared args (key, or key/value) do not match, we cannot group
if grouped_args_for_command(command) != grouped_args_for_command(next_command):
return False
# If the keyword arguments do not much (e.g. key_prefix, or timeout on set)
# then we cannot group
if command.get_kwargs() != next_command.get_kwargs():
return False
return True
def regroup_commands(commands):
"""
Returns a list of tuples:
[(command_to_run, [list, of, commands])]
If the list of commands has a single item, the command was not grouped.
"""
grouped = []
pending = []
def group_pending():
if not pending:
return
new_command = grouped_command(pending)
result = []
while pending:
result.append(pending.pop(0))
grouped.append((new_command, result))
for command, next_command in peek(commands):
# if the previous command was a get, and this is a set we must execute
# any pending commands
# TODO: unless this command is a get_multi and it matches the same option
# signature
if can_group_commands(command, next_command):
# if previous command does not match this command
if pending and not can_group_commands(pending[0], command):
group_pending()
pending.append(command)
else:
# if pending exists for this command, group it
if pending and can_group_commands(pending[0], command):
pending.append(command)
else:
grouped.append((command.clone(), [command]))
# We couldn't group with previous command, so ensure we bubble up
group_pending()
group_pending()
return grouped
def resolve_grouped_commands(grouped, connection):
results = {}
for master_command, grouped_commands in grouped:
result = master_command.resolve(connection)
# this command was not grouped
if len(grouped_commands) == 1:
results[grouped_commands[0]] = result
else:
if isinstance(result, dict):
# XXX: assume first arg is key
for command in grouped_commands:
results[command] = result.get(command.get_args()[0])
else:
for command, value in izip(grouped_commands, result):
results[command] = value
return results
|
disqus/nydus
|
nydus/db/backends/memcache.py
|
can_group_commands
|
python
|
def can_group_commands(command, next_command):
multi_capable_commands = ('get', 'set', 'delete')
if next_command is None:
return False
name = command.get_name()
# TODO: support multi commands
if name not in multi_capable_commands:
return False
if name != next_command.get_name():
return False
# if the shared args (key, or key/value) do not match, we cannot group
if grouped_args_for_command(command) != grouped_args_for_command(next_command):
return False
# If the keyword arguments do not much (e.g. key_prefix, or timeout on set)
# then we cannot group
if command.get_kwargs() != next_command.get_kwargs():
return False
return True
|
Returns a boolean representing whether these commands can be
grouped together or not.
A few things are taken into account for this decision:
For ``set`` commands:
- Are all arguments other than the key/value the same?
For ``delete`` and ``get`` commands:
- Are all arguments other than the key the same?
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/backends/memcache.py#L97-L135
|
[
"def grouped_args_for_command(command):\n \"\"\"\n Returns a list of arguments that are shared for this command.\n\n When comparing similar commands, these arguments represent the\n groupable signature for said commands.\n \"\"\"\n if command.get_name() == 'set':\n return command.get_args()[2:]\n return command.get_args()[1:]\n"
] |
"""
nydus.db.backends.memcache
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import
import pylibmc
from itertools import izip
from nydus.db.backends import BaseConnection, BasePipeline
from nydus.db.promise import EventualCommand
from nydus.utils import peek
class Memcache(BaseConnection):
retryable_exceptions = frozenset([pylibmc.Error])
supports_pipelines = True
def __init__(self, num, host='localhost', port=11211, binary=True,
behaviors=None, **options):
self.host = host
self.port = port
self.binary = binary
self.behaviors = behaviors
super(Memcache, self).__init__(num)
@property
def identifier(self):
mapping = vars(self)
return "memcache://%(host)s:%(port)s/" % mapping
def connect(self):
host = "%s:%i" % (self.host, self.port)
return pylibmc.Client([host], binary=self.binary, behaviors=self.behaviors)
def disconnect(self):
self.connection.disconnect_all()
def get_pipeline(self, *args, **kwargs):
return MemcachePipeline(self)
class MemcachePipeline(BasePipeline):
def execute(self):
grouped = regroup_commands(self.pending)
results = resolve_grouped_commands(grouped, self.connection)
return results
def grouped_args_for_command(command):
"""
Returns a list of arguments that are shared for this command.
When comparing similar commands, these arguments represent the
groupable signature for said commands.
"""
if command.get_name() == 'set':
return command.get_args()[2:]
return command.get_args()[1:]
def grouped_command(commands):
"""
Given a list of commands (which are assumed groupable), return
a new command which is a batch (multi) command.
For ``set`` commands the outcome will be::
set_multi({key: value}, **kwargs)
For ``get`` and ``delete`` commands, the outcome will be::
get_multi(list_of_keys, **kwargs)
(Or respectively ``delete_multi``)
"""
base = commands[0]
name = base.get_name()
multi_command = EventualCommand('%s_multi' % name)
if name in ('get', 'delete'):
args = [c.get_args()[0] for c in commands]
elif base.get_name() == 'set':
args = dict(c.get_args()[0:2] for c in commands)
else:
raise ValueError('Command not supported: %r' % (base.get_name(),))
multi_command(args, *grouped_args_for_command(base), **base.get_kwargs())
return multi_command
def regroup_commands(commands):
"""
Returns a list of tuples:
[(command_to_run, [list, of, commands])]
If the list of commands has a single item, the command was not grouped.
"""
grouped = []
pending = []
def group_pending():
if not pending:
return
new_command = grouped_command(pending)
result = []
while pending:
result.append(pending.pop(0))
grouped.append((new_command, result))
for command, next_command in peek(commands):
# if the previous command was a get, and this is a set we must execute
# any pending commands
# TODO: unless this command is a get_multi and it matches the same option
# signature
if can_group_commands(command, next_command):
# if previous command does not match this command
if pending and not can_group_commands(pending[0], command):
group_pending()
pending.append(command)
else:
# if pending exists for this command, group it
if pending and can_group_commands(pending[0], command):
pending.append(command)
else:
grouped.append((command.clone(), [command]))
# We couldn't group with previous command, so ensure we bubble up
group_pending()
group_pending()
return grouped
def resolve_grouped_commands(grouped, connection):
results = {}
for master_command, grouped_commands in grouped:
result = master_command.resolve(connection)
# this command was not grouped
if len(grouped_commands) == 1:
results[grouped_commands[0]] = result
else:
if isinstance(result, dict):
# XXX: assume first arg is key
for command in grouped_commands:
results[command] = result.get(command.get_args()[0])
else:
for command, value in izip(grouped_commands, result):
results[command] = value
return results
|
disqus/nydus
|
nydus/db/backends/memcache.py
|
regroup_commands
|
python
|
def regroup_commands(commands):
grouped = []
pending = []
def group_pending():
if not pending:
return
new_command = grouped_command(pending)
result = []
while pending:
result.append(pending.pop(0))
grouped.append((new_command, result))
for command, next_command in peek(commands):
# if the previous command was a get, and this is a set we must execute
# any pending commands
# TODO: unless this command is a get_multi and it matches the same option
# signature
if can_group_commands(command, next_command):
# if previous command does not match this command
if pending and not can_group_commands(pending[0], command):
group_pending()
pending.append(command)
else:
# if pending exists for this command, group it
if pending and can_group_commands(pending[0], command):
pending.append(command)
else:
grouped.append((command.clone(), [command]))
# We couldn't group with previous command, so ensure we bubble up
group_pending()
group_pending()
return grouped
|
Returns a list of tuples:
[(command_to_run, [list, of, commands])]
If the list of commands has a single item, the command was not grouped.
|
train
|
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/backends/memcache.py#L138-L182
|
[
"def peek(value):\n generator = iter(value)\n prev = generator.next()\n for item in generator:\n yield prev, item\n prev = item\n yield prev, None\n",
"def can_group_commands(command, next_command):\n \"\"\"\n Returns a boolean representing whether these commands can be\n grouped together or not.\n\n A few things are taken into account for this decision:\n\n For ``set`` commands:\n\n - Are all arguments other than the key/value the same?\n\n For ``delete`` and ``get`` commands:\n\n - Are all arguments other than the key the same?\n \"\"\"\n multi_capable_commands = ('get', 'set', 'delete')\n\n if next_command is None:\n return False\n\n name = command.get_name()\n\n # TODO: support multi commands\n if name not in multi_capable_commands:\n return False\n\n if name != next_command.get_name():\n return False\n\n # if the shared args (key, or key/value) do not match, we cannot group\n if grouped_args_for_command(command) != grouped_args_for_command(next_command):\n return False\n\n # If the keyword arguments do not much (e.g. key_prefix, or timeout on set)\n # then we cannot group\n if command.get_kwargs() != next_command.get_kwargs():\n return False\n\n return True\n",
"def group_pending():\n if not pending:\n return\n\n new_command = grouped_command(pending)\n result = []\n while pending:\n result.append(pending.pop(0))\n grouped.append((new_command, result))\n"
] |
"""
nydus.db.backends.memcache
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import
import pylibmc
from itertools import izip
from nydus.db.backends import BaseConnection, BasePipeline
from nydus.db.promise import EventualCommand
from nydus.utils import peek
class Memcache(BaseConnection):
retryable_exceptions = frozenset([pylibmc.Error])
supports_pipelines = True
def __init__(self, num, host='localhost', port=11211, binary=True,
behaviors=None, **options):
self.host = host
self.port = port
self.binary = binary
self.behaviors = behaviors
super(Memcache, self).__init__(num)
@property
def identifier(self):
mapping = vars(self)
return "memcache://%(host)s:%(port)s/" % mapping
def connect(self):
host = "%s:%i" % (self.host, self.port)
return pylibmc.Client([host], binary=self.binary, behaviors=self.behaviors)
def disconnect(self):
self.connection.disconnect_all()
def get_pipeline(self, *args, **kwargs):
return MemcachePipeline(self)
class MemcachePipeline(BasePipeline):
def execute(self):
grouped = regroup_commands(self.pending)
results = resolve_grouped_commands(grouped, self.connection)
return results
def grouped_args_for_command(command):
"""
Returns a list of arguments that are shared for this command.
When comparing similar commands, these arguments represent the
groupable signature for said commands.
"""
if command.get_name() == 'set':
return command.get_args()[2:]
return command.get_args()[1:]
def grouped_command(commands):
"""
Given a list of commands (which are assumed groupable), return
a new command which is a batch (multi) command.
For ``set`` commands the outcome will be::
set_multi({key: value}, **kwargs)
For ``get`` and ``delete`` commands, the outcome will be::
get_multi(list_of_keys, **kwargs)
(Or respectively ``delete_multi``)
"""
base = commands[0]
name = base.get_name()
multi_command = EventualCommand('%s_multi' % name)
if name in ('get', 'delete'):
args = [c.get_args()[0] for c in commands]
elif base.get_name() == 'set':
args = dict(c.get_args()[0:2] for c in commands)
else:
raise ValueError('Command not supported: %r' % (base.get_name(),))
multi_command(args, *grouped_args_for_command(base), **base.get_kwargs())
return multi_command
def can_group_commands(command, next_command):
"""
Returns a boolean representing whether these commands can be
grouped together or not.
A few things are taken into account for this decision:
For ``set`` commands:
- Are all arguments other than the key/value the same?
For ``delete`` and ``get`` commands:
- Are all arguments other than the key the same?
"""
multi_capable_commands = ('get', 'set', 'delete')
if next_command is None:
return False
name = command.get_name()
# TODO: support multi commands
if name not in multi_capable_commands:
return False
if name != next_command.get_name():
return False
# if the shared args (key, or key/value) do not match, we cannot group
if grouped_args_for_command(command) != grouped_args_for_command(next_command):
return False
# If the keyword arguments do not much (e.g. key_prefix, or timeout on set)
# then we cannot group
if command.get_kwargs() != next_command.get_kwargs():
return False
return True
def resolve_grouped_commands(grouped, connection):
results = {}
for master_command, grouped_commands in grouped:
result = master_command.resolve(connection)
# this command was not grouped
if len(grouped_commands) == 1:
results[grouped_commands[0]] = result
else:
if isinstance(result, dict):
# XXX: assume first arg is key
for command in grouped_commands:
results[command] = result.get(command.get_args()[0])
else:
for command, value in izip(grouped_commands, result):
results[command] = value
return results
|
ARMmbed/mbed-connector-api-python
|
mbed_connector_api/mbed_connector_api.py
|
connector.getConnectorVersion
|
python
|
def getConnectorVersion(self):
result = asyncResult()
data = self._getURL("/",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_mdc_version",data.status_code)
result.is_done = True
return result
|
GET the current Connector version.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult
|
train
|
https://github.com/ARMmbed/mbed-connector-api-python/blob/a5024a01dc67cc192c8bf7a70b251fcf0a3f279b/mbed_connector_api/mbed_connector_api.py#L72-L87
|
[
"def fill(self, data):\n\tif type(data) == r.models.Response:\n\t\ttry:\n\t\t\tself.result = json.loads(data.content)\n\t\texcept:\n\t\t\tself.result = []\n\t\t\tif isinstance(data.content,str): # string handler\n\t\t\t\tself.result = data.content\n\t\t\telif isinstance(data.content,int): # int handler\n\t\t\t\tself.log.debug(\"data returned is an integer, not sure what to do with that\")\n\t\t\telse: # all other handler\n\t\t\t\tself.log.debug(\"unhandled data type, type of content : %s\" %type(data.content))\n\t\tself.status_code = data.status_code\n\t\tself.raw_data = data.content\n\telse:\n\t\t#error\n\t\tself.log.error(\"type not found : %s\"%type(data))\n\treturn\n",
"def _getURL(self, url,query={},versioned=True):\n\tif versioned:\n\t\treturn r.get(self.address+self.apiVersion+url,headers={\"Authorization\":\"Bearer \"+self.bearer},params=query)\n\telse:\n\t\treturn r.get(self.address+url,headers={\"Authorization\":\"Bearer \"+self.bearer},params=query)\n"
] |
class connector:
"""
Interface class to use the connector.mbed.com REST API.
This class will by default handle asyncronous events.
All function return :class:'.asyncResult' objects
"""
# Return connector version number and recent rest API version number it supports
# Return API version of connector
def getApiVersions(self):
"""
Get the REST API versions that connector accepts.
:returns: :class:asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/rest-versions",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_rest_version",data.status_code)
result.is_done = True
return result
# Returns metadata about connector limits as JSON blob
def getLimits(self):
"""return limits of account in async result object.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/limits")
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("limit",data.status_code)
result.is_done = True
return result
# return json list of all endpoints.
# optional type field can be used to match all endpoints of a certain type.
def getEndpoints(self,typeOfEndpoint=""):
"""
Get list of all endpoints on the domain.
:param str typeOfEndpoint: Optional filter endpoints returned by type
:return: list of all endpoints
:rtype: asyncResult
"""
q = {}
result = asyncResult()
if typeOfEndpoint:
q['type'] = typeOfEndpoint
result.extra['type'] = typeOfEndpoint
data = self._getURL("/endpoints", query = q)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_endpoints",data.status_code)
result.is_done = True
return result
# return json list of all resources on an endpoint
def getResources(self,ep,noResp=False,cacheOnly=False):
"""
Get list of resources on an endpoint.
:param str ep: Endpoint to get the resources of
:param bool noResp: Optional - specify no response necessary from endpoint
:param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
:return: list of resources
:rtype: asyncResult
"""
# load query params if set to other than defaults
q = {}
result = asyncResult()
result.endpoint = ep
if noResp or cacheOnly:
q['noResp'] = 'true' if noResp == True else 'false'
q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
# make query
self.log.debug("ep = %s, query=%s",ep,q)
data = self._getURL("/endpoints/"+ep, query=q)
result.fill(data)
# check sucess of call
if data.status_code == 200: # sucess
result.error = False
self.log.debug("getResources sucess, status_code = `%s`, content = `%s`", str(data.status_code),data.content)
else: # fail
result.error = response_codes("get_resources",data.status_code)
self.log.debug("getResources failed with error code `%s`" %str(data.status_code))
result.is_done = True
return result
# return async object
def getResourceValue(self,ep,res,cbfn="",noResp=False,cacheOnly=False):
"""
Get value of a specific resource on a specific endpoint.
:param str ep: name of endpoint
:param str res: name of resource
:param fnptr cbfn: Optional - callback function to be called on completion
:param bool noResp: Optional - specify no response necessary from endpoint
:param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
:return: value of the resource, usually a string
:rtype: asyncResult
"""
q = {}
result = asyncResult(callback=cbfn) #set callback fn for use in async handler
result.endpoint = ep
result.resource = res
if noResp or cacheOnly:
q['noResp'] = 'true' if noResp == True else 'false'
q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
# make query
data = self._getURL("/endpoints/"+ep+res, query=q)
result.fill(data)
if data.status_code == 200: # immediate success
result.error = False
result.is_done = True
if cbfn:
cbfn(result)
return result
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else: # fail
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
def putResourceValue(self,ep,res,data,cbfn=""):
"""
Put a value to a resource on an endpoint
:param str ep: name of endpoint
:param str res: name of resource
:param str data: data to send via PUT
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
"""
result = asyncResult(callback=cbfn)
result.endpoint = ep
result.resource = res
data = self._putURL("/endpoints/"+ep+res,payload=data)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
#return async object
def postResource(self,ep,res,data="",cbfn=""):
'''
POST data to a resource on an endpoint.
:param str ep: name of endpoint
:param str res: name of resource
:param str data: Optional - data to send via POST
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult(callback=cbfn)
result.endpoint = ep
result.resource = res
data = self._postURL("/endpoints/"+ep+res,data)
if data.status_code == 201: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
def deleteEndpoint(self,ep,cbfn=""):
'''
Send DELETE message to an endpoint.
:param str ep: name of endpoint
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult(callback=cbfn)
result.endpoint = ep
data = self._deleteURL("/endpoints/"+ep)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# subscribe to endpoint/resource, the cbfn is given an asynch object that
# represents the result. it is up to the user to impliment the notification
# channel callback in a higher level library.
def putResourceSubscription(self,ep,res,cbfn=""):
'''
Subscribe to changes in a specific resource ``res`` on an endpoint ``ep``
:param str ep: name of endpoint
:param str res: name of resource
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult(callback=cbfn)
result.endpoint = ep
result.resource = res
data = self._putURL("/subscriptions/"+ep+res)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("subscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def deleteEndpointSubscriptions(self,ep):
'''
Delete all subscriptions on specified endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
data = self._deleteURL("/subscriptions/"+ep)
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("delete_endpoint_subscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def deleteResourceSubscription(self,ep,res):
'''
Delete subscription to a resource ``res`` on an endpoint ``ep``
:param str ep: name of endpoint
:param str res: name of resource
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
result.resource = res
data = self._deleteURL("/subscriptions/"+ep+res)
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def deleteAllSubscriptions(self):
'''
Delete all subscriptions on the domain (all endpoints, all resources)
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._deleteURL("/subscriptions/")
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
# result field is a string
def getEndpointSubscriptions(self,ep):
'''
Get list of all subscriptions on a given endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
data = self._getURL("/subscriptions/"+ep)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.content
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
# result field is a string
def getResourceSubscription(self,ep,res):
'''
Get list of all subscriptions for a resource ``res`` on an endpoint ``ep``
:param str ep: name of endpoint
:param str res: name of resource
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
result.resource = res
data = self._getURL("/subscriptions/"+ep+res)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.content
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def putPreSubscription(self,JSONdata):
'''
Set pre-subscription rules for all endpoints / resources on the domain.
This can be useful for all current and future endpoints/resources.
:param json JSONdata: data to use as pre-subscription data. Wildcards are permitted
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
if isinstance(JSONdata,str) and self._isJSON(JSONdata):
self.log.warn("pre-subscription data was a string, converting to a list : %s",JSONdata)
JSONdata = json.loads(JSONdata) # convert json string to list
if not (isinstance(JSONdata,list) and self._isJSON(JSONdata)):
self.log.error("pre-subscription data is not valid. Please make sure it is a valid JSON list")
result = asyncResult()
data = self._putURL("/subscriptions",JSONdata, versioned=False)
if data.status_code == 204: # immediate success with no response
result.error = False
result.is_done = True
result.result = []
else:
result.error = response_codes("presubscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def getPreSubscription(self):
'''
Get the current pre-subscription data from connector
:return: JSON that represents the pre-subscription data in the ``.result`` field
:rtype: asyncResult
'''
result = asyncResult()
data = self._getURL("/subscriptions")
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.json()
else:
result.error = response_codes("presubscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def putCallback(self,url,headers=""):
'''
Set the callback URL. To be used in place of LongPolling when deploying a webapp.
**note**: make sure you set up a callback URL in your web app
:param str url: complete url, including port, where the callback url is located
:param str headers: Optional - Headers to have Connector send back with all calls
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
payloadToSend = {"url":url}
if headers:
payload['headers':headers]
data = self._putURL(url="/notification/callback",payload=payloadToSend, versioned=False)
if data.status_code == 204: #immediate success
result.error = False
result.result = data.content
else:
result.error = response_codes("put_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
def getCallback(self):
'''
Get the callback URL currently registered with Connector.
:return: callback url in ``.result``, error if applicable in ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._getURL("/notification/callback",versioned=False)
if data.status_code == 200: #immediate success
result.error = False
result.result = data.json()
else:
result.error = response_codes("get_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
def deleteCallback(self):
'''
Delete the Callback URL currently registered with Connector.
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._deleteURL("/notification/callback")
if data.status_code == 204: #immediate success
result.result = data.content
result.error = False
else:
result.error = response_codes("delete_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
# set a specific handler to call the cbfn
def setHandler(self,handler,cbfn):
'''
Register a handler for a particular notification type.
These are the types of notifications that are acceptable.
| 'async-responses'
| 'registrations-expired'
| 'de-registrations'
| 'reg-updates'
| 'registrations'
| 'notifications'
:param str handler: name of the notification type
:param fnptr cbfn: function to pass the notification channel messages to.
:return: Nothing.
'''
if handler == "async-responses":
self.async_responses_callback = cbfn
elif handler == "registrations-expired":
self.registrations_expired_callback = cbfn
elif handler == "de-registrations":
self.de_registrations_callback = cbfn
elif handler == "reg-updates":
self.reg_updates_callback = cbfn
elif handler == "registrations":
self.registrations_callback = cbfn
elif handler == "notifications":
self.notifications_callback = cbfn
else:
self.log.warn("'%s' is not a legitimate notification channel option. Please check your spelling.",handler)
# this function needs to spin off a thread that is constantally polling,
# should match asynch ID's to values and call their function
    def startLongPolling(self, noWait=False):
        '''
        Start LongPolling Connector for notifications.
        :param bool noWait: Optional - use the cached values in connector, do not wait for the device to respond
        :return: Thread of constantly running LongPoll. To be used to kill the thred if necessary.
        :rtype: pythonThread
        '''
        # check Asynch ID's against insternal database of ID's
        # Call return function with the value given, maybe decode from base64?
        # NOTE(review): `wait` is computed but never used -- the noWait flag
        # currently has no effect on the longPoll thread (see TODO on longPoll).
        wait = ''
        if(noWait == True):
            wait = "?noWait=true"
        # check that there isn't another thread already running, only one longPolling instance per is acceptable
        if(self.longPollThread.isAlive()):
            self.log.warn("LongPolling is already active.")
        else:
            # start infinite longpolling thread
            self._stopLongPolling.clear()
            self.longPollThread.start()
            self.log.info("Spun off LongPolling thread")
        return self.longPollThread # return thread instance so user can manually intervene if necessary
# stop longpolling by switching the flag off.
    def stopLongPolling(self):
        '''
        Stop LongPolling thread
        :return: none
        '''
        # Signal the longPoll loop to exit after its current request returns;
        # the thread itself checks this Event at the top of every iteration.
        if(self.longPollThread.isAlive()):
            self._stopLongPolling.set()
            self.log.debug("set stop longpolling flag")
        else:
            self.log.warn("LongPolling thread already stopped")
        return
# Thread to constantly long poll connector and process the feedback.
# TODO: pass wait / noWait on to long polling thread, currently the user can set it but it doesnt actually affect anything.
    def longPoll(self, versioned=True):
        # Worker loop run on self.longPollThread: repeatedly blocks on
        # /notification/pull and feeds any 200 response body to self.handler.
        # Exits when self._stopLongPolling is set.
        # :param bool versioned: include the API version prefix in the pull URL
        self.log.debug("LongPolling Started, self.address = %s" %self.address)
        while(not self._stopLongPolling.is_set()):
            try:
                if versioned:
                    data = r.get(self.address+self.apiVersion+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
                else:
                    data = r.get(self.address+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
                self.log.debug("Longpoll Returned, len = %d, statuscode=%d",len(data.text),data.status_code)
                # process callbacks
                if data.status_code == 200: # 204 means no content, do nothing
                    self.handler(data.content)
                    self.log.debug("Longpoll data = "+data.content)
            except:
                # NOTE(review): bare except keeps the poll loop alive through
                # network errors, but it also hides programming errors.
                self.log.error("longPolling had an issue and threw an exception")
                ex_type, ex, tb = sys.exc_info()
                traceback.print_tb(tb)
                self.log.error(sys.exc_info())
                del tb
        self.log.info("Killing Longpolling Thread")
# parse the notification channel responses and call appropriate handlers
    def handler(self,data):
        '''
        Function to handle notification data as part of Callback URL handler.
        :param str data: data posted to Callback URL by connector.
        :return: nothing
        '''
        # Normalize input: accept a requests Response or a JSON string;
        # anything else is rejected with an error log.
        if isinstance(data,r.models.Response):
            self.log.debug("data is request object = %s", str(data.content))
            data = data.content
        elif isinstance(data,str):
            self.log.info("data is json string with len %d",len(data))
            if len(data) == 0:
                self.log.warn("Handler received data of 0 length, exiting handler.")
                return
        else:
            self.log.error("Input is not valid request object or json string : %s" %str(data))
            return False
        try:
            data = json.loads(data)
            # Dispatch each notification channel present in the message to its
            # registered callback (assigned via setHandler / the defaults).
            if 'async-responses' in data.keys():
                self.async_responses_callback(data)
            if 'notifications' in data.keys():
                self.notifications_callback(data)
            if 'registrations' in data.keys():
                self.registrations_callback(data)
            if 'reg-updates' in data.keys():
                self.reg_updates_callback(data)
            if 'de-registrations' in data.keys():
                self.de_registrations_callback(data)
            if 'registrations-expired' in data.keys():
                self.registrations_expired_callback(data)
        except:
            # NOTE(review): bare except -- logs the traceback below but swallows
            # all callback errors.
            self.log.error("handle router had an issue and threw an exception")
            ex_type, ex, tb = sys.exc_info()
            traceback.print_tb(tb)
            self.log.error(sys.exc_info())
            del tb
# Turn on / off debug messages based on the onOff variable
def debug(self,onOff,level='DEBUG'):
'''
Enable / Disable debugging
:param bool onOff: turn debugging on / off
:return: none
'''
if onOff:
if level == 'DEBUG':
self.log.setLevel(logging.DEBUG)
self._ch.setLevel(logging.DEBUG)
self.log.debug("Debugging level DEBUG enabled")
elif level == "INFO":
self.log.setLevel(logging.INFO)
self._ch.setLevel(logging.INFO)
self.log.info("Debugging level INFO enabled")
elif level == "WARN":
self.log.setLevel(logging.WARN)
self._ch.setLevel(logging.WARN)
self.log.warn("Debugging level WARN enabled")
elif level == "ERROR":
self.log.setLevel(logging.ERROR)
self._ch.setLevel(logging.ERROR)
self.log.error("Debugging level ERROR enabled")
else:
self.log.setLevel(logging.ERROR)
self._ch.setLevel(logging.ERROR)
self.log.error("Unrecognized debug level `%s`, set to default level `ERROR` instead",level)
# internal async-requests handler.
# data input is json data
    def _asyncHandler(self,data):
        # Internal handler for the 'async-responses' channel: matches each
        # async-response id against self.database, completes the pending
        # asyncResult (decoding the base64 payload), and fires its callback.
        # :param dict data: parsed notification-channel message
        try:
            responses = data['async-responses']
            for entry in responses:
                if entry['id'] in self.database['async-responses'].keys():
                    result = self.database['async-responses'].pop(entry['id']) # get the asynch object out of database
                    # fill in async-result object
                    if 'error' in entry.keys():
                        # error happened, handle it
                        result.error = response_codes('async-responses-handler',entry['status'])
                        result.error.error = entry['error']
                        result.is_done = True
                        if result.callback:
                            result.callback(result)
                        else:
                            return result
                    else:
                        # everything is good, fill it out
                        # payload arrives base64-encoded; decode before exposing it
                        result.result = b64decode(entry['payload'])
                        result.raw_data = entry
                        result.status = entry['status']
                        result.error = False
                        for thing in entry.keys():
                            result.extra[thing]=entry[thing]
                        result.is_done = True
                        # call associated callback function
                        if result.callback:
                            result.callback(result)
                        else:
                            self.log.warn("No callback function given")
                else:
                    # TODO : object not found int asynch database
                    self.log.warn("No asynch entry for '%s' found in databse",entry['id'])
        except:
            # TODO error handling here
            # NOTE(review): bare except swallows everything; only logged below.
            self.log.error("Bad data encountered and failed to elegantly handle it. ")
            ex_type, ex, tb = sys.exc_info()
            traceback.print_tb(tb)
            self.log.error(sys.exc_info())
            del tb
        return
# default handler for notifications. User should impliment all of these in
# a L2 implimentation or in their webapp.
# @input data is a dictionary
def _defaultHandler(self,data):
if 'async-responses' in data.keys():
self.log.info("async-responses detected : len = %d",len(data["async-responses"]))
self.log.debug(data["async-responses"])
if 'notifications' in data.keys():
self.log.info("notifications' detected : len = %d",len(data["notifications"]))
self.log.debug(data["notifications"])
if 'registrations' in data.keys():
self.log.info("registrations' detected : len = %d",len(data["registrations"]))
self.log.debug(data["registrations"])
if 'reg-updates' in data.keys():
# removed because this happens every 10s or so, spamming the output
self.log.info("reg-updates detected : len = %d",len(data["reg-updates"]))
self.log.debug(data["reg-updates"])
if 'de-registrations' in data.keys():
self.log.info("de-registrations detected : len = %d",len(data["de-registrations"]))
self.log.debug(data["de-registrations"])
if 'registrations-expired' in data.keys():
self.log.info("registrations-expired detected : len = %d",len(data["registrations-expired"]))
self.log.debug(data["registrations-expired"])
# make the requests.
# url is the API url to hit
# query are the optional get params
# versioned tells the API whether to hit the /v#/ version. set to false for
# commands that break with this, like the API and Connector version calls
# TODO: spin this off to be non-blocking
def _getURL(self, url,query={},versioned=True):
if versioned:
return r.get(self.address+self.apiVersion+url,headers={"Authorization":"Bearer "+self.bearer},params=query)
else:
return r.get(self.address+url,headers={"Authorization":"Bearer "+self.bearer},params=query)
# put data to URL with json payload in dataIn
def _putURL(self, url,payload=None,versioned=True):
if self._isJSON(payload):
self.log.debug("PUT payload is json")
if versioned:
return r.put(self.address+self.apiVersion+url,json=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.put(self.address+url,json=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
self.log.debug("PUT payload is NOT json")
if versioned:
return r.put(self.address+self.apiVersion+url,data=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.put(self.address+url,data=payload,headers={"Authorization":"Bearer "+self.bearer})
# put data to URL with json payload in dataIn
def _postURL(self, url,payload="",versioned=True):
addr = self.address+self.apiVersion+url if versioned else self.address+url
h = {"Authorization":"Bearer "+self.bearer}
if payload:
self.log.info("POSTing with payload: %s ",payload)
return r.post(addr,data=payload,headers=h)
else:
self.log.info("POSTing")
return r.post(addr,headers=h)
# delete endpoint
def _deleteURL(self, url,versioned=True):
if versioned:
return r.delete(self.address+self.apiVersion+url,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.delete(self.address+url,headers={"Authorization":"Bearer "+self.bearer})
# check if input is json, return true or false accordingly
def _isJSON(self,dataIn):
try:
json.dumps(dataIn)
return True
except:
self.log.debug("[_isJSON] exception triggered, input is not json")
return False
# extend dictionary class so we can instantiate multiple levels at once
    # dict subclass that auto-creates nested vividicts on missing keys,
    # letting code write e.g. db['a']['b']['c'] = 1 without pre-building levels.
    class vividict(dict):
        def __missing__(self, key):
            # called by dict lookup on a miss: insert a fresh nested vividict
            # and return it, so chained indexing always succeeds
            value = self[key] = type(self)()
            return value
# Initialization function, set the token used by this object.
    def __init__( self,
            token,
            webAddress="https://api.connector.mbed.com",
            port="80",):
        '''
        Create a connector client bound to an API token.
        :param str token: mbed Connector access token (sent as a Bearer header)
        :param str webAddress: Optional - base URL of the Connector REST API
        :param str port: Optional - stored for reference; not appended to
            ``webAddress`` by this class
        '''
        # set token
        self.bearer = token
        # set version of REST API
        self.apiVersion = "/v2"
        # Init database, used for callback fn's for various tasks (asynch, subscriptions...etc)
        self.database = self.vividict()
        # Touching each key pre-creates the per-channel sub-dicts
        # (vividict auto-creates missing entries on access).
        self.database['notifications']
        self.database['registrations']
        self.database['reg-updates']
        self.database['de-registrations']
        self.database['registrations-expired']
        self.database['async-responses']
        # longpolling variable
        self._stopLongPolling = threading.Event() # must initialize false to avoid race condition
        self._stopLongPolling.clear()
        #create thread for long polling
        self.longPollThread = threading.Thread(target=self.longPoll,name="mdc-api-longpoll")
        self.longPollThread.daemon = True # Do this so the thread exits when the overall process does
        # set default webAddress and port to mbed connector
        self.address = webAddress
        self.port = port
        # Initialize the callbacks
        self.async_responses_callback = self._asyncHandler
        self.registrations_expired_callback = self._defaultHandler
        self.de_registrations_callback = self._defaultHandler
        self.reg_updates_callback = self._defaultHandler
        self.registrations_callback = self._defaultHandler
        self.notifications_callback = self._defaultHandler
        # add logger
        self.log = logging.getLogger(name="mdc-api-logger")
        self.log.setLevel(logging.ERROR)
        self._ch = logging.StreamHandler()
        self._ch.setLevel(logging.ERROR)
        formatter = logging.Formatter("\r\n[%(levelname)s \t %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
        self._ch.setFormatter(formatter)
        self.log.addHandler(self._ch)
|
ARMmbed/mbed-connector-api-python
|
mbed_connector_api/mbed_connector_api.py
|
connector.getEndpoints
|
python
|
def getEndpoints(self,typeOfEndpoint=""):
q = {}
result = asyncResult()
if typeOfEndpoint:
q['type'] = typeOfEndpoint
result.extra['type'] = typeOfEndpoint
data = self._getURL("/endpoints", query = q)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_endpoints",data.status_code)
result.is_done = True
return result
|
Get list of all endpoints on the domain.
:param str typeOfEndpoint: Optional filter endpoints returned by type
:return: list of all endpoints
:rtype: asyncResult
|
train
|
https://github.com/ARMmbed/mbed-connector-api-python/blob/a5024a01dc67cc192c8bf7a70b251fcf0a3f279b/mbed_connector_api/mbed_connector_api.py#L126-L146
|
[
"def fill(self, data):\n\tif type(data) == r.models.Response:\n\t\ttry:\n\t\t\tself.result = json.loads(data.content)\n\t\texcept:\n\t\t\tself.result = []\n\t\t\tif isinstance(data.content,str): # string handler\n\t\t\t\tself.result = data.content\n\t\t\telif isinstance(data.content,int): # int handler\n\t\t\t\tself.log.debug(\"data returned is an integer, not sure what to do with that\")\n\t\t\telse: # all other handler\n\t\t\t\tself.log.debug(\"unhandled data type, type of content : %s\" %type(data.content))\n\t\tself.status_code = data.status_code\n\t\tself.raw_data = data.content\n\telse:\n\t\t#error\n\t\tself.log.error(\"type not found : %s\"%type(data))\n\treturn\n",
"def _getURL(self, url,query={},versioned=True):\n\tif versioned:\n\t\treturn r.get(self.address+self.apiVersion+url,headers={\"Authorization\":\"Bearer \"+self.bearer},params=query)\n\telse:\n\t\treturn r.get(self.address+url,headers={\"Authorization\":\"Bearer \"+self.bearer},params=query)\n"
] |
class connector:
"""
Interface class to use the connector.mbed.com REST API.
This class will by default handle asyncronous events.
All function return :class:'.asyncResult' objects
"""
# Return connector version number and recent rest API version number it supports
def getConnectorVersion(self):
"""
GET the current Connector version.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_mdc_version",data.status_code)
result.is_done = True
return result
# Return API version of connector
def getApiVersions(self):
"""
Get the REST API versions that connector accepts.
:returns: :class:asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/rest-versions",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_rest_version",data.status_code)
result.is_done = True
return result
# Returns metadata about connector limits as JSON blob
def getLimits(self):
"""return limits of account in async result object.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/limits")
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("limit",data.status_code)
result.is_done = True
return result
# return json list of all endpoints.
# optional type field can be used to match all endpoints of a certain type.
def getEndpoints(self,typeOfEndpoint=""):
"""
Get list of all endpoints on the domain.
:param str typeOfEndpoint: Optional filter endpoints returned by type
:return: list of all endpoints
:rtype: asyncResult
"""
q = {}
result = asyncResult()
if typeOfEndpoint:
q['type'] = typeOfEndpoint
result.extra['type'] = typeOfEndpoint
data = self._getURL("/endpoints", query = q)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_endpoints",data.status_code)
result.is_done = True
return result
# return json list of all resources on an endpoint
    def getResources(self,ep,noResp=False,cacheOnly=False):
        """
        Get list of resources on an endpoint.
        :param str ep: Endpoint to get the resources of
        :param bool noResp: Optional - specify no response necessary from endpoint
        :param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
        :return: list of resources
        :rtype: asyncResult
        """
        # load query params if set to other than defaults
        q = {}
        result = asyncResult()
        result.endpoint = ep
        # NOTE(review): setting either flag sends BOTH query params (the other
        # explicitly as 'false') -- confirm Connector treats that as a default.
        if noResp or cacheOnly:
            q['noResp'] = 'true' if noResp == True else 'false'
            q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
        # make query
        self.log.debug("ep = %s, query=%s",ep,q)
        data = self._getURL("/endpoints/"+ep, query=q)
        result.fill(data)
        # check sucess of call
        if data.status_code == 200: # sucess
            result.error = False
            self.log.debug("getResources sucess, status_code = `%s`, content = `%s`", str(data.status_code),data.content)
        else: # fail
            result.error = response_codes("get_resources",data.status_code)
            self.log.debug("getResources failed with error code `%s`" %str(data.status_code))
        result.is_done = True
        return result
# return async object
    def getResourceValue(self,ep,res,cbfn="",noResp=False,cacheOnly=False):
        """
        Get value of a specific resource on a specific endpoint.
        :param str ep: name of endpoint
        :param str res: name of resource
        :param fnptr cbfn: Optional - callback function to be called on completion
        :param bool noResp: Optional - specify no response necessary from endpoint
        :param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
        :return: value of the resource, usually a string
        :rtype: asyncResult
        """
        q = {}
        result = asyncResult(callback=cbfn) #set callback fn for use in async handler
        result.endpoint = ep
        result.resource = res
        # NOTE(review): setting either flag sends both query params.
        if noResp or cacheOnly:
            q['noResp'] = 'true' if noResp == True else 'false'
            q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
        # make query
        data = self._getURL("/endpoints/"+ep+res, query=q)
        result.fill(data)
        if data.status_code == 200: # immediate success
            result.error = False
            result.is_done = True
            if cbfn:
                cbfn(result)
            return result
        elif data.status_code == 202:
            # 202 = deferred: park the asyncResult keyed by async-response-id;
            # _asyncHandler completes it when the notification arrives.
            self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
        else: # fail
            result.error = response_codes("resource",data.status_code)
            result.is_done = True
            result.raw_data = data.content
            result.status_code = data.status_code
        return result
# return async object
    def putResourceValue(self,ep,res,data,cbfn=""):
        """
        Put a value to a resource on an endpoint
        :param str ep: name of endpoint
        :param str res: name of resource
        :param str data: data to send via PUT
        :param fnptr cbfn: Optional - callback funtion to call when operation is completed
        :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
        :rtype: asyncResult
        """
        result = asyncResult(callback=cbfn)
        result.endpoint = ep
        result.resource = res
        # NOTE(review): `data` is rebound from the request payload to the HTTP
        # response object here; the original payload is no longer accessible.
        data = self._putURL("/endpoints/"+ep+res,payload=data)
        if data.status_code == 200: #immediate success
            result.error = False
            result.is_done = True
        elif data.status_code == 202:
            # deferred: completed later by _asyncHandler via the notification channel
            self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
        else:
            result.error = response_codes("resource",data.status_code)
            result.is_done = True
            result.raw_data = data.content
            result.status_code = data.status_code
        return result
#return async object
    def postResource(self,ep,res,data="",cbfn=""):
        '''
        POST data to a resource on an endpoint.
        :param str ep: name of endpoint
        :param str res: name of resource
        :param str data: Optional - data to send via POST
        :param fnptr cbfn: Optional - callback funtion to call when operation is completed
        :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
        :rtype: asyncResult
        '''
        result = asyncResult(callback=cbfn)
        result.endpoint = ep
        result.resource = res
        # NOTE(review): `data` is rebound to the HTTP response object here.
        data = self._postURL("/endpoints/"+ep+res,data)
        if data.status_code == 201: #immediate success
            result.error = False
            result.is_done = True
        elif data.status_code == 202:
            # deferred: completed later by _asyncHandler via the notification channel
            self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
        else:
            result.error = response_codes("resource",data.status_code)
            result.is_done = True
            result.raw_data = data.content
            result.status_code = data.status_code
        return result
# return async object
    def deleteEndpoint(self,ep,cbfn=""):
        '''
        Send DELETE message to an endpoint.
        :param str ep: name of endpoint
        :param fnptr cbfn: Optional - callback funtion to call when operation is completed
        :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
        :rtype: asyncResult
        '''
        result = asyncResult(callback=cbfn)
        result.endpoint = ep
        data = self._deleteURL("/endpoints/"+ep)
        if data.status_code == 200: #immediate success
            result.error = False
            result.is_done = True
        elif data.status_code == 202:
            # deferred: completed later by _asyncHandler via the notification channel
            self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
        else:
            result.error = response_codes("resource",data.status_code)
            result.is_done = True
            result.raw_data = data.content
            result.status_code = data.status_code
        return result
# subscribe to endpoint/resource, the cbfn is given an asynch object that
# represents the result. it is up to the user to impliment the notification
# channel callback in a higher level library.
    def putResourceSubscription(self,ep,res,cbfn=""):
        '''
        Subscribe to changes in a specific resource ``res`` on an endpoint ``ep``
        :param str ep: name of endpoint
        :param str res: name of resource
        :param fnptr cbfn: Optional - callback funtion to call when operation is completed
        :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
        :rtype: asyncResult
        '''
        result = asyncResult(callback=cbfn)
        result.endpoint = ep
        result.resource = res
        # PUT with no payload registers the subscription
        data = self._putURL("/subscriptions/"+ep+res)
        if data.status_code == 200: #immediate success
            result.error = False
            result.is_done = True
        elif data.status_code == 202:
            # deferred: completed later by _asyncHandler via the notification channel
            self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
        else:
            result.error = response_codes("subscribe",data.status_code)
            result.is_done = True
            result.raw_data = data.content
            result.status_code = data.status_code
        return result
def deleteEndpointSubscriptions(self,ep):
'''
Delete all subscriptions on specified endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
data = self._deleteURL("/subscriptions/"+ep)
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("delete_endpoint_subscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def deleteResourceSubscription(self,ep,res):
'''
Delete subscription to a resource ``res`` on an endpoint ``ep``
:param str ep: name of endpoint
:param str res: name of resource
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
result.resource = res
data = self._deleteURL("/subscriptions/"+ep+res)
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def deleteAllSubscriptions(self):
'''
Delete all subscriptions on the domain (all endpoints, all resources)
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._deleteURL("/subscriptions/")
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
# result field is a string
def getEndpointSubscriptions(self,ep):
'''
Get list of all subscriptions on a given endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
data = self._getURL("/subscriptions/"+ep)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.content
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
# result field is a string
def getResourceSubscription(self,ep,res):
'''
Get list of all subscriptions for a resource ``res`` on an endpoint ``ep``
:param str ep: name of endpoint
:param str res: name of resource
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
result.resource = res
data = self._getURL("/subscriptions/"+ep+res)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.content
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
    def putPreSubscription(self,JSONdata):
        '''
        Set pre-subscription rules for all endpoints / resources on the domain.
        This can be useful for all current and future endpoints/resources.
        :param json JSONdata: data to use as pre-subscription data. Wildcards are permitted
        :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
        :rtype: asyncResult
        '''
        # Accept a JSON string for convenience and normalize it to a list.
        if isinstance(JSONdata,str) and self._isJSON(JSONdata):
            self.log.warn("pre-subscription data was a string, converting to a list : %s",JSONdata)
            JSONdata = json.loads(JSONdata) # convert json string to list
        # NOTE(review): a validation failure is only logged -- the PUT below is
        # still attempted with the invalid payload. Confirm this is intended.
        if not (isinstance(JSONdata,list) and self._isJSON(JSONdata)):
            self.log.error("pre-subscription data is not valid. Please make sure it is a valid JSON list")
        result = asyncResult()
        # /subscriptions PUT is made without the /v2 API prefix here.
        data = self._putURL("/subscriptions",JSONdata, versioned=False)
        if data.status_code == 204: # immediate success with no response
            result.error = False
            result.is_done = True
            result.result = []
        else:
            result.error = response_codes("presubscription",data.status_code)
            result.is_done = True
            result.raw_data = data.content
            result.status_code = data.status_code
        return result
def getPreSubscription(self):
'''
Get the current pre-subscription data from connector
:return: JSON that represents the pre-subscription data in the ``.result`` field
:rtype: asyncResult
'''
result = asyncResult()
data = self._getURL("/subscriptions")
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.json()
else:
result.error = response_codes("presubscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def putCallback(self,url,headers=""):
'''
Set the callback URL. To be used in place of LongPolling when deploying a webapp.
**note**: make sure you set up a callback URL in your web app
:param str url: complete url, including port, where the callback url is located
:param str headers: Optional - Headers to have Connector send back with all calls
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
payloadToSend = {"url":url}
if headers:
payload['headers':headers]
data = self._putURL(url="/notification/callback",payload=payloadToSend, versioned=False)
if data.status_code == 204: #immediate success
result.error = False
result.result = data.content
else:
result.error = response_codes("put_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
def getCallback(self):
'''
Get the callback URL currently registered with Connector.
:return: callback url in ``.result``, error if applicable in ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._getURL("/notification/callback",versioned=False)
if data.status_code == 200: #immediate success
result.error = False
result.result = data.json()
else:
result.error = response_codes("get_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
def deleteCallback(self):
'''
Delete the Callback URL currently registered with Connector.
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._deleteURL("/notification/callback")
if data.status_code == 204: #immediate success
result.result = data.content
result.error = False
else:
result.error = response_codes("delete_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
# set a specific handler to call the cbfn
def setHandler(self,handler,cbfn):
'''
Register a handler for a particular notification type.
These are the types of notifications that are acceptable.
| 'async-responses'
| 'registrations-expired'
| 'de-registrations'
| 'reg-updates'
| 'registrations'
| 'notifications'
:param str handler: name of the notification type
:param fnptr cbfn: function to pass the notification channel messages to.
:return: Nothing.
'''
if handler == "async-responses":
self.async_responses_callback = cbfn
elif handler == "registrations-expired":
self.registrations_expired_callback = cbfn
elif handler == "de-registrations":
self.de_registrations_callback = cbfn
elif handler == "reg-updates":
self.reg_updates_callback = cbfn
elif handler == "registrations":
self.registrations_callback = cbfn
elif handler == "notifications":
self.notifications_callback = cbfn
else:
self.log.warn("'%s' is not a legitimate notification channel option. Please check your spelling.",handler)
# this function needs to spin off a thread that is constantally polling,
# should match asynch ID's to values and call their function
def startLongPolling(self, noWait=False):
    '''
    Start LongPolling Connector for notifications.

    :param bool noWait: Optional - use the cached values in connector, do not wait for the device to respond
    :return: Thread of constantly running LongPoll. To be used to kill the thred if necessary.
    :rtype: pythonThread
    '''
    # check Asynch ID's against insternal database of ID's
    # Call return function with the value given, maybe decode from base64?
    # NOTE(review): `wait` is computed but never handed to the polling
    # thread, so `noWait` currently has no effect (see TODO on longPoll).
    wait = ''
    if(noWait == True):
        wait = "?noWait=true"
    # check that there isn't another thread already running, only one longPolling instance per is acceptable
    if(self.longPollThread.isAlive()):
        self.log.warn("LongPolling is already active.")
    else:
        # start infinite longpolling thread
        self._stopLongPolling.clear()
        self.longPollThread.start()
        self.log.info("Spun off LongPolling thread")
    return self.longPollThread # return thread instance so user can manually intervene if necessary
# stop longpolling by switching the flag off.
def stopLongPolling(self):
    '''
    Stop LongPolling thread

    :return: none
    '''
    # Guard clause: nothing to stop if the worker is not running.
    if not self.longPollThread.isAlive():
        self.log.warn("LongPolling thread already stopped")
        return
    self._stopLongPolling.set()
    self.log.debug("set stop longpolling flag")
    return
# Thread to constantly long poll connector and process the feedback.
# TODO: pass wait / noWait on to long polling thread, currently the user can set it but it doesnt actually affect anything.
def longPoll(self, versioned=True):
    """Blocking worker loop run by the LongPolling thread.

    Repeatedly GETs /notification/pull and feeds any 200 response body to
    self.handler(); loops until the _stopLongPolling event is set.

    :param bool versioned: prepend the REST API version prefix (/v2) to the URL.
    :return: nothing
    """
    self.log.debug("LongPolling Started, self.address = %s" %self.address)
    while(not self._stopLongPolling.is_set()):
        try:
            if versioned:
                data = r.get(self.address+self.apiVersion+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
            else:
                data = r.get(self.address+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
            self.log.debug("Longpoll Returned, len = %d, statuscode=%d",len(data.text),data.status_code)
            # process callbacks
            if data.status_code == 200: # 204 means no content, do nothing
                self.handler(data.content)
                self.log.debug("Longpoll data = "+data.content)
        except:
            # NOTE(review): bare except keeps the loop alive through any
            # failure, but it also swallows SystemExit/KeyboardInterrupt
            # in this thread — consider `except Exception:`.
            self.log.error("longPolling had an issue and threw an exception")
            ex_type, ex, tb = sys.exc_info()
            traceback.print_tb(tb)
            self.log.error(sys.exc_info())
            del tb
    self.log.info("Killing Longpolling Thread")
# parse the notification channel responses and call appropriate handlers
def handler(self,data):
    '''
    Function to handle notification data as part of Callback URL handler.

    Accepts either a requests Response object or a JSON string, decodes it,
    and dispatches every notification category present to the callback
    registered for it (see setHandler).

    :param str data: data posted to Callback URL by connector.
    :return: nothing (returns False on unusable input)
    '''
    # Normalize input: accept a requests Response or a raw JSON string.
    if isinstance(data,r.models.Response):
        self.log.debug("data is request object = %s", str(data.content))
        data = data.content
    elif isinstance(data,str):
        self.log.info("data is json string with len %d",len(data))
        if len(data) == 0:
            self.log.warn("Handler received data of 0 length, exiting handler.")
            return
    else:
        self.log.error("Input is not valid request object or json string : %s" %str(data))
        return False
    try:
        data = json.loads(data)
        # A single message may carry several categories; dispatch each one.
        if 'async-responses' in data.keys():
            self.async_responses_callback(data)
        if 'notifications' in data.keys():
            self.notifications_callback(data)
        if 'registrations' in data.keys():
            self.registrations_callback(data)
        if 'reg-updates' in data.keys():
            self.reg_updates_callback(data)
        if 'de-registrations' in data.keys():
            self.de_registrations_callback(data)
        if 'registrations-expired' in data.keys():
            self.registrations_expired_callback(data)
    except:
        # NOTE(review): bare except also hides bugs raised inside user
        # callbacks, not just malformed JSON.
        self.log.error("handle router had an issue and threw an exception")
        ex_type, ex, tb = sys.exc_info()
        traceback.print_tb(tb)
        self.log.error(sys.exc_info())
        del tb
# Turn on / off debug messages based on the onOff variable
def debug(self,onOff,level='DEBUG'):
    '''
    Enable / Disable debugging

    :param bool onOff: turn debugging on / off
    :param str level: log level to enable: 'DEBUG', 'INFO', 'WARN' or
        'ERROR'; unrecognized names fall back to 'ERROR'
    :return: none
    '''
    if onOff:
        if level == 'DEBUG':
            self.log.setLevel(logging.DEBUG)
            self._ch.setLevel(logging.DEBUG)
            self.log.debug("Debugging level DEBUG enabled")
        elif level == "INFO":
            self.log.setLevel(logging.INFO)
            self._ch.setLevel(logging.INFO)
            self.log.info("Debugging level INFO enabled")
        elif level == "WARN":
            self.log.setLevel(logging.WARN)
            self._ch.setLevel(logging.WARN)
            self.log.warn("Debugging level WARN enabled")
        elif level == "ERROR":
            self.log.setLevel(logging.ERROR)
            self._ch.setLevel(logging.ERROR)
            self.log.error("Debugging level ERROR enabled")
        else:
            self.log.setLevel(logging.ERROR)
            self._ch.setLevel(logging.ERROR)
            self.log.error("Unrecognized debug level `%s`, set to default level `ERROR` instead",level)
    else:
        # Bug fix: previously a falsy onOff silently left the level
        # unchanged, so debugging could never be turned off. Restore the
        # constructor default (ERROR only) to disable verbose output.
        self.log.setLevel(logging.ERROR)
        self._ch.setLevel(logging.ERROR)
# internal async-requests handler.
# data input is json data
def _asyncHandler(self,data):
    # Internal handler for 'async-responses' messages: match each entry's id
    # against self.database['async-responses'], fill in the parked asyncResult
    # and fire its callback (if one was registered).
    #
    # :param dict data: decoded notification-channel message containing an
    #     'async-responses' list.
    try:
        responses = data['async-responses']
        for entry in responses:
            if entry['id'] in self.database['async-responses'].keys():
                result = self.database['async-responses'].pop(entry['id']) # get the asynch object out of database
                # fill in async-result object
                if 'error' in entry.keys():
                    # error happened, handle it
                    result.error = response_codes('async-responses-handler',entry['status'])
                    result.error.error = entry['error']
                    result.is_done = True
                    if result.callback:
                        result.callback(result)
                    else:
                        # NOTE(review): returning here aborts processing of
                        # any remaining entries in this batch — confirm.
                        return result
                else:
                    # everything is good, fill it out
                    # payload arrives base64-encoded; decode before storing
                    result.result = b64decode(entry['payload'])
                    result.raw_data = entry
                    result.status = entry['status']
                    result.error = False
                    for thing in entry.keys():
                        result.extra[thing]=entry[thing]
                    result.is_done = True
                    # call associated callback function
                    if result.callback:
                        result.callback(result)
                    else:
                        self.log.warn("No callback function given")
            else:
                # TODO : object not found int asynch database
                self.log.warn("No asynch entry for '%s' found in databse",entry['id'])
    except:
        # TODO error handling here
        self.log.error("Bad data encountered and failed to elegantly handle it. ")
        ex_type, ex, tb = sys.exc_info()
        traceback.print_tb(tb)
        self.log.error(sys.exc_info())
        del tb
    return
# default handler for notifications. User should impliment all of these in
# a L2 implimentation or in their webapp.
# @input data is a dictionary
def _defaultHandler(self,data):
    # Fallback callback for every notification category: log the entry count
    # at INFO and the raw payload at DEBUG. Users replace these per category
    # through setHandler() in a higher-level implementation.
    #
    # :param dict data: decoded notification-channel message.
    if 'async-responses' in data.keys():
        self.log.info("async-responses detected : len = %d",len(data["async-responses"]))
        self.log.debug(data["async-responses"])
    if 'notifications' in data.keys():
        self.log.info("notifications' detected : len = %d",len(data["notifications"]))
        self.log.debug(data["notifications"])
    if 'registrations' in data.keys():
        self.log.info("registrations' detected : len = %d",len(data["registrations"]))
        self.log.debug(data["registrations"])
    if 'reg-updates' in data.keys():
        # removed because this happens every 10s or so, spamming the output
        self.log.info("reg-updates detected : len = %d",len(data["reg-updates"]))
        self.log.debug(data["reg-updates"])
    if 'de-registrations' in data.keys():
        self.log.info("de-registrations detected : len = %d",len(data["de-registrations"]))
        self.log.debug(data["de-registrations"])
    if 'registrations-expired' in data.keys():
        self.log.info("registrations-expired detected : len = %d",len(data["registrations-expired"]))
        self.log.debug(data["registrations-expired"])
# make the requests.
# url is the API url to hit
# query are the optional get params
# versioned tells the API whether to hit the /v#/ version. set to false for
# commands that break with this, like the API and Connector version calls
# TODO: spin this off to be non-blocking
def _getURL(self, url,query=None,versioned=True):
    """GET `url` from Connector with the Bearer auth header.

    :param str url: path to request (appended to the base address)
    :param dict query: optional query-string parameters
    :param bool versioned: prepend the REST API version prefix (/v2)
    :return: requests Response object
    """
    # Bug fix: the old signature used a mutable default argument
    # (`query={}`), a classic Python pitfall; `None` plus `or {}` is
    # behaviorally equivalent and safe.
    base = self.address + self.apiVersion if versioned else self.address
    return r.get(base+url,headers={"Authorization":"Bearer "+self.bearer},params=query or {})
# put data to URL with json payload in dataIn
def _putURL(self, url,payload=None,versioned=True):
    """PUT `payload` to `url`; JSON-serializable payloads go out via the
    requests `json=` kwarg, anything else as a raw body."""
    target = (self.address + self.apiVersion + url) if versioned else (self.address + url)
    auth = {"Authorization":"Bearer "+self.bearer}
    if self._isJSON(payload):
        self.log.debug("PUT payload is json")
        return r.put(target,json=payload,headers=auth)
    self.log.debug("PUT payload is NOT json")
    return r.put(target,data=payload,headers=auth)
# put data to URL with json payload in dataIn
def _postURL(self, url,payload="",versioned=True):
    """POST to `url`, attaching `payload` as the request body when given."""
    addr = self.address+self.apiVersion+url if versioned else self.address+url
    h = {"Authorization":"Bearer "+self.bearer}
    if not payload:
        self.log.info("POSTing")
        return r.post(addr,headers=h)
    self.log.info("POSTing with payload: %s ",payload)
    return r.post(addr,data=payload,headers=h)
# delete endpoint
def _deleteURL(self, url,versioned=True):
    """DELETE `url` on Connector with the Bearer auth header."""
    base = (self.address + self.apiVersion) if versioned else self.address
    return r.delete(base+url,headers={"Authorization":"Bearer "+self.bearer})
# check if input is json, return true or false accordingly
def _isJSON(self,dataIn):
try:
json.dumps(dataIn)
return True
except:
self.log.debug("[_isJSON] exception triggered, input is not json")
return False
# extend dictionary class so we can instantiate multiple levels at once
class vividict(dict):
    """dict subclass that auto-creates nested dictionaries on missing keys,
    so arbitrarily deep assignments work without explicit setup."""
    def __missing__(self, key):
        # Create, store and return a fresh empty vividict for the key.
        child = type(self)()
        self[key] = child
        return child
# Initialization function, set the token used by this object.
def __init__( self,
              token,
              webAddress="https://api.connector.mbed.com",
              port="80",):
    """Build a connector client bound to one API token.

    :param str token: access token, sent as an Authorization Bearer header
    :param str webAddress: base URL of the Connector REST service
    :param str port: service port. NOTE(review): stored but never used when
        building request URLs — confirm whether that is intentional.
    """
    # set token
    self.bearer = token
    # set version of REST API
    self.apiVersion = "/v2"
    # Init database, used for callback fn's for various tasks (asynch, subscriptions...etc)
    # (vividict auto-creates each nested category dict touched below)
    self.database = self.vividict()
    self.database['notifications']
    self.database['registrations']
    self.database['reg-updates']
    self.database['de-registrations']
    self.database['registrations-expired']
    self.database['async-responses']
    # longpolling variable
    self._stopLongPolling = threading.Event() # must initialize false to avoid race condition
    self._stopLongPolling.clear()
    #create thread for long polling
    self.longPollThread = threading.Thread(target=self.longPoll,name="mdc-api-longpoll")
    self.longPollThread.daemon = True # Do this so the thread exits when the overall process does
    # set default webAddress and port to mbed connector
    self.address = webAddress
    self.port = port
    # Initialize the callbacks
    self.async_responses_callback = self._asyncHandler
    self.registrations_expired_callback = self._defaultHandler
    self.de_registrations_callback = self._defaultHandler
    self.reg_updates_callback = self._defaultHandler
    self.registrations_callback = self._defaultHandler
    self.notifications_callback = self._defaultHandler
    # add logger
    self.log = logging.getLogger(name="mdc-api-logger")
    self.log.setLevel(logging.ERROR)
    self._ch = logging.StreamHandler()
    self._ch.setLevel(logging.ERROR)
    formatter = logging.Formatter("\r\n[%(levelname)s \t %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
    self._ch.setFormatter(formatter)
    self.log.addHandler(self._ch)
|
ARMmbed/mbed-connector-api-python
|
mbed_connector_api/mbed_connector_api.py
|
connector.getResources
|
python
|
def getResources(self,ep,noResp=False,cacheOnly=False):
# load query params if set to other than defaults
q = {}
result = asyncResult()
result.endpoint = ep
if noResp or cacheOnly:
q['noResp'] = 'true' if noResp == True else 'false'
q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
# make query
self.log.debug("ep = %s, query=%s",ep,q)
data = self._getURL("/endpoints/"+ep, query=q)
result.fill(data)
# check sucess of call
if data.status_code == 200: # sucess
result.error = False
self.log.debug("getResources sucess, status_code = `%s`, content = `%s`", str(data.status_code),data.content)
else: # fail
result.error = response_codes("get_resources",data.status_code)
self.log.debug("getResources failed with error code `%s`" %str(data.status_code))
result.is_done = True
return result
|
Get list of resources on an endpoint.
:param str ep: Endpoint to get the resources of
:param bool noResp: Optional - specify no response necessary from endpoint
:param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
:return: list of resources
:rtype: asyncResult
|
train
|
https://github.com/ARMmbed/mbed-connector-api-python/blob/a5024a01dc67cc192c8bf7a70b251fcf0a3f279b/mbed_connector_api/mbed_connector_api.py#L149-L178
|
[
"def fill(self, data):\n\tif type(data) == r.models.Response:\n\t\ttry:\n\t\t\tself.result = json.loads(data.content)\n\t\texcept:\n\t\t\tself.result = []\n\t\t\tif isinstance(data.content,str): # string handler\n\t\t\t\tself.result = data.content\n\t\t\telif isinstance(data.content,int): # int handler\n\t\t\t\tself.log.debug(\"data returned is an integer, not sure what to do with that\")\n\t\t\telse: # all other handler\n\t\t\t\tself.log.debug(\"unhandled data type, type of content : %s\" %type(data.content))\n\t\tself.status_code = data.status_code\n\t\tself.raw_data = data.content\n\telse:\n\t\t#error\n\t\tself.log.error(\"type not found : %s\"%type(data))\n\treturn\n",
"def _getURL(self, url,query={},versioned=True):\n\tif versioned:\n\t\treturn r.get(self.address+self.apiVersion+url,headers={\"Authorization\":\"Bearer \"+self.bearer},params=query)\n\telse:\n\t\treturn r.get(self.address+url,headers={\"Authorization\":\"Bearer \"+self.bearer},params=query)\n"
] |
class connector:
"""
Interface class to use the connector.mbed.com REST API.
This class will by default handle asyncronous events.
All function return :class:'.asyncResult' objects
"""
# Return connector version number and recent rest API version number it supports
def getConnectorVersion(self):
    """
    GET the current Connector version.

    :returns: asyncResult object, populates error and result fields
    :rtype: asyncResult
    """
    result = asyncResult()
    response = self._getURL("/",versioned=False)
    result.fill(response)
    # 200 is the only success code for this root GET.
    result.error = False if response.status_code == 200 else response_codes("get_mdc_version",response.status_code)
    result.is_done = True
    return result
# Return API version of connector
def getApiVersions(self):
    """
    Get the REST API versions that connector accepts.

    :returns: :class:asyncResult object, populates error and result fields
    :rtype: asyncResult
    """
    result = asyncResult()
    response = self._getURL("/rest-versions",versioned=False)
    result.fill(response)
    result.error = False if response.status_code == 200 else response_codes("get_rest_version",response.status_code)
    result.is_done = True
    return result
# Returns metadata about connector limits as JSON blob
def getLimits(self):
    """return limits of account in async result object.

    :returns: asyncResult object, populates error and result fields
    :rtype: asyncResult
    """
    result = asyncResult()
    response = self._getURL("/limits")
    result.fill(response)
    result.error = False if response.status_code == 200 else response_codes("limit",response.status_code)
    result.is_done = True
    return result
# return json list of all endpoints.
# optional type field can be used to match all endpoints of a certain type.
def getEndpoints(self,typeOfEndpoint=""):
    """
    Get list of all endpoints on the domain.

    :param str typeOfEndpoint: Optional filter endpoints returned by type
    :return: list of all endpoints
    :rtype: asyncResult
    """
    result = asyncResult()
    params = {}
    # Only attach the filter when one was requested.
    if typeOfEndpoint:
        params['type'] = typeOfEndpoint
        result.extra['type'] = typeOfEndpoint
    response = self._getURL("/endpoints", query=params)
    result.fill(response)
    result.error = False if response.status_code == 200 else response_codes("get_endpoints",response.status_code)
    result.is_done = True
    return result
# return json list of all resources on an endpoint
def getResources(self,ep,noResp=False,cacheOnly=False):
    """
    Get list of resources on an endpoint.

    :param str ep: Endpoint to get the resources of
    :param bool noResp: Optional - specify no response necessary from endpoint
    :param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
    :return: list of resources
    :rtype: asyncResult
    """
    result = asyncResult()
    result.endpoint = ep
    # Query flags are only attached when at least one differs from default.
    params = {}
    if noResp or cacheOnly:
        params['noResp'] = 'true' if noResp == True else 'false'
        params['cacheOnly'] = 'true' if cacheOnly == True else 'false'
    self.log.debug("ep = %s, query=%s",ep,params)
    response = self._getURL("/endpoints/"+ep, query=params)
    result.fill(response)
    if response.status_code == 200: # sucess
        result.error = False
        self.log.debug("getResources sucess, status_code = `%s`, content = `%s`", str(response.status_code),response.content)
    else: # fail
        result.error = response_codes("get_resources",response.status_code)
        self.log.debug("getResources failed with error code `%s`" %str(response.status_code))
    result.is_done = True
    return result
# return async object
def getResourceValue(self,ep,res,cbfn="",noResp=False,cacheOnly=False):
    """
    Get value of a specific resource on a specific endpoint.

    :param str ep: name of endpoint
    :param str res: name of resource
    :param fnptr cbfn: Optional - callback function to be called on completion
    :param bool noResp: Optional - specify no response necessary from endpoint
    :param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
    :return: value of the resource, usually a string
    :rtype: asyncResult
    """
    q = {}
    result = asyncResult(callback=cbfn) #set callback fn for use in async handler
    result.endpoint = ep
    result.resource = res
    if noResp or cacheOnly:
        q['noResp'] = 'true' if noResp == True else 'false'
        q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
    # make query
    # NOTE(review): `res` is appended directly to the endpoint name, so it
    # is assumed to start with '/' — confirm with callers.
    data = self._getURL("/endpoints/"+ep+res, query=q)
    result.fill(data)
    if data.status_code == 200: # immediate success
        result.error = False
        result.is_done = True
        if cbfn:
            cbfn(result)
        return result
    elif data.status_code == 202:
        # Deferred: Connector answers later on the notification channel.
        # Park the asyncResult keyed by async-response-id so _asyncHandler
        # can complete it when the response arrives.
        self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
    else: # fail
        result.error = response_codes("resource",data.status_code)
        result.is_done = True
    result.raw_data = data.content
    result.status_code = data.status_code
    return result
# return async object
def putResourceValue(self,ep,res,data,cbfn=""):
    """
    Put a value to a resource on an endpoint

    :param str ep: name of endpoint
    :param str res: name of resource
    :param str data: data to send via PUT
    :param fnptr cbfn: Optional - callback funtion to call when operation is completed
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    """
    result = asyncResult(callback=cbfn)
    result.endpoint = ep
    result.resource = res
    # Keep the `data` parameter intact; the HTTP response gets its own name.
    response = self._putURL("/endpoints/"+ep+res,payload=data)
    if response.status_code == 200: #immediate success
        result.error = False
        result.is_done = True
    elif response.status_code == 202:
        # deferred: completed later by the notification channel handler
        self.database['async-responses'][json.loads(response.content)["async-response-id"]]= result
    else:
        result.error = response_codes("resource",response.status_code)
        result.is_done = True
    result.raw_data = response.content
    result.status_code = response.status_code
    return result
#return async object
def postResource(self,ep,res,data="",cbfn=""):
    '''
    POST data to a resource on an endpoint.

    :param str ep: name of endpoint
    :param str res: name of resource
    :param str data: Optional - data to send via POST
    :param fnptr cbfn: Optional - callback funtion to call when operation is completed
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult(callback=cbfn)
    result.endpoint = ep
    result.resource = res
    response = self._postURL("/endpoints/"+ep+res,data)
    # 201 (Created) is the immediate-success code for POST here.
    if response.status_code == 201:
        result.error = False
        result.is_done = True
    elif response.status_code == 202:
        # deferred: completed later by the notification channel handler
        self.database['async-responses'][json.loads(response.content)["async-response-id"]]= result
    else:
        result.error = response_codes("resource",response.status_code)
        result.is_done = True
    result.raw_data = response.content
    result.status_code = response.status_code
    return result
# return async object
def deleteEndpoint(self,ep,cbfn=""):
    '''
    Send DELETE message to an endpoint.

    :param str ep: name of endpoint
    :param fnptr cbfn: Optional - callback funtion to call when operation is completed
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult(callback=cbfn)
    result.endpoint = ep
    response = self._deleteURL("/endpoints/"+ep)
    if response.status_code == 200: #immediate success
        result.error = False
        result.is_done = True
    elif response.status_code == 202:
        # deferred: completed later by the notification channel handler
        self.database['async-responses'][json.loads(response.content)["async-response-id"]]= result
    else:
        result.error = response_codes("resource",response.status_code)
        result.is_done = True
    result.raw_data = response.content
    result.status_code = response.status_code
    return result
# subscribe to endpoint/resource, the cbfn is given an asynch object that
# represents the result. it is up to the user to impliment the notification
# channel callback in a higher level library.
def putResourceSubscription(self,ep,res,cbfn=""):
    '''
    Subscribe to changes in a specific resource ``res`` on an endpoint ``ep``

    :param str ep: name of endpoint
    :param str res: name of resource
    :param fnptr cbfn: Optional - callback funtion to call when operation is completed
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult(callback=cbfn)
    result.endpoint = ep
    result.resource = res
    response = self._putURL("/subscriptions/"+ep+res)
    if response.status_code == 200: #immediate success
        result.error = False
        result.is_done = True
    elif response.status_code == 202:
        # deferred: completed later by the notification channel handler
        self.database['async-responses'][json.loads(response.content)["async-response-id"]]= result
    else:
        result.error = response_codes("subscribe",response.status_code)
        result.is_done = True
    result.raw_data = response.content
    result.status_code = response.status_code
    return result
def deleteEndpointSubscriptions(self,ep):
    '''
    Delete all subscriptions on specified endpoint ``ep``

    :param str ep: name of endpoint
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult()
    result.endpoint = ep
    response = self._deleteURL("/subscriptions/"+ep)
    if response.status_code == 204: #immediate success
        result.error = False
    else:
        result.error = response_codes("delete_endpoint_subscription",response.status_code)
    result.is_done = True
    result.raw_data = response.content
    result.status_code = response.status_code
    return result
def deleteResourceSubscription(self,ep,res):
    '''
    Delete subscription to a resource ``res`` on an endpoint ``ep``

    :param str ep: name of endpoint
    :param str res: name of resource
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult()
    result.endpoint = ep
    result.resource = res
    response = self._deleteURL("/subscriptions/"+ep+res)
    if response.status_code == 204: #immediate success
        result.error = False
    else:
        result.error = response_codes("unsubscribe",response.status_code)
    result.is_done = True
    result.raw_data = response.content
    result.status_code = response.status_code
    return result
def deleteAllSubscriptions(self):
    '''
    Delete all subscriptions on the domain (all endpoints, all resources)

    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult()
    response = self._deleteURL("/subscriptions/")
    if response.status_code == 204: #immediate success
        result.error = False
    else:
        result.error = response_codes("unsubscribe",response.status_code)
    result.is_done = True
    result.raw_data = response.content
    result.status_code = response.status_code
    return result
# return async object
# result field is a string
def getEndpointSubscriptions(self,ep):
    '''
    Get list of all subscriptions on a given endpoint ``ep``

    :param str ep: name of endpoint
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult()
    result.endpoint = ep
    response = self._getURL("/subscriptions/"+ep)
    if response.status_code == 200: #immediate success
        result.error = False
        result.result = response.content
    else:
        result.error = response_codes("unsubscribe",response.status_code)
    result.is_done = True
    result.raw_data = response.content
    result.status_code = response.status_code
    return result
# return async object
# result field is a string
def getResourceSubscription(self,ep,res):
    '''
    Get list of all subscriptions for a resource ``res`` on an endpoint ``ep``

    :param str ep: name of endpoint
    :param str res: name of resource
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult()
    result.endpoint = ep
    result.resource = res
    response = self._getURL("/subscriptions/"+ep+res)
    if response.status_code == 200: #immediate success
        result.error = False
        result.result = response.content
    else:
        result.error = response_codes("unsubscribe",response.status_code)
    result.is_done = True
    result.raw_data = response.content
    result.status_code = response.status_code
    return result
def putPreSubscription(self,JSONdata):
    '''
    Set pre-subscription rules for all endpoints / resources on the domain.
    This can be useful for all current and future endpoints/resources.

    :param json JSONdata: data to use as pre-subscription data. Wildcards are permitted
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    # Accept a JSON string for convenience and coerce it to a list.
    if isinstance(JSONdata,str) and self._isJSON(JSONdata):
        self.log.warn("pre-subscription data was a string, converting to a list : %s",JSONdata)
        JSONdata = json.loads(JSONdata) # convert json string to list
    # NOTE(review): invalid data is only logged here and the PUT is still
    # attempted — confirm whether this should return early instead.
    if not (isinstance(JSONdata,list) and self._isJSON(JSONdata)):
        self.log.error("pre-subscription data is not valid. Please make sure it is a valid JSON list")
    result = asyncResult()
    data = self._putURL("/subscriptions",JSONdata, versioned=False)
    if data.status_code == 204: # immediate success with no response
        result.error = False
        result.is_done = True
        result.result = []
    else:
        result.error = response_codes("presubscription",data.status_code)
        result.is_done = True
    result.raw_data = data.content
    result.status_code = data.status_code
    return result
def getPreSubscription(self):
    '''
    Get the current pre-subscription data from connector

    :return: JSON that represents the pre-subscription data in the ``.result`` field
    :rtype: asyncResult
    '''
    result = asyncResult()
    response = self._getURL("/subscriptions")
    if response.status_code == 200: #immediate success
        result.error = False
        result.result = response.json()
    else:
        result.error = response_codes("presubscription",response.status_code)
    result.is_done = True
    result.raw_data = response.content
    result.status_code = response.status_code
    return result
def putCallback(self,url,headers=""):
    '''
    Set the callback URL. To be used in place of LongPolling when deploying a webapp.

    **note**: make sure you set up a callback URL in your web app

    :param str url: complete url, including port, where the callback url is located
    :param str headers: Optional - Headers to have Connector send back with all calls
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult()
    payloadToSend = {"url":url}
    if headers:
        # Bug fix: this previously executed `payload['headers':headers]`,
        # which raises (NameError — `payload` is undefined — and a slice is
        # not a valid dict key). Attach the optional headers to the payload.
        payloadToSend["headers"] = headers
    data = self._putURL(url="/notification/callback",payload=payloadToSend, versioned=False)
    if data.status_code == 204: #immediate success
        result.error = False
        result.result = data.content
    else:
        result.error = response_codes("put_callback_url",data.status_code)
    result.raw_data = data.content
    result.status_code = data.status_code
    result.is_done = True
    return result
def getCallback(self):
    '''
    Get the callback URL currently registered with Connector.

    :return: callback url in ``.result``, error if applicable in ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult()
    response = self._getURL("/notification/callback",versioned=False)
    if response.status_code == 200: #immediate success
        result.error = False
        result.result = response.json()
    else:
        result.error = response_codes("get_callback_url",response.status_code)
    result.raw_data = response.content
    result.status_code = response.status_code
    result.is_done = True
    return result
def deleteCallback(self):
    '''
    Delete the Callback URL currently registered with Connector.

    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult()
    response = self._deleteURL("/notification/callback")
    # 204 (No Content) is the immediate-success code for this DELETE.
    if response.status_code == 204:
        result.result = response.content
        result.error = False
    else:
        result.error = response_codes("delete_callback_url",response.status_code)
    result.raw_data = response.content
    result.status_code = response.status_code
    result.is_done = True
    return result
# set a specific handler to call the cbfn
def setHandler(self,handler,cbfn):
    '''
    Register a handler for a particular notification type.
    These are the types of notifications that are acceptable.

    | 'async-responses'
    | 'registrations-expired'
    | 'de-registrations'
    | 'reg-updates'
    | 'registrations'
    | 'notifications'

    :param str handler: name of the notification type
    :param fnptr cbfn: function to pass the notification channel messages to.
    :return: Nothing.
    '''
    # Map each channel name onto the attribute holding its callback.
    channel_attr = {
        "async-responses": "async_responses_callback",
        "registrations-expired": "registrations_expired_callback",
        "de-registrations": "de_registrations_callback",
        "reg-updates": "reg_updates_callback",
        "registrations": "registrations_callback",
        "notifications": "notifications_callback",
    }
    if handler in channel_attr:
        setattr(self, channel_attr[handler], cbfn)
    else:
        self.log.warn("'%s' is not a legitimate notification channel option. Please check your spelling.",handler)
# this function needs to spin off a thread that is constantally polling,
# should match asynch ID's to values and call their function
def startLongPolling(self, noWait=False):
    '''
    Start LongPolling Connector for notifications.

    :param bool noWait: Optional - use the cached values in connector, do not wait for the device to respond
    :return: Thread of constantly running LongPoll. To be used to kill the thred if necessary.
    :rtype: pythonThread
    '''
    # check Asynch ID's against insternal database of ID's
    # Call return function with the value given, maybe decode from base64?
    # NOTE(review): `wait` is computed but never handed to the polling
    # thread, so `noWait` currently has no effect (see TODO on longPoll).
    wait = ''
    if(noWait == True):
        wait = "?noWait=true"
    # check that there isn't another thread already running, only one longPolling instance per is acceptable
    if(self.longPollThread.isAlive()):
        self.log.warn("LongPolling is already active.")
    else:
        # start infinite longpolling thread
        self._stopLongPolling.clear()
        self.longPollThread.start()
        self.log.info("Spun off LongPolling thread")
    return self.longPollThread # return thread instance so user can manually intervene if necessary
# stop longpolling by switching the flag off.
def stopLongPolling(self):
    '''
    Stop LongPolling thread

    :return: none
    '''
    # Guard clause: nothing to stop if the worker is not running.
    if not self.longPollThread.isAlive():
        self.log.warn("LongPolling thread already stopped")
        return
    self._stopLongPolling.set()
    self.log.debug("set stop longpolling flag")
    return
# Thread to constantly long poll connector and process the feedback.
# TODO: pass wait / noWait on to long polling thread, currently the user can set it but it doesnt actually affect anything.
def longPoll(self, versioned=True):
    """Blocking worker loop run by the LongPolling thread.

    Repeatedly GETs /notification/pull and feeds any 200 response body to
    self.handler(); loops until the _stopLongPolling event is set.

    :param bool versioned: prepend the REST API version prefix (/v2) to the URL.
    :return: nothing
    """
    self.log.debug("LongPolling Started, self.address = %s" %self.address)
    while(not self._stopLongPolling.is_set()):
        try:
            if versioned:
                data = r.get(self.address+self.apiVersion+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
            else:
                data = r.get(self.address+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
            self.log.debug("Longpoll Returned, len = %d, statuscode=%d",len(data.text),data.status_code)
            # process callbacks
            if data.status_code == 200: # 204 means no content, do nothing
                self.handler(data.content)
                self.log.debug("Longpoll data = "+data.content)
        except:
            # NOTE(review): bare except keeps the loop alive through any
            # failure, but it also swallows SystemExit/KeyboardInterrupt
            # in this thread — consider `except Exception:`.
            self.log.error("longPolling had an issue and threw an exception")
            ex_type, ex, tb = sys.exc_info()
            traceback.print_tb(tb)
            self.log.error(sys.exc_info())
            del tb
    self.log.info("Killing Longpolling Thread")
# parse the notification channel responses and call appropriate handlers
	def handler(self,data):
		'''
		Function to handle notification data as part of Callback URL handler.

		Accepts either a ``requests`` Response object or a raw JSON string,
		parses it, and dispatches each notification channel present in the
		payload to its registered callback.

		:param str data: data posted to Callback URL by connector.
		:return: nothing (``False`` on unusable input)
		'''
		# Normalize input: a Response is unwrapped to its body; a str is used as-is.
		if isinstance(data,r.models.Response):
			self.log.debug("data is request object = %s", str(data.content))
			data = data.content
		elif isinstance(data,str):
			self.log.info("data is json string with len %d",len(data))
			if len(data) == 0:
				self.log.warn("Handler received data of 0 length, exiting handler.")
				return
		else:
			self.log.error("Input is not valid request object or json string : %s" %str(data))
			return False
		try:
			data = json.loads(data)
			# A single payload may carry several channels; dispatch each one present.
			if 'async-responses' in data.keys():
				self.async_responses_callback(data)
			if 'notifications' in data.keys():
				self.notifications_callback(data)
			if 'registrations' in data.keys():
				self.registrations_callback(data)
			if 'reg-updates' in data.keys():
				self.reg_updates_callback(data)
			if 'de-registrations' in data.keys():
				self.de_registrations_callback(data)
			if 'registrations-expired' in data.keys():
				self.registrations_expired_callback(data)
		except:
			# NOTE(review): bare except hides both JSON decode errors and
			# exceptions raised inside user callbacks
			self.log.error("handle router had an issue and threw an exception")
			ex_type, ex, tb = sys.exc_info()
			traceback.print_tb(tb)
			self.log.error(sys.exc_info())
			del tb
# Turn on / off debug messages based on the onOff variable
def debug(self,onOff,level='DEBUG'):
'''
Enable / Disable debugging
:param bool onOff: turn debugging on / off
:return: none
'''
if onOff:
if level == 'DEBUG':
self.log.setLevel(logging.DEBUG)
self._ch.setLevel(logging.DEBUG)
self.log.debug("Debugging level DEBUG enabled")
elif level == "INFO":
self.log.setLevel(logging.INFO)
self._ch.setLevel(logging.INFO)
self.log.info("Debugging level INFO enabled")
elif level == "WARN":
self.log.setLevel(logging.WARN)
self._ch.setLevel(logging.WARN)
self.log.warn("Debugging level WARN enabled")
elif level == "ERROR":
self.log.setLevel(logging.ERROR)
self._ch.setLevel(logging.ERROR)
self.log.error("Debugging level ERROR enabled")
else:
self.log.setLevel(logging.ERROR)
self._ch.setLevel(logging.ERROR)
self.log.error("Unrecognized debug level `%s`, set to default level `ERROR` instead",level)
# internal async-requests handler.
# data input is json data
	def _asyncHandler(self,data):
		'''
		Default 'async-responses' handler: match each incoming async response id
		against the asyncResult objects parked in
		``self.database['async-responses']`` by earlier requests, populate the
		match, and fire its callback.

		:param dict data: parsed notification payload containing 'async-responses'
		:return: nothing, or the populated result when an errored entry has no callback
		'''
		try:
			responses = data['async-responses']
			for entry in responses:
				if entry['id'] in self.database['async-responses'].keys():
					result = self.database['async-responses'].pop(entry['id']) # get the asynch object out of database
					# fill in async-result object
					if 'error' in entry.keys():
						# error happened, handle it
						result.error = response_codes('async-responses-handler',entry['status'])
						result.error.error = entry['error']
						result.is_done = True
						if result.callback:
							result.callback(result)
						else:
							# NOTE(review): returning here aborts processing of the
							# remaining entries in this batch — confirm intended
							return result
					else:
						# everything is good, fill it out
						result.result = b64decode(entry['payload'])	# payload arrives base64-encoded
						result.raw_data = entry
						result.status = entry['status']
						result.error = False
						for thing in entry.keys():
							result.extra[thing]=entry[thing]
						result.is_done = True
						# call associated callback function
						if result.callback:
							result.callback(result)
						else:
							self.log.warn("No callback function given")
				else:
					# TODO : object not found int asynch database
					self.log.warn("No asynch entry for '%s' found in databse",entry['id'])
		except:
			# TODO error handling here
			self.log.error("Bad data encountered and failed to elegantly handle it. ")
			ex_type, ex, tb = sys.exc_info()
			traceback.print_tb(tb)
			self.log.error(sys.exc_info())
			del tb
			return
# default handler for notifications. User should impliment all of these in
# a L2 implimentation or in their webapp.
# @input data is a dictionary
def _defaultHandler(self,data):
if 'async-responses' in data.keys():
self.log.info("async-responses detected : len = %d",len(data["async-responses"]))
self.log.debug(data["async-responses"])
if 'notifications' in data.keys():
self.log.info("notifications' detected : len = %d",len(data["notifications"]))
self.log.debug(data["notifications"])
if 'registrations' in data.keys():
self.log.info("registrations' detected : len = %d",len(data["registrations"]))
self.log.debug(data["registrations"])
if 'reg-updates' in data.keys():
# removed because this happens every 10s or so, spamming the output
self.log.info("reg-updates detected : len = %d",len(data["reg-updates"]))
self.log.debug(data["reg-updates"])
if 'de-registrations' in data.keys():
self.log.info("de-registrations detected : len = %d",len(data["de-registrations"]))
self.log.debug(data["de-registrations"])
if 'registrations-expired' in data.keys():
self.log.info("registrations-expired detected : len = %d",len(data["registrations-expired"]))
self.log.debug(data["registrations-expired"])
# make the requests.
# url is the API url to hit
# query are the optional get params
# versioned tells the API whether to hit the /v#/ version. set to false for
# commands that break with this, like the API and Connector version calls
# TODO: spin this off to be non-blocking
def _getURL(self, url,query={},versioned=True):
if versioned:
return r.get(self.address+self.apiVersion+url,headers={"Authorization":"Bearer "+self.bearer},params=query)
else:
return r.get(self.address+url,headers={"Authorization":"Bearer "+self.bearer},params=query)
# put data to URL with json payload in dataIn
def _putURL(self, url,payload=None,versioned=True):
if self._isJSON(payload):
self.log.debug("PUT payload is json")
if versioned:
return r.put(self.address+self.apiVersion+url,json=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.put(self.address+url,json=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
self.log.debug("PUT payload is NOT json")
if versioned:
return r.put(self.address+self.apiVersion+url,data=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.put(self.address+url,data=payload,headers={"Authorization":"Bearer "+self.bearer})
# put data to URL with json payload in dataIn
def _postURL(self, url,payload="",versioned=True):
addr = self.address+self.apiVersion+url if versioned else self.address+url
h = {"Authorization":"Bearer "+self.bearer}
if payload:
self.log.info("POSTing with payload: %s ",payload)
return r.post(addr,data=payload,headers=h)
else:
self.log.info("POSTing")
return r.post(addr,headers=h)
# delete endpoint
def _deleteURL(self, url,versioned=True):
if versioned:
return r.delete(self.address+self.apiVersion+url,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.delete(self.address+url,headers={"Authorization":"Bearer "+self.bearer})
# check if input is json, return true or false accordingly
def _isJSON(self,dataIn):
try:
json.dumps(dataIn)
return True
except:
self.log.debug("[_isJSON] exception triggered, input is not json")
return False
# extend dictionary class so we can instantiate multiple levels at once
class vividict(dict):
def __missing__(self, key):
value = self[key] = type(self)()
return value
# Initialization function, set the token used by this object.
	def __init__( self,
			token,
			webAddress="https://api.connector.mbed.com",
			port="80",):
		'''
		Initialize the connector instance.

		:param str token: Access Key (bearer token) from the connector portal
		:param str webAddress: Optional - connector API base URL
		:param str port: Optional - port, stored for reference (default "80")
		'''
		# set token
		self.bearer = token
		# set version of REST API
		self.apiVersion = "/v2"
		# Init database, used for callback fn's for various tasks (asynch, subscriptions...etc)
		# accessing a missing key on a vividict creates it, so these lines
		# pre-create one nested dict per notification channel
		self.database = self.vividict()
		self.database['notifications']
		self.database['registrations']
		self.database['reg-updates']
		self.database['de-registrations']
		self.database['registrations-expired']
		self.database['async-responses']
		# longpolling variable
		self._stopLongPolling = threading.Event() # must initialize false to avoid race condition
		self._stopLongPolling.clear()
		#create thread for long polling
		self.longPollThread = threading.Thread(target=self.longPoll,name="mdc-api-longpoll")
		self.longPollThread.daemon = True # Do this so the thread exits when the overall process does
		# set default webAddress and port to mbed connector
		self.address = webAddress
		self.port = port
		# Initialize the callbacks
		self.async_responses_callback = self._asyncHandler
		self.registrations_expired_callback = self._defaultHandler
		self.de_registrations_callback = self._defaultHandler
		self.reg_updates_callback = self._defaultHandler
		self.registrations_callback = self._defaultHandler
		self.notifications_callback = self._defaultHandler
		# add logger
		self.log = logging.getLogger(name="mdc-api-logger")
		self.log.setLevel(logging.ERROR)
		self._ch = logging.StreamHandler()
		self._ch.setLevel(logging.ERROR)
		formatter = logging.Formatter("\r\n[%(levelname)s \t %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
		self._ch.setFormatter(formatter)
		self.log.addHandler(self._ch)
|
ARMmbed/mbed-connector-api-python
|
mbed_connector_api/mbed_connector_api.py
|
connector.getResourceValue
|
python
|
def getResourceValue(self,ep,res,cbfn="",noResp=False,cacheOnly=False):
q = {}
result = asyncResult(callback=cbfn) #set callback fn for use in async handler
result.endpoint = ep
result.resource = res
if noResp or cacheOnly:
q['noResp'] = 'true' if noResp == True else 'false'
q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
# make query
data = self._getURL("/endpoints/"+ep+res, query=q)
result.fill(data)
if data.status_code == 200: # immediate success
result.error = False
result.is_done = True
if cbfn:
cbfn(result)
return result
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else: # fail
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
|
Get value of a specific resource on a specific endpoint.
:param str ep: name of endpoint
:param str res: name of resource
:param fnptr cbfn: Optional - callback function to be called on completion
:param bool noResp: Optional - specify no response necessary from endpoint
:param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
:return: value of the resource, usually a string
:rtype: asyncResult
|
train
|
https://github.com/ARMmbed/mbed-connector-api-python/blob/a5024a01dc67cc192c8bf7a70b251fcf0a3f279b/mbed_connector_api/mbed_connector_api.py#L182-L217
|
[
"def fill(self, data):\n\tif type(data) == r.models.Response:\n\t\ttry:\n\t\t\tself.result = json.loads(data.content)\n\t\texcept:\n\t\t\tself.result = []\n\t\t\tif isinstance(data.content,str): # string handler\n\t\t\t\tself.result = data.content\n\t\t\telif isinstance(data.content,int): # int handler\n\t\t\t\tself.log.debug(\"data returned is an integer, not sure what to do with that\")\n\t\t\telse: # all other handler\n\t\t\t\tself.log.debug(\"unhandled data type, type of content : %s\" %type(data.content))\n\t\tself.status_code = data.status_code\n\t\tself.raw_data = data.content\n\telse:\n\t\t#error\n\t\tself.log.error(\"type not found : %s\"%type(data))\n\treturn\n",
"def _getURL(self, url,query={},versioned=True):\n\tif versioned:\n\t\treturn r.get(self.address+self.apiVersion+url,headers={\"Authorization\":\"Bearer \"+self.bearer},params=query)\n\telse:\n\t\treturn r.get(self.address+url,headers={\"Authorization\":\"Bearer \"+self.bearer},params=query)\n"
] |
class connector:
"""
Interface class to use the connector.mbed.com REST API.
This class will by default handle asyncronous events.
All function return :class:'.asyncResult' objects
"""
# Return connector version number and recent rest API version number it supports
def getConnectorVersion(self):
"""
GET the current Connector version.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_mdc_version",data.status_code)
result.is_done = True
return result
# Return API version of connector
def getApiVersions(self):
"""
Get the REST API versions that connector accepts.
:returns: :class:asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/rest-versions",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_rest_version",data.status_code)
result.is_done = True
return result
# Returns metadata about connector limits as JSON blob
def getLimits(self):
"""return limits of account in async result object.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/limits")
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("limit",data.status_code)
result.is_done = True
return result
# return json list of all endpoints.
# optional type field can be used to match all endpoints of a certain type.
def getEndpoints(self,typeOfEndpoint=""):
"""
Get list of all endpoints on the domain.
:param str typeOfEndpoint: Optional filter endpoints returned by type
:return: list of all endpoints
:rtype: asyncResult
"""
q = {}
result = asyncResult()
if typeOfEndpoint:
q['type'] = typeOfEndpoint
result.extra['type'] = typeOfEndpoint
data = self._getURL("/endpoints", query = q)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_endpoints",data.status_code)
result.is_done = True
return result
# return json list of all resources on an endpoint
	def getResources(self,ep,noResp=False,cacheOnly=False):
		"""
		Get list of resources on an endpoint.

		:param str ep: Endpoint to get the resources of
		:param bool noResp: Optional - specify no response necessary from endpoint
		:param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
		:return: list of resources
		:rtype: asyncResult
		"""
		# load query params if set to other than defaults
		q = {}
		result = asyncResult()
		result.endpoint = ep
		if noResp or cacheOnly:
			q['noResp'] = 'true' if noResp == True else 'false'
			q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
		# make query
		self.log.debug("ep = %s, query=%s",ep,q)
		data = self._getURL("/endpoints/"+ep, query=q)
		result.fill(data)
		# check sucess of call
		# NOTE(review): no 202/deferred branch here — a 202 reply would be
		# reported as an error, unlike getResourceValue
		if data.status_code == 200: # sucess
			result.error = False
			self.log.debug("getResources sucess, status_code = `%s`, content = `%s`", str(data.status_code),data.content)
		else: # fail
			result.error = response_codes("get_resources",data.status_code)
			self.log.debug("getResources failed with error code `%s`" %str(data.status_code))
		result.is_done = True
		return result
# return async object
	def getResourceValue(self,ep,res,cbfn="",noResp=False,cacheOnly=False):
		"""
		Get value of a specific resource on a specific endpoint.

		:param str ep: name of endpoint
		:param str res: name of resource
		:param fnptr cbfn: Optional - callback function to be called on completion
		:param bool noResp: Optional - specify no response necessary from endpoint
		:param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
		:return: value of the resource, usually a string
		:rtype: asyncResult
		"""
		q = {}
		result = asyncResult(callback=cbfn) #set callback fn for use in async handler
		result.endpoint = ep
		result.resource = res
		# query params are only sent when at least one flag differs from its default
		if noResp or cacheOnly:
			q['noResp'] = 'true' if noResp == True else 'false'
			q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
		# make query
		data = self._getURL("/endpoints/"+ep+res, query=q)
		result.fill(data)
		if data.status_code == 200: # immediate success
			result.error = False
			result.is_done = True
			if cbfn:
				cbfn(result)
			return result
		elif data.status_code == 202:
			# deferred: park the asyncResult keyed by async-response-id so the
			# notification channel (_asyncHandler) can complete it later
			self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
		else: # fail
			result.error = response_codes("resource",data.status_code)
			result.is_done = True
			result.raw_data = data.content
			result.status_code = data.status_code
		return result
# return async object
	def putResourceValue(self,ep,res,data,cbfn=""):
		"""
		Put a value to a resource on an endpoint

		:param str ep: name of endpoint
		:param str res: name of resource
		:param str data: data to send via PUT
		:param fnptr cbfn: Optional - callback funtion to call when operation is completed
		:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
		:rtype: asyncResult
		"""
		result = asyncResult(callback=cbfn)
		result.endpoint = ep
		result.resource = res
		# NOTE: `data` is rebound here from the request payload to the HTTP response
		data = self._putURL("/endpoints/"+ep+res,payload=data)
		if data.status_code == 200: #immediate success
			result.error = False
			result.is_done = True
		elif data.status_code == 202:
			# deferred: completion arrives via the notification channel (_asyncHandler)
			self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
		else:
			result.error = response_codes("resource",data.status_code)
			result.is_done = True
			result.raw_data = data.content
			result.status_code = data.status_code
		return result
#return async object
	def postResource(self,ep,res,data="",cbfn=""):
		'''
		POST data to a resource on an endpoint.

		:param str ep: name of endpoint
		:param str res: name of resource
		:param str data: Optional - data to send via POST
		:param fnptr cbfn: Optional - callback funtion to call when operation is completed
		:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
		:rtype: asyncResult
		'''
		result = asyncResult(callback=cbfn)
		result.endpoint = ep
		result.resource = res
		# NOTE: `data` is rebound here from the request payload to the HTTP response
		data = self._postURL("/endpoints/"+ep+res,data)
		if data.status_code == 201: #immediate success (POST uses 201, not 200)
			result.error = False
			result.is_done = True
		elif data.status_code == 202:
			# deferred: completion arrives via the notification channel (_asyncHandler)
			self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
		else:
			result.error = response_codes("resource",data.status_code)
			result.is_done = True
			result.raw_data = data.content
			result.status_code = data.status_code
		return result
# return async object
	def deleteEndpoint(self,ep,cbfn=""):
		'''
		Send DELETE message to an endpoint.

		:param str ep: name of endpoint
		:param fnptr cbfn: Optional - callback funtion to call when operation is completed
		:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
		:rtype: asyncResult
		'''
		result = asyncResult(callback=cbfn)
		result.endpoint = ep
		data = self._deleteURL("/endpoints/"+ep)
		if data.status_code == 200: #immediate success
			result.error = False
			result.is_done = True
		elif data.status_code == 202:
			# deferred: completion arrives via the notification channel (_asyncHandler)
			self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
		else:
			result.error = response_codes("resource",data.status_code)
			result.is_done = True
			result.raw_data = data.content
			result.status_code = data.status_code
		return result
# subscribe to endpoint/resource, the cbfn is given an asynch object that
# represents the result. it is up to the user to impliment the notification
# channel callback in a higher level library.
	def putResourceSubscription(self,ep,res,cbfn=""):
		'''
		Subscribe to changes in a specific resource ``res`` on an endpoint ``ep``

		:param str ep: name of endpoint
		:param str res: name of resource
		:param fnptr cbfn: Optional - callback funtion to call when operation is completed
		:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
		:rtype: asyncResult
		'''
		result = asyncResult(callback=cbfn)
		result.endpoint = ep
		result.resource = res
		data = self._putURL("/subscriptions/"+ep+res)
		if data.status_code == 200: #immediate success
			result.error = False
			result.is_done = True
		elif data.status_code == 202:
			# deferred: completion arrives via the notification channel (_asyncHandler)
			self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
		else:
			result.error = response_codes("subscribe",data.status_code)
			result.is_done = True
			result.raw_data = data.content
			result.status_code = data.status_code
		return result
def deleteEndpointSubscriptions(self,ep):
'''
Delete all subscriptions on specified endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
data = self._deleteURL("/subscriptions/"+ep)
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("delete_endpoint_subscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def deleteResourceSubscription(self,ep,res):
'''
Delete subscription to a resource ``res`` on an endpoint ``ep``
:param str ep: name of endpoint
:param str res: name of resource
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
result.resource = res
data = self._deleteURL("/subscriptions/"+ep+res)
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def deleteAllSubscriptions(self):
'''
Delete all subscriptions on the domain (all endpoints, all resources)
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._deleteURL("/subscriptions/")
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
# result field is a string
def getEndpointSubscriptions(self,ep):
'''
Get list of all subscriptions on a given endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
data = self._getURL("/subscriptions/"+ep)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.content
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
# result field is a string
def getResourceSubscription(self,ep,res):
'''
Get list of all subscriptions for a resource ``res`` on an endpoint ``ep``
:param str ep: name of endpoint
:param str res: name of resource
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
result.resource = res
data = self._getURL("/subscriptions/"+ep+res)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.content
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
	def putPreSubscription(self,JSONdata):
		'''
		Set pre-subscription rules for all endpoints / resources on the domain.
		This can be useful for all current and future endpoints/resources.

		:param json JSONdata: data to use as pre-subscription data. Wildcards are permitted
		:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
		:rtype: asyncResult
		'''
		# accept a JSON string and coerce it to the expected list form
		if isinstance(JSONdata,str) and self._isJSON(JSONdata):
			self.log.warn("pre-subscription data was a string, converting to a list : %s",JSONdata)
			JSONdata = json.loads(JSONdata) # convert json string to list
		# NOTE(review): invalid data is only logged here — the PUT below is
		# still attempted with the bad payload; confirm this is intended
		if not (isinstance(JSONdata,list) and self._isJSON(JSONdata)):
			self.log.error("pre-subscription data is not valid. Please make sure it is a valid JSON list")
		result = asyncResult()
		data = self._putURL("/subscriptions",JSONdata, versioned=False)
		if data.status_code == 204: # immediate success with no response
			result.error = False
			result.is_done = True
			result.result = []
		else:
			result.error = response_codes("presubscription",data.status_code)
			result.is_done = True
			result.raw_data = data.content
			result.status_code = data.status_code
		return result
def getPreSubscription(self):
'''
Get the current pre-subscription data from connector
:return: JSON that represents the pre-subscription data in the ``.result`` field
:rtype: asyncResult
'''
result = asyncResult()
data = self._getURL("/subscriptions")
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.json()
else:
result.error = response_codes("presubscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def putCallback(self,url,headers=""):
'''
Set the callback URL. To be used in place of LongPolling when deploying a webapp.
**note**: make sure you set up a callback URL in your web app
:param str url: complete url, including port, where the callback url is located
:param str headers: Optional - Headers to have Connector send back with all calls
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
payloadToSend = {"url":url}
if headers:
payload['headers':headers]
data = self._putURL(url="/notification/callback",payload=payloadToSend, versioned=False)
if data.status_code == 204: #immediate success
result.error = False
result.result = data.content
else:
result.error = response_codes("put_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
def getCallback(self):
'''
Get the callback URL currently registered with Connector.
:return: callback url in ``.result``, error if applicable in ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._getURL("/notification/callback",versioned=False)
if data.status_code == 200: #immediate success
result.error = False
result.result = data.json()
else:
result.error = response_codes("get_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
def deleteCallback(self):
'''
Delete the Callback URL currently registered with Connector.
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._deleteURL("/notification/callback")
if data.status_code == 204: #immediate success
result.result = data.content
result.error = False
else:
result.error = response_codes("delete_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
# set a specific handler to call the cbfn
def setHandler(self,handler,cbfn):
'''
Register a handler for a particular notification type.
These are the types of notifications that are acceptable.
| 'async-responses'
| 'registrations-expired'
| 'de-registrations'
| 'reg-updates'
| 'registrations'
| 'notifications'
:param str handler: name of the notification type
:param fnptr cbfn: function to pass the notification channel messages to.
:return: Nothing.
'''
if handler == "async-responses":
self.async_responses_callback = cbfn
elif handler == "registrations-expired":
self.registrations_expired_callback = cbfn
elif handler == "de-registrations":
self.de_registrations_callback = cbfn
elif handler == "reg-updates":
self.reg_updates_callback = cbfn
elif handler == "registrations":
self.registrations_callback = cbfn
elif handler == "notifications":
self.notifications_callback = cbfn
else:
self.log.warn("'%s' is not a legitimate notification channel option. Please check your spelling.",handler)
# this function needs to spin off a thread that is constantally polling,
# should match asynch ID's to values and call their function
def startLongPolling(self, noWait=False):
'''
Start LongPolling Connector for notifications.
:param bool noWait: Optional - use the cached values in connector, do not wait for the device to respond
:return: Thread of constantly running LongPoll. To be used to kill the thred if necessary.
:rtype: pythonThread
'''
# check Asynch ID's against insternal database of ID's
# Call return function with the value given, maybe decode from base64?
wait = ''
if(noWait == True):
wait = "?noWait=true"
# check that there isn't another thread already running, only one longPolling instance per is acceptable
if(self.longPollThread.isAlive()):
self.log.warn("LongPolling is already active.")
else:
# start infinite longpolling thread
self._stopLongPolling.clear()
self.longPollThread.start()
self.log.info("Spun off LongPolling thread")
return self.longPollThread # return thread instance so user can manually intervene if necessary
# stop longpolling by switching the flag off.
def stopLongPolling(self):
'''
Stop LongPolling thread
:return: none
'''
if(self.longPollThread.isAlive()):
self._stopLongPolling.set()
self.log.debug("set stop longpolling flag")
else:
self.log.warn("LongPolling thread already stopped")
return
# Thread to constantly long poll connector and process the feedback.
# TODO: pass wait / noWait on to long polling thread, currently the user can set it but it doesnt actually affect anything.
	def longPoll(self, versioned=True):
		'''
		Worker loop for the LongPolling thread: repeatedly GET
		``/notification/pull`` and feed every HTTP 200 body into ``handler``
		until the ``_stopLongPolling`` event is set.

		:param bool versioned: prepend the REST API version to the URL (default True)
		:return: nothing
		'''
		self.log.debug("LongPolling Started, self.address = %s" %self.address)
		while(not self._stopLongPolling.is_set()):
			try:
				if versioned:
					data = r.get(self.address+self.apiVersion+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
				else:
					data = r.get(self.address+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
				self.log.debug("Longpoll Returned, len = %d, statuscode=%d",len(data.text),data.status_code)
				# process callbacks
				if data.status_code == 200: # 204 means no content, do nothing
					self.handler(data.content)
					self.log.debug("Longpoll data = "+data.content)
			except:
				# NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
				self.log.error("longPolling had an issue and threw an exception")
				ex_type, ex, tb = sys.exc_info()
				traceback.print_tb(tb)
				self.log.error(sys.exc_info())
				del tb	# break the frame/traceback reference cycle
		self.log.info("Killing Longpolling Thread")
# parse the notification channel responses and call appropriate handlers
	def handler(self,data):
		'''
		Function to handle notification data as part of Callback URL handler.

		Accepts either a ``requests`` Response object or a raw JSON string,
		parses it, and dispatches each notification channel present in the
		payload to its registered callback.

		:param str data: data posted to Callback URL by connector.
		:return: nothing (``False`` on unusable input)
		'''
		# Normalize input: a Response is unwrapped to its body; a str is used as-is.
		if isinstance(data,r.models.Response):
			self.log.debug("data is request object = %s", str(data.content))
			data = data.content
		elif isinstance(data,str):
			self.log.info("data is json string with len %d",len(data))
			if len(data) == 0:
				self.log.warn("Handler received data of 0 length, exiting handler.")
				return
		else:
			self.log.error("Input is not valid request object or json string : %s" %str(data))
			return False
		try:
			data = json.loads(data)
			# A single payload may carry several channels; dispatch each one present.
			if 'async-responses' in data.keys():
				self.async_responses_callback(data)
			if 'notifications' in data.keys():
				self.notifications_callback(data)
			if 'registrations' in data.keys():
				self.registrations_callback(data)
			if 'reg-updates' in data.keys():
				self.reg_updates_callback(data)
			if 'de-registrations' in data.keys():
				self.de_registrations_callback(data)
			if 'registrations-expired' in data.keys():
				self.registrations_expired_callback(data)
		except:
			# NOTE(review): bare except hides both JSON decode errors and
			# exceptions raised inside user callbacks
			self.log.error("handle router had an issue and threw an exception")
			ex_type, ex, tb = sys.exc_info()
			traceback.print_tb(tb)
			self.log.error(sys.exc_info())
			del tb
# Turn on / off debug messages based on the onOff variable
def debug(self,onOff,level='DEBUG'):
'''
Enable / Disable debugging
:param bool onOff: turn debugging on / off
:return: none
'''
if onOff:
if level == 'DEBUG':
self.log.setLevel(logging.DEBUG)
self._ch.setLevel(logging.DEBUG)
self.log.debug("Debugging level DEBUG enabled")
elif level == "INFO":
self.log.setLevel(logging.INFO)
self._ch.setLevel(logging.INFO)
self.log.info("Debugging level INFO enabled")
elif level == "WARN":
self.log.setLevel(logging.WARN)
self._ch.setLevel(logging.WARN)
self.log.warn("Debugging level WARN enabled")
elif level == "ERROR":
self.log.setLevel(logging.ERROR)
self._ch.setLevel(logging.ERROR)
self.log.error("Debugging level ERROR enabled")
else:
self.log.setLevel(logging.ERROR)
self._ch.setLevel(logging.ERROR)
self.log.error("Unrecognized debug level `%s`, set to default level `ERROR` instead",level)
# internal async-requests handler.
# data input is json data
def _asyncHandler(self,data):
    """Match incoming async-responses to parked asyncResult objects and complete them.

    Entries are looked up by their ``id`` in ``self.database['async-responses']``
    (where the request methods parked them on a 202 reply), filled in, and their
    callback fired if one was registered.

    :param dict data: parsed notification payload containing an 'async-responses' list.
    :return: the completed asyncResult only on the error path when no callback
             was registered; otherwise None (results are delivered via callbacks).
    """
    try:
        responses = data['async-responses']
        for entry in responses:
            if entry['id'] in self.database['async-responses'].keys():
                result = self.database['async-responses'].pop(entry['id']) # get the asynch object out of database
                # fill in async-result object
                if 'error' in entry.keys():
                    # error happened, handle it
                    result.error = response_codes('async-responses-handler',entry['status'])
                    result.error.error = entry['error']
                    result.is_done = True
                    if result.callback:
                        result.callback(result)
                    else:
                        return result
                else:
                    # everything is good, fill it out
                    # payload arrives base64-encoded from Connector; decode it here
                    result.result = b64decode(entry['payload'])
                    result.raw_data = entry
                    result.status = entry['status']
                    result.error = False
                    # copy every field of the entry into .extra for caller inspection
                    for thing in entry.keys():
                        result.extra[thing]=entry[thing]
                    result.is_done = True
                    # call associated callback function
                    if result.callback:
                        result.callback(result)
                    else:
                        self.log.warn("No callback function given")
            else:
                # TODO : object not found in async database
                self.log.warn("No asynch entry for '%s' found in databse",entry['id'])
    except:
        # TODO error handling here
        # NOTE(review): bare except deliberately keeps the notification path
        # alive on malformed payloads; the traceback is logged instead.
        self.log.error("Bad data encountered and failed to elegantly handle it. ")
        ex_type, ex, tb = sys.exc_info()
        traceback.print_tb(tb)
        self.log.error(sys.exc_info())
        del tb
    return
# default handler for notifications. User should impliment all of these in
# a L2 implimentation or in their webapp.
# @input data is a dictionary
def _defaultHandler(self,data):
    """Log a summary (and a debug dump) for each notification category present
    in ``data``. Used as the default callback for all channels until the user
    registers their own via setHandler().

    :param dict data: parsed notification-channel payload.
    """
    if 'async-responses' in data.keys():
        self.log.info("async-responses detected : len = %d",len(data["async-responses"]))
        self.log.debug(data["async-responses"])
    if 'notifications' in data.keys():
        self.log.info("notifications' detected : len = %d",len(data["notifications"]))
        self.log.debug(data["notifications"])
    if 'registrations' in data.keys():
        self.log.info("registrations' detected : len = %d",len(data["registrations"]))
        self.log.debug(data["registrations"])
    if 'reg-updates' in data.keys():
        # removed because this happens every 10s or so, spamming the output
        self.log.info("reg-updates detected : len = %d",len(data["reg-updates"]))
        self.log.debug(data["reg-updates"])
    if 'de-registrations' in data.keys():
        self.log.info("de-registrations detected : len = %d",len(data["de-registrations"]))
        self.log.debug(data["de-registrations"])
    if 'registrations-expired' in data.keys():
        self.log.info("registrations-expired detected : len = %d",len(data["registrations-expired"]))
        self.log.debug(data["registrations-expired"])
# make the requests.
# url is the API url to hit
# query are the optional get params
# versioned tells the API whether to hit the /v#/ version. set to false for
# commands that break with this, like the API and Connector version calls
# TODO: spin this off to be non-blocking
def _getURL(self, url, query=None, versioned=True):
    """Issue an authenticated GET against the Connector REST API.

    :param str url: API path (appended to the base address).
    :param dict query: Optional GET parameters. Changed from a mutable ``{}``
        default (shared-state hazard); ``requests`` treats ``params=None``
        exactly like an empty dict, so behavior is unchanged.
    :param bool versioned: prepend the API version segment when True.
    :return: the requests Response object.
    """
    addr = self.address + self.apiVersion + url if versioned else self.address + url
    return r.get(addr, headers={"Authorization": "Bearer " + self.bearer}, params=query)
# put data to URL with json payload in dataIn
def _putURL(self, url, payload=None, versioned=True):
    """PUT ``payload`` to the given API path with the Bearer auth header.

    JSON-serializable payloads are sent via the ``json=`` kwarg, anything
    else as a raw body via ``data=``.
    """
    addr = (self.address + self.apiVersion + url) if versioned else (self.address + url)
    hdrs = {"Authorization": "Bearer " + self.bearer}
    if self._isJSON(payload):
        self.log.debug("PUT payload is json")
        return r.put(addr, json=payload, headers=hdrs)
    self.log.debug("PUT payload is NOT json")
    return r.put(addr, data=payload, headers=hdrs)
# put data to URL with json payload in dataIn
def _postURL(self, url, payload="", versioned=True):
    """POST to the given API path, sending ``payload`` as the body when non-empty."""
    addr = (self.address + self.apiVersion + url) if versioned else (self.address + url)
    h = {"Authorization": "Bearer " + self.bearer}
    if not payload:
        self.log.info("POSTing")
        return r.post(addr, headers=h)
    self.log.info("POSTing with payload: %s ", payload)
    return r.post(addr, data=payload, headers=h)
# delete endpoint
def _deleteURL(self, url, versioned=True):
    """DELETE the given API path with the Bearer auth header."""
    addr = (self.address + self.apiVersion + url) if versioned else (self.address + url)
    return r.delete(addr, headers={"Authorization": "Bearer " + self.bearer})
# check if input is json, return true or false accordingly
def _isJSON(self,dataIn):
    """Return True when ``dataIn`` can be serialized by json.dumps, else False.

    NOTE(review): despite the name this checks JSON *serializability* of a
    Python object, not whether a string contains valid JSON -- any plain
    string passes. Confirm callers rely on this before renaming/changing.
    """
    try:
        json.dumps(dataIn)
        return True
    except:
        self.log.debug("[_isJSON] exception triggered, input is not json")
        return False
# extend dictionary class so we can instantiate multiple levels at once
class vividict(dict):
    """dict subclass that transparently creates nested vividicts on missing keys,
    letting callers write ``d['a']['b'] = 1`` without pre-creating levels."""
    def __missing__(self, missing_key):
        child = type(self)()
        self[missing_key] = child
        return child
# Initialization function, set the token used by this object.
def __init__( self,
              token,
              webAddress="https://api.connector.mbed.com",
              port="80",):
    """Create a Connector API client bound to an access token.

    :param str token: Connector access token, sent as a Bearer header on every request.
    :param str webAddress: base URL of the Connector REST API.
    :param str port: port number.  NOTE(review): stored but not visibly
        appended to the address here -- confirm intended use.
    """
    # set token
    self.bearer = token
    # set version of REST API
    self.apiVersion = "/v2"
    # Init database, used for callback fn's for various tasks (asynch, subscriptions...etc)
    self.database = self.vividict()
    # touching each key pre-creates the nested dict via vividict.__missing__
    self.database['notifications']
    self.database['registrations']
    self.database['reg-updates']
    self.database['de-registrations']
    self.database['registrations-expired']
    self.database['async-responses']
    # longpolling variable
    self._stopLongPolling = threading.Event() # must initialize false to avoid race condition
    self._stopLongPolling.clear()
    #create thread for long polling
    self.longPollThread = threading.Thread(target=self.longPoll,name="mdc-api-longpoll")
    self.longPollThread.daemon = True # Do this so the thread exits when the overall process does
    # set default webAddress and port to mbed connector
    self.address = webAddress
    self.port = port
    # Initialize the callbacks
    self.async_responses_callback = self._asyncHandler
    self.registrations_expired_callback = self._defaultHandler
    self.de_registrations_callback = self._defaultHandler
    self.reg_updates_callback = self._defaultHandler
    self.registrations_callback = self._defaultHandler
    self.notifications_callback = self._defaultHandler
    # add logger
    self.log = logging.getLogger(name="mdc-api-logger")
    self.log.setLevel(logging.ERROR)
    self._ch = logging.StreamHandler()
    self._ch.setLevel(logging.ERROR)
    formatter = logging.Formatter("\r\n[%(levelname)s \t %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
    self._ch.setFormatter(formatter)
    self.log.addHandler(self._ch)
|
ARMmbed/mbed-connector-api-python
|
mbed_connector_api/mbed_connector_api.py
|
connector.putResourceValue
|
python
|
def putResourceValue(self,ep,res,data,cbfn=""):
result = asyncResult(callback=cbfn)
result.endpoint = ep
result.resource = res
data = self._putURL("/endpoints/"+ep+res,payload=data)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
|
Put a value to a resource on an endpoint
:param str ep: name of endpoint
:param str res: name of resource
:param str data: data to send via PUT
:param fnptr cbfn: Optional - callback function to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
|
train
|
https://github.com/ARMmbed/mbed-connector-api-python/blob/a5024a01dc67cc192c8bf7a70b251fcf0a3f279b/mbed_connector_api/mbed_connector_api.py#L220-L245
|
[
"def _putURL(self, url,payload=None,versioned=True):\n\tif self._isJSON(payload):\n\t\tself.log.debug(\"PUT payload is json\")\n\t\tif versioned:\n\t\t\treturn r.put(self.address+self.apiVersion+url,json=payload,headers={\"Authorization\":\"Bearer \"+self.bearer})\n\t\telse:\n\t\t\treturn r.put(self.address+url,json=payload,headers={\"Authorization\":\"Bearer \"+self.bearer})\n\telse:\n\t\tself.log.debug(\"PUT payload is NOT json\")\n\t\tif versioned:\n\t\t\treturn r.put(self.address+self.apiVersion+url,data=payload,headers={\"Authorization\":\"Bearer \"+self.bearer})\n\t\telse:\n\t\t\treturn r.put(self.address+url,data=payload,headers={\"Authorization\":\"Bearer \"+self.bearer})\n"
] |
class connector:
"""
Interface class to use the connector.mbed.com REST API.
This class will by default handle asyncronous events.
All function return :class:'.asyncResult' objects
"""
# Return connector version number and recent rest API version number it supports
def getConnectorVersion(self):
    """
    GET the current Connector version.

    :returns: asyncResult object, populates error and result fields
    :rtype: asyncResult
    """
    reply = self._getURL("/", versioned=False)
    out = asyncResult()
    out.fill(reply)
    out.error = False if reply.status_code == 200 else response_codes("get_mdc_version", reply.status_code)
    out.is_done = True
    return out
# Return API version of connector
def getApiVersions(self):
    """
    Get the REST API versions that connector accepts.

    :returns: asyncResult object, populates error and result fields
    :rtype: asyncResult
    """
    reply = self._getURL("/rest-versions", versioned=False)
    out = asyncResult()
    out.fill(reply)
    out.error = False if reply.status_code == 200 else response_codes("get_rest_version", reply.status_code)
    out.is_done = True
    return out
# Returns metadata about connector limits as JSON blob
def getLimits(self):
    """Return account limits in an asyncResult object.

    :returns: asyncResult object, populates error and result fields
    :rtype: asyncResult
    """
    reply = self._getURL("/limits")
    out = asyncResult()
    out.fill(reply)
    out.error = False if reply.status_code == 200 else response_codes("limit", reply.status_code)
    out.is_done = True
    return out
# return json list of all endpoints.
# optional type field can be used to match all endpoints of a certain type.
def getEndpoints(self, typeOfEndpoint=""):
    """
    Get list of all endpoints on the domain.

    :param str typeOfEndpoint: Optional filter endpoints returned by type
    :return: list of all endpoints
    :rtype: asyncResult
    """
    out = asyncResult()
    params = {}
    if typeOfEndpoint:
        params['type'] = typeOfEndpoint
        out.extra['type'] = typeOfEndpoint
    reply = self._getURL("/endpoints", query=params)
    out.fill(reply)
    out.error = False if reply.status_code == 200 else response_codes("get_endpoints", reply.status_code)
    out.is_done = True
    return out
# return json list of all resources on an endpoint
def getResources(self,ep,noResp=False,cacheOnly=False):
    """
    Get list of resources on an endpoint.

    :param str ep: Endpoint to get the resources of
    :param bool noResp: Optional - specify no response necessary from endpoint
    :param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
    :return: list of resources
    :rtype: asyncResult
    """
    # load query params if set to other than defaults
    q = {}
    result = asyncResult()
    result.endpoint = ep
    if noResp or cacheOnly:
        # Connector expects string booleans in the query, not Python bools
        q['noResp'] = 'true' if noResp == True else 'false'
        q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
    # make query
    self.log.debug("ep = %s, query=%s",ep,q)
    data = self._getURL("/endpoints/"+ep, query=q)
    result.fill(data)
    # check success of call
    if data.status_code == 200: # success
        result.error = False
        self.log.debug("getResources sucess, status_code = `%s`, content = `%s`", str(data.status_code),data.content)
    else: # fail
        result.error = response_codes("get_resources",data.status_code)
        self.log.debug("getResources failed with error code `%s`" %str(data.status_code))
    result.is_done = True
    return result
# return async object
def getResourceValue(self,ep,res,cbfn="",noResp=False,cacheOnly=False):
    """
    Get value of a specific resource on a specific endpoint.

    :param str ep: name of endpoint
    :param str res: name of resource (appended to the URL as-is; expected to begin with '/')
    :param fnptr cbfn: Optional - callback function to be called on completion
    :param bool noResp: Optional - specify no response necessary from endpoint
    :param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
    :return: value of the resource, usually a string
    :rtype: asyncResult
    """
    q = {}
    result = asyncResult(callback=cbfn) #set callback fn for use in async handler
    result.endpoint = ep
    result.resource = res
    if noResp or cacheOnly:
        # Connector expects string booleans in the query, not Python bools
        q['noResp'] = 'true' if noResp == True else 'false'
        q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
    # make query
    data = self._getURL("/endpoints/"+ep+res, query=q)
    result.fill(data)
    if data.status_code == 200: # immediate success
        # cached/immediate answer: fire the callback right away and return
        result.error = False
        result.is_done = True
        if cbfn:
            cbfn(result)
        return result
    elif data.status_code == 202:
        # deferred answer: park the result keyed by async-response-id until
        # the notification channel resolves it (see _asyncHandler)
        self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
    else: # fail
        result.error = response_codes("resource",data.status_code)
        result.is_done = True
    result.raw_data = data.content
    result.status_code = data.status_code
    return result
# return async object
def putResourceValue(self,ep,res,data,cbfn=""):
    """
    Put a value to a resource on an endpoint.

    :param str ep: name of endpoint
    :param str res: name of resource (appended to the URL as-is; expected to begin with '/')
    :param str data: data to send via PUT
    :param fnptr cbfn: Optional - callback function to call when the deferred (202) operation completes
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    """
    result = asyncResult(callback=cbfn)
    result.endpoint = ep
    result.resource = res
    # NOTE: `data` is rebound from the request payload to the HTTP response here
    data = self._putURL("/endpoints/"+ep+res,payload=data)
    if data.status_code == 200: #immediate success
        # NOTE(review): unlike getResourceValue, cbfn is not invoked on
        # immediate success -- confirm whether that is intentional.
        result.error = False
        result.is_done = True
    elif data.status_code == 202:
        # deferred; parked until the notification channel resolves it (_asyncHandler)
        self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
    else:
        result.error = response_codes("resource",data.status_code)
        result.is_done = True
    result.raw_data = data.content
    result.status_code = data.status_code
    return result
#return async object
def postResource(self,ep,res,data="",cbfn=""):
    '''
    POST data to a resource on an endpoint.

    :param str ep: name of endpoint
    :param str res: name of resource (appended to the URL as-is; expected to begin with '/')
    :param str data: Optional - data to send via POST
    :param fnptr cbfn: Optional - callback function to call when the deferred (202) operation completes
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult(callback=cbfn)
    result.endpoint = ep
    result.resource = res
    # NOTE: `data` is rebound from the request payload to the HTTP response here
    data = self._postURL("/endpoints/"+ep+res,data)
    if data.status_code == 201: #immediate success
        # NOTE(review): cbfn is not invoked on immediate success -- confirm intent.
        result.error = False
        result.is_done = True
    elif data.status_code == 202:
        # deferred; parked until the notification channel resolves it (_asyncHandler)
        self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
    else:
        result.error = response_codes("resource",data.status_code)
        result.is_done = True
    result.raw_data = data.content
    result.status_code = data.status_code
    return result
# return async object
def deleteEndpoint(self,ep,cbfn=""):
    '''
    Send DELETE message to an endpoint.

    :param str ep: name of endpoint
    :param fnptr cbfn: Optional - callback function to call when the deferred (202) operation completes
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult(callback=cbfn)
    result.endpoint = ep
    data = self._deleteURL("/endpoints/"+ep)
    if data.status_code == 200: #immediate success
        # NOTE(review): cbfn is not invoked on immediate success -- confirm intent.
        result.error = False
        result.is_done = True
    elif data.status_code == 202:
        # deferred; parked until the notification channel resolves it (_asyncHandler)
        self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
    else:
        result.error = response_codes("resource",data.status_code)
        result.is_done = True
    result.raw_data = data.content
    result.status_code = data.status_code
    return result
# subscribe to endpoint/resource, the cbfn is given an asynch object that
# represents the result. it is up to the user to impliment the notification
# channel callback in a higher level library.
def putResourceSubscription(self,ep,res,cbfn=""):
    '''
    Subscribe to changes in a specific resource ``res`` on an endpoint ``ep``.

    :param str ep: name of endpoint
    :param str res: name of resource (appended to the URL as-is; expected to begin with '/')
    :param fnptr cbfn: Optional - callback function to call when the deferred (202) operation completes
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult(callback=cbfn)
    result.endpoint = ep
    result.resource = res
    data = self._putURL("/subscriptions/"+ep+res)
    if data.status_code == 200: #immediate success
        # NOTE(review): cbfn is not invoked on immediate success -- confirm intent.
        result.error = False
        result.is_done = True
    elif data.status_code == 202:
        # deferred; parked until the notification channel resolves it (_asyncHandler)
        self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
    else:
        result.error = response_codes("subscribe",data.status_code)
        result.is_done = True
    result.raw_data = data.content
    result.status_code = data.status_code
    return result
def deleteEndpointSubscriptions(self, ep):
    '''
    Delete all subscriptions on specified endpoint ``ep``.

    :param str ep: name of endpoint
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult()
    out.endpoint = ep
    reply = self._deleteURL("/subscriptions/" + ep)
    if reply.status_code == 204:  # immediate success
        out.error = False
    else:
        out.error = response_codes("delete_endpoint_subscription", reply.status_code)
        out.raw_data = reply.content
        out.status_code = reply.status_code
    out.is_done = True
    return out
def deleteResourceSubscription(self, ep, res):
    '''
    Delete subscription to a resource ``res`` on an endpoint ``ep``.

    :param str ep: name of endpoint
    :param str res: name of resource
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult()
    out.endpoint = ep
    out.resource = res
    reply = self._deleteURL("/subscriptions/" + ep + res)
    if reply.status_code == 204:  # immediate success
        out.error = False
    else:
        out.error = response_codes("unsubscribe", reply.status_code)
        out.raw_data = reply.content
        out.status_code = reply.status_code
    out.is_done = True
    return out
def deleteAllSubscriptions(self):
    '''
    Delete all subscriptions on the domain (all endpoints, all resources).

    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult()
    reply = self._deleteURL("/subscriptions/")
    if reply.status_code == 204:  # immediate success
        out.error = False
    else:
        out.error = response_codes("unsubscribe", reply.status_code)
        out.raw_data = reply.content
        out.status_code = reply.status_code
    out.is_done = True
    return out
# return async object
# result field is a string
def getEndpointSubscriptions(self, ep):
    '''
    Get list of all subscriptions on a given endpoint ``ep``.

    :param str ep: name of endpoint
    :return: asyncResult; ``.result`` carries the raw response body on success
    :rtype: asyncResult
    '''
    out = asyncResult()
    out.endpoint = ep
    reply = self._getURL("/subscriptions/" + ep)
    if reply.status_code == 200:  # immediate success
        out.error = False
        out.result = reply.content
    else:
        out.error = response_codes("unsubscribe", reply.status_code)
        out.raw_data = reply.content
        out.status_code = reply.status_code
    out.is_done = True
    return out
# return async object
# result field is a string
def getResourceSubscription(self, ep, res):
    '''
    Get subscription status for a resource ``res`` on an endpoint ``ep``.

    :param str ep: name of endpoint
    :param str res: name of resource
    :return: asyncResult; ``.result`` carries the raw response body on success
    :rtype: asyncResult
    '''
    out = asyncResult()
    out.endpoint = ep
    out.resource = res
    reply = self._getURL("/subscriptions/" + ep + res)
    if reply.status_code == 200:  # immediate success
        out.error = False
        out.result = reply.content
    else:
        out.error = response_codes("unsubscribe", reply.status_code)
        out.raw_data = reply.content
        out.status_code = reply.status_code
    out.is_done = True
    return out
def putPreSubscription(self,JSONdata):
    '''
    Set pre-subscription rules for all endpoints / resources on the domain.
    This can be useful for all current and future endpoints/resources.

    :param json JSONdata: data to use as pre-subscription data. Wildcards are permitted
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    # accept a JSON string and convert it to the list form connector expects
    if isinstance(JSONdata,str) and self._isJSON(JSONdata):
        self.log.warn("pre-subscription data was a string, converting to a list : %s",JSONdata)
        JSONdata = json.loads(JSONdata) # convert json string to list
    if not (isinstance(JSONdata,list) and self._isJSON(JSONdata)):
        # NOTE(review): invalid data is only logged -- the PUT below still
        # proceeds with the bad payload. Confirm whether it should bail out.
        self.log.error("pre-subscription data is not valid. Please make sure it is a valid JSON list")
    result = asyncResult()
    data = self._putURL("/subscriptions",JSONdata, versioned=False)
    if data.status_code == 204: # immediate success with no response
        result.error = False
        result.is_done = True
        result.result = []
    else:
        result.error = response_codes("presubscription",data.status_code)
        result.is_done = True
        result.raw_data = data.content
        result.status_code = data.status_code
    return result
def getPreSubscription(self):
    '''
    Get the current pre-subscription data from connector.

    :return: JSON that represents the pre-subscription data in the ``.result`` field
    :rtype: asyncResult
    '''
    out = asyncResult()
    reply = self._getURL("/subscriptions")
    if reply.status_code == 200:  # immediate success
        out.error = False
        out.result = reply.json()
    else:
        out.error = response_codes("presubscription", reply.status_code)
        out.raw_data = reply.content
        out.status_code = reply.status_code
    out.is_done = True
    return out
def putCallback(self, url, headers=""):
    '''
    Set the callback URL. To be used in place of LongPolling when deploying a webapp.

    **note**: make sure you set up a callback URL in your web app

    :param str url: complete url, including port, where the callback url is located
    :param str headers: Optional - Headers to have Connector send back with all calls
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult()
    payloadToSend = {"url": url}
    if headers:
        # Bug fix: previously `payload['headers':headers]` referenced an
        # undefined name (`payload`) with slice syntax and raised at runtime.
        # Attach the optional headers to the payload dict instead.
        payloadToSend["headers"] = headers
    data = self._putURL(url="/notification/callback", payload=payloadToSend, versioned=False)
    if data.status_code == 204:  # immediate success
        result.error = False
        result.result = data.content
    else:
        result.error = response_codes("put_callback_url", data.status_code)
        result.raw_data = data.content
        result.status_code = data.status_code
    result.is_done = True
    return result
def getCallback(self):
    '''
    Get the callback URL currently registered with Connector.

    :return: callback url in ``.result``, error if applicable in ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult()
    reply = self._getURL("/notification/callback", versioned=False)
    if reply.status_code == 200:  # immediate success
        out.error = False
        out.result = reply.json()
    else:
        out.error = response_codes("get_callback_url", reply.status_code)
        out.raw_data = reply.content
        out.status_code = reply.status_code
    out.is_done = True
    return out
def deleteCallback(self):
    '''
    Delete the Callback URL currently registered with Connector.

    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult()
    reply = self._deleteURL("/notification/callback")
    if reply.status_code == 204:  # immediate success
        out.result = reply.content
        out.error = False
    else:
        out.error = response_codes("delete_callback_url", reply.status_code)
        out.raw_data = reply.content
        out.status_code = reply.status_code
    out.is_done = True
    return out
# set a specific handler to call the cbfn
def setHandler(self, handler, cbfn):
    '''
    Register a handler for a particular notification type.
    These are the types of notifications that are acceptable.

    | 'async-responses'
    | 'registrations-expired'
    | 'de-registrations'
    | 'reg-updates'
    | 'registrations'
    | 'notifications'

    :param str handler: name of the notification type
    :param fnptr cbfn: function to pass the notification channel messages to.
    :return: Nothing.
    '''
    # dispatch table: channel name -> attribute holding its callback
    attr_for = {
        "async-responses": "async_responses_callback",
        "registrations-expired": "registrations_expired_callback",
        "de-registrations": "de_registrations_callback",
        "reg-updates": "reg_updates_callback",
        "registrations": "registrations_callback",
        "notifications": "notifications_callback",
    }
    if handler in attr_for:
        setattr(self, attr_for[handler], cbfn)
    else:
        self.log.warn("'%s' is not a legitimate notification channel option. Please check your spelling.", handler)
# this function needs to spin off a thread that is constantally polling,
# should match asynch ID's to values and call their function
def startLongPolling(self, noWait=False):
    '''
    Start LongPolling Connector for notifications.

    :param bool noWait: Optional - use the cached values in connector, do not wait for the device to respond
    :return: Thread of constantly running LongPoll. To be used to kill the thread if necessary.
    :rtype: pythonThread
    '''
    # check Asynch ID's against internal database of ID's
    # Call return function with the value given, maybe decode from base64?
    wait = ''
    if(noWait == True):
        wait = "?noWait=true"
    # NOTE(review): `wait` is built but never handed to the polling thread,
    # so noWait currently has no effect (see the TODO on longPoll).
    # check that there isn't another thread already running, only one longPolling instance per is acceptable
    if(self.longPollThread.isAlive()):
        self.log.warn("LongPolling is already active.")
    else:
        # start infinite longpolling thread
        self._stopLongPolling.clear()
        self.longPollThread.start()
        self.log.info("Spun off LongPolling thread")
    return self.longPollThread # return thread instance so user can manually intervene if necessary
# stop longpolling by switching the flag off.
def stopLongPolling(self):
    '''
    Signal the LongPolling thread to stop.

    :return: none
    '''
    if not self.longPollThread.isAlive():
        self.log.warn("LongPolling thread already stopped")
        return
    self._stopLongPolling.set()
    self.log.debug("set stop longpolling flag")
# Thread to constantly long poll connector and process the feedback.
# TODO: pass wait / noWait on to long polling thread, currently the user can set it but it doesnt actually affect anything.
def longPoll(self, versioned=True):
    """Blocking loop that repeatedly GETs /notification/pull and routes each
    200 payload through self.handler(). Runs until stopLongPolling() sets the
    _stopLongPolling event; intended to run on the daemon thread created in
    __init__.

    :param bool versioned: hit the API-version-prefixed pull URL when True.
    """
    self.log.debug("LongPolling Started, self.address = %s" %self.address)
    while(not self._stopLongPolling.is_set()):
        try:
            if versioned:
                data = r.get(self.address+self.apiVersion+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
            else:
                data = r.get(self.address+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
            self.log.debug("Longpoll Returned, len = %d, statuscode=%d",len(data.text),data.status_code)
            # process callbacks
            if data.status_code == 200: # 204 means no content, do nothing
                self.handler(data.content)
                self.log.debug("Longpoll data = "+data.content)
        except:
            # bare except keeps the polling loop alive through any network or
            # parsing failure; traceback is logged and polling continues
            self.log.error("longPolling had an issue and threw an exception")
            ex_type, ex, tb = sys.exc_info()
            traceback.print_tb(tb)
            self.log.error(sys.exc_info())
            del tb
    self.log.info("Killing Longpolling Thread")
# parse the notification channel responses and call appropriate handlers
def handler(self, data):
    '''
    Function to handle notification data as part of Callback URL handler.

    Accepts either a requests Response object or a raw JSON string, parses
    it, and dispatches each notification category present to its registered
    callback (a single message may carry several categories).

    :param str data: data posted to Callback URL by connector.
    :return: nothing on success / empty input; False for invalid input types
    '''
    if isinstance(data, r.models.Response):
        self.log.debug("data is request object = %s", str(data.content))
        data = data.content
    elif isinstance(data, str):
        self.log.info("data is json string with len %d", len(data))
        if len(data) == 0:
            self.log.warn("Handler received data of 0 length, exiting handler.")
            return
    else:
        self.log.error("Input is not valid request object or json string : %s" % str(data))
        return False
    try:
        data = json.loads(data)
        # Idiom fix: membership tests on the dict directly instead of .keys()
        if 'async-responses' in data:
            self.async_responses_callback(data)
        if 'notifications' in data:
            self.notifications_callback(data)
        if 'registrations' in data:
            self.registrations_callback(data)
        if 'reg-updates' in data:
            self.reg_updates_callback(data)
        if 'de-registrations' in data:
            self.de_registrations_callback(data)
        if 'registrations-expired' in data:
            self.registrations_expired_callback(data)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; all other failures are logged as before.
        self.log.error("handle router had an issue and threw an exception")
        ex_type, ex, tb = sys.exc_info()
        traceback.print_tb(tb)
        self.log.error(sys.exc_info())
        del tb
# Turn on / off debug messages based on the onOff variable
def debug(self, onOff, level='DEBUG'):
    '''
    Enable / Disable debugging.

    :param bool onOff: turn debugging on / off
    :param str level: logging level to apply when enabling
                      ('DEBUG', 'INFO', 'WARN', 'ERROR')
    :return: none
    '''
    if onOff:
        if level == 'DEBUG':
            self.log.setLevel(logging.DEBUG)
            self._ch.setLevel(logging.DEBUG)
            self.log.debug("Debugging level DEBUG enabled")
        elif level == "INFO":
            self.log.setLevel(logging.INFO)
            self._ch.setLevel(logging.INFO)
            self.log.info("Debugging level INFO enabled")
        elif level == "WARN":
            self.log.setLevel(logging.WARN)
            self._ch.setLevel(logging.WARN)
            self.log.warn("Debugging level WARN enabled")
        elif level == "ERROR":
            self.log.setLevel(logging.ERROR)
            self._ch.setLevel(logging.ERROR)
            self.log.error("Debugging level ERROR enabled")
        else:
            self.log.setLevel(logging.ERROR)
            self._ch.setLevel(logging.ERROR)
            self.log.error("Unrecognized debug level `%s`, set to default level `ERROR` instead", level)
    else:
        # Bug fix: debug(False) was previously a no-op even though the
        # docstring promises "Disable". Restore the default quiet level
        # (ERROR, as set in __init__) so debugging can actually be turned off.
        self.log.setLevel(logging.ERROR)
        self._ch.setLevel(logging.ERROR)
# internal async-requests handler.
# data input is json data
def _asyncHandler(self,data):
    """Match incoming async-responses to parked asyncResult objects and complete them.

    Entries are looked up by their ``id`` in ``self.database['async-responses']``
    (where the request methods parked them on a 202 reply), filled in, and their
    callback fired if one was registered.

    :param dict data: parsed notification payload containing an 'async-responses' list.
    :return: the completed asyncResult only on the error path when no callback
             was registered; otherwise None (results are delivered via callbacks).
    """
    try:
        responses = data['async-responses']
        for entry in responses:
            if entry['id'] in self.database['async-responses'].keys():
                result = self.database['async-responses'].pop(entry['id']) # get the asynch object out of database
                # fill in async-result object
                if 'error' in entry.keys():
                    # error happened, handle it
                    result.error = response_codes('async-responses-handler',entry['status'])
                    result.error.error = entry['error']
                    result.is_done = True
                    if result.callback:
                        result.callback(result)
                    else:
                        return result
                else:
                    # everything is good, fill it out
                    # payload arrives base64-encoded from Connector; decode it here
                    result.result = b64decode(entry['payload'])
                    result.raw_data = entry
                    result.status = entry['status']
                    result.error = False
                    # copy every field of the entry into .extra for caller inspection
                    for thing in entry.keys():
                        result.extra[thing]=entry[thing]
                    result.is_done = True
                    # call associated callback function
                    if result.callback:
                        result.callback(result)
                    else:
                        self.log.warn("No callback function given")
            else:
                # TODO : object not found in async database
                self.log.warn("No asynch entry for '%s' found in databse",entry['id'])
    except:
        # TODO error handling here
        # NOTE(review): bare except deliberately keeps the notification path
        # alive on malformed payloads; the traceback is logged instead.
        self.log.error("Bad data encountered and failed to elegantly handle it. ")
        ex_type, ex, tb = sys.exc_info()
        traceback.print_tb(tb)
        self.log.error(sys.exc_info())
        del tb
    return
# default handler for notifications. User should impliment all of these in
# a L2 implimentation or in their webapp.
# @input data is a dictionary
def _defaultHandler(self,data):
    """Log a summary (and a debug dump) for each notification category present
    in ``data``. Used as the default callback for all channels until the user
    registers their own via setHandler().

    :param dict data: parsed notification-channel payload.
    """
    if 'async-responses' in data.keys():
        self.log.info("async-responses detected : len = %d",len(data["async-responses"]))
        self.log.debug(data["async-responses"])
    if 'notifications' in data.keys():
        self.log.info("notifications' detected : len = %d",len(data["notifications"]))
        self.log.debug(data["notifications"])
    if 'registrations' in data.keys():
        self.log.info("registrations' detected : len = %d",len(data["registrations"]))
        self.log.debug(data["registrations"])
    if 'reg-updates' in data.keys():
        # removed because this happens every 10s or so, spamming the output
        self.log.info("reg-updates detected : len = %d",len(data["reg-updates"]))
        self.log.debug(data["reg-updates"])
    if 'de-registrations' in data.keys():
        self.log.info("de-registrations detected : len = %d",len(data["de-registrations"]))
        self.log.debug(data["de-registrations"])
    if 'registrations-expired' in data.keys():
        self.log.info("registrations-expired detected : len = %d",len(data["registrations-expired"]))
        self.log.debug(data["registrations-expired"])
# make the requests.
# url is the API url to hit
# query are the optional get params
# versioned tells the API whether to hit the /v#/ version. set to false for
# commands that break with this, like the API and Connector version calls
# TODO: spin this off to be non-blocking
def _getURL(self, url,query={},versioned=True):
if versioned:
return r.get(self.address+self.apiVersion+url,headers={"Authorization":"Bearer "+self.bearer},params=query)
else:
return r.get(self.address+url,headers={"Authorization":"Bearer "+self.bearer},params=query)
# put data to URL with json payload in dataIn
def _putURL(self, url,payload=None,versioned=True):
if self._isJSON(payload):
self.log.debug("PUT payload is json")
if versioned:
return r.put(self.address+self.apiVersion+url,json=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.put(self.address+url,json=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
self.log.debug("PUT payload is NOT json")
if versioned:
return r.put(self.address+self.apiVersion+url,data=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.put(self.address+url,data=payload,headers={"Authorization":"Bearer "+self.bearer})
# put data to URL with json payload in dataIn
def _postURL(self, url,payload="",versioned=True):
addr = self.address+self.apiVersion+url if versioned else self.address+url
h = {"Authorization":"Bearer "+self.bearer}
if payload:
self.log.info("POSTing with payload: %s ",payload)
return r.post(addr,data=payload,headers=h)
else:
self.log.info("POSTing")
return r.post(addr,headers=h)
# delete endpoint
def _deleteURL(self, url,versioned=True):
if versioned:
return r.delete(self.address+self.apiVersion+url,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.delete(self.address+url,headers={"Authorization":"Bearer "+self.bearer})
# check if input is json, return true or false accordingly
def _isJSON(self,dataIn):
try:
json.dumps(dataIn)
return True
except:
self.log.debug("[_isJSON] exception triggered, input is not json")
return False
# extend dictionary class so we can instantiate multiple levels at once
class vividict(dict):
def __missing__(self, key):
value = self[key] = type(self)()
return value
# Initialization function, set the token used by this object.
def __init__( self,
token,
webAddress="https://api.connector.mbed.com",
port="80",):
# set token
self.bearer = token
# set version of REST API
self.apiVersion = "/v2"
# Init database, used for callback fn's for various tasks (asynch, subscriptions...etc)
self.database = self.vividict()
self.database['notifications']
self.database['registrations']
self.database['reg-updates']
self.database['de-registrations']
self.database['registrations-expired']
self.database['async-responses']
# longpolling variable
self._stopLongPolling = threading.Event() # must initialize false to avoid race condition
self._stopLongPolling.clear()
#create thread for long polling
self.longPollThread = threading.Thread(target=self.longPoll,name="mdc-api-longpoll")
self.longPollThread.daemon = True # Do this so the thread exits when the overall process does
# set default webAddress and port to mbed connector
self.address = webAddress
self.port = port
# Initialize the callbacks
self.async_responses_callback = self._asyncHandler
self.registrations_expired_callback = self._defaultHandler
self.de_registrations_callback = self._defaultHandler
self.reg_updates_callback = self._defaultHandler
self.registrations_callback = self._defaultHandler
self.notifications_callback = self._defaultHandler
# add logger
self.log = logging.getLogger(name="mdc-api-logger")
self.log.setLevel(logging.ERROR)
self._ch = logging.StreamHandler()
self._ch.setLevel(logging.ERROR)
formatter = logging.Formatter("\r\n[%(levelname)s \t %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
self._ch.setFormatter(formatter)
self.log.addHandler(self._ch)
|
ARMmbed/mbed-connector-api-python
|
mbed_connector_api/mbed_connector_api.py
|
connector.deleteEndpoint
|
python
|
def deleteEndpoint(self,ep,cbfn=""):
'''
Send DELETE message to an endpoint.
:param str ep: name of endpoint
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult(callback=cbfn)
result.endpoint = ep
data = self._deleteURL("/endpoints/"+ep)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
|
Send DELETE message to an endpoint.
:param str ep: name of endpoint
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
|
train
|
https://github.com/ARMmbed/mbed-connector-api-python/blob/a5024a01dc67cc192c8bf7a70b251fcf0a3f279b/mbed_connector_api/mbed_connector_api.py#L276-L298
|
[
"def _deleteURL(self, url,versioned=True):\n\tif versioned:\n\t\treturn r.delete(self.address+self.apiVersion+url,headers={\"Authorization\":\"Bearer \"+self.bearer})\n\telse:\n\t\treturn r.delete(self.address+url,headers={\"Authorization\":\"Bearer \"+self.bearer})\n"
] |
class connector:
"""
Interface class to use the connector.mbed.com REST API.
This class will by default handle asyncronous events.
All function return :class:'.asyncResult' objects
"""
# Return connector version number and recent rest API version number it supports
def getConnectorVersion(self):
"""
GET the current Connector version.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_mdc_version",data.status_code)
result.is_done = True
return result
# Return API version of connector
def getApiVersions(self):
"""
Get the REST API versions that connector accepts.
:returns: :class:asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/rest-versions",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_rest_version",data.status_code)
result.is_done = True
return result
# Returns metadata about connector limits as JSON blob
def getLimits(self):
"""return limits of account in async result object.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/limits")
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("limit",data.status_code)
result.is_done = True
return result
# return json list of all endpoints.
# optional type field can be used to match all endpoints of a certain type.
def getEndpoints(self,typeOfEndpoint=""):
"""
Get list of all endpoints on the domain.
:param str typeOfEndpoint: Optional filter endpoints returned by type
:return: list of all endpoints
:rtype: asyncResult
"""
q = {}
result = asyncResult()
if typeOfEndpoint:
q['type'] = typeOfEndpoint
result.extra['type'] = typeOfEndpoint
data = self._getURL("/endpoints", query = q)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_endpoints",data.status_code)
result.is_done = True
return result
# return json list of all resources on an endpoint
def getResources(self,ep,noResp=False,cacheOnly=False):
"""
Get list of resources on an endpoint.
:param str ep: Endpoint to get the resources of
:param bool noResp: Optional - specify no response necessary from endpoint
:param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
:return: list of resources
:rtype: asyncResult
"""
# load query params if set to other than defaults
q = {}
result = asyncResult()
result.endpoint = ep
if noResp or cacheOnly:
q['noResp'] = 'true' if noResp == True else 'false'
q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
# make query
self.log.debug("ep = %s, query=%s",ep,q)
data = self._getURL("/endpoints/"+ep, query=q)
result.fill(data)
# check sucess of call
if data.status_code == 200: # sucess
result.error = False
self.log.debug("getResources sucess, status_code = `%s`, content = `%s`", str(data.status_code),data.content)
else: # fail
result.error = response_codes("get_resources",data.status_code)
self.log.debug("getResources failed with error code `%s`" %str(data.status_code))
result.is_done = True
return result
# return async object
def getResourceValue(self,ep,res,cbfn="",noResp=False,cacheOnly=False):
"""
Get value of a specific resource on a specific endpoint.
:param str ep: name of endpoint
:param str res: name of resource
:param fnptr cbfn: Optional - callback function to be called on completion
:param bool noResp: Optional - specify no response necessary from endpoint
:param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
:return: value of the resource, usually a string
:rtype: asyncResult
"""
q = {}
result = asyncResult(callback=cbfn) #set callback fn for use in async handler
result.endpoint = ep
result.resource = res
if noResp or cacheOnly:
q['noResp'] = 'true' if noResp == True else 'false'
q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
# make query
data = self._getURL("/endpoints/"+ep+res, query=q)
result.fill(data)
if data.status_code == 200: # immediate success
result.error = False
result.is_done = True
if cbfn:
cbfn(result)
return result
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else: # fail
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
def putResourceValue(self,ep,res,data,cbfn=""):
"""
Put a value to a resource on an endpoint
:param str ep: name of endpoint
:param str res: name of resource
:param str data: data to send via PUT
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
"""
result = asyncResult(callback=cbfn)
result.endpoint = ep
result.resource = res
data = self._putURL("/endpoints/"+ep+res,payload=data)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
#return async object
def postResource(self,ep,res,data="",cbfn=""):
'''
POST data to a resource on an endpoint.
:param str ep: name of endpoint
:param str res: name of resource
:param str data: Optional - data to send via POST
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult(callback=cbfn)
result.endpoint = ep
result.resource = res
data = self._postURL("/endpoints/"+ep+res,data)
if data.status_code == 201: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
def deleteEndpoint(self,ep,cbfn=""):
'''
Send DELETE message to an endpoint.
:param str ep: name of endpoint
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult(callback=cbfn)
result.endpoint = ep
data = self._deleteURL("/endpoints/"+ep)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# subscribe to endpoint/resource, the cbfn is given an asynch object that
# represents the result. it is up to the user to impliment the notification
# channel callback in a higher level library.
def putResourceSubscription(self,ep,res,cbfn=""):
'''
Subscribe to changes in a specific resource ``res`` on an endpoint ``ep``
:param str ep: name of endpoint
:param str res: name of resource
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult(callback=cbfn)
result.endpoint = ep
result.resource = res
data = self._putURL("/subscriptions/"+ep+res)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("subscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def deleteEndpointSubscriptions(self,ep):
'''
Delete all subscriptions on specified endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
data = self._deleteURL("/subscriptions/"+ep)
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("delete_endpoint_subscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def deleteResourceSubscription(self,ep,res):
'''
Delete subscription to a resource ``res`` on an endpoint ``ep``
:param str ep: name of endpoint
:param str res: name of resource
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
result.resource = res
data = self._deleteURL("/subscriptions/"+ep+res)
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def deleteAllSubscriptions(self):
'''
Delete all subscriptions on the domain (all endpoints, all resources)
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._deleteURL("/subscriptions/")
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
# result field is a string
def getEndpointSubscriptions(self,ep):
'''
Get list of all subscriptions on a given endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
data = self._getURL("/subscriptions/"+ep)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.content
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
# result field is a string
def getResourceSubscription(self,ep,res):
'''
Get list of all subscriptions for a resource ``res`` on an endpoint ``ep``
:param str ep: name of endpoint
:param str res: name of resource
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
result.resource = res
data = self._getURL("/subscriptions/"+ep+res)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.content
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def putPreSubscription(self,JSONdata):
'''
Set pre-subscription rules for all endpoints / resources on the domain.
This can be useful for all current and future endpoints/resources.
:param json JSONdata: data to use as pre-subscription data. Wildcards are permitted
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
if isinstance(JSONdata,str) and self._isJSON(JSONdata):
self.log.warn("pre-subscription data was a string, converting to a list : %s",JSONdata)
JSONdata = json.loads(JSONdata) # convert json string to list
if not (isinstance(JSONdata,list) and self._isJSON(JSONdata)):
self.log.error("pre-subscription data is not valid. Please make sure it is a valid JSON list")
result = asyncResult()
data = self._putURL("/subscriptions",JSONdata, versioned=False)
if data.status_code == 204: # immediate success with no response
result.error = False
result.is_done = True
result.result = []
else:
result.error = response_codes("presubscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def getPreSubscription(self):
'''
Get the current pre-subscription data from connector
:return: JSON that represents the pre-subscription data in the ``.result`` field
:rtype: asyncResult
'''
result = asyncResult()
data = self._getURL("/subscriptions")
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.json()
else:
result.error = response_codes("presubscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def putCallback(self,url,headers=""):
'''
Set the callback URL. To be used in place of LongPolling when deploying a webapp.
**note**: make sure you set up a callback URL in your web app
:param str url: complete url, including port, where the callback url is located
:param str headers: Optional - Headers to have Connector send back with all calls
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
payloadToSend = {"url":url}
if headers:
payload['headers':headers]
data = self._putURL(url="/notification/callback",payload=payloadToSend, versioned=False)
if data.status_code == 204: #immediate success
result.error = False
result.result = data.content
else:
result.error = response_codes("put_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
def getCallback(self):
'''
Get the callback URL currently registered with Connector.
:return: callback url in ``.result``, error if applicable in ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._getURL("/notification/callback",versioned=False)
if data.status_code == 200: #immediate success
result.error = False
result.result = data.json()
else:
result.error = response_codes("get_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
def deleteCallback(self):
'''
Delete the Callback URL currently registered with Connector.
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._deleteURL("/notification/callback")
if data.status_code == 204: #immediate success
result.result = data.content
result.error = False
else:
result.error = response_codes("delete_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
# set a specific handler to call the cbfn
def setHandler(self,handler,cbfn):
'''
Register a handler for a particular notification type.
These are the types of notifications that are acceptable.
| 'async-responses'
| 'registrations-expired'
| 'de-registrations'
| 'reg-updates'
| 'registrations'
| 'notifications'
:param str handler: name of the notification type
:param fnptr cbfn: function to pass the notification channel messages to.
:return: Nothing.
'''
if handler == "async-responses":
self.async_responses_callback = cbfn
elif handler == "registrations-expired":
self.registrations_expired_callback = cbfn
elif handler == "de-registrations":
self.de_registrations_callback = cbfn
elif handler == "reg-updates":
self.reg_updates_callback = cbfn
elif handler == "registrations":
self.registrations_callback = cbfn
elif handler == "notifications":
self.notifications_callback = cbfn
else:
self.log.warn("'%s' is not a legitimate notification channel option. Please check your spelling.",handler)
# this function needs to spin off a thread that is constantally polling,
# should match asynch ID's to values and call their function
def startLongPolling(self, noWait=False):
'''
Start LongPolling Connector for notifications.
:param bool noWait: Optional - use the cached values in connector, do not wait for the device to respond
:return: Thread of constantly running LongPoll. To be used to kill the thred if necessary.
:rtype: pythonThread
'''
# check Asynch ID's against insternal database of ID's
# Call return function with the value given, maybe decode from base64?
wait = ''
if(noWait == True):
wait = "?noWait=true"
# check that there isn't another thread already running, only one longPolling instance per is acceptable
if(self.longPollThread.isAlive()):
self.log.warn("LongPolling is already active.")
else:
# start infinite longpolling thread
self._stopLongPolling.clear()
self.longPollThread.start()
self.log.info("Spun off LongPolling thread")
return self.longPollThread # return thread instance so user can manually intervene if necessary
# stop longpolling by switching the flag off.
def stopLongPolling(self):
'''
Stop LongPolling thread
:return: none
'''
if(self.longPollThread.isAlive()):
self._stopLongPolling.set()
self.log.debug("set stop longpolling flag")
else:
self.log.warn("LongPolling thread already stopped")
return
# Thread to constantly long poll connector and process the feedback.
# TODO: pass wait / noWait on to long polling thread, currently the user can set it but it doesnt actually affect anything.
def longPoll(self, versioned=True):
self.log.debug("LongPolling Started, self.address = %s" %self.address)
while(not self._stopLongPolling.is_set()):
try:
if versioned:
data = r.get(self.address+self.apiVersion+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
else:
data = r.get(self.address+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
self.log.debug("Longpoll Returned, len = %d, statuscode=%d",len(data.text),data.status_code)
# process callbacks
if data.status_code == 200: # 204 means no content, do nothing
self.handler(data.content)
self.log.debug("Longpoll data = "+data.content)
except:
self.log.error("longPolling had an issue and threw an exception")
ex_type, ex, tb = sys.exc_info()
traceback.print_tb(tb)
self.log.error(sys.exc_info())
del tb
self.log.info("Killing Longpolling Thread")
# parse the notification channel responses and call appropriate handlers
def handler(self,data):
'''
Function to handle notification data as part of Callback URL handler.
:param str data: data posted to Callback URL by connector.
:return: nothing
'''
if isinstance(data,r.models.Response):
self.log.debug("data is request object = %s", str(data.content))
data = data.content
elif isinstance(data,str):
self.log.info("data is json string with len %d",len(data))
if len(data) == 0:
self.log.warn("Handler received data of 0 length, exiting handler.")
return
else:
self.log.error("Input is not valid request object or json string : %s" %str(data))
return False
try:
data = json.loads(data)
if 'async-responses' in data.keys():
self.async_responses_callback(data)
if 'notifications' in data.keys():
self.notifications_callback(data)
if 'registrations' in data.keys():
self.registrations_callback(data)
if 'reg-updates' in data.keys():
self.reg_updates_callback(data)
if 'de-registrations' in data.keys():
self.de_registrations_callback(data)
if 'registrations-expired' in data.keys():
self.registrations_expired_callback(data)
except:
self.log.error("handle router had an issue and threw an exception")
ex_type, ex, tb = sys.exc_info()
traceback.print_tb(tb)
self.log.error(sys.exc_info())
del tb
# Turn on / off debug messages based on the onOff variable
def debug(self,onOff,level='DEBUG'):
'''
Enable / Disable debugging
:param bool onOff: turn debugging on / off
:return: none
'''
if onOff:
if level == 'DEBUG':
self.log.setLevel(logging.DEBUG)
self._ch.setLevel(logging.DEBUG)
self.log.debug("Debugging level DEBUG enabled")
elif level == "INFO":
self.log.setLevel(logging.INFO)
self._ch.setLevel(logging.INFO)
self.log.info("Debugging level INFO enabled")
elif level == "WARN":
self.log.setLevel(logging.WARN)
self._ch.setLevel(logging.WARN)
self.log.warn("Debugging level WARN enabled")
elif level == "ERROR":
self.log.setLevel(logging.ERROR)
self._ch.setLevel(logging.ERROR)
self.log.error("Debugging level ERROR enabled")
else:
self.log.setLevel(logging.ERROR)
self._ch.setLevel(logging.ERROR)
self.log.error("Unrecognized debug level `%s`, set to default level `ERROR` instead",level)
# internal async-requests handler.
# data input is json data
def _asyncHandler(self,data):
try:
responses = data['async-responses']
for entry in responses:
if entry['id'] in self.database['async-responses'].keys():
result = self.database['async-responses'].pop(entry['id']) # get the asynch object out of database
# fill in async-result object
if 'error' in entry.keys():
# error happened, handle it
result.error = response_codes('async-responses-handler',entry['status'])
result.error.error = entry['error']
result.is_done = True
if result.callback:
result.callback(result)
else:
return result
else:
# everything is good, fill it out
result.result = b64decode(entry['payload'])
result.raw_data = entry
result.status = entry['status']
result.error = False
for thing in entry.keys():
result.extra[thing]=entry[thing]
result.is_done = True
# call associated callback function
if result.callback:
result.callback(result)
else:
self.log.warn("No callback function given")
else:
# TODO : object not found int asynch database
self.log.warn("No asynch entry for '%s' found in databse",entry['id'])
except:
# TODO error handling here
self.log.error("Bad data encountered and failed to elegantly handle it. ")
ex_type, ex, tb = sys.exc_info()
traceback.print_tb(tb)
self.log.error(sys.exc_info())
del tb
return
# default handler for notifications. User should impliment all of these in
# a L2 implimentation or in their webapp.
# @input data is a dictionary
def _defaultHandler(self,data):
if 'async-responses' in data.keys():
self.log.info("async-responses detected : len = %d",len(data["async-responses"]))
self.log.debug(data["async-responses"])
if 'notifications' in data.keys():
self.log.info("notifications' detected : len = %d",len(data["notifications"]))
self.log.debug(data["notifications"])
if 'registrations' in data.keys():
self.log.info("registrations' detected : len = %d",len(data["registrations"]))
self.log.debug(data["registrations"])
if 'reg-updates' in data.keys():
# removed because this happens every 10s or so, spamming the output
self.log.info("reg-updates detected : len = %d",len(data["reg-updates"]))
self.log.debug(data["reg-updates"])
if 'de-registrations' in data.keys():
self.log.info("de-registrations detected : len = %d",len(data["de-registrations"]))
self.log.debug(data["de-registrations"])
if 'registrations-expired' in data.keys():
self.log.info("registrations-expired detected : len = %d",len(data["registrations-expired"]))
self.log.debug(data["registrations-expired"])
# make the requests.
# url is the API url to hit
# query are the optional get params
# versioned tells the API whether to hit the /v#/ version. set to false for
# commands that break with this, like the API and Connector version calls
# TODO: spin this off to be non-blocking
def _getURL(self, url,query={},versioned=True):
if versioned:
return r.get(self.address+self.apiVersion+url,headers={"Authorization":"Bearer "+self.bearer},params=query)
else:
return r.get(self.address+url,headers={"Authorization":"Bearer "+self.bearer},params=query)
# put data to URL with json payload in dataIn
def _putURL(self, url,payload=None,versioned=True):
if self._isJSON(payload):
self.log.debug("PUT payload is json")
if versioned:
return r.put(self.address+self.apiVersion+url,json=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.put(self.address+url,json=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
self.log.debug("PUT payload is NOT json")
if versioned:
return r.put(self.address+self.apiVersion+url,data=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.put(self.address+url,data=payload,headers={"Authorization":"Bearer "+self.bearer})
# put data to URL with json payload in dataIn
def _postURL(self, url,payload="",versioned=True):
addr = self.address+self.apiVersion+url if versioned else self.address+url
h = {"Authorization":"Bearer "+self.bearer}
if payload:
self.log.info("POSTing with payload: %s ",payload)
return r.post(addr,data=payload,headers=h)
else:
self.log.info("POSTing")
return r.post(addr,headers=h)
# delete endpoint
def _deleteURL(self, url,versioned=True):
if versioned:
return r.delete(self.address+self.apiVersion+url,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.delete(self.address+url,headers={"Authorization":"Bearer "+self.bearer})
# check if input is json, return true or false accordingly
def _isJSON(self,dataIn):
try:
json.dumps(dataIn)
return True
except:
self.log.debug("[_isJSON] exception triggered, input is not json")
return False
# extend dictionary class so we can instantiate multiple levels at once
class vividict(dict):
def __missing__(self, key):
value = self[key] = type(self)()
return value
# Initialization function, set the token used by this object.
def __init__( self,
token,
webAddress="https://api.connector.mbed.com",
port="80",):
# set token
self.bearer = token
# set version of REST API
self.apiVersion = "/v2"
# Init database, used for callback fn's for various tasks (asynch, subscriptions...etc)
self.database = self.vividict()
self.database['notifications']
self.database['registrations']
self.database['reg-updates']
self.database['de-registrations']
self.database['registrations-expired']
self.database['async-responses']
# longpolling variable
self._stopLongPolling = threading.Event() # must initialize false to avoid race condition
self._stopLongPolling.clear()
#create thread for long polling
self.longPollThread = threading.Thread(target=self.longPoll,name="mdc-api-longpoll")
self.longPollThread.daemon = True # Do this so the thread exits when the overall process does
# set default webAddress and port to mbed connector
self.address = webAddress
self.port = port
# Initialize the callbacks
self.async_responses_callback = self._asyncHandler
self.registrations_expired_callback = self._defaultHandler
self.de_registrations_callback = self._defaultHandler
self.reg_updates_callback = self._defaultHandler
self.registrations_callback = self._defaultHandler
self.notifications_callback = self._defaultHandler
# add logger
self.log = logging.getLogger(name="mdc-api-logger")
self.log.setLevel(logging.ERROR)
self._ch = logging.StreamHandler()
self._ch.setLevel(logging.ERROR)
formatter = logging.Formatter("\r\n[%(levelname)s \t %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
self._ch.setFormatter(formatter)
self.log.addHandler(self._ch)
|
ARMmbed/mbed-connector-api-python
|
mbed_connector_api/mbed_connector_api.py
|
connector.deleteEndpointSubscriptions
|
python
|
def deleteEndpointSubscriptions(self,ep):
'''
Delete all subscriptions on specified endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
data = self._deleteURL("/subscriptions/"+ep)
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("delete_endpoint_subscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
|
Delete all subscriptions on specified endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
|
train
|
https://github.com/ARMmbed/mbed-connector-api-python/blob/a5024a01dc67cc192c8bf7a70b251fcf0a3f279b/mbed_connector_api/mbed_connector_api.py#L329-L348
|
[
"def _deleteURL(self, url,versioned=True):\n\tif versioned:\n\t\treturn r.delete(self.address+self.apiVersion+url,headers={\"Authorization\":\"Bearer \"+self.bearer})\n\telse:\n\t\treturn r.delete(self.address+url,headers={\"Authorization\":\"Bearer \"+self.bearer})\n"
] |
class connector:
"""
Interface class to use the connector.mbed.com REST API.
This class will by default handle asyncronous events.
All function return :class:'.asyncResult' objects
"""
# Return connector version number and recent rest API version number it supports
def getConnectorVersion(self):
"""
GET the current Connector version.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_mdc_version",data.status_code)
result.is_done = True
return result
# Return API version of connector
def getApiVersions(self):
"""
Get the REST API versions that connector accepts.
:returns: :class:asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/rest-versions",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_rest_version",data.status_code)
result.is_done = True
return result
# Returns metadata about connector limits as JSON blob
def getLimits(self):
"""return limits of account in async result object.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/limits")
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("limit",data.status_code)
result.is_done = True
return result
# return json list of all endpoints.
# optional type field can be used to match all endpoints of a certain type.
def getEndpoints(self,typeOfEndpoint=""):
"""
Get list of all endpoints on the domain.
:param str typeOfEndpoint: Optional filter endpoints returned by type
:return: list of all endpoints
:rtype: asyncResult
"""
q = {}
result = asyncResult()
if typeOfEndpoint:
q['type'] = typeOfEndpoint
result.extra['type'] = typeOfEndpoint
data = self._getURL("/endpoints", query = q)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_endpoints",data.status_code)
result.is_done = True
return result
# return json list of all resources on an endpoint
def getResources(self,ep,noResp=False,cacheOnly=False):
"""
Get list of resources on an endpoint.
:param str ep: Endpoint to get the resources of
:param bool noResp: Optional - specify no response necessary from endpoint
:param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
:return: list of resources
:rtype: asyncResult
"""
# load query params if set to other than defaults
q = {}
result = asyncResult()
result.endpoint = ep
if noResp or cacheOnly:
q['noResp'] = 'true' if noResp == True else 'false'
q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
# make query
self.log.debug("ep = %s, query=%s",ep,q)
data = self._getURL("/endpoints/"+ep, query=q)
result.fill(data)
# check sucess of call
if data.status_code == 200: # sucess
result.error = False
self.log.debug("getResources sucess, status_code = `%s`, content = `%s`", str(data.status_code),data.content)
else: # fail
result.error = response_codes("get_resources",data.status_code)
self.log.debug("getResources failed with error code `%s`" %str(data.status_code))
result.is_done = True
return result
# return async object
def getResourceValue(self,ep,res,cbfn="",noResp=False,cacheOnly=False):
"""
Get value of a specific resource on a specific endpoint.
:param str ep: name of endpoint
:param str res: name of resource
:param fnptr cbfn: Optional - callback function to be called on completion
:param bool noResp: Optional - specify no response necessary from endpoint
:param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
:return: value of the resource, usually a string
:rtype: asyncResult
"""
q = {}
result = asyncResult(callback=cbfn) #set callback fn for use in async handler
result.endpoint = ep
result.resource = res
if noResp or cacheOnly:
q['noResp'] = 'true' if noResp == True else 'false'
q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
# make query
data = self._getURL("/endpoints/"+ep+res, query=q)
result.fill(data)
if data.status_code == 200: # immediate success
result.error = False
result.is_done = True
if cbfn:
cbfn(result)
return result
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else: # fail
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
def putResourceValue(self,ep,res,data,cbfn=""):
"""
Put a value to a resource on an endpoint
:param str ep: name of endpoint
:param str res: name of resource
:param str data: data to send via PUT
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
"""
result = asyncResult(callback=cbfn)
result.endpoint = ep
result.resource = res
data = self._putURL("/endpoints/"+ep+res,payload=data)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
#return async object
def postResource(self,ep,res,data="",cbfn=""):
'''
POST data to a resource on an endpoint.
:param str ep: name of endpoint
:param str res: name of resource
:param str data: Optional - data to send via POST
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult(callback=cbfn)
result.endpoint = ep
result.resource = res
data = self._postURL("/endpoints/"+ep+res,data)
if data.status_code == 201: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
def deleteEndpoint(self,ep,cbfn=""):
'''
Send DELETE message to an endpoint.
:param str ep: name of endpoint
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult(callback=cbfn)
result.endpoint = ep
data = self._deleteURL("/endpoints/"+ep)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("resource",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# subscribe to endpoint/resource, the cbfn is given an asynch object that
# represents the result. it is up to the user to impliment the notification
# channel callback in a higher level library.
def putResourceSubscription(self,ep,res,cbfn=""):
'''
Subscribe to changes in a specific resource ``res`` on an endpoint ``ep``
:param str ep: name of endpoint
:param str res: name of resource
:param fnptr cbfn: Optional - callback funtion to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult(callback=cbfn)
result.endpoint = ep
result.resource = res
data = self._putURL("/subscriptions/"+ep+res)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
elif data.status_code == 202:
self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
else:
result.error = response_codes("subscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def deleteEndpointSubscriptions(self,ep):
'''
Delete all subscriptions on specified endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
data = self._deleteURL("/subscriptions/"+ep)
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("delete_endpoint_subscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def deleteResourceSubscription(self,ep,res):
'''
Delete subscription to a resource ``res`` on an endpoint ``ep``
:param str ep: name of endpoint
:param str res: name of resource
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
result.resource = res
data = self._deleteURL("/subscriptions/"+ep+res)
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def deleteAllSubscriptions(self):
'''
Delete all subscriptions on the domain (all endpoints, all resources)
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._deleteURL("/subscriptions/")
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
# result field is a string
def getEndpointSubscriptions(self,ep):
'''
Get list of all subscriptions on a given endpoint ``ep``
:param str ep: name of endpoint
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
data = self._getURL("/subscriptions/"+ep)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.content
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
# return async object
# result field is a string
def getResourceSubscription(self,ep,res):
'''
Get list of all subscriptions for a resource ``res`` on an endpoint ``ep``
:param str ep: name of endpoint
:param str res: name of resource
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
result.endpoint = ep
result.resource = res
data = self._getURL("/subscriptions/"+ep+res)
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.content
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def putPreSubscription(self,JSONdata):
'''
Set pre-subscription rules for all endpoints / resources on the domain.
This can be useful for all current and future endpoints/resources.
:param json JSONdata: data to use as pre-subscription data. Wildcards are permitted
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
if isinstance(JSONdata,str) and self._isJSON(JSONdata):
self.log.warn("pre-subscription data was a string, converting to a list : %s",JSONdata)
JSONdata = json.loads(JSONdata) # convert json string to list
if not (isinstance(JSONdata,list) and self._isJSON(JSONdata)):
self.log.error("pre-subscription data is not valid. Please make sure it is a valid JSON list")
result = asyncResult()
data = self._putURL("/subscriptions",JSONdata, versioned=False)
if data.status_code == 204: # immediate success with no response
result.error = False
result.is_done = True
result.result = []
else:
result.error = response_codes("presubscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def getPreSubscription(self):
'''
Get the current pre-subscription data from connector
:return: JSON that represents the pre-subscription data in the ``.result`` field
:rtype: asyncResult
'''
result = asyncResult()
data = self._getURL("/subscriptions")
if data.status_code == 200: #immediate success
result.error = False
result.is_done = True
result.result = data.json()
else:
result.error = response_codes("presubscription",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
def putCallback(self,url,headers=""):
'''
Set the callback URL. To be used in place of LongPolling when deploying a webapp.
**note**: make sure you set up a callback URL in your web app
:param str url: complete url, including port, where the callback url is located
:param str headers: Optional - Headers to have Connector send back with all calls
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
payloadToSend = {"url":url}
if headers:
payload['headers':headers]
data = self._putURL(url="/notification/callback",payload=payloadToSend, versioned=False)
if data.status_code == 204: #immediate success
result.error = False
result.result = data.content
else:
result.error = response_codes("put_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
def getCallback(self):
'''
Get the callback URL currently registered with Connector.
:return: callback url in ``.result``, error if applicable in ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._getURL("/notification/callback",versioned=False)
if data.status_code == 200: #immediate success
result.error = False
result.result = data.json()
else:
result.error = response_codes("get_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
def deleteCallback(self):
'''
Delete the Callback URL currently registered with Connector.
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._deleteURL("/notification/callback")
if data.status_code == 204: #immediate success
result.result = data.content
result.error = False
else:
result.error = response_codes("delete_callback_url",data.status_code)
result.raw_data = data.content
result.status_code = data.status_code
result.is_done = True
return result
# set a specific handler to call the cbfn
def setHandler(self,handler,cbfn):
'''
Register a handler for a particular notification type.
These are the types of notifications that are acceptable.
| 'async-responses'
| 'registrations-expired'
| 'de-registrations'
| 'reg-updates'
| 'registrations'
| 'notifications'
:param str handler: name of the notification type
:param fnptr cbfn: function to pass the notification channel messages to.
:return: Nothing.
'''
if handler == "async-responses":
self.async_responses_callback = cbfn
elif handler == "registrations-expired":
self.registrations_expired_callback = cbfn
elif handler == "de-registrations":
self.de_registrations_callback = cbfn
elif handler == "reg-updates":
self.reg_updates_callback = cbfn
elif handler == "registrations":
self.registrations_callback = cbfn
elif handler == "notifications":
self.notifications_callback = cbfn
else:
self.log.warn("'%s' is not a legitimate notification channel option. Please check your spelling.",handler)
# this function needs to spin off a thread that is constantally polling,
# should match asynch ID's to values and call their function
def startLongPolling(self, noWait=False):
'''
Start LongPolling Connector for notifications.
:param bool noWait: Optional - use the cached values in connector, do not wait for the device to respond
:return: Thread of constantly running LongPoll. To be used to kill the thred if necessary.
:rtype: pythonThread
'''
# check Asynch ID's against insternal database of ID's
# Call return function with the value given, maybe decode from base64?
wait = ''
if(noWait == True):
wait = "?noWait=true"
# check that there isn't another thread already running, only one longPolling instance per is acceptable
if(self.longPollThread.isAlive()):
self.log.warn("LongPolling is already active.")
else:
# start infinite longpolling thread
self._stopLongPolling.clear()
self.longPollThread.start()
self.log.info("Spun off LongPolling thread")
return self.longPollThread # return thread instance so user can manually intervene if necessary
# stop longpolling by switching the flag off.
def stopLongPolling(self):
'''
Stop LongPolling thread
:return: none
'''
if(self.longPollThread.isAlive()):
self._stopLongPolling.set()
self.log.debug("set stop longpolling flag")
else:
self.log.warn("LongPolling thread already stopped")
return
# Thread to constantly long poll connector and process the feedback.
# TODO: pass wait / noWait on to long polling thread, currently the user can set it but it doesnt actually affect anything.
def longPoll(self, versioned=True):
self.log.debug("LongPolling Started, self.address = %s" %self.address)
while(not self._stopLongPolling.is_set()):
try:
if versioned:
data = r.get(self.address+self.apiVersion+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
else:
data = r.get(self.address+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
self.log.debug("Longpoll Returned, len = %d, statuscode=%d",len(data.text),data.status_code)
# process callbacks
if data.status_code == 200: # 204 means no content, do nothing
self.handler(data.content)
self.log.debug("Longpoll data = "+data.content)
except:
self.log.error("longPolling had an issue and threw an exception")
ex_type, ex, tb = sys.exc_info()
traceback.print_tb(tb)
self.log.error(sys.exc_info())
del tb
self.log.info("Killing Longpolling Thread")
# parse the notification channel responses and call appropriate handlers
def handler(self,data):
'''
Function to handle notification data as part of Callback URL handler.
:param str data: data posted to Callback URL by connector.
:return: nothing
'''
if isinstance(data,r.models.Response):
self.log.debug("data is request object = %s", str(data.content))
data = data.content
elif isinstance(data,str):
self.log.info("data is json string with len %d",len(data))
if len(data) == 0:
self.log.warn("Handler received data of 0 length, exiting handler.")
return
else:
self.log.error("Input is not valid request object or json string : %s" %str(data))
return False
try:
data = json.loads(data)
if 'async-responses' in data.keys():
self.async_responses_callback(data)
if 'notifications' in data.keys():
self.notifications_callback(data)
if 'registrations' in data.keys():
self.registrations_callback(data)
if 'reg-updates' in data.keys():
self.reg_updates_callback(data)
if 'de-registrations' in data.keys():
self.de_registrations_callback(data)
if 'registrations-expired' in data.keys():
self.registrations_expired_callback(data)
except:
self.log.error("handle router had an issue and threw an exception")
ex_type, ex, tb = sys.exc_info()
traceback.print_tb(tb)
self.log.error(sys.exc_info())
del tb
# Turn on / off debug messages based on the onOff variable
def debug(self,onOff,level='DEBUG'):
'''
Enable / Disable debugging
:param bool onOff: turn debugging on / off
:return: none
'''
if onOff:
if level == 'DEBUG':
self.log.setLevel(logging.DEBUG)
self._ch.setLevel(logging.DEBUG)
self.log.debug("Debugging level DEBUG enabled")
elif level == "INFO":
self.log.setLevel(logging.INFO)
self._ch.setLevel(logging.INFO)
self.log.info("Debugging level INFO enabled")
elif level == "WARN":
self.log.setLevel(logging.WARN)
self._ch.setLevel(logging.WARN)
self.log.warn("Debugging level WARN enabled")
elif level == "ERROR":
self.log.setLevel(logging.ERROR)
self._ch.setLevel(logging.ERROR)
self.log.error("Debugging level ERROR enabled")
else:
self.log.setLevel(logging.ERROR)
self._ch.setLevel(logging.ERROR)
self.log.error("Unrecognized debug level `%s`, set to default level `ERROR` instead",level)
# internal async-requests handler.
# data input is json data
def _asyncHandler(self,data):
try:
responses = data['async-responses']
for entry in responses:
if entry['id'] in self.database['async-responses'].keys():
result = self.database['async-responses'].pop(entry['id']) # get the asynch object out of database
# fill in async-result object
if 'error' in entry.keys():
# error happened, handle it
result.error = response_codes('async-responses-handler',entry['status'])
result.error.error = entry['error']
result.is_done = True
if result.callback:
result.callback(result)
else:
return result
else:
# everything is good, fill it out
result.result = b64decode(entry['payload'])
result.raw_data = entry
result.status = entry['status']
result.error = False
for thing in entry.keys():
result.extra[thing]=entry[thing]
result.is_done = True
# call associated callback function
if result.callback:
result.callback(result)
else:
self.log.warn("No callback function given")
else:
# TODO : object not found int asynch database
self.log.warn("No asynch entry for '%s' found in databse",entry['id'])
except:
# TODO error handling here
self.log.error("Bad data encountered and failed to elegantly handle it. ")
ex_type, ex, tb = sys.exc_info()
traceback.print_tb(tb)
self.log.error(sys.exc_info())
del tb
return
# default handler for notifications. User should impliment all of these in
# a L2 implimentation or in their webapp.
# @input data is a dictionary
def _defaultHandler(self,data):
if 'async-responses' in data.keys():
self.log.info("async-responses detected : len = %d",len(data["async-responses"]))
self.log.debug(data["async-responses"])
if 'notifications' in data.keys():
self.log.info("notifications' detected : len = %d",len(data["notifications"]))
self.log.debug(data["notifications"])
if 'registrations' in data.keys():
self.log.info("registrations' detected : len = %d",len(data["registrations"]))
self.log.debug(data["registrations"])
if 'reg-updates' in data.keys():
# removed because this happens every 10s or so, spamming the output
self.log.info("reg-updates detected : len = %d",len(data["reg-updates"]))
self.log.debug(data["reg-updates"])
if 'de-registrations' in data.keys():
self.log.info("de-registrations detected : len = %d",len(data["de-registrations"]))
self.log.debug(data["de-registrations"])
if 'registrations-expired' in data.keys():
self.log.info("registrations-expired detected : len = %d",len(data["registrations-expired"]))
self.log.debug(data["registrations-expired"])
# make the requests.
# url is the API url to hit
# query are the optional get params
# versioned tells the API whether to hit the /v#/ version. set to false for
# commands that break with this, like the API and Connector version calls
# TODO: spin this off to be non-blocking
def _getURL(self, url,query={},versioned=True):
if versioned:
return r.get(self.address+self.apiVersion+url,headers={"Authorization":"Bearer "+self.bearer},params=query)
else:
return r.get(self.address+url,headers={"Authorization":"Bearer "+self.bearer},params=query)
# put data to URL with json payload in dataIn
def _putURL(self, url,payload=None,versioned=True):
if self._isJSON(payload):
self.log.debug("PUT payload is json")
if versioned:
return r.put(self.address+self.apiVersion+url,json=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.put(self.address+url,json=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
self.log.debug("PUT payload is NOT json")
if versioned:
return r.put(self.address+self.apiVersion+url,data=payload,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.put(self.address+url,data=payload,headers={"Authorization":"Bearer "+self.bearer})
# put data to URL with json payload in dataIn
def _postURL(self, url,payload="",versioned=True):
addr = self.address+self.apiVersion+url if versioned else self.address+url
h = {"Authorization":"Bearer "+self.bearer}
if payload:
self.log.info("POSTing with payload: %s ",payload)
return r.post(addr,data=payload,headers=h)
else:
self.log.info("POSTing")
return r.post(addr,headers=h)
# delete endpoint
def _deleteURL(self, url,versioned=True):
if versioned:
return r.delete(self.address+self.apiVersion+url,headers={"Authorization":"Bearer "+self.bearer})
else:
return r.delete(self.address+url,headers={"Authorization":"Bearer "+self.bearer})
# check if input is json, return true or false accordingly
def _isJSON(self,dataIn):
try:
json.dumps(dataIn)
return True
except:
self.log.debug("[_isJSON] exception triggered, input is not json")
return False
# extend dictionary class so we can instantiate multiple levels at once
class vividict(dict):
def __missing__(self, key):
value = self[key] = type(self)()
return value
# Initialization function, set the token used by this object.
def __init__( self,
token,
webAddress="https://api.connector.mbed.com",
port="80",):
# set token
self.bearer = token
# set version of REST API
self.apiVersion = "/v2"
# Init database, used for callback fn's for various tasks (asynch, subscriptions...etc)
self.database = self.vividict()
self.database['notifications']
self.database['registrations']
self.database['reg-updates']
self.database['de-registrations']
self.database['registrations-expired']
self.database['async-responses']
# longpolling variable
self._stopLongPolling = threading.Event() # must initialize false to avoid race condition
self._stopLongPolling.clear()
#create thread for long polling
self.longPollThread = threading.Thread(target=self.longPoll,name="mdc-api-longpoll")
self.longPollThread.daemon = True # Do this so the thread exits when the overall process does
# set default webAddress and port to mbed connector
self.address = webAddress
self.port = port
# Initialize the callbacks
self.async_responses_callback = self._asyncHandler
self.registrations_expired_callback = self._defaultHandler
self.de_registrations_callback = self._defaultHandler
self.reg_updates_callback = self._defaultHandler
self.registrations_callback = self._defaultHandler
self.notifications_callback = self._defaultHandler
# add logger
self.log = logging.getLogger(name="mdc-api-logger")
self.log.setLevel(logging.ERROR)
self._ch = logging.StreamHandler()
self._ch.setLevel(logging.ERROR)
formatter = logging.Formatter("\r\n[%(levelname)s \t %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
self._ch.setFormatter(formatter)
self.log.addHandler(self._ch)
|
ARMmbed/mbed-connector-api-python
|
mbed_connector_api/mbed_connector_api.py
|
connector.deleteAllSubscriptions
|
python
|
def deleteAllSubscriptions(self):
'''
Delete all subscriptions on the domain (all endpoints, all resources)
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
'''
result = asyncResult()
data = self._deleteURL("/subscriptions/")
if data.status_code == 204: #immediate success
result.error = False
result.is_done = True
else:
result.error = response_codes("unsubscribe",data.status_code)
result.is_done = True
result.raw_data = data.content
result.status_code = data.status_code
return result
|
Delete all subscriptions on the domain (all endpoints, all resources)
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
|
train
|
https://github.com/ARMmbed/mbed-connector-api-python/blob/a5024a01dc67cc192c8bf7a70b251fcf0a3f279b/mbed_connector_api/mbed_connector_api.py#L373-L390
|
[
"def _deleteURL(self, url,versioned=True):\n\tif versioned:\n\t\treturn r.delete(self.address+self.apiVersion+url,headers={\"Authorization\":\"Bearer \"+self.bearer})\n\telse:\n\t\treturn r.delete(self.address+url,headers={\"Authorization\":\"Bearer \"+self.bearer})\n"
] |
class connector:
"""
Interface class to use the connector.mbed.com REST API.
This class will by default handle asyncronous events.
All function return :class:'.asyncResult' objects
"""
# Return connector version number and recent rest API version number it supports
def getConnectorVersion(self):
"""
GET the current Connector version.
:returns: asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_mdc_version",data.status_code)
result.is_done = True
return result
# Return API version of connector
def getApiVersions(self):
"""
Get the REST API versions that connector accepts.
:returns: :class:asyncResult object, populates error and result fields
:rtype: asyncResult
"""
result = asyncResult()
data = self._getURL("/rest-versions",versioned=False)
result.fill(data)
if data.status_code == 200:
result.error = False
else:
result.error = response_codes("get_rest_version",data.status_code)
result.is_done = True
return result
# Fetch the account's Connector usage limits as a JSON blob.
def getLimits(self):
    """Return limits of account in async result object.

    :returns: asyncResult object, populates error and result fields
    :rtype: asyncResult
    """
    out = asyncResult()
    resp = self._getURL("/limits")
    out.fill(resp)
    out.error = False if resp.status_code == 200 else response_codes("limit", resp.status_code)
    out.is_done = True
    return out
# List every endpoint on the domain; an optional type filter narrows the result.
def getEndpoints(self, typeOfEndpoint=""):
    """
    Get list of all endpoints on the domain.

    :param str typeOfEndpoint: Optional filter endpoints returned by type
    :return: list of all endpoints
    :rtype: asyncResult
    """
    out = asyncResult()
    params = {}
    if typeOfEndpoint:
        # Filter server-side and record the filter on the result for the caller.
        params['type'] = typeOfEndpoint
        out.extra['type'] = typeOfEndpoint
    resp = self._getURL("/endpoints", query=params)
    out.fill(resp)
    out.error = False if resp.status_code == 200 else response_codes("get_endpoints", resp.status_code)
    out.is_done = True
    return out
# List all resources registered on one endpoint.
def getResources(self, ep, noResp=False, cacheOnly=False):
    """
    Get list of resources on an endpoint.

    :param str ep: Endpoint to get the resources of
    :param bool noResp: Optional - specify no response necessary from endpoint
    :param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
    :return: list of resources
    :rtype: asyncResult
    """
    out = asyncResult()
    out.endpoint = ep
    params = {}
    # Only populate query params when the caller deviates from the defaults.
    if noResp or cacheOnly:
        params['noResp'] = 'true' if noResp == True else 'false'
        params['cacheOnly'] = 'true' if cacheOnly == True else 'false'
    self.log.debug("ep = %s, query=%s", ep, params)
    resp = self._getURL("/endpoints/" + ep, query=params)
    out.fill(resp)
    if resp.status_code == 200:
        out.error = False
        self.log.debug("getResources sucess, status_code = `%s`, content = `%s`", str(resp.status_code), resp.content)
    else:
        out.error = response_codes("get_resources", resp.status_code)
        self.log.debug("getResources failed with error code `%s`" % str(resp.status_code))
    out.is_done = True
    return out
# Read one resource value; may complete immediately or via the async channel.
def getResourceValue(self, ep, res, cbfn="", noResp=False, cacheOnly=False):
    """
    Get value of a specific resource on a specific endpoint.

    :param str ep: name of endpoint
    :param str res: name of resource
    :param fnptr cbfn: Optional - callback function to be called on completion
    :param bool noResp: Optional - specify no response necessary from endpoint
    :param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
    :return: value of the resource, usually a string
    :rtype: asyncResult
    """
    out = asyncResult(callback=cbfn)  # callback fires from the async handler if deferred
    out.endpoint = ep
    out.resource = res
    params = {}
    if noResp or cacheOnly:
        params['noResp'] = 'true' if noResp == True else 'false'
        params['cacheOnly'] = 'true' if cacheOnly == True else 'false'
    resp = self._getURL("/endpoints/" + ep + res, query=params)
    out.fill(resp)
    if resp.status_code == 200:
        # Immediate answer (e.g. served from cache) — finish and notify now.
        out.error = False
        out.is_done = True
        if cbfn:
            cbfn(out)
        return out
    if resp.status_code == 202:
        # Deferred: park the result keyed by async-response-id until the
        # notification channel delivers the payload.
        self.database['async-responses'][json.loads(resp.content)["async-response-id"]] = out
    else:
        out.error = response_codes("resource", resp.status_code)
        out.is_done = True
        out.raw_data = resp.content
        out.status_code = resp.status_code
    return out
# Write a value to a resource; may complete immediately or via the async channel.
def putResourceValue(self, ep, res, data, cbfn=""):
    """
    Put a value to a resource on an endpoint.

    :param str ep: name of endpoint
    :param str res: name of resource
    :param str data: data to send via PUT
    :param fnptr cbfn: Optional - callback funtion to call when operation is completed
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    """
    out = asyncResult(callback=cbfn)
    out.endpoint = ep
    out.resource = res
    # Distinct name for the response — the original shadowed the `data` param.
    resp = self._putURL("/endpoints/" + ep + res, payload=data)
    if resp.status_code == 200:
        # Immediate success.
        out.error = False
        out.is_done = True
    elif resp.status_code == 202:
        # Deferred: resolved later by the async-responses handler.
        self.database['async-responses'][json.loads(resp.content)["async-response-id"]] = out
    else:
        out.error = response_codes("resource", resp.status_code)
        out.is_done = True
        out.raw_data = resp.content
        out.status_code = resp.status_code
    return out
# POST (execute / send data) to a resource; 201 is the immediate-success code here.
def postResource(self, ep, res, data="", cbfn=""):
    '''
    POST data to a resource on an endpoint.

    :param str ep: name of endpoint
    :param str res: name of resource
    :param str data: Optional - data to send via POST
    :param fnptr cbfn: Optional - callback funtion to call when operation is completed
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult(callback=cbfn)
    out.endpoint = ep
    out.resource = res
    resp = self._postURL("/endpoints/" + ep + res, data)
    if resp.status_code == 201:
        # Immediate success (created).
        out.error = False
        out.is_done = True
    elif resp.status_code == 202:
        # Deferred: resolved later by the async-responses handler.
        self.database['async-responses'][json.loads(resp.content)["async-response-id"]] = out
    else:
        out.error = response_codes("resource", resp.status_code)
        out.is_done = True
        out.raw_data = resp.content
        out.status_code = resp.status_code
    return out
# Remove an endpoint registration from Connector.
def deleteEndpoint(self, ep, cbfn=""):
    '''
    Send DELETE message to an endpoint.

    :param str ep: name of endpoint
    :param fnptr cbfn: Optional - callback funtion to call when operation is completed
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult(callback=cbfn)
    out.endpoint = ep
    resp = self._deleteURL("/endpoints/" + ep)
    if resp.status_code == 200:
        # Immediate success.
        out.error = False
        out.is_done = True
    elif resp.status_code == 202:
        # Deferred: resolved later by the async-responses handler.
        self.database['async-responses'][json.loads(resp.content)["async-response-id"]] = out
    else:
        out.error = response_codes("resource", resp.status_code)
        out.is_done = True
        out.raw_data = resp.content
        out.status_code = resp.status_code
    return out
# Subscribe to change notifications for one resource. The callback receives an
# asyncResult; wiring the notification channel itself is the caller's job
# (a higher-level library implements the channel handler).
def putResourceSubscription(self, ep, res, cbfn=""):
    '''
    Subscribe to changes in a specific resource ``res`` on an endpoint ``ep``.

    :param str ep: name of endpoint
    :param str res: name of resource
    :param fnptr cbfn: Optional - callback funtion to call when operation is completed
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult(callback=cbfn)
    out.endpoint = ep
    out.resource = res
    resp = self._putURL("/subscriptions/" + ep + res)
    if resp.status_code == 200:
        # Immediate success.
        out.error = False
        out.is_done = True
    elif resp.status_code == 202:
        # Deferred: resolved later by the async-responses handler.
        self.database['async-responses'][json.loads(resp.content)["async-response-id"]] = out
    else:
        out.error = response_codes("subscribe", resp.status_code)
        out.is_done = True
        out.raw_data = resp.content
        out.status_code = resp.status_code
    return out
def deleteEndpointSubscriptions(self, ep):
    '''
    Delete all subscriptions on specified endpoint ``ep``.

    :param str ep: name of endpoint
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult()
    out.endpoint = ep
    resp = self._deleteURL("/subscriptions/" + ep)
    if resp.status_code == 204:
        # 204 No Content is the success code for this call.
        out.error = False
    else:
        out.error = response_codes("delete_endpoint_subscription", resp.status_code)
        out.raw_data = resp.content
        out.status_code = resp.status_code
    out.is_done = True
    return out
def deleteResourceSubscription(self, ep, res):
    '''
    Delete subscription to a resource ``res`` on an endpoint ``ep``.

    :param str ep: name of endpoint
    :param str res: name of resource
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult()
    out.endpoint = ep
    out.resource = res
    resp = self._deleteURL("/subscriptions/" + ep + res)
    if resp.status_code == 204:
        # 204 No Content is the success code for this call.
        out.error = False
    else:
        out.error = response_codes("unsubscribe", resp.status_code)
        out.raw_data = resp.content
        out.status_code = resp.status_code
    out.is_done = True
    return out
def deleteAllSubscriptions(self):
    '''
    Delete all subscriptions on the domain (all endpoints, all resources).

    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult()
    resp = self._deleteURL("/subscriptions/")
    if resp.status_code == 204:
        # 204 No Content is the success code for this call.
        out.error = False
    else:
        out.error = response_codes("unsubscribe", resp.status_code)
        out.raw_data = resp.content
        out.status_code = resp.status_code
    out.is_done = True
    return out
# List all active subscriptions on one endpoint; ``.result`` is the raw body string.
def getEndpointSubscriptions(self, ep):
    '''
    Get list of all subscriptions on a given endpoint ``ep``.

    :param str ep: name of endpoint
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult()
    out.endpoint = ep
    resp = self._getURL("/subscriptions/" + ep)
    if resp.status_code == 200:
        out.error = False
        out.result = resp.content  # raw body, not parsed
    else:
        out.error = response_codes("unsubscribe", resp.status_code)
        out.raw_data = resp.content
        out.status_code = resp.status_code
    out.is_done = True
    return out
# Check the subscription state of one resource; ``.result`` is the raw body string.
def getResourceSubscription(self, ep, res):
    '''
    Get list of all subscriptions for a resource ``res`` on an endpoint ``ep``.

    :param str ep: name of endpoint
    :param str res: name of resource
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult()
    out.endpoint = ep
    out.resource = res
    resp = self._getURL("/subscriptions/" + ep + res)
    if resp.status_code == 200:
        out.error = False
        out.result = resp.content  # raw body, not parsed
    else:
        out.error = response_codes("unsubscribe", resp.status_code)
        out.raw_data = resp.content
        out.status_code = resp.status_code
    out.is_done = True
    return out
def putPreSubscription(self, JSONdata):
    '''
    Set pre-subscription rules for all endpoints / resources on the domain.
    This can be useful for all current and future endpoints/resources.

    :param json JSONdata: data to use as pre-subscription data. Wildcards are permitted
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    # Accept a JSON string too: decode it into the list form the API expects.
    if isinstance(JSONdata, str) and self._isJSON(JSONdata):
        self.log.warn("pre-subscription data was a string, converting to a list : %s", JSONdata)
        JSONdata = json.loads(JSONdata)
    if not (isinstance(JSONdata, list) and self._isJSON(JSONdata)):
        # NOTE(review): only logs — the PUT is still attempted with the bad
        # payload; confirm whether an early return was intended.
        self.log.error("pre-subscription data is not valid. Please make sure it is a valid JSON list")
    out = asyncResult()
    resp = self._putURL("/subscriptions", JSONdata, versioned=False)
    if resp.status_code == 204:
        # Success with no response body.
        out.error = False
        out.result = []
    else:
        out.error = response_codes("presubscription", resp.status_code)
        out.raw_data = resp.content
        out.status_code = resp.status_code
    out.is_done = True
    return out
def getPreSubscription(self):
    '''
    Get the current pre-subscription data from connector.

    :return: JSON that represents the pre-subscription data in the ``.result`` field
    :rtype: asyncResult
    '''
    out = asyncResult()
    # NOTE(review): this GET uses the versioned path while putPreSubscription
    # PUTs with versioned=False — confirm both hit the intended endpoint.
    resp = self._getURL("/subscriptions")
    if resp.status_code == 200:
        out.error = False
        out.result = resp.json()  # parsed JSON, unlike the raw-string getters above
    else:
        out.error = response_codes("presubscription", resp.status_code)
        out.raw_data = resp.content
        out.status_code = resp.status_code
    out.is_done = True
    return out
def putCallback(self, url, headers=""):
    '''
    Set the callback URL. To be used in place of LongPolling when deploying a webapp.
    **note**: make sure you set up a callback URL in your web app

    :param str url: complete url, including port, where the callback url is located
    :param str headers: Optional - Headers to have Connector send back with all calls
    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    result = asyncResult()
    payloadToSend = {"url": url}
    if headers:
        # BUGFIX: was `payload['headers':headers]`, which referenced an
        # undefined name (`payload`) using slice syntax — any call with
        # headers raised NameError and headers were never sent.
        payloadToSend["headers"] = headers
    data = self._putURL(url="/notification/callback", payload=payloadToSend, versioned=False)
    if data.status_code == 204:  # immediate success
        result.error = False
        result.result = data.content
    else:
        result.error = response_codes("put_callback_url", data.status_code)
        result.raw_data = data.content
        result.status_code = data.status_code
    result.is_done = True
    return result
def getCallback(self):
    '''
    Get the callback URL currently registered with Connector.

    :return: callback url in ``.result``, error if applicable in ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult()
    resp = self._getURL("/notification/callback", versioned=False)
    if resp.status_code == 200:
        out.error = False
        out.result = resp.json()  # {"url": ..., "headers": ...} per the API
    else:
        out.error = response_codes("get_callback_url", resp.status_code)
        out.raw_data = resp.content
        out.status_code = resp.status_code
    out.is_done = True
    return out
def deleteCallback(self):
    '''
    Delete the Callback URL currently registered with Connector.

    :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
    :rtype: asyncResult
    '''
    out = asyncResult()
    # NOTE(review): put/getCallback use versioned=False but this DELETE uses
    # the versioned default — confirm the path is intentional.
    resp = self._deleteURL("/notification/callback")
    if resp.status_code == 204:
        out.result = resp.content
        out.error = False
    else:
        out.error = response_codes("delete_callback_url", resp.status_code)
        out.raw_data = resp.content
        out.status_code = resp.status_code
    out.is_done = True
    return out
# Route a named notification type to a user-supplied callback.
def setHandler(self, handler, cbfn):
    '''
    Register a handler for a particular notification type.
    These are the types of notifications that are acceptable.

    | 'async-responses'
    | 'registrations-expired'
    | 'de-registrations'
    | 'reg-updates'
    | 'registrations'
    | 'notifications'

    :param str handler: name of the notification type
    :param fnptr cbfn: function to pass the notification channel messages to.
    :return: Nothing.
    '''
    # Dispatch table: channel name -> attribute holding its callback.
    attr = {
        "async-responses": "async_responses_callback",
        "registrations-expired": "registrations_expired_callback",
        "de-registrations": "de_registrations_callback",
        "reg-updates": "reg_updates_callback",
        "registrations": "registrations_callback",
        "notifications": "notifications_callback",
    }.get(handler)
    if attr is None:
        self.log.warn("'%s' is not a legitimate notification channel option. Please check your spelling.", handler)
    else:
        setattr(self, attr, cbfn)
# Spin up the background long-polling thread that matches async IDs to their
# parked asyncResult objects and fires their callbacks.
def startLongPolling(self, noWait=False):
    '''
    Start LongPolling Connector for notifications.

    :param bool noWait: Optional - use the cached values in connector, do not wait for the device to respond
    :return: Thread of constantly running LongPoll. To be used to kill the thred if necessary.
    :rtype: pythonThread
    '''
    # TODO: `noWait` is accepted but not yet passed through to longPoll();
    # the previous code built a "?noWait=true" query string and discarded it
    # (dead local removed — behavior unchanged).
    # Only one long-polling instance per connector is acceptable.
    if self.longPollThread.is_alive():  # is_alive(): the isAlive() alias was removed in Python 3.9
        self.log.warn("LongPolling is already active.")
    else:
        # Start the infinite long-polling thread.
        self._stopLongPolling.clear()
        self.longPollThread.start()
        self.log.info("Spun off LongPolling thread")
    return self.longPollThread  # returned so the user can manually intervene if necessary
# Stop long-polling by raising the stop flag; longPoll() exits on its next loop check.
def stopLongPolling(self):
    '''
    Stop LongPolling thread.

    :return: none
    '''
    if self.longPollThread.is_alive():  # is_alive(): the isAlive() alias was removed in Python 3.9
        self._stopLongPolling.set()
        self.log.debug("set stop longpolling flag")
    else:
        self.log.warn("LongPolling thread already stopped")
    return
# Thread to constantly long poll connector and process the feedback.
# TODO: pass wait / noWait on to long polling thread, currently the user can set it but it doesnt actually affect anything.
def longPoll(self, versioned=True):
"""Long-poll loop body (runs on the thread created in __init__): repeatedly
GET /notification/pull and hand any 200-response body to self.handler().
Exits when self._stopLongPolling is set (see stopLongPolling()).

:param bool versioned: prepend self.apiVersion to the request path
"""
self.log.debug("LongPolling Started, self.address = %s" %self.address)
while(not self._stopLongPolling.is_set()):
try:
if versioned:
data = r.get(self.address+self.apiVersion+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
else:
data = r.get(self.address+'/notification/pull',headers={"Authorization":"Bearer "+self.bearer,"Connection":"keep-alive","accept":"application/json"})
self.log.debug("Longpoll Returned, len = %d, statuscode=%d",len(data.text),data.status_code)
# process callbacks
if data.status_code == 200: # 204 means no content, do nothing
self.handler(data.content)
self.log.debug("Longpoll data = "+data.content)
# NOTE(review): the bare except below swallows *everything* (including
# KeyboardInterrupt/SystemExit) and simply retries — consider narrowing
# to requests' exception types.
except:
self.log.error("longPolling had an issue and threw an exception")
ex_type, ex, tb = sys.exc_info()
traceback.print_tb(tb)
self.log.error(sys.exc_info())
del tb
self.log.info("Killing Longpolling Thread")
# parse the notification channel responses and call appropriate handlers
def handler(self,data):
'''
Function to handle notification data as part of Callback URL handler.
Accepts either a requests.Response or a JSON string; decodes it and
dispatches each recognized top-level key to its registered callback
(see setHandler()).

:param str data: data posted to Callback URL by connector.
:return: nothing
'''
# Normalize input: unwrap a requests Response to its body, or accept a string.
if isinstance(data,r.models.Response):
self.log.debug("data is request object = %s", str(data.content))
data = data.content
elif isinstance(data,str):
self.log.info("data is json string with len %d",len(data))
if len(data) == 0:
self.log.warn("Handler received data of 0 length, exiting handler.")
return
else:
# NOTE(review): returns False here but None elsewhere — confirm callers
# don't rely on the distinction.
self.log.error("Input is not valid request object or json string : %s" %str(data))
return False
try:
data = json.loads(data)
# One payload may carry several notification types; dispatch each present key.
if 'async-responses' in data.keys():
self.async_responses_callback(data)
if 'notifications' in data.keys():
self.notifications_callback(data)
if 'registrations' in data.keys():
self.registrations_callback(data)
if 'reg-updates' in data.keys():
self.reg_updates_callback(data)
if 'de-registrations' in data.keys():
self.de_registrations_callback(data)
if 'registrations-expired' in data.keys():
self.registrations_expired_callback(data)
except:
# Log (with traceback) and swallow, so a bad payload can't kill the channel.
self.log.error("handle router had an issue and threw an exception")
ex_type, ex, tb = sys.exc_info()
traceback.print_tb(tb)
self.log.error(sys.exc_info())
del tb
# Turn logging verbosity up or down based on the onOff flag and level name.
def debug(self, onOff, level='DEBUG'):
    '''
    Enable / Disable debugging.

    :param bool onOff: turn debugging on / off
    :return: none
    '''
    # NOTE(review): onOff=False is a no-op (logging is not reset), matching
    # the original behavior.
    if onOff:
        # level name -> (logging level, emitter method, confirmation message)
        table = {
            'DEBUG': (logging.DEBUG, self.log.debug, "Debugging level DEBUG enabled"),
            'INFO': (logging.INFO, self.log.info, "Debugging level INFO enabled"),
            'WARN': (logging.WARN, self.log.warn, "Debugging level WARN enabled"),
            'ERROR': (logging.ERROR, self.log.error, "Debugging level ERROR enabled"),
        }
        if level in table:
            lvl, emit, msg = table[level]
            self.log.setLevel(lvl)
            self._ch.setLevel(lvl)
            emit(msg)
        else:
            # Unknown level name: fall back to ERROR and say so.
            self.log.setLevel(logging.ERROR)
            self._ch.setLevel(logging.ERROR)
            self.log.error("Unrecognized debug level `%s`, set to default level `ERROR` instead", level)
# internal async-requests handler.
# data input is json data
def _asyncHandler(self,data):
"""Resolve deferred operations: match each incoming async-response id against
the asyncResult objects parked in self.database['async-responses'], fill the
matching result in (payload is base64-decoded), mark it done, and invoke its
callback if one was registered.

:param dict data: decoded notification payload containing 'async-responses'
"""
try:
responses = data['async-responses']
for entry in responses:
if entry['id'] in self.database['async-responses'].keys():
result = self.database['async-responses'].pop(entry['id']) # get the asynch object out of database
# fill in async-result object
if 'error' in entry.keys():
# error happened, handle it
result.error = response_codes('async-responses-handler',entry['status'])
result.error.error = entry['error']
result.is_done = True
if result.callback:
result.callback(result)
else:
# NOTE(review): this return aborts processing of any remaining
# entries in the same batch — confirm intended.
return result
else:
# everything is good, fill it out
result.result = b64decode(entry['payload'])
result.raw_data = entry
result.status = entry['status']
result.error = False
# copy every field of the entry into .extra for the caller
for thing in entry.keys():
result.extra[thing]=entry[thing]
result.is_done = True
# call associated callback function
if result.callback:
result.callback(result)
else:
self.log.warn("No callback function given")
else:
# TODO : object not found int asynch database
self.log.warn("No asynch entry for '%s' found in databse",entry['id'])
except:
# TODO error handling here
self.log.error("Bad data encountered and failed to elegantly handle it. ")
ex_type, ex, tb = sys.exc_info()
traceback.print_tb(tb)
self.log.error(sys.exc_info())
del tb
return
# default handler for notifications. User should impliment all of these in
# a L2 implimentation or in their webapp.
# @input data is a dictionary
def _defaultHandler(self, data):
    """Fallback notification handler: log (info + debug) whichever
    notification categories are present in the payload."""
    # category key -> info-level message (messages reproduced verbatim).
    messages = {
        'async-responses': "async-responses detected : len = %d",
        'notifications': "notifications' detected : len = %d",
        'registrations': "registrations' detected : len = %d",
        'reg-updates': "reg-updates detected : len = %d",
        'de-registrations': "de-registrations detected : len = %d",
        'registrations-expired': "registrations-expired detected : len = %d",
    }
    for key, msg in messages.items():
        if key in data.keys():
            self.log.info(msg, len(data[key]))
            self.log.debug(data[key])
# make the requests.
# url is the API url to hit
# query are the optional get params
# versioned tells the API whether to hit the /v#/ version. set to false for
# commands that break with this, like the API and Connector version calls
# TODO: spin this off to be non-blocking
def _getURL(self, url, query=None, versioned=True):
    """GET ``url`` with the bearer-token auth header.

    :param str url: API path to hit
    :param dict query: optional GET params
    :param bool versioned: prepend self.apiVersion (disable for endpoints
        that break with it, like the version calls)
    """
    # BUGFIX: default was the mutable `query={}`, shared across all calls;
    # use None as the sentinel instead (callers passing a dict are unaffected).
    params = {} if query is None else query
    base = self.address + self.apiVersion if versioned else self.address
    return r.get(base + url, headers={"Authorization": "Bearer " + self.bearer}, params=params)
# PUT `payload` to a connector URL, as JSON when it is serializable, raw otherwise.
def _putURL(self, url, payload=None, versioned=True):
    """PUT ``payload`` to ``url`` with the bearer-token auth header.

    :param bool versioned: prepend self.apiVersion to the path
    """
    addr = self.address + self.apiVersion + url if versioned else self.address + url
    auth = {"Authorization": "Bearer " + self.bearer}
    if self._isJSON(payload):
        self.log.debug("PUT payload is json")
        return r.put(addr, json=payload, headers=auth)
    self.log.debug("PUT payload is NOT json")
    return r.put(addr, data=payload, headers=auth)
# POST to a connector URL, with an optional raw payload.
def _postURL(self, url, payload="", versioned=True):
    """POST to ``url`` with the bearer-token auth header; sends ``payload``
    as the request body when given.

    :param bool versioned: prepend self.apiVersion to the path
    """
    addr = self.address + self.apiVersion + url if versioned else self.address + url
    auth = {"Authorization": "Bearer " + self.bearer}
    if not payload:
        self.log.info("POSTing")
        return r.post(addr, headers=auth)
    self.log.info("POSTing with payload: %s ", payload)
    return r.post(addr, data=payload, headers=auth)
# DELETE a connector URL with the bearer-token auth header.
def _deleteURL(self, url, versioned=True):
    """DELETE ``url``; ``versioned`` prepends self.apiVersion to the path."""
    addr = self.address + (self.apiVersion if versioned else "") + url
    return r.delete(addr, headers={"Authorization": "Bearer " + self.bearer})
# check if input is json, return true or false accordingly
def _isJSON(self,dataIn):
try:
json.dumps(dataIn)
return True
except:
self.log.debug("[_isJSON] exception triggered, input is not json")
return False
# dict subclass that auto-creates nested levels on first access, so multiple
# levels can be instantiated at once (d['a']['b'] = 1 just works).
class vividict(dict):
    def __missing__(self, key):
        # Create, store, and return a fresh nested vividict for unknown keys.
        child = type(self)()
        self[key] = child
        return child
# Initialization function, set the token used by this object.
def __init__( self,
token,
webAddress="https://api.connector.mbed.com",
port="80",):
"""Build a connector client bound to one access token.

:param str token: Connector access token used as the Bearer credential
:param str webAddress: base URL of the Connector REST API
:param str port: stored on the instance; not referenced elsewhere in this
    class — presumably for callers, TODO confirm
"""
# set token
self.bearer = token
# set version of REST API
self.apiVersion = "/v2"
# Init database, used for callback fn's for various tasks (asynch, subscriptions...etc)
# Bare lookups below pre-create each top-level key via vividict.__missing__.
self.database = self.vividict()
self.database['notifications']
self.database['registrations']
self.database['reg-updates']
self.database['de-registrations']
self.database['registrations-expired']
self.database['async-responses']
# longpolling variable
self._stopLongPolling = threading.Event() # must initialize false to avoid race condition
self._stopLongPolling.clear()
#create thread for long polling
self.longPollThread = threading.Thread(target=self.longPoll,name="mdc-api-longpoll")
self.longPollThread.daemon = True # Do this so the thread exits when the overall process does
# set default webAddress and port to mbed connector
self.address = webAddress
self.port = port
# Initialize the callbacks: async responses get the real resolver, every
# other channel defaults to the log-only handler until setHandler() is used.
self.async_responses_callback = self._asyncHandler
self.registrations_expired_callback = self._defaultHandler
self.de_registrations_callback = self._defaultHandler
self.reg_updates_callback = self._defaultHandler
self.registrations_callback = self._defaultHandler
self.notifications_callback = self._defaultHandler
# add logger (ERROR by default; raise verbosity via debug())
self.log = logging.getLogger(name="mdc-api-logger")
self.log.setLevel(logging.ERROR)
self._ch = logging.StreamHandler()
self._ch.setLevel(logging.ERROR)
formatter = logging.Formatter("\r\n[%(levelname)s \t %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
self._ch.setFormatter(formatter)
self.log.addHandler(self._ch)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.