code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import range
from past.utils import old_div
import array
import io
import logging
import struct
def get_displacement(offset):
    """Return how many right shifts bring *offset* below 0x10.

    This sizes the offset/length split of an LZNT1 phrase token and is
    precomputed into DISPLACEMENT_TABLE for every in-chunk position.
    """
    shifts = 0
    value = offset
    while value >= 0x10:
        value >>= 1
        shifts += 1
    return shifts
# Precomputed displacement for every possible in-chunk output offset.
DISPLACEMENT_TABLE = array.array(
    'B', [get_displacement(x) for x in range(8192)])

# Bit 15 of a chunk header: set when the chunk payload is compressed.
COMPRESSED_MASK = 1 << 15
# Bits 12-14 of a chunk header: the mandatory LZNT1 signature bits.
SIGNATURE_MASK = 3 << 12
# Bits 0-11 of a chunk header: (chunk payload size - 3).
SIZE_MASK = (1 << 12) - 1
# One mask per flag bit in a tag byte; each bit describes one of the next
# 8 tokens (set = back-reference phrase, clear = literal byte).
TAG_MASKS = [(1 << i) for i in range(0, 8)]
def decompress_data(cdata, logger=None):
    """Decompress an LZNT1 compressed byte string.

    Args:
      cdata: the compressed data as a sequence of LZNT1 chunks, each
        starting with a 16 bit little-endian chunk header.
      logger: optional logger; a child "lznt1" logger is derived from it
        for debug tracing.

    Returns:
      The decompressed byte string.  Decompression stops at the first
      chunk header that lacks the LZNT1 signature bits.
    """
    if not logger:
        lznt1_logger = logging.getLogger("ntfs.lznt1")
    else:
        lznt1_logger = logger.getChild("lznt1")
    # Change to DEBUG to turn on module level debugging.
    lznt1_logger.setLevel(logging.ERROR)

    in_fd = io.BytesIO(cdata)
    output_fd = io.BytesIO()
    block_end = 0

    while in_fd.tell() < len(cdata):
        block_offset = in_fd.tell()
        uncompressed_chunk_offset = output_fd.tell()

        block_header = struct.unpack("<H", in_fd.read(2))[0]
        lznt1_logger.debug("Header %#x @ %#x", block_header, block_offset)
        if block_header & SIGNATURE_MASK != SIGNATURE_MASK:
            break

        size = (block_header & SIZE_MASK)
        lznt1_logger.debug("Block size %s", size + 3)

        block_end = block_offset + size + 3

        if block_header & COMPRESSED_MASK:
            while in_fd.tell() < block_end:
                header = ord(in_fd.read(1))
                lznt1_logger.debug("Tag %#x", header)
                for mask in TAG_MASKS:
                    if in_fd.tell() >= block_end:
                        break

                    if header & mask:
                        # A phrase token: 16 bits split between a back
                        # reference offset and a length; the split point
                        # depends on how much of the chunk is already
                        # decompressed.
                        pointer = struct.unpack("<H", in_fd.read(2))[0]
                        displacement = DISPLACEMENT_TABLE[
                            output_fd.tell() - uncompressed_chunk_offset - 1]

                        symbol_offset = (pointer >> (12 - displacement)) + 1
                        symbol_length = (pointer & (0xFFF >> displacement)) + 3

                        output_fd.seek(-symbol_offset, 2)
                        data = output_fd.read(symbol_length)

                        # Pad the data to make it fit: the back reference
                        # may overlap the current write position.
                        # (Replaced past.utils.old_div with // - both
                        # operands are ints so this is identical.)
                        if 0 < len(data) < symbol_length:
                            data = data * (symbol_length // len(data) + 1)
                            data = data[:symbol_length]

                        output_fd.seek(0, 2)
                        lznt1_logger.debug(
                            "Wrote %s @ %s/%s: Phrase %s %s %x",
                            len(data), in_fd.tell(),
                            output_fd.tell(), symbol_length, symbol_offset,
                            pointer)
                        output_fd.write(data)

                    else:
                        # A clear flag bit means the next input byte is a
                        # literal, copied through unchanged.
                        data = in_fd.read(1)
                        lznt1_logger.debug("Symbol %#x", ord(data))
                        output_fd.write(data)
        else:
            # Block is not compressed - copy the payload verbatim.
            data = in_fd.read(size + 1)
            output_fd.write(data)

    result = output_fd.getvalue()
    return result
"""Routines for manipulating yara rule definitions."""
from __future__ import print_function
from builtins import str
from builtins import map
import string
import sys
import yaml
import pyparsing
# Grammar atoms for the simplified yara rule parser below.
_RULE = pyparsing.Keyword("rule")
# String modifier keywords that may trail a string definition.
_KEYWORD = (pyparsing.Literal("wide") |
            pyparsing.Literal("fullword") |
            pyparsing.Literal("ascii") |
            pyparsing.Literal("nocase"))
_IDENTIFIER = pyparsing.Word(pyparsing.alphanums + '_' + "$")
# A /regex/ literal with optional trailing flag characters (s, i, g).
_REGEX = (pyparsing.QuotedString("/", escChar="\\", unquoteResults=False) +
          pyparsing.Optional(pyparsing.Word("sig")))
_LEFT_CURLY = pyparsing.Literal("{")
_RIGHT_CURLY = pyparsing.Literal("}")
_COLON = pyparsing.Literal(':')
_EQUALS = pyparsing.Literal("=")
def anything_beetween(opener_and_closer):
    """Builds a (pyparsing) parser for the content inside delimiters.

    Args:
      opener_and_closer: a string containing two elements: opener and closer

    Returns:
      A (pyparsing) parser for the content inside delimiters.
    """
    opener = pyparsing.Literal(opener_and_closer[0])
    closer = pyparsing.Literal(opener_and_closer[1])
    # Map both delimiter characters to None so translate() strips them
    # from the printable set.
    char_removal_mapping = dict.fromkeys(list(map(ord, opener_and_closer)))
    other_chars = str(string.printable).translate(char_removal_mapping)
    word_without_delimiters = pyparsing.Word(other_chars).setName(
        "other_chars")
    anything = pyparsing.Forward()
    delimited_block = opener + anything + closer
    # Recursive grammar: a sequence of plain words or nested delimited
    # blocks, which allows balanced nesting of the delimiters.
    # pylint: disable=expression-not-assigned
    anything << pyparsing.ZeroOrMore(
        word_without_delimiters.setName("word_without_delimiters")
        | delimited_block.setName("delimited_block")
    )
    # Combine all the parts into a single string.
    return pyparsing.Combine(anything)
def anything_in(opener_and_closer):
    """Parser matching a delimited block, delimiters included."""
    opener, closer = opener_and_closer[0], opener_and_closer[1]
    return opener + anything_beetween(opener_and_closer) + closer
def anything_in_curly():
    """Parser for a '{...}' block, braces included."""
    return anything_in('{}')
def meta_section():
    """Parser for the 'meta:' section of a yara rule."""
    return pyparsing.Group(
        pyparsing.Literal("meta") +
        _COLON +
        pyparsing.OneOrMore(
            statement()
        ).setResultsName("statements")
    ).setResultsName("meta")
def statement():
    """Parser for one 'name = value [modifiers...]' assignment.

    The right hand side may be a hex block in curly braces, a quoted
    string, or a regex, optionally followed by modifier keywords.
    """
    return pyparsing.Group(
        _IDENTIFIER.setResultsName("lhs") + _EQUALS +
        pyparsing.Combine(
            (anything_in_curly() |
             pyparsing.QuotedString("'", escChar="\\", unquoteResults=False) |
             pyparsing.QuotedString("\"", escChar="\\", unquoteResults=False) |
             _REGEX) +
            pyparsing.ZeroOrMore(_KEYWORD),
            adjacent=False,
            joinString=" ",
        ).setResultsName("rhs")
    )
def strings_section():
    """Parser for the 'strings:' section of a yara rule."""
    return pyparsing.Group(
        pyparsing.Literal("strings") +
        _COLON +
        pyparsing.OneOrMore(statement()).setResultsName("statements")
    ).setResultsName("strings")
def condition_section():
    """Parser for the 'condition:' section (taken verbatim up to '}')."""
    return (_IDENTIFIER +
            _COLON +
            pyparsing.SkipTo(_RIGHT_CURLY).setResultsName("statement")
            ).setResultsName("condition")
def section():
    """Parser for any one rule section (strings, meta or condition)."""
    return (strings_section() |
            meta_section() |
            condition_section())
def rule():
    """Parser for one complete 'rule name { ... }' definition."""
    return (_RULE +
            _IDENTIFIER.setResultsName("name") +
            _LEFT_CURLY +
            pyparsing.OneOrMore(section()) +
            _RIGHT_CURLY)
def yara_parser():
    """Parser for a whole yara rules file (one or more rules)."""
    return pyparsing.OneOrMore(rule())
def rule_to_ast(parsed_rule):
    """Convert one parsed yara rule into a plain python dict AST.

    The AST has keys: name, meta (dict), strings (list of (lhs, rhs)
    tuples) and condition (verbatim string).
    """
    meta = {}
    for entry in parsed_rule.get("meta", {}).get("statements", []):
        meta[entry["lhs"]] = entry["rhs"]

    strings = [(entry["lhs"], entry["rhs"])
               for entry in parsed_rule.get(
                   "strings", {}).get("statements", [])]

    return dict(
        name=parsed_rule["name"],
        meta=meta,
        strings=strings,
        condition=parsed_rule["condition"]["statement"],
    )
def parse_yara_to_ast(yara_rules):
    """Parse a yara rules file into a python AST."""
    # Strip c like comments.
    yara_rules = pyparsing.cppStyleComment.suppress().transformString(
        yara_rules)
    result = []
    for rules, _, _ in rule().parseWithTabs().scanString(yara_rules):
        try:
            result.append(rule_to_ast(rules))
        except Exception:
            # Best effort: rules that fail AST conversion are skipped
            # silently.
            pass
    return result
def ast_to_yara(parsed_rules):
    """Render a list of rule ASTs back into yara source text."""
    lines = []
    for rule_ast in parsed_rules:
        lines.append("rule %s {" % rule_ast["name"])
        metadata = rule_ast.get("meta")
        if metadata:
            lines.append(" meta:")
            lines.extend(" %s = %s" % item for item in metadata.items())
        if rule_ast.get("strings"):
            lines.append(" strings:")
            lines.extend(" %s = %s" % pair
                         for pair in sorted(rule_ast["strings"]))
        lines.append(" condition: %s" % rule_ast["condition"])
        lines.append(" }")
    return "\n".join(lines)
if __name__ == "__main__":
    # Command line driver: "parse <file>" dumps the parsed AST as YAML,
    # "encode <file>" converts a YAML AST back into yara source.
    action = sys.argv[1]
    filename = sys.argv[2]

    if action == "parse":
        # Use a context manager so the file handle is always closed.
        with open(filename) as fd:
            data = fd.read()
        print(yaml.safe_dump(
            parse_yara_to_ast(data),
            default_flow_style=False))

    elif action == "encode":
        with open(filename) as fd:
            data = fd.read()
        print(ast_to_yara(yaml.safe_load(data)))

    else:
        raise RuntimeError("Unknown action %s" % action)
from builtins import str
from builtins import zip
from past.builtins import basestring
from builtins import object
import binascii
import capstone
import re
import struct
from capstone import x86_const
from rekall import addrspace
from rekall import plugin
from rekall import obj
from rekall import testlib
from rekall_lib import utils
class Disassembler(object):
    """Base class for disassembler backends."""

    __abstract = True

    def __init__(self, mode, session=None, address_space=None):
        self.mode = mode
        self.session = session
        # Fall back to a dummy address space so reads are always possible.
        self.address_space = (
            address_space or
            addrspace.BaseAddressSpace.classes["DummyAddressSpace"](
                session=session))

    def disassemble(self, data, offset):
        """ Starts disassembly of data """

    def is_return(self):
        return False

    def is_branch(self):
        return False

    def target(self):
        return None
class Instruction(object):
    """A Decoded instruction."""

    # Subclasses wrap a backend specific decoded instruction object.
    __abstract = True
class CapstoneInstruction(Instruction):
    """A capstone decoded instruction."""

    # We need to build reverse maps to properly interpret capston
    # instructions.
    INSTRUCTIONS = {}
    REGISTERS = {}
    OP = {}

    @classmethod
    def _init_class(cls):
        """Populate value->name maps from capstone's x86 constants.

        For example X86_INS_MOV -> INSTRUCTIONS[value] = "MOV".
        """
        for constant in dir(x86_const):
            components = constant.split("_")
            value = getattr(x86_const, constant)
            if components[0] == "X86":
                if components[1] == "INS":
                    cls.INSTRUCTIONS[value] = components[2]
                elif components[1] == "REG":
                    cls.REGISTERS[value] = components[2]
                elif components[1] == "OP":
                    cls.OP[value] = components[2]
        # Register id 0 means "no register" in capstone.
        cls.REGISTERS[0] = None

    def __init__(self, insn, session=None, address_space=None):
        self.address_space = address_space
        self.insn = insn
        self.address = insn.address
        self.size = insn.size
        self.mnemonic = insn.mnemonic
        self._comment = ""
        self._operands = None  # Cache the operands.
        self.session = session
        self.resolver = session.address_resolver
        # Build the class level reverse maps lazily on first use.
        if not self.REGISTERS:
            self._init_class()

    @utils.safe_property
    def operands(self):
        """The instruction operands as a list of plain dicts (cached)."""
        if self._operands is not None:
            return self._operands

        result = []
        # For invalid instructions there are no operands
        if self.insn.id == 0:
            return result

        for op in self.insn.operands:
            operand = dict(type=self.OP[op.type], size=op.size)
            if operand["type"] == "REG":
                operand["reg"] = self.REGISTERS[op.reg]
            elif operand["type"] == "MEM":
                # This is of the form: [base_reg + disp + index_reg * scale]
                mem = op.mem
                operand["base"] = self.REGISTERS[mem.base]
                operand["disp"] = mem.disp
                operand["index"] = self.REGISTERS[mem.index]
                operand["scale"] = mem.scale
                if operand["base"] == "RIP":
                    # RIP relative addressing is resolved against the end
                    # of this instruction.
                    target = self.insn.address + mem.disp + self.insn.size
                    operand["address"] = target
                    operand["target"] = self._read_target(target, operand)
                    self._comment = self.format_indirect(target, op.size)

                # Simple indirect address.
                if not operand["base"] and not operand["index"]:
                    operand["address"] = mem.disp & 0xffffffffffffffff
                    operand["target"] = self._read_target(
                        operand["address"], operand)
                    self._comment = self.format_indirect(
                        operand["address"], op.size)

            elif operand["type"] == "IMM":
                operand["target"] = operand["address"] = op.imm.real
                self._comment = ", ".join(self.resolver.format_address(
                    op.imm.real))

            result.append(operand)

        # Cache for next time.
        self._operands = result
        return result

    def _read_target(self, target, operand):
        # Dereference the pointed-to value so it can be shown in comments.
        data = self.address_space.read(target, operand["size"])
        if operand["size"] == 8:
            return struct.unpack("<Q", data)[0]
        if operand["size"] == 4:
            return struct.unpack("<I", data)[0]

    def GetCanonical(self):
        """Returns the canonical model of the instruction."""
        result = dict(mnemonic=self.INSTRUCTIONS[self.insn.id],
                      str="%s %s" % (self.insn.mnemonic, self.insn.op_str),
                      operands=self.operands)
        result["comment"] = self._comment
        return result

    @utils.safe_property
    def comment(self):
        return self.GetCanonical()["comment"]

    @utils.safe_property
    def op_str(self):
        return self.GetCanonical()["str"]

    @utils.safe_property
    def text(self):
        canonical = self.GetCanonical()
        if canonical["comment"]:
            return "%s (%s)" % (canonical["str"], canonical["comment"])
        return canonical["str"]

    @utils.safe_property
    def hexbytes(self):
        return utils.SmartUnicode(binascii.hexlify(self.insn.bytes))

    def format_indirect(self, operand, size):
        """Format a human readable comment for an indirect memory access."""
        if size == 1:
            type = "byte"
        elif size == 2:
            type = "unsigned short"
        elif size == 4:
            type = "unsigned int"
        else:
            type = "address"

        target = self.session.profile.Object(
            type, offset=operand, vm=self.address_space).v()

        # NOTE: == None is deliberate - target may be a rekall NoneObject.
        if target == None:
            return ""

        target_name = ", ".join(self.resolver.format_address(target))
        operand_name = ", ".join(self.resolver.format_address(operand))
        if target_name:
            return "0x%x %s -> %s" % (target, operand_name, target_name)
        else:
            return "0x%x %s" % (target, operand_name)

    def is_return(self):
        return self.mnemonic.startswith("ret")

    # https://en.wikibooks.org/wiki/X86_Assembly/Control_Flow
    def is_branch(self):
        """Is this instruction a branch?

        e.g. JNE JE JG JLE JL JGE JMP JA JAE JB JBE JO JNO JZ JNZ JS JNS
        """
        return self.mnemonic.startswith("j")

    @utils.safe_property
    def target(self):
        # Only direct (IMM) and memory (MEM) jump targets can be
        # determined statically.
        if self.mnemonic[0] == "j":
            operand = self.operands[0]
            if operand["type"] in ("IMM", "MEM"):
                return operand.get("address")
        # We can not determine the target of REG jumps without the
        # registers.

    def match_rule(self, rule, context):
        """Match the rule against this instruction."""
        # Speed optimization. Most of the time the rule matches the mnemonic.
        mnemonic = rule.get("mnemonic")
        if mnemonic and mnemonic != self.INSTRUCTIONS[self.insn.id]:
            return False

        return self._MatchRule(rule, self.GetCanonical(), context)

    def _MatchRule(self, rule, instruction, context):
        # Recursively match nested dicts/lists; string rules support
        # "$name" capture variables and "~pattern" regular expressions.
        if isinstance(rule, dict):
            for k, v in rule.items():
                expected = instruction.get(k)
                if not self._MatchRule(v, expected, context):
                    return False
            return True

        if isinstance(rule, (list, tuple)):
            for subrule, subinst in zip(rule, instruction):
                if subrule and not self._MatchRule(subrule, subinst, context):
                    return False
            return True

        if isinstance(rule, basestring):
            # Rules starting with $ are capture variables.
            if rule[0] == "$":
                context[rule] = instruction
                return True

            # Rules starting with ~ are regular expressions.
            if isinstance(instruction, basestring) and rule[0] == "~":
                return re.match(rule[1:], instruction)

            return rule == instruction
class Capstone(Disassembler):
    """Disassembler backend built on the capstone engine."""

    def __init__(self, mode, **kwargs):
        super(Capstone, self).__init__(mode, **kwargs)

        if self.mode == "I386":
            self.cs = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32)
        elif self.mode == "AMD64":
            self.cs = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
        elif self.mode == "MIPS":
            self.cs = capstone.Cs(capstone.CS_ARCH_MIPS, capstone.CS_MODE_32 +
                                  capstone.CS_MODE_BIG_ENDIAN)
        # This is not really supported yet.
        elif self.mode == "ARM":
            self.cs = capstone.Cs(capstone.CS_ARCH_ARM, capstone.CS_MODE_ARM)
        else:
            raise NotImplementedError(
                "No disassembler available for this arch.")

        self.cs.detail = True
        # Emit "db" pseudo-instructions for undecodable bytes instead of
        # stopping the disassembly.
        self.cs.skipdata_setup = ("db", None, None)
        self.cs.skipdata = True

    def disassemble(self, data, offset):
        """Yield CapstoneInstruction objects decoded from data at offset."""
        for insn in self.cs.disasm(data, int(offset)):
            yield CapstoneInstruction(insn, session=self.session,
                                      address_space=self.address_space)
class Disassemble(plugin.TypedProfileCommand, plugin.Command):
    """Disassemble the given offset."""

    __name = "dis"

    __args = [
        dict(name="offset", type="SymbolAddress", positional=True,
             help="An offset to disassemble. This can also be the name of "
             "a symbol with an optional offset. For example: "
             "tcpip!TcpCovetNetBufferList."),

        dict(name="address_space", type="AddressSpace",
             help="The address space to use."),

        dict(name="length", type="IntParser",
             help="The number of instructions (lines) to disassemble."),

        dict(name="end", type="IntParser",
             help="The end address to disassemble up to."),

        dict(name="mode", default=None,
             choices=["I386", "AMD64", "MIPS"], type="Choices",
             help="Disassemble Mode (AMD64 or I386). Defaults to 'auto'."),

        dict(name="branch", default=False, type="Boolean",
             help="If set we follow all branches to cover all code."),

        dict(name="canonical", default=False, type="Boolean",
             help="If set emit canonical instructions. These can be used to "
             "develop signatures."),
    ]

    table_header = [
        dict(type="TreeNode", name="address",
             width=20, child=dict(style="address")),
        dict(name="rel", style="address", width=5),
        dict(name="opcode", width=20),
        dict(name="instruction", width=40),
        dict(name="comment"),
    ]

    def __init__(self, *args, **kwargs):
        super(Disassemble, self).__init__(*args, **kwargs)

        # If length is not specified only disassemble one pager of output.
        self.length = self.plugin_args.length
        if self.length is None:
            self.length = self.session.GetParameter("paging_limit", 50)

        # If end is specified, keep going until we hit the end.
        if self.plugin_args.end is not None:
            self.length = 2**62

        # If we are doing branch analysis we can not suspend this plugin. We
        # must do everything all the time.
        if self.plugin_args.branch:
            self.length = 2**62

        # All the visited addresses (for branch analysis).
        self._visited = set()

        self.offset = self.plugin_args.offset

    def disassemble(self, offset, depth=0):
        """Disassemble the number of instructions required.

        Yields:
          A tuple of (depth, instruction).
        """
        # Disassemble the data one page at the time.
        func = Function(offset=offset, vm=self.plugin_args.address_space,
                        session=self.session, mode=self.plugin_args.mode)
        for instruction in func.disassemble(self.length):
            offset = instruction.address
            if offset in self._visited:
                return

            # Exit condition can be specified by length.
            if (self.length is not None and
                    len(self._visited) > self.length):
                return

            # Exit condition can be specified by end address.
            if self.plugin_args.end and offset > self.plugin_args.end:
                return

            # Yield this data.
            yield depth, instruction

            # If the user asked for full branch analysis we follow all
            # branches. This gives us full code coverage for a function - we
            # just disassemble until the function exists from all branches.
            if self.plugin_args.branch:
                self._visited.add(offset)

                # A return stops this branch.
                if instruction.is_return():
                    return

                target = instruction.target
                if target:
                    # Start disassembling the branch. When the branch is
                    # exhausted we resume disassembling the continued
                    # branch.
                    for x in self.disassemble(target, depth=depth + 1):
                        yield x

                # A JMP stops disassembling this branch. This happens with
                # tail end optimization where a JMP would meet a RET which
                # unwinds past the JMP.
                if instruction.mnemonic.startswith("jmp"):
                    return

    def render_canonical(self, renderer, **options):
        """Renders a canonical description of each instruction.

        Canonical descriptions are machine readable representations of the
        instruction which can be used to write disassembler signatures.

        BUG FIX: render() forwards **options here; the previous signature
        did not accept them which raised TypeError whenever options were
        supplied.  The extra options are accepted and ignored.
        """
        # If length nor end are specified only disassemble one pager output.
        if self.plugin_args.end is None and self.plugin_args.length is None:
            self.length = self.session.GetParameter("paging_limit") - 5

        renderer.table_header([
            ('Instruction', "instruction", ''),
        ], suppress_headers=True)

        for _, instruction in self.disassemble(self.offset):
            renderer.table_row(instruction.GetCanonical())

    def render(self, renderer, **options):
        """Disassemble code at a given address.

        Disassembles code starting at address for a number of bytes
        given by the length parameter (default: 128).

        Note: This feature requires capstone, available at
        http://www.capstone-engine.org/

        The mode is '32bit' or '64bit'. If not supplied, the disassembler
        mode is taken from the profile.
        """
        if self.plugin_args.canonical:
            return self.render_canonical(renderer, **options)

        return super(Disassemble, self).render(renderer, **options)

    def collect(self):
        """Yield one result row per disassembled instruction."""
        self._visited.clear()

        offset = None
        for depth, instruction in self.disassemble(self.offset):
            offset = instruction.address
            relative = None
            resolver = self.session.address_resolver
            if resolver:
                (f_offset, f_names) = resolver.get_nearest_constant_by_address(
                    offset)
                f_name = ", ".join(f_names)
                self.session.report_progress(
                    "Disassembled %s: 0x%x", f_name, offset)

                # Emit an annotation row at the start of a known symbol.
                if offset - f_offset == 0:
                    yield dict(
                        address="------ %s ------\n" % f_name,
                        annotation=True)

                if offset - f_offset < 0x1000:
                    relative = offset - f_offset

            yield dict(address=instruction.address,
                       rel=relative,
                       opcode=instruction.hexbytes,
                       instruction=instruction.op_str,
                       comment=instruction.comment, depth=depth)

        # Continue from where we left off when the user calls us again with the
        # v() plugin.
        self.offset = offset
class TestDisassemble(testlib.SimpleTestCase):
    """Regression test harness parameters for the dis plugin."""

    PARAMETERS = dict(
        # We want to test symbol discovery via export table detection so turn it
        # on.
        commandline=("dis --length %(length)s %(func)s "
                     "--name_resolution_strategies Export"),
        func=0x805031be,
        length=20
    )
class Function(obj.BaseAddressComparisonMixIn, obj.BaseObject):
    """A base object representing code snippets."""

    def __init__(self, mode=None, args=None, **kwargs):
        super(Function, self).__init__(**kwargs)
        self.args = args
        if mode is None:
            mode = self.obj_context.get("mode")

        if mode is None:
            # Autodetect disassembling mode
            highest_usermode_address = self.obj_session.GetParameter(
                "highest_usermode_address")

            # We are disassembling user space.
            if self.obj_offset < highest_usermode_address:
                mode = self.obj_session.GetParameter(
                    "process_context").address_mode

        # fall back to the kernel's mode.
        if not mode:
            mode = self.obj_session.profile.metadata("arch") or "I386"

        self.dis = Capstone(mode, address_space=self.obj_vm,
                            session=self.obj_session)
        self.mode = mode

    def __int__(self):
        return self.obj_offset

    def __hash__(self):
        return self.obj_offset + hash(str(self.obj_vm))

    def __str__(self):
        # Width of the address column depends on the architecture.
        if self.mode == "AMD64":
            format_string = "%0#14x %s"
        else:
            format_string = "%0#10x %s"

        result = []
        for instruction in self.disassemble():
            result.append(format_string % (
                instruction.address, instruction.text))

        return "\n".join(result)

    def __iter__(self):
        return iter(self.disassemble())

    def __getitem__(self, item):
        # Index into the instruction stream (linear scan).
        for i, x in enumerate(self.disassemble()):
            if i == item:
                return x

    def Rewind(self, length=0, align=True):
        """Returns another function which starts before this function.

        If align is specified, we increase the length repeatedly until the
        new function disassebles exactly to the same offset of this
        function.
        """
        while 1:
            offset = self.obj_offset - length
            result = self.obj_profile.Function(vm=self.obj_vm, offset=offset)
            if not align:
                return result

            for instruction in result.disassemble(instructions=length):
                # An exact match.
                if instruction.address == self.obj_offset:
                    return result

                # We overshot ourselves, try again.
                if instruction.address > self.obj_offset:
                    length += 1
                    break

    def disassemble(self, instructions=10):
        """Generate some instructions."""
        count = 0
        buffer_offset = offset = self.obj_offset
        while 1:
            # By default read 2 pages.
            data = self.obj_vm.read(buffer_offset, 0x2000)

            for instruction in self.dis.disassemble(data, buffer_offset):
                offset = instruction.address

                # If we disassemble past one page, we read another two
                # pages. This guarantees that we have enough data for full
                # instructions.
                if offset - buffer_offset > 0x1000:
                    buffer_offset = offset
                    break

                yield instruction
                count += 1
                if count > instructions:
                    return

            buffer_offset = offset
# Register the Function class in all profiles.  (Removed the dataset
# residue that was fused onto the end of this statement.)
obj.Profile.COMMON_CLASSES["Function"] = Function
from builtins import str
import arrow
import binascii
from rekall import addrspace
from rekall import obj
from rekall import session
from rekall.ui import json_renderer
from rekall_lib import utils
class BaseAddressSpaceObjectRenderer(json_renderer.StateBasedObjectRenderer):
    """Serialize address spaces by class name and base address space."""

    renders_type = "BaseAddressSpace"

    @json_renderer.CacheableState
    def DecodeFromJsonSafe(self, value, options):
        """Reconstruct an address space instance from its state dict."""
        value = super(BaseAddressSpaceObjectRenderer,
                      self).DecodeFromJsonSafe(value, options)

        cls_name = value.pop("cls")
        cls = addrspace.BaseAddressSpace.classes[cls_name]

        # "PhysicalAS" is a symbolic reference to the session's physical
        # address space.
        if value["base"] == "PhysicalAS":
            value["base"] = (self.session.physical_address_space or
                             self.session.plugins.load_as().GetPhysicalAddressSpace())

        return cls(session=self.session, **value)

    def GetState(self, item, **_):
        result = dict(cls=utils.SmartUnicode(item.__class__.__name__))
        if item.base is not item:
            result["base"] = item.base
        # Encode the physical AS symbolically rather than recursively.
        if item.base is self.renderer.session.physical_address_space:
            result["base"] = "PhysicalAS"

        return result
class FileAddressSpaceObjectRenderer(BaseAddressSpaceObjectRenderer):
    """Additionally records the backing file name."""

    renders_type = "FileAddressSpace"

    def GetState(self, item, **options):
        state = super(FileAddressSpaceObjectRenderer, self).GetState(
            item, **options)
        state["filename"] = utils.SmartUnicode(item.fname)

        return state
class AttributeDictObjectRenderer(json_renderer.StateBasedObjectRenderer):
    """Serialize AttributeDict objects as a plain dict payload."""

    renders_type = "AttributeDict"

    def GetState(self, item, **_):
        return dict(data=dict(item))

    def DecodeFromJsonSafe(self, state, options):
        state = super(AttributeDictObjectRenderer, self).DecodeFromJsonSafe(
            state, options)
        return utils.AttributeDict(state.get("data", {}))
class SlottedObjectObjectRenderer(json_renderer.StateBasedObjectRenderer):
    """Serialize SlottedObject instances via their public slots."""

    renders_type = "SlottedObject"

    def GetState(self, item, **_):
        # Only public (non underscore) slots participate in serialization.
        return dict((k, getattr(item, k))
                    for k in item.__slots__ if not k.startswith("_"))

    def DecodeFromJsonSafe(self, state, options):
        state = super(SlottedObjectObjectRenderer, self).DecodeFromJsonSafe(
            state, options)
        # Deliberately do not go through the constructor. Use __new__ directly
        # so we can restore object state by assigning to the slots.
        result = utils.SlottedObject.__new__(utils.SlottedObject)
        # BUG FIX: this module never imports six, so six.iteritems raised
        # NameError here.  Iterating the dict directly is equivalent.
        for k, v in state.items():
            setattr(result, k, v)

        return result
class IA32PagedMemoryObjectRenderer(BaseAddressSpaceObjectRenderer):
    """Additionally records the DTB so paging can be reconstructed."""

    renders_type = "IA32PagedMemory"

    def GetState(self, item, **options):
        state = super(IA32PagedMemoryObjectRenderer, self).GetState(
            item, **options)
        state["dtb"] = item.dtb

        return state
class SessionObjectRenderer(json_renderer.StateBasedObjectRenderer):
    """Serialize a Session by id and its serializable parameters."""

    renders_type = "Session"

    def GetState(self, item, **options):
        state = super(SessionObjectRenderer, self).GetState(item, **options)
        state["session_id"] = item.session_id
        # Store a (value, type) pair per serializable parameter.
        state_dict = state["state"] = {}
        for parameter, type in item.SERIALIZABLE_STATE_PARAMETERS:
            value = None
            if item.HasParameter(parameter):
                value = item.GetParameter(parameter)

            state_dict[parameter] = (value, type)

        return state

    @json_renderer.CacheableState
    def DecodeFromJsonSafe(self, state, options):
        state = super(SessionObjectRenderer, self).DecodeFromJsonSafe(
            state, options)
        mro = state["mro"].split(":")
        result = session.Session.classes[mro[0]]()
        with result:
            # BUG FIX: this module never imports six, so six.iteritems
            # raised NameError here.  Iterating the dict directly is
            # equivalent.
            for k, v in state["state"].items():
                result.SetParameter(k, v[0])

        return result
class ProfileObjectRenderer(json_renderer.StateBasedObjectRenderer):
    """Serialize a profile by name and reload it by name on decode."""

    renders_type = "Profile"

    def GetState(self, item, **_):
        return dict(name=item.name)

    @json_renderer.CacheableState
    def DecodeFromJsonSafe(self, state, options):
        state = super(ProfileObjectRenderer, self).DecodeFromJsonSafe(
            state, options)

        result = self.session.LoadProfile(state["name"])
        # NOTE: == None is deliberate - result may be a rekall NoneObject.
        if result == None:
            return None

        return result
class SetObjectRenderer(json_renderer.StateBasedObjectRenderer):
    """Encode a python set()."""

    # NOTE(review): frozensets are decoded back as mutable sets - the
    # original type is not round-tripped.
    renders_type = ("set", "frozenset")

    def GetState(self, item, **_):
        return dict(data=list(item))

    def DecodeFromJsonSafe(self, state, options):
        return set(state["data"])
class NoneObjectRenderer(json_renderer.StateBasedObjectRenderer):
    """Encode a None Object."""

    renders_type = "NoneObject"

    def GetState(self, item, **_):
        # Only the failure reason is preserved.
        return dict(reason=item.FormatReason())

    def DecodeFromJsonSafe(self, state, options):
        state = super(NoneObjectRenderer, self).DecodeFromJsonSafe(
            state, options)

        return obj.NoneObject(state.get("reason"))
class UnixTimestampJsonObjectRenderer(json_renderer.StateBasedObjectRenderer):
    """Serialize UnixTimeStamp as epoch seconds plus display string."""

    renders_type = "UnixTimeStamp"

    def Summary(self, item, **_):
        return item.get("string_value", "")

    def GetState(self, item, **_):
        return dict(epoch=item.v(),
                    string_value=utils.SmartUnicode(item))

    def DecodeFromJsonSafe(self, state, options):
        return self.session.profile.UnixTimeStamp(value=state.get("epoch", 0))
class ArrowObjectRenderer(json_renderer.StateBasedObjectRenderer):
    """Serialize arrow.Arrow timestamps as epoch plus ISO string."""

    renders_type = "Arrow"

    def GetState(self, item, **_):
        return dict(epoch=item.float_timestamp,
                    string_value=item.isoformat())

    def DecodeFromJsonSafe(self, state, options):
        return arrow.Arrow.fromtimestamp(state["epoch"])
class PointerObjectRenderer(json_renderer.BaseObjectRenderer):
    """Encode a Pointer."""

    renders_type = "Pointer"

    def GetState(self, item, **options):
        state = super(PointerObjectRenderer, self).GetState(item, **options)
        # Preserve the pointed-to type and its constructor args.
        state["target"] = item.target
        state["target_args"] = item.target_args

        return state
class ArrayObjectRenderer(PointerObjectRenderer):
    """Like Pointer, but additionally records the element count."""

    renders_type = "Array"

    def GetState(self, item, **options):
        state = super(ArrayObjectRenderer, self).GetState(item, **options)
        state["count"] = item.count

        return state
class JsonAttributedStringRenderer(json_renderer.StateBasedObjectRenderer):
    """Encode an attributed string."""

    renders_type = "AttributedString"

    def GetState(self, item, **options):
        state = super(JsonAttributedStringRenderer, self).GetState(
            item, **options)
        state["value"] = utils.SmartUnicode(item.value)
        # Keep the highlight ranges alongside the text.
        state["highlights"] = item.highlights

        return state
class JsonHexdumpRenderer(json_renderer.StateBasedObjectRenderer):
    """Encode a hex dumped string."""

    renders_type = "HexDumpedString"

    def GetState(self, item, **options):
        state = super(JsonHexdumpRenderer, self).GetState(item, **options)
        # Hex-encode the raw bytes so they survive JSON transport.
        state["value"] = utils.SmartUnicode(binascii.hexlify(item.value))
        state["highlights"] = item.highlights

        return state
class JsonInstructionRenderer(json_renderer.StateBasedObjectRenderer):
    """Serialize a disassembled instruction as its text rendering."""

    renders_type = "Instruction"

    def GetState(self, item, **_):
        return dict(value=utils.SmartUnicode(item))
class JsonEnumerationRenderer(json_renderer.StateBasedObjectRenderer):
    """For enumerations store both their value and the enum name."""

    renders_type = ["Enumeration", "BitField"]

    def GetState(self, item, **_):
        return dict(enum=utils.SmartUnicode(item),
                    value=int(item))

    def Summary(self, item, **_):
        return item.get("enum", "")
class JsonFormattedAddress(json_renderer.StateBasedObjectRenderer):
    """Serialize a resolved address as its value plus symbol name."""

    renders_type = ["FormattedAddress"]

    def GetState(self, item, **_):
        return dict(address=item.address,
                    symbol=utils.SmartUnicode(item))

    def Summary(self, item, **_):
        return utils.SmartUnicode(item)
class RunEncoder(json_renderer.StateBasedObjectRenderer):
    """Serialize Run objects (address ranges with backing data)."""

    renders_type = ["Run"]

    def GetState(self, item, **_):
        return dict(start=item.start, end=item.end,
                    address_space=item.address_space,
                    file_offset=item.file_offset,
                    data=item.data)

    def Summary(self, item, **_):
        # BUG FIX: GetState() requires the item argument - calling it with
        # no arguments raised TypeError.
        return utils.SmartUnicode(self.GetState(item))
class JsonRangedCollectionObjectRenderer(
        json_renderer.StateBasedObjectRenderer):
    """Serialize RangedCollection objects."""

    renders_type = ["RangedCollection"]

    def EncodeToJsonSafe(self, item, **_):
        # Optimized this since we know we do not need to escape any item since
        # this is a simple list of integers.
        encoded = []
        for start, end, data in item:
            encoded.append((start, end, self._encode_value(data)))

        return dict(data=encoded, mro="RangedCollection")

    def DecodeFromJsonSafe(self, state, options):
        """Rebuild the collection by re-inserting each decoded range.

        (Removed the dataset residue that was fused onto the final line.)
        """
        result = utils.RangedCollection()
        for start, end, encoded_data in state["data"]:
            result.insert(
                start, end, self._decode_value(encoded_data, options))

        return result
from builtins import str
from builtins import range
import time
import openpyxl
from openpyxl import styles
from openpyxl.styles import colors
from openpyxl.styles import fills
from rekall_lib import utils
from rekall.ui import renderer
from rekall.ui import text
import six
# Python 3 has no separate long type; alias it for the isinstance checks
# below.
if six.PY3:
    long = int

# pylint: disable=unexpected-keyword-arg,no-value-for-parameter
# pylint: disable=redefined-outer-name

# Cell styles used by the XLS renderer.
HEADER_STYLE = styles.Style(font=styles.Font(bold=True))
SECTION_STYLE = styles.Style(
    fill=styles.PatternFill(
        fill_type=fills.FILL_SOLID, start_color=styles.Color(colors.RED)))
FORMAT_STYLE = styles.Style(
    alignment=styles.Alignment(vertical="top", wrap_text=False))
class XLSObjectRenderer(renderer.ObjectRenderer):
    """By default the XLS renderer delegates to the text renderer."""

    renders_type = "object"
    renderers = ["XLSRenderer"]

    STYLE = None

    def _GetDelegateObjectRenderer(self, item):
        # Route the value through the text renderer and copy its output
        # into the spreadsheet cell.
        return self.ForTarget(item, "TextRenderer")(
            session=self.session, renderer=self.renderer.delegate_text_renderer)

    def RenderHeader(self, worksheet, column):
        cell = worksheet.cell(
            row=worksheet.current_row, column=worksheet.current_column)
        cell.value = column.name
        cell.style = HEADER_STYLE

        # Advance the pointer by 1 cell.
        worksheet.current_column += 1

    def RenderCell(self, value, worksheet, **options):
        # By default just render a single value into the current cell.
        cell = worksheet.cell(
            row=worksheet.current_row, column=worksheet.current_column)
        cell.value = self.GetData(value, **options)
        if self.STYLE:
            cell.style = self.STYLE

        # Advance the pointer by 1 cell.
        worksheet.current_column += 1

    def GetData(self, value, **options):
        # Numbers are stored natively; everything else goes in as the text
        # renderer's output.
        if isinstance(value, (int, float, long)):
            return value

        return utils.SmartUnicode(
            self._GetDelegateObjectRenderer(value).render_row(
                value, **options))
class XLSColumn(text.TextColumn):
    """A table column which may carry a type-specific XLS object renderer."""

    def __init__(self, type=None, table=None, renderer=None, session=None,
                 **options):
        super(XLSColumn, self).__init__(table=table, renderer=renderer,
                                        session=session, **options)

        # If the column declares a fixed type, resolve a dedicated renderer
        # once up front; otherwise rows fall back to per-item lookup.
        if type:
            self.object_renderer = self.renderer.get_object_renderer(
                type=type, target_renderer="XLSRenderer", **options)
class XLSTable(text.TextTable):
    """A table which writes its header and rows into worksheet cells."""
    column_class = XLSColumn

    def render_header(self):
        # Get each column to write its own header.
        current_ws = self.renderer.current_ws
        for column in self.columns:
            if column.object_renderer:
                object_renderer = column.object_renderer
            else:
                object_renderer = XLSObjectRenderer(
                    session=self.session, renderer=self.renderer)

            object_renderer.RenderHeader(self.renderer.current_ws, column)

        current_ws.current_row += 1
        current_ws.current_column = 1

    def render_row(self, row=None, highlight=None, **options):
        """Write one row of cells, delegating each item to its renderer."""
        merged_opts = self.options.copy()
        merged_opts.update(options)

        current_ws = self.renderer.current_ws
        for item in row:
            # Get the object renderer for the item.
            object_renderer = self.renderer.get_object_renderer(
                target=item, type=merged_opts.get("type"), **merged_opts)

            # NOTE(review): only the call-time options (not merged_opts) are
            # forwarded here - confirm this asymmetry is intentional.
            object_renderer.RenderCell(item, current_ws, **options)

        current_ws.current_row += 1
        current_ws.current_column = 1
class XLSRenderer(renderer.BaseRenderer):
    """A Renderer for xls files."""
    name = "xls"

    table_class = XLSTable
    tablesep = ""

    def __init__(self, output=None, **kwargs):
        """Open (or create) the output workbook.

        Args:
          output: Path of the spreadsheet to write. Falls back to the
            session's "output" parameter, then to a timestamp-based name.
        """
        super(XLSRenderer, self).__init__(**kwargs)

        # Make a single delegate text renderer for reuse. Most of the time we
        # will just replicate the output from the TextRenderer inside the
        # spreadsheet cell.
        self.delegate_text_renderer = text.TextRenderer(session=self.session)

        self.output = output or self.session.GetParameter("output")

        # If no output filename was given, just make a name based on the time
        # stamp. ("== None" is deliberate: it also matches Rekall's
        # NoneObject sentinel via its equality operator.)
        if self.output == None:
            self.output = "%s.xls" % time.ctime()

        try:
            # Append to an existing workbook when the file already exists.
            self.wb = openpyxl.load_workbook(self.output)
            self.current_ws = self.wb.create_sheet()
        except IOError:
            # Otherwise start a fresh workbook.
            self.wb = openpyxl.Workbook()
            self.current_ws = self.wb.active

    def start(self, plugin_name=None, kwargs=None):
        super(XLSRenderer, self).start(plugin_name=plugin_name, kwargs=kwargs)

        # Make a new worksheet for this run.
        if self.current_ws is None:
            self.current_ws = self.wb.create_sheet()

        ws = self.current_ws
        ws.title = plugin_name or ""
        # The cell cursor is tracked as ad-hoc attributes on the worksheet.
        ws.current_row = 1
        ws.current_column = 1

        return self

    def flush(self):
        super(XLSRenderer, self).flush()
        self.current_ws = None
        # Write the spreadsheet to a file.
        self.wb.save(self.output)

    def section(self, name=None, **_):
        """Write a highlighted section banner spanning the first 10 columns."""
        ws = self.current_ws
        for i in range(10):
            cell = ws.cell(row=ws.current_row, column=i + 1)
            if i == 0:
                cell.value = name

            cell.style = SECTION_STYLE

        ws.current_row += 1
        ws.current_column = 1

    def format(self, formatstring, *data):
        """Format free text into the current cell."""
        worksheet = self.current_ws
        # Support both %-style and str.format-style format strings.
        if "%" in formatstring:
            data = formatstring % data
        else:
            data = formatstring.format(*data)

        cell = worksheet.cell(
            row=worksheet.current_row, column=worksheet.current_column)
        cell.value = data
        cell.style = FORMAT_STYLE

        worksheet.current_column += 1
        # A newline in the text moves the cursor to the next worksheet row.
        if "\n" in data:
            worksheet.current_row += 1
            worksheet.current_column = 1

    def table_header(self, *args, **options):
        super(XLSRenderer, self).table_header(*args, **options)

        self.table.render_header()
# Following here are object specific renderers.
class XLSEProcessRenderer(XLSObjectRenderer):
    """Expands an EPROCESS into three columns (address, name and PID)."""
    renders_type = "_EPROCESS"

    def RenderHeader(self, worksheet, column):
        # One header cell per expanded column.
        for heading in ["_EPROCESS", "Name", "PID"]:
            cell = worksheet.cell(
                row=worksheet.current_row, column=worksheet.current_column)
            cell.value = heading
            cell.style = HEADER_STYLE

            worksheet.current_column += 1

    def RenderCell(self, item, worksheet, **options):
        # Write offset, name and pid as three consecutive cells, each via
        # the renderer appropriate for its value type.
        for value in ["%#x" % item.obj_offset, item.name, item.pid]:
            object_renderer = self.ForTarget(value, self.renderer)(
                session=self.session, renderer=self.renderer, **options)
            object_renderer.RenderCell(value, worksheet, **options)
class XLSStringRenderer(XLSObjectRenderer):
    """Stores String objects as raw bytes in the cell."""
    renders_type = "String"

    def GetData(self, item, **_):
        return utils.SmartStr(item)
class XLSStructRenderer(XLSObjectRenderer):
    """Hex format struct's offsets."""
    renders_type = "Struct"

    def GetData(self, item, **_):
        # Only the struct's offset is exported, as a hex string.
        return "%#x" % item.obj_offset
class XLSPointerRenderer(XLSObjectRenderer):
    """Renders the address of the pointer target as a hex string."""
    renders_type = "Pointer"

    def GetData(self, item, **_):
        result = item.v()
        # "== None" (not "is None") is deliberate: it also matches Rekall's
        # NoneObject sentinel (invalid pointer) via its equality operator.
        if result == None:
            return "-"

        return "%#x" % result
class XLSNativeTypeRenderer(XLSObjectRenderer):
    """Renders native types as python objects."""
    renders_type = "NativeType"

    def GetData(self, item, **options):
        result = item.v()
        # "!= None" is deliberate - it also rejects Rekall's NoneObject.
        # When the value is invalid this implicitly returns None, leaving
        # the cell empty.
        if result != None:
            return result
class XLS_UNICODE_STRING_Renderer(XLSNativeTypeRenderer):
    """_UNICODE_STRINGs export like native values (their .v() result)."""
    renders_type = "_UNICODE_STRING"
class XLSNoneObjectRenderer(XLSObjectRenderer):
    """Renders invalid/missing values as a dash placeholder."""
    renders_type = "NoneObject"

    def GetData(self, item, **_):
        _ = item
        return "-"
class XLSDateTimeRenderer(XLSObjectRenderer):
    """Renders timestamps as python datetime objects."""
    renders_type = "UnixTimeStamp"
    # Display timestamps with an Excel date/time number format.
    STYLE = styles.Style(number_format='MM/DD/YYYY HH:MM:SS')

    def GetData(self, item, **options):
        # Epoch 0 usually means "unset" - leave the cell empty.
        if item.v() == 0:
            return None

        return item.as_datetime()
from rekall.ui import json_renderer
from rekall.ui import text
from rekall.plugins.renderers import base_objects
from rekall.plugins.renderers import data_export
class ProcDataExport(data_export.DataExportBaseObjectRenderer):
    """Exports Darwin proc structs with a Cybox ProcessObject description."""
    renders_type = "proc"

    def EncodeToJsonSafe(self, task, **_):
        result = super(ProcDataExport, self).EncodeToJsonSafe(task)
        # Attach a Cybox ProcessObjectType description of the process.
        result["Cybox"] = dict(
            type=u"ProcessObj:ProcessObjectType",
            Name=task.name,
            PID=task.pid,
            Creation_Time=task.p_start,
            Parent_PID=task.p_ppid,
            Image_Info=dict(
                type=u"ProcessObj:ImageInfoType",
                Path=task.p_comm,
                Command_Line=task.p_comm,
                File_Name=task.p_comm))

        # Re-encode the combined dict so nested values become JSON safe.
        res = json_renderer.JsonObjectRenderer.EncodeToJsonSafe(self, result)
        return res

    def Summary(self, item, **_):
        """One line summary: "name (pid)"."""
        return "%s (%s)" % (item.get("Cybox", {}).get("Name", ""),
                            item.get("Cybox", {}).get("PID", ""))
class Fileproc_TextObjectRenderer(base_objects.StructTextRenderer):
    """Renders Darwin fileproc structs as type and name columns."""
    renders_type = "fileproc"

    COLUMNS = [
        dict(name="human_type", width=15),
        dict(name="human_name", width=40)
    ]
class Vnode_TextObjectRenderer(base_objects.StructTextRenderer):
    """Renders vnodes as their offset and full filesystem path."""
    renders_type = "vnode"

    COLUMNS = [
        dict(name="obj_offset", style="address"),
        dict(name="full_path", width=40, nowrap=True)
    ]
class Clist_TextObjectRenderer(base_objects.StructTextRenderer):
    """Renders clist buffers as their offset and recovered contents."""
    renders_type = "clist"

    COLUMNS = [
        dict(name="obj_offset", style="address"),
        dict(name="recovered_contents", width=34)
    ]
class Tty_TextObjectRenderer(base_objects.StructTextRenderer):
    """Renders ttys with their vnode and input/output clist buffers."""
    renders_type = "tty"

    COLUMNS = [
        dict(style="address", name="obj_offset"),
        dict(type="vnode", name="vnode"),
        # The nested clists are narrowed to just the recovered contents.
        dict(type="clist", name="input_buffer",
             columns=[dict(name="recovered_contents",
                           width=34)]),
        dict(type="clist", name="output_buffer",
             columns=[dict(name="recovered_contents",
                           width=34)])
    ]
class Session_TextObjectRenderer(base_objects.StructTextRenderer):
    """Renders Darwin login sessions with their leader process."""
    renders_type = "session"

    COLUMNS = [
        dict(name="obj_offset", style="address"),
        dict(name="s_sid"),
        # The session leader is shown as a narrowed proc sub-table.
        dict(name="s_leader", type="proc",
             columns=[dict(name="pid"),
                      dict(name="command", width=30)]),
        dict(name="s_login", width=20, nowrap=True)
    ]
class Socket_TextObjectRenderer(base_objects.StructTextRenderer):
    """Renders sockets with their last owning pid, type and name."""
    renders_type = "socket"

    COLUMNS = [
        dict(name="obj_offset", style="address"),
        dict(name="last_pid", width=10),
        dict(name="human_type", width=20),
        dict(name="human_name", width=60)
    ]
class Rtentry_TextObjectRenderer(base_objects.StructTextRenderer):
    """Renders routing table entries with endpoints and traffic counters."""
    renders_type = "rtentry"

    COLUMNS = [
        dict(name="source_ip", type="sockaddr", width=18),
        dict(name="dest_ip", type="sockaddr", width=18),
        dict(name="name", align="c"),
        dict(name="sent", width=8, align="r"),
        dict(name="rx", width=8, align="r"),
        dict(name="base_calendartime", width=30, align="c"),
        dict(name="rt_expire", align="r"),
        dict(name="delta", align="r")
    ]
class Sockaddr_TextObjectRenderer(text.TextObjectRenderer):
    """Renders a sockaddr as its printable address."""
    renders_type = "sockaddr"

    def render_full(self, target, **_):
        return text.Cell(target.address)
class Zone_TextObjectRenderer(base_objects.StructTextRenderer):
    """Renders kernel allocator zones with usage statistics."""
    renders_type = "zone"

    COLUMNS = [
        dict(name="name", width=20),
        dict(name="count_active", width=12),
        dict(name="count_free", width=12),
        dict(name="elem_size", width=12),
        dict(name="tracks_pages", width=12),
        dict(name="allows_foreign", width=12)
    ]
class Ifnet_TextObjectRenderer(base_objects.StructTextRenderer):
    """Renders network interfaces with their layer 2/3 addresses."""
    renders_type = "ifnet"

    COLUMNS = [
        dict(name="name", width=12),
        dict(name="l2_addr", width=18),
        dict(name="ipv4_addr", width=16),
        dict(name="ipv6_addr", width=40)
    ]
class Proc_TextObjectRenderer(base_objects.StructTextRenderer):
    """Renders Darwin proc structs as offset, name and pid."""
    renders_type = "proc"

    COLUMNS = [
        dict(style="address", name="obj_offset"),
        dict(width=20, align="l", name="name"),
        dict(width=5, align="r", name="pid")
    ]
from builtins import str
import datetime
import pytz
from rekall.ui import renderer
from rekall.ui import json_renderer
from rekall.plugins.renderers import json_storage
from rekall_lib import utils
# Copy many of the normal json object renderers. These already produce
# JSON-safe state, so the data export renderer can reuse them unchanged.
renderer.CopyObjectRenderers((
    json_renderer.StringRenderer,
    json_storage.ArrowObjectRenderer,
    json_storage.AttributeDictObjectRenderer,
    json_storage.BaseAddressSpaceObjectRenderer,
    json_storage.FileAddressSpaceObjectRenderer,
    json_storage.IA32PagedMemoryObjectRenderer,
    json_storage.JsonAttributedStringRenderer,
    json_storage.JsonEnumerationRenderer,
    json_storage.JsonFormattedAddress,
    json_storage.JsonHexdumpRenderer,
    json_storage.JsonInstructionRenderer,
    json_storage.NoneObjectRenderer,
    json_storage.ProfileObjectRenderer,
    json_storage.SessionObjectRenderer,
    json_storage.SetObjectRenderer,
    json_storage.SlottedObjectObjectRenderer,
    json_storage.UnixTimestampJsonObjectRenderer,
), renderer="DataExportRenderer")
class DataExportRenderer(json_renderer.JsonRenderer):
    """An exporter for data."""
    name = "data"

    def table_row(self, *args, **options):
        """Encode one table row and emit it as an "r" message."""
        # Encode the options and merge them with the table row. This allows
        # plugins to send additional data about the row in options.
        result = self.encoder.Encode(options)
        for i, arg in enumerate(args):
            column_spec = self.table.column_specs[i].copy()
            column_spec.update(options)

            # A per-column renderer (if declared) overrides the value type.
            object_renderer = self.object_renderers[i]
            if object_renderer is not None:
                column_spec["type"] = object_renderer

            column_name = column_spec["name"]
            if column_name:
                result[column_name] = self.encoder.Encode(
                    arg, **column_spec)

        self.SendMessage(["r", result])
class NativeDataExportObjectRenderer(json_renderer.JsonObjectRenderer):
    """This is the fallback for all objects without a dedicated renderer."""
    renderers = ["DataExportRenderer"]

    def Summary(self, item, formatstring=None, header=False, **options):
        """Returns a short summary of the object.

        The summary is a short human readable string, describing the object.
        """
        try:
            # Address columns are zero padded to 12 hex digits.
            if formatstring == "[addrpad]" and not header:
                return "%#014x" % item
        except TypeError:
            # item was not numeric - fall through to the generic string form.
            pass

        # Since we are the default renderer we must ensure this works.
        return utils.SmartStr(item)
class DataExportObjectRenderer(json_renderer.StateBasedObjectRenderer):
    """Base class for state-based data export object renderers."""
    renderers = ["DataExportRenderer"]
class DataExportTimestampObjectRenderer(DataExportObjectRenderer):
    """Exports datetimes as epoch seconds plus a formatted string."""
    renders_type = "datetime"
    renderers = ["DataExportRenderer"]

    # Reference point for converting aware datetimes to epoch seconds.
    EPOCH = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, pytz.UTC)

    def GetState(self, item, **_):
        return dict(epoch=(item - self.EPOCH).total_seconds(),
                    string_value=item.strftime("%Y-%m-%d %H:%M:%S%z"))
class DataExportBaseObjectRenderer(DataExportObjectRenderer):
    """Exports BaseObjects with their offset, type, name and address space."""
    renders_type = "BaseObject"

    def GetState(self, item, **options):
        result = super(DataExportBaseObjectRenderer, self).GetState(
            item, **options)
        result.update(offset=item.obj_offset,
                      type_name=utils.SmartUnicode(item.obj_type),
                      name=utils.SmartUnicode(item.obj_name),
                      vm=utils.SmartUnicode(item.obj_vm))

        return result
class DataExportPointerObjectRenderer(DataExportBaseObjectRenderer):
    """Exports Pointers with both their value and the encoded target."""
    renders_type = "Pointer"

    def Summary(self, item, **options):
        """Returns the object formatted according to the column_spec."""
        # Summarize the pointed-to value, not the pointer itself.
        item = item["target"]
        return self.FromEncoded(item, "DataExportRenderer")(
            self.renderer).Summary(item, **options)

    def GetState(self, item, **options):
        result = super(DataExportPointerObjectRenderer, self).GetState(
            item, **options)
        result["target"] = item.v()

        # Also encode the target object.
        target_obj = item.deref()
        target_obj_renderer = self.DelegateObjectRenderer(target_obj)
        result["target_obj"] = target_obj_renderer.EncodeToJsonSafe(target_obj)

        return result
class DataExportNativeTypeRenderer(DataExportObjectRenderer):
    """Exports native types directly as their python value."""
    renders_type = "NativeType"

    def EncodeToJsonSafe(self, item, **_):
        return item.v()
class DataExportTupleRenderer(DataExportObjectRenderer):
    """Encodes tuples (including namedtuples) for data export."""
    renders_type = "tuple"

    def _encode_member(self, member):
        # Each member is encoded through its own delegate renderer.
        return self.DelegateObjectRenderer(member).EncodeToJsonSafe(member)

    def EncodeToJsonSafe(self, item, **_):
        """Namedtuples become dicts keyed by field; plain tuples, lists."""
        if hasattr(item, "_fields"):
            return {field: self._encode_member(getattr(item, field))
                    for field in item._fields}

        return [self._encode_member(member) for member in item]
class DataExportRDFValueObjectRenderer(DataExportBaseObjectRenderer):
    """Exports RDFValues via their string serialization."""
    renders_type = "RDFValue"

    def Summary(self, item, **_):
        return utils.SmartStr(item.get("str", ""))

    def GetState(self, item, **options):
        return dict(str=item.SerializeToString())
class DataExportPhysicalAddressContextObjectRenderer(
        DataExportRDFValueObjectRenderer):
    """Exports PhysicalAddressContext objects via their summary dict."""
    renders_type = "PhysicalAddressContext"

    def Summary(self, item, **_):
        # NOTE(review): GetState below returns item.summary(), which may not
        # contain a "str" key - confirm this lookup ever succeeds.
        return utils.SmartStr(item.get("str", ""))

    def GetState(self, item, **options):
        return item.summary()
from rekall import testlib
from rekall.plugins.common import memmap
from rekall.plugins.linux import common
from rekall_lib import utils
class LinuxPsList(common.LinProcessFilter):
    """Gathers active tasks by walking the task_struct->task list.

    It does not display the swapper process. If the DTB column is blank, the
    item is likely a kernel thread.
    """
    __name = "pslist"

    table_header = [
        dict(name="proc", width=40, type="task_struct"),
        dict(name="ppid", align="r", width=6),
        dict(name="uid", align="r", width=6),
        dict(name="gid", align="r", width=6),
        dict(name="dtb", style="address"),
        dict(name="start_time", align="r", width=24),
        dict(name="binary")
    ]

    def column_types(self):
        """Describe the value type of each column for renderers."""
        task = self.session.profile.task_struct()
        return dict(
            proc=task,
            ppid=0,
            uid=utils.HexInteger(0),
            gid=utils.HexInteger(0),
            dtb=utils.HexInteger(0),
            start_time=task.task_start_time,
            binary="")

    def collect(self):
        for task in sorted(self.filter_processes()):
            # Physical address of the task's page table base (blank for
            # kernel threads, which have no mm).
            dtb = self.kernel_address_space.vtop(task.mm.pgd)
            # Path of the executable backing this task.
            path = task.get_path(task.mm.m("exe_file"))
            yield (task,
                   task.parent.pid,
                   task.uid,
                   task.gid,
                   dtb, task.task_start_time,
                   path)
class LinMemMap(memmap.MemmapMixIn, common.LinProcessFilter):
    """Dumps the memory map for linux tasks."""
    # All behavior comes from the mixin; only the plugin name is declared.
    __name = "memmap"
class LinMemDump(memmap.MemDumpMixin, common.LinProcessFilter):
    """Dump the addressable memory for a process."""
    # All behavior comes from the MemDumpMixin.
class TestLinMemDump(testlib.HashChecker):
    """Regression test driver for the memdump plugin."""
    mode = "mode_linux_memory"

    PARAMETERS = dict(
        commandline="memdump --proc_regex %(proc_name)s --dump_dir %(tempdir)s",
        proc_name="bash",
    )
# We only care about PIDTYPE_PID here.
# http://lxr.free-electrons.com/source/include/linux/pid.h?v=3.8#L6
# enum pid_type
# {
#     PIDTYPE_PID,
# };
PIDTYPE_PID = 0  # First enumerator of the kernel's enum pid_type.
class PidHashTable(LinuxPsList):
    """List processes by enumerating the pid hash tables."""
    __name = "pidhashtable"

    def list_tasks(self):
        """Yield each unique task_struct reachable through the pid hash."""
        # According to
        # http://lxr.free-electrons.com/source/kernel/pid.c?v=3.8#L566, the
        # pid_hash table is a pointer to a dynamically allocated array of
        # hlist_head.
        pidhash_shift = self.profile.get_constant_object(
            "pidhash_shift", "unsigned int")

        pidhash = self.profile.get_constant_object(
            "pid_hash",
            target="Pointer",
            target_args=dict(
                target="Array",
                target_args=dict(
                    count=1 << pidhash_shift,
                    target="hlist_head"
                )
            )
        )

        # A task can appear in several slots; report each one only once.
        seen = set()

        # Now we iterate over all the hash slots in the hash table to retrieve
        # their struct upid entries.
        for hash_slot in pidhash:
            for upid in hash_slot.list_of_type("upid", "pid_chain"):
                # upid structures are contained inside pid structures:
                # http://lxr.free-electrons.com/source/kernel/pid.c?v=3.8#L351
                # container_of(pnr, struct pid, numbers[ns->level]);
                level = upid.ns.level
                pid = self.profile.pid(
                    upid.obj_offset -
                    self.profile.get_obj_offset("pid", "numbers") -
                    level * self.profile.get_obj_size("pid"))

                # Here we only care about regular PIDs.
                for task in pid.tasks[PIDTYPE_PID].list_of_type(
                        "task_struct", "pids"):
                    if task not in seen:
                        yield task
                        seen.add(task)
from rekall.plugins.overlays import basic
from rekall.plugins.linux import heap_analysis
class Zsh(heap_analysis.HeapAnalysis):
    """Extracts the zsh command history, similar to the existing bash plugin.
    """

    __name = "zsh"

    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="task", hidden=True),
        dict(name="counter", width=8),
        dict(name="started", width=24),
        dict(name="ended", width=24),
        dict(name="command")
    ]

    def __init__(self, **kwargs):
        super(Zsh, self).__init__(**kwargs)
        # Profile describing zsh's internal histent/hashnode structs; chosen
        # per-architecture in collect().
        self._zsh_profile = None

    def collect(self):
        """Walk each zsh heap for the circular histent list and yield rows."""
        if self.session.profile.metadata("arch") == 'AMD64':
            self._zsh_profile = ZshProfile64(session=self.session)
        else:
            # default/fallback profile
            self._zsh_profile = ZshProfile32(session=self.session)

        # Heap chunk size a histent struct would occupy, after allocator
        # alignment.
        chunk_size = self.get_aligned_size(
            self._zsh_profile.get_obj_size('histent'))

        for task in self.filter_processes():
            if self.init_for_task(task):
                yield dict(divider="Task: %s (%s)" % (task.name,
                                                      task.pid))

                # Index every allocated chunk by its data (user) address.
                chunks_dict = dict()
                data_offset = self.profile.get_obj_offset("malloc_chunk", "fd")
                chunk_data_pointers = list()
                for chunk in self.get_all_allocated_chunks():
                    chunks_dict[chunk.v() + data_offset] = chunk
                    chunk_data_pointers.append(chunk.v() + data_offset)

                commands_dict = dict()

                valid_histentry = None

                # we first try to find a chunk that most probably contains a
                # histent struct
                for chunk in self.get_all_allocated_chunks():
                    if not chunk.chunksize() == chunk_size:
                        continue

                    histent = self._zsh_profile.histent(
                        offset=chunk.v()+data_offset, vm=self.process_as)

                    # we test if the current histent struct seems to be valid
                    # first test: do we know the chunks where relevant
                    # pointers point to
                    pointers = [histent.node.nam, histent.down, histent.up]
                    if not len(set(pointers) & set(chunk_data_pointers)) \
                            == len(pointers):
                        continue

                    # second test: points the previous/next histent entry to
                    # this histent entry?
                    if not histent.up.down == histent or not histent.down.up \
                            == histent:
                        continue

                    # we hopefully found one
                    valid_histentry = histent
                    break

                if valid_histentry:
                    self.session.logging.info(
                        "We probably found a valid histent chunk and now "
                        "start walking.")

                    # entries are linked circular so walking in one direction
                    # should be sufficient
                    for histent in valid_histentry.walk_list('down'):
                        command = ''

                        try:
                            # The command text lives in a separate chunk
                            # referenced by node.nam.
                            command = chunks_dict[histent.node.nam.v()]
                            command = command.to_string()
                            command = command[:command.index("\x00")]

                        except KeyError:
                            self.session.logging.warn(
                                "Unexpected error: chunk for given "
                                "command-reference does not seem to exist.")

                        except ValueError:
                            # No NUL terminator found - keep the full string.
                            pass

                        if histent.stim == histent.ftim == 0 and command == '':
                            histent_vma = heap_analysis.get_vma_for_offset(
                                self.vmas, histent.v())

                            if histent_vma not in self.heap_vmas:
                                # we most probably found the "curline" histent
                                # struct located in zsh's .bss section. as it
                                # doesn't contain an actual executed command,
                                # we are skipping it
                                continue

                        command_number = histent.histnum
                        start = self.profile.UnixTimeStamp(value=histent.stim)
                        end = self.profile.UnixTimeStamp(value=histent.ftim)
                        commands_dict[command_number] = [start,
                                                         end,
                                                         repr(command)]

                for key, value in sorted(commands_dict.items()):
                    yield dict(task=task, counter=key, started=value[0],
                               ended=value[1], command=value[2])
class ZshProfile32(basic.Profile32Bits, basic.BasicClasses):
    """Profile to parse internal zsh data structures."""

    __abstract = True

    # types come from zsh's zsh.h
    # Offsets/sizes are hand-derived for 32 bit zsh builds.
    zsh_vtype_32 = {
        "histent": [48, {
            "down": [16, ["Pointer", {
                "target": "histent"
            }]],
            "ftim": [28, ["long int"]],
            "histnum": [40, ["long long int"]],
            "node": [0, ["hashnode"]],
            "nwords": [36, ["int"]],
            "stim": [24, ["long int"]],
            "up": [12, ["Pointer", {
                "target": "histent"
            }]],
            "words": [32, ["Pointer", {
                "target": "short int"
            }]],
            "zle_text": [20, ["Pointer", {
                "target": "char"
            }]]
        }],
        "hashnode": [12, {
            "flags": [8, ["int"]],
            "nam": [4, ["Pointer", {
                "target": "char"
            }]],
            "next": [0, ["Pointer", {
                "target": "hashnode"
            }]]
        }]
    }

    def __init__(self, **kwargs):
        super(ZshProfile32, self).__init__(**kwargs)
        self.add_types(self.zsh_vtype_32)
class ZshProfile64(basic.ProfileLP64, basic.BasicClasses):
    """Profile to parse internal zsh data structures."""

    __abstract = True

    # types come from zsh's zsh.h
    # Offsets/sizes are hand-derived for 64 bit zsh builds.
    zsh_vtype_64 = {
        "histent": [88, {
            "down": [32, ["Pointer", {
                "target": "histent"
            }]],
            "ftim": [56, ["long int"]],
            "histnum": [80, ["long int"]],
            "node": [0, ["hashnode"]],
            "nwords": [72, ["int"]],
            "stim": [48, ["long int"]],
            "up": [24, ["Pointer", {
                "target": "histent"
            }]],
            "words": [64, ["Pointer", {
                "target": "short int"
            }]],
            "zle_text": [40, ["Pointer", {
                "target": "char"
            }]]
        }],
        "hashnode": [24, {
            "flags": [16, ["int"]],
            "nam": [8, ["Pointer", {
                "target": "char"
            }]],
            "next": [0, ["Pointer", {
                "target": "hashnode"
            }]]
        }]
    }

    def __init__(self, **kwargs):
        super(ZshProfile64, self).__init__(**kwargs)
        self.add_types(self.zsh_vtype_64)
from rekall.plugins.linux import common
class CheckAFInfo(common.LinuxPlugin):
    """Verifies the operation function pointers of network protocols."""
    __name = "check_afinfo"

    table_header = [
        dict(name="symbol", width=30),
        dict(name="member", width=30),
        dict(name="address", style="address"),
        dict(name="module")
    ]

    def CreateChecks(self):
        """Builds the sequence of function checks we need to look at.

        We support multiple kernels by adding a bunch of function names which
        may not exist on the current kernel. This is expected as the code
        simply ignores struct members which are not defined on this kernel.
        """
        # Older kernels have the operations in the structs.
        members = sorted(self.profile.file_operations().members.keys())

        if self.profile.has_type("seq_operations"):
            # Newer kernels use the nested seq_ops struct.
            members.extend(["seq_ops.%s" % x
                            for x in self.profile.seq_operations().members])

        if self.profile.has_type("file_operations"):
            # Newer kernels use the nested seq_fops struct.
            members.extend(["seq_fops.%s" % x
                            for x in self.profile.file_operations().members])

        return [
            dict(name="tcp",
                 constant_type="tcp_seq_afinfo",
                 global_vars=[
                     "tcp6_seq_afinfo",
                     "tcp4_seq_afinfo"
                 ],
                 members=members,
                ),
            dict(name="udp",
                 constant_type="udp_seq_afinfo",
                 global_vars=[
                     "udplite6_seq_afinfo",
                     "udp6_seq_afinfo",
                     "udplite4_seq_afinfo",
                     "udp4_seq_afinfo"
                 ],
                 members=members,
                ),
        ]

    def check_members(self, struct, members):
        """Yields struct members which are not known to exist in any module."""
        for member in members:
            ptr = struct.m(member)
            if not ptr:
                continue

            # This is really a function pointer.
            func = ptr.dereference_as(target="Function",
                                      target_args=dict(name=member))

            yield member, func

    def check_functions(self, checks):
        """Apply the checks to the kernel and yields the results."""
        for check in checks:
            for variable in check["global_vars"]:
                var_ptr = self.profile.get_constant_object(
                    variable, target=check["constant_type"],
                    vm=self.kernel_address_space)

                for member, func in self.check_members(
                        var_ptr, check["members"]):
                    yield variable, member, func

    def collect(self):
        checks = self.CreateChecks()
        for variable, member, func in self.check_functions(checks):
            # Resolve the function pointer back to a known symbol/module.
            location = ", ".join(
                self.session.address_resolver.format_address(
                    func.obj_offset))

            # Point out suspicious constants (pointers into no known module).
            highlight = None if location else "important"

            yield dict(symbol=variable, member=member, address=func,
                       module=location, highlight=highlight)
"""Miscelaneous information gathering plugins."""
from builtins import str
__author__ = "Michael Cohen <scudette@google.com>"
import hashlib
from rekall import obj
from rekall.plugins import core
from rekall.plugins.linux import common
class LinuxSetProcessContext(core.SetProcessContextMixin,
                             common.LinProcessFilter):
    """A cc plugin for Linux."""
class LinVtoP(core.VtoPMixin, common.LinProcessFilter):
    """Describe virtual to physical translation on Linux platforms."""
    # All behavior comes from the VtoPMixin.
class LinuxHighestUserAddress(common.AbstractLinuxParameterHook):
    """The highest address for user mode/kernel mode division."""
    name = "highest_usermode_address"

    def calculate(self):
        """Returns TASK_SIZE_MAX for the image's architecture."""
        arch = self.session.profile.metadata("arch")

        # 32 bit architectures split the address space at PAGE_OFFSET.
        if arch in ("I386", "ARM"):
            return self.session.GetParameter("linux_page_offset")

        # #define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
        if arch == "AMD64":
            return (1 << 47) - 0x1000

        # Unknown architecture - fall back to the whole 64 bit space.
        self.session.logging.warn("Set TASK_SIZE_MAX for arch %s", arch)
        return 2**64
class LinImageFingerprint(common.AbstractLinuxParameterHook):
    """Fingerprint the current image.

    This parameter tries to get something unique about the image quickly. The
    idea is that two different images (even of the same system at different
    points in time) will have very different fingerprints. The fingerprint is
    used as a key to cache persistent information about the system.

    Live systems can not have a stable fingerprint and so return a NoneObject()
    here.

    We return a list of tuples:
      (physical_offset, expected_data)

    The list uniquely identifies the image. If one were to read all physical
    offsets and find the expected_data at these locations, then we have a very
    high level of confidence that the image is unique and matches the
    fingerprint.
    """
    name = "image_fingerprint"

    def calculate(self):
        if not self.session.physical_address_space:
            return None

        if self.session.physical_address_space.volatile:
            return obj.NoneObject("No fingerprint for volatile image.")

        result = []
        profile = self.session.profile
        address_space = self.session.GetParameter("default_address_space")

        # The kernel banner string identifies the exact kernel build.
        banner = profile.get_constant_object("linux_banner", "String")
        result.append((address_space.vtop(banner.obj_offset), banner.v()))

        # Current system tick count.
        jiffies = profile.get_constant_object("jiffies", "String",
                                              dict(length=8, term=None))
        result.append((address_space.vtop(jiffies.obj_offset), jiffies.v()))

        # List of processes should also be pretty unique.
        for task in self.session.plugins.pslist().filter_processes():
            name = task.name.cast("String", length=30)
            task_name_offset = address_space.vtop(name.obj_offset)

            # Read the raw data for the task name. Usually the task name is
            # encoded in utf8 but then we might not be able to compare it
            # exactly - we really want bytes here.
            result.append((task_name_offset, name.v()))

        return dict(
            hash=hashlib.sha1(str(result).encode("utf8")).hexdigest(),
            tests=result)
from builtins import str
__author__ = "Michael Cohen <scudette@gmail.com>"
from rekall import scan
from rekall.plugins.overlays import basic
from rekall.plugins.linux import common
class TimestampScanner(scan.BaseScanner):
    """Search for the readline timestamps.

    These have a special signature which looks like "#" followed by the
    time since the epoch - for example #1384457055.
    """
    checks = [
        # We use a quick string search first for this rather unique string.
        ('StringCheck', dict(needle=b"#")),

        # Refine the search with a more precise regex.
        ('RegexCheck', dict(regex=b"\#\d{10}")),
    ]
class HeapTimestampScanner(common.HeapScannerMixIn, TimestampScanner):
    """Timestamp scanner restricted to the process heap."""
    pass
class LinHistoryScanner(scan.PointerScanner):
    """Scan for the readline history struct.

    This looks for references to the timestamps discovered by the
    TimestampScanner above.
    """

    def scan(self, **kwargs):
        """Yield a _hist_entry instance for each pointer hit.

        Each hit is the address of a pointer to a timestamp string; the
        enclosing _hist_entry begins at a fixed negative offset from its
        "timestamp" member (container_of style).
        """
        # The offset of "timestamp" within _hist_entry is a profile constant,
        # so compute it once instead of once per hit.
        timestamp_relative_offset = self.profile.get_obj_offset(
            "_hist_entry", "timestamp")

        for hit in super(LinHistoryScanner, self).scan(**kwargs):
            hist_entry = self.profile._hist_entry(
                offset=hit - timestamp_relative_offset,
                vm=self.address_space)
            yield hist_entry
class HeapHistoryScanner(common.HeapScannerMixIn, LinHistoryScanner):
    """Only scan for history in the heap."""
    # Behavior comes entirely from the mixin plus LinHistoryScanner.
class BashProfile64(basic.ProfileLP64, basic.BasicClasses):
    """Profile to parse internal bash data structures."""

    __abstract = True

    # types come from bash's ./lib/readline/history.h
    # Layout of HIST_ENTRY on 64 bit builds: three pointers.
    bash_vtype_64 = {
        "_hist_entry": [24, {
            "line": [0, ["Pointer", dict(target="String")]],
            "timestamp": [8, ["Pointer", dict(target="String")]],
            "data": [16, ["Pointer", dict(target="String")]],
        }],
    }

    def __init__(self, **kwargs):
        super(BashProfile64, self).__init__(**kwargs)
        self.add_types(self.bash_vtype_64)
class BashProfile32(basic.Profile32Bits, basic.BasicClasses):
    """Profile to parse internal bash data structures."""

    __abstract = True

    # types come from bash's ./lib/readline/history.h
    # Layout of HIST_ENTRY on 32 bit builds: three pointers.
    bash_vtype_32 = {
        "_hist_entry": [0xC, {
            "line": [0, ["Pointer", dict(target="String")]],
            "timestamp": [4, ["Pointer", dict(target="String")]],
            "data": [8, ["Pointer", dict(target="String")]],
        }],
    }

    def __init__(self, **kwargs):
        super(BashProfile32, self).__init__(**kwargs)
        self.add_types(self.bash_vtype_32)
class BashHistory(common.LinProcessFilter):
    """Scan the bash process for history.

    Based on original algorithm by Andrew Case.
    """
    __name = "bash"

    __args = [
        dict(name="scan_entire_address_space", type="Boolean",
             help="Scan the entire process address space, not only the heap."),
        dict(name="proc_regex", default="^bash$", type="RegEx",
             help="The processes we should examine."),
    ]

    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="task", hidden=True),
        dict(name="timestamp", width=24),
        dict(name="command"),
    ]

    def __init__(self, *args, **kwargs):
        super(BashHistory, self).__init__(*args, **kwargs)
        # Pick the _hist_entry layout matching the image's pointer size.
        if self.profile.metadata("arch") == "AMD64":
            self.bash_profile = BashProfile64(session=self.session)
        else:
            self.bash_profile = BashProfile32(session=self.session)

    def get_timestamps(self, scanner):
        """Scan process memory for things that look like a timestamp."""
        results = {}
        for hit in scanner.scan():
            # Skip the leading "#" and parse the 10 digit epoch value.
            timestamp = int(scanner.address_space.read(hit+1, 10))
            results[hit] = timestamp

        return results

    def collect(self):
        for task in self.filter_processes():
            process_as = task.get_process_address_space()

            # Choose the correct scanner to use depending on the flags.
            if self.plugin_args.scan_entire_address_space:
                timestamp_scanner = TimestampScanner(
                    profile=self.profile, session=self.session,
                    address_space=process_as)
            else:
                timestamp_scanner = HeapTimestampScanner(
                    profile=self.profile, session=self.session,
                    address_space=process_as, task=task)

            timestamps = self.get_timestamps(timestamp_scanner)
            if not timestamps:
                continue

            yield dict(divider="Task: %s (%s)" % (task.name,
                                                  task.pid))

            # Now look for _hist_entry structs pointing at those timestamps.
            if self.plugin_args.scan_entire_address_space:
                scanner = LinHistoryScanner(
                    profile=self.bash_profile, session=self.session,
                    address_space=process_as, pointers=timestamps)
            else:
                scanner = HeapHistoryScanner(
                    profile=self.bash_profile, session=self.session,
                    address_space=process_as, task=task,
                    pointers=timestamps)

            # Sort hits chronologically by their timestamp string.
            hits = sorted(scanner.scan(), key=lambda x: x.timestamp.deref())
            for hit in hits:
                # Timestamp strings look like "#1384457055"; strip the "#".
                timestamp = self.profile.UnixTimeStamp(
                    value=int(str(hit.timestamp.deref())[1:]))
                yield dict(
                    task=task,
                    timestamp=timestamp,
                    command=hit.line.deref())
from builtins import str
from builtins import range
from rekall.plugins.linux import common
class Lsmod(common.LinuxPlugin):
    """Gathers loaded kernel modules."""
    name = "lsmod"

    table_header = [
        dict(name="virtual", style="address"),
        dict(name="start", style="address"),
        dict(name="size", width=10),
        dict(name="name", width=20)
    ]

    def get_module_list(self):
        """Yield each struct module on the kernel's global modules list."""
        head = self.profile.get_constant_object(
            "modules", target="list_head", vm=self.kernel_address_space)

        # Walk the doubly linked list embedded at module.list.
        for module in head.list_of_type("module", "list"):
            yield module

    def collect(self):
        """Emit one row per loaded module."""
        for mod in self.get_module_list():
            yield (mod.obj_offset, mod.base, mod.size, mod.name)
class LsmodSections(common.LinuxPlugin):
    """Display all the ELF sections of kernel modules."""

    name = "lsmod_sections"

    table_header = [
        dict(name="name", width=20),
        dict(name="section", width=30),
        dict(name="address", style="address")
    ]

    def get_module_sections(self, module):
        """Yield each section attribute struct of the given module."""
        section_count = module.sect_attrs.nsections or 0
        for index in range(section_count):
            yield module.sect_attrs.attrs[index]

    def collect(self):
        """Emit (module name, section name, address) for every section."""
        lsmod = self.session.plugins.lsmod()
        for module in lsmod.get_module_list():
            for attr in self.get_module_sections(module):
                yield (module.name, attr.name.deref(), attr.address)
class Lsmod_parameters(common.LinuxPlugin):
    """Display parameters for all kernel modules."""
    name = "lsmod_parameters"

    # Maps the kernel's param_get_* getter functions to the Rekall type
    # used to decode the parameter value in memory.
    _arg_lookuptable = {
        "linux!param_get_bool": ("bool", {}),
        "linux!param_get_byte": ("char", {}),
        "linux!param_get_charp": ("Pointer", dict(target="String")),
        "linux!param_get_int": ("int", {}),
        "linux!param_get_invbool": ("byte", {}),
        "linux!param_get_long": ("long", {}),
        "linux!param_get_short": ("short", {}),
        "linux!param_get_uint": ("unsigned int", {}),
        "linux!param_get_ulong": ("unsigned long", {}),
        "linux!param_get_ushort": ("unsigned short", {}),
    }

    table_header = [
        dict(name="name", width=20),
        dict(name="key", width=40),
        dict(name="value", width=20)
    ]

    def __init__(self, *args, **kwargs):
        super(Lsmod_parameters, self).__init__(*args, **kwargs)

        # Resolve the symbolic getter names above into concrete kernel
        # addresses for this profile. Unresolvable symbols are skipped.
        self.arg_lookuptable = {}
        resolver = self.session.address_resolver
        for x, y in list(self._arg_lookuptable.items()):
            try:
                address = resolver.get_constant_object(
                    x, "Function").obj_offset
                self.arg_lookuptable[address] = y
            except ValueError:
                pass

    def get_module_parameters(self, module):
        """Yield (name, decoded value) for each parameter of a module."""
        for kernel_param in module.m("kp"):
            # NOTE: rekall objects overload ==; "== None" is deliberate
            # here (it also matches NoneObject) - do not change to "is".
            if kernel_param.getter_addr == None:
                continue

            getter_function = self.profile.Function(
                offset=kernel_param.getter_addr,
                vm=self.kernel_address_space)

            value = None
            lookup = self.arg_lookuptable.get(kernel_param.getter_addr)
            if lookup:
                type, args = lookup

                # The arg type is a pointer to a basic type.
                value = kernel_param.m("u1").arg.dereference_as(
                    target=type, target_args=args)

            elif getter_function == self.profile.get_constant_object(
                    "param_get_string", target="Function",
                    vm=self.kernel_address_space):
                value = kernel_param.m("u1").str.deref().v()

            # It is an array of values.
            elif getter_function == self.profile.get_constant_object(
                    "param_array_get", target="Function",
                    vm=self.kernel_address_space):
                array = kernel_param.m("u1").arr

                getter_function = self.profile.Function(
                    offset=array.getter_addr, vm=self.kernel_address_space)

                # Is this a known getter function?
                # NOTE(review): arg_lookuptable is keyed by integer
                # offsets, but here it is probed with a Function object
                # (contrast the kernel_param.getter_addr lookup above).
                # Verify Function hashing/equality matches ints, or this
                # lookup may never hit - TODO confirm.
                lookup = self.arg_lookuptable.get(getter_function)
                if lookup and array.elemsize:
                    # Decode according to this function.
                    type, args = lookup
                    result = []
                    offset = array.elem.deref().obj_offset
                    number_of_elements = array.num.deref() or array.max
                    while len(result) < number_of_elements:
                        result.append(
                            self.profile.Object(type, offset=offset,
                                                vm=self.kernel_address_space))
                        offset += array.elemsize

                    value = ",".join([str(x) for x in result])
                else:
                    self.session.logging.debug("Unknown function getter %r",
                                               getter_function)

                    # Fall back to just naming the getter's address.
                    value = self.session.address_resolver.format_address(
                        getter_function)

            yield kernel_param.name.deref(), value

    def collect(self):
        lsmod = self.session.plugins.lsmod()
        for module in lsmod.get_module_list():
            for key, value in self.get_module_parameters(module):
                yield (module.name, key, value)
class Moddump(common.LinuxPlugin):
    '''Dumps loaded kernel modules.'''
    __name = "moddump"

    __args = [
        dict(name="dump_dir", help="Dump directory.",
             required=True),
        dict(name="regexp", default=None, type="RegEx",
             help="Regexp on the module name.")
    ]

    def dump_module(self, module):
        """Read the module's in-memory image and return it as bytes."""
        start = int(module.base)
        return module.obj_vm.read(start, module.size)

    def render(self, renderer):
        """Write one .lkm file per (optionally filtered) loaded module."""
        lsmod_plugin = self.session.plugins.lsmod(session=self.session)
        regexp = self.plugin_args.regexp

        for module in lsmod_plugin.get_module_list():
            # Apply the optional name filter; unnamed modules never match.
            if regexp and (not module.name or
                           not regexp.search(module.name)):
                continue

            file_name = "{0}.{1:#x}.lkm".format(module.name,
                                                module.base)
            with renderer.open(directory=self.plugin_args.dump_dir,
                               filename=file_name,
                               mode="wb") as mod_file:
                mod_file.write(self.dump_module(module))
                renderer.format("Wrote {0} bytes to {1}\n",
                                module.size, file_name)
from builtins import range
from rekall import testlib
from rekall.plugins.linux import common
class CheckIdt(common.LinuxPlugin):
    """ Checks if the IDT has been altered """

    __name = "check_idt"

    table_header = [
        dict(name="index", style="address"),
        dict(name="address", style="address"),
        dict(name="type", width=18, align="r"),
        dict(name="present", width=7, align="r"),
        dict(name="dpl", width=3, align="r"),
        dict(name="symbol")
    ]

    def CheckTable(self, table, check_indexes=None):
        """Given an IDT table yields information about all its entries.

        Args:
          table: An IDT table object (gate_struct64 or desc_struct).
          check_indexes: A list of indexes to check. If not set we do 0:255.

        Yields:
          (slot, entry) pairs for each requested index.
        """
        indexes = check_indexes if check_indexes is not None else range(256)
        for slot in indexes:
            yield slot, table[slot]

    def CheckIDTTables(self):
        """Walk the kernel IDT and yield every entry.

        This works by walking the IDT table for the entries that Linux uses
        and verifies that each is a symbol in the kernel.
        """
        # arch/x86/include/asm/desc_defs.h
        # hw handlers + system call
        idt_type = ("desc_struct"
                    if self.profile.metadata('arch') == "I386"
                    else "gate_struct64")

        # idt_table is defined in arch/x86/kernel/traps.c for 32-bit kernels
        # and in arch/x86/kernel/head_64.S on 64-bit kernels.
        # idt_table entries are set via the set_*_gate set of functions in
        # arch/x86/include/asm/desc.h.
        idt_table = self.profile.get_constant_object(
            "idt_table",
            target="Array",
            target_args=dict(
                target=idt_type,
                count=256)
        )

        return self.CheckTable(idt_table)

    def collect(self):
        """Emit one row per IDT slot, highlighting non-kernel handlers."""
        for slot, entry in self.CheckIDTTables():
            symbol = ", ".join(
                self.session.address_resolver.format_address(
                    entry.address))

            # Entries that do not resolve inside the kernel image are
            # suspicious.
            highlight = None if symbol.startswith("linux") else "important"
            yield dict(index=slot, address=entry.address,
                       type=entry.gate_type, present=entry.present,
                       dpl=entry.dpl, symbol=symbol,
                       highlight=highlight)
class TestCheckIdt(testlib.SimpleTestCase):
    """Smoke test: run the check_idt plugin end to end."""
    PARAMETERS = dict(commandline="check_idt")
from builtins import range
from rekall.plugins.linux import common
class Banner(common.LinuxPlugin):
    """Prints the Linux banner information."""

    __name = "banner"

    table_header = [
        dict(name="banner", width=80)
    ]

    def collect(self):
        """Emit the kernel's linux_banner string as a single row."""
        yield dict(banner=self.profile.get_constant_object(
            "linux_banner", target="String",
            vm=self.kernel_address_space))
class CpuInfo(common.LinuxPlugin):
    """Prints information about each active processor."""

    __name = "cpuinfo"

    table_header = [
        dict(name="CPU", width=4),
        dict(name="vendor", width=20),
        dict(name="model", width=80)
    ]

    def online_cpus(self):
        """returns a list of online cpus (the processor numbers)"""
        # later kernels.
        cpus = (self.profile.get_constant("cpu_online_bits") or
                self.profile.get_constant("cpu_present_map"))
        if not cpus:
            raise AttributeError("Unable to determine number of online CPUs "
                                 "for memory capture")

        # The constant is a CPU bitmap; each set bit marks an online CPU.
        bmap = self.profile.Object(
            "unsigned long", offset=cpus, vm=self.kernel_address_space)

        # NOTE(review): obj_size looks like a size in *bytes*, so this only
        # tests the first obj_size bits of the mask (CPUs 0..7 on 64 bit).
        # TODO confirm whether this should be obj_size * 8.
        for i in range(0, bmap.obj_size):
            if bmap & (1 << i):
                yield i

    def calculate(self):
        """Pick the SMP or single-CPU strategy and return its iterator."""
        cpus = list(self.online_cpus())

        if len(cpus) > 1 and (self.profile.get_constant("cpu_info") or
                              self.profile.get_constant("per_cpu__cpu_info")):
            return self.get_info_smp()

        elif self.profile.get_constant("boot_cpu_data"):
            return self.get_info_single()

        else:
            raise AttributeError("Unable to get CPU info for memory capture")

    def get_info_single(self):
        """Yield the single boot CPU's cpuinfo_x86 struct as (0, cpu)."""
        cpu = self.profile.cpuinfo_x86(
            self.profile.get_constant("boot_cpu_data"),
            vm=self.kernel_address_space)

        yield 0, cpu

    # pulls the per_cpu cpu info
    # will break apart the per_cpu code if a future plugin needs it
    def get_info_smp(self):
        """Yield (cpu number, cpuinfo_x86) via the per-cpu offset table."""
        cpus = list(self.online_cpus())

        # get the highest numbered cpu
        max_cpu = cpus[-1]

        # NOTE(review): count=max_cpu looks off by one since max_cpu is the
        # highest *index* accessed below; verify the Array bounds semantics
        # before relying on the last entry.
        per_offsets = self.profile.Array(
            target='unsigned long', count=max_cpu,
            offset=self.profile.get_constant("__per_cpu_offset"),
            vm=self.kernel_address_space)
        i = 0

        for i in cpus:
            offset = per_offsets[i]

            cpuinfo_addr = (self.profile.get_constant("cpu_info") or
                            self.profile.get_constant("per_cpu__cpu_info"))

            # Each CPU's cpuinfo_x86 lives at the shared symbol plus that
            # CPU's per-cpu offset.
            addr = cpuinfo_addr + offset.v()
            var = self.profile.Object("cpuinfo_x86", offset=addr,
                                      vm=self.kernel_address_space)
            yield i, var

    def collect(self):
        for processor, cpu in self.calculate():
            yield dict(CPU=processor, vendor=cpu.x86_vendor_id,
                       model=cpu.x86_model_id)
from rekall import testlib
from rekall.plugins.linux import common
class Lsof(common.LinProcessFilter):
    """Lists open files."""

    __name = "lsof"

    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="proc", hidden=True),
        dict(name="file_struct", hidden=True),
        dict(name="name", width=30),
        dict(name="pid", width=6, align="r"),
        dict(name="user", width=8),
        dict(name="fd", width=4),
        dict(name="size", width=12),
        dict(name="offset", width=12),
        dict(name="node", width=8),
        dict(name="path"),
    ]

    def get_open_files(self, task):
        """List all the files open by a task.

        Yields (file_struct, fd) pairs. The user space fd number is simply
        the index into the task's fd array.
        """
        for fd, file_ptr in enumerate(task.files.fds):
            file_struct = file_ptr.deref()
            if file_struct:
                yield file_struct, fd

    def lsof(self):
        """Yield (task, file_struct, fd) across all matching processes."""
        for task in self.filter_processes():
            for file_struct, fd in self.get_open_files(task):
                yield task, file_struct, fd

    def collect(self):
        """Emit a divider per task followed by one row per open file."""
        for task in self.filter_processes():
            yield dict(divider=task)

            for file_struct, fd in self.get_open_files(task):
                yield dict(
                    proc=task,
                    name=task.comm,
                    pid=task.pid,
                    user=task.uid,
                    fd=fd,
                    file_struct=file_struct,
                    size=file_struct.m("f_path.dentry.d_inode.i_size"),
                    offset=file_struct.m("f_pos"),
                    node=file_struct.m("f_path.dentry.d_inode.i_ino"),
                    path=task.get_path(file_struct))
class TestLsof(testlib.SimpleTestCase):
    """Smoke test: run lsof against a known process name."""

    @classmethod
    def is_active(cls, session):
        # Mirror the plugin's own applicability check.
        return Lsof.is_active(session)

    PARAMETERS = dict(
        commandline="lsof --proc_regex %(proc_name)s",
        proc_name="bash"
    )
from rekall.plugins.overlays import basic
from rekall.plugins.linux import common
class CheckModules(common.LinuxPlugin):
    """Compares module list to sysfs info, if available.

    Sysfs holds a "module_kset" kset referencing every loaded kernel
    module's embedded kobject. We resolve each kobject back to its
    containing struct module and compare against the modules list. A
    module that simply unlinked itself from the list is still found via
    its sysfs reference.
    """
    __name = "check_modules"

    table_header = [
        dict(name="module", style="address"),
        dict(name="mod_name", width=30),
        dict(name="ref_count", width=10, align="c"),
        dict(name="known"),
    ]

    @classmethod
    def is_active(cls, config):
        if super(CheckModules, cls).is_active(config):
            return config.profile.get_constant("module_kset", False)

    def get_kset_modules(self):
        """Yield each named kobject on the sysfs module_kset list."""
        module_kset = self.profile.get_constant_object(
            "module_kset", target="kset", vm=self.kernel_address_space)

        for kobj in module_kset.list.list_of_type("kobject", "entry"):
            if kobj.name:
                yield kobj

    def collect(self):
        """Emit one row per sysfs module, flagging unknown ones."""
        lsmod = self.session.plugins.lsmod(session=self.session)

        # Membership is checked on the containing struct module itself so
        # simple name clashes cannot fool us.
        known_modules = set(lsmod.get_module_list())

        for kobj in self.get_kset_modules():
            ref_count = kobj.kref.refcount.counter

            # Real modules have at least 3 references in sysfs.
            if ref_count < 3:
                continue

            module = basic.container_of(kobj, "module", "mkobj")
            yield dict(module=module,
                       mod_name=module.name,
                       ref_count=ref_count,
                       known=module in known_modules)
from rekall import obj
from rekall.plugins.linux import common
# Profile overlay: decorates the kernel's ARP types with decoded fields
# used by the Arp plugin below.
arp_overlay = {
    'neigh_table': [None, {
        # From /include/linux/socket.h
        'family': [None, ['Enumeration', dict(
            choices={
                0: "AF_UNSPEC",
                1: "AF_UNIX",
                2: "AF_INET",
                10: "AF_INET6",
            },
            target="unsigned int",
        )]]
    }],
    'neighbour': [None, {
        # Hardware (MAC) address, sized by the owning device's addr_len.
        "ha": [None, ["Array", dict(
            target="byte",
            count=lambda x: x.dev.addr_len)]],
    }],
}
class ArpModification(obj.ProfileModification):
    """Applies arp_overlay to a profile."""

    @classmethod
    def modify(cls, profile):
        profile.add_overlay(arp_overlay)
class Arp(common.LinuxPlugin):
    """print the ARP table."""

    # This plugin seems broken now.
    __name = "arp"

    table_header = [
        dict(name="ip", width=45, align="r"),
        dict(name="mac", width=20, align="r"),
        dict(name="dev", width=15, align="r")
    ]

    def __init__(self, **kwargs):
        super(Arp, self).__init__(**kwargs)
        # Install the overlay so neigh_table/neighbour gain the decoded
        # fields used below.
        self.profile = ArpModification(self.profile)

    def get_handle_tables(self):
        """Yield one output row per neighbour entry in every neigh_table."""
        # In earlier Linux neigh_table is a linked list.
        if self.session.profile.neigh_table().m("next") != None:
            tables = self.profile.get_constant_object(
                "neigh_tables",
                target="Pointer",
                target_args=dict(
                    target="neigh_table"
                )
            )

            for table in tables.walk_list("next"):
                for x in self.handle_table(table):
                    yield x

        # But since 3.19 it is an array of pointers to neigh_tables.
        # http://lxr.free-electrons.com/source/net/core/neighbour.c?v=3.19#L1517
        # static struct neigh_table *neigh_tables[NEIGH_NR_TABLES]
        # NEIGH_NR_TABLES is in an enum and it is 3.
        else:
            tables = self.profile.get_constant_object(
                "neigh_tables",
                target="Array",
                target_args=dict(
                    target="Pointer",
                    count=3,
                    target_args=dict(
                        target="neigh_table"
                    )
                )
            )

            for table in tables:
                for x in self.handle_table(table):
                    yield x

    def handle_table(self, ntable):
        """Yield rows for each neighbour in one table's hash buckets."""
        # Support a few ways of finding these parameters depending on
        # kernel versions.
        hash_size = (ntable.m("hash_mask") or
                     ntable.nht.m("hash_mask") or
                     1 << ntable.nht.hash_shift)
        hash_table = ntable.m("hash_buckets") or ntable.nht.hash_buckets

        buckets = self.profile.Array(offset=hash_table,
                                     vm=self.kernel_address_space,
                                     target='Pointer', count=hash_size,
                                     target_args=dict(target="neighbour"))

        for neighbour in buckets:
            if neighbour:
                for x in self.walk_neighbour(neighbour.deref()):
                    yield x

    def walk_neighbour(self, neighbour):
        """Follow one bucket's chain, yielding a row per neighbour."""
        while 1:
            # get the family from each neighbour in order to work with
            # IPv4 and IPv6.
            family = neighbour.tbl.family

            if family == "AF_INET":
                ip = neighbour.primary_key.cast("Ipv4Address")
            elif family == "AF_INET6":
                ip = neighbour.primary_key.cast("Ipv6Address")
            else:
                ip = '?'

            # Format the hardware address as colon separated hex bytes.
            mac = ":".join(["%.02x" % x for x in neighbour.ha])
            devname = neighbour.dev.name

            yield dict(ip=ip, mac=mac, dev=devname)

            neighbour = neighbour.next.deref()
            if not neighbour:
                break

    def collect(self):
        for x in self.get_handle_tables():
            yield x
"""This module implements filesystem-related plugins for Linux."""
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
from rekall import testlib
from rekall.plugins import core
from rekall.plugins.linux import common
from rekall.plugins.overlays.linux import vfs
__author__ = "Jordi Sanchez <parki.san@gmail.com>"
def InodeToPermissionString(inode):
    """Represents an inode's permisions as an ls-like string."""
    # File-type character; falls back to a blank for unknown types.
    # Sequential == comparisons are kept (rather than a dict lookup)
    # because inode.type may be a rekall object with custom equality.
    type_char = " "
    for flag, letter in (("S_IFSOCK", "s"), ("S_IFLNK", "l"),
                         ("S_IFREG", "-"), ("S_IFBLK", "b"),
                         ("S_IFDIR", "d"), ("S_IFCHR", "c"),
                         ("S_IFIFO", "f")):
        if inode.type == flag:
            type_char = letter
            break

    mode = inode.mode
    chars = [type_char]

    # Owner bits; the setuid bit folds into the execute position.
    chars.append("r" if mode.S_IRUSR else "-")
    chars.append("w" if mode.S_IWUSR else "-")
    if mode.S_ISUID:
        chars.append("s" if mode.S_IXUSR else "S")
    else:
        chars.append("x" if mode.S_IXUSR else "-")

    # Group bits; the setgid bit folds into the execute position.
    chars.append("r" if mode.S_IRGRP else "-")
    chars.append("w" if mode.S_IWGRP else "-")
    if mode.S_ISGID:
        chars.append("s" if mode.S_IXGRP else "S")
    else:
        chars.append("x" if mode.S_IXGRP else "-")

    # Other bits; the sticky bit appends an extra trailing character.
    chars.append("r" if mode.S_IROTH else "-")
    chars.append("w" if mode.S_IWOTH else "-")
    chars.append("x" if mode.S_IXOTH else "-")
    if mode.S_ISVTX:
        chars.append("t" if mode.S_IXOTH else "T")

    return "".join(chars)
class Mfind(common.LinuxPlugin):
    """Finds a file by name in memory."""

    __name = "mfind"

    __args = [
        dict(name="path", default="/", positional=True,
             help="Path to the file."),
        dict(name="device",
             help="Name of the device to match.")
    ]

    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="device", hidden=True),
        dict(name="mount", hidden=True),
        dict(name="perms", width=11),
        dict(name="uid", width=10, align="r"),
        dict(name="gid", width=10, align="r"),
        dict(name="size", width=14, align="r"),
        dict(name="mtime", width=24),
        dict(name="atime", width=24),
        dict(name="ctime", width=24),
        dict(name="inode", width=10, align="r"),
        dict(name="file", hidden=True),
        dict(name="path"),
    ]

    def find(self, path, device=None, mountpoint=None):
        """Yields a list of files matching the path on the given mountpoint.

        If no mountpoint is specified, all mountpoints are searched.

        This is akin to doing ls -ld, except that a list is returned because
        several mount points may hold files which are candidates for such
        path.
        """
        if not mountpoint:
            mount_plugin = self.session.plugins.mount(session=self.session)
            mountpoints = mount_plugin.get_mount_points()
        else:
            mountpoints = [mountpoint]

        for mountpoint in mountpoints:
            # NOTE: rekall objects overload ==, so "!= None" is deliberate.
            if device != None and mountpoint.device != device:
                continue

            if path and not path.startswith(str(mountpoint.name)):
                continue

            current_file = vfs.File(mountpoint=mountpoint,
                                    dentry=mountpoint.sb.s_root,
                                    is_root=True,
                                    session=self.session)

            if path == str(mountpoint.name):
                # Return a file for the mountpoint root
                yield current_file
            else:
                remaining_path = path[len(mountpoint.name):]
                traversal_list = remaining_path.split("/")

                i = 0
                found = True
                while i < len(traversal_list):
                    component_to_search = traversal_list[i]
                    # Skip "." and empty components ("//", trailing "/").
                    if component_to_search == "." or not component_to_search:
                        i += 1
                        continue

                    found = False
                    for file_ in current_file.walk():
                        if file_.name == component_to_search:
                            found = True
                            current_file = file_
                            i += 1
                            # Stop at the first match; continuing to walk
                            # the old directory could rebind current_file
                            # to a same-named later sibling.
                            break

                    # BUGFIX: previously a missing component left `i`
                    # unchanged and this loop spun forever. Give up on
                    # this mountpoint instead.
                    if not found:
                        break

                if found:
                    yield current_file

    def collect(self):
        """Search every mountpoint and emit one row per matching file."""
        mount_plugin = self.session.plugins.mount(session=self.session)
        mountpoints = mount_plugin.get_mount_points()

        for mountpoint in mountpoints:
            files = list(self.find(self.plugin_args.path,
                                   device=self.plugin_args.device,
                                   mountpoint=mountpoint))
            if files:
                divider = "Files on device %s mounted at %s.\n" % (
                    mountpoint.device, mountpoint.name)
                yield dict(divider=divider)

                for file_ in files:
                    yield self.collect_file(mountpoint, file_)

    def collect_file(self, mountpoint, file_):
        """Build the ls -l style output row for one file."""
        inode = file_.dentry.d_inode
        fullpath = file_.fullpath

        atime = self.session.profile.UnixTimeStamp(
            value=inode.i_atime.tv_sec)
        mtime = self.session.profile.UnixTimeStamp(
            value=inode.i_mtime.tv_sec)
        ctime = self.session.profile.UnixTimeStamp(
            value=inode.i_ctime.tv_sec)

        return dict(perms=InodeToPermissionString(inode),
                    uid=inode.i_uid, gid=inode.i_gid,
                    size=inode.i_size, mtime=mtime,
                    atime=atime, ctime=ctime,
                    inode=inode.i_ino,
                    path=fullpath,
                    device=mountpoint.device,
                    file=file_,
                    mount=mountpoint.name)
class Mls(Mfind):
    """Lists the files in a mounted filesystem."""

    __name = "mls"

    __args = [
        dict(name="recursive", type="Boolean",
             help="Recursive listing"),
        dict(name="unallocated", type="Boolean",
             help="Show files that have no inode information."),
    ]

    def collect(self):
        """Emit a listing row for each file found under the given path."""
        for file_info in super(Mls, self).collect():
            entry = file_info.get("file")
            if not entry:
                continue

            yield dict(divider="Files on device %s mounted at %s.\n" % (
                entry.mountpoint.device, entry.mountpoint.name))

            if entry.is_directory():
                # List the directory's contents, optionally recursing.
                for child in entry.walk(
                        recursive=self.plugin_args.recursive,
                        unallocated=self.plugin_args.unallocated):
                    yield self.collect_file(entry.mountpoint, child)
            else:
                yield self.collect_file(entry.mountpoint, entry)
class Mcat(core.DirectoryDumperMixin, Mfind):
    """Returns the contents available in memory for a given file.

    Ranges of the file that are not present in memory are returned blank.
    """

    __name = "mcat"

    table_header = [
        dict(name="start", width=12),
        dict(name="end", width=12),
        dict(name="path", width=80),
        dict(name="dump_name", width=80),
    ]

    def collect(self):
        """Dump each matched file's cached pages to dump_dir.

        Emits one row per file extent describing what was written.
        """
        renderer = self.session.GetRenderer()
        for file_info in super(Mcat, self).collect():
            file_obj = file_info.get("file")
            if not file_obj:
                continue

            page_size = self.session.kernel_address_space.PAGE_SIZE
            buffer_size = 1024*1024
            buffer = b""

            # Write buffered output as a sparse file.
            path = file_info["path"]
            with renderer.open(
                    filename=path,
                    directory=self.plugin_args.dump_dir,
                    mode="wb") as fd:
                for range_start, range_end in file_obj.extents:
                    yield dict(start=range_start, end=range_end,
                               path=path, dump_name=fd.name)

                    # Seek to the extent's position so file offsets match.
                    fd.seek(range_start)
                    for offset in range(range_start, range_end, page_size):
                        page_index = old_div(offset, page_size)
                        # The final page may be a partial page.
                        to_write = min(page_size, file_obj.size - offset)

                        data = file_obj.GetPage(page_index)
                        if data != None:
                            buffer += data[:to_write]
                        else:
                            # Page not resident: pad with zeros.
                            buffer += b"\x00" * to_write

                        # Dump the buffer when it's full.
                        if len(buffer) >= buffer_size:
                            fd.write(buffer)
                            buffer = b""

                    # Dump the remaining data in the buffer before the
                    # next extent seeks elsewhere.
                    if buffer != b"":
                        fd.write(buffer)
                        buffer = b""
class TestMfind(testlib.HashChecker):
    """Hash-compare mfind's output for a known file."""
    PARAMETERS = dict(
        commandline="mfind %(file)s"
    )
class TestMls(testlib.HashChecker):
    """Hash-compare mls's output for a known file."""
    PARAMETERS = dict(
        commandline="mls %(file)s"
    )
class TestMcat(testlib.HashChecker):
    """Hash-compare mcat's dumped output for a known file."""
    PARAMETERS = dict(
        commandline="mcat %(file)s --dump_dir %(tempdir)s"
    )
from rekall import testlib
from rekall_lib import utils
from rekall.plugins import core
from rekall.plugins.addrspaces import intel
from rekall.plugins.common import pfn
from rekall.plugins.linux import common
class ProcMaps(common.LinProcessFilter):
    """Gathers process maps for linux."""

    __name = "maps"

    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="task", hidden=True),
        dict(name="start", style="address"),
        dict(name="end", style="address"),
        dict(name="flags", width=6),
        dict(name="pgoff", style="address"),
        dict(name="major", width=6),
        dict(name="minor", width=6),
        dict(name="inode", width=13),
        dict(name="file_path"),
    ]

    def _describe_vma(self, task, vma):
        """Build the output row for one vm_area_struct of a task."""
        if vma.vm_file:
            inode = vma.vm_file.dentry.d_inode
            major, minor = inode.i_sb.major, inode.i_sb.minor
            ino = inode.i_ino
            pgoff = vma.vm_pgoff << 12
            fname = task.get_path(vma.vm_file)
        else:
            (major, minor, ino, pgoff) = [0] * 4

            # Anonymous mappings are labeled by which well known region
            # of the address space they cover.
            if (vma.vm_start <= task.mm.start_brk and
                    vma.vm_end >= task.mm.brk):
                fname = "[heap]"
            elif (vma.vm_start <= task.mm.start_stack and
                  vma.vm_end >= task.mm.start_stack):
                fname = "[stack]"
            else:
                fname = ""

        return dict(task=task,
                    start=vma.vm_start,
                    end=vma.vm_end,
                    flags=vma.vm_flags,
                    pgoff=pgoff,
                    major=major,
                    minor=minor,
                    inode=ino,
                    file_path=fname)

    def collect(self):
        """Emit a divider per task then one row per VMA."""
        for task in self.filter_processes():
            if not task.mm:
                continue

            yield dict(divider="Proc %s (%s)" % (task.name, task.pid))

            for vma in task.mm.mmap.walk_list("vm_next"):
                yield self._describe_vma(task, vma)
class TestProcMaps(testlib.SimpleTestCase):
    """Smoke test: run maps against a known process name."""
    PARAMETERS = dict(
        commandline="maps --proc_regex %(proc_name)s",
        proc_name="bash"
    )
class LinVadDump(core.DirectoryDumperMixin, common.LinProcessFilter):
    """Dump the VMA memory for a process."""

    __name = "vaddump"

    def render(self, renderer):
        """Write each file-backed VMA of every matching process to dump_dir."""
        for task in self.filter_processes():
            if not task.mm:
                continue

            renderer.format("Pid: {0:6}\n", task.pid)

            # Get the task and all process specific information
            task_space = task.get_process_address_space()
            name = task.comm

            for vma in task.mm.mmap.walk_list("vm_next"):
                # Only file-backed mappings are dumped.
                if not vma.vm_file:
                    continue

                filename = "{0}.{1}.{2:08x}-{3:08x}.dmp".format(
                    name, task.pid, vma.vm_start, vma.vm_end)

                renderer.format(u"Writing {0}, pid {1} to {2}\n",
                                task.comm, task.pid, filename)

                with renderer.open(directory=self.dump_dir,
                                   filename=filename,
                                   mode='wb') as fd:
                    self.CopyToFile(task_space, vma.vm_start, vma.vm_end, fd)
class TestLinVadDump(testlib.HashChecker):
    """Hash-compare vaddump's output; needs a Linux memory image."""
    mode = "mode_linux_memory"

    PARAMETERS = dict(
        commandline="vaddump --proc_regex %(proc_name)s --dump_dir %(tempdir)s",
        proc_name="bash"
    )
class LinuxVADMap(pfn.VADMapMixin, common.LinProcessFilter):
    """Inspect each page in the VAD and report its status.

    This allows us to see the address translation status of each page in the
    VAD.
    """

    def _CreateMetadata(self, collection):
        """Summarize an address descriptor collection into a metadata dict.

        Records the physical offset for translated pages and marks the page
        Valid/Invalid based on the descriptors present.
        """
        metadata = {}
        for descriptor_cls, args, kwargs in reversed(collection.descriptors):
            if issubclass(descriptor_cls, intel.PhysicalAddressDescriptor):
                metadata["offset"] = kwargs["address"]
                metadata.setdefault("type", "Valid")

            elif issubclass(descriptor_cls, intel.InvalidAddress):
                metadata["type"] = "Invalid"

        return metadata

    def GeneratePageMetatadata(self, task):
        """Yield (virtual address, metadata) for each page of the task's
        VMAs that falls inside the requested [start, end] range."""
        address_space = self.session.GetParameter("default_address_space")

        for vma in task.mm.mmap.walk_list("vm_next"):
            start = vma.vm_start
            end = vma.vm_end

            # Skip the entire region.
            if end < self.plugin_args.start:
                continue

            # Done.
            if start > self.plugin_args.end:
                break

            # Walk the region page by page (0x1000 byte pages).
            for vaddr in utils.xrange(start, end, 0x1000):
                if self.plugin_args.start <= vaddr <= self.plugin_args.end:
                    yield vaddr, self._CreateMetadata(
                        address_space.describe_vtop(vaddr))
"""The plugins in this module are mainly used to visually test renderers."""
from __future__ import division
from builtins import range
from past.utils import old_div
__author__ = "Adam Sindelar <adamsh@google.com>"
import itertools
from rekall import plugin
from rekall import algo
from rekall_lib import utils
from rekall.plugins.renderers import visual_aides
class RekallBovineExperience3000(plugin.Command):
    """Renders Bessy the cow and some beer.

    This is a text renderer stress-test. It uses multiple features at the
    same time:

    - Multiple coloring rules per line (this was a doozy).
    - Two columns with colors next to each other.
    - Text with its own newlines isn't rewrapped.
    - It still wraps if it overflows the cell.
    - Bovine readiness and international spirit.
    """
    __name = "moo"

    def render(self, renderer):
        # Three styled columns plus a nowrap column to exercise wrapping.
        renderer.table_header([
            dict(name="Dogma", width=35, style="full"),
            dict(name="Bessy", width=65, type="bool", style="cow"),
            dict(name="Pilsner", width=50, style="full"),
            dict(name="Nowrap", width=10, nowrap=True)])

        # Canned ASCII art and memory-map data used as rendering fixtures.
        fixtures = self.session.LoadProfile("tests/fixtures")
        beer = fixtures.data["ascii_art"]["beer"]
        phys_map = fixtures.data["fixtures"]["phys_map"]

        renderer.table_row(
            ("This is a renderer stress-test. The flags should have correct"
             " colors, the beer should be yellow and the cell on the left"
             " should not bleed into the cell on the right.\n"
             "This is a really "
             "long column of text with its own newlines in it!\n"
             "This bovine experience has been brought to you by Rekall."),
            True,
            utils.AttributedString("\n".join(beer["ascii"]),
                                   beer["highlights"]),
            ("This is a fairly long line that shouldn't get wrapped.\n"
             "The same row has another line that shouldn't get wrapped."))

        renderer.section("Heatmap test:")

        # Digits of e provide deterministic pseudo-random heat values.
        cells = []
        for digit in itertools.islice(algo.EulersDecimals(), 0xff):
            cells.append(dict(heat=float(digit + 1) * .1, value=digit))

        randomized = visual_aides.Heatmap(
            caption="Offset (p)",
            # Some of the below xs stand for eXtreme. The other ones just
            # look cool.
            column_headers=["%0.2x" % x for x in range(0, 0xff, 0x10)],
            row_headers=["0x%0.6x" % x for x
                         in range(0x0, 0xfffff, 0x10000)],
            cells=cells,
            greyscale=False)

        gradual = visual_aides.Heatmap(
            caption="Offset (v)",
            column_headers=["%0.2x" % x for x in range(0, 0xff, 0x10)],
            row_headers=["0x%0.6x" % x for x
                         in range(0x0, 0xfffff, 0x10000)],
            cells=[dict(value="%x" % x, heat=old_div(x, 255.0))
                   for x in range(256)],
            greyscale=False)

        ranges_legend = visual_aides.MapLegend(phys_map["ranges_legend"])

        ranges = visual_aides.RunBasedMap(
            caption="Offset (p)",
            legend=ranges_legend,
            runs=phys_map["runs"])

        renderer.table_header([dict(name="Random Heatmap", style="full",
                                    width=60, align="c"),
                               dict(name="Gradual Heatmap", style="full",
                                    width=60, align="c"),
                               dict(name="Legend", style="full",
                                    orientation="horizontal")])

        renderer.table_row(randomized, gradual, visual_aides.HeatmapLegend())

        renderer.table_header([dict(name="Greyscale Random", style="full",
                                    width=60, align="c"),
                               dict(name="Memory Ranges", style="full",
                                    width=80, align="c"),
                               dict(name="Ranges Legend", style="full",
                                    width=30, orientation="vertical")])

        # Re-render the random heatmap in greyscale to exercise that path.
        randomized.greyscale = True
        renderer.table_row(randomized, ranges, ranges_legend)
from builtins import object
__author__ = "Michael Cohen <scudette@gmail.com>"
from rekall import plugin
from rekall.ui import text
from rekall.plugins import core
from rekall_lib import utils
class MemmapMixIn(object):
    """A Mixin to create the memmap plugins for all the operating systems."""

    @classmethod
    def args(cls, parser):
        """Declare the command line args we need."""
        super(MemmapMixIn, cls).args(parser)
        parser.add_argument(
            "--coalesce", default=False, type="Boolean",
            help="Merge contiguous pages into larger ranges.")

        parser.add_argument(
            "--all", default=False, type="Boolean",
            help="Use the entire range of address space.")

    def __init__(self, *pos_args, **kwargs):
        """Calculates the memory regions mapped by a process or the kernel.

        If no process filtering directives are provided, enumerates the kernel
        address space.
        """
        # Pop our own options before delegating to the other mixins.
        self.coalesce = kwargs.pop("coalesce", False)
        self.all = kwargs.pop("all", False)
        super(MemmapMixIn, self).__init__(*pos_args, **kwargs)

    def _render_map(self, task_space, renderer, highest_address):
        """Render the page mappings of one address space.

        Args:
          task_space: the address space to enumerate.
          renderer: the output renderer.
          highest_address: mappings above this address are skipped unless
            --all was given (used to exclude kernel space for processes).
        """
        renderer.format(u"Dumping address space at DTB {0:#x}\n\n",
                        task_space.dtb)

        renderer.table_header([("Virtual", "offset_v", "[addrpad]"),
                               ("Physical", "offset_p", "[addrpad]"),
                               ("Size", "process_size", "[addr]")])

        if self.coalesce:
            ranges = task_space.merge_base_ranges()
        else:
            ranges = task_space.get_mappings()

        for run in ranges:
            # When dumping out processes do not dump the kernel.
            if not self.all and run.start > highest_address:
                break

            renderer.table_row(run.start, run.file_offset, run.length)

    def render(self, renderer):
        """Render either the kernel map or each filtered process' map."""
        if not self.filtering_requested:
            # Dump the entire kernel address space.
            return self._render_map(self.kernel_address_space, renderer, 2**64)

        max_memory = self.session.GetParameter("highest_usermode_address")
        for task in self.filter_processes():
            renderer.section()
            renderer.RenderProgress("Dumping pid {0}".format(task.pid))

            task_space = task.get_process_address_space()
            renderer.format(u"Process: '{0}' pid: {1:6}\n\n",
                            task.name, task.pid)

            if not task_space:
                renderer.write("Unable to read pages for task.\n")
                continue

            self._render_map(task_space, renderer, max_memory)
class MemDumpMixin(core.DirectoryDumperMixin, MemmapMixIn):
"""Dump the addressable memory for a process.
Note that because the addressable memory is sparse we do not maintain
alignment in the output file. Instead, we also write an index file which
describes all the sparse runs in the dump - but the dump file has all the
data concatenated.
"""
name = "memdump"
def dump_process(self, eprocess, fd, index_fd):
task_as = eprocess.get_process_address_space()
temp_renderer = text.TextRenderer(session=self.session,
fd=index_fd)
with temp_renderer.start():
temp_renderer.table_header([
("File Address", "file_addr", "[addrpad]"),
("Length", "length", "[addrpad]"),
("Virtual Addr", "virtual", "[addrpad]")])
# Only dump the userspace portion of addressable memory.
max_memory = self.session.GetParameter("highest_usermode_address")
blocksize = 1024 * 1024
for run in task_as.get_address_ranges(end=max_memory):
for offset in utils.xrange(run.start, run.end, blocksize):
to_read = min(blocksize, run.end - offset)
if to_read == 0:
break
data = task_as.read(offset, to_read)
file_offset = fd.tell()
fd.write(data)
# Write the index file.
temp_renderer.table_row(file_offset, to_read, offset)
def render(self, renderer):
if self.dump_dir is None:
raise plugin.PluginError("Dump directory not specified.")
for task in self.filter_processes():
renderer.section()
filename = u"{0}_{1:d}.dmp".format(task.name, task.pid)
renderer.format(u"Writing {0} to {1}\n",
task, filename)
with renderer.open(directory=self.dump_dir,
filename=filename,
mode='wb') as fd:
with renderer.open(directory=self.dump_dir,
filename=filename + ".idx",
mode='wt') as index_fd:
self.dump_process(task, fd, index_fd) | /rekall-core-1.7.2rc1.zip/rekall-core-1.7.2rc1/rekall/plugins/common/memmap.py | 0.639398 | 0.244848 | memmap.py | pypi |
from builtins import object
__author__ = "Michael Cohen <scudette@google.com>"
"""A Common mixin for implementing plugins based on scanning."""
from rekall import addrspace
class BaseScannerPlugin(object):
"""A mixin that implements scanner memory region selectors.
Most scanners are very similar - they search for specific byte patterns over
some sections of memory, validate those and present the results. Depending
on the type of structures searched for, different regions of memory need to
be looked at.
This mixin attempts to present a common interface to all scanning plugins,
where users may select different regions using common selector options, and
those will be generated automatically.
The plugin may select a set of default regions to scan, which are most
relevant to the specific data searched for, but the user may override the
defaults at all times.
NOTE: This plugin must be mixed with the specific OS's ProcessFilter
implementation in order to bring in standard process selectors.
"""
__args = [
dict(name="scan_physical", default=False, type="Boolean",
help="Scan the physical address space only."),
dict(name="scan_kernel", default=False, type="Boolean",
help="Scan the entire kernel address space."),
# Process Scanning options.
dict(name="scan_process_memory", default=False, type="Boolean",
help="Scan all of process memory. Uses process selectors to "
"narrow down selections."),
]
scanner_defaults = {}
def scan_specification_requested(self):
"""Return True if the user requested any specific regions."""
for k, v in list(self.plugin_args.items()):
if k.startswith("scan_") and v:
return True
return False
def generate_memory_ranges(self):
"""Parse the plugin args and generate memory ranges.
Yields rekall.addrspace.Run objects.
"""
if not self.scan_specification_requested():
# Copy the plugin defaults into the args.
for k in self.plugin_args:
if k.startswith("scan_"):
self.plugin_args[k] = self.scanner_defaults.get(k, False)
# Physical address space requested.
if self.plugin_args.scan_physical:
yield addrspace.Run(
start=0, end=self.session.physical_address_space.end(),
address_space=self.session.physical_address_space,
data=dict(type="PhysicalAS"))
# Scan all of the kernel address space.
if self.plugin_args.scan_kernel:
yield addrspace.Run(
start=0, end=self.session.kernel_address_space.end(),
address_space=self.session.kernel_address_space,
data=dict(type="KernelAS"))
# Scan the complete process memory, not including the kernel.
if self.plugin_args.scan_process_memory:
# We use direct inheritance here so we can support process
# selectors.
for task in self.filter_processes():
cc = self.session.plugins.cc()
with cc:
# Switch to the process address space.
cc.SwitchProcessContext(task)
end = self.session.GetParameter("highest_usermode_address")
resolver = self.session.address_resolver
for module in sorted(resolver.GetAllModules(),
key=lambda x: x.start):
# Skip modules in kernel space.
if module.start > end:
break
comment = "%s (%s), %s" % (
task.name, task.pid, module.name)
self.session.logging.info(
"Scanning %s (%s) in: %s [%#x-%#x]",
task.name, task.pid, comment,
module.start, module.end)
yield addrspace.Run(
start=module.start, end=module.end,
address_space=self.session.default_address_space,
data=dict(type=comment, module=module, task=task)) | /rekall-core-1.7.2rc1.zip/rekall-core-1.7.2rc1/rekall/plugins/common/scanners.py | 0.872211 | 0.39257 | scanners.py | pypi |
from __future__ import division
from builtins import str
from past.builtins import basestring
from past.utils import old_div
__author__ = (
"Michael Cohen <scudette@google.com>",
"Adam Sindelar <adamsh@google.com>",
"Jordi Sanchez <nop@google.com>"
)
import binascii
import hashlib
from rekall import obj
from rekall_lib import utils
import six
class IndexProfileLoader(obj.ProfileSectionLoader):
name = "$INDEX"
def LoadIntoProfile(self, session, profile, index):
profile.LoadIndex(index)
return profile
class Index(obj.Profile):
"""A profile which contains an index to locate other profiles."""
index = None
base_offset = 0
PERFECT_MATCH = 1.0
GOOD_MATCH = 0.75
def LoadIndex(self, index):
self.index = index
def copy(self):
result = super(Index, self).copy()
result.index = self.index.copy()
return result
def _TestSymbols(self, address_space, offset, possible_values):
"""Match any of the possible_values at offset.
Return True if there is a match.
"""
for value in possible_values:
value = binascii.unhexlify(value)
data = address_space.read(offset, len(value))
if value == data:
return data
def _TestProfile(self, address_space, image_base, profile, symbols,
minimal_match=1):
"""Match _all_ the symbols against this data."""
count_matched = 0
count_unmatched = 0
for offset, possible_values in symbols:
# The possible_values can be a single string which means there is
# only one option. If it is a list, then any of the symbols may
# match at this offset to be considered a match.
if isinstance(possible_values, basestring):
possible_values = [possible_values]
# If the offset is not mapped in we can not compare it. Skip it.
offset_to_check = image_base + offset
if address_space.vtop(offset_to_check) == None:
continue
match = self._TestSymbols(
address_space=address_space,
offset=offset_to_check,
possible_values=possible_values)
if match:
self.session.report_progress(
"%s matched offset %#x+%#x=%#x (%s)",
profile, offset, image_base, offset+image_base,
utils.encode_string (match))
count_matched += 1
else:
# FIXME: We get here if the comparison point does not match -
# does it make sense to allow some points to not match? Should
# we consider these a failure to match?
count_unmatched += 1
# Require at least this many comparison points to be matched.
if count_matched < minimal_match:
return 0
if count_matched > 0:
self.session.report_progress(
"%s matches %d/%d comparison points",
profile, count_matched, count_matched + count_unmatched)
return float(count_matched) / (count_matched + count_unmatched)
return 0
def IndexHits(self, image_base, address_space=None, minimal_match=1):
if address_space == None:
address_space = self.session.GetParameter("default_address_space")
for profile, symbols in six.iteritems(self.index):
match = self._TestProfile(
address_space=address_space,
image_base=image_base,
profile=profile,
minimal_match=minimal_match,
symbols=symbols)
yield match, profile
def LookupIndex(self, image_base, address_space=None, minimal_match=1):
partial_matches = []
for match, profile in self.IndexHits(image_base, address_space,
minimal_match=minimal_match):
if match == self.PERFECT_MATCH:
# Yield perfect matches right away.
yield (profile, self.PERFECT_MATCH)
elif match > 0:
# Imperfect matches will be saved and returned in order of
# accuracy.
partial_matches.append((match, profile))
partial_matches.sort(reverse=True)
for match, profile in partial_matches:
yield (profile, match)
class SymbolOffsetIndex(Index):
"""A specialized index that works on symbols-offsets."""
def __init__(self, *args, **kwargs):
super(SymbolOffsetIndex, self).__init__(*args, **kwargs)
if not self.index:
self.index = {}
@utils.safe_property
def hashes(self):
return self.index.get("$HASHES", {})
@utils.safe_property
def traits(self):
return self.index.get("$TRAITS", {})
@utils.safe_property
def profiles(self):
return self.index.get("$PROFILES", {})
@utils.safe_property
def duplicates(self):
return [p for p in self.index.get("$PROFILES") if p not in self.hashes]
def LookupProfile(self, symbols):
"""Returns which profiles in the index match a dict of symbols.
Returns:
A list of tuples of (profile, num_matched_traits).
"""
profiles = []
try:
relative_symbols = self.RelativizeSymbols(symbols.copy())
except ValueError as e:
self.session.logging.debug(str(e))
return []
for profile, traits in six.iteritems(self.traits):
matched_traits = 0
for trait in traits:
# A trait is a list of symbol-offset tuples.
match = all([relative_symbols.get(symbol) == offset
for (symbol, offset) in trait
if isinstance(symbol, basestring)])
if match:
matched_traits += 1
if matched_traits > 0:
profiles.append((profile, matched_traits))
return profiles
def LookupHash(self, profile_hash):
"""Returns the profile with hash profile_hash."""
return self.hashes.get(profile_hash)
@classmethod
def FilterSymbols(cls, symbols):
"""Filters a dict of symbols, discarding irrelevant ones."""
return symbols
@classmethod
def CalculateRawProfileHash(cls, profile):
"""Calculates a hash of a list of symbols."""
# Skip superfluous symbols.
symbols = profile["$CONSTANTS"]
ordered_symbol_list = sorted(
["(%s, %d)" % (k, v)
for (k, v) in six.iteritems(cls.FilterSymbols(symbols))])
hasher = hashlib.sha256()
hasher.update("|".join(ordered_symbol_list))
return hasher.hexdigest()
@classmethod
def CalculateRawSymbolsHash(cls, profile):
"""Calculates a hash of a list of symbols."""
# Skip superfluous symbols.
symbols = profile["$CONSTANTS"]
ordered_symbol_list = sorted(symbols.keys())
hasher = hashlib.sha256()
hasher.update("|".join(ordered_symbol_list))
return hasher.hexdigest()
def ProfileMetadata(self, profile_name):
return self.profiles.get(profile_name)
@classmethod
def ProfileMatchesTrait(cls, profile, trait):
"""Whether a profile matches another profile's trait.
A trait is a list of tuples (symbol, offset) that uniquely identify
a profile.
"""
return all([profile.get_constant(t[0]) == t[1] for t in trait])
@classmethod
def RawProfileMatchesTrait(cls, profile, trait):
"""Whether a raw profile (JSON) matches another profile's trait.
A trait is a list of tuples (symbol, offset) that uniquely identify
a profile.
"""
try:
return all([profile.get(t[0]) == t[1] for t in trait])
except:
return False
@classmethod
def BuildIndex(cls, hashes=None, traits=None, duplicates=None, spec=None,
iomanager=None):
"""Builds a SymbolOffset index from traits, profiles, hashes and a spec.
Args:
hashes: A dictionary of hash:profile_id. Hashes must be obtained via
the SymbolOffsetIndex.CalculateRawProfileHash() method.
traits: A dictionary of profile_id:traits. Traits are the result
of calling the SymbolOffsetIndex.FindTraits() method.
profiles: A dictionary of profile_id metadata. Profile metadata
is obtained via SymbolOffsetIndex.GetProfileMetadata().
duplicates: A list of newly found profile ids that are duplicate.
"""
spec = spec or {}
metadata = dict(Type="Index",
ProfileClass=spec.get("implementation", cls.__name__),
BaseSymbol=spec.get("base_symbol"))
hashes = hashes or {}
traits = traits or {}
# Assert all profiles that have hashes have traits as well
if not all([profile in hashes.values() for profile in traits]):
raise ValueError("Not all profiles with traits have hashes")
# Assert all profiles that have traits have hashes as well
if not all([profile in traits for profile in hashes.values()]):
raise ValueError("Not all profiles with hashes have traits")
profiles = dict([(profile_id,
cls.GetProfileMetadata(
iomanager=iomanager, profile_id=profile_id))
for profile_id in traits])
duplicates = duplicates or []
for duplicate_profile in duplicates:
profiles[duplicate_profile] = cls.GetProfileMetadata(
iomanager=iomanager, profile_id=duplicate_profile)
index = {
"$METADATA": metadata,
"$INDEX": {
"$TRAITS": traits or {},
"$PROFILES": profiles or {},
"$HASHES": hashes or {},
}
}
return index
@classmethod
def GetProfileMetadata(cls, iomanager=None, profile_id=None):
profile_metadata = dict()
file_mtime = iomanager.Metadata(profile_id)["LastModified"]
profile_metadata["LastModified"] = file_mtime
return profile_metadata
def __len__(self):
return len(self.traits)
def __iter__(self):
"""Yields tuples of profile_id, traits.
Each trait is a list of tuples of (symbol, offset) that make this
profile unique within the repository.
"""
for profile, traits in six.iteritems(self.index.get("$TRAITS")):
yield profile, traits
def RelativizeSymbols(self, symbols, base_symbol=None):
"""Modifies a dict of symbols so its offsets relative to base_symbol.
If no base_symbol is provided and the index itself doesn't define one
then returns the symbols as is.
Args:
symbols: A dictionary of symbol:value
base_symbol: The name of the symbol to base others' values on.
"""
if not base_symbol:
base_symbol = self.metadata("BaseSymbol")
if not base_symbol:
return symbols
base_value = symbols.get(base_symbol)
if not base_value:
raise ValueError("Symbol %s not found in profile", base_symbol)
new_symbols = symbols.copy()
for symbol, value in six.iteritems(new_symbols):
new_symbols[symbol] = value - base_value
return new_symbols
class LinuxSymbolOffsetIndex(SymbolOffsetIndex):
"""Specialized symbol-offset index for linux."""
@classmethod
def FilterSymbols(cls, symbols):
"""Filters a dict of symbols, discarding irrelevant ones."""
return dict([(k, v) for (k, v) in six.iteritems(symbols)
if not "." in k and k != "__irf_end"])
@classmethod
def BuildIndex(cls, hashes=None, traits=None, duplicates=None, spec=None,
iomanager=None):
index = super(LinuxSymbolOffsetIndex, cls).BuildIndex(
hashes=hashes, traits=traits, spec=spec, duplicates=duplicates,
iomanager=iomanager)
# By default, we'll calculate KASLR from linux_proc_banner which is
# present on all kernels.
spec = spec or {}
index["$METADATA"]["BaseSymbol"] = spec.get("base_symbol",
"linux_proc_banner")
return index | /rekall-core-1.7.2rc1.zip/rekall-core-1.7.2rc1/rekall/plugins/common/profile_index.py | 0.796015 | 0.352146 | profile_index.py | pypi |
# pylint: disable=protected-access
from builtins import object
import bisect
from rekall import testlib
from rekall.ui import json_renderer
class Pas2VasResolver(object):
"""An object which resolves physical addresses to virtual addresses."""
def __init__(self, session):
self.session = session
self.dirty = True
# Maintains some maps to ensure fast lookups.
self.dtb2task = {}
self.dtb2maps = {}
self.dtb2userspace = {}
# Add the kernel.
self.dtb2task[self.session.GetParameter("dtb")] = "Kernel"
pslist = self.session.plugins.pslist()
for task in pslist.filter_processes():
task_dtb = task.dtb
if task_dtb != None:
self.dtb2task[task_dtb] = task.obj_offset
def _get_highest_user_address(self):
return 2**64-1
def GetTaskStruct(self, address):
"""Returns the task struct for the address.
Should be overridden by OS specific implementations.
"""
return address
def PA2VA_for_DTB(self, physical_address, dtb, userspace=None):
if dtb == None:
return None, None
# Choose the userspace mode automatically.
if userspace is None:
userspace = dtb != self.session.kernel_address_space.dtb
# Build a map for this dtb.
lookup_map = self.dtb2maps.get(dtb)
# If we want the full resolution and the previous cached version was for
# userspace only, discard this lookup map and rebuild it.
if not userspace and self.dtb2userspace.get(dtb):
lookup_map = None
if lookup_map is None:
lookup_map = self.dtb2maps[dtb] = self.build_address_map(
dtb, userspace=userspace)
self.dtb2userspace[dtb] = userspace
if lookup_map:
if physical_address > lookup_map[0][0]:
# This efficiently finds the entry in the map just below the
# physical_address.
lookup_pa, length, lookup_va = lookup_map[
bisect.bisect(
lookup_map, (physical_address, 2**64, 0, 0))-1]
if (lookup_pa <= physical_address and
lookup_pa + length > physical_address):
# Yield the pid and the virtual offset
task = self.dtb2task.get(dtb)
if task is not None:
task = self.GetTaskStruct(task)
else:
task = "Kernel"
return lookup_va + physical_address - lookup_pa, task
return None, None
def build_address_map(self, dtb, userspace=True):
"""Given the virtual_address_space, build the address map."""
# This lookup map is sorted by the physical address. We then use
# bisect to efficiently look up the physical page.
tmp_lookup_map = []
self.dirty = True
if dtb != None:
address_space = self.session.kernel_address_space.__class__(
base=self.session.physical_address_space,
session=self.session,
dtb=dtb)
highest_virtual_address = self.session.GetParameter(
"highest_usermode_address")
for run in address_space.get_mappings():
# Only consider userspace addresses for processes.
if userspace and run.start > highest_virtual_address:
break
tmp_lookup_map.append((run.file_offset, run.length, run.start))
self.session.report_progress(
"Enumerating memory for dtb %#x (%#x)", dtb, run.start)
# Now sort the map and return it.
tmp_lookup_map.sort()
return tmp_lookup_map
class Pas2VasMixin(object):
"""Resolves a physical address to a virtual addrress in a process."""
name = "pas2vas"
__args = [
dict(name="offsets", type="ArrayIntParser",
help="A list of physical offsets to resolve."),
]
def get_virtual_address(self, physical_address, tasks=None):
resolver = self.session.GetParameter("physical_address_resolver")
if tasks is None:
tasks = list(self.filter_processes())
# First try the kernel.
virtual_address, _ = resolver.PA2VA_for_DTB(
physical_address, dtb=self.session.kernel_address_space.dtb,
userspace=False)
if virtual_address:
yield virtual_address, "Kernel"
# Find which process owns it.
for task in tasks:
virtual_offset, task = resolver.PA2VA_for_DTB(
physical_address, task.dtb, userspace=True)
if virtual_offset is not None:
yield virtual_offset, task
def render(self, renderer):
renderer.table_header([('Physical', 'virtual_offset', '[addrpad]'),
('Virtual', 'physical_offset', '[addrpad]'),
('Pid', 'pid', '>6'),
('Name', 'name', '')])
tasks = list(self.filter_processes())
for physical_address in self.plugin_args.offsets:
for virtual_address, task in self.get_virtual_address(
physical_address, tasks):
if task is 'Kernel':
renderer.table_row(physical_address, virtual_address,
0, 'Kernel')
else:
renderer.table_row(
physical_address, virtual_address,
task.pid, task.name)
class Pas2VasResolverJsonObjectRenderer(json_renderer.StateBasedObjectRenderer):
"""Encode and decode the pas2vas maps efficiently."""
renders_type = "Pas2VasResolver"
def EncodeToJsonSafe(self, item, **_):
result = {}
result["dtb2task"] = item.dtb2task
result["dtb2maps"] = item.dtb2maps
result["dtb2userspace"] = item.dtb2userspace
result["mro"] = ":".join(self.get_mro(item))
return result
def DecodeFromJsonSafe(self, value, _):
# Get the original class to instantiate the required item.
cls = self.GetImplementationFromMRO(Pas2VasResolver, value)
result = cls(session=self.session)
for attr in ["dtb2maps", "dtb2userspace", "dtb2task"]:
if attr in value:
setattr(result, attr, value[attr])
result.dirty = False
return result
class TestPas2Vas(testlib.SimpleTestCase):
PARAMETERS = dict(
commandline="pas2vas --offsets %(offset)s - %(pids)s ",
pid=0,
) | /rekall-core-1.7.2rc1.zip/rekall-core-1.7.2rc1/rekall/plugins/common/pas2kas.py | 0.78037 | 0.303267 | pas2kas.py | pypi |
from builtins import object
class PhysicalAddressContext(object):
"""A lazy evaluator for context information around physical addresses."""
def __init__(self, session, address):
self.session = session
self.address = address
def summary(self):
rammap_plugin = self.session.plugins.rammap(
start=self.address, end=self.address+1)
for row in rammap_plugin.collect():
return row
def __str__(self):
rammap_plugin = self.session.plugins.rammap(
start=self.address, end=self.address+1)
if rammap_plugin != None:
return rammap_plugin.summary()[0]
return u"Phys: %#x" % self.address
class VADMapMixin(object):
"""A plugin to display information about virtual address pages."""
name = "vadmap"
__args = [
dict(name="start", default=0, type="IntParser",
help="Start reading from this page."),
dict(name="end", default=2**63, type="IntParser",
help="Stop reading at this offset."),
]
table_header = [
dict(name='_EPROCESS', type="_EPROCESS", hidden=True),
dict(name="Divider", type="Divider"),
dict(name="VAddr", style="address"),
dict(name="PAddr", style="address", hidden=True),
dict(name="length", style="address"),
dict(name="type", width=20),
dict(name="comment"),
]
def FormatMetadata(self, type, metadata, offset=None):
result = ""
if not metadata:
result = "Invalid PTE "
if "filename" in metadata:
result += "%s " % metadata["filename"]
if "number" in metadata:
result = "PF %s " % metadata["number"]
if type == "Valid" or type == "Transition":
result += "PhysAS "
if offset:
result += "@ %#x " % offset
if "ProtoType" in metadata:
result += "(P) "
return result
def GeneratePageMetatadata(self, task):
"""A Generator of vaddr, metadata for each page."""
_ = task
return []
def collect(self):
for task in self.filter_processes():
yield dict(_EPROCESS=task,
Divider="Pid: {0} {1}\n".format(task.pid, task.name))
with self.session.plugins.cc() as cc:
cc.SwitchProcessContext(task)
old_offset = 0
old_vaddr = 0
length = 0x1000
old_metadata = {}
for vaddr, metadata in self.GeneratePageMetatadata(task):
# Remove the offset so we can merge on identical
# metadata (offset will change for each page).
offset = metadata.pop("offset", None)
# Coalesce similar rows.
if ((offset is None or old_offset is None or
self.plugin_args.verbosity < 5 or
offset == old_offset + length) and
metadata == old_metadata and
vaddr == old_vaddr + length):
length += 0x1000
continue
type = old_metadata.get("type", None)
if type:
comment = self.FormatMetadata(type, old_metadata,
vaddr)
yield dict(VAddr=vaddr, PAddr=offset, length=length,
type=type, comment=comment)
old_metadata = metadata
old_vaddr = vaddr
old_offset = offset
length = 0x1000
if old_metadata:
comment = self.FormatMetadata(type, old_metadata, vaddr)
yield dict(VAddr=vaddr, PAddr=offset, length=length,
type=type, comment=comment) | /rekall-core-1.7.2rc1.zip/rekall-core-1.7.2rc1/rekall/plugins/common/pfn.py | 0.875188 | 0.277336 | pfn.py | pypi |
"""The module implements the base class for address resolution."""
from __future__ import division
from builtins import str
from past.builtins import basestring
from past.utils import old_div
from builtins import object
__author__ = "Michael Cohen <scudette@gmail.com>"
import re
from rekall import config
from rekall import obj
from rekall_lib import utils
# Register the global --name_resolution_strategies option. The default
# ordering suggests resolution is attempted as Module, then Symbol, then
# Export -- confirm against the consumers of this parameter.
config.DeclareOption(
    "--name_resolution_strategies", default=["Module", "Symbol", "Export"],
    group="Interface", type="ChoiceArray",
    choices=["Module", "Symbol", "Export"])
class Module(object):
    """A named region of the virtual address space that maps an executable.

    Each binary in the address space has its own profile, which knows about
    the symbols defined within it. This container bundles the module's name,
    its [start, end) range and the associated profile so the resolver can
    pass them around as a single unit.
    """

    def __init__(self, name=None, start=None, end=None, profile=None,
                 session=None):
        self.name = name
        # Normalize the range boundaries to plain integers.
        self.start = int(start)
        self.end = int(end)
        self.profile = profile
        self.session = session

    def __str__(self):
        return u"{0}: {1}".format(self.__class__.__name__, self.name)
class AddressResolverMixin(object):
"""The basic building block for constructing an address resolver plugin.
An address resolver maintains a collection of Modules and abstracts access
to specific symbol names within the modules.
Rekall uses a symbolic notation to refer to specific addresses within the
address space. The address resolver is responsible for parsing this notation
and resolving it to an actual address.
Rules of symbol syntax
======================
The address space is divided into "modules". A module has a name, a start
address and an end address. Modules can also contain a profile which knows
about symbols related to that module.
1. Module reference: The start address of a module can be refered to by its
name. e.g: "nt", "ntdll", "tcpip".
2. If a module contains a valid profile, the profile may also know about
symbols within the module. We can refer to these
symbols. e.g. "nt!MmGetIoSessionState"
3. If an exact symbol is not found, it can be referred to with an offset
from another symbol name. e.g. "nt!MmGetIoSessionState+5FE" (Note
integers are given in hex).
4. If the symbol is preceeded with a "*" - it means that the symbol is a
pointer. The address will be read as a pointer and the symbol name will
resolve to the address of the pointer's target.
"""
__args = [
dict(name="symbol", type="ArrayString", default=[],
help="List of symbols to lookup"),
]
table_header = [
dict(name="Symbol", width=20),
dict(name="Offset", width=20, style="address"),
]
# The name of the plugin.
name = "address_resolver"
# The format of a symbol name. Used by get_address_by_name().
ADDRESS_NAME_REGEX = re.compile(
r"(?P<deref>[*])?" # Pointer dereference.
r"((?P<address>0x[0-9A-Fa-f]+)|" # Alternative - Either an address, or,
r"(?P<module>[A-Za-z_0-9\.\\]+)" # Module name - can include extension
# (.exe, .sys)
r"!?" # ! separates module name from symbol
# name.
r"(?P<symbol>[^ +-]+)?" # Symbol name.
r")" # End alternative.
r"(?P<op> *[+-] *)?" # Possible arithmetic operator.
r"(?P<offset>[0-9a-fA-Fx]+)?") # Possible hex offset.
    def __init__(self, **kwargs):
        # Accept arbitrary plugin keyword arguments and pass them up the MRO.
        super(AddressResolverMixin, self).__init__(**kwargs)
        # Start from a clean, empty module map.
        self.reset()
def reset(self):
# A ranged collection of Module() objects.
self._address_ranges = utils.RangedCollection()
# A lookup between module names and the Module object itself.
self._modules_by_name = {}
self._initialized = False
def NormalizeModuleName(self, module_name):
if module_name is not None:
module_name = utils.SmartUnicode(module_name)
module_name = re.split(r"[/\\]", module_name)[-1]
return module_name.lower()
    def _EnsureInitialized(self):
        """Initialize this address resolver.

        Lazy-initialization hook called before every lookup. The base
        implementation does nothing; presumably OS specific subclasses
        override it to populate the module map on first use -- confirm
        against the concrete resolvers.
        """
def AddModule(self, module):
self._address_ranges.insert(module.start, module.end, module)
if module.name:
self._modules_by_name[module.name] = module
def _ParseAddress(self, name):
"""Parses the symbol from Rekall symbolic notation.
Raises:
TypeError if the expression has a syntax error.
Returns:
a dict containing the different components of the expression.
"""
m = self.ADDRESS_NAME_REGEX.match(name)
if m:
capture = m.groupdict()
if not capture.get("address"):
module = capture.get("module")
if not module:
raise TypeError("Module name not specified.")
capture["module"] = self.NormalizeModuleName(module)
if capture["op"] and not (capture["symbol"] or
capture["address"] or
capture["module"]):
raise TypeError("Operator %s must have an operand." %
capture["op"])
if capture["op"] and not (capture["symbol"] or capture["address"] or
capture["module"]):
raise TypeError(
"Operator %s must operate on a symbol or address." %
capture["op"])
return capture
raise TypeError("Unable to parse %r as a symbol name" % name)
def modules(self):
self._EnsureInitialized()
for _, _, module in self._address_ranges:
yield module
def GetContainingModule(self, address):
"""Finds the module containing the specified address.
Returns:
A Module() instance.
"""
self._EnsureInitialized()
address = obj.Pointer.integer_to_address(address)
_, _, module = self._address_ranges.get_containing_range(address)
return module
def GetModuleByName(self, name):
self._EnsureInitialized()
return self._modules_by_name.get(self.NormalizeModuleName(name))
def GetAllModules(self):
self._EnsureInitialized()
return list(self._modules_by_name.values())
def get_constant_object(self, name, target=None, **kwargs):
"""Instantiate the named constant with these args.
This method is the main entry point for instantiating constants. It is
preferred than calling the profile's method of the same name directly
since it will be responsible with loading the right profile.
"""
self._EnsureInitialized()
# Parse the name
components = self._ParseAddress(name)
if not components["symbol"]:
raise ValueError("No symbol name specified.")
module = self._modules_by_name.get(components["module"])
if module is not None:
# Just delegate to the module's profile.
if module.profile:
return module.profile.get_constant_object(
components["symbol"], target=target, **kwargs)
return obj.NoneObject("Profile for name %s unknown." % name, log=True)
    def get_address_by_name(self, name):
        """Convert the symbol annotated by name to an address.

        Accepts plain integers, "0xADDR" hex strings, "module",
        "module!symbol" and any of those followed by " +/- offset", with an
        optional leading "*" meaning the result is dereferenced as a pointer.

        Returns:
          An integer address, or obj.NoneObject when the module or its
          profile cannot be found.

        Raises:
          TypeError: if name is neither an integer nor a string, or the
            expression uses an unsupported operator.
        """
        self._EnsureInitialized()

        # Fast path: the caller passed something already convertible to int.
        try:
            return int(name)
        except (ValueError, TypeError):
            pass

        if not isinstance(name, basestring):
            raise TypeError("Name should be a string.")

        # module stays None on the pure-address path; the deref fallback at
        # the bottom relies on this.
        module = None
        components = self._ParseAddress(name)
        module_name = self.NormalizeModuleName(components["module"])
        address = components["address"]
        if address is not None:
            # Base 0 lets int() honor the 0x prefix.
            address = int(address, 0)

        # User did not specify an address
        else:
            module = self._modules_by_name.get(module_name)
            if not module:
                return obj.NoneObject(
                    "No module %s found" % module_name, log=True)

            # Found the module we use its base address
            address = module.start

            # Search for a symbol in the module.
            symbol = components["symbol"]
            if symbol:
                # Get the profile for this module.
                if module.profile:
                    address = module.profile.get_constant(symbol, is_address=True)
                else:
                    return obj.NoneObject("No profile found for module", log=True)

        # Support basic offset operations (+/-).
        op = components["op"]
        if op:
            # The regex captures surrounding spaces with the operator.
            op = op.strip()
            # Parse the offset as hex or decimal.
            offset = int(components["offset"], 0)
            if op == "+":
                address += offset
            elif op == "-":
                address -= offset
            else:
                raise TypeError("Operator '%s' not supported" % op)

        # If the symbol was a dereference, we need to read the address from
        # this offset. When module is None (pure address expression) the
        # attribute access fails and we fall back to the session profile.
        if components.get("deref"):
            try:
                address = module.profile.Pointer(address).v()
            except AttributeError:
                address = self.session.profile.Pointer(address).v()

        return address
def format_address(self, address, max_distance=0x1000000):
"""Format the address as a symbol name.
This means to try and find the containing module, the symbol within the
module or possibly an offset from a known symbol. e.g.
nt!PspCidTable
nt!PspCidTable + 0x10
nt + 0x234
Returns a list of symbol names for the address. The list is empty if the
address is not in a containing module if the nearest known symbol is
farther than max_distance away.
"""
self._EnsureInitialized()
_, symbols = self.get_nearest_constant_by_address(
address, max_distance=max_distance)
return sorted(symbols)
    def get_nearest_constant_by_address(self, address, max_distance=0x1000000):
        """Searches for a known symbol at an address lower than this.

        Returns a tuple (nearest_offset, list of symbol names). The tuple
        (-1, []) is returned when no module contains the address or the
        nearest candidate is farther than max_distance below it.
        """
        self._EnsureInitialized()

        address = obj.Pointer.integer_to_address(address)
        symbols = []

        module = self.GetContainingModule(address)
        if not module or not module.name:
            return (-1, [])

        # Ask the module's profile for the nearest known symbol, if there is
        # a profile at all.
        if module.profile != None:
            offset, symbols = module.profile.get_nearest_constant_by_address(
                address)

        # Symbols not found at all, use module name.
        if not symbols:
            if address - module.start > max_distance:
                return (-1, [])

            if address == module.start:
                return (module.start, [module.name])

            return (module.start, [
                "%s+%#x" % (module.name, address - module.start)])

        if address - offset > max_distance:
            return (-1, [])

        # Exact symbols found.
        if offset == address:
            return (offset, ["%s!%s" % (module.name, x) for x in symbols])

        # Approximate symbol found, check if the profile knows its type so we
        # can render a struct-member path instead of a raw offset.
        for x in symbols:
            if x in module.profile.constant_types:
                type_name = self._format_type(module, x, address)
                if type_name is not None:
                    return (offset, ["%s!%s" % (module.name, type_name)])

        return (offset, ["%s!%s+%#x" % (module.name, x, address - offset)
                         for x in symbols])
def _format_type(self, module, symbol, offset):
    """Use the type information to format the address within the struct.

    Walks down the member tree of the constant object at 'symbol' until
    the member containing 'offset' is found, building a path expression
    like "Symbol.member[3].sub+0x8" along the way.
    """
    result = symbol
    member_obj = module.profile.get_constant_object(symbol)
    while offset > member_obj.obj_offset:
        if isinstance(member_obj, obj.Struct):
            # Find the highest-offset member that still starts at or
            # below the target offset.
            members = [
                getattr(member_obj, x, None) for x in member_obj.members]
            member_collection = utils.SortedCollection(
                (x.obj_offset, x) for x in members)
            member_offset, member_below = (
                member_collection.get_value_smaller_than(offset))
            # No member below this offset?
            if member_offset is None:
                result += "+%s" % (offset - member_obj.obj_offset)
                break
            result += ".%s" % member_below.obj_name
            member_obj = member_below
        elif isinstance(member_obj, obj.Array):
            # Next lowest index is a whole number of items.
            item = member_obj[0]
            next_lowest_index = (
                offset - member_obj.obj_offset) // item.obj_size
            result += "[%s]" % next_lowest_index
            member_obj = member_obj[next_lowest_index]
        else:
            # Scalar member: express the remainder as a raw offset.
            result += "+%s" % (offset - member_obj.obj_offset)
            break
    return result
def search_symbol(self, pattern):
    """Searches symbols for the pattern.

    pattern may contain wild cards (*). Note that currently a module name
    is required. Example pattern:

    nt!Ps*

    Returns:
        A list of "module!symbol" strings for matching constants.

    Raises:
        RuntimeError: If the pattern does not name a module.
    """
    self._EnsureInitialized()
    result = []
    components = self._ParseAddress(pattern)
    module_name = self.NormalizeModuleName(components["module"])
    # Fixed: identity comparison with None (was "== None").
    if module_name is None:
        raise RuntimeError(
            "Module name must be specified for symbol search.")
    module = self._modules_by_name.get(module_name)
    if module:
        # Match all symbols. Only "*" is translated (to ".*"); other
        # regex metacharacters in the pattern pass through unescaped.
        symbol_regex = re.compile(components["symbol"].replace("*", ".*"))
        if module.profile:
            for constant in module.profile.constants:
                if symbol_regex.match(constant):
                    result.append("%s!%s" % (module_name, constant))
    return result
def collect(self):
    """Yield (symbol, resolved address) pairs for each requested symbol."""
    for name in self.plugin_args.symbol:
        resolved = self.get_address_by_name(name)
        yield name, resolved
import six
from rekall import obj
from rekall import plugin
from rekall import testlib
class TestFindPlugins(testlib.SimpleTestCase):
    """Regression test harness for the 'which_plugin' plugin."""
    PLUGIN = "which_plugin"
    PARAMETERS = dict(
        commandline="which_plugin %(struct)s",
        struct="proc"
    )
class TestCollect(testlib.SimpleTestCase):
    """Regression test harness for the 'collect' plugin."""
    PLUGIN = "collect"
    PARAMETERS = dict(
        commandline="collect %(struct)s",
        struct="proc"
    )
class FindPlugins(plugin.TypedProfileCommand, plugin.ProfileCommand):
    """Find which plugin(s) are available to produce the desired output."""
    name = "which_plugin"

    # Defaults for the declared plugin arguments below.
    type_name = None
    producers_only = False

    __args = [
        dict(name="type_name", required=True, positional=True,
             help="The name of the type we're looking for. "
             "E.g.: 'proc' will find psxview, pslist, etc."),
        dict(name="producers_only", required=False, type="Boolean",
             help="Only include producers: plugins that output "
             "only this struct and have no side effects.")
    ]

    def collect(self):
        # Producers emit only this struct and have no side effects;
        # otherwise any typed plugin is a candidate.
        if self.plugin_args.producers_only:
            pertinent_cls = plugin.Producer
        else:
            pertinent_cls = plugin.TypedProfileCommand
        for plugin_cls in six.itervalues(plugin.Command.classes):
            if not plugin_cls.is_active(self.session):
                continue
            if not issubclass(plugin_cls, pertinent_cls):
                continue
            table_header = plugin_cls.table_header
            if table_header:
                if isinstance(table_header, list):
                    table_header = plugin.PluginHeader(*table_header)
                try:
                    # Match either on the output type's class name or on
                    # the literal type name string.
                    for t in table_header.types_in_output:
                        if (isinstance(t, type) and
                                self.plugin_args.type_name == t.__name__):
                            yield plugin_cls(session=self.session)
                        elif self.plugin_args.type_name == t:
                            yield plugin_cls(session=self.session)
                except plugin.Error:
                    # We were unable to instantiate this plugin to figure
                    # out what it wants to emit. We did our best so move on.
                    continue

    def render(self, renderer):
        renderer.table_header([
            dict(name="plugin", type="Plugin", style="compact", width=30)
        ])
        for command in self.collect():
            renderer.table_row(command)
class Collect(plugin.TypedProfileCommand, plugin.ProfileCommand):
    """Collect instances of struct of type 'type_name'.

    This plugin will find all other plugins that produce 'type_name' and
    merge all their output. For example, running collect 'proc' will give
    you a rudimentary psxview.

    This plugin is mostly used by other plugins, like netstat and psxview.
    """
    name = "collect"
    type_name = None

    __args = [
        dict(name="type_name", required=True, positional=True,
             help="The type (struct) to collect.")
    ]

    @classmethod
    def GetPrototype(cls, session):
        """Instantiate with suitable default arguments."""
        return cls(None, session=session)

    def collect(self):
        """Merge the output of every producer of our type, de-duplicated."""
        which = self.session.plugins.which_plugin(
            type_name=self.plugin_args.type_name,
            producers_only=True)
        results = {}
        for producer in which.collect():
            # We know the producer plugin implements 'produce' because
            # 'which_plugin' guarantees it.
            # Fixed: log the requested type from plugin_args - the class
            # level 'type_name' attribute is always None here.
            self.session.logging.debug("Producing %s from producer %r",
                                       self.plugin_args.type_name, producer)
            for result in producer.produce():
                previous = results.get(result.indices)
                if previous:
                    previous.obj_producers.add(producer.name)
                else:
                    result.obj_producers = set([producer.name])
                    results[result.indices] = result
        return six.itervalues(results)

    def render(self, renderer):
        renderer.table_header([
            dict(name=self.plugin_args.type_name,
                 type=self.plugin_args.type_name),
            dict(name="producers")
        ])
        for result in self.collect():
            renderer.table_row(result, tuple(sorted(result.obj_producers)))
"""Informational plugins for assistance of efilter operations."""
from efilter.protocols import structured
from rekall import plugin
from rekall import obj
from rekall import session
from rekall import testlib
class Describe(plugin.TypedProfileCommand, plugin.ProfileCommand):
    """Describe the output of a plugin."""
    name = "describe"

    # This plugin inspects other plugins; it can run without a profile.
    PROFILE_REQUIRED = False

    __args = [
        dict(name="plugin_name", required=True, positional=True,
             help="A plugin or plugin name to describe."),
        dict(name="args", required=False, default={}, type="dict",
             positional=True,
             help="args to run the plugin with."),
        dict(name="max_depth", positional=True, required=False,
             type="IntParser", default=3,
             help="The maximum depth to follow mappings."),
    ]

    table_header = [
        dict(name="Field", type="TreeNode", max_depth=5, width=50),
        dict(name="Type"),
    ]

    def collect_members(self, item, depth):
        """Recursively describe members of 'item' down to max_depth."""
        if depth > self.plugin_args.max_depth:
            return
        try:
            for member in sorted(structured.getmembers(item)):
                type_instance = structured.resolve(item, member)
                # If it was given as a type, we need an instance here.
                yield dict(
                    Field=member,
                    Type=self._determine_type_name(type_instance),
                    depth=depth,
                )
                # Follow pointers into their targets before recursing.
                if isinstance(type_instance, obj.Pointer):
                    type_instance = type_instance.dereference()
                for x in self.collect_members(type_instance, depth + 1):
                    yield x
        except (TypeError, NotImplementedError):
            # Item does not support member enumeration - nothing to show.
            pass

    def _determine_type_name(self, column_type_instance):
        """Return a printable type name for a column value or type."""
        if isinstance(column_type_instance, type):
            column_type_instance = column_type_instance()
        object_type = None
        try:
            object_type = column_type_instance.obj_type
        except AttributeError:
            pass
        if object_type is None:
            # Fall back to the Python class name.
            object_type = type(column_type_instance).__name__
        return object_type

    def _get_exemplar_row(self, instance):
        """Return one representative output row, or the declared types."""
        if self.plugin_args.args:
            for row in instance.collect():
                # Skip divider rows because they are mostly empty.
                if isinstance(row, dict) and "divider" in row:
                    continue
                return row
        return instance.column_types()

    def collect(self):
        plugin_name = self.plugin_args.plugin_name
        # Accept either a plugin name or a PluginRunner wrapper.
        if isinstance(plugin_name, session.PluginRunner):
            plugin_name = self.plugin_args.plugin_name.plugin_name
        plugin_cls = self.session.plugins.GetPluginClass(plugin_name)
        if not plugin_cls:
            raise plugin.PluginError("Please specify a valid plugin.")
        plugin_args = self.plugin_args.args.copy()
        plugin_args["ignore_required"] = True
        instance = plugin_cls(session=self.session, **plugin_args)
        table_header = getattr(instance, "table_header", None)
        if not table_header:
            raise plugin.PluginError(
                "Plugin %s is not a Typed Plugin. It can not be used in "
                "searches." % plugin_name)
        column_types = self._get_exemplar_row(instance)
        for i, column in enumerate(table_header):
            column_name = column["name"]
            # Exemplar rows may be dicts (keyed by column) or sequences.
            if isinstance(column_types, dict):
                column_type_instance = column_types.get(column_name)
            else:
                column_type_instance = column_types[i]
            yield dict(
                Field=column_name,
                Type=self._determine_type_name(column_type_instance),
            )
            for x in self.collect_members(column_type_instance, 1):
                yield x
class TestDescribe(testlib.SimpleTestCase):
    """Smoke test: run 'describe' against the pslist plugin."""
    PARAMETERS = {"commandline": "describe pslist"}
from efilter.protocols import applicative
from efilter.protocols import associative
from efilter.protocols import eq
from efilter.protocols import number
from efilter.protocols import ordered
from efilter.protocols import repeated
from efilter.protocols import string
from efilter.protocols import structured
from rekall import obj
from rekall.plugins.overlays import basic
from rekall_lib import registry
from rekall_lib import utils
import arrow
import parsedatetime
import pytz
import time
def resolve_Pointer(ptr, member):
    """Delegate member resolution to the target of the pointer, if any.

    Args:
        ptr: The Pointer object whose member is being accessed.
        member: The member name to resolve on the dereferenced target.

    Returns:
        The resolved member value, or None for an invalid pointer.
    """
    target_obj = ptr.deref()
    if not target_obj:
        # Fixed: logging.warn() is a deprecated alias; use warning().
        ptr.session.logging.warning(
            "Attempting to access member %r of a void pointer %r.", member, ptr)
    if target_obj:
        return structured.resolve(target_obj, member)
# Pointer.member is implemented as Pointer.dereference().member.
structured.IStructured.implement(
    for_type=obj.Pointer,
    implementations={
        structured.resolve: resolve_Pointer
    }
)
# AttributeDict is like a dict, except it does not raise when accessed
# via an attribute - it just returns None. Plugins can return an
# AttributeDict when they may return arbitrary columns and then
# Efilter can simply reference these columns via the "." operator. If
# the field does not exist, the column will simply have None there.
structured.IStructured.implement(
    for_type=utils.AttributeDict,
    implementations={
        structured.resolve: lambda d, m: d.get(m),
        structured.getmembers_runtime: lambda d: list(d),
    }
)

# SlottedObject is similar in functionality to AttributeDict but it is much
# faster and so it is preferred.
structured.IStructured.implement(
    for_type=utils.SlottedObject,
    implementations={
        structured.resolve: lambda s, m: getattr(s, m, None),
        structured.getmembers_runtime: lambda d: d.__slots__,
    }
)

# This lets us recurse into a NoneObject without raising errors.
structured.IStructured.implement(
    for_type=obj.NoneObject,
    implementations={
        # Resolving any member of a NoneObject yields the NoneObject itself.
        structured.resolve: lambda x, y: x,
    }
)

# This lets us do flags.member.
structured.IStructured.implement(
    for_type=basic.Flags,
    implementations={
        structured.resolve: getattr,
        structured.getmembers_runtime: lambda x: list(x.maskmap),
    }
)

# This lets us get indices out of Arrays.
associative.IAssociative.implement(
    for_type=obj.Array,
    implementations={
        associative.select: lambda obj, key: obj[key],
    }
)

# This lets us do some_array.some_member. Useful for accessing properties.
structured.IStructured.implement(
    for_type=obj.Array,
    implementations={
        structured.resolve: getattr
    }
)

# Pointers are only repeated if the thing they are pointing to is.
repeated.isrepeating.implement(
    for_type=obj.Pointer,
    implementation=lambda x: repeated.isrepeating(x.deref()))

repeated.IRepeated.implement(
    for_type=obj.Array,
    implementations={
        repeated.getvalues: lambda x: iter(x)
    }
)

string.IString.implement(
    for_type=basic.String,
    implementations={
        string.string: lambda x: utils.SmartUnicode(x)
    }
)

# Number operations on a pointer manipulate the pointer's value.
number.INumber.implement(
    for_types=(obj.Pointer, obj.NumericProxyMixIn),
    implementations={
        number.sum: lambda x, y: int(x) + y,
        number.product: lambda x, y: int(x) * y,
        number.difference: lambda x, y: int(x) - y,
        number.quotient: lambda x, y: int(x) / y
    }
)
def _robust_lt(x, y):
try:
return x < y
except Exception as e:
# No valid comparison between the two items, return False
return False
# Rekall objects with NumericProxyMixIn are orderable.
ordered.IOrdered.implement(
    for_types=(obj.NumericProxyMixIn, ),
    implementations={
        ordered.lt: _robust_lt,
    }
)
def _string_lt(x, y):
    """Lexicographic less-than; only defined when 'y' is string-like."""
    if not string.isstring(y):
        return False
    return string.string(x) < string.string(y)
# We can compare a string like object with another string like object.
ordered.IOrdered.implement(
    for_types=(obj.StringProxyMixIn,),
    implementations={
        ordered.lt: _string_lt,
    }
)
# Handle UnixTimeStamp comparisons. The timestamp formats we accept
# can be seen in https://bear.im/code/parsedatetime/docs/index.html
@registry.memoize
def _parse_datetime(string, timezone):
    """Parse a human readable timestamp string into an arrow.Arrow.

    Raises:
        ValueError: When parsedatetime cannot interpret the input.
    """
    # NOTE(review): the 'string' parameter shadows the efilter 'string'
    # protocol module imported above (only within this function).
    res, code = parsedatetime.Calendar().parseDT(
        string, sourceTime=time.localtime(),
        tzinfo=pytz.timezone(timezone),
    )
    # parsedatetime returns code 0 when nothing could be parsed.
    if code == 0:
        raise ValueError("Unable to parse %s as a timestamp" % string)
    return arrow.Arrow.fromdatetime(res)
def _timestamp_lt(unix_timestamp, y):
    """Compare a UnixTimeStamp against a human readable timestamp string."""
    if string.isstring(y):
        timestamp = _parse_datetime(
            y, unix_timestamp.obj_session.GetParameter("timezone", "UTC"))
        # Fixed: identity comparison with None (was "!= None").
        if timestamp is not None:
            return unix_timestamp.as_arrow() < timestamp
    return False
def _timestamp_eq(unix_timestamp, y):
    """Test a UnixTimeStamp for equality with a timestamp string."""
    if string.isstring(y):
        timestamp = _parse_datetime(
            y, unix_timestamp.obj_session.GetParameter("timezone", "UTC"))
        # Fixed: identity comparison with None (was "!= None").
        if timestamp is not None:
            return unix_timestamp.as_arrow() == timestamp
    return False
# Special handling for timestamps: route ordering and equality through
# the parsedatetime-aware comparison helpers above.
ordered.IOrdered.implement(
    for_types=(basic.UnixTimeStamp,),
    implementations={ordered.lt: _timestamp_lt},
)

eq.IEq.implement(
    for_types=(basic.UnixTimeStamp,),
    implementations={eq.eq: _timestamp_eq},
)
from builtins import str
from builtins import object
import functools
import re
import six
from efilter import query as q
from efilter import api
from efilter.protocols import applicative
from efilter.protocols import repeated
from efilter.protocols import structured
from efilter.transforms import solve
from rekall_lib import utils
# Exported EFilter functions. These can be used within efilter
# queries. For example select hex(cmd_address) from dis(0xfa8000895a32).
def hex_function(value):
    """A Function to format the output as a hex string.

    Repeated values are formatted element-wise; None passes through.
    """
    # Fixed: identity comparison with None (was "== None").
    if value is None:
        return
    if repeated.isrepeating(value):
        return [hex_function(x) for x in value]
    return "%#x" % int(value)
def str_function(value):
    """Convert value to a unicode string; None passes through."""
    # Fixed: identity comparison with None (was "== None").
    if value is None:
        return
    return utils.SmartUnicode(value)
def int_function(value):
    """Convert value to an integer; None passes through."""
    # Fixed: identity comparison with None (was "== None").
    if value is None:
        return
    return int(value)
def noncase_search_function(regex, value):
    """Case insensitive regex search function."""
    pattern = utils.SmartUnicode(regex)
    haystack = utils.SmartUnicode(value)
    return re.search(pattern, haystack, re.I) is not None
def substitute(pattern, repl, target):
    """Regex-substitute 'pattern' with 'repl' in target, case insensitive.

    Lists and tuples are processed element-wise; None passes through.
    """
    if target is None:
        return
    if isinstance(target, (list, tuple)):
        result = []
        for item in target:
            result.append(substitute(pattern, repl, item))
        return result
    else:
        # Fixed: re.sub()'s fourth positional argument is 'count', not
        # 'flags'. Passing re.I positionally capped the substitution at
        # 2 replacements and did NOT make it case insensitive.
        return re.sub(pattern, repl, six.text_type(target), flags=re.I)
# Functions exported into the EFILTER query namespace, e.g.
# "select hex(cmd_address) from dis(0xfa8000895a32)".
EFILTER_SCOPES = dict(
    hex=api.user_func(
        hex_function, arg_types=[int], return_type=[str]),
    str=api.user_func(
        str_function, arg_types=[], return_type=[str]),
    int=api.user_func(
        int_function, arg_types=[], return_type=[int]),
    regex_search=api.user_func(
        noncase_search_function, arg_types=[str, str],
        return_type=[bool]),
    concat=api.user_func(lambda *args: "".join(args)),
    sub=api.user_func(substitute),
)
class GeneratorRunner(object):
    """Adapts a generator callback to the IApplicative protocol."""

    def __init__(self, cb):
        self.cb = cb

    def apply(self, args, kwargs):
        # Bind the arguments now; evaluation is deferred until iteration.
        bound = functools.partial(self.cb, *args, **kwargs)
        return repeated.lazy(bound)
# Implement IApplicative for Command.
applicative.IApplicative.implement(
    for_type=GeneratorRunner,
    implementations={
        applicative.apply:
            lambda x, *args, **kwargs: x.apply(*args, **kwargs),
    }
)
class EfilterRunner(object):
    """An easy to use class for using Efilter.

    The Efilter interface is fairly complex but most people just want to
    filter a range of callables. This class is a helper class to help with
    using Efilter.

    All one needs to do is to extend this class and implement any functions
    which should exist in the EFilter namespace. For example, to add a foo()
    function:

    class NewRunner(search.EfilterRunner):
        def run_foo(self):
            for x in range(10):
                yield dict(A=x, B=2*x)

    for x in NewRunner().filter("select * from foo()"):
        print x

    {'A': 0, 'B': 0}
    {'A': 1, 'B': 2}
    {'A': 2, 'B': 4}
    {'A': 3, 'B': 6}
    {'A': 4, 'B': 8}
    """

    def resolve(self, name):
        """Resolve 'name' to an exported function or a run_* generator."""
        function = EFILTER_SCOPES.get(name)
        if function:
            return function
        method = getattr(self, "run_" + name, None)
        if method:
            return GeneratorRunner(method)
        raise KeyError("No plugin named %r." % name)

    def getmembers_runtime(self):
        # NOTE(review): relies on subclasses providing a 'columns'
        # attribute - it is not defined on this class.
        return [c["name"] for c in self.columns]

    def filter(self, query, **query_args):
        """Run 'query' (with parameters) and return the resulting values."""
        query = q.Query(query, params=query_args)
        return repeated.getvalues(solve.solve(query, self).value)


structured.IStructured.implicit_dynamic(EfilterRunner)
class ListFilter(EfilterRunner):
    """A helper to filter a list of dicts using efilter."""
    _list = None

    def run_list(self):
        """Expose the stored rows as the list() function in queries."""
        return self._list

    def filter(self, filter_exr, data, **query_args):
        """Filter the data using the filter expression.

        Args:
            filter_exr: essentially the where clause.
            data: A list of dicts, each dict representing a row.
        """
        if not filter_exr:
            return data
        self._list = data
        # Fixed: insert the missing space before "where" and drop the
        # redundant second truthiness check (filter_exr is known truthy
        # at this point).
        query = "select * from list() where " + filter_exr
        return super(ListFilter, self).filter(query, **query_args)
# This file is produced when the main "version.py update" command is run. That
# command copies this file to all sub-packages which contain
# setup.py. Configuration is maintain in version.yaml at the project's top
# level.
import json
import os
import subprocess
try:
    # We are looking for the git repo which contains this file.
    MY_DIR = os.path.dirname(os.path.abspath(__file__))
except Exception:  # Fixed: bare "except:" would also swallow SystemExit.
    # __file__ may be undefined (e.g. frozen builds or interactive use).
    MY_DIR = None
def is_tree_dirty():
    """Return True when the git working tree has uncommitted changes."""
    try:
        changed = subprocess.check_output(
            ["git", "diff", "--name-only"], stderr=subprocess.PIPE,
            cwd=MY_DIR,
        ).splitlines()
        return len(changed) > 0
    except (OSError, subprocess.CalledProcessError):
        # Not a git checkout, or git is unavailable.
        return False
def get_version_file_path(version_file="version.yaml"):
    """Return the path of 'version_file' at the git repository root."""
    try:
        toplevel = subprocess.check_output(
            ["git", "rev-parse", "--show-toplevel"], stderr=subprocess.PIPE,
            cwd=MY_DIR,
        ).decode("utf-8").strip()
    except (OSError, subprocess.CalledProcessError):
        # Not a git checkout, or git is unavailable.
        return None
    return os.path.join(toplevel, version_file)
def number_of_commit_since(version_file="version.yaml"):
    """Returns the number of commits since version.yaml was changed."""
    try:
        # Hash of the last commit that touched the version file.
        last_touch = subprocess.check_output(
            ["git", "log", "--no-merges", "-n", "1", "--pretty=format:%H",
             version_file], cwd=MY_DIR, stderr=subprocess.PIPE,
        ).strip()
        # Recent history; the index of the hash is the commit distance.
        history = subprocess.check_output(
            ["git", "log", "--no-merges", "-n", "1000", "--pretty=format:%H"],
            stderr=subprocess.PIPE, cwd=MY_DIR,
        ).splitlines()
        return history.index(last_touch)
    except (OSError, subprocess.CalledProcessError, ValueError):
        return None
def get_current_git_hash():
    """Return the hash of the latest non-merge commit, or None."""
    cmd = ["git", "log", "--no-merges", "-n", "1", "--pretty=format:%H"]
    try:
        return subprocess.check_output(
            cmd, stderr=subprocess.PIPE, cwd=MY_DIR).strip()
    except (OSError, subprocess.CalledProcessError):
        return None
def tag_version_data(version_data, version_path="version.yaml"):
    """Augment 'version_data' with git state and a PEP440 version string.

    Mutates 'version_data' in place and returns it.
    """
    current_hash = get_current_git_hash()

    # Not in a git repository.
    if current_hash is None:
        version_data["error"] = "Not in a git repository."
    else:
        version_data["revisionid"] = current_hash
        version_data["dirty"] = is_tree_dirty()
        version_data["dev"] = number_of_commit_since(
            get_version_file_path(version_path))

    # Format the version according to pep440:
    pep440 = version_data["version"]
    if int(version_data.get("post", 0)) > 0:
        pep440 += ".post" + version_data["post"]

    elif int(version_data.get("rc", 0)) > 0:
        pep440 += ".rc" + version_data["rc"]

    if version_data.get("dev", 0):
        # A Development release comes _before_ the main release.
        last = version_data["version"].rsplit(".", 1)
        version_data["version"] = "%s.%s" % (last[0], int(last[1]) + 1)
        pep440 = version_data["version"] + ".dev" + str(version_data["dev"])

    version_data["pep440"] = pep440
    return version_data
def get_versions():
    """Return the embedded version data tagged with the current git state."""
    data = raw_versions()
    return tag_version_data(data, """version.yaml""")
def raw_versions():
    """Return the static version data embedded in this module."""
    payload = """
{
    "codename": "EFilter",
    "post": "0",
    "rc": "0",
    "version": "1.6.0"
}
"""
    return json.loads(payload)
from builtins import object
__author__ = "Adam Sindelar <adamsh@google.com>"
from efilter import protocol
from efilter.protocols import repeated
from efilter.protocols import structured
class Scope(dict):
    """A dict-backed scope populated from an IStructured's members."""

    def __init__(self, other):
        # Fixed: super() was called as super(dict, self), which skips
        # dict itself in the MRO; name the defining class instead.
        super(Scope, self).__init__()
        if not protocol.implements(other, structured.IStructured):
            raise TypeError("Can only set scope from IStructured.")
        # Copy the scope locally.
        for key in structured.getmembers_runtime(other):
            self[key] = structured.resolve(other, key)
def scope_reflect_runtime_member(scope, name):
    """Best-effort runtime reflection for a Scope member."""
    try:
        member = scope[name]
        # For repeated values we take the type of the first
        # element. This is not great :-(. If Rekall returns a None for
        # the first element then this will fail.
        if repeated.isrepeating(member):
            for x in member:
                return type(x)
        # Non-repeating members are returned as-is (not their type).
        return member
    except KeyError:
        # NOTE(review): this returns an AnyType *instance*, whereas
        # ScopeStack.reflect returns the AnyType class - confirm which
        # form callers expect.
        return protocol.AnyType()
# Lets us pretend that dicts are objects, which makes it easy for users to
# declare variables.
structured.IStructured.implement(
    for_type=Scope,
    implementations={
        structured.resolve: lambda d, m: d[m],
        structured.getmembers_runtime: lambda d: list(d.keys()),
        structured.reflect_runtime_member: scope_reflect_runtime_member,
    }
)
class ScopeStack(object):
    """Stack of IStructured scopes from global to local.

    Arguments:
        scopes: A flat list of scopes from local (idx -1) to global
            (idx 0). Note that ScopeStack instances passed to the
            constructor are flattened.

            Each scope is either a subclass of IStructured or an instance
            of such subclass. When the ScopeStack is used in type
            inference the individual scopes are usually instances of
            type, or whatever objects the host application uses to
            emulate types. When used at runtime, they are, of course,
            instances.
    """

    scopes = ()

    @property
    def globals(self):
        # The outermost (global) scope.
        return self.scopes[0]

    @property
    def locals(self):
        # The innermost (local) scope.
        return self.scopes[-1]

    def __repr__(self):
        return "ScopeStack(%s)" % ", ".join((repr(s) for s in self.scopes))

    def __init__(self, *scopes):
        # Flatten nested ScopeStacks and normalize everything else to
        # Scope instances.
        flattened_scopes = []
        for scope in scopes:
            if isinstance(scope, type(self)):
                flattened_scopes.extend(scope.scopes)
            elif isinstance(scope, Scope):
                flattened_scopes.append(scope)
            elif isinstance(scope, type):
                flattened_scopes.append(Scope(scope))
            elif protocol.implements(scope, structured.IStructured):
                flattened_scopes.append(Scope(scope))
            else:
                raise TypeError("Scopes must be instances or subclasses of "
                                "IStructured; got %r." % (scope,))
        self.scopes = flattened_scopes

    # IStructured implementation.

    def resolve(self, name):
        """Call IStructured.resolve across all scopes and return first hit."""
        for scope in reversed(self.scopes):
            try:
                return structured.resolve(scope, name)
            except (KeyError, AttributeError):
                continue
        raise AttributeError(name)

    def getmembers(self):
        """Gets members (vars) from all scopes, using both runtime and static.

        This method will attempt both static and runtime getmembers. This is
        the recommended way of getting available members.

        Returns:
            Set of available vars.

        Raises:
            NotImplementedError if any scope fails to implement 'getmembers'.
        """
        names = set()
        for scope in self.scopes:
            if isinstance(scope, type):
                names.update(structured.getmembers_static(scope))
            else:
                names.update(structured.getmembers_runtime(scope))
        return names

    def getmembers_runtime(self):
        """Gets members (vars) from all scopes using ONLY runtime information.

        You most likely want to use ScopeStack.getmembers instead.

        Returns:
            Set of available vars.

        Raises:
            NotImplementedError if any scope fails to implement 'getmembers'.
        """
        names = set()
        for scope in self.scopes:
            names.update(structured.getmembers_runtime(scope))
        return names

    @classmethod
    def getmembers_static(cls):
        """Gets members (vars) from all scopes using ONLY static information.

        You most likely want to use ScopeStack.getmembers instead.

        NOTE(review): iterates cls.scopes, which is the class-level
        default () unless shadowed - confirm this classmethod is useful.

        Returns:
            Set of available vars.

        Raises:
            NotImplementedError if any scope fails to implement 'getmembers'.
        """
        names = set()
        for scope in cls.scopes:
            names.update(structured.getmembers_static(scope))
        return names

    def reflect(self, name):
        """Reflect 'name' starting with local scope all the way up to global.

        This method will attempt both static and runtime reflection. This is
        the recommended way of using reflection.

        Returns:
            Type of 'name', or protocol.AnyType.

        Caveat:
            The type of 'name' does not necessarily have to be an instance
            of Python's type - it depends on what the host application
            returns through the reflection API. For example, Rekall uses
            objects generated at runtime to simulate a native (C/C++) type
            system.
        """
        # Return whatever the most local scope defines this as, or bubble
        # all the way to the top.
        result = None
        for scope in reversed(self.scopes):
            try:
                if isinstance(scope, type):
                    result = structured.reflect_static_member(scope, name)
                else:
                    result = structured.reflect_runtime_member(scope, name)
                if result not in (None, protocol.AnyType):
                    return result
            except (NotImplementedError, KeyError, AttributeError):
                continue
        return protocol.AnyType

    def reflect_runtime_member(self, name):
        """Reflect 'name' using ONLY runtime reflection.

        You most likely want to use ScopeStack.reflect instead.

        Returns:
            Type of 'name', or protocol.AnyType.
        """
        for scope in reversed(self.scopes):
            try:
                return structured.reflect_runtime_member(scope, name)
            except (NotImplementedError, KeyError, AttributeError):
                continue
        return protocol.AnyType

    @classmethod
    def reflect_static_member(cls, name):
        """Reflect 'name' using ONLY static reflection.

        You most likely want to use ScopeStack.reflect instead.

        Returns:
            Type of 'name', or protocol.AnyType.
        """
        for scope in reversed(cls.scopes):
            try:
                return structured.reflect_static_member(scope, name)
            except (NotImplementedError, KeyError, AttributeError):
                continue
        return protocol.AnyType
# Register ScopeStack's static IStructured implementation. (Fixed:
# removed extraction junk that was fused onto this line.)
structured.IStructured.implicit_static(ScopeStack)
__author__ = "Adam Sindelar <adamsh@google.com>"
class EfilterError(Exception):
    """Base class for all EFILTER errors; tracks query text and location."""
    query = None
    _root = None
    message = None
    start = None
    end = None

    def __init__(self, query=None, message=None, root=None, start=None,
                 end=None):
        super(EfilterError, self).__init__(message)
        self.query = query
        self.message = message
        self.root = root
        # Explicit start/end override whatever the root setter derived.
        if start is not None:
            self.start = start
        if end is not None:
            self.end = end

    @property
    def root(self):
        return self._root

    @root.setter
    def root(self, value):
        self._root = value
        # Adopt the AST node's source extents, when available.
        try:
            self.start = value.start
            self.end = value.end
        except AttributeError:
            self.start = None
            self.end = None

    @property
    def text(self):
        return self.message

    @property
    def adjusted_start(self):
        """Start of the error in self.source (with the >>> and <<< delims)."""
        if self.start is not None:
            return self.start

    @property
    def adjusted_end(self):
        """End of the error in self.source (with the >>> and <<< delims)."""
        # NOTE(review): 9 presumably accounts for the inserted delimiter
        # text - confirm against the 'source' format below.
        if self.end is not None:
            return self.end + 9

    @property
    def source(self):
        """The query with the offending span wrapped in >>> ... <<<."""
        if not self.query:
            return None
        if self.start is not None and self.end is not None:
            return "%s >>> %s <<< %s" % (
                self.query[0:self.start],
                self.query[self.start:self.end],
                self.query[self.end:])
        elif self.query:
            return self.query

    def __str__(self):
        return "%s (%s) in query %r" % (
            type(self).__name__,
            self.text,
            self.source)

    def __repr__(self):
        return "%s(message=%r, start=%r, end=%r)" % (
            type(self), self.message, self.start, self.end)
class EfilterLogicError(EfilterError):
    """EfilterError subclass for logic errors in a query."""
    pass
class EfilterNoneError(EfilterError):
    """EfilterError subclass raised when a None value is encountered."""
    pass
class EfilterParseError(EfilterError):
    """EfilterError subclass for parse errors; carries the failing token."""
    token = None

    def __init__(self, *args, **kwargs):
        self.token = kwargs.pop("token", None)
        super(EfilterParseError, self).__init__(*args, **kwargs)
class EfilterKeyError(EfilterError):
    """EfilterError subclass for unknown keys; carries the missing key."""
    key = None

    @property
    def text(self):
        # An explicit message wins; otherwise derive one from the key.
        if self.message:
            return self.message
        if self.key:
            return "No such key %r." % self.key
        return None

    def __init__(self, *args, **kwargs):
        self.key = kwargs.pop("key", None)
        super(EfilterKeyError, self).__init__(*args, **kwargs)
class EfilterTypeError(EfilterError):
    """EfilterError subclass raised when types do not match expectations."""
    expected = None
    actual = None

    def __init__(self, *args, **kwargs):
        self.expected = kwargs.pop("expected", None)
        self.actual = kwargs.pop("actual", None)
        super(EfilterTypeError, self).__init__(*args, **kwargs)

    @property
    def text(self):
        if self.message:
            return self.message
        if self.expected and self.actual:
            return "Expected type %r, got %r instead." % (self.expected,
                                                          self.actual)
        return None
from builtins import next
from builtins import object
__author__ = "Adam Sindelar <adamsh@google.com>"
import itertools
import threading
import six
from efilter import protocol
from efilter.protocols import applicative
from efilter.protocols import counted
from efilter.protocols import reducer
from efilter.protocols import repeated
from efilter.protocols import structured
class TypedFunction(object):
    """Represents an EFILTER-callable function.

    Each function in the standard library is an instance of a subclass of
    this class. Subclasses override __call__.
    """
    name = None

    def apply(self, args, kwargs):
        # IApplicative entry point - unpack and delegate to __call__.
        return self(*args, **kwargs)

    def __call__(self):
        raise NotImplementedError()


applicative.IApplicative.implicit_dynamic(TypedFunction)
class TypedReducer(object):
    """Represents an EFILTER-callable reducer function.

    TypedReducer supports the IReducer protocol, but also works as a
    function (IApplicative), to allow it to reduce values inside rows in a
    query.
    """
    name = None

    # IApplicative

    def apply(self, args, kwargs):
        return self(*args, **kwargs)

    def __call__(self, data, chunk_size=None):
        return reducer.reduce(self, data, chunk_size)

    # IReducer - subclasses must provide all three stages.

    def fold(self, chunk):
        raise NotImplementedError()

    def merge(self, left, right):
        raise NotImplementedError()

    def finalize(self, intermediate):
        raise NotImplementedError()


applicative.IApplicative.implicit_dynamic(TypedReducer)
reducer.IReducer.implicit_dynamic(TypedReducer)
class SingletonReducer(object):
    """Preserves a literal value and ensures it's a singleton."""
    name = "singleton"

    def fold(self, chunk):
        # Every element of the chunk must equal the first one.
        it = iter(chunk)
        first = next(it)
        for other in it:
            if other != first:
                raise ValueError("All values in a singleton reducer must be "
                                 "equal to each other. Got %r != %r." % (
                                     first, other))
        return first

    def merge(self, left, right):
        if left != right:
            raise ValueError("All values in a singleton reducer must be "
                             "equal to each other. Got %r != %r." % (
                                 left, right))
        return left

    def finalize(self, intermediate):
        # Nothing to do - the folded value is already the result.
        return intermediate
class LibraryModule(object):
    """Represents a part of the standard library.

    Each library module consists of a collection of vars, which are mostly
    instances of TypedFunction. The stdcore module also contains basic types,
    such as 'str' or 'int', in addition to functions.
    """

    vars = None
    name = None

    # This is a class-level global storing all instances by their name.
    ALL_MODULES = {}
    _all_modules_lock = threading.Lock()

    def __init__(self, vars, name):
        self.vars = vars
        self.name = name

        with self._all_modules_lock:
            if name in self.ALL_MODULES:
                raise ValueError("Duplicate module name %r." % name)
            self.ALL_MODULES[name] = self

    def __del__(self):
        """If modules are being used properly this will only happen on exit."""
        with self._all_modules_lock:
            # Only unregister if the registry actually points at this
            # instance. An instance whose __init__ raised on a duplicate
            # name was never registered, and must not remove the module
            # that legitimately owns the name. This also avoids a
            # KeyError if the entry is already gone.
            if self.ALL_MODULES.get(self.name) is self:
                del self.ALL_MODULES[self.name]

    def __repr__(self):
        return "LibraryModule(name=%r, vars=%r)" % (self.name, self.vars)

    def getmembers_runtime(self):
        """Return the names of all vars in this module (IStructured)."""
        return list(self.vars.keys())

    def resolve(self, name):
        """Return the var registered under 'name' (IStructured).

        Raises:
            KeyError: if 'name' is not in this module.
        """
        return self.vars[name]
structured.IStructured.implicit_static(LibraryModule)
class First(TypedFunction):
    """Return the first value from an IRepeated."""

    name = "first"

    def __call__(self, x):
        # Grab the leading value; returns None when 'x' yields nothing,
        # just like falling off the end of a loop would.
        return next(iter(repeated.getvalues(x)), None)
class Take(TypedFunction):
    """Take only the first 'count' elements from 'x' (tuple or IRepeated).

    This implementation is lazy.

    Example:
        take(2, (1, 2, 3, 4)) -> (1, 2)

    Arguments:
        count: How many elements to return.
        x: The tuple or IRepeated to take from.

    Returns:
        A lazy IRepeated.
    """

    name = "take"

    def __call__(self, count, x):
        def _generator():
            source = x if isinstance(x, tuple) else repeated.getvalues(x)
            for position, element in enumerate(source):
                if position == count:
                    break

                yield element

        return repeated.lazy(_generator)
class Drop(TypedFunction):
    """Drop the first 'count' elements from 'x' (tuple or IRepeated).

    This implementation is lazy.

    Example:
        drop(2, (1, 2, 3, 4)) -> (3, 4)

    Arguments:
        count: How many elements to drop.
        x: The tuple or IRepeated to drop from.

    Returns:
        A lazy IRepeated.
    """

    name = "drop"

    def __call__(self, count, x):
        def _generator():
            source = x if isinstance(x, tuple) else repeated.getvalues(x)
            for position, element in enumerate(source):
                if position >= count:
                    yield element

        return repeated.lazy(_generator)
class Lower(TypedFunction):
    """Make a string lowercase."""

    name = "lower"

    def __call__(self, x):
        # Delegates to the string's own lower() method.
        result = x.lower()
        return result
class Find(TypedFunction):
    """Returns the position of 'needle' in 'string', or -1 if not found."""

    name = "find"

    def __call__(self, string, needle):
        # str.find already returns -1 on a miss, which is the contract here.
        position = string.find(needle)
        return position
class Count(TypedFunction):
    """Counts the number of elements in a tuple or of values in a repeated."""

    name = "count"

    def __call__(self, chunk):
        # Delegate to the ICounted protocol.
        total = counted.count(chunk)
        return total
class Reverse(TypedFunction):
    """Reverse a tuple or a repeated value, keeping the container type."""

    name = "reverse"

    def __call__(self, x):
        if isinstance(x, tuple):
            # Slice-reversal keeps the result a tuple.
            return x[::-1]

        return repeated.meld(*reversed(repeated.getvalues(x)))
class Materialize(TypedFunction):
    """Force a repeated value (e.g. output of map) to materialize in memory."""

    name = "materialize"

    def __call__(self, rv):
        # Exhaust the (possibly lazy) value first, then rebuild an
        # in-memory repeated from the collected values.
        values = list(rv)
        return repeated.repeated(*values)
# Registry of the stdcore module: maps each public name to the singleton
# instance implementing it, plus a handful of basic built-in types.
# (The last line of this statement previously carried appended metadata
# garbage that made the file a syntax error.)
MODULE = LibraryModule(name="stdcore",
                       vars={Take.name: Take(),
                             Drop.name: Drop(),
                             Count.name: Count(),
                             Reverse.name: Reverse(),
                             Lower.name: Lower(),
                             Find.name: Find(),
                             SingletonReducer.name: SingletonReducer(),
                             First.name: First(),
                             Materialize.name: Materialize(),
                             # Built-in types below:
                             "int": int,
                             "str": six.text_type,
                             "bytes": six.binary_type,
                             "float": float})
__author__ = "Adam Sindelar <adamsh@google.com>"
import six
from efilter.ext import csv_reader
from efilter.protocols import repeated
from efilter.stdlib import core
class Lines(core.TypedFunction):
    """Return an IRepeated with lines from text file 'path'.

    Arguments:
        path: String with the path to the file to read in.

    Raises:
        IOError if the file can't be opened for whatever reason.

    Returns:
        An object implementing IRepeated containing the lines of in the file
        as strings.
    """

    name = "lines"

    def __call__(self, path):
        # The descriptor is deliberately left open here: repeated.lines is
        # lazy and reads on demand. It will be closed in the repeated
        # value's destructor.
        file_obj = open(path, "r")
        return repeated.lines(file_obj)

    @classmethod
    def reflect_static_args(cls):
        # A single string argument (the path).
        return (six.string_types[0],)

    @classmethod
    def reflect_static_return(cls):
        return repeated.IRepeated
class CSV(core.TypedFunction):
    """Return an IRepeated with file at 'path' decoded as CSV.

    Arguments:
        path: Same as 'Lines'
        decode_header: Use the first line in the file for column names and
            return a dict per line, instead of tuple per line. (default: False.)
        delim: Column separator (default: ",").
        quote: Quote character (default: double quote).
        trim: Eliminate leading whitespace (default: True).

    Raises:
        IOError if the file can't be opened for whatever reason.

    Returns:
        An IRepeated containing the lines in the CSV file decoded as either
        a tuple of values per line, or a dict of values per line, if
        'decode_header' is True.
    """
    name = "csv"
    def __call__(self, path, decode_header=False, delim=",", quote="\"",
                 trim=True):
        fd = open(path, "r")
        # We don't close fd here, because repeated.lines is lazy and will read
        # on demand. The descriptor will be closed in the repeated value's
        # destructor.
        return csv_reader.LazyCSVReader(fd=fd, output_dicts=decode_header,
                                        delim=delim, quote=quote, trim=trim)
    @classmethod
    def reflect_static_args(cls):
        # Only the first two arguments (path and decode_header) are
        # reflected; the formatting options are not.
        return (six.string_types[0], bool)
    @classmethod
    def reflect_static_return(cls):
        return repeated.IRepeated
# Registry of the stdio module: file-reading functions exposed to EFILTER.
# (The last line of this statement previously carried appended metadata
# garbage that made the file a syntax error.)
MODULE = core.LibraryModule(name="stdio",
                            vars={CSV.name: CSV(),
                                  Lines.name: Lines()})
__author__ = "Adam Sindelar <adamsh@google.com>"
from efilter import dispatch
from efilter import ast
from efilter import query as q
@dispatch.multimethod
def normalize(expr):
    """Optimizes the AST for better performance and simpler structure.

    The returned query will be logically equivalent to what was provided but
    transformations will be made to flatten and optimize the structure. This
    works by recognizing certain patterns and replacing them with nicer ones,
    eliminating pointless expressions, and so on.

    # Collapsing nested variadic expressions:

    Example:
        Intersection(x, Interestion(y, z)) => Intersection(x, y, z)

    # Empty branch elimination:

    Example:
        Intersection(x) => x
    """
    # This default body is only reached when no implementation is
    # registered for the type of 'expr'.
    _ = expr
    raise NotImplementedError()
@normalize.implementation(for_type=q.Query)
def normalize(query):
    """Normalize the query's root expression and rewrap it in a Query."""
    return q.Query(query, root=normalize(query.root))
@normalize.implementation(for_type=ast.Expression)
def normalize(expr):
    """Plain expressions are already in normal form - pass through."""
    return expr
@normalize.implementation(for_type=ast.BinaryExpression)
def normalize(expr):
    """Normalize both sides, but don't eliminate the expression."""
    left = normalize(expr.lhs)
    right = normalize(expr.rhs)
    return type(expr)(left, right, start=left.start, end=right.end)
@normalize.implementation(for_type=ast.Apply)
def normalize(expr):
    """No elimination, but normalize arguments."""
    normalized_args = [normalize(arg) for arg in expr.args]
    return type(expr)(expr.func, *normalized_args, start=expr.start,
                      end=expr.end)
@normalize.implementation(for_type=ast.VariadicExpression)
def normalize(expr):
    """Pass through n-ary expressions, and eliminate empty branches.

    Variadic and binary expressions recursively visit all their children.
    If all children are eliminated then the parent expression is also
    eliminated:

    (& [removed] [removed]) => [removed]

    If only one child is left, it is promoted to replace the parent node:

    (& True) => True
    """
    children = []
    for child in expr.children:
        branch = normalize(child)
        if branch is None:
            # The child was eliminated entirely - drop it.
            continue

        if type(branch) is type(expr):
            # Collapse same-typed nested expressions into this one, e.g.
            # Intersection(x, Intersection(y, z)) => Intersection(x, y, z).
            children.extend(branch.children)
        else:
            children.append(branch)

    if not children:
        # Everything was eliminated; eliminate this node too.
        return None

    if len(children) == 1:
        # Promote the sole surviving child in place of this node.
        return children[0]

    return type(expr)(*children, start=children[0].start,
                      end=children[-1].end)
from builtins import next
__author__ = "Adam Sindelar <adamsh@google.com>"
import six
from efilter import dispatch
from efilter import ast
from efilter import syntax
from efilter import query as q
from efilter.parsers.dottysql import grammar
BUILTINS = dict((v, k) for k, v in six.iteritems(grammar.BUILTINS))
def __expression_precedence(expr):
    """Return (precedence, associativity) for 'expr', or (None, None)."""
    operator = grammar.OPERATORS.by_handler.get(type(expr))
    if operator is None:
        # Not an operator expression - no precedence applies.
        return None, None

    return operator.precedence, operator.assoc
@dispatch.multimethod
def asdottysql(expr):
    """Produces equivalent DottySQL output to the AST.

    This class follows the visitor pattern. See documentation on VisitorEngine.
    """
    # This default body is only reached when no implementation is
    # registered for the type of 'expr'.
    _ = expr
    raise NotImplementedError()
@asdottysql.implementation(for_type=q.Query)
def asdottysql(query):
    """Format a Query by formatting its root expression."""
    return asdottysql(query.root)
@asdottysql.implementation(for_types=(ast.Within, ast.Cast, ast.Reducer))
def asdottysql_builtin(expr):
    """Format expressions that map to builtin calls as 'name(args...)'."""
    if type(expr) not in BUILTINS:
        return "<Subexpression cannot be formatted as DottySQL.>"

    body = ", ".join([asdottysql(x) for x in expr.children])
    return "%s(%s)" % (BUILTINS[type(expr)], body)
@asdottysql.implementation(for_type=ast.Map)
def asdottysql_map(expr):
    """Prefer the dotted 'lhs.rhs' form; fall back to 'map(lhs, rhs)'."""
    left = asdottysql(expr.lhs)
    right = asdottysql(expr.rhs)

    # The dotted shorthand is only valid when both sides are themselves
    # maps or variables.
    dotted_types = (ast.Map, ast.Var)
    if (isinstance(expr.lhs, dotted_types)
            and isinstance(expr.rhs, dotted_types)):
        return "%s.%s" % (left, right)

    return "map(%s, %s)" % (left, right)
@asdottysql.implementation(for_type=ast.Let)
def asdottysql_let(expr):
    """Format 'let(name = value, ...) body'; bindings must be literal."""
    if not isinstance(expr.lhs, ast.Bind):
        return "<Non-literal let cannot be formatted as DottySQL>"

    bindings = []
    for pair in expr.lhs.children:
        if not isinstance(pair.lhs, ast.Literal):
            return "<Non-literal binding names cannot be formatted as DottySQL>"

        bindings.append("%s = %s" % (pair.lhs.value, asdottysql(pair.rhs)))

    return "let(%s) %s" % (", ".join(bindings), asdottysql(expr.rhs))
@asdottysql.implementation(for_types=(ast.NumericExpression, ast.Relation,
                                      ast.LogicalOperation))
def asdottysql_operator(expr):
    """Join children with the infix operator, parenthesizing as needed."""
    operator = grammar.OPERATORS.by_handler[type(expr)]

    parts = []
    for child in expr.children:
        child_precedence, _ = __expression_precedence(child)
        rendered = asdottysql(child)
        # Lower-precedence children must be wrapped to preserve meaning.
        if (child_precedence is not None
                and child_precedence < operator.precedence):
            rendered = "(%s)" % rendered

        parts.append(rendered)

    return (" %s " % operator.name).join(parts)
def _format_binary(lhs, rhs, operator, lspace=" ", rspace=" "):
    """Render 'lhs <op> rhs', parenthesizing a side when precedence requires.

    Mechanically: each side's precedence is looked up; when the side's
    operator associates towards that side ("left" for lhs, "right" for rhs)
    its precedence is treated as one higher before comparing against
    'operator.precedence'. A side whose (adjusted) precedence is lower than
    the parent operator's is wrapped in parentheses.

    Arguments:
        lspace/rspace: spacing inserted around the operator's name.
    """
    left = asdottysql(lhs)
    right = asdottysql(rhs)
    lhs_precedence, lassoc = __expression_precedence(lhs)
    if lassoc == "left" and lhs_precedence is not None:
        lhs_precedence += 1
    if lhs_precedence is not None and lhs_precedence < operator.precedence:
        left = "(%s)" % left
    rhs_precedence, rassoc = __expression_precedence(rhs)
    if rassoc == "right" and rhs_precedence is not None:
        rhs_precedence += 1
    if rhs_precedence is not None and rhs_precedence < operator.precedence:
        right = "(%s)" % right
    return "".join((left, lspace, operator.name, rspace, right))
@asdottysql.implementation(for_type=ast.Complement)
def asdottysql(expr):
    """Format negation, preferring '!=' and 'not in' over a 'not' prefix.

    A complement of a two-term equivalence renders as 'x != y' and a
    complement of a membership test as 'x not in y'. Anything else becomes
    'not <subexpression>', parenthesized when precedence requires it.
    """
    if (isinstance(expr.value, ast.Equivalence)
            and len(expr.value.children) == 2):
        return _format_binary(expr.value.children[0],
                              expr.value.children[1],
                              grammar.OPERATORS.by_name["!="])
    if isinstance(expr.value, ast.Membership):
        return _format_binary(expr.value.children[0],
                              expr.value.children[1],
                              grammar.OPERATORS.by_name["not in"])
    child_precedence, assoc = __expression_precedence(expr.value)
    if assoc == "left" and child_precedence:
        child_precedence += 1
    if (child_precedence is not None
            and child_precedence < __expression_precedence(expr)[0]):
        return "not (%s)" % asdottysql(expr.value)
    return "not %s" % asdottysql(expr.value)
@asdottysql.implementation(for_type=ast.Bind)
def asdottysql(expr):
    """Format a Bind as a call to the 'bind' builtin."""
    parts = [asdottysql(x) for x in expr.children]
    return "bind(%s)" % ", ".join(parts)
@asdottysql.implementation(for_type=ast.Pair)
def asdottysql(expr):
    """Format a key-value pair as 'key: value' (no space before the colon)."""
    return _format_binary(expr.lhs, expr.rhs, grammar.OPERATORS.by_name[":"],
                          lspace="")
@asdottysql.implementation(for_types=(ast.RegexFilter,
                                      ast.Membership))
def asdottysql(expr):
    """Format regex-match and membership tests via their registered infix
    operator."""
    return _format_binary(expr.lhs, expr.rhs,
                          grammar.OPERATORS.by_handler[type(expr)])
@asdottysql.implementation(for_type=ast.Apply)
def asdottysql(expr):
    """Format a function application as 'func(arg, ...)'."""
    children = iter(expr.children)
    # The first child is the function; the rest are its arguments.
    func = next(children)
    args = ", ".join(asdottysql(arg) for arg in children)
    return "%s(%s)" % (asdottysql(func), args)
@asdottysql.implementation(for_type=ast.Select)
def asdottysql(expr):
    """Format subscription as 'source[key, ...]'.

    The source is parenthesized unless it is one of the expression kinds
    that can stand directly before a subscript.
    """
    arguments = iter(expr.children)
    source = asdottysql(next(arguments))
    if not isinstance(expr.lhs, (ast.ValueExpression, ast.Repeat, ast.Tuple,
                                 ast.Map, ast.Select, ast.Apply, ast.Bind)):
        source = "(%s)" % source
    return "%s[%s]" % (source,
                       ", ".join([asdottysql(arg) for arg in arguments]))
@asdottysql.implementation(for_type=ast.Resolve)
def asdottysql(expr):
    """Format member resolution; the member name (rhs) must be a literal."""
    if not isinstance(expr.rhs, ast.Literal):
        return "<expression cannot be formatted as DottySQL>"
    # Render the literal member name as a bare variable with no spacing
    # around the resolution operator.
    return _format_binary(expr.lhs, ast.Var(expr.rhs.value),
                          grammar.OPERATORS.by_handler[ast.Resolve], lspace="",
                          rspace="")
@asdottysql.implementation(for_type=ast.Repeat)
def asdottysql(expr):
    """Format a repeated value as a parenthesized, comma-separated list."""
    parts = [asdottysql(x) for x in expr.children]
    return "(%s)" % ", ".join(parts)
@asdottysql.implementation(for_type=ast.Tuple)
def asdottysql(expr):
    """Format a tuple as a bracketed, comma-separated list."""
    parts = [asdottysql(x) for x in expr.children]
    return "[%s]" % ", ".join(parts)
@asdottysql.implementation(for_type=ast.IfElse)
def asdottysql(expr):
    """Format conditionals as 'if C then V else if C then V ... else D'.

    The trailing else-branch is omitted when the default is absent or is
    the literal None.
    """
    branches = ["if %s then %s" % (asdottysql(c), asdottysql(v))
                for c, v in expr.conditions()]
    if_ = " else ".join(branches)
    else_ = expr.default()
    if not else_ or else_ == ast.Literal(None):
        return if_
    return "%s else %s" % (if_, asdottysql(else_))
@asdottysql.implementation(for_type=ast.Literal)
def asdottysql(expr):
    """Format a literal using its Python repr."""
    return repr(expr.value)
@asdottysql.implementation(for_type=ast.Var)
def asdottysql(expr):
    """Format a variable reference as its bare name."""
    return expr.value
syntax.Syntax.register_formatter(shorthand="dottysql", formatter=asdottysql) | /rekall_efilter-1.6.0-py3-none-any.whl/efilter/transforms/asdottysql.py | 0.610918 | 0.253422 | asdottysql.py | pypi |
from builtins import next
from builtins import object
__author__ = "Adam Sindelar <adamsh@google.com>"
from efilter.protocols import applicative
from efilter.protocols import counted
from efilter.protocols import eq
from efilter.protocols import ordered
from efilter.protocols import repeated
class LazyRepetition(object):
    """Repeated variable backed by a restartable generator.

    Arguments:
        generator_func: A function that returns a generator. Each call must
            return a fresh generator so the repetition can be re-iterated.
    """

    _generator_func = None

    def __init__(self, generator_func):
        if not callable(generator_func):
            raise TypeError("Generator function must be callable.")

        self._generator_func = generator_func

    def __iter__(self):
        # Every iteration starts a brand new generator.
        return self._generator_func()

    def __repr__(self):
        return "LazyRepetition(generator_func=%r)" % (self._generator_func,)

    # IRepeated protocol implementation (see IRepeated for behavior docs).

    def getvalues(self):
        """Yield each value produced by a fresh run of 'generator_func'."""
        for value in self._generator_func():
            yield value

    # ICounted implementation:

    def count(self):
        """Count the values by exhausting one run of the generator."""
        return sum(1 for _ in self)
# LazyRepetition provides getvalues, satisfying IRepeated.
repeated.IRepeated.implicit_static(LazyRepetition)
# We really mean the toplevel object whatever this is (due to futurize
# this might be futurize.builtins.newobject)
repeated.lazy.implement(for_type=object.__mro__[-1],
                        implementation=LazyRepetition)
# count() makes LazyRepetition satisfy ICounted as well.
counted.ICounted.implicit_static(LazyRepetition)
def eq_implementation(self, other):
    """IEq.eq for lazy repetitions: element-wise equality.

    Walks both sequences with a sentinel so that a repetition that is a
    strict prefix of the other compares as unequal. (Plain zip would
    truncate to the shorter operand and wrongly report equality.)
    """
    if not repeated.isrepeating(other):
        return False

    sentinel = object()
    lhs_values = iter(self)
    rhs_values = iter(other)
    while True:
        lhs_item = next(lhs_values, sentinel)
        rhs_item = next(rhs_values, sentinel)
        if lhs_item is sentinel and rhs_item is sentinel:
            # Both sequences ended together with no mismatch.
            return True
        if lhs_item is sentinel or rhs_item is sentinel:
            # One sequence ran out before the other - different lengths.
            return False
        if lhs_item != rhs_item:
            return False
# Wire eq/ne for LazyRepetition into the IEq protocol. (The closing line
# previously carried appended metadata garbage that broke the syntax.)
eq.IEq.implement(
    for_type=LazyRepetition,
    implementations={
        eq.eq: eq_implementation,
        eq.ne: lambda x, y: not (x == y)
    }
)
from efilter import ast
from efilter import syntax
from efilter.parsers.common import ast_transforms
from efilter.parsers.common import grammar
from efilter.parsers.common import parser
from efilter.parsers.common import tokenizer
class ObjectFilterSyntax(syntax.Syntax):
    """Syntax for the 'objectfilter' query dialect.

    The dialect is defined entirely by the OPERATORS table below, which is
    consumed by the common ExpressionParser. Parsing is lazy: the query is
    only parsed when 'root' is accessed.
    """

    OPERATORS = [
        # Aliases for equivalence:
        grammar.Operator(name="equals", precedence=3, assoc="left",
                         handler=ast.Equivalence, docstring=None, prefix=None,
                         infix=grammar.Token("symbol", "equals"), suffix=None),
        grammar.Operator(name="is", precedence=3, assoc="left",
                         handler=ast.Equivalence, docstring=None, prefix=None,
                         infix=grammar.Token("symbol", "is"), suffix=None),
        grammar.Operator(name="==", precedence=3, assoc="left",
                         handler=ast.Equivalence, docstring=None, prefix=None,
                         infix=grammar.Token("symbol", "=="), suffix=None),
        grammar.Operator(name="notequals", precedence=3, assoc="left",
                         handler=ast_transforms.ComplementEquivalence,
                         docstring=None, prefix=None,
                         infix=grammar.Token("symbol", "notequals"),
                         suffix=None),
        grammar.Operator(name="isnot", precedence=3, assoc="left",
                         handler=ast_transforms.ComplementEquivalence,
                         docstring=None, prefix=None,
                         infix=grammar.Token("symbol", "isnot"), suffix=None),
        grammar.Operator(name="!=", precedence=3, assoc="left",
                         handler=ast_transforms.ComplementEquivalence,
                         docstring=None, prefix=None,
                         infix=grammar.Token("symbol", "!="), suffix=None),
        # Logical:
        grammar.Operator(name="or", precedence=0, assoc="left",
                         handler=ast.Union, docstring="Logical OR.",
                         prefix=None, suffix=None,
                         infix=grammar.Token("symbol", "or")),
        grammar.Operator(name="and", precedence=1, assoc="left",
                         handler=ast.Intersection, docstring="Logical AND.",
                         prefix=None, suffix=None,
                         infix=grammar.Token("symbol", "and")),
        grammar.Operator(name="||", precedence=0, assoc="left",
                         handler=ast.Union, docstring="Logical OR.",
                         prefix=None, suffix=None,
                         infix=grammar.Token("symbol", "||")),
        grammar.Operator(name="&&", precedence=1, assoc="left",
                         handler=ast.Intersection, docstring="Logical AND.",
                         prefix=None, suffix=None,
                         infix=grammar.Token("symbol", "&&")),
        # Comparisons:
        grammar.Operator(name=">=", precedence=3, assoc="left",
                         handler=ast.PartialOrderedSet,
                         docstring="Equal-or-greater-than.", prefix=None,
                         suffix=None, infix=grammar.Token("symbol", ">=")),
        grammar.Operator(name="<=", precedence=3, assoc="left",
                         handler=ast_transforms.ReversePartialOrderedSet,
                         docstring="Equal-or-less-than.", prefix=None,
                         suffix=None, infix=grammar.Token("symbol", "<=")),
        grammar.Operator(name=">", precedence=3, assoc="left",
                         handler=ast.StrictOrderedSet,
                         docstring="Greater-than.", prefix=None, suffix=None,
                         infix=grammar.Token("symbol", ">")),
        grammar.Operator(name="<", precedence=3, assoc="left",
                         handler=ast_transforms.ReverseStrictOrderedSet,
                         docstring="Less-than.", prefix=None, suffix=None,
                         infix=grammar.Token("symbol", "<")),
        # Set ops:
        grammar.Operator(name="notinset", precedence=3, assoc="left",
                         handler=ast_transforms.ComplementMembership,
                         docstring="Left-hand operand is not in list.",
                         prefix=None, suffix=None,
                         infix=(grammar.Token("symbol", "notinset"))),
        grammar.Operator(name="inset", precedence=3, assoc="left",
                         handler=ast.Membership,
                         docstring="Left-hand operand is in list.",
                         prefix=None, suffix=None,
                         infix=grammar.Token("symbol", "inset")),
        grammar.Operator(name="notcontains", precedence=3, assoc="left",
                         handler=ast_transforms.ReverseComplementMembership,
                         docstring="Right-hand operand is not in list.",
                         prefix=None, suffix=None,
                         infix=(grammar.Token("symbol", "notcontains"))),
        grammar.Operator(name="contains", precedence=3, assoc="left",
                         handler=ast_transforms.ReverseMembership,
                         docstring="Right-hand operand is in list.",
                         prefix=None, suffix=None,
                         infix=grammar.Token("symbol", "contains")),
        # Miscellaneous:
        grammar.Operator(name="unary -", precedence=5, assoc="right",
                         handler=ast_transforms.NegateValue,
                         docstring=None, infix=None, suffix=None,
                         prefix=grammar.Token("symbol", "-")),
        grammar.Operator(name="list builder", precedence=14, assoc="left",
                         handler=ast.Tuple, docstring=None,
                         prefix=grammar.Token("lbracket", "["),
                         infix=grammar.Token("comma", ","),
                         suffix=grammar.Token("rbracket", "]")),
        grammar.Operator(name="regexp", precedence=3, assoc="left",
                         handler=ast.RegexFilter,
                         docstring="Match LHS against regex on RHS.",
                         prefix=None, suffix=None,
                         infix=grammar.Token("symbol", "regexp")),
        grammar.Operator(name=".", precedence=12, assoc="left",
                         handler=ast_transforms.NormalizeResolve,
                         docstring="OBJ.MEMBER -> return MEMBER of OBJ.",
                         prefix=None, suffix=None,
                         infix=grammar.Token("symbol", ".")),
    ]

    def __init__(self, original, params=None, scope=None):
        """Build a parser for 'original'.

        Arguments:
            original: The query source string.
            params: Unsupported by this dialect; must be None.
            scope: Stored on the instance for later use.

        Raises:
            ValueError: if 'params' is supplied.
        """
        super(ObjectFilterSyntax, self).__init__(original)
        self.scope = scope
        if params is not None:
            raise ValueError("ObjectFilterSyntax doesn't support parameters.")
        t = tokenizer.LazyTokenizer(original)
        self.parser = parser.ExpressionParser(operators=self.OPERATORS,
                                              tokenizer=t)

    @property
    def root(self):
        """Parse the original query and return the root AST expression."""
        return self.parser.parse()
syntax.Syntax.register_parser(ObjectFilterSyntax, shorthand="objectfilter") | /rekall_efilter-1.6.0-py3-none-any.whl/efilter/parsers/legacy/objectfilter.py | 0.795777 | 0.284135 | objectfilter.py | pypi |
from builtins import object
__author__ = "Adam Sindelar <adamsh@google.com>"
from efilter import errors
from efilter.parsers.common import grammar
class TokenStream(object):
    """Manages and enforces grammar over tokenizer output.

    Most recursive descent parsers need a mechanism to accept, reject, expect
    or peek at the next token based on matching logic supplied by grammar
    functions. This class manages the tokenizer for the parser, and enforces
    the expectations set by grammar.

    Arguments:
        tokenizer: Must support the tokenizer interface (skip and peek).
    """

    tokenizer = None
    # The most recent successful TokenMatch, if any.
    matched = None

    def __init__(self, tokenizer=None):
        self.tokenizer = tokenizer

    def match(self, f, *args):
        """Match grammar function 'f' against next token and set 'self.matched'.

        Arguments:
            f: A grammar function - see efilter.parsers.common.grammar. Must
                return TokenMatch or None.
            args: Passed to 'f', if any.

        Returns:
            Instance of efilter.parsers.common.grammar.TokenMatch or None.

        Comment:
            If a match is returned, it will also be stored in self.matched.
        """
        try:
            match = f(self.tokenizer, *args)
        except StopIteration:
            # The grammar function might have tried to access more tokens
            # than are available. That's not really an error, it just means
            # it didn't match.
            return None

        if match is None:
            return None

        if not isinstance(match, grammar.TokenMatch):
            raise TypeError("Invalid grammar function %r returned %r."
                            % (f, match))

        self.matched = match
        return match

    def accept(self, f, *args):
        """Like 'match', but consume the token (tokenizer advances.)"""
        match = self.match(f, *args)
        if match is None:
            return None

        self.tokenizer.skip(len(match.tokens))
        return match

    def reject(self, f, *args):
        """Like 'match', but throw a parse error if 'f' matches.

        This is useful when a parser wants to be strict about specific things
        being prohibited. For example, DottySQL bans the use of SQL keywords as
        variable names.
        """
        match = self.match(f, *args)
        if match:
            token = self.peek(0)
            raise errors.EfilterParseError(
                query=self.tokenizer.source, token=token,
                message="Was not expecting a %s here." % token.name)

    def expect(self, f, *args):
        """Like 'accept' but throws a parse error if 'f' doesn't match."""
        match = self.accept(f, *args)
        if match:
            return match

        try:
            func_name = f.__name__
        except AttributeError:
            func_name = "<unnamed grammar function>"

        start, end = self.current_position()
        raise errors.EfilterParseError(
            query=self.tokenizer.source, start=start, end=end,
            message="Was expecting %s here." % (func_name))

    def current_position(self):
        """Return a tuple of (start, end)."""
        token = self.tokenizer.peek(0)
        if token:
            return token.start, token.end

        # No token left - report a one-character span at the tokenizer's
        # current position.
        return self.tokenizer.position, self.tokenizer.position + 1

    def peek(self, n):
        """Same as self.tokenizer.peek."""
        return self.tokenizer.peek(n)

    def skip(self, n):
        """Same as self.tokenizer.skip."""
        return self.tokenizer.skip(n)

    def __iter__(self):
        """Self as iter(self.tokenizer)."""
        return iter(self.tokenizer)
from builtins import next
from builtins import object
__author__ = "Adam Sindelar <adamsh@google.com>"
import collections
import re
import six
from efilter import errors
from efilter.parsers.common import grammar
class Pattern(collections.namedtuple("Pattern",
                                     "name states regex action next_state")):
    """Defines a token pattern for the tokenizer.

    Arguments:
        name: The name of the pattern will be used to name the token.
        states: The pattern will only be applied if we're in one these states.
        regex: A regular expression to try and match from the current point.
            A named matched group 'token' will be saved in Token.value.
        action: The handler to call.
        next_state: The next state we transition to if this pattern matched.
    """

    def __new__(cls, name, states, regex, action, next_state):
        # Compile the expression once, up front. DOTALL/M/S/U: patterns may
        # span lines and must handle unicode input.
        compiled = re.compile(regex, re.DOTALL | re.M | re.S | re.U)
        return super(Pattern, cls).__new__(cls, name, states, compiled,
                                           action, next_state)
class LazyTokenizer(object):
"""Configurable tokenizer usable with most expression grammars.
This class is directly usable, and will, by default, produce tokens for
string and number literals, parens, brackets, commas and words (symbols).
Notes on performance:
The runtime complexity grows with the number of patterns (m) and the
number of tokens (n) in source. It is O(n*m) in the worst case.
The tokenizer is lazy, and uses a deque to cache parsed tokens which
haven't been skipped yet. When using 'peek' without 'skip' all tokens
have to be cached, and this leads to O(n) memory complexity!
Extending the tokenizer:
This class is capable of tokenizing most any sane expression language,
but can be further extended to (1) yield more specific token names for
certain grammar (e.g. distinguishing between symbols and operators),
as well as (2) supporting further tokens, such as curly braces.
In the majority of cases, adding more patterns will be sufficient. For
example, to support curly braces, one would add the following to
DEFAULT_PATTERNS:
Pattern("lbrace", # Give the token a new name.
("INITIAL",), # Match this only if you're not in a string.
r"(?P<token>\\{)", # The regex should match an lbrace, and
# capture it in the group named 'token'.
"emit", # This will yield a Token(name='lbrace', value='{').
None, # Matching an lbrace doesn't change the state.
),
Pattern("rbrace", ("INITIAL",), r"(?P<token>\\})", "emit", None)
For more complex use cases, it may be necessary to implement additional
actions, which are just instance methods. Take a look at how string
literals are implemented (string_start, string_end) for an example.
Built-in actions:
emit: Will emit a token with the supplied name and value set to whatever
the named match group 'token' contains.
emit_param: The tokenizer will emit a parse-time parameter for
interpolation by a parser. The parameter token can be indexed,
keyed on a string, or both. Indexing happens automatically, starting
from 0.
emit_int: The tokenizer will emit an integer obtained by interpreting
the matched substring as an integer in base 10.
emit_hex: Same as 'emit_int' but base 16.
emit_oct: Same as 'emit_int' but base 8.
emit_float: Same as 'emit_int' but emits a base 10 float.
string_end: Emits a token with the last matched string.
Public interface:
next_token: Returns the next token and advances the tokenizer.
skip: Skips N tokens ahead, without returning them.
peek: Looks ahead over N tokens WITHOUT advancing the tokenizer. This
fills up the token lookahead queue with N tokens - avoid supplying
large values of N.
__iter__: Returns an iterator that doesn't advance the tokenizer (
same as calling peek() with increasing values of N). This can fill
up the token queue quickly and should not be the primary interface.
Arguments:
source: Source string that will be lexed.
patterns (optional): Overrides self.DEFAULT_PATTERNS
"""
# Used if no patterns are supplied to the constructor. Subclasses can
# override.
DEFAULT_PATTERNS = (
# Parens/brackets and separators.
Pattern("lparen", ("INITIAL,"), r"(?P<token>\()", "emit", None),
Pattern("rparen", ("INITIAL,"), r"(?P<token>\))", "emit", None),
Pattern("lbracket", ("INITIAL,"), r"(?P<token>\[)", "emit", None),
Pattern("rbracket", ("INITIAL,"), r"(?P<token>\])", "emit", None),
Pattern("comma", ("INITIAL,"), r"(?P<token>,)", "emit", None),
# Built-time parameters.
Pattern("param", ("INITIAL",), r"\{(?P<token>[a-z_0-9]*)\}",
"emit_param", None),
Pattern("param", ("INITIAL,"), r"(?P<token>\?)", "emit_param", None),
# Numeric literals.
Pattern("literal", ("INITIAL,"),
r"(?P<token>\d+\.\d+)", "emit_float", None),
Pattern("literal", ("INITIAL,"), r"(?P<token>0\d+)", "emit_oct", None),
Pattern("literal", ("INITIAL,"),
r"(?P<token>0x[0-9a-zA-Z]+)", "emit_hex", None),
Pattern("literal", ("INITIAL,"), r"(?P<token>\d+)", "emit_int", None),
# String literals.
Pattern(None, ("INITIAL",), r"\"", "string_start", "STRING"),
Pattern(None, ("INITIAL",), r"'", "string_start", "SQ_STRING"),
Pattern("literal", ("STRING",), "\"", "string_end", None),
Pattern(None, ("STRING",), r"\\(.)", "string_escape", None),
Pattern(None, ("STRING",), r"([^\\\"]+)", "string_append", None),
Pattern("literal", ("SQ_STRING",), "'", "string_end", None),
Pattern(None, ("SQ_STRING",), r"\\(.)", "string_escape", None),
Pattern(None, ("SQ_STRING",), r"([^\\']+)", "string_append",
None),
# Prefer to match symbols only as far as they go, should they be
# followed by a literal with no whitespace in between.
Pattern("symbol", ("INITIAL",), r"([a-zA-Z_][\w_]*)", "emit", None),
# Special characters are also valid as a symbol, but we won't match them
# eagerly so as to not swallow valid literals that follow.
Pattern("symbol", ("INITIAL",), r"([-+*\/=~\.><\[\]!:]+)", "emit",
None),
# Whitespace is ignored.
Pattern(None, ("INITIAL",), r"(\s+)", None, None),
)
# Ordered instances of Pattern.
patterns = None
# A deque with tokens that have been parsed, but haven't been skipped yet.
lookahead = None
# The input string being tokenized.
source = None
# List of states, as determined by rules in 'patterns'.
state_stack = None
# The length of 'source'.
limit = None
# The latest matched literal string.
string = None
def __init__(self, source, patterns=None):
self.source = source
self.state_stack = ["INITIAL"]
self.current_token = None
self._position = 0
self.limit = len(source)
self.lookahead = collections.deque()
self._param_idx = 0
self.patterns = patterns or self.DEFAULT_PATTERNS
# Make sure current_token starts containing something.
self.next_token()
# API for the parser:
@property
def position(self):
"""Returns the logical position (unaffected by lookahead)."""
if self.lookahead:
return self.lookahead[0].start
return self._position
def __iter__(self):
"""Look ahead from current position."""
if self.current_token is not None:
yield self.current_token
for token in self.lookahead:
yield token
while True:
token = self._parse_next_token()
if not token:
return
self.lookahead.append(token)
yield token
def peek(self, steps=1):
"""Look ahead, doesn't affect current_token and next_token."""
try:
tokens = iter(self)
for _ in six.moves.range(steps):
next(tokens)
return next(tokens)
except StopIteration:
return None
def skip(self, steps=1):
"""Skip ahead by 'steps' tokens."""
for _ in six.moves.range(steps):
self.next_token()
def next_token(self):
"""Returns the next logical token, advancing the tokenizer."""
if self.lookahead:
self.current_token = self.lookahead.popleft()
return self.current_token
self.current_token = self._parse_next_token()
return self.current_token
# Implementation:
def _pop_state(self, **_):
try:
self.state_stack.pop()
except IndexError:
self._error("Pop state called on an empty stack.", self.position)
def _parse_next_token(self):
"""Will parse patterns until it gets to the next token or EOF."""
while self._position < self.limit:
token = self._next_pattern()
if token:
return token
return None
    def _next_pattern(self):
        """Parses the next pattern by matching each in turn.

        Returns the token emitted by the matching pattern's action, or None
        if the pattern has no action (e.g. ignored whitespace).

        Raises:
          errors.EfilterParseError: If nothing matches at this position.
        """
        current_state = self.state_stack[-1]
        position = self._position
        for pattern in self.patterns:
            # Patterns only apply while the tokenizer is in one of their
            # declared states.
            if current_state not in pattern.states:
                continue

            m = pattern.regex.match(self.source, position)
            if not m:
                continue

            position = m.end()
            token = None

            # A pattern may push a lexer state (e.g. entering a string).
            if pattern.next_state:
                self.state_stack.append(pattern.next_state)

            if pattern.action:
                callback = getattr(self, pattern.action, None)
                if callback is None:
                    raise RuntimeError(
                        "No method defined for pattern action %s!" %
                        pattern.action)

                # NOTE(review): this tests the captured *values*, not group
                # names - it probably meant "token" in m.groupdict(). Verify
                # against the pattern definitions before changing.
                if "token" in m.groups():
                    value = m.group("token")
                else:
                    value = m.group(0)
                token = callback(string=value, match=m,
                                 pattern=pattern)

            self._position = position
            return token

        # No pattern matched at all - the input is malformed here.
        self._error("Don't know how to match next. Did you forget quotes?",
                    start=self._position, end=self._position + 1)
    def _error(self, message, start, end=None):
        """Raise a nice error, with the token highlighted.

        Raises:
          errors.EfilterParseError: always; 'start' and 'end' delimit the
            span of self.source to highlight in the error rendering.
        """
        raise errors.EfilterParseError(
            source=self.source, start=start, end=end, message=message)
    # Actions:

    def emit(self, string, match, pattern, **_):
        """Emits a token using the current pattern match and pattern label."""
        return grammar.Token(name=pattern.name, value=string,
                             start=match.start(), end=match.end())
def emit_param(self, match, pattern, **_):
param_name = match.group(1)
if not param_name or param_name == "?":
param_name = self._param_idx
self._param_idx += 1
elif param_name and re.match(r"^\d+$", param_name):
param_name = int(param_name)
return grammar.Token(name=pattern.name, value=param_name,
start=match.start(), end=match.end())
    def emit_int(self, string, match, pattern, **_):
        """Emits a token holding a decimal integer literal."""
        return grammar.Token(name=pattern.name, value=int(string),
                             start=match.start(), end=match.end())

    def emit_oct(self, string, match, pattern, **_):
        """Emits a token holding an octal (base 8) integer literal."""
        return grammar.Token(name=pattern.name, value=int(string, 8),
                             start=match.start(), end=match.end())

    def emit_hex(self, string, match, pattern, **_):
        """Emits a token holding a hexadecimal (base 16) integer literal."""
        return grammar.Token(name=pattern.name, value=int(string, 16),
                             start=match.start(), end=match.end())

    def emit_float(self, string, match, pattern, **_):
        """Emits a token holding a floating point literal."""
        return grammar.Token(name=pattern.name, value=float(string),
                             start=match.start(), end=match.end())
    # String parsing:

    def string_start(self, match, **_):
        """Begins accumulating a quoted string literal."""
        self.string = ""
        # Remember where the literal started so the final token spans it.
        self.string_position = match.start()

    # Maps recognized backslash escape characters to their values.
    escaped_map = {
        "r": "\r",
        "n": "\n",
        "t": "\t",
        "b": "\b",
    }
    def string_escape(self, string, match, **_):
        """Appends one escape sequence (e.g. \\n) to the current string.

        Unrecognized escapes append the escaped character literally.
        """
        escaped_char = string[1]
        self.string += self.escaped_map.get(escaped_char, escaped_char)
    def string_append(self, string="", **_):
        """Appends a run of ordinary characters to the current string."""
        self.string += string
def string_end(self, pattern, match, **_):
self._pop_state()
return grammar.Token(name=pattern.name, value=self.string,
start=self.string_position, end=match.end()) | /rekall_efilter-1.6.0-py3-none-any.whl/efilter/parsers/common/tokenizer.py | 0.832032 | 0.596962 | tokenizer.py | pypi |
from builtins import next
from builtins import range
from builtins import object
__author__ = "Adam Sindelar <adamsh@google.com>"
import collections
import itertools
import six
class Token(collections.namedtuple("Token", "name value start end")):
    """Represents one token, which is what grammars operate on.

    Tokens compare and hash on (name, value) only - 'start' and 'end' are
    positional metadata and deliberately excluded.
    """

    def __new__(cls, name, value, start=None, end=None):
        # Make the position fields optional.
        return super(Token, cls).__new__(cls, name, value, start, end)

    def __repr__(self):
        return "Token(name='%s', value='%s', start=%d, end=%d)" % (
            self.name, self.value, self.start or 0, self.end or 0)

    def __eq__(self, other):
        """Tokens compare on name and value, not on position."""
        # Bug fix: comparing against a non-Token used to raise
        # AttributeError. Returning NotImplemented lets Python fall back to
        # the other operand's comparison (or identity).
        if not isinstance(other, Token):
            return NotImplemented
        return (self.name, self.value) == (other.name, other.value)

    def __hash__(self):
        """Tokens hash on name and value, not on position."""
        return hash((self.name, self.value))
class Operator(collections.namedtuple(
        "Operator",
        "name precedence assoc handler docstring prefix infix suffix")):
    """Declares an operator in a grammar with functions to match it.

    This is a plain namedtuple - all matching behavior lives in the lookup
    tables and parsers that consume it.

    Operators can have prefix, infix and suffix parts, each of which is
    represented by the token (like Token("keyword", "+", None)). Each operator
    must have at least one of the *fixes. This class has no restriction on which
    *fixes can be used together, but the individual parsers may not support
    every combination. For example, DottySQL doesn't parse circumfix
    (prefix + suffix) operators.

    Previously, DottySQL used grammar functions for suffixes, which works well
    when there is only a small number of them, but is very slow if there are
    many operators. In practice, the grammar functions matching *fixes almost
    always just call _keyword, which means they can be replaced with a lookup
    in the operator table.

    Arguments:
        name: The literal name of the operator, such as "+" or "not".
        precedence: Integer precedence with operators of the same arity.
        handler: Callable that emits AST for this operator.
        docstring: Documentation for the operator.
        assoc: Associativity - can be left or right for infix operators.
        suffix: (OPTIONAL) The token (not grammar function) of the suffix.
        prefix: (OPTIONAL) The token (not grammar function) of the prefix.
        infix: (OPTIONAL) The token (not grammar function) of the infix.
    """
class TokenLookupTable(object):
    """Ordered associative container where tokens are keys.

    Public properties:
        case_sensitive (default False): If set to False, all lookups will be
            converted to lower case. NOTE: Does not affect insertion:
            case-insensitive grammar should insert operators in lower case.
    """
    _max_len = 1  # Longest (multi-token) key seen so far.
    _table = None  # Ordered dict keyed on tuples of tokens.

    # This affects only lookups, not insertion.
    case_sensitive = False

    def __init__(self, *entries):
        self._table = collections.OrderedDict()
        for tokens, entry in entries:
            self.set(tokens, entry)

    def set(self, tokens, entry):
        """Associates a Token (or tuple of Tokens) with 'entry'.

        Raises:
            TypeError: If 'tokens' is neither a Token nor a tuple.
            ValueError: If the key was already set.
        """
        if isinstance(tokens, Token):
            tokens = (tokens,)
        elif isinstance(tokens, tuple):
            # Track the longest key so 'match' knows how far to look ahead.
            self._max_len = max(self._max_len, len(tokens))
        else:
            raise TypeError(
                "TokenLookupTable only supports instances of Token or "
                "tuples thereof for keys. Got %r." % tokens)

        if tokens in self._table:
            raise ValueError("Duplicate token key %r for %r." % (
                tokens, entry))

        self._table[tokens] = entry

    def _normalize_token(self, token):
        # Lower-case string-valued tokens unless lookups are case sensitive.
        if (isinstance(token.value, six.string_types)
                and not self.case_sensitive):
            return token._replace(value=token.value.lower())

        return token

    def match(self, tokens):
        """Returns (entry, matched_tokens) for the longest matching prefix.

        Returns (None, None) if no key matches the head of 'tokens'.
        """
        # Try to match longest known match first.
        for match_len in range(self._max_len, 0, -1):
            needle = tuple((self._normalize_token(t)
                            for t in itertools.islice(tokens, match_len)))
            result = self._table.get(needle)
            if result:
                return result, needle

        return None, None
class OperatorTable(object):
    """A complete set of operators in a grammar, keyed on their *fix tokens."""

    prefix = None  # TokenLookupTable keyed on prefix tokens.
    infix = None  # TokenLookupTable keyed on infix tokens.
    suffix = None  # TokenLookupTable keyed on suffix tokens.
    by_name = None  # Maps operator name -> Operator.
    by_handler = None  # Maps handler callable -> first Operator that uses it.

    def __init__(self, *operators):
        """Indexes 'operators' by name, handler and leading *fix token.

        Raises:
            ValueError: If two operators share the same name.
        """
        self.prefix = TokenLookupTable()
        self.infix = TokenLookupTable()
        self.suffix = TokenLookupTable()
        self.by_name = dict()
        self.by_handler = dict()

        for operator in operators:
            if operator.name in self.by_name:
                # Bug fix: the message previously read "Duplicit".
                raise ValueError(
                    "Duplicate operator name %r." % operator.name)

            self.by_name[operator.name] = operator

            if operator.handler not in self.by_handler:
                # Multiple operators can have the same handler, in which case
                # they are probably aliases that mean the same thing. In that
                # case the first operator "wins" and will likely be what
                # the formatter for this syntax ends up using as default when
                # it formats this AST.
                self.by_handler[operator.handler] = operator

            # An operator can have multiple components, but it is only indexed
            # by the first one to prevent ambiguity.
            if operator.prefix:
                self.prefix.set(operator.prefix, operator)
            elif operator.infix:
                self.infix.set(operator.infix, operator)
            elif operator.suffix:
                self.suffix.set(operator.suffix, operator)
# Grammar primitives and helpers. (No grammar functions until the end of file.)


class TokenMatch(collections.namedtuple(
        "TokenMatch", "operator value tokens")):
    """Represents a one or more matching tokens and, optionally, their contents.

    Arguments:
        operator: The Operator instance that matched, if any.
        value: The literal value that matched, if any.
        tokens: The actual tokens the match consumed.
    """

    @property
    def start(self):
        """Source offset at which the first consumed token starts."""
        return self.tokens[0].start

    @property
    def end(self):
        """Source offset at which the last consumed token ends."""
        return self.tokens[-1].end

    @property
    def first(self):
        """The first consumed token."""
        return self.tokens[0]
def keyword(tokens, expected):
    """Case-insensitive keyword match against the first token."""
    token = next(iter(tokens), None)
    if token and token.name == "symbol" and token.value.lower() == expected:
        return TokenMatch(None, token.value, (token,))
    return None
def multi_keyword(tokens, keyword_parts):
    """Match a case-insensitive keyword consisting of multiple tokens.

    Returns a TokenMatch over all consumed tokens, or None on mismatch/EOF.
    NOTE(review): the returned 'value' is only the LAST part's text; callers
    seem to rely on 'tokens' for the full text - confirm before changing.
    """
    tokens = iter(tokens)
    matched_tokens = []
    limit = len(keyword_parts)

    for idx in six.moves.range(limit):
        try:
            token = next(tokens)
        except StopIteration:
            return

        # Each consumed token must be a symbol equal (ci) to its part.
        if (not token or token.name != "symbol" or
                token.value.lower() != keyword_parts[idx]):
            return

        matched_tokens.append(token)

    return TokenMatch(None, token.value, matched_tokens)
def keywords(tokens, expected):
    """Match the first token against any of a set/dict of keywords (ci).

    Note that this doesn't support multi-part keywords. Any multi-part
    keywords must be special-cased in their grammar function.
    """
    token = next(iter(tokens), None)
    if not token or token.name != "symbol":
        return None

    if token.value.lower() in expected:
        return TokenMatch(None, token.value, (token,))
    return None
def prefix(tokens, operator_table):
    """Match a prefix of an operator; returns a TokenMatch or None."""
    operator, matched_tokens = operator_table.prefix.match(tokens)
    if operator:
        return TokenMatch(operator, None, matched_tokens)


def infix(tokens, operator_table):
    """Match an infix of an operator; returns a TokenMatch or None."""
    operator, matched_tokens = operator_table.infix.match(tokens)
    if operator:
        return TokenMatch(operator, None, matched_tokens)


def suffix(tokens, operator_table):
    """Match a suffix of an operator; returns a TokenMatch or None."""
    operator, matched_tokens = operator_table.suffix.match(tokens)
    if operator:
        return TokenMatch(operator, None, matched_tokens)
def token_name(tokens, expected):
    """Match the first token by its name (type)."""
    for token in tokens:
        if token and token.name == expected:
            return TokenMatch(None, token.value, (token,))
        break
    return None
def match_tokens(expected_tokens):
    """Generate a grammar function that will match 'expected_tokens' only.

    Raises:
        TypeError: If 'expected_tokens' is neither a Token nor a tuple.
    """
    if isinstance(expected_tokens, Token):
        # Match a single token.
        def _grammar_func(tokens):
            try:
                next_token = next(iter(tokens))
            except StopIteration:
                return

            # Token equality ignores position, so this compares name/value.
            if next_token == expected_tokens:
                return TokenMatch(None, next_token.value, (next_token,))

    elif isinstance(expected_tokens, tuple):
        # Match multiple tokens.
        match_len = len(expected_tokens)

        def _grammar_func(tokens):
            # NOTE(review): islice consumes 'tokens' if it is a plain
            # iterator; callers appear to pass re-iterable streams - verify.
            upcoming = tuple(itertools.islice(tokens, match_len))
            if upcoming == expected_tokens:
                return TokenMatch(None, None, upcoming)

    else:
        raise TypeError(
            "'expected_tokens' must be an instance of Token or a tuple "
            "thereof. Got %r." % expected_tokens)

    return _grammar_func
# Some common grammar functions:


def literal(tokens):
    """Match a 'literal' token (a constant value)."""
    return token_name(tokens, "literal")


def symbol(tokens):
    """Match a 'symbol' token (an identifier or keyword)."""
    return token_name(tokens, "symbol")


def lparen(tokens):
    """Match a left parenthesis token."""
    return token_name(tokens, "lparen")


def rparen(tokens):
    """Match a right parenthesis token."""
    return token_name(tokens, "rparen")


def lbracket(tokens):
    """Match a left bracket token."""
    return token_name(tokens, "lbracket")


def rbracket(tokens):
    """Match a right bracket token."""
    return token_name(tokens, "rbracket")


def comma(tokens):
    """Match a comma token."""
    return token_name(tokens, "comma")
from builtins import object
__author__ = "Adam Sindelar <adamsh@google.com>"
from efilter import ast
from efilter import errors
from efilter.parsers.common import grammar
from efilter.parsers.common import token_stream
class ExpressionParser(object):
"""Precedence-climbing parser with support for *fix operators.
Precedence-climbing parsers refer to an operator precedence table which can
be modified at runtime. This implementation supports prefix, infix, suffix
and mixfix operators and can be used to support grammars that aren't known
ahead of time.
This parser also supports circumfix operators with repeated infix
separators, which allows for list builders and the like. For example:
# This builds a list:
Operator(prefix=Token("lbracket", "["),
infix=Token("comma", ","),
suffix=Token("rbracket", "]"),
handler=ast.Tuple)
# The above doesn't conflict with, for example, array subscription
# because mixfix and circumfix operators are non-ambiguous:
Operator(prefix=None,
infix=Token("lbracket", "["),
suffix=Token("rbracket", "]"),
handler=ast.Select)
Precedence-climbing is particularly suitable for atom/operator expressions,
but doesn't extend well to more complex grammars, such as SQL, function
application, C-like languages, etc. For those more complex use-cases, this
class can still be invoked for the subsections that are pure expression
syntax.
    * Sometimes called postcircumfix: infix + suffix part, like x[y].
"""
operators = None
    @property
    def original(self):
        """The original source string being parsed."""
        return self.tokens.tokenizer.source
    def __init__(self, operators, tokenizer):
        """Build a parser over an iterable of Operators and a tokenizer."""
        self.operators = grammar.OperatorTable(*operators)
        self.tokens = token_stream.TokenStream(tokenizer)
def parse(self):
result = self.expression()
if self.tokens.peek(0):
token = self.tokens.peek(0)
raise errors.EfilterParseError(
message="Unexpected %s '%s' here." % (token.name, token.value),
query=self.original, token=token)
if result is None:
raise errors.EfilterParseError(
message="Query %r is empty." % self.original)
return result
def expression(self, previous_precedence=0):
lhs = self.atom()
return self.operator(lhs, previous_precedence)
    def atom(self):
        """Parse an atom: a prefix/circumfix expression, a literal, a var,
        or a parenthesized subexpression.

        Raises:
          errors.EfilterParseError: If no atom can be parsed here.
        """
        # Unary operator.
        if self.tokens.accept(grammar.prefix, self.operators):
            operator = self.tokens.matched.operator
            start = self.tokens.matched.start
            children = [self.expression(operator.precedence)]

            # Allow infix to be repeated in circumfix operators.
            if operator.infix:
                while self.tokens.accept(grammar.match_tokens(operator.infix)):
                    children.append(self.expression())

            # If we have a suffix expect it now.
            if operator.suffix:
                self.tokens.expect(grammar.match_tokens(operator.suffix))

            return operator.handler(*children, start=start,
                                    end=self.tokens.matched.end,
                                    source=self.original)

        if self.tokens.accept(grammar.literal):
            return ast.Literal(self.tokens.matched.value, source=self.original,
                               start=self.tokens.matched.start,
                               end=self.tokens.matched.end)

        if self.tokens.accept(grammar.symbol):
            return ast.Var(self.tokens.matched.value, source=self.original,
                           start=self.tokens.matched.start,
                           end=self.tokens.matched.end)

        if self.tokens.accept(grammar.lparen):
            # Parenthesized subexpression.
            expr = self.expression()
            self.tokens.expect(grammar.rparen)
            return expr

        if self.tokens.peek(0):
            raise errors.EfilterParseError(
                message="Was not expecting %r here." % self.tokens.peek(0).name,
                token=self.tokens.peek(0))
        else:
            raise errors.EfilterParseError("Unexpected end of input.")
def _infix_of_min_precedence(self, tokens, precedence):
match = grammar.infix(tokens, self.operators)
if not match:
return
if match.operator.precedence < precedence:
return
return match
    def operator(self, lhs, min_precedence):
        """Precedence-climbing loop: fold infix operators onto 'lhs'.

        Keeps consuming infix operators of at least 'min_precedence',
        recursing to build tighter-binding right-hand sides first.
        """
        while self.tokens.accept(self._infix_of_min_precedence, min_precedence):
            operator = self.tokens.matched.operator
            if operator.prefix:
                raise ValueError("infix+prefix operators aren't supported.")

            if operator.suffix:
                # Mixfix operator like x[y]: the RHS runs to the suffix.
                rhs = self.expression()
                self.tokens.expect(grammar.match_tokens(operator.suffix))
                rhs.end = self.tokens.matched.end
            else:
                rhs = self.atom()

            # Left-associative operators require the next operator to bind
            # strictly tighter before it may claim our RHS.
            next_min_precedence = operator.precedence
            if operator.assoc == "left":
                next_min_precedence += 1

            while self.tokens.match(grammar.infix, self.operators):
                if (self.tokens.matched.operator.precedence
                        < next_min_precedence):
                    break
                rhs = self.operator(rhs,
                                    self.tokens.matched.operator.precedence)

            if not rhs:
                raise errors.EfilterParseError(
                    message="Expecting the operator RHS here.",
                    token=self.tokens.peek(0))

            lhs = operator.handler(lhs, rhs, start=lhs.start, end=rhs.end,
                                   source=self.original)

        return lhs
from rekall_lib import utils
import collections
import yaml
class OrderedYamlDict(yaml.YAMLObject, collections.OrderedDict):
    """A class which produces an ordered dict."""
    yaml_tag = "tag:yaml.org,2002:map"

    @classmethod
    def to_yaml(cls, dumper, data):
        """Represent this dict as a yaml mapping, preserving key order."""
        value = []
        node = yaml.nodes.MappingNode(cls.yaml_tag, value)
        for key, item in data.items():
            node_key = dumper.represent_data(key)
            node_value = dumper.represent_data(item)
            value.append((node_key, node_value))

        return node

    @classmethod
    def construct_mapping(cls, loader, node, deep=False):
        """Based on yaml.loader.BaseConstructor.construct_mapping.

        Raises:
          yaml.loader.ConstructorError: For non-mapping nodes or keys that
            are not hashable.
        """
        if not isinstance(node, yaml.MappingNode):
            raise yaml.loader.ConstructorError(
                None, None, "expected a mapping node, but found %s" % node.id,
                node.start_mark)

        mapping = OrderedYamlDict()
        for key_node, value_node in node.value:
            key = loader.construct_object(key_node, deep=deep)
            try:
                hash(key)
            except TypeError as exc:
                raise yaml.loader.ConstructorError(
                    "while constructing a mapping", node.start_mark,
                    "found unacceptable key (%s)" % exc, key_node.start_mark)
            value = loader.construct_object(value_node, deep=deep)
            mapping[key] = value

        return mapping

    @classmethod
    def from_yaml(cls, loader, node):
        """Parse the yaml file into an OrderedDict so we can preserve order."""
        fields = cls.construct_mapping(loader, node, deep=True)
        # Copy into a fresh 'cls' instance so subclasses deserialize into
        # their own type rather than plain OrderedYamlDict.
        result = cls()
        for k, v in list(fields.items()):
            result[k] = v

        return result
def decode(data):
    """Parse yaml text/bytes; returns an empty OrderedYamlDict for blank
    or empty input."""
    parsed = yaml.safe_load(utils.SmartUnicode(data))
    if not parsed:
        parsed = OrderedYamlDict()
    return parsed
def encode(raw_data):
    """Serialize raw_data to yaml (block style) and return it as bytes."""
    text = yaml.safe_dump(raw_data, default_flow_style=False)
    return utils.SmartStr(text)
class PrettyPrinterDumper(yaml.SafeDumper):
    """A dumper which produces pretty printed YAML.

    Representers for str and OrderedYamlDict are registered below at
    module level.

    See:
    http://stackoverflow.com/questions/6432605/any-yaml-libraries-in-python-that-support-dumping-of-long-strings-as-block-liter
    """
def unicode_representer(_, data):
    """Pick a scalar style for 'data': folded ('>') for strings containing
    a line wider than 80 chars, literal ('|') for other multiline strings,
    plain otherwise."""
    if any(len(line) > 80 for line in data.splitlines()):
        style = '>'
    elif "\n" in data:
        style = '|'
    else:
        style = ''

    return yaml.ScalarNode(u'tag:yaml.org,2002:str', data, style=style)
def represent_orderedyamldict(dumper, data):
    """Represent an OrderedYamlDict as a yaml mapping in insertion order.

    Raises:
        AttributeError: If a value is not one of the plainly representable
            types.
    """
    # Cleanup: the original list spelled str and int twice each - a leftover
    # from the Python 2 unicode/str and long/int pairs.
    representable_types = (str, list, dict, OrderedYamlDict, bool, int)

    value = []
    for item_key, item_value in list(data.items()):
        node_key = dumper.represent_data(item_key)
        if type(item_value) not in representable_types:
            raise AttributeError("Non representable yaml.")

        node_value = dumper.represent_data(item_value)
        value.append((node_key, node_value))

    return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)
# Register the custom representers. NOTE: the str representer was previously
# registered twice - a futurize artifact of the Python 2 unicode/str pair;
# a single registration is sufficient.
PrettyPrinterDumper.add_representer(
    str, unicode_representer)
PrettyPrinterDumper.add_representer(
    OrderedYamlDict, represent_orderedyamldict)
def safe_dump(data, **kwargs):
    """Serialize 'data' with the pretty printing dumper in block style."""
    options = dict(kwargs)
    options["default_flow_style"] = False
    return yaml.dump_all(
        [data], None, Dumper=PrettyPrinterDumper, **options)
def ordered_load(stream, Loader=yaml.SafeLoader,
                 object_pairs_hook=collections.OrderedDict):
    """Load a yaml stream into OrderedDict.

    This preserves the order of yaml files.
    """
    class OrderedLoader(Loader):
        pass

    def _construct_mapping(loader, node):
        loader.flatten_mapping(node)
        return object_pairs_hook(loader.construct_pairs(node))

    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _construct_mapping)

    return yaml.load(stream, OrderedLoader)
# This file is produced when the main "version.py update" command is run. That
# command copies this file to all sub-packages which contain
# setup.py. Configuration is maintain in version.yaml at the project's top
# level.
def get_versions():
    """Return the version data for this tree, tagged with git information."""
    return tag_version_data(raw_versions(), "version.yaml")
def raw_versions():
    """Return the static version configuration baked into this file."""
    return json.loads(
        '{"codename": "Hurricane Ridge",'
        ' "post": "0", "rc": "1", "version": "1.7.2"}')
import json
import os
import subprocess

try:
    # We are looking for the git repo which contains this file.
    MY_DIR = os.path.dirname(os.path.abspath(__file__))
except Exception:
    # Bug fix: this was a bare 'except:', which also swallows SystemExit and
    # KeyboardInterrupt. __file__ may be undefined (frozen builds, exec), in
    # which case the git helpers below fall back to the current directory.
    MY_DIR = None
def is_tree_dirty():
    """Return True if the git tree at MY_DIR has uncommitted changes.

    Returns False when git is unavailable or the command fails.
    """
    try:
        changed = subprocess.check_output(
            ["git", "diff", "--name-only"], stderr=subprocess.PIPE,
            cwd=MY_DIR).splitlines()
    except (OSError, subprocess.CalledProcessError):
        return False

    return bool(changed)
def get_version_file_path(version_file="version.yaml"):
    """Return the path of 'version_file' at the git repository's top level.

    Returns None when not inside a git repository or git is unavailable.
    """
    try:
        return os.path.join(subprocess.check_output(
            ["git", "rev-parse", "--show-toplevel"], stderr=subprocess.PIPE,
            cwd=MY_DIR,
        ).decode("utf-8").strip(), version_file)
    except (OSError, subprocess.CalledProcessError):
        return None
def number_of_commit_since(version_file="version.yaml"):
    """Returns the number of commits since version.yaml was changed.

    Only the most recent 1000 non-merge commits are inspected; returns None
    when git fails or the touching commit is older than that window.
    """
    try:
        last_commit_to_touch_version_file = subprocess.check_output(
            ["git", "log", "--no-merges", "-n", "1", "--pretty=format:%H",
             version_file], cwd=MY_DIR, stderr=subprocess.PIPE,
        ).strip()

        all_commits = subprocess.check_output(
            ["git", "log", "--no-merges", "-n", "1000", "--pretty=format:%H"],
            stderr=subprocess.PIPE, cwd=MY_DIR,
        ).splitlines()
        # The commit's index in recent history == commits made since it.
        return all_commits.index(last_commit_to_touch_version_file)
    except (OSError, subprocess.CalledProcessError, ValueError):
        # ValueError comes from .index() when the commit is out of range.
        return None
def get_current_git_hash():
    """Return the hash of the most recent non-merge commit, or None."""
    command = ["git", "log", "--no-merges", "-n", "1", "--pretty=format:%H"]
    try:
        return subprocess.check_output(
            command, stderr=subprocess.PIPE, cwd=MY_DIR).strip()
    except (OSError, subprocess.CalledProcessError):
        return None
def tag_version_data(version_data, version_path="version.yaml"):
    """Annotate raw 'version_data' with git state and a pep440 string.

    Adds revisionid/dirty/dev keys (when inside a git repo) and computes
    the 'pep440' version string, honoring post/rc/dev releases.
    """
    current_hash = get_current_git_hash()
    # Not in a git repository.
    if current_hash is None:
        version_data["error"] = "Not in a git repository."

    else:
        version_data["revisionid"] = current_hash
        version_data["dirty"] = is_tree_dirty()
        version_data["dev"] = number_of_commit_since(
            get_version_file_path(version_path))

    # Format the version according to pep440:
    pep440 = version_data["version"]
    if int(version_data.get("post", 0)) > 0:
        pep440 += ".post" + version_data["post"]

    elif int(version_data.get("rc", 0)) > 0:
        pep440 += ".rc" + version_data["rc"]

    if version_data.get("dev", 0):
        # A Development release comes _before_ the main release, so bump the
        # last version component before appending ".devN".
        last = version_data["version"].rsplit(".", 1)
        version_data["version"] = "%s.%s" % (last[0], int(last[1]) + 1)
        pep440 = version_data["version"] + ".dev" + str(version_data["dev"])

    version_data["pep440"] = pep440
    return version_data
from rekall_lib import serializer
from rekall_lib import utils
from Crypto import Random
from Crypto.Hash import SHA256
from Crypto.Signature import PKCS1_v1_5
from Crypto.PublicKey import RSA
# Needed to make PyInstaller include these modules.
from Crypto.Cipher import ARC2
from Crypto.Cipher import DES
rng = Random.new().read
class CipherError(TypeError):
    """Denotes a crypto error (e.g. invalid or unparseable key material)."""
class RSAPublicKey(serializer.SerializedObject):
    """Wraps an RSA public key, serialized as a PEM string."""

    # The underlying Crypto.PublicKey.RSA key object (None until loaded).
    _value = None

    def to_primitive(self, with_type=True):
        """Serialize to a PEM-encoded unicode string.

        Raises:
          RuntimeError: If no key material has been loaded yet.
        """
        if not self._value:
            raise RuntimeError("Key not initialized yet.")

        return utils.SmartUnicode(self._value.exportKey("PEM"))

    @classmethod
    def from_primitive(cls, pem_string, session=None):
        """Load a public key from its PEM representation.

        Raises:
          CipherError: If the PEM data does not parse as an RSA key.
        """
        result = cls(session)
        try:
            result._value = RSA.importKey(utils.SmartStr(pem_string))
        except (TypeError, ValueError) as e:
            raise CipherError("Public Key invalid: %s" % e)

        return result

    def from_raw_key(self, value):
        """Adopt an already-constructed RSA key object; returns self."""
        self._value = value
        return self

    def verify(self, message, signature):
        """Verify 'signature' over 'message' (PKCS#1 v1.5 with SHA256)."""
        hash = SHA256.new(message)
        signer = PKCS1_v1_5.new(self._value)
        return signer.verify(hash, signature)

    def client_id(self):
        """Derive a stable client id: "C." + first 16 hex chars of the
        SHA256 of the exported public key."""
        return "C.%s" % (SHA256.new(self._value.publickey().exportKey(
            "PEM")).hexdigest()[:16])

    def __str__(self):
        return self.to_json()

    def __repr__(self):
        digest = SHA256.new(self.to_primitive()).hexdigest()
        return "<%s (%s)>" % (self.__class__.__name__, digest)

    def __bool__(self):
        # True once key material has been loaded.
        return bool(self._value)
class RSAPrivateKey(serializer.SerializedObject):
    """A type representing an private key."""

    # The underlying Crypto.PublicKey.RSA key object (empty until loaded).
    _value = ""

    def generate_key(self):
        """Generate a fresh 2048-bit RSA key pair; returns self."""
        self._value = RSA.generate(2048)
        self._signal_modified()
        return self

    def to_primitive(self, with_type=True):
        """Serialize to a PEM-encoded unicode string.

        Raises:
          RuntimeError: If no key material has been loaded/generated yet.
        """
        if not self._value:
            raise RuntimeError("Key not initialized yet.")

        return utils.SmartUnicode(self._value.exportKey("PEM"))

    @classmethod
    def from_primitive(cls, pem_string, session=None):
        """Load a private key from its PEM representation.

        Raises:
          CipherError: If the PEM data does not parse as an RSA key.
        """
        result = cls(session=session)
        try:
            result._value = RSA.importKey(utils.SmartUnicode(pem_string))
        except (TypeError, ValueError) as e:
            raise CipherError("Private Key invalid: %s" % e)

        return result

    def public_key(self):
        """Return the corresponding RSAPublicKey."""
        return RSAPublicKey(session=self._session).from_raw_key(
            self._value.publickey())

    def sign(self, message):
        """Sign 'message' (PKCS#1 v1.5 with SHA256); returns the signature."""
        hash = SHA256.new(message)
        signer = PKCS1_v1_5.new(self._value)
        return signer.sign(hash)

    def __bool__(self):
        # True once key material has been loaded or generated.
        return bool(self._value)
class HTTPAssertion(serializer.SerializedObject):
    """An assertion that will be signed with the HTTPSignature."""

    # Fields: when the assertion was made and the URL it covers.
    schema = [
        dict(name="timestamp", type="epoch"),
        dict(name="url"),
    ]
class HTTPSignature(serializer.SerializedObject):
    """A message used to sign the data delivered in HTTPLocation.write_file().

    The message is delivered in the x-rekall-signature header. Note that due to
    limitations with the AppEngine environment we must use PyCrypto and this
    does not support all the cryptography primitives.
    """

    schema = [
        dict(name="client_id",
             doc="The client id this message came from."),

        dict(name="public_key", type=RSAPublicKey,
             doc="The public key in PEM format."),

        # The raw signature bytes over the serialized assertion.
        dict(name="signature", type="bytes"),

        # The serialized HTTPAssertion being signed.
        dict(name="assertion"),
    ]
"""These are various utilities for rekall."""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import chr
from builtins import bytes
from builtins import zip
from builtins import range
from past.builtins import basestring
from builtins import object
import builtins
import pickle
import io
import importlib
import itertools
import json
import ntpath
import re
import shutil
import socket
import sys
import tempfile
import threading
import traceback
import time
import weakref
import six
import sortedcontainers
from past.builtins import basestring
from rekall_lib import registry
from future.utils import with_metaclass
if six.PY3:
long = int
def SmartStr(string, encoding="utf8"):
    """Forces the string to be an encoded byte string.

    Args:
      string: Any object. str/bytes are handled directly; objects with a
        __bytes__ method are asked to convert themselves; anything else is
        converted via its unicode representation.
      encoding: The codec used whenever text has to be encoded to bytes.

    Returns:
      A byte string.
    """
    if six.PY3:
        if isinstance(string, str):
            return string.encode(encoding, "ignore")

        elif isinstance(string, bytes):
            return string

        elif hasattr(string, "__bytes__"):
            return string.__bytes__()

        # If there is no dedicated __bytes__ method, encode the unicode
        # representation of the object. Bug fix: honor the caller-supplied
        # 'encoding' instead of the previously hard-coded "utf8".
        return bytes(SmartUnicode(string), encoding)

    if six.PY2:
        if isinstance(string, str):
            return string

        elif hasattr(string, "__bytes__"):
            return string.__bytes__()

        # Encode the result of the __str__ method.
        return str(string).encode(encoding)
def SmartUnicode(string, encoding="utf8"):
    """Forces the string into a unicode object.

    Args:
      string: Any object. bytes are decoded with 'encoding' (errors
        ignored); everything else is converted via str()/unicode().
      encoding: The codec used when decoding byte strings.
    """
    if six.PY3:
        if isinstance(string, bytes):
            return string.decode(encoding, "ignore")

        # Call the object's __str__ method which should return an unicode
        # object.
        return str(string)

    elif six.PY2:
        if isinstance(string, str):
            return string.decode(encoding)

        return unicode(string)
def MaybeConsume(prefix, string):
    """Strip 'prefix' from the front of 'string' when present; otherwise
    return 'string' unchanged."""
    if string.startswith(prefix):
        string = string[len(prefix):]

    return string
def Hexdump(data, width=16):
    """Hexdump function shared by various plugins.

    Yields (offset, hex_string, printable_chars) tuples, one per row of
    'width' bytes.
    """
    for offset in range(0, len(data), width):
        row_data = data[offset:offset + width]
        # Only printable ASCII (codes 33..126) is shown; everything else,
        # including the space character, becomes '.'.
        translated_data = [
            my_chr(x) if my_ord(x) < 127 and my_ord(x) > 32 else "."
            for x in row_data]
        hexdata = " ".join(["{0:02x}".format(my_ord(x)) for x in row_data])

        yield offset, hexdata, translated_data
def WriteHexdump(renderer, data, base=0, width=16):
    """Write the hexdump to the fd.

    Offsets shown in the first column are rebased by 'base'.
    """
    renderer.table_header([dict(name="offset", style="address"),
                           dict(name="hex", width=width * 3),
                           dict(name='data', width=width)])

    for offset, hexdata, translated_data in Hexdump(data):
        renderer.table_row(base + offset, hexdata, "".join(translated_data))
# This is a synchronize decorator.
def Synchronized(f):
    """Synchronization decorator: serializes calls through self.lock.

    If self.lock is falsy the wrapped method is called without locking.
    Improvement: functools.wraps preserves the wrapped function's name and
    docstring, which the original decorator lost.
    """
    import functools

    @functools.wraps(f)
    def NewFunction(self, *args, **kw):
        if self.lock:
            with self.lock:
                return f(self, *args, **kw)

        return f(self, *args, **kw)

    return NewFunction
class Node(object):
    """An entry to a linked list."""
    next = None
    prev = None
    data = None

    def __init__(self, data):
        self.data = data

    def __str__(self):
        # Bug fix: SmartStr returns bytes under Python 3, which cannot be
        # concatenated to a str; use the unicode conversion instead.
        return "Node:" + SmartUnicode(self.data)


class LinkedList(object):
    """A simple doubly linked list used for fast caches."""

    def __init__(self):
        # We are the head (sentinel) node: an empty list points back at
        # itself in both directions.
        self.next = self.prev = self
        self.size = 0
        self.lock = threading.RLock()

    def Append(self, data):
        """Append 'data' at the tail; returns the new Node."""
        return self.AppendNode(Node(data))

    def AppendNode(self, node):
        """Link an existing Node in at the tail of the list."""
        self.size += 1
        last_node = self.prev
        last_node.next = node
        node.prev = last_node
        node.next = self
        self.prev = node
        return node

    def PopLeft(self):
        """Returns the head node's data and removes it from the list.

        Raises:
          IndexError: If the list is empty.
        """
        if self.next is self:
            raise IndexError("Pop from empty list.")

        first_node = self.next
        self.Unlink(first_node)
        return first_node.data

    def Pop(self):
        """Returns the tail node's data and removes it from the list.

        Raises:
          IndexError: If the list is empty.
        """
        if self.prev is self:
            raise IndexError("Pop from empty list.")

        # Bug fix: the tail is self.prev. The previous 'self.tail' attribute
        # never existed, so this method always raised AttributeError.
        last_node = self.prev
        self.Unlink(last_node)
        return last_node.data

    def Unlink(self, node):
        """Removes a given node from the list."""
        self.size -= 1

        node.prev.next = node.next
        node.next.prev = node.prev
        node.next = node.prev = None

    def __iter__(self):
        p = self.next
        while p is not self:
            yield p.data
            p = p.next

    def __len__(self):
        return self.size

    def __str__(self):
        return "[" + ", ".join(str(x) for x in self) + "]"
class FastStore(object):
    """This is a cache which expires objects in oldest first manner.

    This implementation first appeared in PyFlag and refined in GRR.

    This class implements an LRU cache which needs fast updates of the LRU order
    for random elements. This is implemented by using a dict for fast lookups
    and a linked list for quick deletions / insertions.
    """

    # Global registry of live stores (weak references), keyed by id().
    STORES = {}

    def __init__(self, max_size=10, kill_cb=None, lock=False):
        """Constructor.

        Args:
            max_size: The maximum number of objects held in cache.
            kill_cb: An optional function which will be called on each
                object terminated from cache.
            lock: If True this cache will be thread safe.
        """
        self._age = LinkedList()
        self._hash = {}
        self._limit = max_size
        self._kill_cb = kill_cb
        self.lock = None
        if lock:
            self.lock = threading.RLock()
        self.hits = self.misses = 0
        # Record where this store was created, for debugging. NOTE(review):
        # GetStack is defined elsewhere in this module - presumably it
        # captures a traceback; verify.
        self.creator = GetStack()

        # Register in STORES; the weakref callback removes the entry when
        # this store is garbage collected.
        self.STORES[id(self)] = weakref.proxy(
            self, lambda _, id=id(self), s=self.STORES: s.pop(id))

    def __len__(self):
        return len(self._hash)

    @Synchronized
    def Expire(self):
        """Expires old cache entries."""
        while len(self._age) > self._limit:
            x = self._age.PopLeft()
            self.ExpireObject(x)

    @Synchronized
    def Put(self, key, item):
        """Add the object to the cache; returns the key."""
        # Replacing an existing key: unlink its old age-list node first.
        hit = self._hash.get(key, self)
        if hit is not self:
            self._age.Unlink(hit[0])

        node = self._age.Append(key)
        self._hash[key] = (node, item)
        self.Expire()
        return key

    @Synchronized
    def ExpireObject(self, key):
        """Expire a specific object from cache.

        NOTE(review): this removes the hash entry but not the age-list node;
        stale nodes are tolerated by Expire (pop with default) - verify
        before relying on _age's length.
        """
        _, item = self._hash.pop(key, (None, None))

        if self._kill_cb and item is not None:
            self._kill_cb(item)

        return item

    @Synchronized
    def ExpireRegEx(self, regex):
        """Expire all the objects with the key matching the regex."""
        reg = re.compile(regex)
        for key in list(self._hash.keys()):
            if reg.match(key):
                self.ExpireObject(key)

    @Synchronized
    def ExpirePrefix(self, prefix):
        """Expire all the objects with the key having a given prefix."""
        for key in list(self._hash.keys()):
            if key.startswith(prefix):
                self.ExpireObject(key)

    @Synchronized
    def Get(self, key):
        """Fetch the object from cache.

        Objects may be flushed from cache at any time. Callers must always
        handle the possibility of KeyError raised here.

        Args:
            key: The key used to access the object.

        Returns:
            Cached object.

        Raises:
            KeyError: If the object is not present in the cache.
        """
        hit = self._hash.get(key, self)
        if hit is self:
            self.misses += 1
            raise KeyError(key)

        # Remove the item and put to the end of the age list
        node, item = hit
        self._age.Unlink(node)
        self._age.AppendNode(node)
        self.hits += 1

        return item

    def __iter__(self):
        return iter(list(self._hash.items()))

    def keys(self):
        return list(self._hash.keys())

    @Synchronized
    def __contains__(self, key):
        # Membership tests also refresh the entry's LRU position.
        result = key in self._hash
        if result:
            node, _ = self._hash[key]
            self._age.Unlink(node)
            self._age.AppendNode(node)

        return result

    @Synchronized
    def __getitem__(self, key):
        return self.Get(key)

    @Synchronized
    def Flush(self):
        """Flush all items from cache."""
        while self._age:
            x = self._age.PopLeft()
            self.ExpireObject(x)

        self._hash = {}

    @Synchronized
    def __getstate__(self):
        """When pickled the cache is flushed."""
        if self._kill_cb:
            raise RuntimeError("Unable to pickle a store with a kill callback.")

        self.Flush()
        return dict(max_size=self._limit)

    def __setstate__(self, state):
        self.__init__(max_size=state["max_size"])
class AgeBasedCache(FastStore):
    """A cache which removes objects after some time.

    Items are stamped with their insertion time; Get() treats entries older
    than max_age seconds as missing and eagerly evicts them.
    """

    def __init__(self, max_age=20, **kwargs):
        """Constructor.

        Args:
          max_age: Maximum age of an entry in seconds before it expires.
          **kwargs: Passed through to FastStore.__init__.
        """
        super(AgeBasedCache, self).__init__(**kwargs)
        self.max_age = max_age

    def Put(self, key, item):
        """Add the object to the cache, stamped with the current time.

        Returns:
          The key, matching FastStore.Put's contract (the original override
          silently dropped the parent's return value).
        """
        return super(AgeBasedCache, self).Put(key, (item, time.time()))

    def Get(self, key):
        """Fetch a fresh item or raise KeyError.

        Raises:
          KeyError: If the item is missing or older than max_age.
        """
        item, timestamp = super(AgeBasedCache, self).Get(key)

        if timestamp + self.max_age > time.time():
            return item

        # Too old - evict it and report it as missing.
        self.ExpireObject(key)

        raise KeyError("Item too old.")
def my_ord(char):
    """Return the integer ordinal of *char*, accepting ints as-is.

    In Python 3 indexing a byte string already yields ints, while in
    Python 2 it yields one-character strings that need ord(); this helper
    papers over the difference.
    """
    return char if isinstance(char, int) else ord(char)
def my_chr(value):
    """Return *value* as a one-character string, accepting ints or strings."""
    return value if isinstance(value, str) else chr(value)
# Compensate for Windows python not supporting socket.inet_ntop and some
# Linux systems (i.e. OpenSuSE 11.2 w/ Python 2.6) not supporting IPv6.
def inet_ntop(address_family, packed_ip):
    """Pure-python fallback for socket.inet_ntop (AF_INET and AF_INET6).

    Args:
      address_family: socket.AF_INET or socket.AF_INET6.
      packed_ip: The packed binary address (4 or 16 bytes).

    Returns:
      The presentation (string) form of the address.

    Raises:
      TypeError: If packed_ip is not a string/bytes.
      ValueError: If packed_ip has the wrong length for the family.
      socket.error: If the address family is unsupported.
    """

    def inet_ntop4(packed_ip):
        # Format 4 bytes as a dotted quad.
        if not isinstance(packed_ip, basestring):
            raise TypeError("must be string, not {0}".format(type(packed_ip)))

        if len(packed_ip) != 4:
            raise ValueError("invalid length of packed IP address string")

        return "{0}.{1}.{2}.{3}".format(*[my_ord(x) for x in packed_ip])

    def inet_ntop6(packed_ip):
        # Format 16 bytes as colon-separated hex groups, compressing the
        # longest run of zero words into "::".
        if not isinstance(packed_ip, basestring):
            raise TypeError("must be string, not {0}".format(type(packed_ip)))

        if len(packed_ip) != 16:
            raise ValueError("invalid length of packed IP address string")

        # Assemble eight 16-bit words from the 16 bytes.
        words = []
        for i in range(0, 16, 2):
            words.append((my_ord(packed_ip[i]) << 8) |
                         my_ord(packed_ip[i + 1]))

        # Replace a run of 0x00s with None
        numlen = [(k, len(list(g))) for k, g in itertools.groupby(words)]
        # Pick the run with value 0; among candidates prefer the longest
        # (first sort), then the smallest value (second, stable sort).
        max_zero_run = sorted(sorted(
            numlen, key=lambda x: x[1], reverse=True),
            key=lambda x: x[0])[0]
        words = []
        for k, l in numlen:
            # Only one zero run may be collapsed (hence the None check).
            if (k == 0) and (l == max_zero_run[1]) and not None in words:
                words.append(None)
            else:
                for i in range(l):
                    words.append(k)

        # Handle encapsulated IPv4 addresses
        encapsulated = ""
        if (words[0] is None) and (len(words) == 3 or (
                len(words) == 4 and words[1] == 0xffff)):
            words = words[:-2]
            encapsulated = inet_ntop4(packed_ip[-4:])

        # If we start or end with None, then add an additional :
        if words[0] is None:
            words = [None] + words

        if words[-1] is None:
            words += [None]

        # Join up everything we've got using :s
        return (":".join(
            ["{0:x}".format(w) if w is not None else "" for w in words]) +
            encapsulated)

    if address_family == socket.AF_INET:
        return inet_ntop4(packed_ip)
    elif address_family == socket.AF_INET6:
        return inet_ntop6(packed_ip)

    raise socket.error("[Errno 97] Address family not supported by protocol")
def ConditionalImport(name):
    """Import and return module *name*, or None if it is not installed."""
    try:
        return importlib.import_module(name)
    except ImportError:
        # Optional dependency is missing - the caller must cope with None.
        return None
# This is only available on unix systems. On other platforms (e.g. Windows)
# fcntl is None and FileLock below degrades to a no-op.
fcntl = ConditionalImport("fcntl")
class FileLock(object):
    """A self releasing file lock.

    Takes an exclusive flock on the wrapped file object's descriptor for
    the duration of the with-block. When fcntl is unavailable (non-unix
    platforms) locking silently degrades to a no-op.
    """

    def __init__(self, fd):
        # fd: an open file object (must expose fileno()).
        self.fd = fd

    def __enter__(self):
        if fcntl:
            fcntl.flock(self.fd.fileno(), fcntl.LOCK_EX)
        return self.fd

    def __exit__(self, exc_type, exc_value, traceback):
        if fcntl:
            fcntl.flock(self.fd.fileno(), fcntl.LOCK_UN)
class TempDirectory(object):
    """A self cleaning temporary directory.

    Usage:
      with TempDirectory() as path:
          ...  # the directory is removed (best effort) on exit.
    """

    def __enter__(self):
        self.name = tempfile.mkdtemp()
        return self.name

    def __exit__(self, exc_type, exc_value, traceback):
        # Best-effort cleanup: errors during removal are ignored.
        shutil.rmtree(self.name, ignore_errors=True)
class AttributedString(with_metaclass(registry.UniqueObjectIdMetaclass, object)):
    """A container pairing a string value with rendering metadata."""

    # Optional highlight metadata consumed by renderers.
    highlights = None

    def __init__(self, value, highlights=None, **options):
        self.value = value
        self.highlights = highlights
        self.options = options

    def __unicode__(self):
        return str(self.value)

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return "%s(%s, highlights=%s)" % (
            self.__class__.__name__, repr(self.value), repr(self.highlights))
class HexDumpedString(AttributedString):
    """A string which should be hex dumped."""
class HexInteger(int):
    """An int which should be rendered as a hex digit."""

    def __hex__(self):
        # Python 2 only: strips the trailing "L" from long literals.
        # NOTE(review): int has no __hex__ on Python 3 (hex() uses
        # __index__ there), so calling this on Python 3 would raise
        # AttributeError - confirm it is never invoked directly.
        return super(HexInteger, self).__hex__().rstrip("L")
class FormattedAddress(object):
    """A container for an address that should be formatted.

    Addresses are usually formatted with the address resolver, but this can be
    expensive as the address resolver needs to work out what modules exist in
    the address space and build profiles for them. Although this is necessary
    for actually formatting the address, sometimes plugins want to return a
    formatted address, but this might be filtered away - in which case the
    formatting effort is wasted.

    This container object encapsulates the resolver and the address and then
    uses an ObjectRenderer to do the actual formatting at rendering time. If the
    address is discarded, no formatting is done and we save some cycles.
    """

    def __init__(self, resolver, address, max_distance=1e6, max_count=100,
                 hex_if_unknown=True):
        self.resolver = resolver
        self.address = address
        self.max_distance = max_distance
        self.max_count = max_count
        self.hex_if_unknown = hex_if_unknown

    def __str__(self):
        # Resolution happens only now, when the value is actually rendered.
        names = self.resolver.format_address(
            self.address, max_distance=self.max_distance)[:self.max_count]

        if names:
            return ", ".join(names)

        if self.hex_if_unknown:
            return "%#x" % self.address

        return ""
class SlottedObject(object):
    """A general purpose PODO (plain old data object) built on __slots__."""

    # Subclasses declare their fields here.
    __slots__ = ()

    def __init__(self):
        # Initialize every declared slot so attribute reads never raise.
        for slot in self.__slots__:
            setattr(self, slot, None)

    def keys(self):
        """Return the names of all public attributes."""
        return [name for name in dir(self) if not name.startswith("_")]
class AttributeDict(with_metaclass(registry.UniqueObjectIdMetaclass, dict)):
    """A dict that can be accessed via attributes.

    This object is very slow due to use of __setstate__. Please consider using
    SlottedObject instead.
    """

    # Flag flipped whenever a key is modified through Set().
    dirty = False
    _object_id = None

    def __setattr__(self, attr, value):
        try:
            # Check that the object itself has this attribute.
            object.__getattribute__(self, attr)

            # It does (e.g. "dirty"): treat it as a real attribute rather
            # than a dict key.
            return object.__setattr__(self, attr, value)
        except AttributeError:
            # Unknown attribute: store it as a dict entry instead.
            self.Set(attr, value)

    def Get(self, item, default=None):
        return self.get(item, default)

    def Set(self, attr, value):
        self.dirty = True

        # Setting a key to None means to remove it from the cache. NOTE! This
        # must be exactly None not a NoneObject() since it should be possible to
        # cache a NoneObject() as the value of some key.
        if value is None:
            self.pop(attr, None)
        else:
            self[attr] = value

    def __getattr__(self, attr):
        # Do not allow private attributes to be set.
        if attr.startswith("_"):
            raise AttributeError(attr)

        # Missing keys read as None (dict.get default).
        return self.get(attr)

    def __dir__(self):
        return sorted(self)
def FormatIPAddress(family, value):
    """Formats a value as an ascii IP address determined by family.

    Args:
      family: The address family name (e.g. "AF_INET"); resolved to the
        matching socket.AF_* constant.
      value: A struct-like object exposing obj_vm/obj_offset/obj_size, or
        None.

    Returns:
      The presentation string, or *value* unchanged when it is None.
    """
    # NOTE(review): deliberately "==" rather than "is": presumably rekall's
    # NoneObject compares equal to None and must take this branch too -
    # confirm before "fixing" to an identity check.
    if value == None:
        return value

    return socket.inet_ntop(
        getattr(socket, str(family)),
        value.obj_vm.read(value.obj_offset, value.obj_size))
def ntoh(value):
    """Convert a 16 or 32 bit struct member from network to host byte order.

    Returns a NoneObject for any other member size.
    """
    width = value.obj_size
    if width == 2:
        return socket.ntohs(value.v())

    if width == 4:
        return socket.ntohl(value.v())

    # Imported lazily to avoid a hard dependency on the main rekall package.
    from rekall import obj
    return obj.NoneObject("Not a valid integer")
def Invert(dictionary):
    """Inverts keys and values in dictionary.

    The values become the new keys (stringified). Assumes the values are
    unique.
    """
    return dict((str(value), key) for key, value in dictionary.items())
def PPrint(data, depth=0):
    """A pretty printer for a profile.

    This only supports dict, list, unicode strings or non-unicode
    strings which can be encoded in utf8.

    This produces both a valid json and a valid python file.

    Args:
      data: The object to render.
      depth: Current indentation depth (two spaces per level).

    Returns:
      A unicode string rendering of data.
    """
    result = []
    # Booleans must be checked before int (bool is an int subclass) and are
    # emitted lowercase to stay valid JSON.
    if type(data) is bool:
        return str(data).lower()

    if isinstance(data, dict):
        # Empty dicts emitted on one line.
        if not data:
            return u"{}"

        result.append(u"{")

        tmp = []
        for key in sorted(data):
            value = data[key]
            # Only emit non-empty dicts.
            if value != {}:
                tmp.append(
                    u" %s%s: %s" % (
                        u" " * depth,
                        json.dumps(SmartUnicode(key)),
                        PPrint(data[key], depth + 1).strip()))

        result.append(u", \n".join(tmp)[depth:])

        result.append(u"}")

        return u"\n".join([(" " * depth + x) for x in result])

    if isinstance(data, (list, tuple)):
        # Lists are rendered on a single line.
        for item in data:
            pp_item = PPrint(item, depth)
            result.append(pp_item.strip())

        res = u"[" + u", ".join(result) + u"]"

        return res

    # These types should not be written with quotes.
    # NOTE: "long" here comes from the py2-compat imports at the top of the
    # module; under Python 3 it aliases int.
    if isinstance(data, (int, long, float)):
        return SmartUnicode(data)

    # JSON encodes None as null.
    elif data is None:
        return u"null"

    return "\"%s\"" % SmartUnicode(data)
# Matches lines such as "#define NAME 0x1F" or "#define NAME 17"; the third
# group is set only when the value carries a hex prefix.
DEFINE_REGEX = re.compile(r"#define\s+([A-Z0-9_]+)\s+((0x)?[0-9A-Z]+)")


def MaskMapFromDefines(text):
    """Generates a maskmap dict from a list of #defines.

    This function allows us to copy the relevant #define sections from header
    files without needing to manually edit them. We get to keep the comments etc
    for readability.
    """
    result = {}
    for line in text.splitlines():
        match = DEFINE_REGEX.search(line)
        if not match:
            continue

        name = match.group(1)
        literal = match.group(2)
        if match.group(3):
            # Explicit 0x prefix: hexadecimal.
            result[name] = int(literal, 16)
        elif literal.startswith("0"):
            # C convention: a bare leading zero marks an octal constant.
            result[name] = int(literal, 8)
        else:
            result[name] = int(literal)

    return result
def EnumerationFromDefines(text):
    """Generate an Enumeration from a list of #defines.

    Returns:
      A dict mapping the stringified numeric value to the symbol name
      (the reverse direction of MaskMapFromDefines).
    """
    result = {}
    for line in text.splitlines():
        m = DEFINE_REGEX.search(line)
        if m:
            name = m.group(1)
            value = m.group(2)
            if m.group(3):
                value = int(value, 16)
            else:
                # NOTE(review): unlike MaskMapFromDefines, a bare leading
                # "0" is parsed as decimal here rather than octal - confirm
                # whether the asymmetry is intentional.
                value = int(value)

            result[str(value)] = name

    return result
class SortedCollection(sortedcontainers.SortedDict):
    """A SortedDict whose insert() derives each item's key via a key function."""

    def __init__(self, *args, **kwargs):
        # key extracts the sort key from items handed to insert(); defaults
        # to the first element of a (key, value) style tuple.
        self.key_func = kwargs.pop("key", lambda x: x[0])
        super(SortedCollection, self).__init__(*args, **kwargs)

    def insert(self, item):
        key = self.key_func(item)
        self[key] = item

    def __iter__(self):
        # Iterate the stored items (values), not the keys, in key order.
        return iter(list(self.values()))

    def get_value_smaller_than(self, k):
        """Return (key, value) for the largest key <= k, or (None, None)."""
        for x in self.irange(maximum=k, reverse=True):
            return x, self[x]

        return None, None

    def get_value_larger_than(self, k):
        """Return (key, value) for the smallest key >= k, or (None, None)."""
        for x in self.irange(k):
            return x, self[x]

        return None, None

    def find_le(self, k):
        """Return the value for the largest key <= k or raise ValueError."""
        # NOTE(review): passes 0 as the minimum bound, which assumes numeric
        # non-negative keys - confirm for other key types.
        for x in self.irange(0, k, reverse=True):
            return self[x]

        raise ValueError('No item found with key below: %r' % (k,))

    def find_ge(self, k):
        """Return the value for the smallest key >= k or raise ValueError."""
        for x in self.irange(k):
            return self[x]

        raise ValueError('No item found with key at or above: %r' % (k,))
class RangedCollection(object):
    """A convenience wrapper around SortedCollection for ranges.

    Ranges are stored keyed by the (start, end) tuple, so lookups by
    address use tuple ordering on the keys.
    """

    def __init__(self):
        self.collection = SortedCollection()

    def insert(self, start, end, data):
        """Associate *data* with the half-open range [start, end)."""
        start = int(start)
        end = int(end)
        self.collection[(start, end)] = data

    def get_next_range_start(self, address):
        """Gets the start address of the next range larger than address."""
        # (address, None) sorts before any (address, end) tuple, so this
        # finds the first range starting at or after address.
        range, _ = self.collection.get_value_larger_than((address, None))
        if range is not None:
            return range[0]

    def get_containing_range(self, address):
        """Retrieve the data associated with the range that contains value.

        Retuns:
          A tuple of start, end, data for the range that contains address.
        """
        # (address + 1, address + 1) sorts after every range starting at
        # address, so the largest smaller key is the candidate container.
        tmp, data = self.collection.get_value_smaller_than(
            (address + 1, address + 1))
        if tmp is not None:
            start, end = tmp
            if start <= address < end:
                return start, end, data

        return None, None, None

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.collection == other.collection

        return NotImplemented

    def clear(self):
        self.collection.clear()

    def __getitem__(self, item):
        # Positional access: item is an index into the sorted key order.
        key = self.collection.iloc[item]
        return key[0], key[1], self.collection[key]

    def __iter__(self):
        for (start, end), data in list(self.collection.items()):
            yield start, end, data

    def __reversed__(self):
        for key in reversed(self.collection):
            start, end = key
            yield start, end, self.collection[key]

    def __str__(self):
        result = []
        for start, end, data in self:
            result.append("<%#x, %#x> %s" % (start, end, data))

        return "\n".join(result)
class JITIteratorCallable(object):
    """Lazily evaluates func(*args) each time the object is iterated."""

    def __init__(self, func, *args):
        if not callable(func):
            raise RuntimeError("Function must be callable")

        self.func = func
        self.args = args

    def __iter__(self):
        # Re-invoke the function per iteration so results are always fresh.
        for item in self.func(*self.args):
            yield item

    def __contains__(self, item):
        return item in list(self)

    def __str__(self):
        return str(list(self))

    def __call__(self):
        return list(self)
class JITIterator(JITIteratorCallable):
    """Lazily iterates the names of all classes registered on a registry.

    Because the lambda is re-evaluated on each iteration, classes added to
    baseclass.classes after construction are picked up automatically.
    """

    def __init__(self, baseclass):
        super(JITIterator, self).__init__(
            lambda: (x.name for x in list(baseclass.classes.values()) if x.name))
def CopyFDs(in_fd, out_fd, length=2**64):
    """Copy from one fd to another.

    If length is specified, we stop when we copied this many bytes. We always
    stop when in_fd reaches EOF.
    """
    remaining = length
    while remaining > 0:
        # Copy in chunks of at most 10MB to bound memory use.
        buf = in_fd.read(min(10000000, remaining))
        if not buf:
            return

        out_fd.write(buf)
        remaining -= len(buf)
def CopyAStoFD(in_as, out_fd, start=0, length=2**64,
               cb=lambda off, length: None):
    """Copy an address space into a file-like object.

    Data is written sparsely: each run's bytes are written at the run's own
    offset in out_fd, so holes between runs stay unwritten.

    Args:
      in_as: The source address space (must expose get_address_ranges and
        read).
      out_fd: A seekable file-like destination.
      start: Address to start copying from.
      length: Maximum number of bytes to copy.
      cb: Progress callback invoked as cb(offset, bytes_written) per block.
    """
    blocksize = 1024 * 1024
    for run in in_as.get_address_ranges(start=start, end=start+length):
        for offset in range(run.start, run.end, blocksize):
            to_read = min(blocksize, run.end - offset, length)
            if to_read == 0:
                break

            data = in_as.read(offset, to_read)

            # Write at the source address so the output mirrors the layout.
            out_fd.seek(offset)
            out_fd.write(data)
            length -= len(data)

            cb(offset, len(data))
def issubclass(obj, cls):  # pylint: disable=redefined-builtin
    """A sane implementation of issubclass.

    See http://bugs.python.org/issue10569

    Python bare issubclass must be protected by an isinstance test first since
    it can only work on types and raises when provided something which is not a
    type.

    Args:
      obj: Any object or class.
      cls: The class to check against.

    Returns:
      True if obj is a subclass of cls and False otherwise.
    """
    if not isinstance(obj, type):
        return False

    return builtins.issubclass(obj, cls)
def XOR(string1, string2):
    """Returns string1 xor string2."""
    # Pairwise xor, truncated to the shorter input by zip().
    xored = [my_ord(a) ^ my_ord(b) for a, b in zip(string1, string2)]

    if six.PY3:
        return bytes(xored)

    return b"".join(chr(x) for x in xored)
def xrange(start, end, step=1):
    """In Python2 the xrange builtin is broken.

    It raises when start or end do not fit in an int. Since python does not
    generally care about this we need to implement a clean version of this
    builtin.
    """
    current = start
    while current < end:
        yield current
        current += step
def SafePickle(data):
    """Pickle simple data (tuples/lists/strings etc) at the highest protocol.

    Counterpart of SafeUnpickler. Despite its historical docstring this is
    the *pickling* side; nothing special is done for instances here.
    """
    return pickle.dumps(data, -1)
def SafeUnpickler(data):
    """An unpickler for serialized tuple/lists/strings etc.

    Does not support recovering instances: any pickle that references a
    global (class or function) is rejected.

    Args:
      data: The pickled byte string.

    Returns:
      The recovered primitive object.

    Raises:
      pickle.UnpicklingError: If the payload references any global.
    """
    class RestrictedUnpickler(pickle.Unpickler):
        # Subclassing is required: assigning find_global/find_class on an
        # instance of the C-accelerated Unpickler has no effect, so the
        # original "unpickler.find_global = None" did not restrict anything
        # on Python 3.
        def find_class(self, module, name):
            raise pickle.UnpicklingError(
                "Instances are not supported: %s.%s" % (module, name))

    # Pickled data is bytes; the original io.StringIO wrapper failed on
    # Python 3, so use BytesIO.
    return RestrictedUnpickler(io.BytesIO(data)).load()
def SplitPath(path):
    """Splits the path into a list of components."""
    components = []
    while True:
        path, basename = ntpath.split(path)
        if basename:
            components.append(basename)
            continue

        # No filename part left: keep a non-root head (e.g. a drive) and
        # stop.
        if path and path not in ("\\", "/"):
            components.append(path)

        break

    components.reverse()
    return components
def CaseInsensitiveDictLookup(key, dictionary):
    """Lookup the dictionary with a case insensitive key."""
    # Fast path: exact match.
    result = dictionary.get(key)
    if result is not None:
        return result

    # Slow path: linear scan comparing lowercased keys.
    lowered = key.lower()
    for candidate, value in dictionary.items():
        if candidate.lower() == lowered:
            return value

    return None
def TimeIt(f):
    """Decorator which prints the wall clock duration of each call to f."""
    def NewFunction(self, *args, **kw):
        start = time.time()
        try:
            return f(self, *args, **kw)
        finally:
            # Printed even when f raises.
            print("Took %s sec" % (time.time() - start))

    return NewFunction
def GetStack():
    """Returns the current call stack as a string."""
    frames = traceback.format_stack()
    return "".join(frames)
def InternObject(obj):
    """Copies and interns strings in a recursive object.

    Strings are replaced by their interned versions so equal strings share
    storage; dicts and lists are rebuilt recursively with interned
    contents. Any other object is returned unchanged.
    """
    obj_cls = obj.__class__
    if obj_cls is str:
        return sys.intern(obj)

    # Note: the original code carried a second, unreachable "is str" branch
    # (a leftover of the Python 2 unicode/bytes split after futurization);
    # it has been removed.

    if obj_cls is dict:
        result = {}
        for key, value in obj.items():
            result[InternObject(key)] = InternObject(value)

        return result

    if obj_cls is list:
        return [InternObject(x) for x in obj]

    return obj
class safe_property(property):
    """Re-Raises AttributeError in properties.

    In Python @property swallows AttributeError and calls __getattr__. This is
    rarely what you want because sometime an AttributeError is erronously raised
    from legitimately broken property code and just swallowing it automatically
    can cause weird error messages (e.g. Attribute foobar does not exist, if
    foobar is a property) or even worse, it calls __getattr__ which does
    something completely different.
    """

    def __get__(self, *args, **kwargs):
        try:
            return super(safe_property, self).__get__(*args, **kwargs)
        except AttributeError as e:
            message = "AttributeError raised: %s" % e

            # Re-raise as RuntimeError so the property does not fall through
            # to __getattr__, while actually preserving the original
            # traceback. (The old code merely passed the traceback object as
            # a second exception argument, which does not chain it.)
            raise RuntimeError(message).with_traceback(sys.exc_info()[2])
def EscapeForFilesystem(filename):
    """Creates a filesystem suitable name.

    Very conservative.
    """
    # Normalize to unicode, trim whitespace, replace spaces, then drop
    # anything that is not alphanumeric, dash, underscore or dot.
    normalized = SmartUnicode(filename).strip().replace(" ", "_")
    return re.sub(r"(?u)[^-\w.]", "", normalized)
def get_all_subclasses(base=None):
    """Recursively yield the names of every subclass of *base*."""
    for subclass in base.__subclasses__():
        yield subclass.__name__

        # Descend depth first into each subclass.
        for name in get_all_subclasses(subclass):
            yield name
def join_path(*args):
    """Join components with "/", collapsing duplicate separators and
    stripping leading/trailing slashes."""
    joined = re.sub("/+", "/", "/".join(args))
    return joined.strip("/")
def normpath(path):
    """Normalize *path* to an absolute, single-slash-separated form."""
    return "/" + join_path(*path.split("/"))
def intern_str(string):
    """Intern *string* on both Python 2 and 3.

    On Python 3 intern lives in sys; on Python 2 it is the builtin
    intern().
    """
    if six.PY3:
        return sys.intern(str(string))

    return intern(str(string))
def encode_string(string):
    """Encode a string so that non printables are escaped.

    Returns unicode string.

    e.g. the byte 0x88 inside b"hello" is emitted as the four character
    escape sequence "\\x88".
    """
    byte_string = SmartStr(string)
    result = []
    for c in byte_string:
        c = my_ord(c)
        # NOTE(review): bytes 0x00-0x1f and 0x81-0xff are escaped but 0x80
        # passes through unescaped - possibly "c > 128" was meant to be
        # "c >= 128". Confirm before changing.
        if c > 128 or c < 32:
            # extend() adds the escape one character at a time; harmless
            # since everything is joined at the end.
            result.extend("\\x%02x" % c)
        else:
            result.append(chr(c))

    return "".join(result)
class Deduplicate(object):
    """Deduplicate an iterator.

    Key values which have previously been seen are omitted. The key is
    derived from iterator items by applying a callable on each
    iterated item.
    """

    def __init__(self, iterator, key=lambda x: x):
        self._iterator = iterator
        self._key = key
        self._seen = set()

    def __iter__(self):
        for item in self._iterator:
            item_key = self._key(item)
            if item_key in self._seen:
                continue

            self._seen.add(item_key)
            yield item
from builtins import str
from past.builtins import basestring
from builtins import object
import collections
import json
import yaml
import arrow
import base64
from rekall_lib import registry
from rekall_lib import utils
from future.utils import with_metaclass
import six
# On Python 3 there is no separate unicode type; alias it to str so the
# rest of this module can refer to "unicode" uniformly.
if six.PY3:
    unicode = str
def StripImpl(name):
    """Strip a trailing "Impl" suffix from a class name, if present."""
    suffix = "Impl"
    if name.endswith(suffix):
        return name[:-len(suffix)]

    return name
class Session(object):
    """A session keeps serialization state."""

    # When True, invalid field values encountered while decoding are
    # silently replaced instead of raising (used for reading old data).
    _unstrict_serialization = False
class FieldDescriptor(object):
    """A descriptor for a field.

    The base class is an identity codec: values pass through validation and
    primitive conversion untouched. Type specific subclasses override these
    hooks.
    """

    def __init__(self, descriptor):
        # The raw field declaration dict (name/type/default/...).
        self.descriptor = descriptor

    def validate(self, value, session=None):
        """Check and normalize an assigned value."""
        _ = session
        return value

    def to_primitive(self, value, with_type=True):
        """Convert a validated value to a JSON-friendly form."""
        return value

    def from_primitive(self, value, session=None):
        """Rebuild a value from its primitive form."""
        _ = session
        return value

    def get_default(self, session=None):
        """Return the declared default (None when not declared)."""
        _ = session
        return self.descriptor.get("default")
class IntDescriptor(FieldDescriptor):
    """A field holding an integer; assigned values are coerced with int()."""

    def validate(self, value, session=None):
        _ = session
        return int(value)

    def get_default(self, session=None):
        # Integers default to 0 unless the schema declares otherwise.
        return self.descriptor.get("default", 0)
class BoolDescriptor(FieldDescriptor):
    """A field holding a boolean; non-bool values are coerced via bool()."""

    def validate(self, value, session=None):
        _ = session
        # Genuine bools pass straight through; everything else is coerced.
        if isinstance(value, bool):
            return value

        return bool(value)

    def get_default(self, session=None):
        return self.descriptor.get("default", False)
class FloatDescriptor(FieldDescriptor):
    """A field holding a float; assigned values are coerced with float()."""

    def validate(self, value, session=None):
        _ = session
        return float(value)

    def get_default(self, session=None):
        return self.descriptor.get("default", 0.0)
class EpochDescriptor(FieldDescriptor):
    """A field holding a point in time, serialized as a float epoch."""

    def validate(self, value, session=None):
        # Accept raw epoch seconds or an arrow.Arrow instance.
        if isinstance(value, (float, int)):
            value = arrow.Arrow.fromtimestamp(value)

        elif not isinstance(value, arrow.Arrow):
            raise ValueError("Value must be timestamp or arrow.Arrow instance.")

        return value

    def to_primitive(self, value, with_type=True):
        # The wire form is the float epoch timestamp.
        return value.float_timestamp

    def from_primitive(self, value, session=None):
        _ = session
        return self.validate(value)
class DictDescriptor(FieldDescriptor):
    """A field holding a plain dict."""

    def validate(self, value, session=None):
        _ = session
        if not isinstance(value, dict):
            raise ValueError("Value must be a dict")

        return value

    def get_default(self, session=None):
        # A fresh dict per call, so defaults are never shared.
        return {}
class UnicodeDescriptor(FieldDescriptor):
    """A field holding a unicode string."""

    def validate(self, value, session=None):
        _ = session
        if not isinstance(value, basestring):
            raise ValueError("Value must be unicode string")

        # Normalize to unicode (str on Python 3).
        return str(value)

    def get_default(self, session=None):
        return str(self.descriptor.get("default", ""))
class StringDescriptor(FieldDescriptor):
    """Stores raw bytes, base64-encoded in the primitive (JSON) form."""

    def validate(self, value, session=None):
        _ = session
        if not isinstance(value, basestring):
            raise ValueError("Value must be string")

        # Normalize to a byte string.
        return utils.SmartStr(value)

    def to_primitive(self, value, with_type=True):
        # JSON cannot carry raw bytes, so transport as base64 text.
        return utils.SmartUnicode(base64.b64encode(value))

    def from_primitive(self, value, session=None):
        return base64.b64decode(utils.SmartStr(value))

    def get_default(self, session=None):
        # NOTE(review): returns str() while validate() normalizes to bytes
        # via SmartStr, so the default's type differs from validated
        # values. Confirm whether this should be utils.SmartStr(...).
        return str(self.descriptor.get("default", ""))
class ChoicesDescriptor(FieldDescriptor):
    """A field restricted to a fixed set of allowed values."""

    def validate(self, value, session=None):
        _ = session
        choices = self.descriptor["choices"]
        # Choices may be declared as a callable to defer their evaluation.
        if callable(choices):
            choices = choices()

        if value not in choices:
            raise ValueError("Value must be one of %s" % choices)

        return str(value)
class NestedDescriptor(FieldDescriptor):
    """A nested field type.

    Concrete subclasses are generated by SerializedObjectCompiler, one per
    SerializedObject subclass, with "nested" set to that class's name.
    """

    # The SerializedObject class name for the nested object.
    nested = None

    def validate(self, value, session=None):
        # Check that the assigned value is a subclass of the nested class.
        nested_cls = SerializedObject.ImplementationByClass(self.nested)

        # Direct assignment of the correct type.
        if value.__class__ is nested_cls:
            return value

        # Assign a dict to this object, parse from primitive.
        # NOTE: the repeated "int, int" is a harmless leftover of the py2
        # (long, int) pair after futurization.
        elif isinstance(value, (dict, basestring, int, int, float)):
            return nested_cls.from_primitive(value, session=session)

        # A subclass is assigned.
        elif issubclass(value.__class__, nested_cls):
            return value

        raise ValueError("value is not valid.")

    def to_primitive(self, value, with_type=True):
        result = value.to_primitive(with_type=with_type)

        # If we are actually containing a subclass of the nested class then make
        # sure to mark the data with the full class name so it can be properly
        # unserialized.
        if value.__class__.__name__ != self.nested:
            result["__type__"] = StripImpl(value.__class__.__name__)

        return result

    def from_primitive(self, value, session=None):
        if isinstance(value, SerializedObject):
            return value

        if isinstance(value, dict):
            # Support instantiating a derived class from the raw data.
            value_cls_name = value.get("__type__", self.nested)

            # Allow specialized implementations for serializable types.
            value_cls = SerializedObject.get_implemetation(value_cls_name)
            if value_cls is None:
                raise TypeError(
                    "Unknown implementation for %s" % value_cls_name)

            # Validate that the value is an instance of the nested class.
            nested_cls = SerializedObject.ImplementationByClass(self.nested)
            if not issubclass(value_cls, nested_cls):
                raise TypeError(
                    "Object %s can not be initialized from type %s" %
                    (self.nested, value_cls_name))

            # Strip the type marker before handing the dict to the class.
            value = value.copy()
            value.pop("__type__", None)

            return value_cls.from_primitive(value, session=session)

        nested_cls = SerializedObject.get_implemetation(self.nested)
        return nested_cls.from_primitive(value, session=session)

    def get_default(self, session=None):
        # A fresh empty instance of the nested type.
        return SerializedObject.get_implemetation(self.nested)(
            session=session)
class RepeatedHelper(list):
    """A list of validated items backing a repeated field.

    Items are validated by the element descriptor on the way in, and
    registered callbacks fire when the list is modified.
    """

    def __init__(self, descriptor, initializer=None, session=None):
        super(RepeatedHelper, self).__init__(initializer or [])
        self.descriptor = descriptor
        self._hooks = []
        if not session:
            session = Session()

        self._session = session

    def add_update_cb(self, cb):
        # Register a callback fired when this list is next modified.
        self._hooks.append(cb)

    def to_primitive(self, with_type=True):
        """Convert to a plain list of primitive items.

        NOTE(review): calls to_primitive() on each element, i.e. assumes
        the items are SerializedObjects - confirm scalar repeated fields
        always go through RepeatedDescriptor.to_primitive instead.
        """
        result = []
        for x in self:
            result.append(x.to_primitive(with_type=with_type))

        return result

    def _signal_modified(self):
        """Signal all interested parties that this object is modified."""
        if self._hooks:
            for hook in self._hooks:
                hook()

            # NOTE(review): assigns "_hook" (singular), which nothing
            # reads - probably intended to clear self._hooks after firing.
            # Confirm before changing, since a fix alters hook lifetime.
            self._hook = []

    def append(self, item):
        # Validate on the way in so the list only holds checked values.
        item = self.descriptor.validate(item, session=self._session)
        super(RepeatedHelper, self).append(item)
        self._signal_modified()

    def extend(self, items):
        items = [self.descriptor.validate(x, session=self._session)
                 for x in items]
        super(RepeatedHelper, self).extend(items)
        self._signal_modified()
class RepeatedDescriptor(FieldDescriptor):
    """Described repeated fields.

    Wraps the element type's descriptor and produces RepeatedHelper lists.
    """

    def __init__(self, descriptor):
        super(RepeatedDescriptor, self).__init__(descriptor)
        field_type = descriptor.get("type", "unicode")
        field_name = descriptor["name"]

        # If the type is a class then check the name in the dispatcher.
        if isinstance(field_type, type):
            field_type = DISPATCHER.get(field_type.__name__)
        else:
            field_type = DISPATCHER.get(field_type)

        if field_type is None:
            raise TypeError("Unknown type for field %s" % field_name)

        # The per-element descriptor used to validate/convert each item.
        self.descriptor_obj = field_type(self.descriptor)

    def validate(self, value, session=None):
        return RepeatedHelper(
            self.descriptor_obj,
            [self.descriptor_obj.validate(x, session=session)
             for x in value],
            session=session)

    def to_primitive(self, value, with_type=True):
        return [self.descriptor_obj.to_primitive(
            x, with_type=with_type) for x in value]

    def from_primitive(self, value, session=None):
        if not isinstance(value, (list, tuple)):
            raise TypeError(
                "Nested Field %s can only be initialized from lists" %
                (self.descriptor["name"]))

        return RepeatedHelper(
            self.descriptor_obj,
            [self.descriptor_obj.from_primitive(x, session=session)
             for x in value],
            session=session)

    def get_default(self, session=None):
        if "default" in self.descriptor:
            # NOTE(review): returns a plain list copy rather than a
            # RepeatedHelper - confirm callers tolerate the difference.
            return self.descriptor["default"][:]

        return RepeatedHelper(self.descriptor_obj, session=session)
# This dispatches the class implementing as declared type. The
# dispatcher maps the declared field type to the descriptor which
# handles it. NestedDescriptor subclasses for each SerializedObject type
# are added here by SerializedObjectCompiler as classes are defined.
DISPATCHER = dict(
    int=IntDescriptor,
    choices=ChoicesDescriptor,
    epoch=EpochDescriptor,
    dict=DictDescriptor,
    bool=BoolDescriptor,
    float=FloatDescriptor,
    unicode=UnicodeDescriptor,
    str=StringDescriptor,
    bytes=StringDescriptor,
)
class SerializedObjectCompiler(registry.MetaclassRegistry):
    """Compile the SerializedObject class after it is defined.

    The user specifies the schema when they declare the class. We then create
    field descriptors for all declared fields and automatically insert accessors
    for all fields.
    """

    def __new__(mcs, cls_name, parents, dct):
        """We parse the schema and create accessors for fields."""
        # Parse the schema and add properties for all fields.
        descriptors = collections.OrderedDict()

        # Inherit the descriptors declared by all parent classes first.
        for parent in parents:
            descriptors.update(getattr(parent, "_descriptors", {}))

        for field in dct.get("schema", []):
            field_name = field["name"]
            field_type = field.get("type", "unicode")
            repeated = field.get("repeated")
            if isinstance(field_type, basestring):
                field_type = DISPATCHER.get(field_type)

            elif issubclass(field_type, SerializedObject):
                # A SerializedObject subclass used directly as the type.
                field_type = DISPATCHER.get(field_type.__name__)

            if field_type is None:
                raise TypeError("Unknown field %s" % field)

            if not issubclass(field_type, FieldDescriptor):
                raise TypeError("Unsupported field type %s" % field)

            if repeated:
                descriptors[field_name] = RepeatedDescriptor(field)
            else:
                descriptors[field_name] = field_type(field)

            # field_name is bound via default arguments to sidestep the
            # late-binding closure pitfall.
            getter = lambda self, n=field_name: self.GetMember(n)
            setter = lambda self, v, n=field_name: self.SetMember(n, v)
            dct[field_name] = utils.safe_property(
                getter, setter, None, field_name)

        # The descriptors for all the fields.
        dct["_descriptors"] = descriptors

        # Add a new descriptor type for fields that declare this type.
        DISPATCHER[cls_name] = type("%sDescriptor" % cls_name,
                                    (NestedDescriptor, ),
                                    dict(nested=cls_name))

        return super(SerializedObjectCompiler, mcs).__new__(
            mcs, cls_name, parents, dct)
class SerializedObject(with_metaclass(SerializedObjectCompiler, object)):
    """An object with a fixed schema which can be easily serialized."""

    # This contains the object's schema: a list of field declaration dicts
    # (name/type/default/repeated) compiled by SerializedObjectCompiler.
    schema = [
    ]
def __init__(self, session=None):
if session is None:
session = Session()
self._data = {}
self._session = session
self._hooks = []
self._unknowns = {}
    @classmethod
    def get_implemetation(cls, name):
        """Gets a class implementing the name specified.

        (The misspelling "implemetation" is kept for API compatibility.)

        In order to implement the Pimpl pattern we allow implementations to
        define classes implementing an simple serializable type.

        For example, if a serializable type is:
          class Foo(SerializedObject):...

        Then we may implement this object in another file like:
          class FooImpl(Foo): ...

        Unserializing will then choose the implementation over the base type
        when creating it from the Raw JSON. For example, the following JSON
        object will actually contain an instance if FooImpl:
          {"__type__": "Foo"}

        We need PIMPL in order to separate the definition of the
        SerializedObject which may need to be used in code which is not capable
        of running any of the methods offered by the baseclasses (but may still
        need to create and serialize such objects).

        The receiver of the serialized JSON object will then instantiate the
        object with concrete implementations.

        For example: in common code between client and server:
          class Foo(SerializedObject):
            schema = [
             ...
            ]

            def some_method(self):
                raise NotImplementedError()

        Clients can import this code and not have to have concrete
        implementations for the methods. In Client code:
          x = Foo.from_keywords(foo=1, bar2=2)
          send x.to_primitive() ->  {"__type__": "Foo", "foo": 1, "bar": 2}

        Then the server will define the actual implementation:
          class FooImpl(Foo):
            def some_method(self):
                .....  <- real implementation

        and can then simply parse it and receive the implementation:
          x = serializer.unserialize(json_dict)
          x.some_method()  <-- Run the FooImpl.some_method()

        For now we keep it really simple: An implementation class name must have
        the suffix "Impl" which implements the base SerializedObject and must
        also inherit from it.
        """
        # Fallback to the base type if not available.
        base_cls = cls.ImplementationByClass(name)

        # Match an implementation if that is available.
        result = cls.ImplementationByClass(name + "Impl")
        if result is not None:
            # An Impl must actually derive from the base it claims to
            # implement.
            if not issubclass(result, base_cls):
                raise AttributeError(
                    "Class Implementation %s must inherit from %s" % (
                        result, name))

            return result
        else:
            return base_cls
@classmethod
def from_keywords(cls, session=None, **kwargs):
if session is None:
session = Session()
result = cls(session=session)
for k, v in kwargs.items():
result.SetMember(k, v)
return result
    def copy(self):
        """Make a copy of this message."""
        # Round-trip through the primitive (dict) form for a deep copy.
        return self.__class__.from_primitive(
            session=self._session, data=self.to_primitive())
    def add_update_cb(self, cb):
        # Register a callback fired when this object is next modified.
        self._hooks.append(cb)
    def _signal_modified(self):
        """Signal all interested parties that this object is modified."""
        if self._hooks:
            for hook in self._hooks:
                hook()

            # NOTE(review): assigns "_hook" (singular), which nothing
            # reads - probably intended to clear self._hooks after firing.
            # Confirm before changing, since a fix alters hook lifetime.
            self._hook = []
@classmethod
def get_descriptors(cls):
return [x.descriptor for x in cls._descriptors.values()]
    def HasMember(self, name):
        # True only for explicitly-set fields; defaults do not count.
        return name in self._data
def GetMultiName(self, names):
item = self
for name in names:
item = item.GetMember(name)
if item is None:
break
return item
    def GetMember(self, name, get_default=True):
        """Return field *name*, materializing its default when unset.

        Dotted names traverse nested objects. Mutable defaults are wired so
        that modifying them writes them back into this object.
        """
        if "." in name:
            return self.GetMultiName(name.split("."))

        result = self._data.get(name)
        if result is None and get_default:
            default = self._descriptors[name].get_default(
                session=self._session)
            if isinstance(default, (SerializedObject, RepeatedHelper)):
                # Persist the default only once it is actually modified.
                default.add_update_cb(
                    lambda n=name, d=default: self.SetMember(n, d))

            elif isinstance(default, dict):
                # Dicts cannot signal modification - persist them eagerly.
                self.SetMember(name, default)

            return default

        return result
def SetMember(self, name, value):
    """Assign *value* to field *name*, validating it via the descriptor.

    Raises:
      ValueError: if the field does not exist, or (in strict mode) the
        value fails validation.
    """
    self._signal_modified()

    # Setting to None deletes the field.
    if value is None:
        self._data.pop(name, None)
        return

    try:
        value = self._descriptors[name].validate(
            value, session=self._session)
    except ValueError as e:
        # When decoding old data, we do not want to raise an error if the
        # field is invalid. This can happen if the field definition has
        # since changed. In that case we would rather set the field to None
        # than to have invalid data in that field.
        # When used normally, this code should raise because it is called
        # during member assignments.
        if not self._session._unstrict_serialization:
            raise ValueError("While validating %s.%s: %s" % (
                self.__class__.__name__, name, e))

        # BUG FIX: this assignment belongs inside the permissive ValueError
        # path. In the source it sat at statement level after the handlers,
        # which would clobber every successfully validated value with None.
        value = None
    except KeyError:
        raise ValueError("Unable to set member %s in %s: No such field." %
                         (name, self.__class__.__name__))

    self._data[name] = value
def set_unknown(self, k, v):
    """Preserve a field we have no descriptor for (kept for round-trips)."""
    self._unknowns[k] = v
def iteritems(self):
    """Yield (name, value) for every field with an explicitly set value."""
    for field_name in self._descriptors:
        stored = self.GetMember(field_name, get_default=False)
        if stored is None:
            continue

        yield field_name, stored
def update(self, _other=None, **kwargs):
    """Set several members at once, dict.update() style."""
    if _other:
        kwargs.update(_other)

    for field_name in kwargs:
        self.SetMember(field_name, kwargs[field_name])
def merge(self, other):
    """Merge the other object into this one.

    Nested SerializedObject fields are merged recursively; every other
    field is simply overwritten with the other object's value.
    """
    for k, v in other.items():
        if isinstance(v, SerializedObject):
            self.GetMember(k).merge(v)
        else:
            self.SetMember(k, v)

    return self
def to_primitive(self, with_type=True):
    """Convert ourselves to a dict."""
    # Start from the preserved unknown fields so they round-trip.
    primitive = dict(self._unknowns)
    for field_name, field_value in self.iteritems():
        descriptor = self._descriptors[field_name]
        primitive[field_name] = descriptor.to_primitive(
            field_value, with_type=with_type)

    if with_type:
        primitive["__type__"] = StripImpl(self.__class__.__name__)

    return primitive
def to_json(self):
    """Serialize to a JSON string (keys sorted for stable output)."""
    return json.dumps(self.to_primitive(), sort_keys=True)
@classmethod
def from_json(cls, json_string, session=None, strict_parsing=False):
    """Deserialize a JSON string into an instance of this class."""
    # An empty or None string is treated as the empty object.
    data = json.loads(utils.SmartUnicode(json_string) or "{}")
    return unserialize(data, session=session,
                       strict_parsing=strict_parsing, type=cls)
@classmethod
def from_primitive(cls, data, session=None):
    """Load ourselves from a pure dict."""
    if not data:
        data = {}

    # Already deserialized - pass through unchanged.
    if isinstance(data, SerializedObject):
        return data

    if not isinstance(data, dict):
        raise ValueError("Must be initialized from dict")

    # The embedded __type__ key selects the concrete implementation, which
    # must be this class or one of its subclasses.
    # NOTE: "get_implemetation" (sic) is the API name used throughout.
    cls_type = data.get("__type__", cls.__name__)
    data_cls = cls.get_implemetation(cls_type)
    if data_cls is None or not issubclass(data_cls, cls):
        raise ValueError(
            "Incompatible class types: %s != %s (Should be inherited)" % (
                cls_type, cls.__name__))

    result = data_cls(session=session)
    for k, v in data.items():
        if k == "__type__":
            continue

        descriptor = data_cls._descriptors.get(k)
        if descriptor is None:
            if session and not session._unstrict_serialization:
                raise ValueError("Unknown field %s.%s" % (
                    data_cls.__name__, k))

            # We do not know about this field, we preserve it but do not set
            # it.
            result.set_unknown(k, v)
        else:
            result.SetMember(k, descriptor.from_primitive(
                v, session=session))

    return result
def __bool__(self):
    # Truthy when at least one field has been explicitly set.
    return bool(self._data)
def __eq__(self, other):
    # Only objects of exactly the same class can compare equal;
    # equality is then defined by the explicitly stored field data.
    if self.__class__ is not other.__class__:
        return False

    return self._data == other._data
def __unicode__(self):
    # Render via the primitive (dict) representation.
    return str(self.to_primitive())
def __repr__(self):
    # Show the primitive (dict) form - convenient for debugging.
    return repr(self.to_primitive())
def __setattr__(self, item, value):
    """Guard against typos: only declared fields or private (leading
    underscore) attributes may be assigned."""
    if not item.startswith("_") and item not in self._descriptors:
        raise AttributeError("Invalid field %s" % item)

    super(SerializedObject, self).__setattr__(item, value)
def cast(self, target_cls):
    """Cast the current object into the target class.

    This method forces this object to be converted to the target class. This
    means that all data fields on this object will be assigned to the target
    class if it supports these fields. Fields which are not supported by the
    target class will be ignored.
    """
    # Round-trip through the primitive form without the __type__ marker so
    # the target class accepts the data as its own.
    return target_cls.from_primitive(
        self.to_primitive(False), session=self._session)
class OrderedYamlDict(yaml.YAMLObject, collections.OrderedDict):
    """A class which produces an ordered dict.

    Registers itself for the generic YAML mapping tag so that mappings are
    both dumped and loaded preserving key order.
    """
    yaml_tag = "tag:yaml.org,2002:map"

    @classmethod
    def to_yaml(cls, dumper, data):
        # Build the mapping node manually so key order is preserved on dump.
        value = []
        node = yaml.nodes.MappingNode(cls.yaml_tag, value)
        for key, item in data.items():
            node_key = dumper.represent_data(key)
            node_value = dumper.represent_data(item)
            value.append((node_key, node_value))

        return node

    @classmethod
    def construct_mapping(cls, loader, node, deep=False):
        """Based on yaml.loader.BaseConstructor.construct_mapping."""
        if not isinstance(node, yaml.MappingNode):
            raise yaml.loader.ConstructorError(
                None, None, "expected a mapping node, but found %s" % node.id,
                node.start_mark)

        mapping = OrderedYamlDict()
        for key_node, value_node in node.value:
            key = loader.construct_object(key_node, deep=deep)
            try:
                # YAML mapping keys must be hashable to be dict keys.
                hash(key)
            except TypeError as exc:
                raise yaml.loader.ConstructorError(
                    "while constructing a mapping", node.start_mark,
                    "found unacceptable key (%s)" % exc, key_node.start_mark)

            value = loader.construct_object(value_node, deep=deep)
            mapping[key] = value

        return mapping

    @classmethod
    def from_yaml(cls, loader, node):
        """Parse the yaml file into an OrderedDict so we can preserve order."""
        fields = cls.construct_mapping(loader, node, deep=True)
        result = cls()
        for k, v in list(fields.items()):
            result[k] = v

        return result
def load_from_dicts(data, names=None):
    """Loads definitions from a yaml file.

    Returns a dict mapping class names to class implementations.
    """
    # If not specified define all the classes.
    if names is None:
        names = list(data.keys())

    return {
        class_name: type(class_name, (SerializedObject,),
                         dict(schema=data[class_name]))
        for class_name in names
    }
def unserialize(data, session=None, strict_parsing=True, type=None):
    """Unserialize a dict into a SerializedObject.

    Args:
      strict_parsing: If disabled we silently drop invalid field assignments
        instead of raising exceptions. This is useful when the system likely
        to generate the data has changed its definitions.
      type: Optional class to deserialize into, bypassing the __type__
        lookup. (Shadows the builtin - name kept for API compatibility.)
    """
    # NOTE(review): basestring is a Python 2 name; this assumes a
    # `from past.builtins import basestring`-style import at the top of
    # this file -- TODO confirm, otherwise this raises NameError on py3.
    if isinstance(data, basestring):
        data = json.loads(data)

    impl = type
    if impl is None:
        if not isinstance(data, dict) or "__type__" not in data:
            raise ValueError(
                "Unserialize is only possible from typed serialized dict.")

        type_name = data["__type__"]
        impl = SerializedObject.get_implemetation(type_name)

    if impl is None:
        raise ValueError(
            "No implementation for serialized type %s" % data["__type__"])

    if session is None:
        session = Session()

    if strict_parsing:
        return impl.from_primitive(data, session=session)

    # Permissive mode: temporarily mark the session so validation errors
    # are tolerated, and always restore the flag.
    session._unstrict_serialization = True
    try:
        return impl.from_primitive(data, session=session)
    finally:
        session._unstrict_serialization = False
def robust_unserialize(data, default=None):
    """Unserialize *data*, returning *default* on any ValueError."""
    try:
        result = unserialize(data)
    except ValueError:
        result = default

    return result
from builtins import object
from rekall_lib import serializer
class Status(object):
    """Represents the status of a network operation."""

    def __init__(self, code=200, data=""):
        # Defaults model a successful (HTTP-style 200) empty response.
        self.code, self.data = code, data

    def ok(self):
        """Report whether the operation completed successfully."""
        return self.code == 200
class Location(serializer.SerializedObject):
    """A type specifying a location to upload/download files.

    Subclasses implement the actual transport (local file, HTTP, ...).
    """

    # This one object can represent a number of location types.
    schema = []

    def to_path(self):
        # The base location has no path representation.
        return ""

    def read_file(self):
        """Gets the contents of location as a string."""
        raise NotImplementedError()

    def write_file(self, data):
        """Writes data to the location."""
        raise NotImplementedError()
class DevNull(Location):
    """Just swallow all data."""

    def write_file(self, data):
        # Intentionally discard everything.
        pass

    def read_file(self):
        # Always reads back empty.
        return ""
class FileLocation(Location):
    """A Location specifier that handles file paths on the local filesystem.

    Note that this does not work remotely and so it is mostly useful for
    tests.
    """
    schema = [
        dict(name="path_prefix",
             doc="The path prefix to enforce."),
        dict(name="path_template", default="",
             doc="The path template to expand."),
    ]
class HTTPLocation(Location):
    """A Location specifier that manages a remote HTTP connection.

    This location uses simple GET/POST to read/write smallish files over the
    network.
    """
    schema = [
        dict(name="base",
             doc="The base URL of the server."),
        dict(name="path_prefix",
             doc="The path to load"),
        dict(name="path_template", default="/",
             doc="The path template to expand."),
    ]
class BlobUploadSpecs(serializer.SerializedObject):
    """Sent by the server in the first BlobUploader exchange."""
    schema = [
        dict(name="url",
             doc="The URL to upload to."),
        dict(name="method", default="POST",
             doc="The method to upload (currently only POST)"),
        dict(name="name", default="file",
             doc="The uploaded filename to use"),
    ]
class BlobUploader(HTTPLocation):
    """An uploader of blobs.

    In order to upload a blob, the client needs to contact the server to
    receive an upload URL. This happens in two steps:

    1) The server initiates a read_file() request as per the HTTPLocation
       above. The response is parsed as a BlobUploadSpecs.
    2) The spec is used to perform the actual upload.
    """
class FileInformation(serializer.SerializedObject):
    """Basic metadata (name and size) about a single file."""
    schema = [
        dict(name="filename"),
        dict(name="st_size", type="int"),
    ]
class FileUploadRequest(serializer.SerializedObject):
    """A client request to upload a file as part of a flow."""
    schema = [
        dict(name="flow_id"),
        dict(name="file_information", type=FileInformation),
        dict(name="sha1hash", doc="Used for deduping the file uploads")
    ]
class FileUploadResponse(serializer.SerializedObject):
    """The server's answer to a FileUploadRequest."""
    schema = [
        # "Skip" is returned when the server already has this file
        # (deduplicated via the request's sha1hash).
        dict(name="action", type="choices",
             choices=["Upload", "Skip"], default="Upload"),
        dict(name="url"),
    ]
class FileUploadLocation(HTTPLocation):
    """An HTTP location used for uploading files belonging to a flow."""
    schema = [
        dict(name="flow_id"),
    ]
class NotificationLocation(HTTPLocation):
    """A location which notifies when it changes."""

    def Start(self, callback):
        """Start the loop. Never returns.

        callback is called with each notification.
        """
        raise NotImplementedError()
from rekall_lib import yaml_utils
from rekall_lib.rekall_types import location
from rekall_lib.rekall_types import actions
from rekall_lib import serializer
ClientInformation = Uname = None
# It is more convenient to load the following automatically from yaml data.
specs = yaml_utils.ordered_load("""
ClientInformation:
- name: client_name
type: unicode
- name: client_version
type: int
- name: revision
type: int
- name: build_time
type: unicode
- name: client_description
type: unicode
- name: labels
repeated: true
type: unicode
CpuSample:
- name: user_cpu_time
type: int
- name: system_cpu_time
type: int
- name: cpu_percent
type: int
- name: timestamp
type: epoch
doc: The time of this sample.
IOSample:
- name: read_count
type: int
- name: write_count
type: int
- name: read_bytes
type: int
- name: write_bytes
type: int
- name: timestamp
type: epoch
doc: The time of this sample.
ClientStats:
- name: cpu_samples
repeated: true
type: CpuSample
- name: RSS_size
type: int
- name: VMS_size
type: int
- name: memory_percent
type: int
- name: bytes_received
type: int
- name: bytes_sent
type: int
- name: io_samples
repeated: true
type: IOSample
- name: create_time
type: int
- name: boot_time
type: int
CpuSeconds:
- name: user_cpu_time
type: int
- name: system_cpu_time
type: int
Uname:
- name: system
type: unicode
doc: The system platform (Windows|Darwin|Linux).
- name: node
type: unicode
doc: The hostname of this system.
- name: release
type: unicode
doc: The OS release identifier e.g. 7, OSX, debian.
- name: version
type: unicode
doc: The OS version ID e.g. 6.1.7601SP1, 10.9.2, 14.04.
- name: machine
type: unicode
doc: The system architecture e.g. AMD64, x86_64.
- name: kernel
type: unicode
doc: The kernel version string e.g. 6.1.7601, 13.1.0, 3.15-rc2.
- name: fqdn
type: unicode
doc: "The system's fully qualified domain name."
- name: install_date
type: epoch
doc: When system was installed.
- name: libc_ver
type: unicode
doc: The C library version
- name: architecture
type: unicode
doc: The architecture of this binary. (Note this can be different from the machine
architecture in the case of a 32 bit binary running on a 64 bit system)
- name: pep425tag
type: unicode
doc: The unique signature of this python system (as defined by PEP425 tags).
""")
# Inject the definitions into this module's namespace.
globals().update(serializer.load_from_dicts(specs))
class StartupMessage(serializer.SerializedObject):
    """A ticket written to the startup queue."""
    schema = [
        dict(name="client_id"),
        dict(name="client_info", type=ClientInformation,
             doc="Information about the client agent itself."),
        dict(name="boot_time", type="epoch",
             doc="Time the system booted last."),
        dict(name="agent_start_time", type="epoch",
             doc="Time the agent started."),
        dict(name="timestamp", type="epoch",
             doc="The timestamp this message was created."),
        dict(name="system_info", type=Uname,
             doc="Information about the running system"),
        dict(name="labels", repeated=True),
    ]
class StartupAction(actions.Action):
    """The startup message.

    When the client starts up it sends a message to the server containing
    vital information about itself. This allows the client to self enroll
    without any server action at all. The workflow is:

    1) The client reads the deployment manifest file. The manifest is
       validated.
    2) The manifest file contains a Flow specifying to run the StartupAction.
    3) The client prepares and populates a StartupMessageBatch() message.
    4) The client writes the StartupMessageTicket() message to its specified
       Location.
    5) The client proceeds to poll for its jobs queue. The client is now
       enrolled.

    In the server an EnrollerBatch runs collecting the StartupMessage
    messages and updating the relevant ClientInformation() objects at the
    client's VFS path.

    Using this information the client may be tasked with new flows.

    This enrollment scheme has several benefits:

    1) It does not depend on server load. Clients are enrolled immediately
       and do not need to wait for the server to do anything.
    2) The interrogate step is done at once at startup time every time. The
       system therefore has a fresh view of all clients all the time. Unlike
       GRR which runs the interrogate flow weekly it is not necessary to wait
       for an interrogation in order to view fresh client information.
    3) We can handle a huge influx of enrollment messages with minimal server
       resources. While agents are immediately enrolled, the rate at which
       clients can be tasked depends only on the rate at which the
       EnrollerBatch() can process through them.

    This is important when the system is first deployed because at that time
    all the new clients will be attempting to communicate at the same time.
    """
    schema = [
        dict(name="location", type=location.Location)
    ]
import time
from rekall_lib import serializer
from rekall_lib.rekall_types import actions
from rekall_lib.rekall_types import location
from rekall_lib.rekall_types import resources
class Status(serializer.SerializedObject):
    """The lifecycle status of a flow, as reported by the client."""
    schema = [
        dict(name="timestamp", type="epoch"),
        dict(name="status", type="choices",
             choices=["Pending", "Started", "Done", "Error", "Crash"]),
        dict(name="error",
             doc="If an error occurred, here will be the error message."),
        dict(name="backtrace",
             doc="If an error occurred, here will be the backtrace."),
    ]
class FlowStatus(Status):
    """Information about flow's progress.

    As the agent works through the flow, this ticket will be updated with
    status information.
    """
    schema = [
        dict(name="client_id"),
        dict(name="flow_id"),
        dict(name="quota", type=resources.Quota,
             doc="The total resources used until now."),
        dict(name="logs", repeated=True,
             doc="Log lines from the client."),
        dict(name="current_action", type=actions.Action,
             doc="The currently running client action."),
        dict(name="collection_ids", repeated=True,
             doc="The collections produced by the flow."),
        dict(name="total_uploaded_files", type="int", default=0,
             doc="Total number of files we uploaded."),
        dict(name="files", type=location.Location, repeated=True, hidden=True,
             doc="The list of files uploaded."),
    ]
class HuntStatus(serializer.SerializedObject):
    """High level information about the hunt."""
    # Aggregate counters across all clients participating in the hunt.
    schema = [
        dict(name="total_clients", type="int"),
        dict(name="total_success", type="int"),
        dict(name="total_errors", type="int"),
    ]
class Ticket(serializer.SerializedObject):
    """Baseclass for all tickets.

    A Ticket is sent by the client to modify the flow status. Tickets are
    sent when the flow status changes, such as when it is accepted,
    completed or progressed.
    """
    schema = [
        dict(name="location", type=location.Location,
             doc="Where the ticket should be written."),
    ]

    def send_status(self, status):
        """Send a complete response to the specified location.

        Args: status is a Status() instance.
        """
        # Stamp the status with the send time before writing it out.
        status.timestamp = time.time()
        self.location.write_file(status.to_json())
class Flow(serializer.SerializedObject):
    """A Flow is a sequence of client actions.

    To launch a flow simply build a Flow object and call its start() method.
    """
    schema = [
        dict(name="name",
             doc="A name for this flow"),
        dict(name="client_id",
             doc="A client id to target this flow on."),
        dict(name="queue",
             doc="A queue to launch this on. When specified this flow is "
             "run as a hunt."),
        dict(name="flow_id",
             doc="Unique ID of this flow, will be populated when launched."),
        dict(name="condition",
             doc="An EFilter query to evaluate if the flow should be run."),
        dict(name="created_time", type="epoch",
             doc="When the flow was created."),
        dict(name="creator", private=True,
             doc="The user that created this flow."),
        dict(name="ttl", type="int", default=60*60*24,
             doc="How long should this flow remain active."),
        dict(name="ticket", type=Ticket,
             doc="Ticket keeping the state of this flow."),
        dict(name="actions", type=actions.Action, repeated=True,
             doc="The action requests sent to the client."),
        dict(name="quota", type=resources.Quota,
             doc="The total resources the flow is allowed to use."),
        dict(name="status", type=FlowStatus,
             doc="The final status of this flow - to be sent to the ticket."),
        dict(name="file_upload", type=location.Location,
             doc="If included, we use this location to upload files to."),
    ]

    def is_hunt(self):
        """Is this flow running as a hunt?"""
        # Returns the queue value itself (truthy when set) rather than a
        # strict bool - callers appear to use it in boolean context.
        return self.queue

    def generate_actions(self):
        """Yields one or more Action() objects.

        Should be overridden by derived classes.
        """
        return []

    def start(self):
        """Launch the flow."""
        raise NotImplementedError()

    def expiration(self):
        # Absolute expiry time: now plus the flow's time-to-live.
        return time.time() + self.ttl
class CannedFlow(serializer.SerializedObject):
    """A canned flow can be used to make a flow object."""
    schema = [
        dict(name="name"),
        dict(name="description"),
        dict(name="category"),
        dict(name="actions", type=actions.Action, repeated=True)
    ]
class LastClientState(serializer.SerializedObject):
    """Information kept about the last client ping."""
    # Consistency fix: every other schema in this module documents fields
    # with the "doc" key; these used "comment", presumably a typo -
    # confirm against the descriptor implementation.
    schema = [
        dict(name="timestamp", type="epoch",
             doc="The last time the client pinged us."),
        dict(name="latlong",
             doc="The location from where the request came from."),
        dict(name="city",
             doc="The city where the request came from"),
        dict(name="ip",
             doc="The IP address where the request came from"),
    ]
class JobFile(serializer.SerializedObject):
    """The contents of the jobs file.

    The job file contains a list of flows to execute. Each flow contains a
    list of client actions.
    """
    schema = [
        dict(name="flows", type=Flow, repeated=True,
             doc="A list of flows issued to this client."),
    ]
class Manifest(serializer.SerializedObject):
    """The manifest is the first thing retrieved from the server.

    The client uses this to authenticate the server and run the startup
    flow.
    """
    schema = [
        dict(name="startup", type=Flow,
             doc="The initial flow to run when connecting to the server."),
    ]
class PluginConfiguration(serializer.SerializedObject):
    """Plugin specific configuration.

    Base type - concrete plugins extend the (empty) schema.
    """
    schema = []
class ClientPolicy(serializer.SerializedObject):
    """The persistent state of the agent."""
    schema = [
        dict(name="manifest_location", type=location.Location,
             doc="The location of the installation manifest file. "
             "NOTE: This must be unauthenticated because it contains "
             "information required to initialize the connection."),
        dict(name="writeback_path",
             doc="Any persistent changes will be written to this location."),
        dict(name="labels", repeated=True,
             doc="A set of labels for this client."),
        # Consistency fix: this entry used help= where every other field in
        # the module uses doc=.
        dict(name="poll", type="bool", default=True,
             doc="If set, the agent will poll the server for new jobs. "
             "Otherwise the agent will poll once and exit."),
        # Copy/paste fix: poll_min and poll_max previously carried the same
        # description; make the min/max roles explicit.
        dict(name="poll_min", type="int", default=5,
             doc="The minimum time to wait between polls of the server."),
        dict(name="poll_max", type="int", default=60,
             doc="The maximum time to wait between polls of the server."),
        dict(name="notifier", type=location.NotificationLocation,
             doc="If this is set we use the notifier to also "
             "control poll rate."),
        dict(name="plugins", type=PluginConfiguration, repeated=True,
             doc="Free form plugin specific configuration."),
        dict(name="secret", default="",
             doc="A shared secret between the client and server. "
             "This is used to share data with all clients but "
             "hide it from others.")
    ]
class ServerPolicy(serializer.SerializedObject):
    """The configuration of all server side batch jobs.

    There are many ways to organize the agent's server side code. Although
    inherently the Rekall agent is all about transferring files to the
    server, there has to be a systemic arrangement of where to store these
    files and how to deliver them (i.e. the Location object's
    specification).

    The final choice of Location objects is therefore implemented via the
    ServerPolicy object. Depending on the type of deployment, different
    parameters will be required, but ultimately the ServerPolicy object will
    be responsible to produce the required Location objects.

    This is the baseclass of all ServerPolicy objects.
    """
    schema = []
class Configuration(serializer.SerializedObject):
    """The agent configuration system.

    Both client side and server side configuration exist here, but on
    clients, the server side will be omitted.
    """
    schema = [
        dict(name="server", type=ServerPolicy,
             doc="The server's configuration."),
        dict(name="client", type=ClientPolicy,
             doc="The client's configuration."),
    ]
class AuditMessage(serializer.SerializedObject):
    """An audit message written in the audit log."""
    schema = [
        dict(name="format",
             doc="Format string to format the audit message."),
        dict(name="user",
             doc="The user who made the request."),
        dict(name="token_id",
             doc="The token that was used to make this request - if any."),
    ]

    def format_message(self):
        # %-format the template against the dict of primitive fields, e.g.
        # "%(user)s did something" style placeholders.
        return self.format % self.to_primitive()
# This file is produced when the main "version.py update" command is run. That
# command copies this file to all sub-packages which contain
# setup.py. Configuration is maintain in version.yaml at the project's top
# level.
def get_versions():
    """Return the static version metadata tagged with live git information."""
    return tag_version_data(raw_versions(), """version.yaml""")
def raw_versions():
    """Return the static version metadata embedded at release time."""
    embedded = """
{
    "codename": "Hurricane Ridge",
    "post": "0",
    "rc": "1",
    "version": "1.7.2"
}
"""
    return json.loads(embedded)
import json
import os
import subprocess
try:
    # We are looking for the git repo which contains this file.
    MY_DIR = os.path.dirname(os.path.abspath(__file__))
except Exception:
    # FIX: narrowed from a bare `except:`, which also swallows SystemExit
    # and KeyboardInterrupt (PEP 8). __file__ is undefined when run
    # interactively or frozen; fall back to the current directory for the
    # git commands below.
    MY_DIR = None
def is_tree_dirty():
    """True if the git working tree has uncommitted modifications.

    Returns False when git is unavailable or this is not a checkout.
    """
    try:
        return bool(subprocess.check_output(
            ["git", "diff", "--name-only"], stderr=subprocess.PIPE,
            cwd=MY_DIR,
        ).splitlines())
    except (OSError, subprocess.CalledProcessError):
        return False
def get_version_file_path(version_file="version.yaml"):
    """Return the path of *version_file* at the git repository's top level.

    Returns None when git is unavailable or this is not a checkout.
    """
    try:
        return os.path.join(subprocess.check_output(
            ["git", "rev-parse", "--show-toplevel"], stderr=subprocess.PIPE,
            cwd=MY_DIR,
        ).decode("utf-8").strip(), version_file)
    except (OSError, subprocess.CalledProcessError):
        return None
def number_of_commit_since(version_file="version.yaml"):
    """Returns the number of commits since version.yaml was changed."""
    try:
        # The most recent non-merge commit that touched the version file...
        last_commit_to_touch_version_file = subprocess.check_output(
            ["git", "log", "--no-merges", "-n", "1", "--pretty=format:%H",
             version_file], cwd=MY_DIR, stderr=subprocess.PIPE,
        ).strip()

        # ...located in the last 1000 non-merge commits: its index in this
        # list is the number of commits made since then.
        all_commits = subprocess.check_output(
            ["git", "log", "--no-merges", "-n", "1000", "--pretty=format:%H"],
            stderr=subprocess.PIPE, cwd=MY_DIR,
        ).splitlines()

        return all_commits.index(last_commit_to_touch_version_file)
    except (OSError, subprocess.CalledProcessError, ValueError):
        # ValueError: the touching commit is older than the 1000-commit
        # window, or was not found.
        return None
def get_current_git_hash():
    """Return the hash of the newest non-merge commit as a string.

    Returns None when git is unavailable or this is not a checkout.
    """
    try:
        # FIX: decode the subprocess output like get_version_file_path()
        # does. Without this, on Python 3 the revision id stored in the
        # version data is a bytes object rather than a string.
        return subprocess.check_output(
            ["git", "log", "--no-merges", "-n", "1", "--pretty=format:%H"],
            stderr=subprocess.PIPE, cwd=MY_DIR,
        ).decode("utf-8").strip()
    except (OSError, subprocess.CalledProcessError):
        return None
def tag_version_data(version_data, version_path="version.yaml"):
    """Augment static version data with live information from git.

    Adds the current revision id, dirty-tree flag and dev-commit counter,
    then computes a PEP 440 version string under the "pep440" key.
    """
    current_hash = get_current_git_hash()

    # Not in a git repository.
    if current_hash is None:
        version_data["error"] = "Not in a git repository."
    else:
        version_data["revisionid"] = current_hash
        version_data["dirty"] = is_tree_dirty()
        version_data["dev"] = number_of_commit_since(
            get_version_file_path(version_path))

    # Format the version according to pep440:
    pep440 = version_data["version"]
    if int(version_data.get("post", 0)) > 0:
        pep440 += ".post" + version_data["post"]

    elif int(version_data.get("rc", 0)) > 0:
        pep440 += ".rc" + version_data["rc"]

    if version_data.get("dev", 0):
        # A Development release comes _before_ the main release, so bump
        # the last version component and append the .devN suffix.
        last = version_data["version"].rsplit(".", 1)
        version_data["version"] = "%s.%s" % (last[0], int(last[1]) + 1)
        pep440 = version_data["version"] + ".dev" + str(version_data["dev"])

    version_data["pep440"] = pep440
    return version_data
__author__ = "Adam Sindelar <adamsh@google.com>"
import json
import os
import re
import subprocess
import sys
def run_xcodebuild():
    """Build macho/main.c.

    Raises:
        OSError: if xcodebuild exits with a non-zero status.
    """
    # Build in this file's directory; send build chatter to stderr so
    # stdout stays clean for the JSON profile emitted later.
    result = subprocess.call(["xcodebuild"], stdout=sys.stderr,
                             cwd=os.path.dirname(os.path.realpath(__file__)))
    if result != 0:
        raise OSError("xcodebuild failed.")
def find_dsym():
    """Find the binary with the DWARF stream we're interested in.

    Searches the build output tree for a "macho.dSYM" bundle; the DWARF
    stream lives at a fixed path inside that bundle.

    Raises:
        OSError: if no dSYM bundle is found.
    """
    for root, dirs, _ in os.walk(os.path.dirname(os.path.realpath(__file__))):
        for dir_ in dirs:
            if dir_.lower() == "macho.dsym":
                return os.path.join(root, dir_, "Contents", "Resources",
                                    "DWARF", "macho")

    raise OSError("Couldn't find resulting dSYM.")
class DWARFDumpParser(object):
    """A rudimentary line-feed parser of dwarfdump output.

    See DWARFDumpParser.parse for a discussion of output.

    This implementation takes various shortcuts and only supports what's
    needed to generate a rudimentary Mach-O profile that can be used to
    bootstrap Rekall's more robust Mach-O support. Only struct, member and
    various typedef declarations are supported with no nesting. The only
    attributes that are processed are name, type, byte size and offset.

    Arguments:
        lines: Iterable of dwarfdump output, like a File object.
    """

    # Regexes recognizing the TAG declarations we care about. The leading
    # hex number is the DIE offset, which DWARF uses as a type id.
    DWARF_STRUCT_DECL = re.compile(r"^(0x[a-fA-F0-9]+):\s*TAG_structure_type")
    DWARF_MEMBER_DECL = re.compile(r"^(0x[a-fA-F0-9]+):\s*TAG_member")
    DWARF_TYPEDEF_DECL = re.compile(
        r"^(0x[a-fA-F0-9]+):\s*TAG_(?:typedef|pointer_type|subroutine_type)")
    DWARF_BASETYPE_DECL = re.compile(r"^(0x[a-fA-F0-9]+):\s*TAG_base_type")

    # Regexes extracting attributes of the current declaration.
    DWARF_AT_NAME = re.compile(r"^\s*AT_name\(\s*\"([^\"]+)\"\s*\)")
    DWARF_AT_SIZE = re.compile(r"^\s*AT_byte_size\(\s*(0x[a-fA-F0-9]+)\s*\)")
    DWARF_AT_TYPE = re.compile(r"^\s*AT_type\( \{\s*(0x[a-fA-F0-9]+)\s*\} ")
    DWARF_AT_OFFSET = re.compile(
        r"^\s*AT_data_member_location\(\s*\+?([\d+]+)\s*\)")

    state_decl_dwarf_id = None
    current_state = None
    vtypes = None
    lines = None
    struct_name = None
    struct_size = None
    members = None
    member_name = None
    member_type = None
    member_offset = None
    typedefs = None

    def __init__(self, lines):
        self.vtypes = {}
        self.typedefs = {}
        self.basetypes = {}
        self.lines = lines
        self.current_state = self.state_init

    def reset(self):
        """Reset buffers for a new struct declaration."""
        self.reset_member()
        self.struct_name = None
        self.struct_size = None
        self.members = {}

    def reset_member(self):
        """Reset buffers for a new member declaration."""
        self.member_name = None
        self.member_type = None
        self.member_offset = None

    def parse(self):
        """Parse dwarfdump output and return Rekall vtypes.

        This takes a few shortcuts - for example, pointer types aren't
        really understood and just become unsigned integers. Unions are
        ignored, as are nested types. Any type the parser doesn't understand
        is interpreted as an unsigned integer, because if you just treat
        everything as an unsigned int, things work out, like, 95% of the
        time.

        Returns: A dict of vtypes, keyed on struct name. Contents are the
            Rekall vtype format.
        """
        self.reset()
        for line in self.lines:
            self.current_state(line)

        self.finalize_struct()

        # Resolve every member's DWARF type id to a base type name by
        # following typedef chains.
        # FIX: use values() instead of the Python 2-only itervalues(), so
        # this works unchanged on both Python 2 and Python 3.
        for vtype in self.vtypes.values():
            for member in vtype[1].values():
                type_id = member[1]
                while type_id not in self.basetypes:
                    type_id = self.typedefs.get(type_id)
                    if type_id is None:
                        # Must've been typedefed through something we don't
                        # understand. Probably a union type or something.
                        # We just skip it - the bootstrap only needs enough
                        # information to get a DWARF stream out of a Mach-O.
                        break

                if type_id:
                    member[1] = [self.basetypes[type_id]]
                else:
                    member[1] = ["unsigned int"]  # Trust me, I'm an engineer.

        return self.vtypes

    def finalize_member(self):
        # Commit the buffered member (if any) into the current struct.
        if self.member_name is not None:
            self.members[self.member_name] = [self.member_offset,
                                              self.member_type]

        self.reset_member()

    def finalize_struct(self):
        # Commit the buffered struct (if any) into the vtypes output.
        self.finalize_member()
        if self.struct_name is not None:
            self.vtypes[self.struct_name] = [self.struct_size, self.members]

        self.reset()

    def detect_new_state(self, line):
        """If the line is a new declaration, decide what the new state is.

        Returns:
            One of the self.state_* methods which is to become the new state
            handler, or None if the line is not a declaration.
        """
        match = self.DWARF_STRUCT_DECL.match(line)
        if match:
            self.state_decl_dwarf_id = int(match.group(1), 16)
            return self.state_struct

        match = self.DWARF_MEMBER_DECL.match(line)
        if match:
            self.state_decl_dwarf_id = int(match.group(1), 16)
            return self.state_member

        match = self.DWARF_TYPEDEF_DECL.match(line)
        if match:
            self.state_decl_dwarf_id = int(match.group(1), 16)
            return self.state_typedef

        match = self.DWARF_BASETYPE_DECL.match(line)
        if match:
            self.state_decl_dwarf_id = int(match.group(1), 16)
            return self.state_basetype

    def state_init(self, line):
        """Initial state - detects any decl and switches to that state."""
        new_state = self.detect_new_state(line)
        if new_state:
            self.current_state = new_state

    def state_struct(self, line):
        """State inside a struct declaration."""
        new_state = self.detect_new_state(line)
        if new_state:
            # Members continue the current struct; anything else ends it.
            if new_state != self.state_member:
                self.finalize_struct()

            self.current_state = new_state
            return

        match = self.DWARF_AT_NAME.match(line)
        if match:
            self.struct_name = match.group(1)
            return

        match = self.DWARF_AT_SIZE.match(line)
        if match:
            self.struct_size = int(match.group(1), 16)
            return

    def state_member(self, line):
        """State inside a member declaration, inside a struct."""
        new_state = self.detect_new_state(line)
        if new_state:
            if new_state == self.state_member:
                # A sibling member: commit this one and keep going.
                self.finalize_member()
            else:
                self.finalize_struct()

            self.current_state = new_state
            return

        match = self.DWARF_AT_NAME.match(line)
        if match:
            self.member_name = match.group(1)
            return

        match = self.DWARF_AT_TYPE.match(line)
        if match:
            self.member_type = int(match.group(1), 16)
            return

        match = self.DWARF_AT_OFFSET.match(line)
        if match:
            self.member_offset = int(match.group(1))

    def state_typedef(self, line):
        """State inside a typedef-like declaration."""
        new_state = self.detect_new_state(line)
        if new_state:
            self.current_state = new_state
            return

        match = self.DWARF_AT_TYPE.match(line)
        if match:
            # Record the id this typedef points at, for later chasing.
            self.typedefs[self.state_decl_dwarf_id] = int(match.group(1), 16)
            self.current_state = self.state_init

    def state_basetype(self, line):
        """State inside a basetype declaration."""
        new_state = self.detect_new_state(line)
        if new_state:
            self.current_state = new_state
            return

        match = self.DWARF_AT_NAME.match(line)
        if match:
            self.basetypes[self.state_decl_dwarf_id] = match.group(1)
            self.current_state = self.state_init
def run_dwarfdump(path):
    """Run dwarfdump on 'path' and parse the output.

    Returns:
        Rekall vtypes for the file at 'path'.
    """
    # NOTE(review): on Python 3 check_output returns bytes, so split("\n")
    # would fail; this script appears to target Python 2 -- TODO confirm
    # before porting.
    lines = subprocess.check_output(["dwarfdump", path]).split("\n")
    parser = DWARFDumpParser(lines)
    return parser.parse()
def main():
    """Build the macho project, dump its dSYM and emit a Rekall profile."""
    sys.stderr.write("Will xcodebuild the macho project.\n")
    run_xcodebuild()

    dsym_path = find_dsym()
    sys.stderr.write("dSYM dump at path %s\n" % dsym_path)

    vtypes = run_dwarfdump(dsym_path)
    sys.stderr.write("Dumping %d vtypes to stdout.\n" % len(vtypes))

    profile = {
        "$STRUCTS": vtypes,
        "$METADATA": {
            "ProfileClass": "MachoProfile",
            "Type": "Profile",
        },
    }
    json.dump(profile, sys.stdout)
if __name__ == "__main__":
main() | /rekall-1.7.2rc1.zip/rekall-1.7.2rc1/src/profiles/macho/bootstrap.py | 0.554109 | 0.215268 | bootstrap.py | pypi |
# This file is produced when the main "version.py update" command is run. That
# command copies this file to all sub-packages which contain
# setup.py. Configuration is maintain in version.yaml at the project's top
# level.
def get_versions():
    """Return the embedded version data tagged with git-derived metadata."""
    raw = raw_versions()
    return tag_version_data(raw, "version.yaml")
def raw_versions():
    """Parse and return the version configuration embedded in this file."""
    # This JSON blob is pasted in by the "version.py update" command.
    embedded = """
{
    "codename": "Hurricane Ridge",
    "post": "0",
    "rc": "1",
    "version": "1.7.2"
}
"""
    return json.loads(embedded)
import json
import os
import subprocess
try:
    # We are looking for the git repo which contains this file.  __file__
    # is undefined in some contexts (e.g. frozen binaries or interactive
    # sessions); catching only NameError avoids the original bare except
    # that silently swallowed every other error.
    MY_DIR = os.path.dirname(os.path.abspath(__file__))
except NameError:
    MY_DIR = None
def is_tree_dirty():
    """Return True if the git tree containing this file has local edits."""
    diff_cmd = ["git", "diff", "--name-only"]
    try:
        changed_files = subprocess.check_output(
            diff_cmd, stderr=subprocess.PIPE, cwd=MY_DIR,
        ).splitlines()
    except (OSError, subprocess.CalledProcessError):
        # git is missing or this is not a repository: report a clean tree.
        return False
    return bool(changed_files)
def get_version_file_path(version_file="version.yaml"):
    """Return the path of ``version_file`` at the top of this git repo.

    Returns None when git is unavailable or this is not a repository.
    """
    try:
        top_level = subprocess.check_output(
            ["git", "rev-parse", "--show-toplevel"],
            stderr=subprocess.PIPE, cwd=MY_DIR,
        ).decode("utf-8").strip()
        return os.path.join(top_level, version_file)
    except (OSError, subprocess.CalledProcessError):
        return None
def number_of_commit_since(version_file="version.yaml"):
    """Returns the number of commits since version.yaml was changed.

    Finds the newest non-merge commit that touched ``version_file`` and
    returns its index among the last 1000 non-merge commits (0 means the
    newest commit touched it).  Returns None on any git failure, or if
    that commit is not among the last 1000.
    """
    try:
        # The most recent non-merge commit that modified the version file.
        last_commit_to_touch_version_file = subprocess.check_output(
            ["git", "log", "--no-merges", "-n", "1", "--pretty=format:%H",
            version_file], cwd=MY_DIR, stderr=subprocess.PIPE,
        ).strip()
        # The last 1000 non-merge commits, newest first.
        all_commits = subprocess.check_output(
            ["git", "log", "--no-merges", "-n", "1000", "--pretty=format:%H"],
            stderr=subprocess.PIPE, cwd=MY_DIR,
        ).splitlines()
        # index() raises ValueError if the commit is older than 1000 commits.
        return all_commits.index(last_commit_to_touch_version_file)
    except (OSError, subprocess.CalledProcessError, ValueError):
        return None
def get_current_git_hash():
    """Return the hash of the newest non-merge commit, or None on failure."""
    log_cmd = ["git", "log", "--no-merges", "-n", "1", "--pretty=format:%H"]
    try:
        output = subprocess.check_output(
            log_cmd, stderr=subprocess.PIPE, cwd=MY_DIR)
    except (OSError, subprocess.CalledProcessError):
        return None
    return output.strip()
def tag_version_data(version_data, version_path="version.yaml"):
    """Tag ``version_data`` with git metadata and a PEP 440 version string.

    Args:
      version_data: Dict with at least a "version" key, and optionally
        "post" and "rc" (string-encoded integers).
      version_path: Name of the version file used to count commits since
        the last version change.

    Returns:
      The same dict, augmented with "revisionid", "dirty", "dev" and the
      formatted "pep440" string (or "error" outside a git repository).
    """
    current_hash = get_current_git_hash()
    # Not in a git repository.
    if current_hash is None:
        version_data["error"] = "Not in a git repository."
    else:
        version_data["revisionid"] = current_hash
        version_data["dirty"] = is_tree_dirty()
        version_data["dev"] = number_of_commit_since(
            get_version_file_path(version_path))
    # Format the version according to pep440:
    pep440 = version_data["version"]
    if int(version_data.get("post", 0)) > 0:
        pep440 += ".post" + version_data["post"]
    elif int(version_data.get("rc", 0)) > 0:
        pep440 += ".rc" + version_data["rc"]
    if version_data.get("dev", 0):
        # A Development release comes _before_ the main release, so bump
        # the last version component and replace any .post/.rc suffix
        # with .devN.
        last = version_data["version"].rsplit(".", 1)
        version_data["version"] = "%s.%s" % (last[0], int(last[1]) + 1)
        pep440 = version_data["version"] + ".dev" + str(version_data["dev"])
    version_data["pep440"] = pep440
    return version_data
# [Bootstrap](http://getbootstrap.com) [](http://badge.fury.io/bo/bootstrap) [](http://travis-ci.org/twbs/bootstrap) [](https://david-dm.org/twbs/bootstrap#info=devDependencies)
[](https://saucelabs.com/u/bootstrap)
Bootstrap is a sleek, intuitive, and powerful front-end framework for faster and easier web development, created by [Mark Otto](http://twitter.com/mdo) and [Jacob Thornton](http://twitter.com/fat), and maintained by the [core team](https://github.com/twbs?tab=members) with the massive support and involvement of the community.
To get started, check out <http://getbootstrap.com>!
## Table of contents
- [Quick start](#quick-start)
- [Bugs and feature requests](#bugs-and-feature-requests)
- [Documentation](#documentation)
- [Compiling CSS and JavaScript](#compiling-css-and-javascript)
- [Contributing](#contributing)
- [Community](#community)
- [Versioning](#versioning)
- [Authors](#authors)
- [Copyright and license](#copyright-and-license)
## Quick start
Three quick start options are available:
- [Download the latest release](https://github.com/twbs/bootstrap/archive/v3.1.1.zip).
- Clone the repo: `git clone https://github.com/twbs/bootstrap.git`.
- Install with [Bower](http://bower.io): `bower install bootstrap`.
Read the [Getting Started page](http://getbootstrap.com/getting-started/) for information on the framework contents, templates and examples, and more.
### What's included
Within the download you'll find the following directories and files, logically grouping common assets and providing both compiled and minified variations. You'll see something like this:
```
bootstrap/
├── css/
│   ├── bootstrap.css
│   ├── bootstrap.min.css
│   ├── bootstrap-theme.css
│   └── bootstrap-theme.min.css
├── js/
│   ├── bootstrap.js
│   └── bootstrap.min.js
└── fonts/
    ├── glyphicons-halflings-regular.eot
    ├── glyphicons-halflings-regular.svg
    ├── glyphicons-halflings-regular.ttf
    └── glyphicons-halflings-regular.woff
```
We provide compiled CSS and JS (`bootstrap.*`), as well as compiled and minified CSS and JS (`bootstrap.min.*`). Fonts from Glyphicons are included, as is the optional Bootstrap theme.
## Bugs and feature requests
Have a bug or a feature request? Please first read the [issue guidelines](https://github.com/twbs/bootstrap/blob/master/CONTRIBUTING.md#using-the-issue-tracker) and search for existing and closed issues. If your problem or idea is not addressed yet, [please open a new issue](https://github.com/twbs/bootstrap/issues/new).
## Documentation
Bootstrap's documentation, included in this repo in the root directory, is built with [Jekyll](http://jekyllrb.com) and publicly hosted on GitHub Pages at <http://getbootstrap.com>. The docs may also be run locally.
### Running documentation locally
1. If necessary, [install Jekyll](http://jekyllrb.com/docs/installation) (requires v1.x).
- **Windows users:** Read [this unofficial guide](https://github.com/juthilo/run-jekyll-on-windows/) to get Jekyll up and running without problems. We use Pygments for syntax highlighting, so make sure to read the sections on installing Python and Pygments.
2. From the root `/bootstrap` directory, run `jekyll serve` in the command line.
- **Windows users:** While we use Jekyll's `encoding` setting, you might still need to change the command prompt's character encoding ([code page](http://en.wikipedia.org/wiki/Windows_code_page)) to UTF-8 so Jekyll runs without errors. For Ruby 2.0.0, run `chcp 65001` first. For Ruby 1.9.3, you can alternatively do `SET LANG=en_EN.UTF-8`.
3. Open <http://localhost:9001> in your browser, and voilà.
Learn more about using Jekyll by reading its [documentation](http://jekyllrb.com/docs/home/).
### Documentation for previous releases
Documentation for v2.3.2 has been made available for the time being at <http://getbootstrap.com/2.3.2/> while folks transition to Bootstrap 3.
[Previous releases](https://github.com/twbs/bootstrap/releases) and their documentation are also available for download.
## Compiling CSS and JavaScript
Bootstrap uses [Grunt](http://gruntjs.com/) with convenient methods for working with the framework. It's how we compile our code, run tests, and more. To use it, install the required dependencies as directed and then run some Grunt commands.
### Install Grunt
From the command line:
1. Install `grunt-cli` globally with `npm install -g grunt-cli`.
2. Navigate to the root `/bootstrap` directory, then run `npm install`. npm will look at [package.json](https://github.com/twbs/bootstrap/blob/master/package.json) and automatically install the necessary local dependencies listed there.
When completed, you'll be able to run the various Grunt commands provided from the command line.
**Unfamiliar with `npm`? Don't have node installed?** That's a-okay. npm stands for [node packaged modules](http://npmjs.org/) and is a way to manage development dependencies through node.js. [Download and install node.js](http://nodejs.org/download/) before proceeding.
### Available Grunt commands
#### Build - `grunt`
Run `grunt` to run tests locally and compile the CSS and JavaScript into `/dist`. **Uses [Less](http://lesscss.org/) and [UglifyJS](http://lisperator.net/uglifyjs/).**
#### Only compile CSS and JavaScript - `grunt dist`
`grunt dist` creates the `/dist` directory with compiled files. **Uses [Less](http://lesscss.org/) and [UglifyJS](http://lisperator.net/uglifyjs/).**
#### Tests - `grunt test`
Runs [JSHint](http://jshint.com) and [QUnit](http://qunitjs.com/) tests headlessly in [PhantomJS](http://phantomjs.org/) (used for CI).
#### Watch - `grunt watch`
This is a convenience method for watching just Less files and automatically building them whenever you save.
### Troubleshooting dependencies
Should you encounter problems with installing dependencies or running Grunt commands, uninstall all previous dependency versions (global and local). Then, rerun `npm install`.
## Contributing
Please read through our [contributing guidelines](https://github.com/twbs/bootstrap/blob/master/CONTRIBUTING.md). Included are directions for opening issues, coding standards, and notes on development.
Moreover, if your pull request contains JavaScript patches or features, you must include relevant unit tests. All HTML and CSS should conform to the [Code Guide](http://github.com/mdo/code-guide), maintained by [Mark Otto](http://github.com/mdo).
Editor preferences are available in the [editor config](https://github.com/twbs/bootstrap/blob/master/.editorconfig) for easy use in common text editors. Read more and download plugins at <http://editorconfig.org>.
## Community
Keep track of development and community news.
- Follow [@twbootstrap on Twitter](http://twitter.com/twbootstrap).
- Read and subscribe to [The Official Bootstrap Blog](http://blog.getbootstrap.com).
- Chat with fellow Bootstrappers in IRC. On the `irc.freenode.net` server, in the `##twitter-bootstrap` channel.
- Implementation help may be found at Stack Overflow (tagged [`twitter-bootstrap-3`](http://stackoverflow.com/questions/tagged/twitter-bootstrap-3)).
## Versioning
For transparency into our release cycle and in striving to maintain backward compatibility, Bootstrap is maintained under the Semantic Versioning guidelines. Sometimes we screw up, but we'll adhere to these rules whenever possible.
Releases will be numbered with the following format:
`<major>.<minor>.<patch>`
And constructed with the following guidelines:
- Breaking backward compatibility **bumps the major** while resetting minor and patch
- New additions without breaking backward compatibility **bumps the minor** while resetting the patch
- Bug fixes and misc changes **bumps only the patch**
For more information on SemVer, please visit <http://semver.org/>.
## Authors
**Mark Otto**
- <http://twitter.com/mdo>
- <http://github.com/mdo>
**Jacob Thornton**
- <http://twitter.com/fat>
- <http://github.com/fat>
## Copyright and license
Code and documentation copyright 2011-2014 Twitter, Inc. Code released under [the MIT license](LICENSE). Docs released under [Creative Commons](docs/LICENSE).
| /rekall_gui-1.5.0.post4.tar.gz/rekall_gui-1.5.0.post4/manuskript/static/bower_components/bootstrap/README.md | 0.718989 | 0.878575 | README.md | pypi |
angular-hotkeys
================
Configuration-centric keyboard shortcuts for your Angular apps.
[](https://coveralls.io/r/chieffancypants/angular-hotkeys?branch=master)

**Requirements**: Angular 1.2+
### Features:
- Define hotkeys on an entire route, automatically binding and unbinding them as you navigate
- Automatic listing of shortcuts when users hit the `?` key
- Super duper unit tests
### Installation:
#### via bower:
```
$ bower install chieffancypants/angular-hotkeys --save
```
#### via npm:
```
$ npm install angular-hotkeys --save
```
*please use either the minified or unminified file in the `build` directory*
### Why I made this:
Other projects out there rely too heavily on HTML markup for keyboard shortcuts. For example:
```html
<div class="player">
<div class="playPause-btn" shortcut="{space: playPause}"></div>
<div class="mute-btn" shortcut="{'ctrl+down': mute}"></div>
</div>
```
While this is a great approach for many Angular apps, some applications do not have a 1 to 1 relationship between DOM elements and controller methods. In my case, many methods on the controller were **only** accessible through the keyboard.
Additionally, this only allows you to pass a function reference, you can't pass arguments to the function you intend to call. So instead of simply calling `seek(currentTime + 30)` and `seek(currentTime + 60)`, I needed to create a ton of helper functions on the scope (such as `forward30` and `forward60`), and litter my HTML like this:
```html
<div class="player" shortcut="{space: playPause,
'alt+right': forward30,
'ctrl+right': forward60,
'left': back30,
'ctrl+left': back60,
up: volumeUp,
down: volumeDown,
'ctrl+down': mute,
'ctrl+up': unmute,
f: fullscreen,
h: showHelp}">
<div class="playPause-btn"></div>
<div class="mute-btn"></div>
</div>
```
With a few dozen shortcuts, this left the DOM really messy, and with multiple views and directive templates, it was next to impossible to remember where all the different shortcuts were. This became a maintenance nightmare.
### Usage:
You can either define hotkeys in your Controller, or in your Route configuration (or both). To start, though, require the lib as a dependency for your angular app:
```js
angular.module('myApp', ['ngRoute', 'cfp.hotkeys']);
```
Behind the scenes, I'm using the [Mousetrap](https://github.com/ccampbell/mousetrap) library to manage the key bindings. Check out the docs there for more information on what kind of key combinations can be used. This library is included in the files from the `build` directory, so there is no need to install and include Mousetrap separately.
#### Binding hotkeys in controllers:
It is important to note that by default, hotkeys bound using the `hotkeys.add()`
method are persistent, meaning they will continue to exist through route
changes, DOM manipulation, or anything else.
However, it is possible to bind the hotkey to a particular scope, and when that
scope is destroyed, the hotkey is automatically removed. This should be
considered the best practice when binding hotkeys from a controller. For this
usage example, see the `hotkeys.bindTo()` method below:
```js
angular.module('myApp').controller('NavbarCtrl', function($scope, hotkeys) {
$scope.volume = 5;
// You can pass it an object. This hotkey will not be unbound unless manually removed
// using the hotkeys.del() method
hotkeys.add({
combo: 'ctrl+up',
description: 'This one goes to 11',
callback: function() {
$scope.volume += 1;
}
});
// when you bind it to the controller's scope, it will automatically unbind
// the hotkey when the scope is destroyed (due to ng-if or something that changes the DOM)
hotkeys.bindTo($scope)
.add({
combo: 'w',
description: 'blah blah',
callback: function() {}
})
// you can chain these methods for ease of use:
.add ({...});
});
```
#### Binding hotkeys in routes:
You can also define hotkeys on an entire route, and this lib will bind and unbind them as you navigate the app.
```js
angular.module('myApp').config(function ($routeProvider) {
$routeProvider.when('/', {
controller: 'RestaurantsController',
templateUrl: 'views/restaurants.html',
hotkeys: [
['p', 'Sort by price', 'sort(price)']
]
});
});
```
#### Binding hotkeys in directives:
Lastly, even though binding hotkeys in your templates/html tends to be a bad idea, it can be super useful for simple shortcuts. Think along the lines of a modal directive where you simply want to bind to the escape key or something equally simple. Accomplishing this within a controller is too much overhead, and it may lead to code-reuse.
Example of how directive-based hotkeys works:
```html
<modal title="Modal Title" hotkey="{esc: close}">
```
### Configuration
**Disable the cheatsheet:**
Disabling the cheatsheet can be accomplished by configuring the `hotkeysProvider`:
```js
angular.module('myApp', ['cfp.hotkeys'])
.config(function(hotkeysProvider) {
hotkeysProvider.includeCheatSheet = false;
})
```
**Cheatsheet template:**
```js
angular.module('myApp', ['cfp.hotkeys'])
.config(function(hotkeysProvider) {
hotkeysProvider.template = '<div class="my-own-cheatsheet">...</div>';
})
```
### API
#### hotkeys.add(object)
`object`: An object with the following parameters:
- `combo`: The keyboard combo (shortcut) you want to bind to
- `description`: [OPTIONAL] The description for what the combo does and is only used for the Cheat Sheet. If it is not supplied, it will not show up, and in effect, allows you to have unlisted hotkeys.
- `callback`: The function to execute when the key(s) are pressed. Passes along two arguments, `event` and `hotkey`
- `action`: [OPTIONAL] The type of event to listen for, such as `keypress`, `keydown` or `keyup`. Usage of this parameter is discouraged as the underlying library will pick the most suitable option automatically. This should only be necessary in advanced situations.
- `allowIn`: [OPTIONAL] an array of tag names to allow this combo in ('INPUT', 'SELECT', and/or 'TEXTAREA')
```js
hotkeys.add({
combo: 'ctrl+w',
description: 'Description goes here',
callback: function(event, hotkey) {
event.preventDefault();
}
});
// this hotkey will not show up on the cheat sheet:
hotkeys.add({
combo: 'ctrl+x',
callback: function(event, hotkey) {...}
});
```
#### hotkeys.get(key)
Returns the Hotkey object
```js
hotkeys.get('ctrl+w');
// -> Hotkey { combo: ['ctrl+w'], description: 'Description goes here', callback: function (event, hotkey) }
```
#### hotkeys.del(key)
Removes and unbinds a hotkey
```js
hotkeys.del('ctrl+w');
```
### Allowing hotkeys in form elements
By default, Mousetrap prevents hotkey callbacks from firing when their event originates from an `input`, `select`, or `textarea` element. To enable hotkeys in these elements, specify them in the `allowIn` parameter:
```js
hotkeys.add({
combo: 'ctrl+w',
description: 'Description goes here',
allowIn: ['INPUT', 'SELECT', 'TEXTAREA'],
callback: function(event, hotkey) {
event.preventDefault();
}
});
```
## Credits:
Muchas gracias to Craig Campbell for his [Mousetrap](https://github.com/ccampbell/mousetrap) library, which provides the underlying library for handling keyboard shortcuts.
| /rekall_gui-1.5.0.post4.tar.gz/rekall_gui-1.5.0.post4/manuskript/static/bower_components/angular-hotkeys/README.md | 0.443359 | 0.817101 | README.md | pypi |
from rekall.helpers import INFTY
# Adapters for logical combinations of predicates
def not_pred(pred):
    """Return a predicate that is the logical negation of ``pred``."""
    def negated(*args):
        return not pred(*args)
    return negated
def and_pred(*preds):
    """Return a predicate that is the logical AND of ``preds``.

    The returned predicate forwards its arguments to every predicate in
    ``preds``, short-circuiting on the first failure.  With no predicates
    it is vacuously true, matching the original explicit loop.
    """
    def new_pred(*args):
        # all() short-circuits exactly like the original for-loop did.
        return all(pred(*args) for pred in preds)
    return new_pred
def or_pred(*preds):
    """Return a predicate that is the logical OR of ``preds``.

    The returned predicate forwards its arguments to every predicate in
    ``preds``, short-circuiting on the first success.  With no predicates
    it is vacuously false, matching the original explicit loop.
    """
    def new_pred(*args):
        # any() short-circuits exactly like the original for-loop did.
        return any(pred(*args) for pred in preds)
    return new_pred
def true_pred():
    """Return a predicate that accepts any arguments and returns ``True``."""
    return lambda *args: True
def false_pred():
    """Return a predicate that accepts any arguments and returns ``False``."""
    return lambda *args: False
# Predicates on payloads.
def payload_satisfies(pred):
    """Lift ``pred`` so it applies to the payloads of Intervals.

    The returned function takes as many Intervals as ``pred`` takes
    arguments and evaluates ``pred`` on their ``payload`` fields instead
    of the Intervals themselves.

    Arg:
        pred: The predicate to wrap.

    Returns:
        An output function that applies ``pred`` to payloads.
    """
    def lifted(*intervals):
        payloads = [interval.payload for interval in intervals]
        return pred(*payloads)
    return lifted
def on_key(key, pred):
    """Lift ``pred`` so it applies to ``d[key]`` of each dict argument.

    The returned function takes as many dicts as ``pred`` takes arguments
    and evaluates ``pred`` on ``d[key]`` for each dict ``d`` instead of
    the dicts themselves.

    Arg:
        key: The key of the dict to apply the predicate to.
        pred: The predicate to wrap.

    Returns:
        An output function that applies ``pred`` to keyed values of
        dict(s).
    """
    def keyed(*dicts):
        return pred(*[d[key] for d in dicts])
    return keyed
# Temporal predicates
def before(min_dist=0, max_dist=INFTY):
    """Return a predicate testing that one temporal interval ends before
    another begins, with the gap between ``min_dist`` and ``max_dist``.

    The output function expects two temporal intervals (dicts with keys
    't1' and 't2' for the start and end times).  It returns ``True`` when
    the gap between the end of the first interval and the start of the
    second lies in [``min_dist``, ``max_dist``], inclusive.

    Arg:
        min_dist: Minimum gap between the two intervals.  Negative values
            are undefined.
        max_dist: Maximum gap; ``INFTY`` leaves the gap unbounded above.

    Returns:
        An output function on two temporal intervals.
    """
    def fn(first, second):
        gap = second['t1'] - first['t2']
        if gap < min_dist:
            return False
        return max_dist == INFTY or gap <= max_dist
    return fn
def after(min_dist=0, max_dist=INFTY):
    """Return a predicate testing that one temporal interval begins after
    another ends, with the gap between ``min_dist`` and ``max_dist``.

    The output function expects two temporal intervals (dicts with keys
    't1' and 't2' for the start and end times).  It returns ``True`` when
    the gap between the start of the first interval and the end of the
    second lies in [``min_dist``, ``max_dist``], inclusive.

    Arg:
        min_dist: Minimum gap between the two intervals.  Negative values
            are undefined.
        max_dist: Maximum gap; ``INFTY`` leaves the gap unbounded above.

    Returns:
        An output function on two temporal intervals.
    """
    def fn(first, second):
        gap = first['t1'] - second['t2']
        if gap < min_dist:
            return False
        return max_dist == INFTY or gap <= max_dist
    return fn
def overlaps():
    """Return a predicate testing whether two temporal intervals overlap.

    The output function expects two temporal intervals (dicts with keys
    't1' and 't2' for the start and end times).  It returns ``True`` when
    one interval straddles an endpoint of the other, or when one interval
    contains the other (containment may share endpoints).  Intervals that
    merely touch at a single endpoint do not count as overlapping.
    """
    def fn(a, b):
        straddles_start = a['t1'] < b['t1'] and a['t2'] > b['t1']
        straddles_end = a['t1'] < b['t2'] and a['t2'] > b['t2']
        contains = a['t1'] <= b['t1'] and a['t2'] >= b['t2']
        contained = a['t1'] >= b['t1'] and a['t2'] <= b['t2']
        return straddles_start or straddles_end or contains or contained
    return fn
def overlaps_before():
    """Return a predicate testing that the first temporal interval starts
    before the second and has non-zero overlap with it.

    The output function expects two temporal intervals (dicts with keys
    't1' and 't2' for the start and end times).  It returns ``True`` when
    the first interval starts before the second, ends strictly inside it,
    and the two share a non-zero overlap.
    """
    def fn(a, b):
        return (a['t1'] < b['t1']
                and b['t1'] < a['t2']
                and a['t2'] < b['t2'])
    return fn
def overlaps_after():
    """Return a predicate testing that the first temporal interval starts
    after the second and has non-zero overlap with it.

    The output function expects two temporal intervals (dicts with keys
    't1' and 't2' for the start and end times).  It returns ``True`` when
    the first interval starts strictly inside the second, ends after it,
    and the two share a non-zero overlap.
    """
    def fn(a, b):
        return (b['t1'] < a['t1']
                and a['t1'] < b['t2']
                and b['t2'] < a['t2'])
    return fn
def starts(epsilon=0):
    """Return a predicate testing that the first temporal interval begins
    with the second (same start within ``epsilon``) and ends before it.

    The output function expects two temporal intervals (dicts with keys
    't1' and 't2' for the start and end times).

    Args:
        epsilon: Maximum allowed difference between the two start times.

    Returns:
        An output function on two temporal intervals.
    """
    def fn(a, b):
        same_start = abs(a['t1'] - b['t1']) <= epsilon
        return same_start and a['t2'] < b['t2']
    return fn
def starts_inv(epsilon=0):
    """Return the inverse of the ``starts`` predicate: the second temporal
    interval begins with the first (same start within ``epsilon``) and
    ends before it.

    The output function expects two temporal intervals (dicts with keys
    't1' and 't2' for the start and end times).

    Args:
        epsilon: Maximum allowed difference between the two start times.

    Returns:
        An output function on two temporal intervals.
    """
    def fn(a, b):
        same_start = abs(a['t1'] - b['t1']) <= epsilon
        return same_start and b['t2'] < a['t2']
    return fn
def finishes(epsilon=0):
    """Return a predicate testing that the first temporal interval ends
    with the second (same end within ``epsilon``) and starts after it.

    The output function expects two temporal intervals (dicts with keys
    't1' and 't2' for the start and end times).

    Args:
        epsilon: Maximum allowed difference between the two end times.

    Returns:
        An output function on two temporal intervals.
    """
    def fn(a, b):
        same_end = abs(a['t2'] - b['t2']) <= epsilon
        return same_end and a['t1'] > b['t1']
    return fn
def finishes_inv(epsilon=0):
    """Return the inverse of the ``finishes`` predicate: the second
    temporal interval ends with the first (same end within ``epsilon``)
    and starts after it.

    The output function expects two temporal intervals (dicts with keys
    't1' and 't2' for the start and end times).

    Args:
        epsilon: Maximum allowed difference between the two end times.

    Returns:
        An output function on two temporal intervals.
    """
    def fn(a, b):
        same_end = abs(a['t2'] - b['t2']) <= epsilon
        return same_end and b['t1'] > a['t1']
    return fn
def during():
    """Return a predicate testing that the first temporal interval lies
    strictly inside the second (starts after it starts and ends before it
    ends).

    The output function expects two temporal intervals (dicts with keys
    't1' and 't2' for the start and end times).
    """
    def fn(a, b):
        return b['t1'] < a['t1'] and a['t2'] < b['t2']
    return fn
def during_inv():
    """Return the inverse of the ``during`` predicate: the second temporal
    interval lies strictly inside the first (starts after it starts and
    ends before it ends).

    The output function expects two temporal intervals (dicts with keys
    't1' and 't2' for the start and end times).
    """
    def fn(a, b):
        return a['t1'] < b['t1'] and b['t2'] < a['t2']
    return fn
def meets_before(epsilon=0):
    """Return a predicate testing that the first temporal interval ends
    where the second begins (within ``epsilon``).

    The output function expects two temporal intervals (dicts with keys
    't1' and 't2' for the start and end times).

    Args:
        epsilon: Maximum allowed difference between the end of the first
            interval and the start of the second.

    Returns:
        An output function on two temporal intervals.
    """
    def fn(a, b):
        return abs(a['t2'] - b['t1']) <= epsilon
    return fn
def meets_after(epsilon=0):
    """Return the inverse of the ``meets_before`` predicate: the first
    temporal interval starts where the second ends (within ``epsilon``).

    The output function expects two temporal intervals (dicts with keys
    't1' and 't2' for the start and end times).

    Args:
        epsilon: Maximum allowed difference between the start of the first
            interval and the end of the second.

    Returns:
        An output function on two temporal intervals.
    """
    def fn(a, b):
        return abs(b['t2'] - a['t1']) <= epsilon
    return fn
def equal():
    """Returns a predicate testing whether two temporal intervals are
    strictly equal.

    The output function takes two temporal intervals (dicts with keys 't1'
    and 't2') and returns ``True`` exactly when both the start times and the
    end times match.

    Returns:
        An output function of two temporal intervals returning ``True`` if
        they have identical start and end times.
    """
    def predicate(intrvl1, intrvl2):
        same_start = intrvl1['t1'] == intrvl2['t1']
        same_end = intrvl1['t2'] == intrvl2['t2']
        return same_start and same_end
    return predicate
# Unary bounding box predicates.
def _area(bbox):
"""Computes area of a 2D bounding box.
Args:
bbox: A dict with keys 'x1', 'x2', 'y1', 'y2' encoding spatial
co-ordinates.
Returns:
The area of the bounding box.
"""
return (bbox['x2'] - bbox['x1']) * (bbox['y2'] - bbox['y1'])
def _width(bbox):
"""Computes width of a 2D bounding box.
Args:
bbox: A dict with keys 'x1', 'x2', 'y1', 'y2' encoding spatial
co-ordinates.
Returns:
The width (in the X dimension) of the bounding box.
"""
return bbox['x2'] - bbox['x1']
def _height(bbox):
"""Computes height of a 2D bounding box.
Args:
bbox: A dict with keys 'x1', 'x2', 'y1', 'y2' encoding spatial
co-ordinates.
Returns:
The height (in the Y dimension) of the bounding box.
"""
return bbox['y2'] - bbox['y1']
def position(x1, y1, x2, y2, epsilon=0.1):
    """Returns a predicate testing whether a 2D bounding box sits at the
    given co-ordinates (+/- epsilon).

    The output function takes a 2D bounding box (dict with keys 'x1', 'x2',
    'y1', 'y2') and returns ``True`` only when every co-ordinate differs
    from its target by strictly less than ``epsilon``.

    Args:
        x1: Target value for the bounding box's 'x1' field.
        y1: Target value for the bounding box's 'y1' field.
        x2: Target value for the bounding box's 'x2' field.
        y2: Target value for the bounding box's 'y2' field.
        epsilon: Maximum difference against the target co-ordinates.

    Returns:
        A function of one 2D bounding box returning ``True`` if all four
        co-ordinates are within ``epsilon`` of the targets.
    """
    targets = (('x1', x1), ('y1', y1), ('x2', x2), ('y2', y2))
    def predicate(bbox):
        return all(abs(bbox[field] - value) < epsilon
                   for field, value in targets)
    return predicate
def has_value(key, target, epsilon=0.1):
    """Returns a predicate testing whether a dict value is close to a target.

    The output function takes a dict ``d`` and returns ``True`` when the
    absolute difference between ``d[key]`` and ``target`` is strictly less
    than ``epsilon``.

    Args:
        key: Lookup key for the value in the dict to compare.
        target: The value to compare against.
        epsilon: Maximum difference between the two values.

    Returns:
        A function of one dict returning ``True`` if ``d[key]`` is within
        ``epsilon`` of ``target``.
    """
    def predicate(d):
        return abs(d[key] - target) < epsilon
    return predicate
def area_exactly(area, epsilon=0.1):
    """Returns a predicate testing whether a 2D bounding box has a given
    area (+/- epsilon).

    Args:
        area: Target area value.
        epsilon: Maximum difference between the box's area and ``area``.

    Returns:
        A function of one 2D bounding box (dict with keys 'x1', 'x2', 'y1',
        'y2') returning ``True`` if its area is within ``epsilon`` of
        ``area``.
    """
    def predicate(bbox):
        return abs(_area(bbox) - area) < epsilon
    return predicate
def area_at_least(area):
    """Returns a predicate testing whether a 2D bounding box's area is at
    least ``area``.

    Args:
        area: Minimum area value (inclusive).

    Returns:
        A function of one 2D bounding box (dict with keys 'x1', 'x2', 'y1',
        'y2') returning ``True`` if its area is greater than or equal to
        ``area``.
    """
    def predicate(bbox):
        return _area(bbox) >= area
    return predicate
def area_at_most(area):
    """Returns a predicate testing whether a 2D bounding box's area is at
    most ``area``.

    Args:
        area: Maximum area value (inclusive).

    Returns:
        A function of one 2D bounding box (dict with keys 'x1', 'x2', 'y1',
        'y2') returning ``True`` if its area is less than or equal to
        ``area``.
    """
    def predicate(bbox):
        return _area(bbox) <= area
    return predicate
def area_between(area1, area2):
    """Returns a predicate testing whether a 2D bounding box's area lies in
    ``[area1, area2]`` (inclusive).

    Args:
        area1: Minimum area value.
        area2: Maximum area value.

    Returns:
        A function of one 2D bounding box (dict with keys 'x1', 'x2', 'y1',
        'y2') returning ``True`` if its area is between ``area1`` and
        ``area2``.
    """
    def predicate(bbox):
        # Compute the area once and use a chained comparison.
        area = _area(bbox)
        return area1 <= area <= area2
    return predicate
def width_exactly(width, epsilon=0.1):
    """Returns a predicate testing whether a 2D bounding box has a given
    width (+/- epsilon).

    Args:
        width: Target width value.
        epsilon: Maximum difference between the box's width and ``width``.

    Returns:
        A function of one 2D bounding box (dict with keys 'x1', 'x2', 'y1',
        'y2') returning ``True`` if its width is within ``epsilon`` of
        ``width``.
    """
    def predicate(bbox):
        return abs(_width(bbox) - width) < epsilon
    return predicate
def width_at_least(width):
    """Returns a predicate testing whether a 2D bounding box's width is at
    least ``width``.

    Args:
        width: Minimum width value (inclusive).

    Returns:
        A function of one 2D bounding box (dict with keys 'x1', 'x2', 'y1',
        'y2') returning ``True`` if its width is greater than or equal to
        ``width``.
    """
    def predicate(bbox):
        return _width(bbox) >= width
    return predicate
def width_at_most(width):
    """Returns a predicate testing whether a 2D bounding box's width is at
    most ``width``.

    Args:
        width: Maximum width value (inclusive).

    Returns:
        A function of one 2D bounding box (dict with keys 'x1', 'x2', 'y1',
        'y2') returning ``True`` if its width is less than or equal to
        ``width``.
    """
    def predicate(bbox):
        return _width(bbox) <= width
    return predicate
def width_between(width1, width2):
    """Returns a predicate testing whether a 2D bounding box's width lies in
    ``[width1, width2]`` (inclusive).

    Args:
        width1: Minimum width value.
        width2: Maximum width value.

    Returns:
        A function of one 2D bounding box (dict with keys 'x1', 'x2', 'y1',
        'y2') returning ``True`` if its width is between ``width1`` and
        ``width2``.
    """
    def predicate(bbox):
        # Compute the width once and use a chained comparison.
        width = _width(bbox)
        return width1 <= width <= width2
    return predicate
def height_exactly(height, epsilon=0.1):
    """Returns a predicate testing whether a 2D bounding box has a given
    height (+/- epsilon).

    Args:
        height: Target height value.
        epsilon: Maximum difference between the box's height and ``height``.

    Returns:
        A function of one 2D bounding box (dict with keys 'x1', 'x2', 'y1',
        'y2') returning ``True`` if its height is within ``epsilon`` of
        ``height``.
    """
    def predicate(bbox):
        return abs(_height(bbox) - height) < epsilon
    return predicate
def height_at_least(height):
    """Returns a predicate testing whether a 2D bounding box's height is at
    least ``height``.

    Args:
        height: Minimum height value (inclusive).

    Returns:
        A function of one 2D bounding box (dict with keys 'x1', 'x2', 'y1',
        'y2') returning ``True`` if its height is greater than or equal to
        ``height``.
    """
    def predicate(bbox):
        return _height(bbox) >= height
    return predicate
def height_at_most(height):
    """Returns a predicate testing whether a 2D bounding box's height is at
    most ``height``.

    Args:
        height: Maximum height value (inclusive).

    Returns:
        A function of one 2D bounding box (dict with keys 'x1', 'x2', 'y1',
        'y2') returning ``True`` if its height is less than or equal to
        ``height``.
    """
    def predicate(bbox):
        return _height(bbox) <= height
    return predicate
def height_between(height1, height2):
    """Returns a predicate testing whether a 2D bounding box's height lies
    in ``[height1, height2]`` (inclusive).

    Args:
        height1: Minimum height value.
        height2: Maximum height value.

    Returns:
        A function of one 2D bounding box (dict with keys 'x1', 'x2', 'y1',
        'y2') returning ``True`` if its height is between ``height1`` and
        ``height2``.
    """
    def predicate(bbox):
        # Compute the height once and use a chained comparison.
        height = _height(bbox)
        return height1 <= height <= height2
    return predicate
# Binary bounding box predicates.
def left_of():
    """Returns a predicate testing whether one 2D bounding box is strictly
    to the left of another.

    Returns:
        A function of two 2D bounding boxes (dicts with keys 'x1', 'x2',
        'y1', 'y2') returning ``True`` if the first box's 'x2' is less than
        the second box's 'x1'.
    """
    def predicate(bbox1, bbox2):
        return bbox1['x2'] < bbox2['x1']
    return predicate
def right_of():
    """Returns a predicate testing whether one 2D bounding box is strictly
    to the right of another.

    Returns:
        A function of two 2D bounding boxes (dicts with keys 'x1', 'x2',
        'y1', 'y2') returning ``True`` if the first box's 'x1' is greater
        than the second box's 'x2'.
    """
    def predicate(bbox1, bbox2):
        return bbox1['x1'] > bbox2['x2']
    return predicate
def above():
    """Returns a predicate testing whether one 2D bounding box is strictly
    above another.

    Returns:
        A function of two 2D bounding boxes (dicts with keys 'x1', 'x2',
        'y1', 'y2') returning ``True`` if the first box's 'y2' is less than
        the second box's 'y1'.
    """
    def predicate(bbox1, bbox2):
        return bbox1['y2'] < bbox2['y1']
    return predicate
def below():
    """Returns a predicate testing whether one 2D bounding box is strictly
    below another.

    Returns:
        A function of two 2D bounding boxes (dicts with keys 'x1', 'x2',
        'y1', 'y2') returning ``True`` if the first box's 'y1' is greater
        than the second box's 'y2'.
    """
    def predicate(bbox1, bbox2):
        return bbox1['y1'] > bbox2['y2']
    return predicate
def same_area(epsilon=0.1):
    """Returns a predicate testing whether two 2D bounding boxes have nearly
    equal areas.

    Args:
        epsilon: The maximum difference in area between the two boxes.

    Returns:
        A function of two 2D bounding boxes (dicts with keys 'x1', 'x2',
        'y1', 'y2') returning ``True`` if their areas differ by strictly
        less than ``epsilon``.
    """
    def predicate(bbox1, bbox2):
        return abs(_area(bbox1) - _area(bbox2)) < epsilon
    return predicate
def more_area():
    """Returns a predicate testing whether one 2D bounding box has strictly
    more area than another.

    Returns:
        A function of two 2D bounding boxes (dicts with keys 'x1', 'x2',
        'y1', 'y2') returning ``True`` if the first box's area exceeds the
        second's.
    """
    def predicate(bbox1, bbox2):
        return _area(bbox1) > _area(bbox2)
    return predicate
def less_area():
    """Returns a predicate testing whether one 2D bounding box has strictly
    less area than another.

    Returns:
        A function of two 2D bounding boxes (dicts with keys 'x1', 'x2',
        'y1', 'y2') returning ``True`` if the first box's area is below the
        second's.
    """
    def predicate(bbox1, bbox2):
        return _area(bbox1) < _area(bbox2)
    return predicate
def same_width(epsilon=0.1):
    """Returns a predicate testing whether two 2D bounding boxes have nearly
    equal widths.

    Args:
        epsilon: The maximum difference in width between the two boxes.

    Returns:
        A function of two 2D bounding boxes (dicts with keys 'x1', 'x2',
        'y1', 'y2') returning ``True`` if their widths differ by strictly
        less than ``epsilon``.
    """
    def predicate(bbox1, bbox2):
        return abs(_width(bbox1) - _width(bbox2)) < epsilon
    return predicate
def more_width():
    """Returns a predicate testing whether one 2D bounding box is strictly
    wider than another.

    Returns:
        A function of two 2D bounding boxes (dicts with keys 'x1', 'x2',
        'y1', 'y2') returning ``True`` if the first box's width exceeds the
        second's.
    """
    def predicate(bbox1, bbox2):
        return _width(bbox1) > _width(bbox2)
    return predicate
def less_width():
    """Returns a predicate testing whether one 2D bounding box is strictly
    narrower than another.

    Returns:
        A function of two 2D bounding boxes (dicts with keys 'x1', 'x2',
        'y1', 'y2') returning ``True`` if the first box's width is below
        the second's.
    """
    def predicate(bbox1, bbox2):
        return _width(bbox1) < _width(bbox2)
    return predicate
def same_height(epsilon=0.1):
    """Returns a predicate testing whether two 2D bounding boxes have nearly
    equal heights.

    Args:
        epsilon: The maximum difference in height between the two boxes.

    Returns:
        A function of two 2D bounding boxes (dicts with keys 'x1', 'x2',
        'y1', 'y2') returning ``True`` if their heights differ by strictly
        less than ``epsilon``.
    """
    def predicate(bbox1, bbox2):
        return abs(_height(bbox1) - _height(bbox2)) < epsilon
    return predicate
def more_height():
    """Returns a predicate testing whether one 2D bounding box is strictly
    taller than another.

    Returns:
        A function of two 2D bounding boxes (dicts with keys 'x1', 'x2',
        'y1', 'y2') returning ``True`` if the first box's height exceeds
        the second's.
    """
    def predicate(bbox1, bbox2):
        return _height(bbox1) > _height(bbox2)
    return predicate
def less_height():
    """Returns a predicate testing whether one 2D bounding box is strictly
    shorter than another.

    Returns:
        A function of two 2D bounding boxes (dicts with keys 'x1', 'x2',
        'y1', 'y2') returning ``True`` if the first box's height is below
        the second's.
    """
    def predicate(bbox1, bbox2):
        return _height(bbox1) < _height(bbox2)
    return predicate
def same_value(key, epsilon=0.1):
    """Returns a function that takes two dicts and computes whether
    the difference between two of their values is less than ``epsilon``.

    The output function takes in two dicts ``d1`` and ``d2`` and returns
    ``True`` if the absolute difference between ``d1[key]`` and ``d2[key]``
    is less than ``epsilon``.

    Args:
        key: Lookup key for the value to compare in both dicts.
        epsilon: The maximum difference between the two values of the two
            dicts.

    Returns:
        A function that takes two dicts and returns ``True`` if the
        absolute difference between two of their values is less than
        ``epsilon``.
    """
    # Bug fix: the lambda previously referenced an undefined name
    # ``value_name`` instead of the ``key`` parameter, raising NameError on
    # every call.
    return lambda bbox1, bbox2: abs(bbox1[key] - bbox2[key]) < epsilon
def inside():
    """Returns a function that takes two 2D bounding boxes and computes
    whether the first one is inside the second one.

    The output function takes in two 2D bounding boxes (dicts with keys
    'x1', 'x2', 'y1', 'y2') and returns ``True`` if the first one is inside
    the second one (boundaries inclusive).

    Returns:
        A function that takes two 2D bounding boxes and returns ``True`` if
        the first one is inside the second one.
    """
    # Bug fix: the comparison was inverted — it tested whether the SECOND
    # box was contained in the first, contradicting the documented contract.
    # The first box is inside the second when all of its edges lie within
    # the second box's edges.
    return lambda bbox1, bbox2: (
        bbox1['x1'] >= bbox2['x1'] and
        bbox1['x2'] <= bbox2['x2'] and
        bbox1['y1'] >= bbox2['y1'] and
        bbox1['y2'] <= bbox2['y2'])
def contains():
    """Returns a function that takes two 2D bounding boxes and computes
    whether the first one contains the second one.

    The output function takes in two 2D bounding boxes (dicts with keys
    'x1', 'x2', 'y1', 'y2') and returns ``True`` if the first one contains
    the second one (boundaries inclusive).

    Returns:
        A function that takes two 2D bounding boxes and returns ``True`` if
        the first one contains the second one.
    """
    # Bug fix: this previously delegated to inside() with swapped arguments,
    # but inside()'s comparison was itself inverted, so the net effect
    # contradicted this docstring. Implement the containment check directly:
    # the first box contains the second when every edge of the second lies
    # within the first box's edges.
    return lambda bbox1, bbox2: (
        bbox2['x1'] >= bbox1['x1'] and
        bbox2['x2'] <= bbox1['x2'] and
        bbox2['y1'] >= bbox1['y1'] and
        bbox2['y2'] <= bbox1['y2'])
def _iou(bbox1, bbox2):
"""Compute intersection over union of two bounding boxes."""
x1 = max(bbox1['x1'], bbox2['x1'])
y1 = max(bbox1['y1'], bbox2['y1'])
x2 = min(bbox1['x2'], bbox2['x2'])
y2 = min(bbox1['y2'], bbox2['y2'])
if x2 <= x1 or y2 <= y1:
return 0
intersection_area = _area({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2})
union_area = _area(bbox1) + _area(bbox2) - intersection_area
return intersection_area / union_area
def iou_at_least(n):
    """Returns a function that takes two 2D bounding boxes and computes
    whether their intersection over union is at least ``n``.

    Args:
        n: Minimum IOU (float), inclusive.

    Returns:
        A function that takes in two 2D bounding boxes and returns ``True``
        if their IOU is at least ``n``.
    """
    # Fix: use >= to match the documented "at least n" contract; the
    # previous strict > excluded an IOU exactly equal to n.
    return lambda bbox1, bbox2: _iou(bbox1, bbox2) >= n
# List predicates
def length_exactly(n):
    """Returns a predicate testing whether a list has length exactly ``n``.

    Args:
        n: The length to check against.

    Returns:
        A function of one list ``l`` returning ``True`` if ``len(l)``
        equals ``n``.
    """
    def predicate(items):
        return len(items) == n
    return predicate
def length_at_least(n):
    """Returns a predicate testing whether a list has length at least ``n``.

    Args:
        n: The length to check against.

    Returns:
        A function of one list ``l`` returning ``True`` if ``len(l)`` is
        greater than or equal to ``n``.
    """
    def predicate(items):
        return len(items) >= n
    return predicate
def length_at_most(n):
    """Returns a predicate testing whether a list has length at most ``n``.

    Args:
        n: The length to check against.

    Returns:
        A function of one list ``l`` returning ``True`` if ``len(l)`` is
        less than or equal to ``n``.
    """
    def predicate(items):
        return len(items) <= n
    return predicate
def length_between(n1, n2):
    """Returns a predicate testing whether a list's length lies in
    ``[n1, n2]`` (inclusive).

    Args:
        n1: The minimum length of the list.
        n2: The maximum length of the list.

    Returns:
        A function of one list ``l`` returning ``True`` if ``len(l)`` is
        between ``n1`` and ``n2`` (inclusive).
    """
    def predicate(items):
        return n1 <= len(items) <= n2
    return predicate
import cloudpickle
from math import ceil
import multiprocessing as mp
import random
from tqdm import tqdm
from rekall.helpers import perf_count
from rekall import IntervalSetMapping
class TaskException(Exception):
    """Raised when a worker encounters an error while executing a task.

    Wrap the underlying error with the ``raise ... from`` syntax.

    Example::

        try:
            ...
        except Exception as e:
            raise TaskException() from e
    """
    def __repr__(self):
        # Surface the chained cause so the failure is visible at a glance.
        return "TaskException from " + repr(self.__cause__)
class RekallRuntimeException(Exception):
    """Raised when the Runtime itself encounters an error."""
    pass
class AbstractAsyncTaskResult():
    """Interface for a Future-like task result.

    Custom WorkerPool implementations must return objects with this
    interface from their ``map`` method.
    """

    def get(self):
        """Block until the value is ready, then return it.

        Raises:
            TaskException: if the task finished with an error.
        """
        raise NotImplementedError()

    def done(self):
        """Return whether the value is ready (non-blocking)."""
        raise NotImplementedError()
class _FutureWrapper(AbstractAsyncTaskResult):
    """Adapts an ``mp.pool.AsyncResult`` to the AsyncTaskResult interface,
    re-raising any worker error as a TaskException."""

    def __init__(self, future):
        self._future = future

    def get(self):
        try:
            return self._future.get()
        except Exception as e:
            # Preserve the original error as the chained cause.
            raise TaskException() from e

    def done(self):
        return self._future.ready()
class AbstractWorkerPool():
    """Interface for worker pools.

    Notes:
        A WorkerPool instance is specialized to running one function; the
        function to execute is supplied to the worker pool factory rather
        than appearing in this interface.
    """

    def map(self, tasks, done):
        """Map the tasks over the available workers in the pool.

        Args:
            tasks: A list of tasks to execute, each a set of arguments to
                run the function with.
            done: Callback invoked when any task finishes; it receives the
                finished task's arguments and, optionally, the error the
                task encountered.

        Returns:
            A list of AsyncTaskResults, one per task.
        """
        raise NotImplementedError()

    def shut_down(self):
        """Release the pool's resources after all tasks have finished."""
        raise NotImplementedError()
class InlineSingleProcessPool(AbstractWorkerPool):
    """Single-process WorkerPool that runs tasks lazily, in-line."""

    class _Lazy(AbstractAsyncTaskResult):
        """Defers execution until the result is first requested."""

        def __init__(self, getter, done_cb):
            self.getter = getter
            self.done_cb = done_cb

        def get(self):
            try:
                result = self.getter()
            except Exception as e:
                self.done_cb(e)
                raise TaskException() from e
            self.done_cb()
            return result

        def done(self):
            # Inline execution: the value is always "ready" on demand.
            return True

    def __init__(self, fn):
        """Initializes with the function to run."""
        self.fn = fn

    def map(self, tasks, done):
        results = []
        for task in tasks:
            # Bind the current task via default arguments to avoid the
            # late-binding closure pitfall.
            def runner(vids=task):
                return self.fn(vids)
            def notify(e=None, vids=task):
                done(vids, e)
            results.append(InlineSingleProcessPool._Lazy(runner, notify))
        return results

    def shut_down(self):
        # Nothing to release for the inline pool.
        pass
# Helper functions for creating child processes:
# We set the function to execute as a global variable on the child process
# to avoid pickling the function.
def _child_process_init(context):
    """Store *context* in the child process as the module-global
    GLOBAL_CONTEXT, so the function to execute never needs pickling."""
    global GLOBAL_CONTEXT
    GLOBAL_CONTEXT = context
def _apply_global_context_as_function(vids):
    """Call the function stashed in GLOBAL_CONTEXT (by
    ``_child_process_init``) on *vids* and return its result."""
    global GLOBAL_CONTEXT
    task_fn = GLOBAL_CONTEXT
    return task_fn(vids)
class ForkedProcessPool(AbstractWorkerPool):
    """WorkerPool implementation based on forked child processes.

    Forked workers inherit the parent's global context (e.g. global
    variables). Beware: forking a multithreaded program is tricky — any
    global thread pool object is copied into the child, but its threads are
    not actually running there.
    """

    def __init__(self, fn, num_workers):
        """Initializes the instance.

        Args:
            fn: The function to run in child processes.
            num_workers: Number of child processes to create.
        """
        # The function is installed as a global in each child via the
        # initializer, so it is never pickled.
        self._pool = mp.get_context("fork").Pool(
            processes=num_workers,
            initializer=_child_process_init,
            initargs=(fn,))

    def map(self, tasks, done_callback):
        async_results = []
        for task in tasks:
            # Bind the current task via default arguments to avoid the
            # late-binding closure pitfall.
            def on_success(result, vids=task):
                done_callback(vids)
            def on_error(err, vids=task):
                done_callback(vids, err)
            async_results.append(_FutureWrapper(self._pool.apply_async(
                _apply_global_context_as_function,
                args=(task,),
                callback=on_success,
                error_callback=on_error)))
        return async_results

    def shut_down(self):
        self._pool.terminate()
# When spawning, arguments to initializer are pickled.
# To allow arbitrary lambdas with closure, use cloudpickle to serialize the
# function to execute
def _apply_serialized_function(serialized_func, vids):
    """Deserialize a cloudpickle'd function and apply it to *vids*.

    Used by spawned workers, where arguments must be picklable; cloudpickle
    allows arbitrary lambdas with closures to cross the process boundary.
    """
    task_fn = cloudpickle.loads(serialized_func)
    return task_fn(vids)
class SpawnedProcessPool(AbstractWorkerPool):
    """WorkerPool implementation based on spawned child processes.

    Workers are fresh Python interpreters: they inherit no context from the
    main process — no globals, no imported modules. The function to run is
    shipped to them via cloudpickle.
    """

    def __init__(self, fn, num_workers, initializer=None):
        """Initializes the instance.

        Args:
            fn: The function to run in child processes.
            num_workers: Number of child processes to create.
            initializer: Optional function run in each child after it is
                created; useful for setting up per-worker resources.
        """
        self._pool = mp.get_context("spawn").Pool(
            processes=num_workers,
            initializer=initializer)
        # Serialized once up front; each task ships the same payload.
        self._pickled_fn = cloudpickle.dumps(fn)

    def map(self, tasks, done_callback):
        async_results = []
        for task in tasks:
            # Bind the current task via default arguments to avoid the
            # late-binding closure pitfall.
            def on_success(result, vids=task):
                done_callback(vids)
            def on_error(err, vids=task):
                done_callback(vids, err)
            async_results.append(_FutureWrapper(self._pool.apply_async(
                _apply_serialized_function,
                args=(self._pickled_fn, task),
                callback=on_success,
                error_callback=on_error)))
        return async_results

    def shut_down(self):
        self._pool.terminate()
# WorkerPool Factories
def inline_pool_factory(fn):
    """Create an InlineSingleProcessPool that will run *fn*."""
    return InlineSingleProcessPool(fn)
def get_forked_process_pool_factory(num_workers=mp.cpu_count()):
    """Returns a factory for ForkedProcessPool.

    Args:
        num_workers (optional): Number of child processes to fork.
            Defaults to the machine's CPU core count (evaluated once, at
            import time).

    Returns:
        A factory that takes the function to run and returns a
        ForkedProcessPool for it.
    """
    return lambda fn: ForkedProcessPool(fn, num_workers)
def get_spawned_process_pool_factory(num_workers=mp.cpu_count()):
    """Returns a factory for SpawnedProcessPool.

    Args:
        num_workers (optional): Number of child processes to spawn.
            Defaults to the machine's CPU core count (evaluated once, at
            import time).

    Returns:
        A factory that takes the function to run and returns a
        SpawnedProcessPool for it.
    """
    return lambda fn: SpawnedProcessPool(fn, num_workers)
class _WorkerPoolContext():
""" Wrapper class to allow `with` syntax on WorkerPools"""
def __init__(self, pool):
self._pool = pool
def __enter__(self):
return self._pool
def __exit__(self, *args):
self._pool.shut_down()
def _get_callback(pbar, args_with_err, print_error=True):
"""
Returns a callback that, when called after a task finishes, updates the
progress bar, and if there is an error, add the task to args_with_err and
optionally prints the error to stdout.
"""
def callback(task_args, err=None):
if err is None:
if pbar is not None:
pbar.update(len(task_args))
else:
if print_error:
print("Error when processing {0}:{1}".format(
task_args, repr(err)))
args_with_err.extend(task_args)
return callback
def _create_tasks(args, chunksize):
"""Splits args into tasks of `chunksize` each."""
total = len(args)
num_tasks = int(ceil(total/chunksize))
tasks = []
for task_i in range(num_tasks):
start = chunksize*task_i
end = min(total,start+chunksize)
tasks.append(args[start:end])
return tasks
def union_combiner(result1, result2):
    """Combine two partial results by calling the first's ``union`` method
    with the second."""
    return result1.union(result2)
def disjoint_domain_combiner(result1, result2):
    """A faster combiner than union_combiner for IntervalSetMapping.

    Assumes the results are IntervalSetMappings and that every chunk of
    work produces its own domain keys, disjoint from other chunks'.

    Args:
        result1, result2 (IntervalSetMapping): partial results from some
            chunks of the total work.

    Returns:
        An IntervalSetMapping that is the union of the two.

    Raises:
        RekallRuntimeException: If the results share any domain key.
    """
    grouped1 = result1.get_grouped_intervals()
    grouped2 = result2.get_grouped_intervals()
    overlap = set(grouped1.keys()) & set(grouped2.keys())
    if overlap:
        raise RekallRuntimeException(
            "DisjointDomainCombiner used on results"
            " with overlapping domains {0}".format(overlap))
    # Disjoint keys: a plain dict merge is safe and avoids interval-level
    # union work.
    return IntervalSetMapping({**grouped1, **grouped2})
def _pop_future_to_yield(futures):
"""Polling until a future is ready with exponential backoff"""
MAX_WAIT = 1
MIN_WAIT = 0.001
FACTOR = 1.5
def grow(wait_time):
return min(MAX_WAIT, wait_time * FACTOR)
from time import sleep
wait_time = MIN_WAIT
while True:
for i,f in enumerate(futures):
if f.done():
return futures.pop(i)
sleep(wait_time)
wait_time = grow(wait_time)
class Runtime():
    """Manages execution of function on large number of inputs.
    Given a function that can return results for a batch of inputs, and a
    potentially long list of inputs to run the function with, Runtime helps to
    divide the inputs into small chunks, also called tasks, and dispatches
    the tasks to a pool of workers created by worker_pool_factory. It also
    gracefully handles exceptions in workers and can assemble the partial
    results.
    An example function::
        def query(video_ids):
            # Gets the intervals in the input batch of videos
            frames_with_opposing_faces = ...
            # Returns a IntervalSetMapping with video_id as domain key.
            return frames_with_opposing_faces
        # A list of 100K video_ids
        ALL_VIDEO_IDS = ...
    In the example, query(ALL_VIDEO_IDS) is not practical to run in one go.
    To get the same results, one can use Runtime in one of two ways.
    The first way is to dispatch all tasks and wait::
        # Running the query on all videos, in chunks of 5 on 16 processes.
        rt = Runtime(get_forked_process_pool_factory(num_workers=16))
        # Will block until everything finishes
        # results is a IntervalSetMapping with all intervals found.
        results, failed_video_ids = rt.run(
            query, ALL_VIDEO_IDS, combiner=disjoint_domain_combiner,
            chunksize=5)
    The second way is to use iterator::
        # Get an iterator that yields partial results from each chunk of 5.
        rt = Runtime(get_forked_process_pool_factory(num_workers=16))
        gen = rt.get_result_iterator(query, ALL_VIDEO_IDS, chunksize=5)
        # Blocks until the first batch is done.
        # results_from_one_batch is a IntervalSetMapping with intervals
        # found in one task (a chunk of 5 videos).
        results_from_one_batch = next(gen)
    """
    def __init__(self, worker_pool_factory):
        """Initialized with a WorkerPool Factory
        Args:
            worker_pool_factory: A function that takes the query to execute,
                and returns a worker pool to execute the query.
        """
        # The factory is invoked lazily, once per run()/get_result_iterator()
        # call, so one Runtime can execute several different queries.
        self._get_worker_pool = worker_pool_factory
    @classmethod
    def inline(cls):
        """Inline Runtime executes each chunk in sequence in one process."""
        return cls(inline_pool_factory)
    def run(self, query, args, combiner=union_combiner,
            randomize=True, chunksize=1,
            progress=False, profile=False,
            print_error=True):
        """Dispatches all tasks to workers and waits until everything finishes.
        See class documentation for an example of how to use run().
        Exception raised in `query` are suppressed and the unsuccessful subset
        of `args` is returned at the end. However, such errors can be printed
        as soon as they occur.
        Args:
            query: A function that can return partial results for any batch of
                input arguments.
            args: A potentially long list of input arguments to execute the
                query with. NOTE(review): when ``randomize`` is True, this
                list is shuffled in place, i.e. the caller's list is mutated.
            combiner (optional): A function that takes two partial results and
                returns the combination of the two.
                Defaults to union_combiner which assumes the partial results
                have a `union` method.
            randomize (optional): Whether to create and dispatch tasks in
                random order.
                Defaults to True.
            chunksize (optional): The size of the input batch for each task.
                Defaults to 1.
            progress (optional): Whether to display a progress bar.
                Defaults to False.
            profile (optional): Whether to output wall time of various internal
                stages to stdout.
                Defaults to False.
            print_error (optional): Whether to output task errors to stdout.
                Defaults to True.
        Returns:
            A pair ``(query_output, args_with_err)`` where ``query_output`` is
            the combined results from successful tasks, and ``args_with_err``
            is a list that is a subset of args that failed to execute.
        Raises:
            RekallRuntimeException: If there was at least one input and every
                task failed.
        """
        with perf_count("Executing query in Runtime", enable=profile):
            with _WorkerPoolContext(self._get_worker_pool(query)) as pool:
                total_work = len(args)
                with tqdm(total=total_work, disable=not progress) as pbar:
                    with perf_count("Executing in workers", enable=profile):
                        # Failed argument chunks are appended here by the
                        # callback passed to pool.map below.
                        args_with_err = []
                        with perf_count("Dispatching tasks", enable=profile):
                            # NOTE(review): shuffles the caller's list in
                            # place rather than a copy.
                            if randomize:
                                random.shuffle(args)
                            async_results = pool.map(
                                    _create_tasks(args, chunksize),
                                    _get_callback(pbar, args_with_err,
                                        print_error))
                        combined_result = None
                        for future in async_results:
                            try:
                                r = future.get()
                            # Failed chunks were already recorded in
                            # args_with_err by the callback, so just skip.
                            except TaskException:
                                continue
                            if combined_result is None:
                                combined_result = r
                            else:
                                combined_result = combiner(combined_result, r)
                        if combined_result is None and total_work>0:
                            raise RekallRuntimeException("All tasks failed!")
                        return (combined_result, args_with_err)
    # NOTE(review): the dispatch_size default is evaluated once at class
    # definition time; cpu_count() is fixed for the process, so this is
    # equivalent to evaluating it per call, but worth knowing.
    def get_result_iterator(self, query, args, randomize=True, chunksize=1,
            print_error=True, dispatch_size=mp.cpu_count()):
        """Incrementally dispatches tasks as partial results are consumed.
        See class documentation for an example of how to use
        get_result_iterator().
        Exception raised in `query` are suppressed and if any tasks failed,
        will raise a RekallRuntimeException after all successful tasks' results
        have been yielded. However, such errors can be printed as soon as they
        occur.
        Args:
            query, args, randomize, chunksize, print_error: Same as in run().
            dispatch_size (int, optional): Number of tasks to dispatch at a
                time. In this mode, tasks are incrementally dispatched
                as partial results from previous tasks are yielded.
                If not positive, will dispatch all tasks at once.
                Defaults to the number of CPU cores.
        Yields:
            Partial results from each task.
        Raises:
            RekallRuntimeException: Raised after all successful task results
                have been yielded if there have been failed tasks.
        """
        with _WorkerPoolContext(self._get_worker_pool(query)) as pool:
            args_with_err = []
            # NOTE(review): shuffles the caller's list in place (see run()).
            if randomize:
                random.shuffle(args)
            tasks = _create_tasks(args, chunksize)
            # None or non-positive dispatch_size means "dispatch everything".
            if dispatch_size is None or dispatch_size<=0:
                dispatch_size = len(tasks)
            outstanding_tasks = tasks
            async_results = []
            num_finished_tasks = 0
            while num_finished_tasks < len(tasks):
                # Maybe make a dispatch
                # Top up the in-flight set once it drains to half the
                # dispatch size, so workers stay busy while we yield.
                num_to_yield = len(async_results)
                if (num_to_yield <= dispatch_size/2 and
                    len(outstanding_tasks) > 0):
                    task_batch = outstanding_tasks[:dispatch_size]
                    outstanding_tasks = outstanding_tasks[dispatch_size:]
                    async_results.extend(pool.map(
                        task_batch,
                        _get_callback(None, args_with_err, print_error)))
                # With randomize, yield whichever future finished first;
                # otherwise preserve task order.
                if randomize:
                    future_to_yield = _pop_future_to_yield(async_results)
                else:
                    future_to_yield = async_results.pop(0)
                num_finished_tasks += 1
                try:
                    r = future_to_yield.get()
                # Failure already recorded in args_with_err by the callback.
                except TaskException:
                    continue
                yield r
            # Only after all successful results are yielded do we surface
            # the accumulated failures.
            if len(args_with_err) > 0:
                raise RekallRuntimeException(
                    "The following tasks failed: {0}".format(
args_with_err)) | /rekallpy-0.3.2.tar.gz/rekallpy-0.3.2/rekall/runtime.py | 0.861974 | 0.183575 | runtime.py | pypi |
from collections.abc import MutableMapping
from operator import attrgetter
from types import MethodType
from tqdm import tqdm
from rekall.interval import Interval
from rekall.interval_set import IntervalSet
from rekall.helpers import perf_count
def _empty_set():
    """Return a fresh, empty IntervalSet."""
    empty = IntervalSet([])
    return empty
class IntervalSetMapping(MutableMapping):
    """A wrapper around a dictionary from key to IntervalSet.
    It uses method reflection to expose all the same methods as IntervalSet,
    and delegates the method to the underlying IntervalSet of each domain in
    the collection. When calling binary methods such as ``join`` or ``minus``
    on two IntervalSetMapping's, it will match up IntervalSet's by key and
    perform the method on the underlying IntervalSet's for each domain.
    For each in-system method of IntervalSet (i.e. the return value is an
    IntervalSet), the corresponding method on IntervalSetMapping returns an
    IntervalSetMapping as well.
    For each out-of-system method on IntervalSet, namely ``size``, ``empty``,
    ``fold``, and ``match``, the corresponding method on IntervalSetMapping
    returns a dictionary from the key to the result of the method call on the
    underlying IntervalSet.
    IntervalSetMapping exposes Python's getter/setter paradigm as well, so
    individual IntervalSet's can be referenced using bracket notation and their
    key.
    The methods to wrap from IntervalSet are defined by the class constants:
    UNARY_METHODS, BINARY_METHODS and OUT_OF_SYSTEM_UNARY_METHODS.
    Example:
        Here are some examples of how IntervalSetMapping reflects IntervalSet's
        methods::
            ism1 = IntervalSetMapping({
                1: IntervalSet(...),
                2: IntervalSet(...),
                10: IntervalSet(...)
            })
            ism2 = IntervalSetMapping({
                1: IntervalSet(...),
                3: IntervalSet(...),
                10: IntervalSet(...)
            })
            # Unary methods
            ism1.map(mapper) == IntervalSetMapping({
                1: ism1[1].map(mapper), # IntervalSet
                2: ism1[2].map(mapper), # IntervalSet
                10: ism1[10].map(mapper) # IntervalSet
            })
            # Binary methods
            ism1.join(ism2, ...) == IntervalSetMapping({
                1: ism1[1].join(ism2[1], ...), # IntervalSet
                10: ism1[10].join(ism2[10], ...) # IntervalSet
            })
            # Out of system unary methods:
            ism1.size() == {
                1: ism1[1].size(), # Number
                2: ism1[2].size(), # Number
                10: ism1[10].size() # Number
            }
    Attributes:
        UNARY_METHODS: List of methods that IntervalSetMapping reflects from
            IntervalSet and that will return a IntervalSetMapping where the
            IntervalSet under each group is transformed under the unary
            operation. See IntervalSet documentation for arguments and behavior
            for each method.
        BINARY_METHODS: List of methods that IntervalSetMapping reflects from
            IntervalSet and that will take a second IntervalSetMapping and
            will return an IntervalSetMapping where the binary operation is
            performed on the two IntervalSets with the same key. See
            IntervalSet documentation for arguments and behavior for each
            method.
        OUT_OF_SYSTEM_UNARY_METHODS: List of methods that IntervalSetMapping
            reflects from IntervalSet and that will return a dictionary
            mapping from IntervalSet keys to return values of the methods.
    """
    UNARY_METHODS = ["filter_size", "map", "filter", "group_by", "fold_to_set",
            "map_payload", "dilate", "group_by_axis", "coalesce", "split"]
    BINARY_METHODS = ["merge", "union", "join", "minus", "filter_against",
            "collect_by_interval"]
    OUT_OF_SYSTEM_UNARY_METHODS = ["size", "duration", "empty", "fold", "match"]
    def __new__(cls, *args, **kwargs):
        """Creates class instance and adds IntervalSet methods on it."""
        instance = super(IntervalSetMapping, cls).__new__(cls)
        # The reflected wrappers are bound per-instance (not on the class):
        # each MethodType closes over this particular instance.
        for method in cls.UNARY_METHODS:
            setattr(instance, method,
                    MethodType(
                        cls._get_wrapped_unary_method(method),
                        instance))
        for method in cls.BINARY_METHODS:
            setattr(instance, method,
                    MethodType(
                        cls._get_wrapped_binary_method(method),
                        instance))
        for method in cls.OUT_OF_SYSTEM_UNARY_METHODS:
            setattr(instance, method,
                    MethodType(
                        cls._get_wrapped_out_of_system_unary_method(method),
                        instance))
        return instance
    def __init__(self, grouped_intervals):
        """Initializes with a dictionary from key to IntervalSet.
        Args:
            grouped_intervals: A dictionary from key to IntervalSet.
        """
        # Stored by reference, not copied.
        self._grouped_intervals = grouped_intervals
    def __repr__(self):
        return str(self._grouped_intervals)
    # Makes this class pickleable
    def __getstate__(self):
        return self._grouped_intervals
    def __setstate__(self, grouped_intervals):
        self._grouped_intervals = grouped_intervals
    # Dictionary/MutableMapping Interface
    def __getitem__(self, key):
        # Missing keys read as an empty IntervalSet; nothing is inserted
        # into the underlying dict.
        return self._grouped_intervals.get(key, _empty_set())
    def __setitem__(self, key, value):
        self._grouped_intervals[key] = value
    def __delitem__(self, key):
        del self._grouped_intervals[key]
    def __iter__(self):
        # NOTE(review): iteration is in sorted key order, while keys(),
        # values() and items() below reflect the raw dict order.
        return sorted(list(self._grouped_intervals.keys())).__iter__()
    def __len__(self):
        return len(self._grouped_intervals)
    # Get keys, items, values from underlying dict
    def keys(self):
        return self._grouped_intervals.keys()
    def values(self):
        return self._grouped_intervals.values()
    def items(self):
        return self._grouped_intervals.items()
    @classmethod
    def from_iterable(cls, iterable, key_parser, bounds_parser,
            payload_parser=lambda _:None, progress=False, total=None):
        """Constructs an IntervalSetMapping from an iterable.
        Args:
            iterable: An iterable of arbitrary elements. Each element will
                become an interval in the collection.
            key_parser: A function that takes an element in iterable and
                returns the key for the interval.
            bounds_parser: A function that takes an element in iterable and
                returns the bounds for the interval.
            payload_parser (optional): A function that takes an element in
                iterable and returns the payload for the interval.
                Defaults to producing None for all elements.
            progress (Bool, optional): Whether to display a progress bar using
                tqdm. Defaults to False.
            total (int, optional): Total number of elements in iterable.
                Only used to estimate ETA for the progress bar, and only takes
                effect if progress is True.
        Returns:
            A IntervalSetMapping constructed from iterable and the parsers
            provided.
        Note:
            Everything in iterable will be materialized in RAM.
        """
        key_to_intervals = {}
        # Wrap in tqdm only when requested; pass total through when known.
        for row in (tqdm(iterable, total=total)
                if progress and total is not None else tqdm(iterable)
                if progress else iterable):
            interval = Interval(bounds_parser(row), payload_parser(row))
            key = key_parser(row)
            if key in key_to_intervals:
                key_to_intervals[key].append(interval)
            else:
                key_to_intervals[key] = [interval]
        return cls({key: IntervalSet(intervals) for key, intervals in
            key_to_intervals.items()})
    @classmethod
    def from_intervalset(cls, intervalset, key_fn):
        """Constructs an IntervalSetMapping from an IntervalSet by grouping
        by ``key_fn``.
        Args:
            intervalset (IntervalSet): An interval set containing all
                intervals to put in the mapping.
            key_fn: A function that takes an interval and returns the domain
                key.
        Returns:
            An IntervalSetMapping with the same intervals organized into
            domains by their key according to ``key_fn``.
        """
        def reducer(acc, interval):
            key = key_fn(interval)
            if key not in acc:
                acc[key] = [interval]
            else:
                acc[key].append(interval)
            return acc
        grouped = intervalset.fold(reducer, {})
        return cls({k:IntervalSet(v) for k,v in grouped.items()})
    def get_grouped_intervals(self):
        """Get dictionary from key to IntervalSet."""
        return self._grouped_intervals
    def get_flattened_intervalset(self):
        """Get an IntervalSet containing all intervals in all the
        IntervalSets."""
        output = []
        for intervals in self.get_grouped_intervals().values():
            output.extend(intervals.get_intervals())
        return IntervalSet(output)
    def add_key_to_payload(self):
        """Adds key to payload of each interval in each IntervalSet.
        If each interval in an IntervalSet with key K had payload P before, it
        now has the tuple ``(P, K)`` as payload.
        Returns:
            A new IntervalSetMapping with the transformed intervals.
        Note:
            The original IntervalSetMapping is unchanged. This is the
            same behavior as all unary methods of IntervalSet.
        """
        return IntervalSetMapping({
            k: intervalset.map_payload(lambda p:(p,k))
            for k, intervalset in self.get_grouped_intervals().items()})
    @staticmethod
    def _remove_empty_intervalsets(grouped_intervals):
        # Drop domains whose IntervalSet became empty, so wrapped
        # operations never leave empty entries behind.
        new_map = {}
        for key, intervalset in grouped_intervals.items():
            if not intervalset.empty():
                new_map[key] = intervalset
        return new_map
    @staticmethod
    def _get_wrapped_unary_method(name):
        # Factory for per-instance wrappers of in-system unary methods;
        # ``profile``/``progress_bar`` are consumed here, the rest of the
        # arguments are forwarded to the IntervalSet method.
        def method(self, *args, profile=False, progress_bar=False, **kwargs):
            with perf_count(name, profile):
                selfmap = self.get_grouped_intervals()
                keys_to_process = selfmap.keys()
                if progress_bar:
                    keys_to_process = tqdm(keys_to_process)
                def func(set1):
                    return getattr(IntervalSet, name)(set1,*args,**kwargs)
                results_map = {v:func(selfmap[v]) for v in keys_to_process}
                return IntervalSetMapping(
                    IntervalSetMapping._remove_empty_intervalsets(
                        results_map))
        return method
    @staticmethod
    def _get_wrapped_binary_method(name):
        # Factory for wrappers of binary methods: operates on the union of
        # both mappings' keys, substituting an empty set for missing keys.
        def method(self, other, *args, profile=False, progress_bar=False, **kwargs):
            with perf_count(name, profile):
                selfmap = self.get_grouped_intervals()
                othermap = other.get_grouped_intervals()
                keys = set(selfmap.keys()).union(othermap.keys())
                if progress_bar:
                    keys = tqdm(keys)
                def func(set1, set2):
                    return getattr(IntervalSet, name)(
                        set1,set2,*args,**kwargs)
                results_map = {v:
                    func(
                        selfmap.get(v, IntervalSet([])),
                        othermap.get(v, IntervalSet([]))) for v in keys}
                return IntervalSetMapping(
                    IntervalSetMapping._remove_empty_intervalsets(
                        results_map))
        return method
    @staticmethod
    def _get_wrapped_out_of_system_unary_method(name):
        # Factory for wrappers of out-of-system methods: returns a plain
        # dict of per-key results (empty domains are NOT removed here).
        def method(self, *args, profile=False, progress_bar=False, **kwargs):
            with perf_count(name, profile):
                selfmap = self.get_grouped_intervals()
                keys = selfmap.keys()
                if progress_bar:
                    keys = tqdm(keys)
                def func(set1):
                    return getattr(IntervalSet, name)(set1,*args,**kwargs)
                return {v:func(selfmap[v]) for v in keys}
return method | /rekallpy-0.3.2.tar.gz/rekallpy-0.3.2/rekall/interval_set_mapping.py | 0.923541 | 0.604574 | interval_set_mapping.py | pypi |
class Interval:
    """A single Interval.
    An Interval is a wrapper around a Bounds and a payload. The payload can be
    referenced with the 'payload' key - i.e. ``interval['payload']``, as can
    the fields of the Bounds. The bounds field itself can also be referenced
    with type 'bounds' key.
    Attributes:
        bounds: Bounds object.
        payload: payload object.
    """
    def __init__(self, bounds, payload=None):
        """Initializes an interval with certain bounds and payload.
        Args:
            bounds: Bounds for this Interval.
            payload (optional): Metadata of arbitrary type. Defaults to None.
        """
        self.bounds = bounds
        self.payload = payload
    def __getitem__(self, arg):
        """Access bounds, payload, or a co-ordinate of bounds using key access.
        Strings 'bounds' and 'payload' are hard-coded to return the bounds or
        payload attributes, respectively.
        """
        if arg == 'bounds':
            return self.bounds
        if arg == 'payload':
            return self.payload
        # Any other key is delegated to the bounds object's own indexing.
        return self.bounds[arg]
    def __setitem__(self, key, item):
        """Set bounds, payload, or a co-ordinate of bounds using key access.
        Strings 'bounds' and 'payload' are hard-coded to reference the bounds
        or payload attributes, respectively.
        """
        if key == 'bounds':
            self.bounds = item
        elif key == 'payload':
            self.payload = item
        else:
            # Any other key is delegated to the bounds object.
            self.bounds[key] = item
    def __repr__(self):
        """String representation is ``<Interval {bounds} payload:{payload}>``."""
        return "<Interval {} payload:{}>".format(self.bounds, self.payload)
    def __lt__(self, other):
        # Ordering is by bounds only; payload is ignored.
        return self['bounds'] < other['bounds']
    def copy(self):
        """Returns a copy of the Interval."""
        # Bounds are copied via their own copy(); the payload is shared
        # (shallow copy), not duplicated.
        return Interval(self.bounds.copy(), self.payload)
    def combine(self,
                other,
                bounds_combiner,
                payload_combiner=lambda p1, p2: p1):
        """Combines two Intervals into one by separately combining the bounds
        and the payload.
        Args:
            other: The other Interval to combine with.
            bounds_combiner: The function to combine the bounds. Takes two
                Bounds objects as input and returns one Bounds object.
            payload_combiner: The function to combine the two payloads. Takes
                two payload objects as input and returns one payload object.
                Defaults to keeping the first (self's) payload.
        Returns:
            A new Interval combined using ``bounds_combiner`` and
            ``payload_combiner``.
        """
        return Interval(bounds_combiner(self.bounds, other.bounds),
                        payload_combiner(self.payload, other.payload))
    # NOTE(review): deliberately (it appears) defined without ``self`` — the
    # first positional argument is the predicate, so this is meant to be
    # called as ``Interval.P(pred)``; calling it on an instance would bind
    # the interval itself as ``pred``. Confirm before changing.
    def P(pred):
        """This wraps a predicate so it is applied to the payload of Intervals
        instead of the Intervals themselves.
        The output function expects one or more Intervals as input (depending
        on how many arguments ``pred`` expects) and applies the predicate to
        the payloads of the Intervals instead of the Interavls themselves.
        Arg:
            pred: The predicate to wrap.
        Returns:
            An output function that applies ``pred`` to payloads.
        """
        def new_pred(*interval_args):
            return pred(*[i.payload for i in interval_args])
        return new_pred
    def size(self, axis=None):
        """Get the size of the bounds along some axis.
        Args:
            axis (optional): The axis to compute size on. Represented as a pair
                of co-ordinates, such as ``('t1', 't2')``. Defaults to ``None``,
                which uses the primary axis of ``self``'s Bounds.
        Returns:
            The size of the bounds across some axis.
        """
        return self.bounds.size(axis)
    def to_json(self, payload_to_json):
        """Converts the interval to a JSON object.
        Args:
            payload_to_json: Function that converts the payload to a JSON object.
        Returns:
            JSON object for the interval
        """
        return {
            'bounds': self.bounds.to_json(),
            'payload': payload_to_json(self.payload)
} | /rekallpy-0.3.2.tar.gz/rekallpy-0.3.2/rekall/interval.py | 0.965755 | 0.70912 | interval.py | pypi |
import sys
from contextlib import contextmanager
INFTY = "infty"
def panic(message):
    """Write ``message`` to stderr, then terminate the process with status 1."""
    sys.stderr.write("{}\n".format(message))
    sys.exit(1)
# Helper functions to define bboxes representing regions of the screen
def make_bbox(x1, y1, x2, y2):
    """Build a 2D bounding box.
    Args:
        x1: The x1 value of the bounding box.
        y1: The y1 value of the bounding box.
        x2: The x2 value of the bounding box.
        y2: The y2 value of the bounding box.
    Returns:
        A dict with keys 'x1', 'y1', 'x2', 'y2' representing a bounding box.
    """
    return dict(x1=x1, y1=y1, x2=x2, y2=y2)
def full_frame():
    """Returns a bounding box representing the full frame."""
    return {'x1': 0., 'y1': 0., 'x2': 1., 'y2': 1.}
def left_half(bbox=None):
    """Returns a bounding box covering the left half of ``bbox``.
    Args:
        bbox (optional): Bounding box dict to split. Defaults to the full
            frame. (``None`` sentinel replaces the original mutable-dict
            default, which was shared across all calls.)
    """
    if bbox is None:
        bbox = full_frame()
    return make_bbox(bbox['x1'], bbox['y1'],
                     (bbox['x1'] + bbox['x2']) / 2., bbox['y2'])
def right_half(bbox=None):
    """Returns a bounding box covering the right half of ``bbox``.
    Args:
        bbox (optional): Bounding box dict to split. Defaults to the full
            frame. (``None`` sentinel replaces the original mutable-dict
            default, which was shared across all calls.)
    """
    if bbox is None:
        bbox = full_frame()
    return make_bbox((bbox['x1'] + bbox['x2']) / 2., bbox['y1'],
                     bbox['x2'], bbox['y2'])
def top_half(bbox=None):
    """Returns a bounding box covering the top half of ``bbox``.
    Args:
        bbox (optional): Bounding box dict to split. Defaults to the full
            frame. (``None`` sentinel replaces the original mutable-dict
            default, which was shared across all calls.)
    """
    if bbox is None:
        bbox = full_frame()
    return make_bbox(bbox['x1'], bbox['y1'],
                     bbox['x2'], (bbox['y1'] + bbox['y2']) / 2.)
def bottom_half(bbox=None):
    """Returns a bounding box covering the bottom half of ``bbox``.
    Args:
        bbox (optional): Bounding box dict to split. Defaults to the full
            frame. (``None`` sentinel replaces the original mutable-dict
            default, which was shared across all calls.)
    """
    if bbox is None:
        bbox = full_frame()
    return make_bbox(bbox['x1'], (bbox['y1'] + bbox['y2']) / 2.,
                     bbox['x2'], bbox['y2'])
def top_left(bbox=None):
    """Returns a bounding box covering the top left quadrant of ``bbox``.
    Defaults to the full frame (``None`` sentinel avoids a shared mutable
    default dict).
    """
    if bbox is None:
        bbox = full_frame()
    return left_half(top_half(bbox))
def top_right(bbox=None):
    """Returns a bounding box covering the top right quadrant of ``bbox``.
    Defaults to the full frame (``None`` sentinel avoids a shared mutable
    default dict).
    """
    if bbox is None:
        bbox = full_frame()
    return right_half(top_half(bbox))
def bottom_left(bbox=None):
    """Returns a bounding box covering the bottom left quadrant of ``bbox``.
    Defaults to the full frame (``None`` sentinel avoids a shared mutable
    default dict).
    """
    if bbox is None:
        bbox = full_frame()
    return left_half(bottom_half(bbox))
def bottom_right(bbox=None):
    """Returns a bounding box covering the bottom right quadrant of ``bbox``.
    Defaults to the full frame (``None`` sentinel avoids a shared mutable
    default dict).
    """
    if bbox is None:
        bbox = full_frame()
    return right_half(bottom_half(bbox))
def center(bbox=None):
    """Returns a bounding box covering a quarter of ``bbox``, starting in the
    middle.
    Args:
        bbox (optional): Bounding box dict. Defaults to the full frame
            (``None`` sentinel avoids a shared mutable default dict).
    """
    if bbox is None:
        bbox = full_frame()
    width = bbox['x2'] - bbox['x1']
    height = bbox['y2'] - bbox['y1']
    # Bug fix: the original called ``bbox(...)`` — a dict is not callable
    # (TypeError at runtime). The intent is clearly make_bbox(...).
    return make_bbox(bbox['x1'] + width / 4.,
                     bbox['y1'] + height / 4.,
                     bbox['x2'] - width / 4.,
                     bbox['y2'] - height / 4.)
# Performance profling util
@contextmanager
def perf_count(name, enable=True):
    """Prints wall time for the code block to STDOUT.
    Args:
        name: Label used in the printed start/end messages.
        enable (optional): When False, the block runs with no timing or
            output. Defaults to True.
    Example:
        with perf_count("test code"):
            sleep(10)
        # Writes to stdout:
        # test code starts.
        # test code ends after 10.01 seconds
    """
    # Bug fix: ``perf_counter`` was never imported in this module (only
    # ``sys`` and ``contextmanager`` are), so the enabled path raised
    # NameError. Import it locally to keep the module's import block intact.
    from time import perf_counter
    if not enable:
        yield
    else:
        print("{0} starts.".format(name))
        s = perf_counter()
        yield
        # ``s`` and ``t`` names are kept: the trailing print statement
        # (outside this edit) formats the elapsed time as ``t-s``.
        t = perf_counter()
print("{0} ends after {1:.2f} seconds".format(name, t-s)) | /rekallpy-0.3.2.tar.gz/rekallpy-0.3.2/rekall/helpers.py | 0.792464 | 0.723102 | helpers.py | pypi |
from time import strftime, time
import os
import sys
import pickle
class Tuner:
    """Base class for all Tuners (see sub-classes for details).
    args:
        search_space (dict): A dictionary of parameters to search over.
            See note below for more details.
        eval_fn: Given a configuration, evaluate the black box function and
            return the score.
        maximize (bool): Maximize the output of ``eval_fn`` if True,
            otherwise minimize it.
        budget (int): Maximum number of times to call the evaluation
            function.
        log (bool): Whether to log results
        log_dir (string): Directory to log all results to
        run_dir (string): Directory to log results from a set of runs
        run_name (string): Name of this run
        start_config (dict): Some tuners ask for a starting configuration.
            If start_config is specified, start with this config.
        start_score (float): If start_config is specified, you can also specify
            its score if you know it ahead of time.
        score_fn: Your eval function may not return exactly the value you
            want to optimize. This function parses the output of `eval_fn`
            to pass to the optimizer.
        score_log_fn: Your eval function may not return exactly what you
            want to log. This function parses the output of `eval_fn` to
            log.
    Example::
        def eval_config(params):
            # Run the Rekall query
            query_results = rekall_query(
                param1 = params['param1'],
                param2 = params['param2'])
            # Evaluate the results
            score = evaluate(query_results)
            return score
        search_space = {
            'param1': [0.0, 1.0, 2.0],          # discrete
            'param2': { 'range': (10.0, 20.0) } # linear range
        }
        tuner = RandomTuner(search_space, eval_config, budget = 50)
        best_score, best_config, score_history, execution_times, total_cost = tuner.tune()
    """
    def __init__(
        self,
        search_space,
        eval_fn,
        maximize=True,
        budget=500,
        log=True,
        log_dir=None,
        run_dir=None,
        run_name=None,
        start_config=None,
        start_score=None,
        score_fn=lambda x: x,
        score_log_fn=lambda x: x
    ):
        # Mutable tuning state, updated by evaluate_config() as the run
        # progresses.
        self.scores = []
        self.execution_times = []
        self.best_config = start_config
        self.best_score = start_score
        self.cost = 0
        self.search_space = search_space
        self.eval_fn = eval_fn
        self.maximize = maximize
        self.budget = budget
        self.log = log
        self.orig_log_dir = log_dir
        self.orig_run_dir = run_dir
        self.orig_run_name = run_name
        self.start_config = start_config
        self.start_score = start_score
        self.score_fn = score_fn
        self.score_log_fn = score_log_fn
        if self.log:
            # Logging subdirectory
            # Layout: <log_dir>/<run_dir (default: date)>/<run_name
            # (default: time)>/; created eagerly at construction time.
            self.init_date = strftime("%Y_%m_%d")
            self.init_time = strftime("%H_%M_%S")
            self.log_dir = log_dir or os.getcwd()
            run_dir = run_dir or self.init_date
            run_name = run_name or self.init_time
            self.log_rootdir = os.path.join(self.log_dir, run_dir)
            self.log_subdir = os.path.join(self.log_dir, run_dir, run_name)
            if not os.path.exists(self.log_subdir):
                os.makedirs(self.log_subdir)
            self.save_path = os.path.join(self.log_subdir, 'best_config.pkl')
            self.report_path = os.path.join(self.log_subdir, 'tuner_report.pkl')
            self.log_path = os.path.join(self.log_subdir, 'log.txt')
    def evaluate_config(self, config):
        """Evaluate the config.
        Runs ``eval_fn`` on ``config``, records the score, wall time and
        cost, and updates the best-so-far config/score. Returns the score
        (a sentinel worst-possible value if ``eval_fn`` raised).
        """
        # Sentinel "worst possible" score, returned if eval_fn raises.
        score = -1000000 if self.maximize else 1000000
        try:
            start = time()
            eval_fn_output = self.eval_fn(config)
            score = self.score_fn(eval_fn_output)
            self.cost += 1
            self.scores.append(self.score_log_fn(eval_fn_output))
            if (self.best_score is None or
                (self.maximize and score > self.best_score) or
                (not self.maximize and score < self.best_score)):
                self.best_score = score
                self.best_config = config
                if self.log:
                    self.log_msg('New best score: {}, current cost: {}'.format(
                        score, self.cost))
            end = time()
            self.execution_times.append(end - start)
        # NOTE(review): bare ``except`` swallows everything, including
        # KeyboardInterrupt/SystemExit, and hides the traceback — consider
        # narrowing to ``except Exception`` and logging the full error.
        except:
            print('Error:', sys.exc_info()[0])
        return score
    def log_msg(self, msg):
        """Log something to the log file."""
        if self.log:
            with open(self.log_path, 'a') as f:
                f.write('{}\n'.format(msg))
                # NOTE(review): redundant — the ``with`` block already
                # closes the file on exit.
                f.close()
    def tune(self, **kwargs):
        """Run the tuning algorithm!
        Delegates to tune_impl(), then (if logging) pickles a report and
        returns ``(best_score, best_config, scores, execution_times, cost)``.
        """
        self.tune_impl(**kwargs)
        if self.log:
            with open(self.report_path, 'wb') as f:
                pickle.dump({
                    'best_score': self.best_score,
                    'best_config': self.best_config,
                    'scores': self.scores,
                    'execution_times': self.execution_times,
                    'cost': self.cost
                }, f)
        return (self.best_score, self.best_config, self.scores,
            self.execution_times, self.cost)
    def tune_impl(self, **kwargs):
        """The implementation of the tuning algorithm.
        Sub-classes should implement this!"""
pass | /rekallpy-0.3.2.tar.gz/rekallpy-0.3.2/rekall/tuner/tuner.py | 0.668123 | 0.321087 | tuner.py | pypi |
from scipy import optimize
from rekall.tuner import Tuner
class ScipyLBFGSBTuner(Tuner):
def tune_impl(self, **kwargs):
"""Call scipy's L-BFGS-B optimizer for bounded optimization.
Currently does not work with discrete variables!
Reference: https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html#optimize-minimize-lbfgsb
args in kwargs:
init_method: How to initialize the first config.
One of ['average', 'random'].
If not specified, default to 'average'.
'average' initializes the config at the average of continuous ranges,
'random' randomly initializes the config.
If start_config was specified upon initialization, use that value always.
"""
if 'init_method' in kwargs:
init_method = kwargs['init_method']
else:
init_method = 'average'
if self.start_config is not None:
config = self.start_config
elif init_method is 'average':
coordinates = list(self.search_space.keys())
config = {}
# Initialize the config
for coordinate in coordinates:
param = self.search_space[coordinate]
if isinstance(param, dict):
if 'range' in param:
minval, maxval = param['range']
config[coordinate] = (maxval + minval) / 2
# elif 'subset' in param:
# choices = param['subset']
# config[k] = choices[:random.randint(1, len(param['subset']))]
elif isinstance(param, list):
config[k] = param[0]
elif init_method is 'random':
config = RandomTuner.generate_configs(self.search_space, 1)[0]
else:
print('{} is invalid init_method!'.format(init_method))
return
sorted_vars = sorted(list(config.keys()))
def config_to_array(config):
return [
config[var]
for var in sorted_vars
]
def array_to_config(arr):
return {
var: value
for var, value in zip(sorted_vars, arr)
}
def optimization_function(arr):
config = array_to_config(arr)
score = self.evaluate_config(config)
if self.maximize:
score = -1 * score
return score
bounds = [
self.search_space[var]['range']
for var in sorted_vars
]
optimize.minimize(
optimization_function,
config_to_array(config),
method = 'L-BFGS-B',
bounds = bounds,
options = {
'maxfun': self.budget
}
) | /rekallpy-0.3.2.tar.gz/rekallpy-0.3.2/rekall/tuner/scipy_l_bfgs_b.py | 0.754644 | 0.245571 | scipy_l_bfgs_b.py | pypi |
from scipy import optimize
from rekall.tuner import Tuner
class ScipyNelderMeadTuner(Tuner):
def tune_impl(self, **kwargs):
"""Call scipy's Nelder-Mead optimizer. Ignores bounds in search space.
Currently does not work with discrete variables!
Reference: https://docs.scipy.org/doc/scipy/reference/optimize.minimize-neldermead.html#optimize-minimize-neldermead
args in kwargs:
init_method: How to initialize the first config.
One of ['average', 'random'].
If not specified, default to 'average'.
'average' initializes the config at the average of continuous ranges,
'random' randomly initializes the config.
If start_config was specified upon initialization, use that value always.
"""
if 'init_method' in kwargs:
init_method = kwargs['init_method']
else:
init_method = 'average'
if self.start_config is not None:
config = self.start_config
elif init_method is 'average':
coordinates = list(self.search_space.keys())
config = {}
# Initialize the config
for coordinate in coordinates:
param = self.search_space[coordinate]
if isinstance(param, dict):
if 'range' in param:
minval, maxval = param['range']
config[coordinate] = (maxval + minval) / 2
# elif 'subset' in param:
# choices = param['subset']
# config[k] = choices[:random.randint(1, len(param['subset']))]
elif isinstance(param, list):
config[k] = param[0]
elif init_method is 'random':
config = RandomTuner.generate_configs(self.search_space, 1)[0]
else:
print('{} is invalid init_method!'.format(init_method))
return
sorted_vars = sorted(list(config.keys()))
def config_to_array(config):
return [
config[var]
for var in sorted_vars
]
def array_to_config(arr):
return {
var: value
for var, value in zip(sorted_vars, arr)
}
def optimization_function(arr):
config = array_to_config(arr)
score = self.evaluate_config(config)
if self.maximize:
score = -1 * score
return score
optimize.minimize(
optimization_function,
config_to_array(config),
method = 'Nelder-Mead',
options = {
'maxfev': self.budget
}
) | /rekallpy-0.3.2.tar.gz/rekallpy-0.3.2/rekall/tuner/scipy_nelder_mead.py | 0.754553 | 0.230551 | scipy_nelder_mead.py | pypi |
from rekall.tuner import Tuner
from rekall.tuner.random import RandomTuner
import random
class CoordinateDescentTuner(Tuner):
"""This tuner performs coordinate descent over the search space."""
    def line_search(self, config, cur_param, epsilon, budget, cur_score = 0):
        '''
        Vary cur_param within the bounds of the search_space (holding the other
        parameters constant), maximizing the accuracy function.
        Let X be the current param, and let F(X) represent the accuracy function.
        Let Y be the range of X in the search_space.
        If F(X + epsilon * Y) > F(X):
            * Find the smallest positive integer l such that
              F(X + l * epsilon * Y) < F(X + (l - 1) * epsilon * Y), and return
              X + (l - 1) * epsilon * Y as the new value of the parameter
        Otherwise:
            * Find the smallest positive integer l such that
              F(X - l * epsilon * Y) < F(X - (l - 1) * epsilon * Y), and return
              X - (l - 1) * epsilon * Y as the new value of the parameter
        Args:
            config: Full configuration dict. NOTE(review): mutated in place
                while probing; on return config[cur_param] may not equal the
                returned value — caller appears expected to write it back.
            cur_param: Name of the (continuous) parameter to vary.
            epsilon: Step size as a fraction of the parameter's range.
            budget: Max evaluations this line search may spend.
            cur_score (optional): Score of config before the search.
        Returns:
            A pair (best value found for cur_param, its score).
        '''
        minval, maxval = self.search_space[cur_param]['range']
        Y = maxval - minval
        delta = epsilon * Y
        orig_val = config[cur_param]
        # If a single step would leave the range in both directions, there
        # is nothing to try.
        if orig_val + delta > maxval and orig_val - delta < minval:
            return orig_val, cur_score
        # Determine direction
        cur_val = orig_val + delta
        config[cur_param] = cur_val
        local_cost = 0
        score = self.evaluate_config(config)
        local_cost += 1
        # If the score is worse, try other direction
        if ((self.maximize and score < cur_score) or
            (not self.maximize and score > cur_score)):
            delta = -1 * delta
            cur_val = orig_val + delta
            config[cur_param] = cur_val
            score = self.evaluate_config(config)
            local_cost += 1
            # Neither direction works
            if ((self.maximize and score < cur_score) or
                (not self.maximize and score > cur_score)):
                return orig_val, cur_score
        # Find the optimal value of l
        prev_score = score
        # Respect both the local line-search budget and the tuner's global
        # evaluation budget (self.cost is advanced by evaluate_config).
        while local_cost < budget and self.cost < self.budget:
            if cur_val + delta > maxval or cur_val + delta < minval:
                break
            cur_val += delta
            config[cur_param] = cur_val
            score = self.evaluate_config(config)
            local_cost += 1
            # If this score is worse
            # NOTE(review): compares against cur_score (the pre-search
            # score), not prev_score — possibly intentional; confirm.
            if ((self.maximize and score < cur_score) or
                (not self.maximize and score > cur_score)):
                # Go back one step
                cur_val -= delta
                break
            prev_score = score
        return cur_val, prev_score
def tune_impl(self, **kwargs):
    '''Coordinate descent over the search space.

    Start with the midpoint of the search space (or a random/explicit start
    config), then cycle through co-ordinates.  For each co-ordinate:
      * If the co-ordinate is discrete (a list), try all the choices.
      * If the co-ordinate is numerical (a dict with a 'range'), run line
        search with the current alpha.
    If a round changes nothing, revisits only known configs, or fails to
    improve for five rounds, shrink alpha by ``decay_rate``.

    args:
        alpha: initial alpha (step fraction) for line search.  Required.
        decay_rate: rate to decay alpha.  Required.
        init_method: How to initialize the first config.  One of
            ``['average', 'random']``; defaults to 'average'.  Ignored when
            ``self.start_config`` was given at construction time.
        line_search_budget: Budget to give to line search.  Must be at
            least 2.  Defaults to 10.
        randomize_param_order: Whether to randomize the order of
            coordinates each round.  Defaults to False.
    '''
    if 'alpha' not in kwargs or 'decay_rate' not in kwargs:
        print('Coordinate descent requires alpha and decay_rate!')
        return
    alpha = kwargs['alpha']
    decay_rate = kwargs['decay_rate']
    init_method = kwargs.get('init_method', 'average')
    line_search_budget = kwargs.get('line_search_budget', 10)
    randomize_param_order = bool(kwargs.get('randomize_param_order', False))
    coordinates = sorted(list(self.search_space.keys()))
    # Initialize the starting config.
    if self.start_config is not None:
        config = self.start_config
    elif init_method == 'average':
        config = {}
        for coordinate in coordinates:
            param = self.search_space[coordinate]
            if isinstance(param, dict):
                if 'range' in param:
                    minval, maxval = param['range']
                    config[coordinate] = (maxval + minval) / 2
            elif isinstance(param, list):
                # BUG FIX: this previously read ``config[k] = param[0]``,
                # where ``k`` is undefined (NameError at runtime).
                config[coordinate] = param[0]
    elif init_method == 'random':
        config = RandomTuner.generate_configs(self.search_space, 1)[0]
    else:
        print('{} is invalid init_method!'.format(init_method))
        return
    if self.start_config is not None and self.start_score is not None:
        score = self.start_score
        self.best_score = score
        self.log_msg('Starting score: {}'.format(score))
    else:
        score = self.evaluate_config(config)

    def config_to_point(config):
        # Hashable fingerprint of a config, used to detect revisits.
        return tuple(config[coord] for coord in sorted(coordinates))

    visited_points = set()
    cur_score = score
    rounds = 0
    rounds_since_last_improvement = 0
    # NOTE(review): last_best_score is never updated inside the loop, so
    # "improvement" is measured against the *initial* score -- preserved.
    last_best_score = cur_score
    while self.cost < self.budget:
        self.log_msg('Round {}, current cost {}'.format(rounds, self.cost))
        changed = False
        new_configs = False
        if randomize_param_order:
            random.shuffle(coordinates)
        for coordinate in coordinates:
            if self.cost > self.budget:
                break
            self.log_msg('Coordinate {}, current cost {}'.format(coordinate, self.cost))
            orig_val = config[coordinate]
            param = self.search_space[coordinate]
            if isinstance(param, list):
                # Discrete param: exhaustively try every other choice.
                max_score = cur_score
                best_choice = orig_val
                for choice in param:
                    if choice == orig_val:
                        continue
                    config[coordinate] = choice
                    score = self.evaluate_config(config)
                    if ((self.maximize and score > max_score) or
                            (not self.maximize and score < max_score)):
                        best_choice = choice
                        max_score = score
                    if self.cost >= self.budget:
                        break
                self.log_msg('Old: {}, new: {}'.format(orig_val, best_choice))
                if best_choice != orig_val:
                    changed = True
                # Always restore the winning choice -- config currently
                # holds the last probe.
                config[coordinate] = best_choice
                cur_score = max_score
            elif isinstance(param, dict):
                if 'range' in param:
                    # Numerical param: line search along this coordinate.
                    best_choice, max_score = self.line_search(
                        config, coordinate, alpha, line_search_budget,
                        cur_score=cur_score)
                    self.log_msg('Old: {}, New: {}'.format(orig_val, best_choice))
                    if best_choice != orig_val:
                        changed = True
                    config[coordinate] = best_choice
                    cur_score = max_score
            config_point = config_to_point(config)
            if config_point not in visited_points:
                visited_points.add(config_point)
                new_configs = True
        if cur_score == last_best_score:
            rounds_since_last_improvement += 1
        else:
            rounds_since_last_improvement = 0
        if not changed or rounds_since_last_improvement >= 5 or not new_configs:
            # Stuck: shrink the line-search step and try again.
            alpha *= decay_rate
            self.log_msg('New alpha: {}, current cost {}'.format(alpha, self.cost))
            if alpha < .000001:
                break
            rounds_since_last_improvement = 0
        rounds += 1
import math
from rekall.tuner import Tuner
from rekall.tuner.random import RandomTuner
from rekall.tuner.successive_halving import SuccessiveHalvingTuner
class HyperbandTuner(Tuner):
    """This tuner performs a hyperband search over the search space.

    See https://arxiv.org/abs/1603.06560.
    """
    @classmethod
    def finite_horizon_hyperband_schedule(cls, max_iter, eta):
        """Compute the finite-horizon hyperband bracket schedule.

        Args:
            max_iter: Maximum number of iterations per configuration.
            eta: Proportion of configs to cut in each successive-halving
                round.

        Returns:
            A list of bracket dicts with keys 'K' (initial number of
            configurations), 'eta' (halving ratio), 'T' (initial iterations
            per configuration), and 'N' (number of halving rounds).
        """
        logeta = lambda x: math.log(x) / math.log(eta)
        s_max = int(logeta(max_iter))
        B = (s_max + 1) * max_iter
        hyperband_schedule = []
        for s in reversed(range(s_max + 1)):
            # n is the number of configurations to start with
            # r is the number of iterations to start with
            n = int(math.ceil(int(B / max_iter / (s + 1)) * eta ** s))
            r = max_iter * eta ** (-s)
            hyperband_schedule.append({
                'K': int(n),
                'eta': int(eta),
                'T': int(r),
                # number of rounds is ceiling(logeta(n + .01))
                'N': math.ceil(logeta(n + .01))
            })
        return hyperband_schedule
    @classmethod
    def estimate_cost(cls, schedule):
        """Estimate the total evaluation cost of running ``schedule``."""
        cost = 0
        for bracket in schedule:
            cost += SuccessiveHalvingTuner.estimate_cost(
                bracket['eta'], bracket['N'], bracket['K'], bracket['T']
            )
        return cost
    def tune_impl(self, **kwargs):
        """
        Implement hyperband search over parameter space, with a given tuner
        to train iterations.

        See ``finite_horizon_hyperband_schedule`` to print out the schedule
        for given values of ``max_iter`` and ``eta``.

        args:
            max_iter: Maximum number of iterations.
            eta: Proportion of configs to cut in each round of successive
                halving.
            tuner: ``Tuner`` class to use for internal training rounds.
            tuner_params: Optional params to pass to the internal tuner.
        """
        if ('eta' not in kwargs or
                'max_iter' not in kwargs or
                'tuner' not in kwargs):
            # BUG FIX: the message previously named the wrong class
            # ('SuccessiveHalvingTuner') and misspelled 'max_iter' as
            # 'max_itera'.
            print('HyperbandTuner requires max_iter, eta, tuner params.')
            return
        eta = kwargs['eta']
        max_iter = kwargs['max_iter']
        tuner = kwargs['tuner']
        tuner_params = kwargs.get('tuner_params', {})
        schedule = HyperbandTuner.finite_horizon_hyperband_schedule(max_iter, eta)
        for bracket in schedule:
            self.log_msg('Bracket: {}'.format(bracket))
            if self.cost >= self.budget:
                self.log_msg('Cost {} surpassed budget {}, ending rounds early.'.format(
                    self.cost, self.budget
                ))
                break
            # Give each bracket its own budget so the inner tuner stops in
            # time.
            bracket_cost = SuccessiveHalvingTuner.estimate_cost(
                bracket['eta'], bracket['N'], bracket['K'], bracket['T']
            )
            successive_halving_tuner = SuccessiveHalvingTuner(
                self.search_space, self.eval_fn, maximize=self.maximize,
                budget=bracket_cost, log=False
            )
            (best_score_bracket, best_config_bracket, scores,
             execution_times, cost) = successive_halving_tuner.tune(
                eta=bracket['eta'], N=bracket['N'], K=bracket['K'],
                T=bracket['T'], tuner=tuner, tuner_params=tuner_params
            )
            # Fold the bracket's bookkeeping into this tuner's totals.
            self.scores += scores
            self.cost += cost
            self.execution_times += execution_times
            if (self.best_score is None or
                    (self.maximize and best_score_bracket > self.best_score) or
                    (not self.maximize and best_score_bracket < self.best_score)):
                self.best_score = best_score_bracket
                self.best_config = best_config_bracket
                self.log_msg('New best score: {}, current cost: {}'.format(
                    best_score_bracket, self.cost))
from rekall.tuner import Tuner
from rekall.tuner.random import RandomTuner
class SuccessiveHalvingTuner(Tuner):
    """This tuner does successive halving over the search space."""
    @classmethod
    def estimate_cost(cls, eta, N, K, T):
        '''Estimate the total evaluation cost of successive halving.

        Args:
            eta: Halving ratio.
            N: Number of rounds.
            K: Initial number of configurations.
            T: Initial number of training iterations per configuration.
        '''
        cost = 0
        num_configs = K
        num_epochs = T
        for cur_round in range(N):
            cost += num_configs * num_epochs
            # Each round keeps 1/eta of the configs and trains them eta
            # times longer.
            num_configs = int(num_configs / eta)
            num_epochs = int(num_epochs * eta)
        return cost
    def tune_impl(self, **kwargs):
        """
        Performs successive halving - start with K random configurations,
        each running for T iterations of some sub-tuner.

        In each round, take the 1 / eta top configurations, and in the next
        round train for eta times more iterations.

        args:
            eta: Halving ratio.
            N: Number of rounds.
            K: Initial number of configurations.
            T: Initial number of training iterations per configuration.
                (The docstring previously mislabeled this as "number of
                rounds".)
            tuner: ``Tuner`` class to use for internal training rounds.
            tuner_params: Optional params to pass to the internal tuner.
        """
        if ('eta' not in kwargs or
                'N' not in kwargs or
                'K' not in kwargs or
                'T' not in kwargs or
                'tuner' not in kwargs):
            print('SuccessiveHalvingTuner requires eta, N, K, T, tuner params.')
            return
        eta = kwargs['eta']
        N = kwargs['N']
        K = kwargs['K']
        T = kwargs['T']
        tuner = kwargs['tuner']
        tuner_params = kwargs.get('tuner_params', {})
        num_configs = K
        cur_configs = RandomTuner.generate_configs(self.search_space, num_configs)
        config_scores = []
        num_epochs = T
        for cur_round in range(N):
            self.log_msg('Round {}, {} configs, {} epochs'.format(
                cur_round, len(cur_configs), num_epochs))
            best_configs_and_scores = []
            for i, config in enumerate(cur_configs):
                if self.cost >= self.budget:
                    self.log_msg('Cost {} surpassed budget {}, ending rounds early.'.format(
                        self.cost, self.budget
                    ))
                    break
                cur_best_config = config
                # Score each config once so the sub-tuner can start from a
                # known score; survivors carry their score between rounds.
                if len(config_scores) <= i:
                    config_scores.append(self.evaluate_config(config))
                cur_best_score = config_scores[i]
                # The first round already spent one evaluation per config.
                training_iterations = num_epochs if cur_round > 0 else num_epochs - 1
                new_tuner = tuner(
                    self.search_space,
                    self.eval_fn,
                    maximize=self.maximize,
                    budget=training_iterations,
                    log=False,
                    start_config=cur_best_config.copy(),
                    start_score=cur_best_score
                )
                (cur_best_score, cur_best_config, scores,
                 execution_times, cost) = new_tuner.tune(**tuner_params)
                self.scores += scores
                self.cost += cost
                self.execution_times += execution_times
                if (self.best_score is None or
                        (self.maximize and cur_best_score > self.best_score) or
                        (not self.maximize and cur_best_score < self.best_score)):
                    self.best_score = cur_best_score
                    self.best_config = cur_best_config
                    self.log_msg('New best score: {}, current cost: {}'.format(
                        cur_best_score, self.cost))
                best_configs_and_scores.append((cur_best_score, cur_best_config))
            # BUG FIX: this previously sorted ascending unconditionally, so
            # when maximizing, the *worst* configurations survived each
            # round.  Sort best-first according to the optimization
            # direction.
            best_configs_and_scores = sorted(
                best_configs_and_scores,
                key=lambda score_and_config: score_and_config[0],
                reverse=self.maximize)
            num_configs = int(num_configs / eta)
            num_epochs = int(num_epochs * eta)
            if num_configs < 1:
                num_configs = 1
            cur_configs = [config for score, config in best_configs_and_scores[:num_configs]]
            config_scores = [score for score, config in best_configs_and_scores[:num_configs]]
from rekall.bounds import Bounds, utils
from rekall.predicates import overlaps
class Bounds1D(Bounds):
    """A one-dimensional (temporal) bound.

    Carries the co-ordinates 't1' and 't2' -- the start and end of the
    temporal dimension.  There are no built-in casts, since there is only
    one dimension.
    """
    def __init__(self, t1, t2):
        """Construct a Bounds1D from explicit 't1' and 't2' values.

        Args:
            t1: The value for 't1'.
            t2: The value for 't2'.

        Returns:
            A Bounds1D object with 't1' and 't2' co-ordinates.
        """
        self.data = dict(t1=t1, t2=t2)
    @classmethod
    def fromTuple(cls, t1t2_tuple):
        """Construct a Bounds1D from a two-item tuple.

        Args:
            t1t2_tuple: A tuple of length two.  The items can be any type
                with an ordering function; the first becomes 't1' and the
                second becomes 't2'.

        Returns:
            A Bounds1D with 't1' and 't2' taken from ``t1t2_tuple``.
        """
        start, end = t1t2_tuple
        return cls(start, end)
    def __lt__(self, other):
        """Sort by 't1', breaking ties with 't2'."""
        return (self['t1'], self['t2']) < (other['t1'], other['t2'])
    def __repr__(self):
        """String representation is ``'t1:val t2:val'``."""
        return 't1:{} t2:{}'.format(self['t1'], self['t2'])
    def primary_axis(self):
        """Time is the primary (and only) axis."""
        return ('t1', 't2')
    def span(self, other):
        """Return the minimum Bounds1D spanning both ``self`` and ``other``."""
        return Bounds1D.fromTuple(utils.bounds_span(
            (self['t1'], self['t2']),
            (other['t1'], other['t2'])))
    def intersect(self, other):
        """Return the Bounds1D where ``self`` and ``other`` overlap, or
        ``None`` if they do not overlap at all."""
        if not overlaps()(self, other):
            return None
        return Bounds1D.fromTuple(utils.bounds_intersect(
            (self['t1'], self['t2']),
            (other['t1'], other['t2'])))
    def copy(self):
        """Return a fresh Bounds1D with the same co-ordinates."""
        return Bounds1D(self['t1'], self['t2'])
    def T():
        """Return a tuple naming the time axis.

        Note: deliberately takes no ``self``; call it on the class, e.g.
        ``Bounds1D.T()``.
        """
        return ('t1', 't2')
from rekall.bounds import Bounds, utils
from rekall.predicates import overlaps
class Bounds3D(Bounds):
    """Object representing a three-dimensional (time, x, y) bound.

    The class has co-ordinates 't1', 't2', 'x1', 'x2', 'y1', 'y2',
    representing start and end co-ordinates in the time, x, and y dimensions
    respectively.

    This class has two built-in one-dimensional casts - ``X()`` and ``Y()``
    cast the time dimension to the x and y dimensions so that temporal
    predicates can be used on one-dimensional spatial dimensions.
    """
    def __init__(self, t1, t2, x1=0., x2=1., y1=0., y2=1.):
        """Initialize this Bounds3D object by manually passing in all six
        co-ordinates.

        Args:
            t1: 't1' value.
            t2: 't2' value.
            x1: 'x1' value (defaults to 0.).
            x2: 'x2' value (defaults to 1.).
            y1: 'y1' value (defaults to 0.).
            y2: 'y2' value (defaults to 1.).

        Returns:
            A Bounds3D object with the six co-ordinates specified by the
            arguments.
        """
        self.data = {
            't1': t1,
            't2': t2,
            'x1': x1,
            'x2': x2,
            'y1': y1,
            'y2': y2
        }
    @classmethod
    def fromTuple(cls, tuple_3d):
        """Initialize a Bounds3D object with a tuple of length two or six.

        Args:
            tuple_3d: A tuple of length two or six. The items represent, in
                order, 't1', 't2', 'x1', 'x2', 'y1', and 'y2', respectively.
                If the tuple is only of length two, 'x1' and 'y1' get set to
                0., and 'x2' and 'y2' get set to 1.

        Returns:
            A Bounds3D object with the six co-ordinates specified by
            ``tuple_3d``.
        """
        return cls(*list(tuple_3d))
    def __lt__(self, other):
        """Ordering is by 't1', 't2', 'x1', 'x2', 'y1', 'y2'."""
        return (self['t1'], self['t2'], self['x1'], self['x2'], self['y1'],
                self['y2']) < (other['t1'], other['t2'], other['x1'],
                               other['x2'], other['y1'], other['y2'])
    def __repr__(self):
        """String representation is
        ``'t1:val t2:val x1:val x2:val y1:val y2:val'``."""
        return 't1:{} t2:{} x1:{} x2:{} y1:{} y2:{}'.format(
            self['t1'], self['t2'], self['x1'], self['x2'], self['y1'],
            self['y2'])
    def primary_axis(self):
        """Primary axis is time."""
        return ('t1', 't2')
    def copy(self):
        """Returns a copy of this bound."""
        return Bounds3D(self['t1'], self['t2'], self['x1'], self['x2'],
                        self['y1'], self['y2'])
    def T(pred):
        """Returns a function that transforms predicates by casting accesses
        to 't1' to 't1' and accesses to 't2' to 't2'. This doesn't actually
        transform anything, but it's a nice helper function for readability.

        Note: like ``X()``/``Y()``, this takes the predicate (not ``self``)
        as its only argument; call it on the class, e.g. ``Bounds3D.T(pred)``.

        Arg:
            pred: The predicate to cast.

        Returns:
            The same predicate as ``pred``.
        """
        return Bounds.cast({'t1': 't1', 't2': 't2'})(pred)
    def X(pred):
        """Returns a function that transforms predicates by casting accesses
        to 't1' to 'x1' and accesses to 't2' to 'x2'.

        Example:
            Here is an example of casting an example predicate::

                # This predicate tests whether a bound's 't2' value is
                # greater than its 't1' value
                def example_pred(bounds):
                    return bounds['t2'] > bounds['t1']

                # t1 = 0, t2 = 1, x1 = 1, x2 = 0, y1 = 1, y2 = 0
                higher_t2_lower_x2 = Bounds3D(0, 1, 1, 0, 1, 0)

                example_pred(higher_t2_lower_x2)  # True, since t2 > t1
                Bounds3D.X(example_pred)(higher_t2_lower_x2)  # False, since x2 < x1

        Arg:
            pred: The predicate to cast.

        Returns:
            The same predicate as ``pred``, except accesses to 't1' are cast
            to 'x1', and accesses to 't2' are cast to 'x2'.
        """
        return Bounds.cast({'t1': 'x1', 't2': 'x2'})(pred)
    def Y(pred):
        """Returns a function that transforms predicates by casting accesses
        to 't1' to 'y1' and accesses to 't2' to 'y2'.

        Example:
            Here is an example of casting an example predicate::

                # This predicate tests whether a bound's 't2' value is
                # greater than its 't1' value
                def example_pred(bounds):
                    return bounds['t2'] > bounds['t1']

                # t1 = 0, t2 = 1, x1 = 1, x2 = 0, y1 = 1, y2 = 0
                higher_t2_lower_y2 = Bounds3D(0, 1, 1, 0, 1, 0)

                example_pred(higher_t2_lower_y2)  # True, since t2 > t1
                Bounds3D.Y(example_pred)(higher_t2_lower_y2)  # False, since y2 < y1

        Arg:
            pred: The predicate to cast.

        Returns:
            The same predicate as ``pred``, except accesses to 't1' are cast
            to 'y1', and accesses to 't2' are cast to 'y2'.
        """
        return Bounds.cast({'t1': 'y1', 't2': 'y2'})(pred)
    def XY(pred):
        """Returns a function that transforms predicates by casting accesses
        to 'x1' to 'x1', 'x2' to 'x2', 'y1' to 'y1', and 'y2' to 'y2'. This
        doesn't actually transform anything, but it's a nice helper function
        for readability.

        Arg:
            pred: The predicate to cast.

        Returns:
            The same predicate as ``pred``.
        """
        return Bounds.cast({
            'x1': 'x1',
            'x2': 'x2',
            'y1': 'y1',
            'y2': 'y2'
        })(pred)
    def combine_per_axis(self, other, t_combiner, x_combiner, y_combiner):
        """Combines two Bounds using a one-dimensional Combiner function for
        each axis.

        Args:
            other: The other Bounds3D to combine with.
            t_combiner: A function that takes two bounds and returns one.
                Takes two tuples as input and returns a tuple of two items.
                Used to combine temporal bounds.
            x_combiner: A function that takes two bounds and returns one.
                Takes two tuples as input and returns a tuple of two items.
                Used to combine X bounds.
            y_combiner: A function that takes two bounds and returns one.
                Takes two tuples as input and returns a tuple of two items.
                Used to combine Y bounds.

        Returns:
            A new Bounds3D combined using the three combination functions.
        """
        new_t = t_combiner((self['t1'], self['t2']),
                           (other['t1'], other['t2']))
        new_x = x_combiner((self['x1'], self['x2']),
                           (other['x1'], other['x2']))
        new_y = y_combiner((self['y1'], self['y2']),
                           (other['y1'], other['y2']))
        return Bounds3D.fromTuple(list(new_t) + list(new_x) + list(new_y))
    def span(self, other):
        """Returns the minimum Bound spanning ``self`` and ``other`` in all
        three dimensions.

        Returns:
            A single Bounds3D spanning ``self`` and ``other``.
        """
        return self.combine_per_axis(other, utils.bounds_span,
                                     utils.bounds_span, utils.bounds_span)
    def intersect_time_span_space(self, other):
        """Returns the bound intersecting ``other`` in time and spanning
        ``self`` and ``other`` in space. Returns ``None`` if ``self`` and
        ``other`` do not overlap in time.

        Returns:
            A single Bounds3D at the intersection of ``self`` and ``other``
            in time but spanning them in space, or ``None`` if they do not
            overlap in time.
        """
        if overlaps()(self, other):
            return self.combine_per_axis(other, utils.bounds_intersect,
                                         utils.bounds_span, utils.bounds_span)
        else:
            return None
    def expand_to_frame(self):
        """Returns a bound with the same time extent but with full spatial
        extent.

        Assumes that X/Y co-ordinates are in relative spatial co-ordinates.
        """
        return Bounds3D(self['t1'], self['t2'], 0., 1., 0., 1.)
    def length(self):
        """Returns the length of the time interval."""
        return utils.bound_size((self['t1'], self['t2']))
    def width(self):
        """Returns the extent of the bound along the X dimension.

        (Previously misdocumented as the width "of the time interval".)
        """
        return utils.bound_size((self['x1'], self['x2']))
    def height(self):
        """Returns the extent of the bound along the Y dimension.

        (Previously misdocumented as the height "of the time interval".)
        """
        return utils.bound_size((self['y1'], self['y2']))
    def T_axis():
        """Returns a tuple representing the time axis.

        Note: takes no ``self``; call as ``Bounds3D.T_axis()``.
        """
        return ('t1', 't2')
    def X_axis():
        """Returns a tuple representing the X axis.

        Note: takes no ``self``; call as ``Bounds3D.X_axis()``.
        """
        return ('x1', 'x2')
    def Y_axis():
        """Returns a tuple representing the Y axis.

        Note: takes no ``self``; call as ``Bounds3D.Y_axis()``.
        """
        return ('y1', 'y2')
class Bounds:
    """Base class for bound objects: a thin wrapper around a dict.

    The keys of the wrapped dict typically name physical co-ordinates, such
    as 't1'/'t2' for time or 'x1'/'x2' for space.

    Every subclass must set a ``data`` dict on initialization; co-ordinates
    can then be read and written with ``[]`` notation.  Subclasses should
    also implement:

    * ``__lt__`` for sorting,
    * ``__repr__`` for printing,
    * ``primary_axis`` returning the (start, end) key pair of the major
      axis used for optimization (typically ``('t1', 't2')`` for videos),
    * ``copy`` returning a bound with the same data.

    Two generic facilities are provided here: ``combine`` merges two bounds
    with a caller-supplied combiner function, and ``cast`` re-maps the keys
    a predicate function looks up, so predicates written against one axis
    can be reused on another.

    Example::

        def print_t(obj):
            print(obj['t'])

        my_obj = { 'x': 1 }

        # Prints 1: lookups of 't' are redirected to 'x'.
        Bounds.cast({'t': 'x'})(print_t)(my_obj)

    Attributes:
        data: dict mapping from co-ordinate keys to co-ordinate values
    """
    def __getitem__(self, arg):
        """Read co-ordinate ``arg`` from ``self.data``."""
        return self.data[arg]
    def __setitem__(self, key, item):
        """Write co-ordinate ``key`` into ``self.data``."""
        self.data[key] = item
    def combine(self, other, combiner):
        """Combine this bound with ``other`` via ``combiner``.

        Args:
            other: The other Bound to combine with.
            combiner: A function taking two Bounds (``self`` and ``other``)
                and returning a single new Bound.

        Returns:
            The output of ``combiner(self, other)``.
        """
        return combiner(self, other)
    def cast(schema):
        """Build a predicate transformer that re-maps key lookups.

        The returned function takes a predicate ``p`` and produces ``p'``:
        for every ``dict``-like argument ``obj`` of ``p`` and every key
        ``k`` in ``schema``, a lookup ``obj[k]`` inside ``p'`` reads
        ``obj[schema[k]]`` instead; keys absent from ``schema`` pass
        through unchanged.

        Note: deliberately takes no ``self``; call it on the class, e.g.
        ``Bounds.cast({'t1': 'x1'})(pred)``.

        Example::

            def print_t(obj):
                print(obj['t'])

            my_obj = { 'x': 1 }
            Bounds.cast({'t': 'x'})(print_t)(my_obj)   # prints 1

        Args:
            schema: A ``dict`` of key re-mappings applied to the target
                function's arguments.

        Returns:
            A function transforming predicates as described above.
        """
        class _Remapped:
            # Read-only proxy that redirects item lookups through ``schema``.
            def __init__(self, wrapped, mapping):
                self._wrapped = wrapped
                self._mapping = mapping
            def __getitem__(self, key):
                return self._wrapped[self._mapping.get(key, key)]
        def transform(pred):
            def remapped_pred(*args):
                return pred(*(_Remapped(arg, schema) for arg in args))
            return remapped_pred
        return transform
    def size(self, axis=None):
        """Extent of the bound along an axis.

        Args:
            axis (optional): The axis to measure, as a pair of co-ordinate
                keys such as ``('t1', 't2')``.  Defaults to ``None``, which
                uses this bound's ``primary_axis``.

        Returns:
            The difference between the axis end and start co-ordinates.
        """
        if axis is None:
            axis = self.primary_axis()
        return self[axis[1]] - self[axis[0]]
    def to_json(self):
        """Return the underlying co-ordinate dict (JSON-serializable if the
        values are)."""
        return self.data
    def __lt__(self, other):
        """Comparison between bounds; subclasses implement this."""
        pass
    def __repr__(self):
        """String representation; subclasses implement this."""
        pass
    def primary_axis(self):
        """Primary axis used for optimizations; subclasses implement this."""
        pass
    def copy(self):
        """Return a bound with the same data; subclasses implement this."""
        pass
from airflow.utils.decorators import apply_defaults
from rekcurd_airflow.operators.rekcurd_operator import RekcurdOperator
from airflow.exceptions import AirflowException
import json
class ModelSwitchOperator(RekcurdOperator):
    """
    Switch the model used by a running Rekcurd service.

    :param app_id: The targetted Rekcurd application ID.
    :type app_id: integer
    :param service_id: The targetted Rekcurd service ID.
    :type service_id: integer
    :param model_id: ID of the model to be switched to.
        The targetted service will use this model.
    :type model_id: integer
    :param model_provide_task_id: ID of the task providing model_id by
        returning value.  If `model_id` is NOT None, this param is ignored.
    :type model_provide_task_id: string
    """
    @apply_defaults
    def __init__(self,
                 app_id,
                 service_id,
                 timeout=300,
                 model_id=None,
                 model_provide_task_id=None,
                 *args, **kwargs):
        super().__init__(
            endpoint='/api/applications/{}/services/{}'.format(app_id, service_id),
            method='PUT',
            timeout=timeout,
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            *args,
            **kwargs)
        # Exactly one source for the model ID must be supplied; when both
        # are given, the direct ID wins.
        if model_id is None and model_provide_task_id is None:
            raise ValueError('Value must be assigned to either `model_id` or `model_provide_task_id`.')
        if model_id is not None and model_provide_task_id is not None:
            self.log.warning('`model_provide_task_id` is ignored because `model_id` is not None')
        self._model_id = model_id
        self._xcom_task_id = model_provide_task_id
    def execute(self, context):
        """Issue the PUT request switching the service to the target model."""
        # Resolve the model ID lazily from the upstream task's return value
        # when it was not given directly.
        if self._model_id is None:
            self._model_id = context['ti'].xcom_pull(task_ids=self._xcom_task_id)
        self.data = "model_id={}".format(self._model_id)
        result = json.loads(super().execute(context))
        if not result.get('status'):
            raise AirflowException(result['message'])
        self.log.info(result['message'])
from airflow.utils.decorators import apply_defaults
from rekcurd_airflow.operators.rekcurd_operator import RekcurdOperator
from airflow.exceptions import AirflowException
from airflow.hooks.http_hook import HttpHook
import json
from requests import Request
from urllib.parse import urljoin
class ModelUploadOperator(RekcurdOperator):
    """
    Upload a Rekcurd model.

    :param app_id: The targetted Rekcurd application ID.
    :type app_id: integer
    :param model_file_path: file path to the model to be uploaded
    :type model_file_path: string
    :param model_description: description of the model
    :type model_description: string
    :param model_provide_task_id: ID of the task providing model information
        by xcom_push.  If `model_file_path` is NOT None, this param is
        ignored.
    """
    # NOTE: the docstring above previously appeared *below* these constants,
    # so it was never assigned to ``__doc__``; it has been moved to the
    # proper position.
    # XCom keys under which an upstream task may publish the model bytes and
    # its description.
    MODEL_KEY = 'rekcurd_model_key'
    MODEL_DESCRIPTION_KEY = 'rekcurd_model_desc_key'
    @apply_defaults
    def __init__(self,
                 app_id,
                 timeout=300,
                 model_file_path=None,
                 model_description=None,
                 model_provide_task_id=None,
                 *args, **kwargs):
        super().__init__(
            endpoint='/api/applications/{}/models'.format(app_id),
            timeout=timeout,
            method=None,
            *args,
            **kwargs)
        # The model must come either from a local file or from an upstream
        # task; the local file wins when both are given.
        if model_file_path is None and model_provide_task_id is None:
            raise ValueError('Value must be assigned to either `model_file_path` or `model_provide_task_id`.')
        if model_file_path is not None and model_provide_task_id is not None:
            self.log.warning('`model_provide_task_id` is ignored because `model_file_path` is not None')
        self.__model_path = model_file_path
        self.__desc = model_description
        self.__xcom_task_id = model_provide_task_id
    def execute(self, context):
        """Upload the model and return the ID Rekcurd assigned to it."""
        model, desc = self.get_model_data(context)
        result = json.loads(self.upload(model, desc))
        if result.get('status'):
            self.log.info(result['message'])
        else:
            raise AirflowException(result['message'])
        model_id = self.get_model_id(desc)
        self.log.info('ID of the uploaded model: {}'.format(model_id))
        return model_id
    def get_model_data(self, context):
        """Return ``(model_bytes, description)`` from XCom or the local file."""
        if self.__model_path is None:
            model = context['ti'].xcom_pull(
                key=self.MODEL_KEY,
                task_ids=self.__xcom_task_id)
            desc = context['ti'].xcom_pull(
                key=self.MODEL_DESCRIPTION_KEY,
                task_ids=self.__xcom_task_id)
            # Fall back to the description given at construction time when
            # the upstream task did not publish one.
            if desc is None:
                desc = self.__desc
        else:
            with open(self.__model_path, 'rb') as modelfile:
                model = modelfile.read()
            desc = self.__desc
        return model, desc
    def upload(self, model, desc):
        """POST the model as a multipart upload; return the response body."""
        http = HttpHook('POST', http_conn_id=self.http_conn_id)
        session = http.get_conn(self.headers)
        req = Request('POST',
                      urljoin(http.base_url, self.endpoint),
                      data={'description': desc},
                      files={'file': model},
                      headers=self.headers)
        prepped_request = session.prepare_request(req)
        response = http.run_and_check(session, prepped_request, self.extra_options)
        return response.text
    def get_model_id(self, desc):
        """Look up the uploaded model by description.

        Descriptions are not guaranteed unique, so the most recently
        registered matching model is returned.

        Raises:
            AirflowException: if no model with ``desc`` is found.
        """
        http = HttpHook('GET', http_conn_id=self.http_conn_id)
        response = http.run(self.endpoint, headers=self.headers, extra_options=self.extra_options)
        models = json.loads(response.text)
        model = None
        for m in models:
            if m['description'] == desc:
                if model is None or model['register_date'] < m['register_date']:
                    model = m
        if model is None:
            raise AirflowException('The uploaded model was not found in Rekcurd')
        return model['model_id']
from airflow.utils.decorators import apply_defaults
from rekcurd_airflow.operators.rekcurd_operator import RekcurdOperator
from airflow.exceptions import AirflowException
import json
class ModelEvaluateOperator(RekcurdOperator):
    """
    Evaluate a Rekcurd model against a registered evaluation data set.

    :param app_id: The targetted Rekcurd application ID.
    :type app_id: integer
    :param overwrite: True if overwrite result even if the evaluated result
        already exists.
    :type overwrite: bool
    :param model_id: ID of the model to be evaluated.
    :type model_id: integer
    :param model_provide_task_id: ID of the task providing model_id by
        returning value.  If `model_id` is NOT None, this param is ignored.
    :type model_provide_task_id: string
    :param evaluation_id: ID of the evaluation data.
    :type evaluation_id: integer
    :param evaluation_provide_task_id: ID of the task providing
        evaluation_id by returning value.  If `evaluation_id` is NOT None,
        this param is ignored.
    :type evaluation_provide_task_id: string
    """
    @apply_defaults
    def __init__(self,
                 app_id,
                 timeout=300,
                 overwrite=False,
                 model_id=None,
                 model_provide_task_id=None,
                 evaluation_id=None,
                 evaluation_provide_task_id=None,
                 *args, **kwargs):
        super().__init__(
            endpoint='/api/applications/{}/evaluate'.format(app_id),
            method='POST',
            timeout=timeout,
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            *args,
            **kwargs)
        # Both the model and the evaluation data need exactly one source
        # each; direct IDs take precedence over upstream task IDs.
        if model_id is None and model_provide_task_id is None:
            raise ValueError('Value must be assigned to either `model_id` or `model_provide_task_id`.')
        if evaluation_id is None and evaluation_provide_task_id is None:
            raise ValueError('Value must be assigned to either `evaluation_id` or `evaluation_provide_task_id`.')
        if model_id is not None and model_provide_task_id is not None:
            self.log.warning('`model_provide_task_id` is ignored because `model_id` is not None')
        if evaluation_id is not None and evaluation_provide_task_id is not None:
            self.log.warning('`evaluation_provide_task_id` is ignored because `evaluation_id` is not None')
        self.__model_id = model_id
        self.__evaluation_id = evaluation_id
        self.__model_task_id = model_provide_task_id
        self.__evaluation_task_id = evaluation_provide_task_id
        self.__overwrite = overwrite
    def execute(self, context):
        """Run the evaluation; return the parsed JSON result on success.

        Raises:
            AirflowException: when the API reports a failed evaluation.
        """
        # Resolve IDs from upstream task return values when not given
        # directly.
        if self.__model_id is None:
            self.__model_id = context['ti'].xcom_pull(task_ids=self.__model_task_id)
        if self.__evaluation_id is None:
            self.__evaluation_id = context['ti'].xcom_pull(task_ids=self.__evaluation_task_id)
        self.data = "model_id={}&evaluation_id={}&overwrite={}".format(
            self.__model_id, self.__evaluation_id, str(self.__overwrite).lower())
        result = json.loads(super().execute(context))
        if result.get('status'):
            # BUG FIX: the log message previously misspelled 'successfully'
            # ('succcessfully').
            self.log.info('successfully evaluated.')
        else:
            raise AirflowException('failed to evaluate.')
        return result
import datetime
import math
from flask_restplus import Namespace, fields, Resource, reqparse, inputs
from . import status_model, apply_rekcurd_to_kubernetes, load_kubernetes_deployment_info
from rekcurd_dashboard.core import RekcurdDashboardClient
from rekcurd_dashboard.models import db, ApplicationModel, ServiceModel
from rekcurd_dashboard.protobuf import rekcurd_pb2
# Namespace under which every service-deployment endpoint is registered.
service_deployment_api_namespace = Namespace('service_deployments', description='Service Deployment API Endpoint.')

# Generic {status, message} payload returned by write operations.
success_or_not = service_deployment_api_namespace.model('Success', status_model)

# Marshalling schema describing a Kubernetes service deployment (GET responses).
service_deployment_params = service_deployment_api_namespace.model('Deployment', {
    'application_name': fields.String(
        readOnly=True,
        description='Application name.'
    ),
    'service_id': fields.String(
        readOnly=True,
        description='Service ID.'
    ),
    'display_name': fields.String(
        required=True,
        description='Display name.',
        example='dev-001'
    ),
    'description': fields.String(
        required=False,
        description='Description.',
        example='This is a sample.'
    ),
    'service_level': fields.String(
        required=True,
        description='Service level. [development/beta/staging/sandbox/production]',
        example='development'
    ),
    'version': fields.String(
        required=False,
        description='Rekcurd gRPC spec version. Default is the latest version.',
        example='v1'
    ),
    'insecure_host': fields.String(
        required=False,
        description='Rekcurd server insecure host. Default is "[::]".',
        example='[::]'
    ),
    'insecure_port': fields.Integer(
        required=False,
        description='Rekcurd server insecure port. Default is "5000".',
        example=5000
    ),
    'replicas_default': fields.Integer(
        required=False,
        description='Number of pod at beginning. Default is "1".',
        example=1
    ),
    'replicas_minimum': fields.Integer(
        required=False,
        description='Minimum number of pod for auto-scaling. Default is "1".',
        example=1
    ),
    'replicas_maximum': fields.Integer(
        required=False,
        description='Maximum number of pod for auto-scaling. Default is "1".',
        example=1
    ),
    'autoscale_cpu_threshold': fields.Integer(
        required=False,
        description='Threshold of CPU usage for auto-scaling. Default "80".',
        example=80
    ),
    'policy_max_surge': fields.Integer(
        required=False,
        description='Maximum number of surged pod when updating. Default "ceil(0.25 * <replicas_default>)".',
        example=1
    ),
    'policy_max_unavailable': fields.Integer(
        required=False,
        description='Maximum number of unavailable pod when updating. Default "floor(0.25 * <replicas_default>)".',
        example=0
    ),
    'policy_wait_seconds': fields.Integer(
        required=False,
        description='Booting second of your service. You MUST specify the accurate number. '
                    'This value become a safety net for your production services. Default is "300".',
        example=300
    ),
    'container_image': fields.String(
        required=True,
        description='Location of your service container image.',
        example='centos:centos7'
    ),
    'service_model_assignment': fields.Integer(
        required=True,
        description='Model ID which is assigned to the service.',
        example=1
    ),
    # The three service_git_*/service_boot_script fields only matter when the
    # official "rekcurd/rekcurd:tagname" images are used.
    'service_git_url': fields.String(
        required=False,
        description='URL of your git repository. If you use "rekcurd/rekcurd:tagname" image, '
                    'this field is necessary.',
        example='https://github.com/rekcurd/rekcurd-example.git'
    ),
    'service_git_branch': fields.String(
        required=False,
        description='Name of your git branch. If you use "rekcurd/rekcurd:tagname" image, '
                    'this field is necessary.',
        example='master'
    ),
    'service_boot_script': fields.String(
        required=False,
        description='Booting script for your service. If you use "rekcurd/rekcurd:tagname" image, '
                    'this field is necessary.',
        example='start.sh'
    ),
    'resource_request_cpu': fields.Float(
        required=True,
        description='CPU reservation for your service.',
        example=1.0
    ),
    'resource_request_memory': fields.String(
        required=True,
        description='Memory reservation for your service.',
        example='512Mi'
    ),
    'resource_limit_cpu': fields.Float(
        required=False,
        description='Upper limit of CPU reservation. Default is "resource_request_cpu".',
        example=1.0
    ),
    'resource_limit_memory': fields.String(
        required=False,
        description='Upper limit of memory reservation. Default is "resource_request_memory".',
        example='512Mi'
    ),
    'commit_message': fields.String(
        required=False,
        description='Commit message.',
        example='Initial deployment for "development" env. Default is a commit date.'
    )
})
@service_deployment_api_namespace.route(
    '/projects/<int:project_id>/applications/<application_id>/single_service_registration')
class ApiSingleServiceRegistration(Resource):
    """
    Registration for non-Kubernetes service.
    """
    # Form parser for registering an already-running (non-Kubernetes) service.
    single_worker_parser = reqparse.RequestParser()
    single_worker_parser.add_argument(
        'display_name', location='form', type=str, required=True, help='Display name. Must be unique.')
    single_worker_parser.add_argument(
        'description', location='form', type=str, required=False, help='Description.')
    single_worker_parser.add_argument(
        'version', location='form', type=str, required=False,
        choices=('v0', 'v1', 'v2'),
        default=rekcurd_pb2.DESCRIPTOR.GetOptions().Extensions[rekcurd_pb2.rekcurd_grpc_proto_version],
        help='Rekcurd gRPC spec version. Default is the latest version.')
    single_worker_parser.add_argument(
        'service_level', location='form', type=str, required=True,
        choices=('development','beta','staging','sandbox','production'),
        help='Service level. [development/beta/staging/sandbox/production].')
    single_worker_parser.add_argument(
        'service_model_assignment', location='form', type=int, required=True,
        help='Model ID which is assigned to the service.')
    single_worker_parser.add_argument(
        'insecure_host', location='form', type=str, default="localhost", required=False,
        help='Rekcurd server host. Default is "localhost".')
    single_worker_parser.add_argument(
        'insecure_port', location='form', type=int, default=5000, required=False,
        help='Rekcurd server port. Default is "5000".')

    @service_deployment_api_namespace.marshal_with(success_or_not)
    @service_deployment_api_namespace.expect(single_worker_parser)
    def post(self, project_id: int, application_id: str):
        """Add non-Kubernetes service."""
        args = self.single_worker_parser.parse_args()
        display_name = args["display_name"]
        description = args["description"]
        service_level = args["service_level"]
        # Fall back to the latest gRPC spec version shipped with the protobuf.
        version = args["version"] or rekcurd_pb2.DESCRIPTOR.GetOptions().Extensions[rekcurd_pb2.rekcurd_grpc_proto_version]
        service_model_assignment = args["service_model_assignment"]
        insecure_host = args["insecure_host"]
        insecure_port = args["insecure_port"]
        application_model: ApplicationModel = db.session.query(ApplicationModel).filter(
            ApplicationModel.application_id == application_id).first_or_404()
        # Ask the running Rekcurd service for its own identity over gRPC; its
        # reported name is used as the service_id.
        rekcurd_dashboard_client = RekcurdDashboardClient(
            host=insecure_host, port=insecure_port, application_name=application_model.application_name,
            service_level=service_level, rekcurd_grpc_version=version)
        service_info = rekcurd_dashboard_client.run_service_info()
        service_id = service_info["service_name"]  # TODO: renaming
        service_model = ServiceModel(
            service_id=service_id, application_id=application_id, display_name=display_name,
            description=description, service_level=service_level, version=version,
            model_id=service_model_assignment, insecure_host=insecure_host,
            insecure_port=insecure_port
        )
        db.session.add(service_model)
        db.session.commit()
        db.session.close()
        return {"status": True, "message": "Success."}
@service_deployment_api_namespace.route('/projects/<int:project_id>/applications/<application_id>/service_deployment')
class ApiServiceDeployment(Resource):
    """Create a new Kubernetes service deployment for an application."""

    # Form parser describing every deployment knob; unset optional values get
    # sensible defaults either here or in post() below.
    service_deployment_parser = reqparse.RequestParser()
    service_deployment_parser.add_argument(
        'display_name', location='form', type=str, required=True,
        help='Display name.')
    service_deployment_parser.add_argument(
        'description', location='form', type=str, required=False,
        help='Description.')
    service_deployment_parser.add_argument(
        'version', location='form', type=str, required=False,
        choices=('v0', 'v1', 'v2'),
        default=rekcurd_pb2.DESCRIPTOR.GetOptions().Extensions[rekcurd_pb2.rekcurd_grpc_proto_version],
        help='Rekcurd gRPC spec version. Default is the latest version.')
    service_deployment_parser.add_argument(
        'service_level', location='form', type=str, required=True,
        choices=('development','beta','staging','sandbox','production'),
        help='Service level. [development/beta/staging/sandbox/production].')
    service_deployment_parser.add_argument(
        'insecure_host', location='form', type=str, default="[::]", required=False,
        help='Rekcurd server insecure host. Default is "[::]".')
    service_deployment_parser.add_argument(
        'insecure_port', location='form', type=int, default=5000, required=False,
        help='Rekcurd server insecure port. Default is "5000".')
    service_deployment_parser.add_argument(
        'replicas_default', location='form', type=int, default=1, required=False,
        help='Number of pod at beginning. Default is "1".')
    service_deployment_parser.add_argument(
        'replicas_minimum', location='form', type=int, default=1, required=False,
        help='Minimum number of pod for auto-scaling. Default is "1".')
    service_deployment_parser.add_argument(
        'replicas_maximum', location='form', type=int, default=1, required=False,
        help='Maximum number of pod for auto-scaling. Default is "1".')
    service_deployment_parser.add_argument(
        'autoscale_cpu_threshold', location='form', type=int, default=80, required=False,
        help='Threshold of CPU usage for auto-scaling. Default "80".')
    service_deployment_parser.add_argument(
        'policy_max_surge', location='form', type=int, required=False,
        help='Maximum number of surged pod when updating. Default "ceil(0.25 * <replicas_default>)".')
    service_deployment_parser.add_argument(
        'policy_max_unavailable', location='form', type=int, required=False,
        help='Maximum number of unavailable pod when updating. Default "floor(0.25 * <replicas_default>)".')
    service_deployment_parser.add_argument(
        'policy_wait_seconds', location='form', type=int, default=300, required=False,
        help='Booting second of your service. You MUST specify the accurate number. '
             'This value become a safety net for your production services. Default is "300".')
    service_deployment_parser.add_argument(
        'container_image', location='form', type=str, required=True,
        help='Location of your service container image.')
    service_deployment_parser.add_argument(
        'service_model_assignment', location='form', type=int, required=True,
        help='Model ID which is assigned to the service.')
    service_deployment_parser.add_argument(
        'service_git_url', location='form', type=str, default="", required=False,
        help='URL of your git repository. If you use "rekcurd/rekcurd:tagname" image, this field is necessary.')
    service_deployment_parser.add_argument(
        'service_git_branch', location='form', type=str, default="", required=False,
        help='Name of your git branch. If you use "rekcurd/rekcurd:tagname" image, this field is necessary.')
    service_deployment_parser.add_argument(
        'service_boot_script', location='form', type=str, default="", required=False,
        help='Booting script for your service. If you use "rekcurd/rekcurd:tagname" image, this field is necessary.')
    service_deployment_parser.add_argument(
        'resource_request_cpu', location='form', type=float, default=1.0, required=True,
        help='CPU reservation for your service.')
    service_deployment_parser.add_argument(
        'resource_request_memory', location='form', type=str, default='512Mi', required=True,
        help='Memory reservation for your service.')
    service_deployment_parser.add_argument(
        'resource_limit_cpu', location='form', type=float, required=False,
        help='Upper limit of CPU reservation. Default is "resource_request_cpu".')
    service_deployment_parser.add_argument(
        'resource_limit_memory', location='form', type=str, required=False,
        help='Upper limit of memory reservation. Default is "resource_request_memory".')
    service_deployment_parser.add_argument(
        'debug_mode', location='form', type=inputs.boolean, default=False, required=False,
        help='Debug mode.')

    @service_deployment_api_namespace.marshal_with(success_or_not)
    @service_deployment_api_namespace.expect(service_deployment_parser)
    def post(self, project_id: int, application_id: str):
        """Add Kubernetes service."""
        args = self.service_deployment_parser.parse_args()
        # Auto-generate a commit message and fill in derived defaults before
        # handing everything to the Kubernetes deployment helper.
        args['commit_message'] = "Update at {0:%Y%m%d%H%M%S}".format(datetime.datetime.utcnow())
        if args['policy_max_surge'] is None:
            args['policy_max_surge'] = math.ceil(0.25 * args['replicas_default'])
        if args['policy_max_unavailable'] is None:
            args['policy_max_unavailable'] = math.floor(0.25 * args['replicas_default'])
        if args['resource_limit_cpu'] is None:
            args['resource_limit_cpu'] = args['resource_request_cpu']
        if args['resource_limit_memory'] is None:
            args['resource_limit_memory'] = args['resource_request_memory']
        apply_rekcurd_to_kubernetes(project_id=project_id, application_id=application_id, **args)
        db.session.commit()
        db.session.close()
        return {"status": True, "message": "Success."}
@service_deployment_api_namespace.route('/projects/<int:project_id>/applications/<application_id>/service_deployment/<service_id>')
class ApiServiceIdDeployment(Resource):
    """Read, sync and rolling-update a single Kubernetes service deployment."""

    # Form parser for PATCH (rolling update): unlike creation, every
    # deployment knob must be supplied explicitly.
    patch_parser = reqparse.RequestParser()
    patch_parser.add_argument(
        'display_name', location='form', type=str, required=True,
        help='Display name.')
    patch_parser.add_argument(
        'description', location='form', type=str, required=False,
        help='Description.')
    patch_parser.add_argument(
        'version', location='form', type=str, required=True,
        choices=('v0', 'v1', 'v2'),
        default=rekcurd_pb2.DESCRIPTOR.GetOptions().Extensions[rekcurd_pb2.rekcurd_grpc_proto_version],
        help='Rekcurd gRPC spec version. Default is the latest version.')
    patch_parser.add_argument(
        'service_level', location='form', type=str, required=True,
        choices=('development','beta','staging','sandbox','production'),
        help='Service level. [development/beta/staging/sandbox/production].')
    patch_parser.add_argument(
        'insecure_host', location='form', type=str, required=True,
        help='Rekcurd server insecure host. Default is "[::]".')
    patch_parser.add_argument(
        'insecure_port', location='form', type=int, required=True,
        help='Rekcurd server insecure port. Default is "5000".')
    patch_parser.add_argument(
        'replicas_default', location='form', type=int, required=True,
        help='Number of pod at beginning. Default is "1".')
    patch_parser.add_argument(
        'replicas_minimum', location='form', type=int, required=True,
        help='Minimum number of pod for auto-scaling. Default is "1".')
    patch_parser.add_argument(
        'replicas_maximum', location='form', type=int, required=True,
        help='Maximum number of pod for auto-scaling. Default is "1".')
    patch_parser.add_argument(
        'autoscale_cpu_threshold', location='form', type=int, required=True,
        help='Threshold of CPU usage for auto-scaling. Default "80".')
    patch_parser.add_argument(
        'policy_max_surge', location='form', type=int, required=True,
        help='Maximum number of surged pod when updating. Default "ceil(0.25 * <replicas_default>)".')
    patch_parser.add_argument(
        'policy_max_unavailable', location='form', type=int, required=True,
        help='Maximum number of unavailable pod when updating. Default "floor(0.25 * <replicas_default>)".')
    patch_parser.add_argument(
        'policy_wait_seconds', location='form', type=int, required=True,
        help='Booting second of your service. You MUST specify the accurate number. '
             'This value become a safety net for your production services. Default is "300".')
    patch_parser.add_argument(
        'container_image', location='form', type=str, required=True,
        help='Location of your service container image.')
    patch_parser.add_argument(
        'service_model_assignment', location='form', type=int, required=True,
        help='Model ID which is assigned to the service.')
    patch_parser.add_argument(
        'service_git_url', location='form', type=str, required=True,
        help='URL of your git repository. If you use "rekcurd/rekcurd:tagname" image, this field is necessary.')
    patch_parser.add_argument(
        'service_git_branch', location='form', type=str, required=True,
        help='Name of your git branch. If you use "rekcurd/rekcurd:tagname" image, this field is necessary.')
    patch_parser.add_argument(
        'service_boot_script', location='form', type=str, required=True,
        help='Booting script for your service. If you use "rekcurd/rekcurd:tagname" image, this field is necessary.')
    patch_parser.add_argument(
        'resource_request_cpu', location='form', type=float, required=True,
        help='CPU reservation for your service.')
    patch_parser.add_argument(
        'resource_request_memory', location='form', type=str, required=True,
        help='Memory reservation for your service.')
    patch_parser.add_argument(
        'resource_limit_cpu', location='form', type=float, required=True,
        help='Upper limit of CPU reservation. Default is "resource_request_cpu".')
    patch_parser.add_argument(
        'resource_limit_memory', location='form', type=str, required=True,
        help='Upper limit of memory reservation. Default is "resource_request_memory".')
    patch_parser.add_argument(
        'debug_mode', location='form', type=inputs.boolean, default=False, required=False,
        help='Debug mode.')

    @service_deployment_api_namespace.marshal_with(service_deployment_params)
    def get(self, project_id: int, application_id: str, service_id: str):
        """Get Kubernetes deployment info."""
        deployment_info = load_kubernetes_deployment_info(project_id, application_id, service_id)
        service_model: ServiceModel = db.session.query(ServiceModel).filter(
            ServiceModel.service_id == service_id).first_or_404()
        # display_name/description live only in the dashboard DB, not in k8s.
        deployment_info["display_name"] = service_model.display_name
        deployment_info["description"] = service_model.description
        return deployment_info

    @service_deployment_api_namespace.marshal_with(success_or_not)
    def put(self, project_id: int, application_id: str, service_id: str):
        """Sync the DB record with the live Kubernetes deployment info."""
        deployment_info = load_kubernetes_deployment_info(project_id, application_id, service_id)
        service_model: ServiceModel = db.session.query(ServiceModel).filter(
            ServiceModel.service_id == service_id).first_or_404()
        is_updated = False
        if service_model.version != deployment_info["version"]:
            is_updated = True
            service_model.version = deployment_info["version"]
        if service_model.model_id != deployment_info["service_model_assignment"]:
            is_updated = True
            service_model.model_id = deployment_info["service_model_assignment"]
        if service_model.insecure_host != deployment_info["insecure_host"]:
            is_updated = True
            service_model.insecure_host = deployment_info["insecure_host"]
        if service_model.insecure_port != deployment_info["insecure_port"]:
            is_updated = True
            service_model.insecure_port = deployment_info["insecure_port"]
        if is_updated:
            db.session.commit()
        db.session.close()
        return {"status": True, "message": "Success."}

    @service_deployment_api_namespace.marshal_with(success_or_not)
    @service_deployment_api_namespace.expect(patch_parser)
    def patch(self, project_id: int, application_id: str, service_id: str):
        """Rolling update of Kubernetes deployment configurations."""
        args = self.patch_parser.parse_args()
        args['commit_message'] = "Update at {0:%Y%m%d%H%M%S}".format(datetime.datetime.utcnow())
        apply_rekcurd_to_kubernetes(
            project_id=project_id, application_id=application_id, service_id=service_id, **args)
        service_model: ServiceModel = db.session.query(ServiceModel).filter(
            ServiceModel.service_id == service_id).first_or_404()
        is_updated = False
        # BUGFIX: the original assigned args["display_name"]/args["description"]
        # to service_model.version, clobbering the gRPC version field instead
        # of updating the intended attributes.
        if service_model.display_name != args["display_name"]:
            is_updated = True
            service_model.display_name = args["display_name"]
        if service_model.description != args["description"]:
            is_updated = True
            service_model.description = args["description"]
        if service_model.version != args["version"]:
            is_updated = True
            service_model.version = args["version"]
        if service_model.model_id != args["service_model_assignment"]:
            is_updated = True
            service_model.model_id = args["service_model_assignment"]
        if service_model.insecure_host != args["insecure_host"]:
            is_updated = True
            service_model.insecure_host = args["insecure_host"]
        if service_model.insecure_port != args["insecure_port"]:
            is_updated = True
            service_model.insecure_port = args["insecure_port"]
        if is_updated:
            db.session.commit()
        db.session.close()
        return {"status": True, "message": "Success."}
import datetime
import tempfile
from flask_restplus import Namespace, fields, Resource, reqparse
from werkzeug.datastructures import FileStorage
from . import DatetimeToTimestamp, status_model
from rekcurd_dashboard.core import RekcurdDashboardClient
from rekcurd_dashboard.data_servers import DataServer
from rekcurd_dashboard.models import db, DataServerModel, DataServerModeEnum, ApplicationModel, ServiceModel, ModelModel
from rekcurd_dashboard.utils import RekcurdDashboardException
# Namespace under which every model endpoint is registered.
model_api_namespace = Namespace('models', description='Model API Endpoint.')

# Generic {status, message} payload returned by write operations.
success_or_not = model_api_namespace.model('Status', status_model)

# Marshalling schema for a stored model record.
model_model_params = model_api_namespace.model('Model', {
    'model_id': fields.Integer(
        readOnly=True,
        description='Model ID.'
    ),
    'application_id': fields.String(
        readOnly=True,
        description='Application ID.'
    ),
    'filepath': fields.String(
        readOnly=True,
        description='Model file path.',
        example='ml-1234567.model'
    ),
    'register_date': DatetimeToTimestamp(
        readOnly=True,
        description='Register date.'
    ),
    'description': fields.String(
        required=True,
        description='Description.',
        example='This is a sample.'
    )
})
@model_api_namespace.route('/projects/<int:project_id>/applications/<application_id>/models')
class ApiModels(Resource):
    """List models of an application and upload new model files."""

    # Multipart parser: the model binary plus a free-text description.
    upload_model_parser = reqparse.RequestParser()
    upload_model_parser.add_argument('filepath', location='files',
                                     type=FileStorage, required=True)
    upload_model_parser.add_argument('description', type=str, required=True, location='form')

    @model_api_namespace.marshal_list_with(model_model_params)
    def get(self, project_id: int, application_id: str):
        """get_models"""
        return ModelModel.query.filter_by(application_id=application_id).all()

    @model_api_namespace.marshal_with(success_or_not)
    @model_api_namespace.expect(upload_model_parser)
    def post(self, project_id: int, application_id: str):
        """upload_model"""
        args = self.upload_model_parser.parse_args()
        file: FileStorage = args['filepath']
        description: str = args['description']
        data_server_model: DataServerModel = db.session.query(
            DataServerModel).filter(DataServerModel.project_id == project_id).first_or_404()
        application_model: ApplicationModel = db.session.query(
            ApplicationModel).filter(ApplicationModel.application_id == application_id).first()
        if data_server_model.data_server_mode == DataServerModeEnum.LOCAL:
            """Only if DataServerModeEnum.LOCAL, send file to the server."""
            # Timestamped destination path; the same path is pushed to every
            # service of the application over gRPC.
            filepath = "ml-{0:%Y%m%d%H%M%S}.model".format(datetime.datetime.utcnow())
            service_models = db.session.query(
                ServiceModel).filter(ServiceModel.application_id == application_id).all()
            for service_model in service_models:
                rekcurd_dashboard_application = RekcurdDashboardClient(
                    host=service_model.insecure_host, port=service_model.insecure_port,
                    application_name=application_model.application_name,
                    service_level=service_model.service_level, rekcurd_grpc_version=service_model.version)
                # NOTE(review): `file` is a single FileStorage stream reused for
                # every service — presumably run_upload_model rewinds or fully
                # buffers it; confirm for multi-service applications.
                response_body = rekcurd_dashboard_application.run_upload_model(filepath, file)
                if not response_body.get("status", True):
                    raise RekcurdDashboardException(response_body.get("message", "Error."))
            response_body = {"status": True, "message": "Success."}
        else:
            """Otherwise, upload file."""
            # Spool the upload to a temp file so the data server can read it
            # from disk; the temp file is removed when the context exits.
            with tempfile.NamedTemporaryFile() as fp:
                fp.write(file.read())
                data_server = DataServer()
                filepath = data_server.upload_model(data_server_model, application_model, fp.name)
                response_body = {"status": True, "message": "Success."}
        model_model = ModelModel(application_id=application_id, filepath=filepath, description=description)
        db.session.add(model_model)
        db.session.commit()
        db.session.close()
        return response_body
@model_api_namespace.route('/projects/<int:project_id>/applications/<application_id>/models/<int:model_id>')
class ApiModelId(Resource):
    """Read, update and delete a single model record."""

    # Form parser for PATCH: only the description is editable.
    update_config_parser = reqparse.RequestParser()
    update_config_parser.add_argument('description', type=str, required=True, location='form')

    @model_api_namespace.marshal_with(model_model_params)
    def get(self, project_id: int, application_id: str, model_id: int):
        """get_model"""
        return db.session.query(ModelModel).filter(
            ModelModel.model_id == model_id).first_or_404()

    @model_api_namespace.marshal_with(success_or_not)
    @model_api_namespace.expect(update_config_parser)
    def patch(self, project_id: int, application_id: str, model_id: int):
        """update_model"""
        new_description = self.update_config_parser.parse_args()['description']
        target = db.session.query(ModelModel).filter(
            ModelModel.model_id == model_id).first_or_404()
        target.description = new_description
        db.session.commit()
        db.session.close()
        return {"status": True, "message": "Success."}

    @model_api_namespace.marshal_with(success_or_not)
    def delete(self, project_id: int, application_id: str, model_id: int):
        """delete_model"""
        target_query = db.session.query(ModelModel).filter(ModelModel.model_id == model_id)
        if target_query.one_or_none() is None:
            raise RekcurdDashboardException("No such model_id.")
        # Refuse deletion while any service still references this model.
        in_use = db.session.query(ServiceModel).filter(ServiceModel.model_id == model_id).count()
        if in_use > 0:
            raise RekcurdDashboardException("Model is used by some services.")
        # TODO: Delete file.
        target_query.delete()
        db.session.commit()
        db.session.close()
        return {"status": True, "message": "Success."}
from flask_restplus import Namespace, fields, Resource, reqparse
from . import status_model, load_istio_routing, apply_new_route_weight
from rekcurd_dashboard.models import db, KubernetesModel, ApplicationModel, ServiceModel
from rekcurd_dashboard.utils import RekcurdDashboardException
# Namespace under which every service-routing endpoint is registered.
service_routing_api_namespace = Namespace('service_routings', description='Service Routing API Endpoint.')

# Generic {status, message} payload returned by write operations.
success_or_not = service_routing_api_namespace.model('Success', status_model)

# Response (marshalling) schemas.
service_weight_params = service_routing_api_namespace.model('Weight', {
    'display_name': fields.String(
        readOnly=True,
        description='Service name.'
    ),
    'service_id': fields.String(
        readOnly=True,
        description='Service ID.'
    ),
    'service_weight': fields.Integer(
        readOnly=True,
        description='Service weight.'
    )
})
service_routing_params = service_routing_api_namespace.model('Routing', {
    'application_name': fields.String(
        readOnly=True,
        description='Application name.'
    ),
    'service_level': fields.String(
        readOnly=True,
        description='Service level. [development/beta/staging/sandbox/production]',
        example='development'
    ),
    'service_weights': fields.List(
        fields.Nested(service_weight_params),
        readOnly=True,
        description='Service weights.'
    )
})

# Request (expect) schemas.
# BUGFIX: these were previously registered under the same names ('Weight',
# 'Routing') as the response schemas above, so the later registration
# overwrote the earlier one in the namespace's Swagger model registry.
# Distinct names keep both schemas visible in the generated docs.
service_weight_expect = service_routing_api_namespace.model('WeightExpect', {
    'service_id': fields.String(
        required=True,
        description='Service ID.'
    ),
    'service_weight': fields.Integer(
        required=True,
        description='Service weight.'
    )
})
service_routing_expect = service_routing_api_namespace.model('RoutingExpect', {
    'service_level': fields.String(
        required=True,
        description='Service level. [development/beta/staging/sandbox/production]',
        example='development'
    ),
    'service_weights': fields.List(
        fields.Nested(service_weight_expect),
        required=True,
        description='Service weights.'
    )
})
@service_routing_api_namespace.route('/projects/<int:project_id>/applications/<application_id>/service_routing')
class ApiServiceRouting(Resource):
    """Inspect and update Istio traffic-routing weights of an application."""

    # Query-string parser for GET.
    get_parser = reqparse.RequestParser()
    get_parser.add_argument(
        'service_level', location='args', type=str, required=True,
        choices=('development', 'beta', 'staging', 'sandbox', 'production'),
        help='Service level. [development/beta/staging/sandbox/production].')

    # JSON/form parser for PATCH.
    service_routing_parser = reqparse.RequestParser()
    service_routing_parser.add_argument(
        'service_level', type=str, required=True,
        choices=('development','beta','staging','sandbox','production'),
        help='Service level. [development/beta/staging/sandbox/production].')
    service_routing_parser.add_argument(
        'service_weights', type=dict, action='append', required=True,
        help='Service weights.')

    @service_routing_api_namespace.marshal_with(service_routing_params)
    @service_routing_api_namespace.expect(get_parser)
    def get(self, project_id: int, application_id: str):
        """Get routing info."""
        args = self.get_parser.parse_args()
        service_level = args["service_level"]
        kubernetes_model: KubernetesModel = db.session.query(KubernetesModel).filter(
            KubernetesModel.project_id == project_id).first()
        application_model: ApplicationModel = db.session.query(ApplicationModel).filter(
            ApplicationModel.application_id == application_id).first_or_404()
        service_models = db.session.query(ServiceModel).filter(
            ServiceModel.application_id == application_id,
            ServiceModel.service_level == service_level).all()
        if not service_models:
            raise RekcurdDashboardException("No services available.")
        # service_id -> display_name for every service at this level.
        service_id_name = dict()
        for service_model in service_models:
            service_id_name[service_model.service_id] = service_model.display_name
        routes = load_istio_routing(kubernetes_model, application_model, service_level)
        response_body = dict()
        response_body["application_name"] = application_model.application_name
        response_body["service_level"] = service_level
        service_weights = list()
        response_body["service_weights"] = service_weights
        for route in routes:
            # NOTE(review): [4:] strips a fixed 4-char prefix from the Istio
            # destination host to recover the service_id — presumably "svc-";
            # confirm against the routing manifest generator.
            service_id = route["destination"]["host"][4:]
            display_name = service_id_name.pop(service_id)
            weight = route["weight"]
            service_weights.append({
                "display_name": display_name,
                "service_id": service_id,
                "service_weight": weight})
        # Services without an Istio route entry are reported with weight 0.
        for service_id, display_name in service_id_name.items():
            service_weights.append({
                "display_name": display_name,
                "service_id": service_id,
                "service_weight": 0})
        return response_body

    @service_routing_api_namespace.marshal_with(success_or_not)
    @service_routing_api_namespace.expect(service_routing_expect)
    def patch(self, project_id: int, application_id: str):
        """Update routing weights."""
        service_routing_args = self.service_routing_parser.parse_args()
        service_level: str = service_routing_args["service_level"]
        # Split the [{service_id, service_weight}, ...] payload into two
        # parallel lists for the routing helper.
        service_ids = []
        service_weights = []
        for service_weight in service_routing_args["service_weights"]:
            service_ids.append(service_weight["service_id"])
            service_weights.append(service_weight["service_weight"])
        apply_new_route_weight(project_id, application_id, service_level, service_ids, service_weights)
        return {"status": True, "message": "Success."}
import json
import os
import sys
from typing import Any, Callable, Dict, List, Optional, Union, cast
from urllib.parse import urlparse
import click
import requests
from requests.models import Response
from rekono.client.api import Rekono
class RekonoCliCommand(click.MultiCommand):
    '''Base features for Rekono CLI commands.

    Subclasses declare their supported commands, help texts and option sets
    through the class attributes below; this base class wires them into Click.
    '''

    # Environment variables
    api_token_env = 'REKONO_TOKEN'                  # Environment variable to set API token
    backend_url_env = 'REKONO_URL'                  # Environment variable to set backend URL

    # Configuration provided by subclasses
    commands: List[str] = []                        # List of supported commands
    commands_mapping: Dict[str, str] = {}           # Mapping between commands and methods
    default_mapping: Optional[str] = None           # Default method if mapping not found
    help_messages: Dict[str, str] = {}              # Help messages for each command
    api_options: List[Callable] = []                # API options for all commands
    display_options: List[Callable] = []            # Display options for all commands
    entity_options: List[Callable] = []             # Specific options for post and put

    def list_commands(self, ctx: click.Context) -> List[str]:
        '''Return list of CLI commands.

        Args:
            ctx (click.Context): Click context.

        Returns:
            List[str]: List of CLI commands.
        '''
        return self.commands

    def get_command(self, ctx: click.Context, cmd_name: str) -> Optional[click.Command]:
        '''Get CLI command by name.

        Args:
            ctx (click.Context): Click context.
            cmd_name (str): Command name.

        Returns:
            Optional[click.Command]: Click command, or None if unsupported.
        '''
        related_command_method = self.commands_mapping.get(cmd_name, self.default_mapping)  # Get mapped method
        if cmd_name in self.commands and related_command_method and hasattr(self, related_command_method):
            command: click.Command = getattr(self, related_command_method)  # Get command method
            command.help = self.help_messages.get(cmd_name)                 # Set help message
            # Option decorators mutate the command object in place, so apply
            # them only once even if the same command is looked up repeatedly
            # (previously every lookup duplicated all options).
            if not getattr(command, '_rekono_options_applied', False):
                if related_command_method in ['post_entity', 'put_entity']:  # POST or PUT entity request
                    self._apply_command_options(command, self.entity_options)   # Set entity options
                self._apply_command_options(command, self.api_options)      # Set base API options
                self._apply_command_options(command, self.display_options)  # Set base display options
                command._rekono_options_applied = True                      # type: ignore[attr-defined]
            return command
        return None

    def _apply_command_options(self, command: Callable, options: List[Callable]) -> Callable:
        '''Apply multiple options to a specific command.

        Args:
            command (Callable): Command to configure.
            options (List[Callable]): List of option decorators to apply.

        Returns:
            Callable: Configured command.
        '''
        for option in options:
            command = option(command)
        return command

    @classmethod
    def _get_url(cls, url: str) -> str:
        '''Get a valid Rekono base URL from the user provided value.

        Prompts the user repeatedly until a parseable URL is given; the scheme
        defaults to HTTPS when missing.

        Args:
            url (str): User provided URL.

        Returns:
            str: Valid Rekono URL.
        '''
        parser = urlparse(url)  # Parse provided URL
        if not parser.netloc:   # Invalid URL
            click.echo(click.style('URL is invalid', fg='red'), err=True, color=True)
            url = click.prompt('URL', type=str)  # Ask user for base URL
            return cls._get_url(url)             # Retry URL validation
        return f'{parser.scheme or "https"}://{parser.netloc}'

    @staticmethod
    def _parse_key_value_params(items: Optional[List[str]]) -> Dict[str, str]:
        '''Parse key=value options. Items without a '=' separator are ignored.

        Args:
            items (Optional[List[str]]): List of key=value values (or None).

        Returns:
            Dict[str, str]: Dictionary with all keys and values.
        '''
        data = {}
        for item in items or []:
            if '=' in item:                     # Check separator
                key, value = item.split('=', 1)  # Get key and value
                data[key.strip()] = value.strip()
        return data

    @staticmethod
    def _get_body(body: Optional[str]) -> Optional[str]:
        '''Validate body value to be sent via HTTP.

        Exits the process with status 1 when the body is not valid JSON.

        Args:
            body (Optional[str]): Body value.

        Returns:
            Optional[str]: Validated body value.
        '''
        if body:
            try:
                json.loads(body)  # Try to parse body value
            except json.decoder.JSONDecodeError:  # Invalid body value
                click.echo(click.style('Invalid JSON format for body value', fg='red'), err=True, color=True)
                sys.exit(1)
        return body

    @staticmethod
    def _get_endpoint(endpoint: str) -> str:
        '''Get valid endpooint value without parameters or other URL information.

        Args:
            endpoint (str): Endpoint value provided by user.

        Returns:
            str: Valid endpoint value (URL path only).
        '''
        return urlparse(endpoint).path

    @classmethod
    def _rekono_factory(cls, url: str, no_verify: bool = False, headers: Optional[List[str]] = None) -> Rekono:
        '''Create Rekono client entity.

        Args:
            url (str): Base Rekono URL.
            no_verify (bool, optional): Disable TLS validation. Defaults to False.
            headers (Optional[List[str]], optional): Extra HTTP request headers as
                key=value strings. Defaults to None (fixes the previous mutable
                default argument).

        Returns:
            Rekono: Rekono API client.
        '''
        token = os.getenv(cls.api_token_env)  # Get API token from environment
        if not token:                         # API token is not provided
            token = click.prompt('API token', type=str, hide_input=True)  # Ask for API token
        return Rekono(  # Create Rekono API client
            cls._get_url(url),  # Get valid Rekono URL
            token=cast(str, token),
            headers=cls._parse_key_value_params(headers),  # Get HTTP headers (handles None)
            verify=not no_verify
        )

    @staticmethod
    def _get_data_from_responses(responses: List[Response]) -> Union[List[Dict[str, Any]], Dict[str, Any]]:
        '''Get data from Rekono API responses.

        Args:
            responses (List[Response]): List of Rekono API responses.

        Returns:
            Union[List[Dict[str, Any]], Dict[str, Any]]: Data returned by Rekono.
        '''
        content = []
        for response in responses:  # For each response
            try:
                body = response.json()  # Parse JSON body from response
            except requests.exceptions.JSONDecodeError:
                continue  # Non-JSON body: skip this response
            if isinstance(body, dict):              # Response body is a dictionary
                body = body.get('results', body)    # Unwrap paginated results field if it exists
            if isinstance(body, list):      # Content is a list
                content.extend(body)        # Save content
            elif isinstance(body, dict):    # Content is a dictionary
                if len(responses) == 1:     # Only one response
                    return body             # Return response content directly
                content.append(body)        # Save content
        return content

    @classmethod
    def _display_responses(
        cls,
        responses: List[Response],
        show_headers: bool,
        only_show_status_code: bool,
        quiet: bool
    ) -> None:
        '''Display Rekono API responses via standard output.

        Args:
            responses (List[Response]): Rekono API responses.
            show_headers (bool): Display HTTP response headers.
            only_show_status_code (bool): Only display status code from HTTP response.
            quiet (bool): Don't display anything from response.
        '''
        if quiet:  # No content should be displayed
            return
        if only_show_status_code or show_headers:  # Headers or status should be displayed
            for response in responses:             # For each response
                if only_show_status_code:          # Just display status code
                    click.echo(response.status_code)
                elif show_headers:                 # Show response headers
                    click.echo()
                    # Display HTTP request and response summary
                    click.echo(f'{response.request.method} {response.request.path_url} {response.status_code}')
                    for header, value in response.headers.items():
                        click.echo(f'{header}: {value}')  # Display HTTP response headers
                    click.echo()
                    data = cls._get_data_from_responses([response])             # Get content from response
                    click.echo(json.dumps(data, ensure_ascii=True, indent=4))   # Display content via standard output
        else:  # Standard display options
            data = cls._get_data_from_responses(responses)              # Get content from responses
            click.echo(json.dumps(data, ensure_ascii=True, indent=4))   # Display content via standard output

    @classmethod
    def _save_output(cls, responses: List[Response], filepath: Optional[str]) -> None:
        '''Save responses content in JSON file.

        Args:
            responses (List[Response]): Rekono API responses.
            filepath (Optional[str]): Filepath to the JSON file where content should be saved.
        '''
        if not filepath:  # JSON filepath isn't provided
            return
        data = cls._get_data_from_responses(responses)       # Get data from responses
        with open(filepath, 'w', encoding='utf-8') as file:  # Open JSON file
            json.dump(data, file, ensure_ascii=True, indent=4)  # Write content in JSON file
from typing import List
import click
from rekono.framework.arguments import endpoint_argument
from rekono.framework.commands.command import RekonoCliCommand
from rekono.framework.options import (all_pages_option, body_option,
file_option, headers_option, json_option,
no_verify_option, parameters_option,
quiet_option, show_headers_option,
show_status_code_option, url_option)
class ApiCommand(RekonoCliCommand):
    '''Base Rekono CLI command to make API requests.'''

    # Supported subcommands, each mapped to the handler method of the same name
    commands = ['get', 'post', 'put', 'delete']
    commands_mapping = {
        'get': 'get',
        'post': 'post',
        'put': 'put',
        'delete': 'delete'
    }
    help_messages = {
        'get': 'GET request to Rekono API',
        'post': 'POST request to Rekono API',
        'put': 'PUT request to Rekono API',
        'delete': 'DELETE request to Rekono API'
    }
    api_options = [url_option, headers_option, no_verify_option]                    # API options for all commands
    display_options = [show_headers_option, show_status_code_option, quiet_option]  # Display options for all commands

    @staticmethod
    @click.command
    @endpoint_argument
    @parameters_option
    @all_pages_option
    @json_option
    def get(
        endpoint: str,
        url: str,
        headers: List[str],
        no_verify: bool,
        parameters: List[str],
        pagination: bool,
        show_headers: bool,
        only_show_status_code: bool,
        quiet: bool,
        json_output: str
    ):
        '''GET request to Rekono API.

        Args:
            endpoint (str): Endpoint to call.
            url (str): Rekono base URL.
            headers (List[str]): HTTP headers to send in key=value format.
            no_verify (bool): Disable TLS validation.
            parameters (List[str]): HTTP query parameters to send in key=value format.
            pagination (bool): Enable iteration over all API pages.
            show_headers (bool): Display HTTP response headers.
            only_show_status_code (bool): Just display HTTP response status code.
            quiet (bool): Don't display anything from response.
            json_output (str): Filepath to the JSON file where content should be saved.
        '''
        api_client = ApiCommand._rekono_factory(url, no_verify, headers)
        query_params = ApiCommand._parse_key_value_params(parameters)
        result = api_client.get(
            ApiCommand._get_endpoint(endpoint),
            parameters=query_params,
            pagination=pagination
        )
        # Paginated requests return a list of responses, single ones just one
        responses = result if isinstance(result, list) else [result]
        ApiCommand._display_responses(responses, show_headers, only_show_status_code, quiet)
        ApiCommand._save_output(responses, json_output)

    @staticmethod
    @click.command
    @endpoint_argument
    @body_option
    @file_option
    @json_option
    def post(
        endpoint: str,
        url: str,
        headers: List[str],
        no_verify: bool,
        body: str,
        filepath: str,
        show_headers: bool,
        only_show_status_code: bool,
        quiet: bool,
        json_output: str
    ):
        '''POST request to Rekono API.

        Args:
            endpoint (str): Endpoint to call.
            url (str): Rekono base URL.
            headers (List[str]): HTTP headers to send in key=value format.
            no_verify (bool): Disable TLS validation.
            body (str): HTTP body to send in JSON format.
            filepath (click.Path): File to upload.
            show_headers (bool): Display HTTP response headers.
            only_show_status_code (bool): Just display HTTP response status code.
            quiet (bool): Don't display anything from response.
            json_output (str): Filepath to the JSON file where content should be saved.
        '''
        api_client = ApiCommand._rekono_factory(url, no_verify, headers)
        validated_body = ApiCommand._get_body(body)
        response = api_client.post(ApiCommand._get_endpoint(endpoint), validated_body, filepath)
        ApiCommand._display_responses([response], show_headers, only_show_status_code, quiet)
        ApiCommand._save_output([response], json_output)

    @staticmethod
    @click.command
    @endpoint_argument
    @body_option
    @json_option
    def put(
        endpoint: str,
        url: str,
        headers: List[str],
        no_verify: bool,
        body: str,
        show_headers: bool,
        only_show_status_code: bool,
        quiet: bool,
        json_output: str
    ):
        '''PUT request to Rekono API.

        Args:
            endpoint (str): Endpoint to call.
            url (str): Rekono base URL.
            headers (List[str]): HTTP headers to send in key=value format.
            no_verify (bool): Disable TLS validation.
            body (str): HTTP body to send in JSON format.
            show_headers (bool): Display HTTP response headers.
            only_show_status_code (bool): Just display HTTP response status code.
            quiet (bool): Don't display anything from response.
            json_output (str): Filepath to the JSON file where content should be saved.
        '''
        api_client = ApiCommand._rekono_factory(url, no_verify, headers)
        validated_body = ApiCommand._get_body(body)
        response = api_client.put(ApiCommand._get_endpoint(endpoint), validated_body)
        ApiCommand._display_responses([response], show_headers, only_show_status_code, quiet)
        ApiCommand._save_output([response], json_output)

    @staticmethod
    @click.command
    @endpoint_argument
    def delete(
        endpoint: str,
        url: str,
        headers: List[str],
        no_verify: bool,
        show_headers: bool,
        only_show_status_code: bool,
        quiet: bool
    ):
        '''DELETE request to Rekono API.

        Args:
            endpoint (str): Endpoint to call.
            url (str): Rekono base URL.
            headers (List[str]): HTTP headers to send in key=value format.
            no_verify (bool): Disable TLS validation.
            show_headers (bool): Display HTTP response headers.
            only_show_status_code (bool): Just display HTTP response status code.
            quiet (bool): Don't display anything from response.
        '''
        api_client = ApiCommand._rekono_factory(url, no_verify, headers)
        response = api_client.delete(ApiCommand._get_endpoint(endpoint))
        ApiCommand._display_responses([response], show_headers, only_show_status_code, quiet)
# rel2tree
Convert your list of data into `JSON` serializable structure.
## Motivation
Let's suppose you have a set of data given as a list of dicts:
```py
import json
[
{"name": "Jane", "city": "New York", "sales": 23},
{"name": "Joe", "city": "New York", "sales": 11},
{"name": "Jane", "city": "Chicago", "sales": 21},
{"name": "Jane", "city": "New York", "sales": 4},
{"name": "Joe", "city": "New York", "sales": 13},
{"name": "Joe", "city": "Chicago", "sales": 31},
{"name": "Jane", "city": "New York", "sales": 7},
]
```
You may want a nice summary, something like this:
```json
[
{
"name": "Jane",
"cities": [
{
"city": "New York",
"sales": 34
},
{
"city": "Chicago",
"sales": 21
}
],
"sum": 55
},
{
"name": "Joe",
"cities": [
{
"city": "New York",
"sales": 24
},
{
"city": "Chicago",
"sales": 31
}
],
"sum": 55
}
]
```
This can be done relatively easily by iterating over the data
set and building the final structure.
```py
summary = {}
for record in data:
this_person = summary.setdefault(record["name"], {
"name": record["name"],
"cities": {},
"sum": 0,
})
this_person_cities = this_person["cities"].setdefault(record["city"], {
"city": record["city"],
"sum": 0,
})
    this_person_cities["sum"] += record["sales"]
    this_person["sum"] += record["sales"]
summary = list(summary.values())
for person in summary:
person["cities"] = list(person["cities"].values())
print(json.dumps(summary))
```
Although the above code works, it has some problems.
- Not declarative: by looking at the code it is not trivial to tell the final data
structure.
- Error-prone.
- The complexity grows with more complex business logic
or by adding an additional level.
- Not reusable.
Let's see how you do it with `rel2tree`:
```py
from rel2tree import f # NOQA
summary = f.groupby(lambda x: x["name"], f.dict({
"name": f.groupkey(),
"cities": f.groupby(lambda x: x["city"], f.dict({
"city": f.groupkey(),
"sum": f.map(lambda x: x["sales"]).t(sum)
})),
"sum": f.map(lambda x: x["sales"]).t(sum)
}))
print(json.dumps(summary(data)))
```
## Tutorial
### `map`, `sort`, `filter`, `distinct`
The only object one can import from `rel2tree` is `f`, which is of type `F`
so we will call it an `F` object.
`f` is callable, but - on it's own does nothing:
```py
print(f(2))
# 2
```
Let's say we have a list of numbers (`numbers`) and we want
to duplicate all of it's elements. This can be done in many ways:
- using a list comprehension:
```py
out = [2 * x for x in numbers]
```
- using map:
```py
out = map(lambda x: 2 * x, numbers)
```
- defining a function (for reusability)
```py
import functools
dup = functools.partial(map, lambda x: 2 * x)
out = dup(numbers)
```
Using an `f` it looks like this:
```py
numbers = range(15)
dup = f.map(lambda x: 2 * x)
out = dup(numbers)
```
This simply made our third approach a little more terse.
Now what if our task is to add 1 to each element after
duplication? Can we reuse our `dup` function? As
the result of `f.map` has the same type as `f`, we can
use map again:
```py
dupplus1 = dup.map(lambda x: x + 1)
```
`f.sort(fnc)` sorts our list based on the value of `fnc`
applied to the items (just as the `key` argument of Python's
`sorted`). `f.filter(fnc)` keeps only those items `i` where
`fnc(i)` is true(ish). These methods also return `F`s
(internally the type of `f` is `F`), so they are chainable.
The `F` below first duplicates, then filters out big
numbers and finally sorts them. (`f.sort`, without a function sorts the elements.)
```py
f.map(lambda x: 2 * x).filter(lambda x: x < 10).sort()
```
### `dict`
Back to our `numbers`, but with the desired output of
```json
{
"even": [0, 2, 4, 6, 8, 10, 12, 14],
"odd": [1, 3, 5, 7, 9, 11, 13]
}
```
We can combine the dict method to achive this:
```py
summary = f.dict({
"even": f.filter(lambda x: (x % 2 == 0)),
"odd": f.filter(lambda x: (x % 2 == 1)),
})
```
If the dictionary values are `F` objects, those objects will be called with
the input list to form the final values, otherwise the values will be left as is.
### `groupby`
To generalize the above example, we can group our numbers based on the remainder
when divided by, say, 3:
```py
summary = f.groupby(lambda x: x % 3)
# [[0, 3, 6, 9, 12], [1, 4, 7, 10, 13], [2, 5, 8, 11, 14]]
```
To make it more informative, the desired output should be:
```json
[
{ "remainder": 0, "numbers": [0, 3, 6, 9, 12] },
{ "remainder": 1, "numbers": [1, 4, 7, 10, 13] },
{ "remainder": 2, "numbers": [2, 5, 8, 11, 14] }
]
```
This can be done by using `groupkey`:
```py
summary = f.groupby(lambda x: x % 3, f.dict({
"remainder": f.groupkey(),
"numbers": f
}))
```
`f.groupkey(level=0)` gives the deepest level group key, while `f.groupkey(1)`
is the one level above group key in case of nested `groupby`'s.
| /rel2tree-7.0.0.tar.gz/rel2tree-7.0.0/README.md | 0.541409 | 0.916745 | README.md | pypi |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
# helper functions
def exists(val):
    """Return True when *val* is not None (i.e. a value was supplied)."""
    return val is not None
# classes
class GatedRMSNorm(nn.Module):
    """RMS-style normalization gated by a sigmoid of the weighted input.

    The input is normalized by its (scaled) L2 norm over the last dimension,
    multiplied by a learned gain ``g``, then gated elementwise by
    ``sigmoid(x * w)`` where ``w`` is a second learned vector.

    Args:
        dim: Size of the last dimension of the input.
        eps: Lower clamp on the norm to avoid division by zero.
    """

    def __init__(self, dim, eps=1e-8):
        super().__init__()
        self.scale = dim ** -0.5
        self.eps = eps
        self.w = nn.Parameter(torch.ones(dim))
        self.g = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        # RMS of x: L2 norm over the feature dimension times 1/sqrt(dim)
        rms = torch.norm(x, dim=-1, keepdim=True) * self.scale
        normalized = x / rms.clamp(min=self.eps) * self.g
        gate = (x * self.w).sigmoid()
        return normalized * gate
def FeedForward(dim, mult=4):
    """Build a pre-norm two-layer MLP block.

    LayerNorm -> Linear(dim, dim * mult) -> GELU -> Linear(dim * mult, dim).

    Args:
        dim: Input/output dimension.
        mult: Hidden-size multiplier.

    Returns:
        nn.Sequential: The feed-forward block.
    """
    hidden = dim * mult
    return nn.Sequential(
        nn.LayerNorm(dim),
        nn.Linear(dim, hidden),
        nn.GELU(),
        nn.Linear(hidden, dim),
    )
class ReLA(nn.Module):
    """Rectified Linear Attention: multi-head attention where the softmax is
    replaced by ReLU (optionally squared), with gated RMSNorm applied to the
    input and to the aggregated per-head values.

    Args:
        dim: Model dimension of input and output.
        causal: Apply a causal (no-future) mask to the attention weights.
        dim_head: Dimension per attention head.
        heads: Number of attention heads.
        num_memory_kv: Number of learned memory key/value slots prepended to k/v.
        relu_squared: Square the ReLU'd attention weights.
    """
    def __init__(
        self,
        *,
        dim,
        causal = True,
        dim_head = 64,
        heads = 8,
        num_memory_kv = 0,
        relu_squared = False
    ):
        super().__init__()
        self.heads = heads
        inner_dim = dim_head * heads
        self.scale = dim_head ** -0.5  # 1/sqrt(d) query scaling
        self.causal = causal
        self.relu_squared = relu_squared
        self.norm = GatedRMSNorm(dim)
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        # Learned memory key/value slots, shared across the batch
        self.mem_k = nn.Parameter(torch.randn(num_memory_kv, inner_dim))
        self.mem_v = nn.Parameter(torch.randn(num_memory_kv, inner_dim))
        # Per-head normalization of the aggregated values, applied before merging heads
        self.norm_values = GatedRMSNorm(dim_head)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
        )

    def forward(self, x, mask = None):
        """Apply ReLA over ``x``.

        Args:
            x: Input tensor of shape (batch, seq, dim).
            mask: Optional boolean key mask of shape (batch, seq); positions
                where the mask is False get zero attention weight.
                NOTE(review): the mask is not expanded for the memory kv
                slots, so it appears to assume num_memory_kv == 0 whenever a
                mask is passed — confirm with callers.

        Returns:
            Tensor of shape (batch, seq, dim).
        """
        b, device = x.shape[0], x.device
        x = self.norm(x)
        h = self.heads
        q, k, v = self.to_qkv(x).chunk(3, dim = -1)
        # Prepend the learned memory slots to keys/values for every batch element
        mem_k, mem_v = map(lambda t: repeat(t, 'n d -> b n d', b = b), (self.mem_k, self.mem_v))
        k = torch.cat((mem_k, k), dim = 1)
        v = torch.cat((mem_v, v), dim = 1)
        # Split the head dimension out: (b, n, h*d) -> (b, h, n, d)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
        q = q * self.scale
        sim = einsum('b h i d, b h j d -> b h i j', q, k)
        # ReLU instead of softmax: weights are non-negative but unnormalized
        attn = F.relu(sim)
        if self.relu_squared:
            attn = attn ** 2
        if exists(mask):
            mask = rearrange(mask, 'b j -> b 1 1 j')
            attn = attn.masked_fill(~mask, 0.)
        if self.causal:
            i, j = attn.shape[-2:]
            # Zero out future positions; the j - i + 1 offset aligns the diagonal
            # when keys are longer than queries (memory slots stay visible)
            causal_mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
            attn = attn.masked_fill(causal_mask, 0.)
        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = self.norm_values(out)
        out = rearrange(out, 'b h n d -> b n (h d)')
        return self.to_out(out)
class ReLATransformer(nn.Module):
    """Token-level transformer built from ReLA attention blocks.

    Args:
        num_tokens: Vocabulary size.
        dim: Model dimension.
        depth: Number of (attention, feed-forward) layer pairs.
        max_seq_len: Maximum sequence length (size of the positional embedding).
        causal: Use causal attention.
        heads: Number of attention heads per layer.
        dim_head: Dimension per attention head.
        num_memory_kv: Learned memory key/value slots per attention layer.
        no_ff: Omit the feed-forward sublayers entirely.
        ff_mult: Hidden-size multiplier of the feed-forward sublayers.
        relu_squared: Square the ReLU attention weights.
    """
    def __init__(
        self,
        *,
        num_tokens,
        dim,
        depth,
        max_seq_len,
        causal = True,
        heads = 8,
        dim_head = 64,
        num_memory_kv = 0,
        no_ff = False,
        ff_mult = 4,
        relu_squared = False
    ):
        super().__init__()
        self.max_seq_len = max_seq_len
        self.token_emb = nn.Embedding(num_tokens, dim)
        self.pos_emb = nn.Embedding(max_seq_len, dim)
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                ReLA(dim = dim, relu_squared = relu_squared, heads = heads, dim_head = dim_head, num_memory_kv = num_memory_kv, causal = causal),
                FeedForward(dim = dim, mult = ff_mult) if not no_ff else None
            ]))
        self.to_logits = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, num_tokens)
        )

    def forward(self, x, mask = None):
        """Map token ids of shape (batch, seq) to logits (batch, seq, num_tokens)."""
        n, device = x.shape[1], x.device
        x = self.token_emb(x)
        # Learned absolute positional embedding, broadcast over the batch
        pos_emb = self.pos_emb(torch.arange(n, device = device))
        x = x + rearrange(pos_emb, 'n d -> 1 n d')
        for attn, ff in self.layers:
            x = attn(x, mask = mask) + x  # residual attention sublayer
            if exists(ff):
                x = ff(x) + x             # residual feed-forward sublayer
        return self.to_logits(x)
from functools import partial
import torch
import random
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
def exists(val):
    """Return True when *val* is not None (i.e. a value was supplied)."""
    return val is not None
def default(value, default):
    """Return *value* unless it is None, in which case return *default*."""
    if value is None:
        return default
    return value
def log(t, eps=1e-9):
    """Natural log with a small epsilon offset so that log of zero stays finite."""
    shifted = t + eps
    return torch.log(shifted)
def top_k(logits, thres = 0.9):
    """Keep only the top-k logits per row; set the rest to -inf.

    k is the top (1 - thres) fraction of the vocabulary, clamped to at
    least 1: previously k could round down to 0 (high threshold or tiny
    vocab), which masked out every logit and made sampling impossible.

    Args:
        logits: 2D tensor of shape (batch, vocab).
        thres: Fraction of the vocabulary to discard (0.9 keeps the top 10%).

    Returns:
        Tensor of the same shape with non-top-k entries set to -inf.
    """
    k = max(1, int((1 - thres) * logits.shape[-1]))
    val, ind = torch.topk(logits, k)
    probs = torch.full_like(logits, float('-inf'))
    probs.scatter_(1, ind, val)
    return probs
class AutoregressiveWrapper(nn.Module):
    """Wrap a token-level model for next-token training and sampling.

    ``forward`` computes the autoregressive cross-entropy loss;
    ``generate`` samples continuations with top-k filtering and the
    Gumbel-max trick.
    """
    def __init__(self, net, ignore_index = None, pad_value = 0):
        super().__init__()
        self.pad_value = pad_value
        # Label index ignored by the loss; defaults to the pad value
        self.ignore_index = default(ignore_index, pad_value)
        self.net = net
        self.max_seq_len = net.max_seq_len

    @torch.no_grad()
    def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
        """Sample up to ``seq_len`` new tokens following ``start_tokens`` (batch, t).

        Returns only the newly generated tokens (the prompt is stripped).
        NOTE(review): generation stops early only when *every* sequence in
        the batch samples ``eos_token`` at the same step.
        """
        was_training = self.net.training
        num_dims = len(start_tokens.shape)
        b, t = start_tokens.shape
        self.net.eval()
        out = start_tokens
        for _ in range(seq_len):
            # Crop the context to the model's maximum sequence length
            x = out[:, -self.max_seq_len:]
            logits = self.net(x, **kwargs)
            logits = logits[:, -1, :]  # logits for the next position only
            filtered_logits = filter_logits_fn(logits, thres = filter_thres)
            # Gumbel-max trick: argmax of (logits + Gumbel noise) samples from the softmax
            gumbel_noise = -log(-log(torch.zeros_like(filtered_logits).uniform_(0, 1)))
            sample = ((filtered_logits / temperature) + gumbel_noise).argmax(dim=-1)
            out = torch.cat((out, sample[:, None]), dim=-1)
            if eos_token is not None and (sample == eos_token).all():
                break
        out = out[:, t:]  # strip the prompt, keep only generated tokens
        self.net.train(was_training)
        return out

    def forward(self, x, *args, **kwargs):
        """Next-token cross-entropy loss over ``x`` (batch, seq) of token ids."""
        # Shifted input/target pairs: predict token i+1 from tokens <= i
        inp, labels = x[:, :-1], x[:, 1:]
        out = self.net(inp, *args, **kwargs)
        loss = F.cross_entropy(out.transpose(1, 2), labels, ignore_index = self.ignore_index)
        return loss
from datetime import datetime
import random
import emoji
from random import randint
rand = {}
class long(object):
    """Elapsed-time helpers.

    Each classmethod parses a "%Y-%m-%d %H:%M:%S" timestamp string and
    returns the (truncated) time elapsed from it to ``dt_start``.

    NOTE(review): ``now2``/``dt_start`` are evaluated once at class
    definition (import) time, so in a long-running process every result is
    relative to process start rather than the current time — confirm this
    is intended before relying on it.
    """
    now = None                                       # Parsed timestamp of the most recent call
    time2 = datetime.now()                           # Wall clock captured at class definition
    now2 = time2.strftime("%Y-%m-%d %H:%M:%S")       # Same instant, truncated to whole seconds
    dt_start = datetime.strptime(now2, "%Y-%m-%d %H:%M:%S")  # Reference point for all deltas
    now_date = datetime.now()
    time = 0                                         # Result of the most recent call (also returned)

    def __init__(self):
        pass

    @classmethod
    def seconds(cls, data):
        """Whole seconds elapsed from *data* to the reference point.

        :param data: timestamp string in "%Y-%m-%d %H:%M:%S" format
        :return: elapsed seconds (negative if *data* is in the future)
        """
        cls.now = datetime.strptime(data, "%Y-%m-%d %H:%M:%S")
        cls.time = cls.dt_start - cls.now
        cls.time = int(cls.time.total_seconds())
        return cls.time

    @classmethod
    def minutes(cls, data):
        """Whole minutes elapsed from *data* to the reference point.

        :param data: timestamp string in "%Y-%m-%d %H:%M:%S" format
        :return: elapsed minutes
        """
        cls.now = datetime.strptime(data, "%Y-%m-%d %H:%M:%S")
        cls.time = cls.dt_start - cls.now
        cls.time = int(cls.time.total_seconds() // 60)
        return cls.time

    @classmethod
    def hours(cls, data):
        """Whole hours elapsed from *data* to the reference point.

        :param data: timestamp string in "%Y-%m-%d %H:%M:%S" format
        :return: elapsed hours
        """
        cls.now = datetime.strptime(data, "%Y-%m-%d %H:%M:%S")
        cls.time = cls.dt_start - cls.now
        cls.time = int(cls.time.total_seconds() // 60) // 60
        return cls.time

    @classmethod
    def days(cls, data):
        """Whole days elapsed from *data* to the reference point.

        :param data: timestamp string in "%Y-%m-%d %H:%M:%S" format
        :return: elapsed days
        """
        cls.now = datetime.strptime(data, "%Y-%m-%d %H:%M:%S")
        cls.time = cls.dt_start - cls.now
        cls.time = int((cls.time.total_seconds() // 60) // 60) // 24
        return cls.time

    @classmethod
    def mounts(cls, data):
        """Whole 30-day months elapsed from *data* to the reference point.

        Name kept for backward compatibility (typo for "months").

        :param data: timestamp string in "%Y-%m-%d %H:%M:%S" format
        :return: elapsed 30-day months
        """
        cls.now = datetime.strptime(data, "%Y-%m-%d %H:%M:%S")
        cls.time = cls.dt_start - cls.now
        cls.time = int(((cls.time.total_seconds() // 60) // 60) // 24) // 30
        return cls.time

    @classmethod
    def years(cls, data):
        """Whole 365-day years elapsed from *data* to the reference point.

        :param data: timestamp string in "%Y-%m-%d %H:%M:%S" format
        :return: elapsed 365-day years
        """
        cls.now = datetime.strptime(data, "%Y-%m-%d %H:%M:%S")
        cls.time = cls.dt_start - cls.now
        cls.time = int(((cls.time.total_seconds() // 60) // 60) // 24) // 365
        return cls.time
class mathematics(object):
    """Small arithmetic helpers: mean and percentage calculations."""

    # Snapshot of the most recent average() call. These are now reset on every
    # call: the original accumulated across calls, making every call after the
    # first return a wrong mean.
    kolv = 0     # Count of values in the last average() call
    result = 0   # Sum of values in the last average() call

    def __init__(self):
        pass

    @classmethod
    def average(cls, *nums: int):
        """Return the arithmetic mean of *nums*.

        :param nums: numbers to average
        :return: mean as a float
        :raises ZeroDivisionError: if called with no arguments (original behavior)
        """
        cls.kolv = len(nums)
        cls.result = sum(nums)
        return float(cls.result) / cls.kolv

    @classmethod
    def percent(cls, num: int, percent: int, mod: str = "add"):
        """Percentage helper.

        :param num: base number
        :param percent: percentage to apply
        :param mod: "add" (num increased by percent), "remove" (num decreased
            by percent), "percent" (one percent of num, ignores ``percent``),
            or "get" (percent of num)
        :return: computed value
        :raises Exception: if ``mod`` is not one of the supported modes
        """
        if mod == "add":
            return num + (num / 100 * percent)
        elif mod == "remove":
            return num - (num / 100 * percent)
        elif mod == "percent":
            return num / 100
        elif mod == "get":
            return num / 100 * percent
        else:
            raise Exception("mod error(add/remove/get/percent)")
class randoms(object):
    """Random helpers: random integer lists, probability checks and choices.

    ``randint`` and ``choice`` record the generated list in the module-level
    ``rand`` dict under ``key`` for later inspection.
    """

    num3 = 0         # Number of randint() calls performed (kept for compatibility)
    args_list2 = []  # Scratch state kept for backward compatibility (reset per choice() call)
    out = []         # Result of the last choice() call (no longer accumulates across calls)
    args_list = {}   # Scratch state kept for backward compatibility (reset per choice() call)

    def __init__(self):
        pass

    @classmethod
    def randint(cls, number: int = 1, number2: int = None, key: str = None, array_long: int = 2,
                shuffle_long: int = 1):
        """Generate a shuffled list of random integers.

        :param number: upper bound when ``number2`` is None, otherwise lower bound
        :param number2: optional upper bound
        :param key: key under which the list is stored in the module-level ``rand``
        :param array_long: length of the generated list
        :param shuffle_long: number of shuffle passes
        :return: tuple of (first element, full list)
        """
        cls.num3 += 1
        if key is None:
            key = str(random.randint(1, 999999))
        low, high = (1, number) if number2 is None else (number, number2)
        values = [random.randint(low, high) for _ in range(array_long)]
        for _ in range(shuffle_long):
            random.shuffle(values)
        rand[key] = values
        return values[0], values

    @classmethod
    def chance(cls, _from: int, _to: int):
        """Return True when two independent draws from [_from, _to] coincide.

        :param _from: inclusive lower bound
        :param _to: inclusive upper bound
        :return: True with probability 1/(_to - _from + 1)
        """
        return randint(_from, _to) == randint(_from, _to)

    @classmethod
    def coefficient(cls, coefficient: float):
        """Return True with probability approximately ``coefficient`` (0.0 - 1.0).

        :param coefficient: probability of returning True
        :return: bool
        """
        return randint(0, 100) in range(0, int(coefficient * 100))

    @classmethod
    def choice(cls, *args, output: int = 1, shuffle_long: int = 2, array_long: int = 1, key: str = None):
        """Randomly pick ``output`` elements (with replacement) from ``args``.

        Bug fix: the original accumulated results in the class-level
        ``out``/``args_list2`` lists, so every call returned the concatenation
        of all previous results. Each call now returns a fresh list of
        exactly ``output`` elements.

        :param args: candidate values
        :param output: number of elements to return
        :param shuffle_long: number of shuffle passes over the sampled pool
        :param array_long: size of the sampled pool (grown to ``output`` if smaller)
        :param key: key under which the pool is stored in the module-level ``rand``
        :return: list of ``output`` randomly chosen elements
        """
        if key is None:
            key = str(random.randint(1, 999999))
        # Reset (not append to) the compatibility scratch state each call
        cls.args_list2 = list(args)
        cls.args_list = {str(i): random.choice(args) for i in range(len(args))}
        pool = [random.choice(args) for _ in range(max(array_long, output))]
        for _ in range(shuffle_long):
            random.shuffle(pool)
        rand[key] = pool
        cls.out = pool[:output]
        return cls.out
def razr(num):
    """Format *num* with spaces as thousands separators (1234567 -> "1 234 567").

    Bug fix: '{:,}' groups digits with commas, so the original
    ``.replace('.', ' ')`` never matched anything and the commas leaked
    through. The commas are now replaced with spaces as intended.

    :param num: integer (or anything accepted by int())
    :return: formatted string
    """
    num = int(num)
    return '{:,}'.format(num).replace(',', ' ')
class work_text(object):
    """Text helpers: emoji stripping and palindrome checks."""

    text_no_emoji = None  # Result of the most recent remove_emoji() call

    def __init__(self):
        self.text_no_emoji = ""

    @classmethod
    def remove_emoji(cls, text):
        """Strip all emoji characters from *text*.

        The stripped text is also cached on ``cls.text_no_emoji``.

        :param text: input string
        :return: *text* with every emoji removed
        """
        stripped = emoji.get_emoji_regexp().sub(u'', text)
        cls.text_no_emoji = stripped
        return stripped

    @classmethod
    def palindrome(cls, word: str):
        """Return True when *word* reads the same forwards and backwards.

        :param word: string to test
        :return: bool
        """
        return word == word[::-1]
# relatable
relatable is a Python package for converting a collection of documents,
such as a MongoDB collection, into an interrelated set of tables, such as a
schema in a relational database.
## Installation
```
pip3 install relatable
```
## Example of use
In this example we will walk through a use case of `relatable` for the sample dataset found in the repository of this
package in the data folder.
To generate a relational schema for this dataset, let's instantiate a `RelationalSchema` with the list of documents as
input:
```
from relatable import RelationalSchema
import json
with open("data/example_input.json", "r") as fp:
docs = json.load(fp)
rs = RelationalSchema(docs)
```
Once the `RelationalSchema` is instantiated, we can check its metadata. This metadata is a list of flat dictionaries, so
we can make use of Pandas to load it into a DataFrame:
```
import pandas as pd
pd.DataFrame(rs.generate_metadata())
```
| | table | field | type | nullable | unique |
|----:|:----------------------------|:-------------------------------|:--------|:---------|:-------|
| 0 | main | main_id | Integer | False | True |
| 1 | main | name | String | False | True |
| 2 | main | age | Integer | False | True |
| 3 | experience | experience_id | Integer | False | True |
| 4 | experience | main_id | Integer | False | False |
| 5 | experience | experience.company | String | False | True |
| 6 | experience | experience.role | String | False | True |
| 7 | experience | experience.from | Integer | False | True |
| 8 | experience | experience.to | Integer | False | False |
| 9 | experience.technologies | experience.technologies_id | Integer | False | True |
| 10 | experience.technologies | experience_id | Integer | False | False |
| 11 | experience.technologies | main_id | Integer | False | False |
| 12 | experience.technologies | experience.technologies | String | False | True |
| 13 | experience.responsibilities | experience.responsibilities_id | Integer | False | True |
| 14 | experience.responsibilities | experience_id | Integer | False | False |
| 15 | experience.responsibilities | main_id | Integer | False | False |
| 16 | experience.responsibilities | experience.responsibilities | String | False | True |
We can see that `RelationalSchema` has inferred a relational schema consisting of four tables with primary keys and
foreign keys interrelating the tables.
It would be nice to rename these tables with more descriptive names, and also rename some columns. We can do so with
the `rename_table` and `rename_column` methods:
```
rs.rename_table("main", "person")
rs.rename_table("experience", "job")
rs.rename_table("experience.technologies", "technology")
rs.rename_table("experience.responsibilities", "responsibility")
for name in ["company", "role", "from", "to"]:
rs.tables["job"].rename_column(f"experience.{name}", name)
rs.tables["technology"].rename_column("experience.technologies", "technology")
rs.tables["responsibility"].rename_column("experience.responsibilities", "responsibility")
pd.DataFrame(rs.generate_metadata())
```
| | table | field | type | nullable | unique |
|----:|:---------------|:------------------|:--------|:---------|:-------|
| 0 | person | person_id | Integer | False | True |
| 1 | person | name | String | False | True |
| 2 | person | age | Integer | False | True |
| 3 | job | job_id | Integer | False | True |
| 4 | job | person_id | Integer | False | False |
| 5 | job | company | String | False | True |
| 6 | job | role | String | False | True |
| 7 | job | from | Integer | False | True |
| 8 | job | to | Integer | False | False |
| 9 | technology | technology_id | Integer | False | True |
| 10 | technology | job_id | Integer | False | False |
| 11 | technology | person_id | Integer | False | False |
| 12 | technology | technology | String | False | True |
| 13 | responsibility | responsibility_id | Integer | False | True |
| 14 | responsibility | job_id | Integer | False | False |
| 15 | responsibility | person_id | Integer | False | False |
| 16 | responsibility | responsibility | String | False | True |
The relationships between the tables are the following:
- The table `person` represents the main entity of the dataset, with a row for each person.
- The table `job` references the table `person`.
- The tables `technology` and `responsibility` reference the table `job`, and inherit the reference to `person` from
`job`.
Finally, let's look at each of the tables:
```
dfs = [pd.DataFrame(t.data).set_index(t.primary_key) for t in rs.tables]
```
Table `person`:
| person_id | name | age |
|----------:|:------|----:|
| 0 | Alice | 34 |
| 1 | Bob | 27 |
Table `job`:
| job_id | person_id | company | role | from | to |
|-------:|----------:|:---------|:----------------------|-----:|-----:|
| 0 | 0 | Google | Software Engineer | 2020 | 2022 |
| 1 | 0 | Facebook | Senior Data Scientist | 2017 | 2020 |
| 2 | 1 | OpenAI | NLP Engineer | 2019 | 2022 |
Table `technology`:
| technology_id | job_id | person_id | technology |
|--------------:|-------:|----------:|:-----------|
| 0 | 0 | 0 | C++ |
| 1 | 0 | 0 | LolCode |
| 2 | 1 | 0 | Python |
| 3 | 1 | 0 | Excel |
| 4 | 2 | 1 | Triton |
| 5 | 2 | 1 | LaTeX |
Table `responsibility`:
| responsibility_id | job_id | person_id | responsibility |
|------------------:|-------:|----------:|:-----------------------------------------------------------|
| 0 | 0 | 0 | Google stuff |
| 1 | 0 | 0 | Mark TensorFlow issues as "Won't Do" |
| 2 | 1 | 0 | Censor media |
| 3 | 1 | 0 | Learn the foundations of ML |
| 4 | 1 | 0 | Do Kaggle competitions |
| 5 | 2 | 1 | Assert that GPT-2 is racist |
| 6 | 2 | 1 | Assert that GPT-3 is racist |
| 7 | 2 | 1 | Develop a prototype of a premium non-racist language model |
# Example of use with the Airbnb MongoDB sample dataset
Another example of use with the Airbnb MongoDB sample dataset, downloadable
[here](https://github.com/neelabalan/mongodb-sample-dataset/blob/main/sample_airbnb/listingsAndReviews.json), can be
found in the repository of this package in the script `airbnb_example.py`.
| /relatable-0.3.0.tar.gz/relatable-0.3.0/README.md | 0.405449 | 0.889241 | README.md | pypi |
import unittest
from collections import OrderedDict
try:
    # Python 3.3+: the abstract base classes live in collections.abc, and
    # the aliases in collections were removed in Python 3.10.
    from collections.abc import Mapping, MutableMapping
except ImportError:  # Python 2 fallback
    from collections import Mapping, MutableMapping
class RelationError(Exception):
    """Marker exception identifying failures raised by the Relation code."""
class Relation(MutableMapping):
    """
    Relation() implements a discrete mathematical relation. A relation is
    simply a pairing of elements of one set, the domain, with another, the
    range. Rephrasing more formally, a relation is a collection of tuples
    (x,y) where x is in the domain and y is in the range. A relation,
    implemented as code, can perform a variety of common tasks:

    - Inversion: quickly find the values(range) associated with a key(domain)
    - Partitioning: group values into unique buckets
    - Aliasing: maintain a unique pairing between keys and values
    - Tagging: associate two sets in an arbitrary manner

    These roughly correspond to the four cardinalities of a relation:

    - Many-to-one (M:1): a function, each range value having possibly
      multiple values in the domain
    - One-to-many (1:M): a categorization, where each element in the domain
      is associated with a unique group of values in the range
    - One-to-one (1:1): an isomorphism, where each element in the domain is
      uniquely identified with a single range value
    - Many-to-many (M:N): an unrestricted pairing of domain and range
    """

    CARDINALITIES = '1:1', '1:M', 'M:1', 'M:N'

    # map the four cardinalities to their inverted cardinality
    INVERTED_CARDINALITY = {'1:1': '1:1', '1:M': 'M:1',
                            'M:1': '1:M', 'M:N': 'M:N'}

    def __init__(self, init=None, cardinality='M:N', ordered=False):
        """
        :param init: optional mapping used to seed the relation.
        :param cardinality: one of '1:1', '1:M', 'M:1', 'M:N'.
        :param ordered: if True, use OrderedDict to preserve insertion order.
        :raises RelationError: if cardinality is not one of CARDINALITIES.
        """
        if not ordered:
            self.forward = {}
            self.inverse = {}
        else:
            self.forward = OrderedDict()
            self.inverse = OrderedDict()
        if cardinality not in Relation.CARDINALITIES:
            raise RelationError('Invalid cardinality:' + str(cardinality))
        else:
            self.cardinality = cardinality
        if init is not None:
            self.update(init)

    def isordered(self):
        """Return True when this relation preserves insertion order."""
        return isinstance(self.forward, OrderedDict)

    def __invert__(self):
        """
        Create a shallow copy of a relation with domain and range swapped.

        Changes to inversion will affect original and vice-versa, because
        the forward/inverse dictionaries are shared, not copied.
        """
        new = Relation(
            cardinality=Relation.INVERTED_CARDINALITY[self.cardinality])
        new.inverse = self.forward
        new.forward = self.inverse
        return new

    @staticmethod
    def _remove(mapping, reference):
        """Remove `reference` from every value set; drop keys left empty."""
        empty = []
        for key, values in mapping.items():
            if reference in values:
                mapping[key].remove(reference)
                if len(mapping[key]) == 0:
                    empty.append(key)  # mark for removal
        for e in empty:
            del mapping[e]

    def _remove_domain(self, key):
        """Delete `key` from the domain and scrub it from the range index."""
        del self.forward[key]
        Relation._remove(self.inverse, key)

    __delitem__ = _remove_domain

    def _remove_range(self, key):
        """Delete `key` from the range and scrub it from the domain index."""
        del self.inverse[key]
        Relation._remove(self.forward, key)

    def __setitem__(self, domain, target):
        """
        Set range for a domain value. Behavior dependent on cardinality:

        - M:1: assign/overwrite range for domain, similar to dict behavior
        - 1:1: assign one domain uniquely to one range; remove pairing from
          other domains
        - M:N: append range to domain values, append domain to range values
        - 1:M: append range to domain values. Remove range from other domains.

        Passing a set on either side relates every member of that set.
        """
        if not isinstance(domain, set):
            domain = [domain]
        if not isinstance(target, set):
            target = [target]
        for d in domain:
            for t in target:
                # unique-domain cardinalities discard any existing pairing
                if self.cardinality in ['1:1', 'M:1']:
                    if d in self.forward:
                        self._remove_domain(d)
                # unique-range cardinalities discard other owners of t
                if self.cardinality in ['1:1', '1:M']:
                    if t in self.inverse:
                        self._remove_range(t)
                self.forward.setdefault(d, set()).add(t)
                self.inverse.setdefault(t, set()).add(d)

    def __getitem__(self, domain):
        """
        Get range for a domain value. Behavior dependent on cardinality:

        - M:1 / 1:1: the single paired value
        - M:N / 1:M: the set of paired values
        """
        if self.cardinality in ['1:1', 'M:1']:
            # single-valued: unwrap the one element of the internal set
            for target in self.forward[domain]:
                return target
        else:
            return self.forward[domain]

    def copy(self):
        """
        Create one-level copy of a Relation.

        Note: the value sets themselves are shared with the copy.
        """
        r = Relation(cardinality=self.cardinality, ordered=self.isordered())
        r.forward.update(self.forward)
        r.inverse.update(self.inverse)
        return r

    def extend(self, mapping):
        """Merge every pairing from `mapping` into this relation."""
        if not isinstance(mapping, Mapping):
            raise RelationError('Cannot extend using ' + str(mapping))
        else:
            for data in mapping:
                self[data] = mapping[data]
        return self

    def __str__(self):
        def setstr(x):
            # Bug fix: members may be arbitrary objects (e.g. ints) and
            # str.join() requires strings -- convert each member first.
            return '{' + ','.join(str(v) for v in x) + '}'
        s = []
        s.append(self.cardinality)
        s.append('->')
        for d in self.forward:
            s.append(str(d) + ':' + setstr(self.forward[d]))
        s.append('<-')
        for t in self.inverse:
            s.append(str(t) + ':' + setstr(self.inverse[t]))
        return '\n'.join(s)

    def clear(self):
        """Remove every pairing, keeping cardinality and ordering."""
        self.__init__(cardinality=self.cardinality, ordered=self.isordered())

    def __len__(self):
        return len(self.forward)

    def values(self):
        """ All values in range """
        return self.inverse.keys()

    def keys(self):
        """ All values in domain """
        return self.forward.keys()

    def __iter__(self):
        return self.forward.__iter__()
# Naming shortcuts
class Isomorphism(Relation):
    """1:1 relation: each domain key pairs with exactly one range value."""
    def __init__(self, init=None, ordered=False):
        Relation.__init__(self, init, cardinality='1:1', ordered=ordered)
class Function(Relation):
    """M:1 relation: each domain key maps to a single range value."""
    def __init__(self, init=None, ordered=False):
        Relation.__init__(self, init, cardinality='M:1', ordered=ordered)
class Partition(Relation):
    """1:M relation: each range value belongs to exactly one domain key."""
    def __init__(self, init=None, ordered=False):
        Relation.__init__(self, init, cardinality='1:M', ordered=ordered)
# Unit Tests
class Relation_Tests(unittest.TestCase):
    """Exercises Relation and its named subclasses across all cardinalities."""

    def test_basic(self):
        # M:N relation preserving insertion order.
        fruit = Relation(ordered=True)
        fruit['apple']='red'
        fruit['apple']='shiny'
        fruit['apple']='round'
        fruit['melon']='round'
        fruit['melon']='green'
        fruit['watermelon']='red'
        fruit['watermelon']='green'
        fruit['watermelon']='ovoid'
        fruit['pear']='yellow'
        fruit['kiwi']='green'
        fruit['kiwi']='seedy'
        # inversion exposes range values as keys
        assert 'seedy' in ~fruit
        # pop (from MutableMapping) returns the full value set
        assert fruit.pop('kiwi') == set(['green','seedy'])
        assert 'seedy' not in fruit.values()
        for f in fruit:
            assert isinstance(f, str)
        assert list(fruit.keys()) == ['apple','melon','watermelon','pear']
        assert len(fruit) == 4
        assert len(~fruit) == 6
        assert len(fruit['apple']) == 3
        assert len(fruit['watermelon']) == 3
        assert 'pear' in (~fruit)['yellow']
        assert 'yellow' in fruit['pear']
        assert fruit['pear'] == set(['yellow'])
        # deletion scrubs the inverse index as well
        del fruit['pear']
        assert len(fruit) == 3
        assert len(~fruit) == 5
        del fruit['apple']
        assert len(fruit) == 2
        assert len(~fruit) == 4
        foo = fruit.copy()
        assert len(~foo) == len(~fruit)
        assert len(foo) == len(fruit)
        assert len(fruit.keys()) == len(fruit)
        assert len(fruit.values()) == len(~fruit)
        assert len(fruit.items()) == len(fruit)
        assert fruit.get('armadillo') is None
        fruit.clear()
        assert len(fruit) == 0
        assert fruit.get('melon') is None

    def test_creation(self):
        # seeding from a dict via the named subclasses
        m=Isomorphism({'a':1,'b':2,'c':11})
        mp = ~m
        assert mp[2] == 'b'
        m=Function({'a':1,'b':1,'c':11})
        mp = ~m
        assert mp[1] == {'b','a'}
        m=Partition({'a':1,'b':2,'c':11})
        m['a'] = 3
        mp = ~m
        assert mp[1] == 'a'
        assert mp[2] != 'a'
        assert mp[3] == 'a'

    def test_composite(self):
        # extend() merges any Mapping, including another Relation
        fruits = {'apple':'red','cherry':'red','strawberry':'red','banana':'yellow'}
        fruit = Relation(fruits)
        assert len(fruit) == len(fruits)
        more = {'yellow':'pear','pomegranate':'red','watermelon':'seedy'}
        fruit.extend(more)
        assert len(fruit) == len(more) + len(fruits)
        even_more = Isomorphism({'papya':'starchy','grape':'tangy'})
        fruit.extend(even_more)
        assert len(fruit) == len(even_more) + len(more) + len(fruits)

    def test_CARDINALITIES(self):
        # 1:1 evicts previous owners on both sides
        fruit = Relation(cardinality='1:1')
        fruit['apple']='red'
        fruit['pear']='yellow'
        fruit['apple']='green'
        assert 'apple' in fruit
        fruit['watermelon']='green'
        assert 'apple' not in fruit
        fruit['papya']='green'
        assert 'watermelon' not in fruit
        # M:1 overwrites the domain's single value
        fruit = Relation(fruit, cardinality='M:1')
        fruit['papya']='green'
        fruit['rasberry']='blue'
        fruit['rasberry']='red'
        assert fruit['rasberry'] == 'red'
        fruit['cranberry']='red'
        assert 'rasberry' in fruit
        assert fruit['rasberry'] == 'red'
        # 1:M steals range values from other domains
        fruit = Relation(fruit, cardinality='1:M')
        fruit['cranberry'] = 'round'
        fruit['lemon'] = 'sour'
        fruit['cranberry'] = 'sour'
        assert 'lemon' not in fruit
        assert len(fruit['cranberry']) > 1
        fruit['pear']='sweet'
        assert (~fruit)['sweet'] == 'pear'
        # M:N accumulates freely on both sides
        fruit = Relation(fruit, cardinality='M:N')
        fruit['apple'] = 'sweet'
        assert len((~fruit)['sweet']) == 2
        fruit['apple'] = 'fruit'
        assert len(fruit['apple']) == 2
if __name__ == "__main__":
    # Run the unit tests when the module is executed directly.
    unittest.main()
from collections import OrderedDict
from uuid import UUID
from future.moves.urllib.parse import urlparse
from six import string_types, callable
from datetime import datetime
from inspect import isfunction
from dateutil import parser
from importlib import import_module
from .types import TypedSequence, TypedMapping, TypedSet
from .functions import to_model
CHILD_ERROR_MSG = "Failed to convert value ({}) to child object class ({}). " \
+ "... [Original error message: {}]"
def to_child_field(cls):
    """
    Returns a callable instance that will convert a value to a Child object.

    :param cls: Valid class type of the Child.
    :return: instance of ChildConverter.
    """

    class ChildConverter(object):

        def __init__(self, child_cls):
            self._cls = child_cls

        @property
        def cls(self):
            # resolved lazily so dotted-string class paths are supported
            return resolve_class(self._cls)

        def __call__(self, value):
            try:
                # Issue #33: a callable equal to the class itself means
                # "instantiate with defaults".
                if value == self._cls and callable(value):
                    value = value()
                return to_model(self.cls, value)
            except ValueError as e:
                raise ValueError(
                    CHILD_ERROR_MSG.format(value, self.cls, str(e)))

    return ChildConverter(cls)
def to_sequence_field(cls):
    """
    Returns a callable instance that will convert a value to a Sequence.

    :param cls: Valid class type of the items in the Sequence.
    :return: instance of the SequenceConverter.
    """

    class SequenceConverter(object):

        def __init__(self, item_cls):
            self._cls = item_cls

        @property
        def cls(self):
            # resolved lazily so dotted-string class paths are supported
            return resolve_class(self._cls)

        def __call__(self, values):
            items = [to_model(self.cls, value) for value in (values or [])]
            return TypedSequence(cls=self.cls, args=items)

    return SequenceConverter(cls)
def to_set_field(cls):
    """
    Returns a callable instance that will convert a value to a TypedSet.

    :param cls: Valid class type of the items in the Set.
    :return: instance of the SetConverter.
    """

    class SetConverter(object):

        def __init__(self, item_cls):
            self._cls = item_cls

        @property
        def cls(self):
            # resolved lazily so dotted-string class paths are supported
            return resolve_class(self._cls)

        def __call__(self, values):
            items = {to_model(self.cls, value) for value in (values or set())}
            return TypedSet(cls=self.cls, args=items)

    return SetConverter(cls)
def to_mapping_field(cls, key):  # pragma: no mccabe
    """
    Returns a callable instance that will convert a value to a Mapping.

    :param cls: Valid class type of the items in the Sequence.
    :param key: Attribute name of the key value in each item of cls instance.
    :return: instance of the MappingConverter.
    """

    class MappingConverter(object):

        def __init__(self, cls, key):
            self._cls = cls
            self.key = key

        @property
        def cls(self):
            # resolved lazily so dotted-string class paths are supported
            return resolve_class(self._cls)

        def __call__(self, values):
            kwargs = OrderedDict()
            # already-typed mappings pass through unchanged
            if isinstance(values, TypedMapping):
                return values
            # only plain dicts (or None) are accepted for conversion
            if not isinstance(values, (type({}), type(None))):
                raise TypeError("Invalid type : {}".format(type(values)))
            if values:
                for key_value, item in values.items():
                    # inject the mapping key into each child dict so the
                    # child model carries its own key attribute
                    if isinstance(item, dict):
                        item[self.key] = key_value
                    item = to_model(self.cls, item)
                    kwargs[key_value] = item
            return TypedMapping(cls=self.cls, kwargs=kwargs, key=self.key)

    return MappingConverter(cls, key)
def str_if_not_none(value):
    """
    Returns str(value) unless the value is None or already a string.

    :param value: None or a value that can be converted to a str.
    :return: None or str(value)
    """
    if value is None or isinstance(value, string_types):
        return value
    return str(value)
def int_if_not_none(value):
    """
    Returns an int(value) if the value is not None.

    :param value: None or a value that can be converted to an int.
    :return: None or int(value)
    """
    if value is None:
        return None
    return int(value)
def float_if_not_none(value):
    """
    Returns a float(value) if the value is not None.

    :param value: None or a value that can be converted to a float.
    :return: None or float(value)
    """
    if value is None:
        return None
    return float(value)
def str_to_url(value):
    """
    Returns urlparse(value) if the value provided is a str.

    :param value: str or an already-parsed URL (ParseResult) object
    :return: ParseResult object (non-string values pass through unchanged)
    """
    return urlparse(value) if isinstance(value, string_types) else value
def str_to_uuid(value):
    """
    Returns a UUID(value) if the value provided is a str.

    A plain function value (e.g. the uuid4 default factory) is invoked
    first to obtain the actual value.

    :param value: str, factory function, or UUID object
    :return: UUID object (non-string values pass through unchanged)
    """
    if isfunction(value):
        value = value()
    if isinstance(value, string_types):
        return UUID(value)
    return value
def to_date_field(formatter):
    """
    Returns a callable instance that will convert a string to a Date.

    :param formatter: String that represents data format for parsing.
    :return: instance of the DateConverter.
    """

    class DateConverter(object):

        def __init__(self, fmt):
            self.formatter = fmt

        def __call__(self, value):
            # strings are parsed with the configured strptime format
            if isinstance(value, string_types):
                value = datetime.strptime(value, self.formatter).date()
            # a datetime (parsed or passed in directly) is truncated to date
            if isinstance(value, datetime):
                value = value.date()
            return value

    return DateConverter(formatter)
def to_datetime_field(formatter):
    """
    Returns a callable instance that will convert a string to a DateTime.

    String values are parsed with dateutil's flexible parser; the formatter
    is retained for symmetry with the other converters.

    :param formatter: String that represents data format for parsing.
    :return: instance of the DateTimeConverter.
    """

    class DateTimeConverter(object):

        def __init__(self, fmt):
            self.formatter = fmt

        def __call__(self, value):
            if not isinstance(value, string_types):
                return value
            return parser.parse(value)

    return DateTimeConverter(formatter)
def to_time_field(formatter):
    """
    Returns a callable instance that will convert a string to a Time.

    :param formatter: String that represents data format for parsing.
    :return: instance of the TimeConverter.
    """

    class TimeConverter(object):

        def __init__(self, fmt):
            self.formatter = fmt

        def __call__(self, value):
            if not isinstance(value, string_types):
                return value
            return datetime.strptime(value, self.formatter).time()

    return TimeConverter(formatter)
def resolve_class(cls):
    """
    Resolve a dotted-string class path (e.g. "pkg.module.Model") to the
    actual class object; non-string inputs are returned unchanged.
    """
    if not isinstance(cls, str):
        return cls
    module_name, model_name = cls.rsplit(".", 1)
    return getattr(import_module(module_name), model_name)
from decimal import Decimal
from future.moves.urllib.parse import ParseResult
from collections import OrderedDict
from enum import Enum
from uuid import UUID
from datetime import date, datetime, time
from attr._compat import iteritems
from .functions import to_dict
from .types import (
TypedSequence, TypedMapping, TypedSet, DEFAULT_DATE_FORMAT,
DEFAULT_DATETIME_FORMAT, DEFAULT_TIME_FORMAT
)
@to_dict.register(list)  # noqa F811
@to_dict.register(set)
@to_dict.register(tuple)
def _(obj, **kwargs):
    """Serialize a list/set/tuple by converting each element recursively."""
    suppress_empty_values = kwargs.get("suppress_empty_values", False)
    retain_collection_types = kwargs.get("retain_collection_types", False)
    # empty collections are dropped (implicit None) under suppression
    if not suppress_empty_values or len(obj):
        # keep the concrete type only when requested; default to a list
        cf = obj.__class__ if retain_collection_types else list
        return cf([to_dict(i, **kwargs) for i in obj])
@to_dict.register(dict)  # noqa F811
def _(obj, **kwargs):
    """Serialize a dict, recursively converting both keys and values."""
    suppress_empty_values = kwargs.get("suppress_empty_values", False)
    dict_factory = kwargs.get("dict_factory", OrderedDict)
    items = []
    for kk, vv in iteritems(obj):
        vv = to_dict(vv, **kwargs)
        # None values are dropped entirely when suppression is enabled
        if (not suppress_empty_values) or (vv is not None):
            items.append((to_dict(kk, **kwargs), vv))
    # an empty mapping is dropped (implicit None) under suppression
    if not suppress_empty_values or len(items):
        return dict_factory(items)
@to_dict.register(TypedSequence)  # noqa F811
def _(obj, **kwargs):
    """Serialize a TypedSequence via its underlying list."""
    return to_dict(obj.list, **kwargs)


@to_dict.register(TypedSet)  # noqa F811
def _(obj, **kwargs):
    """Serialize a TypedSet via its underlying set."""
    return to_dict(obj.set, **kwargs)
@to_dict.register(TypedMapping)  # noqa F811
def _(obj, **kwargs):
    """Serialize a TypedMapping to a dict keyed by each child's key field."""
    suppress_map_key_values = kwargs.get("suppress_map_key_values", False)
    suppress_empty_values = kwargs.get("suppress_empty_values", False)
    rv = kwargs.get("dict_factory", OrderedDict)()
    items = obj.items()
    for key_value, item in items:
        sub_dict = to_dict(item, **kwargs)
        if suppress_map_key_values:
            # the key already appears as the mapping key, so drop the
            # duplicated field from the child dict.
            # NOTE(review): assumes sub_dict is a dict containing obj.key --
            # confirm to_dict always yields a dict for mapped children.
            sub_dict.pop(obj.key)
        rv[key_value] = sub_dict
    if not suppress_empty_values or len(items):
        return rv
@to_dict.register(Enum)  # noqa F811
def _(obj, **kwargs):
    """Serialize an Enum member as its value."""
    return obj.value


@to_dict.register(UUID)  # noqa F811
def _(obj, **kwargs):
    """Serialize a UUID as its canonical string form."""
    return str(obj)


@to_dict.register(ParseResult)  # noqa F811
def _(obj, **kwargs):
    """Serialize a parsed URL back to its original string form."""
    return obj.geturl()
@to_dict.register(date)  # noqa F811
def _(obj, **kwargs):
    """Format a date using the supplied or default date format."""
    formatter = kwargs.get('formatter') or DEFAULT_DATE_FORMAT
    return obj.strftime(formatter)


@to_dict.register(datetime)  # noqa F811
def _(obj, **kwargs):
    """Format a datetime; the sentinel "ISO_FORMAT" selects isoformat()."""
    formatter = kwargs.get('formatter') or DEFAULT_DATETIME_FORMAT
    return (obj.isoformat() if formatter == "ISO_FORMAT"
            else obj.strftime(formatter))


@to_dict.register(time)  # noqa F811
def _(obj, **kwargs):
    """Format a time using the supplied or default time format."""
    formatter = kwargs.get('formatter') or DEFAULT_TIME_FORMAT
    return obj.strftime(formatter)
@to_dict.register(Decimal)  # noqa F811
def _(obj, **kwargs):
    """Serialize a Decimal losslessly as its string representation."""
    return str(obj)
from decimal import Decimal
from future.moves.urllib.parse import ParseResult
from attr import attrib, NOTHING
from collections import OrderedDict
from uuid import uuid4, UUID
from datetime import date, datetime, time
from six import string_types
from . import _init_fields, types, converters, validators
def BooleanField(default=NOTHING, required=True, repr=True, cmp=True,
                 key=None, kw_only: bool = False):
    """
    Create a new bool field on a model.

    :param default: any boolean value
    :param bool required: whether the object is invalid if not provided.
    :param bool repr: include this field in the object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: make this attribute keyword-only in __init__.
    """
    return attrib(
        default=_init_fields.init_default(required, default, None),
        validator=_init_fields.init_validator(required, bool),
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
        kw_only=kw_only,
    )
def ChildField(cls, default=NOTHING, required=True, repr=True, cmp=True,
               key=None, kw_only: bool = False):
    """
    Create a new child field on a model.

    :param cls: class (or dotted-string name) of the model to be related.
    :param default: any object value of type cls
    :param bool required: whether the object is invalid if not provided.
    :param bool repr: include this field in the object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: make this attribute keyword-only in __init__.
    """
    # a string cls is resolved lazily, so only validate it is an object
    value_type = object if isinstance(cls, str) else cls
    return attrib(
        default=_init_fields.init_default(required, default, None),
        converter=converters.to_child_field(cls),
        validator=_init_fields.init_validator(required, value_type),
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
        kw_only=kw_only,
    )
def DateField(formatter=types.DEFAULT_DATE_FORMAT, default=NOTHING,
              required=True, repr=True, cmp=True, key=None,
              kw_only: bool = False):
    """
    Create a new date field on a model.

    :param formatter: date formatter string (default: "%Y-%m-%d")
    :param default: any date or string that can be converted to a date value
    :param bool required: whether the object is invalid if not provided.
    :param bool repr: include this field in the object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: make this attribute keyword-only in __init__.
    """
    return attrib(
        default=_init_fields.init_default(required, default, None),
        converter=converters.to_date_field(formatter),
        validator=_init_fields.init_validator(required, date),
        repr=repr,
        cmp=cmp,
        metadata=dict(formatter=formatter, key=key),
        kw_only=kw_only,
    )
def DateTimeField(formatter=types.DEFAULT_DATETIME_FORMAT, default=NOTHING,
                  required=True, repr=True, cmp=True, key=None,
                  kw_only: bool = False):
    """
    Create a new datetime field on a model.

    :param formatter: datetime formatter string (default: "ISO_FORMAT")
    :param default: any datetime or string convertible to a datetime
    :param bool required: whether the object is invalid if not provided.
    :param bool repr: include this field in the object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: make this attribute keyword-only in __init__.
    """
    return attrib(
        default=_init_fields.init_default(required, default, None),
        converter=converters.to_datetime_field(formatter),
        validator=_init_fields.init_validator(required, datetime),
        repr=repr,
        cmp=cmp,
        metadata=dict(formatter=formatter, key=key),
        kw_only=kw_only,
    )
def TimeField(formatter=types.DEFAULT_TIME_FORMAT, default=NOTHING,
              required=True, repr=True, cmp=True, key=None,
              kw_only: bool = False):
    """
    Create a new time field on a model.

    :param formatter: time formatter string (default: "%H:%M:%S")
    :param default: any time or string that can be converted to a time value
    :param bool required: whether the object is invalid if not provided.
    :param bool repr: include this field in the object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: make this attribute keyword-only in __init__.
    """
    return attrib(
        default=_init_fields.init_default(required, default, None),
        converter=converters.to_time_field(formatter),
        validator=_init_fields.init_validator(required, time),
        repr=repr,
        cmp=cmp,
        metadata=dict(formatter=formatter, key=key),
        kw_only=kw_only,
    )
def FloatField(default=NOTHING, required=True, repr=True, cmp=True,
               key=None, kw_only: bool = False):
    """
    Create a new float field on a model.

    :param default: any float value
    :param bool required: whether the object is invalid if not provided.
    :param bool repr: include this field in the object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: make this attribute keyword-only in __init__.
    """
    return attrib(
        default=_init_fields.init_default(required, default, None),
        converter=converters.float_if_not_none,
        validator=_init_fields.init_validator(required, float),
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
        kw_only=kw_only,
    )
def IntegerField(default=NOTHING, required=True, repr=True, cmp=True,
                 key=None, kw_only: bool = False):
    """
    Create a new int field on a model.

    :param default: any integer value
    :param bool required: whether the object is invalid if not provided.
    :param bool repr: include this field in the object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: make this attribute keyword-only in __init__.
    """
    return attrib(
        default=_init_fields.init_default(required, default, None),
        converter=converters.int_if_not_none,
        validator=_init_fields.init_validator(required, int),
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
        kw_only=kw_only,
    )
def MappingField(cls, child_key, default=NOTHING, required=True, repr=False,
                 key=None, kw_only: bool = False):
    """
    Create new mapping field on a model.

    :param cls: class (or name) of the model to be related in Sequence.
    :param child_key: key field on the child object to be used as the map key.
    :param default: any mapping type
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: have a generated __init__ with keyword-only arguments,
        relaxing the required ordering of default and non-default valued
        attributes.
    """
    # NOTE(review): OrderedDict() here is a single shared fallback instance;
    # confirm init_default copies/wraps it so model instances don't share it.
    default = _init_fields.init_default(required, default, OrderedDict())
    converter = converters.to_mapping_field(cls, child_key)
    validator = _init_fields.init_validator(required, types.TypedMapping)
    return attrib(default=default, converter=converter, validator=validator,
                  repr=repr, metadata=dict(key=key), kw_only=kw_only)
def RegexField(regex, default=NOTHING, required=True, repr=True, cmp=True,
               key=None, kw_only: bool = False):
    """
    Create a new regex-validated str field on a model.

    :param regex: regex validation string (e.g. "[^@]+@[^@]+" for email)
    :param default: any string value
    :param bool required: whether the object is invalid if not provided.
    :param bool repr: include this field in the object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: make this attribute keyword-only in __init__.
    """
    return attrib(
        default=_init_fields.init_default(required, default, None),
        converter=converters.str_if_not_none,
        validator=_init_fields.init_validator(required, string_types,
                                              validators.regex(regex)),
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
        kw_only=kw_only,
    )
def SequenceField(cls, default=NOTHING, required=True, repr=False, key=None,
                  kw_only: bool = False):
    """
    Create new sequence field on a model.

    :param cls: class (or name) of the model to be related in Sequence.
    :param default: any TypedSequence or list
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: have a generated __init__ with keyword-only arguments,
        relaxing the required ordering of default and non-default valued
        attributes.
    """
    # NOTE(review): [] is a single shared fallback instance; confirm
    # init_default copies/wraps it so model instances don't share it.
    default = _init_fields.init_default(required, default, [])
    converter = converters.to_sequence_field(cls)
    validator = _init_fields.init_validator(required, types.TypedSequence)
    return attrib(default=default, converter=converter, validator=validator,
                  repr=repr, metadata=dict(key=key), kw_only=kw_only)
def SetField(cls, default=NOTHING, required=True, repr=False, key=None,
             kw_only: bool = False):
    """
    Create new set field on a model.

    :param cls: class (or name) of the model to be related in Set.
    :param default: any TypedSet or set
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: have a generated __init__ with keyword-only arguments,
        relaxing the required ordering of default and non-default valued
        attributes.
    """
    # NOTE(review): set() is a single shared fallback instance; confirm
    # init_default copies/wraps it so model instances don't share it.
    default = _init_fields.init_default(required, default, set())
    converter = converters.to_set_field(cls)
    validator = _init_fields.init_validator(required, types.TypedSet)
    return attrib(default=default, converter=converter, validator=validator,
                  repr=repr, metadata=dict(key=key), kw_only=kw_only)
def StringField(default=NOTHING, required=True, repr=True, cmp=True,
                key=None, kw_only: bool = False):
    """
    Create a new str field on a model.

    :param default: any string value
    :param bool required: whether the object is invalid if not provided.
    :param bool repr: include this field in the object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: make this attribute keyword-only in __init__.
    """
    return attrib(
        default=_init_fields.init_default(required, default, None),
        converter=converters.str_if_not_none,
        validator=_init_fields.init_validator(required, string_types),
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
        kw_only=kw_only,
    )
def URLField(default=NOTHING, required=True, repr=True, cmp=True, key=None,
             kw_only: bool = False):
    """
    Create new URL field on a model; string values are parsed via urlparse
    into a ParseResult.

    :param default: any URL string or ParseResult value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: have a generated __init__ with keyword-only arguments,
        relaxing the required ordering of default and non-default valued
        attributes.
    """
    cls = ParseResult
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, cls)
    return attrib(default=default, converter=converters.str_to_url,
                  validator=validator, repr=repr, cmp=cmp,
                  metadata=dict(key=key), kw_only=kw_only)
def UUIDField(default=NOTHING, required=False, repr=True, cmp=True, key=None,
              kw_only: bool = False):
    """
    Create a new UUID field on a model; when no default is given, a random
    uuid4 factory is used.

    :param default: any UUID value, UUID string, or factory
    :param bool required: whether the object is invalid if not provided.
    :param bool repr: include this field in the object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: make this attribute keyword-only in __init__.
    """
    return attrib(
        default=_init_fields.init_default(required, default, uuid4),
        converter=converters.str_to_uuid,
        validator=_init_fields.init_validator(required, UUID),
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
        kw_only=kw_only,
    )
def DecimalField(default=NOTHING, required=True, repr=True, cmp=True,
                 key=None, kw_only: bool = False):
    """
    Create new decimal field on a model.

    :param default: any decimal value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    :param bool kw_only: have a generated __init__ with keyword-only arguments,
        relaxing the required ordering of default and non-default valued
        attributes.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, Decimal)
    # Bug fix: pass None through rather than calling Decimal(None), which
    # raises TypeError for optional (required=False) fields left unset.
    # This matches the *_if_not_none converters used by the other fields.
    return attrib(default=default,
                  converter=lambda x: None if x is None else Decimal(x),
                  validator=validator, repr=repr, cmp=cmp,
                  metadata=dict(key=key), kw_only=kw_only)
try:
from urllib import urlencode
from urlparse import urlparse, parse_qsl
except ImportError: # Python3
from urllib.parse import urlencode, urlparse, parse_qsl
import re
import time
from bs4 import BeautifulSoup
import feedparser
class RelatedNewsScraper(object):
    def __init__(self):
        # Base endpoint for all Google News queries.
        self.base_url = "https://news.google.com/news"
        # Human-readable topic name -> Google News RSS topic code.
        # NOTE(review): "elections" and "entertainment" both map to "e";
        # one of these codes is likely wrong -- confirm against Google
        # News's topic parameter values before relying on either.
        self.topics_map = {"world": "w",
                           "us": "n",
                           "elections": "e",
                           "business": "b",
                           "tech": "tc",
                           "entertainment": "e",
                           "sports": "s",
                           "science": "snc",
                           "health": "m"}
def add_url_params(self, url, new_params):
"""
Returns the url reformatted with the additional parameters added.
"""
parsed = urlparse(url)
params_raw = parsed.query
params = dict(parse_qsl(params_raw))
params.update(new_params)
encoded_params = urlencode(params, doseq=True)
ncl = None
if "ncl" in params:
ncl = params["ncl"]
new_url = self.base_url + "?" + encoded_params
return new_url, ncl
def scrape_latest(self, topic=None):
"""
Returns a dictionary containing groups of related articles.
Each group of articles is referenced using a unique identifier. Each
article in that group is represented as a dictionary with the following
attributes:
-title: title of the article
-source: news source where article is published
-url: url to the article
-published: date the article was published
A topic can optionally be specified. The options are:
-world
-US
-elections
-tech
-business
-entertainment
-sports
-science
-health
"""
params = {"output": "rss"}
if topic:
topic = topic.lower()
if topic in self.topics_map:
params["topic"] = self.topics_map[topic]
else:
err_str = "Error: topic not valid. Try world, US, elections, tech, "
err_str += "business, entertainment, sports, science, or health."
print err_str
return
feed_url, _ = self.add_url_params(self.base_url, params)
feed = feedparser.parse(feed_url)
data = {}
for entry in feed["entries"]:
description = entry["summary"]
soup = BeautifulSoup(description, "lxml")
related_url = soup.find("a", href=re.compile("news/more\?"))["href"]
related_rss, ncl = self.add_url_params(related_url, {"output": "rss"})
data[ncl] = self.scrape_related(related_rss)
return data
def scrape_related(self, feed_url):
"""
Returns a list of articles, represented as dictionaries with the following
attributes:
-title: title of the article
-source: news source where article is published
-url: url to the article
-published: date the article was published
"""
related_articles = []
feed = feedparser.parse(feed_url)
for entry in feed["entries"]:
article = {}
link_parsed = urlparse(entry["link"])
params_raw = link_parsed.query
params = dict(parse_qsl(params_raw))
article["url"] = params["url"]
article["published"] = time.strftime("%m-%d-%Y", entry["published_parsed"])
title_parts = entry["title"].split(" - ")
article["title"] = " - ".join(title_parts[:-1])
article["source"] = title_parts[-1]
related_articles.append(article)
return related_articles | /related-google-news-0.1.tar.gz/related-google-news-0.1/related_google_news/related.py | 0.667581 | 0.237698 | related.py | pypi |
from collections import OrderedDict
from uuid import UUID
from future.moves.urllib.parse import urlparse
from six import string_types
from datetime import datetime
from inspect import isfunction
from dateutil import parser
from importlib import import_module
from .types import TypedSequence, TypedMapping, TypedSet
from .functions import to_model
# Template for wrapping conversion failures: value, target class, cause.
CHILD_ERROR_MSG = (
    "Failed to convert value ({}) to child object class ({}). "
    "... [Original error message: {}]"
)
def to_child_field(cls):
    """
    Returns an callable instance that will convert a value to a Child object.
    :param cls: Valid class type of the Child.
    :return: instance of ChildConverter.
    """
    class ChildConverter(object):
        """Converts raw values into instances of the (lazily resolved) cls."""

        def __init__(self, cls):
            self._cls = cls

        @property
        def cls(self):
            # Resolved on every access so dotted-path strings work.
            return resolve_class(self._cls)

        def __call__(self, value):
            try:
                return to_model(self.cls, value)
            except ValueError as exc:
                raise ValueError(
                    CHILD_ERROR_MSG.format(value, self.cls, str(exc)))

    return ChildConverter(cls)
def to_sequence_field(cls):
    """
    Returns a callable instance that will convert a value to a Sequence.
    :param cls: Valid class type of the items in the Sequence.
    :return: instance of the SequenceConverter.
    """
    class SequenceConverter(object):
        """Converts an iterable of raw values into a TypedSequence."""

        def __init__(self, cls):
            self._cls = cls

        @property
        def cls(self):
            return resolve_class(self._cls)

        def __call__(self, values):
            item_cls = self.cls
            converted = [to_model(item_cls, item) for item in (values or [])]
            return TypedSequence(cls=item_cls, args=converted)

    return SequenceConverter(cls)
def to_set_field(cls):
    """
    Returns a callable instance that will convert a value to a Set.
    :param cls: Valid class type of the items in the Set.
    :return: instance of the SetConverter.
    """
    class SetConverter(object):
        """Converts an iterable of raw values into a TypedSet."""

        def __init__(self, cls):
            self._cls = cls

        @property
        def cls(self):
            return resolve_class(self._cls)

        def __call__(self, values):
            item_cls = self.cls
            converted = {to_model(item_cls, item) for item in (values or set())}
            return TypedSet(cls=item_cls, args=converted)

    return SetConverter(cls)
def to_mapping_field(cls, key):  # pragma: no mccabe
    """
    Returns a callable instance that will convert a value to a Mapping.
    :param cls: Valid class type of the items in the Sequence.
    :param key: Attribute name of the key value in each item of cls instance.
    :return: instance of the MappingConverter.
    """
    class MappingConverter(object):
        def __init__(self, cls, key):
            self._cls = cls
            self.key = key

        @property
        def cls(self):
            # Resolved lazily so dotted-path strings are supported.
            return resolve_class(self._cls)

        def __call__(self, values):
            kwargs = OrderedDict()
            # Already-typed mappings are passed through untouched.
            if isinstance(values, TypedMapping):
                return values
            if not isinstance(values, (type({}), type(None))):
                raise TypeError("Invalid type : {}".format(type(values)))
            if values:
                for key_value, item in values.items():
                    if isinstance(item, dict):
                        # Inject the mapping key into the child dict before
                        # converting it to a model instance.
                        item[self.key] = key_value
                        item = to_model(self.cls, item)
                    # Non-dict items are stored as-is (presumably already
                    # model instances — TODO confirm against callers).
                    kwargs[key_value] = item
            return TypedMapping(cls=self.cls, kwargs=kwargs, key=self.key)

    return MappingConverter(cls, key)
def str_if_not_none(value):
    """
    Returns an str(value) if the value is not None.
    :param value: None or a value that can be converted to a str.
    :return: None or str(value)
    """
    if value is None or isinstance(value, string_types):
        return value
    return str(value)
def int_if_not_none(value):
    """
    Returns an int(value) if the value is not None.
    :param value: None or a value that can be converted to an int.
    :return: None or int(value)
    """
    if value is None:
        return None
    return int(value)
def float_if_not_none(value):
    """
    Returns an float(value) if the value is not None.
    :param value: None or a value that can be converted to an float.
    :return: None or float(value)
    """
    if value is None:
        return None
    return float(value)
def str_to_url(value):
    """
    Parse a URL string into a ``ParseResult`` via ``urlparse``.
    Non-string values (e.g. an already-parsed result) are returned unchanged.
    :param value: str or already-parsed URL value
    :return: ParseResult (or the original non-string value)
    """
    return urlparse(value) if isinstance(value, string_types) else value
def str_to_uuid(value):
    """
    Returns a UUID(value) if the value provided is a str.
    If *value* is a function (e.g. the ``uuid4`` default factory), it is
    invoked first; non-string results are returned unchanged.
    :param value: str, UUID object, or zero-argument factory function
    :return: UUID object (or the original non-string value)
    """
    if isfunction(value):
        # Support default factories such as ``uuid4``.
        value = value()
    return UUID(value) if isinstance(value, string_types) else value
def to_date_field(formatter):
    """
    Returns a callable instance that will convert a string to a Date.
    :param formatter: String that represents data format for parsing.
    :return: instance of the DateConverter.
    """
    class DateConverter(object):
        def __init__(self, formatter):
            self.formatter = formatter

        def __call__(self, value):
            # Strings are parsed with the configured strptime format...
            if isinstance(value, string_types):
                value = datetime.strptime(value, self.formatter).date()
            # ...and datetime instances are truncated to their date part.
            if isinstance(value, datetime):
                value = value.date()
            return value

    return DateConverter(formatter)
def to_datetime_field(formatter):
    """
    Returns a callable instance that will convert a string to a DateTime.
    :param formatter: String that represents data format for parsing.
    :return: instance of the DateTimeConverter.
    """
    class DateTimeConverter(object):
        def __init__(self, formatter):
            # NOTE(review): ``formatter`` is stored but never used for
            # parsing — strings go through ``dateutil.parser.parse`` below.
            # Presumably it is kept for rendering elsewhere; confirm.
            self.formatter = formatter

        def __call__(self, value):
            if isinstance(value, string_types):
                value = parser.parse(value)
            return value

    return DateTimeConverter(formatter)
def to_time_field(formatter):
    """
    Returns a callable instance that will convert a string to a Time.
    :param formatter: String that represents data format for parsing.
    :return: instance of the TimeConverter.
    """
    class TimeConverter(object):
        def __init__(self, formatter):
            self.formatter = formatter

        def __call__(self, value):
            # Only strings are converted; time objects pass through as-is.
            if isinstance(value, string_types):
                value = datetime.strptime(value, self.formatter).time()
            return value

    return TimeConverter(formatter)
def resolve_class(cls):
    """Resolve a dotted-path string (e.g. ``"pkg.module.Model"``) to the
    class object it names; anything that is not a string passes through."""
    if not isinstance(cls, str):
        return cls
    module_path, attr_name = cls.rsplit(".", 1)
    return getattr(import_module(module_path), attr_name)
from decimal import Decimal
from future.moves.urllib.parse import ParseResult
from collections import OrderedDict
from enum import Enum
from uuid import UUID
from datetime import date, datetime, time
from attr._compat import iteritems
from .functions import to_dict
from .types import (
TypedSequence, TypedMapping, TypedSet, DEFAULT_DATE_FORMAT,
DEFAULT_DATETIME_FORMAT, DEFAULT_TIME_FORMAT
)
@to_dict.register(list)  # noqa F811
@to_dict.register(set)
@to_dict.register(tuple)
def _(obj, **kwargs):
    # Serialize each element recursively. With ``retain_collection_types``
    # the original container class is rebuilt; otherwise a list is returned.
    suppress_empty_values = kwargs.get("suppress_empty_values", False)
    retain_collection_types = kwargs.get("retain_collection_types", False)
    # Empty collections implicitly yield None when empty values are suppressed.
    if not suppress_empty_values or len(obj):
        cf = obj.__class__ if retain_collection_types else list
        return cf([to_dict(i, **kwargs) for i in obj])
@to_dict.register(dict)  # noqa F811
def _(obj, **kwargs):
    # Serialize keys and values recursively into ``dict_factory``.
    suppress_empty_values = kwargs.get("suppress_empty_values", False)
    dict_factory = kwargs.get("dict_factory", OrderedDict)
    items = []
    for kk, vv in iteritems(obj):
        vv = to_dict(vv, **kwargs)
        # Suppression drops entries whose serialized value is None.
        if (not suppress_empty_values) or (vv is not None):
            items.append((to_dict(kk, **kwargs), vv))
    if not suppress_empty_values or len(items):
        return dict_factory(items)
@to_dict.register(TypedSequence)  # noqa F811
def _(obj, **kwargs):
    # Delegate to the plain-list handler via the underlying list.
    return to_dict(obj.list, **kwargs)


@to_dict.register(TypedSet)  # noqa F811
def _(obj, **kwargs):
    # Delegate to the plain-set handler via the underlying set.
    return to_dict(obj.set, **kwargs)
@to_dict.register(TypedMapping)  # noqa F811
def _(obj, **kwargs):
    suppress_map_key_values = kwargs.get("suppress_map_key_values", False)
    suppress_empty_values = kwargs.get("suppress_empty_values", False)
    rv = kwargs.get("dict_factory", OrderedDict)()
    items = obj.items()
    for key_value, item in items:
        sub_dict = to_dict(item, **kwargs)
        # Optionally drop the redundant key attribute from each child dict
        # (it already serves as the mapping key).
        if suppress_map_key_values:
            sub_dict.pop(obj.key)
        rv[key_value] = sub_dict
    if not suppress_empty_values or len(items):
        return rv
@to_dict.register(Enum)  # noqa F811
def _(obj, **kwargs):
    # Enums serialize as their underlying value.
    return obj.value


@to_dict.register(UUID)  # noqa F811
def _(obj, **kwargs):
    # UUIDs serialize as their canonical string form.
    return str(obj)


@to_dict.register(ParseResult)  # noqa F811
def _(obj, **kwargs):
    # Parsed URLs serialize back to the original URL string.
    return obj.geturl()


@to_dict.register(date)  # noqa F811
def _(obj, **kwargs):
    formatter = kwargs.get('formatter') or DEFAULT_DATE_FORMAT
    return obj.strftime(formatter)


@to_dict.register(datetime)  # noqa F811
def _(obj, **kwargs):
    formatter = kwargs.get('formatter') or DEFAULT_DATETIME_FORMAT
    # "ISO_FORMAT" is a sentinel selecting datetime.isoformat().
    return (obj.isoformat() if formatter == "ISO_FORMAT"
            else obj.strftime(formatter))


@to_dict.register(time)  # noqa F811
def _(obj, **kwargs):
    formatter = kwargs.get('formatter') or DEFAULT_TIME_FORMAT
    return obj.strftime(formatter)


@to_dict.register(Decimal)  # noqa F811
def _(obj, **kwargs):
    # Decimals serialize as strings to avoid float precision loss.
    return str(obj)
from decimal import Decimal
from future.moves.urllib.parse import ParseResult
from attr import attrib, NOTHING
from collections import OrderedDict
from uuid import uuid4, UUID
from datetime import date, datetime, time
from six import string_types, PY2
from . import _init_fields, types, converters, validators
attrib_additional_kwargs = {} if PY2 else {'kw_only': True}
def BooleanField(default=NOTHING, required=True, repr=True, cmp=True,
                 key=None):
    """
    Create new bool field on a model.
    :param default: any boolean value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, bool)
    # Booleans need no converter; values are validated as-is.
    return attrib(default=default, validator=validator, repr=repr, cmp=cmp,
                  metadata=dict(key=key), **attrib_additional_kwargs)
def ChildField(cls, default=NOTHING, required=True, repr=True, cmp=True,
               key=None):
    """
    Create new child field on a model.
    :param cls: class (or name) of the model to be related.
    :param default: any object value of type cls
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    converter = converters.to_child_field(cls)
    # When cls is a dotted-path string it is resolved lazily; validate as
    # plain ``object`` in that case.
    validator = _init_fields.init_validator(
        required, object if isinstance(cls, str) else cls
    )
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4,
    # later removed); matches the newer sibling implementation.
    return attrib(default=default, converter=converter, validator=validator,
                  repr=repr, cmp=cmp, metadata=dict(key=key),
                  **attrib_additional_kwargs)
def DateField(formatter=types.DEFAULT_DATE_FORMAT, default=NOTHING,
              required=True, repr=True, cmp=True, key=None):
    """
    Create new date field on a model.
    :param formatter: strptime format used to parse string values.
    :param default: any date or string that can be converted to a date value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, date)
    converter = converters.to_date_field(formatter)
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4).
    return attrib(default=default, converter=converter, validator=validator,
                  repr=repr, cmp=cmp,
                  metadata=dict(formatter=formatter, key=key),
                  **attrib_additional_kwargs)
def DateTimeField(formatter=types.DEFAULT_DATETIME_FORMAT, default=NOTHING,
                  required=True, repr=True, cmp=True, key=None):
    """
    Create new datetime field on a model.
    :param formatter: format string stored in metadata for rendering.
    :param default: any datetime or string that can be converted to a datetime
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, datetime)
    converter = converters.to_datetime_field(formatter)
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4).
    return attrib(default=default, converter=converter, validator=validator,
                  repr=repr, cmp=cmp,
                  metadata=dict(formatter=formatter, key=key),
                  **attrib_additional_kwargs)
def TimeField(formatter=types.DEFAULT_TIME_FORMAT, default=NOTHING,
              required=True, repr=True, cmp=True, key=None):
    """
    Create new time field on a model.
    :param formatter: strptime format used to parse string values.
    :param default: any time or string that can be converted to a time value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, time)
    converter = converters.to_time_field(formatter)
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4).
    return attrib(default=default, converter=converter, validator=validator,
                  repr=repr, cmp=cmp,
                  metadata=dict(formatter=formatter, key=key),
                  **attrib_additional_kwargs)
def FloatField(default=NOTHING, required=True, repr=True, cmp=True,
               key=None):
    """
    Create new float field on a model.
    :param default: any float value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, float)
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4).
    return attrib(default=default, converter=converters.float_if_not_none,
                  validator=validator, repr=repr, cmp=cmp,
                  metadata=dict(key=key), **attrib_additional_kwargs)
def IntegerField(default=NOTHING, required=True, repr=True, cmp=True,
                 key=None):
    """
    Create new int field on a model.
    :param default: any integer value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, int)
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4).
    return attrib(default=default, converter=converters.int_if_not_none,
                  validator=validator, repr=repr, cmp=cmp,
                  metadata=dict(key=key), **attrib_additional_kwargs)
def MappingField(cls, child_key, default=NOTHING, required=True, repr=False,
                 key=None):
    """
    Create new mapping field on a model.
    :param cls: class (or name) of the model to be related in Sequence.
    :param child_key: key field on the child object used as the mapping key.
    :param default: any mapping type
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, OrderedDict())
    converter = converters.to_mapping_field(cls, child_key)
    validator = _init_fields.init_validator(required, types.TypedMapping)
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4).
    return attrib(default=default, converter=converter, validator=validator,
                  repr=repr, metadata=dict(key=key),
                  **attrib_additional_kwargs)
def RegexField(regex, default=NOTHING, required=True, repr=True, cmp=True,
               key=None):
    """
    Create new str field on a model whose value must match ``regex``.
    :param regex: regular expression the string value must match.
    :param default: any string value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, string_types,
                                            validators.regex(regex))
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4).
    return attrib(default=default, converter=converters.str_if_not_none,
                  validator=validator, repr=repr, cmp=cmp,
                  metadata=dict(key=key), **attrib_additional_kwargs)
def SequenceField(cls, default=NOTHING, required=True, repr=False, key=None):
    """
    Create new sequence field on a model.
    :param cls: class (or name) of the model to be related in Sequence.
    :param default: any TypedSequence or list
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, [])
    converter = converters.to_sequence_field(cls)
    validator = _init_fields.init_validator(required, types.TypedSequence)
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4).
    return attrib(default=default, converter=converter, validator=validator,
                  repr=repr, metadata=dict(key=key),
                  **attrib_additional_kwargs)
def SetField(cls, default=NOTHING, required=True, repr=False, key=None):
    """
    Create new set field on a model.
    :param cls: class (or name) of the model to be related in Set.
    :param default: any TypedSet or set
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, set())
    converter = converters.to_set_field(cls)
    validator = _init_fields.init_validator(required, types.TypedSet)
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4).
    return attrib(default=default, converter=converter, validator=validator,
                  repr=repr, metadata=dict(key=key),
                  **attrib_additional_kwargs)
def StringField(default=NOTHING, required=True, repr=True, cmp=True,
                key=None):
    """
    Create new str field on a model.
    :param default: any string value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, string_types)
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4).
    return attrib(default=default, converter=converters.str_if_not_none,
                  validator=validator, repr=repr, cmp=cmp,
                  metadata=dict(key=key), **attrib_additional_kwargs)
def URLField(default=NOTHING, required=False, repr=True, cmp=True, key=None):
    """
    Create new URL field on a model (stored as a parsed ``ParseResult``).
    :param default: any URL string or parsed value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    cls = ParseResult
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, cls)
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4).
    return attrib(default=default, converter=converters.str_to_url,
                  validator=validator, repr=repr, cmp=cmp,
                  metadata=dict(key=key), **attrib_additional_kwargs)
def UUIDField(default=NOTHING, required=False, repr=True, cmp=True, key=None):
    """
    Create new UUID field on a model (defaults to a random ``uuid4``).
    :param default: any UUID value or string convertible to one
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    cls = UUID
    default = _init_fields.init_default(required, default, uuid4)
    validator = _init_fields.init_validator(required, cls)
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4).
    return attrib(default=default, converter=converters.str_to_uuid,
                  validator=validator, repr=repr, cmp=cmp,
                  metadata=dict(key=key), **attrib_additional_kwargs)
def DecimalField(default=NOTHING, required=True, repr=True, cmp=True,
                 key=None):
    """
    Create new decimal field on a model.
    :param default: any decimal value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, Decimal)
    # ``converter=`` replaces attrs' ``convert`` kwarg (deprecated in 17.4).
    return attrib(default=default, converter=lambda x: Decimal(x),
                  validator=validator, repr=repr, cmp=cmp,
                  metadata=dict(key=key), **attrib_additional_kwargs)
from datetime import datetime
from importlib import import_module
from inspect import isfunction
from uuid import UUID
from dateutil import parser
from future.moves.urllib.parse import urlparse
from six import callable, string_types
from .functions import to_model
from .types import TypedMapping, TypedSequence, TypedSet
# Template for wrapping conversion failures: value, target class, cause.
CHILD_ERROR_MSG = (
    "Failed to convert value ({}) to child object class ({}). "
    "... [Original error message: {}]"
)
def to_child_field(cls):
    """
    Returns an callable instance that will convert a value to a Child object.
    :param cls: Valid class type of the Child.
    :return: instance of ChildConverter.
    """
    class ChildConverter(object):
        def __init__(self, cls):
            self._cls = cls

        @property
        def cls(self):
            return resolve_class(self._cls)

        def __call__(self, value):
            try:
                # Issue #33: if value is the class and callable, then invoke
                try:
                    if (
                        value == self._cls or isinstance(value, self._cls)
                    ) and callable(value):
                        value = value()
                except TypeError:
                    # isinstance() raises TypeError when ``self._cls`` is a
                    # dotted-path string; fall through and let to_model
                    # handle the raw value. (Removed leftover debug print.)
                    pass
                return to_model(self.cls, value)
            except ValueError as e:
                error_msg = CHILD_ERROR_MSG.format(value, self.cls, str(e))
                raise ValueError(error_msg)

    return ChildConverter(cls)
def to_sequence_field(cls):
    """
    Returns a callable instance that will convert a value to a Sequence.
    :param cls: Valid class type of the items in the Sequence.
    :return: instance of the SequenceConverter.
    """
    class SequenceConverter(object):
        """Converts an iterable of raw values into a TypedSequence."""

        def __init__(self, cls):
            self._cls = cls

        @property
        def cls(self):
            return resolve_class(self._cls)

        def __call__(self, values):
            item_cls = self.cls
            converted = [to_model(item_cls, item) for item in (values or [])]
            return TypedSequence(cls=item_cls, args=converted)

    return SequenceConverter(cls)
def to_set_field(cls):
    """
    Returns a callable instance that will convert a value to a Set.
    :param cls: Valid class type of the items in the Set.
    :return: instance of the SetConverter.
    """
    class SetConverter(object):
        """Converts an iterable of raw values into a TypedSet."""

        def __init__(self, cls):
            self._cls = cls

        @property
        def cls(self):
            return resolve_class(self._cls)

        def __call__(self, values):
            item_cls = self.cls
            converted = {to_model(item_cls, item) for item in (values or set())}
            return TypedSet(cls=item_cls, args=converted)

    return SetConverter(cls)
def to_mapping_field(cls, key):  # pragma: no mccabe
    """
    Returns a callable instance that will convert a value to a Mapping.
    :param cls: Valid class type of the items in the Sequence.
    :param key: Attribute name of the key value in each item of cls instance.
    :return: instance of the MappingConverter.
    """
    class MappingConverter(object):
        def __init__(self, cls, key):
            self._cls = cls
            self.key = key

        @property
        def cls(self):
            # Resolved lazily so dotted-path strings are supported.
            return resolve_class(self._cls)

        def __call__(self, values):
            kwargs = {}
            # Already-typed mappings are passed through untouched.
            if isinstance(values, TypedMapping):
                return values
            if not isinstance(values, (type({}), type(None))):
                raise TypeError("Invalid type : {}".format(type(values)))
            if values:
                for key_value, item in values.items():
                    if isinstance(item, dict):
                        # Inject the mapping key into the child dict before
                        # converting it to a model instance.
                        item[self.key] = key_value
                        item = to_model(self.cls, item)
                    # Non-dict items are stored as-is (presumably already
                    # model instances — TODO confirm against callers).
                    kwargs[key_value] = item
            return TypedMapping(cls=self.cls, kwargs=kwargs, key=self.key)

    return MappingConverter(cls, key)
def str_if_not_none(value):
    """
    Returns an str(value) if the value is not None.
    :param value: None or a value that can be converted to a str.
    :return: None or str(value)
    """
    if value is None or isinstance(value, string_types):
        return value
    return str(value)
def int_if_not_none(value):
    """
    Returns an int(value) if the value is not None.
    :param value: None or a value that can be converted to an int.
    :return: None or int(value)
    """
    if value is None:
        return None
    return int(value)
def float_if_not_none(value):
    """
    Returns an float(value) if the value is not None.
    :param value: None or a value that can be converted to an float.
    :return: None or float(value)
    """
    if value is None:
        return None
    return float(value)
def str_to_url(value):
    """
    Parse a URL string into a ``ParseResult`` via ``urlparse``.
    Non-string values (e.g. an already-parsed result) are returned unchanged.
    :param value: str or already-parsed URL value
    :return: ParseResult (or the original non-string value)
    """
    return urlparse(value) if isinstance(value, string_types) else value
def str_to_uuid(value):
    """
    Returns a UUID(value) if the value provided is a str.
    If *value* is a function (e.g. the ``uuid4`` default factory), it is
    invoked first; non-string results are returned unchanged.
    :param value: str, UUID object, or zero-argument factory function
    :return: UUID object (or the original non-string value)
    """
    if isfunction(value):
        # Support default factories such as ``uuid4``.
        value = value()
    return UUID(value) if isinstance(value, string_types) else value
def to_date_field(formatter):
    """
    Returns a callable instance that will convert a string to a Date.
    :param formatter: String that represents data format for parsing.
    :return: instance of the DateConverter.
    """
    class DateConverter(object):
        def __init__(self, formatter):
            self.formatter = formatter

        def __call__(self, value):
            # Strings are parsed with the configured strptime format...
            if isinstance(value, string_types):
                value = datetime.strptime(value, self.formatter).date()
            # ...and datetime instances are truncated to their date part.
            if isinstance(value, datetime):
                value = value.date()
            return value

    return DateConverter(formatter)
def to_datetime_field(formatter):
    """
    Returns a callable instance that will convert a string to a DateTime.
    :param formatter: String that represents data format for parsing.
    :return: instance of the DateTimeConverter.
    """
    class DateTimeConverter(object):
        def __init__(self, formatter):
            # NOTE(review): ``formatter`` is stored but never used for
            # parsing — strings go through ``dateutil.parser.parse`` below.
            # Presumably it is kept for rendering elsewhere; confirm.
            self.formatter = formatter

        def __call__(self, value):
            if isinstance(value, string_types):
                value = parser.parse(value)
            return value

    return DateTimeConverter(formatter)
def to_time_field(formatter):
    """
    Returns a callable instance that will convert a string to a Time.
    :param formatter: String that represents data format for parsing.
    :return: instance of the TimeConverter.
    """
    class TimeConverter(object):
        def __init__(self, formatter):
            self.formatter = formatter

        def __call__(self, value):
            # Only strings are converted; time objects pass through as-is.
            if isinstance(value, string_types):
                value = datetime.strptime(value, self.formatter).time()
            return value

    return TimeConverter(formatter)
def resolve_class(cls):
    """Resolve a dotted-path string to the class object it names; values
    that are not strings (already classes) are returned unchanged."""
    if isinstance(cls, str):
        module_path, class_name = cls.rsplit(".", 1)
        cls = getattr(import_module(module_path), class_name)
    return cls
from datetime import date, datetime, time
from decimal import Decimal
from enum import Enum
from uuid import UUID
from future.moves.urllib.parse import ParseResult
from .functions import to_dict
from .types import (
DEFAULT_DATE_FORMAT,
DEFAULT_DATETIME_FORMAT,
DEFAULT_TIME_FORMAT,
TypedMapping,
TypedSequence,
TypedSet,
)
@to_dict.register(list)  # noqa F811
@to_dict.register(set)
@to_dict.register(tuple)
def _(obj, **kwargs):
    # Serialize each element recursively. With ``retain_collection_types``
    # the original container class is rebuilt; otherwise a list is returned.
    suppress_empty_values = kwargs.get("suppress_empty_values", False)
    retain_collection_types = kwargs.get("retain_collection_types", False)
    # Empty collections implicitly yield None when empty values are suppressed.
    if not suppress_empty_values or len(obj):
        cf = obj.__class__ if retain_collection_types else list
        return cf([to_dict(i, **kwargs) for i in obj])
@to_dict.register(dict)  # noqa F811
def _(obj, **kwargs):
    # Serialize keys and values recursively into ``dict_factory``.
    suppress_empty_values = kwargs.get("suppress_empty_values", False)
    dict_factory = kwargs.get("dict_factory", dict)
    items = []
    for kk, vv in obj.items():
        vv = to_dict(vv, **kwargs)
        # Suppression drops entries whose serialized value is None.
        if (not suppress_empty_values) or (vv is not None):
            items.append((to_dict(kk, **kwargs), vv))
    if not suppress_empty_values or len(items):
        return dict_factory(items)
@to_dict.register(TypedSequence)  # noqa F811
def _(obj, **kwargs):
    # Delegate to the plain-list handler via the underlying list.
    return to_dict(obj.list, **kwargs)


@to_dict.register(TypedSet)  # noqa F811
def _(obj, **kwargs):
    # Delegate to the plain-set handler via the underlying set.
    return to_dict(obj.set, **kwargs)
@to_dict.register(TypedMapping)  # noqa F811
def _(obj, **kwargs):
    suppress_map_key_values = kwargs.get("suppress_map_key_values", False)
    suppress_empty_values = kwargs.get("suppress_empty_values", False)
    rv = kwargs.get("dict_factory", dict)()
    items = obj.items()
    for key_value, item in items:
        sub_dict = to_dict(item, **kwargs)
        # Optionally drop the redundant key attribute from each child dict
        # (it already serves as the mapping key).
        if suppress_map_key_values:
            sub_dict.pop(obj.key)
        rv[key_value] = sub_dict
    if not suppress_empty_values or len(items):
        return rv
@to_dict.register(Enum)  # noqa F811
def _(obj, **kwargs):
    # Enums serialize as their underlying value.
    return obj.value


@to_dict.register(UUID)  # noqa F811
def _(obj, **kwargs):
    # UUIDs serialize as their canonical string form.
    return str(obj)


@to_dict.register(ParseResult)  # noqa F811
def _(obj, **kwargs):
    # Parsed URLs serialize back to the original URL string.
    return obj.geturl()


@to_dict.register(date)  # noqa F811
def _(obj, **kwargs):
    formatter = kwargs.get("formatter") or DEFAULT_DATE_FORMAT
    return obj.strftime(formatter)


@to_dict.register(datetime)  # noqa F811
def _(obj, **kwargs):
    formatter = kwargs.get("formatter") or DEFAULT_DATETIME_FORMAT
    # "ISO_FORMAT" is a sentinel selecting datetime.isoformat().
    return obj.isoformat() if formatter == "ISO_FORMAT" else obj.strftime(formatter)


@to_dict.register(time)  # noqa F811
def _(obj, **kwargs):
    formatter = kwargs.get("formatter") or DEFAULT_TIME_FORMAT
    return obj.strftime(formatter)


@to_dict.register(Decimal)  # noqa F811
def _(obj, **kwargs):
    # Decimals serialize as strings to avoid float precision loss.
    return str(obj)
from datetime import date, datetime, time
from decimal import Decimal
from uuid import UUID, uuid4
from attr import NOTHING, attrib
from future.moves.urllib.parse import ParseResult
from six import string_types
from . import _init_fields, converters, types, validators
def BooleanField(default=NOTHING, required=True, repr=True, cmp=True, key=None):
    """
    Create new bool field on a model.

    :param default: any boolean value
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param bool cmp: whether this field participates in generated comparisons.
    :param string key: override name of the value when converted to dict.
    """
    return attrib(
        default=_init_fields.init_default(required, default, None),
        validator=_init_fields.init_validator(required, bool),
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
    )
def ChildField(cls, default=NOTHING, required=True, repr=True, cmp=True, key=None):
    """
    Create new child field on a model.

    :param cls: class (or name) of the model to be related.
    :param default: any object value of type cls
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param bool cmp: whether this field participates in generated comparisons.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    converter = converters.to_child_field(cls)
    # A forward reference (string class name) cannot be isinstance-checked
    # yet, so fall back to validating against `object` in that case.
    validator = _init_fields.init_validator(
        required, object if isinstance(cls, str) else cls
    )
    return attrib(
        default=default,
        converter=converter,
        validator=validator,
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
    )
def DateField(
    formatter=types.DEFAULT_DATE_FORMAT,
    default=NOTHING,
    required=True,
    repr=True,
    cmp=True,
    key=None,
):
    """
    Create new date field on a model.

    :param formatter: date formatter string (default: "%Y-%m-%d")
    :param default: any date or string that can be converted to a date value
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param bool cmp: whether this field participates in generated comparisons.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, date)
    converter = converters.to_date_field(formatter)
    return attrib(
        default=default,
        converter=converter,
        validator=validator,
        repr=repr,
        cmp=cmp,
        # formatter is kept in metadata so serialization can round-trip it.
        metadata=dict(formatter=formatter, key=key),
    )
def DateTimeField(
    formatter=types.DEFAULT_DATETIME_FORMAT,
    default=NOTHING,
    required=True,
    repr=True,
    cmp=True,
    key=None,
):
    """
    Create new datetime field on a model.

    :param formatter: datetime formatter string (default: "ISO_FORMAT")
    :param default: any datetime or string that can be converted to a datetime
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param bool cmp: whether this field participates in generated comparisons.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, datetime)
    converter = converters.to_datetime_field(formatter)
    return attrib(
        default=default,
        converter=converter,
        validator=validator,
        repr=repr,
        cmp=cmp,
        # formatter is kept in metadata so serialization can round-trip it.
        metadata=dict(formatter=formatter, key=key),
    )
def TimeField(
    formatter=types.DEFAULT_TIME_FORMAT,
    default=NOTHING,
    required=True,
    repr=True,
    cmp=True,
    key=None,
):
    """
    Create new time field on a model.

    :param formatter: time formatter string (default: "%H:%M:%S")
    :param default: any time or string that can be converted to a time value
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param bool cmp: whether this field participates in generated comparisons.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, time)
    converter = converters.to_time_field(formatter)
    return attrib(
        default=default,
        converter=converter,
        validator=validator,
        repr=repr,
        cmp=cmp,
        # formatter is kept in metadata so serialization can round-trip it.
        metadata=dict(formatter=formatter, key=key),
    )
def FloatField(default=NOTHING, required=True, repr=True, cmp=True, key=None):
    """
    Create new float field on a model.

    :param default: any float value
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param bool cmp: whether this field participates in generated comparisons.
    :param string key: override name of the value when converted to dict.
    """
    return attrib(
        default=_init_fields.init_default(required, default, None),
        converter=converters.float_if_not_none,
        validator=_init_fields.init_validator(required, float),
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
    )
def IntegerField(default=NOTHING, required=True, repr=True, cmp=True, key=None):
    """
    Create new int field on a model.

    :param default: any integer value
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param bool cmp: whether this field participates in generated comparisons.
    :param string key: override name of the value when converted to dict.
    """
    return attrib(
        default=_init_fields.init_default(required, default, None),
        converter=converters.int_if_not_none,
        validator=_init_fields.init_validator(required, int),
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
    )
def MappingField(cls, child_key, default=NOTHING, required=True, repr=False, key=None):
    """
    Create new mapping field on a model.

    Note: unlike the scalar fields, this factory takes no ``cmp``
    parameter and ``repr`` defaults to False.

    :param cls: class (or name) of the model to be related in Sequence.
    :param child_key: key field on the child object to be used as the map key.
    :param default: any mapping type
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, {})
    converter = converters.to_mapping_field(cls, child_key)
    validator = _init_fields.init_validator(required, types.TypedMapping)
    return attrib(
        default=default,
        converter=converter,
        validator=validator,
        repr=repr,
        metadata=dict(key=key),
    )
def RegexField(regex, default=NOTHING, required=True, repr=True, cmp=True, key=None):
    """
    Create new str field on a model whose value must match a regex.

    :param regex: regex validation string (e.g. "[^@]+@[^@]+" for email)
    :param default: any string value
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param bool cmp: whether this field participates in generated comparisons.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    # Values are coerced to str (None passes through) before the type and
    # regex validators run.
    validator = _init_fields.init_validator(
        required, string_types, validators.regex(regex)
    )
    return attrib(
        default=default,
        converter=converters.str_if_not_none,
        validator=validator,
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
    )
def SequenceField(cls, default=NOTHING, required=True, repr=False, key=None):
    """
    Create new sequence field on a model.

    Note: unlike the scalar fields, this factory takes no ``cmp``
    parameter and ``repr`` defaults to False.

    :param cls: class (or name) of the model to be related in Sequence.
    :param default: any TypedSequence or list
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, [])
    converter = converters.to_sequence_field(cls)
    validator = _init_fields.init_validator(required, types.TypedSequence)
    return attrib(
        default=default,
        converter=converter,
        validator=validator,
        repr=repr,
        metadata=dict(key=key),
    )
def SetField(cls, default=NOTHING, required=True, repr=False, key=None):
    """
    Create new set field on a model.

    Note: unlike the scalar fields, this factory takes no ``cmp``
    parameter and ``repr`` defaults to False.

    :param cls: class (or name) of the model to be related in Set.
    :param default: any TypedSet or set
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, set())
    converter = converters.to_set_field(cls)
    validator = _init_fields.init_validator(required, types.TypedSet)
    return attrib(
        default=default,
        converter=converter,
        validator=validator,
        repr=repr,
        metadata=dict(key=key),
    )
def StringField(default=NOTHING, required=True, repr=True, cmp=True, key=None):
    """
    Create new str field on a model.

    :param default: any string value
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param bool cmp: whether this field participates in generated comparisons.
    :param string key: override name of the value when converted to dict.
    """
    return attrib(
        default=_init_fields.init_default(required, default, None),
        converter=converters.str_if_not_none,
        validator=_init_fields.init_validator(required, string_types),
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
    )
def URLField(default=NOTHING, required=True, repr=True, cmp=True, key=None):
    """
    Create new URL field on a model.

    Values are parsed from strings into ``urllib`` ``ParseResult`` objects.

    :param default: any URL string or ParseResult value
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param bool cmp: whether this field participates in generated comparisons.
    :param string key: override name of the value when converted to dict.
    """
    cls = ParseResult
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, cls)
    return attrib(
        default=default,
        converter=converters.str_to_url,
        validator=validator,
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
    )
def UUIDField(default=NOTHING, required=False, repr=True, cmp=True, key=None):
    """
    Create new UUID field on a model.

    Note: unlike the other fields, ``required`` defaults to False and the
    fallback default is a freshly generated uuid4.

    :param default: any UUID or UUID-string value
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param bool cmp: whether this field participates in generated comparisons.
    :param string key: override name of the value when converted to dict.
    """
    cls = UUID
    default = _init_fields.init_default(required, default, uuid4)
    validator = _init_fields.init_validator(required, cls)
    return attrib(
        default=default,
        converter=converters.str_to_uuid,
        validator=validator,
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
    )
def DecimalField(default=NOTHING, required=True, repr=True, cmp=True, key=None):
    """
    Create new decimal field on a model.

    :param default: any decimal value
    :param bool required: whether the field is mandatory for a valid object.
    :param bool repr: whether this field appears in the object's repr.
    :param bool cmp: whether this field participates in generated comparisons.
    :param string key: override name of the value when converted to dict.
    """
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, Decimal)
    return attrib(
        default=default,
        # Match the None-tolerant converters used by the other field types:
        # Decimal(None) raises TypeError, which would break an optional
        # field whose default is None.
        converter=lambda x: x if x is None else Decimal(x),
        validator=validator,
        repr=repr,
        cmp=cmp,
        metadata=dict(key=key),
    )
import re
import time
import requests
import pandas as pd
import jaro
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
from ftfy import fix_text
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
class ScorerNotAvailable(Exception):
    """Raised when an unknown matching method is requested."""
    pass
class VectorizerNotDefined(Exception):
    """Raised when tf_idf matching is requested without a fitted vectorizer."""
    pass
class APIKeyNotDefined(Exception):
    """Raised when UMLS matching is requested without an NLM API key."""
    pass
def ngrams(string: str, n=10) -> list:
    """
    Clean an input string and split it into character n-grams.

    :param string: str
    :param n: int
    :return: list
    """
    text = str(string).lower()
    text = fix_text(text)
    text = text.encode("ascii", errors="ignore").decode()  # strip non-ascii
    removable = [")", "(", ".", "|", "[", "]", "{", "}", "'", "-"]
    pattern = '[' + re.escape(''.join(removable)) + ']'
    text = re.sub(pattern, '', text)  # drop punctuation / brackets
    text = text.replace('&', 'and')
    text = text.title()  # normalise case: capital at the start of each word
    text = re.sub(' +', ' ', text).strip()  # collapse runs of spaces
    text = ' ' + text + ' '  # pad so edge characters appear in full n-grams
    grams = zip(*[text[i:] for i in range(n)])
    return [''.join(gram) for gram in grams]
def generateRelatedOntologies(query: str, choices: list, method: str, **kwargs) -> list or pd.DataFrame:
    """
    Generates ontologies in choices that are related to the query based on the method selected.

    :param query: str
    :param choices: list
    :param method: str (partial_ratio, jaro_winkler, tf_idf, UMLS)
    :param kwargs: optional extras depending on the method:
        * df_ontology: DataFrame with a LABEL column; when supplied, a
          scored, sorted DataFrame is returned instead of a list.
        * vectorizer / tf_idf_matrix: required for the tf_idf method.
        * apikey: required for the UMLS method.
    :return: list of (label, score) tuples, a scored DataFrame, or the raw
        cosine-similarity array (tf_idf without df_ontology).
    :raises VectorizerNotDefined: tf_idf requested without a vectorizer.
    :raises APIKeyNotDefined: UMLS requested without an API key.
    :raises ScorerNotAvailable: unknown method name.
    """
    if method == 'partial_ratio':
        # Drop the first hit: it is the query matching itself.
        related = process.extractBests(query, choices, scorer=fuzz.partial_ratio, limit=100)[1:]
        if 'df_ontology' not in kwargs:
            return related
        return _merge_scores(kwargs['df_ontology'], related, 'partial_ratio')
    elif method == 'jaro_winkler':
        related = process.extractBests(query, choices, scorer=jaro.jaro_winkler_metric, limit=100)[1:]
        if 'df_ontology' not in kwargs:
            return related
        return _merge_scores(kwargs['df_ontology'], related, 'jaro_winkler')
    elif method == 'tf_idf':
        if 'vectorizer' not in kwargs or 'tf_idf_matrix' not in kwargs:
            raise VectorizerNotDefined("Please define a vectorizer in the function call.")
        vectorizer = kwargs['vectorizer']
        tf_idf_matrix = kwargs['tf_idf_matrix']
        fitted_query = vectorizer.transform([query])
        scores = cosine_similarity(tf_idf_matrix, fitted_query)
        if 'df_ontology' not in kwargs:
            return scores
        # Work on a copy so the caller's DataFrame is not mutated
        # (previously a 'tf_idf' column was added to it in place).
        df_scored = kwargs['df_ontology'].copy()
        df_scored['tf_idf'] = scores
        df_scored = df_scored.sort_values(by=['tf_idf'], ascending=False)
        # Skip row 0 (the self-match) and keep the next 100 rows.
        return df_scored[1:101]
    elif method == 'UMLS':
        if 'apikey' not in kwargs:
            raise APIKeyNotDefined('Please provide your NLM UMLS API key.')
        base_uri = 'https://uts-ws.nlm.nih.gov'
        path = '/search/current/'
        # Use a separate name for the request parameters instead of
        # rebinding the `query` parameter.
        params = {'apiKey': kwargs['apikey'], 'string': query, 'sabs': 'SNOMEDCT_US', 'returnIdType': 'code'}
        output = requests.get(base_uri + path, params=params)
        results = output.json()['result']['results']
        return [(item['name'], item['ui']) for item in results]
    else:
        raise ScorerNotAvailable("Please define scorer from available options in the configuration file.")


def _merge_scores(df_ontology, related, score_column):
    """Join fuzzy-match scores onto matching ontology rows, sorted by score."""
    df_related_score = pd.DataFrame(related, columns=['LABEL', score_column])
    df_related = df_ontology[df_ontology['LABEL'].isin([i[0] for i in related])]
    df_data = df_related.merge(df_related_score, on='LABEL')
    return df_data.sort_values(by=[score_column], ascending=False)
def partial_ratio(string_1: str, string_2: str) -> float:
    """
    Calculates the fuzzywuzzy partial ratio between 2 strings (case-insensitive).

    :param string_1: str
    :param string_2: str
    :return: float
    """
    return fuzz.partial_ratio(string_1.lower(), string_2.lower())
def jaro_winkler(string_1: str, string_2: str) -> float:
    """
    Calculates the Jaro-Winkler score between 2 strings (case-insensitive).

    :param string_1: str
    :param string_2: str
    :return: float
    """
    return jaro.jaro_winkler_metric(string_1.lower(), string_2.lower())
def tf_idf(list_of_ontologies: list) -> float:
    """
    Fit a character-ngram TF-IDF vectorizer over the given ontology labels.

    NOTE(review): despite the original description, this function does NOT
    compute a cosine similarity between two strings -- it fits the
    vectorizer, prints timing/shape diagnostics, and always returns 0.0.
    The fitted vectorizer and matrix are discarded; presumably this is a
    benchmark stub -- confirm intent with the author.

    :param list_of_ontologies: list
    :return: float (always 0.0)
    """
    t1 = time.time()
    vectorizer = TfidfVectorizer(min_df=1, analyzer=ngrams)
    tf_idf_matrix = vectorizer.fit_transform(list_of_ontologies)
    t = time.time() - t1
    print("Time:", t)
    print(tf_idf_matrix.shape)
    score = 0.0
    return score
import torch
import numpy as np
# BIO tag -> index for predicate labels:
#   P-B = beginning of a predicate span, P-I = inside, O = outside.
pred_tag2idx = {
    'P-B': 0, 'P-I': 1, 'O': 2
}
# BIO tag -> index for argument labels (A0..A3 spans plus outside).
arg_tag2idx = {
    'A0-B': 0, 'A0-I': 1,
    'A1-B': 2, 'A1-I': 3,
    'A2-B': 4, 'A2-I': 5,
    'A3-B': 6, 'A3-I': 7,
    'O': 8,
}
def get_word2piece(sentence, tokenizer):
    """Map each whitespace-separated word index to its word-piece indexes.

    Piece indexes start at 1, leaving index 0 for a leading special token
    (e.g. [CLS]).
    """
    words = sentence.split(' ')
    mapping = {}
    all_pieces = []
    next_idx = 1  # index 0 is reserved for the leading special token
    for w_idx, word in enumerate(words):
        pieces = tokenizer.tokenize(word)
        all_pieces.extend(pieces)
        mapping[w_idx] = list(range(next_idx, next_idx + len(pieces)))
        next_idx += len(pieces)
    assert len(all_pieces) == next_idx - 1
    return mapping
def get_pred_idxs(pred_tags):
    """Return, per sentence, the indexes whose predicate tag is not 'O' (2)."""
    return [
        [pos.item() for pos in (tags != 2).nonzero()]
        for tags in pred_tags
    ]
def get_pred_mask(tensor):
    """
    Generate predicate masks by marking positions with the 'O' tag as True.
    Other indexes become False, which means non-masking.

    :param tensor: predicate tagged tensor with the shape of (B, L),
        where B is the batch size, L is the sequence length.
    :return: boolean tensor with the same shape, on the input's device.
    """
    # The comparison already yields the desired bool tensor; this replaces
    # the previous clone / assign / `torch.tensor(existing_tensor)` dance,
    # which triggered the copy-construction warning and made two extra
    # copies.
    return tensor == pred_tag2idx['O']
def filter_pred_tags(pred_tags, tokens):
    """
    Filter useless tokens by converting them into 'Outside' tag.
    We treat 'Inside' tag before 'Beginning' tag as meaningful signal,
    so changed them to 'Beginning' tag unlike [Stanovsky et al., 2018].

    :param pred_tags: predicate tags with the shape of (B, L).
    :param tokens: list format sentence pieces with the shape of (B, L)
    :return: tensor of filtered predicate tags with the same shape.
        NOTE: `pred_tags` is modified in place and also returned.
    """
    assert len(pred_tags) == len(tokens)
    assert len(pred_tags[0]) == len(tokens[0])
    # filter by tokens ([CLS], [SEP], [PAD] tokens should be allocated as 'O')
    for pred_idx, cur_tokens in enumerate(tokens):
        for tag_idx, token in enumerate(cur_tokens):
            if token in ['[CLS]', '[SEP]', '[PAD]']:
                pred_tags[pred_idx][tag_idx] = pred_tag2idx['O']
    # filter by tags; iterate over clones so the in-place edits above/below
    # do not affect the values being inspected.
    pred_copied = pred_tags.clone()
    for pred_idx, cur_pred_tag in enumerate(pred_copied):
        # `flag` is True while we are inside an open predicate span.
        flag = False
        tag_copied = cur_pred_tag.clone()
        for tag_idx, tag in enumerate(tag_copied):
            if not flag and tag == pred_tag2idx['P-B']:
                flag = True
            elif not flag and tag == pred_tag2idx['P-I']:
                # An 'Inside' tag with no preceding 'Beginning' starts a
                # new span, so promote it to 'P-B'.
                pred_tags[pred_idx][tag_idx] = pred_tag2idx['P-B']
                flag = True
            elif flag and tag == pred_tag2idx['O']:
                flag = False
    return pred_tags
def filter_arg_tags(arg_tags, pred_tags, tokens):
    """
    Same as the description of @filter_pred_tags().

    :param arg_tags: argument tags with the shape of (B, L).
    :param pred_tags: predicate tags with the same shape.
        It is used to force predicate position to be allocated the 'Outside' tag.
    :param tokens: list of string tokens with the length of L
        (a single flat token list applied to every row, unlike
        filter_pred_tags which takes per-row token lists).
    :return: tensor of filtered argument tags with the same shape.
        NOTE: `arg_tags` is modified in place and also returned.
    """
    # filter by tokens ([CLS], [SEP], [PAD] tokens should be allocated as 'O')
    for arg_idx, cur_arg_tag in enumerate(arg_tags):
        for tag_idx, token in enumerate(tokens):
            if token in ['[CLS]', '[SEP]', '[PAD]']:
                arg_tags[arg_idx][tag_idx] = arg_tag2idx['O']
    # filter by tags
    arg_copied = arg_tags.clone()
    for arg_idx, (cur_arg_tag, cur_pred_tag) in enumerate(zip(arg_copied, pred_tags)):
        # Positions tagged as predicate cannot also be arguments.
        pred_idxs = [idx[0].item() for idx
                     in (cur_pred_tag != pred_tag2idx['O']).nonzero()]
        arg_tags[arg_idx][pred_idxs] = arg_tag2idx['O']
        cur_arg_copied = arg_tags[arg_idx].clone()
        # flag_idx holds the argument number (A0..A3) of the span currently
        # open; 999 is the "no open span" sentinel.
        flag_idx = 999
        for tag_idx, tag in enumerate(cur_arg_copied):
            if tag == arg_tag2idx['O']:
                flag_idx = 999
                continue
            arg_n = tag // 2  # 0: A0 / 1: A1 / ...
            inside = tag % 2  # 0: begin / 1: inside
            if not inside and flag_idx != arg_n:
                flag_idx = arg_n
            # connect_args: a second 'begin' of the same label directly
            # after an open span is treated as a continuation.
            elif not inside and flag_idx == arg_n:
                arg_tags[arg_idx][tag_idx] = arg_tag2idx[f'A{arg_n}-I']
            elif inside and flag_idx != arg_n:
                # 'inside' without a preceding begin starts a new span.
                arg_tags[arg_idx][tag_idx] = arg_tag2idx[f'A{arg_n}-B']
                flag_idx = arg_n
    return arg_tags
def get_max_prob_args(arg_tags, arg_probs):
    """
    Among predicted argument tags, remain only arguments with highest probs.
    The comparison of probability is made only between the same argument labels.

    :param arg_tags: argument tags with the shape of (B, L).
        NOTE: modified in place and also returned.
    :param arg_probs: argument softmax probabilities with the shape of (B, L, T),
        where B is the batch size, L is the sequence length, and T is the # of tag labels.
    :return: tensor of filtered argument tags with the same shape.
    """
    for cur_arg_tag, cur_probs in zip(arg_tags, arg_probs):
        # Probability assigned to the tag actually predicted at each position.
        cur_tag_probs = [cur_probs[idx][tag] for idx, tag in enumerate(cur_arg_tag)]
        for arg_n in range(4):
            b_tag = arg_tag2idx[f"A{arg_n}-B"]
            i_tag = arg_tag2idx[f"A{arg_n}-I"]
            flag = False  # True while a span of this label is open
            total_tags = []  # list of index-lists, one per candidate span
            cur_tags = []
            for idx, tag in enumerate(cur_arg_tag):
                if not flag and tag == b_tag:
                    flag = True
                    cur_tags.append(idx)
                elif flag and tag == i_tag:
                    cur_tags.append(idx)
                elif flag and tag == b_tag:
                    total_tags.append(cur_tags)
                    cur_tags = [idx]
                elif tag != b_tag or tag != i_tag:
                    # NOTE(review): this condition is always True (a tag
                    # cannot equal both), so the branch acts as the
                    # catch-all "span closed / no span" case; empty
                    # `cur_tags` lists appended here are skipped below.
                    total_tags.append(cur_tags)
                    cur_tags = []
                    flag = False
            # NOTE(review): a span still open when the loop ends is never
            # flushed into total_tags; in practice sequences end with
            # 'O'-tagged special tokens, but confirm.
            max_idxs, max_prob = None, 0.0
            for idxs in total_tags:
                all_probs = [cur_tag_probs[idx].item() for idx in idxs]
                if len(all_probs) == 0:
                    continue
                # NOTE(review): a span is scored by its first (begin-token)
                # probability only, not a mean/max over the span.
                cur_prob = all_probs[0]
                if cur_prob > max_prob:
                    max_prob = cur_prob
                    max_idxs = idxs
            if max_idxs is None:
                continue
            # Erase every span of this label except the winning one.
            del_idxs = [idx for idx, tag in enumerate(cur_arg_tag)
                        if (tag in [b_tag, i_tag]) and (idx not in max_idxs)]
            cur_arg_tag[del_idxs] = arg_tag2idx['O']
    return arg_tags
def get_single_predicate_idxs(pred_tags):
    """
    Divide each single batch based on predicted predicates.
    It is necessary for predicting argument tags with specific predicate.

    :param pred_tags: tensor of predicate tags with the shape of (B, L)
        EX >>> tensor([[2, 0, 0, 1, 0, 1, 0, 2, 2, 2],
                       [2, 2, 2, 0, 1, 0, 1, 2, 2, 2],
                       [2, 2, 2, 2, 2, 2, 2, 2, 0, 1]])
    :return: list of tensors with the shape of (B, P, L)
        the number P can be different for each batch.
        EX >>> [tensor([[2., 0., 2., 2., 2., 2., 2., 2., 2., 2.],
                        [2., 2., 0., 1., 2., 2., 2., 2., 2., 2.],
                        [2., 2., 2., 2., 0., 1., 2., 2., 2., 2.],
                        [2., 2., 2., 2., 2., 2., 0., 2., 2., 2.]]),
                tensor([[2., 2., 2., 0., 1., 2., 2., 2., 2., 2.],
                        [2., 2., 2., 2., 2., 0., 1., 2., 2., 2.]]),
                tensor([[2., 2., 2., 2., 2., 2., 2., 2., 0., 1.]])]
    """
    total_pred_tags = []
    for cur_pred_tag in pred_tags:
        cur_sent_preds = []
        # Each 'P-B' tag starts one predicate span.
        begin_idxs = [idx[0].item() for idx in (cur_pred_tag == pred_tag2idx['P-B']).nonzero()]
        for i, b_idx in enumerate(begin_idxs):
            # One all-'O' row per predicate, with only that span filled in.
            cur_pred = np.full(cur_pred_tag.shape[0], pred_tag2idx['O'])
            cur_pred[b_idx] = pred_tag2idx['P-B']
            if i == len(begin_idxs) - 1:
                end_idx = cur_pred_tag.shape[0]
            else:
                end_idx = begin_idxs[i + 1]
            # Copy the consecutive 'P-I' tags following this begin; stop at
            # the first 'O'.
            for j, tag in enumerate(cur_pred_tag[b_idx:end_idx]):
                if tag.item() == pred_tag2idx['O']:
                    break
                elif tag.item() == pred_tag2idx['P-I']:
                    cur_pred[b_idx + j] = pred_tag2idx['P-I']
            cur_sent_preds.append(cur_pred)
        total_pred_tags.append(cur_sent_preds)
    return [torch.Tensor(pred_tags) for pred_tags in total_pred_tags]
def get_tuple(sentence, pred_tags, arg_tags, tokenizer):
    """
    Get string format tuples from given predicate indexes and argument tags.

    :param sentence: string format raw sentence.
    :param pred_tags: tensor of predicate tags with the shape of (# of predicates, sequence length).
    :param arg_tags: tensor of argument tags with the same shape.
    :param tokenizer: transformer BertTokenizer (bert-base-cased or bert-base-multilingual-cased)
    :return extractions: list of strings each element means predicate, arg0, arg1, ...
    :return extraction_idxs: list of indexes of each argument for calculating confidence score.
    """
    word2piece = get_word2piece(sentence, tokenizer)
    words = sentence.split(' ')
    assert pred_tags.shape[0] == arg_tags.shape[0]  # number of predicates
    pred_tags = pred_tags.tolist()
    arg_tags = arg_tags.tolist()
    extractions = list()
    extraction_idxs = list()
    # loop for each predicate
    for cur_pred_tag, cur_arg_tags in zip(pred_tags, arg_tags):
        cur_extraction = list()
        cur_extraction_idxs = list()
        # get predicate
        pred_labels = [pred_tag2idx['P-B'], pred_tag2idx['P-I']]
        cur_predicate_idxs = [idx for idx, tag in enumerate(cur_pred_tag) if tag in pred_labels]
        if len(cur_predicate_idxs) == 0:
            predicates_str = ''
        else:
            # A word belongs to the predicate only if ALL of its pieces are
            # tagged as predicate.
            cur_pred_words = list()
            for word_idx, piece_idxs in word2piece.items():
                if set(piece_idxs) <= set(cur_predicate_idxs):
                    cur_pred_words.append(word_idx)
            if len(cur_pred_words) == 0:
                predicates_str = ''
                cur_predicate_idxs = list()
            else:
                predicates_str = ' '.join([words[idx] for idx in cur_pred_words])
        cur_extraction.append(predicates_str)
        cur_extraction_idxs.append(cur_predicate_idxs)
        # get arguments (A0..A3, in order; empty string when absent)
        for arg_n in range(4):
            cur_arg_labels = [arg_tag2idx[f'A{arg_n}-B'], arg_tag2idx[f'A{arg_n}-I']]
            cur_arg_idxs = [idx for idx, tag in enumerate(cur_arg_tags) if tag in cur_arg_labels]
            if len(cur_arg_idxs) == 0:
                cur_arg_str = ''
            else:
                # Same whole-word rule as for predicates.
                cur_arg_words = list()
                for word_idx, piece_idxs in word2piece.items():
                    if set(piece_idxs) <= set(cur_arg_idxs):
                        cur_arg_words.append(word_idx)
                if len(cur_arg_words) == 0:
                    cur_arg_str = ''
                    cur_arg_idxs = list()
                else:
                    cur_arg_str = ' '.join([words[idx] for idx in cur_arg_words])
            cur_extraction.append(cur_arg_str)
            cur_extraction_idxs.append(cur_arg_idxs)
        extractions.append(cur_extraction)
        extraction_idxs.append(cur_extraction_idxs)
    return extractions, extraction_idxs
def get_confidence_score(pred_probs, arg_probs, extraction_idxs):
    """
    get the confidence score of each extraction for drawing PR-curve.

    The score is the max probability at the predicate's first piece plus,
    for each argument, the mean of the max probabilities at the start of
    each of its consecutive index runs.  Extractions with no predicate
    indexes score 0.

    :param pred_probs: (sequence length, # of predicate labels)
    :param arg_probs: (# of predicates, sequence length, # of argument labels)
    :param extraction_idxs: [[[2, 3, 4], [0, 1], [9, 10]], [[0, 1, 2], [7, 8], [4, 5]], ...]
    """
    confidence_scores = list()
    for cur_arg_prob, cur_ext_idxs in zip(arg_probs, extraction_idxs):
        if len(cur_ext_idxs[0]) == 0:
            # no predicate was found for this extraction
            confidence_scores.append(0)
            continue
        cur_score = 0
        # predicate score: probability at the predicate's first piece
        pred_score = max(pred_probs[cur_ext_idxs[0][0]]).item()
        cur_score += pred_score
        # argument score
        for arg_idx in cur_ext_idxs[1:]:
            if len(arg_idx) == 0:
                continue
            begin_idxs = _find_begins(arg_idx)
            arg_score = np.mean([max(cur_arg_prob[cur_idx]).item() for cur_idx in begin_idxs])
            cur_score += arg_score
        confidence_scores.append(cur_score)
    return confidence_scores
def _find_begins(idxs):
found_begins = [idxs[0]]
cur_flag_idx = idxs[0]
for cur_idx in idxs[1:]:
if cur_idx - cur_flag_idx != 1:
found_begins.append(cur_idx)
cur_flag_idx = cur_idx
return found_begins | /src/bert_oie/bio.py | 0.613352 | 0.534855 | bio.py | pypi |
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
from torch.nn.modules.container import ModuleList
from transformers import BertModel
class ArgModule(nn.Module):
    def __init__(self, arg_layer, n_layers):
        """
        Module for extracting arguments based on given encoder output and predicates.
        It stacks `n_layers` deep copies of `arg_layer` and applies them in order.

        :param arg_layer: an instance of the ArgExtractorLayer() class (required)
        :param n_layers: the number of sub-layers in the ArgModule (required).
        """
        super(ArgModule, self).__init__()
        self.layers = _get_clones(arg_layer, n_layers)
        self.n_layers = n_layers

    def forward(self, encoded, predicate, pred_mask=None):
        """
        :param encoded: output from sentence encoder with the shape of (L, B, D),
            where L is the sequence length, B is the batch size, D is the embedding dimension
        :param predicate: output from predicate module with the shape of (L, B, D)
        :param pred_mask: mask that prevents attention to tokens which are not predicates
            with the shape of (B, L)
        :return: tensor like Transformer Decoder Layer Output
        """
        hidden = encoded
        for layer in self.layers:
            hidden = layer(target=hidden, source=predicate, key_mask=pred_mask)
        return hidden
class ArgExtractorLayer(nn.Module):
    def __init__(self,
                 d_model=768,
                 n_heads=8,
                 d_feedforward=2048,
                 dropout=0.1,
                 activation='relu'):
        """
        A layer similar to Transformer decoder without decoder self-attention.
        (only encoder-decoder multi-head attention followed by feed-forward layers)

        :param d_model: model dimensionality (default=768 from BERT-base)
        :param n_heads: number of heads in multi-head attention layer
        :param d_feedforward: dimensionality of point-wise feed-forward layer
        :param dropout: drop rate of all layers
        :param activation: activation function after first feed-forward layer
        """
        super(ArgExtractorLayer, self).__init__()
        self.multihead_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
        self.linear1 = nn.Linear(d_model, d_feedforward)
        self.dropout1 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)

    def forward(self, target, source, key_mask=None):
        """
        Single Transformer Decoder layer without self-attention

        :param target: a tensor which takes a role as a query
        :param source: a tensor which takes a role as a key & value
        :param key_mask: key mask tensor with the shape of (batch_size, sequence_length)
        """
        # Multi-head attention sub-layer (+ residual & layer norm)
        attended = self.multihead_attn(
            target, source, source,
            key_padding_mask=key_mask)[0]
        skipped = target + self.dropout1(attended)
        normed = self.norm1(skipped)
        # Point-wise feed-forward sub-layer (+ residual & layer norm).
        # Fix: the residual dropout here previously reused `dropout1`,
        # leaving `dropout3` defined but never applied; each sub-layer now
        # uses its own dropout module as in the reference Transformer.
        projected = self.linear2(self.dropout2(self.activation(self.linear1(normed))))
        skipped = normed + self.dropout3(projected)
        normed = self.norm2(skipped)
        return normed
class BERTBiLSTM(nn.Module):
def __init__(self,
bert_config='bert-base-cased',
lstm_dropout=0.3,
pred_clf_dropout=0.,
arg_clf_dropout=0.3,
pos_emb_dim=256,
pred_n_labels=3,
arg_n_labels=9):
super(BERTBiLSTM, self).__init__()
self.pred_n_labels = pred_n_labels
self.arg_n_labels = arg_n_labels
self.bert = BertModel.from_pretrained(
bert_config,
output_hidden_states=True)
d_model = self.bert.config.hidden_size
self.pred_dropout = nn.Dropout(pred_clf_dropout)
self.pred_classifier = nn.Linear(d_model, self.pred_n_labels)
self.position_emb = nn.Embedding(3, pos_emb_dim, padding_idx=0)
d_model += pos_emb_dim
self.arg_module = nn.LSTM(
input_size=d_model,
hidden_size=d_model,
num_layers=3,
dropout=lstm_dropout,
batch_first=True,
bidirectional=True)
self.arg_dropout = nn.Dropout(arg_clf_dropout)
self.arg_classifier = nn.Linear(d_model * 2, arg_n_labels)
def forward(self,
input_ids,
attention_mask,
predicate_mask=None,
predicate_hidden=None,
total_pred_labels=None,
arg_labels=None):
# predicate extraction
bert_hidden = self.bert(input_ids, attention_mask)[0]
pred_logit = self.pred_classifier(self.pred_dropout(bert_hidden))
# predicate loss
if total_pred_labels is not None:
loss_fct = nn.CrossEntropyLoss()
active_loss = attention_mask.view(-1) == 1
active_logits = pred_logit.view(-1, self.pred_n_labels)
active_labels = torch.where(
active_loss, total_pred_labels.view(-1),
torch.tensor(loss_fct.ignore_index).type_as(total_pred_labels))
pred_loss = loss_fct(active_logits, active_labels)
# argument extraction
position_vectors = self.position_emb(_get_position_idxs(predicate_mask, input_ids))
bert_hidden = torch.cat([bert_hidden, position_vectors], dim=2)
arg_hidden = self.arg_module(bert_hidden)[0]
arg_logit = self.arg_classifier(self.arg_dropout(arg_hidden))
# argument loss
if arg_labels is not None:
loss_fct = nn.CrossEntropyLoss()
active_loss = attention_mask.view(-1) == 1
active_logits = arg_logit.view(-1, self.arg_n_labels)
active_labels = torch.where(
active_loss, arg_labels.view(-1),
torch.tensor(loss_fct.ignore_index).type_as(arg_labels))
arg_loss = loss_fct(active_logits, active_labels)
# total loss
batch_loss = pred_loss + arg_loss
outputs = (batch_loss, pred_loss, arg_loss)
return outputs
def extract_predicate(self,
                      input_ids,
                      attention_mask):
    """Run BERT and score every token for predicate tags.

    Returns a tuple of (predicate logits, BERT hidden states); the
    hidden states are meant to be fed to ``extract_argument`` later.
    """
    hidden_states = self.bert(input_ids, attention_mask)[0]
    logits = self.pred_classifier(hidden_states)
    return logits, hidden_states
def extract_argument(self,
                     input_ids,
                     predicate_hidden,
                     predicate_mask):
    """Score argument tags given BERT states and a predicate mask.

    Position embeddings derived from the predicate mask are
    concatenated feature-wise onto the hidden states before the
    BiLSTM argument module and final classifier.
    """
    pos_vectors = self.position_emb(_get_position_idxs(predicate_mask, input_ids))
    lstm_input = torch.cat([predicate_hidden, pos_vectors], dim=2)
    lstm_output = self.arg_module(lstm_input)[0]
    return self.arg_classifier(lstm_output)
def _get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
else:
raise RuntimeError("activation should be relu/gelu, not %s." % activation)
def _get_clones(module, n):
return ModuleList([copy.deepcopy(module) for _ in range(n)])
def _get_position_idxs(pred_mask, input_ids):
position_idxs = torch.zeros(pred_mask.shape, dtype=int, device=pred_mask.device)
for mask_idx, cur_mask in enumerate(pred_mask):
position_idxs[mask_idx, :] += 2
cur_nonzero = (cur_mask == 0).nonzero()
start = torch.min(cur_nonzero).item()
end = torch.max(cur_nonzero).item()
position_idxs[mask_idx, start:end + 1] = 1
pad_start = max(input_ids[mask_idx].nonzero()).item() + 1
position_idxs[mask_idx, pad_start:] = 0
return position_idxs
def _get_pred_feature(pred_hidden, pred_mask):
    # Broadcast the mean hidden state of each row's predicate span
    # across the whole sequence.
    #
    # pred_hidden: (B, L, D) token-level hidden states.
    # pred_mask:   (B, L); 0 marks predicate positions (same convention
    #              as _get_position_idxs).
    # Returns a (B, L, D) tensor where every position of row b holds the
    # mean of row b's predicate-position vectors.
    #
    # NOTE(review): output is allocated with the default dtype on
    # pred_mask's device -- presumably matching pred_hidden's dtype and
    # device; confirm for fp16 / multi-device runs.
    B, L, D = pred_hidden.shape
    pred_features = torch.zeros((B, L, D), device=pred_mask.device)
    for mask_idx, cur_mask in enumerate(pred_mask):
        # Indices of the predicate tokens in this row.
        pred_position = (cur_mask == 0).nonzero().flatten()
        # Pool the predicate vectors into a single D-dim feature.
        pred_feature = torch.mean(pred_hidden[mask_idx, pred_position], dim=0)
        # Repeat the pooled vector L times so it can be combined
        # position-wise with the sequence states.
        pred_feature = torch.cat(L * [pred_feature.unsqueeze(0)])
        pred_features[mask_idx, :, :] = pred_feature
    return pred_features | /src/bert_oie/model.py | 0.955194 | 0.412294 | model.py | pypi |
# TODO(hayesall): Modes. Where do I put them, how do I store them?
# A more-general "schema" would be helpful. Plus it would probably be
# cleaner if I separated advice about structure, types, and search procedures.
# TODO(hayesall): `load` could be made iterable with a generator expression.
# Possibly useful for iterating over all folds, e.g. for cross validation.
# TODO(hayesall): It doesn't make sense to allow a `data_home` parameter
# when a user cannot modify the parameter in the `_make_file_path`
# function.
from io import BytesIO
from io import TextIOWrapper
import json
import logging
import pathlib
from urllib.request import urlopen
from zipfile import ZipFile
from typing import Tuple
from typing import Optional
from ._base import get_data_home
from .types import RelationalDataset
# Release-archive URL template for current versions (archive name
# carries the version suffix).
VERSION_URL = (
    "https://github.com/srlearn/datasets/releases/download/{version}/{archive}_{version}.zip"
)
# Older template without the version suffix in the archive name.
OLD_VERSION_URL = (
    # TODO(hayesall): Used in v0.0.2 and v0.0.3, remove in the beta release.
    "https://github.com/srlearn/datasets/releases/download/{version}/{archive}.zip"
)
# Names of datasets published in the srlearn/datasets repository;
# `_make_data_url` rejects anything not listed here.
DATASETS = [
    "toy_cancer",
    "toy_father",
    "citeseer",
    "cora",
    "uwcse",
    "webkb",
    "financial_nlp_small",
    "nell_sports",
    "icml",
    "boston_housing",
    "drug_interactions",
    "toy_machines",
    "california_housing",
    "roofworld20",
]
# Default version used when a caller does not request one.
LATEST_VERSION = "v0.0.6"
def latest_version() -> str:
    """Return the newest ``srlearn/datasets`` release tag via GitHub's REST API.

    !!! danger end
        Unauthenticated requests to GitHub's REST API are limited to 60
        per hour, so this helper is for interactive convenience only.
        Prefer checking the latest version in a browser via the
        [Releases Page](https://github.com/srlearn/datasets/tags).

    Read more on the
    [GitHub REST API Authentication](https://docs.github.com/en/rest/guides/getting-started-with-the-rest-api#authentication).

    Returns:
        The latest version tag of datasets stored in the
        [`srlearn/datasets`](https://github.com/srlearn/datasets/) repository.

    Examples:
        ```python
        from relational_datasets.request import latest_version
        latest_version()
        # 'v0.0.3'
        ```
    """
    endpoint = "https://api.github.com/repos/srlearn/datasets/releases/latest"
    with urlopen(endpoint) as response:
        payload = json.loads(response.read().decode("utf-8"))
    return payload["tag_name"]
def deserialize_zipfile(
    data_location: str, name: str, *, fold: int = 1
) -> Tuple[RelationalDataset, RelationalDataset]:
    """Deserialize a zipfile, returning train and test sets.

    !!! warning end
        This method is presented here to illustrate how data are unpacked
        from zip archives.  The user is responsible for the structure of
        custom archives.  The
        [`srlearn/datasets`](https://github.com/srlearn/datasets/tree/main/srlearn)
        repository defines the assumptions for how datasets are stored.

    Archives either contain a single split::

        {{ name }}/train/train_{pos,neg,facts}.txt
        {{ name }}/test/test_{pos,neg,facts}.txt

    ... or multiple folds::

        {{ name }}/fold{N}/train/train_{pos,neg,facts}.txt
        {{ name }}/fold{N}/test/test_{pos,neg,facts}.txt

    Arguments:
        data_location: Location of a zipfile.
        name: Name of the dataset.
        fold: In datasets with multiple folds, return this fold. This value
            is ignored if the data is not split into multiple folds.

    Returns:
        Tuple of training and test sets.

    Raises:
        ValueError: If ``fold`` exceeds the number of folds in the archive.

    Examples:
        Load fold-2 of cora-v0.0.3 from an absolute path:

        ```python
        from relational_datasets.request import deserialize_zipfile
        train, test = deserialize_zipfile(
            '/home/user/relational_datasets/cora_v0.0.3.zip',
            'cora',
            fold=2,
        )
        ```
    """
    def _read(archive, member):
        # Decode one archive member as text and split into lines.
        with archive.open(member, "r") as _fh:
            return TextIOWrapper(_fh).read().splitlines()

    with ZipFile(data_location) as myzip:
        folds = _n_folds(myzip)
        if folds == 0:
            # This dataset contains no folds.
            prefix = name
        elif fold > folds:
            # (Replaces a stray debug print with an informative message.)
            raise ValueError(
                f"Fold {fold} does not exist: archive contains {folds} fold(s)."
            )
        else:
            prefix = f"{name}/fold{fold}"
        train = [
            _read(myzip, f"{prefix}/train/train_{part}.txt")
            for part in ("pos", "neg", "facts")
        ]
        test = [
            _read(myzip, f"{prefix}/test/test_{part}.txt")
            for part in ("pos", "neg", "facts")
        ]
    return (
        RelationalDataset._make(train),
        RelationalDataset._make(test),
    )
def load(
    name: str, version: Optional[str] = None, *, fold: int = 1
) -> Tuple[RelationalDataset, RelationalDataset]:
    """Download (if not cached) and return train/test splits of a dataset.

    Arguments:
        name: Dataset name (e.g. `toy_cancer`)
        version: Dataset version (e.g. `v0.0.3`)
        fold: In datasets with multiple folds, return this fold. This value
            is ignored if the data is not split into multiple folds.

    Returns:
        Returns the training and test.

    Raises:
        urllib.error.URLError: If the data is not in the cache and cannot
            be downloaded, a failed request will raise this exception.

    Examples:
        Load version ``v0.0.3`` of the ``toy_cancer`` dataset:

        ```python
        >>> from relational_datasets import load
        >>> train, test = load("toy_cancer", "v0.0.3")
        >>> train.pos
        ['cancer(alice).', 'cancer(bob).', 'cancer(chuck).', 'cancer(fred).']
        ```
    """
    archive_path = fetch(name, version)
    return deserialize_zipfile(archive_path, name=name, fold=fold)
def fetch(name: str, version: Optional[str] = None) -> str:
    """Ensure a dataset archive is cached locally; return its path.

    Arguments:
        name: Dataset name, usually lowercase with underscores.
        version: Dataset version. Downloads a default if not provided.

    Returns:
        A string representing the path to the downloaded dataset, e.g.::

            '/home/user/relational_datasets/toy_cancer_v0.0.3.zip'

        The path is built with `pathlib` and converted to a string, so it
        should work cross-platform.

    Raises:
        urllib.error.URLError: If the data is not in the cache and cannot
            be downloaded, a failed request will raise this exception.

    Examples:
        Fetch `toy_cancer` dataset, version `v0.0.3`:

        ```python
        from relational_datasets import fetch
        fetch('toy_cancer', 'v0.0.3')
        # '/home/user/relational_datasets/toy_cancer_v0.0.3.zip'
        ```
    """
    target = _make_file_path(name, version)
    if not target.is_file():
        # Cache miss: download the archive and store it in the data home.
        with urlopen(_make_data_url(name, version)) as response:
            payload = BytesIO(response.read())
        with open(target, "wb") as _fh:
            _fh.write(payload.getbuffer())
    return str(target)
def _make_file_path(name: str, version: Optional[str] = "") -> pathlib.Path:
    """Build the cache path where a dataset archive is stored.

    Falls back to `LATEST_VERSION` when `version` is falsy.
    """
    tag = version if version else LATEST_VERSION
    return pathlib.Path(get_data_home()).joinpath(f"{name}_{tag}.zip")
def _make_data_url(name: str, version: Optional[str] = "") -> str:
    """Create a URL for a dataset with ``name`` and ``version``.

    If a ``version`` is not provided, the latest version is returned.

    Raises:
        ValueError: If ``name`` is not a known dataset. (Previously an
            ``assert``, which is silently stripped under ``python -O``.)
    """
    if name not in DATASETS:
        raise ValueError(f"Unknown dataset {name!r}; expected one of {DATASETS}")
    if version in ["v0.0.2", "v0.0.3"]:
        # Old releases used an archive name without the version suffix.
        logging.warning("Versions v0.0.2, v0.0.3 will be deprecated in the future.")
        return OLD_VERSION_URL.format(archive=name, version=version)
    if not version:
        version = LATEST_VERSION
    return VERSION_URL.format(archive=name, version=version)
def _has_folds(zip: ZipFile) -> bool:
"""Does this zipfile contain folds?"""
file_names = zip.namelist()
has_folds = any(["fold" in path for path in file_names])
return has_folds
def _n_folds(zip: ZipFile) -> int:
    """How many folds does this zipfile contain?

    Returns 0 when the archive has no "fold" directories; otherwise the
    largest fold number appearing in any member path.
    """
    if not _has_folds(zip):
        return 0
    numbers = []
    for path in zip.namelist():
        if "fold" in path:
            # Member paths look like "{name}/fold{N}/..."; extract N.
            numbers.append(int(path.split("fold")[1].split("/")[0]))
    return max(numbers) | /relational_datasets-0.4.0-py3-none-any.whl/relational_datasets/request.py | 0.583678 | 0.760206 | request.py | pypi |
from typing import List, Tuple, Optional
import numpy as np
from ..types import RelationalDataset
def _is_multiclass(y: np.ndarray) -> bool:
return np.unique(y).size > 2
def _get_task(y: np.ndarray) -> str:
"""Return classification/regression"""
if str(y.dtype)[:3] == 'int':
if _is_multiclass(y):
return 'multiclass-classification'
return 'classification'
elif str(y.dtype)[:5] == 'float':
return 'regression'
raise TypeError("Could not determine classification or regression from `y` with type: " + str(y.dtype))
def from_numpy(X: np.ndarray, y: np.ndarray, names: Optional[List[str]] = None) -> Tuple[RelationalDataset, List[str]]:
    """Convert numpy data (`X`) and target (`y`) arrays to a RelationalDataset
    with modes.

    Arguments:
        X: Integer matrix of covariates
        y: Integer or float array containing the target variable
        names: List of strings representing the variable names
            (one per column of `X`, plus one for the target)

    Returns:
        Tuple of `RelationalDataset` and a list of strings containing the modes

    Raises:
        TypeError: When classification vs. regression cannot be determined from
            the types of the input values.

    Examples:
        Demonstrates converting a set of binary classification data.

        ```python
        from relational_datasets.convert import from_numpy

        import numpy as np

        data, modes = from_numpy(
            np.array([[0, 1, 1], [0, 1, 2], [1, 2, 2]]),
            np.array([0, 0, 1]),
        )
        ```
    """
    # X and y must describe the same number of examples.
    assert X.shape[0] == y.shape[0]

    # TODO(hayesall): All `enumerate` calls start from `1` to maintain
    #   parity with Julia module.

    # TODO(hayesall): This is a way to "fail fast": if we cannot determine
    #   type of the `y` vector, the conversion is not possible.
    _task = _get_task(y)

    if names:
        # User-supplied names: one per feature column plus the target.
        assert len(names) == X.shape[1] + 1
    else:
        # + 2 to start from 1.
        names = [f"v{i}" for i in range(1, X.shape[1] + 2)]

    pos, neg, facts = [], [], []

    # The target (last name) becomes positive/negative examples; its
    # encoding depends on the detected task.
    if _task == "classification":
        # Binary: truthy labels are positive examples, the rest negative.
        for i, row in enumerate(y, 1):
            if row:
                pos.append(f"{names[-1]}(id{i}).")
            else:
                neg.append(f"{names[-1]}(id{i}).")
    elif _task == "multiclass-classification":
        # Multiclass: every example is positive, with the class as a
        # second argument.
        for i, row in enumerate(y, 1):
            pos.append(f"{names[-1]}(id{i},{names[-1]}_{row}).")
    else:
        # _get_task(y) == "regression"
        for i, row in enumerate(y, 1):
            pos.append(f"regressionExample({names[-1]}(id{i}),{row}).")

    # Every feature column becomes a set of ground facts relating the
    # example id to a discretized value.
    for i, col in enumerate(X.T):
        var = names[i]
        facts += [f"{var}(id{j},{var}_{row})." for j, row in enumerate(col, 1)]

    # Modes: one per feature, plus one for the target predicate.
    modes = [f"{name}(+id,#var{name})." for name in names[:-1]]
    if _task == "multiclass-classification":
        modes += [f"{names[-1]}(+id,#classlabel)."]
    else:
        modes += [f"{names[-1]}(+id)."]

    return RelationalDataset(pos=pos, neg=neg, facts=facts), modes | /relational_datasets-0.4.0-py3-none-any.whl/relational_datasets/convert/convert_numpy.py | 0.792223 | 0.879923 | convert_numpy.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.