code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import re
import numpy as np
# contains the data structure
class Vocabulary:
    """Stores the tokens and their conversion to vocabulary indexes."""

    def __init__(self, tokens=None, starting_id=0):
        # Bidirectional map: token -> id and id -> token live in the same dict.
        self._tokens = {}
        self._current_id = starting_id

        if tokens:
            for token, idx in tokens.items():
                self._add(token, idx)
                self._current_id = max(self._current_id, idx + 1)

    def __getitem__(self, token_or_id):
        return self._tokens[token_or_id]

    def add(self, token):
        """Adds a token and returns its id (the existing id if already present)."""
        if not isinstance(token, str):
            raise TypeError("Token is not a string")
        if token in self:
            return self[token]
        new_id = self._current_id
        self._add(token, new_id)
        self._current_id = new_id + 1
        return new_id

    def update(self, tokens):
        """Adds many tokens."""
        return [self.add(token) for token in tokens]

    def __delitem__(self, token_or_id):
        # Remove both directions of the mapping.
        counterpart = self._tokens[token_or_id]
        del self._tokens[counterpart]
        del self._tokens[token_or_id]

    def __contains__(self, token_or_id):
        return token_or_id in self._tokens

    def __eq__(self, other_vocabulary):
        return self._tokens == other_vocabulary._tokens  # pylint: disable=W0212

    def __len__(self):
        # Every entry is stored twice (token -> id and id -> token).
        return len(self._tokens) // 2

    def encode(self, tokens):
        """Encodes a list of tokens as vocabulary indexes."""
        indexes = [self._tokens[token] for token in tokens]
        return np.array(indexes, dtype=np.float32)

    def decode(self, vocab_index):
        """Decodes a vocabulary index matrix to a list of tokens."""
        return [self[idx] for idx in vocab_index]

    def _add(self, token, idx):
        # Internal: registers both directions of the mapping, refusing id reuse.
        if idx in self._tokens:
            raise ValueError("IDX already present in vocabulary")
        self._tokens[token] = idx
        self._tokens[idx] = token

    def tokens(self):
        """Returns the tokens from the vocabulary"""
        return [t for t in self._tokens if isinstance(t, str)]
class SMILESTokenizer:
    """Deals with the tokenization and untokenization of SMILES."""

    REGEXPS = {
        "brackets": re.compile(r"(\[[^\]]*\])"),
        "2_ring_nums": re.compile(r"(%\d{2})"),
        "brcl": re.compile(r"(Br|Cl)")
    }
    REGEXP_ORDER = ["brackets", "2_ring_nums", "brcl"]

    def tokenize(self, data, with_begin_and_end=True):
        """Tokenizes a SMILES string."""
        # Fragments are (is_token, text) pairs; once a piece matched a regexp
        # it becomes a token and is never split by the later regexps.
        fragments = [(False, data)]
        for regexp_name in self.REGEXP_ORDER:
            regexp = self.REGEXPS[regexp_name]
            next_fragments = []
            for is_token, text in fragments:
                if is_token:
                    next_fragments.append((True, text))
                else:
                    # re.split with a capture group alternates unmatched (even)
                    # and matched (odd) pieces, preserving order.
                    for pos, piece in enumerate(regexp.split(text)):
                        next_fragments.append((pos % 2 == 1, piece))
            fragments = next_fragments

        tokens = []
        for is_token, text in fragments:
            if is_token:
                tokens.append(text)
            else:
                # Any remaining unmatched text is split into single characters.
                tokens.extend(text)
        if with_begin_and_end:
            tokens = ["^"] + tokens + ["$"]
        return tokens

    def untokenize(self, tokens):
        """Untokenizes a SMILES string."""
        parts = []
        for token in tokens:
            if token == "$":
                break
            if token != "^":
                parts.append(token)
        return "".join(parts)
def create_vocabulary(smiles_list, tokenizer):
    """Creates a vocabulary for the SMILES syntax."""
    all_tokens = set()
    for smiles in smiles_list:
        all_tokens.update(tokenizer.tokenize(smiles, with_begin_and_end=False))

    vocabulary = Vocabulary()
    vocabulary.update(["$", "^"] + sorted(all_tokens))  # end token is 0 (also counts as padding)
    return vocabulary
from typing import List, Tuple
import numpy as np
import torch
import torch.nn as tnn
import torch.nn.functional as tnnf
from reinvent_models.model_factory.enums.model_mode_enum import ModelModeEnum
from reinvent_models.reinvent_core.models import vocabulary as mv
class RNN(tnn.Module):
    """
    Implements a N layer GRU(M) cell including an embedding layer
    and an output linear layer back to the size of the vocabulary
    """

    def __init__(self, voc_size, layer_size=512, num_layers=3, cell_type='gru', embedding_layer_size=256, dropout=0.,
                 layer_normalization=False):
        """
        Implements a N layer GRU|LSTM cell including an embedding layer and an output linear layer back to the size of the
        vocabulary
        :param voc_size: Size of the vocabulary.
        :param layer_size: Size of each of the RNN layers.
        :param num_layers: Number of RNN layers.
        :param cell_type: Either 'gru' or 'lstm' (case-insensitive).
        :param embedding_layer_size: Size of the embedding layer.
        :param dropout: Dropout probability applied between RNN layers.
        :param layer_normalization: If True, layer-normalizes the RNN output.
        """
        super(RNN, self).__init__()

        self._layer_size = layer_size
        self._embedding_layer_size = embedding_layer_size
        self._num_layers = num_layers
        self._cell_type = cell_type.lower()
        self._dropout = dropout
        self._layer_normalization = layer_normalization

        self._embedding = tnn.Embedding(voc_size, self._embedding_layer_size)
        if self._cell_type == 'gru':
            self._rnn = tnn.GRU(self._embedding_layer_size, self._layer_size, num_layers=self._num_layers,
                                dropout=self._dropout, batch_first=True)
        elif self._cell_type == 'lstm':
            self._rnn = tnn.LSTM(self._embedding_layer_size, self._layer_size, num_layers=self._num_layers,
                                 dropout=self._dropout, batch_first=True)
        else:
            raise ValueError('Value of the parameter cell_type should be "gru" or "lstm"')
        self._linear = tnn.Linear(self._layer_size, voc_size)

    def forward(self, input_vector, hidden_state=None):  # pylint: disable=W0221
        """
        Performs a forward pass on the model. Note: you pass the **whole** sequence.
        :param input_vector: Input tensor (batch_size, seq_size).
        :param hidden_state: Hidden state tensor.
        :return: Tuple of (logits of shape (batch_size, seq_size, voc_size), next hidden state).
        """
        batch_size, seq_size = input_vector.size()
        if hidden_state is None:
            size = (self._num_layers, batch_size, self._layer_size)
            # Fix: allocate the initial hidden state on the input's device so the
            # module also works after being moved to CUDA.
            if self._cell_type == "gru":
                hidden_state = torch.zeros(*size, device=input_vector.device)
            else:
                # Fix: torch LSTM expects the initial state as a (h_0, c_0) tuple,
                # not a list.
                hidden_state = (torch.zeros(*size, device=input_vector.device),
                                torch.zeros(*size, device=input_vector.device))
        embedded_data = self._embedding(input_vector)  # (batch, seq, embedding)
        output_vector, hidden_state_out = self._rnn(embedded_data, hidden_state)

        if self._layer_normalization:
            output_vector = tnnf.layer_norm(output_vector, output_vector.size()[1:])
        output_vector = output_vector.reshape(-1, self._layer_size)

        output_data = self._linear(output_vector).view(batch_size, seq_size, -1)
        return output_data, hidden_state_out

    def get_params(self):
        """
        Returns the configuration parameters of the model.
        """
        return {
            'dropout': self._dropout,
            'layer_size': self._layer_size,
            'num_layers': self._num_layers,
            'cell_type': self._cell_type,
            'embedding_layer_size': self._embedding_layer_size
        }
class Model:
    """
    Implements an RNN model using SMILES.
    """

    def __init__(self, vocabulary: mv.Vocabulary, tokenizer, network_params=None, max_sequence_length=256,
                 no_cuda=False):
        """
        Implements an RNN.
        :param vocabulary: Vocabulary to use.
        :param tokenizer: Tokenizer to use.
        :param network_params: Dictionary with all parameters required to correctly initialize the RNN class.
        :param max_sequence_length: The max size of SMILES sequence that can be generated.
        :param no_cuda: If True, keeps the network on the CPU even when CUDA is available.
        """
        self.vocabulary = vocabulary
        self.tokenizer = tokenizer
        self.max_sequence_length = max_sequence_length
        self._model_modes = ModelModeEnum()

        if not isinstance(network_params, dict):
            network_params = {}

        self.network = RNN(len(self.vocabulary), **network_params)
        if torch.cuda.is_available() and not no_cuda:
            self.network.cuda()

        self._nll_loss = tnn.NLLLoss(reduction="none")

    def set_mode(self, mode: str):
        """Switches the network between training and inference mode."""
        if mode == self._model_modes.TRAINING:
            self.network.train()
        elif mode == self._model_modes.INFERENCE:
            self.network.eval()
        else:
            # Fix: the closing quote was missing from the message.
            raise ValueError(f"Invalid model mode '{mode}'")

    @classmethod
    def load_from_file(cls, file_path: str, sampling_mode=False):
        """
        Loads a model from a single file
        :param file_path: input file path
        :param sampling_mode: if True, puts the network into eval mode after loading.
        :return: new instance of the RNN or an exception if it was not possible to load it.
        """
        if torch.cuda.is_available():
            save_dict = torch.load(file_path)
        else:
            # Remap CUDA-saved tensors onto the CPU.
            save_dict = torch.load(file_path, map_location=lambda storage, loc: storage)

        network_params = save_dict.get("network_params", {})
        model = Model(
            vocabulary=save_dict['vocabulary'],
            tokenizer=save_dict.get('tokenizer', mv.SMILESTokenizer()),
            network_params=network_params,
            max_sequence_length=save_dict['max_sequence_length']
        )
        model.network.load_state_dict(save_dict["network"])
        if sampling_mode:
            model.network.eval()
        return model

    def save(self, file: str):
        """
        Saves the model into a file
        :param file: it's actually a path
        """
        save_dict = {
            'vocabulary': self.vocabulary,
            'tokenizer': self.tokenizer,
            'max_sequence_length': self.max_sequence_length,
            'network': self.network.state_dict(),
            'network_params': self.network.get_params()
        }
        torch.save(save_dict, file)

    def likelihood_smiles(self, smiles) -> torch.Tensor:
        """Tokenizes, encodes and pads a list of SMILES, then scores them."""
        tokens = [self.tokenizer.tokenize(smile) for smile in smiles]
        encoded = [self.vocabulary.encode(token) for token in tokens]
        sequences = [torch.tensor(encode, dtype=torch.long) for encode in encoded]

        def collate_fn(encoded_seqs):
            """Function to take a list of encoded sequences and turn them into a batch"""
            max_length = max([seq.size(0) for seq in encoded_seqs])
            collated_arr = torch.zeros(len(encoded_seqs), max_length, dtype=torch.long)  # padded with zeroes
            for i, seq in enumerate(encoded_seqs):
                collated_arr[i, :seq.size(0)] = seq
            return collated_arr

        padded_sequences = collate_fn(sequences)
        return self.likelihood(padded_sequences)

    def likelihood(self, sequences) -> torch.Tensor:
        """
        Retrieves the likelihood of a given sequence. Used in training.
        :param sequences: (batch_size, sequence_length) A batch of sequences
        :return: (batch_size) Log likelihood for each example.
        """
        # Fix: move the batch onto the network's device so CPU-built batches
        # also work when the model lives on CUDA (no-op on CPU-only setups).
        sequences = sequences.to(self._device())
        logits, _ = self.network(sequences[:, :-1])  # all steps done at once
        log_probs = logits.log_softmax(dim=2)
        return self._nll_loss(log_probs.transpose(1, 2), sequences[:, 1:]).sum(dim=1)

    def sample_smiles(self, num=128, batch_size=128) -> Tuple[List, np.array]:
        """
        Samples n SMILES from the model.
        :param num: Number of SMILES to sample.
        :param batch_size: Number of sequences to sample at the same time.
        :return:
            :smiles: (n) A list with SMILES.
            :likelihoods: (n) A list of likelihoods.
        """
        batch_sizes = [batch_size for _ in range(num // batch_size)] + [num % batch_size]
        smiles_sampled = []
        likelihoods_sampled = []

        for size in batch_sizes:
            if not size:
                break
            seqs, likelihoods = self._sample(batch_size=size)
            smiles = [self.tokenizer.untokenize(self.vocabulary.decode(seq)) for seq in seqs.cpu().numpy()]

            smiles_sampled.extend(smiles)
            likelihoods_sampled.append(likelihoods.data.cpu().numpy())

            del seqs, likelihoods
        return smiles_sampled, np.concatenate(likelihoods_sampled)

    def sample_sequences_and_smiles(self, batch_size=128) -> Tuple[torch.Tensor, List, torch.Tensor]:
        """Samples a batch and returns the raw sequences, the SMILES and the NLLs."""
        seqs, likelihoods = self._sample(batch_size=batch_size)
        smiles = [self.tokenizer.untokenize(self.vocabulary.decode(seq)) for seq in seqs.cpu().numpy()]
        return seqs, smiles, likelihoods

    # @torch.no_grad()  # NOTE(review): left disabled as in the original — confirm no caller backprops through sampling before enabling
    def _sample(self, batch_size=128) -> Tuple[torch.Tensor, torch.Tensor]:
        """Autoregressively samples `batch_size` sequences and their NLLs."""
        # Fix: allocate everything on the network's device; the original always
        # allocated on the CPU, which crashes when the network is on CUDA.
        device = self._device()
        start_token = torch.zeros(batch_size, dtype=torch.long, device=device)
        start_token[:] = self.vocabulary["^"]
        input_vector = start_token
        sequences = [self.vocabulary["^"] * torch.ones([batch_size, 1], dtype=torch.long, device=device)]
        # NOTE: The first token never gets added in the loop so the sequences are initialized with a start token
        hidden_state = None
        nlls = torch.zeros(batch_size, device=device)
        for _ in range(self.max_sequence_length - 1):
            logits, hidden_state = self.network(input_vector.unsqueeze(1), hidden_state)
            logits = logits.squeeze(1)
            probabilities = logits.softmax(dim=1)
            log_probs = logits.log_softmax(dim=1)
            input_vector = torch.multinomial(probabilities, 1).view(-1)
            sequences.append(input_vector.view(-1, 1))
            nlls += self._nll_loss(log_probs, input_vector)
            # Token index 0 is the end token: stop once every sequence has emitted it.
            if input_vector.sum() == 0:
                break

        sequences = torch.cat(sequences, 1)
        return sequences.data, nlls

    def _device(self):
        """Device on which the network parameters currently live."""
        return next(self.network.parameters()).device

    def get_network_parameters(self):
        """Returns the network parameters (for building an optimizer)."""
        return self.network.parameters()
from typing import List
from torch.utils.data import DataLoader
from reinvent_models.link_invent.dataset.paired_dataset import PairedDataset
from reinvent_models.link_invent.dto.linkinvent_batch_dto import LinkInventBatchDTO
from reinvent_models.link_invent.link_invent_model import LinkInventModel
from reinvent_models.model_factory.generative_model_base import GenerativeModelBase
from reinvent_models.link_invent.dto import BatchLikelihoodDTO
from reinvent_models.link_invent.dto.sampled_sequence_dto import SampledSequencesDTO
class LinkInventAdapter(GenerativeModelBase):
    """Adapter exposing a LinkInvent model through the common generative-model interface."""

    def __init__(self, path_to_file: str, mode: str):
        # Load the wrapped model and mirror the attributes callers rely on.
        self.generative_model = LinkInventModel.load_from_file(path_to_file, mode)
        self.vocabulary = self.generative_model.vocabulary
        self.max_sequence_length = self.generative_model.max_sequence_length
        self.network = self.generative_model.network

    def save_to_file(self, path):
        # Persistence is delegated to the wrapped model.
        self.generative_model.save_to_file(path)

    def likelihood(self, warheads_seqs, warheads_seq_lengths, linker_seqs, linker_seq_lengths):
        return self.generative_model.likelihood(warheads_seqs, warheads_seq_lengths,
                                                linker_seqs, linker_seq_lengths)

    def likelihood_smiles(self, sampled_sequence_list: List[SampledSequencesDTO]) -> BatchLikelihoodDTO:
        # Score all (warheads, linker) pairs in one batch: the loader's batch
        # size equals the dataset size, so the loop runs exactly once.
        pairs = [[sampled.input, sampled.output] for sampled in sampled_sequence_list]
        paired_dataset = PairedDataset(pairs, self.get_vocabulary())
        loader = DataLoader(paired_dataset, batch_size=len(paired_dataset),
                            collate_fn=paired_dataset.collate_fn, shuffle=False)
        for warheads_batch, linker_batch in loader:
            nll = self.generative_model.likelihood(*warheads_batch, *linker_batch)
            batch_dto = LinkInventBatchDTO(warheads_batch, linker_batch)
            likelihood_dto = BatchLikelihoodDTO(batch_dto, nll)
        return likelihood_dto

    def sample(self, warheads_seqs, warheads_seq_lengths):
        return self.generative_model.sample(warheads_seqs, warheads_seq_lengths)

    def set_mode(self, mode: str):
        self.generative_model.set_mode(mode)

    def get_network_parameters(self):
        return self.generative_model.get_network_parameters()

    def get_vocabulary(self):
        return self.vocabulary
from typing import List
from torch.utils.data import DataLoader
from reinvent_models.lib_invent.models.dataset import DecoratorDataset
from reinvent_models.lib_invent.models.model import DecoratorModel
from reinvent_models.link_invent.dto.linkinvent_batch_dto import LinkInventBatchDTO
from reinvent_models.model_factory.generative_model_base import GenerativeModelBase
from reinvent_models.link_invent.dto import BatchLikelihoodDTO
from reinvent_models.link_invent.dto.sampled_sequence_dto import SampledSequencesDTO
class LibInventAdapter(GenerativeModelBase):
    """Adapter exposing a LibInvent decorator model through the common generative-model interface."""

    def __init__(self, path_to_file: str, mode: str):
        # Load the wrapped model and mirror the attributes callers rely on.
        self.generative_model = DecoratorModel.load_from_file(path_to_file, mode)
        self.vocabulary = self.generative_model.vocabulary
        self.max_sequence_length = self.generative_model.max_sequence_length
        self.network = self.generative_model.network

    def save_to_file(self, path):
        # Note: the wrapped model's save method is called `save`.
        self.generative_model.save(path)

    def likelihood(self, scaffold_seqs, scaffold_seq_lengths, decoration_seqs, decoration_seq_lengths):
        return self.generative_model.likelihood(scaffold_seqs, scaffold_seq_lengths,
                                                decoration_seqs, decoration_seq_lengths)

    def sample(self, scaffold_seqs, scaffold_seq_lengths):
        return self.generative_model.sample_decorations(scaffold_seqs, scaffold_seq_lengths)

    def set_mode(self, mode: str):
        self.generative_model.set_mode(mode)

    def get_network_parameters(self):
        return self.generative_model.get_network_parameters()

    def get_vocabulary(self):
        return self.vocabulary

    def likelihood_smiles(self, sampled_sequence_list: List[SampledSequencesDTO]) -> BatchLikelihoodDTO:
        # Score all (scaffold, decoration) pairs in one batch: the loader's
        # batch size equals the dataset size, so the loop runs exactly once.
        pairs = [[sampled.input, sampled.output] for sampled in sampled_sequence_list]
        decorator_dataset = DecoratorDataset(pairs, self.vocabulary)
        loader = DataLoader(decorator_dataset, batch_size=len(decorator_dataset),
                            collate_fn=decorator_dataset.collate_fn, shuffle=False)
        for scaffold_batch, decoration_batch in loader:
            nll = self.generative_model.likelihood(*scaffold_batch, *decoration_batch)
            batch_dto = LinkInventBatchDTO(scaffold_batch, decoration_batch)
            likelihood_dto = BatchLikelihoodDTO(batch_dto, nll)
        return likelihood_dto
import numpy as np
import math
from scipy.interpolate import interp1d
from reinvent_scoring.scoring.enums import TransformationTypeEnum
from reinvent_scoring.scoring.enums import TransformationParametersEnum
class TransformationFactory:
    """Maps transformation type names to the functions that rescale raw scores into [0, 1]."""

    def __init__(self):
        self._transformation_function_registry = self._default_transformation_function_registry()

    def _default_transformation_function_registry(self) -> dict:
        # Registry keyed by the transformation type names declared in the enum.
        enum = TransformationTypeEnum()
        transformation_list = {
            enum.SIGMOID: self.sigmoid_transformation,
            enum.REVERSE_SIGMOID: self.reverse_sigmoid_transformation,
            enum.DOUBLE_SIGMOID: self.double_sigmoid,
            enum.NO_TRANSFORMATION: self.no_transformation,
            enum.RIGHT_STEP: self.right_step,
            enum.LEFT_STEP: self.left_step,
            enum.STEP: self.step,
            enum.CUSTOM_INTERPOLATION: self.custom_interpolation
        }
        return transformation_list

    def get_transformation_function(self, parameters: dict):
        """Returns the transformation function named in `parameters`."""
        transformation_type = parameters[TransformationParametersEnum.TRANSFORMATION_TYPE]
        transformation_function = self._transformation_function_registry[transformation_type]
        return transformation_function

    def no_transformation(self, predictions: list, parameters: dict) -> np.array:
        """Returns the predictions unchanged, as a float32 array."""
        return np.array(predictions, dtype=np.float32)

    def right_step(self, predictions, parameters) -> np.array:
        """1 for values >= LOW, else 0."""
        _low = parameters[TransformationParametersEnum.LOW]

        def _right_step_formula(value, low):
            if value >= low:
                return 1
            return 0

        transformed = [_right_step_formula(value, _low) for value in predictions]
        return np.array(transformed, dtype=np.float32)

    def left_step(self, predictions, parameters) -> np.array:
        """1 for values <= LOW, else 0."""
        _low = parameters[TransformationParametersEnum.LOW]

        def _left_step_formula(value, low):
            if value <= low:
                return 1
            return 0

        transformed = [_left_step_formula(value, _low) for value in predictions]
        return np.array(transformed, dtype=np.float32)

    def step(self, predictions, parameters) -> np.array:
        """1 for values inside [LOW, HIGH], else 0."""
        _low = parameters[TransformationParametersEnum.LOW]
        _high = parameters[TransformationParametersEnum.HIGH]

        def _step_formula(value, low, high):
            if low <= value <= high:
                return 1
            return 0

        transformed = [_step_formula(value, _low, _high) for value in predictions]
        return np.array(transformed, dtype=np.float32)

    def sigmoid_transformation(self, predictions: list, parameters: dict) -> np.array:
        """Smooth increasing transformation from ~0 (at LOW) to ~1 (at HIGH), steepness K."""
        _low = parameters[TransformationParametersEnum.LOW]
        _high = parameters[TransformationParametersEnum.HIGH]
        _k = parameters[TransformationParametersEnum.K]

        def _sigmoid_formula(pred_val, low, high, k) -> float:
            try:
                return 1 / (1 + math.pow(10, (10 * k * (pred_val - (low + high) * 0.5) / (low - high))))
            except (OverflowError, ZeroDivisionError):
                # Fix: extreme inputs overflow 10**x and low == high divides by
                # zero; return 0 like the sibling transformations do.
                return 0

        transformed = [_sigmoid_formula(pred_val, _low, _high, _k) for pred_val in predictions]
        return np.array(transformed, dtype=np.float32)

    def reverse_sigmoid_transformation(self, predictions: list, parameters: dict) -> np.array:
        """Smooth decreasing transformation from ~1 (at LOW) to ~0 (at HIGH), steepness K."""
        _low = parameters[TransformationParametersEnum.LOW]
        _high = parameters[TransformationParametersEnum.HIGH]
        _k = parameters[TransformationParametersEnum.K]

        def _reverse_sigmoid_formula(value, low, high, k) -> float:
            try:
                return 1 / (1 + 10 ** (k * (value - (high + low) / 2) * 10 / (high - low)))
            except (OverflowError, ZeroDivisionError):
                # Fix: narrowed from a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit and programming errors.
                return 0

        transformed = [_reverse_sigmoid_formula(pred_val, _low, _high, _k) for pred_val in predictions]
        return np.array(transformed, dtype=np.float32)

    def double_sigmoid(self, predictions: list, parameters: dict) -> np.array:
        """Bell-shaped transformation: ~1 inside [LOW, HIGH], falling off outside."""
        _low = parameters[TransformationParametersEnum.LOW]
        _high = parameters[TransformationParametersEnum.HIGH]
        _coef_div = parameters[TransformationParametersEnum.COEF_DIV]
        _coef_si = parameters[TransformationParametersEnum.COEF_SI]
        _coef_se = parameters[TransformationParametersEnum.COEF_SE]

        def _double_sigmoid_formula(value, low, high, coef_div=100., coef_si=150., coef_se=150.):
            try:
                A = 10 ** (coef_se * (value / coef_div))
                B = (10 ** (coef_se * (value / coef_div)) + 10 ** (coef_se * (low / coef_div)))
                C = (10 ** (coef_si * (value / coef_div)) / (
                        10 ** (coef_si * (value / coef_div)) + 10 ** (coef_si * (high / coef_div))))
                return (A / B) - C
            except (OverflowError, ZeroDivisionError):
                # Fix: narrowed from a bare `except:`; extreme values overflow
                # 10**x and coef_div == 0 divides by zero.
                return 0

        transformed = [_double_sigmoid_formula(pred_val, _low, _high, _coef_div, _coef_si, _coef_se) for pred_val in
                       predictions]
        return np.array(transformed, dtype=np.float32)

    def custom_interpolation(self, predictions: list, parameters: dict) -> np.array:
        """Adapted from the paper:
        'Efficient Multi-Objective Molecular Optimization in a Continuous Latent Space'
        by Robin Winter, Floriane Montanari, Andreas Steffen, Hans Briem, Frank Noé and Djork-Arné Clevert.
        """

        def _transformation_function(interpolation_map, truncate_left=True, truncate_right=True):
            # Build a piecewise-linear map; the optional sentinel points clamp
            # the output outside the origin range instead of extrapolating.
            origin = [point['origin'] for point in interpolation_map]
            destination = [point['destination'] for point in interpolation_map]
            assert len(origin) == len(destination)
            if truncate_left:
                origin = [origin[0] - 1] + origin
                destination = [destination[0]] + destination
            if truncate_right:
                origin.append(origin[-1] + 1)
                destination.append(destination[-1])
            return interp1d(origin, destination, fill_value='extrapolate')

        desirability = parameters.get(TransformationParametersEnum.INTERPOLATION_MAP,
                                      [{"origin": 0.0, "destination": 0.0},
                                       {"origin": 1.0, "destination": 1.0}])
        truncate_left = parameters.get(TransformationParametersEnum.TRUNCATE_LEFT, True)
        truncate_right = parameters.get(TransformationParametersEnum.TRUNCATE_RIGHT, True)
        transformation = _transformation_function(desirability, truncate_left, truncate_right)
        transformed = transformation(predictions)
        return transformed
from typing import List
import pandas as pd
from reinvent_scoring.scoring.score_summary import ComponentSummary
from reinvent_scoring.scoring.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum
class DiversityFilterMemory:
    """Dataframe-backed record of every unique scored SMILES with its component scores."""

    def __init__(self):
        self._sf_component_name = ScoringFunctionComponentNameEnum()
        self._memory_dataframe = pd.DataFrame({"Step": [], "Scaffold": [], "SMILES": []})

    def update(self, indx: int, score: float, smile: str, scaffold: str, components: List, step: int):
        # Collect per-component scores (plus raw scores and the total) and
        # store the molecule once — duplicates are ignored.
        component_scores = {c.parameters.name: float(c.total_score[indx]) for c in components}
        component_scores = self._include_raw_score(indx, component_scores, components)
        component_scores[self._sf_component_name.TOTAL_SCORE] = float(score)
        if not self.smiles_exists(smile):
            self._add_to_memory_dataframe(step, smile, scaffold, component_scores)

    def _add_to_memory_dataframe(self, step: int, smile: str, scaffold: str, component_scores: {} = None):
        headers, values = [], []
        for name, value in component_scores.items():
            headers.append(name)
            values.append(value)
        for header, value in (("Step", step), ("Scaffold", scaffold), ("SMILES", smile)):
            headers.append(header)
            values.append(value)
        new_row = pd.DataFrame([values], columns=headers)
        self._memory_dataframe = pd.concat([self._memory_dataframe, new_row], ignore_index=True, sort=False)

    def get_memory(self) -> pd.DataFrame:
        return self._memory_dataframe

    def set_memory(self, memory: pd.DataFrame):
        self._memory_dataframe = memory

    def smiles_exists(self, smiles: str):
        if len(self._memory_dataframe) == 0:
            return False
        return smiles in self._memory_dataframe['SMILES'].values

    def scaffold_instances_count(self, scaffold: str):
        return (self._memory_dataframe["Scaffold"].values == scaffold).sum()

    def number_of_scaffolds(self):
        return len(set(self._memory_dataframe["Scaffold"].values))

    def number_of_smiles(self):
        return len(set(self._memory_dataframe["SMILES"].values))

    def _include_raw_score(self, indx: int, component_scores: dict, components: List[ComponentSummary]):
        # Components that expose a raw (untransformed) score get a raw_* column.
        raw_scores = {f'raw_{c.parameters.name}': float(c.raw_score[indx])
                      for c in components if c.raw_score is not None}
        return {**component_scores, **raw_scores}
from copy import deepcopy
import numpy as np
from rdkit.Chem.Scaffolds import MurckoScaffold
from reinvent_scoring.scoring.diversity_filters.curriculum_learning import DiversityFilterParameters
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.base_diversity_filter import BaseDiversityFilter
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.memory_record_dto import MemoryRecordDTO
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.update_diversity_filter_dto import \
UpdateDiversityFilterDTO
class IdenticalTopologicalScaffold(BaseDiversityFilter):
    """Penalizes compounds based on exact Topological Scaffolds previously generated."""

    def __init__(self, parameters: DiversityFilterParameters):
        super().__init__(parameters)

    def update_score(self, dto: UpdateDiversityFilterDTO) -> np.array:
        summary = deepcopy(dto.score_summary)
        scores = summary.total_score
        scored_smiles = summary.scored_smiles

        for idx in summary.valid_idxs:
            canonical = self._chemistry.convert_to_rdkit_smiles(scored_smiles[idx])
            scaffold = self._calculate_scaffold(canonical)
            # Previously seen molecules are zeroed out entirely.
            if self._smiles_exists(canonical):
                scores[idx] = 0
            if scores[idx] >= self.parameters.minscore:
                loggable = self._compose_loggable_data(dto.loggable_data[idx]) if dto.loggable_data else ''
                record = MemoryRecordDTO(idx, dto.step, scores[idx], canonical, scaffold, loggable,
                                         summary.scaffold_log)
                self._add_to_memory(record)
                # Over-populated scaffold buckets cause the score to be penalized.
                scores[idx] = self._penalize_score(scaffold, scores[idx])

        return scores

    def _calculate_scaffold(self, smile):
        # Generic (topological) Murcko scaffold; empty string when the SMILES
        # cannot be parsed or the scaffold cannot be generalized.
        mol = self._chemistry.smile_to_mol(smile)
        if not mol:
            return ''
        try:
            generic = MurckoScaffold.MakeScaffoldGeneric(MurckoScaffold.GetScaffoldForMol(mol))
            return self._chemistry.mol_to_smiles(generic)
        except ValueError:
            return ''
import abc
import numpy as np
import pandas as pd
from reinvent_chemistry.conversions import Conversions
from reinvent_scoring.scoring.diversity_filters.curriculum_learning import DiversityFilterParameters, \
DiversityFilterMemory
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.loggable_data_dto import UpdateLoggableDataDTO
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.memory_record_dto import MemoryRecordDTO
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.update_diversity_filter_dto import \
UpdateDiversityFilterDTO
class BaseDiversityFilter(abc.ABC):
    """Base class for diversity filters: owns the shared memory and penalty logic."""

    @abc.abstractmethod
    def __init__(self, parameters: DiversityFilterParameters):
        self.parameters = parameters
        self._diversity_filter_memory = DiversityFilterMemory()
        self._chemistry = Conversions()

    @abc.abstractmethod
    def update_score(self, update_dto: UpdateDiversityFilterDTO) -> np.array:
        raise NotImplementedError("The method 'update_score' is not implemented!")

    def get_memory_as_dataframe(self) -> pd.DataFrame:
        """Returns the filter memory as a pandas dataframe."""
        return self._diversity_filter_memory.get_memory()

    def set_memory_from_dataframe(self, memory: pd.DataFrame):
        """Replaces the filter memory with the given dataframe."""
        self._diversity_filter_memory.set_memory(memory)

    def number_of_smiles_in_memory(self) -> int:
        return self._diversity_filter_memory.number_of_smiles()

    def number_of_scaffold_in_memory(self) -> int:
        return self._diversity_filter_memory.number_of_scaffolds()

    def update_bucket_size(self, bucket_size: int):
        self.parameters.bucket_size = bucket_size

    def _calculate_scaffold(self, smile):
        # Subclasses define what "scaffold" means for their filtering strategy.
        raise NotImplementedError

    def _smiles_exists(self, smile):
        return self._diversity_filter_memory.smiles_exists(smile)

    def _add_to_memory(self, memory_dto: MemoryRecordDTO):
        self._diversity_filter_memory.update(memory_dto)

    def _penalize_score(self, scaffold, score):
        """Penalizes the score if the scaffold bucket is full"""
        if self._diversity_filter_memory.scaffold_instances_count(scaffold) > self.parameters.bucket_size:
            score = 0.
        return score

    def _compose_loggable_data(self, dto: UpdateLoggableDataDTO):
        """Concatenates the DTO fields into a single pipe-separated string.

        Fix: uses explicit `is not None` checks instead of truthiness, so a
        legitimate likelihood of 0.0 is no longer silently dropped.
        """
        prior_likelihood = f'{dto.prior_likelihood}|' if dto.prior_likelihood is not None else ''
        likelihood = f'{dto.likelihood}|' if dto.likelihood is not None else ''
        input_part = f'{dto.input}|' if dto.input is not None else ''  # renamed: was shadowing builtin `input`
        output_part = f'{dto.output}' if dto.output is not None else ''
        loggable_data = f'{prior_likelihood}{likelihood}{input_part}{output_part}'
        return loggable_data
from copy import deepcopy
import numpy as np
from rdkit.Chem.Scaffolds import MurckoScaffold
from reinvent_scoring.scoring.diversity_filters.curriculum_learning import DiversityFilterParameters
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.base_diversity_filter import BaseDiversityFilter
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.loggable_data_dto import UpdateLoggableDataDTO
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.memory_record_dto import MemoryRecordDTO
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.update_diversity_filter_dto import \
UpdateDiversityFilterDTO
class IdenticalMurckoScaffold(BaseDiversityFilter):
    """Penalizes compounds based on exact Murcko Scaffolds previously generated."""

    def __init__(self, parameters: DiversityFilterParameters):
        super().__init__(parameters)

    def update_score(self, dto: UpdateDiversityFilterDTO) -> np.array:
        summary = deepcopy(dto.score_summary)
        scores = summary.total_score
        scored_smiles = summary.scored_smiles

        for idx in summary.valid_idxs:
            canonical = self._chemistry.convert_to_rdkit_smiles(scored_smiles[idx])
            scaffold = self._calculate_scaffold(canonical)
            # Previously seen molecules are zeroed out entirely.
            if self._smiles_exists(canonical):
                scores[idx] = 0
            if scores[idx] >= self.parameters.minscore:
                loggable = self._compose_loggable_data(dto.loggable_data[idx]) if dto.loggable_data else ''
                record = MemoryRecordDTO(idx, dto.step, scores[idx], canonical, scaffold, loggable,
                                         summary.scaffold_log)
                self._add_to_memory(record)
                # Over-populated scaffold buckets cause the score to be penalized.
                scores[idx] = self._penalize_score(scaffold, scores[idx])

        return scores

    # TODO: move this to reinvent chemistry
    def _calculate_scaffold(self, smile):
        # Plain Murcko scaffold; empty string when the SMILES cannot be parsed
        # or the scaffold cannot be generated.
        mol = self._chemistry.smile_to_mol(smile)
        if not mol:
            return ''
        try:
            scaffold = MurckoScaffold.GetScaffoldForMol(mol)
            return self._chemistry.mol_to_smiles(scaffold)
        except ValueError:
            return ''
from typing import List, Dict
import pandas as pd
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.column_names_enum import ColumnNamesEnum
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.memory_record_dto import MemoryRecordDTO
from reinvent_scoring.scoring.score_summary import ComponentSummary
from reinvent_scoring.scoring.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum
class DiversityFilterMemory:
    """Dataframe-backed record of unique scored SMILES with scaffold, step and metadata columns."""

    def __init__(self):
        self._sf_component_name = ScoringFunctionComponentNameEnum()
        self._column_name = ColumnNamesEnum()
        columns = {self._column_name.STEP: [], self._column_name.SCAFFOLD: [], self._column_name.SMILES: [],
                   self._column_name.METADATA: []}
        self._memory_dataframe = pd.DataFrame(columns)

    def update(self, dto: MemoryRecordDTO):
        # Collect per-component scores (plus raw scores and the total) and
        # store the molecule once — duplicates are ignored.
        component_scores = {c.parameters.name: float(c.total_score[dto.id]) for c in dto.components}
        component_scores = self._include_raw_score(dto.id, component_scores, dto.components)
        component_scores[self._sf_component_name.TOTAL_SCORE] = float(dto.score)
        if not self.smiles_exists(dto.smile):
            self._add_to_memory_dataframe(dto, component_scores)

    def _add_to_memory_dataframe(self, dto: MemoryRecordDTO, component_scores: Dict):
        headers, values = [], []
        for name, value in component_scores.items():
            headers.append(name)
            values.append(value)
        for header, value in ((self._column_name.STEP, dto.step),
                              (self._column_name.SCAFFOLD, dto.scaffold),
                              (self._column_name.SMILES, dto.smile),
                              (self._column_name.METADATA, dto.loggable_data)):
            headers.append(header)
            values.append(value)
        new_row = pd.DataFrame([values], columns=headers)
        self._memory_dataframe = pd.concat([self._memory_dataframe, new_row], ignore_index=True, sort=False)

    def get_memory(self) -> pd.DataFrame:
        return self._memory_dataframe

    def set_memory(self, memory: pd.DataFrame):
        self._memory_dataframe = memory

    def smiles_exists(self, smiles: str):
        if len(self._memory_dataframe) == 0:
            return False
        return smiles in self._memory_dataframe[self._column_name.SMILES].values

    def scaffold_instances_count(self, scaffold: str):
        return (self._memory_dataframe[self._column_name.SCAFFOLD].values == scaffold).sum()

    def number_of_scaffolds(self):
        return len(set(self._memory_dataframe[self._column_name.SCAFFOLD].values))

    def number_of_smiles(self):
        return len(set(self._memory_dataframe[self._column_name.SMILES].values))

    def _include_raw_score(self, indx: int, component_scores: dict, components: List[ComponentSummary]):
        # Components that expose a raw (untransformed) score get a raw_* column.
        raw_scores = {f'raw_{c.parameters.name}': float(c.raw_score[indx])
                      for c in components if c.raw_score is not None}
        return {**component_scores, **raw_scores}
from copy import deepcopy
import numpy as np
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem.AtomPairs import Pairs
from rdkit.Chem.Scaffolds import MurckoScaffold
from reinvent_scoring.scoring.diversity_filters.curriculum_learning import DiversityFilterParameters
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.base_diversity_filter import BaseDiversityFilter
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.memory_record_dto import MemoryRecordDTO
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.update_diversity_filter_dto import \
UpdateDiversityFilterDTO
class ScaffoldSimilarity(BaseDiversityFilter):
    """Penalizes compounds based on atom pair Tanimoto similarity to previously generated Murcko Scaffolds."""

    def __init__(self, parameters: DiversityFilterParameters):
        super().__init__(parameters)
        # scaffold SMILES -> atom-pair fingerprint for every scaffold "bucket" seen so far
        self._scaffold_fingerprints = {}

    def update_score(self, dto: UpdateDiversityFilterDTO) -> np.array:
        """Return the (possibly penalized) total scores for the batch described by `dto`.

        Duplicate SMILES are zeroed; compounds at or above `minscore` are added to the
        memory and penalized once their scaffold bucket is full.
        """
        score_summary = deepcopy(dto.score_summary)
        scores = score_summary.total_score
        smiles = score_summary.scored_smiles
        for i in score_summary.valid_idxs:
            smile = self._chemistry.convert_to_rdkit_smiles(smiles[i])
            scaffold = self._calculate_scaffold(smile)
            # check, if another scaffold should be used as "bucket", because it is very similar as defined by the
            # "minsimilarity" threshold; if not, this call is a no-op and the smiles' normal Murcko scaffold will be used
            # -> usage of the "murcko scaffold filter" is actually a special case, where "minsimilarity" is 1.0
            scaffold = self._find_similar_scaffold(scaffold)
            scores[i] = 0 if self._smiles_exists(smile) else scores[i]
            if scores[i] >= self.parameters.minscore:
                loggable_data = self._compose_loggable_data(dto.loggable_data[i]) if dto.loggable_data else ''
                memory_dto = MemoryRecordDTO(i, dto.step, scores[i], smile, scaffold, loggable_data,
                                             score_summary.scaffold_log)
                self._add_to_memory(memory_dto)
                scores[i] = self._penalize_score(scaffold, scores[i])
        return scores

    def _calculate_scaffold(self, smile):
        """Return the Murcko scaffold SMILES of `smile`, or '' when it cannot be derived."""
        mol = self._chemistry.smile_to_mol(smile)
        if mol:
            try:
                scaffold = MurckoScaffold.GetScaffoldForMol(mol)
                scaffold_smiles = self._chemistry.mol_to_smiles(scaffold)
            except ValueError:
                scaffold_smiles = ''
        else:
            scaffold_smiles = ''
        return scaffold_smiles

    def _find_similar_scaffold(self, scaffold):
        """
        this function tries to find a "similar" scaffold (according to the threshold set by parameter "minsimilarity") and if at least one
        scaffold satisfies this criteria, it will replace the smiles' scaffold with the most similar one
        -> in effect, this reduces the number of scaffold buckets in the memory (the lower parameter "minsimilarity", the more
        pronounced the reduction)
        generate a "mol" scaffold from the smile and calculate an atom pair fingerprint
        :param scaffold: scaffold represented by a smiles string
        :return: closest scaffold given a certain similarity threshold
        """
        # BUG FIX: this used `scaffold is not ''`, an identity comparison against a literal
        # that is unreliable for non-interned strings and raises SyntaxWarning on Python >= 3.8.
        if scaffold != '':
            fp = Pairs.GetAtomPairFingerprint(Chem.MolFromSmiles(scaffold))
            # make a list of the stored fingerprints for similarity calculations
            fps = list(self._scaffold_fingerprints.values())
            # check, if a similar scaffold entry already exists and if so, use this one instead
            if len(fps) > 0:
                similarity_scores = DataStructs.BulkDiceSimilarity(fp, fps)
                closest = np.argmax(similarity_scores)
                if similarity_scores[closest] >= self.parameters.minsimilarity:
                    scaffold = list(self._scaffold_fingerprints.keys())[closest]
                    fp = self._scaffold_fingerprints[scaffold]
            # remember the fingerprint of whichever scaffold bucket was chosen
            self._scaffold_fingerprints[scaffold] = fp
        return scaffold
import abc
from typing import List
import numpy as np
import pandas as pd
from reinvent_scoring.scoring.diversity_filters.reinvent_core.diversity_filter_memory import DiversityFilterMemory
from reinvent_scoring.scoring.diversity_filters.reinvent_core.diversity_filter_parameters import DiversityFilterParameters
from reinvent_scoring.scoring.score_summary import FinalSummary, ComponentSummary
from reinvent_chemistry.conversions import Conversions
class BaseDiversityFilter(abc.ABC):
    """Common base for diversity filters: wraps the filter memory and score-penalty helpers."""

    @abc.abstractmethod
    def __init__(self, parameters: DiversityFilterParameters):
        self.parameters = parameters
        self._diversity_filter_memory = DiversityFilterMemory()
        self._chemistry = Conversions()

    @abc.abstractmethod
    def update_score(self, score_summary: FinalSummary, step=0) -> np.array:
        raise NotImplementedError("The method 'update_score' is not implemented!")

    def get_memory_as_dataframe(self) -> pd.DataFrame:
        """Expose the filter memory as a pandas DataFrame."""
        return self._diversity_filter_memory.get_memory()

    def set_memory_from_dataframe(self, memory: pd.DataFrame):
        """Overwrite the filter memory with `memory`."""
        self._diversity_filter_memory.set_memory(memory)

    def number_of_smiles_in_memory(self) -> int:
        """Distinct SMILES currently memorised."""
        return self._diversity_filter_memory.number_of_smiles()

    def number_of_scaffold_in_memory(self) -> int:
        """Distinct scaffolds currently memorised."""
        return self._diversity_filter_memory.number_of_scaffolds()

    def update_bucket_size(self, bucket_size: int):
        """Change how many compounds a scaffold bucket may hold before penalties apply."""
        self.parameters.bucket_size = bucket_size

    def _calculate_scaffold(self, smile):
        # Subclasses decide what counts as a "scaffold" for a given SMILES.
        raise NotImplementedError

    def _smiles_exists(self, smile):
        return self._diversity_filter_memory.smiles_exists(smile)

    def _add_to_memory(self, indx: int, score, smile, scaffold, components: List[ComponentSummary], step):
        self._diversity_filter_memory.update(indx, score, smile, scaffold, components, step)

    def _penalize_score(self, scaffold, score):
        """Zero the score once the scaffold's bucket holds more than `bucket_size` entries."""
        if self._diversity_filter_memory.scaffold_instances_count(scaffold) > self.parameters.bucket_size:
            return 0.
        return score
from typing import List, Dict
import pandas as pd
from reinvent_scoring.scoring.score_summary import ComponentSummary
from reinvent_scoring.scoring.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum
class DiversityFilterMemory:
    """Pandas-backed log of all compounds accepted by the diversity filter."""

    def __init__(self):
        self._sf_component_name = ScoringFunctionComponentNameEnum()
        self._memory_dataframe = pd.DataFrame({"Step": [], "Scaffold": [], "SMILES": []})

    def update(self, indx: int, score: float, smile: str, scaffold: str, components: List[ComponentSummary], step: int):
        """Record the scores of one compound unless its SMILES was seen before."""
        component_scores = {c.parameters.name: float(c.total_score[indx]) for c in components}
        component_scores = self._include_raw_score(indx, component_scores, components)
        component_scores[self._sf_component_name.TOTAL_SCORE] = float(score)
        if not self.smiles_exists(smile):
            self._add_to_memory_dataframe(step, smile, scaffold, component_scores)

    def _add_to_memory_dataframe(self, step: int, smile: str, scaffold: str, component_scores: Dict):
        # Row is an ordered mapping: per-component scores first, then the bookkeeping columns,
        # matching the historical header order.
        row = dict(component_scores)
        row["Step"] = step
        row["Scaffold"] = scaffold
        row["SMILES"] = smile
        self._memory_dataframe = pd.concat([self._memory_dataframe, pd.DataFrame([row])],
                                           ignore_index=True, sort=False)

    def get_memory(self) -> pd.DataFrame:
        """Return the full memory as a DataFrame."""
        return self._memory_dataframe

    def set_memory(self, memory: pd.DataFrame):
        """Replace the memory wholesale with `memory`."""
        self._memory_dataframe = memory

    def smiles_exists(self, smiles: str):
        """True when `smiles` has already been recorded."""
        if len(self._memory_dataframe) == 0:
            return False
        return smiles in self._memory_dataframe['SMILES'].values

    def scaffold_instances_count(self, scaffold: str):
        """Number of stored rows whose scaffold equals `scaffold`."""
        return (self._memory_dataframe["Scaffold"].values == scaffold).sum()

    def number_of_scaffolds(self):
        """Count of distinct scaffolds in memory."""
        return len(set(self._memory_dataframe["Scaffold"].values))

    def number_of_smiles(self):
        """Count of distinct SMILES in memory."""
        return len(set(self._memory_dataframe["SMILES"].values))

    def _include_raw_score(self, indx: int, component_scores: dict, components: List[ComponentSummary]):
        # Untransformed model outputs are stored next to the transformed ones under a 'raw_' prefix.
        raw_scores = {f'raw_{c.parameters.name}': float(c.raw_score[indx])
                      for c in components if c.raw_score is not None}
        return {**component_scores, **raw_scores}
from copy import deepcopy
import numpy as np
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem.AtomPairs import Pairs
from rdkit.Chem.Scaffolds import MurckoScaffold
from reinvent_scoring.scoring.diversity_filters.reinvent_core.base_diversity_filter import BaseDiversityFilter
from reinvent_scoring.scoring.diversity_filters.reinvent_core.diversity_filter_parameters import DiversityFilterParameters
from reinvent_scoring.scoring.score_summary import FinalSummary
class ScaffoldSimilarity(BaseDiversityFilter):
    """Penalizes compounds based on atom pair Tanimoto similarity to previously generated Murcko Scaffolds."""

    def __init__(self, parameters: DiversityFilterParameters):
        super().__init__(parameters)
        # scaffold SMILES -> atom-pair fingerprint for every scaffold "bucket" seen so far
        self._scaffold_fingerprints = {}

    def update_score(self, score_summary: FinalSummary, step=0) -> np.array:
        """Return the (possibly penalized) total scores for the scored batch.

        Duplicate SMILES are zeroed; compounds at or above `minscore` are added to the
        memory and penalized once their scaffold bucket is full.
        """
        score_summary = deepcopy(score_summary)
        scores = score_summary.total_score
        smiles = score_summary.scored_smiles
        for i in score_summary.valid_idxs:
            smile = self._chemistry.convert_to_rdkit_smiles(smiles[i])
            scaffold = self._calculate_scaffold(smile)
            # check, if another scaffold should be used as "bucket", because it is very similar as defined by the
            # "minsimilarity" threshold; if not, this call is a no-op and the smiles' normal Murcko scaffold will be used
            # -> usage of the "murcko scaffold filter" is actually a special case, where "minsimilarity" is 1.0
            scaffold = self._find_similar_scaffold(scaffold)
            scores[i] = 0 if self._smiles_exists(smile) else scores[i]
            if scores[i] >= self.parameters.minscore:
                self._add_to_memory(i, scores[i], smile, scaffold, score_summary.scaffold_log, step)
                scores[i] = self._penalize_score(scaffold, scores[i])
        return scores

    def _calculate_scaffold(self, smile):
        """Return the non-isomeric Murcko scaffold SMILES of `smile`, or '' when unavailable."""
        # Consistency: single exit point instead of returning from inside the try block,
        # matching the curriculum-learning variant of this class.
        scaffold_smiles = ''
        mol = Chem.MolFromSmiles(smile)
        if mol:
            try:
                scaffold = MurckoScaffold.GetScaffoldForMol(mol)
                scaffold_smiles = Chem.MolToSmiles(scaffold, isomericSmiles=False)
            except ValueError:
                scaffold_smiles = ''
        return scaffold_smiles

    def _find_similar_scaffold(self, scaffold):
        """
        this function tries to find a "similar" scaffold (according to the threshold set by parameter "minsimilarity") and if at least one
        scaffold satisfies this criteria, it will replace the smiles' scaffold with the most similar one
        -> in effect, this reduces the number of scaffold buckets in the memory (the lower parameter "minsimilarity", the more
        pronounced the reduction)
        generate a "mol" scaffold from the smile and calculate an atom pair fingerprint
        :param scaffold: scaffold represented by a smiles string
        :return: closest scaffold given a certain similarity threshold
        """
        # BUG FIX: this used `scaffold is not ''`, an identity comparison against a literal
        # that is unreliable for non-interned strings and raises SyntaxWarning on Python >= 3.8.
        if scaffold != '':
            fp = Pairs.GetAtomPairFingerprint(Chem.MolFromSmiles(scaffold))
            # make a list of the stored fingerprints for similarity calculations
            fps = list(self._scaffold_fingerprints.values())
            # check, if a similar scaffold entry already exists and if so, use this one instead
            if len(fps) > 0:
                similarity_scores = DataStructs.BulkDiceSimilarity(fp, fps)
                closest = np.argmax(similarity_scores)
                if similarity_scores[closest] >= self.parameters.minsimilarity:
                    scaffold = list(self._scaffold_fingerprints.keys())[closest]
                    fp = self._scaffold_fingerprints[scaffold]
            # remember the fingerprint of whichever scaffold bucket was chosen
            self._scaffold_fingerprints[scaffold] = fp
        return scaffold
from typing import List
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.enums import ScoringFunctionComponentNameEnum
from reinvent_scoring.scoring.score_components import BaseScoreComponent
from reinvent_scoring.scoring.score_components import TanimotoSimilarity, \
JaccardDistance, CustomAlerts, QedScore, MatchingSubstructure, \
RocsSimilarity, ParallelRocsSimilarity, PredictivePropertyComponent, SelectivityComponent, \
SASComponent, MolWeight, PSA, RotatableBonds, HBD_Lipinski, HBA_Lipinski, \
NumRings, SlogP, AZdock, RatPKPiP, PiPLogPredictionComponent, PiPPredictionComponent, \
QptunaPiPModelComponent, StringPiPPredictionComponent, GraphLength, NumberOfStereoCenters, \
LinkerLengthRatio, LinkerGraphLength, LinkerEffectiveLength, LinkerNumRings, LinkerNumAliphaticRings, \
LinkerNumAromaticRings, LinkerNumSPAtoms, LinkerNumSP2Atoms, LinkerNumSP3Atoms, LinkerNumHBA, \
LinkerNumHBD, LinkerMolWeight, LinkerRatioRotatableBonds, DockStream, NumAromaticRings, NumAliphaticRings
from reinvent_scoring.scoring.score_components.console_invoked import Icolos
class ScoreComponentFactory:
    """Builds scoring-component instances from a list of component parameter sets."""

    def __init__(self, parameters: List[ComponentParameters]):
        self._parameters = parameters
        self._current_components = self._default_scoring_component_registry()

    def _default_scoring_component_registry(self) -> dict:
        """Map each scoring-function component name to its implementing class."""
        enum = ScoringFunctionComponentNameEnum()
        component_map = {
            enum.MATCHING_SUBSTRUCTURE: MatchingSubstructure,
            enum.ROCS_SIMILARITY: RocsSimilarity,
            enum.PREDICTIVE_PROPERTY: PredictivePropertyComponent,
            enum.TANIMOTO_SIMILARITY: TanimotoSimilarity,
            enum.JACCARD_DISTANCE: JaccardDistance,
            enum.CUSTOM_ALERTS: CustomAlerts,
            enum.QED_SCORE: QedScore,
            enum.MOLECULAR_WEIGHT: MolWeight,
            enum.TPSA: PSA,
            enum.NUM_ROTATABLE_BONDS: RotatableBonds,
            enum.GRAPH_LENGTH: GraphLength,
            enum.NUM_HBD_LIPINSKI: HBD_Lipinski,
            enum.NUM_HBA_LIPINSKI: HBA_Lipinski,
            enum.NUM_RINGS: NumRings,
            enum.NUM_AROMATIC_RINGS: NumAromaticRings,
            enum.NUM_ALIPHATIC_RINGS: NumAliphaticRings,
            enum.SLOGP: SlogP,
            enum.NUMBER_OF_STEREO_CENTERS: NumberOfStereoCenters,
            enum.PARALLEL_ROCS_SIMILARITY: ParallelRocsSimilarity,
            enum.SELECTIVITY: SelectivityComponent,
            enum.SA_SCORE: SASComponent,
            enum.AZDOCK: AZdock,
            enum.AZ_LOGD74_PIP: PiPPredictionComponent,
            enum.CACO2_INTR_PIP: PiPLogPredictionComponent,
            enum.CACO2_EFFLUX_PIP: PiPPredictionComponent,
            enum.HH_CLINT_PIP: PiPLogPredictionComponent,
            enum.HLM_CLINT_PIP: PiPLogPredictionComponent,
            enum.RH_CLINT_PIP: PiPLogPredictionComponent,
            enum.SOLUBILITY_DD_PIP: PiPLogPredictionComponent,
            enum.HERG_PIP: PiPPredictionComponent,
            enum.RAT_PK_PIP: RatPKPiP,
            enum.RA_SCORE: PiPPredictionComponent,
            enum.KPUU_PIP: PiPLogPredictionComponent,
            enum.QPTUNA_PIP_MODEL: QptunaPiPModelComponent,
            enum.THP1_CYTOTOXICITY: StringPiPPredictionComponent,
            enum.LINKER_GRAPH_LENGTH: LinkerGraphLength,
            enum.LINKER_EFFECTIVE_LENGTH: LinkerEffectiveLength,
            enum.LINKER_LENGTH_RATIO: LinkerLengthRatio,
            enum.LINKER_NUM_RINGS: LinkerNumRings,
            enum.LINKER_NUM_ALIPHATIC_RINGS: LinkerNumAliphaticRings,
            enum.LINKER_NUM_AROMATIC_RINGS: LinkerNumAromaticRings,
            enum.LINKER_NUM_SP_ATOMS: LinkerNumSPAtoms,
            enum.LINKER_NUM_SP2_ATOMS: LinkerNumSP2Atoms,
            enum.LINKER_NUM_SP3_ATOMS: LinkerNumSP3Atoms,
            enum.LINKER_NUM_HBA: LinkerNumHBA,
            enum.LINKER_NUM_HBD: LinkerNumHBD,
            enum.LINKER_MOL_WEIGHT: LinkerMolWeight,
            enum.LINKER_RATIO_ROTATABLE_BONDS: LinkerRatioRotatableBonds,
            enum.DOCKSTREAM: DockStream,
            enum.ICOLOS: Icolos,
            # enum.AIZYNTH: BuildingBlockAvailabilityComponent
        }
        return component_map

    # Backward-compatible alias for the historical (misspelled) registry method name.
    _deafult_scoring_component_registry = _default_scoring_component_registry

    def create_score_components(self) -> List[BaseScoreComponent]:
        """Instantiate every configured component.

        Raises:
            KeyError: when a configured component type is not in the registry.
        """
        def create_component(component_params):
            # Look up the implementing class for this component type.
            if component_params.component_type in self._current_components:
                component = self._current_components[component_params.component_type]
                component_instance = component(component_params)
            else:
                raise KeyError(f'Component: {component_params.component_type} is not implemented.'
                               f' Consider checking your input.')
            return component_instance
        components = [create_component(component) for component in self._parameters]
        return components
from abc import ABC, abstractmethod
from typing import List
from reinvent_chemistry.conversions import Conversions
from reinvent_scoring.scoring.enums import TransformationTypeEnum, TransformationParametersEnum
from reinvent_scoring.scoring.score_transformations import TransformationFactory
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.score_summary import ComponentSummary
from reinvent_scoring.scoring.enums import ComponentSpecificParametersEnum
class BaseScoreComponent(ABC):
    """Shared base for all scoring components.

    Holds the component parameters, a chemistry conversions helper, and resolves
    the score-transformation function from the component's specific parameters.
    """

    def __init__(self, parameters: ComponentParameters):
        self.component_specific_parameters = ComponentSpecificParametersEnum()
        self.parameters = parameters
        self._chemistry = Conversions()
        # Resolved once up front; note this may also mutate
        # self.parameters.specific_parameters (see _assign_transformation).
        self._transformation_function = self._assign_transformation(self.parameters.specific_parameters)

    @abstractmethod
    def calculate_score(self, molecules: List, step=-1) -> ComponentSummary:
        raise NotImplementedError("calculate_score method is not implemented")

    def calculate_score_for_step(self, molecules: List, step=-1) -> ComponentSummary:
        # Default behaviour ignores `step`; step-aware components override this.
        return self.calculate_score(molecules)

    def _assign_transformation(self, specific_parameters: dict):
        """Pick the transformation function declared in the component parameters.

        NOTE(review): the `specific_parameters` argument is effectively ignored —
        the body reads and mutates `self.parameters.specific_parameters` instead.
        Callers currently pass that very object, so behaviour is unchanged, but
        the aliasing is load-bearing; confirm subclass usage before simplifying.
        """
        transformation_type = TransformationTypeEnum()
        factory = TransformationFactory()
        if not self.parameters.specific_parameters: #FIXME: this is a hack
            # Guarantees a dict exists that the default transformation can be written into below.
            self.parameters.specific_parameters = {}
        transform_params = self.parameters.specific_parameters.get(
            self.component_specific_parameters.TRANSFORMATION, {})
        if transform_params:
            transform_function = factory.get_transformation_function(transform_params)
        else:
            # Record an explicit "no transformation" marker so downstream consumers
            # always find a transformation entry in the parameters.
            self.parameters.specific_parameters[
                self.component_specific_parameters.TRANSFORMATION] = {
                TransformationParametersEnum.TRANSFORMATION_TYPE: transformation_type.NO_TRANSFORMATION
            }
            transform_function = factory.no_transformation
        return transform_function
import pickle
from typing import List
from reinvent_scoring.scoring.predictive_model.base_model_container import BaseModelContainer
from reinvent_scoring.scoring.predictive_model.model_container import ModelContainer
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.score_components import BaseScoreComponent
from reinvent_scoring.scoring.score_summary import ComponentSummary
from reinvent_scoring.scoring.score_transformations import TransformationFactory
from reinvent_scoring.scoring.enums import TransformationTypeEnum, TransformationParametersEnum
class SelectivityComponent(BaseScoreComponent):
    """Scores selectivity as the (transformed) difference between a primary-activity
    model prediction and an off-target activity model prediction."""

    def __init__(self, parameters: ComponentParameters):
        super().__init__(parameters)
        self._transformation_type = TransformationTypeEnum()
        # Default model transformation is a no-op; _load_scikit_model may replace it
        # when exactly one of the two models is a regressor.
        self._model_transformation = self._assign_delta_transformation(
            {TransformationParametersEnum.TRANSFORMATION_TYPE: self._transformation_type.NO_TRANSFORMATION})
        # NOTE: order matters — _load_scikit_model reads both parameter sets,
        # so both must be prepared before either model is loaded.
        self._activity_params = self._prepare_activity_parameters(parameters)
        self._off_target_params = self._prepare_offtarget_parameters(parameters)
        self._activity_model = self._load_model(self._activity_params)
        self._off_target_activity_model = self._load_model(self._off_target_params)
        self._delta_params = self._prepare_delta_parameters(parameters)
        self._delta_transformation = self._assign_delta_transformation(self._delta_params)

    def calculate_score(self, molecules: List) -> ComponentSummary:
        """Return the selectivity scores for `molecules` (raw score is the off-target prediction)."""
        score, offtarget_score = self._calculate_offtarget_activity(molecules, self._activity_params,
                                                                    self._off_target_params, self._delta_params)
        score_summary = ComponentSummary(total_score=score, parameters=self._off_target_params,
                                         raw_score=offtarget_score)
        return score_summary

    def _load_model(self, parameters: ComponentParameters):
        """Load one of the two scikit-learn models described by `parameters`."""
        try:
            activity_model = self._load_scikit_model(parameters)
        except Exception as e:
            # BUG FIX: report the path of the model that actually failed to load;
            # previously this read self.parameters, whose top-level specific
            # parameters carry no MODEL_PATH, so the message always showed ''.
            model_path = parameters.specific_parameters.get(self.component_specific_parameters.MODEL_PATH, "")
            raise Exception(f"The loaded file `{model_path}` isn't a valid scikit-learn model: {e}.")
        return activity_model

    def _load_scikit_model(self, parameters: ComponentParameters) -> BaseModelContainer:
        # NOTE: pickle.load executes arbitrary code from the file — model paths must be trusted.
        model_path = parameters.specific_parameters.get(self.component_specific_parameters.MODEL_PATH, "")
        with open(model_path, "rb") as f:
            scikit_model = pickle.load(f)
        models_are_identical = self._activity_params.specific_parameters[
                                   self.component_specific_parameters.SCIKIT] == \
                               self._off_target_params.specific_parameters[
                                   self.component_specific_parameters.SCIKIT]
        model_is_regression = self._off_target_params.specific_parameters[
                                  self.component_specific_parameters.SCIKIT] == "regression"
        both_models_are_regression = models_are_identical and model_is_regression
        if both_models_are_regression:
            # When both models are regressors, per-model transformations are disabled and
            # only the delta transformation is applied.
            parameters.specific_parameters[self.component_specific_parameters.TRANSFORMATION] = {}
        self._assign_model_transformation(both_models_are_regression)
        packaged_model = ModelContainer(scikit_model, parameters.specific_parameters)
        return packaged_model

    def _calculate_offtarget_activity(self, molecules, activity_params, offtarget_params, delta_params):
        """Predict both activities, transform, and return (selectivity score, raw off-target score)."""
        raw_activity_score = self._activity_model\
            .predict(molecules, activity_params.specific_parameters)
        raw_offtarget_score = self._off_target_activity_model \
            .predict(molecules, offtarget_params.specific_parameters)
        activity_score = self._apply_model_transformation(raw_activity_score, activity_params.specific_parameters)
        offtarget_score = self._apply_model_transformation(raw_offtarget_score, offtarget_params.specific_parameters)
        delta = activity_score - offtarget_score
        transformed_score = self._delta_transformation(delta, delta_params) if delta_params[
            self.component_specific_parameters.TRANSFORMATION] else delta
        # Floor at 0.01 so a poor selectivity never fully zeroes the total product score.
        transformed_score[transformed_score < 0.01] = 0.01
        return transformed_score, raw_offtarget_score

    def _assign_delta_transformation(self, specific_parameters: {}):
        """Resolve a transformation function from `specific_parameters`."""
        factory = TransformationFactory()
        transform_function = factory.get_transformation_function(specific_parameters)
        return transform_function

    def _prepare_activity_parameters(self, parameters: ComponentParameters) -> ComponentParameters:
        """Wrap the primary-activity sub-parameters into their own ComponentParameters."""
        specific_parameters = parameters.specific_parameters["activity_specific_parameters"]
        activity_params = ComponentParameters(name=self.parameters.name,
                                              weight=self.parameters.weight,
                                              component_type=self.parameters.component_type,
                                              specific_parameters=specific_parameters
                                              )
        return activity_params

    def _prepare_offtarget_parameters(self, parameters: ComponentParameters) -> ComponentParameters:
        """Wrap the off-target sub-parameters into their own ComponentParameters."""
        specific_parameters = parameters.specific_parameters["offtarget_specific_parameters"]
        offtarget_params = ComponentParameters(name=self.parameters.name,
                                               weight=self.parameters.weight,
                                               component_type=self.parameters.component_type,
                                               specific_parameters=specific_parameters
                                               )
        return offtarget_params

    def _prepare_delta_parameters(self, parameters: ComponentParameters) -> dict:
        """Delta transformation applies only when both models are regressors."""
        specific_params = parameters.specific_parameters["delta_transformation_parameters"]
        specific_params[self.component_specific_parameters.TRANSFORMATION] = \
            "regression" == self._activity_params.specific_parameters[self.component_specific_parameters.SCIKIT] == \
            self._off_target_params.specific_parameters[self.component_specific_parameters.SCIKIT]
        return specific_params

    def _apply_model_transformation(self, predicted_activity, parameters: dict):
        """Apply the per-model transformation when one is configured; otherwise pass through."""
        transform_params = parameters.get(self.component_specific_parameters.TRANSFORMATION, {})
        if transform_params:
            activity = self._model_transformation(predicted_activity, transform_params)
        else:
            activity = predicted_activity
        return activity

    def _assign_model_transformation(self, both_models_are_regression: bool):
        """When only one model is a regressor, its configured transformation becomes the model transformation."""
        if both_models_are_regression:
            return
        if self._activity_params.specific_parameters.get(self.component_specific_parameters.SCIKIT) == "regression":
            self._model_transformation = self._assign_delta_transformation(
                self._activity_params.specific_parameters.get(self.component_specific_parameters.TRANSFORMATION, {}))
        if self._off_target_params.specific_parameters.get(self.component_specific_parameters.SCIKIT) == "regression":
            self._model_transformation = self._assign_delta_transformation(
                self._off_target_params.specific_parameters.get(self.component_specific_parameters.TRANSFORMATION, {}))
import pickle
from typing import List
from reinvent_scoring.scoring.predictive_model.model_container import ModelContainer
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.score_components import BaseScoreComponent
from reinvent_scoring.scoring.score_summary import ComponentSummary
from reinvent_scoring.scoring.score_transformations import TransformationFactory
from reinvent_scoring.scoring.enums import TransformationTypeEnum, TransformationParametersEnum
class PredictivePropertyComponent(BaseScoreComponent):
    """Scores molecules with a pickled scikit-learn model plus an optional score transformation."""

    def __init__(self, parameters: ComponentParameters):
        super().__init__(parameters)
        self.activity_model = self._load_model(parameters)
        self._transformation_function = self._assign_transformation(parameters.specific_parameters)

    def calculate_score(self, molecules: List, step=-1) -> ComponentSummary:
        """Predict, transform, and wrap the scores for `molecules`."""
        score, raw_score = self._predict_and_transform(molecules)
        score_summary = ComponentSummary(total_score=score, parameters=self.parameters, raw_score=raw_score)
        return score_summary

    def _predict_and_transform(self, molecules: List):
        # Returns (transformed score, raw model prediction).
        score = self.activity_model.predict(molecules, self.parameters.specific_parameters)
        transformed_score = self._apply_transformation(score, self.parameters.specific_parameters)
        return transformed_score, score

    def _load_model(self, parameters: ComponentParameters):
        """Load the model container, surfacing a clear error on failure."""
        try:
            activity_model = self._load_container(parameters)
        except Exception as e:
            # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit
            # and hid the original cause; narrow the clause and include the error.
            model_path = self.parameters.specific_parameters.get(self.component_specific_parameters.MODEL_PATH, "")
            raise Exception(f"The loaded file `{model_path}` isn't a valid scikit-learn model: {e}")
        return activity_model

    def _load_container(self, parameters: ComponentParameters):
        # NOTE: pickle.load executes arbitrary code from the file — model paths must be trusted.
        model_path = self.parameters.specific_parameters.get(self.component_specific_parameters.MODEL_PATH, "")
        with open(model_path, "rb") as f:
            scikit_model = pickle.load(f)
        packaged_model = ModelContainer(scikit_model, parameters.specific_parameters)
        return packaged_model

    def _apply_transformation(self, predicted_activity, parameters: dict):
        """Apply the configured transformation when present; otherwise pass the prediction through."""
        transform_params = parameters.get(self.component_specific_parameters.TRANSFORMATION)
        if transform_params:
            activity = self._transformation_function(predicted_activity, transform_params)
        else:
            activity = predicted_activity
        return activity

    def _assign_transformation(self, specific_parameters: dict):
        """Resolve the transformation function, defaulting to 'no transformation'."""
        transformation_type = TransformationTypeEnum()
        transform_params = specific_parameters.get(self.component_specific_parameters.TRANSFORMATION)
        if not transform_params:
            # BUG FIX: the default used to be written into `specific_parameters` while the
            # still-empty `transform_params` was handed to the factory, which cannot resolve
            # a transformation type from None. Build the default and use it for the lookup.
            transform_params = {
                TransformationParametersEnum.TRANSFORMATION_TYPE: transformation_type.NO_TRANSFORMATION
            }
            specific_parameters[self.component_specific_parameters.TRANSFORMATION] = transform_params
        factory = TransformationFactory()
        transform_function = factory.get_transformation_function(transform_params)
        return transform_function
from abc import abstractmethod
from typing import List
import numpy as np
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.score_components import BaseScoreComponent
from reinvent_scoring.scoring.score_summary import ComponentSummary
class BaseRESTComponent(BaseScoreComponent):
    """Base class for scoring components that obtain their scores from a REST service.

    Subclasses implement request formatting, execution, response parsing, and
    URL/header construction.
    """

    def __init__(self, parameters: ComponentParameters):
        super().__init__(parameters)
        # URL and header are computed once per component instance.
        self._request_url = self._create_url(self.parameters.component_type)
        self._request_header = self._create_header()

    def calculate_score(self, molecules: List, step=-1) -> ComponentSummary:
        """Score `molecules` by posting their SMILES to the configured endpoint."""
        valid_smiles = self._chemistry.mols_to_smiles(molecules)
        score, raw_score = self._score_smiles(valid_smiles)
        score_summary = ComponentSummary(total_score=score, parameters=self.parameters, raw_score=raw_score)
        return score_summary

    def _score_smiles(self, smiles: List[str]) -> tuple:
        # Returns (transformed scores, raw scores) for the batch.
        response = self._post_request(self._request_url, smiles, self._request_header)
        results_raw = self._parse_response(response, len(smiles))
        results = self._apply_score_transformation(results_raw)
        return results, results_raw

    def _post_request(self, url, smiles, header):
        # Format the payload and delegate the actual HTTP call to the subclass.
        data = self._format_data(smiles)
        result = self._execute_request(url, data, header)
        return result

    @abstractmethod
    def _format_data(self, smiles: List[str]) -> dict:
        raise NotImplementedError("_format_data method is not implemented")

    @abstractmethod
    def _execute_request(self, request_url, data, header) -> dict:
        raise NotImplementedError("_execute_request method is not implemented")

    @abstractmethod
    def _parse_response(self, response_json: dict, data_size: int) -> np.array:
        raise NotImplementedError("_parse_response method is not implemented")

    def _apply_score_transformation(self, results_raw: np.array) -> np.array:
        """Returns np.array with non-NaN elements transformed by transformation function, and all NaN elements
        transformed into 0. """
        valid_mask = ~np.isnan(results_raw)
        results_raw_valid = results_raw[valid_mask]
        transform_params = self.parameters.specific_parameters.get(
            self.component_specific_parameters.TRANSFORMATION, {}
        )
        results_transformed = self._transformation_function(results_raw_valid, transform_params)
        # Start from zeros so every NaN (failed/missing prediction) scores 0.
        results = np.zeros(len(results_raw), dtype=np.float32)
        results[valid_mask] = results_transformed
        return results

    @abstractmethod
    def _create_url(self, component_name) -> str:
        raise NotImplementedError("_create_url method is not implemented")

    @abstractmethod
    def _create_header(self) -> dict:
        raise NotImplementedError("_create_header method is not implemented")
import json
import os
from abc import abstractmethod
from typing import List
import numpy as np
import requests
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.enums import EnvironmentalVariablesEnum
from reinvent_scoring.scoring.score_components.pip.base_rest_component import BaseRESTComponent
class BasePiPModelComponent(BaseRESTComponent):
    """REST scoring component for PiP model endpoints (synchronous request/response)."""

    def __init__(self, parameters: ComponentParameters):
        self._environment_keys = EnvironmentalVariablesEnum()
        super().__init__(parameters)

    def _execute_request(self, request_url, data, header) -> dict:
        """POST ``data`` to ``request_url`` and return the decoded JSON body.

        Raises:
            ValueError: on any non-200 status, with status/reason/body details.
        """
        request = requests.post(request_url, json=data, headers=header)
        if request.status_code != 200:
            raise ValueError(
                f" Status: {request.status_code} Reason: ({request.reason})."
                f"Response content: {request.content}\n"
                f"Response content: {request.text}"
            )
        return request.json()

    def _parse_response(self, response_json: dict, data_size: int) -> np.array:
        """Map per-compound results back onto their request positions.

        Compounds that are missing, out of range, or unparsable keep NaN, which
        the score transformation later converts to 0.
        """
        compounds = response_json['jsonData']['data']
        results_raw = np.empty(data_size, dtype=np.float32)
        results_raw[:] = np.nan
        for compound in compounds:
            try:
                index = int(compound["id"])
                results_raw[index] = self._parse_single_compound(compound)
            # IndexError is included so an out-of-range id is skipped explicitly
            # instead of being silently swallowed by a ``finally: return``
            # (the previous code suppressed *all* exceptions that way).
            except (ValueError, TypeError, KeyError, IndexError):
                pass  # If parsing failed, keep value NaN for this compound and continue.
        return results_raw

    @abstractmethod
    def _parse_single_compound(self, compound):
        # Subclasses extract this component's numeric score from one compound entry.
        raise NotImplementedError("_parse_compound method is not implemented")

    def _format_data(self, smiles: List[str]) -> dict:
        """Build the PiP batch payload; the list index doubles as the compound id."""
        molecules = [{"molData": smi, "id": f"{i}"} for i, smi in enumerate(smiles)]
        return {
            "jsonData": {
                "data": molecules,
                "metadata": {"molFormat": "smiles"},
                "parameters": {},
            }
        }

    def _create_url(self, async_path: str) -> str:
        """Fill the PIP_URL template with the component's path."""
        pip_url = self._get_enviornment_variable(self._environment_keys.PIP_URL)
        return pip_url.format(async_path)

    def _create_header(self) -> dict:
        """Headers (content type, API key) for the PiP POST request."""
        pip_key = self._get_enviornment_variable(self._environment_keys.PIP_KEY)
        return {
            'Content-Type': 'application/vnd.az.batch.v1+json', 'x-api-key': pip_key,
            'Accept': 'application/vnd.az.resultset.v1+json'
        }

    def _get_enviornment_variable(self, variable: str) -> str:
        # NOTE: keeps the historical misspelling ("enviornment") so existing
        # callers/subclasses remain compatible.
        # Environment variables win; the bundled config file is the fallback.
        try:
            return os.environ[variable]
        except KeyError:
            return self._retrieve_pip_key_from_config(variable)

    def _retrieve_pip_key_from_config(self, variable: str) -> str:
        """Look up ``variable`` in the packaged configs/config.json."""
        try:
            project_root = os.path.dirname(__file__)
            with open(os.path.join(project_root, '../../../configs/config.json'), 'r') as f:
                config = json.load(f)
            environmental_variables = config[self._environment_keys.ENVIRONMENTAL_VARIABLES]
            return environmental_variables[variable]
        except KeyError as ex:
            # Chain the original error so the missing key is visible in tracebacks.
            raise KeyError(f"Key {variable} not found in reinvent_scoring/configs/config.json") from ex
import json
import os
from abc import abstractmethod
from typing import List
import numpy as np
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.enums import EnvironmentalVariablesEnum
from reinvent_scoring.scoring.score_components.pip.base_rest_component import BaseRESTComponent
import requests
import time
class BasePiPModelBatchingComponent(BaseRESTComponent):
    """REST scoring component for PiP endpoints that answer asynchronously.

    The initial POST returns "202 Accepted" plus a ``Location`` header; results
    are then polled from a separate GET endpoint until the service stops asking
    the client to retry.
    """

    def __init__(self, parameters: ComponentParameters):
        self._environment_keys = EnvironmentalVariablesEnum()
        super().__init__(parameters)
        self._get_header = self._create_get_header()

    def _execute_request(self, request_url, data, header) -> dict:
        """Submit the batch job, then block polling until results arrive."""
        request = requests.post(request_url, json=data, headers=header)
        if request.status_code != 202:  # async submissions answer 202 Accepted
            raise ValueError(
                f" Status: {request.status_code} Reason: ({request.reason})."
                f"Response content: {request.content}\n"
                f"Response content: {request.text}"
            )
        response = self._get_results(request)
        return response.json()

    def _parse_response(self, response_json: dict, data_size: int) -> np.array:
        """Map per-compound results back onto their request positions.

        Compounds that are missing, out of range, or unparsable keep NaN, which
        the score transformation later converts to 0.
        """
        compounds = response_json['jsonData']['data']
        results_raw = np.empty(data_size, dtype=np.float32)
        results_raw[:] = np.nan
        for compound in compounds:
            try:
                index = int(compound["id"])
                results_raw[index] = self._parse_single_compound(compound)
            # IndexError is included so an out-of-range id is skipped explicitly
            # instead of being silently swallowed by a ``finally: return``
            # (the previous code suppressed *all* exceptions that way).
            except (ValueError, TypeError, KeyError, IndexError):
                pass  # If parsing failed, keep value NaN for this compound and continue.
        return results_raw

    @abstractmethod
    def _parse_single_compound(self, compound):
        # Subclasses extract this component's numeric score from one compound entry.
        raise NotImplementedError("_parse_compound method is not implemented")

    def _format_data(self, smiles: List[str]) -> dict:
        """Build the PiP batch payload; the list index doubles as the compound id."""
        molecules = [{"molData": smi, "id": f"{i}"} for i, smi in enumerate(smiles)]
        return {
            "jsonData": {
                "data": molecules,
                "metadata": {"molFormat": "smiles"},
                "parameters": {},
            }
        }

    def _create_url(self, async_path: str) -> str:
        """Fill the PIP_URL template with the submission path."""
        pip_url = self._get_enviornment_variable(self._environment_keys.PIP_URL)
        return pip_url.format(async_path)

    def _create_get_url(self, component_name) -> str:
        """Fill the PIP_GET_RESULTS template with the async results location."""
        pip_url = self._get_enviornment_variable(self._environment_keys.PIP_GET_RESULTS)
        return pip_url.format(component_name)

    def _create_header(self) -> dict:
        """Headers for the asynchronous batch submission (POST)."""
        pip_key = self._get_enviornment_variable(self._environment_keys.PIP_KEY)
        return {
            'Content-Type': 'application/vnd.az.batch.v1+json', 'x-api-key': pip_key,
            'Accept': 'application/vnd.az.resultset.v1+json',
            'Prefer': 'respond-async'
        }

    def _create_get_header(self) -> dict:
        """Headers for the polling (GET) requests."""
        pip_key = self._get_enviornment_variable(self._environment_keys.PIP_KEY)
        return {'Content-Type': 'application/json', 'x-api-key': pip_key}

    def _get_enviornment_variable(self, variable: str) -> str:
        # NOTE: keeps the historical misspelling ("enviornment") so existing
        # callers/subclasses remain compatible.
        try:
            return os.environ[variable]
        except KeyError:
            return self._retrieve_pip_key_from_config(variable)

    def _retrieve_pip_key_from_config(self, variable: str) -> str:
        """Look up ``variable`` in the packaged configs/config.json."""
        try:
            project_root = os.path.dirname(__file__)
            with open(os.path.join(project_root, '../../../configs/config.json'), 'r') as f:
                config = json.load(f)
            environmental_variables = config[self._environment_keys.ENVIRONMENTAL_VARIABLES]
            return environmental_variables[variable]
        except KeyError as ex:
            # Chain the original error so the missing key is visible in tracebacks.
            raise KeyError(f"Key {variable} not found in reinvent_scoring/configs/config.json") from ex

    def _get_results(self, response):
        """Poll the results endpoint until the service stops returning ``retry-after``.

        Documentation for this get call can be found at:
        https://confluence.astrazeneca.net/pages/viewpage.action?spaceKey=ADD&title=Batching+and+caching+service

        Raises:
            TimeoutError: after roughly ``time_threshold`` seconds of cumulative waiting.
            ValueError: if a polling request fails or the retry-after header is malformed.
                (A previous bare ``except:`` converted the TimeoutError into a
                ValueError and hid the real failure cause; the timeout check now
                lives outside the try block.)
        """
        async_location = response.headers.get('Location', None)
        url = self._create_get_url(async_location)
        time_threshold = 300  # seconds of cumulative waiting allowed
        time_delay = 1
        while time_delay > 0:
            try:
                response = requests.get(url=url, headers=self._get_header)
                # "Not ready yet" is signalled via the retry-after header; its
                # absence (delay 0) ends the loop with the final response.
                time_delay = int(response.headers.get('retry-after', 0))
            except Exception as ex:
                raise ValueError(
                    f" Status: {response.status_code} Reason: ({response.reason})."
                    f"Response content: {response.content}\n"
                    f"Response content: {response.text}"
                ) from ex
            time_threshold -= time_delay
            time.sleep(time_delay)
            if time_threshold <= 0:
                raise TimeoutError('Terminated since retrieving results took too long')
        return response
import numpy as np
from typing import List
from reinvent_scoring.scoring.utils import _is_development_environment
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.score_components.structural.base_structural_component import BaseStructuralComponent
class DockStream(BaseStructuralComponent):
    """Scoring component that shells out to an external DockStream docking run."""

    def __init__(self, parameters: ComponentParameters):
        super().__init__(parameters)
        specific = self.parameters.specific_parameters
        self._configuration_path = specific[self.component_specific_parameters.DOCKSTREAM_CONFPATH]
        self._docker_script_path = specific[self.component_specific_parameters.DOCKSTREAM_DOCKERSCRIPTPATH]
        self._environment_path = specific[self.component_specific_parameters.DOCKSTREAM_ENVPATH]

    def _add_debug_mode_if_selected(self, command):
        # Debug mode makes the DockStream loggers print much more detailed information.
        debug_requested = self.parameters.specific_parameters.get(
            self.component_specific_parameters.DOCKSTREAM_DEBUG, False
        )
        if debug_requested or _is_development_environment():
            return ' '.join([command, "-debug"])
        return command

    def _create_command(self, smiles: List[str], step):
        """Assemble the shell command that runs the docking for one batch."""
        concat_smiles = '"' + ';'.join(smiles) + '"'
        pieces = [
            self._environment_path,
            self._docker_script_path,
            "-conf", self._configuration_path,
            "-output_prefix", self._get_step_string(step),
            "-smiles", concat_smiles,
            "-print_scores",
        ]
        return self._add_debug_mode_if_selected(' '.join(pieces))

    def _calculate_score(self, smiles: List[str], step) -> np.array:
        """Dock the batch and return (transformed scores, raw scores)."""
        command = self._create_command(smiles, step)
        raw_results = self._send_request_with_stepwize_read(command, len(smiles))
        # "docker.py" returns "NA" for ligands that failed embedding/docking (a
        # '0' could be a perfectly normal value); anything that does not parse
        # as a floating point number is scored 0.
        scores = [self._as_float_or_zero(entry) for entry in raw_results]
        transform_params = self.parameters.specific_parameters.get(
            self.component_specific_parameters.TRANSFORMATION, {}
        )
        transformed_scores = self._transformation_function(scores, transform_params)
        return np.array(transformed_scores), np.array(scores)

    @staticmethod
    def _as_float_or_zero(value):
        # Convert one raw result line to a float, falling back to 0 on failure.
        try:
            return float(value)
        except ValueError:
            return 0

    def _parse_result(self, result):
        return str(result).strip()
import io
import subprocess
from abc import abstractmethod
from typing import List
import numpy as np
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.score_components import BaseScoreComponent
from reinvent_scoring.scoring.score_summary import ComponentSummary
class BaseStructuralComponent(BaseScoreComponent):
    """Base class for components that score molecules via an external subprocess."""

    def __init__(self, parameters: ComponentParameters):
        super().__init__(parameters)

    def calculate_score_for_step(self, molecules: List, step=-1) -> ComponentSummary:
        # Step-aware scoring delegates to calculate_score; subclasses receive
        # the step through _calculate_score.
        return self.calculate_score(molecules, step)

    def calculate_score(self, molecules: List, step=-1) -> ComponentSummary:
        """Convert molecules to SMILES, run the external tool, wrap the result."""
        valid_smiles = self._chemistry.mols_to_smiles(molecules)
        score, raw_score = self._calculate_score(valid_smiles, step)
        score_summary = ComponentSummary(total_score=score, parameters=self.parameters, raw_score=raw_score)
        return score_summary

    def _get_step_string(self, step) -> str:
        # step == -1 means "no epoch prefix"; otherwise e.g. step 7 -> "e0007_".
        # The value is quoted so it survives shell splitting in the spawned command.
        if step == -1:
            return "\"\""
        return "".join(["\"e", str(step).zfill(4), "_\""])

    @abstractmethod
    def _calculate_score(self, smiles: List[str], step) -> np.array:
        # Subclasses return (transformed scores, raw scores) for the batch.
        raise NotImplementedError("_calculate_score method is not implemented")

    @abstractmethod
    def _create_command(self, input_file, step) -> str:
        # Subclasses assemble the shell command to run the external tool.
        raise NotImplementedError("_create_command method is not implemented")

    def _send_request_with_stepwize_read(self, command, data_size: int):
        """Run ``command`` in a shell and read back exactly ``data_size`` result lines.

        Each line is passed through the subclass's _parse_result before being
        collected; stdin/stdout are wrapped for UTF-8 text I/O.
        """
        with subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                              shell=True) as proc:
            wrapped_proc_in = io.TextIOWrapper(proc.stdin, 'utf-8')
            wrapped_proc_out = io.TextIOWrapper(proc.stdout, 'utf-8')
            result = [self._parse_result(wrapped_proc_out.readline()) for i in range(data_size)]
            wrapped_proc_in.close()
            wrapped_proc_out.close()
            proc.wait()
            proc.terminate()
        return result

    @abstractmethod
    def _parse_result(self, result) -> str:
        # Subclasses normalise one raw output line from the external tool.
        raise NotImplementedError("_parse_result method is not implemented")
import numpy as np
from typing import List
from reinvent_scoring.scoring.utils import _is_development_environment
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.score_components.structural.base_structural_component import BaseStructuralComponent
class AZdock(BaseStructuralComponent):
    """Scoring component that shells out to an external AZdock docking run."""

    def __init__(self, parameters: ComponentParameters):
        super().__init__(parameters)
        specific = self.parameters.specific_parameters
        self._configuration_path = specific[self.component_specific_parameters.AZDOCK_CONFPATH]
        self._docker_script_path = specific[self.component_specific_parameters.AZDOCK_DOCKERSCRIPTPATH]
        self._environment_path = specific[self.component_specific_parameters.AZDOCK_ENVPATH]

    def _add_debug_mode_if_selected(self, command):
        # Debug mode makes the AZdock loggers print much more detailed information.
        debug_requested = self.parameters.specific_parameters.get(
            self.component_specific_parameters.AZDOCK_DEBUG, False
        )
        if debug_requested or _is_development_environment():
            return ' '.join([command, "-debug"])
        return command

    def _create_command(self, smiles: List[str], step):
        """Assemble the shell command that runs the docking for one batch."""
        concat_smiles = '"' + ';'.join(smiles) + '"'
        pieces = [
            self._environment_path,
            self._docker_script_path,
            "-conf", self._configuration_path,
            "-output_prefix", self._get_step_string(step),
            "-smiles", concat_smiles,
            "-print_scores",
        ]
        return self._add_debug_mode_if_selected(' '.join(pieces))

    def _calculate_score(self, smiles: List[str], step) -> np.array:
        """Dock the batch and return (transformed scores, raw scores)."""
        command = self._create_command(smiles, step)
        raw_results = self._send_request_with_stepwize_read(command, len(smiles))
        # "docker.py" returns "NA" for ligands that failed embedding/docking (a
        # '0' could be a perfectly normal value); anything that does not parse
        # as a floating point number is scored 0.
        scores = [self._as_float_or_zero(entry) for entry in raw_results]
        transform_params = self.parameters.specific_parameters.get(
            self.component_specific_parameters.TRANSFORMATION, {}
        )
        transformed_scores = self._transformation_function(scores, transform_params)
        return np.array(transformed_scores), np.array(scores)

    @staticmethod
    def _as_float_or_zero(value):
        # Convert one raw result line to a float, falling back to 0 on failure.
        try:
            return float(value)
        except ValueError:
            return 0

    def _parse_result(self, result):
        return str(result).strip()
import pickle
from typing import List
import numpy as np
from rdkit.Chem import Mol
from rdkit.Chem.Descriptors import ExactMolWt
from reinvent_chemistry import Descriptors
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.score_components import BaseScoreComponent
from reinvent_scoring.scoring.score_components.synthetic_accessibility.sascorer import calculateScore
from reinvent_scoring.scoring.score_summary import ComponentSummary
class SASComponent(BaseScoreComponent):
    """Synthetic-accessibility score component backed by a pickled scikit-learn classifier."""

    def __init__(self, parameters: ComponentParameters):
        super().__init__(parameters)
        self.activity_model = self._load_model(parameters)
        self._descriptors = Descriptors()
        # Fingerprint settings for the model's input descriptor.
        self.fp_parameters = dict(
            radius=3,
            size=4096,  # Descriptors class calls this parameter "size", RDKit calls it "nBits".
            use_features=False,  # RDKit has False as default, Descriptors class has True.
        )

    def calculate_score(self, molecules: List[Mol], step=-1) -> ComponentSummary:
        score = self.predict_from_molecules(molecules)
        score_summary = ComponentSummary(total_score=score, parameters=self.parameters)
        return score_summary

    def predict_from_molecules(self, molecules: List[Mol]) -> np.ndarray:
        """Return the model's positive-class probability for each molecule."""
        if not molecules:
            return np.array([])
        descriptors = self._calculate_descriptors(molecules)
        # Normally, predict_proba takes a 2d array, one row per observation,
        # but a list of 1d arrays works too.
        sas_predictions = self.activity_model.predict_proba(descriptors)
        return sas_predictions[:, 1]

    def _load_model(self, parameters: ComponentParameters):
        """Unpickle the scikit-learn model configured under "saz_model_path"."""
        # TODO: in the future should use self.component_specific_parameters.MODEL_PATH
        model_path = self.parameters.specific_parameters.get("saz_model_path", "")
        try:
            return self._load_scikit_model(model_path)
        except Exception as ex:
            # Chain the original failure (missing file, bad pickle, ...) so the
            # real cause is visible; the old bare ``except:`` discarded it.
            raise Exception(f"The loaded file `{model_path}` isn't a valid scikit-learn model") from ex

    def _load_scikit_model(self, model_path: str):
        with open(model_path, "rb") as f:
            return pickle.load(f)

    def _calculate_descriptors(self, molecules: List[Mol]) -> List[np.ndarray]:
        return [self._sas_descriptor(mol) for mol in molecules]

    def _sas_descriptor(self, mol: Mol) -> np.ndarray:
        """Returns SAS descriptor for a molecule, to be used as input to SAS model.

        SAS descriptor consists of three parts:
        1. SA score by Ertl and Schuffenhauer (Novartis), part of RDKit, copied to this repo.
        2. Molecular weight.
        3. Morgan fingerprint, with counts (ECFP6).
        The three parts are concatenated into one 1d numpy array.
        """
        sascore = calculateScore(mol)
        molwt = ExactMolWt(mol)
        fp = self._fingerprint(mol)
        return np.concatenate([[sascore], [molwt], fp])

    def _fingerprint(self, mol: Mol) -> np.ndarray:
        fps = self._descriptors.molecules_to_count_fingerprints([mol], parameters=self.fp_parameters)
        return fps[0]
from abc import ABC, abstractmethod
from typing import List
import numpy as np
from pathos.multiprocessing import ProcessPool
from reinvent_chemistry.conversions import Conversions
from reinvent_scoring.scoring.component_parameters import ComponentParameters
from reinvent_scoring.scoring.score_components.score_component_factory import ScoreComponentFactory
from reinvent_scoring.scoring.score_summary import ComponentSummary, FinalSummary
from reinvent_scoring.scoring.enums import ComponentSpecificParametersEnum
from reinvent_scoring.scoring.enums import ScoringFunctionComponentNameEnum
def _update_raw_score(summary: ComponentSummary, query_length: int, valid_indices: List[int]) -> ComponentSummary:
    """Scatter the raw scores of the valid molecules into a zero-filled array of query length."""
    if summary.raw_score is None:
        return summary
    assert len(valid_indices) == len(summary.raw_score)
    expanded = np.zeros(query_length, dtype=np.float32)
    for position, value in zip(valid_indices, summary.raw_score):
        expanded[position] = value
    summary.raw_score = expanded
    return summary
def _update_total_score(summary: ComponentSummary, query_length: int, valid_indices: List[int]) -> ComponentSummary:
    """Expand total (and raw) scores from the valid molecules to the full query length."""
    assert len(valid_indices) == len(summary.total_score)
    expanded = np.zeros(query_length, dtype=np.float32)
    for position, value in zip(valid_indices, summary.total_score):
        expanded[position] = value
    summary.total_score = expanded
    return _update_raw_score(summary, query_length, valid_indices)
def parallel_run(component_smiles_pair):
    """ProcessPool worker: score one component and expand scores to full query length."""
    component, molecules, valid_indices, smiles = component_smiles_pair[:4]
    scores = component.calculate_score(molecules)
    return _update_total_score(scores, len(smiles), valid_indices)
def parallel_run_for_step(component_smiles_pair):
    """ProcessPool worker: step-aware variant of parallel_run."""
    component, molecules, valid_indices, smiles, step = component_smiles_pair[:5]
    scores = component.calculate_score_for_step(molecules, step)
    return _update_total_score(scores, len(smiles), valid_indices)
class BaseScoringFunction(ABC):
    """Base class that combines scoring components into one final score.

    Penalty components (matching substructure, custom alerts) multiply the
    value produced by the non-penalty components, whose combination strategy
    is supplied by subclasses via ``_compute_non_penalty_components``.
    """

    def __init__(self, parameters: List[ComponentParameters], parallel=False):
        self.component_enum = ScoringFunctionComponentNameEnum()
        self.component_specific_parameters = ComponentSpecificParametersEnum()
        self._chemistry = Conversions()
        factory = ScoreComponentFactory(parameters)
        self.scoring_components = factory.create_score_components()
        if parallel:
            # Swap the public entry points for the multiprocessing versions.
            self.get_final_score = self._parallel_final_score
            self.get_final_score_for_step = self._parallel_final_score_for_step

    def get_final_score_for_step(self, smiles: List[str], step: int) -> FinalSummary:
        """Score all SMILES sequentially, passing the current step to each component."""
        molecules, valid_indices = self._chemistry.smiles_to_mols_and_indices(smiles)
        query_size = len(smiles)
        summaries = [_update_total_score(sc.calculate_score_for_step(molecules, step), query_size, valid_indices) for sc
                     in self.scoring_components]
        return self._score_summary(summaries, smiles, valid_indices)

    def get_final_score(self, smiles: List[str]) -> FinalSummary:
        """Score all SMILES sequentially; invalid SMILES receive score 0."""
        molecules, valid_indices = self._chemistry.smiles_to_mols_and_indices(smiles)
        query_size = len(smiles)
        summaries = [_update_total_score(sc.calculate_score(molecules), query_size, valid_indices) for sc
                     in self.scoring_components]
        return self._score_summary(summaries, smiles, valid_indices)

    def _score_summary(self, summaries: List[ComponentSummary], smiles: List[str],
                       valid_indices: List[int]) -> FinalSummary:
        penalty = self._compute_penalty_components(summaries, smiles)
        non_penalty = self._compute_non_penalty_components(summaries, smiles)  # fixed typo: was "non_penlaty"
        product = penalty * non_penalty
        final_summary = self._create_final_summary(product, summaries, smiles, valid_indices)
        return final_summary

    def _create_final_summary(self, final_score, summaries: List[ComponentSummary], smiles: List[str],
                              valid_indices: List[int]) -> FinalSummary:
        return FinalSummary(total_score=np.array(final_score, dtype=np.float32),
                            scored_smiles=smiles,
                            valid_idxs=valid_indices,
                            scaffold_log_summary=summaries)

    def _compute_penalty_components(self, summaries: List[ComponentSummary], smiles: List[str]):
        # Penalty components multiply into 1.0, so molecules failing a penalty
        # component are zeroed out regardless of the other scores.
        penalty = np.full(len(smiles), 1, dtype=np.float32)
        for summary in summaries:
            if self._component_is_penalty(summary):
                penalty = penalty * summary.total_score
        return penalty

    @abstractmethod
    def _compute_non_penalty_components(self, summaries: List[ComponentSummary], smiles: List[str]):
        # Message corrected: it previously named "_score_summary" by mistake.
        raise NotImplementedError("_compute_non_penalty_components method is not implemented")

    def _component_is_penalty(self, summary: ComponentSummary) -> bool:
        return (summary.parameters.component_type == self.component_enum.MATCHING_SUBSTRUCTURE) or (
                summary.parameters.component_type == self.component_enum.CUSTOM_ALERTS)

    def _parallel_final_score(self, smiles: List[str]) -> FinalSummary:
        """Score all SMILES with one process per scoring component."""
        molecules, valid_indices = self._chemistry.smiles_to_mols_and_indices(smiles)
        component_smiles_pairs = [[component, molecules, valid_indices, smiles] for component in
                                  self.scoring_components]
        pool = ProcessPool(nodes=len(self.scoring_components))
        mapped_pool = pool.map(parallel_run, component_smiles_pairs)
        pool.clear()
        return self._score_summary(mapped_pool, smiles, valid_indices)

    def _parallel_final_score_for_step(self, smiles: List[str], step: int) -> FinalSummary:
        """Step-aware variant of _parallel_final_score."""
        molecules, valid_indices = self._chemistry.smiles_to_mols_and_indices(smiles)
        component_smiles_pairs = [[component, molecules, valid_indices, smiles, step] for component in
                                  self.scoring_components]
        pool = ProcessPool(nodes=len(self.scoring_components))
        mapped_pool = pool.map(parallel_run_for_step, component_smiles_pairs)
        pool.clear()
        return self._score_summary(mapped_pool, smiles, valid_indices)
import click
import ee
import geopandas as gpd
import bootstrap # noqa
from config.model_settings import DataConfig, OSMConfig, StreetViewConfig
from src.load_ee_data import LoadEEData
from src.generate_building_centroids import GenerateBuildingCentroids
from src.get_google_streetview import GetGoogleStreetView
class GenerateBuildingCentroidsFlow:
    """Flow that generates building centroids from OSM footprints."""

    def __init__(self):
        # Instantiate the configs once; previously __init__ stored the *classes*
        # while execute() built separate fresh instances and ignored them.
        self.data_settings = DataConfig()
        self.osm_settings = OSMConfig()

    def execute(self):
        """Run the centroid generator and return its GeoDataFrame result."""
        building_generator = GenerateBuildingCentroids.from_dataclass_config(
            self.data_settings, self.osm_settings
        )
        return building_generator.execute()
class LoadDataFlow:
    """Flow that loads satellite data from Google Earth Engine."""

    def __init__(self):
        self.config = DataConfig()

    def execute(self):
        ee.Authenticate()  # trigger the Earth Engine authentication flow
        loader = LoadEEData.from_dataclass_config(self.config)
        loader.execute()

    def execute_for_country(self, building_footprint_gdf):
        ee.Authenticate()  # trigger the Earth Engine authentication flow
        loader = LoadEEData.from_dataclass_config(self.config)
        return loader.execute_for_country(building_footprint_gdf)
class GetGoogleStreetViewFlow:
    """Flow that downloads Street View images for building locations."""

    def __init__(self):
        self.streetview_config = StreetViewConfig()

    def execute(self):
        downloader = GetGoogleStreetView.from_dataclass_config(self.streetview_config)
        return downloader.execute()

    def execute_for_country(self, satellite_data_df):
        ee.Authenticate()  # trigger the Earth Engine authentication flow
        downloader = GetGoogleStreetView.from_dataclass_config(self.streetview_config)
        return downloader.execute_for_country(satellite_data_df)
@click.command("generate_building_centroids", help="Retrieve building centroids")
def generate_building_centroids():
    """CLI command: generate building centroids and save them to CSV."""
    building_footprint_gdf = GenerateBuildingCentroidsFlow().execute()
    building_footprint_gdf.to_csv(f"{StreetViewConfig.PLACE}_man_made_petroleum_well.csv")
@click.command("load_data", help="Load data from Google Earth Engine")
def load_data():
    """CLI command: download data from Google Earth Engine."""
    LoadDataFlow().execute()
@click.command(
    "get_google_streetview", help="Retrieve streetview images for building locations"
)
def get_google_streetview():
    """CLI command: download Street View images for building locations."""
    GetGoogleStreetViewFlow().execute()
@click.command("run_pipeline", help="Run full analysis pipeline")
def run_full_pipeline():
    """CLI command: run centroid generation and Earth Engine download end to end."""
    building_footprint_gdf = GenerateBuildingCentroidsFlow().execute()
    building_footprint_gdf.to_csv(f"{StreetViewConfig.PLACE}_man_made_petroleum_well.csv")
    # set_geometry returns a new GeoDataFrame (it is not in-place by default),
    # so the result must be assigned back; the previous call discarded it.
    building_footprint_gdf = building_footprint_gdf.set_geometry("centroid_geometry")
    satellite_data_df = LoadDataFlow().execute_for_country(building_footprint_gdf)
    satellite_data_df.to_csv(f"{StreetViewConfig.PLACE}_CH4.csv")
@click.group(
    "reinventing-catastrophe-modelling",
    help="Library aiming to reinvent catastrophe modelling using a combination of satellite data and urban analytics techniques",
)
@click.pass_context
def cli(ctx):
    # Group entry point only; subcommands are registered via cli.add_command below.
    ...
# Register all subcommands on the CLI group.
cli.add_command(generate_building_centroids)
cli.add_command(load_data)
cli.add_command(get_google_streetview)
cli.add_command(run_full_pipeline)

if __name__ == "__main__":
    cli()
import os
from pathlib import Path
from dataclasses import field
from typing import Dict, Tuple, Sequence
from pydantic.dataclasses import dataclass
from pydantic import StrictStr
@dataclass
class StreetViewConfig:
    """Settings for downloading Google Street View images."""

    SIZE: str = "600x300"  # requested image size, "<width>x<height>" in pixels
    HEADING: str = "151.78"  # camera compass heading (degrees)
    PITCH: str = "-0.76"  # camera pitch (degrees)
    # NOTE: KEY has no type annotation, so it is a plain shared class attribute,
    # not a dataclass field.
    KEY = os.environ.get("GOOGLE_DEV_API_KEY")
    # Local output folders, resolved relative to the repository root.
    LOCAL_IMAGE_FOLDER: str = (
        f"{Path(__file__).resolve().parent.parent.parent}/local_data/streetview_images"
    )
    LOCAL_LINKS_FOLDER: str = (
        f"{Path(__file__).resolve().parent.parent.parent}/local_data/streetview_links"
    )
    LOCAL_METADATA_FOLDER: str = f"{Path(__file__).resolve().parent.parent.parent}/local_data/streetview_metadata"
    # PLACE and META_BASE are also un-annotated class attributes, not fields.
    PLACE = "Iraq"
    META_BASE = "https://maps.googleapis.com/maps/api/streetview/metadata?"
@dataclass
class OSMConfig:
    """Settings for the OpenStreetMap building-footprint query."""

    # No type annotations: these are shared class attributes, not dataclass fields.
    TAGS = {"man_made": "petroleum_well"}  # OSM tag filter for the footprint query
    PLACE = "Iraq"  # place name to geocode and query
@dataclass
class DataConfig:
    """Settings for Google Earth Engine data extraction.

    Annotated attributes become pydantic dataclass fields; the unannotated
    assignments (COUNTRY_CODES, PLACE, BASE_FOLDER, MODEL_NAME,
    POPULATION_IMAGE_BAND) stay plain class attributes, which keeps them
    readable at class level (e.g. ``DataConfig.PLACE``) without instantiation.
    All bounding boxes are (min_lon, min_lat, max_lon, max_lat) in degrees.
    """

    COUNTRY_CODES = ["IQ"]  # ISO 3166-1 alpha-2 codes to process
    # Acquisition window: YEAR/MON_START/DATE_START .. YEAR_END/MON_END/DATE_END.
    YEAR: int = 2021
    MON_START: int = 7
    DATE_START: int = 13
    YEAR_END: int = 2022
    MON_END: int = 1
    DATE_END: int = 8
    PLACE = "Iraq"  # human-readable label used in output paths
    BASE_FOLDER = "/ee_data"  # root folder for exported Earth Engine data
    LANDSAT_IMAGE_COLLECTION: str = "LANDSAT/LC08/C01/T1"
    MODEL_NAME = "COPERNICUS"  # label printed while downloading
    LANDSAT_IMAGE_BAND: Sequence[str] = field(
        default_factory=lambda: ["B4", "B3", "B2"]
    )
    NIGHTTIME_LIGHT_IMAGE_COLLECTION: str = "NOAA/VIIRS/DNB/MONTHLY_V1/VCMCFG"
    NIGHTTIME_LIGHT_IMAGE_BAND: str = "avg_rad"
    # NOTE(review): "METEROLOGICAL" is a misspelling of "METEOROLOGICAL"; the
    # names are part of the public interface, so they are kept unchanged.
    METEROLOGICAL_IMAGE_COLLECTION: str = "NOAA/GFS0P25"
    METEROLOGICAL_IMAGE_BAND: Sequence[str] = field(
        default_factory=lambda: [
            "temperature_2m_above_ground",
            "relative_humidity_2m_above_ground",
            "total_precipitation_surface",
            "total_cloud_cover_entire_atmosphere",
            "u_component_of_wind_10m_above_ground",
            "v_component_of_wind_10m_above_ground",
        ]
    )
    POPULATION_IMAGE_COLLECTION: str = (
        "CIESIN/GPWv411/GPW_Basic_Demographic_Characteristics"
    )
    POPULATION_IMAGE_BAND = "basic_demographic_characteristics"
    LAND_COVER_IMAGE_COLLECTION: str = "COPERNICUS/Landcover/100m/Proba-V-C3/Global"
    LAND_COVER_IMAGE_BAND: str = "discrete_classification"
    METHANE_IMAGE_COLLECTION: str = "COPERNICUS/S5P/OFFL/L3_CH4"
    METHANE_IMAGE_BAND: Sequence[str] = field(
        default_factory=lambda: [
            "CH4_column_volume_mixing_ratio_dry_air",
            "aerosol_height",
        ]
    )
    # Mapping: ISO code -> (country name, (min_lon, min_lat, max_lon, max_lat)).
    COUNTRY_BOUNDING_BOXES: Dict[
        StrictStr, Tuple[StrictStr, Tuple[float, float, float, float]]
    ] = field(
        default_factory=lambda: {
            "AF": ("Afghanistan", (60.5284298033, 29.318572496, 75.1580277851, 38.4862816432)),
            "AO": ("Angola", (11.6400960629, -17.9306364885, 24.0799052263, -4.43802336998)),
            "AL": ("Albania", (19.3044861183, 39.624997667, 21.0200403175, 42.6882473822)),
            "AE": ("United Arab Emirates", (51.5795186705, 22.4969475367, 56.3968473651, 26.055464179)),
            "AR": ("Argentina", (-73.4154357571, -55.25, -53.628348965, -21.8323104794)),
            "AM": ("Armenia", (43.5827458026, 38.7412014837, 46.5057198423, 41.2481285671)),
            "AQ": ("Antarctica", (-180.0, -90.0, 180.0, -63.2706604895)),
            "TF": ("Fr. S. and Antarctic Lands", (68.72, -49.775, 70.56, -48.625)),
            "AU": ("Australia", (113.338953078, -43.6345972634, 153.569469029, -10.6681857235)),
            "AT": ("Austria", (9.47996951665, 46.4318173285, 16.9796667823, 49.0390742051)),
            "AZ": ("Azerbaijan", (44.7939896991, 38.2703775091, 50.3928210793, 41.8606751572)),
            "BI": ("Burundi", (29.0249263852, -4.49998341229, 30.752262811, -2.34848683025)),
            "BE": ("Belgium", (2.51357303225, 49.5294835476, 6.15665815596, 51.4750237087)),
            "BJ": ("Benin", (0.772335646171, 6.14215770103, 3.79711225751, 12.2356358912)),
            "BF": ("Burkina Faso", (-5.47056494793, 9.61083486576, 2.17710778159, 15.1161577418)),
            "BD": ("Bangladesh", (88.0844222351, 20.670883287, 92.6727209818, 26.4465255803)),
            "BG": ("Bulgaria", (22.3805257504, 41.2344859889, 28.5580814959, 44.2349230007)),
            "BS": ("Bahamas", (-78.98, 23.71, -77.0, 27.04)),
            "BA": ("Bosnia and Herz.", (15.7500260759, 42.65, 19.59976, 45.2337767604)),
            "BY": ("Belarus", (23.1994938494, 51.3195034857, 32.6936430193, 56.1691299506)),
            "BZ": ("Belize", (-89.2291216703, 15.8869375676, -88.1068129138, 18.4999822047)),
            "BO": ("Bolivia", (-69.5904237535, -22.8729187965, -57.4983711412, -9.76198780685)),
            "BR": ("Brazil", (-73.9872354804, -33.7683777809, -34.7299934555, 5.24448639569)),
            "BN": ("Brunei", (114.204016555, 4.007636827, 115.450710484, 5.44772980389)),
            "BT": ("Bhutan", (88.8142484883, 26.7194029811, 92.1037117859, 28.2964385035)),
            "BW": ("Botswana", (19.8954577979, -26.8285429827, 29.4321883481, -17.6618156877)),
            "CF": ("Central African Rep.", (14.4594071794, 2.2676396753, 27.3742261085, 11.1423951278)),
            "CA": ("Canada", (-140.99778, 41.6751050889, -52.6480987209, 83.23324)),
            "CH": ("Switzerland", (6.02260949059, 45.7769477403, 10.4427014502, 47.8308275417)),
            "CL": ("Chile", (-75.6443953112, -55.61183, -66.95992, -17.5800118954)),
            "CN": ("China", (73.6753792663, 18.197700914, 135.026311477, 53.4588044297)),
            "CI": ("Ivory Coast", (-8.60288021487, 4.33828847902, -2.56218950033, 10.5240607772)),
            "CM": ("Cameroon", (8.48881554529, 1.72767263428, 16.0128524106, 12.8593962671)),
            "CD": ("Congo (Kinshasa)", (12.1823368669, -13.2572266578, 31.1741492042, 5.25608775474)),
            "CG": ("Congo (Brazzaville)", (11.0937728207, -5.03798674888, 18.4530652198, 3.72819651938)),
            "CO": ("Colombia", (-78.9909352282, -4.29818694419, -66.8763258531, 12.4373031682)),
            "CR": ("Costa Rica", (-85.94172543, 8.22502798099, -82.5461962552, 11.2171192489)),
            "CU": ("Cuba", (-84.9749110583, 19.8554808619, -74.1780248685, 23.1886107447)),
            "CY": ("Cyprus", (32.2566671079, 34.5718694118, 34.0048808123, 35.1731247015)),
            "CZ": ("Czech Rep.", (12.2401111182, 48.5553052842, 18.8531441586, 51.1172677679)),
            "DE": ("Germany", (5.98865807458, 47.3024876979, 15.0169958839, 54.983104153)),
            "DJ": ("Djibouti", (41.66176, 10.9268785669, 43.3178524107, 12.6996385767)),
            "DK": ("Denmark", (8.08997684086, 54.8000145534, 12.6900061378, 57.730016588)),
            "DO": ("Dominican Rep.", (-71.9451120673, 17.598564358, -68.3179432848, 19.8849105901)),
            "DZ": ("Algeria", (-8.68439978681, 19.0573642034, 11.9995056495, 37.1183806422)),
            "EC": ("Ecuador", (-80.9677654691, -4.95912851321, -75.2337227037, 1.3809237736)),
            "EG": ("Egypt", (24.70007, 22.0, 36.86623, 31.58568)),
            "ER": ("Eritrea", (36.3231889178, 12.4554157577, 43.0812260272, 17.9983074)),
            "ES": ("Spain", (-9.39288367353, 35.946850084, 3.03948408368, 43.7483377142)),
            "EE": ("Estonia", (23.3397953631, 57.4745283067, 28.1316992531, 59.6110903998)),
            "ET": ("Ethiopia", (32.95418, 3.42206, 47.78942, 14.95943)),
            "FI": ("Finland", (20.6455928891, 59.846373196, 31.5160921567, 70.1641930203)),
            "FJ": ("Fiji", (-180.0, -18.28799, 180.0, -16.0208822567)),
            "FK": ("Falkland Is.", (-61.2, -52.3, -57.75, -51.1)),
            "FR": ("France", (-54.5247541978, 2.05338918702, 9.56001631027, 51.1485061713)),
            "GA": ("Gabon", (8.79799563969, -3.97882659263, 14.4254557634, 2.32675751384)),
            "GB": ("United Kingdom", (-7.57216793459, 49.959999905, 1.68153079591, 58.6350001085)),
            "GE": ("Georgia", (39.9550085793, 41.0644446885, 46.6379081561, 43.553104153)),
            "GH": ("Ghana", (-3.24437008301, 4.71046214438, 1.0601216976, 11.0983409693)),
            "GN": ("Guinea", (-15.1303112452, 7.3090373804, -7.83210038902, 12.5861829696)),
            "GM": ("Gambia", (-16.8415246241, 13.1302841252, -13.8449633448, 13.8764918075)),
            "GW": ("Guinea Bissau", (-16.6774519516, 11.0404116887, -13.7004760401, 12.6281700708)),
            "GQ": ("Eq. Guinea", (9.3056132341, 1.01011953369, 11.285078973, 2.28386607504)),
            "GR": ("Greece", (20.1500159034, 34.9199876979, 26.6041955909, 41.8269046087)),
            "GL": ("Greenland", (-73.297, 60.03676, -12.20855, 83.64513)),
            "GT": ("Guatemala", (-92.2292486234, 13.7353376327, -88.2250227526, 17.8193260767)),
            "GY": ("Guyana", (-61.4103029039, 1.26808828369, -56.5393857489, 8.36703481692)),
            "HN": ("Honduras", (-89.3533259753, 12.9846857772, -83.147219001, 16.0054057886)),
            "HR": ("Croatia", (13.6569755388, 42.47999136, 19.3904757016, 46.5037509222)),
            "HT": ("Haiti", (-74.4580336168, 18.0309927434, -71.6248732164, 19.9156839055)),
            "HU": ("Hungary", (16.2022982113, 45.7594811061, 22.710531447, 48.6238540716)),
            "ID": ("Indonesia", (95.2930261576, -10.3599874813, 141.03385176, 5.47982086834)),
            "IN": ("India", (68.1766451354, 7.96553477623, 97.4025614766, 35.4940095078)),
            "IE": ("Ireland", (-9.97708574059, 51.6693012559, -6.03298539878, 55.1316222195)),
            "IR": ("Iran", (44.1092252948, 25.0782370061, 63.3166317076, 39.7130026312)),
            "IQ": ("Iraq", (38.7923405291, 29.0990251735, 48.5679712258, 37.3852635768)),
            "IS": ("Iceland", (-24.3261840479, 63.4963829617, -13.609732225, 66.5267923041)),
            "IL": ("Israel", (34.2654333839, 29.5013261988, 35.8363969256, 33.2774264593)),
            "IT": ("Italy", (6.7499552751, 36.619987291, 18.4802470232, 47.1153931748)),
            "JM": ("Jamaica", (-78.3377192858, 17.7011162379, -76.1996585761, 18.5242184514)),
            "JO": ("Jordan", (34.9226025734, 29.1974946152, 39.1954683774, 33.3786864284)),
            "JP": ("Japan", (129.408463169, 31.0295791692, 145.543137242, 45.5514834662)),
            "KZ": ("Kazakhstan", (46.4664457538, 40.6623245306, 87.3599703308, 55.3852501491)),
            "KE": ("Kenya", (33.8935689697, -4.67677, 41.8550830926, 5.506)),
            "KG": ("Kyrgyzstan", (69.464886916, 39.2794632025, 80.2599902689, 43.2983393418)),
            "KH": ("Cambodia", (102.3480994, 10.4865436874, 107.614547968, 14.5705838078)),
            "KR": ("S. Korea", (126.117397903, 34.3900458847, 129.468304478, 38.6122429469)),
            "KW": ("Kuwait", (46.5687134133, 28.5260627304, 48.4160941913, 30.0590699326)),
            "LA": ("Laos", (100.115987583, 13.88109101, 107.564525181, 22.4647531194)),
            "LB": ("Lebanon", (35.1260526873, 33.0890400254, 36.6117501157, 34.6449140488)),
            "LR": ("Liberia", (-11.4387794662, 4.35575511313, -7.53971513511, 8.54105520267)),
            "LY": ("Libya", (9.31941084152, 19.58047, 25.16482, 33.1369957545)),
            "LK": ("Sri Lanka", (79.6951668639, 5.96836985923, 81.7879590189, 9.82407766361)),
            "LS": ("Lesotho", (26.9992619158, -30.6451058896, 29.3251664568, -28.6475017229)),
            "LT": ("Lithuania", (21.0558004086, 53.9057022162, 26.5882792498, 56.3725283881)),
            "LU": ("Luxembourg", (5.67405195478, 49.4426671413, 6.24275109216, 50.1280516628)),
            "LV": ("Latvia", (21.0558004086, 55.61510692, 28.1767094256, 57.9701569688)),
            "MA": ("Morocco", (-17.0204284327, 21.4207341578, -1.12455115397, 35.7599881048)),
            "MD": ("Moldova", (26.6193367856, 45.4882831895, 30.0246586443, 48.4671194525)),
            "MG": ("Madagascar", (43.2541870461, -25.6014344215, 50.4765368996, -12.0405567359)),
            "MX": ("Mexico", (-117.12776, 14.5388286402, -86.811982388, 32.72083)),
            "MK": ("Macedonia", (20.46315, 40.8427269557, 22.9523771502, 42.3202595078)),
            "ML": ("Mali", (-12.1707502914, 10.0963607854, 4.27020999514, 24.9745740829)),
            "MM": ("Myanmar", (92.3032344909, 9.93295990645, 101.180005324, 28.335945136)),
            "ME": ("Montenegro", (18.45, 41.87755, 20.3398, 43.52384)),
            "MN": ("Mongolia", (87.7512642761, 41.5974095729, 119.772823928, 52.0473660345)),
            "MZ": ("Mozambique", (30.1794812355, -26.7421916643, 40.7754752948, -10.3170960425)),
            "MR": ("Mauritania", (-17.0634232243, 14.6168342147, -4.92333736817, 27.3957441269)),
            "MW": ("Malawi", (32.6881653175, -16.8012997372, 35.7719047381, -9.23059905359)),
            "MY": ("Malaysia", (100.085756871, 0.773131415201, 119.181903925, 6.92805288332)),
            "NA": ("Namibia", (11.7341988461, -29.045461928, 25.0844433937, -16.9413428687)),
            "NC": ("New Caledonia", (164.029605748, -22.3999760881, 167.120011428, -20.1056458473)),
            "NE": ("Niger", (0.295646396495, 11.6601671412, 15.9032466977, 23.4716684026)),
            "NG": ("Nigeria", (2.69170169436, 4.24059418377, 14.5771777686, 13.8659239771)),
            "NI": ("Nicaragua", (-87.6684934151, 10.7268390975, -83.147219001, 15.0162671981)),
            "NL": ("Netherlands", (3.31497114423, 50.803721015, 7.09205325687, 53.5104033474)),
            "NO": ("Norway", (4.99207807783, 58.0788841824, 31.29341841, 80.6571442736)),
            "NP": ("Nepal", (80.0884245137, 26.3978980576, 88.1748043151, 30.4227169866)),
            "NZ": ("New Zealand", (166.509144322, -46.641235447, 178.517093541, -34.4506617165)),
            "OM": ("Oman", (52.0000098, 16.6510511337, 59.8080603372, 26.3959343531)),
            "PK": ("Pakistan", (60.8742484882, 23.6919650335, 77.8374507995, 37.1330309108)),
            "PA": ("Panama", (-82.9657830472, 7.2205414901, -77.2425664944, 9.61161001224)),
            "PE": ("Peru", (-81.4109425524, -18.3479753557, -68.6650797187, -0.0572054988649)),
            "PH": ("Philippines", (117.17427453, 5.58100332277, 126.537423944, 18.5052273625)),
            "PG": ("Papua New Guinea", (141.000210403, -10.6524760881, 156.019965448, -2.50000212973)),
            "PL": ("Poland", (14.0745211117, 49.0273953314, 24.0299857927, 54.8515359564)),
            "PR": ("Puerto Rico", (-67.2424275377, 17.946553453, -65.5910037909, 18.5206011011)),
            "KP": ("N. Korea", (124.265624628, 37.669070543, 130.780007359, 42.9853868678)),
            "PT": ("Portugal", (-9.52657060387, 36.838268541, -6.3890876937, 42.280468655)),
            "PY": ("Paraguay", (-62.6850571357, -27.5484990374, -54.2929595608, -19.3427466773)),
            "QA": ("Qatar", (50.7439107603, 24.5563308782, 51.6067004738, 26.1145820175)),
            "RO": ("Romania", (20.2201924985, 43.6884447292, 29.62654341, 48.2208812526)),
            "RU": ("Russia", (-180.0, 41.151416124, 180.0, 81.2504)),
            "RW": ("Rwanda", (29.0249263852, -2.91785776125, 30.8161348813, -1.13465911215)),
            "SA": ("Saudi Arabia", (34.6323360532, 16.3478913436, 55.6666593769, 32.161008816)),
            "SD": ("Sudan", (21.93681, 8.61972971293, 38.4100899595, 22.0)),
            "SS": ("S. Sudan", (23.8869795809, 3.50917, 35.2980071182, 12.2480077571)),
            "SN": ("Senegal", (-17.6250426905, 12.332089952, -11.4678991358, 16.5982636581)),
            "SB": ("Solomon Is.", (156.491357864, -10.8263672828, 162.398645868, -6.59933847415)),
            "SL": ("Sierra Leone", (-13.2465502588, 6.78591685631, -10.2300935531, 10.0469839543)),
            "SV": ("El Salvador", (-90.0955545723, 13.1490168319, -87.7235029772, 14.4241327987)),
            "SO": ("Somalia", (40.98105, -1.68325, 51.13387, 12.02464)),
            "RS": ("Serbia", (18.82982, 42.2452243971, 22.9860185076, 46.1717298447)),
            "SR": ("Suriname", (-58.0446943834, 1.81766714112, -53.9580446031, 6.0252914494)),
            "SK": ("Slovakia", (16.8799829444, 47.7584288601, 22.5581376482, 49.5715740017)),
            "SI": ("Slovenia", (13.6981099789, 45.4523163926, 16.5648083839, 46.8523859727)),
            "SE": ("Sweden", (11.0273686052, 55.3617373725, 23.9033785336, 69.1062472602)),
            "SZ": ("Swaziland", (30.6766085141, -27.2858794085, 32.0716654803, -25.660190525)),
            "SY": ("Syria", (35.7007979673, 32.312937527, 42.3495910988, 37.2298725449)),
            "TD": ("Chad", (13.5403935076, 7.42192454674, 23.88689, 23.40972)),
            "TG": ("Togo", (-0.0497847151599, 5.92883738853, 1.86524051271, 11.0186817489)),
            "TH": ("Thailand", (97.3758964376, 5.69138418215, 105.589038527, 20.4178496363)),
            "TJ": ("Tajikistan", (67.4422196796, 36.7381712916, 74.9800024759, 40.9602133245)),
            "TM": ("Turkmenistan", (52.5024597512, 35.2706639674, 66.5461503437, 42.7515510117)),
            "TL": ("East Timor", (124.968682489, -9.39317310958, 127.335928176, -8.27334482181)),
            "TT": ("Trinidad and Tobago", (-61.95, 10.0, -60.895, 10.89)),
            "TN": ("Tunisia", (7.52448164229, 30.3075560572, 11.4887874691, 37.3499944118)),
            "TR": ("Turkey", (26.0433512713, 35.8215347357, 44.7939896991, 42.1414848903)),
            "TW": ("Taiwan", (120.106188593, 21.9705713974, 121.951243931, 25.2954588893)),
            "TZ": ("Tanzania", (29.3399975929, -11.7209380022, 40.31659, -0.95)),
            "UG": ("Uganda", (29.5794661801, -1.44332244223, 35.03599, 4.24988494736)),
            "UA": ("Ukraine", (22.0856083513, 44.3614785833, 40.0807890155, 52.3350745713)),
            "UY": ("Uruguay", (-58.4270741441, -34.9526465797, -53.209588996, -30.1096863746)),
            "US": ("United States", (-171.791110603, 18.91619, -66.96466, 71.3577635769)),
            "UZ": ("Uzbekistan", (55.9289172707, 37.1449940049, 73.055417108, 45.5868043076)),
            "VE": ("Venezuela", (-73.3049515449, 0.724452215982, -59.7582848782, 12.1623070337)),
            "VN": ("Vietnam", (102.170435826, 8.59975962975, 109.33526981, 23.3520633001)),
            "VU": ("Vanuatu", (166.629136998, -16.5978496233, 167.844876744, -14.6264970842)),
            "PS": ("West Bank", (34.9274084816, 31.3534353704, 35.5456653175, 32.5325106878)),
            # Fixed: was (180, 90, -180, -90), i.e. min/max swapped relative to
            # every other entry's (min_lon, min_lat, max_lon, max_lat) layout.
            "WO": ("World", (-180.0, -90.0, 180.0, 90.0)),
            "YE": ("Yemen", (42.6048726743, 12.5859504257, 53.1085726255, 19.0000033635)),
            "ZA": ("South Africa", (16.3449768409, -34.8191663551, 32.830120477, -22.0913127581)),
            "ZM": ("Zambia", (21.887842645, -17.9612289364, 33.4856876971, -8.23825652429)),
            "ZW": ("Zimbabwe", (25.2642257016, -22.2716118303, 32.8498608742, -15.5077869605)),
        }
    )
from pathlib import Path
import requests
import google_streetview.api
import google_streetview.helpers
import pandas as pd
from reinventing_catastrophe_modelling.config.model_settings import StreetViewConfig
from utils.utils import write_csv
class GetGoogleStreetView:
    """Downloads Google Street View images and metadata for satellite observation
    points and joins the resulting links/metadata back onto the satellite DataFrame.
    """

    def __init__(
        self,
        size: str,
        heading: str,
        pitch: str,
        key: str,
        image_folder: str,
        links_folder: str,
        metadata_folder: str,
        place: str,
        meta_base: str,
    ):
        self.size = size  # requested image size, "WIDTHxHEIGHT"
        # NOTE(review): heading is stored but never passed to the API in
        # _generate_params -- confirm whether it should be included.
        self.heading = heading
        self.pitch = pitch  # camera pitch in degrees
        self.key = key  # Google API key
        self.image_folder = image_folder  # where downloaded images are stored
        self.links_folder = links_folder  # where image links are stored
        self.metadata_folder = metadata_folder  # where API metadata is stored
        self.place = place  # label used for the exported CSV
        self.meta_base = meta_base  # Street View metadata endpoint URL

    @classmethod
    def from_dataclass_config(
        cls, streetview_config: "StreetViewConfig"
    ) -> "GetGoogleStreetView":
        """Build an instance from a StreetViewConfig dataclass."""
        return cls(
            size=streetview_config.SIZE,
            heading=streetview_config.HEADING,
            pitch=streetview_config.PITCH,
            key=streetview_config.KEY,
            image_folder=streetview_config.LOCAL_IMAGE_FOLDER,
            links_folder=streetview_config.LOCAL_LINKS_FOLDER,
            metadata_folder=streetview_config.LOCAL_METADATA_FOLDER,
            place=streetview_config.PLACE,
            meta_base=streetview_config.META_BASE,
        )

    def execute_for_country(self, satellite_data_df):
        """Download Street View data for every unique point and export the joined CSV."""
        lat_lon_str = self.generate_lat_lon_string(satellite_data_df)
        params = self._generate_params(lat_lon_str)
        results = self.get_google_streetview(google_streetview.helpers.api_list(params))
        self.save_streetview_information(results)
        satellite_streetview_data_df = self.add_links_to_satellite_df(satellite_data_df)
        satellite_streetview_metadata_df = self.add_metadata_to_satellite_df(
            satellite_streetview_data_df
        )
        write_csv(
            satellite_streetview_metadata_df,
            f"{Path(__file__).resolve().parent.parent.parent}/local_data/{self.place}.csv",
        )

    def generate_lat_lon_string(self, satellite_data_df):
        """Return the unique coordinates as a single ';'-separated "lat,lon" string."""
        satellite_lat_lon_unique = satellite_data_df[
            ["latitude", "longitude"]
        ].drop_duplicates()
        satellite_lat_lon_unique["lat_lon_str"] = self._join_lat_lon(
            satellite_lat_lon_unique
        )
        return ";".join(satellite_lat_lon_unique["lat_lon_str"])

    def get_google_streetview(self, params):
        """Thin wrapper around the google_streetview results API (kept for mocking)."""
        return google_streetview.api.results(params)

    def save_streetview_information(self, results):
        """Persist downloaded images, their links, and the API metadata locally."""
        results.download_links(self.image_folder)
        results.save_links(f"{self.links_folder}/streetview_links.txt")
        results.save_metadata(f"{self.metadata_folder}/streetview_metadata.json")

    def add_links_to_satellite_df(self, satellite_data_df):
        """Merge the saved Street View image links onto the satellite rows by lat/lon."""
        satellite_data_df["lat_lon_str"] = self._join_lat_lon(satellite_data_df)
        # Read the links file line-by-line; modern pandas no longer accepts the
        # newline separator (sep="\n") previously passed to read_csv.
        with open(f"{self.links_folder}/streetview_links.txt") as links_file:
            urls = [line.strip() for line in links_file if line.strip()]
        street_view_links_df = pd.DataFrame({"URL": urls})
        # Recover the query coordinates from each URL ("location=<lat>%2C<lon>").
        street_view_links_df["latitude"] = street_view_links_df["URL"].str.extract(
            "location=(.*)%2C"
        )
        street_view_links_df["longitude"] = street_view_links_df["URL"].str.extract(
            "%2C(.*)&pitch"
        )
        street_view_links_df[["latitude", "longitude"]] = street_view_links_df[
            ["latitude", "longitude"]
        ].apply(pd.to_numeric, errors="coerce")
        return satellite_data_df.merge(
            street_view_links_df, on=["latitude", "longitude"]
        )

    def add_metadata_to_satellite_df(self, satellite_data_df):
        """Attach the Street View metadata response body for each row's coordinates.

        The original implementation overwrote the whole "metadata" column on
        every loop iteration with ``str(Response)`` (e.g. "<Response [200]>"),
        so only the repr of the *last* request survived. Fetch one response per
        row and store its body instead.
        """

        def _fetch_metadata(lat_lon):
            meta_params = {"key": self.key, "location": lat_lon}
            return requests.get(self.meta_base, params=meta_params).text

        satellite_data_df["metadata"] = satellite_data_df["lat_lon_str"].map(
            _fetch_metadata
        )
        return satellite_data_df

    def _join_lat_lon(self, satellite_data_df):
        # Build the "lat,lon" string the Street View API expects for each row.
        return satellite_data_df[["latitude", "longitude"]].apply(
            lambda x: ",".join(x.astype(str)), axis=1
        )

    def _generate_params(self, lat_lon_str):
        # Assemble the Static API query parameters for all locations at once.
        param_dict = {}
        param_dict["size"] = self.size
        param_dict["location"] = lat_lon_str
        param_dict["pitch"] = self.pitch
        param_dict["key"] = self.key
        return param_dict
from typing import Tuple, Sequence, Any
import datetime
from joblib import Parallel, delayed
import pandas as pd
import ee
from reinventing_catastrophe_modelling.config.model_settings import DataConfig
from reinventing_catastrophe_modelling.utils.utils import ee_array_to_df
class LoadEEData:
    """Samples a Google Earth Engine image collection at building-centroid points
    over a configured date window and returns the results as a DataFrame.
    """

    def __init__(
        self,
        countries: Sequence,
        year: int,
        mon_start: int,
        date_start: int,
        year_end: int,
        mon_end: int,
        date_end: int,
        image_collection: str,
        image_band: str,
        folder: str,
        model_name: str,
        place: str,
    ):
        self.countries = countries  # (name, bounding-box) tuples to process
        # Acquisition window: year/mon_start/date_start .. year_end/mon_end/date_end.
        self.year = year
        self.mon_start = mon_start
        self.date_start = date_start
        self.year_end = year_end
        self.mon_end = mon_end
        self.date_end = date_end
        self.image_collection = image_collection  # EE collection id
        self.image_band = image_band  # band(s) to select from the collection
        self.folder = folder  # base output folder
        self.model_name = model_name  # label printed while downloading
        self.place = place  # label printed while downloading

    @classmethod
    def from_dataclass_config(cls, config: "DataConfig") -> "LoadEEData":
        """Build an instance from a DataConfig dataclass."""
        # Fall back to the World bounding-box entry for unknown codes; the
        # original code returned the literal string "WO" instead.
        countries = [
            config.COUNTRY_BOUNDING_BOXES.get(
                country, config.COUNTRY_BOUNDING_BOXES["WO"]
            )
            for country in config.COUNTRY_CODES
        ]
        return cls(
            countries=countries,
            year=config.YEAR,
            mon_start=config.MON_START,
            date_start=config.DATE_START,
            year_end=config.YEAR_END,
            mon_end=config.MON_END,
            date_end=config.DATE_END,
            image_collection=config.METHANE_IMAGE_COLLECTION,
            image_band=config.METHANE_IMAGE_BAND,
            folder=config.BASE_FOLDER,
            model_name=config.MODEL_NAME,
            place=config.PLACE,
        )

    def execute(self):
        """Process every configured country in parallel."""
        # The original code never invoked the delayed wrapper
        # (``delayed(f) for ...`` instead of ``delayed(f)(arg) for ...``),
        # so Parallel received bare wrappers and nothing ran.
        # NOTE(review): execute_for_country expects a building-footprint
        # GeoDataFrame, not a (name, bbox) tuple -- confirm the intended argument.
        Parallel(n_jobs=-1, backend="multiprocessing", verbose=5)(
            delayed(self.execute_for_country)(country) for country in self.countries
        )

    def execute_for_country(self, building_footprint_gdf):
        """Sample the image collection at every footprint centroid; return one DataFrame."""
        print(f"Downloading {self.model_name} data for {self.place}")
        building_footprint_gdf = self._get_xy(building_footprint_gdf)
        # Initialize the library once per call (was re-initialized inside the loop).
        ee.Initialize()
        s_date, e_date = self._generate_start_end_date()
        building_footprints_satellite_list = []
        for lon, lat in zip(building_footprint_gdf.x, building_footprint_gdf.y):
            centroid_point = ee.Geometry.Point(lon, lat)
            collection = (
                ee.ImageCollection(self.image_collection)
                .select(self.image_band)
                .filterDate(str(s_date), str(e_date))
            )
            landsat_centroid_point = collection.getRegion(centroid_point, 10).getInfo()
            building_footprints_satellite_list.append(
                ee_array_to_df(landsat_centroid_point, self.image_band)
            )
        return pd.concat(building_footprints_satellite_list)

    def prepare_dates(self) -> Sequence[str]:
        """Return ISO date strings at weekly steps across the configured window."""
        start, end = self._generate_start_end_date()
        date_list = self._date_range(start, end)
        return self._generate_dates(date_list)

    def _generate_start_end_date(self) -> Tuple[datetime.date, datetime.date]:
        # Return real date objects; the original returned ``str(date)`` values,
        # which made the date arithmetic in _date_range raise TypeError.
        start = datetime.date(self.year, self.mon_start, self.date_start)
        end = datetime.date(self.year_end, self.mon_end, self.date_end)
        return start, end

    def _date_range(self, start, end) -> Sequence[Any]:
        # Inclusive range of dates from start to end, stepping 7 days.
        r = (end + datetime.timedelta(days=1) - start).days
        return [start + datetime.timedelta(days=i) for i in range(0, r, 7)]

    def _generate_dates(self, date_list) -> Sequence[str]:
        # Convert date objects to ISO-format strings.
        return [str(date) for date in date_list]

    def _get_xy(self, building_footprint_gdf):
        # Split each centroid geometry into separate x/y (lon/lat) columns.
        building_footprint_gdf["x"] = building_footprint_gdf.centroid_geometry.map(
            lambda p: p.x
        )
        building_footprint_gdf["y"] = building_footprint_gdf.centroid_geometry.map(
            lambda p: p.y
        )
        return building_footprint_gdf
======
reiter
======
Wrapper for Python iterators and iterables that implements a list-like random-access interface by caching retrieved items for later reuse.
|pypi| |readthedocs| |actions| |coveralls|
.. |pypi| image:: https://badge.fury.io/py/reiter.svg
:target: https://badge.fury.io/py/reiter
:alt: PyPI version and link.
.. |readthedocs| image:: https://readthedocs.org/projects/reiter/badge/?version=latest
:target: https://reiter.readthedocs.io/en/latest/?badge=latest
:alt: Read the Docs documentation status.
.. |actions| image:: https://github.com/lapets/reiter/workflows/lint-test-cover-docs/badge.svg
:target: https://github.com/lapets/reiter/actions/workflows/lint-test-cover-docs.yml
:alt: GitHub Actions status.
.. |coveralls| image:: https://coveralls.io/repos/github/lapets/reiter/badge.svg?branch=main
:target: https://coveralls.io/github/lapets/reiter?branch=main
:alt: Coveralls test coverage summary.
Installation and Usage
----------------------
This library is available as a `package on PyPI <https://pypi.org/project/reiter>`__:
.. code-block:: bash
python -m pip install reiter
The library can be imported in the usual way:
.. code-block:: python
import reiter
from reiter import reiter
Examples
^^^^^^^^
.. |reiter| replace:: ``reiter``
.. _reiter: https://reiter.readthedocs.io/en/0.8.0/_source/reiter.html#reiter.reiter.reiter
This library makes it possible to wrap any `iterator <https://docs.python.org/3/glossary.html#term-iterator>`__ or `iterable <https://docs.python.org/3/glossary.html#term-iterable>`__ object within an interface that enables repeated iteration over -- and random access by index of -- the items contained within that object. A |reiter|_ instance yields the same sequence of items as the wrapped iterator or iterable:
.. code-block:: python
>>> from reiter import reiter
>>> xs = iter([1, 2, 3])
>>> ys = reiter(xs)
>>> list(ys)
[1, 2, 3]
.. |iter| replace:: ``iter``
.. _iter: https://docs.python.org/3/library/functions.html#iter
Unlike iterators and some iterable objects (including those that are built-in and those that are user-defined), an instance of the |reiter|_ class *always* allows iteration over its items any number of times. More specifically, every invocation of |iter|_ (explicit or implicit) returns an iterator that begins iteration from the first item found in the originally wrapped iterator or iterable:
.. code-block:: python
>>> list(iter(ys)), list(iter(ys))
([1, 2, 3], [1, 2, 3])
>>> list(ys), list(ys)
([1, 2, 3], [1, 2, 3])
Furthermore, it is also possible to access elements by their index:
.. code-block:: python
>>> xs = iter([1, 2, 3])
>>> ys = reiter(xs)
>>> ys[0], ys[1], ys[2]
(1, 2, 3)
.. |next| replace:: ``next``
.. _next: https://docs.python.org/3/library/functions.html#next
.. |StopIteration| replace:: ``StopIteration``
.. _StopIteration: https://docs.python.org/3/library/exceptions.html#StopIteration
The built-in Python |next|_ function is also supported, and any attempt to retrieve an item once the sequence of items is exhausted raises the |StopIteration|_ exception in the usual manner:
.. code-block:: python
>>> xs = reiter(iter([1, 2, 3]))
>>> next(xs), next(xs), next(xs)
(1, 2, 3)
>>> next(xs)
Traceback (most recent call last):
...
StopIteration
However, all items yielded during iteration can be accessed by their index, and it is also possible to iterate over those items again:
.. code-block:: python
>>> xs[0], xs[1], xs[2]
(1, 2, 3)
>>> [x for x in xs]
[1, 2, 3]
.. |reiter___getitem__| replace:: ``__getitem__``
.. _reiter___getitem__: https://reiter.readthedocs.io/en/0.8.0/_source/reiter.html#reiter.reiter.reiter.__getitem__
Retrieval of yielded items using slice notation is also supported via the |reiter___getitem__|_ method:
.. code-block:: python
>>> xs = reiter(iter([1, 2, 3]))
>>> xs[0:2]
[1, 2]
.. |reiter_has| replace:: ``has``
.. _reiter_has: https://reiter.readthedocs.io/en/0.8.0/_source/reiter.html#reiter.reiter.reiter.has
.. |reiter_length| replace:: ``length``
.. _reiter_length: https://reiter.readthedocs.io/en/0.8.0/_source/reiter.html#reiter.reiter.reiter.length
Instances of |reiter|_ support additional inspection methods, as well. For example, the |reiter_has|_ method returns a boolean value indicating whether a next item is available and the |reiter_length|_ method returns the length of the sequence of items emitted by the instance (once no more items can be emitted):
.. code-block:: python
>>> xs = reiter(iter([1, 2, 3]))
>>> xs.has(), xs.has(), xs.has(), xs.has()
(True, True, True, False)
>>> xs.length()
3
Development
-----------
All installation and development dependencies are fully specified in ``pyproject.toml``. The ``project.optional-dependencies`` object is used to `specify optional requirements <https://peps.python.org/pep-0621>`__ for various development tasks. This makes it possible to specify additional options (such as ``docs``, ``lint``, and so on) when performing installation using `pip <https://pypi.org/project/pip>`__:
.. code-block:: bash
python -m pip install .[docs,lint]
Documentation
^^^^^^^^^^^^^
The documentation can be generated automatically from the source files using `Sphinx <https://www.sphinx-doc.org>`__:
.. code-block:: bash
python -m pip install .[docs]
cd docs
sphinx-apidoc -f -E --templatedir=_templates -o _source .. && make html
Testing and Conventions
^^^^^^^^^^^^^^^^^^^^^^^
All unit tests are executed and their coverage is measured when using `pytest <https://docs.pytest.org>`__ (see the ``pyproject.toml`` file for configuration details):
.. code-block:: bash
python -m pip install .[test]
python -m pytest
Alternatively, all unit tests are included in the module itself and can be executed using `doctest <https://docs.python.org/3/library/doctest.html>`__:
.. code-block:: bash
python src/reiter/reiter.py -v
Style conventions are enforced using `Pylint <https://pylint.readthedocs.io>`__:
.. code-block:: bash
python -m pip install .[lint]
python -m pylint src/reiter
Contributions
^^^^^^^^^^^^^
In order to contribute to the source code, open an issue or submit a pull request on the `GitHub page <https://github.com/lapets/reiter>`__ for this library.
Versioning
^^^^^^^^^^
The version number format for this library and the changes to the library associated with version number increments conform with `Semantic Versioning 2.0.0 <https://semver.org/#semantic-versioning-200>`__.
Publishing
^^^^^^^^^^
This library can be published as a `package on PyPI <https://pypi.org/project/reiter>`__ by a package maintainer. First, install the dependencies required for packaging and publishing:
.. code-block:: bash
python -m pip install .[publish]
Ensure that the correct version number appears in ``pyproject.toml``, and that any links in this README document to the Read the Docs documentation of this package (or its dependencies) have appropriate version numbers. Also ensure that the Read the Docs project for this library has an `automation rule <https://docs.readthedocs.io/en/stable/automation-rules.html>`__ that activates and sets as the default all tagged versions. Create and push a tag for this version (replacing ``?.?.?`` with the version number):
.. code-block:: bash
git tag ?.?.?
git push origin ?.?.?
Remove any old build/distribution files. Then, package the source into a distribution archive:
.. code-block:: bash
rm -rf build dist src/*.egg-info
python -m build --sdist --wheel .
Finally, upload the package distribution archive to `PyPI <https://pypi.org>`__:
.. code-block:: bash
python -m twine upload dist/*
| /reiter-0.8.0.tar.gz/reiter-0.8.0/README.rst | 0.928749 | 0.752695 | README.rst | pypi |
# CLS REIXS Analysis
This is a library to analyse, plot, and export REIXS beamline data. The package is meant to provide a framework to load data into jupyter and enable data interaction.
Further [beamline information](https://reixs.lightsource.ca/) is available on the Website of the Canadian Light Source.
## Installation
Install the package from PyPi with the pip package manager. This is the recommended way to obtain a copy for your local machine and will install all required dependencies.
```
$ pip install reixs
```
You will also need [Jupyter Notebook](https://github.com/jupyter) together with Python 3 on your local machine.
In case that certain widgets aren't rendered properly, make sure to enable the appropriate jupyter extensions
```
$ jupyter nbextension enable --py widgetsnbextension
```
## Running
Launch your local jupyter installation with
```
$ jupyter notebook
```
## Examples
### Load the required module
Before you start, you will need to import the required reixs package, enable bokeh plotting, and set the base directory.
```
## Define base directory
basedir = "/home/braun/ownCloud/Beamtime/example_data/"
## Set up the necessary inputs
from reixs.LoadData import *
from bokeh.io import show, output_notebook
output_notebook(hide_banner=True)
```
### 1d plots
#### General Loader1d
```
sca = Load1d()
sca.load(basedir,'FileName.dat','x_stream','y_stream',1,2,3,4) # Loads multiple scans individually
sca.add(basedir,'FileName.dat','x_stream','y_stream',1,2,3,4) # Adds multiple scans
sca.subtract(basedir,'FileName.dat','x_stream','y_stream',1,2,3,4,norm=False) # Subtracts scans from the first scan
sca.xlim(lower_lim,upper_lim) # Sets the horizontal axis plot region
sca.ylim(lower_lim,upper_lim) # Sets the vertical axis plot region
sca.plot_legend("pos string as per bokeh") # Determines a specific legend position
sca.vline(position) # Draws a vertical line
sca.hline(position) # Draws a horizontal line
sca.label(pos_x,pos_y,'Text') # Adds a label to the plot
sca.plot() # Plots the defined object
sca.exporter() # Exports the data by calling an exporter widget
```
0. Create "Loader" object
1. Specify the variable for the base directory (basedir)
2. Enter the file name of the scan to analyse ('FileName.dat')
3. Options for **x_stream** quantities include:
- All quantities in the header file
- _Mono Energy_ for the excitation energy
- _MCP Energy_ (uncalibrated)
- _SDD Energy_ (uncalibrated)
- _XEOL Energy_ (uncalibrated, actually the wavelength scale)
- _Points_ (by index)
4. Options for **y_stream** quantities include:
- All quantities in the header file
- _TEY_ (Total Electron Yield: sample normalized by mesh)
- _TFY_ (Total Fluorescence Yield, normalized by mesh)
- _PFY_ and _iPFY_ (Partial Fluorescence Yield and Inverse Partial Fluorescence Yield, both normalized by mesh)
Specify ROI with brackets, either by XAS edge or energy:
e.g. _PFY[O]_ for PFY at the O K edge
e.g. _PFY[490:560]_ for PFY from 490eV to 560eV
- _specPFY_ (spectrometer PFY, normalized by mesh)
specify energy range
e.g. specPFY[500:520]
- _XES_ and _rXES_ (X-Ray emission and resonant x-ray emission at selected energies from the spectrometer MCP data)
e.g. rXES[560:565]
- _XRF_ and _rXRF_ (X-Ray fluorescence and resonant x-ray fluorescence at selected energies from the SDD data)
e.g. rXRF[550:570]
- _XEOL_ and _rXEOL_ (XEOL data from the optical spectrometer)
- _POY_ and _TOY_ (Partial optical yield and total optical yield, normalized by mesh)
e.g. POY[300:750]
- _EY_ or _Sample_ (Sample current, not normalized by mesh)
- _Mesh_ (Mesh current)
- _ET_ (Energy Transfer data, integrates over energy loss ROI and probes constant final states, sometimes referred to as CET scan)
specify energy transfer region
e.g. ET[-2:5] to probe mostly scattering close to the elastic line
- _rLOSS_ (Resonantly excited emission data on energy loss scale, integrates over incident energy ROIS and probes constant intermediate states, sometimes referred to as CIE scan)
specify incident energy region
e.g. rLOSS[620:640]
5. List all scans to analyse (comma-separated)
6. Set optional flags. Options include:
- _norm_ (Normalizes to [0,1])
- _xcoffset_ (Defines a constant shift in the x-stream)
- _xoffset_ (Takes a list of tuples and defines a polynomial fit of the x-stream)
- _ycoffset_ (Defines a constant shift in the y-stream)
- _yoffset_ (Takes a list of tuples and defines a polynomial fit of the y-stream)
e.g. offset = [(100,102),(110,112),(120,121)]
- _background_ (Subtracts a XEOL background from XEOL scans)
Set to True, uses the getXEOLback function with the background data stored (only supported with HDF5)
Specify scan number, subtracts the XEOL scan taken at this particular scan
- _energyloss_ (Transfers the resultant MCP scale to energy loss.
Set to True, then takes the mean of the mono energy array.
Specify a float with the incident photon energy.)
- _grid_x_ (Takes a list with three arguments to apply 1d interpolation gridding)
e.g. grid_x = [Start Energy, Stop Energy, Delta]
- _savgol_ (Takes a list with two or three arguments to apply data smoothing and derivatives)
e.g. savgol = [Window length, Polynomial order, derivative] as specified in the scipy Savitzky-Golay filter
- _binsize_ (int, allows to perform data binning to improve Signal-to-Noise)
- _legend_items_ (dict={scan_number:"name"}, overwrites generic legend names; works for the _load_ method)
- _legend_item_ (str, overwrites generic legend name in the _add_/_subtract_ method)
#### Absorption Scans
```
xas = XASLoader()
xas.load(basedir,'Plate2a.dat','TEY',1,4,6)
xas.load(basedir,'Plate2a.dat','PFY[O]',1,4)
xas.add(basedir,'Plate2a.dat','PFY[500:520]',1,4)
xas.subtract(basedir,'Plate2a.dat','specPFY[500:520]',1,4,6)
xas.plot()
xas.exporter()
```
#### Emission Scans (MCP)
```
xes = XESLoader()
# Options: XES, rXES
xes.load(basedir,'Plate2a.dat','XES',3,xoffset=[(515,520),(520,525),(530,535)])
xes.load(basedir,'Plate2a.dat','rXES[520:560]',4)
xes.add(basedir,'Plate2a.dat','XES',1,4)
xes.subtract(basedir,'Plate2a.dat','XES',1,4)
xes.plot()
xes.exporter()
```
#### XRF Scans (SDD)
```
xrf = XRFLoader()
# Options XRF,rXRF
xrf.load(basedir,'Plate2a.dat','XRF',3)
xrf.load(basedir,'Plate2a.dat','rXRF[520:560]',4)
xrf.add(basedir,'Plate2a.dat','XRF',1,4,)
xrf.subtract(basedir,'Plate2a.dat','XRF',1,4)
xrf.plot()
xrf.exporter()
```
#### XEOL Scans (Optical Spectrometer)
```
xeol = XEOLLoader()
#Options: XEOL, rXEOL
xeol.load(basedir,'RIXS_ES_QA.dat','XEOL',1,2,3,4,background=3)
xeol.load(basedir,'RIXS_ES_QA.dat','XEOL',1,2,3,4,background=True)
xeol.plot()
```
### 2d Images
#### General loader for MCA detector data
Note: Can only load one scan at a time!
```
load2d = Load2d()
load2d.load(basedir,'Filename.dat','x_stream','y_stream','detector',1)
load2d.plot()
load2d.exporter()
```
0. Create "Loader" object
1. Specify the variable for the base directory (basedir)
2. Enter the file name of the scan to analyse ('FileName.dat')
3. Options for **x_stream** quantities include:
- All quantities in the header file
- _Mono Energy_ for the excitation energy
4. Options for **y_stream** quantities include:
- _SDD Energy_ (Energy scale of the SDD detector)
- _MCP Energy_ (Energy scale of the MCP detector)
- _XEOL Energy_ (Wavelength scale of the XEOL optical spectrometer)
5. Options for **detector** quantities include:
- _SDD_ (SDD detector MCA)
- _MCP_ (MCP detector MCA)
- _XEOL_ (XEOL optical spectrometer MCA)
6. List all scans to analyse (comma-separated)
7. Set optional flags. Options include:
- _norm_ (Normalizes to [0,1])
- _xcoffset_ (Defines a constant shift in the x-stream)
- _xoffset_ (Takes a list of tuples and defines a polynomial fit of the x-stream)
- _ycoffset_ (Defines a constant shift in the y-stream)
- _yoffset_ (Takes a list of tuples and defines a polynomial fit of the y-stream)
e.g. offset = [(100,102),(110,112),(120,121)]
- _background_ (Subtracts a XEOL background from XEOL scans)
Set to True, uses the getXEOLback function with the background data stored (only supported with HDF5)
Specify scan number, subtracts the XEOL scan taken at this particular scan
- _energyloss_ (Transfers the excitation-emission map to energy loss scale)
- _grid_x_ (Takes a list with three arguments to apply 1d interpolation gridding)
e.g. grid_x = [Start Energy, Stop Energy, Delta]
#### EEMs (normalized by mesh current, special version of the general 2d image loader)
Note: Can only load one scan at a time!
```
eems = EEMsLoader()
eems.load(basedir,'Plate2a.dat','SDD',1)
eems.load(basedir,'Plate2a.dat','MCP',1)
eems.load(basedir,'RIXS_ES_QA.dat','XEOL',2,background=3)
eems.plot()
eems.exporter()
```
### Mesh Scans (Plots a 2d histogram)
```
mesh = LoadMesh()
mesh.load(basedir,'Filename.txt','x_stream','y_stream','z_stream',24)
mesh.plot()
mesh.exporter()
```
0. Create "Loader" object
1. Specify the variable for the base directory (basedir)
2. Enter the file name of the scan to analyse ('FileName.dat')
3. Options for **x_stream** quantities include:
- All quantities in the header file
- _Mono Energy_ for the excitation energy
- _SDD Energy_ (Energy scale of the SDD detector)
- _MCP Energy_ (Energy scale of the MCP detector)
- _XEOL Energy_ (Wavelength scale of the XEOL optical spectrometer)
4. Options for **y_stream** quantities include:
- All quantities in the header file
- _Mono Energy_ for the excitation energy
- _SDD Energy_ (Energy scale of the SDD detector)
- _MCP Energy_ (Energy scale of the MCP detector)
- _XEOL Energy_ (Wavelength scale of the XEOL optical spectrometer)
5. Options for **z_stream** quantities include:
- All quantities in the header file
- All special quantities as specified for the Load1d() function
6. List all scans to analyse (comma-separated)
7. Set optional flags. Options include:
- _norm_ (Normalizes to [0,1])
- _xcoffset_ (Defines a constant shift in the x-stream)
- _xoffset_ (Takes a list of tuples and defines a polynomial fit of the x-stream)
- _ycoffset_ (Defines a constant shift in the y-stream)
- _yoffset_ (Takes a list of tuples and defines a polynomial fit of the y-stream)
e.g. offset = [(100,102),(110,112),(120,121)]
- _background_ (Subtracts a XEOL background from XEOL scans)
Set to True, uses the getXEOLback function with the background data stored (only supported with HDF5)
Specify scan number, subtracts the XEOL scan taken at this particular scan
| /reixs-0.6.4.tar.gz/reixs-0.6.4/README.md | 0.643889 | 0.963334 | README.md | pypi |
from fuzzywuzzy import fuzz
import numpy as np
from gensim.parsing.preprocessing import preprocess_string, strip_tags
from gensim.utils import deaccent
from .ML.config import Config as mlconfig
class SearchResult:
    """Wraps one candidate match from a search provider and enriches it with
    similarity features and a classifier score against the query article."""

    def __init__(self, match_article: dict, query_article: dict, clf, rank: int):
        """
        :param match_article: Article details from search provider (e.g. CrossRef)
        :param query_article: The input article (e.g. from the user)
        :param clf: The classifier to score how similar the articles are.
        :param rank: Position of this candidate in the provider's result list.
        """
        self.query_article = query_article
        self.match_article = match_article
        self.clf = clf
        self.rank = rank

    def to_dict(self) -> dict:
        """Enrich the provider record with matching features and a classifier score.

        Note: mutates and returns ``self.match_article`` in place.
        """
        record = self.match_article
        # Number of authors on the query article; this might affect model confidence.
        record['n_auths_query'] = len(self.query_article['authors'].split(','))
        record['authors_list'] = self.authors_list(record['author'])
        name_features = self.match_names(
            query_authors=self.query_article['authors'],
            match_authors=record['authors_list'],
        )
        record.update(name_features)
        if 'title' in record:
            record['similarity'] = fuzz.ratio(
                self.query_article['manuscript_title'], record['title'])
        else:
            record['similarity'] = 0
        record['classifier_score'] = self.classify(record)
        record['rank'] = self.rank
        return record

    @staticmethod
    def authors_list(authors: list) -> list:
        """Flatten provider author dicts into '<given>+<family>' strings."""
        return [author.get('given', '') + '+' + author.get('family', '')
                for author in authors]

    def pre_process_name(self, name):
        """Normalise a '<given>+<family>' name string for comparison.

        Removes accents, lowercases, strips punctuation and spaces, and
        reduces the name to a ``(first_initial, last_name)`` tuple. Anything
        that is not a non-empty string is returned unchanged.
        """
        if type(name) is str and len(name) > 0:
            name = deaccent(name).lower()
            first = name[0]
            last = name[name.rfind('+') + 1:] if '+' in name else name[1:]
            for ch in ('.', '-', '\'', ' '):
                first = first.replace(ch, '')
                last = last.replace(ch, '')
            initial = first[0] if len(first) > 0 else ''
            name = (initial, last)
        return name

    def match_names(self, match_authors, query_authors):
        """Compute binary author-overlap features.

        Checks whether one (and whether all) of the query article's authors
        appear among the match article's authors.
        """
        query_names = [self.pre_process_name(raw.strip())
                       for raw in query_authors.split(',')]
        candidate_names = {self.pre_process_name(raw) for raw in match_authors}
        return {
            'author_match_one': int(any(q in candidate_names for q in query_names)),
            'author_match_all': int(all(q in candidate_names for q in query_names)),
        }

    def classify(self, match_article: dict):
        """Score the candidate with the trained classifier.

        Returns the predicted probability of the positive ("same article") class.
        """
        predictor_cols = mlconfig.predictor_cols
        X = np.array([float(match_article[col]) for col in predictor_cols])
        probs = self.clf.predict_proba(X.reshape(1, len(predictor_cols)))
        return probs[0][1]
import pandas as pd
class Result:
    """Merges an original (rejected) submission with the winning match record
    from a search provider, producing a single flat reporting dict."""

    def __init__(self, original, winner):
        """
        :param original: dict for the submitted manuscript; must contain
            "submission_date" and "decision_date".
        :param winner: the best-matching works record (e.g. from CrossRef).
        """
        self.original = original
        self.winner = winner

    def to_dict(self):
        """Flatten match details into the original record and return it.

        Note: mutates and returns ``self.original`` in place.
        """
        earliest_date = self.earliest_date()
        n_days = self.n_days_for_decision(earliest_date, self.original['decision_date'])
        # title should be a list containing the title as a string,
        # however it sometimes appears as just a string. On the off-chance
        # that the inconsistency extends to other structures, explicitly
        # check for list and string and, if it's anything else, fall back to ''.
        self.title = self.winner.get('full_title', self.winner.get('title', ''))
        # Guard against an empty title list, which previously raised IndexError.
        self.title = self.title[0].strip() if type(self.title) == list and self.title else self.title
        self.title = self.title.strip() if type(self.title) == str else ''
        result = {
            "submission_date": self.original["submission_date"].strftime("%Y-%m-%d"),
            "decision_date": self.decision_date(),
            "match_doi": self.winner['DOI'] if 'DOI' in self.winner else '',
            "match_type": self.winner['type'].strip() if 'type' in self.winner else '',
            "match_title": self.title,
            "match_authors": ", ".join(self.winner['authors_list']),
            "match_publisher": self.winner["publisher"],
            "match_journal": self.journal_title(),
            "match_pub_date": '-'.join(str(x) for x in self.winner['issued']['date-parts'][0]),
            "match_earliest_date": earliest_date.strftime("%Y-%m-%d"),
            "match_similarity": self.winner["similarity"],
            "match_one": self.winner['author_match_one'],
            "match_all": self.winner['author_match_all'],
            "match_crossref_score": self.winner['score'],
            "match_crossref_cites": self.winner['is-referenced-by-count'],
            "match_rank": self.winner['rank'],
            # n_days is '' when there is no decision date, a Timedelta otherwise.
            "match_total_decision_days": n_days.days if hasattr(n_days, 'days') else 0
        }
        self.original.update(result)
        return self.original

    def decision_date(self):
        """Return the decision date as 'YYYY-MM-DD', or '' if unavailable."""
        if isinstance(self.original["decision_date"], pd.Timestamp):
            return self.original["decision_date"].strftime("%Y-%m-%d")
        return ""

    def journal_title(self):
        """Return the stripped journal title, or '' if missing/malformed."""
        j_title = self.winner['container-title']
        if type(j_title) == list and len(j_title) > 0:
            j_title_str = self.winner['container-title'][0].strip()
        else:
            j_title_str = ''
        return j_title_str

    def earliest_date(self) -> pd.Timestamp:
        """Given a crossref works record, find the earliest date.

        Considers the 'issued', 'created', 'indexed' and 'deposited'
        timestamps. Raises ValueError if none of them is present.
        """
        tags = ['issued', 'created', 'indexed', 'deposited']
        stamps = []
        for tag in tags:
            if tag in self.winner and 'timestamp' in self.winner[tag]:
                stamps.append(int(self.winner[tag]['timestamp']))
        t_stamp = min(stamps)
        return pd.to_datetime(t_stamp, unit='ms', utc=True)

    @staticmethod
    def n_days_for_decision(earliest_date, decision_date):
        """Time between the decision and earliest publication date.

        Returns a Timedelta, or '' when no valid decision date is available.
        """
        return earliest_date - decision_date if isinstance(decision_date, pd.Timestamp) else ''

    @staticmethod
    def journal_acronym(journal_id):
        """Return the part of a journal id before the first hyphen.

        Fixed: an id without a hyphen is now returned whole. Previously
        ``str.find`` returned -1, which silently truncated the last character.
        """
        acronym, _, _ = journal_id.partition('-')
        return acronym
from .CrossRefWorksRecord import CrossRefWorksRecord
from fuzzywuzzy import fuzz
class TrainingRow:
    """Builds one training example from a CrossRef works record and the
    query article it was retrieved for."""

    def __init__(self, works_record: dict, query_article: dict):
        """
        :param works_record: Article works_record from search provider (e.g. CrossRef)
        :param query_article: The input article (e.g. from the user)
        """
        self.works_record = CrossRefWorksRecord(works_record, limit_cols=True).works_record
        self.query_article = query_article

    def to_dict(self) -> dict:
        """Assemble the feature/metadata row for this (query, match) pair.

        Note: mutates and returns ``self.works_record`` in place.
        """
        row = self.works_record
        crossref_title = self.works_record.get('full_title', '')
        row['authors_list'] = self.authors_list(self.works_record.get('author', list()))
        row.update(self.match_names(query_authors=self.query_article['authors'],
                                    authors=self.works_record['authors_list']))
        row['similarity'] = fuzz.ratio(self.query_article['title'], crossref_title)
        row['query_id'] = 'id:' + self.query_article['id']
        row['query_title'] = self.query_article['title']
        row['query_authors'] = self.query_article['authors']
        row['query_created'] = self.query_article['created']
        row['query_doi'] = self.query_article.get('doi', '')  # this should always be found
        row['match_title'] = crossref_title
        return row

    @staticmethod
    def authors_list(authors: list) -> list:
        """Flatten provider author dicts into '<given>+<family>' strings."""
        return [author.get('given', '') + '+' + author.get('family', '')
                for author in authors]

    @staticmethod
    def match_names(query_authors, authors):
        """Binary overlap features between query and match author names.

        Each name is reduced to a (first_initial, last_name) pair; the query
        string is assumed to be ', '-separated '<given>+<family>' entries.
        """
        query_pairs = [(name[0], name[name.rfind('+') + 1:])
                       for name in query_authors.split(', ')]
        match_pairs = [(name[0], name[name.rfind('+') + 1:])
                       for name in authors]
        return {
            'author_match_one': int(any(pair in match_pairs for pair in query_pairs)),
            'author_match_all': int(all(pair in match_pairs for pair in query_pairs)),
        }
import pandas as pd
from .ArXivAuthorNames import ArXivAuthorNames
from .ArXivManuscriptIdRaw import ArXivManuscriptIdRaw
"""
This class simply takes an arXiv OAI-PMH record and converts it into a
format expected by the CrossRef class in ..src
This way, we can perform searches through the same infrastructure as the
tracker normally uses.
Note that, when we build training data, we use a different class to
handle arxiv oai-pmh records.
"""
class ArXivArticleItem:
    """Converts an arXiv OAI-PMH record into the article-item dict format
    expected by the CrossRef search infrastructure."""

    def __init__(self, items):
        """
        :param items: dict with at least:
            id: str arXiv identifier.
            created: str creation date (most date formats will work).
            title: str manuscript title.
            authors: str author names.
        :raises ValueError: if a required field is missing/empty, or if
            "created" is not a valid date.
        """
        created = pd.Timestamp(self.__required(items, 'created'))
        identifier = self.__required(items, 'id')
        raw_identifier = ArXivManuscriptIdRaw(identifier).id()
        if not isinstance(created, pd.Timestamp):
            raise ValueError('"created" needs to be a valid date')
        # The arXiv creation date stands in for both submission and decision
        # dates, since a preprint has no editorial decision.
        self.items = {
            'manuscript_id': identifier,
            'raw_manuscript_id': raw_identifier,
            'manuscript_title': self.__required(items, 'title'),
            'submission_date': created,
            'decision_date': created,
            'authors': items['authors'],
            'text_sub_date': created.strftime("%Y-%m-%d"),
            'final_decision': created,
        }

    def to_dict(self):
        """Return the converted article-item dict."""
        return self.items

    @staticmethod
    def __required(items, name):
        """Return items[name], raising ValueError when the value is falsy."""
        if not items[name]:
            raise ValueError('field "' + name + '" required')
        return items[name]
[](https://github.com/RedisJSON/redisjson-py/blob/master/LICENSE)
[](https://circleci.com/gh/RedisJSON/redisjson-py/tree/master)
[](https://badge.fury.io/py/rejson)
[](https://github.com/RedisJSON/redisjson-py)
[](https://github.com/RedisJSON/redisjson-py/releases/latest)
[](https://coveralls.io/github/RedisLabs/rejson-py?branch=master)
# RedisJSON Python Client
[](https://groups.google.com/forum/#!forum/redisjson)
[](https://gitter.im/RedisLabs/RedisJSON?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
rejson-py is a package that allows storing, updating and querying objects as
JSON documents in a [Redis](https://redis.io) database that is extended with the
[ReJSON module](https://github.com/redislabsmodules/rejson). The package extends
[redis-py](https://github.com/andymccurdy/redis-py)'s interface with ReJSON's
API, and performs on-the-fly serialization/deserialization of objects to/from
JSON.
## Installation
```bash
$ pip install rejson
```
## Usage example
```python
from rejson import Client, Path
rj = Client(host='localhost', port=6379, decode_responses=True)
# Set the key `obj` to some object
obj = {
'answer': 42,
'arr': [None, True, 3.14],
'truth': {
'coord': 'out there'
}
}
rj.jsonset('obj', Path.rootPath(), obj)
# Get something
print 'Is there anybody... {}?'.format(
rj.jsonget('obj', Path('.truth.coord'))
)
# Delete something (or perhaps nothing), append something and pop it
rj.jsondel('obj', Path('.arr[0]'))
rj.jsonarrappend('obj', Path('.arr'), 'something')
print '{} popped!'.format(rj.jsonarrpop('obj', Path('.arr')))
# Update something else
rj.jsonset('obj', Path('.answer'), 2.17)
# And use just like the regular redis-py client
jp = rj.pipeline()
jp.set('foo', 'bar')
jp.jsonset('baz', Path.rootPath(), 'qaz')
jp.execute()
# If you use non-ascii character in your JSON data, you can add the no_escape flag to JSON.GET command
obj_non_ascii = {
'non_ascii_string': 'hyvää'
}
rj.jsonset('non-ascii', Path.rootPath(), obj_non_ascii)
print '{} is a non-ascii string'.format(rj.jsonget('non-ascii', Path('.non_ascii_string'), no_escape=True))
```
## Encoding/Decoding
rejson-py uses Python's [json](https://docs.python.org/2/library/json.html).
The client can be set to use custom encoders/decoders at creation, or by calling
explicitly the [setEncoder()](./API.md#setencoder) and
[setDecoder()](./API.md#setencoder) methods, respectively.
The following shows how to use this for a custom class that's stored as
a JSON string for example:
```python
from json import JSONEncoder, JSONDecoder
from rejson import Client
class CustomClass(object):
"Some non-JSON-serializable"
def __init__(self, s=None):
if s is not None:
# deserialize the instance from the serialization
if s.startswith('CustomClass:'):
...
else:
raise Exception('unknown format')
else:
# initialize the instance
...
def __str__(self):
_str = 'CustomClass:'
# append the instance's state to the serialization
...
return _str
...
class CustomEncoder(JSONEncoder):
"A custom encoder for the custom class"
def default(self, obj):
if isinstance(obj, CustomClass):
return str(obj)
return json.JSONEncoder.encode(self, obj)
class CustomDecoder(JSONDecoder):
"A custom decoder for the custom class"
def decode(self, obj):
d = json.JSONDecoder.decode(self, obj)
if isinstance(d, basestring) and d.startswith('CustomClass:'):
return CustomClass(d)
return d
# Create a new instance of CustomClass
obj = CustomClass()
# Create a new client with the custom encoder and decoder
rj = Client(encoder=CustomEncoder(), decoder=CustomDecoder())
# Store the object
rj.jsonset('custom', Path.rootPath(), obj)
# Retrieve it
obj = rj.jsonget('custom', Path.rootPath())
```
## API
As rejson-py exposes the same methods as redis-py, it can be used as a drop-in
replacement. On top of Redis' core commands, the client also adds ReJSON's
vocabulary and a couple of helper methods. These are documented in the
[API.md](API.md) file, which can be generated by running:
```bash
$ python gendoc rejson > API.md
```
For complete documentation about ReJSON's commands, refer to [ReJSON's website](http://rejson.io).
## License
[BSD 2-Clause](https://github.com/RedisLabs/rejson-py/blob/master/LICENSE)
| /rejson-0.5.4.tar.gz/rejson-0.5.4/README.md | 0.679604 | 0.900923 | README.md | pypi |
from typing import Dict, List, Optional
import reka.api.driver as driver
from reka.errors import InvalidConversationError
def chat(
    human: str,
    conversation_history: Optional[List[Dict[str, str]]] = None,
    retrieval_dataset: Optional[str] = None,
    model_name: str = "text-phoenix-v1",
    request_output_len: int = 256,
    temperature: float = 1.0,
    random_seed: Optional[int] = None,
    runtime_top_k: int = 1024,
    runtime_top_p: float = 0.95,
    repetition_penalty: float = 1.0,
    len_penalty: float = 1.0,
    stop_tokens: Optional[List[str]] = None,
    assistant_start_text: Optional[str] = None,
) -> Dict[str, str]:
    """Send the latest human message (plus any prior turns) to the chat endpoint.

    Example usage:

    ```python
    import reka
    reka.API_KEY = "APIKEY"

    history = [
        {"type": "human", "text": "Hi, my name is John."},
        {"type": "model", "text": "Hi, I'm Reka's assistant."},
    ]
    response = reka.chat(human="What was my name?", conversation_history=history)
    print(response)  # {"type": "model", "text": "Your name is John.\\n\\n"}
    ```

    Args:
        human: latest message from the human.
        conversation_history: prior turns, each a dict with a "type" key
            ("human" or "model") and a "text" key. Defaults to an empty history.
        retrieval_dataset: previously adapted dataset to do retrieval on.
        model_name: name of the model. Currently only supports text-phoenix-v1.
        request_output_len: completion length in tokens.
        temperature: softmax temperature, higher is more diverse.
        random_seed: seed to obtain different results.
        runtime_top_k: keep only the k top tokens when sampling.
        runtime_top_p: keep only the top p quantile when sampling.
        repetition_penalty: penalize repetitions; 1 means no penalty.
        len_penalty: penalize short answers; 1 means no penalty.
        stop_tokens: optional list of words on which to stop generation.
        assistant_start_text: optional text the response should start with.

    Raises:
        InvalidConversationError: if the conversation history is not valid.

    Returns:
        A dict `{"type": "model", "text": <response from the model>}`.
    """
    turns = list(conversation_history or [])
    turns.append({"type": "human", "text": human})
    _check_conversation_history(turns, is_vlm=False)
    payload = {
        "conversation_history": turns,
        "retrieval_dataset": retrieval_dataset,
        "model_name": model_name,
        "request_output_len": request_output_len,
        "temperature": temperature,
        "random_seed": random_seed,
        "runtime_top_k": runtime_top_k,
        "runtime_top_p": runtime_top_p,
        "repetition_penalty": repetition_penalty,
        "len_penalty": len_penalty,
        "stop_tokens": stop_tokens or [],
        "assistant_start_text": assistant_start_text,
    }
    return driver.make_request(
        method="post",
        endpoint="chat",
        headers={"Content-Type": "application/json"},
        json=payload,
    )
def vlm_chat(
    conversation_history: List[Dict[str, str]],
    retrieval_dataset: Optional[str] = None,
    model_name: str = "default_vlm",
    request_output_len: int = 256,
    temperature: float = 1.0,
    random_seed: Optional[int] = None,
    runtime_top_k: int = 1024,
    runtime_top_p: float = 0.95,
    repetition_penalty: float = 1.0,
    len_penalty: float = 1.0,
    stop_tokens: Optional[List[str]] = None,
    assistant_start_text: Optional[str] = None,
) -> Dict[str, str]:
    """Send a multimodal conversation to the VLM chat endpoint.

    Example usage:

    ```python
    import reka
    reka.API_KEY = "APIKEY"

    history = [
        {"type": "human", "text": "What's in the photo?",
         "image_url": "http://images.cocodataset.org/test2017/000000557146.jpg"},
    ]
    response = reka.vlm_chat(conversation_history=history)
    print(response)  # {"type": "model", "text": "A cat laying on the ground with a toy."}
    ```

    Args:
        conversation_history: turns as dicts with "type" ("human" or "model")
            and "text" keys, alternating and ending with a human turn. The
            first turn must be a human turn that also has an "image_url" key.
        retrieval_dataset: previously adapted dataset to do retrieval on.
        model_name: name of the model.
        request_output_len: completion length in tokens.
        temperature: softmax temperature, higher is more diverse.
        random_seed: seed to obtain different results.
        runtime_top_k: keep only the k top tokens when sampling.
        runtime_top_p: keep only the top p quantile when sampling.
        repetition_penalty: penalize repetitions; 1 means no penalty.
        len_penalty: penalize short answers; 1 means no penalty.
        stop_tokens: optional list of words on which to stop generation.
        assistant_start_text: optional text the response should start with.

    Raises:
        InvalidConversationError: if the conversation history is not valid.

    Returns:
        A dict `{"type": "model", "text": <response from the model>}`.
    """
    _check_conversation_history(conversation_history, is_vlm=True)
    payload = {
        "conversation_history": conversation_history,
        "retrieval_dataset": retrieval_dataset,
        "model_name": model_name,
        "request_output_len": request_output_len,
        "temperature": temperature,
        "random_seed": random_seed,
        "runtime_top_k": runtime_top_k,
        "runtime_top_p": runtime_top_p,
        "repetition_penalty": repetition_penalty,
        "len_penalty": len_penalty,
        "stop_tokens": stop_tokens or [],
        "assistant_start_text": assistant_start_text,
    }
    return driver.make_request(
        method="post",
        endpoint="vlm-chat",
        headers={"Content-Type": "application/json"},
        json=payload,
    )
def _check_conversation_history(
conversation_history: List[Dict[str, str]], is_vlm: bool = False
) -> None:
"""Checks that a conversation is well constructed.
Raises InvalidConversationError otherwise.
"""
if len(conversation_history) == 0:
raise InvalidConversationError("Conversation history cannot be empty")
for i, turn in enumerate(conversation_history):
expected_keys = {"type", "text"} | (
{"image_url"} if is_vlm and i == 0 else set()
)
turn_keys = set(turn.keys())
if turn_keys != expected_keys:
raise InvalidConversationError(
f"Expected keys {expected_keys} for turn {i} '{turn}', got keys {turn_keys}."
)
for key, value in turn.items():
if not isinstance(value, str):
raise InvalidConversationError(
f"Expected string for value of '{key}' in turn {i} '{turn}', got {type(value)}."
)
expected_type = ["human", "model"][i % 2]
if turn["type"] != expected_type:
raise InvalidConversationError(
f"Expected type '{expected_type}' for turn {i} '{turn}', got '{turn['type']}'. Conversations should "
"alternate between 'human' and 'model', starting with 'human'."
)
if conversation_history[-1]["type"] != "human":
raise InvalidConversationError("Conversation should end with a 'human' turn.") | /reka-api-0.0.86.tar.gz/reka-api-0.0.86/src/reka/api/chat.py | 0.952386 | 0.54359 | chat.py | pypi |
from __future__ import annotations
import dataclasses
from typing import Any, Dict, cast
import reka.api.driver as driver
import reka.api.job as job
from reka.errors import RetrievalError
from requests.exceptions import HTTPError
@dataclasses.dataclass
class PrepareRetrievalStatusResponse:
    """Status of a `prepare-retrieval` job.

    Args:
        job_status: The current status.
        detail: Further details, if any.
    """

    job_status: job.JobStatus
    detail: str

    def is_error(self) -> bool:
        """Whether the job has ended with an error."""
        return self.job_status == job.JobStatus.ERROR

    def is_complete(self) -> bool:
        """Whether the job has completed successfully."""
        return self.job_status == job.JobStatus.COMPLETE

    def is_running(self) -> bool:
        """Whether the job is still running."""
        return self.job_status == job.JobStatus.RUNNING

    def is_done(self) -> bool:
        """Whether the job has completed, either successfully or with an error."""
        return self.job_status in [job.JobStatus.COMPLETE, job.JobStatus.ERROR]

    @classmethod
    def from_dict(cls, dict: Dict[str, Any]) -> PrepareRetrievalStatusResponse:
        """Build a response object from an API payload dict.

        Fixed: no longer mutates the caller's dict. The previous implementation
        popped and re-assigned "job_status" in place, so the caller's payload
        was silently modified. The parameter keeps its original name (shadowing
        the builtin) for backward keyword-argument compatibility.
        """
        try:
            # Copy the payload, replacing the status string with its enum member.
            fields = {**dict, "job_status": job.JobStatus[dict["job_status"]]}
            return cls(**fields)
        except KeyError as e:
            raise TypeError(f"Invalid parameterisation for {cls.__name__}") from e
def prepare_retrieval(dataset_name: str) -> str:
    """Prepare dataset_name for retrieval later, creating a `prepare-retrieval` job.

    Args:
        dataset_name: name of previously uploaded dataset to prepare for retrieval

    Raises:
        RetrievalError: if there is something wrong with retrieval preparation,
            e.g. no such dataset uploaded, or retrieval already prepared.

    Returns:
        ID of the `prepare-retrieval` job, to be used for tracking.
    """
    try:
        resp = cast(
            str,
            driver.make_request(
                method="post", endpoint=f"datasets/{dataset_name}/prepare-retrieval"
            ),
        )
        return resp
    except HTTPError as e:
        # Chain the HTTPError ("raise ... from e") so the original HTTP
        # traceback is preserved as the direct cause.
        if (
            e.response.status_code == 400
            and "Retrieval has already been prepared for dataset" in e.response.text
        ):
            raise RetrievalError(
                underlying=e,
                reason="Retrieval has already been prepared for this dataset.",
            ) from e
        elif e.response.status_code == 404:
            raise RetrievalError(
                underlying=e,
                reason="Tried to prepare retrieval for non-existent dataset.",
            ) from e
        else:
            raise RetrievalError(
                underlying=e,
            ) from e
def retrieval_job_status(job_id: str) -> PrepareRetrievalStatusResponse:
    """Given a prepare retrieval job id, check the status.

    Args:
        job_id: id of the `prepare-retrieval` job.

    Returns:
        The current state of the job.

    Raises:
        RetrievalError: if the job does not exist or the request fails.
    """
    try:
        resp = job.job_status(job_id=job_id, job_type="prepare-retrieval")
        return PrepareRetrievalStatusResponse.from_dict(resp)
    except HTTPError as e:
        # Preserve the underlying HTTP traceback via exception chaining.
        if e.response.status_code == 404:
            raise RetrievalError(underlying=e, reason="Retrieval job not found.") from e
        else:
            raise RetrievalError(underlying=e) from e
from typing import Any, Dict, List, Optional, cast
import reka.api.driver as driver
from reka.errors import DatasetError
from requests.exceptions import HTTPError
def list_datasets() -> List[str]:
    """List all datasets available to the user of the `API_KEY`.

    Returns:
        List of dataset names.
    """
    response = driver.make_request(method="get", endpoint="datasets")
    return cast(List[str], response)
def add_dataset(
    filepath: str,
    name: str,
    description: Optional[str] = None,
) -> Dict[str, Any]:
    """Upload a dataset to run jobs on it later.

    NOTE: If the `name` is deemed inappropriate by the server,
    e.g. "../../etc/shadow", it will be changed to a secure name, which is
    returned in the response.

    Args:
        filepath: str, local path to a text file or a zipped collection of text files.
        name: str, what should the dataset be called
        description: Optional[str], optional metadata description

    Returns:
        Dictionary object representing what happened with the uploaded file.
    """
    form_fields = {
        "dataset_name": name,
        "dataset_description": description or "",
    }
    # Stream the file to the server as a multipart upload.
    with open(filepath, "rb") as dataset_file:
        return driver.make_request(
            method="post",
            endpoint="datasets",
            data=form_fields,
            files={"file": dataset_file},
        )
def delete_dataset(name: str) -> Dict[str, Any]:
    """Delete a dataset with a given name for the user of this API_KEY.

    Args:
        name: name of the dataset to delete.

    Returns: Dictionary object with keys `name` (*str*, the dataset name),
        `ok` (*bool*), and `info` (*str*).

    Raises:
        DatasetError: if the dataset does not exist or the request fails.
    """
    try:
        return driver.make_request(
            method="delete",
            endpoint=f"datasets/{name}",
        )
    except HTTPError as e:
        # Chain the HTTPError so the original cause appears in tracebacks.
        if e.response.status_code == 404:
            raise DatasetError(
                underlying=e, reason="Unable to delete missing dataset.") from e
        else:
            raise DatasetError(underlying=e) from e
from typing import List, Optional, cast
import reka.api.driver as driver
def completion(
    prompt: str,
    model_name: str = "text-phoenix-v1",
    request_output_len: int = 256,
    temperature: float = 1.0,
    random_seed: int = 0,
    runtime_top_k: int = 1024,
    runtime_top_p: float = 0.95,
    repetition_penalty: float = 1.0,
    len_penalty: float = 1.0,
    stop_tokens: Optional[List[str]] = None,
) -> str:
    """Request a text completion in synchronous mode.

    Example usage:
    ```python
    import reka
    reka.API_KEY = "APIKEY"

    result = reka.completion("What is the capital of the UK?")
    print(result)  # "The capital of the United Kingdom is London. ..."
    ```

    Args:
        prompt: string.
        model_name: Name of model. Currently only supports text-phoenix-v1.
        request_output_len: Completion length in tokens.
        temperature: Softmax temperature, higher is more diverse.
        random_seed: Seed to obtain different results.
        runtime_top_k: Keep only k top tokens when sampling.
        runtime_top_p: Keep only top p quantile when sampling.
        repetition_penalty: Untested! Penalize repetitions. 1 means no penalty.
        len_penalty: Untested! Penalize short answers. 1 means no penalty.
        stop_tokens: Optional list of words on which to stop generation.

    Returns:
        model completion.
    """
    json_dict = dict(
        prompts=[prompt],
        model_name=model_name,
        request_output_len=request_output_len,
        temperature=temperature,
        random_seed=random_seed,
        runtime_top_k=runtime_top_k,
        runtime_top_p=runtime_top_p,
        repetition_penalty=repetition_penalty,
        len_penalty=len_penalty,
        stop_tokens=stop_tokens or [],
    )

    # Single-prompt request; the server returns a batched "text" list, so we
    # return its first (and only) element.
    response = driver.make_request(
        method="post",
        endpoint="completion",
        headers={"Content-Type": "application/json"},
        json=json_dict,
    )
    return cast(str, response["text"][0])
# This file is produced when the main "version.py update" command is run. That
# command copies this file to all sub-packages which contain
# setup.py. Configuration is maintain in version.yaml at the project's top
# level.
def get_versions():
    """Return the static version data augmented with git metadata.

    Delegates to tag_version_data(), using "version.yaml" as the anchor file
    for counting dev commits.
    """
    return tag_version_data(raw_versions(), """version.yaml""")
def raw_versions():
    """Return the version configuration embedded in this file at build time."""
    return json.loads("""
{
    "codename": "Hurricane Ridge",
    "post": "0",
    "rc": "0",
    "version": "1.7.1"
}
""")
import json
import os
import subprocess

try:
    # We are looking for the git repo which contains this file.
    MY_DIR = os.path.dirname(os.path.abspath(__file__))
except Exception:
    # __file__ may be undefined (e.g. frozen or exec'd code). Previously a
    # bare "except:" was used, which also swallowed SystemExit and
    # KeyboardInterrupt; "except Exception" keeps the fallback without that.
    MY_DIR = None
def is_tree_dirty():
    """Return True when the git working tree holding this file has local edits."""
    try:
        changed = subprocess.check_output(
            ["git", "diff", "--name-only"], stderr=subprocess.PIPE,
            cwd=MY_DIR,
        ).splitlines()
    except (OSError, subprocess.CalledProcessError):
        # git missing or not a repository - treat as clean.
        return False
    return len(changed) > 0
def get_version_file_path(version_file="version.yaml"):
    """Return the absolute path of version_file at the git toplevel, or None."""
    try:
        toplevel = subprocess.check_output(
            ["git", "rev-parse", "--show-toplevel"], stderr=subprocess.PIPE,
            cwd=MY_DIR,
        )
    except (OSError, subprocess.CalledProcessError):
        return None
    return os.path.join(toplevel.decode("utf-8").strip(), version_file)
def number_of_commit_since(version_file="version.yaml"):
    """Returns the number of commits since version.yaml was changed."""
    base_cmd = ["git", "log", "--no-merges", "--pretty=format:%H"]
    try:
        # The most recent commit which touched the version file.
        anchor_commit = subprocess.check_output(
            base_cmd + ["-n", "1", version_file],
            cwd=MY_DIR, stderr=subprocess.PIPE,
        ).strip()

        # Recent history; the anchor's position is the commit count.
        recent_commits = subprocess.check_output(
            base_cmd + ["-n", "1000"],
            stderr=subprocess.PIPE, cwd=MY_DIR,
        ).splitlines()

        return recent_commits.index(anchor_commit)
    except (OSError, subprocess.CalledProcessError, ValueError):
        return None
def get_current_git_hash():
    """Return the hash of the current HEAD commit, or None outside a repo."""
    try:
        head = subprocess.check_output(
            ["git", "log", "--no-merges", "-n", "1", "--pretty=format:%H"],
            stderr=subprocess.PIPE, cwd=MY_DIR,
        )
    except (OSError, subprocess.CalledProcessError):
        return None
    return head.strip()
def tag_version_data(version_data, version_path="version.yaml"):
    """Augment the static version dict with git metadata and a PEP 440 string.

    Args:
        version_data: dict holding at least "version"; "post" and "rc" are
            optional and expected to be strings (they are concatenated below).
        version_path: repository-relative path of the version config file.

    Returns:
        The same dict, mutated in place, with "revisionid", "dirty", "dev"
        and "pep440" keys added (or "error" outside a git repository).
    """
    current_hash = get_current_git_hash()

    # Not in a git repository.
    if current_hash is None:
        version_data["error"] = "Not in a git repository."

    else:
        version_data["revisionid"] = current_hash
        version_data["dirty"] = is_tree_dirty()
        # NOTE(review): number_of_commit_since() may return None; the falsy
        # check below treats that the same as 0 dev commits.
        version_data["dev"] = number_of_commit_since(
            get_version_file_path(version_path))

    # Format the version according to pep440:
    pep440 = version_data["version"]
    if int(version_data.get("post", 0)) > 0:
        pep440 += ".post" + version_data["post"]

    elif int(version_data.get("rc", 0)) > 0:
        pep440 += ".rc" + version_data["rc"]

    if version_data.get("dev", 0):
        # A Development release comes _before_ the main release, so bump the
        # last version component before attaching the .devN suffix.
        last = version_data["version"].rsplit(".", 1)
        version_data["version"] = "%s.%s" % (last[0], int(last[1]) + 1)

        pep440 = version_data["version"] + ".dev" + str(version_data["dev"])

    version_data["pep440"] = pep440
    return version_data
import arrow
import six
from rekall_lib import registry
from rekall_lib.rekall_types import collections
if six.PY3:
unicode = str
def _coerce_timestamp(value):
    """Normalize a timestamp-like value to a float epoch.

    Accepts an arrow.Arrow instance or anything float() accepts.
    """
    if value isinstance(value, arrow.Arrow) if False else isinstance(value, arrow.Arrow):
        return value.float_timestamp
    return float(value)
class JSONCollectionImpl(collections.JSONCollection):
    """A collection which writes its result as JSON."""

    # Coercion callables applied to cell values, keyed by column type name.
    _allowed_types = {
        "int": int,
        "float": float,
        # Dates as epoch timestamps are stored as floats.
        "epoch": _coerce_timestamp,
        "any": lambda x: x  # Used for opaque types that can not be further
                            # processed.
    }

    if six.PY3:
        _allowed_types["unicode"] = str
        _allowed_types["str"] = bytes
    else:
        _allowed_types["unicode"] = unicode  # Unicode data.
        _allowed_types["str"] = str  # Used for binary data.

    def __init__(self, *args, **kwargs):
        super(JSONCollectionImpl, self).__init__(*args, **kwargs)
        self._dirty = False  # Set by start().
        self.part_number = 0  # Suffix for the next flushed output part.

    @registry.memoize_method
    def _find_table(self, table=None):
        """Resolve a table spec (name or None) to its table object.

        Raises:
            RuntimeError: if the name is unknown, or if no name was given
                while the collection holds more than one table.
        """
        if isinstance(table, (str, unicode)):
            for i in self.tables:
                if i.name == table:
                    return i

        if table is None:
            if len(self.tables) > 1:
                # BUG FIX: this RuntimeError was previously constructed but
                # never raised, silently falling through to the first table.
                raise RuntimeError(
                    "Collection contains multiple tables and no "
                    "table is specified.")

            return self.tables[0]

        raise RuntimeError("Unknown table %s" % table)

    def sanitize_row(self, row, table=None):
        """Convert the row into primitives.

        The collection can only store primitives and so we must convert the
        objects to these primitives.
        """
        table = self._find_table(table)
        sanitized_row = []

        # Make sure we only collect the columns which are specified. NOTE:
        # The EFilter query must name the columns exactly the same as the
        # collection spec.
        for column in table.columns:
            name = str(column.name)
            try:
                value = row[name]
            except KeyError:
                # Missing columns are stored as None.
                sanitized_row.append(None)
                continue

            if value is None:
                sanitized_row.append(None)
                continue

            # Columns default to "unicode" when no type is declared.
            sanitized_row.append(self._allowed_types[
                column.type or "unicode"](value))

        return sanitized_row

    def insert(self, table=None, row=None, **kwargs):
        """Append a sanitized row to the named table, flushing when full."""
        table_data = self.table_data.setdefault(table or 'default', [])
        table_data.append(self.sanitize_row(row or kwargs, table=table))
        # NOTE(review): assumes self.max_rows is provided by the schema or
        # base class - confirm.
        if len(table_data) >= self.max_rows:
            self.flush()

    def start(self):
        """Mark the collection dirty; returns self for chaining."""
        self._dirty = True
        return self

    def flush(self):
        """Write buffered rows as a numbered JSON part file and reset buffers."""
        if self.table_data:
            self.location.write_file(self.to_json(),
                                     part=self.part_number)
            self.table_data = {}
            self.part_number += 1
__author__ = "Michael Cohen <scudette@google.com>"
from artifacts import definitions
from rekall.plugins.response import forensic_artifacts
from rekall_agent import flow
from rekall_agent.flows import find
from rekall_lib.rekall_types import agent
class FileSourceType(forensic_artifacts.SourceType):
    """Artifact source which collects files matching the given glob paths."""

    _field_definitions = [
        dict(name="paths", default=[]),
        # BUG FIX: "basestring" does not exist on Python 3 and raised a
        # NameError when this class body executed; "str" is used instead.
        dict(name="separator", default="/", type=str,
             optional=True),
    ]

    # These fields will be present in the ArtifactResult object we return.
    _FIELDS = [
        dict(name="st_mode", type="unicode"),
        dict(name="st_nlink", type="int"),
        dict(name="st_uid", type="unicode"),
        dict(name="st_gid", type="unicode"),
        dict(name="st_size", type="int"),
        dict(name="st_mtime", type="epoch"),
        dict(name="filename", type="unicode"),
    ]

    def actions(self, flow_obj, download=False, name=None, **_):
        """Generate actions for the client."""
        # Create the new flow based on the current flow. We do not actually
        # launch it, but just collect its actions.
        subflow = flow_obj.cast(find.FileFinderFlow)
        subflow.globs = self.paths
        subflow.download = download
        if name is None:
            name = self.artifact.name

        subflow.set_collection_name("{flow_id}/%s" % name)

        for action in subflow.generate_actions():
            yield action
# Maps artifact source-type indicators to the handler class implementing them.
SOURCE_TYPES = {
    definitions.TYPE_INDICATOR_FILE: FileSourceType,
}
class Artifact(agent.Flow):
    """Launch artifacts on the client."""

    schema = [
        dict(name="artifacts", repeated=True, private=True, user=True,
             doc="The list of artifacts to launch."),

        dict(name="copy_files", type="bool", user=True,
             doc="Should we also download the files."),
    ]

    def generate_actions(self):
        """Yield client actions for every known artifact in self.artifacts."""
        self._artifact_profile = self._session.LoadProfile("artifacts")
        self._collected_artifacts = []

        for artifact_name in self.artifacts:
            try:
                definition = self._artifact_profile.GetDefinitionByName(
                    artifact_name, source_types=SOURCE_TYPES)
            except KeyError:
                # Unknown artifacts are logged and skipped, not fatal.
                self._session.logging.error(
                    "Unknown artifact %s" % artifact_name)
            else:
                # Each source contributes its own actions.
                for source in definition.sources:
                    for action in source.actions(self, download=self.copy_files):
                        yield action
# Operand type for instruction's operands
MIPS_OP_INVALID = 0
MIPS_OP_REG = 1
MIPS_OP_IMM = 2
MIPS_OP_MEM = 3
# MIPS registers
MIPS_REG_INVALID = 0
# General purpose registers
MIPS_REG_0 = 1
MIPS_REG_1 = 2
MIPS_REG_2 = 3
MIPS_REG_3 = 4
MIPS_REG_4 = 5
MIPS_REG_5 = 6
MIPS_REG_6 = 7
MIPS_REG_7 = 8
MIPS_REG_8 = 9
MIPS_REG_9 = 10
MIPS_REG_10 = 11
MIPS_REG_11 = 12
MIPS_REG_12 = 13
MIPS_REG_13 = 14
MIPS_REG_14 = 15
MIPS_REG_15 = 16
MIPS_REG_16 = 17
MIPS_REG_17 = 18
MIPS_REG_18 = 19
MIPS_REG_19 = 20
MIPS_REG_20 = 21
MIPS_REG_21 = 22
MIPS_REG_22 = 23
MIPS_REG_23 = 24
MIPS_REG_24 = 25
MIPS_REG_25 = 26
MIPS_REG_26 = 27
MIPS_REG_27 = 28
MIPS_REG_28 = 29
MIPS_REG_29 = 30
MIPS_REG_30 = 31
MIPS_REG_31 = 32
# DSP registers
MIPS_REG_DSPCCOND = 33
MIPS_REG_DSPCARRY = 34
MIPS_REG_DSPEFI = 35
MIPS_REG_DSPOUTFLAG = 36
MIPS_REG_DSPOUTFLAG16_19 = 37
MIPS_REG_DSPOUTFLAG20 = 38
MIPS_REG_DSPOUTFLAG21 = 39
MIPS_REG_DSPOUTFLAG22 = 40
MIPS_REG_DSPOUTFLAG23 = 41
MIPS_REG_DSPPOS = 42
MIPS_REG_DSPSCOUNT = 43
# ACC registers
MIPS_REG_AC0 = 44
MIPS_REG_AC1 = 45
MIPS_REG_AC2 = 46
MIPS_REG_AC3 = 47
# COP registers
MIPS_REG_CC0 = 48
MIPS_REG_CC1 = 49
MIPS_REG_CC2 = 50
MIPS_REG_CC3 = 51
MIPS_REG_CC4 = 52
MIPS_REG_CC5 = 53
MIPS_REG_CC6 = 54
MIPS_REG_CC7 = 55
# FPU registers
MIPS_REG_F0 = 56
MIPS_REG_F1 = 57
MIPS_REG_F2 = 58
MIPS_REG_F3 = 59
MIPS_REG_F4 = 60
MIPS_REG_F5 = 61
MIPS_REG_F6 = 62
MIPS_REG_F7 = 63
MIPS_REG_F8 = 64
MIPS_REG_F9 = 65
MIPS_REG_F10 = 66
MIPS_REG_F11 = 67
MIPS_REG_F12 = 68
MIPS_REG_F13 = 69
MIPS_REG_F14 = 70
MIPS_REG_F15 = 71
MIPS_REG_F16 = 72
MIPS_REG_F17 = 73
MIPS_REG_F18 = 74
MIPS_REG_F19 = 75
MIPS_REG_F20 = 76
MIPS_REG_F21 = 77
MIPS_REG_F22 = 78
MIPS_REG_F23 = 79
MIPS_REG_F24 = 80
MIPS_REG_F25 = 81
MIPS_REG_F26 = 82
MIPS_REG_F27 = 83
MIPS_REG_F28 = 84
MIPS_REG_F29 = 85
MIPS_REG_F30 = 86
MIPS_REG_F31 = 87
MIPS_REG_FCC0 = 88
MIPS_REG_FCC1 = 89
MIPS_REG_FCC2 = 90
MIPS_REG_FCC3 = 91
MIPS_REG_FCC4 = 92
MIPS_REG_FCC5 = 93
MIPS_REG_FCC6 = 94
MIPS_REG_FCC7 = 95
# AFPR128
MIPS_REG_W0 = 96
MIPS_REG_W1 = 97
MIPS_REG_W2 = 98
MIPS_REG_W3 = 99
MIPS_REG_W4 = 100
MIPS_REG_W5 = 101
MIPS_REG_W6 = 102
MIPS_REG_W7 = 103
MIPS_REG_W8 = 104
MIPS_REG_W9 = 105
MIPS_REG_W10 = 106
MIPS_REG_W11 = 107
MIPS_REG_W12 = 108
MIPS_REG_W13 = 109
MIPS_REG_W14 = 110
MIPS_REG_W15 = 111
MIPS_REG_W16 = 112
MIPS_REG_W17 = 113
MIPS_REG_W18 = 114
MIPS_REG_W19 = 115
MIPS_REG_W20 = 116
MIPS_REG_W21 = 117
MIPS_REG_W22 = 118
MIPS_REG_W23 = 119
MIPS_REG_W24 = 120
MIPS_REG_W25 = 121
MIPS_REG_W26 = 122
MIPS_REG_W27 = 123
MIPS_REG_W28 = 124
MIPS_REG_W29 = 125
MIPS_REG_W30 = 126
MIPS_REG_W31 = 127
MIPS_REG_HI = 128
MIPS_REG_LO = 129
MIPS_REG_P0 = 130
MIPS_REG_P1 = 131
MIPS_REG_P2 = 132
MIPS_REG_MPL0 = 133
MIPS_REG_MPL1 = 134
MIPS_REG_MPL2 = 135
MIPS_REG_ENDING = 136
MIPS_REG_ZERO = MIPS_REG_0
MIPS_REG_AT = MIPS_REG_1
MIPS_REG_V0 = MIPS_REG_2
MIPS_REG_V1 = MIPS_REG_3
MIPS_REG_A0 = MIPS_REG_4
MIPS_REG_A1 = MIPS_REG_5
MIPS_REG_A2 = MIPS_REG_6
MIPS_REG_A3 = MIPS_REG_7
MIPS_REG_T0 = MIPS_REG_8
MIPS_REG_T1 = MIPS_REG_9
MIPS_REG_T2 = MIPS_REG_10
MIPS_REG_T3 = MIPS_REG_11
MIPS_REG_T4 = MIPS_REG_12
MIPS_REG_T5 = MIPS_REG_13
MIPS_REG_T6 = MIPS_REG_14
MIPS_REG_T7 = MIPS_REG_15
MIPS_REG_S0 = MIPS_REG_16
MIPS_REG_S1 = MIPS_REG_17
MIPS_REG_S2 = MIPS_REG_18
MIPS_REG_S3 = MIPS_REG_19
MIPS_REG_S4 = MIPS_REG_20
MIPS_REG_S5 = MIPS_REG_21
MIPS_REG_S6 = MIPS_REG_22
MIPS_REG_S7 = MIPS_REG_23
MIPS_REG_T8 = MIPS_REG_24
MIPS_REG_T9 = MIPS_REG_25
MIPS_REG_K0 = MIPS_REG_26
MIPS_REG_K1 = MIPS_REG_27
MIPS_REG_GP = MIPS_REG_28
MIPS_REG_SP = MIPS_REG_29
MIPS_REG_FP = MIPS_REG_30
MIPS_REG_S8 = MIPS_REG_30
MIPS_REG_RA = MIPS_REG_31
MIPS_REG_HI0 = MIPS_REG_AC0
MIPS_REG_HI1 = MIPS_REG_AC1
MIPS_REG_HI2 = MIPS_REG_AC2
MIPS_REG_HI3 = MIPS_REG_AC3
MIPS_REG_LO0 = MIPS_REG_HI0
MIPS_REG_LO1 = MIPS_REG_HI1
MIPS_REG_LO2 = MIPS_REG_HI2
MIPS_REG_LO3 = MIPS_REG_HI3
# MIPS instruction
MIPS_INS_INVALID = 0
MIPS_INS_ABSQ_S = 1
MIPS_INS_ADD = 2
MIPS_INS_ADDIUPC = 3
MIPS_INS_ADDQH = 4
MIPS_INS_ADDQH_R = 5
MIPS_INS_ADDQ = 6
MIPS_INS_ADDQ_S = 7
MIPS_INS_ADDSC = 8
MIPS_INS_ADDS_A = 9
MIPS_INS_ADDS_S = 10
MIPS_INS_ADDS_U = 11
MIPS_INS_ADDUH = 12
MIPS_INS_ADDUH_R = 13
MIPS_INS_ADDU = 14
MIPS_INS_ADDU_S = 15
MIPS_INS_ADDVI = 16
MIPS_INS_ADDV = 17
MIPS_INS_ADDWC = 18
MIPS_INS_ADD_A = 19
MIPS_INS_ADDI = 20
MIPS_INS_ADDIU = 21
MIPS_INS_ALIGN = 22
MIPS_INS_ALUIPC = 23
MIPS_INS_AND = 24
MIPS_INS_ANDI = 25
MIPS_INS_APPEND = 26
MIPS_INS_ASUB_S = 27
MIPS_INS_ASUB_U = 28
MIPS_INS_AUI = 29
MIPS_INS_AUIPC = 30
MIPS_INS_AVER_S = 31
MIPS_INS_AVER_U = 32
MIPS_INS_AVE_S = 33
MIPS_INS_AVE_U = 34
MIPS_INS_BADDU = 35
MIPS_INS_BAL = 36
MIPS_INS_BALC = 37
MIPS_INS_BALIGN = 38
MIPS_INS_BC = 39
MIPS_INS_BC0F = 40
MIPS_INS_BC0FL = 41
MIPS_INS_BC0T = 42
MIPS_INS_BC0TL = 43
MIPS_INS_BC1EQZ = 44
MIPS_INS_BC1F = 45
MIPS_INS_BC1FL = 46
MIPS_INS_BC1NEZ = 47
MIPS_INS_BC1T = 48
MIPS_INS_BC1TL = 49
MIPS_INS_BC2EQZ = 50
MIPS_INS_BC2F = 51
MIPS_INS_BC2FL = 52
MIPS_INS_BC2NEZ = 53
MIPS_INS_BC2T = 54
MIPS_INS_BC2TL = 55
MIPS_INS_BC3F = 56
MIPS_INS_BC3FL = 57
MIPS_INS_BC3T = 58
MIPS_INS_BC3TL = 59
MIPS_INS_BCLRI = 60
MIPS_INS_BCLR = 61
MIPS_INS_BEQ = 62
MIPS_INS_BEQC = 63
MIPS_INS_BEQL = 64
MIPS_INS_BEQZALC = 65
MIPS_INS_BEQZC = 66
MIPS_INS_BGEC = 67
MIPS_INS_BGEUC = 68
MIPS_INS_BGEZ = 69
MIPS_INS_BGEZAL = 70
MIPS_INS_BGEZALC = 71
MIPS_INS_BGEZALL = 72
MIPS_INS_BGEZALS = 73
MIPS_INS_BGEZC = 74
MIPS_INS_BGEZL = 75
MIPS_INS_BGTZ = 76
MIPS_INS_BGTZALC = 77
MIPS_INS_BGTZC = 78
MIPS_INS_BGTZL = 79
MIPS_INS_BINSLI = 80
MIPS_INS_BINSL = 81
MIPS_INS_BINSRI = 82
MIPS_INS_BINSR = 83
MIPS_INS_BITREV = 84
MIPS_INS_BITSWAP = 85
MIPS_INS_BLEZ = 86
MIPS_INS_BLEZALC = 87
MIPS_INS_BLEZC = 88
MIPS_INS_BLEZL = 89
MIPS_INS_BLTC = 90
MIPS_INS_BLTUC = 91
MIPS_INS_BLTZ = 92
MIPS_INS_BLTZAL = 93
MIPS_INS_BLTZALC = 94
MIPS_INS_BLTZALL = 95
MIPS_INS_BLTZALS = 96
MIPS_INS_BLTZC = 97
MIPS_INS_BLTZL = 98
MIPS_INS_BMNZI = 99
MIPS_INS_BMNZ = 100
MIPS_INS_BMZI = 101
MIPS_INS_BMZ = 102
MIPS_INS_BNE = 103
MIPS_INS_BNEC = 104
MIPS_INS_BNEGI = 105
MIPS_INS_BNEG = 106
MIPS_INS_BNEL = 107
MIPS_INS_BNEZALC = 108
MIPS_INS_BNEZC = 109
MIPS_INS_BNVC = 110
MIPS_INS_BNZ = 111
MIPS_INS_BOVC = 112
MIPS_INS_BPOSGE32 = 113
MIPS_INS_BREAK = 114
MIPS_INS_BSELI = 115
MIPS_INS_BSEL = 116
MIPS_INS_BSETI = 117
MIPS_INS_BSET = 118
MIPS_INS_BZ = 119
MIPS_INS_BEQZ = 120
MIPS_INS_B = 121
MIPS_INS_BNEZ = 122
MIPS_INS_BTEQZ = 123
MIPS_INS_BTNEZ = 124
MIPS_INS_CACHE = 125
MIPS_INS_CEIL = 126
MIPS_INS_CEQI = 127
MIPS_INS_CEQ = 128
MIPS_INS_CFC1 = 129
MIPS_INS_CFCMSA = 130
MIPS_INS_CINS = 131
MIPS_INS_CINS32 = 132
MIPS_INS_CLASS = 133
MIPS_INS_CLEI_S = 134
MIPS_INS_CLEI_U = 135
MIPS_INS_CLE_S = 136
MIPS_INS_CLE_U = 137
MIPS_INS_CLO = 138
MIPS_INS_CLTI_S = 139
MIPS_INS_CLTI_U = 140
MIPS_INS_CLT_S = 141
MIPS_INS_CLT_U = 142
MIPS_INS_CLZ = 143
MIPS_INS_CMPGDU = 144
MIPS_INS_CMPGU = 145
MIPS_INS_CMPU = 146
MIPS_INS_CMP = 147
MIPS_INS_COPY_S = 148
MIPS_INS_COPY_U = 149
MIPS_INS_CTC1 = 150
MIPS_INS_CTCMSA = 151
MIPS_INS_CVT = 152
MIPS_INS_C = 153
MIPS_INS_CMPI = 154
MIPS_INS_DADD = 155
MIPS_INS_DADDI = 156
MIPS_INS_DADDIU = 157
MIPS_INS_DADDU = 158
MIPS_INS_DAHI = 159
MIPS_INS_DALIGN = 160
MIPS_INS_DATI = 161
MIPS_INS_DAUI = 162
MIPS_INS_DBITSWAP = 163
MIPS_INS_DCLO = 164
MIPS_INS_DCLZ = 165
MIPS_INS_DDIV = 166
MIPS_INS_DDIVU = 167
MIPS_INS_DERET = 168
MIPS_INS_DEXT = 169
MIPS_INS_DEXTM = 170
MIPS_INS_DEXTU = 171
MIPS_INS_DI = 172
MIPS_INS_DINS = 173
MIPS_INS_DINSM = 174
MIPS_INS_DINSU = 175
MIPS_INS_DIV = 176
MIPS_INS_DIVU = 177
MIPS_INS_DIV_S = 178
MIPS_INS_DIV_U = 179
MIPS_INS_DLSA = 180
MIPS_INS_DMFC0 = 181
MIPS_INS_DMFC1 = 182
MIPS_INS_DMFC2 = 183
MIPS_INS_DMOD = 184
MIPS_INS_DMODU = 185
MIPS_INS_DMTC0 = 186
MIPS_INS_DMTC1 = 187
MIPS_INS_DMTC2 = 188
MIPS_INS_DMUH = 189
MIPS_INS_DMUHU = 190
MIPS_INS_DMUL = 191
MIPS_INS_DMULT = 192
MIPS_INS_DMULTU = 193
MIPS_INS_DMULU = 194
MIPS_INS_DOTP_S = 195
MIPS_INS_DOTP_U = 196
MIPS_INS_DPADD_S = 197
MIPS_INS_DPADD_U = 198
MIPS_INS_DPAQX_SA = 199
MIPS_INS_DPAQX_S = 200
MIPS_INS_DPAQ_SA = 201
MIPS_INS_DPAQ_S = 202
MIPS_INS_DPAU = 203
MIPS_INS_DPAX = 204
MIPS_INS_DPA = 205
MIPS_INS_DPOP = 206
MIPS_INS_DPSQX_SA = 207
MIPS_INS_DPSQX_S = 208
MIPS_INS_DPSQ_SA = 209
MIPS_INS_DPSQ_S = 210
MIPS_INS_DPSUB_S = 211
MIPS_INS_DPSUB_U = 212
MIPS_INS_DPSU = 213
MIPS_INS_DPSX = 214
MIPS_INS_DPS = 215
MIPS_INS_DROTR = 216
MIPS_INS_DROTR32 = 217
MIPS_INS_DROTRV = 218
MIPS_INS_DSBH = 219
MIPS_INS_DSHD = 220
MIPS_INS_DSLL = 221
MIPS_INS_DSLL32 = 222
MIPS_INS_DSLLV = 223
MIPS_INS_DSRA = 224
MIPS_INS_DSRA32 = 225
MIPS_INS_DSRAV = 226
MIPS_INS_DSRL = 227
MIPS_INS_DSRL32 = 228
MIPS_INS_DSRLV = 229
MIPS_INS_DSUB = 230
MIPS_INS_DSUBU = 231
MIPS_INS_EHB = 232
MIPS_INS_EI = 233
MIPS_INS_ERET = 234
MIPS_INS_EXT = 235
MIPS_INS_EXTP = 236
MIPS_INS_EXTPDP = 237
MIPS_INS_EXTPDPV = 238
MIPS_INS_EXTPV = 239
MIPS_INS_EXTRV_RS = 240
MIPS_INS_EXTRV_R = 241
MIPS_INS_EXTRV_S = 242
MIPS_INS_EXTRV = 243
MIPS_INS_EXTR_RS = 244
MIPS_INS_EXTR_R = 245
MIPS_INS_EXTR_S = 246
MIPS_INS_EXTR = 247
MIPS_INS_EXTS = 248
MIPS_INS_EXTS32 = 249
MIPS_INS_ABS = 250
MIPS_INS_FADD = 251
MIPS_INS_FCAF = 252
MIPS_INS_FCEQ = 253
MIPS_INS_FCLASS = 254
MIPS_INS_FCLE = 255
MIPS_INS_FCLT = 256
MIPS_INS_FCNE = 257
MIPS_INS_FCOR = 258
MIPS_INS_FCUEQ = 259
MIPS_INS_FCULE = 260
MIPS_INS_FCULT = 261
MIPS_INS_FCUNE = 262
MIPS_INS_FCUN = 263
MIPS_INS_FDIV = 264
MIPS_INS_FEXDO = 265
MIPS_INS_FEXP2 = 266
MIPS_INS_FEXUPL = 267
MIPS_INS_FEXUPR = 268
MIPS_INS_FFINT_S = 269
MIPS_INS_FFINT_U = 270
MIPS_INS_FFQL = 271
MIPS_INS_FFQR = 272
MIPS_INS_FILL = 273
MIPS_INS_FLOG2 = 274
MIPS_INS_FLOOR = 275
MIPS_INS_FMADD = 276
MIPS_INS_FMAX_A = 277
MIPS_INS_FMAX = 278
MIPS_INS_FMIN_A = 279
MIPS_INS_FMIN = 280
MIPS_INS_MOV = 281
MIPS_INS_FMSUB = 282
MIPS_INS_FMUL = 283
MIPS_INS_MUL = 284
MIPS_INS_NEG = 285
MIPS_INS_FRCP = 286
MIPS_INS_FRINT = 287
MIPS_INS_FRSQRT = 288
MIPS_INS_FSAF = 289
MIPS_INS_FSEQ = 290
MIPS_INS_FSLE = 291
MIPS_INS_FSLT = 292
MIPS_INS_FSNE = 293
MIPS_INS_FSOR = 294
MIPS_INS_FSQRT = 295
MIPS_INS_SQRT = 296
MIPS_INS_FSUB = 297
MIPS_INS_SUB = 298
MIPS_INS_FSUEQ = 299
MIPS_INS_FSULE = 300
MIPS_INS_FSULT = 301
MIPS_INS_FSUNE = 302
MIPS_INS_FSUN = 303
MIPS_INS_FTINT_S = 304
MIPS_INS_FTINT_U = 305
MIPS_INS_FTQ = 306
MIPS_INS_FTRUNC_S = 307
MIPS_INS_FTRUNC_U = 308
MIPS_INS_HADD_S = 309
MIPS_INS_HADD_U = 310
MIPS_INS_HSUB_S = 311
MIPS_INS_HSUB_U = 312
MIPS_INS_ILVEV = 313
MIPS_INS_ILVL = 314
MIPS_INS_ILVOD = 315
MIPS_INS_ILVR = 316
MIPS_INS_INS = 317
MIPS_INS_INSERT = 318
MIPS_INS_INSV = 319
MIPS_INS_INSVE = 320
MIPS_INS_J = 321
MIPS_INS_JAL = 322
MIPS_INS_JALR = 323
MIPS_INS_JALRS = 324
MIPS_INS_JALS = 325
MIPS_INS_JALX = 326
MIPS_INS_JIALC = 327
MIPS_INS_JIC = 328
MIPS_INS_JR = 329
MIPS_INS_JRADDIUSP = 330
MIPS_INS_JRC = 331
MIPS_INS_JALRC = 332
MIPS_INS_LB = 333
MIPS_INS_LBUX = 334
MIPS_INS_LBU = 335
MIPS_INS_LD = 336
MIPS_INS_LDC1 = 337
MIPS_INS_LDC2 = 338
MIPS_INS_LDC3 = 339
MIPS_INS_LDI = 340
MIPS_INS_LDL = 341
MIPS_INS_LDPC = 342
MIPS_INS_LDR = 343
MIPS_INS_LDXC1 = 344
MIPS_INS_LH = 345
MIPS_INS_LHX = 346
MIPS_INS_LHU = 347
MIPS_INS_LL = 348
MIPS_INS_LLD = 349
MIPS_INS_LSA = 350
MIPS_INS_LUXC1 = 351
MIPS_INS_LUI = 352
MIPS_INS_LW = 353
MIPS_INS_LWC1 = 354
MIPS_INS_LWC2 = 355
MIPS_INS_LWC3 = 356
MIPS_INS_LWL = 357
MIPS_INS_LWPC = 358
MIPS_INS_LWR = 359
MIPS_INS_LWUPC = 360
MIPS_INS_LWU = 361
MIPS_INS_LWX = 362
MIPS_INS_LWXC1 = 363
MIPS_INS_LI = 364
MIPS_INS_MADD = 365
MIPS_INS_MADDF = 366
MIPS_INS_MADDR_Q = 367
MIPS_INS_MADDU = 368
MIPS_INS_MADDV = 369
MIPS_INS_MADD_Q = 370
MIPS_INS_MAQ_SA = 371
MIPS_INS_MAQ_S = 372
MIPS_INS_MAXA = 373
MIPS_INS_MAXI_S = 374
MIPS_INS_MAXI_U = 375
MIPS_INS_MAX_A = 376
MIPS_INS_MAX = 377
MIPS_INS_MAX_S = 378
MIPS_INS_MAX_U = 379
MIPS_INS_MFC0 = 380
MIPS_INS_MFC1 = 381
MIPS_INS_MFC2 = 382
MIPS_INS_MFHC1 = 383
MIPS_INS_MFHI = 384
MIPS_INS_MFLO = 385
MIPS_INS_MINA = 386
MIPS_INS_MINI_S = 387
MIPS_INS_MINI_U = 388
MIPS_INS_MIN_A = 389
MIPS_INS_MIN = 390
MIPS_INS_MIN_S = 391
MIPS_INS_MIN_U = 392
MIPS_INS_MOD = 393
MIPS_INS_MODSUB = 394
MIPS_INS_MODU = 395
MIPS_INS_MOD_S = 396
MIPS_INS_MOD_U = 397
MIPS_INS_MOVE = 398
MIPS_INS_MOVF = 399
MIPS_INS_MOVN = 400
MIPS_INS_MOVT = 401
MIPS_INS_MOVZ = 402
MIPS_INS_MSUB = 403
MIPS_INS_MSUBF = 404
MIPS_INS_MSUBR_Q = 405
MIPS_INS_MSUBU = 406
MIPS_INS_MSUBV = 407
MIPS_INS_MSUB_Q = 408
MIPS_INS_MTC0 = 409
MIPS_INS_MTC1 = 410
MIPS_INS_MTC2 = 411
MIPS_INS_MTHC1 = 412
MIPS_INS_MTHI = 413
MIPS_INS_MTHLIP = 414
MIPS_INS_MTLO = 415
MIPS_INS_MTM0 = 416
MIPS_INS_MTM1 = 417
MIPS_INS_MTM2 = 418
MIPS_INS_MTP0 = 419
MIPS_INS_MTP1 = 420
MIPS_INS_MTP2 = 421
MIPS_INS_MUH = 422
MIPS_INS_MUHU = 423
MIPS_INS_MULEQ_S = 424
MIPS_INS_MULEU_S = 425
MIPS_INS_MULQ_RS = 426
MIPS_INS_MULQ_S = 427
MIPS_INS_MULR_Q = 428
MIPS_INS_MULSAQ_S = 429
MIPS_INS_MULSA = 430
MIPS_INS_MULT = 431
MIPS_INS_MULTU = 432
MIPS_INS_MULU = 433
MIPS_INS_MULV = 434
MIPS_INS_MUL_Q = 435
MIPS_INS_MUL_S = 436
MIPS_INS_NLOC = 437
MIPS_INS_NLZC = 438
MIPS_INS_NMADD = 439
MIPS_INS_NMSUB = 440
MIPS_INS_NOR = 441
MIPS_INS_NORI = 442
MIPS_INS_NOT = 443
MIPS_INS_OR = 444
MIPS_INS_ORI = 445
MIPS_INS_PACKRL = 446
MIPS_INS_PAUSE = 447
MIPS_INS_PCKEV = 448
MIPS_INS_PCKOD = 449
MIPS_INS_PCNT = 450
MIPS_INS_PICK = 451
MIPS_INS_POP = 452
MIPS_INS_PRECEQU = 453
MIPS_INS_PRECEQ = 454
MIPS_INS_PRECEU = 455
MIPS_INS_PRECRQU_S = 456
MIPS_INS_PRECRQ = 457
MIPS_INS_PRECRQ_RS = 458
MIPS_INS_PRECR = 459
MIPS_INS_PRECR_SRA = 460
MIPS_INS_PRECR_SRA_R = 461
MIPS_INS_PREF = 462
MIPS_INS_PREPEND = 463
MIPS_INS_RADDU = 464
MIPS_INS_RDDSP = 465
MIPS_INS_RDHWR = 466
MIPS_INS_REPLV = 467
MIPS_INS_REPL = 468
MIPS_INS_RINT = 469
MIPS_INS_ROTR = 470
MIPS_INS_ROTRV = 471
MIPS_INS_ROUND = 472
MIPS_INS_SAT_S = 473
MIPS_INS_SAT_U = 474
MIPS_INS_SB = 475
MIPS_INS_SC = 476
MIPS_INS_SCD = 477
MIPS_INS_SD = 478
MIPS_INS_SDBBP = 479
MIPS_INS_SDC1 = 480
MIPS_INS_SDC2 = 481
MIPS_INS_SDC3 = 482
MIPS_INS_SDL = 483
MIPS_INS_SDR = 484
MIPS_INS_SDXC1 = 485
MIPS_INS_SEB = 486
MIPS_INS_SEH = 487
MIPS_INS_SELEQZ = 488
MIPS_INS_SELNEZ = 489
MIPS_INS_SEL = 490
MIPS_INS_SEQ = 491
MIPS_INS_SEQI = 492
MIPS_INS_SH = 493
MIPS_INS_SHF = 494
MIPS_INS_SHILO = 495
MIPS_INS_SHILOV = 496
MIPS_INS_SHLLV = 497
MIPS_INS_SHLLV_S = 498
MIPS_INS_SHLL = 499
MIPS_INS_SHLL_S = 500
MIPS_INS_SHRAV = 501
MIPS_INS_SHRAV_R = 502
MIPS_INS_SHRA = 503
MIPS_INS_SHRA_R = 504
MIPS_INS_SHRLV = 505
MIPS_INS_SHRL = 506
MIPS_INS_SLDI = 507
MIPS_INS_SLD = 508
MIPS_INS_SLL = 509
MIPS_INS_SLLI = 510
MIPS_INS_SLLV = 511
MIPS_INS_SLT = 512
MIPS_INS_SLTI = 513
MIPS_INS_SLTIU = 514
MIPS_INS_SLTU = 515
MIPS_INS_SNE = 516
MIPS_INS_SNEI = 517
MIPS_INS_SPLATI = 518
MIPS_INS_SPLAT = 519
MIPS_INS_SRA = 520
MIPS_INS_SRAI = 521
MIPS_INS_SRARI = 522
MIPS_INS_SRAR = 523
MIPS_INS_SRAV = 524
MIPS_INS_SRL = 525
MIPS_INS_SRLI = 526
MIPS_INS_SRLRI = 527
MIPS_INS_SRLR = 528
MIPS_INS_SRLV = 529
MIPS_INS_SSNOP = 530
MIPS_INS_ST = 531
MIPS_INS_SUBQH = 532
MIPS_INS_SUBQH_R = 533
MIPS_INS_SUBQ = 534
MIPS_INS_SUBQ_S = 535
MIPS_INS_SUBSUS_U = 536
MIPS_INS_SUBSUU_S = 537
MIPS_INS_SUBS_S = 538
MIPS_INS_SUBS_U = 539
MIPS_INS_SUBUH = 540
MIPS_INS_SUBUH_R = 541
MIPS_INS_SUBU = 542
MIPS_INS_SUBU_S = 543
MIPS_INS_SUBVI = 544
MIPS_INS_SUBV = 545
MIPS_INS_SUXC1 = 546
MIPS_INS_SW = 547
MIPS_INS_SWC1 = 548
MIPS_INS_SWC2 = 549
MIPS_INS_SWC3 = 550
MIPS_INS_SWL = 551
MIPS_INS_SWR = 552
MIPS_INS_SWXC1 = 553
MIPS_INS_SYNC = 554
MIPS_INS_SYSCALL = 555
MIPS_INS_TEQ = 556
MIPS_INS_TEQI = 557
MIPS_INS_TGE = 558
MIPS_INS_TGEI = 559
MIPS_INS_TGEIU = 560
MIPS_INS_TGEU = 561
MIPS_INS_TLBP = 562
MIPS_INS_TLBR = 563
MIPS_INS_TLBWI = 564
MIPS_INS_TLBWR = 565
MIPS_INS_TLT = 566
MIPS_INS_TLTI = 567
MIPS_INS_TLTIU = 568
MIPS_INS_TLTU = 569
MIPS_INS_TNE = 570
MIPS_INS_TNEI = 571
MIPS_INS_TRUNC = 572
MIPS_INS_V3MULU = 573
MIPS_INS_VMM0 = 574
MIPS_INS_VMULU = 575
MIPS_INS_VSHF = 576
MIPS_INS_WAIT = 577
MIPS_INS_WRDSP = 578
MIPS_INS_WSBH = 579
MIPS_INS_XOR = 580
MIPS_INS_XORI = 581
# some alias instructions
MIPS_INS_NOP = 582
MIPS_INS_NEGU = 583
# special instructions
MIPS_INS_JALR_HB = 584
MIPS_INS_JR_HB = 585
MIPS_INS_ENDING = 586
# Group of MIPS instructions
MIPS_GRP_INVALID = 0
# Generic groups
MIPS_GRP_JUMP = 1
# Architecture-specific groups
MIPS_GRP_BITCOUNT = 128
MIPS_GRP_DSP = 129
MIPS_GRP_DSPR2 = 130
MIPS_GRP_FPIDX = 131
MIPS_GRP_MSA = 132
MIPS_GRP_MIPS32R2 = 133
MIPS_GRP_MIPS64 = 134
MIPS_GRP_MIPS64R2 = 135
MIPS_GRP_SEINREG = 136
MIPS_GRP_STDENC = 137
MIPS_GRP_SWAP = 138
MIPS_GRP_MICROMIPS = 139
MIPS_GRP_MIPS16MODE = 140
MIPS_GRP_FP64BIT = 141
MIPS_GRP_NONANSFPMATH = 142
MIPS_GRP_NOTFP64BIT = 143
MIPS_GRP_NOTINMICROMIPS = 144
MIPS_GRP_NOTNACL = 145
MIPS_GRP_NOTMIPS32R6 = 146
MIPS_GRP_NOTMIPS64R6 = 147
MIPS_GRP_CNMIPS = 148
MIPS_GRP_MIPS32 = 149
MIPS_GRP_MIPS32R6 = 150
MIPS_GRP_MIPS64R6 = 151
MIPS_GRP_MIPS2 = 152
MIPS_GRP_MIPS3 = 153
MIPS_GRP_MIPS3_32 = 154
MIPS_GRP_MIPS3_32R2 = 155
MIPS_GRP_MIPS4_32 = 156
MIPS_GRP_MIPS4_32R2 = 157
MIPS_GRP_MIPS5_32R2 = 158
MIPS_GRP_GP32BIT = 159
MIPS_GRP_GP64BIT = 160
MIPS_GRP_ENDING = 161 | /rekall-capstone-3.0.5.post2.zip/rekall-capstone-3.0.5.post2/capstone/mips_const.py | 0.419291 | 0.388763 | mips_const.py | pypi |
from future import standard_library
standard_library.install_aliases()
from builtins import object
from future.utils import with_metaclass
__author__ = "Michael Cohen <scudette@google.com>"
import io
import gzip
import json
import time
import os
import shutil
import urllib.request, urllib.error, urllib.parse
import urllib.parse
import zipfile
from rekall import constants
from rekall import obj
from rekall_lib import registry
from rekall_lib import utils
# The maximum size of a single data object we support. This represents the
# maximum amount of data we are prepared to read into memory at once.
MAX_DATA_SIZE = 100000000
class IOManagerError(IOError):
    """An IOError from the IO Manager."""
class EncodeError(IOError):
    """Raised when unable to encode data for the IO Manager."""
class DecodeError(IOError):
    """Raised when unable to decode data from the IO Manager."""
class IOManager(with_metaclass(registry.MetaclassRegistry, object)):
    """The baseclass for abstracted IO implementations.

    The IO manager classes are responsible for managing access to profiles. A
    profile is a JSON dict which is named using a standard notation. For
    example, the profile for a certain NT kernel is:

      nt/GUID/BF9E190359784C2D8796CF5537B238B42

    The IO manager may actually store the profile file using some other scheme,
    but that internal scheme is private to itself.
    """

    __abstract = True

    # Managers with a lower order are tried first by Factory().
    order = 100

    def __init__(self, urn=None, mode="r", session=None, pretty_print=True,
                 version=constants.PROFILE_REPOSITORY_VERSION):
        """Initialize the IOManager.

        Args:
          urn: The path to the IO manager. This might contain a scheme or
            protocol specific to a certain IO manager implementation.

          mode: Can be "r" or "w".

          session: The session object.

          pretty_print: If specified we dump sorted yaml data - this ends up
            being more compressible in reality.

          version: The required version of the repository. The IOManager is
            free to implement arbitrary storage for different versions if
            required. Versioning the repository allows us to update the
            repository file format transparently without affecting older
            Rekall versions.

        Raises:
          RuntimeError: if no session is provided.
        """
        self.mode = mode
        self.urn = urn
        self.version = version
        # BUG FIX: compare against None by identity (was "session == None").
        if session is None:
            raise RuntimeError("Session must be set")
        self.session = session
        self.pretty_print = pretty_print
        # The inventory is loaded lazily on first access (see property below).
        self._inventory = None
        self.location = ""
        # Set whenever data has been written; FlushInventory() is a no-op
        # unless this is True.
        self._dirty = False

    @utils.safe_property
    def inventory(self):
        # Lazily load the inventory from storage the first time it is used.
        if self._inventory is None:
            self._inventory = self.GetData("inventory", default={})

        return self._inventory

    def ValidateInventory(self):
        """Check that the cached inventory looks like a real inventory.

        Returns:
          True when the inventory carries the expected $METADATA markers.
          Otherwise a warning is logged, a minimal empty inventory is
          installed (so subsequent calls do not keep failing) and False is
          returned.
        """
        try:
            metadata = self.inventory.get("$METADATA")
            if (metadata.get("ProfileClass") == "Inventory"
                    and metadata.get("Type") == "Inventory"):
                return True
        except (AttributeError, IndexError, ValueError):
            pass

        self.session.logging.warn(
            'Inventory for repository "%s" seems malformed. Are you behind a '
            'captive portal or proxy? If this is a custom repository, did you '
            'forget to create an inventory? You must use the '
            'tools/profiles/build_profile_repo.py tool with the --inventory '
            'flag.', self.location or self.urn)

        # If the profile didn't validate, we still fix it so subsequent calls
        # won't generate additional errors. StoreData and FlushInventory also
        # rely on this behaviour.
        if not self._inventory:
            self._inventory = {
                "$METADATA": dict(
                    Type="Inventory",
                    ProfileClass="Inventory"),
                "$INVENTORY": {},
            }

        return False

    def CheckInventory(self, path):
        """Checks the validity of the inventory and if the path exists in it.

        The inventory is a json object at the root of the repository which
        lists all the profiles in this repository. It allows us to determine
        quickly if a profile exists in this repository.
        """
        if self.ValidateInventory():
            return path in self.inventory.get("$INVENTORY")

        return False

    def Metadata(self, path):
        """Returns metadata about a path (empty dict when unknown)."""
        inventory = self.inventory.get("$INVENTORY", {})
        return inventory.get(path, {})

    def SetMetadata(self, name, options):
        """Merge options into the stored metadata for name and persist it."""
        existing_options = self.Metadata(name)
        existing_options.update(options)
        self.inventory.setdefault("$INVENTORY", {})[name] = existing_options
        # BUG FIX: mark the inventory dirty, otherwise the FlushInventory()
        # call below returns immediately and the metadata update is lost.
        self._dirty = True
        self.FlushInventory()

    def FlushInventory(self):
        """Write the inventory to the storage, if it changed."""
        if not self._dirty:
            return

        self.inventory.setdefault("$METADATA", dict(
            Type="Inventory",
            ProfileClass="Inventory"))

        self.inventory.setdefault("$INVENTORY", dict())
        self.StoreData("inventory", self.inventory)
        self._dirty = False

    def ListFiles(self):
        """Returns a generator over all the files in this container."""
        return []

    def Create(self, name, **options):
        """Creates a new file in the container.

        Returns a file like object which should support the context manager
        protocol. If the file already exists in the container, overwrite it.

        For example:

          with self.session.io_manager.Create("foobar") as fd:
            fd.Write("hello world")

        Args:
          name: The name of the new file.
          **options: Implementation specific options (e.g. "uncompressed").
        """

    def Destroy(self, name):
        """Destroys the file/directory at name's path."""

    def Open(self, name):
        """Opens a container member for reading.

        This should return a file like object which provides read access to
        container members.

        Raises:
          IOManagerError: If the file is not found.
        """

    def Encoder(self, data, **options):
        """Serialize data for storage (raw string, yaml or json)."""
        if options.get("raw"):
            return utils.SmartStr(data)

        if self.pretty_print:
            # Sorted yaml output ends up being more compressible.
            return utils.PPrint(data)

        return json.dumps(data, sort_keys=True, **options)

    def Decoder(self, raw):
        """Deserialize raw stored bytes back into a python object."""
        return json.loads(utils.SmartUnicode(raw))

    def GetData(self, name, raw=False, default=None):
        """Get the data object stored at container member.

        This returns an arbitrary python object which is stored in the named
        container member. For example, normally a dict or list. This function
        wraps the Open() method above and add deserialization to retrieve the
        actual object.

        Returns *default* (a NoneObject unless overridden) if the file is not
        found or can not be decoded.

        Args:
          name: The name to retrieve the data under.
          raw: If specified we do not parse the data, simply return it as is.
          default: The value returned on failure.
        """
        if default is None:
            default = obj.NoneObject()

        try:
            fd = self.Open(name)
            data = fd.read(MAX_DATA_SIZE)
            if raw:
                return data

            return self.Decoder(data)
        except IOError:
            return default
        except Exception as e:
            self.session.logging.error(
                "Cannot parse profile %s because of decoding error '%s'.",
                name, e)
            return default

    def StoreData(self, name, data, **options):
        """Stores the data in the named container member.

        This serializes the data and stores it in the named member. Not all
        types of data are serializable, so this may raise. For example, when
        using JSON to store the data, arbitrary python objects may not be
        used.

        Args:
          name: The name under which the data will be stored.
          data: The data to store.

        Common options:
          raw: If true we write the data directly without encoding to json. In
            this case data should be a string.
          uncompressed: File will not be compressed (default gzip compression).
        """
        try:
            to_write = self.Encoder(data, **options)
        except EncodeError:
            self.session.logging.error("Unable to serialize %s", name)
            return

        self._StoreData(name, to_write, **options)

        # Update the inventory (the inventory itself is never listed in it).
        if name != "inventory":
            self.inventory.setdefault("$INVENTORY", {})[name] = dict(
                LastModified=time.time())
            self.FlushInventory()

    def _StoreData(self, name, to_write, **options):
        """Write the already-encoded payload. Subclasses may override."""
        with self.Create(name, **options) as fd:
            fd.write(to_write)
            self._dirty = True

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass
class DirectoryIOManager(IOManager):
    """An IOManager which stores everything in files.

    We prefer to store the profile file as a gzip compressed file within a
    versioned directory. For example the profile:

      nt/GUID/BF9E190359784C2D8796CF5537B238B42

    will be stored in:

      $urn/nt/GUID/BF9E190359784C2D8796CF5537B238B42.gz

    Where $urn is the path where the DirectoryIOManager was initialized with.
    """

    # Any paths beginning with these prefixes will not be included in the
    # inventory.
    EXCLUDED_PATH_PREFIX = []

    def __init__(self, urn=None, **kwargs):
        super(DirectoryIOManager, self).__init__(**kwargs)

        self.location = self.dump_dir = os.path.normpath(os.path.abspath(
            os.path.expandvars(urn)))
        if not self.version:
            self.version = ""

        self.check_dump_dir(self.dump_dir)
        self.canonical_name = os.path.basename(self.dump_dir)

    @utils.safe_property
    def inventory(self):
        # In DirectoryIOManager the inventory reflects the directory
        # structure: when no inventory file exists one is synthesized by
        # walking the directory tree.
        if self._inventory is None:
            self._inventory = self.GetData("inventory", default={})
            if not self._inventory:
                self._inventory = self.RebuildInventory()

        return self._inventory

    def RebuildInventory(self):
        """Rebuild the inventory file from the files on disk."""
        result = {
            "$METADATA": dict(
                Type="Inventory",
                ProfileClass="Inventory"),
            "$INVENTORY": {},
        }

        for member in self.ListFiles():
            if not self._is_excluded_member(member):
                result["$INVENTORY"][member] = self.Metadata(member)

        return result

    def _is_excluded_member(self, member):
        """True if member falls under one of the excluded path prefixes."""
        for prefix in self.EXCLUDED_PATH_PREFIX:
            if member.startswith(prefix):
                return True

        return False

    def CheckInventory(self, path):
        """Checks the validity of the inventory and if the path exists in it.

        The inventory is a json object at the root of the repository which
        lists all the profiles in this repository. It allows us to determine
        quickly if a profile exists in this repository.
        """
        if self.ValidateInventory():
            path = self.GetAbsolutePathName(path)
            return os.access(path, os.R_OK) or os.access(path + ".gz", os.R_OK)

        return False

    def Metadata(self, path):
        """Return LastModified metadata derived from the file's mtime."""
        path = self.GetAbsolutePathName(path)
        try:
            # Prefer the compressed variant of the member.
            try:
                st = os.stat(path + ".gz")
            except OSError:
                if os.path.isdir(path):
                    return {}

                st = os.stat(path)

            return dict(LastModified=st.st_mtime)
        except OSError:
            return {}

    def check_dump_dir(self, dump_dir=None):
        """Validate (and in write mode create) the dump directory."""
        if not dump_dir:
            raise IOManagerError("Please specify a dump directory.")

        if self.mode == "w":
            self.EnsureDirectoryExists(self.dump_dir)

        if not os.path.isdir(dump_dir):
            # BUG FIX: report the directory that was actually checked rather
            # than self.dump_dir (they can differ when called explicitly).
            raise IOManagerError("%s is not a directory" % dump_dir)

    def GetAbsolutePathName(self, name):
        """Resolve name inside the container, refusing path escapes."""
        path = os.path.normpath(
            os.path.join(self.dump_dir, self.version, name))
        if not path.startswith(self.dump_dir):
            raise IOManagerError("Path name is outside container.")

        return path

    def EnsureDirectoryExists(self, dirname):
        # Intentionally best-effort: an already-existing directory (or a
        # permission problem surfacing later) is silently ignored here.
        try:
            os.makedirs(dirname)
        except OSError:
            pass

    def ListFiles(self):
        """Yield member names relative to the versioned dump directory."""
        top_level = os.path.join(self.dump_dir, self.version)
        for root, _, files in os.walk(top_level):
            for f in files:
                path = os.path.normpath(os.path.join(root, f))
                if path.endswith(".gz"):
                    path = path[:-3]

                # Return paths relative to the dump dir.
                yield path[len(top_level) + 1:]

    def Create(self, name, **options):
        """Create a new (gzip compressed) member for writing.

        BUG FIX: accept (and ignore) the keyword options the base-class
        Create() contract declares, so generic callers do not crash.
        """
        _ = options
        path = self.GetAbsolutePathName(name)
        self.EnsureDirectoryExists(os.path.dirname(path))
        return gzip.open(path + ".gz", "wb")

    def Destroy(self, name):
        path = self.GetAbsolutePathName(name)
        return shutil.rmtree(path)

    def Open(self, name):
        path = self.GetAbsolutePathName(name)
        try:
            # Prefer the uncompressed file, fall back to the .gz variant.
            result = open(path, "rb")
        except IOError:
            result = gzip.open(path + ".gz")

        # Lazy logging args avoid the eager % formatting of the old code.
        self.session.logging.debug("Opened local file %s", result.name)

        return result

    def _StoreData(self, name, to_write, **options):
        path = self.GetAbsolutePathName(name)
        self.EnsureDirectoryExists(os.path.dirname(path))

        # If we are asked to write uncompressed files we do.
        if options.get("uncompressed"):
            with open(path, "wt") as out_fd:
                out_fd.write(utils.SmartUnicode(to_write))
                self._dirty = True

            return

        # We need to update the file atomically in case someone else is trying
        # to open it right now. Since the files are expected to be fairly
        # small its ok to compress into memory and just write atomically.
        fd = io.BytesIO()
        with gzip.GzipFile(mode="wb", fileobj=fd) as gzip_fd:
            gzip_fd.write(utils.SmartStr(to_write))

        with open(path + ".gz", "wb") as out_fd:
            out_fd.write(fd.getvalue())

        self._dirty = True

    def __str__(self):
        return u"Directory:%s" % self.dump_dir
# pylint: disable=protected-access
class SelfClosingFile(io.StringIO):
    """An in-memory file that hands its contents to a manager on success.

    When the context manager exits cleanly the buffered text is committed to
    the owning manager via _Write(); if an exception occurred the pending
    write is abandoned via _Cancel() instead.
    """

    def __init__(self, name, manager):
        self.name = name
        self.manager = manager
        io.StringIO.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is not None:
            # The body raised - drop the buffered data.
            self.manager._Cancel(self.name)
        else:
            self.Close()

    def Close(self):
        """Commit the buffered contents to the manager."""
        self.manager._Write(self.name, self.getvalue())
class ZipFileManager(IOManager):
    """An IO Manager which stores files in a zip archive."""

    order = 50

    def __init__(self, urn=None, fd=None, **kwargs):
        super(ZipFileManager, self).__init__(**kwargs)
        if fd is None and not urn.lower().endswith("zip"):
            if self.mode == "w":
                raise IOManagerError(
                    "Zip files must have the .zip extensions.")

        self.fd = fd
        # BUG FIX: always define self.zip so Open() can lazily (re)open the
        # archive instead of raising AttributeError when constructed from an
        # fd (the urn-less path below never called _OpenZipFile()).
        self.zip = None
        if urn is not None:
            self.location = self.file_name = os.path.normpath(
                os.path.abspath(urn))

            self.canonical_name = os.path.splitext(os.path.basename(urn))[0]
            self._OpenZipFile()

        # The set of outstanding writers. When all outstanding writers have
        # been closed we can flush the ZipFile.
        self._outstanding_writers = set()

    @utils.safe_property
    def inventory(self):
        """We do not really need an inventory for zip files.

        We return a fake one based on the zip file's modification time.
        """
        result = {}
        for zipinfo in self.zip.filelist:
            result[zipinfo.filename] = zipinfo.date_time

        return {
            "$INVENTORY": result
        }

    def FlushInventory(self):
        # The inventory above is synthesized, nothing to persist.
        pass

    def _OpenZipFile(self, mode=None):
        try:
            if self.fd is None:
                self.zip = zipfile.ZipFile(
                    self.file_name, mode=mode or self.mode,
                    compression=zipfile.ZIP_DEFLATED)

            elif self.mode == "r":
                self.zip = zipfile.ZipFile(self.fd, mode="r")

            elif self.mode == "a":
                self.zip = zipfile.ZipFile(self.fd, mode="a")

        except zipfile.BadZipfile:
            raise IOManagerError("Unable to read zipfile.")

    def ListFiles(self):
        return self.zip.namelist()

    def _Cancel(self, name):
        # Called by SelfClosingFile when the writer raised.
        self._outstanding_writers.remove(name)

    def _Write(self, name, data):
        """Called by SelfClosingFile.Close() to commit a member."""
        self.zip.writestr(name, data)
        self._outstanding_writers.remove(name)
        if not self._outstanding_writers:
            self.zip.close()

            # Reopen the zip file so we may add new members.
            self._OpenZipFile(mode="a")

    def Create(self, name, **options):
        """Open a new member for writing.

        BUG FIX: accept (and ignore) the keyword options that the base-class
        _StoreData() passes through to Create() - previously any option such
        as uncompressed=True raised TypeError here.
        """
        _ = options
        if self.mode not in ["w", "a"]:
            raise IOManagerError("Container not opened for writing.")

        result = SelfClosingFile(name, self)
        self._outstanding_writers.add(name)
        return result

    def Destroy(self, name):
        _ = name
        raise IOManagerError(
            "Removing a file from zipfile is not supported. Use a different "
            "IOManager subclass.")

    def Open(self, name):
        if self.mode not in ["r", "a"]:
            raise IOManagerError("Container not opened for reading.")

        if self.zip is None:
            self._OpenZipFile()

        try:
            return self.zip.open(name)
        except KeyError as e:
            raise IOManagerError(e)

    def __enter__(self):
        # The manager itself counts as an outstanding writer so the archive
        # is not flushed until the context exits.
        self._outstanding_writers.add(self)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._outstanding_writers.remove(self)
        if exc_type is None and not self._outstanding_writers:
            self.zip.close()
            if self.mode in ["w", "a"]:
                self._OpenZipFile(mode="a")

    def Close(self):
        self.zip.close()

    def __str__(self):
        return u"ZipFile:%s" % self.file_name
class URLManager(IOManager):
    """Supports opening profile repositories hosted over the web."""

    def __init__(self, urn=None, mode="r", **kwargs):
        super(URLManager, self).__init__(urn=urn, mode=mode, **kwargs)
        if mode != "r":
            raise IOManagerError("%s supports only reading." %
                                 self.__class__.__name__)

        self.url = urllib.parse.urlparse(utils.SmartUnicode(urn))
        if self.url.scheme not in ("http", "https"):
            raise IOManagerError("%s supports only http protocol." %
                                 self.__class__.__name__)

    def Create(self, name):
        """Creation over HTTP is unsupported - this manager is read only."""
        _ = name
        raise IOManagerError("Write support to http is not supported.")

    def Destroy(self, name):
        """Deletion over HTTP is unsupported - this manager is read only."""
        _ = name
        raise IOManagerError("Write support to http is not supported.")

    def _GetURL(self, name):
        """Build the full URL of the repository member called name."""
        url = self.url._replace(path="%s/%s/%s" % (
            self.url.path, self.version, name))
        return urllib.parse.urlunparse(url)

    def Open(self, name):
        url = self._GetURL(name)

        try:
            # Rekall repositories always use gzip to compress the files - so
            # first try with the .gz extension.
            fd = urllib.request.urlopen(url + ".gz", timeout=10)
            self.session.logging.debug("Opened url %s.gz", url)

            return gzip.GzipFile(fileobj=io.BytesIO(fd.read(MAX_DATA_SIZE)))
        except urllib.error.HTTPError:
            # Try to load the file without the .gz extension.
            # BUG FIX: only log success after the open actually succeeded
            # (the old code logged "Opened url" before attempting it), and
            # use lazy logging arguments instead of eager % formatting.
            fd = urllib.request.urlopen(url, timeout=10)
            self.session.logging.debug("Opened url %s", url)
            return fd

    def __str__(self):
        return u"URL:%s" % self.urn
def Factory(urn, mode="r", session=None, **kwargs):
    """Try to instantiate the IOManager class.

    Managers are tried in increasing `order`; the first one that accepts the
    urn wins. Raises IOManagerError when none of them can handle it.
    """
    for manager_cls in sorted(IOManager.classes.values(),
                              key=lambda cls: cls.order):
        try:
            return manager_cls(urn=urn, mode=mode, session=session, **kwargs)
        except IOError:
            # This implementation cannot handle the urn - try the next one.
            continue

    raise IOManagerError(
        "Unable to find any managers which can work on %s" % urn)
from __future__ import division
from past.utils import old_div
from builtins import object
from rekall_lib import registry
from rekall_lib import utils
from future.utils import with_metaclass
class Zeroer(object):
    """Caches zero-filled byte strings so they need not be rebuilt per read."""

    def __init__(self):
        self.store = utils.FastStore(10, lock=True)

    def GetZeros(self, length):
        """Return a bytes object of `length` zero bytes, cached by length."""
        try:
            return self.store.Get(length)
        except KeyError:
            pass

        zeros = b"\x00" * length
        self.store.Put(length, zeros)
        return zeros
# Keep a bunch of zeros around for speed.
# Module-level singleton shared by the address spaces defined below.
ZEROER = Zeroer()
class TranslationLookasideBuffer(object):
    """An implementation of a TLB.

    This can be used by an address space to cache virtual-to-physical page
    translations so repeated lookups on the same page are cheap.
    """

    PAGE_SHIFT = 12
    PAGE_ALIGNMENT = (1 << PAGE_SHIFT) - 1
    PAGE_MASK = ~ PAGE_ALIGNMENT

    def __init__(self, max_size=10):
        self.page_cache = utils.FastStore(max_size)

    def Get(self, vaddr):
        """Returns the cached physical address for this virtual address."""
        # Only page aligned addresses are cached; the in-page offset is
        # re-applied to the cached physical page address.
        page_base = vaddr & self.PAGE_MASK
        offset_in_page = vaddr & self.PAGE_ALIGNMENT

        cached = self.page_cache.Get(page_base)
        if cached is None:
            # A cached None is a valid entry meaning "no mapping exists".
            return None

        return cached + offset_in_page

    def Put(self, vaddr, paddr):
        """Cache the translation for the page-aligned vaddr."""
        if vaddr & self.PAGE_ALIGNMENT:
            raise TypeError("TLB must only cache aligned virtual addresses.")

        self.page_cache.Put(vaddr, paddr)
class Run(object):
    """Describes one contiguous mapped region (a "run") of an address space."""

    __slots__ = ("start", "end", "address_space", "file_offset", "data")

    def __init__(self, start=None, end=None, address_space=None,
                 file_offset=None, data=None):
        self.start = start
        self.end = end
        self.address_space = address_space
        self.file_offset = file_offset
        self.data = data

    @utils.safe_property
    def length(self):
        """Size of the run in bytes."""
        return self.end - self.start

    @length.setter
    def length(self, value):
        self.end = self.start + value

    def copy(self, **kw):
        """Return a new Run with the same fields, overridden by **kw."""
        state = dict(start=self.start,
                     end=self.end,
                     address_space=self.address_space,
                     file_offset=self.file_offset,
                     data=self.data)
        state.update(kw)
        return self.__class__(**state)

    def __str__(self):
        if self.file_offset is None:
            return u"<%#x, %#x>" % (self.start, self.end)

        return u"<%#x, %#x> -> %#x @ %s" % (
            self.start, self.end, self.file_offset,
            self.address_space)
class BaseAddressSpace(with_metaclass(registry.MetaclassRegistry, object)):
    """ This is the base class of all Address Spaces. """

    __abstract = True

    # Address spaces with a lower order are tried earlier during voting.
    order = 10

    # This can be used to name the address space (e.g. process if etc).
    name = ""

    # Some useful metadata for address spaces.

    # This signifies that this address space normally operates on memory
    # images. This flag controls if this address space will participate in
    # address space autoselection for image detection. Note that it can not be
    # inherited but must be explicitly set. (The name is mangled per-class to
    # _<ClassName>__image, which is how metadata() below looks it up.)
    __image = False

    # This flag signifies whether this address space's contents are likely to
    # change between reads. If an address space is NOT volatile (this flag is
    # False) then reads from the same offset MUST always return the same bytes.
    volatile = False

    # This flag signifies whether this address space is for a virtual machine.
    virtualized = False

    def __init__(self, base=None, session=None, profile=None, **_):
        """Base is the AS we will be stacking on top of, opts are options which
        we may use.

        Args:
          base: A base address space to stack on top of (i.e. delegate to it for
            satisfying read requests).

          session: An optional session object.

          profile: An optional profile to use for parsing the address space
            (e.g. needed for hibernation, crash etc.)

        Raises:
          RuntimeError: if no session could be determined.
        """
        # Inherit the session from the base address space if not given.
        if session is None and base is not None:
            session = base.session

        self.base = base
        if base:
            # Volatility propagates up the stack: reads through a volatile
            # base may change between calls.
            self.volatile = self.base.volatile

        self.profile = profile
        self.session = session
        if session is None:
            raise RuntimeError("Session must be provided.")

    def as_assert(self, assertion, error=None):
        """Duplicate for the assert command (so that optimizations don't disable
        them)

        It had to be called as_assert, since assert is a keyword

        Raises:
          ASAssertionError: when the assertion does not hold.
        """
        if not assertion:
            raise ASAssertionError(
                error or "Instantiation failed for unspecified reason")

    def describe(self, addr):
        """Return a string describing an address."""
        return "%#x" % addr

    def read(self, unused_addr, length):
        """Should be overridden by derived classes."""
        if length > self.session.GetParameter("buffer_size"):
            raise IOError("Too much data to read.")

        # The base implementation simply returns zero padding.
        return ZEROER.GetZeros(length)

    def get_mappings(self, start=0, end=2**64):
        """Generates a sequence of Run() objects.

        Each Run object describes a single range transformation from this
        address space to another address space at a potentially different
        mapped_offset.

        Runs are assumed to not overlap and are generated in increasing order.

        Args:
          start: The suggested start address we are interested in. This function
            may omit runs that lie entirely below this start address. Note:
            Runs are not adjusted to begin at the start address - it may be
            possible that this method returns a run which starts earlier than
            the specified start address.
        """
        _ = start
        _ = end
        return []

    def end(self):
        """Return the end of the last mapped run, or 0 when nothing maps."""
        runs = list(self.get_mappings())
        if runs:
            last_run = runs[-1]
            return last_run.end
        return 0

    def get_address_ranges(self, start=0, end=0xfffffffffffff):
        """Generates the runs which fall between start and end.

        Note that start and end are here specified in the virtual address
        space. More importantly this does not say anything about the pages in
        the physical address space - just because pages in the virtual address
        space are contiguous does not mean they are also contiguous in the
        physical address space.

        Yields:
          Run objects describing merged virtual address ranges. NOTE: These runs
          do not have file_offset or address_space members since the file_offset
          is not the same across the entire range and therefore it does not make
          sense to directly read the base address space - If you want to do
          this, use merge_base_ranges() instead.
        """
        last_voffset = last_voffset_end = 0
        for run in self.get_mappings(start=start, end=end):
            # No more runs apply.
            if run.start > end:
                break

            if run.start < start:
                # We dont care about the file_offset here since it will be
                # dropped later.
                run = run.copy(start=start)

            # This can take some time as we enumerate all the address ranges.
            self.session.report_progress(
                "%(name)s: Merging Address Ranges %(offset)#x %(spinner)s",
                offset=run.start, name=self.name)

            # Extend the last range if this range starts at the end of the last
            # one.
            if run.start == last_voffset_end:
                last_voffset_end = run.end
            else:
                # Emit the last range
                if last_voffset_end > last_voffset:
                    yield Run(start=last_voffset,
                              end=last_voffset_end)

                # Reset the contiguous range.
                last_voffset = run.start
                last_voffset_end = min(run.end, end)

        # Flush the trailing accumulated range.
        if last_voffset_end > last_voffset:
            yield Run(start=last_voffset, end=last_voffset_end)

    def merge_base_ranges(self, start=0, end=0xfffffffffffff):
        """Generates merged address ranges from get_mapping().

        This method is subtly different from get_address_ranges in that runs are
        contiguous in the base address space, hence the yielded runs have a
        valid file_offset member. Callers can safely issue read operations to
        the address space.

        Yields:
          runs which are contiguous in the base address space. This function
          is designed to produce ranges more optimized for reducing the number
          of read operations from the underlying base address space.
        """
        contiguous_voffset = 0
        contiguous_voffset_end = 0
        contiguous_poffset = 0
        last_run_length = 0
        last_as = None

        for run in self.get_mappings(start=start, end=end):
            # No more runs apply.
            if end and run.start > end:
                break

            if run.start < start:
                # Trim the run to the start and shift its file offset to keep
                # the virtual -> base correspondence intact.
                run = run.copy(
                    start=start,
                    file_offset=run.file_offset + start - run.start)

            # This can take some time as we enumerate all the address ranges.
            self.session.report_progress(
                "%(name)s: Merging Address Ranges %(offset)#x %(spinner)s",
                offset=run.start, name=self.name)

            # Try to join up adjacent pages as much as possible. A run only
            # merges when it is adjacent both virtually and in the base AS and
            # comes from the same address space object.
            if (run.start == contiguous_voffset_end and
                    run.file_offset == contiguous_poffset + last_run_length and
                    run.address_space is last_as):
                contiguous_voffset_end = min(run.end, end)
                last_run_length = contiguous_voffset_end - contiguous_voffset
                last_as = run.address_space

            else:
                if last_run_length > 0:
                    yield Run(start=contiguous_voffset,
                              end=contiguous_voffset_end,
                              address_space=last_as,
                              file_offset=contiguous_poffset)

                # Reset the contiguous range.
                contiguous_voffset = run.start
                contiguous_voffset_end = min(run.end, end)
                contiguous_poffset = run.file_offset or 0
                last_run_length = contiguous_voffset_end - contiguous_voffset
                last_as = run.address_space

        # Flush the trailing accumulated range.
        if last_run_length > 0:
            yield Run(start=contiguous_voffset,
                      end=contiguous_voffset_end,
                      address_space=last_as,
                      file_offset=contiguous_poffset)

    def is_valid_address(self, _addr):
        """Tell us if the address is valid """
        return True

    def write(self, addr, buf):
        """Write to the address space, if writable.

        The default behavior is to delegate the write to the base address space.
        If an address space has no base then this function will throw an
        IOError. Address spaces that actually implement writing should override.

        Raises:
          IOError if there is no base address space. Subclasses may raise
            under additional circumstances.

        Arguments:
          addr: The address to write at, as understood by this AS (i.e.
            a virtual address for virtual address spaces, physical for
            physical).
          buf: The data to write - most commonly a basestring instance.

        Returns:
          Number of bytes written.
        """
        if not self.base:
            raise IOError("No base address space set on %r." % self)

        return self.base.write(self.vtop(addr), buf)

    def vtop(self, addr):
        """Return the physical address of this virtual address."""
        # For physical address spaces, this is a noop.
        return addr

    def vtop_run(self, addr):
        """Returns a Run object describing where addr can be read from."""
        return Run(start=addr,
                   end=addr,
                   address_space=self,
                   file_offset=addr)

    @classmethod
    def metadata(cls, name, default=None):
        """Obtain metadata about this address space."""
        # Resolves the name-mangled class attribute, e.g. __image above.
        return getattr(cls, "_%s__%s" % (cls.__name__, name), default)

    def __str__(self):
        return self.__class__.__name__

    def __repr__(self):
        return u"<%s @ %#x %s>" % (
            self.__class__.__name__, id(self), self.name)

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 - confirm no caller stores address spaces in
        # sets or as dict keys.
        return (isinstance(other, self.__class__) and
                self.base == other.base)

    def get_file_address_space(self, filename):
        """Implement this to return an address space for filename."""

    def get_mapped_offset(self, filename, offset):
        """Implement this if we can map files into this address space."""

    def ConfigureSession(self, session_obj):
        """Implement this method if you need to configure the session."""

    def close(self):
        pass
class BufferAddressSpace(BaseAddressSpace):
    """Specialized address space for internal use.

    Provides transparent reads through to a string buffer, so that profile
    types can be instantiated on top of strings.
    """

    __image = False

    @utils.safe_property
    def writable(self):
        """Buffer AS is always writable, no matter what the session says."""
        return True

    def __init__(self, base_offset=0, data='', **kwargs):
        super(BufferAddressSpace, self).__init__(**kwargs)
        self.fname = "Buffer"
        self.data = data
        # The virtual address corresponding to offset 0 of the buffer.
        self.base_offset = base_offset

    def assign_buffer(self, data, base_offset=0):
        """Replace the underlying buffer and its virtual base offset."""
        self.base_offset = base_offset
        self.data = data

    def is_valid_address(self, addr):
        return not (addr < self.base_offset or
                    addr > self.base_offset + len(self.data))

    def read(self, addr, length):
        offset = addr - self.base_offset
        data = self.data[offset: offset + length]
        # Short reads are zero padded up to the requested length.
        return data + ZEROER.GetZeros(length - len(data))

    def write(self, addr, data):
        # NOTE(review): read() translates addr by base_offset but write()
        # uses addr directly as a buffer offset - confirm this asymmetry is
        # intentional before relying on it.
        if addr > len(self.data):
            raise ValueError(
                "Cannot write to offset %d of buffer with size %d." %
                (addr, len(self.data)))
        self.data = self.data[:addr] + data + self.data[addr + len(data):]
        return len(data)

    def get_mappings(self, start=0, end=2**64):
        """Yield a single Run covering the buffer if it overlaps the range.

        BUG FIX: the previous implementation compared the *bound method*
        self.end against integers (raising TypeError on Python 3), yielded
        end=self.end (a method object, not an address) and defaulted start to
        None which also crashed. The method is now called, and start defaults
        to 0 to match BaseAddressSpace.get_mappings().
        """
        buffer_end = self.end()
        if buffer_end > start and buffer_end < end:
            yield Run(start=self.base_offset,
                      end=buffer_end,
                      file_offset=self.base_offset,
                      address_space=self)

    def get_buffer_offset(self, offset):
        """Returns the offset in self.data for the virtual offset."""
        return offset - self.base_offset

    def __repr__(self):
        return "<%s @ %#x %s [%#X-%#X]>" % (
            self.__class__.__name__, id(self), self.name,
            self.base_offset, self.end())

    def __len__(self):
        return len(self.data)

    def end(self):
        """Return the end address of the buffer."""
        return self.base_offset + len(self.data)
class CachingAddressSpaceMixIn(object):
    """Mixin that caches fixed-size chunks of the next AS in the MRO."""

    # The size of chunks we cache. This should be large enough to make file
    # reads efficient.
    CHUNK_SIZE = 32 * 1024
    CACHE_SIZE = 10

    def __init__(self, **kwargs):
        super(CachingAddressSpaceMixIn, self).__init__(**kwargs)
        self._cache = utils.FastStore(self.CACHE_SIZE)

    def read(self, addr, length):
        """Read length bytes at addr by assembling cached chunks."""
        addr, length = int(addr), int(length)
        result = b""

        while length > 0:
            data = self.read_partial(addr, length)
            if not data:
                break

            result += data
            length -= len(data)
            addr += len(data)

        return result

    def cached_read_partial(self, addr, length):
        """Implement this to allow the caching mixin to cache these reads."""
        # By default call the next read_partial in the inheritance tree.
        return super(CachingAddressSpaceMixIn, self).read(addr, length)

    def read_partial(self, addr, length):
        """Read at most one chunk's worth of data starting at addr."""
        # BUG FIX: identity comparison with None (was "addr == None").
        if addr is None:
            return addr

        chunk_number = addr // self.CHUNK_SIZE
        chunk_offset = addr % self.CHUNK_SIZE

        # Do not cache large reads but still pad them to CHUNK_SIZE.
        if chunk_offset == 0 and length > self.CHUNK_SIZE:
            # Deliberately do a short read to avoid copying.
            to_read = length - length % self.CHUNK_SIZE
            return self.cached_read_partial(addr, to_read)

        available_length = min(length, self.CHUNK_SIZE - chunk_offset)

        try:
            data = self._cache.Get(chunk_number)
        except KeyError:
            # Just read the data from the real class.
            data = self.cached_read_partial(
                chunk_number * self.CHUNK_SIZE, self.CHUNK_SIZE)
            self._cache.Put(chunk_number, data)

        return data[chunk_offset:chunk_offset + available_length]
class PagedReader(BaseAddressSpace):
    """An address space which reads in page size.

    This automatically takes care of splitting a large read into smaller reads.
    """
    PAGE_SIZE = 0x1000
    PAGE_MASK = ~(PAGE_SIZE - 1)
    __abstract = True

    def _read_chunk(self, vaddr, length):
        """Read bytes from a virtual address.

        Args:
          vaddr: A virtual address to read from.
          length: The number of bytes to read.

        Returns:
          As many bytes as can be read within this page.
        """
        to_read = min(length, self.PAGE_SIZE - (vaddr % self.PAGE_SIZE))
        paddr = self.vtop(vaddr)
        if paddr is None:
            # Unmapped pages read back as zeros.
            return ZEROER.GetZeros(to_read)

        return self.base.read(paddr, to_read)

    def _write_chunk(self, vaddr, buf):
        """Write at most one page's worth of buf at vaddr."""
        to_write = min(len(buf), self.PAGE_SIZE - (vaddr % self.PAGE_SIZE))
        if not to_write:
            return 0

        # NOTE(review): this treats a translated physical address of 0 the
        # same as an unmapped page - confirm paddr 0 is never valid here.
        paddr = self.vtop(vaddr)
        if not paddr:
            return 0

        return self.base.write(paddr, buf[:to_write])

    def write(self, addr, buf):
        """Write buf at addr one page-sized chunk at a time.

        Returns the number of bytes actually written.
        """
        available = len(buf)
        written = 0

        while available > written:
            chunk_len = self._write_chunk(addr + written, buf[written:])
            if not chunk_len:
                break

            written += chunk_len

        return written

    def read(self, addr, length):
        """Read 'length' bytes from the virtual address 'vaddr'."""
        if length > self.session.GetParameter("buffer_size"):
            raise IOError("Too much data to read.")

        addr, length = int(addr), int(length)

        result = b''

        while length > 0:
            buf = self._read_chunk(addr, length)
            if not buf:
                break

            result += buf
            addr += len(buf)
            length -= len(buf)

        return result

    def is_valid_address(self, addr):
        # BUG FIX: identity comparison with None (was "vaddr != None").
        vaddr = self.vtop(addr)
        return vaddr is not None and self.base.is_valid_address(vaddr)
class RunBasedAddressSpace(PagedReader):
"""An address space which uses a list of runs to specify a mapping.
This essentially delegates certain address ranges to other address spaces
"mapped" into this address space.
The runs are tuples of this form:
(virtual_address, physical_address, length, address_space)
- Virtual Address - An address in this address space's virtual address
space.
- Physical Address - An address in the delegate address space.
- Length - The length of the mapped region.
- Address space - the address space that should be read for this
region. Note that the physical address above refers to addresses in this
delegate address space.
"""
# This is a list of (memory_offset, file_offset, length) tuples.
runs = None
__abstract = True
def __init__(self, **kwargs):
super(RunBasedAddressSpace, self).__init__(**kwargs)
self.runs = utils.RangedCollection()
def add_run(self, virt_addr, file_address, file_len, address_space=None,
data=None):
"""Add a new run to this address space."""
if address_space is None:
address_space = self.base
start = virt_addr # Range start
end = virt_addr + file_len # Range end
self.runs.insert(start, end,
Run(start=start,
end=end,
address_space=address_space,
file_offset=file_address,
data=data))
def _read_chunk(self, addr, length):
"""Read from addr as much as possible up to a length of length."""
start, end, run = self.runs.get_containing_range(addr)
# addr is not in any range, pad to the next range.
if start is None:
end = self.runs.get_next_range_start(addr)
if end is None:
end = addr + length
return ZEROER.GetZeros(min(end - addr, length))
# Read as much as we can from this address space.
available_length = min(end - addr, length)
file_offset = run.file_offset + addr - start
return run.address_space.read(file_offset, available_length)
def _write_chunk(self, addr, buf):
length = len(buf)
start, end, run = self.runs.get_containing_range(addr)
# addr is not in any range, ignore to the next range.
if start is None:
end = self.runs.get_next_range_start(addr)
if end is None:
end = addr + length
return min(end - addr, length)
# Write as much as we can to this run.
available_length = min(end - addr, length)
file_offset = run.file_offset + addr - start
return run.address_space.write(file_offset, buf[:available_length])
def vtop_run(self, addr):
start, _, run = self.runs.get_containing_range(addr)
if start is not None:
return Run(start=addr,
end=run.end,
address_space=run.address_space,
file_offset=run.file_offset + addr - run.start)
def vtop(self, addr):
"""Returns the physical address for this virtual address.
Note that this does not mean much without also knowing the address space
to read from. Maybe we need to change this method's prototype?
"""
start, end, run = self.runs.get_containing_range(addr)
if start is not None:
if addr < end:
return run.file_offset + addr - start
    def is_valid_address(self, addr):
        # An address is valid when some run translates it.
        return self.vtop(addr) is not None
def get_mappings(self, start=0, end=2**64):
"""Yields the mappings.
Yields: A seqence of Run objects representing each run.
"""
for _, _, run in self.runs:
if start > run.end:
continue
if run.start > end:
return
yield run
# Root of the address space exception hierarchy.
class Error(Exception):
    """Address space errors."""
# Also derives from IOError and AssertionError so callers catching either of
# those still see instantiation failures.
class ASAssertionError(Error, IOError, AssertionError):
    """The address space failed to instantiate."""
class AddrSpaceError(Error):
    """Address Space Exception.

    This exception is raised when an AS decides to not be instantiated. It is
    used in the voting algorithm.
    """

    def __init__(self):
        # Accumulates (driver, reason) pairs explaining each AS's refusal.
        self.reasons = []
        Error.__init__(self, "No suitable address space mapping found")

    def append_reason(self, driver, reason):
        """Record why the named driver declined to instantiate."""
        self.reasons.append((driver, reason))

    def __str__(self):
        parts = [u"%s: \nTried to open image as:\n" % Error.__str__(self)]
        for driver, reason in self.reasons:
            parts.append(u" {0}: {1}\n".format(driver, reason))
        return u"".join(parts)
# This file is produced when the main "version.py update" command is run. That
# command copies this file to all sub-packages which contain
# setup.py. Configuration is maintain in version.yaml at the project's top
# level.
def get_versions():
    """Return the raw version data tagged with git metadata."""
    return tag_version_data(raw_versions(), """version.yaml""")
def raw_versions():
    """Return the static version configuration baked into this file."""
    raw_data = """
{
    "codename": "Hurricane Ridge",
    "post": "0",
    "rc": "1",
    "version": "1.7.2"
}
"""
    return json.loads(raw_data)
import json
import os
import subprocess
try:
    # We are looking for the git repo which contains this file.
    MY_DIR = os.path.dirname(os.path.abspath(__file__))
except NameError:
    # __file__ is undefined (e.g. frozen builds or interactive use). A bare
    # "except:" previously swallowed everything, including KeyboardInterrupt;
    # NameError is the only failure mode we expect here.
    MY_DIR = None
def is_tree_dirty():
    """Return True if the enclosing git checkout has uncommitted changes."""
    try:
        modified = subprocess.check_output(
            ["git", "diff", "--name-only"], stderr=subprocess.PIPE,
            cwd=MY_DIR,
        )
    except (OSError, subprocess.CalledProcessError):
        # git missing or not a checkout - treat the tree as clean.
        return False
    return bool(modified.splitlines())
def get_version_file_path(version_file="version.yaml"):
    """Return the path of version_file at the git toplevel, or None."""
    try:
        toplevel = subprocess.check_output(
            ["git", "rev-parse", "--show-toplevel"], stderr=subprocess.PIPE,
            cwd=MY_DIR,
        ).decode("utf-8").strip()
    except (OSError, subprocess.CalledProcessError):
        return None
    return os.path.join(toplevel, version_file)
def number_of_commit_since(version_file="version.yaml"):
    """Returns the number of commits since version.yaml was changed.

    Returns None when git is unavailable or the anchor commit is not within
    the most recent 1000 commits.
    """
    try:
        anchor_commit = subprocess.check_output(
            ["git", "log", "--no-merges", "-n", "1", "--pretty=format:%H",
             version_file], cwd=MY_DIR, stderr=subprocess.PIPE,
        ).strip()
        history = subprocess.check_output(
            ["git", "log", "--no-merges", "-n", "1000", "--pretty=format:%H"],
            stderr=subprocess.PIPE, cwd=MY_DIR,
        ).splitlines()
        # index() raises ValueError when the anchor is not in the window.
        return history.index(anchor_commit)
    except (OSError, subprocess.CalledProcessError, ValueError):
        return None
def get_current_git_hash():
    """Return the hash of the current HEAD commit, or None outside git."""
    head_cmd = ["git", "log", "--no-merges", "-n", "1", "--pretty=format:%H"]
    try:
        return subprocess.check_output(
            head_cmd, stderr=subprocess.PIPE, cwd=MY_DIR,
        ).strip()
    except (OSError, subprocess.CalledProcessError):
        return None
def tag_version_data(version_data, version_path="version.yaml"):
    """Annotate version_data with git state and a PEP440 version string.

    Mutates and returns version_data, adding revisionid/dirty/dev (or error)
    plus the formatted "pep440" key.
    """
    revision = get_current_git_hash()
    if revision is None:
        # Not in a git repository.
        version_data["error"] = "Not in a git repository."
    else:
        version_data["revisionid"] = revision
        version_data["dirty"] = is_tree_dirty()
        version_data["dev"] = number_of_commit_since(
            get_version_file_path(version_path))

    # Format the version according to pep440:
    pep440 = version_data["version"]
    if int(version_data.get("post", 0)) > 0:
        pep440 += ".post" + version_data["post"]
    elif int(version_data.get("rc", 0)) > 0:
        pep440 += ".rc" + version_data["rc"]

    if version_data.get("dev", 0):
        # A development release comes _before_ the main release, so bump the
        # final version component by one.
        parts = version_data["version"].rsplit(".", 1)
        version_data["version"] = "%s.%s" % (parts[0], int(parts[1]) + 1)
        pep440 = version_data["version"] + ".dev" + str(version_data["dev"])

    version_data["pep440"] = pep440
    return version_data
__author__ = "Michael Cohen <scudette@google.com>"
"""This file implements CPU quota limiting on the Rekall session.
The code works by wrapping a session object with progress handlers
which check for the CPU quota.
Note that when Rekall is used as a library, the caller must
deliberately wrap its own session with this module.
"""
import time
import psutil
from rekall import config
from rekall import plugin
# Command line knobs controlling resource quotas; both are read back via
# session.GetParameter() in wrap_session() below.
config.DeclareOption(
    "--cpu_quota", type="IntParser", group="Quotas",
    help="Number of allocated CPU seconds Rekall is allowed to consume. "
    "If not set, unlimited CPU time can be used.")
config.DeclareOption(
    "--load_quota", type="IntParser", group="Quotas",
    help="The target maximal process load level (in percent).")
def wrap_session(session, cpu_quota=None, load_quota=None):
    """Wraps the session limiting cpu quota.

    Args:
      session: The Rekall session to wrap.
      cpu_quota: Maximum CPU seconds allowed; defaults to the session's
        "cpu_quota" parameter.
      load_quota: Target maximal CPU load in percent; defaults to the
        session's "load_quota" parameter.

    Returns:
      The session, with a progress handler installed when a quota is active.
    """
    if load_quota is None:
        load_quota = session.GetParameter("load_quota")
    if cpu_quota is None:
        cpu_quota = session.GetParameter("cpu_quota")

    # NOTE: "== None" (not "is None") is deliberate - session parameters may
    # be NoneObject instances which only compare equal to None.
    if cpu_quota == None and load_quota == None:
        return session

    # Snapshot the process's CPU usage so quota accounting starts from now.
    proc = psutil.Process()
    times = proc.cpu_times()
    state = {
        "last": time.time(),
        "start_time": times.user + times.system,
        "proc": proc,
    }

    def _quota_progress(*_, **__):
        check_quota(state, cpu_quota, load_quota)

    # Register our progress dispatcher.
    session.progress.Register("quota", _quota_progress)
    return session
def check_quota(state, cpu_quota, load_quota):
    """A progress callback which checks quota is not exceeded.

    Args:
      state: Mutable dict with keys "proc" (psutil.Process), "start_time"
        (CPU seconds at accounting start) and "last" (timestamp of the last
        check, updated here).
      cpu_quota: Maximum CPU seconds allowed, or None/falsy for unlimited.
      load_quota: Target maximal CPU load in percent, or None/falsy for
        unlimited.

    Raises:
      plugin.PluginError: When the CPU quota has been exceeded.
    """
    now = time.time()

    # In order to not overwhelm psutil we throttle the actual check to at
    # most once every 500ms. BUG FIX: the previous condition
    # "now + 0.5 > state['last']" was inverted and therefore always true,
    # so the check was never throttled.
    if now - state["last"] > 0.5:
        state["last"] = now
        start_time = state["start_time"]
        proc = state["proc"]
        cpu_times = proc.cpu_times()
        current = cpu_times.user + cpu_times.system

        if cpu_quota and current > start_time + cpu_quota:
            # CPU quota exceeded.
            raise plugin.PluginError("CPU Quota exceeded (%s Seconds)." %
                                     (current - start_time))

        if load_quota:
            while 1:
                # BUG FIX: cpu_percent() already returns a percentage, so the
                # previous "* 100" double-scaled it and made any measurable
                # load exceed the quota. Sleep until the load drops below the
                # configured percentage.
                current_cpu_percent = proc.cpu_percent()
                if current_cpu_percent < load_quota:
                    break
                time.sleep(0.1)
from builtins import str
from builtins import object
from future.utils import with_metaclass
__author__ = "Michael Cohen <scudette@gmail.com>"
# pylint: disable=protected-access
import re
from rekall import addrspace
from rekall import cache
from rekall import config
from rekall import kb
from rekall import obj
from rekall import scan
from rekall_lib import registry
from rekall_lib import utils
from rekall.plugins.addrspaces import amd64
from rekall.plugins.addrspaces import intel
from rekall.plugins.darwin import common as darwin_common
from rekall.plugins.linux import common as linux_common
from rekall.plugins.windows import common as win_common
from rekall.plugins.overlays.windows import pe_vtypes
class DetectionMethod(with_metaclass(registry.MetaclassRegistry, object)):
    """A baseclass to implement autodetection methods."""

    # Unique method name - referenced by the --autodetect option.
    name = None

    # Methods are applied in ascending order of this value.
    order = 100

    def __init__(self, session=None):
        self.session = session

    def Offsets(self):
        """Return a list of offsets we care about."""
        return []

    def Keywords(self):
        """Returns a list of keywords which will be searched.

        Each time the keyword is matched, this instance will be called to
        attempt detection.
        """
        return []

    # Subclasses set this to the FindDTB plugin class for their OS.
    find_dtb_impl = None

    def VerifyProfile(self, profile_name):
        """Check that the profile name is valid."""
        profile = self.session.LoadProfile(profile_name)
        # NOTE: "!= None" is deliberate - LoadProfile may return a NoneObject
        # which only compares equal to None.
        if profile != None:
            return self._ApplyFindDTB(self.find_dtb_impl, profile)

    def _ApplyFindDTB(self, find_dtb_cls, profile):
        """Verify profile by trying to use it to load the dtb.

        If this succeeds the profile is likely correct.
        """
        self.session.profile = profile
        find_dtb_plugin = find_dtb_cls(session=self.session)
        # Allow the dtb to be specified on the command line.
        dtb = self.session.GetParameter("dtb")
        if dtb:
            # Verify the DTB to make sure it is correct.
            if not find_dtb_plugin.VerifyHit(dtb):
                return
            address_space = find_dtb_plugin.CreateAS(dtb)
            self.session.SetCache(
                "default_address_space", address_space, volatile=False)
            return profile
        for address_space in find_dtb_plugin.address_space_hits():
            # Might as well cache the results of this plugin so we dont need to
            # run it twice.
            self.session.kernel_address_space = address_space
            # Start off with a default address space of the kernel.
            with self.session as session:
                session.SetCache("default_address_space", address_space,
                                 volatile=False)
                session.SetCache("dtb", address_space.dtb, volatile=False)
            # The first verified hit wins.
            return profile

    def DetectFromHit(self, hit, file_offset, address_space):
        """Gets called for each hit.

        If a profile matches, return it, otherwise None.
        """
# Command line knobs controlling profile autodetection. These are consumed by
# ProfileHook below via session.GetParameter().
# By default use all detection modules.
config.DeclareOption("autodetect", group="Autodetection Overrides",
                     type="ChoiceArray", required=True,
                     choices=utils.JITIterator(DetectionMethod),
                     default=utils.JITIterator(DetectionMethod),
                     help="Autodetection method.")
config.DeclareOption("autodetect_threshold", default=1.0,
                     group="Autodetection Overrides",
                     help="Worst acceptable match for profile autodetection." +
                     " (Default 1.0)",
                     type="Float")
config.DeclareOption("autodetect_build_local", default="basic",
                     group="Autodetection Overrides",
                     choices=["full", "basic", "none"],
                     help="Attempts to fetch and build profile locally.",
                     type="Choices")
config.DeclareOption("autodetect_scan_length", default=2**64,
                     group="Autodetection Overrides",
                     help="How much of physical memory to scan before failing",
                     type="IntParser")
class WindowsIndexDetector(DetectionMethod):
    """Apply the windows index to detect the profile."""

    find_dtb_impl = win_common.WinFindDTB

    name = "nt_index"

    def __init__(self, **kwargs):
        super(WindowsIndexDetector, self).__init__(**kwargs)
        # eprocess_index: known _EPROCESS field layouts keyed by filename.
        self.eprocess_index = self.session.LoadProfile("nt/eprocess_index")
        # nt_index: the global kernel profile index.
        self.nt_index = self.session.LoadProfile("nt/index")

    def Keywords(self):
        """We trigger when we see some common windows processes.

        Since all windows processes also map the kernel we can detect it.
        """
        return [b"cmd.exe\x00\x00", b"System\x00\x00", b"csrss.exe\x00\x00",
                b"svchost.exe\x00\x00", b"lsass.exe\x00\x00",
                b"winlogon.exe\x00\x00"]

    def Offsets(self):
        return [0]

    def VerifyAMD64DTB(self, test_as):
        """Verify this address space.

        Checks that the _KUSER_SHARED_DATA makes sense. This structure is always
        at a known offset since it must be shared with user space apps.
        """
        kuser_shared = self.eprocess_index._KUSER_SHARED_DATA(
            offset=0xFFFFF78000000000, vm=test_as)
        # Must be a valid version of windows.
        if (kuser_shared.NtMajorVersion in [5, 6, 10] and
                kuser_shared.NtMinorVersion in [0, 1, 2, 3]):
            return True

    def VerifyI386DTB(self, test_as):
        """Verify this address space.

        Checks that the _KUSER_SHARED_DATA makes sense. This structure is always
        at a known offset since it must be shared with user space apps.
        """
        kuser_shared = self.eprocess_index._KUSER_SHARED_DATA(
            offset=0xffdf0000, vm=test_as)
        # Must be a valid version of windows.
        if (kuser_shared.NtMajorVersion in [5, 6, 10] and
                kuser_shared.NtMinorVersion in [0, 1, 2, 3]):
            return True

    def DetectWindowsDTB(self, filename_offset, address_space):
        """Checks the possible filename hit for a valid DTB address.

        Yields verified paged address spaces built from candidate DTBs found
        at known offsets relative to the process filename hit.
        """
        for dtb_rel_offset, arch in self.eprocess_index.filename_to_dtb:
            # We only apply indexes to 64 bit images.
            if arch == "AMD64":
                possible_dtb = self.eprocess_index.Object(
                    "unsigned long", offset=filename_offset - dtb_rel_offset,
                    vm=address_space).v()
                # Discard impossible DTB values immediately. On 64 bit
                # architectures, the DTB must be page aligned.
                if not possible_dtb or possible_dtb & 0xFFF:
                    continue
                test_as = amd64.AMD64PagedMemory(
                    session=self.session, base=address_space, dtb=possible_dtb)
                if self.VerifyAMD64DTB(test_as):
                    yield test_as
            elif arch == "I386":
                possible_dtb = self.eprocess_index.Object(
                    "unsigned long", offset=filename_offset - dtb_rel_offset,
                    vm=address_space).v()
                # Discard impossible DTB values immediately. On 32 bit
                # architectures, the DTB must be aligned to 0x20 (with PAE).
                if not possible_dtb or possible_dtb & 0x1F:
                    continue
                # Only support PAE - we dont really see non PAE images any more.
                test_as = intel.IA32PagedMemoryPae(
                    session=self.session, base=address_space, dtb=possible_dtb)
                if self.VerifyI386DTB(test_as):
                    yield test_as

    def _match_profile_for_kernel_base(self, kernel_base, test_as):
        """Look up the index at kernel_base and load the best profile."""
        threshold = self.session.GetParameter("autodetect_threshold")
        for profile, match in self.nt_index.LookupIndex(
                kernel_base, address_space=test_as):
            # LookupIndex returns results best-first; stop below threshold.
            if match < threshold:
                break
            profile_obj = self.session.LoadProfile(profile)
            if profile_obj:
                return profile_obj

    def DetectFromHit(self, hit, filename_offset, address_space):
        # Make use of already known dtb and kernel_base parameters - this speeds
        # up live analysis significantly since we do not need to search for
        # anything then.
        if filename_offset == 0:
            if (self.session.HasParameter("dtb") and
                    self.session.HasParameter("kernel_base")):
                test_as = amd64.AMD64PagedMemory(
                    session=self.session, base=address_space,
                    dtb=self.session.GetParameter("dtb"))
                if self.VerifyAMD64DTB(test_as):
                    return self._match_profile_for_kernel_base(
                        self.session.GetParameter("kernel_base"),
                        test_as)
            return
        # Get potential kernel address spaces.
        for test_as in self.DetectWindowsDTB(filename_offset, address_space):
            # Try to find the kernel base. This can be improved in future by
            # taking more than a single search point.
            scanner = scan.MultiStringScanner(
                address_space=test_as, needles=[
                    b"This program cannot be run in DOS mode",
                ])
            if self.session.HasParameter("kernel_base"):
                kernel_base = self.session.GetParameter("kernel_base")
                return self._match_profile_for_kernel_base(
                    kernel_base, test_as)
            for offset, _ in scanner.scan(
                    offset=0xF80000000000, maxlen=0x10000000000):
                # Page-align the PE header hit to get the image base.
                kernel_base = offset & 0xFFFFFFFFFFFFFF000
                profile_obj = self._match_profile_for_kernel_base(
                    kernel_base, test_as)
                if profile_obj:
                    self.session.logging.debug(
                        "Verifying profile %s by scanning processes for a valid DTB",
                        profile_obj)
                    if self.VerifyProfile(profile_obj):
                        return profile_obj
class PEImageFileDetector(DetectionMethod):
    """Detects a plain PE image file (rather than a memory image)."""

    name = "pe"
    order = 50

    def __init__(self, **kwargs):
        super(PEImageFileDetector, self).__init__(**kwargs)
        self.pe_profile = self.session.LoadProfile("pe")

    def Offsets(self):
        # Only the very start of the file can carry a PE header.
        return [0]

    def DetectFromHit(self, hit, _, address_space):
        """Return the PE profile if the image parses as a PE file."""
        dos_header = self.pe_profile._IMAGE_DOS_HEADER(vm=address_space)
        if not dos_header.NTHeader:
            return None
        # The file is a PE file - install a PE address space over it.
        pe_as = pe_vtypes.PEFileAddressSpace(
            base=address_space, profile=self.pe_profile)
        self.session.kernel_address_space = pe_as
        self.session.SetCache("default_image_base", pe_as.image_base)
        machine_type = pe_as.nt_header.FileHeader.Machine
        arch = ("AMD64" if machine_type == "IMAGE_FILE_MACHINE_AMD64"
                else "I386")
        self.pe_profile.set_metadata("arch", arch)
        return self.pe_profile
class WindowsRSDSDetector(DetectionMethod):
    """A detection method based on the scanning for RSDS signatures."""

    name = "rsds"
    order = 90

    # Windows kernel pdb files.
    KERNEL_NAMES = win_common.KERNEL_NAMES

    find_dtb_impl = win_common.WinFindDTB

    def __init__(self, **kwargs):
        super(WindowsRSDSDetector, self).__init__(**kwargs)
        self.pe_profile = self.session.LoadProfile("pe")

    def Keywords(self):
        # The magic marking a CodeView RSDS debug record.
        return [b"RSDS"]

    def Offsets(self):
        return [0]

    def VerifyProfile(self, profile_name):
        """Load profile_name, optionally building it locally first."""
        profile = self.session.LoadProfile(profile_name)
        # If the user allows it we can just try to fetch and build the profile
        # locally. NOTE: "== None" handles NoneObject results from LoadProfile.
        if profile == None and self.session.GetParameter(
                "autodetect_build_local") in ("full", "basic"):
            build_local_profile = self.session.plugins.build_local_profile()
            try:
                self.session.logging.debug(
                    "Will build local profile %s", profile_name)
                build_local_profile.fetch_and_parse(profile_name)
                profile = self.session.LoadProfile(
                    profile_name, use_cache=False)
            except IOError:
                pass
        if profile != None:
            return self._ApplyFindDTB(self.find_dtb_impl, profile)

    def DetectFromHit(self, hit, offset, address_space):
        # Make use of already known dtb and kernel_base parameters - this speeds
        # up live analysis significantly since we do not need to search for
        # anything then.
        if (offset == 0 and self.session.HasParameter("dtb") and
                self.session.HasParameter("kernel_base")):
            test_as = amd64.AMD64PagedMemory(
                session=self.session, base=address_space,
                dtb=self.session.GetParameter("dtb"))
            pe_helper = pe_vtypes.PE(
                session=self.session,
                address_space=test_as,
                image_base=self.session.GetParameter("kernel_base"))
            return self._test_rsds(pe_helper.RSDS)
        # Try Windows by GUID:
        rsds = self.pe_profile.CV_RSDS_HEADER(offset=offset, vm=address_space)
        return self._test_rsds(rsds)

    def _test_rsds(self, rsds):
        """Return a verified profile if rsds names a known kernel PDB."""
        if (rsds.Signature.is_valid() and
                str(rsds.Filename) in self.KERNEL_NAMES):
            profile = self.VerifyProfile("nt/GUID/%s" % rsds.GUID_AGE)
            if profile:
                self.session.logging.info(
                    "Detected %s with GUID %s", rsds.Filename,
                    rsds.GUID_AGE)
                return profile
class WindowsKernelImageDetector(WindowsRSDSDetector):
    """Detects the profile from the mapped kernel image on a live system."""

    name = "windows_kernel_file"
    order = 50

    def Offsets(self):
        return [0]

    # Candidate on-disk locations of the kernel image.
    KERNEL_PATHS = [r"C:\Windows\SysNative\ntoskrnl.exe",
                    r"C:\Windows\System32\ntoskrnl.exe"]

    def DetectFromHit(self, hit, _, address_space):
        for potential_path in self.KERNEL_PATHS:
            # Try to make the kernel image into the address_space.
            image_offset = address_space.get_mapped_offset(potential_path, 0)
            if image_offset is not None:
                # Expose the file contents at offset 0 through a run-based AS.
                file_as = addrspace.RunBasedAddressSpace(
                    base=address_space, session=self.session)
                file_as.add_run(0, image_offset, 2**63)
                pe_file_as = pe_vtypes.PEFileAddressSpace(
                    base=file_as, session=self.session)
                pe_helper = pe_vtypes.PE(
                    session=self.session,
                    address_space=pe_file_as,
                    image_base=pe_file_as.image_base)
                rsds = pe_helper.RSDS
                self.session.logging.info(
                    "Found RSDS in kernel image: %s (%s)",
                    rsds.GUID_AGE, rsds.Filename)
                result = self._test_rsds(rsds)
                if result:
                    return result
class LinuxIndexDetector(DetectionMethod):
    """A kernel detector that uses live symbols to do exact matching.

    LinuxIndexDetector uses kallsyms (or any other source of live symbols) to
    match a kernel exactly by finding known-unique symbols.
    """

    name = "linux_index"

    find_dtb_impl = linux_common.LinuxFindDTB

    def __init__(self, **kwargs):
        super(LinuxIndexDetector, self).__init__(**kwargs)
        self.index = self.session.LoadProfile("Linux/index")

    def Offsets(self):
        return [0]

    def DetectFromHit(self, hit, offset, address_space):
        # This method is only run once, at offset 0.
        if offset != 0:
            return
        self.session.logging.debug(
            "LinuxIndexDetector:DetectFromHit(%x) = %s", offset, hit)
        kaslr_reader = linux_common.KAllSyms(self.session)
        # We create a dictionary of symbol:offset skipping symbols from
        # exported modules.
        symbol_dict = {}
        # NOTE(review): this loop variable shadows the "offset" parameter
        # (always 0 past the guard above) - confirm this is intentional.
        for offset, symbol, _, module in kaslr_reader.ObtainSymbols():
            # Ignore symbols in modules we only care about the kernel.
            if not module:
                symbol_dict[symbol] = offset
        if not symbol_dict:
            return
        matching_profiles = self.index.LookupProfile(symbol_dict)
        if len(matching_profiles) > 1:
            # Ambiguous - report and give up rather than guess.
            self.session.logging.info(
                "LinuxIndexDetector found %d matching profiles: %s",
                len(matching_profiles),
                ', '.join([p[0] for p in matching_profiles]))
            return
        elif len(matching_profiles) == 1:
            profile_id = matching_profiles[0][0]
            self.session.logging.info(
                "LinuxIndexDetector found profile %s with %d/%d matches.",
                profile_id,
                matching_profiles[0][1],
                len(self.index.traits[profile_id]))
            profile = self.session.LoadProfile(profile_id)
            if profile:
                # At this point we also know the kernel slide.
                kallsyms_proc_banner = symbol_dict["linux_proc_banner"]
                profile_proc_banner = profile.get_constant("linux_proc_banner",
                                                           is_address=False)
                kernel_slide = kallsyms_proc_banner - profile_proc_banner
                self.session.logging.info("Found slide 0x%x", kernel_slide)
                self.session.SetCache("kernel_slide", kernel_slide)
                verified_profile = self.VerifyProfile(profile)
                if verified_profile:
                    return verified_profile
                else:
                    # Verification failed - discard the slide we cached.
                    self.session.SetCache("kernel_slide", None)
        # If we were unable to find a matching Linux profile, we limit the scan
        # length to prevent Rekall from spinning for a long time.
        self.session.logging.warn("LinuxIndexDetector found no matches.")
        self._LimitScanLength()

    def _LimitScanLength(self):
        # Cap the fallback physical scan at 1GiB.
        self.session.SetParameter("autodetect_scan_length", 1024*1024*1024)
class LinuxBannerDetector(DetectionMethod):
    """Detect a linux kernel from its banner text."""

    name = "linux"

    # FIX: use a raw bytes literal - "\d" is not a valid escape sequence in a
    # non-raw literal and raises DeprecationWarning (a future SyntaxError).
    # The compiled pattern value is unchanged.
    LINUX_TEMPLATE = re.compile(
        rb"Linux version (\d+\.\d+\.\d+[^ ]+)")

    find_dtb_impl = linux_common.LinuxFindDTB

    def Keywords(self):
        # The Linux kernels we care about contain this.
        return [b"Linux version "]

    def DetectFromHit(self, hit, offset, address_space):
        """Parse the version banner around the hit and load its profile."""
        # Grab context around the hit so the version string is included.
        guess = address_space.read(offset - 100, 300)
        m = self.LINUX_TEMPLATE.search(guess)
        if not m:
            return None
        # Try to guess the distribution.
        distribution = "LinuxGeneric"
        if b"Ubuntu" in guess:
            distribution = "Ubuntu"
        if b"Debian" in guess:
            distribution = "Debian"
        profile_name = "%s/%s" % (distribution, utils.SmartUnicode(m.group(1)))
        profile = self.session.LoadProfile(profile_name)
        if not profile:
            return None
        self.session.logging.info(
            "Detected %s: %s", profile_name, m.group(0))
        # At this point we should know the kernel slide: the banner's file
        # offset vs. the profile's expected physical address of linux_banner.
        profile_proc_banner = profile.get_constant("linux_banner",
                                                   is_address=False)
        expected_proc_banner = profile.phys_addr(profile_proc_banner)
        kernel_slide = offset - expected_proc_banner
        self.session.logging.info("Found slide 0x%x", kernel_slide)
        self.session.SetCache("kernel_slide", kernel_slide)
        verified_profile = self.VerifyProfile(profile)
        if not verified_profile:
            # Verification failed - discard the cached slide.
            self.session.SetCache("kernel_slide", None)
        return verified_profile
class DarwinIndexDetector(DetectionMethod):
    """Detect the Darwin version using the index.

    To work around KASLR, we have an index of known symbols' offsets relative to
    the Catfish string, along with the data we expect to find at those
    offsets. Profile similarity is the percentage of these symbols that match as
    expected.

    Ideally, we'd like a 100% match, but in case we don't have the exact
    profile, we'll make do with anything higher than 0% that can resolve the
    DTB.
    """

    name = "osx"

    find_dtb_impl = darwin_common.DarwinFindDTB

    def __init__(self, **kwargs):
        super(DarwinIndexDetector, self).__init__(**kwargs)
        self.index = self.session.LoadProfile("OSX/index")

    def Keywords(self):
        # Found in every OS X image. See documentation for DarwinFindKASLR for
        # details.
        return [b"Catfish \x00\x00"]

    def DetectFromHit(self, hit, offset, address_space):
        # LookupIndex yields candidates best-first; the first one that
        # verifies wins.
        for profile_name, match in self.index.LookupIndex(
                image_base=offset,
                address_space=self.session.physical_address_space):
            profile = self.VerifyProfile(profile_name)
            if profile:
                self.session.logging.info(
                    "New best match: %s (%.0f%% match)",
                    profile_name, match * 100)
                # Remember the Catfish anchor for later KASLR computations.
                self.session.SetCache("catfish_offset", offset, volatile=False)
                return profile
class KernelASHook(kb.ParameterHook):
    """A ParameterHook for default_address_space.

    This will only be called if default_address_space is not set. We return
    the kernel address space, loading it first if necessary.
    """

    name = "default_address_space"
    volatile = False

    def calculate(self):
        kernel_as = self.session.kernel_address_space
        if kernel_as:
            return kernel_as
        try:
            load_as = self.session.plugins.load_as()
            return load_as.GetVirtualAddressSpace()
        except Exception:
            # Loading can fail for many reasons - degrade to a NoneObject so
            # callers can handle the absence gracefully.
            return obj.NoneObject("Address space not found")
class ProfileHook(kb.ParameterHook):
    """If the profile is not specified, we guess it."""

    name = "profile_obj"
    volatile = False

    def ScanProfiles(self):
        """Run the profile scan, flagging the autodetect execution phase."""
        try:
            self.session.SetCache("execution_phase", "ProfileAutodetect")
            return self._ScanProfiles()
        finally:
            self.session.SetCache("execution_phase", None)

    def _ScanProfiles(self):
        """Try each configured detection method, then scan physical memory."""
        address_space = self.session.physical_address_space
        # NOTE(review): best_profile/best_match are never updated below -
        # successful detections return directly, so the post-scan threshold
        # branches other than the best_match == 0 error path appear to be
        # unreachable. Confirm before relying on the threshold messages.
        best_profile = None
        best_match = 0
        methods = []
        needles = []
        needle_lookup = {}
        method_names = self.session.GetParameter("autodetect")
        self.session.logging.debug(
            "Will detect profile using these Detectors: %s" % ",".join(
                method_names))
        if not method_names:
            raise RuntimeError("No autodetection methods specified. "
                               "Use the --autodetect parameter.")
        for method_name in method_names:
            for method in DetectionMethod.classes_by_name[method_name]:
                methods.append(method(session=self.session))
        methods.sort(key=lambda x: x.order)
        # First give each method a chance at its fixed offsets - this avoids
        # the expensive keyword scan entirely for e.g. live analysis.
        for method in methods:
            for keyword in method.Keywords():
                needles.append(keyword)
                needle_lookup.setdefault(keyword, []).append(method)
            for offset in method.Offsets():
                self.session.logging.debug("Trying method %s, offset %d",
                                           method.name, offset)
                profile = method.DetectFromHit(None, offset, address_space)
                if profile:
                    self.session.logging.info(
                        "Detection method %s yielded profile %s",
                        method.name, profile)
                    return profile
        # 10 GB by default.
        autodetect_scan_length = self.session.GetParameter(
            "autodetect_scan_length", 10*1024*1024*1024)
        # Build and configure the scanner.
        scanner = scan.MultiStringScanner(
            profile=obj.NoneObject(),
            address_space=address_space, needles=needles,
            session=self.session)
        scanner.progress_message = "Autodetecting profile: %(offset)#08x"
        for offset, hit in scanner.scan(maxlen=autodetect_scan_length):
            self.session.render_progress(
                "guess_profile: autodetection hit @ %x - %s", offset, hit)
            # Dispatch the hit to every method registered for this keyword.
            for method in needle_lookup[hit]:
                profile = method.DetectFromHit(hit, offset, address_space)
                if profile:
                    self.session.logging.debug(
                        "Detection method %s worked at offset %#x",
                        method.name, offset)
                    return profile
            if best_match == 1.0:
                # If we have an exact match we can stop scanning.
                break
        threshold = self.session.GetParameter("autodetect_threshold")
        if best_match == 0:
            self.session.logging.error(
                "No profiles match this image. Try specifying manually.")
            return obj.NoneObject("No profile detected")
        elif best_match < threshold:
            self.session.logging.error(
                "Best match for profile is %s with %.0f%%, which is too low " +
                "for given threshold of %.0f%%. Try lowering " +
                "--autodetect-threshold.",
                best_profile.name,
                best_match * 100,
                threshold * 100)
            return obj.NoneObject("No profile detected")
        else:
            self.session.logging.info(
                "Profile %s matched with %.0f%% confidence.",
                best_profile.name,
                best_match * 100)
            return best_profile

    def calculate(self):
        """Try to find the correct profile by scanning for PDB files."""
        # Clear the profile for the duration of the scan.
        self.session.profile = obj.NoneObject("Unset")
        if not self.session.physical_address_space:
            # Try to load the physical_address_space so we can scan it.
            if not self.session.plugins.load_as().GetPhysicalAddressSpace():
                # If a filename was specified this should have worked, unless we
                # could not open it.
                filename = self.session.GetParameter("filename")
                if filename:
                    raise RuntimeError(
                        "Unable to instantiate physical_address_space from "
                        "filename %s." % filename)
                # No physical address space - nothing to do here.
                return obj.NoneObject("No Physical Address Space.")
        # If the global cache is persistent we try to detect this image by
        # fingerprint if we have seen it before.
        if self.session.cache.__class__ == cache.FileCache:
            name = self.session.cache.DetectImage(
                self.session.physical_address_space)
            if name:
                self.session.logging.info(
                    "Detected fingerprinted image %s", name)
        # Allow the user to specify the profile to use on the command line.
        profile_name = self.session.GetParameter("profile")
        if profile_name:
            profile_obj = self.session.LoadProfile(profile_name)
            # "!= None" handles NoneObject results from LoadProfile.
            if profile_obj != None:
                return profile_obj
        # Is the profile object already cached?
        profile_obj = self.session.cache.Get("profile_obj")
        if not profile_obj:
            profile_obj = self.ScanProfiles()
            if not profile_obj:
                raise RuntimeError(
                    "Unable to find a valid profile for this image. "
                    "Try using -v for more details.")
        # Update the session profile.
        self.session.profile = profile_obj
        if (self.session.GetParameter("cache") == "file" and
                self.session.HasParameter("image_fingerprint")):
            self.session.cache.SetFingerprint(
                self.session.GetParameter("image_fingerprint"))
        return profile_obj
from builtins import object
import yara
from rekall import scan
from rekall import testlib
from rekall import plugin
from rekall.plugins.common import pfn
from rekall_lib import utils
class YaraScanMixin(object):
    """A common implementation of yara scanner.

    This should be mixed with the OS specific Scanner (e.g. WinScanner) and
    plugin.TypedProfileCommand.
    """

    name = "yarascan"

    table_header = [
        dict(name="Owner", width=20),
        dict(name="Rule", width=10),
        dict(name="Match", hidden=True),
        dict(name="Offset", style="address"),
        dict(name="hexdump", hex_width=16, width=67),
        dict(name="run", hidden=True),
        dict(name="address_space", hidden=True),
        dict(name="Context"),
    ]

    __args = [
        dict(name="hits", default=10, type="IntParser",
             help="Quit after finding this many hits."),
        dict(name="string", default=None,
             help="A verbatim string to search for."),
        dict(name="binary_string", default=None,
             help="A binary string (encoded as hex) to search "
             "for. e.g. 000102[1-200]0506"),
        dict(name="yara_file", default=None,
             help="The yara signature file to read."),
        dict(name="yara_expression", default=None,
             help="If provided we scan for this yara "
             "expression."),
        dict(name="context", default=0x40, type="IntParser",
             help="Context to print after the hit."),
        dict(name="pre_context", default=0, type="IntParser",
             help="Context to print before the hit."),
    ]

    scanner_defaults = dict(
        scan_physical=True
    )

    def __init__(self, *args, **kwargs):
        """Scan using yara signatures."""
        super(YaraScanMixin, self).__init__(*args, **kwargs)
        # Compile the yara rules in advance. Exactly one rule source is used,
        # in this order of precedence.
        if self.plugin_args.yara_expression:
            self.rules_source = self.plugin_args.yara_expression
            self.rules = yara.compile(source=self.rules_source)
        elif self.plugin_args.binary_string:
            self.compile_rule(
                'rule r1 {strings: $a = {%s} condition: $a}' %
                self.plugin_args.binary_string)
        elif self.plugin_args.string:
            self.compile_rule(
                'rule r1 {strings: $a = "%s" condition: $a}' %
                self.plugin_args.string)
        elif self.plugin_args.yara_file:
            self.compile_rule(open(self.plugin_args.yara_file).read())
        elif not self.ignore_required:
            raise plugin.PluginError("You must specify a yara rule file or "
                                     "string to match.")

    def compile_rule(self, rule):
        """Compile rule, converting yara errors into PluginError."""
        self.rules_source = rule
        try:
            self.rules = yara.compile(source=rule)
        except Exception as e:
            raise plugin.PluginError(
                "Failed to compile yara expression: %s" % e)

    def generate_hits(self, run):
        """Yield (match, offset) pairs for all rule hits inside run."""
        for buffer_as in scan.BufferASGenerator(
                self.session, run.address_space, run.start, run.end):
            self.session.report_progress(
                "Scanning buffer %#x->%#x (length %#x)",
                buffer_as.base_offset, buffer_as.end(),
                buffer_as.end() - buffer_as.base_offset)
            for match in self.rules.match(data=buffer_as.data):
                for buffer_offset, name, value in match.strings:
                    # Translate the buffer-relative offset to an absolute one.
                    hit_offset = buffer_offset + buffer_as.base_offset
                    yield match, hit_offset

    def collect(self):
        """Render output."""
        count = 0
        for run in self.generate_memory_ranges():
            for match, address in self.generate_hits(run):
                count += 1
                # NOTE(review): count is incremented before yielding and this
                # break only exits the inner loop, so at most hits-1 rows are
                # produced and scanning continues into later runs - confirm
                # whether this off-by-one and partial break are intended.
                if count >= self.plugin_args.hits:
                    break
                # Result hit the physical memory - Get some context on this hit.
                if run.data.get("type") == "PhysicalAS":
                    symbol = pfn.PhysicalAddressContext(self.session, address)
                else:
                    symbol = utils.FormattedAddress(
                        self.session.address_resolver, address,
                        max_distance=2**64)
                yield dict(
                    Owner=run.data.get("task") or run.data.get("type"),
                    Match=match,
                    Rule=match.rule,
                    Offset=address,
                    hexdump=utils.HexDumpedString(
                        run.address_space.read(
                            address - self.plugin_args.pre_context,
                            self.plugin_args.context +
                            self.plugin_args.pre_context)),
                    Context=symbol,
                    # Provide the address space where the hit is reported.
                    address_space=run.address_space,
                    run=run)
class SimpleYaraScan(YaraScanMixin, plugin.TypedProfileCommand,
                     plugin.PhysicalASMixin, plugin.ProfileCommand):
    """A Simple plugin which only yarascans the physical Address Space.
    This plugin should not trigger profile autodetection and therefore should be
    usable on any file at all.
    """
    name = "simple_yarascan"
    # NOTE: the double underscore is name-mangled per class; presumably the
    # plugin machinery merges these per-class option lists - confirm.
    __args = [
        plugin.CommandOption("start", default=0, type="IntParser",
                             help="Start searching from this offset."),
        plugin.CommandOption("limit", default=2**64, type="IntParser",
                             help="The length of data to search."),
    ]
    table_header = [
        dict(name="Rule", width=10),
        dict(name="Match", hidden=True),
        dict(name="Offset", style="address"),
        dict(name="hexdump", hex_width=16, width=67),
    ]
    # This plugin works on raw images without any profile.
    PROFILE_REQUIRED = False
    def collect(self):
        """Scan the physical address space and emit one row per hit."""
        count = 0
        address_space = self.session.physical_address_space
        for buffer_as in scan.BufferASGenerator(
                self.session, address_space,
                self.plugin_args.start,
                self.plugin_args.start + self.plugin_args.limit):
            self.session.report_progress(
                "Scanning buffer %#x->%#x (%#x)",
                buffer_as.base_offset, buffer_as.end(),
                buffer_as.end() - buffer_as.base_offset)
            for match in self.rules.match(data=buffer_as.data):
                for buffer_offset, _, _ in match.strings:
                    # Translate buffer-relative offsets to absolute ones.
                    hit_offset = buffer_offset + buffer_as.base_offset
                    count += 1
                    # NOTE(review): break only leaves the innermost loop, so
                    # --hits caps hits per yara match - confirm intended.
                    if count >= self.plugin_args.hits:
                        break
                    yield dict(
                        Match=match,
                        Rule=match.rule,
                        Offset=hit_offset,
                        hexdump=utils.HexDumpedString(
                            self.session.physical_address_space.read(
                                hit_offset - self.plugin_args.pre_context,
                                self.plugin_args.context +
                                self.plugin_args.pre_context)))
class TestYara(testlib.SimpleTestCase):
    """Test the yarascan module."""

    # Exercise the plugin end-to-end with a simple string rule.
    # (Stripped dataset-extraction residue that was fused onto this line.)
    PARAMETERS = dict(commandline="yarascan --string %(string)s --hits 10")
from rekall import scan
from rekall_lib import utils
from rekall.plugins.overlays import basic
from rekall.plugins.windows import common
# Hand built overlay definitions of Miranda IM's in-memory IRC records.
# Offsets are raw byte offsets into the heap allocations; cross links are
# expressed as Pointer fields so generic list walking works.
TYPES = {
    "MESSAGE_RECORD": [0x50, {
        "Message": [0, ["Pointer", dict(
            target="UnicodeString"
        )]],
        "Sender": [8, ["Pointer", dict(
            target="UnicodeString"
        )]],
        "Timestamp": [48, ["UnixTimeStamp"]],
        # Doubly linked message list.
        "Prev": [64, ["Pointer", dict(
            target="MESSAGE_RECORD"
        )]],
        "Next": [72, ["Pointer", dict(
            target="MESSAGE_RECORD"
        )]],
    }],
    "CHANNEL_RECORD": [0x50, {
        "Protocol": [0x20, ["Pointer", dict(
            target="String"
        )]],
        "Label": [0x28, ["Pointer", dict(
            target="UnicodeString"
        )]],
        "Channel": [0x30, ["Pointer", dict(
            target="UnicodeString"
        )]],
        "Welcome": [0x40, ["Pointer", dict(
            target="UnicodeString"
        )]],
        "LastMessage": [0xc8, ["Pointer", dict(
            target="MESSAGE_RECORD"
        )]],
        "FirstMessage": [0xd0, ["Pointer", dict(
            target="MESSAGE_RECORD"
        )]],
        "FirstUser": [0xd8, ["Pointer", dict(
            target="USER_RECORD"
        )]],
        "CurrentUser": [0xe0, ["Pointer", dict(
            target="USER_RECORD"
        )]],
        "NextChannel": [0x108, ["Pointer", dict(
            target="CHANNEL_RECORD"
        )]],
    }],
    "CHAT_RECORD": [96, {
        "ChatName": [0x30, ["Pointer", dict(
            target="UnicodeString"
        )]],
        "Channel": [0x40, ["Pointer", dict(
            target="CHANNEL_RECORD"
        )]],
    }],
    "USER_RECORD": [0x28, {
        "Nick": [0x00, ["Pointer", dict(
            target="UnicodeString"
        )]],
        "NickID": [0x08, ["Pointer", dict(
            target="UnicodeString"
        )]],
        "NextUser": [0x20, ["Pointer", dict(
            target="USER_RECORD"
        )]],
    }],
    "CHATS": [48, {
        "Count": [0, ["unsigned int"]],
        # Counted array of pointers to CHAT_RECORDs.
        "Chats": [0x8, ["Pointer", dict(
            target="Array",
            target_args=dict(
                target="Pointer",
                count=lambda x: x.Count,
                target_args=dict(
                    target="CHAT_RECORD"
                )
            )
        )]],
    }]
}
class MirandaProfile(basic.ProfileLLP64, basic.BasicClasses):
    """A basic profile for Miranda IM."""
    @classmethod
    def Initialize(cls, profile):
        super(MirandaProfile, cls).Initialize(profile)
        # Install the hand built Miranda struct definitions.
        profile.add_overlay(TYPES)
class HeapScannerMixin(object):
    """Mixin restricting a scanner to the current process' READWRITE VADs."""
    def scan(self):
        task = self.session.GetParameter("process_context")
        for vad in task.RealVadRoot.traverse():
            if vad.u.VadFlags.ProtectionEnum == "READWRITE":
                # Only scan the VAD region.
                for match in super(HeapScannerMixin, self).scan(
                        vad.Start, vad.Length):
                    yield match
class HeapScanner(HeapScannerMixin, scan.MultiStringScanner):
    """Multi-string scanner limited to writable (heap-like) VAD regions."""
    pass
class HeapPointerScanner(HeapScannerMixin, scan.PointerScanner):
    """Pointer scanner limited to writable (heap-like) VAD regions."""
    pass
class Miranda(common.WindowsCommandPlugin):
    """Extract IRC channels and messages from a Miranda IM process."""

    name = "miranda"

    def FindChannels(self):
        """Carve CHANNEL_RECORD candidates out of the process heaps.

        First locates the "IRC_1" protocol marker (preceded by the
        \\xba\\xba\\xba\\xab magic) on the heap, then finds pointers referring
        to it and validates each referrer by checking for the same magic
        bytes at a fixed offset before it.

        Yields:
          CHANNEL_RECORD objects instantiated from the Miranda profile.
        """
        scanner = HeapScanner(
            session=self.session,
            needles=[b"\xba\xba\xba\xabIRC_1\x00"])
        irc_hits = []
        for hit, _ in scanner.scan():
            irc_hits.append(hit)

        # Now find structures which point at the "IRC_1" string (which
        # starts 4 bytes into each needle hit).
        scanner = HeapPointerScanner(
            session=self.session,
            profile=self.session.profile, pointers=[x+4 for x in irc_hits])
        for referrer in scanner.scan():
            # Validate the candidate via the magic marker preceding the
            # pointer field.
            if self.session.default_address_space.read(
                    referrer - 0x24, 4) == b"\xba\xba\xba\xab":
                yield self.miranda_profile.CHANNEL_RECORD(referrer - 0x20)

    def render(self, renderer):
        """Switch into a miranda process and dump its channels/messages."""
        self.miranda_profile = MirandaProfile(session=self.session)
        with self.session.plugins.cc(proc_regex="miranda") as cc:
            cc.SwitchContext()
            for channel in self.FindChannels():
                # For each channel we start a new section.
                renderer.section("Channel {0} {1:#x}".format(
                    channel.Channel, channel))
                users = []
                for x in channel.FirstUser.walk_list("NextUser", True):
                    users.append(utils.SmartUnicode(x.Nick.deref()))
                renderer.table_header([("Users", "users", "120")])
                renderer.table_row(",".join(users))
                renderer.table_header([
                    ("Timestamp", "timestamp", "30"),
                    ("User", "user", "20"),
                    ("Message", "message", "80"),
                ])
                for message_record in channel.FirstMessage.walk_list("Next"):
                    renderer.table_row(
                        message_record.Timestamp,
                        message_record.Sender.deref(),
                        message_record.Message.deref())
from rekall.plugins.overlays.windows import win8
# Undocumented Windows 10 structure fixups, reverse engineered from the
# kernel (see the disassembly notes below).
win10_undocumented_amd64 = {
    # wi10.raw 18:05:45> dis "nt!MiSessionInsertImage"
    # call 0xf8014a9d4e80 nt!memset
    # ... or rax, 3 <---- Base address is ORed with 3.
    # mov dword ptr [rbp + 0x3c], 1 <--- ImageCountInThisSession
    # mov qword ptr [rbp + 0x28], rax <---- Address
    '_IMAGE_ENTRY_IN_SESSION': [None, {
        # _EX_FAST_REF strips the flag bits ORed into the base address.
        'Address': [0x28, ["_EX_FAST_REF"]],
    }],
}
win10_undocumented_i386 = {
    '_IMAGE_ENTRY_IN_SESSION': [None, {
        'Address': [0x14, ["Pointer"]],
    }],
}
win10_overlays = {
    '_MM_SESSION_SPACE': [None, {
        # Specialized iterator to produce all the _IMAGE_ENTRY_IN_SESSION
        # records. In Win10 these are stored in an AVL tree instead.
        'ImageIterator': lambda x: x.ImageTree.Root.traverse(
            type="_IMAGE_ENTRY_IN_SESSION")
    }],
    "_UNLOADED_DRIVERS": [None, {
        "CurrentTime": [None, ["WinFileTime"]],
    }],
    "_MI_HARDWARE_STATE": [None, {
        # One _MI_SYSTEM_NODE_INFORMATION entry per NUMA node.
        "SystemNodeInformation": [None, ["Pointer", dict(
            target="Array",
            target_args=dict(
                target="_MI_SYSTEM_NODE_INFORMATION",
                count=lambda x: x.obj_profile.get_constant_object(
                    "KeNumberNodes", "unsigned int").v(),
            )
        )]],
    }],
}
def InitializeWindows10Profile(profile):
    """Initialize windows 10 profiles.

    Builds on the Windows 8 initialization and applies Win10 specific
    overlays, picking the 64 or 32 bit undocumented fixups by architecture.
    """
    win8.InitializeWindows8Profile(profile)
    profile.add_overlay(win10_overlays)

    if profile.metadata("arch") == "AMD64":
        profile.add_overlay(win10_undocumented_amd64)
    else:
        profile.add_overlay(win10_undocumented_i386)

    # Older Win10 releases include SystemNodeInformation inside
    # _MI_SYSTEM_INFORMATION
    if not profile.has_type("_MI_HARDWARE_STATE"):
        profile.add_overlay({
            "_MI_SYSTEM_INFORMATION": [None, {
                "SystemNodeInformation": [None, ["Pointer", dict(
                    target="Array",
                    target_args=dict(
                        target="_MI_SYSTEM_NODE_INFORMATION",
                        count=lambda x: x.obj_profile.get_constant_object(
                            "KeNumberNodes", "unsigned int").v(),
                    )
                )]],
            }],
        })
from builtins import range
__author__ = "Michael Cohen <scudette@gmail.com>"
from rekall import obj
# In XP the privileges are simple arrays in the _TOKEN object.
xp_style_overlays = {
    "_TOKEN": [None, {
        # Counted array of _LUID_AND_ATTRIBUTES; length comes from the
        # token's PrivilegeCount member.
        'Privileges': [None, ['Pointer', dict(
            target='Array',
            target_args=dict(
                count=lambda x: x.PrivilegeCount,
                target='_LUID_AND_ATTRIBUTES'
            )
        )]],
    }],
}
class XP_TOKEN(obj.Struct):
    """XP Style privileges are just an array."""

    def GetPrivileges(self):
        """Enumerates all privileges in this token.

        Yields:
          value, flags
        """
        for privilege in self.Privileges:
            attributes = privilege.Attributes
            # Every listed privilege is at least Present; the attribute
            # bits add Enabled (2) and Default (1).
            flags = ["Present"]
            for bit, label in ((2, "Enabled"), (1, "Default")):
                if attributes & bit:
                    flags.append(label)
            yield privilege.Luid.v(), flags
class VISTA_TOKEN(obj.Struct):
    """A Vista Style _TOKEN object."""

    def GetPrivileges(self):
        """Enumerates all privileges in this token."""
        privilege_table = self.obj_session.GetParameter("privilege_table")
        # Each privilege is one bit in three parallel 64 bit bitmaps.
        bitmaps = (
            ("Present", self.Privileges.Present.v()),
            ("Enabled", self.Privileges.Enabled.v()),
            ("Default", self.Privileges.EnabledByDefault.v()),
        )
        for bit in range(64):
            # Only report privileges known to the table.
            if bit not in privilege_table:
                continue
            mask = 1 << bit
            flags = [label for label, bitmap in bitmaps if bitmap & mask]
            yield bit, flags
def InitializeTokenProfiles(profile):
    """Install the _TOKEN implementation appropriate for this profile.

    XP kernels store privileges as a counted array on the _TOKEN itself,
    while Vista and later use Present/Enabled/EnabledByDefault bitfields
    in _SEP_TOKEN_PRIVILEGES.
    """
    # NOTE(review): "!= None" (rather than "is not None") is kept as-is -
    # presumably get_obj_offset can return a None-like sentinel which only
    # compares equal to None; confirm before changing.
    if profile.get_obj_offset("_TOKEN", "PrivilegeCount") != None:
        # Uses XP Style Privilege array.
        profile.add_overlay(xp_style_overlays)
        profile.add_classes(_TOKEN=XP_TOKEN)
    elif profile.get_obj_offset("_SEP_TOKEN_PRIVILEGES", "Present") != None:
        # Uses Vista style Present, Enabled, Default bitfields.
        profile.add_classes(_TOKEN=VISTA_TOKEN)
# pylint: disable=protected-access
from rekall.plugins.overlays.windows import common
# Vista specific overlays: VAD layout, session image lists and the
# _CONTROL_AREA fast-reference file pointer.
vista_overlays = {
    '_EPROCESS' : [None, {
        # A symbolic link to the real vad root.
        'RealVadRoot': lambda x: x.VadRoot.BalancedRoot
    }],
    '_MMADDRESS_NODE': [None, {
        # Pool tag immediately precedes the structure.
        'Tag': [-4, ['String', dict(length=4)]],
    }],
    '_MMVAD_SHORT': [None, {
        'Tag': [-4, ['String', dict(length=4)]],
        # VPNs are page numbers; shift by the 4K page size.
        'Start': lambda x: x.StartingVpn << 12,
        'End': lambda x: ((x.EndingVpn + 1) << 12) - 1,
        'Length': lambda x: x.End - x.Start + 1,
        'CommitCharge': lambda x: x.u.VadFlags.CommitCharge,
    }],
    '_MMVAD': [None, {
        'Tag': [-4, ['String', dict(length=4)]],
        'ControlArea': lambda x: x.Subsection.ControlArea,
        'Start': lambda x: x.StartingVpn << 12,
        'End': lambda x: ((x.EndingVpn + 1) << 12) - 1,
        'Length': lambda x: x.End - x.Start + 1,
        'CommitCharge': lambda x: x.u.VadFlags.CommitCharge,
    }],
    '_MMVAD_LONG': [None, {
        'Tag': [-4, ['String', dict(length=4)]],
        'ControlArea': lambda x: x.Subsection.ControlArea,
        'Start': lambda x: x.StartingVpn << 12,
        'End': lambda x: ((x.EndingVpn + 1) << 12) - 1,
        'Length': lambda x: x.End - x.Start + 1,
        'CommitCharge': lambda x: x.u.VadFlags.CommitCharge,
    }],
    "_CONTROL_AREA": [None, {
        'FilePointer': [None, ['_EX_FAST_REF', dict(
            target="_FILE_OBJECT")]],
    }],
    '_MM_SESSION_SPACE': [None, {
        # Specialized iterator to produce all the _IMAGE_ENTRY_IN_SESSION
        # records.
        'ImageIterator': lambda x: x.ImageList.list_of_type(
            "_IMAGE_ENTRY_IN_SESSION", "Link")
    }],
    '_IMAGE_ENTRY_IN_SESSION': [None, {
        # Mask off the low 3 bits of the stored address.
        'ImageBase': lambda x: x.Address.v() & ~7
    }]
}
class _MMADDRESS_NODE(common.VadTraverser):
    """In win7 the base of all Vad objects in _MMADDRESS_NODE.
    The Vad structures can be either _MMVAD_SHORT or _MMVAD or _MMVAD_LONG. At
    the base of each struct there is an _MMADDRESS_NODE which contains the
    LeftChild and RightChild members. In order to traverse the tree, we follow
    the _MMADDRESS_NODE and create the required _MMVAD type at each point
    depending on their tags.
    """
    ## The actual type depends on this tag value.
    tag_map = {'Vadl': '_MMVAD_LONG',
               'VadS': '_MMVAD_SHORT',
               'Vad ': '_MMVAD',
               'VadF': '_MMVAD_SHORT',
               'Vadm': '_MMVAD_LONG',
              }
class _ETHREAD(common._ETHREAD):
    """A class for Windows 7 ETHREAD objects"""
    def owning_process(self):
        """Return the EPROCESS that owns this thread"""
        return self.Tcb.Process.dereference_as("_EPROCESS")
def InitializeVistaProfile(profile):
    """Apply Vista specific constants, overlays and classes to *profile*."""
    # Pool allocations are aligned differently on 64 bit kernels.
    if profile.metadata("arch") == "AMD64":
        profile.add_constants(dict(PoolAlignment=16))
    else:
        profile.add_constants(dict(PoolAlignment=8))
    profile.add_overlay(vista_overlays)
    profile.add_classes(dict(
        _ETHREAD=_ETHREAD,
        _MMADDRESS_NODE=_MMADDRESS_NODE
    ))
# 64 bit layouts of undocumented kernel structures, reverse engineered as
# per the inline notes below.
AMD64 = {
    # Reference:
    # http://gate.upm.ro/os/LABs/Windows_OS_Internals_Curriculum_Resource_Kit-ACADEMIC/WindowsResearchKernel-WRK/WRK-v1.2/base/ntos/mm/wrtfault.c
    # From http://www.cnblogs.com/kkindof/articles/2571227.html
    # Reversed from MiSessionInsertImage
    # win8.1.raw 18:05:45> dis "nt!MiSessionInsertImage"
    # 0xf802d314344a 4E e871030300 CALL 0xf802d31737c0 nt!memset
    # ...
    # 0xf802d314345a 5E 48897b20 MOV [RBX+0x20], RDI
    # typedef struct _IMAGE_ENTRY_IN_SESSION {
    # LIST_ENTRY Link;
    # PVOID Address;
    # PVOID LastAddress;
    # ULONG ImageCountInThisSession;
    # LOGICAL ImageLoading;
    # PMMPTE PrototypePtes;
    # PKLDR_DATA_TABLE_ENTRY DataTableEntry;
    # PSESSION_GLOBAL_SUBSECTION_INFO GlobalSubs;
    # } IMAGE_ENTRY_IN_SESSION, * PIMAGE_ENTRY_IN_SESSION;
    '_IMAGE_ENTRY_IN_SESSION': [None, {
        'Link': [0, ['_LIST_ENTRY']],
        'Address': [0x10, ['Pointer']],
        'LastAddress': [0x18, ['Pointer']],
    }],
    # Reversed from tcpip.sys!TcpStartPartitionModule
    "PARTITION_TABLE": [None, {
        "Partitions": [8, ["Array", dict(
            target="Pointer",
            count=lambda x: x.obj_profile.get_constant_object(
                "PartitionCount", "unsigned int"),
            target_args=dict(
                target="Array",
                target_args=dict(
                    count=4,
                    target="FIRST_LEVEL_DIR"
                )
            )
        )]],
    }],
    # ntoskrnl.exe!RtlCreateHashTable (PoolTag:HTab)
    "FIRST_LEVEL_DIR": [0x24, {
        "SizeOfSecondLevel": [0x8, ["unsigned int"]],
        "Mask": [0x10, ["unsigned int"]],
        # Reversed from ntoskrnl.exe!RtlpAllocateSecondLevelDir
        "SecondLevel": [0x20, ["Pointer", dict(
            target="Array",
            # Actual hash table (PoolTag:HTab)
            target_args=dict(
                count=lambda x: x.SizeOfSecondLevel,
                target="_LIST_ENTRY"
            )
        )]],
    }],
    '_SERVICE_DESCRIPTOR_TABLE' : [0x40, {
        'Descriptors' : [0x0, ['Array', dict(
            target='_SERVICE_DESCRIPTOR_ENTRY',
            count=2
        )]],
    }],
    # In 64 bit the KiServiceTable is a list of RVAs based off the table base to
    # the destination pointers.
    # Ref:
    # http://forum.sysinternals.com/keservicedescriptortableshadow-address_topic14093.html
    '_SERVICE_DESCRIPTOR_ENTRY' : [0x20, {
        'KiServiceTable' : [0x0, ['Pointer', dict(
            target="Array",
            target_args=dict(
                count=lambda x: x.ServiceLimit,
                target="int",
            )
        )]],
        'CounterBaseTable' : [0x8, ['Pointer']],
        'ServiceLimit' : [0x10, ['unsigned long long']],
        'ArgumentTable' : [0x18, ['Pointer']],
    }],
    # Documented in ./base/ntos/inc/mm.h WRK-v1.2.
    "_UNLOADED_DRIVER": [0x28, {
        "Name": [0, ["_UNICODE_STRING"]],
        "StartAddress": [0x10, ["Pointer"]],
        "EndAddress": [0x18, ["Pointer"]],
        "CurrentTime": [0x20, ["WinFileTime"]],
    }],
}
# 32 bit counterparts of the undocumented structures above.
I386 = {
    '_IMAGE_ENTRY_IN_SESSION': [None, {
        'Link': [0x00, ['_LIST_ENTRY']],
        'Address': [0x08, ['pointer', ['address']]],
        # NOTE(review): 0x0b looks like a typo for 0x0c (Address is a 4 byte
        # pointer at 0x08) - confirm against the kernel before changing.
        'LastAddress': [0x0b, ['pointer', ['address']]],
    }],
    # Reversed from tcpip.sys!TcpStartPartitionModule
    "PARTITION_TABLE": [None, {
        "Partitions": [4, ["Array", dict(
            target="Pointer",
            count=lambda x: x.obj_profile.get_constant_object(
                "PartitionCount", "unsigned int"),
            target_args=dict(
                target="Array",
                target_args=dict(
                    count=4,
                    target="FIRST_LEVEL_DIR"
                )
            )
        )]],
    }],
    # ntoskrnl.exe!RtlCreateHashTable
    "FIRST_LEVEL_DIR": [0x24, {
        "SizeOfSecondLevel": [0x8, ["unsigned int"]],
        "Mask": [0x10, ["unsigned int"]],
        # Reversed from ntoskrnl.exe!RtlpAllocateSecondLevelDir
        "SecondLevel": [0x20, ["Pointer", dict(
            target="Array",
            target_args=dict(
                count=lambda x: x.SizeOfSecondLevel,
                target="_LIST_ENTRY"
            )
        )]],
    }],
    '_SERVICE_DESCRIPTOR_TABLE' : [0x20, {
        'Descriptors' : [0x0, ['Array', dict(
            target='_SERVICE_DESCRIPTOR_ENTRY',
            count=2
        )]],
    }],
    '_SERVICE_DESCRIPTOR_ENTRY' : [0x10, {
        'KiServiceTable' : [0x0, ['Pointer', dict(
            target="Array",
            target_args=dict(
                count=lambda x: x.ServiceLimit,
                target="unsigned int",
            )
        )]],
        'CounterBaseTable' : [0x4, ['Pointer']],
        'ServiceLimit' : [0x8, ['unsigned long']],
        'ArgumentTable' : [0xc, ['Pointer']],
    }],
    # Documented in ./base/ntos/inc/mm.h WRK-v1.2.
    "_UNLOADED_DRIVER": [24, {
        "Name": [0, ["_UNICODE_STRING"]],
        "StartAddress": [8, ["Pointer"]],
        "EndAddress": [12, ["Pointer"]],
        "CurrentTime": [16, ["WinFileTime"]],
    }],
}
# TODO: Move to their own profile.
# These come from the reactos ndk project.
# (Stripped dataset-extraction residue that was fused onto the final line.)
ENUMS = {
    "_KOBJECTS": {
        "0": "EventNotificationObject",
        "1": "EventSynchronizationObject",
        "2": "MutantObject",
        "3": "ProcessObject",
        "4": "QueueObject",
        "5": "SemaphoreObject",
        "6": "ThreadObject",
        "7": "GateObject",
        "8": "TimerNotificationObject",
        "9": "TimerSynchronizationObject",
        "10": "Spare2Object",
        "11": "Spare3Object",
        "12": "Spare4Object",
        "13": "Spare5Object",
        "14": "Spare6Object",
        "15": "Spare7Object",
        "16": "Spare8Object",
        "17": "Spare9Object",
        "18": "ApcObject",
        "19": "DpcObject",
        "20": "DeviceQueueObject",
        "21": "EventPairObject",
        "22": "InterruptObject",
        "23": "ProfileObject",
        "24": "ThreadedDpcObject",
        "25": "MaximumKernelObject"
    },
}
from rekall.plugins.overlays.windows import common
# Windows XP specific overlays.
win_xp_overlays = {
    '_EPROCESS' : [None, {
        # On XP VadRoot is a plain pointer to the root _MMVAD.
        'VadRoot': [None, ['pointer', ['_MMVAD']]],
        'RealVadRoot': lambda x: x.VadRoot.dereference(),
    }],
    '_MMVAD_SHORT': [None, {
        # Pool tag immediately precedes the structure.
        'Tag': [-4, ['String', dict(length=4)]],
        # VPNs are page numbers; shift by the 4K page size.
        'Start': lambda x: x.StartingVpn << 12,
        'End': lambda x: ((x.EndingVpn + 1) << 12) - 1,
        'Length': lambda x: x.End - x.Start + 1,
        'CommitCharge': lambda x: x.u.VadFlags.CommitCharge,
    }],
    '_MMVAD': [None, {
        'Tag': [-4, ['String', dict(length=4)]],
        'Start': lambda x: x.StartingVpn << 12,
        'End': lambda x: ((x.EndingVpn + 1) << 12) - 1,
        'Length': lambda x: x.End - x.Start + 1,
        'CommitCharge': lambda x: x.u.VadFlags.CommitCharge,
    }],
    '_MMVAD_LONG': [None, {
        'Tag': [-4, ['String', dict(length=4)]],
        'Start': lambda x: x.StartingVpn << 12,
        'End': lambda x: ((x.EndingVpn + 1) << 12) - 1,
        'Length': lambda x: x.End - x.Start + 1,
        'CommitCharge': lambda x: x.u.VadFlags.CommitCharge,
    }],
    # This is not documented in Windows XP but is in Windows 7.
    "_OBJECT_HEADER_HANDLE_INFO": [16, {
        "HandleCountDataBase": [0, ["Pointer", {
            "target": "_OBJECT_HANDLE_COUNT_DATABASE"
        }]],
        "SingleEntry": [0, ["_OBJECT_HANDLE_COUNT_ENTRY", {}]]
    }],
    "_OBJECT_HANDLE_COUNT_ENTRY": [16, {
        "HandleCount": [8, ["BitField", {
            "end_bit": 24,
            "target": "unsigned long"
        }]],
        "LockCount": [8, ["BitField", {
            "end_bit": 32,
            "start_bit": 24,
            "target": "unsigned long"
        }]],
        "Process": [0, ["Pointer", {
            "target": "_EPROCESS"
        }]]
    }],
    '_MM_SESSION_SPACE': [None, {
        # Specialized iterator to produce all the _IMAGE_ENTRY_IN_SESSION
        # records.
        'ImageIterator': lambda x: x.ImageList.list_of_type(
            "_IMAGE_ENTRY_IN_SESSION", "Link")
    }],
    '_IMAGE_ENTRY_IN_SESSION': [None, {
        'ImageBase': lambda x: x.Address.v()
    }],
    "_SECTION_OBJECT": [None, {
        "Segment": [None, ["Pointer", dict(target="_SEGMENT")]]
    }],
}
class _MMVAD(common.VadTraverser):
    """Windows XP uses the _MMVAD struct itself as a traversor.
    i.e. The _MMVAD contains the LeftChild and RightChild.
    """
    # Traversal and tag handling are inherited from common.VadTraverser.
def InitializeXPProfile(profile):
    """Apply XP specific constants, overlays and classes to *profile*."""
    # Pool allocations are aligned differently on 64 bit kernels.
    if profile.metadata("arch") == "AMD64":
        profile.add_constants(dict(PoolAlignment=16))
    else:
        profile.add_constants(dict(PoolAlignment=8))
    profile.add_overlay(win_xp_overlays)
    profile.add_classes(dict(_MMVAD=_MMVAD))
__author__ = ("Michael Cohen <scudette@gmail.com>",
"Adam Sindelar <adamsh@google.com")
from rekall.plugins.overlays import basic
# Vtype definitions for parsing 64 bit Mach-O headers and load commands.
macho_vtypes = {
    'mach_header_64': [0x20, {
        'cputype': [None, ['Enumeration', dict(
            choices={
                1: 'VAX',
                6: 'MC680x0',
                7: 'i386',
                8: 'MIPS',
                10: 'MC98000',
                11: 'HPPA',
                12: 'ARM',
                13: 'MC88000',
                14: 'SPARC',
                15: 'i860',
                16: 'Alpha',
                18: 'PowerPC',
                # 0x01000000 is the CPU_ARCH_ABI64 flag bit.
                (0x01000000 | 7): 'X86_64',
                (0x01000000 | 18): 'PowerPC_64',
            },
            target="unsigned int",
        )]],
        'filetype': [None, ['Enumeration', dict(
            choices={
                0x1: 'MH_OBJECT', # relocatable object file
                0x2: "MH_EXECUTE", # demand paged executable file
                0x3: "MH_FVMLIB", # fixed VM shared library file
                0x4: 'MH_CORE', # core file
                0x5: 'MH_PRELOAD', # preloaded executable file
                0x6: 'MH_DYLIB', # dynamicly bound shared library file
                0x7: 'MH_DYLINKER', # dynamic link editor
                0x8: 'MH_BUNDLE', # dynamicly bound bundle file
            },
            target="unsigned int",
        )]],
        'flags': [None, ['Flags', dict(maskmap={
            'MH_NOUNDEFS': 0x1,
            'MH_INCRLINK': 0x2,
            'MH_DYLDLINK': 0x4,
            'MH_BINDATLOAD': 0x8,
            'MH_PREBOUND': 0x10,
        })]],
        # Load commands immediately follow the header.
        'segments': [lambda x: x.obj_size, ['Array', dict(
            target="segment_command_64",
            count=lambda x: x.ncmds)]],
    }],
    'segment_command_64': [0x48, {
        'cmd': [None, ['Enumeration', dict(
            choices={
                0x1: 'LC_SEGMENT',
                0x2: 'LC_SYMTAB',
                0x3: 'LC_SYMSEG',
                0x4: 'LC_THREAD',
                0x5: 'LC_UNIXTHREAD',
                0x6: 'LC_LOADFVMLIB',
                0x7: 'LC_IDFVMLIB',
                0x8: 'LC_IDENT',
                0x9: 'LC_FVMFILE',
                0xa: 'LC_PREPAGE',
                0xb: 'LC_DYSYMTAB',
                0xc: 'LC_LOAD_DYLIB',
                0xd: 'LC_ID_DYLIB',
                0xe: 'LC_LOAD_DYLINKER',
                0xf: 'LC_ID_DYLINKER',
                0x10: 'LC_PREBOUND_DYLIB',
                0x11: 'LC_ROUTINES',
                0x12: 'LC_SUB_FRAMEWORK',
                0x13: 'LC_SUB_UMBRELLA',
                0x14: 'LC_SUB_CLIENT',
                0x15: 'LC_SUB_LIBRARY',
                0x16: 'LC_TWOLEVEL_HINTS',
                0x17: 'LC_PREBIND_CKSUM',
                # 0x80000000 is the LC_REQ_DYLD flag bit.
                0x80000000 | 0x18: 'LC_LOAD_WEAK_DYLIB',
                0x19: 'LC_SEGMENT_64',
                0x1a: 'LC_ROUTINES_64',
                0x1b: 'LC_UUID',
                0x80000000 | 0x1c: 'LC_RPATH',
                0x1d: 'LC_CODE_SIGNATURE',
                0x1e: 'LC_SEGMENT_SPLIT_INFO',
                0x80000000 | 0x1f: 'LC_REEXPORT_DYLIB',
                0x20: 'LC_LAZY_LOAD_DYLIB',
                0x21: 'LC_ENCRYPTION_INFO',
                0x22: 'LC_DYLD_INFO',
                0x80000000 | 0x22: 'LC_DYLD_INFO_ONLY',
            },
            target="unsigned int")]],
        'segname': [None, ['String', dict(length=16)]],
    }],
}
class MachoProfile(basic.ProfileLP64, basic.BasicClasses):
    """A profile for Mach-O files.

    This profile contains types for both 32 and 64bit Mach-O files, although
    only the latter is actually in use by anyone (including Apple).
    """

    @classmethod
    def Initialize(cls, profile):
        super(MachoProfile, cls).Initialize(profile)
        profile.add_overlay(macho_vtypes)
from rekall import obj
from rekall.plugins.overlays import basic
# These come out of the kernel symbols but are put here so we can use them
# outside the linux implementation.
elf_vtypes = {
    # 64 bit ELF file header.
    "elf64_hdr": [64, {
        'e_ident': [0, ['array', 16, ['unsigned char']]],
        'e_type': [16, ['short unsigned int']],
        'e_machine': [18, ['short unsigned int']],
        'e_version': [20, ['unsigned int']],
        'e_entry': [24, ['long long unsigned int']],
        'e_phoff': [32, ['long long unsigned int']],
        'e_shoff': [40, ['long long unsigned int']],
        'e_flags': [48, ['unsigned int']],
        'e_ehsize': [52, ['short unsigned int']],
        'e_phentsize': [54, ['short unsigned int']],
        'e_phnum': [56, ['short unsigned int']],
        'e_shentsize': [58, ['short unsigned int']],
        'e_shnum': [60, ['short unsigned int']],
        'e_shstrndx': [62, ['short unsigned int']],
    }],
    # Program header entry.
    'elf64_phdr': [56, {
        'p_type': [0, [u'unsigned int']],
        'p_flags': [4, [u'unsigned int']],
        'p_offset': [8, [u'long long unsigned int']],
        'p_vaddr': [16, [u'long long unsigned int']],
        'p_paddr': [24, [u'long long unsigned int']],
        'p_filesz': [32, [u'long long unsigned int']],
        'p_memsz': [40, [u'long long unsigned int']],
        'p_align': [48, [u'long long unsigned int']],
    }],
    # Section header entry.
    'elf64_shdr': [64, {
        'sh_name': [0, [u'unsigned int']],
        'sh_type': [4, [u'unsigned int']],
        'sh_flags': [8, [u'long long unsigned int']],
        'sh_addr': [16, [u'long long unsigned int']],
        'sh_offset': [24, [u'long long unsigned int']],
        'sh_size': [32, [u'long long unsigned int']],
        'sh_link': [40, [u'unsigned int']],
        'sh_info': [44, [u'unsigned int']],
        'sh_addralign': [48, [u'long long unsigned int']],
        'sh_entsize': [56, [u'long long unsigned int']],
    }],
    # Note section header (name and desc follow the fixed part).
    'elf64_note': [12, {
        'n_namesz': [0, ['unsigned int']],
        'n_descsz': [4, ['unsigned int']],
        'n_type': [8, ['unsigned int']],
    }],
}
# Unfortunately the kernel uses #defines for many of these rather than enums, so
# we need to hand overlay them :-(.
elf_overlays = {
    "elf64_hdr": [None, {
        # ELF magic + 64 bit class + little endian + version 1.
        'e_ident': [None, ['Signature', dict(
            value="\x7fELF\x02\x01\x01"
        )]],
        'e_type': [None, ['Enumeration', {
            "choices": {
                0: 'ET_NONE',
                1: 'ET_REL',
                2:'ET_EXEC',
                3:'ET_DYN',
                4:'ET_CORE',
                0xff00:'ET_LOPROC',
                0xffff:'ET_HIPROC'},
            'target': 'unsigned char'}]],
        # Promote the raw offset to a pointer at the program header table.
        'e_phoff': [None, ['Pointer', dict(
            target='Array',
            target_args=dict(
                target='elf64_phdr',
                target_size=lambda x: x.e_phentsize,
                count=lambda x: x.e_phnum))]],
        'e_shoff': [None, ['Pointer', dict(target='elf64_shdr')]],
    }],
    "elf64_phdr": [None, {
        'p_type': [None, ['Enumeration', {
            "choices": {
                0: 'PT_NULL',
                1: 'PT_LOAD',
                2: 'PT_DYNAMIC',
                3: 'PT_INTERP',
                4: 'PT_NOTE',
                5: 'PT_SHLIB',
                6: 'PT_PHDR',
                7: 'PT_TLS',
                0x60000000 : 'PT_LOOS',
                0x6fffffff :'PT_HIOS',
                0x70000000 :'PT_LOPROC',
                0x7fffffff :'PT_HIPROC',
                0x6474e550 :'PT_GNU_EH_FRAME',
            },
            "target": "unsigned int"}]],
        "p_flags": [None, ['Flags', dict(
            maskmap=dict(
                PF_R=0x4,
                PF_W=0x2,
                PF_X=0x1,
            ),
            target='unsigned long')]],
        "p_offset": [None, ["Pointer", dict(target="Void")]],
    }],
    "elf64_note": [None, {
        # Name and desc are packed directly after the 12 byte fixed header.
        'name': [lambda x: 12 + x.obj_offset,
                 ['String', dict(length=lambda x: x.n_namesz)]],
        'desc': [lambda x: 12 + x.n_namesz + x.obj_offset,
                 ['String', dict(length=lambda x: x.n_descsz)]],
    }],
}
class ELFFileImplementation(obj.ProfileModification):
    """An implementation of a parser for ELF files."""
    @classmethod
    def Modify(cls, profile):
        # Install the ELF vtypes and the hand built overlays.
        profile.add_types(elf_vtypes)
        profile.add_overlay(elf_overlays)
class ELFProfile(basic.ProfileLP64, basic.BasicClasses):
    """A profile for ELF files."""

    def __init__(self, **kwargs):
        super(ELFProfile, self).__init__(**kwargs)
        # Apply the ELF types/overlays to this profile instance.
        ELFFileImplementation.Modify(self)
from builtins import str
__author__ = "Michael Cohen <scudette@google.com>"
from rekall.ui import text
from rekall.plugins.renderers import data_export
from rekall_lib import utils
class FileSpec_Text(text.TextObjectRenderer):
    renders_type = "FileSpec"

    def render_row(self, target, width=None, **_):
        """Render a FileSpec; non-API filesystems are shown in parentheses."""
        if target.filesystem != "API":
            return text.Cell(u"%s (%s)" % (target.name, target.filesystem),
                             width=width)
        return text.Cell(str(target.name), width=width)
class FileInformation_TextObjectRenderer(text.TextObjectRenderer):
    renders_type = "FileInformation"
    def render_row(self, target, **options):
        # Delegate to the FileSpec renderer: a FileInformation row is
        # displayed as its filename.
        return FileSpec_Text(
            renderer=self.renderer, session=self.session).render_row(
                target.filename, **options)
class UserTextObjectRenderer(text.TextObjectRenderer):
    renders_type = "User"

    def render_row(self, item, **_):
        """Render a user as "username (uid)", or just the uid if unnamed."""
        if not item.username:
            return text.Cell(str(item.uid))
        return text.Cell(u"%s (%s)" % (item.username, item.uid))
class GroupTextObjectRenderer(text.TextObjectRenderer):
    renders_type = "Group"

    def render_row(self, item, **_):
        """Render a group as "group_name (gid)", or just the gid if unnamed."""
        if not item.group_name:
            return text.Cell(str(item.gid))
        return text.Cell(u"%s (%s)" % (item.group_name, item.gid))
class DataExportFileSpecObjectRenderer(
        data_export.DataExportBaseObjectRenderer):
    renders_type = "FileSpec"
    def Summary(self, item, **_):
        """One line human readable summary of the FileSpec."""
        return utils.SmartStr(item)
    def GetState(self, item, **options):
        """Serializable state of the FileSpec for data export."""
        return dict(filesystem=item.filesystem, name=item.name)
class PermissionsFileSpecObjectRenderer(
        data_export.DataExportBaseObjectRenderer):
    renders_type = "Permissions"
    def Summary(self, item, **_):
        """One line human readable summary of the permissions."""
        return utils.SmartStr(item)
    def GetState(self, item, **options):
        # Export both the symbolic (e.g. "-rwxr-xr-x") and numeric forms.
        return dict(perm=str(item), int_perm=int(item))
class LiveProcessTextRenderer(text.TextObjectRenderer):
    renders_type = "LiveProcess"
    def render_row(self, target, width=None, **_):
        # Displayed as "name (pid)".
        return text.Cell("%s (%s)" % (target.name, target.pid), width=width)
class LiveProcessDataExportRenderer(
        data_export.DataExportBaseObjectRenderer):
    renders_type = "LiveProcess"

    def GetState(self, item, **_):
        """Export the live process as a plain dict."""
        return item.as_dict()
"""This module adds arbitrary file reading to Rekall."""
from __future__ import print_function
from builtins import str
from builtins import object
__author__ = "Michael Cohen <scudette@google.com>"
import binascii
import fnmatch
import hashlib
import itertools
import platform
import re
import os
import six
from rekall import plugin
from rekall.plugins.response import common
from rekall_lib import utils
BUFFER_SIZE = 10 * 1024 * 1024
class IRFind(common.AbstractIRCommandPlugin):
    """List files recursively from a root path."""
    name = "find"
    __args = [
        dict(name="root", positional=True,
             help="The root directory to start search from.")
    ]
    table_header = [
        dict(name="Perms", type="Permissions", width=16),
        dict(name="Size", align="r", width=10),
        dict(name="Path"),
    ]
    def collect(self):
        """Walk the tree under --root yielding one row per directory entry."""
        for root, dirs, files in os.walk(self.plugin_args.root):
            for d in dirs + files:
                full_path = os.path.join(root, d)
                result = common.FileFactory(full_path, session=self.session)
                if result:
                    yield dict(Perms=result.st_mode,
                               Size=result.st_size,
                               Path=result)
class IRStat(common.AbstractIRCommandPlugin):
    """Stat a list of paths."""
    name = "stat"
    __args = [
        dict(name="paths", positional=True, type="Array",
             help="Paths to stat."),
    ]
    table_header = [
        dict(name="Perms", type="Permissions", width=16),
        dict(name="Size", align="r", width=10),
        dict(name="Path"),
    ]
    def collect(self):
        """Stat each requested path, yielding a row per resolvable path."""
        for full_path in self.plugin_args.paths:
            result = common.FileFactory(full_path, session=self.session)
            if result:
                yield dict(Perms=result.st_mode, Size=result.st_size,
                           Path=result)
class Hash(object):
    """A class to hold a hash value.

    Attributes:
      type: the digest algorithm name (e.g. "md5", "sha1").
      value: the raw digest bytes.
    """

    def __init__(self, type="md5", value=None):
        self.type = type
        self.value = value

    def __str__(self):
        # binascii.hexlify returns bytes on Python 3; decode so the result
        # reads "md5:abcd" rather than "md5:b'abcd'".
        return u"%s:%s" % (self.type,
                           binascii.hexlify(self.value).decode("ascii"))
class IRHash(common.AbstractIRCommandPlugin):
    """Hash files with one or more digest algorithms."""
    name = "hash"
    __args = [
        dict(name="paths", positional=True, type="Array",
             help="Paths to hash."),
        dict(name="hash", type="ChoiceArray", default=["sha1"],
             choices=["md5", "sha1", "sha256"],
             help="One or more hashes to calculate.")
    ]
    table_header = [
        dict(name="Hashes", width=72),
        dict(name="Path", type="FileInformation"),
    ]
    def calculate_hashes(self, hashes, file_info):
        """Read file_info once, feeding every requested hasher.

        Returns:
          dict mapping algorithm name -> hex digest string.
        """
        hashers = dict((name, getattr(hashlib, name)()) for name in hashes)
        fd = file_info.open()
        while 1:
            data = fd.read(BUFFER_SIZE)
            if not data:
                break
            for hasher in hashers.values():
                hasher.update(data)
        # Replace each hasher object with its final hex digest.
        for key in list(hashers):
            hashers[key] = hashers[key].hexdigest()
        return hashers
    def collect(self):
        """Hash each requested path, skipping directories."""
        for path in self.plugin_args.paths:
            file_info = common.FileFactory(path)
            if not file_info.st_mode.is_dir():
                yield dict(
                    Hashes=self.calculate_hashes(
                        self.plugin_args.hash, file_info),
                    Path=file_info)
class Component(object):
    """Base class for one element of a glob expression path.

    Components compare and hash on their "ClassName:component" string
    form, and share a stat cache across the whole glob evaluation.
    """

    def __init__(self, session, component=None, cache=None):
        self.session = session
        self.component = component
        self.component_cache = cache

    def stat(self, path):
        """Stat *path*, memoizing results in the shared component cache."""
        key = utils.SmartUnicode(path)
        try:
            return self.component_cache[key]
        except KeyError:
            result = common.FileFactory(path)
            self.component_cache.Put(key, result)
            return result

    def __str__(self):
        return u"%s:%s" % (type(self).__name__, self.component)

    def __eq__(self, other):
        return str(self) == utils.SmartUnicode(other)

    def __hash__(self):
        return hash(str(self))
class LiteralComponent(Component):
    """Matches a literal (non-wildcard) path component."""
    def case_insensitive_filesystem(self):
        # Assumes all Windows filesystems are case insensitive and all
        # others are case sensitive - TODO confirm (e.g. macOS HFS+).
        if platform.system() == "Windows":
            return True
        return False
    def filter(self, path):
        """Return the child paths of *path* matching this literal."""
        # For case insensitive filesystems we can just try to open the
        # component.
        if self.case_insensitive_filesystem():
            result_pathspec = path.add(self.component)
            stat = self.stat(result_pathspec)
            if stat:
                return [stat.filename]
            else:
                return []
        # Since we must match a case insensitive filename we need to
        # list all the files and find the best match.
        stat = common.FileFactory(path)
        if not stat:
            return []
        # Group directory entries by lower-cased name so the lookup is
        # case-blind while returned names keep their original case.
        children = {}
        for x in stat.list_names():
            children.setdefault(x.lower(), []).append(x)
        return [stat.filename.add(x)
                for x in children.get(self.component.lower(), [])]
class RegexComponent(Component):
    """Matches directory entries against a regular expression."""
    def __init__(self, *args, **kwargs):
        super(RegexComponent, self).__init__(*args, **kwargs)
        # Case insensitive, mirroring the literal component's behavior.
        self.component_re = re.compile(self.component, re.I)
    def filter(self, path):
        """Yield children of *path* whose basename matches the regex."""
        stat = self.stat(path)
        if not stat:
            return
        # Only list real directories; symlinked directories are skipped.
        if stat.st_mode.is_dir() and not stat.st_mode.is_link():
            self.session.report_progress("Searching %s", path)
            for basename in stat.list_names():
                if self.component_re.match(basename):
                    yield stat.filename.add(basename)
class RecursiveComponent(RegexComponent):
    """A component recursing into subdirectories up to a fixed depth.

    This implements the ``**`` glob notation.
    """

    def __init__(self, depth=3, **kwargs):
        super(RecursiveComponent, self).__init__(**kwargs)
        self.depth = depth

    def filter(self, path, depth=0):
        """Recursively yield matching paths at or below `path`.

        Args:
          path: The pathspec to start recursing from.
          depth: Current recursion depth (internal).
        """
        self.session.report_progress("Recursing into %s", path)
        # TODO: Deal with cross devices.
        if depth >= self.depth:
            return
        stat = self.stat(path)
        if not stat:
            return
        # Do not follow symlinks.
        if stat.st_mode.is_dir() and not stat.st_mode.is_link():
            # The top level counts as a hit, so that e.g. /**/*.txt
            # matches /foo.txt as well.
            if depth == 0:
                yield stat.filename
            for basename in stat.list_names():
                # NOTE(review): the is_link() test here re-checks the
                # parent's mode, not the child's; the child's link bit is
                # only checked once we recurse into it. Confirm intended.
                if (self.component_re.match(basename) and
                        not stat.st_mode.is_link()):
                    subdir = stat.filename.add(basename)
                    yield subdir
                    for subitem in self.filter(subdir, depth+1):
                        yield subitem
class IRGlob(common.AbstractIRCommandPlugin):
    """Search for files by filename glob.
    This code roughly based on the Glob flow in GRR.
    """
    name = "glob"
    __args = [
        dict(name="globs", positional=True, type="ArrayString",
             help="List of globs to return."),
        dict(name="root",
             help="Root directory to glob from."),
        dict(name="case_insensitive", default=True, type="Bool",
             help="Globs will be case insensitive."),
        dict(name="path_sep",
             help="Path separator character (/ or \\)"),
        dict(name="filesystem", choices=list(common.FILE_SPEC_DISPATCHER),
             type="Choices", default="API",
             help="The virtual filesystem implementation to glob in.")
    ]
    table_header = [
        dict(name="path", type="FileInformation"),
    ]

    def column_types(self):
        return dict(path=common.FileInformation(filename="/etc"))

    # KnowledgeBase interpolations look like %%name%%.
    INTERPOLATED_REGEX = re.compile(r"%%([^%]+?)%%")
    # Grouping pattern: e.g. {test.exe,foo.doc,bar.txt}
    GROUPING_PATTERN = re.compile("({([^}]+,[^}]+)}|%%([^%]+?)%%)")
    # The ** recursion operator with an optional depth suffix (e.g. **2).
    RECURSION_REGEX = re.compile(r"\*\*(\d*)")
    # A regex indicating if there are shell globs in this path.
    GLOB_MAGIC_CHECK = re.compile("[*?[]")

    def __init__(self, *args, **kwargs):
        super(IRGlob, self).__init__(*args, **kwargs)
        # stat() cache shared by all components of this plugin run.
        self.component_cache = utils.FastStore(50)
        # Default path seperator is platform dependent.
        if not self.plugin_args.path_sep:
            self.plugin_args.path_sep = (
                "\\" if platform.system() == "Windows" else "/")
        # By default use the root of the filesystem.
        if self.plugin_args.root is None:
            self.plugin_args.root = self.plugin_args.path_sep

    def _interpolate_grouping(self, pattern):
        """Expand {a,b} groups and %%kb%% interpolations in pattern.

        Returns the list of all concrete patterns the input expands to.
        """
        # Take the pattern and split it into components around grouping
        # patterns. Expand each grouping pattern to a set.
        # e.g. /foo{a,b}/bar -> ["/foo", set(["a", "b"]), "/bar"]
        result = []
        components = []
        offset = 0
        for match in self.GROUPING_PATTERN.finditer(pattern):
            match_str = match.group(0)
            # Alternatives.
            if match_str.startswith("{"):
                components.append([pattern[offset:match.start()]])
                # Expand the attribute into the set of possibilities:
                alternatives = match.group(2).split(",")
                components.append(set(alternatives))
                offset = match.end()
            # KnowledgeBase interpolation.
            elif match_str.startswith("%"):
                components.append([pattern[offset:match.start()]])
                kb = self.session.GetParameter("knowledge_base")
                alternatives = kb.expand(match_str)
                components.append(set(alternatives))
                offset = match.end()
            else:
                raise plugin.PluginError(
                    "Unknown interpolation %s" % match.group(0))
        # Trailing literal text after the last group.
        components.append([pattern[offset:]])
        # Now calculate the cartesian products of all these sets to form all
        # strings.
        for vector in itertools.product(*components):
            result.append(u"".join(vector))
        # These should be all possible patterns.
        # e.g. /fooa/bar , /foob/bar
        return result

    def convert_glob_into_path_components(self, pattern):
        """Converts a glob pattern into a list of pathspec components.

        Wildcards are also converted to regular expressions. The pathspec
        components do not span directories, and are marked as a regex or a
        literal component.

        We also support recursion into directories using the ** notation. For
        example, /home/**2/foo.txt will find all files named foo.txt recursed 2
        directories deep. If the directory depth is omitted, it defaults to 3.

        Example:
          /home/test**/*exe -> [{path: 'home', type: "LITERAL"},
                                {path: 'test.*\\Z(?ms)', type: "RECURSIVE"},
                                {path: '.*exe\\Z(?ms)', type: "REGEX"}]

        Args:
          pattern: A glob expression with wildcards.

        Returns:
          A list of PathSpec instances for each component.

        Raises:
          ValueError: If the glob is invalid.
        """
        pattern_components = common.FileSpec(
            pattern, path_sep=self.plugin_args.path_sep).components()
        components = []
        for path_component in pattern_components:
            if not path_component:
                continue
            # A ** in the path component means recurse into directories that
            # match the pattern.
            m = self.RECURSION_REGEX.search(path_component)
            if m:
                depth = 3
                # Allow the user to override the recursion depth.
                if m.group(1):
                    depth = int(m.group(1))
                # Replace the ** with a * so fnmatch can translate it.
                path_component = path_component.replace(m.group(0), "*")
                component = RecursiveComponent(
                    session=self.session,
                    component=fnmatch.translate(path_component),
                    cache=self.component_cache,
                    depth=depth)
            elif self.GLOB_MAGIC_CHECK.search(path_component):
                component = RegexComponent(
                    session=self.session,
                    cache=self.component_cache,
                    component=fnmatch.translate(path_component))
            else:
                component = LiteralComponent(
                    session=self.session,
                    cache=self.component_cache,
                    component=path_component)
            components.append(component)
        return components

    def _filter(self, node, path):
        """Path is the pathspec of the path we begin evaluation with."""
        self.session.report_progress("Checking %s", path)
        for component, child_node in six.iteritems(node):
            # Terminal node - yield the result.
            if not child_node:
                for subpath in component.filter(path):
                    yield subpath
            else:
                # Non - terminal node, walk the subnode recursively.
                for matching_path in component.filter(path):
                    for subpath in self._filter(child_node, matching_path):
                        yield subpath

    def make_component_tree(self, globs):
        """Build a prefix tree of components from all the globs.

        Globs with a common prefix share nodes, so shared directories
        are only listed once during evaluation.
        """
        expanded_globs = []
        for glob in globs:
            expanded_globs.extend(self._interpolate_grouping(glob))
        component_tree = {}
        for glob in expanded_globs:
            node = component_tree
            for component in self.convert_glob_into_path_components(glob):
                node = node.setdefault(component, {})
        return component_tree

    def collect_globs(self, globs):
        """Yield FileInformation objects for every path matching globs."""
        component_tree = self.make_component_tree(globs)
        root = common.FileSpec(self.plugin_args.root,
                               path_sep=self.plugin_args.path_sep)
        for path in self._filter(component_tree, root):
            yield common.FileFactory(path, session=self.session)

    def collect(self):
        for x in self.collect_globs(self.plugin_args.globs):
            yield dict(path=x)
def print_component_tree(tree, depth=""):
"""This is used for debugging the component_tree."""
if not tree:
return
for k, v in tree.items():
print("%s %s:" % (depth, k))
print_component_tree(v, depth + " ")
class IRDump(IRGlob):
    """Hexdump files from disk.

    Globs are inherited from IRGlob; every matched file is dumped from
    `start` for at most `length` bytes (capped at width * rows).
    """
    name = "hexdump_file"
    __args = [
        dict(name="start", type="IntParser", default=0,
             help="An offset to hexdump."),
        dict(name="length", type="IntParser", default=100,
             help="Maximum length to dump."),
        dict(name="width", type="IntParser", default=24,
             help="Number of bytes per row"),
        # Fixed copy/paste error: this argument is the row count.
        dict(name="rows", type="IntParser", default=4,
             help="Number of rows to dump"),
    ]
    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="FileSpec", hidden=True),
        dict(name="offset", style="address"),
        dict(name="hexdump", width=65),
    ]

    def collect(self):
        """Yield a divider per file followed by its hexdump rows."""
        for hit in super(IRDump, self).collect():
            path = hit.get("path")
            if path:
                fd = path.open()
                if fd:
                    yield dict(divider=path.filename)
                    # Never dump more than fits in the width x rows window.
                    to_read = min(
                        self.plugin_args.length,
                        self.plugin_args.width * self.plugin_args.rows)
                    for offset in utils.xrange(
                            self.plugin_args.start,
                            self.plugin_args.start + to_read,
                            self.plugin_args.width):
                        fd.seek(offset)
                        data = fd.read(self.plugin_args.width)
                        # Stop at EOF.
                        if not data:
                            break
                        yield dict(
                            offset=offset,
                            FileSpec=path.filename,
                            hexdump=utils.HexDumpedString(data),
                            nowrap=True,
                            hex_width=self.plugin_args.width)
import os
import platform
from rekall import addrspace
from rekall_lib import utils
from rekall.plugins import core
from rekall.plugins.common import address_resolver
from rekall.plugins.response import common
from rekall.plugins.response import processes
from rekall.plugins.overlays import basic
class LiveMap(utils.SlottedObject):
    """One memory mapping of a live process (a /proc/<pid>/maps row)."""
    __slots__ = ("start", "end", "perms", "file_offset", "dev", "inode",
                 "filename")

    @utils.safe_property
    def length(self):
        # Size of the mapping in bytes.
        return self.end - self.start

    def __str__(self):
        return u"%s %08x-%08x %s" % (
            self.filename or "", self.start, self.end, self.perms)
class IRMaps(processes.APIProcessFilter):
    """Examine the process memory maps."""
    name = "maps"
    __args = [
        dict(name="regex", type="RegEx",
             help="A regular expression to filter VAD filenames."),
        dict(name="offset", type="SymbolAddress",
             help="Only print the vad corresponding to this offset."),
        dict(name="verbosity", type="IntParser", default=1,
             help="With high verbosity print more information on each region."),
    ]
    table_header = [
        dict(name='proc', type="proc", hidden=True),
        dict(name="divider", type="Divider"),
        dict(name='Map', hidden=True),
        dict(name='start', style="address"),
        dict(name='end', style="address"),
        dict(name='perms', width=4),
        dict(name='filename')
    ]

    def generate_maps(self, pid):
        """Parse /proc/<pid>/maps and yield a LiveMap per region."""
        # Details of this are here: http://goo.gl/fmebo
        # On linux its easy - just parse /proc/ filesystem.
        try:
            # Use a context manager so the file descriptor is not leaked.
            with open("/proc/%s/maps" % pid) as fd:
                maps_data = fd.read()
        except (OSError, IOError):
            return
        for line in maps_data.splitlines():
            result = LiveMap()
            parts = line.split()
            start, end = parts[0].split("-")
            result.start = int(start, 16)
            result.end = int(end, 16)
            result.perms = parts[1]
            result.file_offset = parts[2]
            result.dev = parts[3]
            result.inode = int(parts[4])
            try:
                result.filename = parts[5]
            except IndexError:
                # Anonymous mappings have no backing file.
                pass
            yield result

    def merge_ranges(self, pid):
        """Generate merged ranges.

        Adjacent ranges backed by the same filename are coalesced.
        Note: permissions are not compared, so regions of the same file
        with different protections merge into one range.
        """
        old_maps = None
        for maps in self.generate_maps(pid):
            # Try to merge this range with the previous range.
            if (old_maps and
                    old_maps.end == maps.start and
                    old_maps.filename == maps.filename):
                old_maps.end = maps.end
                continue
            # Yield the old range:
            if old_maps:
                yield old_maps
            old_maps = maps
        # Emit the last range.
        if old_maps:
            yield old_maps

    def collect(self):
        """Yield a divider per process followed by its (filtered) maps."""
        generator = self.generate_maps
        # At low verbosity show the merged view.
        if self.plugin_args.verbosity <= 1:
            generator = self.merge_ranges
        for proc in self.filter_processes():
            divider = "{0} pid: {1:6}\n".format(proc.name, proc.pid)
            yield dict(divider=divider)
            for maps in generator(proc.pid):
                # Apply the filename regex filter if given.
                if (self.plugin_args.regex and not
                        self.plugin_args.regex.search(maps.filename or "")):
                    continue
                # Apply the offset filter if given.
                if (self.plugin_args.offset is not None and
                        not maps.start <= self.plugin_args.offset <= maps.end):
                    continue
                yield dict(proc=proc,
                           Map=maps,
                           start=maps.start,
                           end=maps.end,
                           perms=maps.perms,
                           filename=maps.filename)
class IRVadDump(core.DirectoryDumperMixin, IRMaps):
    """Dump the VMA memory for a process."""
    name = "vaddump"
    table_header = IRMaps.table_header[:] + [
        dict(name='dumpfile'),
    ]

    def collect(self):
        """Write each mapped region to a .dmp file and yield the row."""
        for data in super(IRVadDump, self).collect():
            # Rows without a process (divider rows) are skipped.
            if "proc" not in data:
                continue
            task = data["proc"]
            start = data["start"]
            end = data["end"]
            # Name the dump file after the process and the range.
            filename = "{0}.{1}.{2:08x}-{3:08x}.dmp".format(
                task.name, task.pid,
                data["start"], data["end"])
            data["dumpfile"] = filename
            with self.session.GetRenderer().open(
                    directory=self.dump_dir,
                    filename=filename,
                    mode='wb') as fd:
                task_space = task.get_process_address_space()
                try:
                    self.CopyToFile(task_space, start, end, fd)
                except OverflowError:
                    # Region does not fit the address space; skip the row.
                    continue
            yield data
class LinuxAPIProfile(common.APIBaseProfile):
    """Profile for Linux live analysis."""

    def __init__(self, proc=None, **kwargs):
        super(LinuxAPIProfile, self).__init__(**kwargs)
        # TODO: Although it is possible to run 32 bit processes on 64 bit
        # systems we dont detect this case. We set the profile architecture
        # based on the operating system's platform.
        arch, _ = platform.architecture()
        if arch == "64bit":
            basic.ProfileLP64.Initialize(self)
        else:
            basic.Profile32Bits.Initialize(self)


# Register the profile for Linux.
common.IRProfile = LinuxAPIProfile
class LinuxAPIProcessAddressSpace(addrspace.RunBasedAddressSpace):
    """An address space which read processes using ReadProcessMemory()."""

    def __init__(self, pid=None, **kwargs):
        super(LinuxAPIProcessAddressSpace, self).__init__(**kwargs)
        self.pid = pid
        try:
            # /proc/<pid>/mem is seekable at the virtual addresses of the
            # process' mappings.
            self.process_handle = open("/proc/%s/mem" % pid, "rb")
            # Mirror each (merged) mapping as an identity run: virtual
            # address == offset into /proc/<pid>/mem.
            for maps in self.session.plugins.maps().merge_ranges(pid):
                self.add_run(maps.start, maps.start, maps.length,
                             address_space=self, data=dict(
                                 pid=pid, vad=maps))
        except (IOError, OSError):
            # We cant open the memory, just return an empty address space.
            pass

    def read(self, addr, length):
        # Guard against unreasonably large reads.
        if length > self.session.GetParameter("buffer_size"):
            raise IOError("Too much data to read.")
        self.process_handle.seek(addr)
        try:
            return self.process_handle.read(length)
        except IOError:
            # The range may be unreadable (e.g. special mappings); pad
            # with zeros instead of failing.
            return addrspace.ZEROER.GetZeros(length)

    def __str__(self):
        return u"%s(%s)" % (self.__class__.__name__, self.pid)


# Register the process AS as a Linux one.
common.IRProcessAddressSpace = LinuxAPIProcessAddressSpace
class MapModule(address_resolver.Module):
    """A module representing a memory mapping."""
    # All behavior is inherited; this class exists to give modules
    # derived from the maps plugin a distinct type.
class LinuxAPIAddressResolver(address_resolver.AddressResolverMixin,
                              common.AbstractAPICommandPlugin):
    """A Linux specific address resolver plugin."""

    @staticmethod
    def NormalizeModuleName(module_name):
        """Return the basename of the mapped file, or "" when unnamed."""
        if not module_name:
            return ""
        return os.path.basename(module_name)

    def _EnsureInitialized(self):
        """Lazily build the module map for the current process context."""
        if self._initialized:
            return
        task = self.session.GetParameter("process_context")
        for row in self.session.plugins.maps(pids=task.pid):
            maps = row.get("Map")
            # Divider rows carry no Map object - skip them.
            if not maps:
                continue
            # Anonymous mappings are named after their start address.
            self.AddModule(MapModule(
                name=(self.NormalizeModuleName(maps.filename) or
                      "map_%#x" % maps.start),
                start=maps.start, end=maps.end, session=self.session))
        self._initialized = True
import psutil
from efilter.protocols import structured
from rekall_lib import utils
from rekall.plugins import core
from rekall.plugins.response import common
from rekall.plugins.overlays import basic
from rekall.plugins import yarascanner
class _LiveProcess(utils.SlottedObject):
    """An object to represent a live process.
    This is the live equivalent of _EPROCESS.
    """
    __slots__ = ("_proc", "_obj_profile", "session",
                 "start_time", "pid")
    # Live objects have no memory address - mimic an offset of 0.
    obj_offset = 0

    def __init__(self, proc, session=None):
        """Construct a representation of the live process.
        Args:
          proc: The psutil.Process instance.
        """
        # Hold on to the original psutil object.
        self._proc = proc
        self._obj_profile = None
        self.session = session
        super(_LiveProcess, self).__init__()
        # Wrap the float creation time in rekall's timestamp type.
        self.start_time = basic.UnixTimeStamp(
            name="create_time", value=self.create_time, session=self.session)

    @utils.safe_property
    def obj_profile(self):
        # Delay creation of the profile because it needs to look in the
        # environment which is slow.
        if self._obj_profile is None:
            self._obj_profile = common.IRProfile(
                session=self.session, proc=self)
        return self._obj_profile

    def __int__(self):
        # A live process is identified by its pid.
        return self.pid

    def _get_field(self, field_name):
        """Fetch a psutil attribute, calling it first if it is a method."""
        try:
            result = getattr(self._proc, field_name)
            if callable(result):
                result = result()
            return result
        except psutil.Error:
            # Some processes do not have environ defined.
            if field_name == "environ":
                return {}
            return None
        except AttributeError:
            # Field not supported by psutil on this platform.
            return None

    def __format__(self, formatspec):
        """Support the format() protocol."""
        if not formatspec:
            formatspec = "s"
        # Numeric format specs format the pid.
        if formatspec[-1] in "xdXD":
            return format(int(self), formatspec)
        return object.__format__(self, formatspec)

    def __repr__(self):
        return "<Live Process pid=%s>" % self.pid

    def get_process_address_space(self):
        """Return an address space for reading this process' memory."""
        return common.IRProcessAddressSpace(self.pid, session=self.session)

    def as_dict(self):
        try:
            return self._proc.as_dict()
        except Exception:
            # This can happen if the process no longer exists.
            return {}
# Automatically add accessors for psutil fields.
psutil_fields = ['cmdline', 'connections', 'cpu_affinity',
                 'cpu_percent', 'cpu_times', 'create_time',
                 'cwd', 'environ', 'exe', 'gids', 'io_counters',
                 'ionice', 'memory_full_info', 'memory_info',
                 'memory_info_ex', 'memory_maps', 'memory_percent',
                 'name', 'nice', 'num_ctx_switches', 'num_fds',
                 'num_threads', 'open_files', 'pid', 'ppid',
                 'status', 'terminal', 'threads', 'uids', 'username',
                 'num_handles']
# Generate accessors for psutil derived properties. The field=field
# default argument binds the loop variable at definition time so each
# property reads its own field.
properties = dict(__slots__=())
for field in psutil_fields:
    properties[field] = property(
        lambda self, field=field: self._get_field(field))
# Build the concrete LiveProcess type with one property per psutil field.
LiveProcess = type("LiveProcess", (_LiveProcess, ), properties)
# Teach EFILTER how to resolve attributes on LiveProcess objects.
structured.IStructured.implement(
    for_type=LiveProcess,
    implementations={
        structured.resolve: lambda d, m: getattr(d, m, None),
        structured.getmembers_runtime: lambda d: set(psutil_fields + list(d.keys())),
    }
)
class APIProcessFilter(common.AbstractAPICommandPlugin):
    """A live process filter using the system APIs."""
    __abstract = True
    __args = [
        dict(name="pids", positional=True, type="ArrayIntParser", default=[],
             help="One or more pids of processes to select."),
        dict(name="proc_regex", default=None, type="RegEx",
             help="A regex to select a process by name."),
    ]

    @utils.safe_property
    def filtering_requested(self):
        # True if the user asked to narrow the process list at all.
        return (self.plugin_args.pids or self.plugin_args.proc_regex)

    def filter_processes(self):
        """Filters eprocess list using pids lists."""
        for proc in self.list_process():
            # With no filters, every process is emitted.
            if not self.filtering_requested:
                yield proc
            else:
                # Select either by explicit pid or by name regex.
                if int(proc.pid) in self.plugin_args.pids:
                    yield proc
                elif (self.plugin_args.proc_regex and
                      self.plugin_args.proc_regex.match(
                          utils.SmartUnicode(proc.name))):
                    yield proc

    def list_process(self):
        """Return a LiveProcess wrapper for every running process."""
        result = [LiveProcess(x, session=self.session)
                  for x in psutil.process_iter()]
        return result
class APILsof(APIProcessFilter):
    """A plugin which lists all open files."""
    name = "lsof"
    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="proc", hidden=True),
        dict(name="file", hidden=True),
        dict(name="name", width=30),
        dict(name="pid", width=6, align="r"),
        dict(name="user", width=8),
        dict(name="fd", width=4),
        dict(name="mode", width=4),
        dict(name="offset", width=12),
        dict(name="node", width=8),
        dict(name="path"),
    ]

    def collect(self):
        """Yield a divider per process, then a row per open file."""
        for proc in self.filter_processes():
            yield dict(divider=proc)
            # open_files may be None when access is denied.
            for fd in (proc.open_files or []):
                yield dict(file=fd, proc=proc,
                           name=proc.name,
                           pid=proc.pid,
                           user=proc.username,
                           fd=fd.fd,
                           offset=fd.position,
                           path=fd.path,
                           mode=fd.mode)
class APIPslist(APIProcessFilter):
    """A live pslist plugin using the APIs."""
    name = "pslist"
    table_header = [
        dict(name="proc", hidden=True),
        dict(name="Name", width=30),
        dict(name="pid", width=6, align="r"),
        dict(name="ppid", width=6, align="r"),
        dict(name="Thds", width=6, align="r"),
        dict(name="Hnds", width=8, align="r"),
        dict(name="wow64", width=6),
        dict(name="start", width=24),
        dict(name="binary"),
    ]

    def column_types(self):
        # Use the current process as an exemplar for the column types.
        return self._row(LiveProcess(psutil.Process(), session=self.session))

    def is_wow64(self, proc):
        """Determine if the proc is Wow64."""
        # Not the most accurate method but very fast.
        # NOTE(review): relies on Windows environment variables; on other
        # platforms both lookups miss and this returns False.
        return (proc.environ.get("PROCESSOR_ARCHITECTURE") == 'x86' and
                proc.environ.get("PROCESSOR_ARCHITEW6432") == 'AMD64')

    def _row(self, proc):
        """Build one output row for proc."""
        return dict(proc=proc,
                    Name=proc.name,
                    pid=proc.pid,
                    ppid=proc.ppid,
                    Thds=proc.num_threads,
                    Hnds=proc.num_handles,
                    wow64=self.is_wow64(proc),
                    start=proc.start_time,
                    binary=proc.exe)

    def collect(self):
        for proc in self.filter_processes():
            yield self._row(proc)
class APISetProcessContext(core.SetProcessContextMixin,
                           APIProcessFilter):
    """A cc plugin for setting process context to live mode."""
    # All behavior comes from SetProcessContextMixin; this just binds it
    # to the live process filter.
    name = "cc"
class APIProcessScanner(APIProcessFilter):
    """Scanner for scanning processes using the ReadProcessMemory() API."""
    __abstract = True

    def generate_memory_ranges(self):
        """Yield scannable runs from each selected process' address space.

        Each run's data dict is annotated with the owning task and a
        human readable comment for reporting.
        """
        with self.session.plugins.cc() as cc:
            for task in self.filter_processes():
                comment = "%s (%s)" % (task.name, task.pid)
                # Switch the session context so default_address_space
                # points at this task's memory.
                cc.SwitchProcessContext(task)
                process_address_space = self.session.GetParameter(
                    "default_address_space")
                for _, _, run in process_address_space.runs:
                    vad = run.data["vad"]
                    self.session.logging.info(
                        "Scanning %s (%s) in: %s [%#x-%#x]",
                        task.name, task.pid, vad.filename or "",
                        vad.start, vad.end)
                    run.data["comment"] = comment
                    run.data["task"] = task
                    yield run
class ProcessYaraScanner(yarascanner.YaraScanMixin, APIProcessScanner):
    """Yara scan process memory using the ReadProcessMemory() API."""
    # The scan logic lives in YaraScanMixin; the memory ranges come from
    # APIProcessScanner.generate_memory_ranges().
    name = "yarascan"
""" A Hiber file Address Space """
from builtins import range
from rekall import addrspace
from rekall import obj
from rekall_lib import utils
from rekall.plugins.addrspaces import xpress
import struct
# pylint: disable=C0111
# Size of a memory page and the corresponding shift (4096 == 1 << 12).
PAGE_SIZE = 0x1000
page_shift = 12
class HibernationSupport(obj.ProfileModification):
    """Support hibernation file structures for different versions of windows."""

    # Baseline (32 bit) layouts of the memory range arrays and of the
    # xpress compressed block header. Per-version variants below override
    # the field offsets that moved between builds.
    vtypes = {
        '_PO_MEMORY_RANGE_ARRAY_LINK' : [ 0x10, {
            'NextTable' : [ 0x4, ['unsigned long']],
            'EntryCount' : [ 0xc, ['unsigned long']],
            } ],
        '_PO_MEMORY_RANGE_ARRAY_RANGE' : [ 0x10, {
            'StartPage' : [ 0x4, ['unsigned long']],
            'EndPage' : [ 0x8, ['unsigned long']],
            } ],
        '_PO_MEMORY_RANGE_ARRAY' : [ 0x20, {
            'MemArrayLink' : [ 0x0, ['_PO_MEMORY_RANGE_ARRAY_LINK']],
            'RangeTable': [ 0x10, ['array', lambda x: x.MemArrayLink.EntryCount,
                                   ['_PO_MEMORY_RANGE_ARRAY_RANGE']]],
            } ],
        # Only bytes 9-11 of the header are used: they encode the
        # compressed block size (see get_xpress_block_size below).
        '_IMAGE_XPRESS_HEADER' : [ 0x20 , {
            'u09' : [ 0x9, ['unsigned char']],
            'u0A' : [ 0xA, ['unsigned char']],
            'u0B' : [ 0xB, ['unsigned char']],
            } ]
        }

    # Vista SP0/SP1 (32 bit).
    # NOTE(review): this references x.Link rather than x.MemArrayLink as
    # the other variants do - confirm against the original profiles.
    vistasp01_vtypes = {
        '_PO_MEMORY_RANGE_ARRAY' : [ 0x20, {
            'RangeTable': [ 0x10, ['array', lambda x: x.Link.EntryCount,
                                   ['_PO_MEMORY_RANGE_ARRAY_RANGE']]],
            } ],
        }

    # Vista SP2 (32 bit).
    vistasp2_vtypes = {
        '_PO_MEMORY_RANGE_ARRAY_LINK' : [ 0x10, {
            'NextTable' : [ 0x4, ['unsigned long']],
            'EntryCount' : [ 0x8, ['unsigned long']],
            } ],
        '_PO_MEMORY_RANGE_ARRAY_RANGE' : [ 0x8, {
            'StartPage' : [ 0x0, ['unsigned long']],
            'EndPage' : [ 0x4, ['unsigned long']],
            } ],
        '_PO_MEMORY_RANGE_ARRAY' : [ 0x20, {
            'MemArrayLink' : [ 0x0, ['_PO_MEMORY_RANGE_ARRAY_LINK']],
            'RangeTable': [ 0xc, ['array', lambda x: x.MemArrayLink.EntryCount,
                                  ['_PO_MEMORY_RANGE_ARRAY_RANGE']]],
            } ],
        }

    # Windows 7 (32 bit).
    win7_vtypes = {
        '_PO_MEMORY_RANGE_ARRAY_LINK' : [ 0x10, {
            'NextTable' : [ 0x0, ['unsigned long']],
            'EntryCount' : [ 0x4, ['unsigned long']],
            } ],
        '_PO_MEMORY_RANGE_ARRAY_RANGE' : [ 0x8, {
            'StartPage' : [ 0x0, ['unsigned long']],
            'EndPage' : [ 0x4, ['unsigned long']],
            } ],
        '_PO_MEMORY_RANGE_ARRAY' : [ 0x20, {
            'MemArrayLink' : [ 0x0, ['_PO_MEMORY_RANGE_ARRAY_LINK']],
            'RangeTable': [ 0x8, ['array', lambda x: x.MemArrayLink.EntryCount,
                                  ['_PO_MEMORY_RANGE_ARRAY_RANGE']]],
            } ],
        }

    # Windows 7 (64 bit).
    win7_x64_vtypes = {
        '_PO_MEMORY_RANGE_ARRAY_LINK' : [ 0x10, {
            'NextTable' : [ 0x0, ['unsigned long long']],
            'EntryCount' : [ 0x8, ['unsigned long']],
            } ],
        '_PO_MEMORY_RANGE_ARRAY_RANGE' : [ 0x10, {
            'StartPage' : [ 0x0, ['unsigned long long']],
            'EndPage' : [ 0x8, ['unsigned long long']],
            } ],
        '_PO_MEMORY_RANGE_ARRAY' : [ 0x20, {
            'MemArrayLink' : [ 0x0, ['_PO_MEMORY_RANGE_ARRAY_LINK']],
            'RangeTable': [ 0x10, ['array', lambda x: x.MemArrayLink.EntryCount,
                                   ['_PO_MEMORY_RANGE_ARRAY_RANGE']]],
            } ],
        }

    # Windows 2003 / Vista SP0-SP1 (64 bit).
    x64_vtypes = {
        '_PO_MEMORY_RANGE_ARRAY_LINK' : [ 0x20, {
            'NextTable' : [ 0x8, ['unsigned long long']],
            'EntryCount' : [ 0x14, ['unsigned long']],
            } ],
        '_PO_MEMORY_RANGE_ARRAY_RANGE' : [ 0x20, {
            'StartPage' : [ 0x8, ['unsigned long long']],
            'EndPage' : [ 0x10, ['unsigned long long']],
            } ],
        '_PO_MEMORY_RANGE_ARRAY' : [ 0x40, {
            'MemArrayLink' : [ 0x0, ['_PO_MEMORY_RANGE_ARRAY_LINK']],
            'RangeTable': [ 0x20, ['array', lambda x: x.MemArrayLink.EntryCount,
                                   ['_PO_MEMORY_RANGE_ARRAY_RANGE']]],
            } ],
        }

    # Vista SP2 (64 bit).
    vistaSP2_x64_vtypes = {
        '_PO_MEMORY_RANGE_ARRAY_LINK' : [ 0x18, {
            'NextTable' : [ 0x8, ['unsigned long long']],
            'EntryCount' : [ 0x10, ['unsigned long']],
            } ],
        '_PO_MEMORY_RANGE_ARRAY_RANGE' : [ 0x10, {
            'StartPage' : [ 0x0, ['unsigned long long']],
            'EndPage' : [ 0x8, ['unsigned long long']],
            } ],
        '_PO_MEMORY_RANGE_ARRAY' : [ 0x28, {
            'MemArrayLink' : [ 0x0, ['_PO_MEMORY_RANGE_ARRAY_LINK']],
            'RangeTable': [ 0x18, ['array', lambda x: x.MemArrayLink.EntryCount,
                                   ['_PO_MEMORY_RANGE_ARRAY_RANGE']]],
            } ],
        }

    @classmethod
    def modify(cls, profile):
        """Apply the version-specific overlays and constants to profile.

        HibrProcPage is the page index of the saved _KPROCESSOR_STATE;
        HibrEntryCount is the number of entries in a full range array
        table (both are consumed by WindowsHiberFileSpace).
        """
        profile.add_overlay(cls.vtypes)
        # Defaults; overridden per OS version below.
        profile.add_constants(HibrProcPage=0x2, HibrEntryCount=0xff)
        major = profile.metadata("major")
        minor = profile.metadata("minor")
        build = profile.metadata("build")
        architecture = profile.metadata("arch")
        if architecture == "I386":
            if major == 6 and minor == 0:
                # Vista betas / SP0 / SP1 / SP2.
                if build < 6000:
                    profile.add_overlay(cls.vistasp01_vtypes)
                elif build == 6000:
                    profile.add_overlay(cls.vistasp01_vtypes)
                    profile.add_constants(HibrProcPage=0x4, HibrEntryCount=0xff)
                elif build == 6001:
                    profile.add_overlay(cls.vistasp01_vtypes)
                    profile.add_constants(HibrProcPage=0x1, HibrEntryCount=0xff)
                elif build == 6002:
                    profile.add_constants(HibrProcPage=0x1, HibrEntryCount=0x1fe)
                    profile.add_overlay(cls.vistasp2_vtypes)
            elif major == 6 and minor == 1:
                # Windows 7 / 2008 R2.
                profile.add_constants(HibrProcPage=0x1, HibrEntryCount=0x1ff)
                if build <= 7601:
                    profile.add_overlay(cls.win7_vtypes)
        elif architecture == "AMD64":
            # Windows 2003
            if major == 5 and minor == 2 and build <= 3790:
                profile.add_constants(HibrProcPage=0x2, HibrEntryCount=0x7f)
                profile.add_overlay(cls.x64_vtypes)
            elif major == 6 and minor == 0:
                # Vista x64 variants.
                if build <= 6000:
                    profile.add_constants(HibrProcPage=0x4, HibrEntryCount=0x7f)
                    profile.add_overlay(cls.x64_vtypes)
                elif build == 6001:
                    profile.add_constants(HibrProcPage=0x1, HibrEntryCount=0x7f)
                    profile.add_overlay(cls.x64_vtypes)
                elif build == 6002:
                    profile.add_constants(HibrProcPage=0x1, HibrEntryCount=0xfe)
                    profile.add_overlay(cls.vistaSP2_x64_vtypes)
            elif major == 6 and minor == 1:
                # Windows 7 x64.
                profile.add_constants(HibrProcPage=0x1, HibrEntryCount=0xff)
                if build <= 7601:
                    profile.add_overlay(cls.win7_x64_vtypes)
class WindowsHiberFileSpace(addrspace.BaseAddressSpace):
""" This is a hibernate address space for windows hibernation files.
In order for us to work we need to:
1) have a valid baseAddressSpace
2) the first 4 bytes must be 'hibr'
"""
__name = "hiber"
__image = True
order = 100
def __init__(self, **kwargs):
self.as_assert(self.base == None, "No base Address Space")
self.as_assert(self.base.read(0, 4).lower() in ["hibr", "wake"])
self.runs = []
self.PageDict = {}
self.HighestPage = 0
self.PageIndex = 0
self.AddressList = []
self.LookupCache = {}
self.PageCache = utils.FastStore(500)
self.MemRangeCnt = 0
self.offset = 0
self.entry_count = 0xFF
# Modify the profile by adding version specific definitions.
self.profile = HibernationSupport(self.profile)
# Extract header information
self.as_assert(self.profile.has_type("PO_MEMORY_IMAGE"),
"PO_MEMORY_IMAGE is not available in profile")
self.header = self.profile.Object('PO_MEMORY_IMAGE', offset=0, vm=self.base)
self.entry_count = self.profile.get_constant("HibrEntryCount")
proc_page = self.profile.get_constant("HibrProcPage")
# Check it's definitely a hibernation file
self.as_assert(self._get_first_table_page() is not None,
"No xpress signature found")
# Extract processor state
self.ProcState = self.profile.Object(
"_KPROCESSOR_STATE", offset=proc_page * 4096, vm=base)
## This is a pointer to the page table - any ASs above us dont
## need to search for it.
self.dtb = self.ProcState.SpecialRegisters.Cr3.v()
# This is a lengthy process, it was cached, but it may be best to delay this
# until it's absolutely necessary and/or convert it into a generator...
self.build_page_cache()
super(WindowsHiberFileSpace, self).__init__(**kwargs)
def _get_first_table_page(self):
if self.header:
return self.header.FirstTablePage
for i in range(10):
if self.base.read(i * PAGE_SIZE, 8) == "\x81\x81xpress":
return i - 1
def build_page_cache(self):
XpressIndex = 0
XpressHeader = self.profile.Object("_IMAGE_XPRESS_HEADER",
offset=(self._get_first_table_page() + 1) * 4096,
vm=self.base)
XpressBlockSize = self.get_xpress_block_size(XpressHeader)
MemoryArrayOffset = self._get_first_table_page() * 4096
while MemoryArrayOffset:
MemoryArray = self.profile.Object(
'_PO_MEMORY_RANGE_ARRAY', MemoryArrayOffset, self.base)
EntryCount = MemoryArray.MemArrayLink.EntryCount.v()
for i in MemoryArray.RangeTable:
start = i.StartPage.v()
end = i.EndPage.v()
LocalPageCnt = end - start
if end > self.HighestPage:
self.HighestPage = end
self.AddressList.append((start * 0x1000, # virtual address
start * 0x1000, # physical address
LocalPageCnt * 0x1000))
for j in range(0, LocalPageCnt):
if (XpressIndex and ((XpressIndex % 0x10) == 0)):
XpressHeader, XpressBlockSize = \
self.next_xpress(XpressHeader, XpressBlockSize)
PageNumber = start + j
XpressPage = XpressIndex % 0x10
if XpressHeader.obj_offset not in self.PageDict:
self.PageDict[XpressHeader.obj_offset] = [
(PageNumber, XpressBlockSize, XpressPage)]
else:
self.PageDict[XpressHeader.obj_offset].append(
(PageNumber, XpressBlockSize, XpressPage))
## Update the lookup cache
self.LookupCache[PageNumber] = (
XpressHeader.obj_offset, XpressBlockSize, XpressPage)
self.PageIndex += 1
XpressIndex += 1
NextTable = MemoryArray.MemArrayLink.NextTable.v()
# This entry count (EntryCount) should probably be calculated
if (NextTable and (EntryCount == self.entry_count)):
MemoryArrayOffset = NextTable * 0x1000
self.MemRangeCnt += 1
XpressHeader, XpressBlockSize = self.next_xpress(
XpressHeader, XpressBlockSize)
# Make sure the xpress block is after the Memory Table
while (XpressHeader.obj_offset < MemoryArrayOffset):
XpressHeader, XpressBlockSize = self.next_xpress(
XpressHeader, 0)
XpressIndex = 0
else:
MemoryArrayOffset = 0
def convert_to_raw(self, ofile):
page_count = 0
for _i, xb in enumerate(self.PageDict.keys()):
size = self.PageDict[xb][0][1]
data_z = self.base.read(xb + 0x20, size)
if size == 0x10000:
data_uz = data_z
else:
data_uz = xpress.xpress_decode(data_z)
for page, size, offset in self.PageDict[xb]:
ofile.seek(page * 0x1000)
ofile.write(data_uz[offset * 0x1000:offset * 0x1000 + 0x1000])
page_count += 1
del data_z, data_uz
yield page_count
def next_xpress(self, XpressHeader, XpressBlockSize):
XpressHeaderOffset = int(XpressBlockSize) + XpressHeader.obj_offset + \
XpressHeader.size()
## We only search this far
BLOCKSIZE = 1024
original_offset = XpressHeaderOffset
while 1:
data = self.base.read(XpressHeaderOffset, BLOCKSIZE)
Magic_offset = data.find("\x81\x81xpress")
if Magic_offset >= 0:
XpressHeaderOffset += Magic_offset
break
else:
XpressHeaderOffset += len(data)
## Only search this far in advance
if XpressHeaderOffset - original_offset > 10240:
return None, None
XpressHeader = self.profile.Object(
"_IMAGE_XPRESS_HEADER", XpressHeaderOffset, self.base)
XpressBlockSize = self.get_xpress_block_size(XpressHeader)
return XpressHeader, XpressBlockSize
def get_xpress_block_size(self, xpress_header):
u0B = xpress_header.u0B.v() << 24
u0A = xpress_header.u0A.v() << 16
u09 = xpress_header.u09.v() << 8
Size = u0B + u0A + u09
Size = Size >> 10
Size = Size + 1
if ((Size % 8) == 0):
return Size
return (Size & ~7) + 8
def get_header(self):
return self.header
def get_base(self):
return self.base
def get_signature(self):
return self.header.Signature
def get_system_time(self):
return self.header.SystemTime
def is_paging(self):
return (self.ProcState.SpecialRegisters.Cr0.v() >> 31) & 1
def is_pse(self):
return (self.ProcState.SpecialRegisters.Cr4.v() >> 4) & 1
def is_pae(self):
return (self.ProcState.SpecialRegisters.Cr4.v() >> 5) & 1
def get_number_of_memranges(self):
return self.MemRangeCnt
def get_number_of_pages(self):
return self.PageIndex
def get_addr(self, addr):
page = addr >> page_shift
if page in self.LookupCache:
(hoffset, size, pageoffset) = self.LookupCache[page]
return hoffset, size, pageoffset
return None, None, None
def get_block_offset(self, _xb, addr):
page = addr >> page_shift
if page in self.LookupCache:
(_hoffset, _size, pageoffset) = self.LookupCache[page]
return pageoffset
return None
def is_valid_address(self, addr):
XpressHeaderOffset, _XpressBlockSize, _XpressPage = self.get_addr(addr)
return XpressHeaderOffset != None
def read_xpress(self, baddr, BlockSize):
data_uz = self.PageCache.Get(baddr)
if data_uz is None:
data_read = self.base.read(baddr, BlockSize)
if BlockSize == 0x10000:
data_uz = data_read
else:
data_uz = xpress.xpress_decode(data_read)
self.PageCache.Put(baddr, data_uz)
return data_uz
def fread(self, length):
data = self.read(self.offset, length)
self.offset += len(data)
return data
def _partial_read(self, addr, len):
""" A function which reads as much as possible from the current page.
May return a short read.
"""
## The offset within the page where we start
page_offset = (addr & 0x00000FFF)
## How much data can we satisfy?
available = min(PAGE_SIZE - page_offset, len)
ImageXpressHeader, BlockSize, XpressPage = self.get_addr(addr)
if not ImageXpressHeader:
return None
baddr = ImageXpressHeader + 0x20
data = self.read_xpress(baddr, BlockSize)
## Each block decompressed contains 2**page_shift pages. We
## need to know which page to use here.
offset = XpressPage * 0x1000 + page_offset
return data[offset:offset + available]
    def read(self, addr, length):
        """Read length bytes at addr, assembled from per-page partial reads.

        Returns a NoneObject when nothing at all could be read.
        """
        result = ''
        while length > 0:
            # _partial_read returns at most up to the end of the current
            # page; loop until satisfied or a hole is hit.
            data = self._partial_read(addr, length)
            if not data:
                break
            addr += len(data)
            length -= len(data)
            result += data
        if result == '':
            # NOTE(review): addr/length reported here are the post-loop
            # values; when the very first partial read fails they equal the
            # original arguments.
            result = obj.NoneObject("Unable to read data at %s for length %s." % (
                addr, length))
        return result
def read_long(self, addr):
_baseaddr = self.get_addr(addr)
string = self.read(addr, 4)
if not string:
return obj.NoneObject("Could not read long at %s" % addr)
(longval,) = struct.unpack('=I', string)
return longval
def get_available_pages(self):
page_list = []
for _i, xb in enumerate(self.PageDict.keys()):
for page, _size, _offset in self.PageDict[xb]:
page_list.append([page * 0x1000, page * 0x1000, 0x1000])
return page_list
def get_address_range(self):
""" This relates to the logical address range that is indexable """
size = self.HighestPage * 0x1000 + 0x1000
return [0, size]
def check_address_range(self, addr):
memrange = self.get_address_range()
if addr < memrange[0] or addr > memrange[1]:
raise IOError
def get_available_addresses(self):
""" This returns the ranges of valid addresses """
for i in self.AddressList:
yield i
def close(self):
self.base.close() | /rekall-core-1.7.2rc1.zip/rekall-core-1.7.2rc1/rekall/plugins/addrspaces/hibernate.py | 0.535341 | 0.369144 | hibernate.py | pypi |
import mmap
import os
from rekall import addrspace
class MmapFileAddressSpace(addrspace.BaseAddressSpace):
    """ This is an AS which uses an mmap of a file.

    For this AS to be instantiated, we need

    1) A valid config.LOCATION (starting with file://)

    2) no one else has picked the AS before us

    3) base == self (we dont operate on anyone else so we need to be
    right at the bottom of the AS stack.)
    """
    # We should be the AS of last resort but before the FileAddressSpace
    order = 110
    __image = True

    def __init__(self, filename=None, **kwargs):
        """Open and mmap the image file named in the session (or filename)."""
        super(MmapFileAddressSpace, self).__init__(**kwargs)
        self.as_assert(self.base is self, 'Must be first Address Space')

        path = self.session.GetParameter("filename") or filename
        self.as_assert(path and os.path.exists(path),
                       'Filename must be specified and exist')

        self.fname = self.name = os.path.abspath(path)
        self.mode = 'rb'
        if self.session.GetParameter("writable_physical_memory"):
            self.mode += '+'

        self.fhandle = open(self.fname, self.mode)
        self.fhandle.seek(0, 2)
        self.fsize = self.fhandle.tell()
        self.offset = 0

        # On 64 bit architectures we can just map the entire image into our
        # process. Its probably not worth the effort to make it work on 32 bit
        # systems, which should just fall back to the slightly slower
        # FileAddressSpace.
        try:
            self.map = mmap.mmap(self.fhandle.fileno(), self.fsize,
                                 access=mmap.ACCESS_READ)
        except Exception as e:
            raise addrspace.ASAssertionError("Unable to mmap: %s" % e)

    def read(self, addr, length):
        """Read length bytes at addr, zero padding any short read."""
        # mmap slicing yields bytes - the empty default must be bytes too,
        # otherwise the concatenation below mixes str and bytes on Python 3.
        result = b""
        if addr is not None:
            result = self.map[addr:addr + length]

        return result + addrspace.ZEROER.GetZeros(length - len(result))

    def get_mappings(self, start=0, end=2**64):
        """Yield a single run covering the whole mapped file."""
        yield addrspace.Run(start=0,
                            end=self.fsize, file_offset=0,
                            address_space=self.base)

    def is_valid_address(self, addr):
        if addr is None:
            return False
        # NOTE(review): this excludes the final byte of the file
        # (addr == fsize - 1). Possibly an off-by-one, but kept as-is to
        # preserve existing behavior - confirm against callers before
        # changing.
        return addr < self.fsize - 1

    def close(self):
        """Release the mapping and the underlying file handle."""
        self.map.close()
        self.fhandle.close()

    def write(self, addr, data):
        """Write data at addr. Returns the number of bytes written."""
        try:
            self.map[addr:addr + len(data)] = data
        except IOError:
            return 0

        return len(data)

    def __eq__(self, other):
        return (self.__class__ == other.__class__
                and self.fname == other.fname)
"""This is a windows specific address space."""
from builtins import zip
from builtins import range
from builtins import object
import os
import struct
import weakref
import pywintypes
import win32file
from rekall import addrspace
from rekall.plugins.addrspaces import standard
PMEM_MODE_IOSPACE = 0
PMEM_MODE_PHYSICAL = 1
PMEM_MODE_PTE = 2
PMEM_MODE_PTE_PCI = 3
def CTL_CODE(DeviceType, Function, Method, Access):
    """Build a windows IOCTL control code from its four component fields."""
    code = DeviceType << 16
    code |= Access << 14
    code |= Function << 2
    code |= Method
    return code
# IOCTLS for interacting with the driver.
INFO_IOCTRL = CTL_CODE(0x22, 0x103, 0, 3)
CTRL_IOCTRL = CTL_CODE(0x22, 0x101, 0, 3)
PAGE_SHIFT = 12
class Win32FileWrapper(object):
    """A simple wrapper that makes a win32 file handle look like an AS."""

    def __init__(self, fhandle, size=None):
        self.fhandle = fhandle
        self.size = size

    def read(self, offset, length):
        """Read length bytes at offset, returning zeros on any win32 error."""
        try:
            win32file.SetFilePointer(self.fhandle, offset, 0)
            _, data = win32file.ReadFile(self.fhandle, length)
        except Exception:
            data = addrspace.ZEROER.GetZeros(length)
        return data

    def write(self, offset, data):
        """Write data at offset. Returns the number of bytes written."""
        win32file.SetFilePointer(self.fhandle, offset, 0)
        # The WinPmem driver returns bytes_written == 0 always. This is
        # probably a bug in its write routine, so we ignore it here. If the
        # operation was successful we assume all bytes were written.
        err, _bytes_written = win32file.WriteFile(self.fhandle, data)
        return len(data) if err == 0 else 0

    def end(self):
        """The size of the wrapped file (None when unknown, e.g. a device)."""
        return self.size

    def close(self):
        win32file.CloseHandle(self.fhandle)
class Win32AddressSpace(addrspace.CachingAddressSpaceMixIn,
                        addrspace.RunBasedAddressSpace):
    """ This is a direct file AS for use in windows.

    In windows, in order to open raw devices we need to use the win32 apis. This
    address space allows us to open the raw device as exported by e.g. the
    winpmem driver.
    """
    # Reads are cached in pages of this size by CachingAddressSpaceMixIn.
    CHUNK_SIZE = 0x1000
    def _OpenFileForRead(self, path):
        """Open path read-only via win32. Raises IOError on failure."""
        try:
            fhandle = self.fhandle = win32file.CreateFile(
                path,
                win32file.GENERIC_READ,
                win32file.FILE_SHARE_READ | win32file.FILE_SHARE_WRITE,
                None,
                win32file.OPEN_EXISTING,
                win32file.FILE_ATTRIBUTE_NORMAL,
                None)
            # Close the win32 handle when this address space is garbage
            # collected: the weakref callback fires on collection of self and
            # captures fhandle in its closure.
            self._closer = weakref.ref(
                self, lambda x: win32file.CloseHandle(fhandle))
            self.write_enabled = False
            return fhandle
        except pywintypes.error as e:
            raise IOError("Unable to open %s: %s" % (path, e))
    def close(self):
        """Close every address space backing our runs."""
        for run in self.get_mappings():
            run.address_space.close()
class Win32FileAddressSpace(Win32AddressSpace):
    """A win32 based address space over a regular file or raw device."""
    __name = "win32file"

    # We should be the AS of last resort but in front of the non win32 version.
    order = standard.FileAddressSpace.order - 5
    __image = True
    def __init__(self, base=None, filename=None, **kwargs):
        """Open the named file (or session filename) as a single run."""
        self.as_assert(base == None, 'Must be first Address Space')
        super(Win32FileAddressSpace, self).__init__(**kwargs)
        path = filename or self.session.GetParameter("filename")
        self.as_assert(path, "Filename must be specified in session (e.g. "
                       "session.SetParameter('filename', 'MyFile.raw').")
        self.fname = path
        # The file is just a regular file, we open for reading.
        fhandle = self._OpenFileForRead(path)
        # If we can not get the file size it means this is not a regular file -
        # maybe a device.
        self.fhandle_as = Win32FileWrapper(fhandle)
        try:
            file_size = win32file.GetFileSize(fhandle)
            self.add_run(0, 0, file_size, self.fhandle_as)
        except pywintypes.error:
            # This may be a device, we just read the whole space.
            self.add_run(0, 0, 2**63, self.fhandle_as)
            self.volatile = True
class WinPmemAddressSpace(Win32AddressSpace):
    """An address space specifically designed for communicating with WinPmem."""
    __name = "winpmem"
    __image = True

    # This is a live address space.
    volatile = True

    # We must be in front of the regular file based AS.
    order = Win32FileAddressSpace.order - 5

    # This AS can map files into itself.
    __can_map_files = True

    def __init__(self, base=None, filename=None, session=None, **kwargs):
        """Open the winpmem device and parse its physical memory runs."""
        self.as_assert(base == None, 'Must be first Address Space')
        path = filename or session.GetParameter("filename")
        self.as_assert(path.startswith("\\\\"),
                       "Filename does not look like a device.")

        super(WinPmemAddressSpace, self).__init__(
            filename=filename, session=session, **kwargs)

        try:
            # First open for write in case the driver is in write mode.
            fhandle = self._OpenFileForWrite(path)
        except IOError:
            fhandle = self._OpenFileForRead(path)

        self.fhandle_as = Win32FileWrapper(fhandle)

        try:
            self.ParseMemoryRuns(fhandle)
        except Exception:
            raise addrspace.ASAssertionError(
                "This is not a WinPmem based driver.")

        # Key: lower cased filename, value: offset where it is mapped.
        self.mapped_files = {}

        # Key: drive letter, value: NTFS session used for raw file access.
        self.filesystems = {}

    def _OpenFileForWrite(self, path):
        """Open path read-write via win32. Raises IOError on failure."""
        try:
            fhandle = self.fhandle = win32file.CreateFile(
                path,
                win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                win32file.FILE_SHARE_READ | win32file.FILE_SHARE_WRITE,
                None,
                win32file.OPEN_EXISTING,
                win32file.FILE_ATTRIBUTE_NORMAL,
                None)
            self.write_enabled = True
            # Close the handle when this address space is garbage collected.
            self._closer = weakref.ref(
                self, lambda x: win32file.CloseHandle(fhandle))
            return fhandle
        except pywintypes.error as e:
            raise IOError("Unable to open %s: %s" % (path, e))

    # Layout of the fixed part of the driver's info structure: one 64 bit
    # value per field name, in this exact order.
    FIELDS = (["CR3", "NtBuildNumber", "KernBase", "KDBG"] +
              ["KPCR%02d" % i for i in range(32)] +
              ["PfnDataBase", "PsLoadedModuleList", "PsActiveProcessHead"] +
              ["Padding%s" % i for i in range(0xff)] +
              ["NumberOfRuns"])

    def ParseMemoryRuns(self, fhandle):
        """Query the driver for its parameters and physical memory layout."""
        # Set acquisition mode. If the driver does not support this mode it
        # will just fall back to the default.
        win32file.DeviceIoControl(
            fhandle, CTRL_IOCTRL,
            struct.pack("I", PMEM_MODE_PTE), 4, None)

        result = win32file.DeviceIoControl(
            fhandle, INFO_IOCTRL, b"", 102400, None)

        fmt_string = "Q" * len(self.FIELDS)
        self.memory_parameters = dict(zip(self.FIELDS, struct.unpack_from(
            fmt_string, result)))

        # The runs (start, length pairs of 64 bit ints) follow the fixed
        # fields.
        offset = struct.calcsize(fmt_string)
        for x in range(self.memory_parameters["NumberOfRuns"]):
            start, length = struct.unpack_from("QQ", result, x * 16 + offset)
            self.add_run(start, start, length, self.fhandle_as)

    def ConfigureSession(self, session):
        """Seed the given session with parameters obtained from the driver."""
        dtb = self.memory_parameters["CR3"]
        session.SetCache("dtb", int(dtb), volatile=False)

        # Get the kernel base directly from the winpmem driver if that is
        # available.
        kernel_base = self.memory_parameters["KernBase"]
        if kernel_base > 0:
            # Consistency fix: use the session parameter (as above for dtb)
            # rather than self.session.
            session.SetCache("kernel_base", kernel_base, volatile=False)

    def _map_raw_filename(self, filename):
        """Open filename through raw NTFS parsing of its drive.

        Returns an address space over the file's $DATA stream, or None.
        """
        # Parsing the NTFS can be expensive so we only do it when the user
        # specifically wanted to be thorough.
        if self.session.GetParameter("performance") != "thorough":
            return

        drive, base_filename = os.path.splitdrive(filename)
        if not drive:
            return

        try:
            ntfs_session = self.filesystems[drive]
        except KeyError:
            ntfs_session = self.filesystems[drive] = self.session.add_session(
                filename=r"\\.\%s" % drive, verbose=True, autodetect=[],
                profile="ntfs")

        # Stat the MFT inode (MFT 2).
        mft_stat = ntfs_session.plugins.istat(2)

        # Lookup the mft entry by filename.
        mft_entry = mft_stat.ntfs.MFTEntryByName(base_filename)

        # Open the $DATA stream
        return mft_entry.open_file()

    def get_file_address_space(self, filename):
        """Return an address space for filename."""
        try:
            # Try to read the file with OS APIs.
            file_as = Win32FileAddressSpace(filename=filename,
                                            session=self.session)
            return file_as
        except IOError:
            try:
                # Try to read the file with raw access.
                file_as = self._map_raw_filename(filename)
                return file_as
            except IOError:
                # Cant read this file - no mapping available.
                return

    def get_mapped_offset(self, filename, file_offset, length=None):
        """Map filename into this address space.

        Returns the address in this space corresponding to file_offset
        within the mapped file, or None when the file cannot be mapped.
        """
        # Normalize filename for case insenstive comparisons.
        filename = filename.lower()
        mapped_offset = self.mapped_files.get(filename)
        if mapped_offset is None:
            file_as = self.get_file_address_space(filename)
            if not file_as:
                return

            # Add a guard page and align.
            mapped_offset = self.mapped_files[filename] = (
                (length or self.end()) + 0x10000) & 0xFFFFFFFFFFFFF000
            self.add_run(mapped_offset, 0, file_as.end(), file_as)

        if mapped_offset is not None:
            return mapped_offset + file_offset
from builtins import str
from builtins import object
import logging
import re
import os
from pyaff4 import data_store
try:
from pyaff4 import aff4_cloud
except ImportError:
aff4_cloud = None
from pyaff4 import aff4_directory
from pyaff4 import zip
from pyaff4 import lexicon
from pyaff4 import rdfvalue
from pyaff4 import plugins # pylint: disable=unused-import
from rekall import addrspace
from rekall import cache
from rekall_lib import yaml_utils
from rekall_lib import utils
from rekall.plugins.addrspaces import standard
# Control the logging level for the pyaff4 library logger.
LOGGER = logging.getLogger("pyaff4")
LOGGER.setLevel(logging.ERROR)
class AFF4StreamWrapper(object):
    """Adapt a pyaff4 stream to the minimal read/end address space API."""

    def __init__(self, stream):
        self.stream = stream

    def read(self, offset, length):
        """Seek to offset and return up to length bytes from the stream."""
        stream = self.stream
        stream.seek(offset)
        return stream.read(length)

    def end(self):
        """The total size of the wrapped stream."""
        return self.stream.Size()

    def __str__(self):
        return utils.SmartUnicode(self.stream.urn)
class AFF4AddressSpace(addrspace.CachingAddressSpaceMixIn,
                       addrspace.RunBasedAddressSpace):
    """Handle AFF4Map or AFF4Image type streams.

    Since AFF4 volumes may contain multiple streams, we allow the stream to be
    specified inside the volume path. For example suppose the volume located at:

    /home/mic/images/myimage.aff4

    Contains a stream called PhysicalMemory, then we can specify the filename
    as:

    /home/mic/images/myimage.aff4/PhysicalMemory

    If we just specified the path to the volume, then this address space will
    pick the first AFF4 stream which has an aff4:category of
    lexicon.AFF4_MEMORY_PHYSICAL.

    So if you have more than one physical memory stream in the same volume, you
    will need to specify the full path to the stream within the volume.
    """
    __name = "aff4"
    __image = True

    # This AS can map files into itself.
    __can_map_files = True

    order = standard.FileAddressSpace.order - 10
    def __init__(self, filename=None, **kwargs):
        """Locate the AFF4 volume for filename and load its memory stream."""
        super(AFF4AddressSpace, self).__init__(**kwargs)
        self.as_assert(self.base == None,
                       "Must stack on another address space")
        path = filename or self.session.GetParameter("filename")
        self.as_assert(path != None, "Filename must be specified")
        self.image = None
        self.resolver = data_store.MemoryDataStore()
        # If we have a cache directory, configure AFF4 to use it.
        try:
            cache_dir = cache.GetCacheDir(self.session)
            if cache_dir:
                self.resolver.Set(lexicon.AFF4_CONFIG_CACHE_DIR,
                                  lexicon.AFF4_FILE_NAME,
                                  rdfvalue.XSDString(
                                      os.path.join(cache_dir, "aff4_cache")))
        except IOError:
            pass
        # A map between the filename and the offset it is mapped into the
        # address space.
        self.mapped_files = {}
        try:
            volume_path, stream_path = self._LocateAFF4Volume(path)
        except IOError as e:
            self.session.logging.debug("Unable to open AFF4 image %s", e)
            raise addrspace.ASAssertionError("Unable to open AFF4 volume")
        # filename is a volume, and there is no stream specified, just autoload
        # the stream if possible.
        if not stream_path:
            try:
                self._AutoLoadAFF4Volume(volume_path)
                return
            except IOError as e:
                raise addrspace.ASAssertionError(
                    "Unable to open AFF4 volume: %s" % e)
        # If the user asked for a specific stream just load that one. Note that
        # you can still load the pagefile manually using the --pagefile
        # parameter.
        try:
            image_urn = volume_path.Append(stream_path)
            self._LoadMemoryImage(image_urn)
        except IOError as e:
            raise addrspace.ASAssertionError(
                "Unable to open AFF4 stream %s: %s" % (
                    stream_path, e))
    def _LocateAFF4Volume(self, filename):
        """Split filename into (volume_urn, stream_path_within_volume).

        Walks path components from the right until a file or directory that
        actually exists is found; the remainder is treated as the stream
        name inside the volume. Raises IOError when nothing is found.
        """
        stream_name = []
        path_components = list(filename.split(os.sep))
        while path_components:
            volume_path = os.sep.join(path_components)
            volume_urn = rdfvalue.URN.NewURNFromFilename(volume_path)
            volume_urn_parts = volume_urn.Parse()
            if volume_urn_parts.scheme == "file":
                if os.path.isfile(volume_path):
                    with zip.ZipFile.NewZipFile(
                            self.resolver, volume_urn) as volume:
                        if stream_name:
                            return volume.urn, os.sep.join(stream_name)
                        return volume.urn, None
                elif os.path.isdir(volume_path):
                    with aff4_directory.AFF4Directory.NewAFF4Directory(
                            self.resolver, volume_urn) as volume:
                        if stream_name:
                            return volume.urn, os.sep.join(stream_name)
                        return volume.urn, None
                else:
                    raise IOError("Not found: %s" % volume_urn)
            elif volume_urn_parts.scheme == "gs" and aff4_cloud:
                # Google Cloud Storage backed volume.
                with aff4_cloud.AFF4GStore.NewAFF4GStore(
                        self.resolver, volume_urn) as volume:
                    if stream_name:
                        return volume.urn, os.sep.join(stream_name)
                    return volume.urn, None
            # File does not exist - maybe the path stem points at a stream in
            # the image.
            else:
                stream_name.insert(0, path_components.pop(-1))
        raise IOError("Not found")
    def _AutoLoadAFF4Volume(self, path):
        """Open the volume at path and load its physical memory stream.

        Also builds self.filenames: a map from original (lower cased)
        filenames to the subjects stored in the volume.
        """
        with self.resolver.AFF4FactoryOpen(path) as volume:
            self.volume_urn = volume.urn
            # We are searching for images with the physical memory category.
            for subject in self.resolver.QueryPredicateObject(
                    lexicon.AFF4_CATEGORY, lexicon.AFF4_MEMORY_PHYSICAL):
                self._LoadMemoryImage(subject)
                break
        self.as_assert(self.image is not None,
                       "No physical memory categories found.")
        self.filenames = {}
        # Newer AFF4 images should have the AFF4_STREAM_ORIGINAL_FILENAME
        # attribute set.
        for (subject, _, value) in self.resolver.QueryPredicate(
                lexicon.AFF4_STREAM_ORIGINAL_FILENAME):
            # Normalize the filename for case insensitive filesysyems.
            self.filenames[utils.SmartUnicode(value).lower()] = subject
        # TODO: Deprecate this guessing once all images have the
        # AFF4_STREAM_ORIGINAL_FILENAME attribute.
        for subject in self.resolver.QuerySubject(re.compile(b".")):
            relative_name = self.volume_urn.RelativePath(subject)
            if relative_name:
                filename = self._normalize_filename(relative_name)
                self.filenames[filename] = subject
    def _normalize_filename(self, filename):
        """Normalize the filename based on the source OS."""
        m = re.match(r"/?([a-zA-Z]:[/\\].+)", filename)
        if m:
            # This is a windows filename.
            filename = m.group(1).replace("/", "\\")
            return filename.lower()
        return filename
    def _LoadMemoryImage(self, image_urn):
        """Open image_urn and add its ranges as runs of this address space."""
        aff4_stream = self.resolver.AFF4FactoryOpen(image_urn)
        self.image = AFF4StreamWrapper(aff4_stream)
        # Add the ranges if this is a map.
        try:
            for map_range in aff4_stream.GetRanges():
                self.add_run(map_range.map_offset,
                             map_range.map_offset,
                             map_range.length,
                             self.image)
        except AttributeError:
            # Not a map stream - treat it as one contiguous image.
            self.add_run(0, 0, aff4_stream.Size(), self.image)
        self.session.logging.info("Added %s as physical memory", image_urn)
    def ConfigureSession(self, session):
        """Seed the session from metadata stored next to the memory stream."""
        self._parse_physical_memory_metadata(session, self.image.stream.urn)
    def file_mapping_offset(self, filename):
        """Returns the offset where the filename should be mapped.

        This function manages the session cache. By storing the file mappings in
        the session cache we can guarantee repeatable mappings.
        """
        mapped_files = self.session.GetParameter("file_mappings", {})
        if filename in mapped_files:
            return utils.CaseInsensitiveDictLookup(
                filename, mapped_files)
        # Give a bit of space for the mapping and page align it.
        mapped_offset = (self.end() + 0x10000) & 0xFFFFFFFFFFFFF000
        mapped_files[filename] = mapped_offset
        self.session.SetCache("file_mappings", mapped_files)
        return mapped_offset
    def get_file_address_space(self, filename):
        """Return an address space for filename."""
        subject = utils.CaseInsensitiveDictLookup(
            filename, self.filenames)
        if subject:
            return AFF4StreamWrapper(self.resolver.AFF4FactoryOpen(subject))
        return
    def get_mapped_offset(self, filename, file_offset=0):
        """Map the filename into the address space.

        If the filename is found in the AFF4 image, we return the offset in this
        address space corresponding to file_offset in the mapped file.

        If the file is not mapped, return None.
        """
        mapped_offset = None
        filename = self._normalize_filename(filename)
        mapped_offset = utils.CaseInsensitiveDictLookup(
            filename, self.mapped_files)
        if mapped_offset is None:
            # Try to map the file.
            subject = utils.CaseInsensitiveDictLookup(
                filename, self.filenames)
            # Fall back to looking up the sysnative path in case the
            # image was acquired by a 32 bit imager.
            if not subject:
                # The 32 bit WinPmem imager access native files via
                # SysNative but they are really located in System32.
                subject = utils.CaseInsensitiveDictLookup(
                    filename.replace("SysNative", "System32"),
                    self.filenames)
            if subject:
                stream = self.resolver.AFF4FactoryOpen(subject)
                mapped_offset = self.file_mapping_offset(filename)
                self.add_run(mapped_offset, 0, stream.Size(),
                             AFF4StreamWrapper(stream))
                self.session.logging.info(
                    "Mapped %s into address %#x", stream.urn, mapped_offset)
            else:
                # Cache failures too.
                mapped_offset = -1
        # Cache for next time.
        self.mapped_files[filename] = mapped_offset
        # -1 is the cached-failure sentinel, hence the > 0 test.
        if mapped_offset > 0:
            return mapped_offset + file_offset
    # (session parameter name, dotted key path in information.yaml)
    _parameter = [
        ("dtb", "Registers.CR3"),
        ("kernel_base", "KernBase"),
        ("vm_kernel_slide", "kaslr_slide")
    ]
    def _parse_physical_memory_metadata(self, session, image_urn):
        """Read information.yaml next to image_urn and fill the session."""
        try:
            with self.resolver.AFF4FactoryOpen(
                    image_urn.Append("information.yaml")) as fd:
                metadata = yaml_utils.decode(fd.read(10000000))
            for session_param, info_para in self._parameter:
                # Allow the user to override the AFF4 file.
                if session.HasParameter(session_param):
                    continue
                tmp = metadata
                value = None
                # Walk the dotted key path into the nested metadata dict.
                for key in info_para.split("."):
                    value = tmp.get(key)
                    if value is None:
                        break
                    tmp = value
                if value is not None:
                    session.SetCache(session_param, value, volatile=False)
        except IOError:
            session.logging.info(
                "AFF4 volume does not contain %s/information.yaml" % image_urn)
    def describe(self, address):
        """Return a human readable description of address."""
        start, _, run = self.runs.get_containing_range(address)
        if start is None:
            # For unmapped streams just say we have no idea.
            return u"%#x (Unmapped)" % address
        # For normal physical memory addresses just be concise.
        if run.address_space == self.image:
            return u"%#x" % address
        # For other mapped streams, just say which ones they are.
        return u"%#x @ %s (Mapped %#x)" % (
            address - start, run.address_space, address)
# pylint: disable=unused-import, wrong-import-order
# Add these so that pyinstaller builds these dependencies in.
import rdflib.plugins.memory
import rdflib.plugins.parsers.hturtle
import rdflib.plugins.parsers.notation3
import rdflib.plugins.parsers.nquads
import rdflib.plugins.parsers.nt
import rdflib.plugins.parsers.rdfxml
import rdflib.plugins.parsers.structureddata
import rdflib.plugins.parsers.trig
import rdflib.plugins.parsers.trix
import rdflib.plugins.serializers.n3
import rdflib.plugins.serializers.nquads
import rdflib.plugins.serializers.nt
import rdflib.plugins.serializers.rdfxml
import rdflib.plugins.serializers.trig
import rdflib.plugins.serializers.trix
import rdflib.plugins.serializers.turtle
import rdflib.plugins.sleepycat
import rdflib.plugins.sparql.processor
import rdflib.plugins.sparql.results.csvresults
import rdflib.plugins.sparql.results.jsonresults
import rdflib.plugins.sparql.results.tsvresults
import rdflib.plugins.sparql.results.txtresults
import rdflib.plugins.sparql.results.xmlresults
import rdflib.plugins.stores.auditable
import rdflib.plugins.stores.concurrent
import rdflib.plugins.stores.sparqlstore
"""An Address Space for processing ELF64 coredumps."""
# References:
# VirtualBox core format:
# http://www.virtualbox.org/manual/ch12.html#guestcoreformat
# ELF64 format: http://downloads.openwatcom.org/ftp/devel/docs/elf-64-gen.pdf
# Note that as of version 1.6.0 WinPmem also uses ELF64 as the default imaging
# format. Except that WinPmem stores image metadata in a YAML file stored in the
# image. This address space supports both formats.
import re
import os
import yaml
from rekall import addrspace
from rekall import constants
from rekall.plugins.addrspaces import standard
from rekall.plugins.overlays.linux import elf
from rekall_lib import utils
PT_PMEM_METADATA = 0x6d656d70 # Spells 'pmem'
def ParseIOMap(string):
    """Parse the text of /proc/iomem into {region name: [Run, ...]}.

    Each line has the form "start-end : name" with hex addresses. Raises
    IOError when a line does not match.
    """
    result = {}
    # Raw string: "\s" inside a normal string literal is a deprecated
    # escape sequence in modern Python.
    line_re = re.compile(r"([0-9a-f]+)-([0-9a-f]+)\s*:\s*(.+)")
    for line in string.splitlines():
        m = line_re.search(line)
        if m is None:
            raise IOError("Unable to parse iomap")
        # int(x, 16) parses bare hex digits - no "0x" prefix needed.
        result.setdefault(m.group(3), []).append(
            addrspace.Run(
                start=int(m.group(1), 16),
                end=int(m.group(2), 16)))
    return result
class Elf64CoreDump(addrspace.RunBasedAddressSpace):
    """This AS supports ELF64 coredump format, as used by VirtualBox."""
    order = 30

    __name = "elf64"
    __image = True

    def __init__(self, **kwargs):
        """Parse the ELF headers and map each PT_LOAD segment as a run."""
        super(Elf64CoreDump, self).__init__(**kwargs)

        # Check the file for sanity.
        self.check_file()

        self.offset = 0
        self.fname = ''
        self._metadata = {}

        # Now parse the ELF file.
        elf_profile = elf.ELFProfile(session=self.session)
        self.elf64_hdr = elf_profile.elf64_hdr(vm=self.base, offset=0)

        self.as_assert(self.elf64_hdr.e_type == "ET_CORE",
                       "Elf file is not a core file.")
        self.name = "%s|%s" % (self.__class__.__name__, self.base.name)

        # Iterate over all the program headers and map the runs.
        # NOTE(review): iterating e_phoff yields the program header array via
        # the profile's overlay magic - confirm against the elf overlay.
        for segment in self.elf64_hdr.e_phoff:
            if segment.p_type == "PT_LOAD":
                # Some load segments are empty.
                if (segment.p_filesz == 0 or
                        segment.p_filesz != segment.p_memsz):
                    continue

                # Add the run to the memory map.
                virtual_address = int(segment.p_paddr) or int(segment.p_vaddr)
                self.add_run(virtual_address,  # Virtual Addr
                             int(segment.p_offset), # File Addr
                             int(segment.p_memsz)) # Run end.

            elif segment.p_type == PT_PMEM_METADATA:
                # WinPmem images embed YAML metadata in a custom segment.
                self.LoadMetadata(segment.p_offset)

    def check_file(self):
        """Checks the base file handle for sanity."""
        self.as_assert(self.base,
                       "Must stack on another address space")

        ## Must start with the magic for elf
        self.as_assert((self.base.read(0, 4) == b"\177ELF"),
                       "Header signature invalid")

    def LoadMetadata(self, offset):
        """Load the WinPmem metadata from the elf file."""
        self.session.logging.error(
            "DEPRECATED Elf metadata found! "
            "This will not be supported in the next release.")
        try:
            data = utils.SmartUnicode(self.base.read(offset, 1024*1024))
            yaml_file = data.split('...\n')[0]

            metadata = yaml.safe_load(yaml_file)
        except (yaml.YAMLError, TypeError) as e:
            self.session.logging.error(
                "Invalid file metadata, skipping: %s" % e)
            return

        for session_param, metadata_key in (("dtb", "CR3"),
                                            ("kernel_base", "KernBase")):
            if metadata_key in metadata:
                self.session.SetParameter(
                    session_param, metadata[metadata_key])

        # Metadata sections may be chained; follow the previous one.
        previous_section = metadata.pop("PreviousHeader", None)
        if previous_section is not None:
            self.LoadMetadata(previous_section)

        pagefile_offset = metadata.get("PagefileOffset", None)
        pagefile_size = metadata.get("PagefileSize", None)

        if pagefile_offset is not None and pagefile_size is not None:
            self.LoadPageFile(pagefile_offset, pagefile_size)

        self._metadata.update(metadata)

    # Region of this address space occupied by the mapped pagefile.
    pagefile_offset = 0
    pagefile_end = 0

    def LoadPageFile(self, pagefile_offset, pagefile_size):
        """We map the page file into the physical address space.

        This allows us to treat all physical addresses equally - regardless if
        they come from the memory or the page file.
        """
        # Map the pagefile after the end of the physical address space.
        vaddr = self.end() + 0x10000

        self.session.logging.info(
            "Loading pagefile into physical offset %#08x", vaddr)

        # Map the pagefile into the
        self.add_run(vaddr, pagefile_offset, pagefile_size)

        # Remember the region for the pagefile.
        self.pagefile_offset = vaddr
        self.pagefile_end = vaddr + pagefile_size

    def describe(self, addr):
        """Describe addr, marking addresses inside the mapped pagefile."""
        if self.pagefile_offset <= addr <= self.pagefile_end:
            return "%#x@Pagefile" % (
                addr - self.pagefile_offset)

        return "%#x" % addr
class KCoreAddressSpace(Elf64CoreDump):
    """A Linux kernel's /proc/kcore file also maps the entire physical ram.

    http://lxr.free-electrons.com/source/Documentation/x86/x86_64/mm.txt

    ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all
    physical memory.

    In recent versions of Ubuntu the CONFIG_RANDOMIZE_MEMORY is
    enabled. This makes the ELF headers randomized and so we need to
    read /proc/iomap to work out the correct mapped range for physical
    memory mapping.
    """
    # We must run before the regular Elf64CoreDump address space in the voting
    # order.
    order = Elf64CoreDump.order - 1

    __name = "elf64"
    __image = True

    def __init__(self, **kwargs):
        """Rebase kcore's kernel-virtual runs down to physical addresses."""
        super(KCoreAddressSpace, self).__init__(**kwargs)

        # This is a live address space.
        self.volatile = True
        self.mapped_files = {}

        runs = []

        # The default (non randomized) direct mapping window.
        range_start = 0xffff880000000000
        range_end = 0xffffc7ffffffffff
        range_len = range_end - range_start

        io_map_vm = self.get_file_address_space("/proc/iomem")
        if io_map_vm != None:
            io_map_data = utils.SmartUnicode(io_map_vm.read(0, 100000).split(b"\x00")[0])
            io_map = ParseIOMap(io_map_data)

            # Mapping in the ELF program header of the first physical
            # memory range.
            first_run = self.runs[0][2]

            # Mapping in physical memory of the first physical memory
            # range.
            first_system_ram = io_map["System RAM"][0]

            # The random offset added to all physical memory ranges
            # when exported via the ELF header.
            range_start = first_run.start - first_system_ram.start

            # Only add the runs which correspond with the System RAM Io map.
            for start, _, run in self.runs:
                normalized_start = start - range_start
                for ram_run in io_map["System RAM"]:
                    if normalized_start == ram_run.start:
                        runs.append((normalized_start,
                                     run.file_offset, run.length))
                        break
        else:
            # No iomem available - fall back to the fixed direct map window.
            for start, _, run in self.runs:
                if range_start < run.start < range_end:
                    runs.append((start - range_start,
                                 run.file_offset, run.length))

        self.as_assert(runs, "No kcore compatible virtual ranges.")

        # Replace the virtual runs with the rebased physical ones.
        self.runs.clear()

        # At this point, we think this is a valid, usable kcore file.
        # RHEL, however, disables read access to /proc/kcore past the ELF
        # headers and the file size reflects this. /proc/kcore usually has a
        # size of at least 64TB (46bits of physical address space in x64).
        # We use the file size to detect cases where kcore will be unusable.
        if getattr(self.base, "fhandle", None):
            try:
                size = os.fstat(self.base.fhandle.fileno()).st_size
            except IOError:
                size = 0

            self.as_assert(size > 2**45,
                           "This kcore file is too small (%d bytes) and likely "
                           "invalid for memory analysis. You may want to use pmem "
                           "instead." % size)

        for x in runs:
            self.add_run(*x)

    def get_file_address_space(self, filename):
        """Return a FileAddressSpace over filename, or None when unreadable."""
        try:
            # Try to read the file with OS APIs.
            return standard.FileAddressSpace(filename=filename,
                                            session=self.session)
        except IOError:
            return
def WriteElfFile(address_space, outfd, session=None):
    """Convert the address_space to an ELF Core dump file.

    The Core dump will be written to outfd which is expected to have a .write()
    method.
    """
    runs = list(address_space.get_mappings())

    elf_profile = elf.ELFProfile(session=session)
    elf64_pheader = elf_profile.elf64_phdr()
    elf64_pheader.p_type = "PT_LOAD"
    elf64_pheader.p_align = 0x1000
    elf64_pheader.p_flags = "PF_R"

    elf64_header = elf_profile.elf64_hdr()
    elf64_header.e_ident = elf64_header.e_ident.signature
    elf64_header.e_type = 'ET_CORE'
    elf64_header.e_phoff = elf64_header.obj_end
    elf64_header.e_ehsize = elf64_header.obj_size
    elf64_header.e_phentsize = elf64_pheader.obj_size
    elf64_header.e_phnum = len(runs)
    elf64_header.e_shnum = 0  # We don't have any sections.

    # Where we start writing data: after the file header plus
    # one Phdr for each run.
    file_offset = (elf64_header.obj_size +
                   # One Phdr for each run.
                   len(runs) * elf64_pheader.obj_size)

    outfd.write(elf64_header.GetData())

    # Emit one program header per run, advancing the file offset as we go.
    for run in runs:
        elf64_pheader.p_paddr = run.start
        elf64_pheader.p_memsz = run.length
        elf64_pheader.p_offset = file_offset
        elf64_pheader.p_filesz = run.length

        outfd.write(elf64_pheader.GetData())

        file_offset += run.length

    # Now just copy all the runs
    total_data = 0
    for run in runs:
        offset = run.start
        length = run.length
        while length > 0:
            # Copy in chunks of at most 10Mb, reporting progress.
            data = address_space.read(offset, min(10000000, length))
            session.report_progress("Writing %sMb", total_data//1024//1024)
            outfd.write(data)
            length -= len(data)
            offset += len(data)
            total_data += len(data)
""" These are standard address spaces supported by Rekall Memory Forensics """
from future import standard_library
standard_library.install_aliases()
from builtins import object
import io
import struct
import os
import weakref
from rekall import addrspace
from rekall import config
# Register the -o / --file_offset command line option consumed by
# GlobalOffsetAddressSpace below.
config.DeclareOption(
    "-o", "--file_offset",
    type="IntParser", help="A Relative offset for image file.")
class FDAddressSpace(addrspace.BaseAddressSpace):
    """An address space which operated on a file like object."""
    __name = "filelike"

    # We should be first.
    order = 0

    def __init__(self, base=None, fhandle=None, **kwargs):
        """Initialize from an already open, seekable file-like object.

        Args:
          base: Must be None - this AS sits at the bottom of the stack.
          fhandle: The file-like object to serve reads from (required).
        """
        self.as_assert(base == None, "Base passed to FDAddressSpace.")
        self.as_assert(fhandle is not None, 'file handle must be provided')
        self.fhandle = fhandle
        try:
            # Seek to the end to discover the backing file's size.
            self.fhandle.seek(0, 2)
            self.fsize = self.fhandle.tell()
        except IOError:
            # We failed to seek to the end - this is usual with devices so we
            # assume they are volatile to be safe.
            self.fsize = 0
            self.volatile = True
            self.session.logging.warn(
                "Unable to determine file size, assuming file is volatile.")
        self.offset = 0
        super(FDAddressSpace, self).__init__(**kwargs)

    def read(self, addr, length):
        """Read length bytes at addr; short or failed reads are zero padded."""
        length = int(length)
        addr = int(addr)
        try:
            self.fhandle.seek(addr)
            data = self.fhandle.read(length)
            # Always return exactly `length` bytes.
            return data + addrspace.ZEROER.GetZeros(length - len(data))
        except IOError:
            return addrspace.ZEROER.GetZeros(length)

    def read_long(self, addr):
        """Read a native-endian unsigned 32 bit integer at addr."""
        string = self.read(addr, 4)
        (longval,) = struct.unpack('=I', string)
        return longval

    def get_mappings(self, start=0, end=2**64):
        """Yield the single run covering the whole backing file."""
        _ = end
        yield addrspace.Run(start=0, end=self.fsize,
                            file_offset=0, address_space=self)

    def is_valid_address(self, addr):
        # A flat file exposes every non-None address as valid.
        if addr == None:
            return False
        return True

    def close(self):
        self.fhandle.close()

    def __eq__(self, other):
        # NOTE(review): self.fname is only set by subclasses (e.g.
        # FileAddressSpace); comparing two raw FDAddressSpace instances
        # would raise AttributeError - confirm this is intended.
        return (self.__class__ == other.__class__ and
                self.fname == other.fname)
class FileAddressSpace(FDAddressSpace):
    """ This is a direct file AS.

    For this AS to be instantiated, we need

    1) A valid config.filename

    2) no one else has picked the AS before us

    3) base == None (we dont operate on anyone else so we need to be
    right at the bottom of the AS stack.)
    """
    __name = "file"

    # We should be the AS of last resort
    order = 100

    # This address space handles images.
    __image = True

    def __init__(self, base=None, filename=None, session=None, **kwargs):
        """Open the image named by *filename* or the session's filename.

        Raises:
          addrspace.ASAssertionError: no filename available, or the file
            can not be opened.
          RuntimeError: a win32 device path was given (unsupported here).
        """
        self.as_assert(base == None, 'Must be first Address Space')
        self.session = session
        path = filename or (session and session.GetParameter("filename"))
        self.as_assert(path, "Filename must be specified in session (e.g. "
                       "session.SetParameter('filename', 'MyFile.raw').")

        self.name = os.path.basename(path)
        self.fname = os.path.abspath(path)
        self.mode = 'rb'

        # BUG FIX: the prefix was written as a raw string with doubled
        # backslashes (r"\\\\.\\" is the literal text '\\\\.\\'), which can
        # never match a real win32 device path such as
        # '\\.\PhysicalMemory'. Use an escaped literal for the '\\.\'
        # prefix instead.
        if path.startswith("\\\\.\\"):
            raise RuntimeError(
                "Unable to open a device without the win32file package "
                "installed.")

        try:
            fhandle = open(self.fname, self.mode)
        except (IOError, OSError) as e:
            raise addrspace.ASAssertionError("%s" % e)

        # Close the OS handle when this object is garbage collected.
        self._closer = weakref.ref(self, lambda x: fhandle.close())

        super(FileAddressSpace, self).__init__(
            fhandle=fhandle, session=session, **kwargs)
class GlobalOffsetAddressSpace(addrspace.RunBasedAddressSpace):
    """An address space to add a constant offset."""
    __image = True

    # Must come after all other address space.
    order = 120

    def __init__(self, **kwargs):
        """Shift the base AS by the --file_offset session parameter."""
        super(GlobalOffsetAddressSpace, self).__init__(**kwargs)
        # Offset comes from the -o/--file_offset command line option.
        self.file_offset = self.session.GetParameter("file_offset")
        self.as_assert(self.file_offset, "File offset not specified.")
        # Refuse to stack on ourselves so the offset is not applied twice.
        self.as_assert(self.base.__class__ is not GlobalOffsetAddressSpace,
                       "Can not stack on GlobalOffsetAddressSpace")
        # Map the whole base AS, shifted by file_offset.
        self.add_run(0, self.file_offset, self.base.end())
class WritableAddressSpaceMixIn(object):
    """This address space can be used to create new files.

    NOTE: This does not participate in voting or gets automatically
    selected. It can only be instantiated directly.
    """

    def write(self, addr, data):
        """Write *data* at *addr*, growing the file as needed.

        Returns:
          The number of bytes written.
        """
        self.fhandle.seek(addr)
        self.fhandle.write(data)
        self.fhandle.flush()
        return len(data)

    def is_valid_address(self, unused_addr):
        # All addresses are valid, we just grow the file there.
        return True

    def end(self):
        """Return the current file size by seeking to the end."""
        self.fhandle.seek(0, 2)
        return self.fhandle.tell()

    def read(self, addr, length):
        """Read *length* bytes at *addr*, zero padding past end of file."""
        try:
            # Just null pad the file - even if we read past the end.
            self.fhandle.seek(addr)
            data = self.fhandle.read(length)
        except OverflowError:
            # Address too large for the underlying OS call.
            data = b""
        if len(data) < length:
            data += addrspace.ZEROER.GetZeros(length - len(data))
        return data
class WritableAddressSpace(WritableAddressSpaceMixIn, FDAddressSpace):
    """A file backed address space which may also be written to."""

    def __init__(self, filename=None, mode="w+b", **kwargs):
        self.as_assert(filename, "Filename must be specified.")
        full_path = os.path.abspath(filename)
        self.name = full_path
        self.fname = full_path
        self.mode = mode
        fhandle = open(full_path, mode)
        # Close the handle once this object is garbage collected.
        self._closer = weakref.ref(self, lambda x: fhandle.close())
        super(WritableAddressSpace, self).__init__(fhandle=fhandle, **kwargs)
class WritableFDAddressSpace(WritableAddressSpaceMixIn, FDAddressSpace):
    """An address space which can be initialized from a file handle.

    Note that file handle must be writable. Unlike WritableAddressSpace
    this class does not open the file itself - pass fhandle= directly.
    """
class DummyAddressSpace(WritableAddressSpaceMixIn, FDAddressSpace):
    """An AS which always returns nulls."""
    __name = 'dummy'

    def __init__(self, size=10 * 1024, session=None, **_):
        # Backed by an in-memory buffer pre-filled with `size` zero bytes;
        # it is writable, so tests may poke data into it.
        super(DummyAddressSpace, self).__init__(
            session=session,
            fhandle=io.BytesIO(size * b"\x00"))

    def getvalue(self):
        """Dump the entire address space as a byte string."""
        return self.fhandle.getvalue()
# pylint: disable=protected-access
from rekall import addrspace
from rekall import args
from rekall import scan
from rekall import kb
from rekall import plugin
from rekall.plugins import core
from rekall.plugins.common import scanners
from rekall_lib import registry
from rekall_lib import utils
# PDB file names the Windows kernel ships under (uniprocessor /
# multiprocessor, PAE and non-PAE builds).
KERNEL_NAMES = {"ntkrnlmp.pdb", "ntkrnlpa.pdb", "ntoskrnl.pdb",
                "ntkrpamp.pdb"}
# We require both a physical AS set and a valid profile for
# AbstractWindowsCommandPlugins.
class AbstractWindowsCommandPlugin(plugin.PhysicalASMixin,
                                   plugin.TypedProfileCommand,
                                   plugin.ProfileCommand):
    """A base class for all windows based plugins.

    Windows based plugins require at a minimum a working profile, and a valid
    physical address space.
    """

    # Not instantiable directly - subclasses implement concrete plugins.
    __abstract = True

    mode = "mode_windows_memory"
class AbstractWindowsParameterHook(kb.ParameterHook):
    """Base for session parameter hooks that only apply to Windows images."""
    mode = "mode_windows_memory"
# Well known windows process names, NUL padded to the 15 byte width of
# the _EPROCESS.ImageFileName field.
DEFAULT_PROCESS_NAMES = [x.ljust(15, b"\x00") for x in [
    b"cmd.exe", b"System", b"csrss.exe", b"svchost.exe", b"lsass.exe",
    b"winlogon.exe", b"Idle"]]
class WinFindDTB(AbstractWindowsCommandPlugin, core.FindDTB):
    """A plugin to search for the Directory Table Base for windows systems.

    There are a number of ways to find the DTB:

    - Scanner method: Scans the image for a known kernel process, and read the
      DTB from its Process Environment Block (PEB).

    - Get the DTB from the KPCR structure.

    - Note that the kernel is mapped into every process's address space (with
      the exception of session space which might be different) so using any
      process's DTB from the same session will work to read kernel data
      structures. If this plugin fails, try psscan to find potential DTBs.
    """

    __name = "find_dtb"

    __args = [
        dict(name="process_name", default=DEFAULT_PROCESS_NAMES,
             help="The names of the processes to search for.")
    ]

    def scan_for_process(self):
        """Scan the image for the idle process."""
        maxlen = self.session.GetParameter("autodetect_scan_length",
                                           10*1024*1024*1024)
        self.image_name_offset = self.profile.get_obj_offset(
            "_EPROCESS", "ImageFileName")

        for offset, hit in scan.MultiStringScanner(
                session=self.session,
                needles=self.plugin_args.process_name,
                profile=self.profile,
                address_space=self.physical_address_space).scan(
                    0, maxlen=maxlen):
            # The hit landed on ImageFileName - rewind to the _EPROCESS
            # start.
            eprocess = self.profile.Object(
                "_EPROCESS", offset=offset - self.image_name_offset,
                vm=self.session.physical_address_space)

            self.session.report_progress(
                u"Found possible _EPROCESS @ 0x%X (In process %s) (DTB: 0x%X)",
                eprocess.obj_offset,
                utils.SmartUnicode(hit),
                eprocess.Pcb.DirectoryTableBase.v())

            yield eprocess

    def address_space_hits(self):
        """Finds DTBs and yields virtual address spaces that expose kernel.

        Yields:
          BaseAddressSpace-derived instances, validated using the VerifyHit()
          method.
        """
        for dtb, eprocess in self.dtb_eprocess_hits():
            address_space = self.VerifyHit(dtb)
            if address_space is not None and self.TestEProcess(
                    address_space, eprocess):
                yield address_space

    def dtb_eprocess_hits(self):
        """Yield (dtb, eprocess) pairs for every scanner hit with a DTB."""
        for eprocess in self.scan_for_process():
            result = eprocess.Pcb.DirectoryTableBase.v()
            if result:
                yield result, eprocess

    def TestEProcess(self, address_space, eprocess):
        """Sanity check the eprocess through the candidate address space."""
        # Reflect through the address space at ourselves. Note that the Idle
        # process is not usually in the PsActiveProcessHead list, so we use
        # the ThreadListHead instead.
        list_head = eprocess.ThreadListHead.Flink

        if list_head == 0:
            self.session.logging.debug("_EPROCESS.ThreadListHead not valid.")
            return

        # Following Blink then Flink must land back on the same entry.
        me = list_head.dereference(vm=address_space).Blink.Flink
        if me.v() != list_head.v():
            self.session.report_progress(
                "_EPROCESS.ThreadListHead does not reflect.")
            return

        # We passed the tests.
        return True

    def VerifyHit(self, dtb):
        """Check the eprocess for sanity."""
        # In windows the DTB must be page aligned, except for PAE images where
        # its aligned to a 0x20 size.
        if not self.profile.metadata("pae") and dtb & 0xFFF != 0:
            return

        # NOTE(review): the comment above says 0x20 alignment but the mask
        # only checks 16-byte alignment - confirm intended.
        if self.profile.metadata("pae") and dtb & 0xF != 0:
            return

        # Select simple address space implementations as test address spaces.
        address_space = super(WinFindDTB, self).GetAddressSpaceImplementation()(
            session=self.session, dtb=dtb,
            base=self.session.physical_address_space)

        # Check that the _KUSER_SHARED_DATA makes sense. This structure is
        # always at a known offset since it must be shared with user space
        # apps.
        # Note: We must get the raw value here since image base is not yet
        # known.
        eprocess_index = self.session.LoadProfile("nt/eprocess_index")

        # The profile does not have the KI_USER_SHARED_DATA_RAW
        # symbol. This does not look like a kernel profile.
        if self.profile.get_constant("KI_USER_SHARED_DATA_RAW") == None:
            return

        kuser_shared = eprocess_index._KUSER_SHARED_DATA(
            offset=self.profile.get_constant("KI_USER_SHARED_DATA_RAW"),
            vm=address_space)

        # Must be a valid version of windows.
        if (address_space.vtop(kuser_shared.obj_offset) and
                (kuser_shared.NtMajorVersion not in [5, 6, 10] or
                 kuser_shared.NtMinorVersion not in [0, 1, 2, 3])):
            return

        self.session.SetCache("dtb", dtb)

        return self.CreateAS(dtb)

    def GetAddressSpaceImplementation(self):
        """Returns the correct address space class for this profile."""
        # The virtual address space implementation is chosen by the profile.
        architecture = self.profile.metadata("arch")
        performance = self.session.GetParameter("performance")
        if architecture == "AMD64":
            # If the user prefers performance we will use the simplest Address
            # Space Implementation.
            if performance == "fast":
                impl = "AMD64PagedMemory"
            else:
                impl = "WindowsAMD64PagedMemory"

        # PAE profiles go with the pae address space.
        elif architecture == "I386" and self.profile.metadata("pae"):
            if performance == "fast":
                impl = "IA32PagedMemoryPae"
            else:
                impl = "WindowsIA32PagedMemoryPae"

        else:
            # Fall back to the generic implementation for other arches.
            return super(WinFindDTB, self).GetAddressSpaceImplementation()

        as_class = addrspace.BaseAddressSpace.classes[impl]
        return as_class

    # NOTE(review): column named "dtv" is presumably a typo for "dtb" but
    # renaming would change the rendered output - confirm before changing.
    table_header = [
        dict(name="_EPROCESS (P)", type="_EPROCESS"),
        dict(name="dtv", style="address"),
        dict(name="valid", width=10)
    ]

    def collect(self):
        """Render each candidate (eprocess, dtb) pair with its validity."""
        for dtb, eprocess in self.dtb_eprocess_hits():
            yield (eprocess, dtb,
                   self.VerifyHit(dtb) is not None)
## The following are checks for pool scanners.
class PoolTagCheck(scan.StringCheck):
    """This scanner checks for the occurrence of a pool tag.

    It is basically a StringCheck but it offsets the check with a constant.
    """

    def __init__(self, tag=None, **kwargs):
        super(PoolTagCheck, self).__init__(needle=tag, **kwargs)

        # The offset from the start of _POOL_HEADER to the tag. (Note we use
        # the kernel profile for pool definitions.).
        self.tag_offset = self.session.profile.get_obj_offset(
            "_POOL_HEADER", "PoolTag")

        if self.tag_offset == None:
            raise RuntimeError(
                "Unable to get PoolTag offset in _POOL_HEADER. "
                "Is the profile correct?")

    def skip(self, buffer_as, offset):
        # Shift the scan position so the string check lands on PoolTag.
        return super(PoolTagCheck, self).skip(
            buffer_as, offset + self.tag_offset)

    def check(self, buffer_as, offset):
        # Same shift when testing for a match at `offset`.
        return super(PoolTagCheck, self).check(
            buffer_as, offset + self.tag_offset)
class MultiPoolTagCheck(scan.MultiStringFinderCheck):
    """This scanner checks for the occurrence of a pool tag.

    It is basically a StringCheck but it offsets the check with a constant.
    """

    def __init__(self, tags=None, **kwargs):
        super(MultiPoolTagCheck, self).__init__(needles=tags, **kwargs)

        # The offset from the start of _POOL_HEADER to the tag.
        # NOTE(review): PoolTagCheck above uses self.session.profile and
        # validates the offset against None; this uses self.profile and
        # skips the check - confirm whether the asymmetry is intended.
        self.tag_offset = self.profile.get_obj_offset(
            "_POOL_HEADER", "PoolTag")

    def skip(self, buffer_as, offset):
        # Shift the scan position so the match lands on the PoolTag field.
        return super(MultiPoolTagCheck, self).skip(
            buffer_as, offset + self.tag_offset)

    def check(self, buffer_as, offset):
        return super(MultiPoolTagCheck, self).check(
            buffer_as, offset + self.tag_offset)
class CheckPoolSize(scan.ScannerCheck):
    """ Check pool block size """

    def __init__(self, condition=None, min_size=None, **kwargs):
        """Accept either an arbitrary `condition` callable or a `min_size`.

        When min_size is given it overrides any provided condition.
        """
        super(CheckPoolSize, self).__init__(**kwargs)
        self.condition = condition
        if min_size:
            self.condition = lambda x: x >= min_size

        # Pool block sizes are expressed in PoolAlignment units.
        self.pool_align = self.session.profile.constants['PoolAlignment']
        if self.condition is None:
            raise RuntimeError("No pool size provided")

    def check(self, buffer_as, offset):
        """Test the allocation size (BlockSize * alignment) at offset."""
        pool_hdr = self.session.profile._POOL_HEADER(
            vm=buffer_as, offset=offset)

        block_size = pool_hdr.BlockSize.v()

        return self.condition(block_size * self.pool_align)
class CheckPoolType(scan.ScannerCheck):
    """ Check the pool type """

    def __init__(self, paged=False, non_paged=False, free=False, **kwargs):
        """Enable matching for any combination of pool types.

        Args:
          paged: Match allocations from the paged pool.
          non_paged: Match allocations from the non-paged pool.
          free: Match freed allocations.
        """
        super(CheckPoolType, self).__init__(**kwargs)
        self.non_paged = non_paged
        self.paged = paged
        self.free = free

    def check(self, buffer_as, offset):
        # Returns a truthy value if the header matches any enabled type.
        pool_hdr = self.session.profile._POOL_HEADER(
            vm=buffer_as, offset=offset)

        return ((self.non_paged and pool_hdr.NonPagedPool) or
                (self.free and pool_hdr.FreePool) or
                (self.paged and pool_hdr.PagedPool))
class CheckPoolIndex(scan.ScannerCheck):
    """ Checks the pool index """

    def __init__(self, value=0, **kwargs):
        super(CheckPoolIndex, self).__init__(**kwargs)
        # The expected _POOL_HEADER.PoolIndex value.
        self.value = value

    def check(self, buffer_as, offset):
        pool_hdr = self.session.profile._POOL_HEADER(
            vm=buffer_as, offset=offset)

        return pool_hdr.PoolIndex == self.value
class PoolScanner(scan.BaseScanner):
    """A scanner for pool allocations."""

    def scan(self, offset=0, maxlen=None):
        """Yields instances of _POOL_HEADER which potentially match."""
        # Default to scanning the whole addressable range.
        maxlen = maxlen or self.session.profile.get_constant("MaxPointer")
        for hit in super(PoolScanner, self).scan(offset=offset, maxlen=maxlen):
            yield self.session.profile._POOL_HEADER(
                vm=self.address_space, offset=hit)
class KDBGHook(AbstractWindowsParameterHook):
    """A Hook to calculate the KDBG when needed."""

    name = "kdbg"

    def calculate(self):
        """Locate the kernel debugger data block.

        Tries the profile symbol first; falls back to scanning with the
        kdbgscan plugin.
        """
        # Try to just get the KDBG address using the profile.
        kdbg = self.session.profile.get_constant_object(
            "KdDebuggerDataBlock", "_KDDEBUGGER_DATA64",
            vm=self.session.kernel_address_space)

        # Verify it.
        if kdbg.Header.OwnerTag == "KDBG":
            return kdbg

        # Cant find it from the profile, look for it the old way.
        self.session.logging.info(
            "KDBG not provided - Rekall will try to "
            "automatically scan for it now using plugin.kdbgscan.")

        for kdbg in self.session.plugins.kdbgscan(
                session=self.session).hits():
            # Just return the first one
            self.session.logging.info(
                "Found a KDBG hit %r. Hope it works. If not try setting it "
                "manually.", kdbg)

            return kdbg
class PsActiveProcessHeadHook(AbstractWindowsParameterHook):
    """The PsActiveProcessHead is actually found in the profile symbols."""

    name = "PsActiveProcessHead"

    def calculate(self):
        # Resolve the kernel symbol as a _LIST_ENTRY in the kernel AS.
        kernel_as = self.session.kernel_address_space
        return self.session.profile.get_constant_object(
            "PsActiveProcessHead",
            target="_LIST_ENTRY",
            vm=kernel_as)
class PsLoadedModuleList(AbstractWindowsParameterHook):
    """The PsLoadedModuleList is actually found in the profile symbols."""

    # NOTE(review): sibling hooks use a "...Hook" class-name suffix; this
    # one does not - confirm before renaming.
    name = "PsLoadedModuleList"

    def calculate(self):
        """Resolve the loaded module list head from the kernel profile."""
        return self.session.profile.get_constant_object(
            "PsLoadedModuleList",
            target="_LIST_ENTRY",
            vm=self.session.kernel_address_space)
class WindowsCommandPlugin(plugin.KernelASMixin, AbstractWindowsCommandPlugin):
    """A windows plugin which requires the kernel address space."""

    # Abstract base - concrete plugins subclass this.
    __abstract = True
class WinProcessFilter(WindowsCommandPlugin):
    """A class for filtering processes."""

    __abstract = True

    # Maintain the order of methods.
    METHODS = [
        "PsActiveProcessHead",
        "CSRSS",
        "PspCidTable",
        "Sessions",
        "Handles",
    ]

    __args = [
        dict(name="eprocess", type="ArrayIntParser",
             default=plugin.Sentinel(),
             help="Kernel addresses of eprocess structs."),

        dict(name="pids", positional=True, type="ArrayIntParser",
             default=plugin.Sentinel(),
             help="One or more pids of processes to select."),

        dict(name="proc_regex", default=None, type="RegEx",
             help="A regex to select a process by name."),

        dict(name="method", choices=METHODS, type="ChoiceArray",
             default=METHODS, help="Method to list processes."),
    ]

    @utils.safe_property
    def filtering_requested(self):
        """True when any selector (pids, regex or eprocess) was supplied."""
        return (not isinstance(self.plugin_args.pids, plugin.Sentinel) or
                self.plugin_args.proc_regex is not None or
                not isinstance(self.plugin_args.eprocess, plugin.Sentinel))

    def filter_processes(self):
        """Filters eprocess list using pids lists."""
        # If eprocess are given specifically only use those.
        if not isinstance(self.plugin_args.eprocess, plugin.Sentinel):
            for task in self.list_from_eprocess():
                yield task

        else:
            for proc in self.list_eprocess():
                # No selectors at all - pass everything through.
                if not self.filtering_requested:
                    yield proc

                # Pid filter takes precedence over the name regex.
                elif not isinstance(self.plugin_args.pids, plugin.Sentinel):
                    if int(proc.pid) in self.plugin_args.pids:
                        yield proc

                elif (self.plugin_args.proc_regex and
                      self.plugin_args.proc_regex.match(
                          utils.SmartUnicode(proc.name))):
                    yield proc

    def virtual_process_from_physical_offset(self, physical_offset):
        """Tries to return an eprocess in virtual space from a physical offset.

        We do this by reflecting off the list elements.

        Args:
          physical_offset: The physcial offset of the process.

        Returns:
          an _EPROCESS object or a NoneObject on failure.
        """
        physical_eprocess = self.profile._EPROCESS(
            offset=int(physical_offset),
            vm=self.physical_address_space)

        return physical_eprocess.ThreadListHead.reflect(
            vm=self.kernel_address_space).dereference_as(
                "_EPROCESS", "ThreadListHead")

    def list_from_eprocess(self):
        """Yield _EPROCESS objects for explicitly supplied kernel offsets."""
        if isinstance(self.plugin_args.eprocess, plugin.Sentinel):
            return

        for eprocess_offset in self.plugin_args.eprocess:
            eprocess = self.profile._EPROCESS(
                offset=eprocess_offset, vm=self.kernel_address_space)

            yield eprocess

    def list_eprocess(self):
        """List processes using chosen methods."""
        # We actually keep the results from each method around in case we need
        # to find out later which process was revealed by which method.
        seen = set()
        for proc in self.list_from_eprocess():
            seen.add(proc.obj_offset)

        # Each pslist_<method> session parameter is a set of offsets.
        for method in self.plugin_args.method:
            for proc in self.session.GetParameter("pslist_%s" % method):
                seen.add(proc)

        result = []
        for x in seen:
            result.append(self.profile._EPROCESS(
                x, vm=self.session.kernel_address_space))

        return sorted(result, key=lambda x: x.pid)
class WinScanner(scanners.BaseScannerPlugin, WinProcessFilter):
    """Windows specific scanner implementation."""

    __abstract = True

    __args = [
        dict(name="scan_kernel_paged_pool", default=False, type="Boolean",
             help="Scan the kernel paged pool."),

        dict(name="scan_kernel_nonpaged_pool", default=False, type="Boolean",
             help="Scan the kernel non-paged pool."),

        dict(name="scan_kernel_code", default=False, type="Boolean",
             help="Scan the kernel image and loaded drivers."),

        dict(name="scan_kernel_session_pools", default=False, type="Boolean",
             help="Scan session pools for all processes."),

        dict(name="limit", default=2**64, type="IntParser",
             help="The length of data to search in each selected region."),
    ]

    def generate_memory_ranges(self):
        """Yield the address ranges to scan, each capped at --limit.

        The base class ranges are emitted first. Unless the whole kernel
        space was requested, the kernel is then subdivided into the
        regions enabled by the scan_kernel_* arguments.
        """
        for run in super(WinScanner, self).generate_memory_ranges():
            run.length = min(run.length, self.plugin_args.limit)
            yield run

        # If the user did not just ask to scan the entire kernel space,
        # support dividing the kernel space into subregions.
        if not self.plugin_args.scan_kernel:
            regions = list(self.session.plugins.virt_map())

            # Scan session pools in each process.
            if self.plugin_args.scan_kernel_session_pools:
                pools_plugin = self.session.plugins.pools()

                for desc in pools_plugin.find_session_pool_descriptors():
                    comment = desc.Comment
                    self.session.logging.info(
                        "Scanning in: %s. [%#x-%#x]" % (
                            comment, desc.PoolStart, desc.PoolEnd))

                    run = addrspace.Run(
                        start=desc.PoolStart, end=desc.PoolEnd,
                        address_space=desc.obj_vm,
                        data=dict(type=comment))
                    run.length = min(run.length, self.plugin_args.limit)
                    yield run

            # Non paged pool selection.
            if self.plugin_args.scan_kernel_nonpaged_pool:
                for region in regions:
                    # Renamed local: previously shadowed the builtin `type`.
                    region_type = utils.SmartUnicode(region["type"])
                    if "NonPagedPool" not in region_type:
                        continue

                    comment = "Pool %s" % region_type
                    self.session.logging.info(
                        "Scanning in: %s. [%#x-%#x]" % (
                            comment, region["virt_start"],
                            region["virt_end"]))

                    run = addrspace.Run(
                        start=region["virt_start"], end=region["virt_end"],
                        address_space=self.session.kernel_address_space,
                        data=dict(type=comment))
                    run.length = min(run.length, self.plugin_args.limit)
                    yield run

            # Paged pool selection.
            if self.plugin_args.scan_kernel_paged_pool:
                for region in regions:
                    if "PagedPool" != region["type"]:
                        continue

                    comment = "Pool %s" % region["type"]
                    self.session.logging.info("Scanning in: %s [%#x-%#x]" % (
                        comment, region["virt_start"], region["virt_end"]))

                    run = addrspace.Run(
                        start=region["virt_start"], end=region["virt_end"],
                        address_space=self.session.kernel_address_space,
                        data=dict(type=comment))
                    run.length = min(run.length, self.plugin_args.limit)
                    yield run

            # Kernel image and loaded drivers.
            if self.plugin_args.scan_kernel_code:
                cc = self.session.plugins.cc()
                with cc:
                    cc.SwitchProcessContext(None)
                    for module in (
                            self.session.address_resolver.GetAllModules()):
                        comment = "Module %s" % module.name

                        self.session.logging.info(
                            "Scanning in: %s [%#x-%#x]" % (
                                comment, module.start, module.end))

                        run = addrspace.Run(
                            start=module.start, end=module.end,
                            address_space=self.session.kernel_address_space,
                            data=dict(type=comment, module=module))
                        run.length = min(run.length, self.plugin_args.limit)
                        yield run
                # BUG FIX: removed a duplicated, dedented
                # "run.length = ...; yield run" pair that re-yielded the
                # last module run a second time (and raised NameError when
                # no modules were found).
class PoolScannerPlugin(WinScanner, AbstractWindowsCommandPlugin):
    """Abstract base for plugins that scan kernel pools for allocations."""
    __abstract = True
class PsListPsActiveProcessHeadHook(AbstractWindowsParameterHook):
    """Lists process offsets by walking the PsActiveProcessHead list."""

    name = "pslist_PsActiveProcessHead"

    def calculate(self):
        head = self.session.GetParameter("PsActiveProcessHead")
        # Collect the offset of each _EPROCESS linked into the list.
        result = set(
            task.obj_offset
            for task in head.list_of_type("_EPROCESS", "ActiveProcessLinks"))

        self.session.logging.debug(
            "Listed %s processes using PsActiveProcessHead", len(result))

        return result
class PsListCSRSSHook(AbstractWindowsParameterHook):
    name = "pslist_CSRSS"

    def calculate(self):
        """Enumerate processes using the csrss.exe handle table"""
        result = set()

        # First find csrss process using a simpler method.
        for proc_offset in self.session.GetParameter(
                "pslist_PsActiveProcessHead"):
            proc = self.session.profile._EPROCESS(proc_offset)
            if proc.name == "csrss.exe":
                # Gather the handles to process objects
                for handle in proc.ObjectTable.handles():
                    if handle.get_object_type() == "Process":
                        process = handle.dereference_as("_EPROCESS")
                        # Record the offset only - offsets are hashable.
                        result.add(process.obj_offset)

        self.session.logging.debug(
            "Listed %s processes using CSRSS", len(result))

        return result
class PsListPspCidTableHook(AbstractWindowsParameterHook):
    name = "pslist_PspCidTable"

    def calculate(self):
        """Enumerate processes by walking the PspCidTable"""
        result = set()

        # Follow the pointers to the table base
        PspCidTable = self.session.profile.get_constant_object(
            "PspCidTable",
            target="Pointer",
            target_args=dict(
                target="_PSP_CID_TABLE"
            )
        )

        # Walk the handle table
        for handle in PspCidTable.handles():
            if handle.get_object_type() == "Process":
                process = handle.dereference_as("_EPROCESS")
                result.add(process.obj_offset)

        self.session.logging.debug(
            "Listed %s processes using PspCidTable", len(result))

        return result
class PsListSessionsHook(AbstractWindowsParameterHook):
    name = "pslist_Sessions"

    def calculate(self):
        """Enumerate processes by walking the SessionProcessLinks"""
        result = set()
        sessions = set()

        # First find unique sessions using a simpler method.
        for proc_offset in self.session.GetParameter(
                "pslist_PsActiveProcessHead"):
            proc = self.session.profile._EPROCESS(proc_offset)
            # Only walk each session's process list once.
            if proc.Session in sessions:
                continue

            sessions.add(proc.Session)

            # Now enumerate all tasks in session list.
            for task in proc.Session.ProcessList.list_of_type(
                    "_EPROCESS", "SessionProcessLinks"):
                result.add(task.obj_offset)

        self.session.logging.debug(
            "Listed %s processes using Sessions", len(result))

        return result
class PsListHandlesHook(AbstractWindowsParameterHook):
    name = "pslist_Handles"

    def calculate(self):
        """Enumerate processes by walking the kernel HandleTableListHead."""
        result = set()
        handle_table_list_head = self.session.profile.get_constant_object(
            "HandleTableListHead", "_LIST_ENTRY")

        for table in handle_table_list_head.list_of_type(
                "_HANDLE_TABLE", "HandleTableList"):
            # The owning process is reachable through the quota pointer.
            proc = table.QuotaProcess.deref()
            if proc and proc.pid > 0:
                result.add(proc.obj_offset)

        self.session.logging.debug(
            "Listed %s processes using Handles", len(result))

        return result
from rekall.plugins.windows import common
class PSTree(common.WinProcessFilter):
    """Print process list as a tree"""

    __name = "pstree"

    table_header = [
        dict(name="_EPROCESS", type="TreeNode", max_depth=5, child=dict(
            type="_EPROCESS", style="light")),
        dict(name="ppid", width=6, align="r"),
        dict(name="thd_count", width=6, align="r"),
        dict(name="hnd_count", width=6, align="r"),
        dict(name="create_time", width=24),

        dict(name="cmd", width=40, hidden=True),
        dict(name="path", width=40, hidden=True),
        dict(name="audit", width=40, hidden=True),
    ]

    def _find_root(self, pid_dict, pid):
        """Follow parent links up to a pid with no known parent.

        Cycles in the parent chain are broken by remembering visited pids.
        """
        # Prevent circular loops.
        seen = set()

        while pid in pid_dict and pid not in seen:
            seen.add(pid)
            pid = int(pid_dict[pid].InheritedFromUniqueProcessId)

        return pid

    def _make_process_dict(self):
        """Returns a dict keyed by pids with values _EPROCESS objects."""
        result = {}
        for eprocess in self.filter_processes():
            result[int(eprocess.UniqueProcessId)] = eprocess

        return result

    def collect(self):
        """Emit each process tree, depth first, rooted at its ancestor."""
        process_dict = self._make_process_dict()

        def draw_children(pad, pid):
            """Given a pid output all its children."""
            for task in sorted(list(process_dict.values()), key=lambda x: x.pid):
                # Only direct children of `pid` at this level.
                if task.InheritedFromUniqueProcessId != pid:
                    continue

                process_params = task.Peb.ProcessParameters

                yield dict(
                    _EPROCESS=task,
                    ppid=task.InheritedFromUniqueProcessId,
                    thd_count=task.ActiveThreads,
                    hnd_count=task.ObjectTable.m("HandleCount"),
                    create_time=task.CreateTime,
                    cmd=process_params.CommandLine,
                    path=process_params.ImagePathName,
                    audit=task.SeAuditProcessCreationInfo.ImageFileName.Name,
                    depth=pad)

                # Remove emitted tasks so the outer while loop terminates.
                process_dict.pop(task.pid, None)

                for x in draw_children(pad + 1, task.pid):
                    yield x

        # Pick any remaining process, walk up to its root, and emit that
        # whole tree; repeat until every process has been emitted.
        while process_dict:
            keys = list(process_dict.keys())
            root = self._find_root(process_dict, keys[0])

            for x in draw_children(0, root):
                yield x
# pylint: disable=protected-access
from builtins import str
__author__ = ("Michael Cohen <scudette@google.com> and "
"Francesco Picasso <francesco.picasso@gmail.com>")
import binascii
import logging
from Crypto.Cipher import AES
from Crypto.Cipher import DES3
from Crypto.Cipher import ARC4
from rekall import addrspace
from rekall import obj
from rekall.plugins.overlays.windows import pe_vtypes
from rekall.plugins.windows import common
from rekall.plugins.windows import lsadecryptxp
# Overlays shared by the lsasrv/mimikatz profiles: convenience accessors
# on LSA strings, LUIDs and SIDs.
mimikatz_common_overlays = {
    '_LSA_UNICODE_STRING': [None, {
        # Decode Buffer as a unicode string of Length bytes.
        'Value': lambda x: x.Buffer.dereference_as(
            'UnicodeString', target_args=dict(length=x.Length)),
        # Raw (undecoded) bytes of Length / MaximumLength bytes.
        'Raw': lambda x: x.Buffer.dereference_as(
            'String', target_args=dict(length=x.Length, term=None)).v(),
        'RawMax': lambda x: x.Buffer.dereference_as(
            'String', target_args=dict(length=x.MaximumLength, term=None)).v(),
    }],
    '_LSA_STRING': [None, {
        'Value': lambda x: x.Buffer.dereference_as(
            'String', target_args=dict(length=x.Length)),
        'Raw': lambda x: x.Buffer.dereference_as(
            'String', target_args=dict(length=x.Length, term=None)).v(),
        'RawMax': lambda x: x.Buffer.dereference_as(
            'String', target_args=dict(length=x.MaximumLength, term=None)).v(),
    }],
    '_LUID': [None, {
        # Human readable "high:low" rendering of the locally unique id.
        'Text': lambda x: '{:08x}:{:08x}'.format(x.HighPart, x.LowPart)
    }],
    '_SID': [None, {
        'IdentifierAuthority': [None, ['Enumeration', dict(
            choices={
                '\x00\x00\x00\x00\x00\x00': 'Null Authority',
                '\x00\x00\x00\x00\x00\x01': 'World Authority',
                '\x00\x00\x00\x00\x00\x02': 'Local Authority',
                '\x00\x00\x00\x00\x00\x03': 'Creator Authority',
                '\x00\x00\x00\x00\x00\x04': 'NonUnique Authority',
                '\x00\x00\x00\x00\x00\x05': 'NT Authority',
            },
            target='String',
            target_args=dict(length=6, term=None)
        )]],
        # The authority as a big endian integer at offset 4.
        'NumericIdentifier': [0x4, ['unsigned be int']],
        'SubAuthority': [None, ['Array', dict(
            target='unsigned long',
            count=lambda x: x.SubAuthorityCount)]],
    }],
}
class _SID(obj.Struct):
    """A Pretty printing implementation of sids.

    Reference:
    http://www.sekchek.com/downloads/white-papers/windows-about-sids.pdf
    """

    def __str__(self):
        """Format the Sid using SDDL Notation."""
        # S-<revision>-<authority>-<subauthority>...
        parts = [self.Revision, self.NumericIdentifier]
        for sub_authority in self.SubAuthority:
            parts.append(sub_authority)

        return u"S-" + u"-".join(str(part) for part in parts)
class Lsasrv(pe_vtypes.BasicPEProfile):
    """A profile for lsasrv.dll"""

    # Structure definitions pulled from the mimikatz profile repository.
    mimikatz_vtypes = [
        '_LIST_ENTRY', '_LSA_UNICODE_STRING', '_LUID',
        '_LSA_STRING', '_MSV1_0_PRIMARY_CREDENTIAL',
        '_KIWI_BCRYPT_HANDLE_KEY', '_KIWI_HARD_KEY',
        '_KIWI_MSV1_0_CREDENTIALS', '_KIWI_MSV1_0_PRIMARY_CREDENTIALS',
        '_KIWI_GENERIC_PRIMARY_CREDENTIAL',
        '_KIWI_MASTERKEY_CACHE_ENTRY', '_FILETIME']

    # Structures copied from the running session's kernel profile.
    windows_vtypes = ['_SID', '_SID_IDENTIFIER_AUTHORITY', '_GUID']

    # TODO: should be special cases (1or2) addressed?
    # Maps OS version -> logon session list structure layout.
    mimikatz_msv_versioned = {
        5.1: '_KIWI_MSV1_0_LIST_51',
        5.2: '_KIWI_MSV1_0_LIST_52',
        6.0: '_KIWI_MSV1_0_LIST_60',
        6.1: '_KIWI_MSV1_0_LIST_61_ANTI_MIMIKATZ',
        6.2: '_KIWI_MSV1_0_LIST_62',
        6.3: '_KIWI_MSV1_0_LIST_63',
    }

    # Maps OS version -> BCrypt key structure layout.
    mimikatz_key_versioned = {
        5.1: '_KIWI_BCRYPT_KEY',
        5.2: '_KIWI_BCRYPT_KEY',
        6.0: '_KIWI_BCRYPT_KEY',
        6.1: '_KIWI_BCRYPT_KEY',
        6.2: '_KIWI_BCRYPT_KEY8',
        6.3: '_KIWI_BCRYPT_KEY81',
    }
    @classmethod
    def Initialize(cls, profile):
        """Augment *profile* with the mimikatz and windows vtypes/overlays.

        Loads the architecture-matching mimikatz profile from the
        repository, copies a few structures from the running kernel
        profile, and selects the OS-version specific layouts.

        Raises:
          IOError: if the mimikatz profile is missing or the OS version
            is unsupported.
        """
        super(cls, Lsasrv).Initialize(profile)

        arch = profile.session.profile.metadata('arch')
        mimikatz_profile = profile.session.LoadProfile('mimikatz/%s' % arch)
        if not mimikatz_profile:
            raise IOError('Unable to load mimikatz profile from repository!')

        kwargs = {}
        for name in cls.mimikatz_vtypes:
            kwargs[name] = mimikatz_profile.vtypes[name]

        for name in cls.windows_vtypes:
            kwargs[name] = profile.session.profile.vtypes[name]

        profile.add_types(kwargs)

        # A length prefixed blob: 4 byte size followed by the data.
        profile.add_types({
            'SIZED_DATA': [lambda x: x.size + 4, {
                'size': [0, ['unsigned long', {}]],
                'data': [4, ['String', dict(length=lambda x: x.size)]],
            }]
        })

        version = profile.session.profile.metadata('version')
        if version not in cls.mimikatz_msv_versioned:
            raise IOError('OS version not supported.')

        profile.add_types({
            'MSV1_0_LIST': mimikatz_profile.vtypes[
                cls.mimikatz_msv_versioned[version]]
        })

        profile.add_types({
            '_KIWI_BCRYPT_KEY': mimikatz_profile.vtypes[
                cls.mimikatz_key_versioned[version]]
        })

        profile.add_classes(_SID=_SID)
        profile.add_overlay(mimikatz_common_overlays)
        profile.add_overlay({
            '_KIWI_HARD_KEY': [None, {
                # The raw key material, cbSecret bytes long.
                'data': lambda x: x.m('data').cast(
                    'String', term=None, length=x.cbSecret)
            }],
            'MSV1_0_LIST': [None, {
                'List': [0, ['_LIST_ENTRY']],
                'pSid': [None, ['Pointer', dict(target='_SID')]],
                'LogonType': [None, ['Enumeration', dict(
                    target='unsigned int',
                    choices={
                        2: 'Interactive',
                        3: 'Network',
                        4: 'Batch',
                        5: 'Service',
                        6: 'Proxy',
                        7: 'Unlock',
                        8: 'NetworkCleartext',
                        9: 'NewCredentials',
                        10: 'RemoteInteractive',
                        11: 'CachedInteractive',
                        12: 'CachedRemoteInteractive',
                        13: 'CachedUnlock',
                    },
                )]],
            }],
            '_MSV1_0_PRIMARY_CREDENTIAL': [None, {
                # Fixed length password hashes (NT/LM are 16, SHA1 is 20).
                'NtOwfPassword': [None, ['String', dict(length=16)]],
                'LmOwfPassword': [None, ['String', dict(length=16)]],
                'ShaOwPassword': [None, ['String', dict(length=20)]],
            }],
            '_KIWI_MASTERKEY_CACHE_ENTRY': [None, {
                'List': [0, ['_LIST_ENTRY']],
                'key': [None, ['String', dict(length=lambda x: x.keySize)]],
            }],
        })
def init_crypto(self):
    """Initialize LSA decryption keys for the current OS generation.

    Sets self.decryption_enabled, which gates every decrypt() call.  On
    failure the recovered NT6 key material is logged to help diagnose
    why cipher construction was rejected.
    """
    if self.session.profile.metadata('version') < 6.0:
        self.decryption_enabled = self.init_crypto_nt5()
    else:
        self.decryption_enabled = self.init_crypto_nt6()

    if not self.decryption_enabled:
        # BUG fix: str.encode('hex') is Python 2 only - use
        # binascii.hexlify.  Also, only the NT6 path sets these
        # attributes, so getattr() keeps the diagnostics from raising
        # AttributeError when init_crypto_nt5() failed.
        for label, attr in (('AES', 'aes_key'), ('IV', 'iv'),
                            ('DES key', 'des_key')):
            key = getattr(self, attr, None)
            if key is not None:
                # assumes the recovered key material is bytes - confirm.
                logging.warning('%s: %s', label, binascii.hexlify(key))
        logging.error('Unable to initialize decryption keys!')
def decrypt(self, encrypted):
    """Dispatch *encrypted* to the decryptor matching the OS generation."""
    nt5 = self.session.profile.metadata('version') < 6.0
    return self.decrypt_nt5(encrypted) if nt5 else self.decrypt_nt6(encrypted)
def init_crypto_nt6(self):
    """Recover the NT6+ (Vista and later) LSA cipher material.

    lsasrv keeps an AES key and a 3DES key as _KIWI_BCRYPT_HANDLE_KEY
    structures, plus a shared InitializationVector.

    Returns:
      True when both ciphers can be constructed from the recovered
      material, False otherwise.
    """
    self.iv = self.get_constant_object(
        'InitializationVector', 'String', length=16, term=None).v()

    aes_handle = self.get_constant_object(
        'hAesKey', target='Pointer',
        target_args=dict(target='_KIWI_BCRYPT_HANDLE_KEY'))
    self.aes_key = aes_handle.key.hardkey.data.v()

    des_handle = self.get_constant_object(
        'h3DesKey', target='Pointer',
        target_args=dict(target='_KIWI_BCRYPT_HANDLE_KEY'))
    self.des_key = des_handle.key.hardkey.data.v()

    # BUG fix: the original returned from a finally clause, which
    # silently swallowed unexpected exceptions and then raised
    # UnboundLocalError.  Build throwaway ciphers purely to validate
    # the recovered key/IV sizes.
    try:
        AES.new(self.aes_key, AES.MODE_CFB, self.iv)
        DES3.new(self.des_key, DES3.MODE_CBC, self.iv[:8])
    except ValueError as e_ve:
        logging.warning('init_crypto_nt6 exception {}'.format(e_ve))
        return False
    return True
def decrypt_nt6(self, encrypted):
    """Decrypt an NT6+ LSA secret blob.

    Returns the plaintext, or a NoneObject when decryption is disabled
    or no suitable cipher can be constructed.
    """
    if not self.decryption_enabled:
        return obj.NoneObject()

    cipher = None
    if self.iv:
        # mimikatz convention: 3DES for 8-byte-aligned blobs, AES
        # (CFB mode) for everything else.
        if len(encrypted) % 8 != 0:
            cipher = AES.new(self.aes_key, AES.MODE_CFB, self.iv)
        elif self.des_key:
            cipher = DES3.new(self.des_key, DES3.MODE_CBC, self.iv[:8])

    if cipher and encrypted:
        return cipher.decrypt(encrypted)
    return obj.NoneObject()
def init_crypto_nt5(self):
    """Recover the NT5 (XP/2003) LSA cipher material.

    Reads the RC4 key, the 144 byte DESX key schedule and the 8 byte
    feedback (IV) buffer from lsasrv globals.

    Returns:
      True when an RC4 cipher can be constructed from the recovered
      key, False otherwise.
    """
    rc4_key_len = self.get_constant_object(
        'g_cbRandomKey', 'unsigned long').v()
    rc4_key_ptr = self.get_constant_object(
        'g_pRandomKey', target='Pointer')
    self.rc4_key = rc4_key_ptr.dereference_as(
        'String', target_args=dict(length=rc4_key_len, term=None)).v()

    desx_key_ptr = self.get_constant_object(
        'g_pDESXKey', target='Pointer')
    self.desx_key = desx_key_ptr.dereference_as(
        'String', target_args=dict(length=144, term=None)).v()

    self.feedback = self.get_constant_object(
        'g_Feedback', target='String',
        target_args=dict(length=8)).v()

    # BUG fix: the original returned from a finally clause, which
    # silently swallowed unexpected exceptions and then raised
    # UnboundLocalError.  The throwaway cipher only validates the key.
    try:
        ARC4.new(self.rc4_key)
    except ValueError as e_ve:
        logging.warning('init_crypto_nt5 exception {}'.format(e_ve))
        return False
    return True
def decrypt_nt5(self, encrypted):
    """Decrypt an NT5 LSA secret blob.

    Returns the plaintext, or a NoneObject when decryption is disabled
    or no suitable cipher can be constructed.
    """
    if not self.decryption_enabled:
        return obj.NoneObject()

    # mimikatz convention: DESX for 8-byte-aligned blobs, RC4 otherwise.
    cipher = None
    if len(encrypted) % 8 != 0:
        if self.rc4_key:
            cipher = ARC4.new(self.rc4_key)
    elif self.desx_key and self.feedback:
        cipher = lsadecryptxp.XP_LsaDecryptMemory(
            self.desx_key, self.feedback)

    if cipher and encrypted:
        return cipher.decrypt(encrypted)
    return obj.NoneObject()
def get_lsass_logons(self):
    """Map LUID text -> MSV1_0_LIST entry from nt!LogonSessionList."""
    session_list = self.get_constant_object(
        'LogonSessionList', target='_LIST_ENTRY')
    return {entry.LocallyUniqueIdentifier.Text: entry
            for entry in session_list.list_of_type('MSV1_0_LIST', 'List')}
def _msv_primary_credentials(self, data):
    """Parse a decrypted MSV primary credential blob.

    Args:
      data: plaintext bytes of a _MSV1_0_PRIMARY_CREDENTIAL structure.

    Yields:
      (domain, user_name, hash_type, hex_digest) for each hash whose
      presence flag is set, in LM, NTLM, SHA1 order.
    """
    vm = addrspace.BufferAddressSpace(data=data, session=self.session)
    cred_obj = self.Object('_MSV1_0_PRIMARY_CREDENTIAL',
                           profile=self, vm=vm)

    # TODO: check NULL Pointer dereference with this VM.
    domain = (cred_obj.LogonDomainName.Value
              if cred_obj.LogonDomainName.Buffer.is_valid() else '')
    user_name = (cred_obj.UserName.Value
                 if cred_obj.UserName.Buffer.is_valid() else '')

    for present, hash_type, value in (
            (cred_obj.isLmOwfPassword, 'LM', cred_obj.LmOwfPassword),
            (cred_obj.isNtOwfPassword, 'NTLM', cred_obj.NtOwfPassword),
            (cred_obj.isShaOwPassword, 'SHA1', cred_obj.ShaOwPassword)):
        if present.v() == 1:
            yield (domain, user_name, hash_type,
                   binascii.hexlify(value.v()))
def logons(self, lsass_logons):
    """Walk every credential attached to each lsass logon session.

    Args:
      lsass_logons: dict of LUID text -> MSV1_0_LIST entry, as produced
        by get_lsass_logons().

    Yields:
      (luid, cred_type, domain, user_name, secret_type, secret) tuples
      for every decryptable 'Primary' credential.
    """
    for luid, lsass_logon in lsass_logons.items():
        # Each session holds a linked list of credential packages, each
        # of which holds a linked list of encrypted primary credentials.
        for cred in lsass_logon.Credentials.walk_list('next'):
            for primary_cred in cred.PrimaryCredentials.walk_list('next'):
                dec_cred = self.decrypt(primary_cred.Credentials.Raw)
                if not dec_cred:
                    continue

                cur_cred_type = primary_cred.Primary.Value
                if cur_cred_type == u'Primary':
                    for (domain, user_name, secret_type,
                         secret) in self._msv_primary_credentials(dec_cred):
                        yield (luid, cur_cred_type, domain, user_name,
                               secret_type, secret)
                else:
                    # Other credential package types are not parsed yet.
                    pass
def master_keys(self):
    """Yield decrypted DPAPI master keys from the lsasrv key cache.

    Rows are shaped like logons():
    (luid, info, domain, user, secret_type, secret).
    """
    cache = self.get_constant_object(
        'g_MasterKeyCacheList', target='_LIST_ENTRY')
    for entry in cache.list_of_type('_KIWI_MASTERKEY_CACHE_ENTRY', 'List'):
        luid = entry.LogonId
        # Skip entries with an all-zero logon id.
        if luid.HighPart.v() == 0 and luid.LowPart.v() == 0:
            continue

        yield (luid.Text, '', '', '', 'masterkey',
               binascii.hexlify(self.decrypt(entry.key.v())))
class Wdigest(pe_vtypes.BasicPEProfile):
    """A profile for wdigest.dll"""

    mimikatz_vtypes = [
        '_LIST_ENTRY', '_LSA_UNICODE_STRING', '_LUID',
        '_KIWI_WDIGEST_LIST_ENTRY', '_KIWI_GENERIC_PRIMARY_CREDENTIAL',
        '_KIWI_HARD_KEY']

    @classmethod
    def Initialize(cls, profile):
        """Install the mimikatz wdigest vtypes and overlays on *profile*."""
        # BUG fix: the super() arguments were reversed (super(cls, Wdigest)),
        # which fails for any subclass of Wdigest.
        super(Wdigest, cls).Initialize(profile)

        arch = profile.session.profile.metadata('arch')
        mimikatz_profile = profile.session.LoadProfile('mimikatz/%s' % arch)
        # Fail loudly (like Lsasrv.Initialize) instead of crashing with an
        # AttributeError on the vtypes access below.
        if not mimikatz_profile:
            raise IOError('Unable to load mimikatz profile from repository!')

        kwargs = {}
        for name in cls.mimikatz_vtypes:
            kwargs[name] = mimikatz_profile.vtypes[name]

        profile.add_types(kwargs)
        profile.add_overlay(mimikatz_common_overlays)

        # The credential structure follows the LUID at a small OS-dependent
        # offset: 8 bytes on NT6+, 12 bytes on NT5.
        kiwi_cred_offset = 8
        if profile.session.profile.metadata('version') < 6.0:
            kiwi_cred_offset = 12

        profile.add_overlay({
            '_KIWI_WDIGEST_LIST_ENTRY': [None, {
                'List': [0, ['_LIST_ENTRY']],
                'Cred': [lambda x: (x.LocallyUniqueIdentifier.obj_end +
                                    kiwi_cred_offset),
                         ['_KIWI_GENERIC_PRIMARY_CREDENTIAL']]
            }],
            '_KIWI_HARD_KEY': [None, {
                'data': lambda x: x.m('data').cast(
                    'String', term=None, length=x.cbSecret)
            }],
        })

    def logons(self):
        """Yield each _KIWI_WDIGEST_LIST_ENTRY on wdigest!l_LogSessList."""
        # TODO: if the symbols are wrong? Add a check for the LIST validity.
        logons = self.get_constant_object(
            'l_LogSessList', target='_LIST_ENTRY')
        for entry in logons.list_of_type('_KIWI_WDIGEST_LIST_ENTRY', 'List'):
            yield entry
class Livessp(pe_vtypes.BasicPEProfile):
    """A profile for livessp.dll"""

    mimikatz_vtypes = [
        '_LIST_ENTRY', '_LSA_UNICODE_STRING', '_LUID',
        '_KIWI_LIVESSP_LIST_ENTRY', '_KIWI_LIVESSP_PRIMARY_CREDENTIAL',
        '_KIWI_GENERIC_PRIMARY_CREDENTIAL']

    @classmethod
    def Initialize(cls, profile):
        """Install the mimikatz livessp vtypes and overlays on *profile*."""
        # BUG fix: the super() arguments were reversed (super(cls, Livessp)),
        # which fails for any subclass of Livessp.
        super(Livessp, cls).Initialize(profile)

        arch = profile.session.profile.metadata('arch')
        mimikatz_profile = profile.session.LoadProfile('mimikatz/%s' % arch)
        # Fail loudly (like Lsasrv.Initialize) instead of crashing with an
        # AttributeError on the vtypes access below.
        if not mimikatz_profile:
            raise IOError('Unable to load mimikatz profile from repository!')

        kwargs = {}
        for name in cls.mimikatz_vtypes:
            kwargs[name] = mimikatz_profile.vtypes[name]

        profile.add_types(kwargs)
        profile.add_overlay(mimikatz_common_overlays)
        profile.add_overlay({
            '_KIWI_LIVESSP_LIST_ENTRY': [None, {
                'List': [0, ['_LIST_ENTRY']],
            }]
        })

    def logons(self):
        """Yield one credential tuple per live session list entry.

        Rows are (luid, info, domain, user, secret_type,
        encrypted_secret); the secret still needs lsasrv to decrypt it.
        """
        logons = self.get_constant_object(
            'LiveGlobalLogonSessionList', target='_LIST_ENTRY')
        for entry in logons.list_of_type('_KIWI_LIVESSP_LIST_ENTRY', 'List'):
            yield (entry.LocallyUniqueIdentifier.Text,
                   '',
                   entry.suppCreds.dereference().credentials.Domaine.Value,
                   entry.suppCreds.dereference().credentials.UserName.Value,
                   'password',
                   entry.suppCreds.dereference().credentials.Password)
class Mimikatz(common.WindowsCommandPlugin):
    """Extract and decrypt passwords from the LSA Security Service."""

    name = 'mimikatz'

    table_header = [
        dict(name='LUID', width=20),
        dict(name='Type', width=16),
        dict(name='Sess', width=2),
        dict(name='SID', width=20),
        dict(name='Module', width=7),
        dict(name='Info', width=7),
        dict(name='Domain', width=16),
        dict(name='User', width=16),
        dict(name='SType', width=9),
        dict(name='Secret', width=32)
    ]

    def __init__(self, *args, **kwargs):
        super(Mimikatz, self).__init__(*args, **kwargs)

        # Track the following modules. If we do not have them in the profile
        # repository then try to get them directly from Microsoft.
        tracked = self.session.GetParameter(
            'autodetect_build_local_tracked') or []
        needed = set(['lsasrv', 'wdigest', 'livessp'])
        if not needed.issubset(tracked):
            needed.update(tracked)
            with self.session as session:
                session.SetParameter('autodetect_build_local_tracked', needed)

    def collect(self):
        """Yield one row per recovered secret from each lsass process."""
        cc = self.session.plugins.cc()
        for task in self.session.plugins.pslist(
                proc_regex='lsass.exe').filter_processes():
            cc.SwitchProcessContext(task)

            lsasrv = None
            # BUG fix: lsass_logons was previously unbound (NameError in
            # the wdigest/livessp branches) when lsasrv was unavailable.
            lsass_logons = {}
            lsasrv_module = self.session.address_resolver.GetModuleByName(
                'lsasrv')
            if lsasrv_module:
                lsasrv = lsasrv_module.profile
                if not isinstance(lsasrv, Lsasrv):
                    logging.warning('Unable to properly initialize lsasrv!')
                    lsasrv = None

            if lsasrv:
                lsasrv.init_crypto()
                lsass_logons = lsasrv.get_lsass_logons()
                for (luid, info, domain, user_name, secret_type,
                     secret) in lsasrv.logons(lsass_logons):
                    lsass_entry = lsass_logons.get(luid, obj.NoneObject())
                    yield (luid,
                           lsass_entry.LogonType,
                           lsass_entry.Session,
                           lsass_entry.pSid.deref(),
                           'msv',
                           info,
                           domain,
                           user_name,
                           secret_type,
                           secret)

            wdigest = None
            wdigest_module = self.session.address_resolver.GetModuleByName(
                'wdigest')
            if wdigest_module:
                wdigest = wdigest_module.profile
                if not isinstance(wdigest, Wdigest):
                    logging.warning('Unable to properly initialize wdigest.')
                    wdigest = None
                elif not wdigest.get_constant('l_LogSessList'):
                    logging.warning('wdigest not initialized, KO.')
                    wdigest = None

            if wdigest:
                for entry in wdigest.logons():
                    luid = entry.LocallyUniqueIdentifier.Text
                    lsass_entry = lsass_logons.get(luid, obj.NoneObject())
                    # BUG fix: without lsasrv there are no decryption keys;
                    # guard instead of raising AttributeError on None.
                    password = (lsasrv.decrypt(entry.Cred.Password.RawMax)
                                if lsasrv else obj.NoneObject())
                    yield (luid,
                           lsass_entry.LogonType,
                           lsass_entry.Session,
                           lsass_entry.pSid.deref(),
                           'wdigest',
                           '',
                           entry.Cred.Domaine.Value,
                           entry.Cred.UserName.Value,
                           'password',
                           password)

            livessp = None
            livessp_module = self.session.address_resolver.GetModuleByName(
                'livessp')
            if livessp_module:
                livessp = livessp_module.profile
                if not isinstance(livessp, Livessp):
                    logging.warning('Unable to properly initialize livessp.')
                    livessp = None
                elif not livessp.get_constant('LiveGlobalLogonSessionList'):
                    logging.warning('livessp not initialized, KO.')
                    livessp = None

            if livessp:
                for (luid, info, domain, user_name, secret_type,
                     enc_secret) in livessp.logons():
                    lsass_entry = lsass_logons.get(luid, obj.NoneObject())
                    # Same guard as above: decryption requires lsasrv.
                    secret = (lsasrv.decrypt(enc_secret)
                              if lsasrv else obj.NoneObject())
                    yield (luid,
                           lsass_entry.LogonType,
                           lsass_entry.Session,
                           lsass_entry.pSid.deref(),
                           'livessp',
                           info,
                           domain,
                           user_name,
                           secret_type,
                           secret)

            if lsasrv:
                for (luid, info, domain, user_name, secret_type,
                     secret) in lsasrv.master_keys():
                    lsass_entry = lsass_logons.get(luid, obj.NoneObject())
                    yield (luid,
                           lsass_entry.LogonType,
                           lsass_entry.Session,
                           lsass_entry.pSid.deref(),
                           'lsasrv',
                           info,
                           domain,
                           user_name,
                           secret_type,
                           secret)
"""Plugins to inspect the windows pools."""
__author__ = "Michael Cohen <scudette@google.com>"
# pylint: disable=protected-access
from rekall import obj
from rekall.plugins.windows import common
from rekall_lib import utils
# Some pool related utility functions.
def find_pool_alloc_before(session, offset, pool_tag):
    """Searches address_space for a pool allocation containing offset.

    Scans backwards within *offset*'s page for *pool_tag* and yields the
    start of each candidate allocation (just past its _POOL_HEADER) whose
    extent covers *offset*.  Then falls back to yielding raw page-aligned
    addresses up to 16 pages back, for big pool allocations.
    """
    # This method is only effective for small allocations right now because we
    # need to find a pool tag (so allocation size is limited to one page).
    # TODO: Extend this to big page pools.
    base_offset = offset & ~0xFFF
    data = session.kernel_address_space.read(base_offset, offset & 0xFFF)
    buffer_offset = offset % 0x1000

    # Prototype instance used only to learn the PoolTag field offset.
    pool_header_prototype = session.profile._POOL_HEADER()

    while 1:
        # Search backwards for the next tag occurrence before the last hit.
        buffer_offset = data.rfind(pool_tag, 0, buffer_offset)
        if buffer_offset == -1:
            break

        # Rebase the tag hit back to a _POOL_HEADER in the kernel AS.
        result = session.profile._POOL_HEADER(
            (base_offset + buffer_offset -
             pool_header_prototype.PoolTag.obj_offset),
            vm=session.kernel_address_space)

        end_of_allocation = result.obj_offset + result.size

        # Allocation encompasses the required offset.
        if end_of_allocation > offset:
            yield result.obj_end

    # After searching in small allocation, assume this is an allocation from
    # Big Pool and go back several pages.
    while base_offset > offset - 0x10000:
        yield base_offset
        base_offset -= 0x1000
class Pools(common.WindowsCommandPlugin):
    """Prints information about system pools.

    Ref:
    http://illmatics.com/Windows%208%20Heap%20Internals.pdf
    https://media.blackhat.com/bh-dc-11/Mandt/BlackHat_DC_2011_Mandt_kernelpool-wp.pdf
    https://immunityinc.com/infiltrate/archives/kernelpool_infiltrate2011.pdf
    http://gate.upm.ro/os/LABs/Windows_OS_Internals_Curriculum_Resource_Kit-ACADEMIC/WindowsResearchKernel-WRK/WRK-v1.2/base/ntos/ex/pool.c
    """

    name = "pools"

    # Lazily built address-range -> descriptor lookup; see
    # is_address_in_pool().
    _pool_lookup = None

    table_header = [
        dict(name="descriptor", width=20, style="address"),
        dict(name="type", width=20),
        dict(name="index", width=5),
        dict(name="size", width=10, align="r"),
        dict(name="start", style="address"),
        dict(name="end", style="address"),
        dict(name="comment")
    ]

    def find_non_paged_pool(self):
        """Yield non paged pool descriptors annotated with their VA range.

        The descriptors do not carry the pool's virtual address range, so
        it is recovered from OS-version-specific globals (XP, Win7 and
        Win10 layouts) and stored on each descriptor as
        PoolStart/PoolEnd/Comment before yielding.
        """
        vector_pool = self.profile.get_constant_object(
            "PoolVector",
            target="Array",
            target_args=dict(
                count=2,
                target="Pointer",
            )
        )

        resolver = self.session.address_resolver
        for desc in vector_pool[0].dereference_as(
                "Array",
                target_args=dict(
                    count=self.profile.get_constant_object(
                        "ExpNumberOfNonPagedPools", "unsigned int").v(),
                    target="_POOL_DESCRIPTOR",
                )
        ):
            # Windows XP uses these globals.
            start_va = resolver.get_constant_object(
                "nt!MmNonPagedPoolStart", "Pointer").v()
            end_va = resolver.get_constant_object(
                "nt!MmNonPagedPoolEnd", "Pointer").v()

            # Windows 7.
            if start_va == None:
                # First determine the addresses of non paged pool:
                # dis 'nt!MiReturnNonPagedPoolVa'
                start_va = resolver.get_constant_object(
                    "nt!MiNonPagedPoolStartAligned", "Pointer").v()
                end_va = resolver.get_constant_object(
                    "nt!MiNonPagedPoolEnd", "Pointer").v()

                if end_va == None:
                    bitmap = resolver.get_constant_object(
                        "nt!MiNonPagedPoolBitMap", "_RTL_BITMAP")
                    # ? MiNonPagedPoolVaBitMap
                    # We dont bother to check the bitmap itself, just
                    # consider the maximum size of the pool as the maximum
                    # allocated bitmap currently. This will overestimate
                    # the actual size somewhat.
                    end_va = start_va + bitmap.SizeOfBitMap * 8 * 0x1000

            # In windows 10 the start va moved to the MiState global.
            if start_va == None:
                mistate = resolver.get_constant_object(
                    "nt!MiState", "_MI_SYSTEM_INFORMATION")

                for node_index, node_info in enumerate(mistate.multi_m(
                        "Hardware.SystemNodeInformation",  # Win10 2016
                        "SystemNodeInformation"  # Win10 2015
                )):
                    start_va = node_info.NonPagedPoolFirstVa.v()
                    end_va = start_va

                    # Just go to the last bitmap
                    for bitmap in node_info.NonPagedBitMap:
                        end_va = max(end_va,
                                     start_va + bitmap.SizeOfBitMap * 8)

                    desc.PoolStart = start_va
                    desc.PoolEnd = end_va
                    desc.Comment = "Node %i" % node_index

                    yield desc
            else:
                desc.PoolStart = start_va
                desc.PoolEnd = end_va
                desc.Comment = ""

                yield desc

    def find_paged_pool(self):
        """Yield paged pool descriptors annotated with their VA range.

        Tries, in order: the XP-era MmPagedPoolStart/MiPagedPoolStart
        globals, the Win7 end-pointer plus allocation bitmap, the Win10
        dynamic bitmap in nt!MiState, and finally hard-coded x64
        addresses (32 bit Win7 paged pool is fragmented).
        """
        vector_pool = self.profile.get_constant_object(
            "PoolVector",
            target="Array",
            target_args=dict(
                count=2,
                target="Pointer",
            )
        )

        # Paged pool.
        paged_pool_start = self.profile.get_constant_object(
            "MmPagedPoolStart", "Pointer").v()
        if paged_pool_start == None:
            paged_pool_start = self.profile.get_constant_object(
                "MiPagedPoolStart", "Pointer").v()

        # NOTE(review): if paged_pool_start is a rekall NoneObject this
        # arithmetic propagates it; a plain None would raise - confirm
        # get_constant_object never returns raw None here.
        paged_pool_end = (
            paged_pool_start + self.profile.get_constant_object(
                "MmSizeOfPagedPoolInBytes", "address"))

        if paged_pool_start == None:
            # Windows 7 stores the end of the pool only
            # (nt!MiFreePagedPoolPages).
            paged_pool_end = self.profile.get_constant_object(
                "MmPagedPoolEnd", "Pointer").v()

            bitmap = self.profile.get_constant_object(
                "MmPagedPoolInfo",
                "_MM_PAGED_POOL_INFO").PagedPoolAllocationMap
            if bitmap:
                paged_pool_start = (
                    paged_pool_end - bitmap.SizeOfBitMap * 8 * 0x1000)
            else:
                paged_pool_start = (
                    paged_pool_end - self.profile.get_constant_object(
                        "MmSizeOfPagedPoolInBytes", "unsigned long long"))

        # Windows 10 build 10586.th2_release.160126-1819 uses dynamic Paged
        # Pool VA.
        if paged_pool_start == None:
            mistate = self.session.address_resolver.get_constant_object(
                "nt!MiState", "_MI_SYSTEM_INFORMATION")
            dynamic_paged_pool = mistate.multi_m(
                # 10586.th2_release.160126-1819
                "SystemVa.DynamicBitMapPagedPool",

                # 10074.fbl_impressive.150424-1350
                "DynamicBitMapPagedPool"
            )
            paged_pool_start = dynamic_paged_pool.BaseVa.v()
            paged_pool_end = (
                paged_pool_start +
                dynamic_paged_pool.MaximumSize * 0x1000)

        comment = ""
        if not paged_pool_start:
            if self.profile.metadata("arch") == "I386":
                # On Win7x86 the paged pool is distributed (see virt_map
                # plugin).
                comment = "Fragmented (See virt_map plugin)"
                paged_pool_start = paged_pool_end = None
            else:
                # Hard coded on Windows 7.
                # http://www.codemachine.com/article_x64kvas.html
                # http://www.reactos.org/wiki/Techwiki:Memory_Layout
                paged_pool_start = obj.Pointer.integer_to_address(
                    0xFFFFF8A000000000)
                paged_pool_end = obj.Pointer.integer_to_address(
                    0xFFFFF8CFFFFFFFFF)

        for desc in vector_pool[1].dereference_as(
                "Array",
                target_args=dict(
                    count=self.profile.get_constant_object(
                        "ExpNumberOfPagedPools", "unsigned int").v() + 1,
                    target="_POOL_DESCRIPTOR",
                )
        ):
            # Hard coded for 64 bit OS.
            desc.PoolStart = paged_pool_start
            desc.PoolEnd = paged_pool_end
            desc.Comment = comment

            yield desc

    def find_session_pool_descriptors(self):
        """Yield one paged pool descriptor per unique terminal session."""
        descriptors = {}
        for task in self.session.plugins.pslist().list_eprocess():
            desc = task.Session.PagedPool.cast(
                vm=task.get_process_address_space())
            if desc:
                desc.PoolStart = task.Session.PagedPoolStart.v()
                desc.PoolEnd = task.Session.PagedPoolEnd.v()
                desc.Comment = "Session %s" % task.Session.SessionId
                # Key by offset so each session is reported only once.
                descriptors[desc.obj_offset] = desc

        return list(descriptors.values())

    def find_all_pool_descriptors(self):
        """Finds all unique pool descriptors."""
        descriptors = set(self.find_non_paged_pool())
        descriptors.update(self.find_paged_pool())
        descriptors.update(self.find_session_pool_descriptors())
        return descriptors

    def is_address_in_pool(self, address):
        """Return the pool descriptor range containing *address*, if any."""
        if self._pool_lookup is None:
            # Build the interval lookup once and cache it.
            self._pool_lookup = utils.RangedCollection()
            for descriptor in self.find_all_pool_descriptors():
                self._pool_lookup.insert(descriptor.PoolStart,
                                         descriptor.PoolEnd,
                                         descriptor)

        return self._pool_lookup.get_containing_range(address)

    def collect(self):
        """Render one row per known pool descriptor."""
        descriptors = self.find_all_pool_descriptors()
        for desc in sorted(descriptors):
            yield dict(descriptor=desc,
                       type=desc.PoolType,
                       index=desc.PoolIndex,
                       size=desc.m("TotalBytes") or
                       desc.m("TotalPages") * 0x1000,
                       start=desc.PoolStart,
                       end=desc.PoolEnd,
                       comment=getattr(desc, "Comment", ""))
class PoolTracker(common.WindowsCommandPlugin):
    """Enumerate pool tag usage statistics."""

    name = "pool_tracker"

    table_header = [
        dict(name="tag", width=4),
        dict(name="nonpaged", width=20, align="r"),
        dict(name="nonpaged_bytes", width=10, align="r"),
        dict(name="paged", width=20, align="r"),
        dict(name="paged_bytes", width=10, align="r"),
    ]

    def collect(self):
        """Walk nt!PoolTrackTable and report per-tag allocation counters."""
        table_size = self.profile.get_constant_object(
            "PoolTrackTableSize", "unsigned int").v()
        trackers = self.profile.get_constant_object(
            "PoolTrackTable",
            target="Pointer",
            target_args=dict(
                target="Array",
                target_args=dict(
                    count=table_size,
                    target="_POOL_TRACKER_TABLE",
                )
            )
        )

        for tracker in trackers:
            # Unused slots have a zero key.
            if tracker.Key == 0:
                continue

            self.session.report_progress()

            nonpaged_live = tracker.NonPagedAllocs - tracker.NonPagedFrees
            paged_live = tracker.PagedAllocs - tracker.PagedFrees
            yield (# Show the pool tag as ascii.
                tracker.Key.cast("String", length=4),
                "%s (%s)" % (tracker.NonPagedAllocs, nonpaged_live),
                tracker.NonPagedBytes,
                "%s (%s)" % (tracker.PagedAllocs, paged_live),
                tracker.PagedBytes,
            )
"""Miscelaneous information gathering plugins."""
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
__author__ = "Michael Cohen <scudette@google.com>"
import hashlib
import itertools
# pylint: disable=protected-access
from rekall import obj
from rekall.plugins import core
from rekall.plugins.overlays import basic
from rekall.plugins.windows import common
from rekall_lib import utils
class WinPhysicalMap(common.WindowsCommandPlugin):
    """Prints the boot physical memory map."""

    __name = "phys_map"

    table_header = [
        dict(name="phys_start", style="address"),
        dict(name="phys_end", style="address"),
        dict(name="pages"),
    ]

    def collect(self):
        """Walk nt!MmPhysicalMemoryBlock and yield each physical run."""
        page_size = 0x1000
        descriptor = self.profile.get_constant_object(
            "MmPhysicalMemoryBlock",
            target="Pointer",
            target_args=dict(
                target="_PHYSICAL_MEMORY_DESCRIPTOR",
            ))

        for run in descriptor.Run:
            start = run.BasePage * page_size
            yield (start,
                   start + run.PageCount * page_size,
                   run.PageCount)
class WindowsSetProcessContext(core.SetProcessContextMixin,
                               common.WinProcessFilter):
    """A cc plugin for windows.

    Combines the generic process-context-switching mixin with the
    Windows process filter; all behavior comes from the bases.
    """
class WinVirtualMap(common.WindowsCommandPlugin):
    """Prints the Windows Kernel Virtual Address Map.

    Windows allocates virtual address ranges to various purposes. This
    plugin deduces the virtual address map.

    On 32 bit windows, the kernel virtual address space can be managed
    dynamically. This plugin shows each region and what it is used for.

    Note that on 64 bit windows the address space is large enough to not worry
    about it. In that case, the offsets and regions are hard coded.

    http://www.woodmann.com/forum/entry.php?219-Using-nt!_MiSystemVaType-to-navigate-dynamic-kernel-address-space-in-Windows7

    The kernel debugger shows the virtual address map using the !vm
    extension. For example:

    > !vm 20
    System Region               Base Address    NumberOfBytes
    NonPagedPool          : ffff810000000000     100000000000
    Session               : ffff910000000000       8000000000
    SpecialPoolPaged      : ffff978000000000       8000000000
    SystemCache           : ffff988000000000     100000000000
    SystemPtes            : ffffae8000000000     100000000000
    UltraZero             : ffffc00000000000     100000000000
    PageTables            : ffffd40000000000       8000000000
    PagedPool             : ffffd48000000000     100000000000
    SpecialPoolNonPaged   : ffffe50000000000       8000000000
    PfnDatabase           : ffffe80000000000      38000000000
    Cfg                   : ffffebdd84214da8      28000000000
    HyperSpace            : ffffee8000000000      10000000000
    SystemImages          : fffff80000000000       8000000000

    Rekall uses this information to refine its operations to increase
    both efficiency and correctness. For example, when scanning
    objects which should exist in non paged pools, by default, Rekall
    only examines the NonPagedPool region. This speeds up operations
    as well as reducing false positives from unrelated memory regions.

    Later kernel version (Windows 10+) use a global nt!MiVisibleState
    to maintain state information, including the virtual address
    map. This plugin implements support for various versions.
    """

    name = "virt_map"

    table_header = [
        dict(name="region", hidden=True),
        dict(name="type", width=30),
        dict(name="virt_start", style="address"),
        dict(name="length", style="address"),
        dict(name="virt_end", style="address"),
    ]

    def collect(self):
        """Merge all collection strategies, deduplicated by address range."""
        self.rows = 0
        for x in utils.Deduplicate(
                itertools.chain(
                    self.collect_from_MiSystemVaType(),
                    self.collect_from_MiVisibleState(),
                    self.collect_from_pools()),
                key=lambda x: (int(x["virt_start"]), int(x["virt_end"]))):
            self.rows += 1
            yield x

    def collect_from_pools(self):
        """Fallback returns info from the pools plugin."""
        # Only used when the strategies before it produced nothing.
        if self.rows > 0:
            return

        pools_plugin = self.session.plugins.pools()
        for pool_desc in pools_plugin.collect():
            desc = pool_desc["descriptor"]
            yield dict(type=desc.PoolType,
                       virt_start=desc.PoolStart,
                       virt_end=desc.PoolEnd,
                       length=desc.PoolEnd - desc.PoolStart)

    def collect_from_MiVisibleState(self):
        """Collect regions from the Win10+ nt!MiVisibleState global."""
        visible_state = self.session.profile.get_constant_object(
            "MiVisibleState", target="Pointer", target_args=dict(
                target="_MI_VISIBLE_STATE"))

        region_types = self.session.profile.get_enum(
            "_MI_ASSIGNED_REGION_TYPES")

        # Types are described in the _MI_SYSTEM_VA_TYPE enum and are
        # listed in the vector of regions.
        for i, region in enumerate(visible_state.SystemVaRegions):
            if region.NumberOfBytes > 0:
                yield dict(
                    type=utils.MaybeConsume(
                        "AssignedRegion", region_types.get(i, "Unknown")),
                    region=region,
                    virt_start=region.BaseAddress.v(),
                    length=region.NumberOfBytes,
                    virt_end=region.BaseAddress.v() + region.NumberOfBytes)

    def collect_from_MiSystemVaType(self):
        """Collect regions from the 32 bit dynamic nt!MiSystemVaType table."""
        system_va_table = self.profile.get_constant_object(
            "MiSystemVaType",
            target="Array",
            target_args=dict(
                target="Enumeration",
                target_args=dict(
                    target="byte",
                    enum_name="_MI_SYSTEM_VA_TYPE"
                ),
            )
        )
        if system_va_table == None:
            return

        system_range_start = self.profile.get_constant_object(
            "MiSystemRangeStart", "unsigned int")

        # The size varies on PAE profiles.
        # BUG fix: with "from __future__ import division" the old "/"
        # produced a float and range() rejects a float step - use floor
        # division (the division is exact for 4/8 byte PTEs).
        va_table_size = 0x1000 * 0x1000 // self.profile.get_obj_size("_MMPTE")

        # Coalesce consecutive table entries of the same type into ranges.
        range_type = range_start = range_length = 0

        for offset in range(int(system_range_start), 0xffffffff,
                            va_table_size):
            table_index = (offset - system_range_start) // va_table_size
            page_type = system_va_table[table_index]
            if page_type != range_type:
                if range_type:
                    yield dict(virt_start=range_start,
                               virt_end=range_start + range_length,
                               type=utils.SmartUnicode(range_type))

                range_type = page_type
                range_start = offset
                range_length = va_table_size
            else:
                range_length += va_table_size
class Objects(common.WindowsCommandPlugin):
    """Displays all object Types on the system."""

    name = "object_types"

    table_header = [
        dict(name="type", style="address"),
        dict(name="index", align="r", width=5),
        dict(name="NumberOfObjects", align="r", width=15),
        dict(name="PoolType", width=20),
        dict(name="name")
    ]

    def object_types(self):
        """Yield every allocated _OBJECT_TYPE.

        The size of the ObpObjectTypes array is hard coded per OS.  We
        recover it at runtime: the first entry is the "Type" type object
        itself, and its TotalNumberOfObjects counter says how many type
        objects (hence array slots) were allocated.
        """
        type_table = self.profile.get_constant_object(
            "ObpObjectTypes",
            target="Array", target_args=dict(
                target="Pointer",
                count=0,
                target_args=dict(
                    target="_OBJECT_TYPE")
            )
        )

        type_table.count = type_table[0].TotalNumberOfObjects

        for entry in type_table:
            if entry:
                yield entry

    def collect(self):
        """Render one row per object type."""
        for obj_type in self.object_types():
            yield dict(type=obj_type,
                       index=obj_type.Index,
                       NumberOfObjects=obj_type.TotalNumberOfObjects,
                       PoolType=obj_type.TypeInfo.PoolType,
                       name=obj_type.Name)
class ImageInfo(common.WindowsCommandPlugin):
    """List overview information about this image."""

    name = "imageinfo"

    table_header = [
        dict(name="key", width=20),
        dict(name="value")
    ]

    @staticmethod
    def KeQueryTimeIncrement(profile):
        """Return the time of each tick (float).

        dis "nt!KeQueryTimeIncrement"
        ------ nt!KeQueryTimeIncrement ------
        MOV EAX, [RIP+0x24af66] 0x26161 nt!KeMaximumIncrement
        RET
        """
        # KeMaximumIncrement is expressed in 100ns units.
        return profile.get_constant_object(
            "KeMaximumIncrement", target="unsigned int") * 100e-9

    def GetBootTime(self, kuser_shared):
        """Returns the number of seconds since boot.
        Ref:
        KeQueryTickCount * KeQueryTimeIncrement

        reactos/include/ddk/wdm.h:
        #define SharedTickCount (KI_USER_SHARED_DATA + 0x320)

        #define KeQueryTickCount(CurrentCount) \
        *(ULONG64*)(CurrentCount) = *(volatile ULONG64*)SharedTickCount
        """
        current_tick_count = (
            int(kuser_shared.TickCountQuad) or  # Win7
            int(kuser_shared.TickCountLow))     # WinXP

        return current_tick_count * self.KeQueryTimeIncrement(self.profile)

    def collect(self):
        """Yield (key, value) overview rows for the image."""
        yield ("Kernel DTB", "%#x" % self.kernel_address_space.dtb)

        # Fix: the loop locals previously shadowed the builtin "type".
        for desc, constant_name, type_name in (
                ("NT Build", "NtBuildLab", "String"),
                ("NT Build Ex", "NtBuildLabEx", "String"),
                ("Signed Drivers", "g_CiEnabled", "bool"),
        ):
            yield dict(
                key=desc,
                value=self.profile.get_constant_object(
                    constant_name, target=type_name))

        # Print kuser_shared things.
        kuser_shared = self.profile.get_constant_object(
            "KI_USER_SHARED_DATA", "_KUSER_SHARED_DATA")

        yield ("Time (UTC)", kuser_shared.SystemTime)

        # The bias is given in windows file time (i.e. in 100ns ticks).
        # Dividing by a float makes old_div equivalent to plain "/".
        bias = kuser_shared.TimeZoneBias.cast("long long") / 1e7
        yield ("Time (Local)", kuser_shared.SystemTime.display(
            utc_shift=-bias))

        yield ("Sec Since Boot", self.GetBootTime(kuser_shared))
        yield ("NtSystemRoot", kuser_shared.NtSystemRoot)
class WinImageFingerprint(common.AbstractWindowsParameterHook):
    """Fingerprint the current image.

    This parameter tries to get something unique about the image quickly. The
    idea is that two different images (even of the same system at different
    points in time) will have very different fingerprints. The fingerprint is
    used as a key to cache persistent information about the system.

    Live systems can not have a stable fingerprint and so return a NoneObject()
    here.

    We return a list of tuples:
    (physical_offset, expected_data)

    The list uniquely identifies the image. If one were to read all physical
    offsets and find the expected_data at these locations, then we have a very
    high level of confidence that the image is unique and matches the
    fingerprint.
    """
    name = "image_fingerprint"

    def calculate(self):
        """Build the fingerprint dict: sha1 hash plus the raw probe list.

        NOTE: the probe order is part of the hash - do not reorder the
        appends below or cached fingerprints will change.
        """
        if not self.session.physical_address_space:
            return None

        if self.session.physical_address_space.volatile:
            return obj.NoneObject("No fingerprint for volatile image.")

        result = []
        profile = self.session.profile
        phys_as = self.session.physical_address_space
        address_space = self.session.GetParameter("default_address_space")

        # The build strings are stable for a given kernel build.
        label = profile.get_constant_object("NtBuildLab", "String")
        result.append((address_space.vtop(label.obj_offset), label.v()))

        label = profile.get_constant_object("NtBuildLabEx", "String")
        result.append((address_space.vtop(label.obj_offset), label.v()))

        kuser_shared = profile.get_constant_object(
            "KI_USER_SHARED_DATA", "_KUSER_SHARED_DATA")

        # System time and tick count pin the image to a point in time.
        system_time_offset = address_space.vtop(
            kuser_shared.SystemTime.obj_offset)
        result.append((system_time_offset,
                       phys_as.read(system_time_offset, 8)))

        tick_time_offset = address_space.vtop(
            kuser_shared.multi_m("TickCountQuad", "TickCountLow").obj_offset)
        result.append((tick_time_offset, phys_as.read(tick_time_offset, 8)))

        # List of processes should also be pretty unique.
        for task in self.session.plugins.pslist().filter_processes():
            name = task.name.cast("String", length=30)
            task_name_offset = address_space.vtop(name.obj_offset)

            # Read the raw data for the task name. Usually the task name is
            # encoded in utf8 but then we might not be able to compare it
            # exactly - we really want bytes here.
            # NOTE(review): name.v() goes through the String type rather
            # than reading raw bytes as described above - confirm.
            result.append((task_name_offset, name.v()))

        return dict(
            hash=hashlib.sha1(str(result).encode("utf8")).hexdigest(),
            tests=result)
class ObjectTree(common.WindowsCommandPlugin):
"""Visualize the kernel object tree.
Ref:
http://msdn.microsoft.com/en-us/library/windows/hardware/ff557762(v=vs.85).aspx
"""
name = "object_tree"
__args = [
dict(name="type_regex", default=".", type="RegEx",
help="Filter the type of objects shown.")
]
table_header = [
dict(name="_OBJECT_HEADER", style="address"),
dict(name="type", width=20),
dict(name="name", type="TreeNode"),
]
def GetObjectByName(self, path):
root = self.session.GetParameter("object_tree")
for component in utils.SplitPath(path):
root = root["Children"][component]
return self.profile.Object(type_name=root["type_name"],
offset=root["offset"])
def FileNameWithDrive(self, path):
"""Tries to resolve the path back to something with a drive letter."""
# First normalize the path.
try:
path = self.ResolveSymlinks(path)
for prefix, drive_letter in self.session.GetParameter(
"drive_letter_device_map").items():
prefix = self.ResolveSymlinks(prefix)
if path.startswith(prefix):
return drive_letter + path[len(prefix):]
# This will be triggered if the path does not resolve to anything in the
# object tree.
except KeyError:
return path
def ResolveSymlinks(self, path):
"""Takes a path and resolves any intermediate symlinks in it.
Returns:
A direct path to the object.
"""
components = path.split("\\")
return "\\".join(self._parse_path_components(components))
def _parse_path_components(self, components):
node = self.session.GetParameter("object_tree")
new_components = []
for i, component in enumerate(components):
if not component:
continue
if component == "??":
component = "GLOBAL??"
next_node = utils.CaseInsensitiveDictLookup(
component, node["Children"])
# If the first component is not found, search for it in the global
# namespace.
if next_node is None and i == 0 and component != "GLOBAL??":
return self._parse_path_components(["GLOBAL??"] + components)
if next_node is None:
raise KeyError(
"component %r not found at %s" % (
component, "\\".join(new_components)))
elif next_node["type"] == "SymbolicLink":
object_header = self.session.profile._OBJECT_HEADER(
next_node["offset"])
target = object_header.Object.LinkTarget.v()
# Append the next components to the target and re-parse
return self._parse_path_components(
target.split("\\") + components[i+1:])
elif next_node["type"] != "Directory":
return new_components + components[i:]
new_components.append(component)
node = next_node
return new_components
def _collect_directory(self, directory, seen, depth=0):
    """Recursively yield one row per object header under a directory.

    Args:
      directory: An object directory whose list() yields _OBJECT_HEADERs.
      seen: Currently unused by this implementation (passed through to
        recursive calls only - kept for interface compatibility).
      depth: Recursion depth, reported in each row.

    Yields:
      Dicts with _OBJECT_HEADER, type, name and depth keys for headers
      whose type matches the type_regex plugin argument.
    """
    for obj_header in utils.Deduplicate(directory.list()):
        name = str(obj_header.NameInfo.Name)
        obj_type = str(obj_header.get_object_type())

        # Symlinks are decorated with their target and creation time.
        if obj_type == "SymbolicLink":
            name += u"-> %s (%s)" % (obj_header.Object.LinkTarget,
                                     obj_header.Object.CreationTime)

        if self.plugin_args.type_regex.search(obj_type):
            yield dict(_OBJECT_HEADER=obj_header, type=obj_type,
                       name=name, depth=depth)

        # Recurse into sub-directories regardless of the type filter so
        # matching objects at any depth are found.
        if obj_type == "Directory":
            for x in self._collect_directory(
                    obj_header.Object, seen, depth=depth+1):
                yield x
def collect(self):
    """Yield every matching row found under the object tree root."""
    # Start the recursive walk at the root of the object namespace.
    root_directory = self.GetObjectByName("/")
    for row in self._collect_directory(root_directory, set()):
        yield row
class WindowsTimes(common.WindowsCommandPlugin):
    """Report the current time as known to the kernel."""

    name = "times"

    table_header = [
        dict(name="Times"),
    ]

    def collect(self):
        """Yield a single row with the kernel time, boot time and uptime."""
        # The shared user data page exports the kernel's notion of time.
        shared_data = self.session.address_resolver.get_constant_object(
            "nt!KI_USER_SHARED_DATA", "_KUSER_SHARED_DATA")

        uptime = self.session.plugins.imageinfo().GetBootTime(shared_data)
        now = shared_data.SystemTime

        # Boot time is the current kernel time minus the uptime.
        boot = basic.UnixTimeStamp(value=now - uptime, session=self.session)

        yield [utils.AttributeDict(now=now, boot=boot, uptime=uptime)]
from builtins import str
from rekall import plugin
from rekall import scan
from rekall.plugins.windows import common
from rekall_lib import utils
class Modules(common.WindowsCommandPlugin):
    """Print list of loaded kernel modules."""

    __name = "modules"

    __args = [
        dict(name="name_regex", type="RegEx",
             help="Filter module names by this regex.")
    ]

    table_header = [
        dict(name="_LDR_DATA_TABLE_ENTRY", style="address"),
        dict(name="name", width=20),
        dict(name="base", style="address"),
        dict(name="size", style="address"),
        dict(name="path")
    ]

    def lsmod(self):
        """A Generator for modules (uses _KPCR symbols)."""
        for module in self.session.GetParameter(
                "PsLoadedModuleList").list_of_type(
                    "_LDR_DATA_TABLE_ENTRY", "InLoadOrderLinks"):

            # Skip modules which do not match.
            if (self.plugin_args.name_regex and
                    not self.plugin_args.name_regex.search(
                        utils.SmartUnicode(module.FullDllName))):
                continue

            yield module

    def addresses(self):
        """Returns a sorted list of module base addresses.

        FIX: this previously returned sorted(self.mod_lookup.keys()), but
        mod_lookup is never initialized anywhere in this class, so every
        call raised AttributeError. The addresses are now derived directly
        from lsmod().
        """
        return sorted(module.DllBase.v() for module in self.lsmod())

    def collect(self):
        object_tree_plugin = self.session.plugins.object_tree()
        for module in self.lsmod():
            yield dict(_LDR_DATA_TABLE_ENTRY=module,
                       name=module.BaseDllName,
                       base=module.DllBase,
                       size=module.SizeOfImage,
                       path=object_tree_plugin.FileNameWithDrive(
                           module.FullDllName.v()))
class RSDSScanner(scan.BaseScanner):
    """Scan for RSDS objects.

    "RSDS" is the CodeView signature which prefixes the PDB debug
    information record in PE binaries; hits are overlaid with a
    CV_RSDS_HEADER by callers.
    """

    checks = [
        ("StringCheck", dict(needle=b"RSDS"))
    ]
class ModVersions(Modules):
    """Try to determine the versions for all kernel drivers."""

    __name = "version_modules"

    table_header = [
        dict(name="offset_v", style="address"),
        dict(name="name", width=20),
        dict(name="guid", width=33),
        dict(name="pdb")
    ]

    def ScanVersions(self):
        """Scan each loaded module's image for RSDS debug records.

        Yields:
          (module, rsds, guid) tuples where rsds is a CV_RSDS_HEADER and
          guid is the GUID/Age string identifying the matching PDB.
        """
        pe_profile = self.session.LoadProfile("pe")
        scanner = RSDSScanner(address_space=self.kernel_address_space,
                              session=self.session)

        for module in self.lsmod():
            for hit in scanner.scan(offset=int(module.DllBase),
                                    maxlen=int(module.SizeOfImage)):

                rsds = pe_profile.CV_RSDS_HEADER(
                    offset=hit, vm=self.kernel_address_space)
                # The GUID concatenated with the Age uniquely identifies
                # the PDB file for this binary.
                guid = "%s%x" % (rsds.GUID.AsString, rsds.Age)
                yield module, rsds, guid

    def collect(self):
        for module, rsds, guid in self.ScanVersions():
            yield dict(offset_v=rsds,
                       name=module.BaseDllName,
                       guid=guid,
                       pdb=rsds.Filename)
class VersionScan(plugin.PhysicalASMixin, plugin.TypedProfileCommand,
                  plugin.Command):
    """Scan the physical address space for RSDS versions."""

    __name = "version_scan"

    PHYSICAL_AS_REQUIRED = False

    __args = [
        dict(name="name_regex", type="RegEx", default=".",
             help="Filter module names by this regex."),

        dict(name="scan_filename", required=False, positional=True,
             help="Optional file to scan. If not specified "
             "we scan the physical address space.")
    ]

    table_header = [
        dict(name="offset", style="address"),
        dict(name="guid", width=33),
        dict(name="pdb", width=30)
    ]

    def __init__(self, *args, **kwargs):
        """Optionally replace the physical AS with one built from a file."""
        super(VersionScan, self).__init__(*args, **kwargs)
        if self.plugin_args.scan_filename is not None:
            load_as = self.session.plugins.load_as()
            self.physical_address_space = load_as.GuessAddressSpace(
                filename=self.plugin_args.scan_filename)

    def ScanVersions(self):
        """Scans the physical AS for RSDS structures.

        Yields:
          (rsds, guid) pairs, deduplicated by GUID/Age and filtered by
          the name_regex plugin argument.
        """
        # Track GUIDs already reported so each PDB appears only once.
        guids = set()
        pe_profile = self.session.LoadProfile("pe")
        scanner = RSDSScanner(address_space=self.physical_address_space,
                              session=self.session, profile=pe_profile)

        for hit in scanner.scan(0, self.physical_address_space.end()):
            rsds = pe_profile.CV_RSDS_HEADER(
                offset=hit, vm=self.physical_address_space)

            # The filename must end with pdb for valid pdb.
            if not str(rsds.Filename).endswith("pdb"):
                continue

            guid = rsds.GUID_AGE
            if guid not in guids:
                guids.add(guid)

                if self.plugin_args.name_regex.search(str(rsds.Filename)):
                    yield rsds, guid

    def collect(self):
        for rsds, guid in self.ScanVersions():
            yield dict(offset=rsds, guid=guid, pdb=rsds.Filename)
class UnloadedModules(common.WindowsCommandPlugin):
    """Print a list of recently unloaded modules.

    Ref:
    http://volatility-labs.blogspot.de/2013/05/movp-ii-22-unloaded-windows-kernel_22.html
    """

    name = "unloaded_modules"

    table_header = [
        dict(name="name", width=20),
        dict(name="start", style="address"),
        dict(name="end", style="address"),
        dict(name="time")
    ]

    def collect(self):
        """Yield (name, start, end, time) for each unloaded driver entry."""
        # Pre-Windows-10: a global array sized by MmLastUnloadedDriver.
        unloaded_table = self.profile.get_constant_object(
            "MmUnloadedDrivers",
            target="Pointer",
            target_args=dict(
                target="Array",
                target_args=dict(
                    target="_UNLOADED_DRIVER",
                    count=self.profile.get_constant_object(
                        "MmLastUnloadedDriver", "unsigned int").v(),
                )
            )
        )

        # In Windows 10 this has moved to the MiState.
        # NOTE: "== None" (not "is None") is deliberate - rekall returns a
        # NoneObject which compares equal to None but is not None itself.
        if unloaded_table == None:
            mistate = self.profile.get_constant_object(
                "MiState", target="_MI_SYSTEM_INFORMATION")

            # NOTE(review): target is "_UNLOADED_DRIVERS" (plural) here but
            # "_UNLOADED_DRIVER" above - confirm against the Win10 profile
            # whether both type names exist.
            unloaded_table = mistate.multi_m(
                "UnloadedDrivers",
                "Vs.UnloadedDrivers"
            ).dereference_as(
                "Array",
                target_args=dict(
                    target="_UNLOADED_DRIVERS",
                    count=mistate.LastUnloadedDriver)
            )

        for driver in unloaded_table:
            yield (driver.Name,
                   driver.StartAddress.v(),
                   driver.EndAddress.v(),
                   driver.CurrentTime)
from builtins import zip
__author__ = "Michael Cohen <scudette@gmail.com>"
from rekall import plugin
from rekall import testlib
from rekall.plugins.overlays.windows import pe_vtypes
from rekall.plugins.windows import common
class PEInfo(plugin.TypedProfileCommand, plugin.Command):
    """Print information about a PE binary."""

    __name = "peinfo"

    __args = [
        dict(name="image_base", type="SymbolAddress", positional=True,
             help="The base of the image."),

        dict(name="executable", positional=True, required=False,
             help="If provided we create an address space "
             "from this file."),

        dict(name="address_space", default=None,
             help="The address space to use.")
    ]

    def __init__(self, *args, **kwargs):
        """Dump a PE binary from memory.

        Status is shown for each exported function:

          - M: The function is mapped into memory.

        Args:
          image_base: The address of the image base (dos header). Can be a
            module name.

          address_space: The address space which contains the PE image. Can
            be specified as "K" or "P".

          filename: If provided we create an address space from this file.
        """
        super(PEInfo, self).__init__(*args, **kwargs)
        if (self.plugin_args.executable is None and
                self.plugin_args.address_space is None):
            # Resolve the correct address space. This allows the address space
            # to be specified from the command line (e.g. "P")
            load_as = self.session.plugins.load_as(session=self.session)
            self.plugin_args.address_space = load_as.ResolveAddressSpace(
                self.plugin_args.address_space)

        if self.plugin_args.image_base is None:
            self.plugin_args.image_base = self.session.GetParameter(
                "default_image_base", 0)

        # Helper which knows how to parse PE structures from the AS.
        self.pe_helper = pe_vtypes.PE(
            address_space=self.plugin_args.address_space, session=self.session,
            filename=self.plugin_args.executable,
            image_base=self.plugin_args.image_base)

        # Used to show a short disassembly of each IAT entry.
        self.disassembler = self.session.plugins.dis(
            address_space=self.pe_helper.vm,
            session=self.session, length=4)

    def render(self, renderer):
        """Print information about a PE file from memory."""
        # Get our helper object to parse the PE file.
        renderer.table_header([('Attribute', 'attribute', '<30'),
                               ('Value', 'value', '60')])

        for field in ["Machine", "TimeDateStamp", "Characteristics"]:
            renderer.table_row(
                field,
                getattr(self.pe_helper.nt_header.FileHeader, field))

        # PDB debug information (CodeView RSDS record).
        renderer.table_row("GUID/Age", self.pe_helper.RSDS.GUID_AGE)
        renderer.table_row("PDB", self.pe_helper.RSDS.Filename)

        for field in ["MajorOperatingSystemVersion",
                      "MinorOperatingSystemVersion",
                      "MajorImageVersion",
                      "MinorImageVersion",
                      "MajorSubsystemVersion",
                      "MinorSubsystemVersion"]:
            renderer.table_row(
                field,
                getattr(self.pe_helper.nt_header.OptionalHeader, field))

        renderer.format(
            "\nSections (Relative to {0:addrpad}):\n",
            self.pe_helper.image_base)

        renderer.table_header([('Perm', 'perm', '4'),
                               ('Name', 'name', '<8'),
                               ('Raw Off', 'raw', '[addrpad]'),
                               ('VMA', 'vma', '[addrpad]'),
                               ('Size', 'size', '[addrpad]')])

        for section in self.pe_helper.nt_header.Sections:
            renderer.table_row(section.execution_flags, section.Name,
                               section.PointerToRawData,
                               section.VirtualAddress,
                               section.SizeOfRawData)

        renderer.format("\nData Directories:\n")
        renderer.table_header([('', 'name', '<40'),
                               ('VMA', 'vma', '[addrpad]'),
                               ('Size', 'size', '[addrpad]')])

        for d in self.pe_helper.nt_header.OptionalHeader.DataDirectory:
            renderer.table_row(d.obj_name, d.VirtualAddress, d.Size)

        # Export/Import directory only if verbosity is higher than 1.
        if self.plugin_args.verbosity >= 1:
            renderer.format("\nImport Directory (Original):\n")
            renderer.table_header([('Name', 'name', '<50'),
                                   ('Mapped Function', 'function', '60'),
                                   ('Ord', 'ord', '5')])

            resolver = self.session.address_resolver

            # Merge the results from both the Import table and the
            # IAT. Sometimes the original Import Table is no longer mapped into
            # memory (since its usually only used by the loader in order to
            # build the IAT). In this case we can show something sensible using
            # the address resolver.
            for (dll, name, ordinal), (_, func, _) in zip(
                    self.pe_helper.ImportDirectory(),
                    self.pe_helper.IAT()):
                renderer.table_row(
                    u"%s!%s" % (dll, name or ""),
                    resolver.format_address(func.v()),
                    ordinal)

            if self.plugin_args.verbosity >= 2:
                renderer.format("\nImport Address Table:\n")
                renderer.table_header(
                    [('Name', 'name', '<20'),
                     ('Address', 'address', '[addrpad]'),
                     ('Disassembly', 'disassembly', '30')])

                for name, function, ordinal in self.pe_helper.IAT():
                    disassembly = []

                    for x in self.disassembler.disassemble(function):
                        disassembly.append(x[-1].strip())

                    renderer.table_row(name, function, "\n".join(disassembly))

            renderer.format("\nExport Directory:\n")
            renderer.table_header([('Entry', 'entry', '[addrpad]'),
                                   ('Stat', 'status', '4'),
                                   ('Ord', 'ord', '5'),
                                   ('Name', 'name', '')])

            resolver = self.session.address_resolver

            for _ in self.pe_helper.ExportDirectory():
                dll, function, name, ordinal = _
                # 'M' indicates the exported function is mapped in memory.
                status = 'M' if function.dereference() else "-"

                # Resolve the exported function through the symbol resolver.
                symbol_name = resolver.format_address(function)
                if symbol_name:
                    symbol_name = u"%s!%s (%s)" % (
                        dll, name or "", ", ".join(symbol_name))
                else:
                    symbol_name = u"%s!%s" % (dll, name or "")

                renderer.table_row(
                    function,
                    status,
                    ordinal,
                    symbol_name)

        renderer.format("Version Information:\n")
        renderer.table_header([('key', 'key', '<20'),
                               ('value', 'value', '')])

        for k, v in self.pe_helper.VersionInformation():
            renderer.table_row(k, v)
class TestPEInfo(testlib.SimpleTestCase):
    """Regression test which runs "peinfo nt" against the test image."""

    PARAMETERS = dict(
        commandline="peinfo nt"
    )
class ProcInfo(common.WinProcessFilter):
    """Dump detailed information about a running process."""

    __name = "procinfo"

    def render(self, renderer):
        """Render environment and PE information for each selected process."""
        for task in self.filter_processes():
            renderer.section()
            renderer.format("Pid: {0} {1}\n",
                            task.UniqueProcessId, task.ImageFileName)

            task_address_space = task.get_process_address_space()
            if not task_address_space:
                renderer.format("Peb Not mapped.\n")
                continue

            renderer.format("\nProcess Environment\n")
            # The environment is just a sentinel terminated array of strings.
            for line in task.Peb.ProcessParameters.Environment:
                renderer.format(" {0}\n", line)

            renderer.format("\nPE Infomation\n")
            # Switch into the process context so the PEB and image are
            # readable, restoring the previous context on exit.
            cc = self.session.plugins.cc()
            with cc:
                cc.SwitchProcessContext(task)

                # Parse the PE file of the main process's executable.
                pe = PEInfo(session=self.session,
                            image_base=task.Peb.ImageBaseAddress)
                pe.render(renderer)
class TestProcInfo(testlib.SimpleTestCase):
    """Regression test which runs "procinfo" on the test image's pids."""

    PARAMETERS = dict(
        commandline="procinfo %(pids)s"
    )
# pylint: disable=protected-access
import os
from rekall.plugins.windows import common
from rekall.plugins import core
from rekall import plugin
from rekall_lib import utils
class PEDump(common.WindowsCommandPlugin):
    """Dump a PE binary from memory."""

    __name = "pedump"

    __args = [
        dict(name="image_base", type="SymbolAddress", required=False,
             positional=True,
             help="The address of the image base (dos header)."),

        dict(name="out_file",
             help="The file name to write."),

        dict(name="address_space", type="AddressSpace",
             help="The address space to use."),

        dict(name="out_fd",
             help="A file like object to write the output.")
    ]

    def __init__(self, *args, **kwargs):
        super(PEDump, self).__init__(*args, **kwargs)
        # The "pe" profile provides the PE structure definitions.
        self.pe_profile = self.session.LoadProfile("pe")

    def WritePEFile(self, fd=None, address_space=None, image_base=None):
        """Dumps the PE file found into the filelike object.

        Note that this function can be used for any PE file (e.g. executable,
        dll, driver etc). Only a base address need be specified. This makes
        this plugin useful as a routine in other plugins.

        Args:
          fd: A writable filelike object which must support seeking.
          address_space: The address_space to read from.
          image_base: The offset of the dos file header.
        """
        dos_header = self.pe_profile._IMAGE_DOS_HEADER(
            offset=image_base, vm=address_space)
        image_base = dos_header.obj_offset
        nt_header = dos_header.NTHeader

        # First copy the PE file header, then copy the sections.
        data = dos_header.obj_vm.read(
            image_base, min(1000000, nt_header.OptionalHeader.SizeOfHeaders))
        if not data:
            return

        fd.seek(0)
        fd.write(data)

        for section in nt_header.Sections:
            # Force some sensible maximum values here (caps against
            # corrupted/malicious headers).
            size_of_section = min(10000000, section.SizeOfRawData)
            physical_offset = min(100000000, int(section.PointerToRawData))

            # Read from the in-memory virtual address, write back to the
            # section's on-disk file offset.
            data = section.obj_vm.read(
                section.VirtualAddress + image_base, size_of_section)

            fd.seek(physical_offset, 0)
            fd.write(data)

    def collect(self):
        """Open the output target and write the PE file; emits no rows."""
        renderer = self.session.GetRenderer()
        if self.plugin_args.out_file:
            out_fd = renderer.open(
                filename=self.plugin_args.out_file, mode="wb")
        else:
            out_fd = self.plugin_args.out_fd

        if not out_fd:
            self.session.logging.error(
                "No output filename or file handle specified.")
            return []

        with out_fd:
            self.session.logging.info(
                "Dumping PE File at image_base %#x to %s",
                self.plugin_args.image_base, out_fd.name)

            self.WritePEFile(out_fd, self.plugin_args.address_space,
                             self.plugin_args.image_base)

        return []
class ProcExeDump(core.DirectoryDumperMixin, common.WinProcessFilter):
    """Dump a process to an executable file sample"""

    __name = "procdump"

    dump_dir_optional = True

    __args = [
        dict(name="out_fd",
             help="A file like object to write the output.")
    ]

    table_header = [
        dict(name="_EPROCESS", width=50),
        dict(name="Filename"),
    ]

    def __init__(self, *args, **kwargs):
        """Dump a process from memory into an executable.

        In windows PE files are mapped into memory in sections. Each section
        is mapped into a region within the process virtual memory from a
        region in the executable file:

        File on Disk                Memory Image
        0-> ------------  image base-> ------------
             Header                     Header
            ------------               ------------
                                        Section 1
            ------------               ------------
             Section 1
            ------------               ------------
             Section 2                  Section 2
            ------------               ------------

        This plugin simply copies the sections from memory back into the file
        on disk. Its likely that some of the pages in memory are not actually
        memory resident, so we might get invalid page reads. In this case the
        region on disk is null padded. If that happens it will not be possible
        to run the executable, but the executable can still be disassembled
        and analysed statically.

        References:
        http://code.google.com/p/corkami/downloads/detail?name=pe-20110117.pdf

        NOTE: Malware can mess with the headers after loading. The remap
        option allows to remap the sections on the disk file so they do not
        collide.

        Args:
          remap: If set, allows to remap the sections on disk so they do not
            overlap.

          out_fd: Alternatively, a filelike object can be provided directly.
        """
        super(ProcExeDump, self).__init__(*args, **kwargs)
        self.pedump = PEDump(session=self.session)
        if self.dump_dir is None and not self.plugin_args.out_fd:
            raise plugin.PluginError("Dump dir must be specified.")

    def collect(self):
        """Renders the tasks to disk images, outputting progress as they go"""
        for task in self.filter_processes():
            pid = task.UniqueProcessId

            task_address_space = task.get_process_address_space()
            if not task_address_space:
                self.session.logging.info(
                    "Can not get task address space - skipping.")
                continue

            if self.plugin_args.out_fd:
                self.pedump.WritePEFile(
                    self.plugin_args.out_fd,
                    task_address_space, task.Peb.ImageBaseAddress)
                yield task, "User FD"

            # Create a new file.
            else:
                filename = u"executable.%s_%s.exe" % (
                    utils.EscapeForFilesystem(task.name), pid)

                yield task, filename

                with self.session.GetRenderer().open(
                        directory=self.dump_dir,
                        filename=filename,
                        mode="wb") as fd:
                    # The Process Environment Block contains the dos header:
                    self.pedump.WritePEFile(
                        fd, task_address_space, task.Peb.ImageBaseAddress)
class DLLDump(ProcExeDump):
    """Dump DLLs from a process address space"""

    __name = "dlldump"

    __args = [
        dict(name="regex", default=".", type="RegEx",
             help="A Regular expression for selecting the dlls to dump.")
    ]

    table_header = [
        dict(name="_EPROCESS"),
        dict(name="base", style="address"),
        dict(name="module", width=20),
        dict(name="filename")
    ]

    def collect(self):
        """Dump each matching loaded module of each selected process."""
        for task in self.filter_processes():
            task_as = task.get_process_address_space()

            # Skip kernel and invalid processes.
            for module in task.get_load_modules():
                # NOTE(review): this vtop() of the _EPROCESS offset does not
                # depend on the module and could be hoisted out of the loop.
                process_offset = task_as.vtop(task.obj_offset)
                if process_offset:

                    # Skip the modules which do not match the regex.
                    if not self.plugin_args.regex.search(
                            utils.SmartUnicode(module.BaseDllName)):
                        continue

                    base_name = os.path.basename(
                        utils.SmartUnicode(module.BaseDllName))

                    # Filename encodes pid, physical process offset, module
                    # base and (escaped) module name.
                    dump_file = "module.{0}.{1:x}.{2:x}.{3}".format(
                        task.UniqueProcessId, process_offset, module.DllBase,
                        utils.EscapeForFilesystem(base_name))

                    yield dict(_EPROCESS=task,
                               base=module.DllBase,
                               module=module.BaseDllName,
                               filename=dump_file)

                    # Use the procdump module to dump out the binary:
                    with self.session.GetRenderer().open(
                            filename=dump_file,
                            directory=self.dump_dir,
                            mode="wb") as fd:
                        self.pedump.WritePEFile(fd, task_as, module.DllBase)

                else:
                    self.session.logging.error(
                        "Cannot dump %s@%s at %#x\n",
                        task.ImageFileName, module.BaseDllName,
                        int(module.DllBase))
class ModDump(DLLDump):
    """Dump kernel drivers from kernel space."""

    __name = "moddump"

    # Lazily-built cache of candidate address spaces (kernel AS plus each
    # selected process AS).
    address_spaces = None

    def find_space(self, image_base):
        """Search through all process address spaces for a PE file.

        Returns the first address space in which image_base is valid, or
        None if it is mapped nowhere.
        """
        if self.address_spaces is None:
            self.address_spaces = [self.kernel_address_space]
            for task in self.filter_processes():
                self.address_spaces.append(task.get_process_address_space())

        for address_space in self.address_spaces:
            if address_space.is_valid_address(image_base):
                return address_space

    table_header = [
        dict(name="Name", width=30),
        dict(name="Base", style="address"),
        dict(name="Filename")
    ]

    def collect(self):
        """Dump each kernel module whose name matches the regex argument."""
        modules_plugin = self.session.plugins.modules(session=self.session)

        for module in modules_plugin.lsmod():
            if self.plugin_args.regex.search(
                    utils.SmartUnicode(module.BaseDllName)):
                # Find any address space where the module image is mapped.
                address_space = self.find_space(module.DllBase)
                if address_space:
                    dump_file = "driver.{0:x}.{1}".format(
                        module.DllBase, utils.EscapeForFilesystem(
                            module.BaseDllName))
                    yield (module.BaseDllName, module.DllBase, dump_file)

                    with self.session.GetRenderer().open(
                            filename=dump_file,
                            directory=self.dump_dir,
                            mode="wb") as fd:
                        self.pedump.WritePEFile(
                            fd, address_space, module.DllBase)
from rekall import obj
from rekall import scan
from rekall import plugin
from rekall.plugins.windows import common
from rekall_lib import utils
class KDBGScanner(scan.BaseScanner):
    """Scans for _KDDEBUGGER_DATA64 structures.

    Note that this does not rely on signatures, as validity of hits is
    calculated through list reflection.
    """
    checks = [("StringCheck", dict(needle=b"KDBG"))]

    def scan(self, offset=0, maxlen=None):
        """Yield validated _KDDEBUGGER_DATA64 objects found in the AS."""
        maxlen = maxlen or self.profile.get_constant("MaxPointer")

        # How far into the struct the OwnerTag is.
        owner_tag_offset = self.profile.get_obj_offset(
            "_DBGKD_DEBUG_DATA_HEADER64", "OwnerTag")

        # Depending on the memory model this behaves slightly differently.
        architecture = self.profile.metadata("arch", "I386")

        # This basically iterates over all hits on the string "KDBG".
        for offset in super(KDBGScanner, self).scan(offset, maxlen):
            # For each hit we overlay a _DBGKD_DEBUG_DATA_HEADER64 on it and
            # reflect through the "List" member.
            result = self.profile.Object("_KDDEBUGGER_DATA64",
                                         offset=offset - owner_tag_offset,
                                         vm=self.address_space)

            # We verify this hit by reflecting through its header list.
            list_entry = result.Header.List

            # On 32 bit systems the Header.List member seems to actually be a
            # LIST_ENTRY32 instead of a LIST_ENTRY64, but it is still padded to
            # take the same space:
            if architecture == "I386":
                list_entry = list_entry.cast("LIST_ENTRY32")

            if list_entry.reflect():
                yield result
            elif (list_entry.Flink == list_entry.Blink and
                  not list_entry.Flink.dereference()):
                # Accept hits whose list head is unmapped but self-consistent.
                self.session.logging.debug(
                    "KDBG list_head is not mapped, assuming its valid.")
                yield result
class KDBGScan(plugin.KernelASMixin, common.AbstractWindowsCommandPlugin):
    """Scan for possible _KDDEBUGGER_DATA64 structures.

    The scanner is detailed here:
    http://moyix.blogspot.com/2008/04/finding-kernel-global-variables-in.html

    The relevant structures are detailed here:
    http://doxygen.reactos.org/d3/ddf/include_2psdk_2wdbgexts_8h_source.html

    We can see that _KDDEBUGGER_DATA64.Header is:

    typedef struct _DBGKD_DEBUG_DATA_HEADER64 {
        LIST_ENTRY64    List;
        ULONG           OwnerTag;
        ULONG           Size;
    }

    We essentially search for an owner tag of "KDBG", then overlay the
    _KDDEBUGGER_DATA64 struct on it. We test for validity by reflecting
    through the Header.List member.
    """

    __name = "kdbgscan"

    __args = [
        dict(name="full_scan", type="Boolean",
             help="Scan the full address space.")
    ]

    def hits(self):
        """Yield validated _KDDEBUGGER_DATA64 objects from the kernel AS."""
        if self.plugin_args.full_scan:
            start, end = 0, 2**64
        else:
            # The kernel image is always loaded in the same range called the
            # "Initial Loader Mappings". Narrowing the possible range makes
            # scanning much faster. (See
            # http://www.codemachine.com/article_x64kvas.html)
            if self.session.profile.metadata("arch") == "AMD64":
                start, end = 0xFFFFF80000000000, 0xFFFFF87FFFFFFFFF
            else:
                start, end = 0x80000000, 0xFFFFFFFF

        scanner = KDBGScanner(
            session=self.session, profile=self.profile,
            address_space=self.kernel_address_space)

        # Yield actual objects here
        for kdbg in scanner.scan(
                obj.Pointer.integer_to_address(start),
                end - start):
            yield kdbg

    table_header = [
        dict(name="Key", width=50),
        dict(name="Value")
    ]

    table_options = dict(
        suppress_headers=True
    )

    def collect(self):
        """Renders the KPCR values as text"""
        for kdbg in self.hits():
            yield "Offset (V)", utils.HexInteger(kdbg.obj_offset)
            yield "Offset (P)", utils.HexInteger(kdbg.obj_vm.vtop(
                kdbg.obj_offset))

            # These fields can be gathered without dereferencing
            # any pointers, thus they're available always
            yield "KDBG owner tag check", kdbg.is_valid()

            verinfo = kdbg.dbgkd_version64()
            if verinfo:
                yield "Version64", "{0:#x} (Major: {1}, Minor: {2})\n".format(
                    verinfo.obj_offset, verinfo.MajorVersion,
                    verinfo.MinorVersion)

            yield "Service Pack (CmNtCSDVersion)", kdbg.ServicePack
            yield "Build string (NtBuildLab)", kdbg.NtBuildLab.dereference()

            # Count the total number of tasks from PsActiveProcessHead.
            try:
                pslist = kdbg.PsActiveProcessHead.list_of_type(
                    "_EPROCESS", "ActiveProcessLinks")
                num_tasks = len([x for x in pslist if x.pid > 0])
            except AttributeError:
                num_tasks = 0

            try:
                modules = self.session.plugins.modules(session=self.session)
                num_modules = len(list(modules.lsmod()))
            except AttributeError:
                num_modules = 0

            yield "PsActiveProcessHead", "{0:#x} ({1} processes)".format(
                kdbg.PsActiveProcessHead, num_tasks)

            yield "PsLoadedModuleList", "{0:#x} ({1} modules)".format(
                kdbg.PsLoadedModuleList, num_modules)

            # A valid kernel base should contain a DOS header.
            yield "KernelBase", "{0:#x} (Matches MZ: {1})".format(
                kdbg.KernBase, kdbg.obj_vm.read(kdbg.KernBase, 2) == b"MZ")

            # Parse the PE header of the kernel.
            pe_profile = self.session.LoadProfile("pe")

            dos_header = pe_profile._IMAGE_DOS_HEADER(
                offset=kdbg.KernBase, vm=kdbg.obj_vm)
            nt_header = dos_header.NTHeader
            if nt_header:
                yield ("Major (OptionalHeader)",
                       nt_header.OptionalHeader.MajorOperatingSystemVersion)
                yield ("Minor (OptionalHeader)",
                       nt_header.OptionalHeader.MinorOperatingSystemVersion)

            # The CPU block.
            for kpcr in kdbg.kpcrs():
                yield "KPCR", "{0:#x} (CPU {1})".format(
                    kpcr.obj_offset, kpcr.ProcessorBlock.Number)
from rekall.plugins.overlays.windows import tcpip_vtypes
from rekall.plugins.windows import common
# pylint: disable=protected-access
class Connections(tcpip_vtypes.TcpipPluginMixin,
                  common.WindowsCommandPlugin):
    """
    Print list of open connections [Windows XP Only]
    ---------------------------------------------

    This module enumerates the active connections from tcpip.sys.

    Note that if you are using a hibernated image this might not work
    because Windows closes all sockets before hibernating. You might
    find it more effective to do conscan instead.

    Active TCP connections are found in a hash table. The Hash table is
    given by the _TCBTable symbol. The size of the hash table is found in
    the _MaxHashTableSize variable.
    """

    __name = "connections"

    mode = "mode_xp"

    table_header = [
        dict(name="offset_v", style="address"),
        dict(name="local_net_address", width=25),
        dict(name="remote_net_address", width=25),
        dict(name="pid", width=6)
    ]

    def collect(self):
        """Walk the TCB hash table and yield one row per connection."""
        # The _TCBTable is a pointer to the hash table.
        TCBTable = self.tcpip_profile.get_constant_object(
            "TCBTable",
            target="Pointer",
            vm=self.kernel_address_space,
            target_args=dict(
                target="Array",
                target_args=dict(
                    count=int(self.tcpip_profile.get_constant_object(
                        "MaxHashTableSize", "unsigned int")),
                    target="Pointer",
                    target_args=dict(
                        target="_TCPT_OBJECT"
                    )
                )
            )
        )

        # Walk the hash table and print all the conenctions. Each bucket
        # is a singly linked list of _TCPT_OBJECTs.
        for slot in TCBTable.deref():
            for conn in slot.walk_list("Next"):
                offset = conn.obj_offset
                local = "{0}:{1}".format(conn.LocalIpAddress, conn.LocalPort)
                remote = "{0}:{1}".format(conn.RemoteIpAddress, conn.RemotePort)
                yield (offset, local, remote, conn.Pid)
class Sockets(tcpip_vtypes.TcpipPluginMixin,
              common.WindowsCommandPlugin):
    """
    Print list of open sockets. [Windows xp only]
    ---------------------------------------------

    This module enumerates the active sockets from tcpip.sys

    Note that if you are using a hibernated image this might not work
    because Windows closes all sockets before hibernating.

    _ADDRESS_OBJECT are arranged in a hash table found by the _AddrObjTable
    symbol. The hash table has a size found by the _AddrObjTableSize symbol.
    """

    name = "sockets"

    mode = "mode_xp"

    table_header = [
        dict(name="offset_v", style="address"),
        dict(name="pid", width=6, align="r"),
        dict(name="port", width=6, align="r"),
        dict(name="proto", width=6, align="r"),
        dict(name="protocol", width=10),
        dict(name="address", width=15),
        dict(name="create_time")
    ]

    def _sock_row(self, sock):
        """Build one output row from an _ADDRESS_OBJECT.

        Shared by column_types() and collect() so the row schema is
        defined in exactly one place (previously duplicated).
        """
        return dict(offset_v=sock,
                    pid=sock.Pid,
                    port=sock.LocalPort,
                    proto=int(sock.Protocol),
                    protocol=sock.Protocol,
                    address=sock.LocalIpAddress,
                    create_time=sock.CreateTime)

    def column_types(self):
        """Return a prototype row used to infer the output column types."""
        return self._sock_row(self.tcpip_profile._ADDRESS_OBJECT())

    def collect(self):
        """Walk the AddrObjTable hash table and yield one row per socket."""
        AddrObjTable = self.tcpip_profile.get_constant_object(
            "AddrObjTable",
            target="Pointer",
            vm=self.kernel_address_space,
            target_args=dict(
                target="Array",
                target_args=dict(
                    count=int(self.tcpip_profile.get_constant_object(
                        "AddrObjTableSize", "unsigned int")),
                    target="Pointer",
                    target_args=dict(
                        target="_ADDRESS_OBJECT"
                    )
                )
            )
        )

        # Each hash bucket is a singly linked list of _ADDRESS_OBJECTs.
        for slot in AddrObjTable.deref():
            for sock in slot.walk_list("Next"):
                yield self._sock_row(sock)
__author__ = "Michael Cohen <scudette@google.com>"
from rekall.plugins.windows import common
from rekall.plugins.overlays.windows import tcpip_vtypes
class WinNetstat(tcpip_vtypes.TcpipPluginMixin, common.WindowsCommandPlugin):
    """Enumerate image for connections and sockets"""

    __name = "netstat"

    table_header = [
        dict(name="offset", style="address"),
        dict(name="protocol", width=8),
        dict(name="local_addr", width=20),
        dict(name="remote_addr", width=20),
        dict(name="state", width=16),
        dict(name="pid", width=5, align="r"),
        dict(name="owner", width=14),
        dict(name="created", width=7)
    ]

    @classmethod
    def is_active(cls, session):
        # This plugin works with the _TCP_ENDPOINT interfaces. This interface
        # uses the new HashTable entry in ntoskernl.exe.
        return (super(WinNetstat, cls).is_active(session) and
                session.profile.get_constant('RtlEnumerateEntryHashTable'))

    def collect(self):
        """Yield one row per established TCP endpoint.

        Walks the two-level partition hash table that tcpip.sys keeps its
        _TCP_ENDPOINT objects in.
        """
        # First list established endpoints (TcpE pooltags).
        partition_table = self.tcpip_profile.get_constant_object(
            "PartitionTable",
            target="Pointer",
            target_args=dict(
                target="PARTITION_TABLE",
            )
        )

        for partition in partition_table.Partitions:
            for first_level in partition:
                for second_level in first_level.SecondLevel:
                    for endpoint in second_level.list_of_type(
                            "_TCP_ENDPOINT", "ListEntry"):
                        lendpoint = "{0}:{1}".format(
                            endpoint.LocalAddress(),
                            endpoint.LocalPort)

                        rendpoint = "{0}:{1}".format(
                            endpoint.RemoteAddress(),
                            endpoint.RemotePort)

                        # The protocol column is left empty here; all
                        # endpoints from this table are TCP.
                        yield dict(offset=endpoint,
                                   protocol=None,
                                   local_addr=lendpoint,
                                   remote_addr=rendpoint,
                                   state=endpoint.State,
                                   pid=endpoint.Owner.pid,
                                   owner=endpoint.Owner.name,
                                   created=endpoint.CreateTime)
# pylint: disable=protected-access
from rekall import obj
from rekall.plugins.windows import common
class KPCR(common.WindowsCommandPlugin):
    """A plugin to print all KPCR blocks."""

    __name = "kpcr"

    def kpcr(self):
        """Return a validated _KPCR object, trying several strategies.

        Note: despite the original wording, this returns a single KPCR
        (not a generator) - the one found via KiInitialPCR, the XP fixed
        address, or the processor block; a NoneObject otherwise.
        """
        # On windows 7 the KPCR is just stored in a symbol.
        initial_pcr = self.profile.get_constant_object(
            "KiInitialPCR",
            "_KPCR")

        # Validate the PCR through the self member.
        self_Pcr = initial_pcr.m("SelfPcr") or initial_pcr.m("Self")
        if self_Pcr.v() == initial_pcr.obj_offset:
            return initial_pcr

        # On windows XP the KPCR is hardcoded to 0xFFDFF000
        pcr = self.profile._KPCR(0xFFDFF000)
        if pcr.m("SelfPcr").v() == pcr.obj_offset:
            return pcr

        return self._get_kpcr_from_prcb() or obj.NoneObject("Unknown KPCR")

    def _get_kpcr_from_prcb(self):
        """On windows 10, the processor control block is stored in a symbol.

        We find the KPCR by subtracting its member offset from the
        Processor control block.
        """
        prcb = self.session.profile.get_constant_object(
            "KiProcessorBlock", "Pointer", dict(target="_KPRCB")).deref()
        # "!= None" (not "is not None") is deliberate: a failed lookup
        # yields a NoneObject which compares equal to None.
        if prcb != None:
            pcr = self.session.profile._KPCR(
                prcb.obj_offset - self.session.profile.get_obj_offset(
                    "_KPCR", "Prcb"))
            if pcr.Self.v() == pcr.obj_offset:
                return pcr

    def render(self, renderer):
        """Render the located KPCR and its current/idle/next threads."""
        kpcr = self.kpcr()

        renderer.section()

        renderer.table_header([("Property", "property", "<30"),
                               ("Value", "value", "<")])

        renderer.table_row("Offset (V)", "%#x" % kpcr.obj_offset)
        renderer.table_row("KdVersionBlock", kpcr.KdVersionBlock, style="full")

        renderer.table_row("IDT", "%#x" % kpcr.IDT)
        renderer.table_row("GDT", "%#x" % kpcr.GDT)

        current_thread = kpcr.ProcessorBlock.CurrentThread
        idle_thread = kpcr.ProcessorBlock.IdleThread
        next_thread = kpcr.ProcessorBlock.NextThread

        if current_thread:
            renderer.format("{0:<30}: {1:#x} TID {2} ({3}:{4})\n",
                            "CurrentThread",
                            current_thread, current_thread.Cid.UniqueThread,
                            current_thread.owning_process().ImageFileName,
                            current_thread.Cid.UniqueProcess,
                            )

        if idle_thread:
            renderer.format("{0:<30}: {1:#x} TID {2} ({3}:{4})\n",
                            "IdleThread",
                            idle_thread, idle_thread.Cid.UniqueThread,
                            idle_thread.owning_process().ImageFileName,
                            idle_thread.Cid.UniqueProcess,
                            )

        if next_thread:
            renderer.format("{0:<30}: {1:#x} TID {2} ({3}:{4})\n",
                            "NextThread",
                            next_thread,
                            next_thread.Cid.UniqueThread,
                            next_thread.owning_process().ImageFileName,
                            next_thread.Cid.UniqueProcess,
                            )

        renderer.format("{0:<30}: CPU {1} ({2} @ {3} MHz)\n",
                        "Details",
                        kpcr.ProcessorBlock.Number,
                        kpcr.ProcessorBlock.VendorString,
                        kpcr.ProcessorBlock.MHz)

        renderer.format(
            "{0:<30}: {1:#x}\n", "CR3/DTB",
            kpcr.ProcessorBlock.ProcessorState.SpecialRegisters.Cr3)
# pylint: disable=protected-access
from future import standard_library
standard_library.install_aliases()
from rekall.plugins.windows import common
from rekall_lib import utils
class PoolScanModuleFast(common.PoolScanner):
    """Pool scanner matching allocations tagged as kernel modules."""
    def __init__(self, **kwargs):
        super(PoolScanModuleFast, self).__init__(**kwargs)
        module_tag = self.profile.get_constant("MODULE_POOLTAG")
        self.checks = [
            # The allocation must carry the module pool tag.
            ('PoolTagCheck', dict(tag=module_tag)),
            # Must be large enough for an _LDR_DATA_TABLE_ENTRY. Windows 8 seems
            # to not allocate the full structure here so this test does not
            # always work. Disabled for now.
            # ('CheckPoolSize', dict(min_size=self.profile.get_obj_size(
            #     "_LDR_DATA_TABLE_ENTRY"))),
            # Accept paged, non-paged and freed pool.
            ('CheckPoolType', dict(paged=True, non_paged=True, free=True)),
            ('CheckPoolIndex', dict(value=0)),
        ]
class ModScan(common.PoolScannerPlugin):
    """Scan Physical memory for _LDR_DATA_TABLE_ENTRY objects."""
    __name = "modscan"
    table_header = [
        dict(name="offset", style="address"),
        dict(name="name", width=20),
        dict(name="base", style="address"),
        dict(name="size", style="address"),
        dict(name="file")
    ]
    scanner_defaults = dict(
        scan_kernel_nonpaged_pool=True
    )
    def collect(self):
        """Yield one (offset, name, base, size, file) row per carved entry."""
        for run in self.generate_memory_ranges():
            scanner = PoolScanModuleFast(profile=self.profile,
                                         session=self.session,
                                         address_space=run.address_space)
            for pool_obj in scanner.scan(run.start, run.length):
                if not pool_obj:
                    continue
                # The loader entry is assumed to start right after the
                # pool header (pool_obj.obj_end).
                ldr_entry = self.profile._LDR_DATA_TABLE_ENTRY(
                    vm=run.address_space, offset=pool_obj.obj_end)
                # Must have a non zero size.
                if ldr_entry.SizeOfImage == 0:
                    continue
                # Must be page aligned.
                if ldr_entry.DllBase & 0xFFF:
                    continue
                # Names are read via the kernel address space since the
                # string buffers may not be in this physical run.
                yield (ldr_entry.obj_offset,
                       ldr_entry.BaseDllName.v(vm=self.kernel_address_space),
                       ldr_entry.DllBase,
                       ldr_entry.SizeOfImage,
                       ldr_entry.FullDllName.v(vm=self.kernel_address_space))
class PoolScanThreadFast(common.PoolScanner):
    """Pool scanner that carves out thread objects by their pool tag."""
    def __init__(self, **kwargs):
        super(PoolScanThreadFast, self).__init__(**kwargs)
        thread_tag = self.profile.get_constant("THREAD_POOLTAG")
        ethread_size = self.profile.get_obj_size("_ETHREAD")
        self.checks = [
            # The allocation must carry the thread pool tag.
            ('PoolTagCheck', dict(tag=thread_tag)),
            # And be big enough to contain a complete _ETHREAD.
            ('CheckPoolSize', dict(min_size=ethread_size)),
            # Accept paged, non-paged and freed pool.
            ('CheckPoolType', dict(paged=True, non_paged=True, free=True)),
            ('CheckPoolIndex', dict(value=0)),
        ]
class ThrdScan(common.PoolScannerPlugin):
    """Scan physical memory for _ETHREAD objects"""
    __name = "thrdscan"
    table_header = [
        dict(name="offset", style="address"),
        dict(name="pid", width=6, align="r"),
        dict(name="tid", width=6, align="r"),
        dict(name="start", style="address"),
        dict(name="create_time", width=24),
        dict(name="exit_time", width=24),
        dict(name="name", width=16),
        dict(name="symbol"),
    ]
    scanner_defaults = dict(
        scan_kernel_nonpaged_pool=True
    )
    def collect(self):
        """Carve _ETHREAD objects and emit one row per plausible thread."""
        with self.session.plugins.cc() as cc:
            for run in self.generate_memory_ranges():
                scanner = PoolScanThreadFast(
                    profile=self.profile, session=self.session,
                    address_space=run.address_space)
                for pool_obj in scanner.scan(run.start, run.length):
                    thread = pool_obj.GetObject("Thread").Body.cast("_ETHREAD")
                    if not thread:
                        continue
                    # Reject threads that claim an owning process but have
                    # no start address - unlikely for a real thread.
                    if (thread.Cid.UniqueProcess.v() != 0 and
                            thread.StartAddress == 0):
                        continue
                    try:
                        # Check the Semaphore Type.
                        if thread.Tcb.SuspendSemaphore.Header.Type != 0x05:
                            continue
                        if thread.KeyedWaitSemaphore.Header.Type != 0x05:
                            continue
                    except AttributeError:
                        # Profiles lacking these members skip the check.
                        pass
                    # Resolve the thread back to an owning process if possible.
                    task = thread.Tcb.ApcState.Process.dereference_as(
                        "_EPROCESS", vm=self.session.kernel_address_space)
                    # Try to switch to the tasks address space in order to
                    # resolve symbols.
                    start_address = thread.Win32StartAddress.v()
                    if start_address < self.session.GetParameter(
                            "highest_usermode_address"):
                        # Usermode start address: symbols live in the task.
                        if task != self.session.GetParameter("process_context"):
                            cc.SwitchProcessContext(task)
                    else:
                        # Kernel mode address: use the default context.
                        cc.SwitchProcessContext()
                    yield (thread.obj_offset,
                           thread.Cid.UniqueProcess,
                           thread.Cid.UniqueThread,
                           thread.Win32StartAddress.v(),
                           thread.CreateTime,
                           thread.ExitTime,
                           task.ImageFileName,
                           utils.FormattedAddress(
                               self.session.address_resolver, start_address))
from rekall import testlib
from rekall.plugins.windows import common
from rekall_lib import utils
class Handles(common.WinProcessFilter):
    """Print list of open handles for each process"""
    __name = "handles"
    __args = [
        dict(name="object_types", type="ArrayStringParser",
             help="Types of objects to show."),
        dict(name="named_only", type="Boolean",
             help="Output only handles with a name ."),
    ]
    table_header = [
        dict(name="_OBJECT_HEADER", style="address"),
        dict(name="_EPROCESS", type="_EPROCESS"),
        dict(name="handle", style="address"),
        dict(name="access", style="address"),
        dict(name="obj_type", width=16),
        dict(name="details")
    ]
    def column_types(self):
        """Return sample values describing the type of each output column."""
        return dict(
            offset_v=self.session.profile._OBJECT_HEADER(),
            _EPROCESS=self.session.profile._EPROCESS(),
            handle=utils.HexInteger(0),
            access=utils.HexInteger(0),
            obj_type="",
            details="")
    def enumerate_handles(self, task):
        """Yield (handle, object_type, name) for each handle of the task.

        The name is resolved per object type: file path for File, full key
        name for Key, "name(pid)" for Process, "TID x PID y" for Thread,
        otherwise the object's NameInfo name.
        """
        if task.ObjectTable.HandleTableList:
            for handle in task.ObjectTable.handles():
                name = u""
                object_type = handle.get_object_type(self.kernel_address_space)
                # NOTE: "== None" is the Rekall idiom - NoneObject
                # placeholders compare equal to None.
                if object_type == None:
                    continue
                # Apply the optional object type filter first.
                if (self.plugin_args.object_types and
                        object_type not in self.plugin_args.object_types):
                    continue
                elif object_type == "File":
                    file_obj = handle.dereference_as("_FILE_OBJECT")
                    name = file_obj.file_name_with_device()
                elif object_type == "Key":
                    key_obj = handle.dereference_as("_CM_KEY_BODY")
                    name = key_obj.full_key_name()
                elif object_type == "Process":
                    proc_obj = handle.dereference_as("_EPROCESS")
                    name = u"{0}({1})".format(
                        utils.SmartUnicode(proc_obj.ImageFileName),
                        proc_obj.UniqueProcessId)
                elif object_type == "Thread":
                    thrd_obj = handle.dereference_as("_ETHREAD")
                    name = u"TID {0} PID {1}".format(
                        thrd_obj.Cid.UniqueThread,
                        thrd_obj.Cid.UniqueProcess)
                elif handle.NameInfo.Name == None:
                    name = u""
                else:
                    name = handle.NameInfo.Name
                # Optionally drop anonymous handles.
                if not name and self.plugin_args.named_only:
                    continue
                yield handle, object_type, name
    def collect(self):
        """Emit one row per handle across all filtered processes."""
        for task in self.filter_processes():
            for count, (handle, object_type, name) in enumerate(
                    self.enumerate_handles(task)):
                self.session.report_progress("%s: %s handles" % (
                    task.ImageFileName, count))
                yield dict(_OBJECT_HEADER=handle,
                           _EPROCESS=task,
                           handle=utils.HexInteger(handle.HandleValue),
                           access=utils.HexInteger(handle.GrantedAccess),
                           obj_type=object_type,
                           details=utils.SmartUnicode(name))
class TestHandles(testlib.SimpleTestCase):
    """Regression test driving the handles plugin."""
    PARAMETERS = {"commandline": "handles %(pids)s"}
# pylint: disable=protected-access
from future import standard_library
standard_library.install_aliases()
from rekall import testlib
from rekall_lib import utils
from rekall.plugins.common import memmap
from rekall.plugins.windows import common
class WinPsList(common.WinProcessFilter):
    """List processes for windows."""
    __name = "pslist"
    eprocess = None
    table_header = [
        dict(type="_EPROCESS", name="_EPROCESS"),
        dict(name="ppid", width=6, align="r"),
        dict(name="thread_count", width=6, align="r"),
        dict(name="handle_count", width=8, align="r"),
        dict(name="session_id", width=6, align="r"),
        dict(name="wow64", width=6),
        dict(name="process_create_time", width=24),
        dict(name="process_exit_time", width=24)
    ]
    def column_types(self):
        """Return a row of sample values describing each column's type."""
        result = self._row(self.session.profile._EPROCESS())
        # handle_count and session_id have the same value type as ppid.
        result["handle_count"] = result["ppid"]
        result["session_id"] = result["ppid"]
        return result
    def _row(self, task):
        """Build a single output row from an _EPROCESS."""
        return dict(_EPROCESS=task,
                    ppid=task.InheritedFromUniqueProcessId,
                    thread_count=task.ActiveThreads,
                    handle_count=task.ObjectTable.m("HandleCount"),
                    session_id=task.SessionId,
                    wow64=task.IsWow64,
                    process_create_time=task.CreateTime,
                    process_exit_time=task.ExitTime)
    def collect(self):
        """Yield one row per filtered process."""
        for task in self.filter_processes():
            yield self._row(task)
class WinDllList(common.WinProcessFilter):
    """Prints a list of dll modules mapped into each process."""
    __name = "dlllist"
    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="_EPROCESS", hidden=True),
        dict(name="base", style="address"),
        dict(name="size", style="address"),
        dict(name="reason", width=30),
        dict(name="dll_path"),
    ]
    def collect(self):
        """Per process: a command-line divider row, then one row per module."""
        for task in self.filter_processes():
            pid = task.UniqueProcessId
            divider = "{0} pid: {1:6}\n".format(task.ImageFileName, pid)
            if task.Peb:
                divider += u"Command line : {0}\n".format(
                    task.Peb.ProcessParameters.CommandLine)
                divider += u"{0}\n\n".format(task.Peb.CSDVersion)
                yield dict(divider=divider)
                for m in task.get_load_modules():
                    yield dict(base=m.DllBase,
                               size=m.SizeOfImage,
                               reason=m.LoadReason,
                               dll_path=m.FullDllName,
                               _EPROCESS=task)
            else:
                # Without a readable PEB we cannot walk the loader lists.
                yield dict(divider="Unable to read PEB for task.\n")
class WinMemMap(memmap.MemmapMixIn, common.WinProcessFilter):
    """Calculates the memory regions mapped by a process."""
    __name = "memmap"
    def _get_highest_user_address(self):
        """Read the top of the user address range from the kernel symbol."""
        highest = self.profile.get_constant_object(
            "MmHighestUserAddress", "Pointer")
        return highest.v()
class Threads(common.WinProcessFilter):
    """Enumerate threads."""
    name = "threads"
    table_header = [
        dict(name="_ETHREAD", style="address"),
        dict(name="pid", align="r", width=6),
        dict(name="tid", align="r", width=6),
        dict(name="start", style="address"),
        dict(name="start_symbol", width=30),
        dict(name="Process", width=16),
        dict(name="win32_start", style="address"),
        dict(name="win32_start_symb")
    ]
    def collect(self):
        """Walk each task's ThreadListHead and emit one row per thread."""
        cc = self.session.plugins.cc()
        with cc:
            for task in self.filter_processes():
                # Resolve names in the process context.
                cc.SwitchProcessContext(process=task)
                for thread in task.ThreadListHead.list_of_type(
                        "_ETHREAD", "ThreadListEntry"):
                    yield dict(_ETHREAD=thread,
                               pid=thread.Cid.UniqueProcess,
                               tid=thread.Cid.UniqueThread,
                               start=thread.StartAddress,
                               start_symbol=utils.FormattedAddress(
                                   self.session.address_resolver,
                                   thread.StartAddress),
                               Process=task.ImageFileName,
                               win32_start=thread.Win32StartAddress,
                               win32_start_symb=utils.FormattedAddress(
                                   self.session.address_resolver,
                                   thread.Win32StartAddress,
                               ))
class WinMemDump(memmap.MemDumpMixin, common.WinProcessFilter):
    """Dump windows processes.

    All dumping logic comes from memmap.MemDumpMixin; this class only
    binds it to the Windows process filter.
    """
class TestWinMemDump(testlib.HashChecker):
    """Test the memdump plugin."""
    PARAMETERS = dict(
        commandline="memdump %(pids)s --dump_dir %(tempdir)s",
        pid=2624)
class TestMemmap(testlib.SimpleTestCase):
    """Test the memmap plugin."""
    PARAMETERS = dict(
        commandline="memmap %(pids)s",
        pid=2624)
class TestMemmapCoalesce(testlib.SimpleTestCase):
    """Check memmap output when --coalesce merges adjacent ranges."""
    PARAMETERS = {"commandline": "memmap %(pids)s --coalesce",
                  "pid": 2624}
from builtins import str
import itertools
from rekall import plugin
from rekall.plugins.windows import common
# Add overlays
# Hand-written vtype overlays describing the shim cache layout on each
# supported Windows release. Field format: name -> [offset, [type, args]].
# Windows XP x86: a header plus an inline array of fixed-size entries.
shimcache_xp_x86 = {
    "SHIM_CACHE_HEADER": [None, {
        "Magic": [0, ["unsigned int"]],
        "MaxEntries": [0x4, ["unsigned int"]],
        "TotalEntries": [0x8, ["unsigned int"]],
        "LRU": [0x10, ["Array", dict(
            target="unsigned int",
            count=lambda x: x.TotalEntries
        )]],
        "Entries": [0x190, ["Array", dict(
            count=lambda x: x.TotalEntries,
            target="SHIM_CACHE_ENTRY",
        )]],
    }],
    "SHIM_CACHE_ENTRY" : [0x228, {
        "Path" : [0x0, ["UnicodeString", dict(length=0x208)]],
        "LastModified" : [0x210, ["WinFileTime"]],
        "FileSize": [0x218, ["long long"]],
        "LastUpdate" : [0x220, ["WinFileTime"]],
    }],
}
# Windows 7 x64: entries reachable from the win32k g_ShimCache AVL table.
shimcache_win7_x64 = {
    "SHIM_CACHE_ENTRY": [None, {
        "ListEntry" : [0x0, ["_LIST_ENTRY"]],
        "Path" : [0x10, ["_UNICODE_STRING"]],
        "LastModified": [0x20, ["WinFileTime"]],
        "InsertFlags": [0x28, ["unsigned int"]],
        "ShimFlags": [0x2c, ["unsigned int"]],
        "BlobSize": [0x30, ["unsigned long long"]],
        "BlobBuffer" : [0x38, ["unsigned long"]],
    }],
}
# Windows 7 x86: same shape as x64 with 32 bit offsets.
shimcache_win7_x86 = {
    "SHIM_CACHE_ENTRY": [None, {
        "ListEntry" :[0x0, ["_LIST_ENTRY"]],
        "Path" : [0x08, ["_UNICODE_STRING"]],
        "LastModified" : [0x10, ["WinFileTime"]],
        "InsertFlags": [0x18, ["unsigned int"]],
        "ShimFlags": [0x1c, ["unsigned int"]],
        "BlobSize": [0x20, ["unsigned long long"]],
        "BlobBuffer" : [0x24, ["unsigned long"]],
    }],
}
# Windows 8 x64: timestamp/flags moved into a separate detail struct
# referenced by pointer from the entry.
shimcache_win8_x64 = {
    "SHIM_CACHE_ENTRY_DETAIL": [None, {
        "LastModified": [0x0, ["WinFileTime"]],
        "InsertFlags": [0x08, ["unsigned int"]],
        "ShimFlags": [0x0c, ["unsigned int"]],
        "BlobSize": [0x10, ["unsigned long long"]],
        "Padding": [0x18, ["unsigned long long"]],
        "BlobBuffer": [0x20, ["unsigned long long"]],
    }],
    "SHIM_CACHE_ENTRY": [None, {
        "ListEntry" : [0x0, ["_LIST_ENTRY"]],
        "Path": [0x18, ["_UNICODE_STRING"]],
        "ListEntryDetail": [0x38, ["Pointer", dict(
            target="SHIM_CACHE_ENTRY_DETAIL"
        )]],
    }],
}
# Windows 8 x86 equivalent of the above.
shimcache_win8_x86 = {
    "SHIM_CACHE_ENTRY_DETAIL": [None, {
        "LastModified": [0x0, ["WinFileTime"]],
        "InsertFlags": [0x08, ["unsigned int"]],
        "ShimFlags": [0x0c, ["unsigned int"]],
        "BlobSize": [0x10, ["unsigned long"]],
        "BlobBuffer": [0x14, ["unsigned long"]],
    }],
    "SHIM_CACHE_ENTRY": [None, {
        "ListEntry": [0x0, ["_LIST_ENTRY"]],
        "Path": [0x10, ["_UNICODE_STRING"]],
        "ListEntryDetail": [0x20, ["Pointer", dict(
            target="SHIM_CACHE_ENTRY_DETAIL"
        )]],
    }],
}
# Windows 10 x86: the cache is reached through a SHIM_CACHE_HANDLE held
# by the ahcache.sys driver.
shimcache_win10_x86 = {
    "SHIM_CACHE_ENTRY": [None, {
        "ListEntry" : [0x0, ["_LIST_ENTRY"]],
        "Path": [0xc, ["_UNICODE_STRING"]],
        "ListEntryDetail" : [0x14, ["Pointer", dict(
            target="SHIM_CACHE_ENTRY_DETAIL"
        )]],
    }],
    "SHIM_CACHE_ENTRY_DETAIL" : [None, {
        "LastModified": [0x08, ["WinFileTime"]],
        "BlobSize": [0x10, ["unsigned long"]],
        "BlobBuffer": [0x14, ["unsigned long long"]],
    }],
    "SHIM_CACHE_HANDLE": [0x10, {
        "eresource": [0x0, ["Pointer", dict(target="_ERESOURCE")]],
        "avl_table": [0x8, ["Pointer", dict(target="_RTL_AVL_TABLE")]],
    }],
}
# Windows 10 x64 equivalent of the above.
shimcache_win10_x64 = {
    "SHIM_CACHE_ENTRY": [None, {
        "ListEntry" : [0x0, ["_LIST_ENTRY"]],
        "Path": [0x18, ["_UNICODE_STRING"]],
        "ListEntryDetail" : [0x28, ["Pointer", dict(
            target="SHIM_CACHE_ENTRY_DETAIL"
        )]],
    }],
    "SHIM_CACHE_ENTRY_DETAIL" : [None, {
        "LastModified": [0x08, ["WinFileTime"]],
        "BlobSize": [0x10, ["unsigned long"]],
        "BlobBuffer": [0x18, ["unsigned long long"]],
    }],
    "SHIM_CACHE_HANDLE": [0x10, {
        "eresource": [0x0, ["Pointer", dict(target="_ERESOURCE")]],
        "avl_table": [0x8, ["Pointer", dict(target="_RTL_AVL_TABLE")]],
    }],
}
def AddShimProfiles(profile):
    """Return a copy of profile with the shim cache overlay for this OS.

    Selects one of the shimcache_* overlays above based on the profile's
    windows version/arch metadata and on which shim-related symbols exist.

    Raises:
      plugin.PluginError: if no known shim cache location is identified.
    """
    profile = profile.copy()
    # Windows XP 32bit
    if 5 < profile.metadata("version") < 6:
        if profile.metadata("arch") == "I386":
            profile.add_overlay(shimcache_xp_x86)
    # Windows 7 uses this constant to store the shimcache.
    elif profile.get_constant("g_ShimCache"):
        if profile.metadata("arch") == "AMD64":
            profile.add_overlay(shimcache_win7_x64)
        else:
            profile.add_overlay(shimcache_win7_x86)
    # Windows 8 uses a special driver to hold the cache.
    elif profile.get_constant("AhcCacheHandle"):
        if profile.metadata("arch") == "AMD64":
            profile.add_overlay(shimcache_win8_x64)
        else:
            profile.add_overlay(shimcache_win8_x86)
    # Windows 8.1 and 10 use a special driver to hold the cache.
    elif profile.session.address_resolver.get_address_by_name("ahcache"):
        # NOTE(review): "version < 7" presumably distinguishes 6.x (8.1)
        # from 10.x builds here - confirm against the metadata format.
        if profile.metadata("version") < 7:
            if profile.metadata("arch") == "AMD64":
                profile.add_overlay(shimcache_win8_x64)
            else:
                profile.add_overlay(shimcache_win8_x86)
        else:
            if profile.metadata("arch") == "AMD64":
                profile.add_overlay(shimcache_win10_x64)
            else:
                profile.add_overlay(shimcache_win10_x86)
    else:
        raise plugin.PluginError("Unable to identify windows version.")
    return profile
class ShimCacheMem(common.AbstractWindowsCommandPlugin):
    """Extract the Application Compatibility Shim Cache from kernel memory.

    The cache location varies by OS release: shared memory in winlogon.exe
    on XP, the win32k g_ShimCache AVL table on Windows 7, and a cache
    handle owned by nt (Windows 8) or ahcache.sys (Windows 8.1/10) later.
    Every probe is attempted; probes for symbols absent on the running OS
    resolve to NoneObjects and simply yield nothing.
    """
    name = "shimcachemem"
    table_header = [
        dict(name="Shim", style="address"),
        dict(name="last_mod", width=30),
        dict(name="last_update", hidden=True),
        dict(name="size", width=10),
        dict(name="Path")
    ]
    def collect_xp(self):
        """Fetch the shimcache from XP.

        According to the paper, on XP the cache is in shared memory inside the
        process winlogon.exe. The cache begins with a header and a magic value
        of 0xDEADBEEF.

        For some reason the algorithm explained in the paper seems unnecessarily
        complex. In Rekall we just search for a handle to the ShimCacheMemory
        section object and use it.

        Returns:
          An iterable of SHIM_CACHE_ENTRY objects (empty if not found).
        """
        for row in self.session.plugins.handles(proc_regex="winlogon",
                                                object_types="Section"):
            if "ShimSharedMemory" in row["details"]:
                # Found the section object.
                section = row["_OBJECT_HEADER"].Object
                # This is the process that created the shared object.
                process_owner = section.Segment.u1.CreatingProcess.deref()
                va = section.Segment.u2.FirstMappedVa.v()
                if str(process_owner.name).lower() != u"winlogon.exe":
                    continue
                # Switch to that process's context so the VA is readable.
                with self.session.plugins.cc() as cc:
                    cc.SwitchProcessContext(process_owner)
                    header = self.profile.SHIM_CACHE_HEADER(va)
                    return header.Entries
        return []
    def collect_from_avl_table(self, avl_table):
        """Yield unique SHIM_CACHE_ENTRY objects reachable from avl_table."""
        seen = set()
        for node in avl_table.BalancedRoot.traverse_children():
            entry = node.payload("SHIM_CACHE_ENTRY")
            if entry.obj_offset in seen:
                continue
            seen.add(entry.obj_offset)
            yield entry
            # Sometimes there are some entries in the linked lists too.
            for subentry in entry.ListEntry.list_of_type(
                    "SHIM_CACHE_ENTRY", "ListEntry"):
                if subentry.obj_offset in seen:
                    continue
                seen.add(subentry.obj_offset)
                yield subentry
    def collect_win7(self):
        """Windows 7: the cache is the win32k g_ShimCache AVL table."""
        avl_table = self.profile.get_constant_object(
            "g_ShimCache", "_RTL_AVL_TABLE")
        return self.collect_from_avl_table(avl_table)
    def collect_win8(self):
        """Windows 8: the kernel holds a pointer to the AVL table."""
        header_pointer = self.session.address_resolver.get_constant_object(
            "nt!AhcCacheHandle", "Pointer")
        avl_table = header_pointer.dereference_as("_RTL_AVL_TABLE",
                                                  profile=self.profile)
        return self.collect_from_avl_table(avl_table)
    def collect_win8_1(self):
        """Windows 8.1+: ahcache.sys holds a SHIM_CACHE_HANDLE."""
        header_pointer = self.session.address_resolver.get_constant_object(
            "ahcache!AhcCacheHandle", "Pointer")
        header = header_pointer.dereference_as("SHIM_CACHE_HANDLE",
                                               profile=self.profile)
        return self.collect_from_avl_table(header.avl_table)
    def collect_win10(self):
        # Windows 10 uses the same ahcache!AhcCacheHandle layout as 8.1;
        # delegate instead of duplicating the lookup.
        return self.collect_win8_1()
    def collect(self):
        """Probe every known cache location and emit one row per entry."""
        # We need this module's symbols.
        self.session.address_resolver.track_modules("ahcache")
        self.profile = AddShimProfiles(self.session.profile)
        # Multiple probes can resolve to the same underlying table (e.g.
        # collect_win10 and collect_win8_1), so deduplicate by offset.
        seen = set()
        for entry in itertools.chain(self.collect_win10(),
                                     self.collect_win8(),
                                     self.collect_win8_1(),
                                     self.collect_win7(),
                                     self.collect_xp()):
            if entry.obj_offset in seen:
                continue
            seen.add(entry.obj_offset)
            # This field has moved around a bit between versions.
            last_modified = entry.multi_m(
                "LastModified",
                "ListEntryDetail.LastModified"
            )
            yield dict(Shim=entry,
                       last_mod=last_modified,
                       last_update=entry.m("LastUpdate"),
                       size=entry.m("FileSize"),
                       Path=entry.Path)
# pylint: disable=protected-access
# References:
# http://volatility-labs.blogspot.ch/2012/09/movp-11-logon-sessions-processes-and.html
# Windows Internals 5th Edition. Chapter 9.
from rekall import obj
from rekall.ui import text
from rekall.plugins.windows import common
from rekall_lib import utils
class Sessions(common.WinProcessFilter):
    """List details on _MM_SESSION_SPACE (user logon sessions).

    Windows uses sessions in order to separate processes. Sessions are used to
    separate the address spaces of windows processes.

    Note that this plugin traverses the ProcessList member of the session object
    to list the processes - yet another list _EPROCESS objects are on.
    """
    __name = "sessions"
    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="session_id", hidden=True),
        dict(name="process", width=40),
        dict(name="image"),
    ]
    def session_spaces(self):
        """Generates unique _MM_SESSION_SPACE objects.

        Generates unique _MM_SESSION_SPACE objects referenced by active
        processes.

        Yields:
          _MM_SESSION_SPACE instantiated from the session space's address space.
        """
        # Dedup based on sessions.
        for proc in utils.Deduplicate(self.filter_processes(),
                                      key=lambda x: x.Session):
            ps_ad = proc.get_process_address_space()
            session = proc.Session.deref(vm=ps_ad)
            # Session pointer is invalid (e.g. for System process).
            if session:
                yield session
    def find_session_space(self, session_id):
        """Get a _MM_SESSION_SPACE object by its ID.

        Args:
          session_id: the session ID to find.

        Returns:
          _MM_SESSION_SPACE instantiated from the session space's address
          space, or a NoneObject if no session matches.
        """
        for session in self.session_spaces():
            if session.SessionId == session_id:
                return session
        return obj.NoneObject("Cannot locate a session %s", session_id)
    def collect(self):
        """Per session: a divider, one row per process, one per image."""
        for session in self.session_spaces():
            processes = list(session.ProcessList.list_of_type(
                "_EPROCESS", "SessionProcessLinks"))
            yield dict(divider=("_MM_SESSION_SPACE: {0:#x} ID: {1} "
                                "Processes: {2}".format(
                                    session.obj_offset,
                                    session.SessionId,
                                    len(processes))))
            for process in processes:
                yield dict(session_id=session.SessionId,
                           process=process)
            # Follow the undocumented _IMAGE_ENTRY_IN_SESSION list to find the
            # kernel modules loaded in this session.
            for image in session.ImageIterator:
                yield dict(
                    session_id=session.SessionId,
                    image=image)
class ImageInSessionTextObjectRenderer(text.TextObjectRenderer):
    renders_type = "_IMAGE_ENTRY_IN_SESSION"
    def render_row(self, target, **options):
        """Render a session image entry as "module (start-end)"."""
        # Resolve the image base back to a module name, falling back to
        # "?" when the address resolver knows nothing about this address.
        resolved = self.session.address_resolver.format_address(
            target.ImageBase)
        if resolved:
            module_name = resolved[0].split("!")[0]
        else:
            module_name = "?"
        return text.Cell(u"%s (%#x-%#x)" % (
            module_name,
            target.ImageBase, target.LastAddress.v()))
from builtins import str
import re
from rekall.plugins.windows import common
class Win32kAutodetect(common.WindowsCommandPlugin):
    """Automatically detect win32k struct layout."""
    name = "win32k_autodetect"
    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="field", width=20),
        dict(name="offset", style="address"),
        dict(name="definition")
    ]
    def collect(self):
        """Emit the guessed overlay, one divider section per struct."""
        win32k_module = self.session.address_resolver.GetModuleByName(
            "win32k")
        win32k_profile = win32k_module.profile
        overlay = self.GetWin32kOverlay(win32k_profile)
        for struct, definition in list(overlay.items()):
            yield dict(divider="Struct %s" % struct)
            for field, (offset, field_def) in sorted(list(definition[1].items()),
                                                     key=lambda x: x[1]):
                yield dict(field=field, offset=offset,
                           definition=str(field_def))
    def GetWin32kOverlay(self, win32k_profile):
        """Guess member offsets for the win32k GUI structs.

        Tries each process context until grpWinStaList resolves, then
        derives the tagWINDOWSTATION, tagDESKTOP and tagTHREADINFO layouts.
        Returns the (possibly still empty) overlay dict.
        """
        # Make a temporary profile to work with.
        self.temp_profile = win32k_profile
        self.analyze_struct = self.session.plugins.analyze_struct(0)
        # Start off with an empty overlay.
        overlay = dict(tagDESKTOP=[None, {}],
                       tagWINDOWSTATION=[None, {}],
                       tagTHREADINFO=[None, {}],
                       )
        with self.session.plugins.cc() as cc:
            for task in self.session.plugins.pslist().filter_processes():
                cc.SwitchProcessContext(task)
                # Find a process context which makes the symbol valid.
                if not self.wndstation():
                    continue
                try:
                    self.Get_tagWINDOWSTATION_overlay(overlay)
                    self.Get_tagDESKTOP_overlay(overlay)
                    self.Get_tagTHREADINFO_overlay(overlay)
                    return overlay
                except RuntimeError:
                    # This context did not validate; try the next process.
                    continue
        return overlay
    def wndstation(self):
        """Dereference win32k!grpWinStaList in the current context."""
        return self.temp_profile.get_constant_object(
            "grpWinStaList",
            target="Pointer",
            target_args=dict(
                target="tagWINDOWSTATION")
        ).deref()
    def _Match(self, regex, info):
        """Return True if any hint string in info matches regex."""
        for item in info:
            if re.match(regex, item):
                return True
    def _AddField(self, regex, info, field_name, fields, description):
        """Record field_name -> description once a matching hint is seen."""
        if field_name not in fields and self._Match(regex, info):
            fields[field_name] = description
            self.session.logging.debug(
                "Detected field %s: %s @ %#x", field_name, info, description[0])
            return True
    def Get_tagWINDOWSTATION_overlay(self, overlay):
        """Derive the tagWINDOWSTATION overlay."""
        fields = {}
        offset = self.wndstation()
        required_fields = set([
            "rpwinstaNext", "rpdeskList", "pGlobalAtomTable"])
        # Track visited stations to terminate the (circular) list walk.
        stations = set()
        while not offset == None and offset not in stations:
            stations.add(offset)
            self.session.logging.debug("Checking tagWINDOWSTATION at %#x",
                                       int(offset))
            for o, info in self.analyze_struct.GuessMembers(offset, size=0x200):
                if self._AddField(
                        "Tag:Win", info, "rpwinstaNext", fields,
                        [o, ["Pointer", dict(
                            target="tagWINDOWSTATION"
                        )]]):
                    continue
                elif self._AddField(
                        "Tag:Des", info, "rpdeskList", fields,
                        [o, ["Pointer", dict(
                            target="tagDESKTOP"
                        )]]):
                    continue
                elif self._AddField(
                        "Tag:AtmT", info, "pGlobalAtomTable", fields,
                        [o, ["Pointer", dict(
                            target="_RTL_ATOM_TABLE"
                        )]]):
                    continue
                elif self._AddField(
                        "Const:win32k!gTerm", info, "pTerm", fields,
                        [o, ["Pointer", dict(
                            target="tagTERMINAL"
                        )]]):
                    continue
                else:
                    self.session.logging.debug(
                        "Unhandled field %#x, %s" % (o, info))
                    continue
            # Add the derived overlay to the profile so we can walk the list
            # of window stations.
            self.temp_profile.add_overlay(overlay)
            offset = self.temp_profile.tagWINDOWSTATION(offset).rpwinstaNext
        # We worked out all the fields, return the overlay.
        if required_fields.issubset(fields):
            overlay["tagWINDOWSTATION"][1].update(fields)
            return overlay
        self.session.logging.debug(
            "tagWINDOWSTATION: Missing required fields %s",
            required_fields.difference(fields))
        raise RuntimeError("Unable to guess tagWINDOWSTATION")
    def Get_tagDESKTOP_overlay(self, overlay):
        """Derive the tagDESKTOP overlay by walking the desktop list."""
        fields = {}
        required_fields = set([
            "rpdeskNext", "rpwinstaParent", "hsectionDesktop"])
        # Iterate over all tagDESKTOP objects.
        desktops = set()
        offset = self.wndstation().rpdeskList.v()
        while not offset == None and offset not in desktops:
            self.session.logging.debug("Checking tagDESKTOP at %#x", offset)
            desktops.add(offset)
            for o, info in self.analyze_struct.GuessMembers(
                    offset, search=0x400):
                if self._AddField("Tag:Des", info, "rpdeskNext", fields,
                                  [o, ["Pointer", dict(
                                      target="tagDESKTOP"
                                  )]]):
                    continue
                elif self._AddField("Tag:Win", info, "rpwinstaParent", fields,
                                    [o, ["Pointer", dict(
                                        target="tagWINDOWSTATION"
                                    )]]):
                    continue
                elif self._AddField("Tag:Sec", info, "hsectionDesktop", fields,
                                    [o, ["Pointer", dict(
                                        target="_SECTION_OBJECT"
                                    )]]):
                    continue
                # The PtiList is a _LIST_ENTRY to a tagTHREADINFO (Usti tag).
                elif ("_LIST_ENTRY" in info and
                      self._AddField("Tag:Usti", info, "PtiList", fields,
                                     [o, ["_LIST_ENTRY"]])):
                    continue
                # On WinXP a tagTHREADINFO allocation contains ProcessBilled.
                elif ("_LIST_ENTRY" in info and not self._Match("Tag:", info)
                      and self._AddField(
                          "ProcessBilled:", info, "PtiList", fields,
                          [o, ["_LIST_ENTRY"]])):
                    continue
                else:
                    self.session.logging.debug(
                        "Unhandled field %#x %s" % (o, info))
                    continue
            # Add the derived overlay to the profile so we can walk the list
            # of window stations.
            self.temp_profile.add_overlay(overlay)
            offset = self.temp_profile.tagDESKTOP(offset).rpdeskNext
        # We worked out all the fields, return the overlay.
        if required_fields.issubset(fields):
            overlay["tagDESKTOP"][1].update(fields)
            return overlay
        self.session.logging.debug(
            "tagDESKTOP: Missing required fields %s",
            required_fields.difference(fields))
        raise RuntimeError("Unable to guess tagDESKTOP")
    def _Check_tagPROCESSINFO(self, offset):
        """Checks if a pointer points to tagPROCESSINFO."""
        # NOTE(review): this uses self.profile rather than temp_profile as
        # the other helpers do - presumably fine since Pointer is generic;
        # confirm.
        pointer = self.profile.Pointer(offset)
        pool = self.analyze_struct.SearchForPoolHeader(pointer.v())
        if pool.Tag == "Uspi":
            return True
        # Its definitely not a tagPROCESSINFO if it is a tagTHREADINFO.
        if pool.Tag in ["Usti"]:
            return False
        # In windows XP tagPROCESSINFO allocations contain the _EPROCESS
        # address in the ProcessBilled field of the allocation.
        if pool.m("ProcessBilled").Peb:
            return True
        return False
    def _AnalyzeTagTHREADINFO(self, offset, fields):
        """Populate fields with guessed tagTHREADINFO member offsets."""
        self.session.logging.debug("Checking tagTHREADINFO at %#x", offset)
        for o, info in self.analyze_struct.GuessMembers(
                offset, size=0x400, search=0x600):
            if self._AddField("Tag:Thr", info, "pEThread", fields,
                              [o, ["Pointer", dict(
                                  target="_ETHREAD"
                              )]]):
                continue
            elif self._AddField("Tag:Usqu", info, "pq", fields,
                                [o, ["Pointer", dict(
                                    target="tagQ"
                                )]]):
                continue
            elif self._AddField("Tag:Uskb", info, "spklActive", fields,
                                [o, ["Pointer", dict(
                                    target="tagKL"
                                )]]):
                continue
            elif self._AddField("Tag:Des", info, "rpdesk", fields,
                                [o, ["Pointer", dict(
                                    target="tagDESKTOP"
                                )]]):
                continue
            elif ("_LIST_ENTRY" in info and
                  self._AddField("Tag:Usti", info, "GdiTmpTgoList", fields,
                                 [o, ["_LIST_ENTRY"]])):
                continue
            elif (self._Check_tagPROCESSINFO(offset + o) and
                  self._AddField(".", info, "ppi", fields,
                                 [o, ["Pointer", dict(
                                     target="tagPROCESSINFO"
                                 )]])):
                continue
            else:
                self.session.logging.debug("Unhandled field %#x %s" % (o, info))
                continue
    def Get_tagTHREADINFO_overlay(self, overlay):
        """Derive the tagTHREADINFO overlay from desktop PtiList entries."""
        fields = {}
        required_fields = set([
            "pEThread", "pq", "spklActive", "rpdesk", "PtiLink", "ppi"
        ])
        # Iterate over all tagTHREADINFO objects.
        thread_infos = set()
        for wndstation in self.wndstation().rpwinstaNext.walk_list(
                "rpwinstaNext"):
            for desktop in wndstation.rpdeskList.walk_list("rpdeskNext"):
                thread_info_pool = self.analyze_struct.SearchForPoolHeader(
                    desktop.PtiList.Flink.v(), search=0x600)
                if thread_info_pool and thread_info_pool not in thread_infos:
                    thread_infos.add(thread_info_pool)
                    # We can already determine the tagTHREADINFO's PtiLink:
                    PtiLink_offset = (desktop.PtiList.Flink.v() -
                                      thread_info_pool.obj_end)
                    fields["PtiLink"] = [PtiLink_offset, ["_LIST_ENTRY"]]
                    self._AnalyzeTagTHREADINFO(thread_info_pool.obj_end, fields)
                    self.temp_profile.add_overlay(overlay)
        # We worked out all the fields, return the overlay.
        if required_fields.issubset(fields):
            overlay["tagTHREADINFO"][1].update(fields)
            return overlay
        self.session.logging.debug(
            "tagTHREADINFO: Missing required fields %s",
            required_fields.difference(fields))
        raise RuntimeError("Unable to guess tagTHREADINFO")
from rekall.plugins.windows import common
from rekall.plugins.windows.gui import win32k_core
class WindowsStations(win32k_core.Win32kPluginMixin,
                      common.WindowsCommandPlugin):
    """Displays all the windows stations by following lists."""
    __name = "windows_stations"
    table_header = [
        dict(name="WindowStation", style="address"),
        dict(name="Name", width=20),
        dict(name="SesId", width=5),
        dict(name="AtomTable", style="address"),
        dict(name="Interactive", width=11),
        dict(name="Desktops")
    ]
    def stations_in_session(self, session):
        """Yield the tagWINDOWSTATION objects of one session."""
        # Get the start of the Window station list from
        # win32k.sys. These are all the Windows stations that exist in
        # this Windows session.
        station_list = self.win32k_profile.get_constant_object(
            "grpWinStaList",
            target="Pointer",
            target_args=dict(
                target="tagWINDOWSTATION"
            ),
            vm=session.obj_vm,
        )
        for station in station_list.walk_list("rpwinstaNext"):
            yield station
    def stations(self):
        """A generator of tagWINDOWSTATION objects."""
        # Each windows session has a unique set of windows stations.
        for session in self.session.plugins.sessions().session_spaces():
            for station in self.stations_in_session(session):
                yield station
    def collect(self):
        """One row per window station, with its desktop names listed."""
        for window_station in self.stations():
            desktops = [desk.Name for desk in window_station.desktops()]
            yield dict(WindowStation=window_station,
                       Name=window_station.Name,
                       SesId=window_station.dwSessionId,
                       AtomTable=window_station.pGlobalAtomTable,
                       Interactive=window_station.Interactive,
                       Desktops=desktops)
class WinDesktops(WindowsStations):
    """Print information on each desktop."""

    __name = "desktops"

    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="tagDESKTOP", style="address"),
        dict(name="Name", width=20),
        dict(name="Sid", width=3),
        dict(name="Hooks", width=5),
        dict(name="tagWND", style="address"),
        dict(name="Winds", width=5),
        dict(name="Thrd", width=5),
        dict(name="_EPROCESS"),
    ]

    def collect(self):
        """Yield a divider per desktop, then one row per desktop thread."""
        for window_station in self.stations():
            for desktop in window_station.desktops():
                # Summarize the desktop identity and its heap layout in a
                # divider preceding the per-thread rows.
                divider = "Desktop: {0:#x}, Name: {1}\\{2}\n".format(
                    desktop,
                    window_station.Name,
                    desktop.Name)

                divider += ("Heap: {0:#x}, Size: {1:#x}, Base: {2:#x}, "
                            "Limit: {3:#x}\n").format(
                                desktop.pheapDesktop.v(),
                                (desktop.DeskInfo.pvDesktopLimit.v() -
                                 desktop.DeskInfo.pvDesktopBase.v()),
                                desktop.DeskInfo.pvDesktopBase,
                                desktop.DeskInfo.pvDesktopLimit,
                            )

                yield dict(divider=divider)

                # Count all windows reachable from the desktop's root window;
                # the same count is reported on every thread row below.
                window_count = len(list(desktop.windows(
                    desktop.DeskInfo.spwnd)))

                for thrd in desktop.threads():
                    yield dict(
                        tagDESKTOP=desktop,
                        Name=desktop.Name,
                        Sid=desktop.dwSessionId,
                        Hooks=desktop.DeskInfo.fsHooks,
                        tagWND=desktop.DeskInfo.spwnd.deref(),
                        Winds=window_count,
                        Thrd=thrd.pEThread.Cid.UniqueThread,
                        _EPROCESS=thrd.ppi.Process.deref())
from rekall import plugin
from rekall import obj
from rekall import testlib
from rekall.plugins.overlays.windows import pe_vtypes
from rekall.plugins.windows import common
class ImpScan(common.WinProcessFilter):
    """Scan for calls to imported functions."""

    __name = "impscan"

    # Windows forwards several kernel32 imports to ntdll implementations at
    # load time. When a resolved call lands on one of these ntdll exports we
    # report the kernel32 name the binary originally imported.
    FORWARDED_IMPORTS = {
        "RtlGetLastWin32Error" : "kernel32.dll!GetLastError",
        "RtlSetLastWin32Error" : "kernel32.dll!SetLastError",
        "RtlRestoreLastWin32Error" : "kernel32.dll!SetLastError",
        "RtlAllocateHeap" : "kernel32.dll!HeapAlloc",
        "RtlReAllocateHeap" : "kernel32.dll!HeapReAlloc",
        "RtlFreeHeap" : "kernel32.dll!HeapFree",
        "RtlEnterCriticalSection" : "kernel32.dll!EnterCriticalSection",
        "RtlLeaveCriticalSection" : "kernel32.dll!LeaveCriticalSection",
        "RtlDeleteCriticalSection" : "kernel32.dll!DeleteCriticalSection",
        "RtlZeroMemory" : "kernel32.dll!ZeroMemory",
        "RtlSizeHeap" : "kernel32.dll!HeapSize",
        "RtlUnwind" : "kernel32.dll!RtlUnwind",
    }

    @classmethod
    def args(cls, parser):
        """Declare the command line args we need."""
        super(ImpScan, cls).args(parser)
        parser.add_argument("-b", "--base", default=None, type="IntParser",
                            help="Base address in process memory if --pid is "
                            "supplied, otherwise an address in kernel space")

        parser.add_argument("-s", "--size", default=None, type="IntParser",
                            help="Size of memory to scan")

        parser.add_argument("-k", "--kernel", default=None, type="Boolean",
                            help="Scan in kernel space.")

    def __init__(self, base=None, size=None, kernel=None, **kwargs):
        """Scans the imports from a module.

        Often when dumping a PE executable from memory the import address tables
        are over written. This makes it hard to resolve function names when
        disassembling the binary.

        This plugin enumerates all dlls in the process address space and
        examines their export address tables. It then disassembles the
        executable code for calls to external functions. We attempt to resolve
        the names of the calls using the known exported functions we gathered in
        step 1.

        This technique can be used for a process, or the kernel itself. In the
        former case, we examine dlls, while in the later case we examine kernel
        modules using the modules plugin.

        Args:
          base: Start disassembling at this address - this is normally the base
            address of the dll or module we care about. If omitted we use the
            kernel base (if in kernel mode) or the main executable (if in
            process mode).

          size: Disassemble this many bytes from the address space. If omitted
            we use the module which starts at base.

          kernel: The mode to use. If set, we operate in kernel mode.
        """
        super(ImpScan, self).__init__(**kwargs)
        self.base = base
        self.size = size
        self.kernel = kernel

    def _enum_apis(self, all_mods):
        """Enumerate all exported functions from kernel or process space.

        @param all_mods: list of _LDR_DATA_TABLE_ENTRY

        To enum kernel APIs, all_mods is a list of drivers.
        To enum process APIs, all_mods is a list of DLLs.

        The function name is used if available, otherwise
        we take the ordinal value.

        Returns:
          dict mapping function address -> (module, func_pointer, name).
        """
        exports = {}

        for i, mod in enumerate(all_mods):
            self.session.report_progress("Scanning imports %s/%s" % (
                i, len(all_mods)))

            pe = pe_vtypes.PE(address_space=mod.obj_vm,
                              session=self.session, image_base=mod.DllBase)

            for _, func_pointer, func_name, ordinal in pe.ExportDirectory():
                function_name = func_name or ordinal or ''
                exports[func_pointer.v()] = (mod, func_pointer, function_name)

        return exports

    def _iat_scan(self, addr_space, calls_imported, apis, base_address,
                  end_address):
        """Scan forward from the lowest IAT entry found for new import entries.

        Args:
          addr_space: an AS
          calls_imported: Import database - a dict. Updated in place.
          apis: dictionary of exported functions in the AS.
          base_address: memory base address for this module.
          end_address: end of valid address range.
        """
        if not calls_imported:
            return

        # Search the iat from the earliest function address to the latest
        # address for references to other functions.
        start_addr = min(calls_imported.keys())

        # NOTE(review): this is a byte span but is used below as the pointer
        # element count of the array - effectively capping the scan at 2000
        # entries. Confirm intended.
        iat_size = min(max(calls_imported.keys()) - start_addr, 2000)

        # The IAT is a table of pointers to functions.
        iat = self.profile.Array(
            offset=start_addr, vm=addr_space, target="Pointer",
            count=iat_size, target_args=dict(target="Function"))

        for func_pointer in iat:
            func = func_pointer.dereference()
            if (not func or
                    (func.obj_offset > base_address and
                     func.obj_offset < end_address)):  # skip call to self
                continue

            # Add the export to our database of imported calls.
            if (func.obj_offset in apis and
                    func_pointer.obj_offset not in calls_imported):
                iat_addr = func_pointer.obj_offset
                calls_imported[iat_addr] = (iat_addr, func)

    def _original_import(self, mod_name, func_name):
        """Revert a forwarded import to the original module and function name.

        @param mod_name: current module name
        @param func_name: current function name
        """
        if func_name in self.FORWARDED_IMPORTS:
            return self.FORWARDED_IMPORTS[func_name].split("!")
        else:
            return mod_name, func_name

    # Disassembler match rules for indirect control transfers through memory.
    CALL_RULE = {'mnemonic': 'CALL', 'operands': [
        {'type': 'MEM', 'target': "$target", 'address': '$address'}]}

    JMP_RULE = {'mnemonic': 'JMP', 'operands': [
        {'type': 'MEM', 'target': "$target", 'address': '$address'}]}

    def call_scan(self, addr_space, base_address, size_to_read):
        """Locate calls in a block of code.

        Disassemble a block of data and yield possible calls to imported
        functions. We're looking for instructions such as these:

        x86:
        CALL DWORD [0x1000400]
        JMP  DWORD [0x1000400]

        x64:
        CALL QWORD [RIP+0x989d]

        On x86, the 0x1000400 address is an entry in the IAT or call table. It
        stores a DWORD which is the location of the API function being called.

        On x64, the 0x989d is a relative offset from the current instruction
        (RIP).

        So we simply disassemble the entire code section of the executable
        looking for calls, then we collect all the targets of the calls.

        @param addr_space: an AS to scan with
        @param base_address: memory base address
        @param size_to_read: number of bytes to disassemble from base_address

        Yields:
          (instruction address, IAT slot address, target Function).
        """
        func_obj = self.profile.Function(vm=addr_space, offset=base_address)
        end_address = base_address + size_to_read

        for instruction in func_obj.disassemble(2**32):
            if instruction.address > end_address:
                break

            context = {}
            if (instruction.match_rule(self.CALL_RULE, context) or
                    instruction.match_rule(self.JMP_RULE, context)):
                target = context.get("$target")
                if target:
                    yield (instruction.address,
                           context.get("$address"),
                           self.profile.Function(vm=addr_space, offset=target))

    def find_process_imports(self, task):
        """Yield (iat, func_pointer, module, func_name) for a process."""
        task_space = task.get_process_address_space()
        all_mods = list(task.get_load_modules())

        # PEB is paged or no DLLs loaded.
        if not all_mods:
            self.session.logging.error("Cannot load DLLs in process AS")
            return

        # Exported function of all other modules in the address space.
        apis = self._enum_apis(all_mods)

        # Its OK to blindly take the 0th element because the executable is
        # always the first module to load.
        base_address = int(all_mods[0].DllBase)
        size_to_read = int(all_mods[0].SizeOfImage)

        calls_imported = {}
        for address, iat, destination in self.call_scan(
                task_space, base_address, size_to_read):
            self.session.report_progress("Resolving import %s->%s" % (
                address, iat))

            calls_imported[iat] = (address, destination)

        # Scan the IAT for additional functions.
        self._iat_scan(task_space, calls_imported, apis,
                       base_address, base_address + size_to_read)

        for iat, (_, func_pointer) in sorted(calls_imported.items()):
            tmp = apis.get(func_pointer.obj_offset)
            if tmp:
                module, func_pointer, func_name = tmp
                yield iat, func_pointer, module, func_name

    def find_kernel_import(self):
        """Yield (iat, func_pointer, module, func_name) for kernel space."""
        # If the user has not specified the base, we just use the kernel's
        # image.
        base_address = self.base
        if base_address is None:
            base_address = self.session.GetParameter("kernel_base")

        # BUGFIX: the module list is needed below for _enum_apis() even when
        # the user supplied --size, so obtain the plugin unconditionally
        # (previously modlist was only bound inside the size-guessing branch,
        # raising NameError when --size was given).
        modlist = self.session.plugins.modules()

        # Get the size from the module list if its not supplied.
        size_to_read = self.size
        if not size_to_read:
            for module in modlist.lsmod():
                if module.DllBase == base_address:
                    size_to_read = module.SizeOfImage
                    break

        if not size_to_read:
            raise plugin.PluginError("You must specify a size to read.")

        all_mods = list(modlist.lsmod())
        apis = self._enum_apis(all_mods)

        calls_imported = {}
        for address, iat, destination in self.call_scan(
                self.kernel_address_space, base_address, size_to_read):
            calls_imported[iat] = (address, destination)
            self.session.report_progress(
                "Found %s imports" % len(calls_imported))

        # Scan the IAT for additional functions.
        # BUGFIX: _iat_scan() expects an end *address*; previously the raw
        # size was passed which made the valid range empty.
        self._iat_scan(self.kernel_address_space, calls_imported, apis,
                       base_address, base_address + size_to_read)

        for iat, (address, func_pointer) in sorted(calls_imported.items()):
            module, func_pointer, func_name = apis.get(func_pointer.v(), (
                obj.NoneObject("Unknown"),
                obj.NoneObject("Unknown"),
                obj.NoneObject("Unknown")))

            yield iat, func_pointer, module, func_name

    def render(self, renderer):
        """Render either the kernel import table or one table per process."""
        # BUGFIX: column name was misspelled 'moduole', which broke
        # programmatic column addressing.
        table_header = [("IAT", 'iat', "[addrpad]"),
                        ("Call", 'call', "[addrpad]"),
                        ("Module", 'module', "20"),
                        ("Function", 'function', ""),
                       ]

        if self.kernel:
            renderer.format("Kernel Imports\n")

            renderer.table_header(table_header)
            for iat, func, mod, func_name in self.find_kernel_import():
                mod_name, func_name = self._original_import(
                    mod.BaseDllName, func_name)

                renderer.table_row(iat, func, mod_name, func_name)
        else:
            for task in self.filter_processes():
                renderer.section()
                renderer.format("Process {0} PID {1}\n", task.ImageFileName,
                                task.UniqueProcessId)

                renderer.table_header(table_header)
                for iat, func, mod, func_name in self.find_process_imports(
                        task):
                    mod_name, func_name = self._original_import(
                        mod.BaseDllName, func_name)

                    renderer.table_row(iat, func, mod_name, func_name)
class TestImpScan(testlib.SimpleTestCase):
    """Test the impscan module."""

    # Run impscan against the pid(s) chosen by the test harness.
    PARAMETERS = dict(commandline="impscan %(pids)s")
from builtins import range
from builtins import object
__author__ = "Michael Cohen <scudette@gmail.com>"
import yara
from rekall import plugin
from rekall import scan
from rekall.plugins import yarascanner
from rekall.plugins.common import pfn
from rekall.plugins.tools import yara_support
from rekall.plugins.windows import common
from rekall.plugins.windows import pagefile
from rekall_lib import utils
class WinYaraScan(yarascanner.YaraScanMixin, common.WinScanner):
    """Scan using yara signatures."""

    # Unless the user overrides it, scan the physical address space.
    scanner_defaults = dict(
        scan_physical=True
    )
class ContextBuffer(object):
    """A class to manage hits and create contiguous context buffers.

    Yara hits on the physical address space fall in pages belonging to many
    different owners. This class resolves the owning context of each hit page
    (usually an _EPROCESS address) and groups the hits per context, so the
    original multi-string rules can later be evaluated over all the hits of a
    single context at once.
    """

    def __init__(self, session):
        # Cache pfn id -> list of owning contexts, so the PFN database is not
        # walked repeatedly for pages we already resolved.
        self._context_cache = utils.FastStore(max_size=10000)

        # Remember the last page looked at - consecutive hits tend to fall
        # into the same page.
        self.last_pfn_id = -1
        self.last_context_list = None

        # Map context -> {string_name: (original_offset, value)}.
        self.hits_by_context = {}
        self.session = session
        self.address_space = session.physical_address_space

    def _add_hit_offset(self, context_list, string_name, original_offset,
                        value):
        """Record a hit against every context owning the hit page.

        Only the first hit per string name is kept for each context - one
        occurrence suffices for the later rule evaluation.
        """
        for context in context_list:
            hits_by_context_dict = self.hits_by_context.setdefault(context, {})
            if string_name not in hits_by_context_dict:
                # BUGFIX: previously the value was stored via
                # value.encode("base64") and decoded again on retrieval. The
                # "base64" str codec does not exist on Python 3 and the
                # round-trip was an identity anyway, so store the raw value.
                hits_by_context_dict[string_name] = (original_offset, value)

    def add_hit(self, string_name, hit_offset, value):
        """Register a yara string hit at a physical offset."""
        pfn_id = hit_offset >> 12
        if pfn_id == self.last_pfn_id:
            # Fast path: same page as the previous hit - reuse its contexts.
            if self.last_context_list is not None:
                self._add_hit_offset(
                    self.last_context_list, string_name, hit_offset, value)
        else:
            self.last_pfn_id = pfn_id
            self.last_context_list = self.get_contexts(pfn_id << 12)
            if self.last_context_list:
                self._add_hit_offset(
                    self.last_context_list, string_name, hit_offset, value)
            else:
                self.session.logging.debug(
                    "No process context for hit at %#x", hit_offset)

    def get_combined_context_buffers(self):
        """Yields (context, offset_map, pseudo_data) for each context.

        pseudo_data concatenates all hit values of the context separated by
        padding; offset_map maps pseudo buffer offsets back to the original
        physical offsets.
        """
        # Assemble the pseudo buffer as bytes: yara-python returns matched
        # values as bytes on Python 3 (on Python 2, bytes is str so this is
        # unchanged).
        pad = b"\xFF" * 10
        for context, hits_dict in self.hits_by_context.items():
            data = []
            data_len = 0

            # Map the original offset to the dummy buffer offset.
            omap = {}
            for hit_offset, value in hits_dict.values():
                omap[data_len] = hit_offset

                # Some padding separates out the sigs.
                data.append(value)
                data.append(pad)
                data_len += len(value) + len(pad)

            yield context, omap, b"".join(data)

    def process_owners_from_physical_address(self, address):
        """Get the process owner from the physical address.

        We could use the ptov() or rammap() plugin but this is a very fast
        implementation which only cares about the identity of the owner.
        """
        pfn_id = address >> 12
        try:
            return self._context_cache.Get(pfn_id)
        except KeyError:
            pass

        # Try to find a process that owns this page. This is an optimized
        # version of the algorithm in the pfn, ptov and rammap plugins.
        pfn_database = self.session.profile.get_constant_object(
            "MmPfnDatabase")

        pfn_obj = pfn_database[pfn_id]

        # This is a mapped file.
        if pfn_obj.IsPrototype:
            # This is the controlling PTE.
            pte_address = pfn_obj.PteAddress.v()
            try:
                # All PTEs in that page are owned by the same owners.
                return self._context_cache.Get(pte_address >> 12)
            except KeyError:
                descriptor = pagefile.WindowsFileMappingDescriptor(
                    session=self.session, pte_address=pte_address)

                owners = [x[0] for x in descriptor.get_owners()]
                self._context_cache.Put(pte_address >> 12, owners)

                return owners

        # We only care about the process owner so this is the first half of
        # pfn.ptov._ptov_x64_hardware_PTE(). Walk up at most 4 paging levels.
        p_addr = address
        pfns = []
        for _ in range(4):
            pfn_id = p_addr >> 12
            try:
                owners = self._context_cache.Get(pfn_id)
                # Propagate the answer to all intermediate pfns we visited.
                for pfn_id in pfns:
                    self._context_cache.Put(pfn_id, owners)

                return owners
            except KeyError:
                pass

            pfn_obj = pfn_database[pfn_id]
            pfns.append(pfn_id)

            # The PTE which controls this pfn.
            pte = pfn_obj.PteAddress

            # The physical address of the PTE.
            p_addr = ((pfn_obj.u4.PteFrame << 12) |
                      (pte.v() & 0xFFF))

        # The DTB must be page aligned.
        descriptor = pagefile.WindowsDTBDescriptor(
            session=self.session, dtb=p_addr & ~0xFFF)

        owners = [descriptor.owner()]
        for pfn_id in pfns:
            self._context_cache.Put(pfn_id, owners)

        return owners

    def get_contexts(self, offset):
        """Get some context about this offset.

        We use this context to group similar yara hits into logical groups.

        Returns:
          a list of things which can be used as contexts - i.e. they are unique
          for all pages common within this context. Pages will be grouped by
          these contexts and evaluated together.
        """
        owners = self.process_owners_from_physical_address(offset)
        if not owners:
            return []

        return [x.obj_offset for x in owners]
class WinPhysicalYaraScanner(common.AbstractWindowsCommandPlugin):
    """An experimental yara scanner over the physical address space.

    Yara does not provide a streaming interface, which means that when we scan
    for yara rules we can only ever match strings within the same buffer. This
    is a problem for physical address space scanning because each page (although
    it might appear to be contiguous) usually comes from a different
    process/mapped file.

    Therefore we need a more intelligent way to apply yara signatures on the
    physical address space:

    1. The original set of yara rules is converted into a single rule with all
    the strings from all the rules in it. The rule has a condition "any of them"
    which will match any string appearing in the scanned buffer.

    2. This rule is then applied over the physical address space.

    3. For each hit we derive a context and add the hit to the context.

    4. Finally we test all the rules within the same context with the original
    rule set.
    """

    name = "yarascan_physical"

    table_header = [
        dict(name="Owner", width=20),
        dict(name="Rule", width=10),
        dict(name="Offset", style="address"),
        dict(name="HexDump", hex_width=16, width=67),
        dict(name="Context"),
    ]

    __args = [
        dict(name="hits", default=10, type="IntParser",
             help="Quit after finding this many hits."),

        dict(name="yara_expression",
             help="If provided we scan for this yara "
             "expression specified in the yara DSL."),

        dict(name="yara_ast",
             help="If provided we scan for this yara "
             "expression specified in the yara JSON AST."),

        dict(name="start", default=0, type="IntParser",
             help="Start searching from this offset."),

        dict(name="limit", default=2**64, type="IntParser",
             help="The length of data to search."),

        dict(name="context", default=0x40, type="IntParser",
             help="Context to print after the hit."),

        dict(name="pre_context", default=0, type="IntParser",
             help="Context to print before the hit."),
    ]

    scanner_defaults = dict(
        scan_physical=True
    )

    def __init__(self, *args, **kwargs):
        """Compile both the original rules and the unified single-pass rule."""
        super(WinPhysicalYaraScanner, self).__init__(*args, **kwargs)
        try:
            # The user gave a yara DSL rule.
            if self.plugin_args.yara_expression:
                self.rules = yara.compile(
                    source=self.plugin_args.yara_expression)
                self.parsed_rules = yara_support.parse_yara_to_ast(
                    self.plugin_args.yara_expression)

            # User gave a yara AST.
            elif self.plugin_args.yara_ast:
                self.parsed_rules = self.plugin_args.yara_ast
                self.rules = yara.compile(
                    source=yara_support.ast_to_yara(self.parsed_rules))
            else:
                raise plugin.PluginError("A yara expression must be provided.")

            # Gather the strings of every rule into one list. Each string is
            # renamed "<string>_<rule_id>_REKALL_<rule_name>" to keep the
            # identifiers unique across rules (and, presumably, to allow the
            # originating rule to be recovered from a hit - verify).
            all_strings = []
            rule_id = 0
            for parsed_rule in self.parsed_rules:
                name = parsed_rule["name"]
                for k, v in parsed_rule["strings"]:
                    rule_name = "%s_%d_REKALL_%s" % (k, rule_id, name)
                    all_strings.append((rule_name, v))
                rule_id += 1

            # The unified rule matches if any string of any rule matches.
            self.parsed_unified_rule = [
                dict(name="XX",
                     strings=all_strings,
                     condition="any of them")
            ]

            self.plugin_args.unified_yara_expression = (
                yara_support.ast_to_yara(self.parsed_unified_rule))

            self.unified_rule = yara.compile(
                source=self.plugin_args.unified_yara_expression)

            self.context_buffer = ContextBuffer(self.session)
        except Exception as e:
            # NOTE(review): this also re-wraps the PluginError raised above
            # for a missing expression into a "Failed to compile" message.
            raise plugin.PluginError(
                "Failed to compile yara expression: %s" % e)

    def collect(self):
        """Two-pass scan: unified rule over physical memory, then the
        original rules over per-context pseudo buffers."""
        address_space = self.session.physical_address_space
        for buffer_as in scan.BufferASGenerator(
                self.session, address_space,
                self.plugin_args.start,
                self.plugin_args.start + self.plugin_args.limit):
            self.session.report_progress(
                "Scanning buffer %#x->%#x (%#x)",
                buffer_as.base_offset, buffer_as.end(),
                buffer_as.end() - buffer_as.base_offset)

            # Pass 1: record every string hit, grouped by owning context.
            for match in self.unified_rule.match(data=buffer_as.data):
                for buffer_offset, string_name, value in sorted(match.strings):
                    hit_offset = buffer_offset + buffer_as.base_offset
                    self.context_buffer.add_hit(string_name, hit_offset, value)

        # Now re-run the original expression on all unique contexts.
        it = self.context_buffer.get_combined_context_buffers()
        for context, original_offset_map, pseudo_data in it:
            seen = set()
            self.session.report_progress(
                "Scanning pseudo buffer of length %d" % len(pseudo_data))

            # Report any hits of the original sig on this context.
            for match in self.rules.match(data=pseudo_data):
                self.session.report_progress()

                # Only report a single hit of the same rule on the same context.
                dedup_key = (match.rule, context)
                if dedup_key in seen:
                    continue

                seen.add(dedup_key)

                for buffer_offset, _, value in match.strings:
                    # Translate the pseudo buffer offset back to the real
                    # physical offset of the hit.
                    hit_offset = original_offset_map.get(buffer_offset)
                    if hit_offset is not None:
                        if isinstance(context, int):
                            # Contexts produced by get_contexts() are
                            # _EPROCESS offsets - rehydrate the object.
                            owner = self.session.profile._EPROCESS(context)
                        else:
                            owner = context

                        yield dict(
                            Owner=owner,
                            Rule=match.rule,
                            Offset=hit_offset,
                            HexDump=utils.HexDumpedString(
                                address_space.read(
                                    hit_offset - self.plugin_args.pre_context,
                                    self.plugin_args.context +
                                    self.plugin_args.pre_context)),
                            Context=pfn.PhysicalAddressContext(
                                self.session, hit_offset)
                        )
from builtins import hex
from builtins import object
import struct
from rekall import testlib
from rekall.plugins.windows import common
from rekall.plugins.overlays.windows import pe_vtypes
from rekall_lib import utils
class DecodingError(Exception):
    """Raised when unable to decode an instruction.

    Raised by HookHeuristic when it encounters an operand form it does not
    support, aborting emulation of the current function.
    """
class HookHeuristic(object):
    """A Hook heuristic detects possible hooks.

    This heuristic emulates some common CPU instructions to try and detect
    control flow jumps within the first few instructions of a function.

    These are essentially guesses based on the most common hook types. Be aware
    that these are pretty easy to defeat which will cause the hook to be missed.

    See rekall/src/hooks/amd64.asm and rekall/src/hooks/i386.asm For the test
    cases which illustrate the type of hooks that we will detect.
    """

    def __init__(self, session=None):
        self.session = session
        self.Reset()

    def Reset(self):
        """Clear the emulated machine state between functions."""
        # Keep track of registers, stack and main memory.
        self.regs = {}
        self.stack = []
        self.memory = {}

    def WriteToOperand(self, operand, value):
        """Store value into a register or memory operand."""
        if operand["type"] == "REG":
            self.regs[operand["reg"]] = value

        elif operand["type"] == "IMM":
            self.memory[operand["address"]] = value

        elif operand["type"] == "MEM":
            self.memory[self._get_mem_operand_target(operand)] = value

        else:
            raise DecodingError("Operand not supported")

    def ReadFromOperand(self, operand):
        """Read the operand.

        We support the following forms:
        - Immediate (IMM):              JMP 0x123456
        - Absolute Memory Address (MEM): JMP [0x123456]
        - Register (REG):                JMP [EAX]
        """
        # Read from register.
        if operand["type"] == 'REG':
            # Unwritten registers read as 0.
            return self.regs.get(operand["reg"], 0)

        # Immediate operand.
        elif operand["type"] == 'IMM':
            return operand["address"]

        # Read the content of memory.
        elif operand["type"] == "MEM":
            return self._GetMemoryAddress(
                self._get_mem_operand_target(operand), operand["size"])

        else:
            raise DecodingError("Operand not supported")

    def _get_mem_operand_target(self, operand):
        """Compute the effective address of a MEM operand."""
        reg_base = operand["base"]
        if reg_base == "RIP":
            # RIP-relative addressing: the disassembler already resolved the
            # absolute address.
            return operand["address"]

        else:
            # Register reference [base_reg + disp + index_reg * scale]
            return (self.regs.get(reg_base, 0) +
                    operand["disp"] +
                    self.regs.get(operand["index"], 0) * operand["scale"])

    def _GetMemoryAddress(self, offset, size):
        """Read a size-byte integer at offset, honoring emulated writes."""
        try:
            # First check our local cache for a previously written value.
            return self.memory[offset]
        except KeyError:
            data = self.address_space.read(offset, size)
            # NOTE(review): "b" is signed while "H"/"I"/"Q" are unsigned -
            # confirm the 1-byte case is intentionally signed.
            format_string = {1: "b", 2: "H", 4: "I", 8: "Q"}[size]
            return struct.unpack(format_string, data)[0]

    def process_lea(self, instruction):
        """Copies the address from the second operand to the first."""
        operand = instruction.operands[1]
        if operand["type"] == 'MEM':
            self.WriteToOperand(instruction.operands[0],
                                self._get_mem_operand_target(operand))

        else:
            raise DecodingError("Invalid LEA source.")

    def process_push(self, instruction):
        # PUSH: read the operand and append to the emulated stack.
        value = self.ReadFromOperand(instruction.operands[0])
        self.stack.append(value)

    def process_pop(self, instruction):
        # POP: underflowing the emulated stack yields 0 rather than failing.
        try:
            value = self.stack.pop(-1)
        except IndexError:
            value = 0

        self.WriteToOperand(instruction.operands[0], value)

    def process_ret(self, _):
        # RET: the return target is whatever is on top of the stack (a
        # PUSH/RET hook); an empty stack returns None (no hook detected).
        if self.stack:
            return self.stack.pop(-1)

    def process_mov(self, instruction):
        value = self.ReadFromOperand(instruction.operands[1])
        self.WriteToOperand(instruction.operands[0], value)

    def process_inc(self, instruction):
        value = self.ReadFromOperand(instruction.operands[0])
        self.WriteToOperand(instruction.operands[0], value + 1)

    def process_dec(self, instruction):
        value = self.ReadFromOperand(instruction.operands[0])
        self.WriteToOperand(instruction.operands[0], value - 1)

    def process_cmp(self, instruction):
        """We dont do anything with the comparison since we dont test for it."""
        _ = instruction

    def process_test(self, instruction):
        """We dont do anything with the comparison since we dont test for it."""
        _ = instruction

    def _Operate(self, instruction, operator):
        """Apply a binary operator to both operands, writing the result back
        to the first operand. CPU flags are not modeled."""
        value1 = self.ReadFromOperand(instruction.operands[0])
        value2 = self.ReadFromOperand(instruction.operands[1])

        self.WriteToOperand(
            instruction.operands[0], operator(value1, value2))

    def process_xor(self, instruction):
        return self._Operate(instruction, lambda x, y: x ^ y)

    def process_add(self, instruction):
        return self._Operate(instruction, lambda x, y: x + y)

    def process_sub(self, instruction):
        return self._Operate(instruction, lambda x, y: x - y)

    def process_and(self, instruction):
        return self._Operate(instruction, lambda x, y: x & y)

    def process_or(self, instruction):
        return self._Operate(instruction, lambda x, y: x | y)

    # NOTE(review): shift counts are bounded with % 0xFF; real x86 masks the
    # count with 0x1F/0x3F - confirm the modulus is intended.
    def process_shl(self, instruction):
        return self._Operate(instruction, lambda x, y: x << (y % 0xFF))

    def process_shr(self, instruction):
        return self._Operate(instruction, lambda x, y: x >> (y % 0xFF))

    def Inspect(self, function, instructions=10):
        """The main entry point to the Hook processor.

        We emulate the function instructions and try to determine the jump
        destination.

        Args:
          function: A basic.Function() instance.
          instructions: maximum number of instructions to emulate.

        Returns:
          The resolved control-transfer target address, or None if no
          CALL/JMP/RET target was found (or an instruction could not be
          handled).
        """
        self.Reset()
        self.address_space = function.obj_vm
        for instruction in function.disassemble(instructions=instructions):
            if instruction.is_return():
                # RET Instruction terminates processing.
                return self.process_ret(instruction)

            elif instruction.mnemonic == "call":
                return self.ReadFromOperand(instruction.operands[0])

            # A JMP instruction.
            elif instruction.is_branch():
                return self.ReadFromOperand(instruction.operands[0])

            else:
                # Instructions without a handler are silently skipped.
                try:
                    handler = getattr(self, "process_%s" % instruction.mnemonic)
                except AttributeError:
                    continue

                # Handle the instruction.
                try:
                    handler(instruction)
                except Exception:
                    self.session.logging.error(
                        "Unable to handle instruction %s", instruction.op_str)
                    return
class CheckPEHooks(common.WindowsCommandPlugin):
"""Checks a pe file mapped into memory for hooks."""
name = "check_pehooks"
__args = [
dict(name="image_base", default=0,
positional=True, type="SymbolAddress",
help="The base address of the pe image in memory."),
dict(name="type", default="all",
choices=["all", "iat", "inline", "eat"],
type="Choice", help="Type of hook to display."),
dict(name="thorough", default=False, type="Boolean",
help="By default we take some optimization. This flags forces "
"thorough but slower checks."),
]
table_header = [
dict(name="Type", width=10),
dict(name="source", width=20),
dict(name="target", width=20),
dict(name="source_func", width=60),
dict(name="target_func"),
]
def reported_access(self, address):
    """Determines if the address should be reported.

    This assesses the destination address for suspiciousness. For example if
    the address resides in a VAD region which is not mapped by a dll then it
    might be suspicious.

    Returns:
      False when the address resolves inside a known module (not suspicious);
      otherwise a truthy name for the destination (the last resolved vad_*
      name, or the hex address when nothing resolves).
    """
    destination_names = self.session.address_resolver.format_address(
        address)

    # For now very simple: If any of the destination_names start with vad_*
    # it means that the address resolver cant determine which module they
    # came from.
    destination = hex(address)  # Fallback when no names resolve at all.
    for destination in destination_names:
        if not destination.startswith("vad_"):
            return False

    return destination
def detect_IAT_hooks(self):
    """Detect Import Address Table hooks.

    An IAT hook is where malware changes the IAT entry for a dll after its
    loaded so that when it is called from within the DLL, flow control is
    directed to the malware instead.

    We determine the IAT entry is hooked if the address is outside the dll
    which is imported.

    Yields:
      (function_name, func_address) for each suspect IAT slot.
    """
    pe = pe_vtypes.PE(image_base=self.plugin_args.image_base,
                      session=self.session)

    # First try to find all the names of the imported functions.
    imports = [
        (dll, func_name) for dll, func_name, _ in pe.ImportDirectory()]

    resolver = self.session.address_resolver
    for idx, (dll, func_address, _) in enumerate(pe.IAT()):
        try:
            # The IAT and the import directory are parallel arrays - entry
            # idx of one names entry idx of the other.
            target_dll, target_func_name = imports[idx]
            target_dll = self.session.address_resolver.NormalizeModuleName(
                target_dll)
        except IndexError:
            # We can not retrieve these function's name from the
            # OriginalFirstThunk array - possibly because it is not mapped
            # in.
            target_dll = dll
            target_func_name = ""

        self.session.report_progress(
            "Checking function %s!%s", target_dll, target_func_name)

        # We only want the containing module.
        module = resolver.GetContainingModule(func_address)
        if module and target_dll == module.name:
            continue

        # Use ordinal if function has no name
        if not len(target_func_name):
            target_func_name = "(%s)" % idx

        function_name = "%s!%s" % (target_dll, target_func_name)

        # Function_name is the name which the PE file want
        yield function_name, func_address
def collect_iat_hooks(self):
    """Render one table row per suspect IAT entry."""
    for function_name, func_address in self.detect_IAT_hooks():
        yield dict(Type="IAT",
                   source=function_name,
                   target=utils.FormattedAddress(
                       self.session.address_resolver,
                       func_address, max_count=1, hex_if_unknown=True),
                   target_func=self.session.profile.Function(
                       func_address))
def detect_EAT_hooks(self, size=0):
    """Detect Export Address Table hooks.

    An EAT hook is where malware changes the EAT entry for a dll after its
    loaded so that a new DLL wants to link against it, the new DLL will use
    the malware's function instead of the exporting DLL's function.

    We determine the EAT entry is hooked if the address lies outside the
    exporting dll.

    Args:
      size: size of the dll image in bytes; when 0, it is derived from the
        PE section headers.

    Yields:
      (function_name, func) for each export pointing outside the dll.
    """
    address_space = self.session.GetParameter("default_address_space")
    pe = pe_vtypes.PE(image_base=self.plugin_args.image_base,
                      session=self.session,
                      address_space=address_space)

    start = self.plugin_args.image_base
    end = self.plugin_args.image_base + size

    # If the dll size is not provided we parse it from the PE header.
    if not size:
        for _, _, virtual_address, section_size in pe.Sections():
            # Only count executable sections.
            section_end = (self.plugin_args.image_base +
                           virtual_address + section_size)
            if section_end > end:
                end = section_end

    resolver = self.session.address_resolver
    for dll, func, name, hint in pe.ExportDirectory():
        self.session.report_progress("Checking export %s!%s", dll, name)

        # Skip zero or invalid addresses.
        if address_space.read(func.v(), 10) == b"\x00" * 10:
            continue

        # Report on exports which fall outside the dll.
        if start < func.v() < end:
            continue

        function_name = "%s:%s (%s)" % (
            resolver.NormalizeModuleName(dll), name, hint)

        yield function_name, func
def collect_eat_hooks(self):
    """Render one table row per suspect EAT entry."""
    for function_name, func_address in self.detect_EAT_hooks():
        yield dict(Type="EAT",
                   source=function_name,
                   target=utils.FormattedAddress(
                       self.session.address_resolver,
                       func_address, max_count=1, hex_if_unknown=True),
                   target_func=self.session.profile.Function(
                       func_address))
    def detect_inline_hooks(self):
        """A Generator of hooked exported functions from this PE file.

        Yields:
          A tuple of (function, name, jump_destination)
        """
        # Inspect the export directory for inline hooks.
        pe = pe_vtypes.PE(image_base=self.plugin_args.image_base,
                          address_space=self.session.GetParameter(
                              "default_address_space"),
                          session=self.session)
        pfn_db = self.session.profile.get_constant_object("MmPfnDatabase")
        heuristic = HookHeuristic(session=self.session)
        # Physical pages already proven to still be file-backed; used to
        # avoid re-checking many exports sharing one page.
        ok_pages = set()
        for _, function, name, _ in pe.ExportDirectory():
            # Dereference the function pointer.
            function_address = function.deref().obj_offset
            self.session.report_progress(
                "Checking function %#x (%s)", function, name)
            # Check if the page is private or a file mapping. Usually if a
            # mapped page is modified it will be converted to a private page due
            # to Windows copy on write semantics. We assume that hooks are only
            # placed in memory, and therefore functions which are still mapped
            # to disk files are not hooked and can be safely skipped.
            if not self.plugin_args.thorough:
                # We must do the vtop in the process address space. This is the
                # physical page backing the function preamble.
                phys_address = function.obj_vm.vtop(function_address)
                # Page not mapped. (Written as == None rather than a truthiness
                # test, since physical address 0 is falsy but valid.)
                if phys_address == None:
                    continue
                phys_page = phys_address >> 12
                # We determined this page is ok before - we can skip it.
                if phys_page in ok_pages:
                    continue
                # Get the PFN DB record.
                pfn_obj = pfn_db[phys_page]
                # The page is controlled by a prototype PTE which means it is
                # still a file mapping. It has not been changed.
                if pfn_obj.IsPrototype:
                    ok_pages.add(phys_page)
                    continue
            # Try to detect an inline hook in the first few instructions.
            destination = heuristic.Inspect(function, instructions=3) or ""
            # Only yield when the heuristic resolved a jump destination.
            if destination:
                yield function, name, destination
    def collect_inline_hooks(self):
        """Render detected inline hooks as result rows, filtered by verbosity."""
        for function, _, destination in self.detect_inline_hooks():
            hook_detected = False
            # Try to resolve the destination into a name.
            destination_name = self.reported_access(destination)
            # We know about it. We suppress the output for jumps that go into a
            # known module. These should be visible using the regular vad
            # module.
            # NOTE(review): the comment above speaks of suppression, but a
            # resolvable destination actually *marks* the row as a detected
            # hook here - confirm which behavior is intended.
            if destination_name:
                hook_detected = True
            # Skip non hooked results if verbosity is too low.
            if self.plugin_args.verbosity < 10 and not hook_detected:
                continue
            # Only highlight results if verbosity is high.
            highlight = ""
            if hook_detected and self.plugin_args.verbosity > 1:
                highlight = "important"
            yield dict(Type="Inline",
                       source=utils.FormattedAddress(
                           self.session.address_resolver,
                           function.deref(), max_count=1),
                       target=utils.FormattedAddress(
                           self.session.address_resolver,
                           destination, max_count=1),
                       source_func=function.deref(),
                       target_func=self.session.profile.Function(
                           destination),
                       highlight=highlight)
def collect(self):
if self.plugin_args.type in ["all", "inline"]:
for x in self.collect_inline_hooks():
yield x
if self.plugin_args.type in ["all", "iat"]:
for x in self.collect_iat_hooks():
yield x
if self.plugin_args.type in ["all", "eat"]:
for x in self.collect_eat_hooks():
yield x
class EATHooks(common.WinProcessFilter):
    """Detect EAT hooks in process and kernel memory"""
    name = "hooks_eat"
    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="_EPROCESS", hidden=True),
        dict(name="Type", hidden=True),
        dict(name="source", width=20),
        dict(name="target", width=20),
        dict(name="target_func"),
    ]
    # Unbound reference to the CheckPEHooks collector this plugin delegates
    # to; subclasses override it to reuse collect()/collect_hooks() unchanged.
    checker_method = CheckPEHooks.collect_eat_hooks
    def column_types(self):
        return dict(_EPROCESS=self.session.profile._EPROCESS,
                    source="",
                    target="",
                    target_func=self.session.profile.Function())
    def collect_hooks(self, task, dll):
        """Run the configured checker over one loaded dll of `task`."""
        checker = self.session.plugins.check_pehooks(
            image_base=dll.base, thorough=self.plugin_args.thorough)
        # Call the class-level unbound method on the checker instance.
        for info in self.__class__.checker_method(checker):
            info["_EPROCESS"] = task
            yield info
    def collect(self):
        cc = self.session.plugins.cc()
        with cc:
            for task in self.filter_processes():
                # Switch into the process context so vtop/reads resolve in
                # the task's address space.
                cc.SwitchProcessContext(task)
                yield dict(divider="Process %s (%s)" % (task.name, task.pid))
                for dll in task.get_load_modules():
                    for x in self.collect_hooks(task, dll):
                        yield x
class TestEATHooks(testlib.SimpleTestCase):
    # Smoke test: run the hooks_eat plugin against the test image's pids.
    PLUGIN = "hooks_eat"
    PARAMETERS = dict(
        commandline="hooks_eat %(pids)s"
    )
class IATHooks(EATHooks):
    """Detect IAT/EAT hooks in process and kernel memory"""
    name = "hooks_iat"
    # Same plumbing as EATHooks, only the CheckPEHooks collector differs.
    checker_method = CheckPEHooks.collect_iat_hooks
class TestIATHooks(testlib.SimpleTestCase):
    # Smoke test: run the hooks_iat plugin against the test image's pids.
    PLUGIN = "hooks_iat"
    PARAMETERS = dict(
        commandline="hooks_iat %(pids)s"
    )
class InlineHooks(EATHooks):
    """Detect API hooks in process and kernel memory"""
    name = "hooks_inline"
    # Reuses the EATHooks driver loop with the inline-hook collector.
    checker_method = CheckPEHooks.collect_inline_hooks
    # Inline rows additionally carry the hooked function itself.
    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="_EPROCESS", hidden=True),
        dict(name="source", width=20),
        dict(name="target", width=20),
        dict(name="Type", hidden=True),
        dict(name="source_func", width=60),
        dict(name="target_func"),
    ]
class TestInlineHooks(testlib.SimpleTestCase):
    # Smoke test: run the hooks_inline plugin against the test image's pids.
    PLUGIN = "hooks_inline"
    PARAMETERS = dict(
        commandline="hooks_inline %(pids)s"
    ) | /rekall-core-1.7.2rc1.zip/rekall-core-1.7.2rc1/rekall/plugins/windows/malware/apihooks.py | 0.835785 | 0.516047 | apihooks.py | pypi |
from rekall.plugins.windows import common
from rekall.plugins.windows import filescan
from rekall_lib import utils
class DeviceTree(common.PoolScannerPlugin):
    "Show device tree."
    __name = "devicetree"
    table_header = [
        dict(name="Type", type="TreeNode", width=10, max_depth=10),
        dict(name="Address", style="address", padding="0"),
        dict(name="Name", width=30),
        dict(name="device_type", width=30),
        dict(name="Path"),
    ]
    scanner_defaults = dict(
        scan_kernel_nonpaged_pool=True
    )
    def generate_hits(self):
        """Yield _DRIVER_OBJECTs found by pool-scanning for driver tags."""
        for run in self.generate_memory_ranges():
            scanner = filescan.PoolScanDriver(
                session=self.session, profile=self.profile,
                address_space=run.address_space)
            for pool_obj in scanner.scan(run.start, run.length):
                # Include freed allocations as well as live ones.
                for object_obj in pool_obj.IterObject("Driver", freed=True):
                    yield object_obj.Object
    def collect(self):
        """Emit a DRV/DEV/ATT tree row per driver, device and attachment."""
        for driver_obj in self.generate_hits():
            yield dict(
                Type=utils.AttributedString("DRV", [(0, 30, "BLACK", "RED")]),
                Address=driver_obj.obj_offset,
                Name=driver_obj.DriverName.v(vm=self.kernel_address_space),
                depth=0)
            first_device = driver_obj.DeviceObject.dereference(
                vm=self.kernel_address_space)
            for device in first_device.walk_list("NextDevice"):
                # Carve the _OBJECT_HEADER preceding the device body to get
                # at its NameInfo optional header.
                device_header = self.profile.Object(
                    "_OBJECT_HEADER", offset=device.obj_offset -
                    device.obj_profile.get_obj_offset("_OBJECT_HEADER", "Body"),
                    vm=device.obj_vm)
                device_name = device_header.NameInfo.Name.cast(
                    vm=self.kernel_address_space)
                yield dict(
                    Type=utils.AttributedString(
                        "DEV", [(0, 30, "WHITE", "BLUE")]),
                    Address=device.obj_offset, Name=device_name,
                    device_type=device.DeviceType,
                    depth=1)
                level = 1
                for att_device in device.walk_list(
                        "AttachedDevice", include_current=False):
                    # NOTE(review): attached devices reuse the parent device's
                    # name (device_name) here rather than their own object
                    # header name - confirm this is intended.
                    yield dict(
                        Type=utils.AttributedString(
                            "ATT", [(0, 30, "BLACK", "GREEN")]),
                        Address=att_device.obj_offset, Name=device_name,
                        device_type=att_device.DeviceType,
                        Path=att_device.DriverObject.DriverName,
                        depth=level + 1)
                    level += 1
class DriverIrp(common.PoolScannerPlugin):
    "Driver IRP hook detection"
    __name = "driverirp"
    mod_re = None
    __args = [
        dict(name="regex", type="RegEx",
             help='Analyze drivers matching REGEX'),
    ]
    # NOTE(review): "func_addres" (sic) is the published column key; the
    # spelling is kept because renderers and row dicts reference it.
    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="driver", hidden=True),
        dict(name="idx", width=4, align="r"),
        dict(name="function", width=36),
        dict(name="func_addres", style="address"),
        dict(name="name")
    ]
    def generate_hits(self):
        """Yield _DRIVER_OBJECTs, from the object tree or by pool scanning."""
        if not self.scan_specification_requested():
            # No explicit scan requested: enumerate live drivers cheaply via
            # the object directory instead of scanning pool memory.
            obj_tree_plugin = self.session.plugins.object_tree(
                type_regex="Driver")
            for hit in obj_tree_plugin.collect():
                yield hit["_OBJECT_HEADER"].Object
            return
        for run in self.generate_memory_ranges():
            scanner = filescan.PoolScanDriver(
                session=self.session, profile=self.profile,
                address_space=run.address_space)
            for pool_obj in scanner.scan(run.start, run.length):
                for object_obj in pool_obj.IterObject("Driver", freed=True):
                    yield object_obj.Object
    def collect(self):
        """Report each driver's IRP dispatch table, flagging hooked entries."""
        invalid_address = self.session.address_resolver.get_constant_object(
            "nt!IopInvalidDeviceRequest", "Function").obj_offset
        for driver_obj in self.generate_hits():
            driver_name = driver_obj.DriverName.v(vm=self.kernel_address_space)
            # When a regex filter was supplied, skip drivers with no name or
            # whose name does not match.
            if self.plugin_args.regex:
                if not driver_name:
                    continue
                if not self.plugin_args.regex.search(driver_name):
                    continue
            driver_start = driver_obj.DriverStart.v()
            driver_end = driver_obj.DriverStart.v() + driver_obj.DriverSize
            interesting = False
            functions = []
            # Write the address and owner of each IRP function
            for i, function in enumerate(driver_obj.MajorFunction):
                # Make sure this is in the kernel address space.
                function = driver_obj.MajorFunction[i].dereference(
                    vm=self.kernel_address_space)
                func_addres = function.obj_offset
                # Rekall objects may compare equal to None via __eq__; keep
                # the == form rather than an identity test.
                if func_addres == None:
                    continue
                symbol = utils.FormattedAddress(
                    self.session.address_resolver, func_addres)
                # Suppress function pointers which point at the default invalid
                # address function.
                if (self.plugin_args.verbosity < 5 and
                    func_addres == invalid_address):
                    continue
                highlight = None
                # Functions residing within the driver are not suspicious.
                if not (driver_start <= func_addres <= driver_end):
                    interesting = True
                    # Extra important if we have no idea where it came from.
                    if not self.session.address_resolver.format_address(
                            func_addres):
                        highlight = "important"
                functions.append(dict(driver=driver_obj,
                                      idx=i,
                                      function=function.obj_name,
                                      func_addres=func_addres,
                                      name=symbol,
                                      highlight=highlight))
            # By default only show interesting drivers.
            if (self.plugin_args.verbosity < 2 and not interesting):
                continue
            # Write the standard header for each driver object
            divider = "DriverName: %s %#x-%#x" % (
                driver_name, driver_start, driver_end)
            yield dict(divider=divider)
            for x in functions:
                yield x | /rekall-core-1.7.2rc1.zip/rekall-core-1.7.2rc1/rekall/plugins/windows/malware/devicetree.py | 0.574275 | 0.177704 | devicetree.py | pypi |
# pylint: disable=protected-access
from rekall import obj
from rekall.plugins.windows import common
from rekall.plugins.overlays.windows import pe_vtypes
from rekall_lib import utils
# Hand-built vtype definitions for 32-bit (I386) callback records. These
# structures are not in the Microsoft PDBs, so they are overlaid manually;
# each entry is {name: [size, {field: [offset, type-spec]}]}.
callback_types = {
    '_NOTIFICATION_PACKET' : [0x10, {
        'ListEntry' : [0x0, ['_LIST_ENTRY']],
        'DriverObject' : [0x8, ['pointer', ['_DRIVER_OBJECT']]],
        'NotificationRoutine' : [0xC, ['unsigned int']],
    }],
    '_KBUGCHECK_CALLBACK_RECORD' : [0x20, {
        'Entry' : [0x0, ['_LIST_ENTRY']],
        'CallbackRoutine' : [0x8, ['unsigned int']],
        'Buffer' : [0xC, ['pointer', ['void']]],
        'Length' : [0x10, ['unsigned int']],
        'Component' : [0x14, ['pointer', ['String', dict(length=64)]]],
        'Checksum' : [0x18, ['pointer', ['unsigned int']]],
        'State' : [0x1C, ['unsigned char']],
    }],
    '_KBUGCHECK_REASON_CALLBACK_RECORD' : [0x1C, {
        'Entry' : [0x0, ['_LIST_ENTRY']],
        'CallbackRoutine' : [0x8, ['unsigned int']],
        'Component' : [0xC, ['pointer', ['String', dict(length=8)]]],
        'Checksum' : [0x10, ['pointer', ['unsigned int']]],
        'Reason' : [0x14, ['unsigned int']],
        'State' : [0x18, ['unsigned char']],
    }],
    '_SHUTDOWN_PACKET' : [0xC, {
        'Entry' : [0x0, ['_LIST_ENTRY']],
        'DeviceObject' : [0x8, ['pointer', ['_DEVICE_OBJECT']]],
    }],
    '_EX_CALLBACK_ROUTINE_BLOCK' : [0x8, {
        'RundownProtect' : [0x0, ['unsigned int']],
        'Function' : [0x4, ['unsigned int']],
        'Context' : [0x8, ['unsigned int']],
    }],
    '_GENERIC_CALLBACK' : [0xC, {
        'Callback' : [0x4, ['pointer', ['void']]],
        'Associated' : [0x8, ['pointer', ['void']]],
    }],
    '_REGISTRY_CALLBACK_LEGACY' : [0x38, {
        'CreateTime' : [0x0, ['WinFileTime', {}]],
    }],
    '_REGISTRY_CALLBACK' : [None, {
        'ListEntry' : [0x0, ['_LIST_ENTRY']],
        'Function' : [0x1C, ['pointer', ['void']]],
    }],
    '_DBGPRINT_CALLBACK' : [0x14, {
        'Function' : [0x8, ['pointer', ['void']]],
    }],
    '_NOTIFY_ENTRY_HEADER' : [None, {
        'ListEntry' : [0x0, ['_LIST_ENTRY']],
        'EventCategory' : [0x8, ['Enumeration', dict(
            target='long', choices={
                0: 'EventCategoryReserved',
                1: 'EventCategoryHardwareProfileChange',
                2: 'EventCategoryDeviceInterfaceChange',
                3: 'EventCategoryTargetDeviceChange'})]],
        'CallbackRoutine' : [0x14, ['unsigned int']],
        'DriverObject' : [0x1C, ['pointer', ['_DRIVER_OBJECT']]],
    }],
}
# 64-bit (AMD64) variants of the callback record vtypes above; offsets
# differ because pointers and list entries are 8 bytes wide.
callback_types_x64 = {
    '_GENERIC_CALLBACK' : [ 0x18, {
        'Callback' : [ 0x8, ['pointer', ['void']]],
        'Associated' : [ 0x10, ['pointer', ['void']]],
    } ],
    '_NOTIFICATION_PACKET' : [ 0x30, {
        'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
        'DriverObject' : [ 0x10, ['pointer', ['_DRIVER_OBJECT']]],
        'NotificationRoutine' : [ 0x18, ['address']],
    } ],
    '_SHUTDOWN_PACKET' : [ 0xC, {
        'Entry' : [ 0x0, ['_LIST_ENTRY']],
        'DeviceObject' : [ 0x10, ['pointer', ['_DEVICE_OBJECT']]],
    } ],
    '_DBGPRINT_CALLBACK' : [ 0x14, {
        'Function' : [ 0x10, ['pointer', ['void']]],
    } ],
    '_NOTIFY_ENTRY_HEADER' : [ None, {
        'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
        'EventCategory' : [ 0x10, ['Enumeration', dict(
            target = 'long', choices = {
                0: 'EventCategoryReserved',
                1: 'EventCategoryHardwareProfileChange',
                2: 'EventCategoryDeviceInterfaceChange',
                3: 'EventCategoryTargetDeviceChange'})]],
        'CallbackRoutine' : [ 0x20, ['address']],
        'DriverObject' : [ 0x30, ['pointer', ['_DRIVER_OBJECT']]],
    }],
    '_REGISTRY_CALLBACK' : [ 0x50, {
        'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
        'Function' : [ 0x20, ['pointer', ['void']]], # other could be 28
    }],
    # reactos/include/ddk/wdm.h :987
    '_KBUGCHECK_CALLBACK_RECORD' : [None, {
        'Entry' : [0x0, ['_LIST_ENTRY']],
        'CallbackRoutine' : [0x10, ['Pointer']],
        'Component' : [0x28, ['Pointer', dict(
            target='String',
            target_args=dict(
                length=8
            )
        )]],
    }],
    # reactos/include/ddk/wdm.h :962
    '_KBUGCHECK_REASON_CALLBACK_RECORD' : [None, {
        'Entry' : [0x0, ['_LIST_ENTRY']],
        'CallbackRoutine' : [0x10, ['Pointer']],
        'Component' : [0x18, ['Pointer', dict(
            target='String',
        )]],
    }],
}
class _SHUTDOWN_PACKET(obj.Struct):
    """Class for shutdown notification callbacks"""
    def sanity_check(self, vm):
        """
        Perform some checks to see if this object can exist in the provided
        address space.

        Returns True only if the list links and device pointer are valid
        addresses in `vm` and the pointed-to object is of type "Device".
        """
        if (not vm.is_valid_address(self.Entry.Flink) or
            not vm.is_valid_address(self.Entry.Blink) or
            not vm.is_valid_address(self.DeviceObject)):
            return False
        # Dereference the device object
        device = self.DeviceObject.dereference(vm=vm)
        # Carve out the device's object header and check its type
        object_header = self.obj_profile.Object(
            "_OBJECT_HEADER",
            offset=(device.obj_offset -
                    self.obj_profile.get_obj_offset("_OBJECT_HEADER", "Body")),
            vm=vm)
        return object_header.get_object_type(vm) == "Device"
class AbstractCallbackScanner(common.PoolScanner):
    """Base pool scanner for callback records.

    Subclasses yield (source_name, callback_address, details) tuples for the
    callback itself, without wrapping object headers.
    """
class PoolScanFSCallback(AbstractCallbackScanner):
    """PoolScanner for File System Callbacks"""
    # Pool tag "IoFs", fixed 0x18 allocation, any pool type including freed.
    checks = [('PoolTagCheck', dict(tag=b"IoFs")),
              ('CheckPoolSize', dict(condition=lambda x: x == 0x18)),
              ('CheckPoolType', dict(non_paged=True, paged=True,
                                     free=True)),
              ]
    def scan(self, **kwargs):
        """Yield IoRegisterFsRegistrationChange callback routines."""
        for pool_header in super(PoolScanFSCallback, self).scan(**kwargs):
            # The _NOTIFICATION_PACKET immediately follows the pool header.
            callback = self.profile.Object(
                '_NOTIFICATION_PACKET', offset=pool_header.end(),
                vm=self.address_space)
            yield ("IoRegisterFsRegistrationChange",
                   callback.NotificationRoutine, None)
class PoolScanShutdownCallback(AbstractCallbackScanner):
    """PoolScanner for Shutdown Callbacks"""
    checks = [('PoolTagCheck', dict(tag=b"IoSh")),
              ('CheckPoolSize', dict(condition=lambda x: x == 0x18)),
              ('CheckPoolType', dict(non_paged=True, paged=True,
                                     free=True)),
              ('CheckPoolIndex', dict(value=0)),
              ]
    def __init__(self, kernel_address_space=None, **kwargs):
        # The kernel AS is needed to sanity-check and dereference the
        # device/driver objects referenced by each hit.
        super(PoolScanShutdownCallback, self).__init__(**kwargs)
        self.kernel_address_space = kernel_address_space
    def scan(self, offset=0, **kwargs):
        """Yield IoRegisterShutdownNotification callback routines."""
        for pool_header in super(PoolScanShutdownCallback, self).scan(
                offset=offset, **kwargs):
            # Instantiate the object in physical space but give it a native VM
            # of kernel space
            callback = self.profile._SHUTDOWN_PACKET(
                offset=pool_header.end(), vm=self.address_space)
            if not callback.sanity_check(self.kernel_address_space):
                continue
            # Get the callback's driver object. We've already
            # checked the sanity of the device object pointer.
            driver_obj = callback.DeviceObject.dereference(
                vm=self.kernel_address_space).DriverObject
            # The actual shutdown handler is the driver's IRP_MJ_SHUTDOWN
            # dispatch entry.
            function_pointer = driver_obj.MajorFunction['IRP_MJ_SHUTDOWN']
            details = driver_obj.DriverName
            yield "IoRegisterShutdownNotification", function_pointer, details
class PoolScanGenericCallback(AbstractCallbackScanner):
    """PoolScanner for Generic Callbacks"""
    checks = [('PoolTagCheck', dict(tag=b"Cbrb")),
              ('CheckPoolSize', dict(condition=lambda x: x == 0x18)),
              ('CheckPoolType', dict(non_paged=True, paged=True, free=True)),
              ]
    def scan(self, **kwargs):
        """
        Enumerate generic callbacks of the following types:
        * PsSetCreateProcessNotifyRoutine
        * PsSetThreadCreateNotifyRoutine
        * PsSetLoadImageNotifyRoutine
        * CmRegisterCallback (on XP only)
        * DbgkLkmdRegisterCallback (on Windows 7 only)
        The only issue is that you can't distinguish between the types by just
        finding the generic callback structure
        """
        for pool_header in super(PoolScanGenericCallback, self).scan(**kwargs):
            callback = self.profile.Object(
                '_GENERIC_CALLBACK', offset=pool_header.end(),
                vm=self.address_space)
            yield "GenericKernelCallback", callback.Callback, None
class PoolScanDbgPrintCallback(AbstractCallbackScanner):
    """PoolScanner for DebugPrint Callbacks on Vista and 7"""
    checks = [('PoolTagCheck', dict(tag=b"DbCb")),
              ('CheckPoolSize', dict(condition=lambda x: x == 0x20)),
              ('CheckPoolType', dict(non_paged=True, paged=True, free=True)),
              ]
    def scan(self, offset=0, **kwargs):
        """Enumerate DebugPrint callbacks on Vista and 7"""
        for pool_header in super(PoolScanDbgPrintCallback, self).scan(
                offset=offset, **kwargs):
            callback = self.profile.Object(
                '_DBGPRINT_CALLBACK', offset=pool_header.end(),
                vm=self.address_space)
            yield "DbgSetDebugPrintCallback", callback.Function, None
class PoolScanRegistryCallback(AbstractCallbackScanner):
    """PoolScanner for registry-change callbacks on Vista and 7."""
    checks = [('PoolTagCheck', dict(tag=b"CMcb")),
              # Seen as 0x38 on Vista SP2 and 0x30 on 7 SP0
              ('CheckPoolSize', dict(condition=lambda x: x >= 0x38)),
              ('CheckPoolType', dict(non_paged=True, paged=True, free=True)),
              ('CheckPoolIndex', dict(value=4)),
              ]
    def scan(self, offset=0, **kwargs):
        """
        Enumerate registry callbacks on Vista and 7.
        These callbacks are installed via CmRegisterCallback
        or CmRegisterCallbackEx.
        """
        for pool_header in super(PoolScanRegistryCallback, self).scan(
                offset=offset, **kwargs):
            callback = self.profile.Object(
                '_REGISTRY_CALLBACK', offset=pool_header.end(),
                vm=self.address_space)
            yield "CmRegisterCallback", callback.Function, None
class PoolScanPnp9(AbstractCallbackScanner):
    """PoolScanner for Pnp9 (EventCategoryHardwareProfileChange)"""
    checks = [('MultiPoolTagCheck', dict(tags=[b"Pnp9", b"PnpD", b"PnpC"])),
              # seen as 0x2C on W7, 0x28 on vistasp0 (4 less but needs 8 less)
              ('CheckPoolSize', dict(condition=lambda x: x >= 0x30)),
              ('CheckPoolType', dict(non_paged=True, paged=True, free=True)),
              ('CheckPoolIndex', dict(value=1)),
              ]
    def __init__(self, kernel_address_space=None, **kwargs):
        # Needed to dereference each entry's driver object below.
        self.kernel_address_space = kernel_address_space
        super(PoolScanPnp9, self).__init__(**kwargs)
    def scan(self, offset=0, **kwargs):
        """Enumerate IoRegisterPlugPlayNotification"""
        for pool_header in super(PoolScanPnp9, self).scan(
                offset=offset, **kwargs):
            entry = self.profile.Object(
                "_NOTIFY_ENTRY_HEADER", offset=pool_header.end(),
                vm=self.address_space)
            # Dereference the driver object pointer
            driver = entry.DriverObject.dereference(
                vm=self.kernel_address_space)
            # Instantiate an object header for the driver name
            header = self.profile.Object(
                "_OBJECT_HEADER",
                offset=(driver.obj_offset -
                        driver.obj_profile.get_obj_offset(
                            "_OBJECT_HEADER", "Body")),
                vm=driver.obj_vm)
            # Grab the object name
            driver_name = header.NameInfo.Name.v()
            yield entry.EventCategory, entry.CallbackRoutine, driver_name
class CallbackScan(common.WindowsCommandPlugin):
    """Print system-wide notification routines by scanning for them.
    Note this plugin is quite inefficient - consider using the callbacks plugin
    instead.
    """
    __name = "callback_scan"
    def __init__(self, scan_in_kernel_address_space=False, **kwargs):
        super(CallbackScan, self).__init__(**kwargs)
        # When set, pool scanning runs over the kernel virtual AS instead of
        # the physical AS.
        self.scan_in_kernel_address_space = scan_in_kernel_address_space
        if self.profile.metadata("arch") == "I386":
            # Add some plugin specific vtypes.
            self.profile.add_types(callback_types)
            self.profile.add_classes({
                '_SHUTDOWN_PACKET': _SHUTDOWN_PACKET,
            })
            self.profile = self.profile.copy()
            pe_vtypes.PEProfile.Initialize(self.profile)
        else:
            # Only the 32-bit vtypes are wired up in this scanner.
            raise obj.ProfileError("This plugin only supports 32 bit profiles "
                                   "for now.")
    def get_kernel_callbacks(self):
        """
        Enumerate the Create Process, Create Thread, and Image Load callbacks.
        On some systems, the byte sequences will be inaccurate or the exported
        function will not be found. In these cases, the PoolScanGenericCallback
        scanner will pick up the pool associated with the callbacks.
        """
        routines = ["PspLoadImageNotifyRoutine",
                    "PspCreateThreadNotifyRoutine",
                    "PspCreateProcessNotifyRoutine"]
        for symbol in routines:
            # The list is an array of 8 _EX_FAST_REF objects
            callbacks = self.profile.get_constant_object(
                symbol,
                target="Array",
                target_args=dict(
                    count=8,
                    target='_EX_FAST_REF',
                    target_args=dict(
                        target="_GENERIC_CALLBACK",
                    )
                )
            )
            for callback in callbacks:
                if callback.Callback:
                    yield "GenericKernelCallback", callback.Callback, None
    def get_bugcheck_callbacks(self):
        """
        Enumerate generic Bugcheck callbacks.
        Note: These structures don't exist in tagged pools, but you can find
        them via KDDEBUGGER_DATA64 on all versions of Windows.
        """
        KeBugCheckCallbackListHead = self.profile.get_constant_object(
            "KeBugCheckCallbackListHead", "Pointer", target_args=dict(
                target='_LIST_ENTRY'))
        for l in KeBugCheckCallbackListHead.list_of_type(
                "_KBUGCHECK_CALLBACK_RECORD", "Entry"):
            yield ("KeBugCheckCallbackListHead", l.CallbackRoutine,
                   l.Component.dereference())
    def get_registry_callbacks_legacy(self):
        """
        Enumerate registry change callbacks.
        On XP these are registered using CmRegisterCallback.
        On Vista and Windows 7, these callbacks are registered using the
        CmRegisterCallbackEx function.
        """
        # The vector is an array of 100 _EX_FAST_REF objects
        addrs = self.profile.get_constant_object(
            "CmpCallBackVector",
            target="Array",
            target_args=dict(
                count=100,
                target="_EX_FAST_REF")
        )
        for addr in addrs:
            callback = addr.dereference_as("_EX_CALLBACK_ROUTINE_BLOCK")
            if callback:
                yield "Registry", callback.Function, None
    def get_bugcheck_reason_callbacks(self):
        """
        Enumerate Bugcheck Reason callbacks.
        """
        bugs = self.profile.get_constant_object(
            "KeBugCheckReasonCallbackListHead",
            target="_LIST_ENTRY")
        for l in bugs.list_of_type(
                "_KBUGCHECK_REASON_CALLBACK_RECORD", "Entry"):
            yield ("KeRegisterBugCheckReasonCallback", l.CallbackRoutine,
                   l.Component.dereference())
    def generate_hits(self):
        """Yield (source, callback, details) tuples from all strategies."""
        # Get the OS version we're analyzing
        version = self.profile.metadata('version')
        # Choose the address space to pool-scan in.
        address_space = self.physical_address_space
        if self.scan_in_kernel_address_space:
            address_space = self.kernel_address_space
        # Get a scanner group - this will scan for all these in one pass.
        scanners = dict(
            PoolScanFSCallback=PoolScanFSCallback(
                address_space=address_space,
                profile=self.profile),
            PoolScanShutdownCallback=PoolScanShutdownCallback(
                profile=self.profile,
                address_space=address_space,
                kernel_address_space=self.kernel_address_space),
            PoolScanGenericCallback=PoolScanGenericCallback(
                address_space=address_space,
                profile=self.profile),
        )
        # Valid for Vista and later
        if version >= 6.0:
            scanners.update(
                PoolScanDbgPrintCallback=PoolScanDbgPrintCallback(
                    address_space=address_space,
                    profile=self.profile),
                PoolScanRegistryCallback=PoolScanRegistryCallback(
                    address_space=address_space,
                    profile=self.profile),
                PoolScanPnp9=PoolScanPnp9(
                    profile=self.profile,
                    address_space=address_space,
                    kernel_address_space=self.kernel_address_space),
            )
        for scanner in list(scanners.values()):
            for info in scanner.scan():
                yield info
        # First few routines are valid on all OS versions
        for info in self.get_bugcheck_callbacks():
            yield info
        for info in self.get_bugcheck_reason_callbacks():
            yield info
        for info in self.get_kernel_callbacks():
            yield info
        # Valid for XP
        if version == 5.1:
            for info in self.get_registry_callbacks_legacy():
                yield info
    def render(self, renderer):
        renderer.table_header([("Type", "type", "36"),
                               ("Callback", "callback", "[addrpad]"),
                               ("Symbol", "symbol", "50"),
                               ("Details", "details", ""),
                               ])
        for (sym, cb, detail) in self.generate_hits():
            symbol_name = utils.FormattedAddress(
                self.session.address_resolver, cb)
            renderer.table_row(sym, cb, symbol_name, detail)
class Callbacks(common.WindowsCommandPlugin):
    """Enumerate callback routines.
    This plugin just enumerates installed callback routines from various
    sources. It does not scan for them.
    This plugin is loosely based on the original Volatility plugin of the same
    name but much expanded using new information.
    Reference:
    <http://www.codemachine.com/notes.html>
    """
    name = "callbacks"
    table_header = [
        dict(name="type", width=36),
        dict(name="offset", style="address"),
        dict(name="callback", style="address"),
        dict(name="symbol", width=50),
        dict(name="details"),
    ]
    def __init__(self, *args, **kwargs):
        super(Callbacks, self).__init__(*args, **kwargs)
        # Overlay the hand-built 64-bit callback record vtypes.
        self.profile.add_types(callback_types_x64)
    def get_generic_callbacks(self):
        """Walk the Psp* notify-routine arrays, sized by their Count symbols."""
        resolver = self.session.address_resolver
        for table, table_length in [
                ("nt!PspLoadImageNotifyRoutine",
                 "nt!PspLoadImageNotifyRoutineCount"),
                ("nt!PspCreateThreadNotifyRoutine",
                 "nt!PspCreateThreadNotifyRoutineCount"),
                ("nt!PspCreateProcessNotifyRoutine",
                 "nt!PspCreateProcessNotifyRoutineCount")]:
            array_length = resolver.get_constant_object(
                table_length, "unsigned long long")
            array = resolver.get_constant_object(
                table,
                target="Array",
                count=array_length,
                target_args=dict(
                    target="_EX_FAST_REF",
                    target_args=dict(
                        target="_GENERIC_CALLBACK"
                    )
                )
            )
            for callback in array:
                function = callback.Callback
                yield (table, callback, function,
                       utils.FormattedAddress(resolver, function))
    def get_bugcheck_callbacks(self):
        """Walk both KeBugCheck* callback lists."""
        resolver = self.session.address_resolver
        # NOTE(review): the loop variable "type" shadows the builtin within
        # this loop's scope only; harmless here but worth renaming.
        for list_head_name, type in [
                ("nt!KeBugCheckCallbackListHead", "_KBUGCHECK_CALLBACK_RECORD"),
                ("nt!KeBugCheckReasonCallbackListHead",
                 "_KBUGCHECK_REASON_CALLBACK_RECORD")]:
            list_head = resolver.get_constant_object(
                list_head_name, "_LIST_ENTRY")
            for record in list_head.list_of_type(type, "Entry"):
                function = record.CallbackRoutine
                yield (list_head_name,
                       record,
                       function,
                       utils.FormattedAddress(resolver, function),
                       record.Component)
    def collect(self):
        for x in self.get_generic_callbacks():
            yield x
        for x in self.get_bugcheck_callbacks():
            yield x | /rekall-core-1.7.2rc1.zip/rekall-core-1.7.2rc1/rekall/plugins/windows/malware/callbacks.py | 0.410402 | 0.257634 | callbacks.py | pypi |
from builtins import str
from rekall import obj
from rekall import testlib
from rekall.plugins import core
from rekall.plugins.windows import common
# pylint: disable=protected-access
class Malfind(core.DirectoryDumperMixin, common.WinProcessFilter):
    """Find hidden and injected code.

    Walks each process VAD tree looking for committed, memory-resident,
    writable+executable regions that do not look like ordinary image
    mappings, then hexdumps/disassembles (and optionally dumps) them.
    """
    __name = "malfind"
    dump_dir_optional = True
    default_dump_dir = None
    def _is_vad_empty(self, vad, address_space):
        """
        Check if a VAD region is either entirely unavailable
        due to paging, entirely consiting of zeros, or a
        combination of the two. This helps ignore false positives
        whose VAD flags match task._injection_filter requirements
        but there's no data and thus not worth reporting it.
        @param vad: an MMVAD object in kernel AS
        @param address_space: the process address space
        """
        PAGE_SIZE = 0x1000
        # BUG FIX: address_space.read() returns bytes (cf. other plugins in
        # this package comparing read() against b"\x00" sentinels), so the
        # all-zero sentinel must be a bytes literal. With a str literal the
        # comparison below could never match on Python 3 and every readable
        # region was reported as non-empty.
        all_zero_page = b"\x00" * PAGE_SIZE
        offset = 0
        while offset < vad.Length:
            next_addr = vad.Start + offset
            # A single resident, nonzero page makes the region non-empty.
            if (address_space.is_valid_address(next_addr) and
                    address_space.read(next_addr, PAGE_SIZE) != all_zero_page):
                return False
            offset += PAGE_SIZE
        return True
    def _injection_filter(self, vad, task_as):
        """Detects injected vad regions.
        This looks for private allocations that are committed,
        memory-resident, non-empty (not all zeros) and with an
        original protection that includes write and execute.
        It is important to note that protections are applied at
        the allocation granularity (page level). Thus the original
        protection might not be the current protection, and it
        also might not apply to all pages in the VAD range.
        @param vad: an MMVAD object.
        @returns: True if the MMVAD looks like it might
        contain injected code.
        """
        # Try to find injections.
        protect = str(vad.u.VadFlags.ProtectionEnum)
        write_exec = "EXECUTE" in protect and "WRITE" in protect
        # The Write/Execute check applies to everything
        if not write_exec:
            return False
        # This is a typical VirtualAlloc'd injection
        if ((vad.u.VadFlags.PrivateMemory == 1 and
             vad.Tag == "VadS") or
                # This is a stuxnet-style injection
                (vad.u.VadFlags.PrivateMemory == 0 and
                 protect != "EXECUTE_WRITECOPY")):
            return not self._is_vad_empty(vad, task_as)
        return False
    def render(self, renderer):
        """Render each suspicious VAD: header, hexdump, disassembly, dump."""
        cc = self.session.plugins.cc()
        for task in self.filter_processes():
            task_as = task.get_process_address_space()
            if not task_as:
                continue
            with cc:
                # Switch context so reads resolve in the task's AS.
                cc.SwitchProcessContext(task)
                for vad in task.RealVadRoot.traverse():
                    self.session.report_progress("Checking %r of pid %s",
                                                 vad, task.UniqueProcessId)
                    if self._injection_filter(vad, task_as):
                        renderer.section()
                        renderer.format(
                            "Process: {0} Pid: {1} Address: {2:#x}\n",
                            task.ImageFileName, task.UniqueProcessId, vad.Start)
                        renderer.format("Vad Tag: {0} Protection: {1}\n",
                                        vad.Tag, vad.u.VadFlags.ProtectionEnum)
                        renderer.format("Flags: {0}\n", vad.u.VadFlags)
                        renderer.format("\n")
                        # Hexdump the first rows of the region.
                        dumper = self.session.plugins.dump(
                            offset=vad.Start, rows=4)
                        dumper.render(renderer, suppress_headers=True)
                        renderer.format("\n")
                        # Disassemble the region preamble.
                        disassembler = self.session.plugins.dis(
                            offset=vad.Start, length=0x40)
                        disassembler.render(renderer, suppress_headers=True)
                        if self.dump_dir:
                            filename = "{0}.{1:d}.{2:08x}-{3:08x}.dmp".format(
                                task.ImageFileName, task.pid, vad.Start,
                                vad.End)
                            with renderer.open(directory=self.dump_dir,
                                               filename=filename,
                                               mode='wb') as fd:
                                self.session.report_progress(
                                    "Dumping %s" % filename)
                                self.CopyToFile(task_as, vad.Start, vad.End, fd)
class LdrModules(common.WinProcessFilter):
    "Detect unlinked DLLs"
    __name = "ldrmodules"
    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="_EPROCESS", hidden=True),
        dict(name="base", style="address"),
        dict(name="in_load", width=5),
        dict(name="in_load_path", width=80, hidden=True),
        dict(name="in_init", width=5),
        dict(name="in_init_path", width=80, hidden=True),
        dict(name="in_mem", width=5),
        dict(name="in_mem_path", width=80, hidden=True),
        dict(name="mapped")
    ]
    def column_types(self):
        # NOTE(review): keys "base_address"/"mapped_filename" do not match the
        # table_header names "base"/"mapped" used by collect() - confirm.
        return dict(
            _EPROCESS=self.session.profile._EPROCESS(),
            base_address=0,
            in_load=False,
            in_load_path=self.session.profile._UNICODE_STRING(),
            in_init=False,
            in_init_path=self.session.profile._UNICODE_STRING(),
            in_mem=False,
            in_mem_path=self.session.profile._UNICODE_STRING(),
            mapped_filename=self.session.profile._UNICODE_STRING(),
        )
    def list_mapped_files(self, task):
        """Iterates over all vads and returns executable regions.

        NOTE(review): the filter below requires both EXECUTE and WRITE in the
        protection - stricter than "executable" alone; confirm intended.

        Yields:
          (vad, filename) pairs for file-backed writable+executable regions.
        """
        self.session.report_progress("Inspecting Pid %s",
                                     task.UniqueProcessId)
        for vad in task.RealVadRoot.traverse():
            try:
                file_obj = vad.ControlArea.FilePointer
                protect = str(vad.u.VadFlags.ProtectionEnum)
                if "EXECUTE" in protect and "WRITE" in protect:
                    yield vad, file_obj.FileName
            except AttributeError:
                # Vads without a ControlArea/FilePointer are not file-backed.
                pass
    def collect(self):
        for task in self.filter_processes():
            # Build a dictionary for all three PEB lists where the
            # keys are base address and module objects are the values
            inloadorder = dict((mod.DllBase.v(), mod)
                               for mod in task.get_load_modules())
            ininitorder = dict((mod.DllBase.v(), mod)
                               for mod in task.get_init_modules())
            inmemorder = dict((mod.DllBase.v(), mod)
                              for mod in task.get_mem_modules())
            # Build a similar dictionary for the mapped files
            mapped_files = dict((vad.Start, name)
                                for vad, name in self.list_mapped_files(task))
            yield dict(divider=task)
            # For each base address with a mapped file, print info on
            # the other PEB lists to spot discrepancies.
            for base in list(mapped_files.keys()):
                yield dict(_EPROCESS=task,
                           base=base,
                           in_load=base in inloadorder,
                           in_load_path=inloadorder.get(
                               base, obj.NoneObject()).FullDllName,
                           in_init=base in ininitorder,
                           in_init_path=ininitorder.get(
                               base, obj.NoneObject()).FullDllName,
                           in_mem=base in inmemorder,
                           in_mem_path=inmemorder.get(
                               base, obj.NoneObject()).FullDllName,
                           mapped=mapped_files[base])
class TestLdrModules(testlib.SimpleTestCase):
    """Regression test driving the ldrmodules plugin."""

    PARAMETERS = dict(commandline="ldrmodules %(pids)s")
# pylint: disable=protected-access
from rekall.plugins.windows import common
class WindowsPsxView(common.WinProcessFilter):
    """Find hidden processes with various process listings"""

    __name = "psxview"

    METHODS = common.WinProcessFilter.METHODS + ["PSScan", "Thrdproc"]

    __args = [
        dict(name="method", choices=list(METHODS), type="ChoiceArray",
             default=list(METHODS), help="Method to list processes.",
             override=True),
    ]

    def render(self, renderer):
        # One column per listing method; True where the method saw the
        # process.
        headers = [dict(type="_EPROCESS", name="_EPROCESS")]
        for method in self.plugin_args.method:
            headers.append((method, method, "%s" % len(method)))
        renderer.table_header(headers)

        for eprocess in self.filter_processes():
            row = [eprocess]
            for method in self.plugin_args.method:
                # Each method caches the set of offsets it discovered in
                # a session parameter named pslist_<method>.
                seen = eprocess.obj_offset in self.session.GetParameter(
                    "pslist_%s" % method)
                row.append(seen)
            renderer.table_row(*row)
class PsListPSScanHook(common.AbstractWindowsParameterHook):
    """Cache of _EPROCESS offsets discovered by pool scanning."""

    name = "pslist_PSScan"

    def calculate(self):
        """Enumerate processes by scanning for _EPROCESS."""
        result = set()
        psscan = self.session.plugins.psscan()
        pslist = self.session.plugins.pslist()

        for row in psscan.collect():
            eprocess = row["offset_p"]
            # Scanner hits are physical offsets - translate to the
            # virtual _EPROCESS when possible.
            if eprocess.obj_vm == self.session.physical_address_space:
                eprocess = pslist.virtual_process_from_physical_offset(
                    eprocess)

            # NOTE: "!= None" (not "is not None") is deliberate - rekall
            # NoneObject instances compare equal to None.
            if eprocess != None:
                result.add(eprocess.obj_offset)

        self.session.logging.debug(
            "Listed %s processes using PSScan", len(result))

        return result
class PsListThrdprocHook(common.AbstractWindowsParameterHook):
    """Cache of _EPROCESS offsets recovered from scanned threads."""

    name = "pslist_Thrdproc"

    def calculate(self):
        """Enumerate processes by scanning for threads."""
        result = set()
        for hit in self.session.plugins.thrdscan().collect():
            ethread = self.session.profile._ETHREAD(hit[0])
            if ethread.ExitTime != 0:
                # The thread has already exited - skip it.
                continue

            # Bounce back to the threads owner.
            process = ethread.Tcb.m('Process').dereference_as(
                '_EPROCESS', vm=self.session.kernel_address_space)
            if not process:
                process = ethread.m('ThreadsProcess').dereference(
                    vm=self.session.kernel_address_space)

            # Make sure the bounce succeeded and the pid looks sane.
            if (process and process.ExitTime == 0 and
                    process.UniqueProcessId > 0 and
                    process.UniqueProcessId < 0xFFFF):
                result.add(process.obj_offset)

        self.session.logging.debug(
            "Listed %s processes using Thrdproc", len(result))

        return result
#pylint: disable-msg=C0111
"""
@author: Brendan Dolan-Gavitt
@license: GNU General Public License 2.0 or later
@contact: bdolangavitt@wesleyan.edu
"""
from builtins import range
import struct
from rekall.plugins.windows.registry import hashdump
from Crypto import Hash
from Crypto import Cipher
# vtype description of an LSA secret blob: a length-prefixed string.
lsa_types = {
    'LSA_BLOB': [8, {
        'cbData': [0, ['unsigned int']],      # Used length of szData.
        'cbMaxData': [4, ['unsigned int']],   # Allocated length.
        'szData': [8, ['String', dict(length=lambda x: x.cbData)]],
    }],
}
def get_lsa_key(sec_registry, bootkey):
    """Recover the LSA encryption key from the SECURITY hive.

    Args:
      sec_registry: RegistryHive helper for the SECURITY hive.
      bootkey: The system boot key derived from the SYSTEM hive.

    Returns:
      The 16 byte LSA key, or None when it can not be recovered.
    """
    enc_reg_key = sec_registry.open_key(["Policy", "PolSecretEncryptionKey"])
    enc_reg_value = enc_reg_key.ValueList.List.dereference()[0]
    if not enc_reg_value:
        return None

    obf_lsa_key = enc_reg_value.Data.dereference_as(
        "String", length=enc_reg_value.DataLength).v()
    if not obf_lsa_key:
        return None

    # RC4 key = MD5(bootkey || 1000 x salt), the salt being bytes 60:76
    # of the obfuscated blob.
    md5 = Hash.MD5.new()
    md5.update(bootkey)
    for _ in range(1000):
        md5.update(obf_lsa_key[60:76])

    rc4 = Cipher.ARC4.new(md5.digest())
    plaintext = rc4.decrypt(obf_lsa_key[12:60])

    # The actual LSA key is 16 bytes inside the decrypted buffer.
    return plaintext[0x10:0x20]
def decrypt_secret(secret, key):
    """Python implementation of SystemFunction005.

    Decrypts a block of data with DES using given key.
    Note that key can be longer than 7 bytes.

    Args:
      secret: The encrypted blob (bytes). The first 4 bytes of the
        decrypted output hold the real payload length.
      key: Key material (bytes), consumed 7 bytes at a time and wrapped
        at the end.

    Returns:
      The decrypted payload as bytes.
    """
    # FIX: accumulate bytes, not str - DES.decrypt() returns bytes and
    # struct.unpack below needs a bytes buffer under Python 3.
    decrypted_data = b''
    j = 0  # key index
    for i in range(0, len(secret), 8):
        enc_block = secret[i:i + 8]
        block_key = key[j:j + 7]
        des_key = hashdump.str_to_key(block_key)
        des = Cipher.DES.new(des_key, Cipher.DES.MODE_ECB)
        decrypted_data += des.decrypt(enc_block)

        j += 7
        if len(key[j:j + 7]) < 7:
            # Wrap around to the front of the key.
            j = len(key[j:j + 7])

    # The payload is length-prefixed; skip the 8 byte header.
    (dec_data_len,) = struct.unpack("<L", decrypted_data[:4])
    return decrypted_data[8:8 + dec_data_len]
def get_secret_by_name(secaddr, name, lsakey):
# Fetch and decrypt a single named LSA secret from a raw SECURITY hive
# address space.
#
# NOTE(review): this function references `rawreg`, which is not imported
# anywhere in this module, so calling it raises NameError. It looks like
# legacy code superseded by get_secrets() below - confirm before use.
root = rawreg.get_root(secaddr)
if not root:
return None
# Secrets live under Policy\Secrets\<name>\CurrVal.
enc_secret_key = rawreg.open_key(root, ["Policy", "Secrets", name, "CurrVal"])
if not enc_secret_key:
return None
enc_secret_value = enc_secret_key.ValueList.List.dereference()[0]
if not enc_secret_value:
return None
enc_secret = secaddr.read(enc_secret_value.Data,
enc_secret_value.DataLength)
if not enc_secret:
return None
# Skip the 12 byte header before decrypting.
return decrypt_secret(enc_secret[0xC:], lsakey)
def get_secrets(sys_registry, sec_registry):
    """Decrypt and yield every LSA secret.

    Args:
      sys_registry: RegistryHive helper for the SYSTEM hive.
      sec_registry: RegistryHive helper for the SECURITY hive.

    Yields:
      (name, decrypted_secret) tuples.
    """
    bootkey = hashdump.get_bootkey(sys_registry)
    lsakey = get_lsa_key(sec_registry, bootkey)

    secrets_key = sec_registry.open_key(["Policy", "Secrets"])
    if not secrets_key:
        return

    for key in secrets_key.subkeys():
        sec_val_key = key.open_subkey("CurrVal")
        if not sec_val_key:
            continue

        for enc_value in list(sec_val_key.values()):
            enc_secret = enc_value.Data.dereference_as(
                "String", length=enc_value.DataLength).v()
            if enc_secret:
                # Skip the 12 byte header before decrypting.
                yield key.Name, decrypt_secret(enc_secret[0xC:], lsakey)
from past.builtins import basestring
import binascii
import re
from rekall import addrspace
from rekall.plugins import core
from rekall.plugins.windows.registry import registry
from rekall.plugins.overlays import basic
from rekall_lib import utils
class PrintKey(registry.RegistryPlugin):
"""Print a registry key, and its subkeys and values"""
__name = "printkey"
@classmethod
def args(cls, parser):
"""Declare the command line args we need."""
super(PrintKey, cls).args(parser)
parser.add_argument("-k", "--key", default="",
help="Registry key to print.")
parser.add_argument("-r", "--recursive", default=False,
type="Boolean",
help='If set print the entire subtree.')
def __init__(self, key="", recursive=False, **kwargs):
"""Print all keys and values contained by a registry key.
Args:
key: The key name to list. If not provided we list the root key in the
hive.
recursive: If set print the entire subtree.
"""
super(PrintKey, self).__init__(**kwargs)
self.key = key
self.recursive = recursive
# Yield `key` and - when self.recursive is set - all of its subkeys,
# depth first.
# NOTE(review): the inner loops reuse the name `subkey` for both the
# direct child and the recursive result; it works, but renaming one of
# them would be clearer.
def _list_keys(self, key=None):
yield key
if self.recursive:
for subkey in key.subkeys():
for subkey in self._list_keys(subkey):
yield subkey
def list_keys(self):
"""Return the keys that match."""
seen = set()
for hive_offset in self.hive_offsets:
reg = registry.RegistryHive(
profile=self.profile, session=self.session,
kernel_address_space=self.kernel_address_space,
hive_offset=hive_offset)
key = reg.open_key(self.key)
for subkey in self._list_keys(key):
if subkey in seen:
# NOTE(review): `break` abandons the rest of this hive's
# keys at the first duplicate; `continue` (skip just the
# duplicate) may have been intended - confirm.
break
seen.add(subkey)
yield reg, subkey
def voltext(self, key):
"""Returns a string representing (S)table or (V)olatile keys."""
# Volatile keys live in the upper half of the hive address space.
return "(V)" if key.obj_offset & 0x80000000 else "(S)"
def render(self, renderer):
renderer.format("Legend: (S) = Stable (V) = Volatile\n\n")
for reg, key in self.list_keys():
# The lambda defers the Path lookup until progress is reported.
self.session.report_progress(
"Printing %s", lambda key=key: key.Path)
if key:
renderer.format("----------------------------\n")
renderer.format("Registry: {0}\n", reg.Name)
renderer.format("Key name: {0} {1} @ {2:addrpad}\n", key.Name,
self.voltext(key), key.obj_vm.vtop(int(key)))
renderer.format("Last updated: {0}\n", key.LastWriteTime)
renderer.format("\n")
renderer.format("Subkeys:\n")
for subkey in key.subkeys():
if not subkey.Name:
renderer.format(
" Unknown subkey: {0}\n", subkey.Name.reason)
else:
renderer.format(u" {1} {0}\n",
subkey.Name, self.voltext(subkey))
renderer.format("\n")
renderer.format("Values:\n")
for value in list(key.values()):
renderer.format("{0:addrpad} ", value.obj_vm.vtop(value))
if value.Type == 'REG_BINARY':
data = value.DecodedData
if isinstance(data, basestring):
renderer.format(
u"{0:width=13} {1:width=15} : {2}\n",
value.Type, value.Name, self.voltext(value))
utils.WriteHexdump(renderer, value.DecodedData)
else:
renderer.format(
u"{0:width=13} {1:width=15} : {2} {3}\n",
value.Type, value.Name, self.voltext(value),
utils.SmartUnicode(value.DecodedData).strip())
class RegDump(core.DirectoryDumperMixin, registry.RegistryPlugin):
    """Dump all registry hives from memory into a dump directory."""

    __name = 'regdump'

    def dump_hive(self, hive_offset=None, reg=None, fd=None):
        """Write the hive into the fd.

        Args:
          hive_offset: The virtual offset where the hive is located.
          reg: Optionally an instance of registry.Registry helper. If provided
            hive_offset is ignored.
          fd: The file like object we write to.
        """
        if reg is None:
            # FIX: pass session= here, consistent with every other
            # RegistryHive construction in this module (it was missing
            # only at this call site).
            reg = registry.RegistryHive(
                profile=self.profile, session=self.session,
                kernel_address_space=self.kernel_address_space,
                hive_offset=hive_offset)

        count = 0
        for data in reg.address_space.save():
            fd.write(data)
            count += len(data)
            # FIX: integer division, so progress shows whole megabytes
            # rather than a long float under Python 3.
            self.session.report_progress(
                "Dumping {0}Mb".format(count // 1024 // 1024))

    def render(self, renderer):
        """Dump every selected hive into a sanitized file name."""
        # Get all the offsets if needed.
        for hive_offset in self.hive_offsets:
            reg = registry.RegistryHive(
                profile=self.profile, session=self.session,
                kernel_address_space=self.kernel_address_space,
                hive_offset=hive_offset)

            # Make up a filename for it, should be similar to the hive name.
            filename = reg.Name.rsplit("\\", 1).pop()

            # Sanitize it.
            filename = re.sub(r"[^a-zA-Z0-9_\-@ ]", "_", filename)

            renderer.section()
            renderer.format("Dumping {0} into \"{1}\"\n", reg.Name, filename)

            with renderer.open(directory=self.dump_dir,
                               filename=filename,
                               mode="wb") as fd:
                self.dump_hive(reg=reg, fd=fd)
                renderer.format("Dumped {0} bytes\n", fd.tell())
class HiveDump(registry.RegistryPlugin):
    """Prints out a hive."""

    __name = "hivedump"

    def _key_iterator(self, key, seen):
        """Recursively yield key and all its subkeys, pruning cycles."""
        yield key
        if key in seen:
            # Already recursed into this key - do not do it again.
            return
        seen.add(key)

        for subkey in key.subkeys():
            for item in self._key_iterator(subkey, seen):
                yield item

    def render(self, renderer):
        seen = set()

        for hive_offset in self.hive_offsets:
            reg = registry.RegistryHive(
                hive_offset=hive_offset, session=self.session,
                kernel_address_space=self.kernel_address_space,
                profile=self.profile)

            renderer.section()
            renderer.format("Hive {0}\n\n", reg.Name)

            renderer.table_header([("Last Written", "timestamp", "<24"),
                                   ("Key", "key", "")])

            for key in self._key_iterator(reg.root, seen):
                renderer.table_row(key.LastWriteTime, key.Path)
# Special types to parse the SAM data structures.
sam_vtypes = {
    # Length-prefixed string whose data lives 0xCC bytes past the start
    # of the V struct.
    "UNICODE_STRING": [12, {
        "offset": [0, ["unsigned int"]],
        "len": [4, ["unsigned int"]],
        "Value": lambda x: x.obj_profile.UnicodeString(
            offset=x.offset + 0xCC, length=x.len, vm=x.obj_vm),
    }],

    # Raw hash bytes, rendered as a hex string.
    "Hash": [12, {
        "offset": [0, ["unsigned int"]],
        "len": [4, ["unsigned int"]],
        "Value": lambda x: binascii.hexlify(
            x.obj_vm.read(x.offset + 0xCC, x.len)),
    }],

    # The V value of a user RID key: account names and hashes.
    "V": [None, {
        "Type": [4, ["Enumeration", dict(
            choices={
                0xBC: "Default Admin User",
                0xd4: "Custom Limited Acct",
                0xb0: "Default Guest Acct",
            },
            target="unsigned int")]],
        "UserName": [12, ['UNICODE_STRING']],
        "FullName": [24, ['UNICODE_STRING']],
        "Comment": [36, ['UNICODE_STRING']],
        "LanHash": [156, ['Hash']],
        "NTHash": [168, ['Hash']],
    }],

    # The F value of a user RID key: times, counters and flags.
    "F": [None, {
        "LastLoginTime": [8, ['WinFileTime']],
        "PwdResetDate": [24, ["WinFileTime"]],
        "AccountExpiration": [32, ["WinFileTime"]],
        "PasswordFailedTime": [40, ["WinFileTime"]],
        "LoginCount": [66, ["unsigned short int"]],
        "FailedLoginCount": [64, ["unsigned short int"]],
        "Rid": [48, ["unsigned int"]],
        "Flags": [56, ["Flags", dict(
            maskmap=utils.Invert({
                0x0001: "Account Disabled",
                0x0002: "Home directory required",
                0x0004: "Password not required",
                0x0008: "Temporary duplicate account",
                0x0010: "Normal user account",
                0x0020: "MNS logon user account",
                0x0040: "Interdomain trust account",
                0x0080: "Workstation trust account",
                0x0100: "Server trust account",
                0x0200: "Password does not expire",
                0x0400: "Account auto locked",
            }),
            target="unsigned short int")]],
    }],
}
class SAMProfile(basic.Profile32Bits, basic.BasicClasses):
    """A profile to parse the SAM."""

    @classmethod
    def Initialize(cls, profile):
        super(SAMProfile, cls).Initialize(profile)
        # Teach the profile about the SAM V/F structs defined above.
        profile.add_overlay(sam_vtypes)
class Users(registry.RegistryPlugin):
    """Enumerate all users of this system.

    Ref:
    samparse.pl from RegRipper.
    # copyright 2012 Quantum Analytics Research, LLC
    # Author: H. Carvey, keydet89@yahoo.com
    """

    name = "users"

    def GenerateUsers(self):
        """Generates User RID keys, V and F structs for all users."""
        sam_profile = SAMProfile(session=self.session)

        for hive_offset in self.hive_offsets:
            reg = registry.RegistryHive(
                hive_offset=hive_offset, session=self.session,
                kernel_address_space=self.kernel_address_space,
                profile=self.profile)

            users = reg.open_key("SAM/Domains/Account/Users")
            for user_rid in users.subkeys():
                # The V value holds the account metadata we are after.
                v_data = user_rid.open_value("V")
                if not v_data:
                    continue

                v = sam_profile.V(vm=addrspace.BufferAddressSpace(
                    data=v_data.DecodedData, session=self.session))

                f_data = user_rid.open_value("F")
                f = sam_profile.F(vm=addrspace.BufferAddressSpace(
                    data=f_data.DecodedData, session=self.session))

                yield user_rid, v, f

    def render(self, renderer):
        for user_rid, v, f in self.GenerateUsers():
            renderer.section()
            renderer.format("Key {0} \n\n", user_rid.Path)
            renderer.table_header(
                columns=[("", "property", "20"), ("", "value", "")],
                suppress_headers=True)

            for field in v.members:
                # Fields with a computed Value member render that;
                # everything else renders raw.
                try:
                    renderer.table_row(field, getattr(v, field).Value)
                except AttributeError:
                    renderer.table_row(field, getattr(v, field))

            for field in f.members:
                renderer.table_row(field, getattr(f, field))
class Services(registry.RegistryPlugin):
    """Enumerate all services."""

    name = "services"

    # http://msdn.microsoft.com/en-us/library/windows/desktop/ms682450(v=vs.85).aspx
    # CreateService function.
    SERVICE_TYPE = {
        0x00000004: 'SERVICE_ADAPTER',
        0x00000002: 'SERVICE_FILE_SYSTEM_DRIVER',
        0x00000001: 'SERVICE_KERNEL_DRIVER',
        0x00000008: 'SERVICE_RECOGNIZER_DRIVER',
        0x00000010: 'SERVICE_WIN32_OWN_PROCESS',
        0x00000020: 'SERVICE_WIN32_SHARE_PROCESS',
    }

    START_TYPE = {
        0x00000002: 'SERVICE_AUTO_START',
        0x00000000: 'SERVICE_BOOT_START',
        0x00000003: 'SERVICE_DEMAND_START',
        0x00000004: 'SERVICE_DISABLED',
        0x00000001: 'SERVICE_SYSTEM_START',
    }

    ERROR_CONTROL = {
        0x00000003: 'SERVICE_ERROR_CRITICAL',
        0x00000000: 'SERVICE_ERROR_IGNORE',
        0x00000001: 'SERVICE_ERROR_NORMAL',
        0x00000002: 'SERVICE_ERROR_SEVERE',
    }

    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="Service", hidden=True),
        dict(name="Key", width=20),
        dict(name="Value", width=60),
    ]

    def GenerateServices(self):
        """Yield every service key under CurrentControlSet\\Services."""
        for hive_offset in self.hive_offsets:
            reg = registry.RegistryHive(
                profile=self.profile, session=self.session,
                kernel_address_space=self.kernel_address_space,
                hive_offset=hive_offset)

            for service in reg.CurrentControlSet().open_subkey(
                    "Services").subkeys():
                yield service

    def collect(self):
        for service in self.GenerateServices():
            yield dict(Service=service, divider=service.Name)

            for value in service.values():
                k = value.Name
                v = value.DecodedData

                # Binary values are not human readable - skip them.
                if value.Type == "REG_BINARY":
                    continue

                if isinstance(v, list):
                    v = ",".join([utils.SmartUnicode(x) for x in v if x])

                # Translate well known numeric values to symbolic names.
                if k == "Type":
                    v = self.SERVICE_TYPE.get(v, v)
                elif k == "Start":
                    v = self.START_TYPE.get(v, v)
                elif k == "ErrorControl":
                    v = self.ERROR_CONTROL.get(v, v)

                yield dict(Service=service, Key=k, Value=v)
"""Miscelaneous information gathering plugins."""
from __future__ import division
from builtins import str
from past.utils import old_div
__author__ = "Michael Cohen <scudette@google.com>"
import hashlib
import re
from rekall import obj
from rekall.plugins import core
from rekall.plugins.darwin import common
from rekall.plugins.renderers import visual_aides
from rekall_lib import utils
class DarwinDMSG(common.AbstractDarwinCommand):
    """Print the kernel debug messages."""

    __name = "dmesg"

    def render(self, renderer):
        renderer.table_header([("Message", "message", "<80")])

        # The kernel message buffer is circular, with the write cursor
        # at the msg_bufx member.
        msgbuf = self.profile.get_constant_object(
            "_msgbufp",
            target="Pointer",
            target_args=dict(target="msgbuf"))

        # Make sure the buffer is not too large.
        size = min(msgbuf.msg_size, 0x400000)
        if 0 < msgbuf.msg_bufx < size:
            data = self.kernel_address_space.read(msgbuf.msg_bufc, size)
            # Unroll the ring so the oldest data comes first.
            data = data[msgbuf.msg_bufx:size] + data[:msgbuf.msg_bufx]
            data = data.replace(b"\x00", b"")
            for line in data.splitlines():
                renderer.table_row(line)
class DarwinMachineInfo(common.AbstractDarwinCommand):
    """Show information about this machine."""

    __name = "machine_info"

    def render(self, renderer):
        renderer.table_header([("Attribute", "attribute", "20"),
                               ("Value", "value", "10")])

        # Dump every member of the kernel's machine_info struct.
        info = self.profile.get_constant_object(
            "_machine_info", "machine_info")
        for member in info.members:
            renderer.table_row(member, info.m(member))
class DarwinMount(common.AbstractDarwinCommand):
    """Show mount points."""

    __name = "mount"

    def render(self, renderer):
        renderer.table_header([
            ("Device", "device", "30"),
            ("Mount Point", "mount_point", "60"),
            ("Type", "type", "")])

        mount_list = self.profile.get_constant_object(
            "_mountlist", "mount")

        for mount in mount_list.walk_list("mnt_list.tqe_next", False):
            # FIX: per statfs(2), f_mntfromname is the mounted device and
            # f_mntonname is the directory it is mounted on. The original
            # emitted them in the wrong columns.
            renderer.table_row(mount.mnt_vfsstat.f_mntfromname,
                               mount.mnt_vfsstat.f_mntonname,
                               mount.mnt_vfsstat.f_fstypename)
class DarwinPhysicalMap(common.AbstractDarwinCommand):
"""Prints the EFI boot physical memory map."""
__name = "phys_map"
def render(self, renderer):
renderer.table_header([
("Physical Start", "phys", "[addrpad]"),
("Physical End", "phys", "[addrpad]"),
("Virtual", "virt", "[addrpad]"),
("Pages", "pages", ">10"),
("Type", "type", "")])
# The EFI memory map pointer comes from the boot arguments saved in
# the PE_state global.
boot_params = self.profile.get_constant_object(
"_PE_state", "PE_state").bootArgs
# Code from:
# xnu-1699.26.8/osfmk/i386/AT386/model_dep.c:560
memory_map = self.profile.Array(
boot_params.MemoryMap,
vm=self.physical_address_space,
target="EfiMemoryRange",
target_size=int(boot_params.MemoryMapDescriptorSize),
count=(old_div(boot_params.MemoryMapSize,
boot_params.MemoryMapDescriptorSize)))
runs = []
for memory_range in memory_map:
start = memory_range.PhysicalStart
# Each descriptor covers NumberOfPages 4K pages.
end = (memory_range.PhysicalStart
+ 0x1000
* memory_range.NumberOfPages)
runs.append(dict(
value=utils.SmartUnicode(memory_range.Type),
start=start, end=end))
renderer.table_row(
start,
end,
memory_range.VirtualStart.cast("Pointer"),
memory_range.NumberOfPages,
memory_range.Type)
# Render a heatmap.
# Automatically lower resolution for large images.
resolution = 0x1000 * 0x10 # 16 pages - conservative start.
column_count = 12
# NOTE(review): an empty memory map would raise IndexError here -
# confirm whether that can happen in practice.
end = runs[-1]["end"]
# Keep it under 200 rows.
while end / resolution / column_count > 200:
resolution *= 2
notes = ("Resolution: %(pages)d pages (%(mb).2f MB) per cell.\n"
"Note that colors of overlapping regions are blended "
"using a weighted average. Letters in cells indicate "
"which regions from the legend are present. They are "
"ordered proportionally, by their respective page "
"counts in each cell.") % dict(pages=old_div(resolution, 0x1000),
mb=old_div(resolution, 1024.0 ** 2))
legend = visual_aides.MapLegend(
notes=notes,
legend=[("Am", "kEfiACPIMemoryNVS", (0x00, 0xff, 0x00)),
("Ar", "kEfiACPIReclaimMemory", (0xc7, 0xff, 0x50)),
("Bc", "kEfiBootServicesCode", (0xff, 0xa5, 0x00)),
("Bd", "kEfiBootServicesData", (0xff, 0x00, 0x00)),
("M", "kEfiConventionalMemory", (0xff, 0xff, 0xff)),
("Ec", "kEfiLoaderCode", (0x00, 0xff, 0xff)),
("Ed", "kEfiLoaderData", (0x00, 0x00, 0xff)),
("I", "kEfiMemoryMappedIO", (0xff, 0xff, 0x00)),
("X", "kEfiReservedMemoryType", (0x00, 0x00, 0x00)),
("Rc", "kEfiRuntimeServicesCode", (0xff, 0x00, 0xff)),
("Rd", "kEfiRuntimeServicesData", (0xff, 0x00, 0x50))])
heatmap = visual_aides.RunBasedMap(
caption="Offset (p)",
legend=legend,
runs=runs,
resolution=resolution,
column_count=column_count)
renderer.table_header([
dict(name="Visual mapping", width=120, style="full"),
dict(name="Legend", orientation="vertical", style="full",
width=40)])
renderer.table_row(heatmap, legend)
class DarwinBootParameters(common.AbstractDarwinCommand):
    """Prints the kernel command line."""

    name = "boot_cmdline"

    table_header = [
        dict(name="cmdline", type="str"),
    ]

    def collect(self):
        # The boot arguments are preserved in the PE_state global.
        boot_args = self.profile.get_constant_object(
            "_PE_state", "PE_state").bootArgs
        yield dict(cmdline=boot_args.CommandLine.cast("String"))
class DarwinSetProcessContext(core.SetProcessContextMixin,
common.ProcessFilterMixin,
common.AbstractDarwinCommand):
"""A cc (set process context) plugin for darwin."""
class DarwinVtoP(core.VtoPMixin, common.ProcessFilterMixin,
common.AbstractDarwinCommand):
"""Describe virtual to physical translation on darwin platforms."""
# Behaviour comes entirely from VtoPMixin plus the darwin process filter.
class DarwinImageFingerprint(common.AbstractDarwinParameterHook):
"""Fingerprint the current image.
This parameter tries to get something unique about the image quickly. The
idea is that two different images (even of the same system at different
points in time) will have very different fingerprints. The fingerprint is
used as a key to cache persistent information about the system.
Live systems can not have a stable fingerprint and so return a NoneObject()
here.
We return a list of tuples:
(physical_offset, expected_data)
The list uniquely identifies the image. If one were to read all physical
offsets and find the expected_data at these locations, then we have a very
high level of confidence that the image is unique and matches the
fingerprint.
"""
name = "image_fingerprint"
def calculate(self):
# Live (volatile) images can not have a stable fingerprint.
if self.session.physical_address_space.volatile:
return obj.NoneObject("No fingerprint for volatile image.")
result = []
profile = self.session.profile
phys_as = self.session.physical_address_space
address_space = self.session.GetParameter("default_address_space")
# Anchor on well known kernel strings.
label = profile.get_constant_object("_osversion", "String")
result.append((address_space.vtop(label.obj_offset), label.v()))
label = profile.get_constant_object("_version", "String")
result.append((address_space.vtop(label.obj_offset), label.v()))
# NOTE(review): _sched_tick is read as 8 raw bytes (term=None),
# i.e. as image-unique data rather than text - confirm intent.
label = profile.get_constant_object("_sched_tick", "String",
length=8, term=None)
result.append((address_space.vtop(label.obj_offset), label.v()))
# 8 bytes at the catfish signature offset.
catfish_offset = self.session.GetParameter("catfish_offset")
result.append((catfish_offset, phys_as.read(catfish_offset, 8)))
# List of processes should also be pretty unique.
for task in self.session.plugins.pslist().filter_processes():
name = task.name.cast("String", length=30)
task_name_offset = address_space.vtop(name.obj_offset)
result.append((task_name_offset, name.v()))
return dict(
hash=hashlib.sha1(utils.SmartStr(result)).hexdigest(),
tests=result)
class DarwinHighestUserAddress(common.AbstractDarwinParameterHook):
    """The highest address for user mode/kernel mode division."""

    name = "highest_usermode_address"

    def calculate(self):
        # End of user space on x86_64 darwin.
        return 0x800000000000
__author__ = "Adam Sindelar <adamsh@google.com>"
from rekall_lib import utils
from rekall.plugins.darwin import common
class DarwinZoneHook(common.AbstractDarwinParameterHook):
    """Lists all allocation zones."""

    name = "zones"

    def calculate(self):
        # Walk the kernel's linked list of zone structs starting at
        # _first_zone.
        first_zone = self.session.profile.get_constant_object(
            "_first_zone",
            target="Pointer",
            target_args=dict(target="zone"))

        return [zone.obj_offset
                for zone in first_zone.walk_list("next_zone")]
class DarwinZoneCollector(common.AbstractDarwinCachedProducer):
# Exposes the zones found by DarwinZoneHook as "zone" structs.
name = "zones"
type_name = "zone"
class AbstractZoneElementFinder(common.AbstractDarwinParameterHook):
    """Finds all the valid structs in an allocation zone."""

    __abstract = True

    # Subclasses set these to select the zone and the struct type.
    zone_name = None
    type_name = None

    def validate_element(self, element):
        """Decide whether element is a valid instance of type_name."""
        raise NotImplementedError("Subclasses must override.")

    def calculate(self):
        # Find the zone that contains our data.
        zone = self.session.plugins.search(
            "(select zone from zones() where zone.name == ?).zone",
            query_parameters=[self.zone_name]).first_result
        if not zone:
            raise ValueError("Zone %r doesn't exist." % self.zone_name)

        results = set()
        for offset in zone.known_offsets:
            candidate = self.session.profile.Object(
                offset=offset, type_name=self.type_name)
            if self.validate_element(candidate):
                results.add(candidate.obj_offset)

        return results
class DarwinDumpZone(common.AbstractDarwinCommand):
    """Dumps an allocation zone's contents."""

    name = "dump_zone"

    table_header = [
        dict(name="offset", style="address"),
        dict(name="data", width=34),
    ]

    @classmethod
    def args(cls, parser):
        super(DarwinDumpZone, cls).args(parser)
        parser.add_argument("--zone", default="buf.512")

    def __init__(self, zone="buf.512", **kwargs):
        super(DarwinDumpZone, self).__init__(**kwargs)
        self.zone_name = zone

    def collect(self):
        # Locate the requested zone by name.
        zone = self.session.plugins.search(
            "(select zone from zones() where zone.name == {zone_name}).zone",
            query_parameters=dict(zone_name=self.zone_name),
            silent=True).first_result
        if not zone:
            raise ValueError("No such zone %r." % self.zone_name)

        # Hexdump one zone element per row.
        for offset in zone.known_offsets:
            yield dict(
                offset=offset,
                data=utils.HexDumpedString(
                    zone.obj_vm.read(offset, zone.elem_size)))
# All plugins below dump and validate elements from specific zones.


class DarwinSocketZoneFinder(AbstractZoneElementFinder):
    """Find socket structs in the "socket" allocation zone."""

    name = "dead_sockets"
    zone_name = "socket"
    type_name = "socket"

    def validate_element(self, socket):
        # A valid socket's receive buffer points back at the socket.
        return socket == socket.so_rcv.sb_so


class DarwinSocketZoneCollector(common.AbstractDarwinCachedProducer):
    name = "dead_sockets"
    type_name = "socket"
class DarwinTTYZoneFinder(AbstractZoneElementFinder):
    """Find tty structs in the "ttys" allocation zone."""

    name = "dead_ttys"
    zone_name = "ttys"
    type_name = "tty"

    def validate_element(self, tty):
        # A valid tty's lock points back at the tty itself.
        return tty.t_lock == tty


class DarwinTTYZoneCollector(common.AbstractDarwinCachedProducer):
    name = "dead_ttys"
    type_name = "tty"
class DarwinSessionZoneFinder(AbstractZoneElementFinder):
    """Find session structs in the "session" allocation zone."""

    name = "dead_sessions"
    zone_name = "session"
    type_name = "session"

    def validate_element(self, session):
        # Live refcount and a leader with a sane argument count.
        return session.s_count > 0 and session.s_leader.p_argc > 0


class DarwinSessionZoneCollector(common.AbstractDarwinCachedProducer):
    name = "dead_sessions"
    type_name = "session"
class DarwinZoneVnodeFinder(AbstractZoneElementFinder):
    """Find vnodes in the "vnodes" allocation zone."""

    zone_name = "vnodes"
    type_name = "vnode"
    name = "dead_vnodes"

    def validate_element(self, vnode):
        # Note for later: HFS-related vnodes can be validated by the
        # pointer they have back to the vnode from the cnode (v_data).
        return vnode.v_owner == 0 and vnode.v_mount != 0


class DarwinZoneVnodeCollector(common.AbstractDarwinCachedProducer):
    name = "dead_vnodes"
    type_name = "vnode"
class PsListDeadProcFinder(AbstractZoneElementFinder):
    """Find proc structs in the "proc" allocation zone."""

    name = "dead_procs"
    zone_name = "proc"
    type_name = "proc"

    def validate_element(self, element):
        # proc knows how to validate itself.
        return element.validate()


class DarwinDeadProcessCollector(common.AbstractDarwinCachedProducer):
    """Lists dead processes using the proc allocation zone."""

    name = "dead_procs"
    type_name = "proc"
class DarwinZoneFileprocFinder(AbstractZoneElementFinder):
    """Find fileproc structs in the "fileproc" allocation zone."""

    name = "dead_fileprocs"
    type_name = "fileproc"
    zone_name = "fileproc"

    def validate_element(self, element):
        # No known invariant to check on fileproc - accept everything.
        return True


class DarwinDeadFileprocCollector(common.AbstractDarwinCachedProducer):
    name = "dead_fileprocs"
    type_name = "fileproc"
__author__ = "Adam Sindelar <adamsh@google.com>"
from rekall import plugin
from rekall_lib import registry
from rekall.plugins import core
from rekall.plugins.darwin import common
from rekall.plugins.common import memmap
class DarwinPslist(common.ProcessFilterMixin,
                   common.AbstractDarwinCommand):
    """List processes with liveness and basic task metadata."""

    name = "pslist"

    table_header = [
        dict(width=40, name="proc", type="proc"),
        dict(width=8, name="alive"),
        dict(name="ppid", width=6),
        dict(name="uid", width=6),
        dict(name="is64bit", width=6),
        dict(name="start_time", width=30, style="short"),
        dict(name="cr3", width=15, style="address"),
    ]

    def collect(self):
        for proc in self.filter_processes():
            # A proc discovered only by the dead_procs producer is not
            # alive.
            is_alive = proc.obj_producers != {"dead_procs"}
            pmap = proc.task.map.pmap
            yield dict(
                proc=proc,
                alive=is_alive,
                ppid=proc.p_ppid,
                uid=proc.p_uid,
                is64bit=pmap.pm_task_map == "TASK_MAP_64BIT",
                start_time=proc.p_start.as_datetime(),
                cr3=pmap.pm_cr3)
class DarwinPsxView(common.ProcessFilterMixin,
common.AbstractDarwinCommand):
# Cross-view process listing: one column per enumeration method, True
# where that method discovered the process.
name = "psxview"
# pylint: disable=no-self-argument
@registry.classproperty
@registry.memoize
def table_header(cls):
# The header is computed once (memoized) from the available
# enumeration methods.
header = [dict(width=40, name="proc", type="proc")]
for method in cls.methods():
header.append(dict(name=method, width=8))
return plugin.PluginHeader(*header)
def collect(self):
methods = self.methods()
for proc in self.filter_processes():
row = [proc]
for method in methods:
# obj_producers records which hooks produced this proc.
row.append(method in proc.obj_producers)
yield row
class DarwinPsTree(common.AbstractDarwinCommand):
# Render the process list as a parent/child tree rooted at pid 0.
name = "pstree"
table_header = [
dict(name="depth", type="DepthIndicator", width=10),
dict(name="pid", width=6),
dict(name="ppid", width=6),
dict(name="uid", width=6),
dict(name="name", width=30)
]
def collect(self):
# Get the first process from pslist.
first_proc = self.session.plugins.search(
"(select * from pslist() where proc.pid == 0).proc").first_result
for proc, depth in self.recurse_proc(first_proc, 0):
yield [depth, proc.pid, proc.p_ppid, proc.p_uid, proc.command]
# Depth-first traversal over proc and its descendants via the
# p_children / p_sibling links; only procs passing validate() are
# yielded.
def recurse_proc(self, proc, depth):
if proc.validate():
yield proc, depth
for child in proc.p_children.lh_first.p_sibling:
for subproc, subdepth in self.recurse_proc(child, depth + 1):
yield subproc, subdepth
class DarwinMaps(common.ProcessFilterMixin, common.AbstractDarwinCommand):
    """Display the process maps."""

    __name = "maps"

    def render(self, renderer):
        renderer.table_header([
            dict(name="vm_map_entry", style="address"),
            dict(name="Proc", width=40),
            ("Start", "start", "[addrpad]"),
            ("End", "end", "[addrpad]"),
            ("Protection", "protection", "6"),
            dict(name="Map Name", wrap=False),
        ])

        for proc in self.filter_processes():
            for entry in proc.task.map.hdr.walk_list(
                    "links.next", include_current=False):
                # Format the map permissions nicely (rwx style).
                flags = entry.protection
                protection = "".join([
                    "r" if flags.VM_PROT_READ else "-",
                    "w" if flags.VM_PROT_WRITE else "-",
                    "x" if flags.VM_PROT_EXECUTE else "-"])

                # Find the vnode this mapping is attached to.
                vnode = entry.find_vnode_object()

                renderer.table_row(
                    entry,
                    proc,
                    entry.links.start,
                    entry.links.end,
                    protection,
                    "sub_map" if entry.is_sub_map else vnode.path,
                )
class DarwinVadDump(core.DirectoryDumperMixin, common.ProcessFilterMixin,
                    common.AbstractDarwinCommand):
    """Dump the VMA memory for a process."""

    __name = "vaddump"

    def render(self, renderer):
        for proc in self.filter_processes():
            # Nothing to dump without a page map.
            if not proc.task.map.pmap:
                continue

            renderer.format("Pid: {0:6}\n", proc.p_pid)

            # Get the task and all process specific information.
            task_space = proc.get_process_address_space()
            name = proc.p_comm

            for vma in proc.task.map.hdr.walk_list(
                    "links.next", include_current=False):
                filename = "{0}.{1}.{2:08x}-{3:08x}.dmp".format(
                    name, proc.p_pid, vma.links.start, vma.links.end)

                renderer.format(u"Writing {0}, pid {1} to {2}\n",
                                proc.p_comm, proc.p_pid, filename)

                with renderer.open(directory=self.dump_dir,
                                   filename=filename,
                                   mode='wb') as fd:
                    self.CopyToFile(task_space, vma.links.start,
                                    vma.links.end, fd)
class DarwinPSAUX(common.ProcessFilterMixin, common.AbstractDarwinCommand):
    """List processes with their commandline."""

    __name = "psaux"

    def render(self, renderer):
        renderer.table_header([
            ("Pid", "pid", "8"),
            ("Name", "name", "20"),
            ("Stack", "stack", "[addrpad]"),
            ("Length", "length", "8"),
            ("Argc", "argc", "8"),
            ("Arguments", "argv", "[wrap:80]")])

        for proc in self.filter_processes():
            # Arguments are reconstructed from the user stack.
            renderer.table_row(
                proc.p_pid,
                proc.p_comm,
                proc.user_stack,
                proc.p_argslen,
                proc.p_argc,
                " ".join(proc.argv))
class DarwinMemMap(memmap.MemmapMixIn, common.ProcessFilterMixin,
                   common.AbstractDarwinCommand):
    """Prints the memory map for darwin tasks."""

    __name = "memmap"

    def _get_highest_user_address(self):
        # End of user space on x86_64 darwin.
        return 0x800000000000
class DarwinMemDump(memmap.MemDumpMixin, common.ProcessFilterMixin,
                    common.AbstractDarwinCommand):
    """Dumps the memory map for darwin tasks."""
    # No body of its own: all dumping logic comes from memmap.MemDumpMixin;
    # this class only binds that mixin to the Darwin process-filter command.
# Plugins below represent different enumeration methods for process filter:
class PsListAllProcHook(common.AbstractDarwinParameterHook):
    """List all processes by following the _allproc list head."""

    name = "allproc"

    def calculate(self):
        """Return offsets of every proc reachable from _allproc."""
        list_head = self.session.profile.get_constant_object(
            "_allproc", target="proclist").lh_first
        # Deduplicate the walked p_list chain, then return object offsets.
        return [proc.obj_offset for proc in set(list_head.p_list)]
class PsListTasksHook(common.AbstractDarwinParameterHook):
    """List all processes by walking the processor _tasks queue."""

    name = "tasks"

    def calculate(self):
        """Enumerate processes using the processor tasks queue.

        See /osfmk/kern/processor.c (processor_set_things).
        """
        offsets = set()

        task_queue = self.session.profile.get_constant_object(
            "_tasks",
            target="queue_entry",
            vm=self.session.kernel_address_space)

        for task in task_queue.list_of_type("task", "tasks"):
            # Only tasks that still have a BSD proc attached are reported.
            bsd_proc = task.bsd_info.deref()
            if bsd_proc:
                offsets.add(bsd_proc.obj_offset)

        return offsets
class PsListPgrpHashHook(common.AbstractDarwinParameterHook):
    """List all processes via the process group hash table."""

    name = "pgrphash"

    def calculate(self):
        """Walk the process group hash chains.

        Process groups are organized in a hash chain:
        xnu-1699.26.8/bsd/sys/proc_internal.h
        """
        # _pgrphash is initialized through hashinit():
        #   xnu-1699.26.8/bsd/kern/kern_proc.c:195
        #   xnu-1699.26.8/bsd/kern/kern_subr.c:327
        #     hashinit(int elements, int type, u_long *hashmask) {
        #         ...
        #         *hashmask = hashsize - 1;
        # so the stored value is one less than the table size; the table
        # therefore holds _pgrphash + 1 buckets.
        bucket_count = self.session.profile.get_constant_object(
            "_pgrphash", "unsigned long") + 1

        table = self.session.profile.get_constant_object(
            "_pgrphashtbl",
            target="Pointer",
            target_args=dict(
                target="Array",
                target_args=dict(
                    target="pgrphashhead",
                    count=bucket_count)))

        offsets = set()
        for bucket in table.deref():
            # Each bucket chains process groups via pg_hash; each group
            # chains its member procs via p_pglist.
            for group in bucket.lh_first.walk_list("pg_hash.le_next"):
                members = group.pg_members.lh_first
                for proc in members.walk_list("p_pglist.le_next"):
                    offsets.add(proc.obj_offset)

        return offsets
class PsListPidHashHook(common.AbstractDarwinParameterHook):
    """List all processes via the pid hash table."""

    name = "pidhash"

    def calculate(self):
        """Walk the pid hash chains.

        xnu-1699.26.8/bsd/kern/kern_proc.c:834:
        pfind_locked(pid_t pid)
        """
        # _pidhash is initialized through hashinit():
        #   xnu-1699.26.8/bsd/kern/kern_proc.c:194
        #     pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
        #   xnu-1699.26.8/bsd/kern/kern_subr.c:327
        #     hashinit(int elements, int type, u_long *hashmask) {
        #         ...
        #         *hashmask = hashsize - 1;
        # so the stored value is one less than the table size; the table
        # therefore holds _pidhash + 1 buckets.
        bucket_count = self.session.profile.get_constant_object(
            "_pidhash", "unsigned long") + 1

        table = self.session.profile.get_constant_object(
            "_pidhashtbl",
            target="Pointer",
            target_args=dict(
                target="Array",
                target_args=dict(
                    target="pidhashhead",
                    count=bucket_count)))

        offsets = set()
        for bucket in table.deref():
            for proc in bucket.lh_first.walk_list("p_hash.le_next"):
                # Skip null/invalid entries at the end of a chain.
                if proc:
                    offsets.add(proc.obj_offset)

        return offsets
class DarwinPgrpHashCollector(common.AbstractDarwinCachedProducer):
    """Produce proc objects from the cached "pgrphash" parameter hook."""
    name = "pgrphash"
    type_name = "proc"
class DarwinTaskProcessCollector(common.AbstractDarwinCachedProducer):
    """Produce proc objects from the cached "tasks" parameter hook."""
    name = "tasks"
    type_name = "proc"
class DarwinAllProcCollector(common.AbstractDarwinCachedProducer):
    """Produce proc objects from the cached "allproc" parameter hook."""
    name = "allproc"
    type_name = "proc"
class DarwinPidHashProcessCollector(common.AbstractDarwinCachedProducer):
    """Produce proc objects from the cached "pidhash" parameter hook."""
    name = "pidhash"
    type_name = "proc"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.