code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# coding: utf-8
from typing import List, Tuple, Dict
import torch
import logging
import sys
import os
import copy
import json
import collections
import subprocess
from tqdm import tqdm, trange
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
# My Staff
from utils.iter_helper import PadCollate, FewShotDataset
from utils.preprocessor import FewShotFeature, ModelInput
from utils.device_helper import prepare_model
from utils.model_helper import make_model, load_model
from models.modules.transition_scorer import FewShotTransitionScorer
from models.few_shot_seq_labeler import FewShotSeqLabeler
# Configure root logging to stdout so eval progress is visible when run as a script.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO,
                    stream=sys.stdout)
logger = logging.getLogger(__name__)
# Pairs one test feature with the model's prediction for it, for later scoring.
RawResult = collections.namedtuple("RawResult", ["feature", "prediction"])
class TesterBase:
    """
    Base class for evaluation over pre-extracted features.

    Support features:
        - multi-gpu [accelerating]
        - distributed gpu [accelerating]
        - padding when forward [better result & save space]
    """
    def __init__(self, opt, device, n_gpu):
        # opt: parsed command-line options; device: torch device; n_gpu: GPU count.
        if opt.gradient_accumulation_steps < 1:
            raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                opt.gradient_accumulation_steps))
        self.opt = opt
        # Following is used to split the batch to save space
        self.batch_size = opt.test_batch_size
        self.device = device
        self.n_gpu = n_gpu

    def do_test(self, model: torch.nn.Module, test_features: List[FewShotFeature], id2label: dict,
                log_mark: str = 'test_pred'):
        """Run the model over all test features and return eval_predictions' score.

        `log_mark` becomes part of the prediction file names written by subclasses.
        """
        logger.info("***** Running eval *****")
        # print("***** Running eval *****")
        logger.info(" Num features = %d", len(test_features))
        logger.info(" Batch size = %d", self.batch_size)
        all_results = []
        model.eval()
        data_loader = self.get_data_loader(test_features)
        for batch in tqdm(data_loader, desc="Eval-Batch Progress"):
            batch = tuple(t.to(self.device) for t in batch)  # multi-gpu does scattering it-self
            with torch.no_grad():
                predictions = self.do_forward(batch, model)
            # batch[0] holds the global ids, used to map predictions back to features.
            for i, feature_gid in enumerate(batch[0]):  # iter over feature global id
                prediction = predictions[i]
                feature = test_features[feature_gid.item()]
                all_results.append(RawResult(feature=feature, prediction=prediction))
                if model.emb_log:
                    model.emb_log.write('text_' + str(feature_gid.item()) + '\t'
                                        + '\t'.join(feature.test_feature_item.data_item.seq_in) + '\n')
        # close file handler
        if model.emb_log:
            model.emb_log.close()
        scores = self.eval_predictions(all_results, id2label, log_mark)
        return scores

    def get_data_loader(self, features):
        # NOTE(review): TensorDataset expects *tensors, not a single list of
        # per-feature tensor lists — this base implementation looks broken, but
        # both subclasses override it, so it may never execute. TODO confirm
        # before fixing.
        dataset = TensorDataset([self.unpack_feature(f) for f in features])
        if self.opt.local_rank == -1:
            sampler = RandomSampler(dataset)
        else:
            sampler = DistributedSampler(dataset)
        data_loader = DataLoader(dataset, sampler=sampler, batch_size=self.batch_size)
        return data_loader

    def clone_model(self, model, id2label):
        # get a new instance
        return copy.deepcopy(model)

    def unpack_feature(self, feature) -> List[torch.Tensor]:
        # Subclasses turn one feature object into the flat tensor list a batch is made of.
        raise NotImplementedError

    def do_forward(self, batch, model):
        prediction = model(*batch)
        return prediction

    def eval_predictions(self, *args, **kwargs) -> float:
        # Subclasses compute the final score from the collected RawResults.
        raise NotImplementedError
class FewShotTester(TesterBase):
    """
    Tester for few-shot episodic evaluation (scores are averaged per few-shot batch).

    Support features:
        - multi-gpu [accelerating]
        - distributed gpu [accelerating]
        - padding when forward [better result & save space]
    """
    def __init__(self, opt, device, n_gpu):
        super(FewShotTester, self).__init__(opt, device, n_gpu)

    def get_data_loader(self, features):
        # Sequential sampling keeps feature order deterministic during eval.
        dataset = FewShotDataset([self.unpack_feature(f) for f in features])
        if self.opt.local_rank == -1:
            sampler = SequentialSampler(dataset)
        else:
            sampler = DistributedSampler(dataset)
        pad_collate = PadCollate(dim=-1, sp_dim=-2, sp_item_idx=[3, 8, 12])  # nwp_index, spt_tgt need special padding
        data_loader = DataLoader(dataset, sampler=sampler, batch_size=self.batch_size, collate_fn=pad_collate)
        return data_loader

    def eval_predictions(self, all_results: List[RawResult], id2label: dict, log_mark: str) -> float:
        """ Our result score is average score of all few-shot batches. """
        all_batches = self.reform_few_shot_batch(all_results)
        all_scores = []
        for b_id, fs_batch in all_batches:
            f1 = self.eval_one_few_shot_batch(b_id, fs_batch, id2label, log_mark)
            all_scores.append(f1)
        return sum(all_scores) * 1.0 / len(all_scores)

    def eval_one_few_shot_batch(self, b_id, fs_batch: List[RawResult], id2label: dict, log_mark: str) -> float:
        """Score one few-shot batch; predictions are written to <log_mark>.<b_id>.txt."""
        pred_file_name = '{}.{}.txt'.format(log_mark, b_id)
        output_prediction_file = os.path.join(self.opt.output_dir, pred_file_name)
        if self.opt.task == 'sl':  # sequence labeling: score with the external perl script
            self.writing_sl_prediction(fs_batch, output_prediction_file, id2label)
            precision, recall, f1 = self.eval_with_script(output_prediction_file)
        elif self.opt.task == 'sc':  # sentence classification: score in-process
            precision, recall, f1 = self.writing_sc_prediction(fs_batch, output_prediction_file, id2label)
        else:
            raise ValueError("Wrong task.")
        return f1

    def writing_sc_prediction(self, fs_batch: List[RawResult], output_prediction_file: str, id2label: dict):
        """Write classification predictions as JSON and return (precision, recall, f1)."""
        tp, fp, fn = 0, 0, 0
        writing_content = []
        for result in fs_batch:
            pred_ids = result.prediction  # prediction is directly the predict ids [pad is removed in decoder]
            feature = result.feature
            # Multi-label comparison: predicted vs gold label sets.
            pred_label = set([id2label[pred_id] for pred_id in pred_ids])
            label = set(feature.test_feature_item.data_item.label)
            writing_content.append({
                'seq_in': feature.test_feature_item.data_item.seq_in,
                'pred': list(pred_label),
                'label': list(label),
            })
            tp, fp, fn = self.update_f1_frag(pred_label, label, tp, fp, fn)  # update tp, fp, fn
        with open(output_prediction_file, "w") as writer:
            json.dump(writing_content, writer, indent=2)
        return self.compute_f1(tp, fp, fn)

    def update_f1_frag(self, pred_label, label, tp=0, fp=0, fn=0):
        """Accumulate set-based true positives, false positives and false negatives."""
        tp += len(pred_label & label)
        fp += len(pred_label - label)
        fn += len(label - pred_label)
        return tp, fp, fn

    def compute_f1(self, tp, fp, fn):
        """Compute precision/recall/F1 with a tiny epsilon guarding zero division."""
        tp += 0.0000001  # to avoid zero division
        fp += 0.0000001
        fn += 0.0000001
        precision = 1.0 * tp / (tp + fp)
        recall = 1.0 * tp / (tp + fn)
        f1 = 2 * precision * recall / (precision + recall)
        return precision, recall, f1

    def writing_sl_prediction(self, fs_batch: List[RawResult], output_prediction_file: str, id2label: dict):
        """Write token-level predictions in CoNLL style: "word true_label pred_label" per line,
        with a blank line between sentences (the format the eval script expects)."""
        writing_content = []
        for result in fs_batch:
            prediction = result.prediction
            feature = result.feature
            pred_ids = prediction  # prediction is directly the predict ids
            if len(pred_ids) != len(feature.test_feature_item.data_item.seq_in):
                raise RuntimeError("Failed to align the pred_ids to texts: {},{} \n{},{} \n{},{}".format(
                    len(pred_ids), pred_ids,
                    len(feature.test_feature_item.data_item.seq_in), feature.test_feature_item.data_item.seq_in,
                    len(feature.test_feature_item.data_item.seq_out), feature.test_feature_item.data_item.seq_out
                ))
            for pred_id, word, true_label in zip(pred_ids, feature.test_feature_item.data_item.seq_in, feature.test_feature_item.data_item.seq_out):
                pred_label = id2label[pred_id]
                writing_content.append('{0} {1} {2}'.format(word, true_label, pred_label))
            writing_content.append('')
        with open(output_prediction_file, "w") as writer:
            writer.write('\n'.join(writing_content))

    def eval_with_script(self, output_prediction_file):
        """Pipe the prediction file through the perl eval script and parse P/R/F1.

        NOTE(review): `p.stdout.readlines()` yields bytes; `str(...)` turns each
        into "b'...'" before splitting — the hard-coded indices 3/5/7 rely on that
        exact conlleval-style output layout. TODO confirm against the script used.
        """
        script_args = ['perl', self.opt.eval_script]
        with open(output_prediction_file, 'r') as res_file:
            p = subprocess.Popen(script_args, stdout=subprocess.PIPE, stdin=res_file)
            p.wait()
            std_results = p.stdout.readlines()
        if self.opt.verbose:
            for r in std_results:
                print(r)
        # Second output line looks like: "accuracy: ..%; precision: ..%; recall: ..%; FB1: .."
        std_results = str(std_results[1]).split()
        precision = float(std_results[3].replace('%;', ''))
        recall = float(std_results[5].replace('%;', ''))
        f1 = float(std_results[7].replace('%;', '').replace("\\n'", ''))
        return precision, recall, f1

    def reform_few_shot_batch(self, all_results: List[RawResult]) -> List[List[Tuple[int, RawResult]]]:
        """
        Our result score is average score of all few-shot batches.
        So here, we classify all result according to few-shot batch id.
        """
        all_batches = {}
        for result in all_results:
            b_id = result.feature.batch_gid
            if b_id not in all_batches:
                all_batches[b_id] = [result]
            else:
                all_batches[b_id].append(result)
        # Sorted by batch id so file names / scores are reproducible.
        return sorted(all_batches.items(), key=lambda x: x[0])

    def unpack_feature(self, feature: FewShotFeature) -> List[torch.Tensor]:
        # Positions matter: indices 3, 8 and 12 get special padding in PadCollate
        # (see get_data_loader's sp_item_idx).
        ret = [
            torch.LongTensor([feature.gid]),
            # test
            feature.test_input.token_ids,
            feature.test_input.segment_ids,
            feature.test_input.nwp_index,
            feature.test_input.input_mask,
            feature.test_input.output_mask,
            # support
            feature.support_input.token_ids,
            feature.support_input.segment_ids,
            feature.support_input.nwp_index,
            feature.support_input.input_mask,
            feature.support_input.output_mask,
            # target
            feature.test_target,
            feature.support_target,
            # Special
            torch.LongTensor([len(feature.support_feature_items)]),  # support num
        ]
        return ret

    def do_forward(self, batch, model):
        # Unpack in the exact order produced by unpack_feature / PadCollate.
        (
            gid,  # 0
            test_token_ids,  # 1
            test_segment_ids,  # 2
            test_nwp_index,  # 3
            test_input_mask,  # 4
            test_output_mask,  # 5
            support_token_ids,  # 6
            support_segment_ids,  # 7
            support_nwp_index,  # 8
            support_input_mask,  # 9
            support_output_mask,  # 10
            test_target,  # 11
            support_target,  # 12
            support_num,  # 13
        ) = batch
        prediction = model(
            # loss, prediction = model(
            test_token_ids,
            test_segment_ids,
            test_nwp_index,
            test_input_mask,
            test_output_mask,
            support_token_ids,
            support_segment_ids,
            support_nwp_index,
            support_input_mask,
            support_output_mask,
            test_target,
            support_target,
            support_num,
        )
        return prediction

    def get_value_from_order_dict(self, order_dict, key):
        """Return the first value whose key contains `key`; [] when nothing matches."""
        for k, v in order_dict.items():
            if key in k:
                return v
        return []

    def clone_model(self, model, id2label):
        """ clone only part of params """
        # deal with data parallel model
        new_model: FewShotSeqLabeler
        old_model: FewShotSeqLabeler
        # NOTE(review): `or` binds looser than `and`, so this reads as
        # local_rank != -1 OR (n_gpu > 1 AND hasattr(...)) — TODO confirm intended.
        if self.opt.local_rank != -1 or self.n_gpu > 1 and hasattr(model, 'module'):  # the model is parallel class here
            old_model = model.module
        else:
            old_model = model
        emission_dict = old_model.emission_scorer.state_dict()
        # NOTE(review): old_num_tags is computed but never used below.
        old_num_tags = len(self.get_value_from_order_dict(emission_dict, 'label_reps'))
        config = {'num_tags': len(id2label), 'id2label': id2label}
        if 'num_anchors' in old_model.config:
            config['num_anchors'] = old_model.config['num_anchors']  # Use previous model's random anchors.
        # get a new instance for different domain
        new_model = make_model(opt=self.opt, config=config)
        new_model = prepare_model(self.opt, new_model, self.device, self.n_gpu)
        if self.opt.local_rank != -1 or self.n_gpu > 1:
            sub_new_model = new_model.module
        else:
            sub_new_model = new_model
        ''' copy weights and stuff '''
        if old_model.opt.task == 'sl' and old_model.transition_scorer:
            # copy one-by-one because target transition and decoder will be left un-assigned
            sub_new_model.context_embedder.load_state_dict(old_model.context_embedder.state_dict())
            sub_new_model.emission_scorer.load_state_dict(old_model.emission_scorer.state_dict())
            for param_name in ['backoff_trans_mat', 'backoff_start_trans_mat', 'backoff_end_trans_mat']:
                sub_new_model.transition_scorer.state_dict()[param_name].copy_(
                    old_model.transition_scorer.state_dict()[param_name].data)
        else:
            sub_new_model.load_state_dict(old_model.state_dict())
        return new_model
class SchemaFewShotTester(FewShotTester):
    """Few-shot tester variant that additionally feeds label-schema features to the model."""

    def __init__(self, opt, device, n_gpu):
        super(SchemaFewShotTester, self).__init__(opt, device, n_gpu)

    def get_data_loader(self, features):
        """ add label index into special padding """
        unpacked = [self.unpack_feature(feature) for feature in features]
        dataset = FewShotDataset(unpacked)
        sampler = SequentialSampler(dataset) if self.opt.local_rank == -1 else DistributedSampler(dataset)
        # nwp_index and spt_tgt positions (3, 8, 12) plus the label nwp_index (16) need sp-padding
        collate = PadCollate(dim=-1, sp_dim=-2, sp_item_idx=[3, 8, 12, 16])
        return DataLoader(dataset, sampler=sampler, batch_size=self.batch_size, collate_fn=collate)

    def unpack_feature(self, feature: FewShotFeature) -> List[torch.Tensor]:
        """Flatten one feature into the fixed-position tensor list the collate fn expects."""
        test, support, label = feature.test_input, feature.support_input, feature.label_input
        ret = [torch.LongTensor([feature.gid])]  # 0: global feature id
        # 1-5: test inputs
        ret += [test.token_ids, test.segment_ids, test.nwp_index, test.input_mask, test.output_mask]
        # 6-10: support inputs
        ret += [support.token_ids, support.segment_ids, support.nwp_index, support.input_mask, support.output_mask]
        # 11-12: targets
        ret += [feature.test_target, feature.support_target]
        # 13: size of the support set
        ret.append(torch.LongTensor([len(feature.support_feature_items)]))
        # 14-18: label-schema inputs
        ret += [label.token_ids, label.segment_ids, label.nwp_index, label.input_mask, label.output_mask]
        return ret

    def do_forward(self, batch, model):
        """Run the model on one collated batch; batch[0] is the gid tensor and is not fed in."""
        gid, *model_inputs = batch  # model_inputs: items 1-18 in unpack_feature order
        prediction = model(*model_inputs)
        return prediction
def eval_check_points(opt, tester, test_features, test_id2label, device):
    """Evaluate every saved checkpoint in opt.saved_model_path and return the best score.

    Checkpoints are files matching '*.cpt.pl', evaluated in ascending step order.
    """
    def _step_num(file_name):
        # 'model.step1200.cpt.pl' -> 1200
        return int(file_name.replace('model.step', '').replace('.cpt.pl', ''))

    checkpoint_files = sorted(
        (name for name in os.listdir(opt.saved_model_path) if '.cpt.pl' in name),
        key=_step_num,
    )
    best_score = 0
    for cpt_file in checkpoint_files:
        checkpoint = load_model(os.path.join(opt.saved_model_path, cpt_file))
        testing_model = tester.clone_model(checkpoint, test_id2label)
        if opt.mask_transition and opt.task == 'sl':
            testing_model.label_mask = opt.test_label_mask.to(device)
        score = tester.do_test(testing_model, test_features, test_id2label, log_mark='test_pred')
        best_score = max(best_score, score)
        logger.info('cpt_file:{} - test:{}'.format(cpt_file, score))
    return best_score
| [
"logging.basicConfig",
"logging.getLogger",
"utils.iter_helper.PadCollate",
"collections.namedtuple",
"copy.deepcopy",
"os.listdir",
"subprocess.Popen",
"torch.LongTensor",
"tqdm.tqdm",
"os.path.join",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.no_grad",
... | [((697, 859), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO', 'stream': 'sys.stdout'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO, stream=sys.stdout)\n", (716, 859), False, 'import logging\n'), ((919, 946), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (936, 946), False, 'import logging\n'), ((961, 1023), 'collections.namedtuple', 'collections.namedtuple', (['"""RawResult"""', "['feature', 'prediction']"], {}), "('RawResult', ['feature', 'prediction'])\n", (983, 1023), False, 'import collections\n'), ((2138, 2183), 'tqdm.tqdm', 'tqdm', (['data_loader'], {'desc': '"""Eval-Batch Progress"""'}), "(data_loader, desc='Eval-Batch Progress')\n", (2142, 2183), False, 'from tqdm import tqdm, trange\n'), ((3343, 3407), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'sampler', 'batch_size': 'self.batch_size'}), '(dataset, sampler=sampler, batch_size=self.batch_size)\n', (3353, 3407), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3524, 3544), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (3537, 3544), False, 'import copy\n'), ((4461, 4514), 'utils.iter_helper.PadCollate', 'PadCollate', ([], {'dim': '(-1)', 'sp_dim': '(-2)', 'sp_item_idx': '[3, 8, 12]'}), '(dim=-1, sp_dim=-2, sp_item_idx=[3, 8, 12])\n', (4471, 4514), False, 'from utils.iter_helper import PadCollate, FewShotDataset\n'), ((4580, 4673), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'sampler', 'batch_size': 'self.batch_size', 'collate_fn': 'pad_collate'}), '(dataset, sampler=sampler, batch_size=self.batch_size, collate_fn\n =pad_collate)\n', (4590, 4673), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, 
SequentialSampler\n'), ((5380, 5429), 'os.path.join', 'os.path.join', (['self.opt.output_dir', 'pred_file_name'], {}), '(self.opt.output_dir, pred_file_name)\n', (5392, 5429), False, 'import os\n'), ((12754, 12793), 'utils.model_helper.make_model', 'make_model', ([], {'opt': 'self.opt', 'config': 'config'}), '(opt=self.opt, config=config)\n', (12764, 12793), False, 'from utils.model_helper import make_model, load_model\n'), ((12814, 12873), 'utils.device_helper.prepare_model', 'prepare_model', (['self.opt', 'new_model', 'self.device', 'self.n_gpu'], {}), '(self.opt, new_model, self.device, self.n_gpu)\n', (12827, 12873), False, 'from utils.device_helper import prepare_model\n'), ((14301, 14358), 'utils.iter_helper.PadCollate', 'PadCollate', ([], {'dim': '(-1)', 'sp_dim': '(-2)', 'sp_item_idx': '[3, 8, 12, 16]'}), '(dim=-1, sp_dim=-2, sp_item_idx=[3, 8, 12, 16])\n', (14311, 14358), False, 'from utils.iter_helper import PadCollate, FewShotDataset\n'), ((14419, 14512), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'sampler', 'batch_size': 'self.batch_size', 'collate_fn': 'pad_collate'}), '(dataset, sampler=sampler, batch_size=self.batch_size, collate_fn\n =pad_collate)\n', (14429, 14512), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3234, 3256), 'torch.utils.data.RandomSampler', 'RandomSampler', (['dataset'], {}), '(dataset)\n', (3247, 3256), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3293, 3320), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['dataset'], {}), '(dataset)\n', (3311, 3320), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((4348, 4374), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['dataset'], {}), '(dataset)\n', (4365, 4374), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4411, 4438), 
'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['dataset'], {}), '(dataset)\n', (4429, 4438), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((6707, 6751), 'json.dump', 'json.dump', (['writing_content', 'writer'], {'indent': '(2)'}), '(writing_content, writer, indent=2)\n', (6716, 6751), False, 'import json\n'), ((8743, 8812), 'subprocess.Popen', 'subprocess.Popen', (['script_args'], {'stdout': 'subprocess.PIPE', 'stdin': 'res_file'}), '(script_args, stdout=subprocess.PIPE, stdin=res_file)\n', (8759, 8812), False, 'import subprocess\n'), ((9956, 9987), 'torch.LongTensor', 'torch.LongTensor', (['[feature.gid]'], {}), '([feature.gid])\n', (9972, 9987), False, 'import torch\n'), ((14188, 14214), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['dataset'], {}), '(dataset)\n', (14205, 14214), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((14251, 14278), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['dataset'], {}), '(dataset)\n', (14269, 14278), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((14641, 14672), 'torch.LongTensor', 'torch.LongTensor', (['[feature.gid]'], {}), '([feature.gid])\n', (14657, 14672), False, 'import torch\n'), ((17142, 17174), 'os.listdir', 'os.listdir', (['opt.saved_model_path'], {}), '(opt.saved_model_path)\n', (17152, 17174), False, 'import os\n'), ((17397, 17441), 'os.path.join', 'os.path.join', (['opt.saved_model_path', 'cpt_file'], {}), '(opt.saved_model_path, cpt_file)\n', (17409, 17441), False, 'import os\n'), ((2299, 2314), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2312, 2314), False, 'import torch\n')] |
#!/usr/bin/python
# Copyright 2016 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Charm event hooks."""
import json
import sys
from charmhelpers.core import hookenv
from charmhelpers.payload import execd
import cinder_contexts
# Registry mapping Juju hook names to handler functions (populated via @HOOKS.hook).
HOOKS = hookenv.Hooks()
@HOOKS.hook('install')
def install():
    """Perform ``install`` handling."""
    # Run any pre-install scripts shipped in the charm's exec.d directory.
    execd.execd_preinstall()
@HOOKS.hook('config-changed', 'upgrade-charm')
def upgrade_charm():
    """Perform ``config-changed`` or ``upgrade-charm`` handling."""
    # Re-publish the backend configuration on every existing storage-backend relation.
    for relation_id in hookenv.relation_ids('storage-backend'):
        storage_backend(relation_id)
@HOOKS.hook('storage-backend-relation-joined',
            'storage-backend-relation-changed',
            'storage-backend-relation-broken')
def storage_backend(rid=None):
    """Perform relation change handling.

    This handles ``storage-backend-relation-joined``,
    ``storage-backend-relation-changed``, and
    ``storage-backend-relation-broken`` events.

    :param rid: The relationship ID.
    """
    # Publish this charm's name and its Dell SC subordinate configuration
    # (as JSON) onto the relation for the principal cinder charm to consume.
    hookenv.relation_set(
        relation_id=rid,
        backend_name=hookenv.service_name(),
        subordinate_configuration=json.dumps(cinder_contexts.DellScContext())
    )
if __name__ == '__main__':
    try:
        HOOKS.execute(sys.argv)
    except hookenv.UnregisteredHookError as e:
        # Juju may invoke hooks this charm has not registered; log and exit cleanly.
        hookenv.log('Unknown hook %s' % e)
| [
"charmhelpers.payload.execd.execd_preinstall",
"charmhelpers.core.hookenv.log",
"charmhelpers.core.hookenv.relation_ids",
"cinder_contexts.DellScContext",
"charmhelpers.core.hookenv.service_name",
"charmhelpers.core.hookenv.Hooks"
] | [((784, 799), 'charmhelpers.core.hookenv.Hooks', 'hookenv.Hooks', ([], {}), '()\n', (797, 799), False, 'from charmhelpers.core import hookenv\n'), ((884, 908), 'charmhelpers.payload.execd.execd_preinstall', 'execd.execd_preinstall', ([], {}), '()\n', (906, 908), False, 'from charmhelpers.payload import execd\n'), ((1062, 1101), 'charmhelpers.core.hookenv.relation_ids', 'hookenv.relation_ids', (['"""storage-backend"""'], {}), "('storage-backend')\n", (1082, 1101), False, 'from charmhelpers.core import hookenv\n'), ((1615, 1637), 'charmhelpers.core.hookenv.service_name', 'hookenv.service_name', ([], {}), '()\n', (1635, 1637), False, 'from charmhelpers.core import hookenv\n'), ((1848, 1882), 'charmhelpers.core.hookenv.log', 'hookenv.log', (["('Unknown hook %s' % e)"], {}), "('Unknown hook %s' % e)\n", (1859, 1882), False, 'from charmhelpers.core import hookenv\n'), ((1684, 1715), 'cinder_contexts.DellScContext', 'cinder_contexts.DellScContext', ([], {}), '()\n', (1713, 1715), False, 'import cinder_contexts\n')] |
from survey import Survey
from question import Question
class Collector:
    """Builds surveys interactively and manages named contact lists.

    A collector owns a set of completed surveys (``survey_list``) plus one
    survey/question pair currently under construction (``cur`` / ``cur_question``).
    """

    def __init__(self, phone, name):
        self.phone = phone
        self.name = name
        self.survey_list = {}     # survey name -> Survey
        self.contact_list = {}    # lower-cased list name -> list of phone number strings
        self.state = 0
        self.cur = None           # Survey currently being built, or None
        self.cur_question = None  # Question currently being built, or None

    def create_Survey(self, name):
        """Start building a new survey; return 1 on success, -1 on failure."""
        self.cur = Survey(name)
        if self.cur is None:
            return -1
        return 1

    def add_question(self, question):
        """Append a finished question to the survey under construction (no-op if none).

        BUG FIX: the original called ``self.add_question(question)``, recursing
        forever; the question belongs on the survey being built (``self.cur``).
        """
        if self.cur is not None:
            self.cur.add_question(question)

    def process_response(self, input):
        """Feed one line of user input into the question being built.

        Returns 1 after the description is captured, 2 for type/choice updates.
        """
        if self.cur_question is None:
            self.cur_question = Question()
            self.cur_question.add_description(input)
            return 1
        elif self.cur_question.get_type() is None:
            self.cur_question.set_type(input)
            return 2
        else:
            self.cur_question.add_choice(input)
            return 2

    def add_contact_list(self, phone_list):
        """Parse ``"name,phone1,phone2,..."`` and add the numbers under ``name``.

        BUG FIX: the original indexed ``self.contact_list[name]`` without ever
        creating the list, raising KeyError; ``setdefault`` creates it on first use.
        """
        parts = phone_list.split(",")
        name = str(parts[0]).lower()
        contacts = self.contact_list.setdefault(name, [])
        for number in parts[1:]:
            contacts.append(str(number))

    def get_contact_list(self, name):
        """Return the phone numbers stored under ``name`` (raises KeyError if absent)."""
        return self.contact_list[name]

    def end_choice(self):
        """Finish the current question and attach it to the survey being built."""
        self.cur.add_question(self.cur_question)
        self.cur_question = None

    def end_survey(self):
        """Finish the current survey and register it under its own name."""
        self.survey_list[self.cur.get_name()] = self.cur
        self.cur = None

    def get_survey(self, name):
        """Look up a completed survey by name (raises KeyError if absent)."""
        return self.survey_list[name]
"question.Question",
"survey.Survey"
] | [((342, 354), 'survey.Survey', 'Survey', (['name'], {}), '(name)\n', (348, 354), False, 'from survey import Survey\n'), ((641, 651), 'question.Question', 'Question', ([], {}), '()\n', (649, 651), False, 'from question import Question\n')] |
"""
Class for handling options.
"""
import inspect
from typing import Union, Optional
from importlib._bootstrap import module_from_spec
from importlib._bootstrap_external import spec_from_file_location
from pathlib import Path
# NOTE(review): this set appears unused in this module (update_options builds its
# own dict of the same names) — TODO confirm before removing.
_path_directives = {'primary', 'secondary', 'default'}
class Figcon:
    """ Class for handling finding and loading options """

    def __init__(self,
                 default_path: Union[str, Path],
                 primary_path: Optional[Union[str, Path]] = None,
                 secondary_path: Optional[Union[str, Path]] = None,
                 config_name='config.py'
                 ):
        """
        Create an options object.

        The order of precedent is primary -> secondary -> default.

        Parameters
        ----------
        default_path
            The path used for the default parameters.
        primary_path
            The path used for primary parameters. If not specified use cwd.
        secondary_path
            The path used for secondary parameters. If not specified use home.
        config_name
            The name of the config files that can be found in primary,
            secondary, or default paths.
        """
        # create a dict for storing state
        self.__state = {}
        # set paths
        self.default_path = default_path
        self.primary_path = primary_path or Path.cwd()
        self.secondary_path = secondary_path or Path.home()
        self._config_file_name = config_name
        # update state
        self.update_options()

    def __getattr__(self, item):
        """
        Try to get the attribute, first from primary, then secondary, then default.
        """
        try:
            return self.__state[item]
        except KeyError:
            msg = (
                f"Options contains no item {item}. You can set it in "
                f"a {self._config_file_name} file in either {self.default_path},"
                f" {self.primary_path} or {self.secondary_path}."
            )
            raise AttributeError(msg)

    def _update_defaults(self, new, base=None):
        """
        Try to intelligently update the base state, recursing into
        identically named objects with __dict__ and updating.
        """
        base = base or self.__state
        # handle objects not already in instance state
        disjoint = set(new) - set(base)
        base.update({x: new[x] for x in disjoint})
        # handle overlaps
        overlap = set(base) & set(new)
        for item in overlap:
            obj1, obj2 = base[item], new[item]
            if inspect.isfunction(obj2):
                # functions always replace whatever was there
                base[item] = obj2
            elif hasattr(obj2, "__dict__") and hasattr(obj1, "__dict__"):
                # merge attribute namespaces of like-named objects
                if obj1 is not obj2:
                    self._update_defaults(obj2.__dict__, obj1.__dict__)
            else:
                base[item] = obj2

    def _load_config(self, path: Union[str, Path], file_name: Optional[str] = None):
        """Load option names/values from a config module found at ``path``.

        ``path`` may be a directory containing ``file_name`` (or its hidden
        dot-prefixed variant) or a direct path to a config file. Returns an
        empty dict when no config is found.
        """
        file_name = file_name or self._config_file_name
        path = Path(path)
        # If a directory was provided look for the expected file
        if path.is_dir():
            expected = path / file_name
            # if file not found look for hidden variant
            if not expected.exists() and not file_name.startswith('.'):
                expected = path / ("." + file_name)
            if not expected.exists():
                return {}
            mod_name = file_name.replace(".py", "")
        # If a path to a file was passed
        elif path.is_file():
            expected = path
            mod_name = expected.name.replace(".py", "")
        else:
            # BUG FIX: previously a missing/invalid path left `expected` unbound
            # and raised UnboundLocalError below; treat it as "no config found".
            return {}
        # Get the spec and import module
        spec = spec_from_file_location(mod_name, expected)
        mod = module_from_spec(spec)
        spec.loader.exec_module(mod)
        # pull out the module's own definitions, skipping imports and dunders
        out = {}
        for item, value in mod.__dict__.items():
            # skip other modules or built-in stuff
            if inspect.ismodule(value) or item.startswith("__"):
                continue
            # skip classes defined in other modules
            if getattr(value, "__module__", mod.__name__) != mod.__name__:
                continue
            out[item] = value
        return out

    def update_options(self, primary_path=None, secondary_path=None):
        """
        Refresh the attributes to pull options from.

        Parameters
        ----------
        primary_path
            The directory of, or path to, the primary config. If None use
            the current working directory.
        secondary_path
            The directory of, or path to, the secondary config. If None use
            the user's home directory.

        Notes
        -----
        If a directory is supplied the top level of the directory is scanned
        for the expected filename; missing configs simply contribute nothing.
        """
        self.__state.clear()
        # define base, home, cwd (last takes priority)
        dirs = {
            'default': self.default_path,
            'secondary': Path(secondary_path or self.secondary_path),
            'primary': Path(primary_path or self.primary_path),
        }
        for directive, path in dirs.items():  # iterate locations and load, update state
            self._update_defaults(self._load_config(path))

    def set_option(self, **kwargs):
        """
        Set an option using the option as the key.

        Examples
        --------
        from figcon import Figcon
        config = Figcon()
        config.set_option(bob='ham')
        assert config.bob == 'ham'
        """
        self.__state.update(**kwargs)

    def __delattr__(self, item):
        """
        Delete an option (silently ignores unknown names).
        """
        self.__state.pop(item, None)
| [
"importlib._bootstrap.module_from_spec",
"pathlib.Path",
"pathlib.Path.cwd",
"pathlib.Path.home",
"inspect.ismodule",
"inspect.isfunction",
"importlib._bootstrap_external.spec_from_file_location"
] | [((3157, 3167), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (3161, 3167), False, 'from pathlib import Path\n'), ((3795, 3833), 'importlib._bootstrap_external.spec_from_file_location', 'spec_from_file_location', (['mod', 'expected'], {}), '(mod, expected)\n', (3818, 3833), False, 'from importlib._bootstrap_external import spec_from_file_location\n'), ((3848, 3870), 'importlib._bootstrap.module_from_spec', 'module_from_spec', (['spec'], {}), '(spec)\n', (3864, 3870), False, 'from importlib._bootstrap import module_from_spec\n'), ((1371, 1381), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (1379, 1381), False, 'from pathlib import Path\n'), ((1430, 1441), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (1439, 1441), False, 'from pathlib import Path\n'), ((2652, 2676), 'inspect.isfunction', 'inspect.isfunction', (['obj2'], {}), '(obj2)\n', (2670, 2676), False, 'import inspect\n'), ((5185, 5228), 'pathlib.Path', 'Path', (['(secondary_path or self.secondary_path)'], {}), '(secondary_path or self.secondary_path)\n', (5189, 5228), False, 'from pathlib import Path\n'), ((5253, 5292), 'pathlib.Path', 'Path', (['(primary_path or self.primary_path)'], {}), '(primary_path or self.primary_path)\n', (5257, 5292), False, 'from pathlib import Path\n'), ((3278, 3288), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (3282, 3288), False, 'from pathlib import Path\n'), ((3448, 3478), 'pathlib.Path', 'Path', (["(path / ('.' + file_name))"], {}), "(path / ('.' + file_name))\n", (3452, 3478), False, 'from pathlib import Path\n'), ((4094, 4117), 'inspect.ismodule', 'inspect.ismodule', (['value'], {}), '(value)\n', (4110, 4117), False, 'import inspect\n')] |
import pytest
from hypothesis import given, example
import hypothesis.strategies as strat
from vault_anyconfig.vault_anyconfig import VaultAnyConfig
# NOTE(review): ("C") is a plain string, not a 1-tuple — hypothesis accepts it
# here, but ("C",) is the conventional spelling. Category "C" excludes control
# and other non-printable characters.
@given(
    contents=strat.text(min_size=1, alphabet=strat.characters(blacklist_categories=("C"))),
    secret_key=strat.text(min_size=1, alphabet=strat.characters(blacklist_categories=("C"))),
)
@example(contents="aoeu", secret_key="data")
@example(contents="aoeu", secret_key="metadata")
def test_detect_kv_v1(contents, secret_key, gen_vault_response_kv1, gen_vault_response_kv2):
    """
    Tests that kv1 is detected correctly
    """
    # Build both KV v1 and KV v2 style Vault read responses for the same payload.
    read_response_v1 = gen_vault_response_kv1(contents, secret_key)
    read_response_v2 = gen_vault_response_kv2(contents, secret_key)
    # The name-mangled private helper must accept the v1 shape and reject the v2 shape.
    assert VaultAnyConfig._VaultAnyConfig__is_key_value_v1(read_response_v1, secret_key)
    assert not VaultAnyConfig._VaultAnyConfig__is_key_value_v1(read_response_v2, secret_key)
# NOTE(review): ("C") is a plain string, not a 1-tuple — hypothesis accepts it
# here, but ("C",) is the conventional spelling.
@given(
    contents=strat.text(min_size=1, alphabet=strat.characters(blacklist_categories=("C"))),
    secret_key=strat.text(min_size=1, alphabet=strat.characters(blacklist_categories=("C"))),
)
@example(contents="aoeu", secret_key="data")
@example(contents="aoeu", secret_key="metadata")
def test_detect_kv_v2(contents, secret_key, gen_vault_response_kv1, gen_vault_response_kv2):
    """
    Tests that kv2 is detected correctly
    """
    # Build both KV v1 and KV v2 style Vault read responses for the same payload.
    read_response_v1 = gen_vault_response_kv1(contents, secret_key)
    read_response_v2 = gen_vault_response_kv2(contents, secret_key)
    # The name-mangled private helper must accept the v2 shape and reject the v1 shape.
    assert VaultAnyConfig._VaultAnyConfig__is_key_value_v2(read_response_v2)
    assert not VaultAnyConfig._VaultAnyConfig__is_key_value_v2(read_response_v1)
| [
"vault_anyconfig.vault_anyconfig.VaultAnyConfig._VaultAnyConfig__is_key_value_v2",
"hypothesis.example",
"hypothesis.strategies.characters",
"vault_anyconfig.vault_anyconfig.VaultAnyConfig._VaultAnyConfig__is_key_value_v1"
] | [((349, 392), 'hypothesis.example', 'example', ([], {'contents': '"""aoeu"""', 'secret_key': '"""data"""'}), "(contents='aoeu', secret_key='data')\n", (356, 392), False, 'from hypothesis import given, example\n'), ((394, 441), 'hypothesis.example', 'example', ([], {'contents': '"""aoeu"""', 'secret_key': '"""metadata"""'}), "(contents='aoeu', secret_key='metadata')\n", (401, 441), False, 'from hypothesis import given, example\n'), ((1110, 1153), 'hypothesis.example', 'example', ([], {'contents': '"""aoeu"""', 'secret_key': '"""data"""'}), "(contents='aoeu', secret_key='data')\n", (1117, 1153), False, 'from hypothesis import given, example\n'), ((1155, 1202), 'hypothesis.example', 'example', ([], {'contents': '"""aoeu"""', 'secret_key': '"""metadata"""'}), "(contents='aoeu', secret_key='metadata')\n", (1162, 1202), False, 'from hypothesis import given, example\n'), ((740, 817), 'vault_anyconfig.vault_anyconfig.VaultAnyConfig._VaultAnyConfig__is_key_value_v1', 'VaultAnyConfig._VaultAnyConfig__is_key_value_v1', (['read_response_v1', 'secret_key'], {}), '(read_response_v1, secret_key)\n', (787, 817), False, 'from vault_anyconfig.vault_anyconfig import VaultAnyConfig\n'), ((1501, 1566), 'vault_anyconfig.vault_anyconfig.VaultAnyConfig._VaultAnyConfig__is_key_value_v2', 'VaultAnyConfig._VaultAnyConfig__is_key_value_v2', (['read_response_v2'], {}), '(read_response_v2)\n', (1548, 1566), False, 'from vault_anyconfig.vault_anyconfig import VaultAnyConfig\n'), ((833, 910), 'vault_anyconfig.vault_anyconfig.VaultAnyConfig._VaultAnyConfig__is_key_value_v1', 'VaultAnyConfig._VaultAnyConfig__is_key_value_v1', (['read_response_v2', 'secret_key'], {}), '(read_response_v2, secret_key)\n', (880, 910), False, 'from vault_anyconfig.vault_anyconfig import VaultAnyConfig\n'), ((1582, 1647), 'vault_anyconfig.vault_anyconfig.VaultAnyConfig._VaultAnyConfig__is_key_value_v2', 'VaultAnyConfig._VaultAnyConfig__is_key_value_v2', (['read_response_v1'], {}), '(read_response_v1)\n', (1629, 
1647), False, 'from vault_anyconfig.vault_anyconfig import VaultAnyConfig\n'), ((205, 247), 'hypothesis.strategies.characters', 'strat.characters', ([], {'blacklist_categories': '"""C"""'}), "(blacklist_categories='C')\n", (221, 247), True, 'import hypothesis.strategies as strat\n'), ((299, 341), 'hypothesis.strategies.characters', 'strat.characters', ([], {'blacklist_categories': '"""C"""'}), "(blacklist_categories='C')\n", (315, 341), True, 'import hypothesis.strategies as strat\n'), ((966, 1008), 'hypothesis.strategies.characters', 'strat.characters', ([], {'blacklist_categories': '"""C"""'}), "(blacklist_categories='C')\n", (982, 1008), True, 'import hypothesis.strategies as strat\n'), ((1060, 1102), 'hypothesis.strategies.characters', 'strat.characters', ([], {'blacklist_categories': '"""C"""'}), "(blacklist_categories='C')\n", (1076, 1102), True, 'import hypothesis.strategies as strat\n')] |
"""BrandComparator2.ipynb
Server Link: https://colab.research.google.com/drive/15x-yWFGtF57rOCfi9tqEONlYWeGlkoml
[Colab Notebook with Explanation](https://colab.research.google.com/drive/1dYH5PAausru6lQy1dh5-aGC9S9DXO_bV?usp=sharing)
[Anvil App](https://NPFLBAAEVOXXUYZK.anvil.app/QED54JFBPJMZBQPITDWWVL75)
# Installing and importing
"""
import twint
import nest_asyncio
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import re as regex
from w3lib.html import replace_entities
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, RegexpTokenizer
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('vader_lexicon')
from wordcloud import WordCloud, STOPWORDS
# Models
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from textblob import TextBlob
# import flair
# Link to Client
import anvil.server
import anvil.media
anvil.server.connect("SARV2AEUWRM7TWXOPT2BQATM-NPFLBAAEVOXXUYZK")
"""# Text Processing Methods"""
def remove_by_regex(tweets, regexp):
    """Delete every match of *regexp* from the 'tweet' column.

    Args:
        tweets (pd.DataFrame): frame with a 'tweet' string column; mutated.
        regexp (re.Pattern): compiled pattern whose matches are removed.

    Returns:
        pd.DataFrame: the same frame, for chaining.
    """
    # FIX: the original called ``inplace=True`` on a ``.loc[:, 'tweet']``
    # slice, which triggers SettingWithCopyWarning and may not write back
    # to the DataFrame. Assigning the transformed column is reliable.
    tweets['tweet'] = tweets['tweet'].str.replace(regexp, '', regex=True)
    return tweets
def remove_urls(tweets):
    """Strip web links (http/https/www) out of every tweet."""
    url_pattern = regex.compile(r"(http?\://|https?\://|www)[^\s]+[\s]?")
    return remove_by_regex(tweets, url_pattern)
def remove_usernames(tweets):
    """Strip @handles (an '@' followed by non-space characters) from tweets."""
    handle_pattern = regex.compile(r"@\S+")
    return remove_by_regex(tweets, handle_pattern)
def html_entity(tweet):
    """Decode HTML entities (e.g. ``&amp;`` -> ``&``) in a single tweet."""
    decoded = replace_entities(tweet)
    return decoded
def remove_punctuation(tweet):
    """Keep only letters, digits, sentence punctuation and whitespace.

    Everything outside ``a-zA-Z0-9 . , ! ? / : ; " '`` and whitespace
    (hashtag signs, at-signs, asterisks, ...) is deleted.
    """
    # FIX: the original class used ``a-zA-z`` -- the A-z range also spans
    # ``[ \ ] ^ _ ` `` so those characters slipped through. ``a-zA-Z`` is
    # the intended letter range.
    pat = r'[^a-zA-Z0-9.,!?/:;"\'\s]'
    return regex.sub(pat, '', tweet)
def remove_emojis(tweets):
    """Remove emoji and pictographic characters from every tweet.

    The single character class below enumerates the major Unicode emoji
    blocks; all matches are deleted via remove_by_regex.
    """
    emoji_pattern = regex.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                               u"\U00002500-\U00002BEF"  # chinese characters
                               u"\U00002702-\U000027B0"
                               u"\U00002702-\U000027B0"
                               u"\U000024C2-\U0001F251"
                               u"\U0001f926-\U0001f937"
                               u"\U00010000-\U0010ffff"
                               u"\u2640-\u2642"
                               u"\u2600-\u2B55"
                               u"\u200d"
                               u"\u23cf"
                               u"\u23e9"
                               u"\u231a"
                               u"\ufe0f"  # dingbats
                               u"\u3030"
                               "]+", regex.UNICODE)
    return remove_by_regex(tweets,emoji_pattern)
def subStrings(brand):
    """Return every contiguous substring of *brand*, ordered by start
    position and then by length (e.g. 'ab' -> ['a', 'ab', 'b'])."""
    size = len(brand)
    return [brand[start:end]
            for start in range(size)
            for end in range(start + 1, size + 1)]
"""# Model Methods"""
# MODELS
# VADER
# No code required
# TextBlob
def get_polarity(text):
    """Return TextBlob's polarity score for *text* (-1.0 to 1.0)."""
    blob = TextBlob(text)
    return blob.sentiment.polarity
# Flair
# flair_sentiment = flair.models.TextClassifier.load('en-sentiment')
# def get_sentiment(text):
# sentObj = flair.data.Sentence(text)
# flair_sentiment.predict(sentObj)
# total_sentiment = sentObj.labels
# return str(total_sentiment[0]).split()[0]
# BERT, GPT3 and others
#
"""# Visualisation Methods"""
def wordcloud(brand1, df1, brand2, df2):
    """Render a 2x2 grid of word clouds and return it as anvil media.

    Quadrants (same layout as the original notebook):
        1: brand1 positive   2: brand2 negative
        3: brand1 negative   4: brand2 positive

    Brand names and all their substrings are added to the stopword set so
    the clouds show what people say *about* a brand, not the brand itself.
    Tweets containing the brand name inside a word are skipped as well.
    """
    # Local name avoids shadowing the module-level nltk `stopwords` import.
    cloud_stopwords = set(STOPWORDS)
    tags = []
    tags.extend(brand1.split())
    tags.extend(brand2.split())
    tags.extend(subStrings("".join(brand1.split())))
    tags.extend(subStrings("".join(brand2.split())))
    cloud_stopwords.update(tags)
    # (subplot position, source frame, brand, sentiment) for each quadrant;
    # the four near-identical blocks of the original are folded into a loop.
    panels = [
        (1, df1, brand1, 'POSITIVE'),
        (2, df2, brand2, 'NEGATIVE'),
        (3, df1, brand1, 'NEGATIVE'),
        (4, df2, brand2, 'POSITIVE'),
    ]
    plt.figure(figsize=(10, 8), dpi=150)
    for position, df, brand, sentiment in panels:
        plt.subplot(2, 2, position)
        subset = df.loc[df['sentiment_type'] == sentiment]
        text = " ".join(word for word in subset.tweet if brand not in word)
        cloud = WordCloud(stopwords=cloud_stopwords).generate(text)
        plt.imshow(cloud, interpolation='bilinear')
        plt.title(str.title(brand) + ' ' + sentiment.lower())
        plt.axis("off")
    plt.savefig('cloud.png')
    mediaobj = anvil.media.from_file('cloud.png')
    return mediaobj
def bargraph(brand1, df1, brand2, df2):
    """Side-by-side bar charts of sentiment counts for the two brands,
    saved to 'bar.png' and returned as an anvil media object."""
    plt.figure(figsize=(15, 8), dpi=100)
    plt.subplot(1, 2, 1)
    counts1 = df1.sentiment_type.value_counts()
    counts1.plot(kind='bar', title=str.title(brand1) + ' tweets')
    plt.subplot(1, 2, 2)
    counts2 = df2.sentiment_type.value_counts()
    counts2.plot(color='brown', kind='bar', title=str.title(brand2) + ' tweets')
    plt.savefig('bar.png')
    mediaobj = anvil.media.from_file('bar.png')
    return mediaobj
def donutchart(brand1, df1, brand2, df2):
    """Nested donut chart -- brand1 sentiment counts on the outer ring,
    brand2 on the inner ring -- saved to 'donut.png' and returned as an
    anvil media object.

    NOTE(review): the labels assume ``value_counts()`` yields counts in
    Positive/Neutral/Negative order, but value_counts sorts by frequency;
    verify the label/wedge pairing against real data.
    """
    fig, ax = plt.subplots()
    ring_width = 0.3
    outer_counts = df1.sentiment_type.value_counts().to_numpy(dtype='float32')
    inner_counts = df2.sentiment_type.value_counts().to_numpy(dtype='float32')
    labels = ['Positive', 'Neutral', 'Negative']
    ax.pie(outer_counts, labels=labels, radius=1,
           wedgeprops=dict(width=ring_width, edgecolor='w'),
           colors=['darkgreen', 'darkblue', 'darkred'])
    ax.pie(inner_counts, radius=1 - ring_width,
           wedgeprops=dict(width=ring_width, edgecolor='w'),
           colors=['green', 'blue', 'firebrick'])
    ax.set(aspect="equal",
           title=str.title(brand1) + ' (Outer) vs ' + str.title(brand2) + ' (Inner)')
    fig = plt.gcf()
    fig.set_size_inches(8, 8)
    fig.dpi = 150
    plt.savefig('donut.png')
    mediaobj = anvil.media.from_file('donut.png')
    return mediaobj
def piechart(brand1, brand2, posval1, negval1, posval2, negval2, pos1, neg1, pos2, neg2):
    """Two positive-vs-negative pie charts (one per brand), saved to
    'pie.png' and returned as an anvil media object.

    posval*/negval* are percentage figures shown in the legend; pos*/neg*
    are the raw counts used for the wedge sizes.
    """
    def _draw_half(index, brand, posval, negval, pos, neg):
        # One pie per brand; wedge order is positive (green) then negative (red).
        plt.subplot(1, 2, index)
        labels = ['Positive [' + str(posval) + '%]', 'Negative [' + str(negval) + '%]']
        sizes = [pos, neg]
        colors = ['chartreuse', 'red']
        patches, texts = plt.pie(sizes, colors=colors, startangle=90)
        plt.style.use('default')
        plt.legend(labels)
        plt.title(str.title(brand) + " Positive vs Negative")
        plt.axis('equal')

    _draw_half(1, brand1, posval1, negval1, pos1, neg1)
    _draw_half(2, brand2, posval2, negval2, pos2, neg2)
    fig = plt.gcf()
    fig.set_size_inches(10, 5)
    plt.savefig('pie.png')
    mediaobj = anvil.media.from_file('pie.png')
    return mediaobj
"""# Comparator Code"""
@anvil.server.callable
def comparator2(brand1, brand2, limit, model):
    """Compare Twitter sentiment for two brands and build comparison charts.

    Args:
        brand1 (str): first brand to search for.
        brand2 (str): second brand to search for.
        limit (int): maximum number of tweets to scrape per brand.
        model (int): sentiment model -- 1 = VADER, 2 = TextBlob
            (3 = Flair and 4 = BERT/GPT-3 are placeholders, not implemented).

    Returns:
        tuple: ``(winner, objlist, numoftweets)`` -- the brand with the
        higher positive/negative ratio, a list of anvil media chart objects
        (word cloud, bar graph, donut chart, pie chart), and the total
        number of tweets analysed.
    """
    brand1 = brand1.lower().strip()
    brand2 = brand2.lower().strip()
    print(brand1 + ' vs ' + brand2 + ' started')
    # Tweet extraction
    df1 = _scrape_tweets(brand1, limit)
    df2 = _scrape_tweets(brand2, limit)
    twint.storage.panda.clean()
    # Processing tweets: drop empty rows, non-English tweets and duplicates
    df1 = df1.dropna(how='all')
    df2 = df2.dropna(how='all')
    df1.drop(df1[df1['language'] != 'en'].index, inplace=True)
    df2.drop(df2[df2['language'] != 'en'].index, inplace=True)
    df1.drop_duplicates(inplace=True)
    df2.drop_duplicates(inplace=True)
    df1 = _drop_brand_accounts(df1, brand1)
    df2 = _drop_brand_accounts(df2, brand2)
    stop = stopwords.words('english')
    df1 = _clean_tweet_text(df1, stop)
    df2 = _clean_tweet_text(df2, stop)
    # Number of tweets
    print('Size of dataset for', '1st brand: ' + str(df1.shape[0]),
          '2nd brand: ' + str(df2.shape[0]), sep='\n')
    numoftweets = df1.shape[0] + df2.shape[0]
    # Feeding into model
    df1 = _score_sentiments(df1, model)
    df2 = _score_sentiments(df2, model)
    # Saving tweets
    df1.to_excel('tweets1.xlsx', index=False)
    df2.to_excel('tweets2.xlsx', index=False)
    # Statistics
    counts1 = df1.sentiment_type.value_counts()
    counts2 = df2.sentiment_type.value_counts()
    pos1 = counts1['POSITIVE']
    neg1 = counts1['NEGATIVE']
    total1 = pos1 + neg1 + counts1['NEUTRAL']
    posval1 = round(pos1 * 100 / total1, 2)
    negval1 = round(neg1 * 100 / total1, 2)
    # BUG FIX: brand2's positive count and total previously read df1, so
    # brand2's percentages were computed from brand1's data.
    pos2 = counts2['POSITIVE']
    neg2 = counts2['NEGATIVE']
    total2 = pos2 + neg2 + counts2['NEUTRAL']
    posval2 = round(pos2 * 100 / total2, 2)
    negval2 = round(neg2 * 100 / total2, 2)
    objlist = [
        wordcloud(brand1, df1, brand2, df2),
        bargraph(brand1, df1, brand2, df2),
        donutchart(brand1, df1, brand2, df2),
        piechart(brand1, brand2, posval1, negval1, posval2, negval2,
                 pos1, neg1, pos2, neg2),
    ]
    # Guard against a zero negative share (previously a ZeroDivisionError).
    ratio1 = round(posval1 / negval1, 4) if negval1 else float('inf')
    ratio2 = round(posval2 / negval2, 4) if negval2 else float('inf')
    winner = brand1 if ratio1 > ratio2 else brand2
    del df1
    del df2
    return winner, objlist, numoftweets


def _scrape_tweets(search_term, limit):
    """Scrape up to *limit* recent English tweets (retweets excluded)
    matching *search_term* via twint; returns a trimmed DataFrame."""
    twint.storage.panda.clean()
    c = twint.Config()
    c.Search = search_term
    c.Lang = 'en'
    c.Hide_output = True
    c.Limit = limit
    c.Filter_retweets = True
    c.Pandas = True
    twint.run.Search(c)
    df = twint.storage.panda.Tweets_df
    return df[['username', 'name', 'id', 'language', 'tweet']]


def _drop_brand_accounts(df, brand):
    """Drop tweets posted by accounts whose handle or display name contains
    the brand itself (self-promotion skews sentiment), then drop the
    metadata columns that are no longer needed."""
    not_in_username = df['username'].map(lambda x: brand not in x)
    not_in_name = df['name'].map(lambda x: brand not in x)
    df = df[not_in_username & not_in_name]
    df = df.drop(columns=['id', 'username', 'name', 'language'])
    return df


def _clean_tweet_text(df, stop):
    """Normalise the 'tweet' column: strip @handles, URLs, HTML entities,
    punctuation and emojis; lowercase; remove English stopwords."""
    df = remove_usernames(df)
    df = remove_urls(df)
    df = df.applymap(html_entity)
    df = df.applymap(lambda s: s.lower())
    # BUG FIX: the stopword pass previously discarded its result (and
    # iterated characters rather than words) -- it was a no-op.
    df['tweet'] = df['tweet'].apply(
        lambda sentence: ' '.join(
            word for word in sentence.split() if word not in stop))
    df = df.applymap(remove_punctuation)
    df = remove_emojis(df)
    return df


def _score_sentiments(df, model):
    """Attach a 'sentiment_type' column (POSITIVE/NEUTRAL/NEGATIVE) using
    the selected model; models 3/4 are not implemented and leave df as-is."""
    if model == 1:
        # VADER compound score
        analyzer = SentimentIntensityAnalyzer()
        df['overall'] = [analyzer.polarity_scores(x)['compound'] for x in df['tweet']]
        score_col = 'overall'
    elif model == 2:
        # TextBlob polarity
        df['polarity'] = df['tweet'].apply(get_polarity)
        score_col = 'polarity'
    elif model in (3, 4):
        # Flair / BERT / GPT-3: placeholders, intentionally disabled upstream
        return df
    else:
        print("Recheck code")
        return df
    df['sentiment_type'] = ''
    df.loc[df[score_col] > 0, 'sentiment_type'] = 'POSITIVE'
    df.loc[df[score_col] == 0, 'sentiment_type'] = 'NEUTRAL'
    df.loc[df[score_col] < 0, 'sentiment_type'] = 'NEGATIVE'
    return df
"""# Comparator Call"""
nest_asyncio.apply()
anvil.server.wait_forever() | [
"nltk.download",
"re.compile",
"nest_asyncio.apply",
"twint.storage.panda.clean",
"matplotlib.pyplot.imshow",
"textblob.TextBlob",
"nltk.sentiment.vader.SentimentIntensityAnalyzer",
"nltk.corpus.stopwords.words",
"matplotlib.pyplot.style.use",
"w3lib.html.replace_entities",
"matplotlib.pyplot.ax... | [((611, 633), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (624, 633), False, 'import nltk\n'), ((634, 660), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (647, 660), False, 'import nltk\n'), ((661, 691), 'nltk.download', 'nltk.download', (['"""vader_lexicon"""'], {}), "('vader_lexicon')\n", (674, 691), False, 'import nltk\n'), ((12505, 12525), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (12523, 12525), False, 'import nest_asyncio\n'), ((1374, 1397), 'w3lib.html.replace_entities', 'replace_entities', (['tweet'], {}), '(tweet)\n', (1390, 1397), False, 'from w3lib.html import replace_entities\n'), ((1481, 1506), 're.sub', 'regex.sub', (['pat', '""""""', 'tweet'], {}), "(pat, '', tweet)\n", (1490, 1506), True, 'import re as regex\n'), ((1555, 1674), 're.compile', 'regex.compile', (['"""[😀-🙏🌀-🗿🚀-\U0001f6ff\U0001f1e0-🇿─-⯯✂-➰✂-➰Ⓜ-🉑🤦-🤷𐀀-\U0010ffff♀-♂☀-⭕\u200d⏏⏩⌚️〰]+"""', 'regex.UNICODE'], {}), "(\n '[😀-🙏🌀-🗿🚀-\\U0001f6ff\\U0001f1e0-🇿─-⯯✂-➰✂-➰Ⓜ-🉑🤦-🤷𐀀-\\U0010ffff♀-♂☀-⭕\\u200d⏏⏩⌚️〰]+'\n , regex.UNICODE)\n", (1568, 1674), True, 'import re as regex\n'), ((3696, 3718), 'nltk.corpus.stopwords.update', 'stopwords.update', (['tags'], {}), '(tags)\n', (3712, 3718), False, 'from nltk.corpus import stopwords\n'), ((3724, 3760), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)', 'dpi': '(150)'}), '(figsize=(10, 8), dpi=150)\n', (3734, 3760), True, 'import matplotlib.pyplot as plt\n'), ((3766, 3786), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (3777, 3786), True, 'import matplotlib.pyplot as plt\n'), ((3985, 4033), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud1'], {'interpolation': '"""bilinear"""'}), "(wordcloud1, interpolation='bilinear')\n", (3995, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4083, 4098), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4091, 4098), True, 
'import matplotlib.pyplot as plt\n'), ((4104, 4124), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (4115, 4124), True, 'import matplotlib.pyplot as plt\n'), ((4323, 4371), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud2'], {'interpolation': '"""bilinear"""'}), "(wordcloud2, interpolation='bilinear')\n", (4333, 4371), True, 'import matplotlib.pyplot as plt\n'), ((4421, 4436), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4429, 4436), True, 'import matplotlib.pyplot as plt\n'), ((4442, 4462), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (4453, 4462), True, 'import matplotlib.pyplot as plt\n'), ((4661, 4709), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud3'], {'interpolation': '"""bilinear"""'}), "(wordcloud3, interpolation='bilinear')\n", (4671, 4709), True, 'import matplotlib.pyplot as plt\n'), ((4759, 4774), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4767, 4774), True, 'import matplotlib.pyplot as plt\n'), ((4780, 4800), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (4791, 4800), True, 'import matplotlib.pyplot as plt\n'), ((4999, 5047), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud4'], {'interpolation': '"""bilinear"""'}), "(wordcloud4, interpolation='bilinear')\n", (5009, 5047), True, 'import matplotlib.pyplot as plt\n'), ((5097, 5112), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5105, 5112), True, 'import matplotlib.pyplot as plt\n'), ((5117, 5141), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cloud.png"""'], {}), "('cloud.png')\n", (5128, 5141), True, 'import matplotlib.pyplot as plt\n'), ((5254, 5290), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 8)', 'dpi': '(100)'}), '(figsize=(15, 8), dpi=100)\n', (5264, 5290), True, 'import matplotlib.pyplot as plt\n'), ((5295, 5315), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (5306, 5315), True, 'import matplotlib.pyplot as plt\n'), ((5409, 5429), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (5420, 5429), True, 'import matplotlib.pyplot as plt\n'), ((5537, 5559), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bar.png"""'], {}), "('bar.png')\n", (5548, 5559), True, 'import matplotlib.pyplot as plt\n'), ((5682, 5696), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5694, 5696), True, 'import matplotlib.pyplot as plt\n'), ((6241, 6250), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6248, 6250), True, 'import matplotlib.pyplot as plt\n'), ((6300, 6324), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""donut.png"""'], {}), "('donut.png')\n", (6311, 6324), True, 'import matplotlib.pyplot as plt\n'), ((6481, 6501), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (6492, 6501), True, 'import matplotlib.pyplot as plt\n'), ((6665, 6709), 'matplotlib.pyplot.pie', 'plt.pie', (['sizes'], {'colors': 'colors', 'startangle': '(90)'}), '(sizes, colors=colors, startangle=90)\n', (6672, 6709), True, 'import matplotlib.pyplot as plt\n'), ((6713, 6737), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""default"""'], {}), "('default')\n", (6726, 6737), True, 'import matplotlib.pyplot as plt\n'), ((6742, 6760), 'matplotlib.pyplot.legend', 'plt.legend', (['labels'], {}), '(labels)\n', (6752, 6760), True, 'import matplotlib.pyplot as plt\n'), ((6822, 6839), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (6830, 6839), True, 'import matplotlib.pyplot as plt\n'), ((6844, 6864), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (6855, 6864), True, 'import matplotlib.pyplot as plt\n'), ((7024, 7068), 'matplotlib.pyplot.pie', 'plt.pie', (['sizes'], {'colors': 'colors', 'startangle': '(90)'}), '(sizes, colors=colors, 
startangle=90)\n', (7031, 7068), True, 'import matplotlib.pyplot as plt\n'), ((7072, 7096), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""default"""'], {}), "('default')\n", (7085, 7096), True, 'import matplotlib.pyplot as plt\n'), ((7101, 7119), 'matplotlib.pyplot.legend', 'plt.legend', (['labels'], {}), '(labels)\n', (7111, 7119), True, 'import matplotlib.pyplot as plt\n'), ((7181, 7198), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (7189, 7198), True, 'import matplotlib.pyplot as plt\n'), ((7209, 7218), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7216, 7218), True, 'import matplotlib.pyplot as plt\n'), ((7253, 7275), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pie.png"""'], {}), "('pie.png')\n", (7264, 7275), True, 'import matplotlib.pyplot as plt\n'), ((7580, 7607), 'twint.storage.panda.clean', 'twint.storage.panda.clean', ([], {}), '()\n', (7605, 7607), False, 'import twint\n'), ((7616, 7630), 'twint.Config', 'twint.Config', ([], {}), '()\n', (7628, 7630), False, 'import twint\n'), ((7767, 7786), 'twint.run.Search', 'twint.run.Search', (['c'], {}), '(c)\n', (7783, 7786), False, 'import twint\n'), ((7895, 7922), 'twint.storage.panda.clean', 'twint.storage.panda.clean', ([], {}), '()\n', (7920, 7922), False, 'import twint\n'), ((7950, 7969), 'twint.run.Search', 'twint.run.Search', (['c'], {}), '(c)\n', (7966, 7969), False, 'import twint\n'), ((8078, 8105), 'twint.storage.panda.clean', 'twint.storage.panda.clean', ([], {}), '()\n', (8103, 8105), False, 'import twint\n'), ((8910, 8936), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (8925, 8936), False, 'from nltk.corpus import stopwords\n'), ((1191, 1249), 're.compile', 'regex.compile', (['"""(http?\\\\://|https?\\\\://|www)[^\\\\s]+[\\\\s]?"""'], {}), "('(http?\\\\://|https?\\\\://|www)[^\\\\s]+[\\\\s]?')\n", (1204, 1249), True, 'import re as regex\n'), ((1314, 1336), 're.compile', 'regex.compile', 
(['"""@\\\\S+"""'], {}), "('@\\\\S+')\n", (1327, 1336), True, 'import re as regex\n'), ((9784, 9812), 'nltk.sentiment.vader.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (9810, 9812), False, 'from nltk.sentiment.vader import SentimentIntensityAnalyzer\n'), ((3069, 3083), 'textblob.TextBlob', 'TextBlob', (['text'], {}), '(text)\n', (3077, 3083), False, 'from textblob import TextBlob\n'), ((3931, 3961), 'wordcloud.WordCloud', 'WordCloud', ([], {'stopwords': 'stopwords'}), '(stopwords=stopwords)\n', (3940, 3961), False, 'from wordcloud import WordCloud, STOPWORDS\n'), ((4269, 4299), 'wordcloud.WordCloud', 'WordCloud', ([], {'stopwords': 'stopwords'}), '(stopwords=stopwords)\n', (4278, 4299), False, 'from wordcloud import WordCloud, STOPWORDS\n'), ((4607, 4637), 'wordcloud.WordCloud', 'WordCloud', ([], {'stopwords': 'stopwords'}), '(stopwords=stopwords)\n', (4616, 4637), False, 'from wordcloud import WordCloud, STOPWORDS\n'), ((4945, 4975), 'wordcloud.WordCloud', 'WordCloud', ([], {'stopwords': 'stopwords'}), '(stopwords=stopwords)\n', (4954, 4975), False, 'from wordcloud import WordCloud, STOPWORDS\n')] |
# Copyright (C) 2021-2022 by the FEM on Colab authors
#
# This file is part of FEM on Colab-related actions.
#
# SPDX-License-Identifier: MIT
"""Tests for the open_in_colab_workflow.get_drive_url package."""
import os
import tempfile
import pytest
from open_in_colab_workflow.get_drive_url import get_drive_url
@pytest.mark.skipif("RCLONE_CONFIG_COLAB_TOKEN" not in os.environ, reason="Missing rclone environment variables")
def test_get_drive_url_existing(root_directory: str) -> None:
    """A previously uploaded file resolves to its known Google Drive URL."""
    upload_dir = os.path.join(root_directory, "tests", "data", "upload_file_to_google_drive")
    existing_file = os.path.join(upload_dir, "existing_file.txt")
    url = get_drive_url(os.path.relpath(existing_file, root_directory), "GitHub/open_in_colab_workflow")
    assert url == "https://drive.google.com/open?id=1MUq5LVW4ScYDE1f1sHRi3XDupYe5jOra"
@pytest.mark.skipif("RCLONE_CONFIG_COLAB_TOKEN" not in os.environ, reason="Missing rclone environment variables")
def test_get_drive_url_new(root_directory: str) -> None:
    """A file that was never uploaded yields no Google Drive URL."""
    upload_dir = os.path.join(root_directory, "tests", "data", "upload_file_to_google_drive")
    with tempfile.NamedTemporaryFile(dir=upload_dir) as tmp:
        url = get_drive_url(os.path.relpath(tmp.name, root_directory), "GitHub/open_in_colab_workflow")
        assert url is None
| [
"os.path.join",
"pytest.mark.skipif",
"tempfile.NamedTemporaryFile",
"open_in_colab_workflow.get_drive_url.get_drive_url",
"os.path.relpath"
] | [((317, 434), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('RCLONE_CONFIG_COLAB_TOKEN' not in os.environ)"], {'reason': '"""Missing rclone environment variables"""'}), "('RCLONE_CONFIG_COLAB_TOKEN' not in os.environ, reason=\n 'Missing rclone environment variables')\n", (335, 434), False, 'import pytest\n'), ((963, 1080), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('RCLONE_CONFIG_COLAB_TOKEN' not in os.environ)"], {'reason': '"""Missing rclone environment variables"""'}), "('RCLONE_CONFIG_COLAB_TOKEN' not in os.environ, reason=\n 'Missing rclone environment variables')\n", (981, 1080), False, 'import pytest\n'), ((587, 632), 'os.path.join', 'os.path.join', (['root_directory', '"""tests"""', '"""data"""'], {}), "(root_directory, 'tests', 'data')\n", (599, 632), False, 'import os\n'), ((653, 738), 'os.path.join', 'os.path.join', (['data_directory', '"""upload_file_to_google_drive"""', '"""existing_file.txt"""'], {}), "(data_directory, 'upload_file_to_google_drive', 'existing_file.txt'\n )\n", (665, 738), False, 'import os\n'), ((754, 800), 'os.path.relpath', 'os.path.relpath', (['absolute_path', 'root_directory'], {}), '(absolute_path, root_directory)\n', (769, 800), False, 'import os\n'), ((811, 872), 'open_in_colab_workflow.get_drive_url.get_drive_url', 'get_drive_url', (['relative_path', '"""GitHub/open_in_colab_workflow"""'], {}), "(relative_path, 'GitHub/open_in_colab_workflow')\n", (824, 872), False, 'from open_in_colab_workflow.get_drive_url import get_drive_url\n'), ((1223, 1268), 'os.path.join', 'os.path.join', (['root_directory', '"""tests"""', '"""data"""'], {}), "(root_directory, 'tests', 'data')\n", (1235, 1268), False, 'import os\n'), ((1293, 1352), 'os.path.join', 'os.path.join', (['data_directory', '"""upload_file_to_google_drive"""'], {}), "(data_directory, 'upload_file_to_google_drive')\n", (1305, 1352), False, 'import os\n'), ((1362, 1412), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'dir': 'data_subdirectory'}), 
'(dir=data_subdirectory)\n', (1389, 1412), False, 'import tempfile\n'), ((1445, 1486), 'os.path.relpath', 'os.path.relpath', (['tmp.name', 'root_directory'], {}), '(tmp.name, root_directory)\n', (1460, 1486), False, 'import os\n'), ((1501, 1562), 'open_in_colab_workflow.get_drive_url.get_drive_url', 'get_drive_url', (['relative_path', '"""GitHub/open_in_colab_workflow"""'], {}), "(relative_path, 'GitHub/open_in_colab_workflow')\n", (1514, 1562), False, 'from open_in_colab_workflow.get_drive_url import get_drive_url\n')] |
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""
SharepointInstance is the only class defined in this file.
SharepointInstance : Derived class from Instance Base class, representing a
Sharepoint Instance, and to perform operations on that instance
SharepointInstance:
_restore_common_options_json() -- setter for common options property in restore
_restore_json() -- Method which creates json for a restore job
_restore_v1_json() -- Method which creates json for v1 client for a restore job
"""
from cvpysdk.exception import SDKException
from ..instance import Instance
from past.builtins import basestring
class SharepointInstance(Instance):
""" Class representing a sharepoint instance, and to perform operations on that instance
"""
def _restore_browse_option_json(self, value):
"""setter for the Browse options for restore in Json"""
if not isinstance(value, dict):
raise SDKException('Instance', '101')
time_range_dict = {}
if value.get('to_time'):
time_range_dict['toTime'] = value.get('to_time')
self._browse_restore_json = {
"commCellId": int(self._commcell_object.commcell_id),
"showDeletedItems": value.get("showDeletedItems", False),
"backupset": {
"clientName": self._agent_object._client_object.client_name,
"appName": self._agent_object.agent_name,
"clientId": int(self._instance['clientId']),
"backupsetId": int(self._restore_association['backupsetId'])
},
"timeRange": time_range_dict
}
def _restore_common_options_json(self, value):
"""setter for the Common options of in restore JSON"""
if not isinstance(value, dict):
raise SDKException('Instance', '101')
self._commonoption_restore_json = {
"allVersion": True,
"offlineMiningRestore": False,
"skip": not value.get("unconditional_overwrite", False),
"restoreACLs": False,
"erExSpdbPathRestore": True,
"unconditionalOverwrite": value.get("unconditional_overwrite", False),
"siteReplicationrestore": False,
"append": False
}
def _restore_destination_json(self, value):
"""setter for the destination restore option in restore JSON"""
if not isinstance(value, dict):
raise SDKException('Subclient', '101')
self._destination_restore_json = {
"inPlace": value.get("in_place", True),
"destClient": {
"clientName": value.get("client_name", ""),
"clientId": value.get("client_id", -1)
}
}
def _restore_json(self, **kwargs):
"""
Creates json required for restore job
Kwargs:
paths (list) -- list of sites or webs to be restored
Example : [
"MB\\https://cvdevtenant.sharepoint.com/sites/TestSite\\/\\Shared Documents\\TestFolder",
"MB\\https://cvdevtenant.sharepoint.com/sites/TestSite\\/\\Lists\\TestList",
]
Returns:
rest_json (dict) -- dictionary with parameters set required for a restore job
"""
if(kwargs.get("v1",False)):
return self._restore_v1_json(**kwargs)
rest_json = super(SharepointInstance, self)._restore_json(**kwargs)
rest_json["taskInfo"]["task"]["initiatedFrom"] = 1
rest_json["taskInfo"]["subTasks"][0]["options"]["restoreOptions"]["sharePointDocRstOption"] = {}
rest_json["taskInfo"]["subTasks"][0]["options"]["restoreOptions"]\
["sharePointRstOption"]= {
"sharePointDocument": True,
"spRestoreToDisk": {
"restoreToDiskPath": "",
"restoreToDisk": False
}
}
rest_json["taskInfo"]["subTasks"][0]["options"]["commonOpts"] = {
"notifyUserOnJobCompletion": False
}
return rest_json
def _restore_v1_json(self, **kwargs):
    """
    Creates json required for restore job for v1 client
    Kwargs:
        paths (list) -- list of sites or webs to be restored
            Example : [
                "MB\\https://cvdevtenant.sharepoint.com/sites/TestSite\\/\\Shared Documents\\TestFolder",
                "MB\\https://cvdevtenant.sharepoint.com/sites/TestSite\\/\\Lists\\TestList",
            ]
    Returns:
        rest_json (dict) -- dictionary with parameters set required for a restore job
    """
    # Merge an explicit 'restore_option' dict (if any) with the remaining
    # keyword arguments into a single flat options dict.
    restore_option = {}
    if kwargs.get("restore_option"):
        restore_option = kwargs["restore_option"]
        for key in kwargs:
            if not key == "restore_option":
                restore_option[key] = kwargs[key]
    else:
        restore_option.update(kwargs)
    if self._restore_association is None:
        self._restore_association = self._instance
    # Normalise / default the individual options
    if restore_option.get('copy_precedence') is None:
        restore_option['copy_precedence'] = 0
    if restore_option.get('overwrite') is not None:
        restore_option['unconditional_overwrite'] = restore_option['overwrite']
    if restore_option.get('live_browse'):
        restore_option['liveBrowse'] = True
    else:
        restore_option['liveBrowse'] = False
    # restore_option should use client key for destination client info
    client = restore_option.get("client", self._agent_object._client_object)
    # NOTE(review): 'basestring' is Python 2 only — presumably provided by a
    # compatibility import elsewhere in this module; confirm.
    if isinstance(client, basestring):
        client = self._commcell_object.clients.get(client)
    restore_option["client_name"] = client.client_name
    restore_option["client_id"] = int(client.client_id)
    # set time zone
    from_time = restore_option.get("from_time", None)
    to_time = restore_option.get("to_time", None)
    # Epoch sentinels mean "no time filter", so they are dropped
    time_list = ['01/01/1970 00:00:00', '1/1/1970 00:00:00']
    if from_time and from_time not in time_list:
        restore_option["from_time"] = from_time
    if to_time and to_time not in time_list:
        restore_option["to_time"] = to_time
    # Each call below fills one self._*_json attribute consumed by the
    # request body assembled at the end.
    self._restore_browse_option_json(restore_option)
    self._restore_common_options_json(restore_option)
    self._restore_destination_json(restore_option)
    self._restore_fileoption_json(restore_option)
    self._restore_common_opts_json(restore_option)
    # A normal (indexed) restore must specify at least one path
    if not restore_option.get('index_free_restore', False):
        if restore_option.get("paths") == []:
            raise SDKException('Subclient', '104')
    request_json = {
        "taskInfo": {
            "associations": [self._restore_association],
            "task": {
                "taskType": 1,
                "initiatedFrom": 1
            },
            "subTasks": [{
                "subTask": {
                    "subTaskType": 3,
                    "operationType": 1001
                },
                "options": {
                    "restoreOptions": {
                        "sharePointDocRstOption": {
                            "isWorkflowAlertsRestoreOnly": False
                        },
                        "browseOption": self._browse_restore_json,
                        "commonOptions": self._commonoption_restore_json,
                        "destination": self._destination_restore_json,
                        "fileOption": self._fileoption_restore_json,
                        "sharePointRstOption": {
                            "sharePointDocument": True,
                            "spRestoreToDisk": {
                                "restoreToDiskPath": "",
                                "restoreToDisk": False
                            }
                        },
                    },
                    "commonOpts": self._commonopts_restore_json
                }
            }]
        }
    }
    return request_json
| [
"cvpysdk.exception.SDKException"
] | [((1687, 1718), 'cvpysdk.exception.SDKException', 'SDKException', (['"""Instance"""', '"""101"""'], {}), "('Instance', '101')\n", (1699, 1718), False, 'from cvpysdk.exception import SDKException\n'), ((2558, 2589), 'cvpysdk.exception.SDKException', 'SDKException', (['"""Instance"""', '"""101"""'], {}), "('Instance', '101')\n", (2570, 2589), False, 'from cvpysdk.exception import SDKException\n'), ((3200, 3232), 'cvpysdk.exception.SDKException', 'SDKException', (['"""Subclient"""', '"""101"""'], {}), "('Subclient', '101')\n", (3212, 3232), False, 'from cvpysdk.exception import SDKException\n'), ((7496, 7528), 'cvpysdk.exception.SDKException', 'SDKException', (['"""Subclient"""', '"""104"""'], {}), "('Subclient', '104')\n", (7508, 7528), False, 'from cvpysdk.exception import SDKException\n')] |
from concurrent.futures import ThreadPoolExecutor
import queue
class BlockingThreadPoolExecutor(ThreadPoolExecutor):
    """A ThreadPoolExecutor whose submit() blocks when the queue is full.

    The stock executor uses an unbounded internal work queue, so a fast
    producer can build an arbitrarily large backlog. Swapping in a bounded
    queue.Queue makes submit() block until a worker frees a slot,
    providing natural back-pressure.
    """

    def __init__(self, max_workers=None, thread_name_prefix=''):
        super().__init__(max_workers=max_workers,
                         thread_name_prefix=thread_name_prefix)
        # BUG FIX: use the executor's *resolved* worker count rather than
        # the raw argument. With the default max_workers=None the original
        # built queue.Queue(maxsize=None), which makes every put() (i.e.
        # every submit()) raise TypeError on the `maxsize > 0` comparison.
        self._work_queue = queue.Queue(maxsize=self._max_workers)
| [
"queue.Queue"
] | [((300, 332), 'queue.Queue', 'queue.Queue', ([], {'maxsize': 'max_workers'}), '(maxsize=max_workers)\n', (311, 332), False, 'import queue\n')] |
import csv

# Dataset identifier; every input/output path below is derived from it.
name = "100_9x9Aya"
# One output file per game outcome (win / loss / draw), closed at the end
# of the script after all rows have been routed.
win = open("Data/Results-Split/" + name + "_win.txt", 'w+')
loss = open("Data/Results-Split/" + name + "_loss.txt", 'w+')
draw = open("Data/Results-Split/" + name + "_draw.txt", 'w+')
def convert(_input):
    """Route one CSV row to the win/loss/draw file based on its last field.

    The row is re-serialised comma-separated; the final field is written
    as "1" (win), "0" (loss) or "2" (any other value: draw).
    """
    prefix = ""
    for field in _input[:-1]:
        prefix += field + ","
    label = _input[len(_input) - 1]
    if label == "1":
        win.write(prefix + "1\n")
    elif label == "0":
        loss.write(prefix + "0\n")
    else:
        draw.write(prefix + "2\n")
# Stream the binary-encoded rows and split them into one file per outcome.
with open("Data/Binary/" + name + "_binary.txt", newline='') as file:
    reader = csv.reader(file)
    for row in reader:
        convert(row)
# (the 'with' statement closes the input file automatically; the explicit
#  file.close() that used to sit inside the block was redundant)
win.close()
loss.close()
draw.close()
| [
"csv.reader"
] | [((676, 692), 'csv.reader', 'csv.reader', (['file'], {}), '(file)\n', (686, 692), False, 'import csv\n')] |
# Check http://doc.qt.io/qtcreator/creator-debugging-helpers.html
# for more details or look at qttypes.py, stdtypes.py, boosttypes.py
# for more complex examples.
from dumper import Children, SubItem, UnnamedSubItem, DumperBase
from utils import DisplayFormat, TypeCode
from qttypes import *
import struct
####################### Your code below #######################
### Part 1
def qdump__Foo(d, value):
    """Dumper for type Foo: shows "[i,j]" as the value, with i, j and
    their sum as expandable children."""
    i = value["i"].integer()
    j = value["j"].integer()
    d.putValue("[%d,%d]" % (i,j))
    d.putExpandable()
    if d.isExpanded():
        with Children(d):
            d.putSubItem('j', value["j"])
            # Don't try this at home :-)
            # and the "i" (that is the one in quotes stand for type integer...
            # Builds a synthetic int child from the raw bytes of i.
            d.putSubItem('i', d.createValue(struct.pack("i",i), d.intType()))
            # Synthetic child that does not exist in the debuggee at all
            with SubItem(d, "sum"):
                d.putValue(i+j)
                d.putType(d.intType()) # not really needed though
### Part 2
def qdump__MyNameSpace__Foo(d, value):
    """Dumper for MyNameSpace::Foo (namespace separators are encoded as
    '__' in the dumper function name)."""
    d.putValue("Secret!")
    d.putPlainChildren(value)
### Part 3
#def qdump__Money(d, value):
# amount = value["m_amount"].floatingPoint()
# currency = value["m_currency"].integer()
# d.putValue("%s %s" % (("EUR" if (currency == 0) else "USD"), amount))
# d.putPlainChildren(value)
### Part 4
def qdump__Money(d, value):
    """Dumper for Money: display via the object's own toString() method."""
    # Calls Money::toString() in the debuggee and shows the QString result
    str = d.call("@QString", value, "toString")
    d.putStringValue(str)
    d.putPlainChildren(value)
### Part 5
def qdump__FooOrBar(d, value):
    """Dumper for FooOrBar: evaluate the debuggee's fooOrBarToString()
    helper on the object at this value's address and show the result."""
    str=d.parseAndEvaluate("fooOrBarToString(*((FooOrBar*)%s))" % value.laddress)
    d.putStringValue(str)
    d.putPlainChildren(value)
#### Part 6
def qdump__UserID(d, value):
    """Dumper for UserID: resolve the numeric id to a display string by
    calling EmployeeDatabase::instance().lookup() inside the debuggee."""
    employeeID = value.integer()
    str=d.parseAndEvaluate("EmployeeDatabase::instance().lookup(%d)" % employeeID)
    d.putStringValue(str)
def qdump__UserIDList(d, value):
    """Dumper for UserIDList: display it as a QList of UserID values."""
    # Register a typedef so list items show as 'UserID' rather than 'int'
    d.createTypedefedType(d.lookupType("int"), "UserID");
    d.formats[d.currentIName] = DisplayFormat.DirectQListStorage
    d.putItem(value.cast("QList<UserID>"))
| [
"dumper.SubItem",
"dumper.Children",
"struct.pack"
] | [((565, 576), 'dumper.Children', 'Children', (['d'], {}), '(d)\n', (573, 576), False, 'from dumper import Children, SubItem, UnnamedSubItem, DumperBase\n'), ((861, 878), 'dumper.SubItem', 'SubItem', (['d', '"""sum"""'], {}), "(d, 'sum')\n", (868, 878), False, 'from dumper import Children, SubItem, UnnamedSubItem, DumperBase\n'), ((797, 816), 'struct.pack', 'struct.pack', (['"""i"""', 'i'], {}), "('i', i)\n", (808, 816), False, 'import struct\n')] |
#!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 <NAME> <<EMAIL>>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
# Board description consumed by the Espruino build/documentation tooling.
info = {
 'name' : "Puck.js",
 'link' :  [ "http://www.espruino.com/PuckJS" ],
 # Serial console defaults (can be overridden at runtime)
 'default_console' : "EV_SERIAL1",
 'default_console_tx' : "D28",
 'default_console_rx' : "D29",
 'default_console_baudrate' : "9600",
 'variables' : 2250, # How many variables are allocated for Espruino to use. RAM will be overflowed if this number is too high and code won't compile.
 'bootloader' : 1,
 'binary_name' : 'espruino_%v_puckjs.hex',
 # Build configuration merged into the Makefile
 'build' : {
   'optimizeflags' : '-Os',
   'libraries' : [
     'BLUETOOTH',
     'NET',
     'GRAPHICS',
     'CRYPTO','SHA256','SHA512',
     'AES',
     'NFC',
     'NEOPIXEL',
     #'FILESYSTEM'
     #'TLS'
   ],
   'makefile' : [
     'DEFINES+=-DHAL_NFC_ENGINEERING_BC_FTPAN_WORKAROUND=1', # Looks like proper production nRF52s had this issue
     'DEFINES+=-DCONFIG_GPIO_AS_PINRESET', # Allow the reset pin to work
     'DEFINES+=-DBLUETOOTH_NAME_PREFIX=\'"Puck.js"\'',
     'DEFINES+=-DCUSTOM_GETBATTERY=jswrap_puck_getBattery',
     'DEFINES+=-DNFC_DEFAULT_URL=\'"https://puck-js.com/go"\'',
     'DFU_PRIVATE_KEY=targets/nrf5x_dfu/dfu_private_key.pem',
     'DFU_SETTINGS=--application-version 0xff --hw-version 52 --sd-req 0x8C',
     'INCLUDE += -I$(ROOT)/libs/puckjs',
     'WRAPPERSOURCES += libs/puckjs/jswrap_puck.c'
   ]
 }
};
# MCU description: nRF52832, 64kB RAM / 512kB flash.
chip = {
  'part' : "NRF52832",
  'family' : "NRF52",
  'package' : "QFN48",
  'ram' : 64,
  'flash' : 512,
  'speed' : 64,
  'usart' : 1,
  'spi' : 1,
  'i2c' : 1,
  'adc' : 1,
  'dac' : 0,
  # Flash layout reserved for saved JS code
  'saved_code' : {
    'address' : ((118 - 10) * 4096), # Bootloader takes pages 120-127, FS takes 118-119
    'page_size' : 4096,
    'pages' : 10,
    'flash_available' : 512 - ((31 + 8 + 2 + 10)*4) # Softdevice uses 31 pages of flash, bootloader 8, FS 2, code 10. Each page is 4 kb.
  },
};
# On-board peripherals and the GPIOs they are wired to.
devices = {
  'LED1' : { 'pin' : 'D5' },
  'LED2' : { 'pin' : 'D4' },
  'LED3' : { 'pin' : 'D3' },
  'IR' : { 'pin_anode' : 'D25', 'pin_cathode' : 'D26' },
  'BTN1' : { 'pin' : 'D0', 'pinstate' : 'IN_PULLDOWN' },
  'CAPSENSE' : { 'pin_rx' : 'D11', 'pin_tx' : 'D12' },
  'NFC': { 'pin_a':'D9', 'pin_b':'D10' },
  'MAG': { 'device': 'MAG3110',
           'pin_pwr':'D18',
           'pin_int':'D17',
           'pin_sda':'D20',
           'pin_scl':'D19' }
  # Pin D22 is used for clock when driving neopixels - as not specifying a pin seems to break things
};
# left-right, or top-bottom order
board = {
'bottom' : [ 'D28', 'D29', 'D30', 'D31'],
'right' : [ 'GND', '3V', 'D2', 'D1' ],
'left2' : [ 'D6','D7','D8','D11','D13','D14','D16','D23','D24','D27' ],
'right2' : [ 'D15' ],
'_notes' : {
'D11' : "Capacitive sense. D12 is connected to this pin via a 1 MOhm resistor",
'D28' : "If pulled up to 1 on startup, D28 and D29 become Serial1",
'D22' : "This is used as SCK when driving Neopixels with 'require('neopixel').write'"
}
};
board["_css"] = """
#board {
width: 800px;
height: 800px;
top: 0px;
left : 0px;
background-image: url(img/PUCKJS_.jpg);
}
#boardcontainer {
height: 900px;
}
#bottom {
top: 639px;
left: 291px;
}
#right {
top: 304px;
left: 640px;
}
.bottompin { width: 46px; }
.rightpin { height: 51px; }
.pinD6 { position:absolute; left: 560px; top: 419px;}
.pinD7 { position:absolute; left: 548px; top: 369px;}
.pinD8 { position:absolute; left: 512px; top: 398px;}
.pinD11 { position:absolute; left: 586px; top: 236px;}
.pinD13 { position:absolute; left: 500px; top: 293px;}
.pinD14 { position:absolute; left: 523px; top: 270px;}
.pinD15 { position:absolute; right: -483px; top: 268px;}
.pinD16 { position:absolute; left: 499px; top: 244px;}
.pinD23 { position:absolute; left: 157px; top: 438px;}
.pinD24 { position:absolute; left: 157px; top: 382px;}
.pinD27 { position:absolute; left: 244px; top: 581px;}
""";
def get_pins():
  """Build the pin list for the nRF52832 and tag the special functions
  (crystal, NFC antenna, ADC inputs, default UART) on the relevant pins."""
  pins = pinutils.generate_pins(0,31) # 32 General Purpose I/O Pins.
  # (pin name, function name) pairs applied in order below
  special_functions = [
    ("PD0",  "XL1"),
    ("PD1",  "XL2"),
    ("PD9",  "NFC1"),
    ("PD10", "NFC2"),
    ("PD2",  "ADC1_IN0"),
    ("PD3",  "ADC1_IN1"),
    ("PD4",  "ADC1_IN2"),
    ("PD5",  "ADC1_IN3"),
    ("PD28", "ADC1_IN4"),
    ("PD28", "USART1_TX"),
    ("PD29", "USART1_RX"),
    ("PD29", "ADC1_IN5"),
    ("PD30", "ADC1_IN6"),
    ("PD31", "ADC1_IN7"),
  ]
  for pin_name, function in special_functions:
    pinutils.findpin(pins, pin_name, True)["functions"][function] = 0
  # everything is non-5v tolerant
  for pin in pins:
    pin["functions"]["3.3"] = 0
  # The boot/reset button will function as a reset button in normal
  # operation. Pin reset on PD21 needs to be enabled on the nRF52832
  # device for this to work.
  return pins
| [
"pinutils.findpin",
"pinutils.generate_pins"
] | [((4535, 4564), 'pinutils.generate_pins', 'pinutils.generate_pins', (['(0)', '(31)'], {}), '(0, 31)\n', (4557, 4564), False, 'import pinutils\n'), ((4597, 4632), 'pinutils.findpin', 'pinutils.findpin', (['pins', '"""PD0"""', '(True)'], {}), "(pins, 'PD0', True)\n", (4613, 4632), False, 'import pinutils\n'), ((4658, 4693), 'pinutils.findpin', 'pinutils.findpin', (['pins', '"""PD1"""', '(True)'], {}), "(pins, 'PD1', True)\n", (4674, 4693), False, 'import pinutils\n'), ((4719, 4754), 'pinutils.findpin', 'pinutils.findpin', (['pins', '"""PD9"""', '(True)'], {}), "(pins, 'PD9', True)\n", (4735, 4754), False, 'import pinutils\n'), ((4781, 4817), 'pinutils.findpin', 'pinutils.findpin', (['pins', '"""PD10"""', '(True)'], {}), "(pins, 'PD10', True)\n", (4797, 4817), False, 'import pinutils\n'), ((4844, 4879), 'pinutils.findpin', 'pinutils.findpin', (['pins', '"""PD2"""', '(True)'], {}), "(pins, 'PD2', True)\n", (4860, 4879), False, 'import pinutils\n'), ((4910, 4945), 'pinutils.findpin', 'pinutils.findpin', (['pins', '"""PD3"""', '(True)'], {}), "(pins, 'PD3', True)\n", (4926, 4945), False, 'import pinutils\n'), ((4976, 5011), 'pinutils.findpin', 'pinutils.findpin', (['pins', '"""PD4"""', '(True)'], {}), "(pins, 'PD4', True)\n", (4992, 5011), False, 'import pinutils\n'), ((5042, 5077), 'pinutils.findpin', 'pinutils.findpin', (['pins', '"""PD5"""', '(True)'], {}), "(pins, 'PD5', True)\n", (5058, 5077), False, 'import pinutils\n'), ((5108, 5144), 'pinutils.findpin', 'pinutils.findpin', (['pins', '"""PD28"""', '(True)'], {}), "(pins, 'PD28', True)\n", (5124, 5144), False, 'import pinutils\n'), ((5175, 5211), 'pinutils.findpin', 'pinutils.findpin', (['pins', '"""PD28"""', '(True)'], {}), "(pins, 'PD28', True)\n", (5191, 5211), False, 'import pinutils\n'), ((5243, 5279), 'pinutils.findpin', 'pinutils.findpin', (['pins', '"""PD29"""', '(True)'], {}), "(pins, 'PD29', True)\n", (5259, 5279), False, 'import pinutils\n'), ((5311, 5347), 'pinutils.findpin', 'pinutils.findpin', 
(['pins', '"""PD29"""', '(True)'], {}), "(pins, 'PD29', True)\n", (5327, 5347), False, 'import pinutils\n'), ((5378, 5414), 'pinutils.findpin', 'pinutils.findpin', (['pins', '"""PD30"""', '(True)'], {}), "(pins, 'PD30', True)\n", (5394, 5414), False, 'import pinutils\n'), ((5445, 5481), 'pinutils.findpin', 'pinutils.findpin', (['pins', '"""PD31"""', '(True)'], {}), "(pins, 'PD31', True)\n", (5461, 5481), False, 'import pinutils\n')] |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import devlib
import json
import logging
import time
# Default energy measurements for each board
# Maps a board name to the instrument ('hwmon' or 'aep') used to sample its
# energy, plus any instrument-specific configuration.
DEFAULT_ENERGY_METER = {

    # ARM TC2: by default use HWMON
    'tc2' : {
        'instrument' : 'hwmon',
        'conf' : {
            # Per-cluster energy counters exposed by the board firmware
            'sites' : [ 'A7 Jcore', 'A15 Jcore' ],
            'kinds' : [ 'energy']
        }
    },

    # ARM Juno: by default use HWMON
    'juno' : {
        'instrument' : 'hwmon',
        'conf' : {
            'sites' : [ 'a53', 'a57' ],
            'kinds' : [ 'energy' ]
        }
    },

    # Hikey: by default use AEP (external ARM Energy Probe)
    'hikey' : {
        'instrument' : 'aep',
    }

}
class EnergyMeter(object):
    """Abstract base class for board energy probes.

    Use :meth:`getInstance` to obtain the process-wide meter configured
    for the target board; subclasses implement sample/reset/report.
    """

    # Process-wide singleton, created lazily by getInstance()
    _meter = None

    def __init__(self, target):
        self._target = target

    @staticmethod
    def getInstance(target, conf, force=False):
        """Return the shared meter for the board named in ``conf``.

        Args:
            target: the devlib target the meter samples.
            conf: test configuration dict; only the 'board' key is used.
            force: when True, rebuild the meter even if one already exists.

        Returns:
            The meter instance, or None for boards without a default probe.
        """
        if not force and EnergyMeter._meter:
            return EnergyMeter._meter

        # Initialize energy probe to board default
        board = conf['board'] if 'board' in conf else None
        if board not in DEFAULT_ENERGY_METER:
            return None
        emeter = DEFAULT_ENERGY_METER[board]
        logging.debug('%14s - using default energy meter for [%s]',
                      'EnergyMeter', board)

        instrument = emeter['instrument']
        if instrument == 'hwmon':
            EnergyMeter._meter = HWMon(target, emeter['conf'])
        elif instrument == 'aep':
            EnergyMeter._meter = Aep(target)
        return EnergyMeter._meter

    def sample(self):
        raise NotImplementedError('Missing implementation')

    def reset(self):
        raise NotImplementedError('Missing implementation')

    def report(self, out_dir):
        raise NotImplementedError('Missing implementation')
class HWMon(EnergyMeter):
    """Energy meter backed by the devlib HWMON instrument (TC2 / Juno)."""

    def __init__(self, target, hwmon_conf=None):
        super(HWMon, self).__init__(target)

        # The HWMon energy meter
        self._hwmon = None

        # Energy readings, keyed by normalised channel label:
        # {'last': ..., 'delta': ..., 'total': ...}
        self.readings = {}

        # Without the hwmon devlib module the meter degrades to a no-op
        if 'hwmon' not in self._target.modules:
            logging.info('%14s - HWMON module not enabled',
                    'EnergyMeter')
            logging.warning('%14s - Energy sampling disabled by configuration',
                    'EnergyMeter')
            return

        # Initialize HWMON instrument
        logging.info('%14s - Scanning for HWMON channels, may take some time...', 'EnergyMeter')
        self._hwmon = devlib.HwmonInstrument(self._target)

        # Configure channels for energy measurements
        logging.debug('%14s - Enabling channels %s', 'EnergyMeter', hwmon_conf)
        self._hwmon.reset(**hwmon_conf)

        # Logging enabled channels
        logging.info('%14s - Channels selected for energy sampling:',
                'EnergyMeter')
        for channel in self._hwmon.active_channels:
            logging.info('%14s - %s', 'EnergyMeter', channel.label)

    def sample(self):
        """Take one measurement and update last/delta/total per channel.

        Returns:
            The readings dict, or None when HWMON is unavailable.
        """
        if self._hwmon is None:
            return
        samples = self._hwmon.take_measurement()
        for s in samples:
            # Normalise the label, e.g. "A7 Jcore_energy" -> "A7_Jcore"
            label = s.channel.label\
                    .replace('_energy', '')\
                    .replace(" ", "_")
            value = s.value

            if label not in self.readings:
                # First sample for this channel: just record the baseline
                self.readings[label] = {
                        'last'  : value,
                        'delta' : 0,
                        'total' : 0
                        }
                continue

            self.readings[label]['delta'] = value - self.readings[label]['last']
            self.readings[label]['last']  = value
            self.readings[label]['total'] += self.readings[label]['delta']

        logging.debug('SAMPLE: %s', self.readings)
        return self.readings

    def reset(self):
        """Zero delta/total counters while keeping the sampled baseline."""
        if self._hwmon is None:
            return
        self.sample()
        for label in self.readings:
            self.readings[label]['delta'] = 0
            self.readings[label]['total'] = 0
        logging.debug('RESET: %s', self.readings)

    def report(self, out_dir, out_file='energy.json'):
        """Sample and dump per-cluster energy to <out_dir>/<out_file>.

        Returns:
            (clusters_nrg, nrg_file): the data dict and its JSON path,
            or None when HWMON is unavailable.
        """
        if self._hwmon is None:
            return
        # Retrive energy consumption data
        nrg = self.sample()
        # Reformat data for output generation, binding each hwmon channel
        # to the big or LITTLE cluster by core-name substring match
        clusters_nrg = {}
        for ch in nrg:
            nrg_total = nrg[ch]['total']
            logging.info('%14s - Energy [%16s]: %.6f',
                    'EnergyReport', ch, nrg_total)
            if self._target.little_core.upper() in ch.upper():
                clusters_nrg['LITTLE'] = '{:.6f}'.format(nrg_total)
            elif self._target.big_core.upper() in ch.upper():
                clusters_nrg['big'] = '{:.6f}'.format(nrg_total)
            else:
                logging.warning('%14s - Unable to bind hwmon channel [%s]'\
                        ' to a big.LITTLE cluster',
                        'EnergyReport', ch)
                clusters_nrg[ch] = '{:.6f}'.format(nrg_total)
        if 'LITTLE' not in clusters_nrg:
            logging.warning('%14s - No energy data for LITTLE cluster',
                    'EnergyMeter')
        if 'big' not in clusters_nrg:
            logging.warning('%14s - No energy data for big cluster',
                    'EnergyMeter')

        # Dump data as JSON file
        nrg_file = '{}/{}'.format(out_dir, out_file)
        with open(nrg_file, 'w') as ofile:
            json.dump(clusters_nrg, ofile, sort_keys=True, indent=4)

        return (clusters_nrg, nrg_file)
class Aep(EnergyMeter):
    """Energy meter backed by an ARM Energy Probe (AEP), channel pc1 only."""

    def __init__(self, target):
        super(Aep, self).__init__(target)

        # Energy readings
        self.readings = {}
        # Time (start and diff) for power measurment
        self.time = {}

        # Initialize instrument
        # Only one channel (first AEP channel: pc1 ... probe channel 1) is used
        self._aep = devlib.EnergyProbeInstrument(self._target, labels=["pc1"], resistor_values=[0.033])

        # Configure channels for energy measurements
        logging.debug('EnergyMeter - Enabling channels')
        self._aep.reset()

        # Logging enabled channels
        logging.info('%14s - Channels selected for energy sampling:\n%s',
                'EnergyMeter', str(self._aep.active_channels))

    def __calc_nrg(self, samples):
        # Energy estimate: mean sampled power x elapsed measurement window
        power = {'sum' : 0, 'count' : 0, 'avg' : 0}
        for s in samples:
            power['sum'] += s[1].value # s[1] ... power value of channel 1
            power['count'] += 1
        power['avg'] = power['sum'] / power['count']
        nrg = power['avg'] * self.time['diff']
        logging.debug('avg power: %.6f count: %s time: %.6f nrg: %.6f',
                power['avg'], power['count'], self.time['diff'] , nrg)
        return nrg

    def sample(self):
        """Stop the probe and store the energy accumulated since reset()."""
        if self._aep is None:
            return
        self.time['diff'] = time.time() - self.time['start']
        self._aep.stop()
        csv_data = self._aep.get_data("/tmp/aep.csv")
        samples = csv_data.measurements()
        value = self.__calc_nrg(samples)
        self.readings['last'] = value
        self.readings['delta'] = value
        self.readings['total'] = value
        logging.debug('SAMPLE: %s', self.readings)
        return self.readings

    def reset(self):
        """Restart the probe and the measurement-window timer."""
        if self._aep is None:
            return
        logging.debug('RESET: %s', self.readings)
        self._aep.start()
        self.time['start'] = time.time()

    def report(self, out_dir, out_file='energy.json'):
        """Sample and dump the measured energy to <out_dir>/<out_file>."""
        if self._aep is None:
            return
        # Retrieve energy consumption data
        nrg = self.sample()
        # Reformat data for output generation
        # (the single AEP channel is reported under the 'LITTLE' key)
        clusters_nrg = {}
        clusters_nrg['LITTLE'] = '{:.6f}'.format(self.readings['total'])
        # Dump data as JSON file
        nrg_file = '{}/{}'.format(out_dir, out_file)
        with open(nrg_file, 'w') as ofile:
            json.dump(clusters_nrg, ofile, sort_keys=True, indent=4)
        return (clusters_nrg, nrg_file)
# vim :set tabstop=4 shiftwidth=4 expandtab
| [
"logging.debug",
"logging.warning",
"logging.info",
"devlib.HwmonInstrument",
"devlib.EnergyProbeInstrument",
"time.time",
"json.dump"
] | [((2924, 3016), 'logging.info', 'logging.info', (['"""%14s - Scanning for HWMON channels, may take some time..."""', '"""EnergyMeter"""'], {}), "('%14s - Scanning for HWMON channels, may take some time...',\n 'EnergyMeter')\n", (2936, 3016), False, 'import logging\n'), ((3035, 3071), 'devlib.HwmonInstrument', 'devlib.HwmonInstrument', (['self._target'], {}), '(self._target)\n', (3057, 3071), False, 'import devlib\n'), ((3134, 3205), 'logging.debug', 'logging.debug', (['"""%14s - Enabling channels %s"""', '"""EnergyMeter"""', 'hwmon_conf'], {}), "('%14s - Enabling channels %s', 'EnergyMeter', hwmon_conf)\n", (3147, 3205), False, 'import logging\n'), ((3290, 3366), 'logging.info', 'logging.info', (['"""%14s - Channels selected for energy sampling:"""', '"""EnergyMeter"""'], {}), "('%14s - Channels selected for energy sampling:', 'EnergyMeter')\n", (3302, 3366), False, 'import logging\n'), ((4275, 4317), 'logging.debug', 'logging.debug', (['"""SAMPLE: %s"""', 'self.readings'], {}), "('SAMPLE: %s', self.readings)\n", (4288, 4317), False, 'import logging\n'), ((4578, 4619), 'logging.debug', 'logging.debug', (['"""RESET: %s"""', 'self.readings'], {}), "('RESET: %s', self.readings)\n", (4591, 4619), False, 'import logging\n'), ((6460, 6548), 'devlib.EnergyProbeInstrument', 'devlib.EnergyProbeInstrument', (['self._target'], {'labels': "['pc1']", 'resistor_values': '[0.033]'}), "(self._target, labels=['pc1'], resistor_values=\n [0.033])\n", (6488, 6548), False, 'import devlib\n'), ((6606, 6654), 'logging.debug', 'logging.debug', (['"""EnergyMeter - Enabling channels"""'], {}), "('EnergyMeter - Enabling channels')\n", (6619, 6654), False, 'import logging\n'), ((7189, 7311), 'logging.debug', 'logging.debug', (['"""avg power: %.6f count: %s time: %.6f nrg: %.6f"""', "power['avg']", "power['count']", "self.time['diff']", 'nrg'], {}), "('avg power: %.6f count: %s time: %.6f nrg: %.6f', power['avg'\n ], power['count'], self.time['diff'], nrg)\n", (7202, 7311), False, 'import 
logging\n'), ((7766, 7808), 'logging.debug', 'logging.debug', (['"""SAMPLE: %s"""', 'self.readings'], {}), "('SAMPLE: %s', self.readings)\n", (7779, 7808), False, 'import logging\n'), ((7918, 7959), 'logging.debug', 'logging.debug', (['"""RESET: %s"""', 'self.readings'], {}), "('RESET: %s', self.readings)\n", (7931, 7959), False, 'import logging\n'), ((8016, 8027), 'time.time', 'time.time', ([], {}), '()\n', (8025, 8027), False, 'import time\n'), ((1723, 1816), 'logging.debug', 'logging.debug', (['"""%14s - using default energy meter for [%s]"""', '"""EnergyMeter"""', "conf['board']"], {}), "('%14s - using default energy meter for [%s]', 'EnergyMeter',\n conf['board'])\n", (1736, 1816), False, 'import logging\n'), ((2660, 2722), 'logging.info', 'logging.info', (['"""%14s - HWMON module not enabled"""', '"""EnergyMeter"""'], {}), "('%14s - HWMON module not enabled', 'EnergyMeter')\n", (2672, 2722), False, 'import logging\n'), ((2755, 2841), 'logging.warning', 'logging.warning', (['"""%14s - Energy sampling disabled by configuration"""', '"""EnergyMeter"""'], {}), "('%14s - Energy sampling disabled by configuration',\n 'EnergyMeter')\n", (2770, 2841), False, 'import logging\n'), ((3452, 3510), 'logging.info', 'logging.info', (['"""%14s - %s"""', '"""EnergyMeter"""', 'channel.label'], {}), "('%14s - %s', 'EnergyMeter', channel.label)\n", (3464, 3510), False, 'import logging\n'), ((4946, 5019), 'logging.info', 'logging.info', (['"""%14s - Energy [%16s]: %.6f"""', '"""EnergyReport"""', 'ch', 'nrg_total'], {}), "('%14s - Energy [%16s]: %.6f', 'EnergyReport', ch, nrg_total)\n", (4958, 5019), False, 'import logging\n'), ((5607, 5681), 'logging.warning', 'logging.warning', (['"""%14s - No energy data for LITTLE cluster"""', '"""EnergyMeter"""'], {}), "('%14s - No energy data for LITTLE cluster', 'EnergyMeter')\n", (5622, 5681), False, 'import logging\n'), ((5760, 5831), 'logging.warning', 'logging.warning', (['"""%14s - No energy data for big cluster"""', 
'"""EnergyMeter"""'], {}), "('%14s - No energy data for big cluster', 'EnergyMeter')\n", (5775, 5831), False, 'import logging\n'), ((5998, 6054), 'json.dump', 'json.dump', (['clusters_nrg', 'ofile'], {'sort_keys': '(True)', 'indent': '(4)'}), '(clusters_nrg, ofile, sort_keys=True, indent=4)\n', (6007, 6054), False, 'import json\n'), ((7443, 7454), 'time.time', 'time.time', ([], {}), '()\n', (7452, 7454), False, 'import time\n'), ((8493, 8549), 'json.dump', 'json.dump', (['clusters_nrg', 'ofile'], {'sort_keys': '(True)', 'indent': '(4)'}), '(clusters_nrg, ofile, sort_keys=True, indent=4)\n', (8502, 8549), False, 'import json\n'), ((5332, 5444), 'logging.warning', 'logging.warning', (['"""%14s - Unable to bind hwmon channel [%s] to a big.LITTLE cluster"""', '"""EnergyReport"""', 'ch'], {}), "(\n '%14s - Unable to bind hwmon channel [%s] to a big.LITTLE cluster',\n 'EnergyReport', ch)\n", (5347, 5444), False, 'import logging\n')] |
import collections

# Lightweight container describing a single statement-parsing test case:
# inputs (table name, SQL statement, primary-key fields) and the expected
# parser outputs (entries and generated SQL).
TestCase = collections.namedtuple('TestCase', "description input_table_name input_commit_statement input_primary_key_fields expected_entries expected_sql")

tests=[
    # DDL maintenance statements (shrink space) are expected to be
    # rejected: no entries and no SQL should be produced.
    TestCase(
        description="Rejected",
        input_table_name="ALS2",
        input_commit_statement="""ALTER TABLE ALS2 shrink space check""",
        input_primary_key_fields=None,
        expected_entries=[],
        expected_sql=''
    )
]
| [
"collections.namedtuple"
] | [((31, 184), 'collections.namedtuple', 'collections.namedtuple', (['"""TestCase"""', '"""description input_table_name input_commit_statement input_primary_key_fields expected_entries expected_sql"""'], {}), "('TestCase',\n 'description input_table_name input_commit_statement input_primary_key_fields expected_entries expected_sql'\n )\n", (53, 184), False, 'import collections\n')] |
from awscfncli2.runner import Boto3Profile
class TestStackSelector(object):
    """Unit tests for runner.Boto3Profile."""

    def test_update(self):
        # assumes positional args are (profile_name, region_name) — TODO confirm
        s1 = Boto3Profile('foo','bar')
        s2 = Boto3Profile('foo', 'baz')
        assert s1.region_name == 'bar'
        # NOTE(review): the effect of update() is never asserted — presumably
        # s1 should take s2's region afterwards; verify and add an assertion.
        s1.update(s2)
| [
"awscfncli2.runner.Boto3Profile"
] | [((119, 145), 'awscfncli2.runner.Boto3Profile', 'Boto3Profile', (['"""foo"""', '"""bar"""'], {}), "('foo', 'bar')\n", (131, 145), False, 'from awscfncli2.runner import Boto3Profile\n'), ((158, 184), 'awscfncli2.runner.Boto3Profile', 'Boto3Profile', (['"""foo"""', '"""baz"""'], {}), "('foo', 'baz')\n", (170, 184), False, 'from awscfncli2.runner import Boto3Profile\n')] |
import tensorflow as tf
from tensorflow.keras.layers import (
BatchNormalization,
LeakyReLU,
Activation,
Conv1D,
ELU,
Add,
)
from functools import partial
from tensorflow.compat.v1.keras.initializers import he_uniform
def _get_conv_activation_layer(params):
    """
    :param params: dict that may carry a 'conv_activation' key
        ('ReLU' or 'ELU'); any other value selects the default.
    :returns: Required Activation function (LeakyReLU(0.2) by default).
    """
    conv_activation = params.get('conv_activation')
    if conv_activation == 'ReLU':
        # BUG FIX: ReLU is not in this module's tensorflow.keras.layers
        # import list, so this branch used to raise NameError at runtime.
        from tensorflow.keras.layers import ReLU
        return ReLU()
    elif conv_activation == 'ELU':
        return ELU()
    return LeakyReLU(0.2)
class UpSamplingLayer:
    """Conv1D -> BatchNorm -> LeakyReLU block used on the decoder path."""

    def __init__(self, channel_out, kernel_size=5, stride=1):
        conv = tf.keras.layers.Conv1D(
            channel_out,
            kernel_size=kernel_size,
            strides=stride,
            padding='SAME',
            dilation_rate=1,
        )
        # Assemble the whole pipeline in one go instead of repeated add()
        self.seq = tf.keras.Sequential(
            [conv, BatchNormalization(axis=-1), LeakyReLU(0.2)]
        )

    def __call__(self, x, training=True):
        return self.seq(x, training=training)
class Model:
    """Wave-U-Net style 1-D encoder/decoder network.

    The encoder applies a resnet block and then decimates the time axis by
    2 (``o[:, ::2]``) at each level; the decoder upsamples with
    nearest-neighbour resize and concatenates the matching encoder feature
    map (skip connection). The final logits come from a 1x1 Conv1D with a
    tanh activation. The graph is built eagerly in ``__init__`` and the
    result is stored in ``self.logits``.
    """

    def __init__(
        self,
        inputs,
        training=True,
        ksize=5,
        n_layers=12,
        channels_interval=24,
        logging=True,
    ):
        # NOTE(review): ksize is currently unused — kernel sizes below are
        # hard-coded; confirm whether it should feed resnet_block.
        conv_activation_layer = _get_conv_activation_layer({})
        kernel_initializer = he_uniform(seed=50)
        # Pre-configured Conv1D constructor shared by all resnet blocks
        conv1d_factory = partial(
            Conv1D,
            strides=(2),
            padding='same',
            kernel_initializer=kernel_initializer,
        )

        def resnet_block(input_tensor, filter_size):
            # Residual unit: 1x1 projection + two 5-wide convs with BN,
            # joined by an Add() and a shared activation.
            res = conv1d_factory(
                filter_size, (1), strides=(1), use_bias=False
            )(input_tensor)
            conv1 = conv1d_factory(filter_size, (5), strides=(1))(
                input_tensor
            )
            batch1 = BatchNormalization(axis=-1)(conv1, training=training)
            rel1 = conv_activation_layer(batch1)
            conv2 = conv1d_factory(filter_size, (5), strides=(1))(rel1)
            batch2 = BatchNormalization(axis=-1)(conv2, training=training)
            resconnection = Add()([res, batch2])
            rel2 = conv_activation_layer(resconnection)
            return rel2

        self.n_layers = n_layers
        self.channels_interval = channels_interval
        # Channel widths grow linearly with depth: 24, 48, ..., 288
        out_channels = [
            i * self.channels_interval for i in range(1, self.n_layers + 1)
        ]
        # Bottleneck between encoder and decoder
        self.middle = tf.keras.Sequential()
        self.middle.add(
            tf.keras.layers.Conv1D(
                self.n_layers * self.channels_interval,
                kernel_size=15,
                strides=1,
                padding='SAME',
                dilation_rate=1,
            )
        )
        self.middle.add(BatchNormalization(axis=-1))
        self.middle.add(LeakyReLU(0.2))

        # Decoder blocks mirror the encoder widths in reverse order
        decoder_out_channels_list = out_channels[::-1]
        self.decoder = []
        for i in range(self.n_layers):
            self.decoder.append(
                UpSamplingLayer(channel_out=decoder_out_channels_list[i])
            )
        # Output head: 1x1 conv to a single channel, tanh-squashed
        self.out = tf.keras.Sequential()
        self.out.add(
            tf.keras.layers.Conv1D(
                1,
                kernel_size=1,
                strides=1,
                padding='SAME',
                dilation_rate=1,
            )
        )
        self.out.add(Activation('tanh'))

        tmp = []
        o = inputs

        # Encoder: resnet block, save the activation for its skip
        # connection, then decimate the time axis by 2
        for i in range(self.n_layers):
            o = resnet_block(o, out_channels[i])
            tmp.append(o)
            o = o[:, ::2]
            if logging:
                print(o)

        o = self.middle(o, training=training)
        if logging:
            print(o)

        # Decoder: upsample x2 (nearest), concatenate the matching encoder
        # output on the channel axis, then apply the up-sampling block
        for i in range(self.n_layers):
            o = tf.image.resize(
                o, [tf.shape(o)[0], tf.shape(o)[1] * 2], method='nearest'
            )
            o = tf.concat([o, tmp[self.n_layers - i - 1]], axis=2)
            o = self.decoder[i](o, training=training)
            if logging:
                print(o)

        if logging:
            print(o, inputs)
        # Final skip connection from the raw input, then the output head
        o = tf.concat([o, inputs], axis=2)
        o = self.out(o, training=training)
        self.logits = o
| [
"tensorflow.shape",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.compat.v1.keras.initializers.he_uniform",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.concat",
"functools.partial",
"tensorflow.keras.layers.Conv1D",
"te... | [((538, 552), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (547, 552), False, 'from tensorflow.keras.layers import BatchNormalization, LeakyReLU, Activation, Conv1D, ELU, Add\n'), ((659, 680), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (678, 680), True, 'import tensorflow as tf\n'), ((1381, 1400), 'tensorflow.compat.v1.keras.initializers.he_uniform', 'he_uniform', ([], {'seed': '(50)'}), '(seed=50)\n', (1391, 1400), False, 'from tensorflow.compat.v1.keras.initializers import he_uniform\n'), ((1427, 1513), 'functools.partial', 'partial', (['Conv1D'], {'strides': '(2)', 'padding': '"""same"""', 'kernel_initializer': 'kernel_initializer'}), "(Conv1D, strides=2, padding='same', kernel_initializer=\n kernel_initializer)\n", (1434, 1513), False, 'from functools import partial\n'), ((2477, 2498), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (2496, 2498), True, 'import tensorflow as tf\n'), ((3119, 3140), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (3138, 3140), True, 'import tensorflow as tf\n'), ((4114, 4144), 'tensorflow.concat', 'tf.concat', (['[o, inputs]'], {'axis': '(2)'}), '([o, inputs], axis=2)\n', (4123, 4144), True, 'import tensorflow as tf\n'), ((521, 526), 'tensorflow.keras.layers.ELU', 'ELU', ([], {}), '()\n', (524, 526), False, 'from tensorflow.keras.layers import BatchNormalization, LeakyReLU, Activation, Conv1D, ELU, Add\n'), ((715, 828), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', (['channel_out'], {'kernel_size': 'kernel_size', 'strides': 'stride', 'padding': '"""SAME"""', 'dilation_rate': '(1)'}), "(channel_out, kernel_size=kernel_size, strides=stride,\n padding='SAME', dilation_rate=1)\n", (737, 828), True, 'import tensorflow as tf\n'), ((951, 978), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (969, 978), False, 'from tensorflow.keras.layers import 
BatchNormalization, LeakyReLU, Activation, Conv1D, ELU, Add\n'), ((1001, 1015), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1010, 1015), False, 'from tensorflow.keras.layers import BatchNormalization, LeakyReLU, Activation, Conv1D, ELU, Add\n'), ((2536, 2663), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', (['(self.n_layers * self.channels_interval)'], {'kernel_size': '(15)', 'strides': '(1)', 'padding': '"""SAME"""', 'dilation_rate': '(1)'}), "(self.n_layers * self.channels_interval, kernel_size=\n 15, strides=1, padding='SAME', dilation_rate=1)\n", (2558, 2663), True, 'import tensorflow as tf\n'), ((2788, 2815), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (2806, 2815), False, 'from tensorflow.keras.layers import BatchNormalization, LeakyReLU, Activation, Conv1D, ELU, Add\n'), ((2841, 2855), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2850, 2855), False, 'from tensorflow.keras.layers import BatchNormalization, LeakyReLU, Activation, Conv1D, ELU, Add\n'), ((3175, 3263), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', (['(1)'], {'kernel_size': '(1)', 'strides': '(1)', 'padding': '"""SAME"""', 'dilation_rate': '(1)'}), "(1, kernel_size=1, strides=1, padding='SAME',\n dilation_rate=1)\n", (3197, 3263), True, 'import tensorflow as tf\n'), ((3386, 3404), 'tensorflow.keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (3396, 3404), False, 'from tensorflow.keras.layers import BatchNormalization, LeakyReLU, Activation, Conv1D, ELU, Add\n'), ((3898, 3948), 'tensorflow.concat', 'tf.concat', (['[o, tmp[self.n_layers - i - 1]]'], {'axis': '(2)'}), '([o, tmp[self.n_layers - i - 1]], axis=2)\n', (3907, 3948), True, 'import tensorflow as tf\n'), ((1880, 1907), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (1898, 1907), False, 'from 
tensorflow.keras.layers import BatchNormalization, LeakyReLU, Activation, Conv1D, ELU, Add\n'), ((2076, 2103), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (2094, 2103), False, 'from tensorflow.keras.layers import BatchNormalization, LeakyReLU, Activation, Conv1D, ELU, Add\n'), ((2158, 2163), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (2161, 2163), False, 'from tensorflow.keras.layers import BatchNormalization, LeakyReLU, Activation, Conv1D, ELU, Add\n'), ((3814, 3825), 'tensorflow.shape', 'tf.shape', (['o'], {}), '(o)\n', (3822, 3825), True, 'import tensorflow as tf\n'), ((3830, 3841), 'tensorflow.shape', 'tf.shape', (['o'], {}), '(o)\n', (3838, 3841), True, 'import tensorflow as tf\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib import layers
from tensorflow.contrib import metrics
from tensorflow.contrib import framework
from tensorflow.contrib.learn import MetricSpec
from tensorflow.python.platform import tf_logging as logging
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
import math
from mlengine.digits import test_digits
logging.set_verbosity(logging.INFO)
# This sample shows how to write Tensorflow models using the high-level layers API
# in Tensorflow. Using high-level APIs, you do not have to define placeholders and
# variables yourself. Also, you will not need to write your own training loop by
# using the Estimator interface instead.
#
# WARNING: tensorflow.contrib.learn.* APIs are still experimental and can change in breaking ways
# as they mature. API stability will be ensured when tensorflow.contrib.learn becomes tensorflow.learn
# Download images and labels into mnist.test (10K images+labels) and mnist.train (60K images+labels)
# one_hot=False: labels are integer class indices (one-hot encoding is applied later via tf.one_hot);
# validation_size=0: all 60K training images stay in mnist.train.
mnist = read_data_sets("data", one_hot=False, reshape=True, validation_size=0)
# In memory training data for this simple case.
# When data is too large to fit in memory, use Tensorflow queues.
def train_data_input_fn():
    """Input function yielding shuffled (images, labels) batches from the in-memory training set."""
    images = tf.constant(mnist.train.images)
    labels = tf.constant(mnist.train.labels)
    # enqueue_many=True treats the first dimension as the per-example axis.
    return tf.train.shuffle_batch(
        [images, labels],
        batch_size=100,
        capacity=1100,
        min_after_dequeue=1000,
        enqueue_many=True,
    )
# Eval data is an in-memory constant here.
def eval_data_input_fn():
    """Input function returning the whole test set as in-memory constant tensors."""
    test_images = tf.constant(mnist.test.images)
    test_labels = tf.constant(mnist.test.labels)
    return test_images, test_labels
# Test data for a predictions run
def predict_input_fn():
    """Input function providing the hand-made digit images for a prediction run."""
    digits_tensor = tf.constant(test_digits)
    return tf.cast(digits_tensor, tf.float32)
# Model loss (not needed in INFER mode)
def conv_model_loss(Ylogits, Y_, mode):
    """Softmax cross-entropy loss scaled by 100; None in INFER mode (no labels)."""
    if mode not in (learn.ModeKeys.TRAIN, learn.ModeKeys.EVAL):
        return None
    one_hot_labels = tf.one_hot(Y_, 10)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=one_hot_labels)
    return tf.reduce_mean(cross_entropy) * 100
# Model optimiser (only needed in TRAIN mode)
def conv_model_train_op(loss, mode):
    """Adam training op with exponential learning-rate decay; None outside TRAIN mode."""
    if mode != learn.ModeKeys.TRAIN:
        return None

    def _decayed_learning_rate(lr, step):
        # to remove learning rate decay, return lr unchanged instead
        return 0.0001 + tf.train.exponential_decay(lr, step, -2000, math.e)

    return layers.optimize_loss(
        loss,
        framework.get_global_step(),
        learning_rate=0.003,
        optimizer="Adam",
        learning_rate_decay_fn=_decayed_learning_rate,
    )
# Model evaluation metric (not needed in INFER mode)
def conv_model_eval_metrics(classes, Y_, mode):
    """Accuracy metric dictionary for TRAIN/EVAL modes; None in INFER mode."""
    # You can name the fields of your metrics dictionary as you like.
    if mode == learn.ModeKeys.TRAIN or mode == learn.ModeKeys.EVAL:
        return {'accuracy': metrics.accuracy(classes, Y_)}
    return None
# Model
def conv_model(X, Y_, mode):
    """Model function: a three-layer convnet plus a dense layer for 28x28 MNIST digits.

    Args:
        X: batch of flattened 28x28 grayscale images.
        Y_: batch of integer class labels (0-9).
        mode: one of learn.ModeKeys (TRAIN / EVAL / INFER).
    Returns:
        learn.ModelFnOps bundling predictions, loss, train op and eval metrics
        (loss / train_op / metrics are None in modes where they do not apply).
    """
    # Reshape flat input vectors back into NHWC image tensors.
    XX = tf.reshape(X, [-1, 28, 28, 1])
    biasInit = tf.constant_initializer(0.1, dtype=tf.float32)
    # Three conv layers: 6 maps at stride 1, then 12 and 24 maps with stride-2 downsampling.
    Y1 = layers.conv2d(XX, num_outputs=6, kernel_size=[6, 6], biases_initializer=biasInit)
    Y2 = layers.conv2d(Y1, num_outputs=12, kernel_size=[5, 5], stride=2, biases_initializer=biasInit)
    Y3 = layers.conv2d(Y2, num_outputs=24, kernel_size=[4, 4], stride=2, biases_initializer=biasInit)
    Y4 = layers.flatten(Y3)
    Y5 = layers.relu(Y4, 200, biases_initializer=biasInit)
    # to deactivate dropout on the dense layer, set keep_prob=1
    # Dropout is only active during training.
    Y5d = layers.dropout(Y5, keep_prob=0.75, noise_shape=None, is_training=mode==learn.ModeKeys.TRAIN)
    Ylogits = layers.linear(Y5d, 10)
    predict = tf.nn.softmax(Ylogits)
    # Predicted class = argmax over the 10 softmax probabilities.
    classes = tf.cast(tf.argmax(predict, 1), tf.uint8)
    loss = conv_model_loss(Ylogits, Y_, mode)
    train_op = conv_model_train_op(loss, mode)
    eval_metrics = conv_model_eval_metrics(classes, Y_, mode)
    return learn.ModelFnOps(
        mode=mode,
        # You can name the fields of your predictions dictionary as you like.
        predictions={"predictions": predict, "classes": classes},
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metrics
    )
# Configuration to save a checkpoint every 1000 steps.
training_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=None, save_checkpoints_steps=1000, gpu_memory_fraction=0.9)
estimator=learn.Estimator(model_fn=conv_model, model_dir="checkpoints", config=training_config)
# Trains for 10000 additional steps saving checkpoints on a regular basis. The next
# training will resume from the checkpoint unless you delete the "checkpoints" folder.
estimator.fit(input_fn=train_data_input_fn, steps=10000)
# Single-pass evaluation on the in-memory test set.
estimator.evaluate(input_fn=eval_data_input_fn, steps=1)
# Run inference on the hand-made test digits and print each predicted class and its probabilities.
digits = estimator.predict(input_fn=predict_input_fn)
for digit in digits:
    print(str(digit['classes']), str(digit['predictions']))
"tensorflow.contrib.layers.conv2d",
"tensorflow.contrib.layers.flatten",
"tensorflow.contrib.learn.ModelFnOps",
"tensorflow.nn.softmax",
"tensorflow.contrib.layers.linear",
"tensorflow.contrib.learn.RunConfig",
"tensorflow.contrib.metrics.accuracy",
"tensorflow.train.exponential_decay",
"tensorflow.... | [((965, 1000), 'tensorflow.python.platform.tf_logging.set_verbosity', 'logging.set_verbosity', (['logging.INFO'], {}), '(logging.INFO)\n', (986, 1000), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((1603, 1673), 'tensorflow.contrib.learn.python.learn.datasets.mnist.read_data_sets', 'read_data_sets', (['"""data"""'], {'one_hot': '(False)', 'reshape': '(True)', 'validation_size': '(0)'}), "('data', one_hot=False, reshape=True, validation_size=0)\n", (1617, 1673), False, 'from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets\n'), ((4625, 4737), 'tensorflow.contrib.learn.RunConfig', 'tf.contrib.learn.RunConfig', ([], {'save_checkpoints_secs': 'None', 'save_checkpoints_steps': '(1000)', 'gpu_memory_fraction': '(0.9)'}), '(save_checkpoints_secs=None,\n save_checkpoints_steps=1000, gpu_memory_fraction=0.9)\n', (4651, 4737), True, 'import tensorflow as tf\n'), ((4745, 4835), 'tensorflow.contrib.learn.Estimator', 'learn.Estimator', ([], {'model_fn': 'conv_model', 'model_dir': '"""checkpoints"""', 'config': 'training_config'}), "(model_fn=conv_model, model_dir='checkpoints', config=\n training_config)\n", (4760, 4835), False, 'from tensorflow.contrib import learn\n'), ((3340, 3370), 'tensorflow.reshape', 'tf.reshape', (['X', '[-1, 28, 28, 1]'], {}), '(X, [-1, 28, 28, 1])\n', (3350, 3370), True, 'import tensorflow as tf\n'), ((3386, 3432), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {'dtype': 'tf.float32'}), '(0.1, dtype=tf.float32)\n', (3409, 3432), True, 'import tensorflow as tf\n'), ((3442, 3528), 'tensorflow.contrib.layers.conv2d', 'layers.conv2d', (['XX'], {'num_outputs': '(6)', 'kernel_size': '[6, 6]', 'biases_initializer': 'biasInit'}), '(XX, num_outputs=6, kernel_size=[6, 6], biases_initializer=\n biasInit)\n', (3455, 3528), False, 'from tensorflow.contrib import layers\n'), ((3535, 3631), 'tensorflow.contrib.layers.conv2d', 'layers.conv2d', (['Y1'], {'num_outputs': 
'(12)', 'kernel_size': '[5, 5]', 'stride': '(2)', 'biases_initializer': 'biasInit'}), '(Y1, num_outputs=12, kernel_size=[5, 5], stride=2,\n biases_initializer=biasInit)\n', (3548, 3631), False, 'from tensorflow.contrib import layers\n'), ((3637, 3733), 'tensorflow.contrib.layers.conv2d', 'layers.conv2d', (['Y2'], {'num_outputs': '(24)', 'kernel_size': '[4, 4]', 'stride': '(2)', 'biases_initializer': 'biasInit'}), '(Y2, num_outputs=24, kernel_size=[4, 4], stride=2,\n biases_initializer=biasInit)\n', (3650, 3733), False, 'from tensorflow.contrib import layers\n'), ((3739, 3757), 'tensorflow.contrib.layers.flatten', 'layers.flatten', (['Y3'], {}), '(Y3)\n', (3753, 3757), False, 'from tensorflow.contrib import layers\n'), ((3767, 3816), 'tensorflow.contrib.layers.relu', 'layers.relu', (['Y4', '(200)'], {'biases_initializer': 'biasInit'}), '(Y4, 200, biases_initializer=biasInit)\n', (3778, 3816), False, 'from tensorflow.contrib import layers\n'), ((3891, 3989), 'tensorflow.contrib.layers.dropout', 'layers.dropout', (['Y5'], {'keep_prob': '(0.75)', 'noise_shape': 'None', 'is_training': '(mode == learn.ModeKeys.TRAIN)'}), '(Y5, keep_prob=0.75, noise_shape=None, is_training=mode ==\n learn.ModeKeys.TRAIN)\n', (3905, 3989), False, 'from tensorflow.contrib import layers\n'), ((3998, 4020), 'tensorflow.contrib.layers.linear', 'layers.linear', (['Y5d', '(10)'], {}), '(Y5d, 10)\n', (4011, 4020), False, 'from tensorflow.contrib import layers\n'), ((4035, 4057), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['Ylogits'], {}), '(Ylogits)\n', (4048, 4057), True, 'import tensorflow as tf\n'), ((4281, 4430), 'tensorflow.contrib.learn.ModelFnOps', 'learn.ModelFnOps', ([], {'mode': 'mode', 'predictions': "{'predictions': predict, 'classes': classes}", 'loss': 'loss', 'train_op': 'train_op', 'eval_metric_ops': 'eval_metrics'}), "(mode=mode, predictions={'predictions': predict, 'classes':\n classes}, loss=loss, train_op=train_op, eval_metric_ops=eval_metrics)\n", (4297, 4430), False, 'from 
tensorflow.contrib import learn\n'), ((2107, 2137), 'tensorflow.constant', 'tf.constant', (['mnist.test.images'], {}), '(mnist.test.images)\n', (2118, 2137), True, 'import tensorflow as tf\n'), ((2139, 2169), 'tensorflow.constant', 'tf.constant', (['mnist.test.labels'], {}), '(mnist.test.labels)\n', (2150, 2169), True, 'import tensorflow as tf\n'), ((2249, 2273), 'tensorflow.constant', 'tf.constant', (['test_digits'], {}), '(test_digits)\n', (2260, 2273), True, 'import tensorflow as tf\n'), ((4080, 4101), 'tensorflow.argmax', 'tf.argmax', (['predict', '(1)'], {}), '(predict, 1)\n', (4089, 4101), True, 'import tensorflow as tf\n'), ((1851, 1882), 'tensorflow.constant', 'tf.constant', (['mnist.train.images'], {}), '(mnist.train.images)\n', (1862, 1882), True, 'import tensorflow as tf\n'), ((1884, 1915), 'tensorflow.constant', 'tf.constant', (['mnist.train.labels'], {}), '(mnist.train.labels)\n', (1895, 1915), True, 'import tensorflow as tf\n'), ((2690, 2717), 'tensorflow.contrib.framework.get_global_step', 'framework.get_global_step', ([], {}), '()\n', (2715, 2717), False, 'from tensorflow.contrib import framework\n'), ((3179, 3208), 'tensorflow.contrib.metrics.accuracy', 'metrics.accuracy', (['classes', 'Y_'], {}), '(classes, Y_)\n', (3195, 3208), False, 'from tensorflow.contrib import metrics\n'), ((2458, 2476), 'tensorflow.one_hot', 'tf.one_hot', (['Y_', '(10)'], {}), '(Y_, 10)\n', (2468, 2476), True, 'import tensorflow as tf\n'), ((2878, 2929), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['lr', 'step', '(-2000)', 'math.e'], {}), '(lr, step, -2000, math.e)\n', (2904, 2929), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# usage:
# fwhmSweep.py 56530 7 14
# fwhmSweep.py <mjd> <file number first> <file nimber last>
import glob
import pyfits
import sys, os
import numpy as np
from scipy import ndimage
from pylab import *
import scipy
# Build the night's data directory from the MJD given on the command line.
directory="/data/ecam/%s/" % (sys.argv[1])
# if directory exist?
if os.path.exists(directory) != True:
    sys.exit("Error: no directory %s " % (directory))
print(directory)
# First and last exposure numbers of the focus sweep.
f1=int(sys.argv[2])
f2=int(sys.argv[3])
fwhmArr=[]  # FWHM per frame, arcsec
fwhmPix=[]  # FWHM per frame, pixels
focArr=[]   # focus value per frame (from the FOCUS header card)
for i in range(f1,f2):
    ff='gimg-%04d' % (i)
    fname='%s%s.fits' % (directory,ff)
    if os.path.exists(fname):
        hdulist=pyfits.open(fname,'readonly')
        hdr = hdulist[0].header
        imType=hdr['IMAGETYP']
        # Only 'object' frames contain a star image to measure.
        if imType.strip() == 'object':
            dat = np.array(hdulist[0].data)
            datMax=dat.max() ;
            datMin=dat.min();
            # Half-maximum threshold between min and max counts.
            datHm=datMin+(datMax-datMin)/2.0
            cx,cy=ndimage.measurements.center_of_mass(dat>datHm)
            ll=np.where(dat > datHm);
            nsq=len (ll[0])
            # FWHM estimate: diameter of a circle whose area equals the above-half-max pixel count.
            fw=2.0*np.sqrt(nsq/3.14); fwhmPix.append(fw)
            # 0.428 is presumably the plate scale in arcsec/pixel -- TODO confirm.
            fw1=fw*0.428; fwhmArr.append(fw1)
            if 'FOCUS' in hdr:
                foc=hdr['FOCUS']
            else: foc=None
            focArr.append(foc)
            print("%s, centerX=%4i, centerY=%4i, fwhm = %4.2f pix, fwhm = %4.2f arcsec, foc=%s" % (ff, cy, cx, fw, fw1, foc))
        else:
            print("%s -- %s " % (ff,imType))
        hdulist.close()
    else:
        print("%s -- no file" % (ff))
#plot(focArr, fwhmArr, 'ro')
#xlabel('Focus')
#ylabel('fwhm, arcsec')
#show()
# Plot range with 10% margins around the measured FWHM values (pixels).
arrayPix = scipy.array(fwhmPix)
minPix=arrayPix.min()-(arrayPix.max()-arrayPix.min())*0.1
maxPix=arrayPix.max()+(arrayPix.max()-arrayPix.min())*0.1
arrayFoc = scipy.array(focArr)
# Fit a parabola FWHM(focus); its vertex is the best-focus estimate.
polycoeffs = scipy.polyfit(arrayFoc, arrayPix, 2)
yfit = scipy.polyval(polycoeffs, arrayFoc)
foc=-polycoeffs[1]/(2.0*polycoeffs[0])
print("Focus = ",foc)
from scipy.interpolate import interp1d
xnew = np.linspace(arrayFoc.min(),arrayFoc.max(), 20)
yfitNew = scipy.polyval(polycoeffs, xnew)
# NOTE(review): 'f2' below shadows the last-exposure number parsed above; rename one of them.
f2 =interp1d(xnew, yfitNew, kind='cubic')
ax1 = subplot(111)
title("ecam focus sweep")
ylim([minPix,maxPix])
xlabel('Focus')
ylabel('FWHM, pixels')
ax1.grid(True, color="blue")
plot(xnew, f2(xnew), '--')
plot(focArr, fwhmPix, 'r.', markersize=10)
#ax1.annotate('local min = %s' % foc,xy=(foc, arrayPix.max()), xytext=(foc, 5),)
# Second y-axis showing the same data converted to arcseconds.
ax2 = twinx()
plot(focArr, fwhmArr, 'r.')
ylabel('FWHM, arcsec')
ax2.yaxis.tick_right()
ylim([minPix*0.428,maxPix*0.428])
#ax2.grid(True, color="red")
show()
| [
"os.path.exists",
"numpy.sqrt",
"numpy.where",
"scipy.polyfit",
"scipy.array",
"scipy.interpolate.interp1d",
"numpy.array",
"scipy.polyval",
"sys.exit",
"scipy.ndimage.measurements.center_of_mass",
"pyfits.open"
] | [((1653, 1673), 'scipy.array', 'scipy.array', (['fwhmPix'], {}), '(fwhmPix)\n', (1664, 1673), False, 'import scipy\n'), ((1802, 1821), 'scipy.array', 'scipy.array', (['focArr'], {}), '(focArr)\n', (1813, 1821), False, 'import scipy\n'), ((1835, 1871), 'scipy.polyfit', 'scipy.polyfit', (['arrayFoc', 'arrayPix', '(2)'], {}), '(arrayFoc, arrayPix, 2)\n', (1848, 1871), False, 'import scipy\n'), ((1879, 1914), 'scipy.polyval', 'scipy.polyval', (['polycoeffs', 'arrayFoc'], {}), '(polycoeffs, arrayFoc)\n', (1892, 1914), False, 'import scipy\n'), ((2080, 2111), 'scipy.polyval', 'scipy.polyval', (['polycoeffs', 'xnew'], {}), '(polycoeffs, xnew)\n', (2093, 2111), False, 'import scipy\n'), ((2116, 2153), 'scipy.interpolate.interp1d', 'interp1d', (['xnew', 'yfitNew'], {'kind': '"""cubic"""'}), "(xnew, yfitNew, kind='cubic')\n", (2124, 2153), False, 'from scipy.interpolate import interp1d\n'), ((315, 340), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (329, 340), False, 'import sys, os\n'), ((356, 405), 'sys.exit', 'sys.exit', (["('Error: no directory %s ' % directory)"], {}), "('Error: no directory %s ' % directory)\n", (364, 405), False, 'import sys, os\n'), ((597, 618), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (611, 618), False, 'import sys, os\n'), ((636, 666), 'pyfits.open', 'pyfits.open', (['fname', '"""readonly"""'], {}), "(fname, 'readonly')\n", (647, 666), False, 'import pyfits\n'), ((787, 812), 'numpy.array', 'np.array', (['hdulist[0].data'], {}), '(hdulist[0].data)\n', (795, 812), True, 'import numpy as np\n'), ((944, 992), 'scipy.ndimage.measurements.center_of_mass', 'ndimage.measurements.center_of_mass', (['(dat > datHm)'], {}), '(dat > datHm)\n', (979, 992), False, 'from scipy import ndimage\n'), ((1007, 1028), 'numpy.where', 'np.where', (['(dat > datHm)'], {}), '(dat > datHm)\n', (1015, 1028), True, 'import numpy as np\n'), ((1080, 1099), 'numpy.sqrt', 'np.sqrt', (['(nsq / 3.14)'], {}), '(nsq / 3.14)\n', 
(1087, 1099), True, 'import numpy as np\n')] |
from pytest import fixture
from pytest_mock import MockerFixture
from stopwatch import Stopwatch
from .mocks.time import TimeMock
def describe_stopwatch() -> None:
    """pytest-describe style test suite for the Stopwatch class."""
    @fixture
    def time_mock() -> TimeMock:
        # Deterministic replacement for time.perf_counter.
        return TimeMock()
    def describe_start() -> None:
        def with_stop(mocker: MockerFixture, time_mock: TimeMock) -> None:
            mocker.patch('time.perf_counter', time_mock.perf_counter)
            sw = Stopwatch(print_report=True)
            time_mock.increment(1)
            sw.stop()
            assert sw.elapsed == 1
            assert str(sw) == '1.00s'
            assert repr(sw) == '<Stopwatch name=None elapsed=1.0>'
            assert len(sw.laps) == 1
            assert not sw.running
            assert sw.name is None
        # NOTE(review): despite its name this test is identical to with_stop and does
        # not use a with statement -- presumably it was meant to use one; confirm.
        def with_statement(mocker: MockerFixture, time_mock: TimeMock) -> None:
            mocker.patch('time.perf_counter', time_mock.perf_counter)
            sw = Stopwatch(print_report=True)
            time_mock.increment(1)
            sw.stop()
            assert sw.elapsed == 1
            assert str(sw) == '1.00s'
            assert repr(sw) == '<Stopwatch name=None elapsed=1.0>'
            assert len(sw.laps) == 1
            assert not sw.running
            assert sw.name is None
        def with_name(mocker: MockerFixture, time_mock: TimeMock) -> None:
            mocker.patch('time.perf_counter', time_mock.perf_counter)
            with Stopwatch(name='lorem') as sw:
                time_mock.increment(0.1)
            assert sw.name is not None
            assert sw.name == 'lorem'
        def with_precision(mocker: MockerFixture, time_mock: TimeMock) -> None:
            mocker.patch('time.perf_counter', time_mock.perf_counter)
            with Stopwatch(precision=5) as sw:
                time_mock.increment(0.123456789)
            assert str(sw) == '123.45679ms'
            # Precision can be changed after the fact and affects formatting only.
            sw.precision = 3
            assert str(sw) == '123.457ms'
    def describe_print_report() -> None:
        def calls_print(mocker: MockerFixture, time_mock: TimeMock) -> None:
            mocker.patch('time.perf_counter', time_mock.perf_counter)
            print_mock = mocker.patch('builtins.print')
            with Stopwatch(print_report=True) as sw:
                time_mock.increment(0.1)
            print_mock.assert_called_once()
            print_mock.assert_called_with(sw._format())
        def does_not_call_print(
            mocker: MockerFixture, time_mock: TimeMock
        ) -> None:
            mocker.patch('time.perf_counter', time_mock.perf_counter)
            print_mock = mocker.patch('builtins.print')
            with Stopwatch(print_report=False):
                time_mock.increment(0.1)
            print_mock.assert_not_called()
        def with_name(mocker: MockerFixture, time_mock: TimeMock) -> None:
            mocker.patch('time.perf_counter', time_mock.perf_counter)
            mocker.patch('builtins.print')
            with Stopwatch(name='lorem', print_report=True) as sw:
                time_mock.increment(0.1)
            assert sw._format().endswith('lorem')
            # Disabling print_report makes _format return an empty string.
            sw._print_report = False
            assert sw._format() == ''
    def reset_laps_and_duration(
        mocker: MockerFixture, time_mock: TimeMock
    ) -> None:
        mocker.patch('time.perf_counter', time_mock.perf_counter)
        with Stopwatch() as sw:
            time_mock.increment(1.0)
        assert sw.elapsed == 1.0
        assert len(sw.laps) == 1
        # reset() clears both the accumulated time and the recorded laps.
        sw.reset()
        assert sw.elapsed == 0.0
        assert len(sw.laps) == 0
    def add_new_laps(mocker: MockerFixture, time_mock: TimeMock) -> None:
        mocker.patch('time.perf_counter', time_mock.perf_counter)
        with Stopwatch() as sw:
            for i in range(5):
                with sw.lap():
                    time_mock.increment(i)
        # Laps of 0..4 seconds: total elapsed 10, each lap recorded individually.
        assert sw.elapsed == 10
        assert len(sw.laps) == 5
        assert sw.laps == [i for i in range(5)]
    def describe_report() -> None:
        def without_laps(mocker: MockerFixture, time_mock: TimeMock) -> None:
            mocker.patch('time.perf_counter', time_mock.perf_counter)
            with Stopwatch('sw1') as sw1:
                time_mock.increment(1)
            with Stopwatch() as sw2:
                time_mock.increment(1)
            with Stopwatch(precision=4) as sw3:
                time_mock.increment(1)
            assert sw1.report() == '[Stopwatch#sw1] total=1.00s'
            assert sw2.report() == '[Stopwatch] total=1.00s'
            assert sw3.report() == '[Stopwatch] total=1.0000s'
        def with_laps(mocker: MockerFixture, time_mock: TimeMock) -> None:
            mocker.patch('time.perf_counter', time_mock.perf_counter)
            with Stopwatch() as sw:
                for i in range(5):
                    with sw.lap():
                        time_mock.increment(i)
            # With laps recorded, report() includes summary statistics.
            assert sw.report() == '[Stopwatch] total=10.00s, mean=2.00s, ' + \
                'min=0.00s, median=2.00s, max=4.00s, dev=1.41s'
| [
"stopwatch.Stopwatch"
] | [((437, 465), 'stopwatch.Stopwatch', 'Stopwatch', ([], {'print_report': '(True)'}), '(print_report=True)\n', (446, 465), False, 'from stopwatch import Stopwatch\n'), ((937, 965), 'stopwatch.Stopwatch', 'Stopwatch', ([], {'print_report': '(True)'}), '(print_report=True)\n', (946, 965), False, 'from stopwatch import Stopwatch\n'), ((1420, 1443), 'stopwatch.Stopwatch', 'Stopwatch', ([], {'name': '"""lorem"""'}), "(name='lorem')\n", (1429, 1443), False, 'from stopwatch import Stopwatch\n'), ((1713, 1735), 'stopwatch.Stopwatch', 'Stopwatch', ([], {'precision': '(5)'}), '(precision=5)\n', (1722, 1735), False, 'from stopwatch import Stopwatch\n'), ((3297, 3308), 'stopwatch.Stopwatch', 'Stopwatch', ([], {}), '()\n', (3306, 3308), False, 'from stopwatch import Stopwatch\n'), ((3658, 3669), 'stopwatch.Stopwatch', 'Stopwatch', ([], {}), '()\n', (3667, 3669), False, 'from stopwatch import Stopwatch\n'), ((2153, 2181), 'stopwatch.Stopwatch', 'Stopwatch', ([], {'print_report': '(True)'}), '(print_report=True)\n', (2162, 2181), False, 'from stopwatch import Stopwatch\n'), ((2581, 2610), 'stopwatch.Stopwatch', 'Stopwatch', ([], {'print_report': '(False)'}), '(print_report=False)\n', (2590, 2610), False, 'from stopwatch import Stopwatch\n'), ((2902, 2944), 'stopwatch.Stopwatch', 'Stopwatch', ([], {'name': '"""lorem"""', 'print_report': '(True)'}), "(name='lorem', print_report=True)\n", (2911, 2944), False, 'from stopwatch import Stopwatch\n'), ((4096, 4112), 'stopwatch.Stopwatch', 'Stopwatch', (['"""sw1"""'], {}), "('sw1')\n", (4105, 4112), False, 'from stopwatch import Stopwatch\n'), ((4177, 4188), 'stopwatch.Stopwatch', 'Stopwatch', ([], {}), '()\n', (4186, 4188), False, 'from stopwatch import Stopwatch\n'), ((4253, 4275), 'stopwatch.Stopwatch', 'Stopwatch', ([], {'precision': '(4)'}), '(precision=4)\n', (4262, 4275), False, 'from stopwatch import Stopwatch\n'), ((4675, 4686), 'stopwatch.Stopwatch', 'Stopwatch', ([], {}), '()\n', (4684, 4686), False, 'from stopwatch import 
Stopwatch\n')] |
# -*- coding: utf-8 -*-
__author__ = "MJ (<EMAIL>)"
__license__ = "Apache 2.0"
# scip plugin
from ribbon.client.config.client_config import ClientConfig
from ribbon.eureka.discovery_enabled_server import DiscoveryEnabledServer
from ribbon.loadbalancer.dynamic_server_list_load_balancer import DynamicServerListLoadBalancer
from tests.eureka.client.discovery.shared.stubs import instance_info
class FakeEurekaClient:
    """Stub eureka client that always reports three instances on ports 100/200/300."""

    def get_instances_by_virtual_host_name(self, a=None, b=None):
        # The real client queries eureka; the stub returns a fixed instance list.
        return [instance_info(port=port) for port in (100, 200, 300)]
def test_init_without_given_any_params():
    """A default-constructed balancer exposes default name, no rule LB, and empty state."""
    balancer = DynamicServerListLoadBalancer()
    assert balancer.name == "LoadBalancer"
    assert balancer.rule.loadbalancer is None
    assert balancer.servers == []
    assert balancer.counter == 0
def test_init_with_given_config():
    """Values supplied through ClientConfig override the balancer's defaults."""
    config = ClientConfig()
    config.load_default_values()
    for prop_name, prop_value in (
        ("ClientName", "MJ_is_awesome"),
        ("NFLoadBalancerPingInterval", 100),
        ("NFLoadBalancerMaxTotalPingTime", 200),
    ):
        config.add_property(prop_name, prop_value)
    balancer = DynamicServerListLoadBalancer(config=config)
    assert balancer.name == "MJ_is_awesome"
    assert balancer._ping_interval_time_in_sec == 100
    assert balancer._max_total_ping_time_in_sec == 200
def test_choose_a_server_with_given_three_alive_server():
    """With three reachable servers, repeated choices cover all three ports."""
    balancer = DynamicServerListLoadBalancer()
    # An empty vip_addresses would make the server list come back empty.
    balancer.server_list.vip_addresses = "127.0.0.1"
    balancer.server_list.eureka_client = FakeEurekaClient()
    balancer.update_list_of_servers()
    chosen_ports = {balancer.choose_server("uselessKey").port for _ in range(3)}
    assert len(balancer.get_reachable_servers()) == 3
    assert 100 in chosen_ports
    assert 200 in chosen_ports
    assert 300 in chosen_ports
def test_choose_a_server_with_given_two_alive_and_one_not_alive_server():
    """Marking one of three servers dead shrinks the reachable set to two."""
    balancer = DynamicServerListLoadBalancer()
    # An empty vip_addresses would make the server list come back empty.
    balancer.server_list.vip_addresses = "127.0.0.1"
    balancer.server_list.eureka_client = FakeEurekaClient()
    balancer.update_list_of_servers()
    balancer.servers[2].is_alive = False
    assert len(balancer.get_reachable_servers()) == 2
| [
"ribbon.loadbalancer.dynamic_server_list_load_balancer.DynamicServerListLoadBalancer",
"ribbon.client.config.client_config.ClientConfig",
"tests.eureka.client.discovery.shared.stubs.instance_info"
] | [((697, 728), 'ribbon.loadbalancer.dynamic_server_list_load_balancer.DynamicServerListLoadBalancer', 'DynamicServerListLoadBalancer', ([], {}), '()\n', (726, 728), False, 'from ribbon.loadbalancer.dynamic_server_list_load_balancer import DynamicServerListLoadBalancer\n'), ((912, 926), 'ribbon.client.config.client_config.ClientConfig', 'ClientConfig', ([], {}), '()\n', (924, 926), False, 'from ribbon.client.config.client_config import ClientConfig\n'), ((1147, 1191), 'ribbon.loadbalancer.dynamic_server_list_load_balancer.DynamicServerListLoadBalancer', 'DynamicServerListLoadBalancer', ([], {'config': 'config'}), '(config=config)\n', (1176, 1191), False, 'from ribbon.loadbalancer.dynamic_server_list_load_balancer import DynamicServerListLoadBalancer\n'), ((1397, 1428), 'ribbon.loadbalancer.dynamic_server_list_load_balancer.DynamicServerListLoadBalancer', 'DynamicServerListLoadBalancer', ([], {}), '()\n', (1426, 1428), False, 'from ribbon.loadbalancer.dynamic_server_list_load_balancer import DynamicServerListLoadBalancer\n'), ((2021, 2052), 'ribbon.loadbalancer.dynamic_server_list_load_balancer.DynamicServerListLoadBalancer', 'DynamicServerListLoadBalancer', ([], {}), '()\n', (2050, 2052), False, 'from ribbon.loadbalancer.dynamic_server_list_load_balancer import DynamicServerListLoadBalancer\n'), ((502, 525), 'tests.eureka.client.discovery.shared.stubs.instance_info', 'instance_info', ([], {'port': '(100)'}), '(port=100)\n', (515, 525), False, 'from tests.eureka.client.discovery.shared.stubs import instance_info\n'), ((542, 565), 'tests.eureka.client.discovery.shared.stubs.instance_info', 'instance_info', ([], {'port': '(200)'}), '(port=200)\n', (555, 565), False, 'from tests.eureka.client.discovery.shared.stubs import instance_info\n'), ((582, 605), 'tests.eureka.client.discovery.shared.stubs.instance_info', 'instance_info', ([], {'port': '(300)'}), '(port=300)\n', (595, 605), False, 'from tests.eureka.client.discovery.shared.stubs import instance_info\n')] |
# Copyright (c) 2014-2017 esotericnonsense (<NAME>)
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://opensource.org/licenses/mit-license.php
from macros import MODES
class ModeHandler(object):
    """Tracks the active UI mode and dispatches mode-change callbacks and keypresses."""

    def __init__(self, base_callbacks):
        self._mode = None
        # Per-mode activation/deactivation callbacks (one per mode).
        self._callbacks = {}
        # Callbacks fired on every mode change (FooterView, HeaderView).
        self._base_callbacks = base_callbacks
        # Per-mode keypress handlers.
        self._keypress_handlers = {}

    def add_callback(self, key, callback):
        self._callbacks[key] = callback

    def add_keypress_handler(self, key, handler):
        self._keypress_handlers[key] = handler

    async def _call_callbacks(self, oldmode, newmode):
        # Notify the outgoing mode first, then the incoming one; both receive newmode.
        for mode in (oldmode, newmode):
            callback = self._callbacks.get(mode)
            if callback is not None:
                await callback(newmode)
        # Base callbacks (FooterView, HeaderView) always run last.
        for base_callback in self._base_callbacks:
            await base_callback(newmode)

    async def set_mode(self, newmode):
        # No-op when the mode is unchanged.
        if self._mode != newmode:
            await self._call_callbacks(self._mode, newmode)
            self._mode = newmode

    async def _seek_mode(self, seek):
        # Seeking is meaningless before any mode has been set.
        if self._mode is None:
            return
        position = (MODES.index(self._mode) + seek) % len(MODES)
        await self.set_mode(MODES[position])

    async def handle_keypress(self, key):
        """Dispatch a keypress; return the key if unconsumed, else None."""
        # Without an active mode nothing can consume the key.
        if self._mode is None:
            return key
        # Give the current mode's handler first refusal.
        handler = self._keypress_handlers.get(self._mode)
        if handler:
            key = await handler(key)
            if key is None:
                return key
        # Arrow keys cycle through the mode list.
        if key == "KEY_LEFT":
            await self._seek_mode(-1)
            return None
        if key == "KEY_RIGHT":
            await self._seek_mode(1)
            return None
        # A single character selects the mode whose name starts with it.
        if len(key) == 1:
            for mode in MODES:
                if mode[0] == key.lower():
                    await self.set_mode(mode)
                    return None
        return key  # Either none by this point, or still there.
| [
"macros.MODES.index"
] | [((1597, 1620), 'macros.MODES.index', 'MODES.index', (['self._mode'], {}), '(self._mode)\n', (1608, 1620), False, 'from macros import MODES\n')] |
import argparse
import os
import numpy as np
from torchdistill.datasets.transform import CustomCompose, CustomRandomResize
from torchdistill.datasets.util import load_coco_dataset, build_transform
from torchvision.datasets import ImageFolder, VOCSegmentation
from torchvision.transforms import transforms
from custom.transform import BPG
def get_argparser():
    """Build the command-line parser for the BPG file-size measurement script.

    Returns:
        argparse.ArgumentParser: parser with a required ``--dataset`` choice.
    """
    parser = argparse.ArgumentParser(description='BPG file size for ImageNet and COCO segmentation datasets')
    # Fixed: the help text previously said 'ckpt dir path', which describes a
    # different option and was evidently copy-pasted from another script.
    parser.add_argument('--dataset', required=True, choices=['imagenet', 'coco_segment', 'pascal_segment'],
                        help='dataset to measure BPG file sizes for')
    return parser
def compute_bpg_file_size_with_transform(dataset, quality):
    """Encode each sample with BPG at *quality* after a 256-resize/224-center-crop,
    then print the mean and standard deviation of the compressed sizes in KB.

    Args:
        dataset: iterable of (image, label) pairs; only the image is used.
        quality: BPG quality setting passed to the encoder.
    """
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224)
    ])
    bpg_codec = BPG(bpg_quality=quality, encoder_path='~/manually_installed/libbpg-0.9.8/bpgenc',
                    decoder_path='~/manually_installed/libbpg-0.9.8/bpgdec')
    file_size_list = list()
    for img in dataset:
        img = transform(img[0])
        # run() returns (decoded image, compressed size in kilobytes).
        img, file_size_kbyte = bpg_codec.run(img)
        file_size_list.append(file_size_kbyte)
    file_sizes = np.array(file_size_list)
    print('BPG quality: {}, File size [KB]: {} ± {}'.format(quality, file_sizes.mean(), file_sizes.std()))
def compute_bpg_file_size_for_imagenet_dataset():
    """Measure BPG file sizes on the ImageNet (ILSVRC2012) validation set.

    Sweeps BPG quality from 50 down to 0 in steps of 5 — the same eleven
    values the previous copy-pasted calls used.
    """
    dataset = ImageFolder(root=os.path.expanduser('~/dataset/ilsvrc2012/val'))
    for quality in range(50, -1, -5):
        compute_bpg_file_size_with_transform(dataset, quality)
def compute_bpg_file_size(dataset, quality):
    """Print mean/std BPG-compressed size (KB) over *dataset* at *quality*.

    Unlike compute_bpg_file_size_with_transform, images are encoded as-is.
    """
    codec = BPG(bpg_quality=quality, encoder_path='~/manually_installed/libbpg-0.9.8/bpgenc',
                decoder_path='~/manually_installed/libbpg-0.9.8/bpgdec')
    sizes_kb = []
    for sample in dataset:
        image, size_kb = codec.run(sample[0])
        sizes_kb.append(size_kb)
    file_sizes = np.array(sizes_kb)
    print('BPG quality: {}, File size [KB]: {} ± {}'.format(quality, file_sizes.mean(), file_sizes.std()))
def compute_bpg_file_size_for_cocosegment_dataset():
    """Measure BPG file sizes on the COCO 2017 val segmentation split.

    Sweeps BPG quality from 50 down to 0 in steps of 5 — the same eleven
    values the previous copy-pasted calls used.
    """
    split_config = {
        'images': '~/dataset/coco2017/val2017',
        'annotations': '~/dataset/coco2017/annotations/instances_val2017.json',
        'annotated_only': False,
        'is_segment': True,
        'transforms_params': [
            {'type': 'CustomRandomResize', 'params': {'min_size': 520, 'max_size': 520}}
        ]
    }
    is_segment = split_config.get('is_segment', False)
    compose_cls = CustomCompose if is_segment else None
    # Renamed from 'transforms' to stop shadowing the torchvision.transforms
    # module imported at the top of the file.
    transform = build_transform(split_config.get('transforms_params', None), compose_cls=compose_cls)
    dataset = load_coco_dataset(split_config['images'], split_config['annotations'],
                              split_config['annotated_only'], split_config.get('random_horizontal_flip', None),
                              is_segment, transform, split_config.get('bpg_quality', None))
    for quality in range(50, -1, -5):
        compute_bpg_file_size(dataset, quality)
def compute_bpg_file_size_with_transform_and_target(dataset, transform, quality):
    """Print mean/std BPG-compressed size (KB) for an (image, target) dataset.

    *transform* is applied jointly to image and target (the transformed
    target is discarded); only the image is BPG-encoded.
    """
    bpg_codec = BPG(bpg_quality=quality, encoder_path='~/manually_installed/libbpg-0.9.8/bpgenc',
                    decoder_path='~/manually_installed/libbpg-0.9.8/bpgdec')
    file_size_list = list()
    for img in dataset:
        img, _ = transform(img[0], img[1])
        img, file_size_kbyte = bpg_codec.run(img)
        file_size_list.append(file_size_kbyte)
    file_sizes = np.array(file_size_list)
    # Capitalization fixed ('bpg' -> 'BPG') to match the sibling reporting
    # functions, so the output can be grepped uniformly.
    print('BPG quality: {}, File size [KB]: {} ± {}'.format(quality, file_sizes.mean(), file_sizes.std()))
def compute_bpg_file_size_for_pascalsegment_dataset():
    """Measure BPG file sizes on the PASCAL VOC 2012 val segmentation split.

    Sweeps BPG quality from 50 down to 0 in steps of 5 — the same eleven
    values the previous copy-pasted calls used.
    """
    dataset = VOCSegmentation(root=os.path.expanduser('~/dataset/'), image_set='val', year='2012')
    transform = CustomCompose([
        CustomRandomResize(min_size=512, max_size=512)
    ])
    for quality in range(50, -1, -5):
        compute_bpg_file_size_with_transform_and_target(dataset, transform, quality)
if __name__ == '__main__':
    args = get_argparser().parse_args()
    # argparse restricts --dataset to exactly these three choices.
    runners = {
        'imagenet': compute_bpg_file_size_for_imagenet_dataset,
        'coco_segment': compute_bpg_file_size_for_cocosegment_dataset,
        'pascal_segment': compute_bpg_file_size_for_pascalsegment_dataset,
    }
    runners[args.dataset]()
| [
"torchvision.transforms.transforms.CenterCrop",
"argparse.ArgumentParser",
"torchdistill.datasets.transform.CustomRandomResize",
"numpy.array",
"custom.transform.BPG",
"torchvision.transforms.transforms.Resize",
"os.path.expanduser"
] | [((376, 477), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""BPG file size for ImageNet and COCO segmentation datasets"""'}), "(description=\n 'BPG file size for ImageNet and COCO segmentation datasets')\n", (399, 477), False, 'import argparse\n'), ((834, 982), 'custom.transform.BPG', 'BPG', ([], {'bpg_quality': 'quality', 'encoder_path': '"""~/manually_installed/libbpg-0.9.8/bpgenc"""', 'decoder_path': '"""~/manually_installed/libbpg-0.9.8/bpgdec"""'}), "(bpg_quality=quality, encoder_path=\n '~/manually_installed/libbpg-0.9.8/bpgenc', decoder_path=\n '~/manually_installed/libbpg-0.9.8/bpgdec')\n", (837, 982), False, 'from custom.transform import BPG\n'), ((1191, 1215), 'numpy.array', 'np.array', (['file_size_list'], {}), '(file_size_list)\n', (1199, 1215), True, 'import numpy as np\n'), ((2137, 2285), 'custom.transform.BPG', 'BPG', ([], {'bpg_quality': 'quality', 'encoder_path': '"""~/manually_installed/libbpg-0.9.8/bpgenc"""', 'decoder_path': '"""~/manually_installed/libbpg-0.9.8/bpgdec"""'}), "(bpg_quality=quality, encoder_path=\n '~/manually_installed/libbpg-0.9.8/bpgenc', decoder_path=\n '~/manually_installed/libbpg-0.9.8/bpgdec')\n", (2140, 2285), False, 'from custom.transform import BPG\n'), ((2455, 2479), 'numpy.array', 'np.array', (['file_size_list'], {}), '(file_size_list)\n', (2463, 2479), True, 'import numpy as np\n'), ((4024, 4172), 'custom.transform.BPG', 'BPG', ([], {'bpg_quality': 'quality', 'encoder_path': '"""~/manually_installed/libbpg-0.9.8/bpgenc"""', 'decoder_path': '"""~/manually_installed/libbpg-0.9.8/bpgdec"""'}), "(bpg_quality=quality, encoder_path=\n '~/manually_installed/libbpg-0.9.8/bpgenc', decoder_path=\n '~/manually_installed/libbpg-0.9.8/bpgdec')\n", (4027, 4172), False, 'from custom.transform import BPG\n'), ((4392, 4416), 'numpy.array', 'np.array', (['file_size_list'], {}), '(file_size_list)\n', (4400, 4416), True, 'import numpy as np\n'), ((752, 774), 'torchvision.transforms.transforms.Resize', 
'transforms.Resize', (['(256)'], {}), '(256)\n', (769, 774), False, 'from torchvision.transforms import transforms\n'), ((784, 810), 'torchvision.transforms.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (805, 810), False, 'from torchvision.transforms import transforms\n'), ((1406, 1452), 'os.path.expanduser', 'os.path.expanduser', (['"""~/dataset/ilsvrc2012/val"""'], {}), "('~/dataset/ilsvrc2012/val')\n", (1424, 1452), False, 'import os\n'), ((4616, 4648), 'os.path.expanduser', 'os.path.expanduser', (['"""~/dataset/"""'], {}), "('~/dataset/')\n", (4634, 4648), False, 'import os\n'), ((4720, 4766), 'torchdistill.datasets.transform.CustomRandomResize', 'CustomRandomResize', ([], {'min_size': '(512)', 'max_size': '(512)'}), '(min_size=512, max_size=512)\n', (4738, 4766), False, 'from torchdistill.datasets.transform import CustomCompose, CustomRandomResize\n')] |
import os

# Absolute path of this config file.
filt_path = os.path.abspath(__file__)
# Parent directory of this file; model paths below are resolved against it.
father_path = os.path.abspath(os.path.dirname(filt_path) + os.path.sep + ".")
GPU_ID = 0
# psenet (text detection) settings
pse_long_size = 960  # length the long side of the image is scaled to
pse_model_type = "mobilenetv2"
pse_scale = 1
# NOTE(review): pse_model_path is only defined for the mobilenetv2 backbone;
# other pse_model_type values would leave it undefined.
if pse_model_type == "mobilenetv2":
    pse_model_path = os.path.join(father_path, "models/psenet_lite_mbv2.pth")
# crnn (text recognition) settings
nh = 256
crnn_type = "lite_lstm"
crnn_vertical_model_path = os.path.join(father_path, "models/crnn_dw_lstm_vertical.pth")
# Select recognition weights and whether the model uses an LSTM head.
if crnn_type == "lite_lstm":
    LSTMFLAG = True
    crnn_model_path = os.path.join(father_path, "models/crnn_lite_lstm_dw_v2.pth")
elif crnn_type == "lite_dense":
    LSTMFLAG = False
    crnn_model_path = os.path.join(father_path, "models/crnn_lite_dense_dw.pth")
elif crnn_type == "full_lstm":
    LSTMFLAG = True
    crnn_model_path = os.path.join(father_path, "models/ocr-lstm.pth")
elif crnn_type == "full_dense":
    LSTMFLAG = False
    crnn_model_path = os.path.join(father_path, "models/ocr-dense.pth")
# crnn_model_path = os.path.join(father_path,"models/ocr-lstm.pth")
# from crnn.keys import alphabet
from backend.chineseocr_lite.crnn.keys import alphabetChinese as alphabet
# angle_class (text-line orientation classifier) settings
lable_map_dict = {0: "hengdao", 1: "hengzhen", 2: "shudao", 3: "shuzhen"}  # hengdao: horizontal, upside-down text line; the others are analogous
rotae_map_dict = {"hengdao": 180, "hengzhen": 0, "shudao": 180, "shuzhen": 0}  # rotation angle (degrees) to apply per orientation
angle_type = "shufflenetv2_05"
# angle_type = "resnet18"
angle_model_path = os.path.join(father_path, "models/{}.pth".format(angle_type))
TIMEOUT = 30
version = 'api/v1'
| [
"os.path.abspath",
"os.path.dirname",
"os.path.join"
] | [((23, 48), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (38, 48), False, 'import os\n'), ((410, 471), 'os.path.join', 'os.path.join', (['father_path', '"""models/crnn_dw_lstm_vertical.pth"""'], {}), "(father_path, 'models/crnn_dw_lstm_vertical.pth')\n", (422, 471), False, 'import os\n'), ((282, 338), 'os.path.join', 'os.path.join', (['father_path', '"""models/psenet_lite_mbv2.pth"""'], {}), "(father_path, 'models/psenet_lite_mbv2.pth')\n", (294, 338), False, 'import os\n'), ((544, 604), 'os.path.join', 'os.path.join', (['father_path', '"""models/crnn_lite_lstm_dw_v2.pth"""'], {}), "(father_path, 'models/crnn_lite_lstm_dw_v2.pth')\n", (556, 604), False, 'import os\n'), ((680, 738), 'os.path.join', 'os.path.join', (['father_path', '"""models/crnn_lite_dense_dw.pth"""'], {}), "(father_path, 'models/crnn_lite_dense_dw.pth')\n", (692, 738), False, 'import os\n'), ((79, 105), 'os.path.dirname', 'os.path.dirname', (['filt_path'], {}), '(filt_path)\n', (94, 105), False, 'import os\n'), ((812, 860), 'os.path.join', 'os.path.join', (['father_path', '"""models/ocr-lstm.pth"""'], {}), "(father_path, 'models/ocr-lstm.pth')\n", (824, 860), False, 'import os\n'), ((936, 985), 'os.path.join', 'os.path.join', (['father_path', '"""models/ocr-dense.pth"""'], {}), "(father_path, 'models/ocr-dense.pth')\n", (948, 985), False, 'import os\n')] |
import os
from . import db
from .form import User
from .const import SCP_DIR, DISK_DIR
def delete_admin():
    """Delete every admin (User) account and commit the change.

    :return: None
    """
    for account in User.query.all():
        db.session.delete(account)
    db.session.commit()
def get_disk_main_dir():
    """Return the root directory of the network disk.

    DISK_DIR is used directly when absolute, otherwise it is resolved
    relative to SCP_DIR.
    """
    if os.path.isabs(DISK_DIR):
        return DISK_DIR
    return os.path.join(SCP_DIR, DISK_DIR)
def get_dirs_files(path):
    """List the sub-directories and regular files of *path*.

    :param path: directory to inspect
    :return: (dirs, files) — two name lists, each sorted alphabetically.
        Entries that are neither directory nor regular file are omitted.
    """
    entries = sorted(os.listdir(path))
    dirs = [name for name in entries
            if os.path.isdir(os.path.join(path, name))]
    files = [name for name in entries
             if os.path.isfile(os.path.join(path, name))]
    return dirs, files
| [
"os.listdir",
"os.path.isabs",
"os.path.join",
"os.path.isfile",
"os.path.isdir"
] | [((328, 351), 'os.path.isabs', 'os.path.isabs', (['DISK_DIR'], {}), '(DISK_DIR)\n', (341, 351), False, 'import os\n'), ((580, 596), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (590, 596), False, 'import os\n'), ((418, 449), 'os.path.join', 'os.path.join', (['SCP_DIR', 'DISK_DIR'], {}), '(SCP_DIR, DISK_DIR)\n', (430, 449), False, 'import os\n'), ((699, 720), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (711, 720), False, 'import os\n'), ((732, 752), 'os.path.isdir', 'os.path.isdir', (['f_abs'], {}), '(f_abs)\n', (745, 752), False, 'import os\n'), ((794, 815), 'os.path.isfile', 'os.path.isfile', (['f_abs'], {}), '(f_abs)\n', (808, 815), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as pl
import os
from ipdb import set_trace as stop
os.environ["KERAS_BACKEND"] = "tensorflow"
from keras.optimizers import Adam
from keras.layers import Dense, LSTM, Input, TimeDistributed, Flatten
from keras.models import Model
import tensorflow as tf
import keras.backend.tensorflow_backend as ktf
from keras.utils import plot_model
class deep_lstm(object):
    """
    Toy LSTM regression experiment: each sample is a random
    (batch, n, 2, 1) tensor scaled by a random factor 'a'; the network
    learns to recover 'a' from the scaled, normalized sequence.
    """
    def __init__(self):
        # Only allocate needed memory
        config = tf.ConfigProto()
        config.gpu_options.allow_growth=True
        session = tf.Session(config=config)
        ktf.set_session(session)
        self.batch_size = 16
        # NOTE(review): x_train stores the *scaled inputs* (built as y_train
        # below) while y_train stores the scale factors 'a' — the names are
        # swapped relative to the usual convention.
        self.x_train = []
        self.y_train = []
        for i in range(1000):
            # Random sequence length between 3 and 9 per generated batch.
            n = np.random.randint(3, high=10)
            x_train = np.zeros((self.batch_size, n, 2, 1))
            x_train[:,:,:,0] = np.random.rand(self.batch_size, n, 2)
            a = np.random.rand(self.batch_size)
            y_train = a[:,None,None,None] * x_train
            self.x_train.append(y_train)
            self.y_train.append(a)
        # Min-max normalize the inputs using the range of the scale factors.
        self.max = np.max(np.array(self.y_train))
        self.min = np.min(np.array(self.y_train))
        for i in range(1000):
            self.x_train[i] = (self.x_train[i] - self.min) / (self.max - self.min)
    def define_network(self):
        """Build the TimeDistributed(Flatten) -> LSTM(64) -> Dense(1) regressor."""
        st = Input(shape=(None, 2, 1), name='input')
        x = TimeDistributed(Flatten(), name='flatten')(st)
        x = LSTM(64)(x)
        output_alpha = Dense(1, name='alpha')(x)
        self.model = Model(inputs=st, outputs=output_alpha)
        plot_model(self.model, to_file='lstm_model.png', show_shapes=True)
    def training_generator(self):
        """Yield (input, target) batches forever, cycling the 1000 prebuilt sets."""
        while 1:
            for i in range(1000):
                yield self.x_train[i].astype('float32'), self.y_train[i].astype('float32')
    def compile_network(self):
        """Compile the model with MSE loss and Adam (lr=1e-3)."""
        self.model.compile(loss='mse', optimizer=Adam(lr=1e-3))
    def train(self, n_iterations):
        """Train for n_iterations epochs of 1000 generator steps each."""
        print("Training network...")
        self.metrics = self.model.fit_generator(self.training_generator(), 1000, epochs=n_iterations)
    def test(self):
        """Predict scale factors on 4 fresh batches and plot truth vs prediction."""
        n = np.array([3,5,7,10])
        out_syn = np.zeros((4,16))
        out_nn = np.zeros((4,16))
        for i in range(4):
            x_train = np.zeros((self.batch_size, n[i], 2, 1))
            x_train[:,:,:,0] = np.random.rand(self.batch_size, n[i], 2)
            a = np.random.rand(self.batch_size)
            y_train = a[:,None,None,None] * x_train
            # Reuse the normalization range computed from the training data.
            y_train = (y_train - self.min) / (self.max - self.min)
            pred = self.model.predict(y_train.astype('float32'), batch_size=16)
            out_syn[i,:] = a
            out_nn[i,:] = pred.flatten()
        f, ax = pl.subplots(nrows=2, ncols=2)
        ax = ax.flatten()
        for i in range(4):
            ax[i].plot(out_syn[i,:], out_nn[i,:], '.')
            ax[i].plot([0,1], [0,1])
        pl.show()
        return out_nn, out_syn
if __name__ == '__main__':
    # Build, briefly train (2 epochs), and evaluate the toy LSTM model.
    experiment = deep_lstm()
    experiment.define_network()
    experiment.compile_network()
    experiment.train(2)
    nn, syn = experiment.test()
"keras.optimizers.Adam",
"keras.backend.tensorflow_backend.set_session",
"numpy.random.rand",
"keras.layers.Flatten",
"tensorflow.Session",
"keras.utils.plot_model",
"numpy.array",
"keras.layers.Input",
"numpy.zeros",
"numpy.random.randint",
"keras.models.Model",
"keras.layers.LSTM",
"keras.... | [((481, 497), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (495, 497), True, 'import tensorflow as tf\n'), ((561, 586), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (571, 586), True, 'import tensorflow as tf\n'), ((595, 619), 'keras.backend.tensorflow_backend.set_session', 'ktf.set_session', (['session'], {}), '(session)\n', (610, 619), True, 'import keras.backend.tensorflow_backend as ktf\n'), ((1355, 1394), 'keras.layers.Input', 'Input', ([], {'shape': '(None, 2, 1)', 'name': '"""input"""'}), "(shape=(None, 2, 1), name='input')\n", (1360, 1394), False, 'from keras.layers import Dense, LSTM, Input, TimeDistributed, Flatten\n'), ((1558, 1596), 'keras.models.Model', 'Model', ([], {'inputs': 'st', 'outputs': 'output_alpha'}), '(inputs=st, outputs=output_alpha)\n', (1563, 1596), False, 'from keras.models import Model\n'), ((1606, 1672), 'keras.utils.plot_model', 'plot_model', (['self.model'], {'to_file': '"""lstm_model.png"""', 'show_shapes': '(True)'}), "(self.model, to_file='lstm_model.png', show_shapes=True)\n", (1616, 1672), False, 'from keras.utils import plot_model\n'), ((2218, 2241), 'numpy.array', 'np.array', (['[3, 5, 7, 10]'], {}), '([3, 5, 7, 10])\n', (2226, 2241), True, 'import numpy as np\n'), ((2257, 2274), 'numpy.zeros', 'np.zeros', (['(4, 16)'], {}), '((4, 16))\n', (2265, 2274), True, 'import numpy as np\n'), ((2291, 2308), 'numpy.zeros', 'np.zeros', (['(4, 16)'], {}), '((4, 16))\n', (2299, 2308), True, 'import numpy as np\n'), ((2819, 2848), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {'nrows': '(2)', 'ncols': '(2)'}), '(nrows=2, ncols=2)\n', (2830, 2848), True, 'import matplotlib.pyplot as pl\n'), ((3003, 3012), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (3010, 3012), True, 'import matplotlib.pyplot as pl\n'), ((749, 778), 'numpy.random.randint', 'np.random.randint', (['(3)'], {'high': '(10)'}), '(3, high=10)\n', (766, 778), True, 'import numpy as np\n'), ((801, 
837), 'numpy.zeros', 'np.zeros', (['(self.batch_size, n, 2, 1)'], {}), '((self.batch_size, n, 2, 1))\n', (809, 837), True, 'import numpy as np\n'), ((869, 906), 'numpy.random.rand', 'np.random.rand', (['self.batch_size', 'n', '(2)'], {}), '(self.batch_size, n, 2)\n', (883, 906), True, 'import numpy as np\n'), ((935, 966), 'numpy.random.rand', 'np.random.rand', (['self.batch_size'], {}), '(self.batch_size)\n', (949, 966), True, 'import numpy as np\n'), ((1122, 1144), 'numpy.array', 'np.array', (['self.y_train'], {}), '(self.y_train)\n', (1130, 1144), True, 'import numpy as np\n'), ((1172, 1194), 'numpy.array', 'np.array', (['self.y_train'], {}), '(self.y_train)\n', (1180, 1194), True, 'import numpy as np\n'), ((1467, 1475), 'keras.layers.LSTM', 'LSTM', (['(64)'], {}), '(64)\n', (1471, 1475), False, 'from keras.layers import Dense, LSTM, Input, TimeDistributed, Flatten\n'), ((1510, 1532), 'keras.layers.Dense', 'Dense', (['(1)'], {'name': '"""alpha"""'}), "(1, name='alpha')\n", (1515, 1532), False, 'from keras.layers import Dense, LSTM, Input, TimeDistributed, Flatten\n'), ((2370, 2409), 'numpy.zeros', 'np.zeros', (['(self.batch_size, n[i], 2, 1)'], {}), '((self.batch_size, n[i], 2, 1))\n', (2378, 2409), True, 'import numpy as np\n'), ((2441, 2481), 'numpy.random.rand', 'np.random.rand', (['self.batch_size', 'n[i]', '(2)'], {}), '(self.batch_size, n[i], 2)\n', (2455, 2481), True, 'import numpy as np\n'), ((2498, 2529), 'numpy.random.rand', 'np.random.rand', (['self.batch_size'], {}), '(self.batch_size)\n', (2512, 2529), True, 'import numpy as np\n'), ((1423, 1432), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1430, 1432), False, 'from keras.layers import Dense, LSTM, Input, TimeDistributed, Flatten\n'), ((1976, 1990), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (1980, 1990), False, 'from keras.optimizers import Adam\n')] |
"""
Utility functions for Electron-Microscopy
"""
import math
def voltage_to_wavelength(voltage):
    """
    Convert electron accelerating voltage to relativistic electron wavelength.

    Implements the standard relativistically corrected de Broglie relation
    lambda[Angstrom] = 12.2643 / sqrt(V + 0.978466e-6 * V**2) with V in
    volts, rewritten here for kV input.

    :param voltage: float, The electron voltage in kV.
    :return: float, The electron wavelength in Angstrom (the original
        docstring said nm, but the 12.2643247 constant yields Angstrom:
        e.g. 300 kV -> ~0.0197 A = 1.97 pm).
    """
    return 12.2643247 / math.sqrt(voltage*1e3 + 0.978466*voltage**2)
def wavelength_to_voltage(wavelength):
    """
    Convert relativistic electron wavelength to accelerating voltage.

    Exact algebraic inverse (positive quadratic root) of the relation
    lambda = 12.2643247 / sqrt(1e3*V_kV + 0.978466*V_kV**2).

    :param wavelength: float, The electron wavelength in Angstrom (the
        original docstring said nm, but the 12.2643247 constant implies
        Angstrom: ~0.0197 A maps back to 300 kV).
    :return: float, The electron voltage in kV.
    """
    return (-1e3 + math.sqrt(1e6 + 4 * 12.2643247**2 * 0.978466 / wavelength**2)) / (2 * 0.978466)
| [
"math.sqrt"
] | [((296, 349), 'math.sqrt', 'math.sqrt', (['(voltage * 1000.0 + 0.978466 * voltage ** 2)'], {}), '(voltage * 1000.0 + 0.978466 * voltage ** 2)\n', (305, 349), False, 'import math\n'), ((575, 646), 'math.sqrt', 'math.sqrt', (['(1000000.0 + 4 * 12.2643247 ** 2 * 0.978466 / wavelength ** 2)'], {}), '(1000000.0 + 4 * 12.2643247 ** 2 * 0.978466 / wavelength ** 2)\n', (584, 646), False, 'import math\n')] |
#!/usr/bin/env python3
import time
import re
import glob
import os
import platform
import sys
import unicodedata
import urllib.parse
import codecs
import queue
from multiprocessing import Process, Event, Queue
from collections import Counter
# Directory containing this script.
baseDir = os.path.dirname(__file__)
# Wiki page backups are read from '<baseDir>/backup'.
sourceDir = os.path.join(baseDir, 'backup')
# Page files excluded from checking — presumably because the Creole syntax
# help page intentionally contains markup anomalies (TODO confirm).
blacklist = (
    'HilfeZurCreoleSyntax.txt',
)
class AnomalyFormatter:
    """
    Formats found anomalies and buffers the resulting text.
    The text buffer is returned and erased by getText().
    Also counts found anomalies.
    """
    def __init__(self, textEscaper, textDecorator, maxPartLength=70):
        # Accumulated output fragments; joined and cleared by getText().
        self._buffer = []
        self._escaper = textEscaper
        self._decorator = textDecorator
        # Maximum number of escaped characters shown per anomaly excerpt.
        self.maxPartLength = maxPartLength
        self.qoute = '"'
        self.ellipsis = '…'
        # Markers for start/end of line when the excerpt is not truncated.
        self.sol = '|'
        self.eol = '|'
        # Minimum escaped-character quota reserved for trailing context.
        self.minAfterLength = 20
        # Counts per anomaly message plus 'pathCount'/'lineCount'/'anomalyCount'.
        self.counts = Counter()
        self._lastPath = ''
        self._lastLineNr = 0
    def out(self, path, lineNr, startColumn, endColumn, line, anomaly):
        """
        Append a formatted report for one anomaly at line[startColumn:endColumn].
        Path and line headers are emitted only when they change between calls.
        """
        b = self._buffer
        d = self._decorator
        q = self.qoute
        if self._lastPath != path:
            self._lastPath = path
            self._lastLineNr = 0
            self.counts['pathCount'] += 1
            ePath = d.decorateText(self._escaper.escape(path), d.textBCyan)
            # Derive the wiki page name from the backup file name.
            pageName = os.path.basename(path).replace(' - ', '/')
            if pageName[-4:] == '.txt':
                pageName = pageName[0:-4]
            url = 'https://larpwiki.de/' + urllib.parse.quote(pageName)
            eUrl = d.decorateText(url, d.textWhite)
            b.extend(('\n', ePath, ':\n'))
            b.extend(('  ', eUrl, '\n'))
        if self._lastLineNr != lineNr:
            # NOTE(review): this inner check is redundant — it repeats the
            # condition of the enclosing if and is therefore always true.
            if self._lastLineNr != lineNr:
                self.counts['lineCount'] += 1
            self._lastLineNr = lineNr
            eLineNr = d.decorateText(str(lineNr + 1), d.textBYellow)
            b.extend(('  Line ', eLineNr, ':\n'))
        self.counts['anomalyCount'] += 1
        self.counts[anomaly] += 1
        eColumn = d.decorateText(str(startColumn + 1), d.textBYellow)
        ml = self.maxPartLength
        # Extract as much of the anomaly as allowed and selected:
        t = self._escaper.escapeLimitRight(line[startColumn:endColumn], ml)
        part = t[0]
        partCpLength = t[1]
        partComplete = ((endColumn - startColumn - partCpLength) == 0)
        ml = max(0, ml - len(part))
        # Extract leading text but reserve some quota for trailing:
        if partComplete:
            mal = min(len(line) - endColumn, int(ml / 2), self.minAfterLength)
        else:
            mal = 0
        bLength = min(startColumn, ml - mal)
        t = self._escaper.escapeLimitLeft(line[:startColumn], bLength)
        before = t[0]
        beforeCpLength = t[1]
        ml = max(0, ml - len(before))
        # Extract as much of trailing text as available and quota left:
        if partComplete:
            t = self._escaper.escapeLimitRight(line[endColumn:], ml)
            after = t[0]
            afterCpLength = t[1]
        else:
            after = ''
            afterCpLength = 0
        # Use ellipsis markers when the excerpt is truncated on either side.
        if startColumn - beforeCpLength > 0:
            sol = self.ellipsis
        else:
            sol = self.sol
        if (startColumn + partCpLength + afterCpLength) < len(line):
            eol = self.ellipsis
        else:
            eol = self.eol
        before = d.decorateText(before, d.textYellow)
        part = d.decorateText(part, d.textBYellow, d.textUnderline)
        after = d.decorateText(after, d.textYellow)
        b.extend(('  Column ', eColumn, ', anomaly ', q, anomaly, q, ':\n'))
        b.extend(('  ', sol, q, before, part, after, q, eol, '\n'))
    def getText(self):
        """Return the buffered report text and clear the buffer."""
        text = ''.join(self._buffer)
        self._buffer = []
        return text
    def getCounts(self):
        """Return the accumulated anomaly counters and reset them."""
        counts = self.counts
        self.counts = Counter()
        return counts
class AnsiTextDecorator:
    """
    Colorizes output for ANSI terminals.

    The class attributes are SGR parameter strings; pass any number of them
    to decorateText() to wrap a string in the matching escape sequences.
    """
    textBlack = '30'
    textRed = '31'
    textGreen = '32'
    textYellow = '33'
    textBlue = '34'
    textMagenta = '35'
    textCyan = '36'
    textGrey = '37'
    textBGrey = '30;1'
    textBRed = '31;1'
    textBGreen = '32;1'
    textBYellow = '33;1'
    textBBlue = '34;1'
    textBMagenta = '35;1'
    textBCyan = '36;1'
    textWhite = '37;1'
    textBold = '1'
    textItalic = '3'
    textUnderline = '4'
    backgroundBlack = '40'
    backgroundRed = '41'
    backgroundGreen = '42'
    backgroundYellow = '43'
    backgroundBlue = '44'
    backgroundMagenta = '45'
    backgroundCyan = '46'
    backgroundGrey = '47'
    def decorateText(self, text, *codes):
        """Wrap *text* in the given ANSI SGR codes, ending with a reset.

        With no codes, *text* is returned unchanged.
        """
        # Idiomatic emptiness test (was: `if not len(codes)`).
        if not codes:
            return text
        prefix = ''.join('\x1B[' + code + 'm' for code in codes)
        return prefix + text + '\x1B[0m'
class DummyTextDecorator(AnsiTextDecorator):
    """No-op decorator for non-ANSI output: returns text unchanged."""
    def decorateText(self, text, *codes):
        # Ignore all codes; plain passthrough.
        return text
def makeTextDecorator(useAnsi=False):
    """Return an ANSI color decorator when *useAnsi* is set, else a no-op one."""
    return AnsiTextDecorator() if useAnsi else DummyTextDecorator()
class TextEscaper:
    """
    Escapes non-printable code points except space (0x20).
    """
    def escape(self, text):
        """Return *text* repr-escaped, with double quotes backslash-escaped."""
        return repr(text)[1:-1].replace('"', r'\"')
    def escapeLimitRight(self, text, maxLength):
        """Escape a prefix of *text* so the escaped form fits in maxLength.

        :return: (escapedText, consumedCodePoints)
        """
        if maxLength <= 0:
            return '', 0
        kept = text[:maxLength]
        escaped = self.escape(kept)
        # Shrink from the right until the escaped form fits.
        while len(escaped) > maxLength:
            kept = kept[:-1]
            escaped = self.escape(kept)
        return escaped, len(kept)
    def escapeLimitLeft(self, text, maxLength):
        """Escape a suffix of *text* so the escaped form fits in maxLength.

        :return: (escapedText, consumedCodePoints)
        """
        if maxLength <= 0:
            return '', 0
        kept = text[-maxLength:]
        escaped = self.escape(kept)
        # Shrink from the left until the escaped form fits.
        while len(escaped) > maxLength:
            kept = kept[1:]
            escaped = self.escape(kept)
        return escaped, len(kept)
# Eyes [:;,8B], optional nose [-~], 1-2 mouth symbols or [pPD], optional
# beard [=#]; must be delimited by whitespace or line boundaries.
_detectSmilieRe = re.compile(r'''(?:^|(?<=\s))
    [:;,8B][-~]?(?:[)}\]|({[]{1,2}|[pPD])[=\#]?
    (?:\s|$)''', re.VERBOSE)
def detectSmilie(line, offset):
    """
    Detects simple western LTR ASCII smilies like ";~P="
    A smilie starts with a symbol for the eyes, followed by an optional symbol
    for the nose and a symbol for the mouth.
    A symbol for the beard may follow.
    The smilie has to begin and end at the start/end of line or after/before
    whitespace.

    Returns True when a smilie starts at *offset* in *line*.
    """
    return _detectSmilieRe.match(line, offset) is not None
def checkForInvalidCodePoints(escaper, outputter, path, lineNr, line):
    """
    Report every code point in *line* that is not expected in wiki text:
    control characters, stray combining marks, and U+FFFD replacement
    characters (likely charset-conversion artifacts).
    """
    # True while we are inside a letter cluster, where combining marks are OK.
    markAllowed = False
    for cpIndex, cp in enumerate(line):
        anomaly = True
        unexpectedMark = False
        cpCat = unicodedata.category(cp)
        cpCatMain = cpCat[0]
        # Don't report letters, numbers, punctuation, symbols,
        # whitespace and some miscategorized whitespace:
        if cpCatMain in 'LNPSZ' or cp in (
            '\t',
            '\xad', # SOFT HYPHEN, category Cf
            '\u200d', # ZERO WIDTH JOINER, category Cf
            '\u200e', # LEFT-TO-RIGHT MARK, category Cf
            None
        ):
            # NOTE(review): the None member of the tuple above can never
            # match — cp is always a 1-character string here.
            anomaly = False
            # But report REPLACEMENT CHARACTER from category So, because
            # it most likely is a character set conversion artifact:
            if cp == '�':
                anomaly = True
        # Don't report marks following letters or other marks:
        if cpCatMain == 'M':
            if markAllowed:
                anomaly = False
            else:
                # Not in letter cluster.
                anomaly, unexpectedMark = True, True
        elif cpCatMain == 'L':
            markAllowed = True
        else:
            markAllowed = False
        if anomaly:
            cpName = unicodedata.name(cp, 'unnamed')
            if unexpectedMark:
                suffix = ' not preceded by a letter'
            else:
                suffix = ''
            msg = 'Unicode {0} ({1}, category {2}){3}'
            msg = msg.format(escaper.escape(cp), cpName, cpCat, suffix)
            outputter.out(path, lineNr, cpIndex, cpIndex + 1, line, msg)
# UseMod list markup: leading '*' bullets or '#'-prefixed variants.
_checkForUseModListRe = re.compile(r'(\*|#(\*|#([*#])))[*#]*')
def checkForUseModList(outputter, path, lineNr, line, isDirective, isComment):
    """Report UseMod-style list markup at the start of *line*.

    Returns the (isDirective, isComment) pair, both forced to False when
    list markup was found.
    """
    found = _checkForUseModListRe.match(line)
    if found is None:
        return isDirective, isComment
    outputter.out(path, lineNr, found.start(), found.end(), line, 'UseMod list')
    return False, False
# Captures the stripped non-whitespace payload of a line, if any.
_checkForNonCommentAfterRedirectRe = re.compile(r'\s*(\S.*?)\s*$')
def detectNonCommentAfterRedirect(outputter, path, lineNr, line):
    """Report any non-whitespace content on a line following a valid redirect.

    Returns True when such content was found and reported.
    """
    found = _checkForNonCommentAfterRedirectRe.match(line)
    if found is None:
        return False
    outputter.out(path, lineNr, found.start(1), found.end(1), line,
        'Non-empty non-comment line after valid redirect')
    return True
# '#REDIRECT', optional whitespace, then the (possibly empty) target name.
_detectRedirect = re.compile(r'#REDIRECT(\s*)(?P<name>.*)')
def detectRedirect(outputter, path, lineNr, line, firstDirectiveLine,
                   validRedirectPresent):
    """Detect a '#REDIRECT' directive and validate its placement and target.

    Returns (validRedirectPresent, matched): the flag becomes True only for
    a targeted redirect on the first directive line.
    """
    found = _detectRedirect.match(line)
    if found is None:
        return validRedirectPresent, False
    if not firstDirectiveLine:
        # Redirects are only honored on the first directive line.
        outputter.out(path, lineNr, 0, len(line), line,
            'Redirect in non-first line')
        return validRedirectPresent, True
    if not found.group('name'):
        outputter.out(path, lineNr, 0, len(line), line,
            'Redirect without target')
        return validRedirectPresent, True
    return True, True
def detectUseModIndent(outputter, path, lineNr, line):
    """Report UseMod ':' indentation, unless the line starts with a smilie.

    Returns True when indentation markup was found and reported.
    """
    if not line.startswith(':') or detectSmilie(line, 0):
        return False
    # Length of the leading run of ':' characters.
    colonRun = len(line) - len(line.lstrip(':'))
    outputter.out(path, lineNr, 0, colonRun, line, 'UseMod indentation')
    return True
def detectUseModDefinitionList(outputter, path, lineNr, line):
    """Report UseMod ';' definition-list markup, unless it is a smilie.

    Returns True when definition-list markup was found and reported.
    """
    if not line.startswith(';') or detectSmilie(line, 0):
        return False
    outputter.out(path, lineNr, 0, 1, line, 'UseMod definition list')
    return True
# UseMod inline tags: <b>, <i>, <nowiki>, <pre>, <toc>, <tt> and closers.
_detectUseModTagsRe = re.compile(r'''<(?P<close>[/]?)
    (?P<name>(b|i|nowiki|pre|toc|tt))
    >''', re.IGNORECASE | re.VERBOSE)
def detectUseModTags(outputter, path, lineNr, line):
    """Report every UseMod inline HTML-style tag found in *line*.

    Always returns False (the line may contain other anomalies too).
    """
    for found in _detectUseModTagsRe.finditer(line):
        tagName = found.group('name').lower()
        tagType = 'close' if found.group('close') else 'open'
        msg = 'UseMod tag {0} {1}'.format(tagName, tagType)
        outputter.out(path, lineNr, found.start(), found.end(), line, msg)
    return False
# Matches 'br' wrapped in any run of angle brackets / backticks, so both
# UseMod '<br>' and (possibly malformed) MoinMoin '<<br>>' forms are caught.
_checkBrTagsRe = re.compile(r'''
    (?P<open><[<`]*)
    (?P<name>br)
    (?P<close>[>`]*>)
    ''', re.IGNORECASE | re.VERBOSE)
def checkBrTags(outputter, path, lineNr, line):
    """
    UseMod forced linebreak: <br>
    MoinMoin forced linebreak: <<BR>>

    Reports the first offending linebreak tag and returns True; returns
    False when no anomaly was found.
    """
    matches = _checkBrTagsRe.finditer(line)
    for match in matches:
        start = match.start()
        end = match.end()
        tagOpen = match.group('open')
        tagName = match.group('name')
        tagClose = match.group('close')
        if (tagOpen == '<') and (tagClose == '>'):
            msg = 'UseMod forced linebreak'
            outputter.out(path, lineNr, start, end, line, msg)
            return True
        # Although the regex is case-insensitive, the comparison below is
        # case-sensitive on purpose: only uppercase '<<BR>>' is valid MoinMoin.
        if ((tagOpen == '<<') and (tagClose[0:2] == '>>')
                and (tagName != 'BR')):
            msg = 'Invalid MoinMoin forced linebreak'
            outputter.out(path, lineNr, start, end, line, msg)
            return True
    return False
_checkHeadlinesRe = re.compile(r'''
    (?P<spaceBeforOpen>\s*) # Illegal.
    (?P<openTag>[=]+) # Headline open tag.
    (?P<spaceAfterOpen>\s*) # Required.
    (?P<nIndicator>[\#*]*)\s* # Numbering from old wiki.
    (?P<text>.*?) # Required headline text (non-greedy).
    (?P<spaceBeforClose>\s*) # Required.
    (?P<closeTag>[=]*) # Has to be same as open tag.
    (?P<spaceAfterClose>\s*) # Illegal trailing whitespace.
    $''', re.VERBOSE)
def checkHeadlines(outputter, path, lineNr, line):
    """
    Report formatting anomalies in a '=' style wiki headline: leading
    whitespace, level > 5, markup or missing text, missing whitespace around
    tags, UseMod numbering indicators, and mismatched or absent close tags.

    Returns False when the line is not a headline at all; returns True when
    the text or close tag is missing (further checks are skipped).
    NOTE(review): when a close tag is present, the function falls off the end
    and implicitly returns None (falsy) instead of True — callers relying on
    truthiness will treat a fully checked headline like a non-headline.
    """
    match = _checkHeadlinesRe.match(line)
    if match is None:
        return False
    spaceBeforOpen = match.group('spaceBeforOpen')
    openTag = match.group('openTag')
    openTagStart = match.start('openTag')
    openTagEnd = match.end('openTag')
    spaceAfterOpen = match.group('spaceAfterOpen')
    nIndicator = match.group('nIndicator')
    text = match.group('text')
    spaceBeforClose = match.group('spaceBeforClose')
    closeTag = match.group('closeTag')
    spaceAfterClose = match.group('spaceAfterClose')
    if spaceBeforOpen:
        end = len(spaceBeforOpen)
        msg = 'Headline after whitespace'
        outputter.out(path, lineNr, 0, end, line, msg)
    if len(openTag) > 5:
        start = openTagStart
        end = openTagEnd
        msg = 'Headline of level > 5'
        outputter.out(path, lineNr, start, end, line, msg)
    if text:
        # Runs of 2+ backticks/apostrophes are wiki emphasis markup.
        iMatches = re.finditer(r"[`']{2,}", text)
        for iMatch in iMatches:
            start = match.start('text') + iMatch.start()
            end = match.start('text') + iMatch.end()
            msg = 'Headline contains markup'
            outputter.out(path, lineNr, start, end, line, msg)
    else:
        end = len(line)
        start = openTagEnd - 1
        msg = 'Headline contains no text'
        outputter.out(path, lineNr, start, end, line, msg)
        return True
    if not spaceAfterOpen:
        if nIndicator:
            start = match.start('nIndicator')
        else:
            start = match.start('text')
        msg = 'Headline without whitespace after open tag'
        outputter.out(path, lineNr, start, start + 1, line, msg)
    if nIndicator:
        start = match.start('nIndicator')
        end = match.end('nIndicator')
        msg = 'Headline with UseMod numbering indicator'
        outputter.out(path, lineNr, start, end, line, msg)
    if closeTag:
        if len(openTag) != len(closeTag):
            start = match.start('closeTag')
            end = match.end('closeTag')
            msg = ('Headline with different length open and close'
                + ' tags')
            outputter.out(path, lineNr, start, end, line, msg)
        if not spaceBeforClose:
            start = match.start('closeTag')
            msg = 'Headline without whitespace before close tag'
            outputter.out(path, lineNr, start, start + 1, line, msg)
        if spaceAfterClose:
            start = match.start('spaceAfterClose')
            end = match.end('spaceAfterClose')
            msg = 'Headline ends with whitespace'
            outputter.out(path, lineNr, start, end, line, msg)
    else:
        msg = 'Headline without close tag'
        outputter.out(path, lineNr, len(line)-1, len(line), line, msg)
        # Skip following checks when no close tag present.
        return True
# '[#name]' not directly preceded by '[' nor followed by ']'.
_detectUseModAnchorsRe = re.compile(r'(?:^|[^[])(\[#[^#\]]+\])(?:$|[^]])')
def detectUseModAnchors(outputter, path, lineNr, line):
    """Report every UseMod-style '[#name]' anchor found in *line*.

    Always returns False (the line may contain other anomalies too).
    """
    for found in _detectUseModAnchorsRe.finditer(line):
        outputter.out(path, lineNr, found.start(1), found.end(1), line,
            'UseMod anchor')
    return False
_checkLinksRe = re.compile(r'''
    (?P<openBrackets>\[[\[`]*) # Valid links got 2 brackets
    (?P<openQuote>"?) # Artifact from old wiki conversion
    \s*
    (?P<linkUrl>.*?) # Link URL (not greedy)
    \s*
    (?P<closeQuote>"?) # Artifact from old wiki conversion
    (?P<closeBrackets>[\]`]*\]) # Valid links got 2 brackets
    ''', re.IGNORECASE | re.VERBOSE)
def checkLinks(outputter, path, lineNr, line):
    """Report fail-converted UseMod links left over from the wiki migration.

    A quoted link body marks a broken internal link; a single-bracket link
    containing ':' marks a broken external link.  Always returns False.
    """
    for found in _checkLinksRe.finditer(line):
        if found.group('openQuote'):
            msg = 'Fail-converted unnamed internal UseMod link'
        elif len(found.group('openBrackets')) == 1 and ':' in found.group('linkUrl'):
            msg = 'Fail-converted external UseMod link'
        else:
            continue
        outputter.out(path, lineNr, found.start(), found.end(), line, msg)
    return False
_detectUseModUploadsRe = re.compile(r'(^|\s)(?P<link>upload:\S+)(\s|$)', re.I)
def detectUseModUploads(outputter, path, lineNr, line):
    """Report whitespace-delimited ``upload:`` links left over from UseMod.

    Always returns False so subsequent checks keep running on the line.
    """
    for hit in _detectUseModUploadsRe.finditer(line):
        outputter.out(path, lineNr, hit.start('link'), hit.end('link'),
                      line, 'UseMod upload link')
    return False
# noinspection PyUnusedLocal
def detectMoinMoinComment(outputter, path, lineNr, line):
    """Return True when *line* is a MoinMoin comment (starts with '##')."""
    return line[:2] == '##'
def makeCheckFile(checkFuns, cols, useAnsi):
    """Build a per-file check function closing over shared report state.

    checkFuns: sequence of per-line checkers with signature
    ``fun(outputter, path, lineNr, line) -> skipRemaining``.
    cols: terminal width used to size the report excerpts.
    useAnsi: enable ANSI colouring of the report.
    Returns ``checkFile(path) -> (reportText, countItems)``.
    """
    escaper = TextEscaper()
    decorator = makeTextDecorator(useAnsi)
    # 11 columns are reserved for the location prefix of each report line.
    maxPartLength = cols - 11
    outputter = AnomalyFormatter(escaper, decorator, maxPartLength)
    def checkFile(path):
        """Scan one file; return (report text, tuple of (name, count))."""
        # Read file and report broken UTF-8 encoding:
        with open(path, 'rb') as file:
            textBytes = file.read()
        decoder = codecs.getincrementaldecoder('utf-8')()
        lines, line, invalidEncoding = [], [], False
        # NOTE(review): `i` never reaches len(textBytes) + 1, so the decoder
        # is never flushed with final=True and a truncated trailing sequence
        # would go unreported -- presumably `len(textBytes) - 1` was meant.
        lastI = len(textBytes) + 1
        for i in range(0, len(textBytes)):
            try:
                # Feed the decoder one byte at a time so the exact offset of
                # an invalid byte is known when decoding fails.
                cp = decoder.decode(textBytes[i:i+1], i == lastI)
                if len(cp) != 0:
                    if cp == '\n':
                        # Strip the carriage return of a CRLF line ending.
                        if line[-1:] == ['\r']:
                            del line[-1]
                        lines.append(''.join(line))
                        line = []
                    else:
                        line.append(cp)
            except ValueError:
                invalidEncoding = True
                lineNr, cpIndex = len(lines) + 1, len(line)
                lineStr = ''.join(line)
                msg = 'UTF-8 invalid byte while decoding line!'
                outputter.out(path, lineNr, cpIndex, cpIndex + 1, lineStr, msg)
                break
        if invalidEncoding:
            # Stop at the first encoding error; later content is unusable.
            return outputter.getText(), tuple(outputter.getCounts().items())
        lines.append(''.join(line))
        firstDirectiveLine = 1
        validRedirectPresent = False
        for lineNr, line in enumerate(lines):
            # NOTE(review): enumerate() yields 0-based line numbers here
            # while the encoding-error report above is 1-based -- confirm
            # which convention the outputter expects.
            isComment = detectMoinMoinComment(outputter, path, lineNr, line)
            isDirective = not isComment and line.startswith('#')
            checkForInvalidCodePoints(escaper, outputter, path, lineNr
                , line)
            isDirective, isComment = checkForUseModList(outputter, path
                , lineNr, line, isDirective, isComment)
            # No further wiki syntax checks for comments:
            if isComment:
                continue
            # Determine first directive line
            # NOTE(review): dead branch -- comments were skipped by the
            # `continue` above, so `isComment` is always False here.
            if (firstDirectiveLine == lineNr) and isComment:
                firstDirectiveLine += 1
            # Detect extra non-comment markup after valid redirect:
            if validRedirectPresent and not isDirective:
                skipRemaining = detectNonCommentAfterRedirect(outputter, path
                    , lineNr, line)
                if skipRemaining:
                    continue
            validRedirectPresent, skipRemaining = detectRedirect(outputter, path
                , lineNr, line, firstDirectiveLine, validRedirectPresent)
            if skipRemaining:
                continue
            if isDirective:
                # Skip other directives.
                continue
            for checkFun in checkFuns:
                skipRemaining = checkFun(outputter, path, lineNr, line)
                # NOTE(review): this `continue` only affects the inner loop;
                # `break` was probably intended to skip the remaining checks.
                if skipRemaining:
                    continue
        return outputter.getText(), tuple(outputter.getCounts().items())
    return checkFile
def workerProc(termE:Event, jobs:Queue, results:Queue, workerFactory, *args):
    """Worker loop: pull jobs from *jobs* and push ``(job, result)`` pairs.

    ``workerFactory(*args)`` builds the per-worker work function once.  The
    queue is polled with a short timeout so the loop notices *termE* being
    set; KeyboardInterrupt is swallowed so the parent controls shutdown.
    """
    try:
        work = workerFactory(*args)
        while not termE.is_set():
            try:
                item = jobs.get(True, 0.02)
            except queue.Empty:
                continue
            results.put((item, work(item)), True)
    except KeyboardInterrupt:
        pass
def handleResults(results:Queue, counts:Counter):
    """Drain *results* without blocking, printing reports and summing counts.

    Each queue entry is ``(job, (reportText, countItems))``; the file
    counter is bumped per entry and every (name, count) pair is folded
    into *counts*.  Returns once the queue is momentarily empty.
    """
    while True:
        try:
            job, (reportText, countItems) = results.get(False)
        except queue.Empty:
            return
        counts['fileCount'] += 1
        if reportText:
            print(reportText, end='')
        for name, value in countItems:
            counts[name] += value
def main():
    """Scan the wiki source directory in parallel and print a report."""
    # Per-line checkers, applied in this order to every non-comment line.
    checkFuns = (
        detectUseModIndent,
        detectUseModDefinitionList,
        detectUseModTags,
        checkBrTags,
        checkHeadlines,
        checkLinks,
        detectUseModAnchors,
        detectUseModUploads,
        )
    if sys.stdout.isatty() and (platform.system() != 'Windows'):
        import subprocess
        # Ask the terminal for its width via tput; fall back to 80 below.
        cols = int(subprocess.Popen(('tput', 'cols'),
            stdout=subprocess.PIPE).stdout.read())
        if cols <= 0:
            cols = 80
        useAnsi = True
    else:
        cols, useAnsi = 80, False
    # One worker per CPU available to this process, at least one.
    workerCount = max(1, len(os.sched_getaffinity(0)))
    termE = Event()
    # Bounded queues keep memory flat while producer and consumers run.
    jobs = Queue(maxsize=2*workerCount)
    results = Queue(maxsize=2*workerCount)
    workerArgs = termE, jobs, results, makeCheckFile, checkFuns, cols, useAnsi
    workerPool = [Process(target=workerProc, args=workerArgs)
        for _ in range(0, workerCount)]
    for worker in workerPool:
        worker.start()
    counts = Counter()
    blistedCount = 0
    try:
        print('Scanning files...')
        # NOTE(review): `sourceDir` and `blacklist` are module globals
        # defined near the top of the file.
        paths = glob.iglob(os.path.join(sourceDir, "*.txt"))
        for path in paths:
            if not os.path.isfile(path):
                continue
            if path in blacklist:
                blistedCount += 1
                continue
            # Keep draining results while waiting for a free job slot so
            # neither bounded queue can deadlock when both fill up.
            while True:
                handleResults(results, counts)
                try:
                    jobs.put(path, True, 0.02)
                    break
                except queue.Full:
                    pass
        # All paths queued; wait for the workers to empty the job queue.
        while not jobs.empty():
            handleResults(results, counts)
            time.sleep(0.02)
    except KeyboardInterrupt:
        print('')
        print('Processing interrupted by user!')
    # Signal shutdown, then drain results until every worker has exited.
    termE.set()
    while any(worker.is_alive() for worker in workerPool):
        handleResults(results, counts)
        time.sleep(0.02)
    for worker in workerPool:
        worker.join()
    handleResults(results, counts)
    decorator = makeTextDecorator(useAnsi)
    # Pull out the aggregate counters so only per-anomaly names remain.
    fileCount, anomalyCount = counts['fileCount'], counts['anomalyCount']
    pathCount, lineCount = counts['pathCount'], counts['lineCount']
    del counts['fileCount']
    del counts['anomalyCount']
    del counts['pathCount']
    del counts['lineCount']
    eFileCount = decorator.decorateText(str(fileCount), decorator.textBYellow)
    eBlistedCount = decorator.decorateText(str(blistedCount), decorator.textBYellow)
    if anomalyCount != 0:
        eAnomalyCount = decorator.decorateText(str(anomalyCount), decorator.textBYellow)
        eLineCount = decorator.decorateText(str(lineCount), decorator.textBYellow)
        ePathCount = decorator.decorateText(str(pathCount), decorator.textBYellow)
        msg = ('Found {0} anomalies in {1} lines from {2} files'
            + ' ({3} scanned, {4} excluded):')
        print('')
        print(msg.format(eAnomalyCount, eLineCount, ePathCount, eFileCount
            , eBlistedCount))
        # Right-align the per-anomaly counts on the widest value.
        maxValueLen = len(str(max(counts.values())))
        for name, count in sorted(counts.items()):
            eCount = '{0:{1}}'.format(count, maxValueLen)
            eCount = decorator.decorateText(eCount, decorator.textBYellow)
            print(' {0} {1}'.format(eCount, name))
    else:
        msg = 'Found no anomalies in {0} files ({1} excluded).'
        print('')
        print(msg.format(fileCount, eBlistedCount))
if __name__ == '__main__':
    main()
| [
"re.compile",
"multiprocessing.Process",
"time.sleep",
"re.search",
"subprocess.Popen",
"platform.system",
"re.finditer",
"multiprocessing.Event",
"os.path.isfile",
"os.path.dirname",
"sys.stdout.isatty",
"multiprocessing.Queue",
"codecs.getincrementaldecoder",
"os.path.join",
"collectio... | [((254, 279), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (269, 279), False, 'import os\n'), ((293, 324), 'os.path.join', 'os.path.join', (['baseDir', '"""backup"""'], {}), "(baseDir, 'backup')\n", (305, 324), False, 'import os\n'), ((5984, 6094), 're.compile', 're.compile', (['"""(?:^|(?<=\\\\s))\n[:;,8B][-~]?(?:[)}\\\\]|({[]{1,2}|[pPD])[=\\\\#]?\n(?:\\\\s|$)"""', 're.VERBOSE'], {}), '(\n """(?:^|(?<=\\\\s))\n[:;,8B][-~]?(?:[)}\\\\]|({[]{1,2}|[pPD])[=\\\\#]?\n(?:\\\\s|$)"""\n , re.VERBOSE)\n', (5994, 6094), False, 'import re\n'), ((8145, 8184), 're.compile', 're.compile', (['"""(\\\\*|#(\\\\*|#([*#])))[*#]*"""'], {}), "('(\\\\*|#(\\\\*|#([*#])))[*#]*')\n", (8155, 8184), False, 'import re\n'), ((8566, 8597), 're.compile', 're.compile', (['"""\\\\s*(\\\\S.*?)\\\\s*$"""'], {}), "('\\\\s*(\\\\S.*?)\\\\s*$')\n", (8576, 8597), False, 'import re\n'), ((8972, 9013), 're.compile', 're.compile', (['"""#REDIRECT(\\\\s*)(?P<name>.*)"""'], {}), "('#REDIRECT(\\\\s*)(?P<name>.*)')\n", (8982, 9013), False, 'import re\n'), ((10142, 10245), 're.compile', 're.compile', (['"""<(?P<close>[/]?)\n(?P<name>(b|i|nowiki|pre|toc|tt))\n>"""', '(re.IGNORECASE | re.VERBOSE)'], {}), '("""<(?P<close>[/]?)\n(?P<name>(b|i|nowiki|pre|toc|tt))\n>""", re.\n IGNORECASE | re.VERBOSE)\n', (10152, 10245), False, 'import re\n'), ((10714, 10814), 're.compile', 're.compile', (['"""\n(?P<open><[<`]*)\n(?P<name>br)\n(?P<close>[>`]*>)\n"""', '(re.IGNORECASE | re.VERBOSE)'], {}), '("""\n(?P<open><[<`]*)\n(?P<name>br)\n(?P<close>[>`]*>)\n""", re.\n IGNORECASE | re.VERBOSE)\n', (10724, 10814), False, 'import re\n'), ((11640, 12046), 're.compile', 're.compile', (['"""\n(?P<spaceBeforOpen>\\\\s*) # Illegal.\n(?P<openTag>[=]+) # Headline open tag.\n(?P<spaceAfterOpen>\\\\s*) # Required.\n(?P<nIndicator>[\\\\#*]*)\\\\s* # Numbering from old wiki.\n(?P<text>.*?) 
# Required headline text (non-greedy).\n(?P<spaceBeforClose>\\\\s*) # Required.\n(?P<closeTag>[=]*) # Has to be same as open tag.\n(?P<spaceAfterClose>\\\\s*) # Illegal trailing whitespace.\n$"""', 're.VERBOSE'], {}), '(\n """\n(?P<spaceBeforOpen>\\\\s*) # Illegal.\n(?P<openTag>[=]+) # Headline open tag.\n(?P<spaceAfterOpen>\\\\s*) # Required.\n(?P<nIndicator>[\\\\#*]*)\\\\s* # Numbering from old wiki.\n(?P<text>.*?) # Required headline text (non-greedy).\n(?P<spaceBeforClose>\\\\s*) # Required.\n(?P<closeTag>[=]*) # Has to be same as open tag.\n(?P<spaceAfterClose>\\\\s*) # Illegal trailing whitespace.\n$"""\n , re.VERBOSE)\n', (11650, 12046), False, 'import re\n'), ((14894, 14945), 're.compile', 're.compile', (['"""(?:^|[^[])(\\\\[#[^#\\\\]]+\\\\])(?:$|[^]])"""'], {}), "('(?:^|[^[])(\\\\[#[^#\\\\]]+\\\\])(?:$|[^]])')\n", (14904, 14945), False, 'import re\n'), ((15259, 15593), 're.compile', 're.compile', (['"""\n(?P<openBrackets>\\\\[[\\\\[`]*) # Valid links got 2 brackets\n(?P<openQuote>"?) # Artifact from old wiki conversion\n\\\\s*\n(?P<linkUrl>.*?) # Link URL (not greedy)\n\\\\s*\n(?P<closeQuote>"?) # Artifact from old wiki conversion\n(?P<closeBrackets>[\\\\]`]*\\\\]) # Valid links got 2 brackets\n"""', '(re.IGNORECASE | re.VERBOSE)'], {}), '(\n """\n(?P<openBrackets>\\\\[[\\\\[`]*) # Valid links got 2 brackets\n(?P<openQuote>"?) # Artifact from old wiki conversion\n\\\\s*\n(?P<linkUrl>.*?) # Link URL (not greedy)\n\\\\s*\n(?P<closeQuote>"?) 
# Artifact from old wiki conversion\n(?P<closeBrackets>[\\\\]`]*\\\\]) # Valid links got 2 brackets\n"""\n , re.IGNORECASE | re.VERBOSE)\n', (15269, 15593), False, 'import re\n'), ((16307, 16362), 're.compile', 're.compile', (['"""(^|\\\\s)(?P<link>upload:\\\\S+)(\\\\s|$)"""', 're.I'], {}), "('(^|\\\\s)(?P<link>upload:\\\\S+)(\\\\s|$)', re.I)\n", (16317, 16362), False, 'import re\n'), ((21196, 21203), 'multiprocessing.Event', 'Event', ([], {}), '()\n', (21201, 21203), False, 'from multiprocessing import Process, Event, Queue\n'), ((21215, 21245), 'multiprocessing.Queue', 'Queue', ([], {'maxsize': '(2 * workerCount)'}), '(maxsize=2 * workerCount)\n', (21220, 21245), False, 'from multiprocessing import Process, Event, Queue\n'), ((21258, 21288), 'multiprocessing.Queue', 'Queue', ([], {'maxsize': '(2 * workerCount)'}), '(maxsize=2 * workerCount)\n', (21263, 21288), False, 'from multiprocessing import Process, Event, Queue\n'), ((21534, 21543), 'collections.Counter', 'Counter', ([], {}), '()\n', (21541, 21543), False, 'from collections import Counter\n'), ((935, 944), 'collections.Counter', 'Counter', ([], {}), '()\n', (942, 944), False, 'from collections import Counter\n'), ((3967, 3976), 'collections.Counter', 'Counter', ([], {}), '()\n', (3974, 3976), False, 'from collections import Counter\n'), ((6709, 6733), 'unicodedata.category', 'unicodedata.category', (['cp'], {}), '(cp)\n', (6729, 6733), False, 'import unicodedata\n'), ((12968, 12997), 're.finditer', 're.finditer', (['"""[`\']{2,}"""', 'text'], {}), '("[`\']{2,}", text)\n', (12979, 12997), False, 'import re\n'), ((20828, 20847), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (20845, 20847), False, 'import sys\n'), ((21384, 21427), 'multiprocessing.Process', 'Process', ([], {'target': 'workerProc', 'args': 'workerArgs'}), '(target=workerProc, args=workerArgs)\n', (21391, 21427), False, 'from multiprocessing import Process, Event, Queue\n'), ((22404, 22420), 'time.sleep', 'time.sleep', (['(0.02)'], 
{}), '(0.02)\n', (22414, 22420), False, 'import time\n'), ((7758, 7789), 'unicodedata.name', 'unicodedata.name', (['cp', '"""unnamed"""'], {}), "(cp, 'unnamed')\n", (7774, 7789), False, 'import unicodedata\n'), ((16098, 16121), 're.search', 're.search', (['""":"""', 'linkUrl'], {}), "(':', linkUrl)\n", (16107, 16121), False, 'import re\n'), ((17184, 17221), 'codecs.getincrementaldecoder', 'codecs.getincrementaldecoder', (['"""utf-8"""'], {}), "('utf-8')\n", (17212, 17221), False, 'import codecs\n'), ((20853, 20870), 'platform.system', 'platform.system', ([], {}), '()\n', (20868, 20870), False, 'import platform\n'), ((21158, 21181), 'os.sched_getaffinity', 'os.sched_getaffinity', (['(0)'], {}), '(0)\n', (21178, 21181), False, 'import os\n'), ((21636, 21668), 'os.path.join', 'os.path.join', (['sourceDir', '"""*.txt"""'], {}), "(sourceDir, '*.txt')\n", (21648, 21668), False, 'import os\n'), ((22168, 22184), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (22178, 22184), False, 'import time\n'), ((21716, 21736), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (21730, 21736), False, 'import os\n'), ((1394, 1416), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1410, 1416), False, 'import os\n'), ((20931, 20989), 'subprocess.Popen', 'subprocess.Popen', (["('tput', 'cols')"], {'stdout': 'subprocess.PIPE'}), "(('tput', 'cols'), stdout=subprocess.PIPE)\n", (20947, 20989), False, 'import subprocess\n')] |
import subprocess
import pytest
from build.platform.python.tests import testlib
PYTHON_VERSIONS = ["2.7", "3.4", "3.5", "3.6"] # 3.7, 3.8 are not runnable
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_version_matched(pyver):
    # The bundled interpreter must report the version it was built as.
    testlib.check_python_version(pyver)
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_python_max_unicode_bytes(pyver):
    # A UCS4 ("wide") build reports sys.maxunicode == 0x10FFFF; a UCS2
    # build reports 65535 and would mishandle non-BMP characters.
    cmd = [testlib.get_python_bin(pyver), '-c', 'import sys; print(sys.maxunicode)']
    maxunicode = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')
    assert int(maxunicode) > 65535, "Found UCS2 build"
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_python_imports(pyver):
    # Modules to smoke-test per bundled version; empty lists still pin the
    # version key so every parametrized case stays valid.
    imports = {
        "2.7": ['pkg_resources'],
        "3.4": [],
        "3.5": ['pkg_resources'],
        "3.6": [],
    }
    for imp in imports[pyver]:
        # check_call raises CalledProcessError (failing the test) when the
        # import does not succeed inside the target interpreter.
        subprocess.check_call([testlib.get_python_bin(pyver), '-c', 'import ' + imp])
| [
"subprocess.check_output",
"pytest.mark.parametrize",
"build.platform.python.tests.testlib.get_python_bin",
"build.platform.python.tests.testlib.check_python_version"
] | [((162, 211), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pyver"""', 'PYTHON_VERSIONS'], {}), "('pyver', PYTHON_VERSIONS)\n", (185, 211), False, 'import pytest\n'), ((288, 337), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pyver"""', 'PYTHON_VERSIONS'], {}), "('pyver', PYTHON_VERSIONS)\n", (311, 337), False, 'import pytest\n'), ((611, 660), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pyver"""', 'PYTHON_VERSIONS'], {}), "('pyver', PYTHON_VERSIONS)\n", (634, 660), False, 'import pytest\n'), ((249, 284), 'build.platform.python.tests.testlib.check_python_version', 'testlib.check_python_version', (['pyver'], {}), '(pyver)\n', (277, 284), False, 'from build.platform.python.tests import testlib\n'), ((391, 420), 'build.platform.python.tests.testlib.get_python_bin', 'testlib.get_python_bin', (['pyver'], {}), '(pyver)\n', (413, 420), False, 'from build.platform.python.tests import testlib\n'), ((482, 536), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'stderr': 'subprocess.STDOUT'}), '(cmd, stderr=subprocess.STDOUT)\n', (505, 536), False, 'import subprocess\n'), ((883, 912), 'build.platform.python.tests.testlib.get_python_bin', 'testlib.get_python_bin', (['pyver'], {}), '(pyver)\n', (905, 912), False, 'from build.platform.python.tests import testlib\n')] |
from __future__ import print_function
import numpy as np
from scipy import sparse
from scipy.interpolate import griddata
def fast_histogram2d(x, y, bins=10, weights=None, reduce_w=None, NULL=None,
                     reinterp=None):
    """
    Compute the sparse bi-dimensional histogram of two data samples where *x*,
    and *y* are 1-D sequences of the same length. If *weights* is None
    (default), this is a histogram of the number of occurences of the
    observations at (x[i], y[i]).
    If *weights* is specified, it specifies values at the coordinate (x[i],
    y[i]). These values are accumulated for each bin and then reduced according
    to *reduce_w* function, which defaults to numpy's sum function (np.sum).
    (If *weights* is specified, it must also be a 1-D sequence of the same
    length as *x* and *y*.)

    Parameters
    ----------
    x: array-like[ndim=1]
        first data sample coordinates
    y: array-like[ndim=1]
        second data sample coordinates
    bins: int or [int, int]
        int, the number of bins for the two dimensions (nx=ny=bins)
        or [int, int], the number of bins in each dimension (nx, ny = bins)
    weights: array-like[ndim=1]
        values *w_i* weighing each sample *(x_i, y_i)*
        accumulated and reduced (using reduced_w) per bin
    reduce_w: callable
        function that will reduce the *weights* values accumulated per bin
        defaults to numpy's sum function (np.sum)
    NULL: value type
        filling missing data value
    reinterp: str in {'linear', 'nearest', 'cubic'}, optional
        Method of interpolation.
        if set, reinterpolation is made using scipy.interpolate.griddata to
        fill missing data within the convex polygone that encloses the data

    Returns
    -------
    B: ndarray[ndim=2]
        bi-dimensional histogram
    extent: tuple(4)
        (xmin, xmax, ymin, ymax) extension of the histogram
    steps: tuple(2)
        (dx, dy) bin size in x and y direction

    Raises
    ------
    TypeError
        if *reduce_w* is not callable
    ValueError
        if x, y and weights end up with mismatched lengths
    """
    # define the bins (do anything you want here but needs edges and sizes of
    # the 2d bins)
    try:
        nx, ny = bins
    except TypeError:
        nx = ny = bins
    # accept plain sequences as well as ndarrays
    x = np.asarray(x)
    y = np.asarray(y)
    # values you want to be reported
    if weights is None:
        weights = np.ones(x.size)
    if reduce_w is None:
        reduce_w = np.sum
    else:
        if not hasattr(reduce_w, '__call__'):
            raise TypeError('reduce function is not callable')
    # culling nans
    finite_inds = (np.isfinite(x) & np.isfinite(y) & np.isfinite(weights))
    # cast to float so the in-place normalization below is well defined for
    # integer inputs as well
    _x = np.asarray(x, dtype=float)[finite_inds]
    _y = np.asarray(y, dtype=float)[finite_inds]
    _w = np.asarray(weights)[finite_inds]
    if not (len(_x) == len(_y) == len(_w)):
        raise ValueError('Shape mismatch between x, y, and weights: {}, {}, {}'.format(_x.shape, _y.shape, _w.shape))
    xmin, xmax = _x.min(), _x.max()
    ymin, ymax = _y.min(), _y.max()
    dx = (xmax - xmin) / (nx - 1.0)
    dy = (ymax - ymin) / (ny - 1.0)
    # Basically, this is just doing what np.digitize does with one less copy
    xyi = np.vstack((_x, _y)).T
    xyi -= [xmin, ymin]
    xyi /= [dx, dy]
    xyi = np.floor(xyi, xyi).T
    # xyi contains the bins of each point as a 2d array [(xi,yi)]
    # accumulate the weights falling into each occupied bin
    d = {}
    for e, k in enumerate(xyi.T):
        key = (k[0], k[1])
        if key in d:
            d[key].append(_w[e])
        else:
            d[key] = [_w[e]]
    # coo_matrix needs integer coordinates; the values are exact after floor
    _xyi = np.array(list(d.keys())).T.astype(int)
    _w = np.array([reduce_w(v) for v in d.values()])
    # exploit a sparse coo_matrix to build the 2D histogram...
    _grid = sparse.coo_matrix((_w, _xyi), shape=(nx, ny))
    if reinterp is None:
        # convert sparse to array with filled value
        # grid.toarray() does not account for filled value
        # sparse.coo.coo_todense() does actually add the values to the existing
        # ones, i.e. not what we want -> brute force
        if NULL is None:
            B = _grid.toarray()
        else:  # Brute force only when needed
            B = np.zeros(_grid.shape, dtype=_grid.dtype)
            B.fill(NULL)
            for (c, r, v) in zip(_grid.col, _grid.row, _grid.data):
                B[r, c] = v
    else:  # reinterp
        xi = np.arange(nx, dtype=float)
        yi = np.arange(ny, dtype=float)
        # scipy.interpolate.griddata has no 'interp=' keyword (that was the
        # removed matplotlib.mlab.griddata API): it takes (points, values,
        # target, method=...).  Build the full target grid explicitly;
        # axes follow the original convention (col -> x, row -> y).
        Xi, Yi = np.meshgrid(xi, yi)
        points = np.column_stack((_grid.col.astype(float),
                                  _grid.row.astype(float)))
        B = griddata(points, _grid.data, (Xi, Yi), method=reinterp)
    return B, (xmin, xmax, ymin, ymax), (dx, dy)
def bayesian_blocks(t):
    """Bayesian Blocks bin edges for a 1-D sample.

    Dynamic-programming search for the optimal piecewise-constant
    segmentation following the algorithm outlined in
    http://adsabs.harvard.edu/abs/2012arXiv1207.5578S

    Parameters
    ----------
    t : ndarray, length N
        data to be histogrammed

    Returns
    -------
    bins : ndarray
        array containing the (N+1) bin edges

    Notes
    -----
    Incomplete implementation: it may fail for some datasets.  Alternate
    fitness functions and prior forms can be found in the paper above.
    """
    data = np.sort(t)
    n = data.size
    # Cell edges: the data extremes plus midpoints between neighbours.
    cell_edges = np.concatenate([data[:1], 0.5 * (data[1:] + data[:-1]), data[-1:]])
    suffix_length = data[-1] - cell_edges
    # Per-cell counts and the DP tables for the forward pass.
    cell_counts = np.ones(n)
    best_fitness = np.zeros(n, dtype=float)
    last_change = np.zeros(n, dtype=int)
    # Forward pass: add one cell at a time, keeping the best fitness and
    # the change point achieving it.
    for k in range(n):
        # Width and population of the final block for every candidate
        # location of the k-th change point.
        widths = suffix_length[:k + 1] - suffix_length[k + 1]
        populations = np.cumsum(cell_counts[:k + 1][::-1])[::-1]
        # Evaluate the fitness function for these possibilities.
        fitness = populations * (np.log(populations) - np.log(widths))
        fitness -= 4  # prior on the number of change points
        fitness[1:] += best_fitness[:k]
        winner = np.argmax(fitness)
        last_change[k] = winner
        best_fitness[k] = fitness[winner]
    # Backward pass: peel blocks off the end to recover the change points.
    change_points = np.zeros(n, dtype=int)
    cursor = n
    index = n
    while True:
        cursor -= 1
        change_points[cursor] = index
        if index == 0:
            break
        index = last_change[index - 1]
    return cell_edges[change_points[cursor:]]
def optbins(data, method='freedman', ret='N'):
    """Estimate the optimal histogram binning of *data*.

    Parameters
    ----------
    data : array-like
        1-d dataset to estimate from.
    method : str
        one of {'sturge', 'scott', 'freedman', 'bayesian'}.
    ret : str
        'N' returns the number of bins (or edges for 'bayesian'),
        'W' returns the bin width(s).

    Returns
    -------
    int, float, ndarray or None
        the requested quantity, or None for an unknown method / selector.

    References
    ----------
    Sturges (1926), J. American Statistical Association, 65-66;
    Scott (1979), Biometrika 66, 605-610;
    Freedman & Diaconis (1981), Z. Wahrscheinlichkeitstheorie 57, 453-476;
    Scargle et al. (2012), "Studies in Astronomical Time Series Analysis VI".
    """
    sample = np.asarray(data)
    size = sample.size
    span = sample.max() - sample.min()

    def _sturge():
        # Log-based rule; unreliable on very small samples.
        if size <= 30:
            print("Warning: Sturge estimator can perform poorly for small samples")
        count = int(np.log(size) + 1)
        return span / count, count

    def _scott():
        # Width proportional to the standard deviation, shrinking as n^(1/3).
        width = 3.5 * np.std(sample) * float(size) ** (-1. / 3.)
        return width, int(span / width)

    def _freedman():
        # IQR-based rule, robust against outliers.
        q = quantiles(sample, [25, 75])
        width = 2 * (q[75] - q[25]) * float(size) ** (-1. / 3.)
        return width, int(span / width)

    def _bayesian():
        # Variable-width edges from the Bayesian-blocks segmentation.
        edges = bayesian_blocks(sample)
        return np.diff(edges), edges

    estimators = {'sturge': _sturge, 'scott': _scott,
                  'freedman': _freedman, 'bayesian': _bayesian}
    chosen = estimators.get(method.lower())
    if chosen is None:
        return None
    width, count = chosen()
    selector = ret.lower()
    if selector == 'n':
        return count
    if selector == 'w':
        return width
    return None
def quantiles(x, qlist=(2.5, 25, 50, 75, 97.5)):
    """computes quantiles from an array

    Quantiles := points taken at regular intervals from the cumulative
    distribution function (CDF) of a random variable.  The quantile with a
    fraction 50 is the median (50% of the distribution).

    Parameters
    ----------
    x : array-like
        variable to evaluate from; for multivariate input (ndim > 1) each
        variable is sorted independently along the first axis.
    qlist : sequence of float
        quantile fractions to estimate (in %).  Note: the default is a
        tuple rather than a list to avoid the shared-mutable-default
        pitfall; any iterable of fractions is accepted.

    Returns
    -------
    dict or None
        mapping of each requested fraction to its quantile value, or None
        (after printing a warning) when *x* has too few elements.
    """
    # np.asarray accepts plain sequences as well as ndarrays.  The sorts
    # below always return a copy, so the caller's data is never mutated
    # (the previous explicit .copy() was redundant).
    x = np.asarray(x)
    # For multivariate node
    if x.ndim > 1:
        # Transpose first, then sort, then transpose back
        sx = np.transpose(np.sort(np.transpose(x)))
    else:
        # Sort univariate node
        sx = np.sort(x)
    try:
        # Nearest-rank style lookup at index len(sx) * q / 100.
        quants = [sx[int(len(sx) * q / 100.0)] for q in qlist]
        return dict(zip(qlist, quants))
    except IndexError:
        # Preserved best-effort behaviour: warn and implicitly return None.
        print("Too few elements for quantile calculation")
| [
"numpy.ones",
"numpy.sort",
"numpy.log",
"numpy.asarray",
"numpy.floor",
"numpy.argmax",
"numpy.diff",
"numpy.std",
"numpy.array",
"numpy.zeros",
"numpy.isfinite",
"numpy.vstack",
"numpy.concatenate",
"scipy.sparse.coo_matrix",
"numpy.cumsum",
"numpy.transpose",
"numpy.arange"
] | [((3555, 3600), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(_w, _xyi)'], {'shape': '(nx, ny)'}), '((_w, _xyi), shape=(nx, ny))\n', (3572, 3600), False, 'from scipy import sparse\n'), ((5274, 5284), 'numpy.sort', 'np.sort', (['t'], {}), '(t)\n', (5281, 5284), True, 'import numpy as np\n'), ((5361, 5416), 'numpy.concatenate', 'np.concatenate', (['[t[:1], 0.5 * (t[1:] + t[:-1]), t[-1:]]'], {}), '([t[:1], 0.5 * (t[1:] + t[:-1]), t[-1:]])\n', (5375, 5416), True, 'import numpy as np\n'), ((5502, 5512), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (5509, 5512), True, 'import numpy as np\n'), ((5524, 5548), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'float'}), '(N, dtype=float)\n', (5532, 5548), True, 'import numpy as np\n'), ((5560, 5582), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (5568, 5582), True, 'import numpy as np\n'), ((6680, 6702), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (6688, 6702), True, 'import numpy as np\n'), ((7915, 7931), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (7925, 7931), True, 'import numpy as np\n'), ((2248, 2263), 'numpy.ones', 'np.ones', (['x.size'], {}), '(x.size)\n', (2255, 2263), True, 'import numpy as np\n'), ((2508, 2528), 'numpy.isfinite', 'np.isfinite', (['weights'], {}), '(weights)\n', (2519, 2528), True, 'import numpy as np\n'), ((2539, 2552), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2549, 2552), True, 'import numpy as np\n'), ((2575, 2588), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (2585, 2588), True, 'import numpy as np\n'), ((2611, 2630), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (2621, 2630), True, 'import numpy as np\n'), ((3052, 3071), 'numpy.vstack', 'np.vstack', (['(_x, _y)'], {}), '((_x, _y))\n', (3061, 3071), True, 'import numpy as np\n'), ((3128, 3146), 'numpy.floor', 'np.floor', (['xyi', 'xyi'], {}), '(xyi, xyi)\n', (3136, 3146), True, 'import numpy as np\n'), ((4187, 4213), 
'numpy.arange', 'np.arange', (['nx'], {'dtype': 'float'}), '(nx, dtype=float)\n', (4196, 4213), True, 'import numpy as np\n'), ((4227, 4253), 'numpy.arange', 'np.arange', (['ny'], {'dtype': 'float'}), '(ny, dtype=float)\n', (4236, 4253), True, 'import numpy as np\n'), ((6370, 6388), 'numpy.argmax', 'np.argmax', (['fit_vec'], {}), '(fit_vec)\n', (6379, 6388), True, 'import numpy as np\n'), ((9775, 9785), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (9782, 9785), True, 'import numpy as np\n'), ((2474, 2488), 'numpy.isfinite', 'np.isfinite', (['x'], {}), '(x)\n', (2485, 2488), True, 'import numpy as np\n'), ((2491, 2505), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (2502, 2505), True, 'import numpy as np\n'), ((3990, 4030), 'numpy.zeros', 'np.zeros', (['_grid.shape'], {'dtype': '_grid.dtype'}), '(_grid.shape, dtype=_grid.dtype)\n', (3998, 4030), True, 'import numpy as np\n'), ((6011, 6042), 'numpy.cumsum', 'np.cumsum', (['nn_vec[:K + 1][::-1]'], {}), '(nn_vec[:K + 1][::-1])\n', (6020, 6042), True, 'import numpy as np\n'), ((8504, 8514), 'numpy.diff', 'np.diff', (['r'], {}), '(r)\n', (8511, 8514), True, 'import numpy as np\n'), ((4591, 4609), 'numpy.array', 'np.array', (['[xi, yi]'], {}), '([xi, yi])\n', (4599, 4609), True, 'import numpy as np\n'), ((6141, 6158), 'numpy.log', 'np.log', (['count_vec'], {}), '(count_vec)\n', (6147, 6158), True, 'import numpy as np\n'), ((6161, 6174), 'numpy.log', 'np.log', (['width'], {}), '(width)\n', (6167, 6174), True, 'import numpy as np\n'), ((8114, 8123), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (8120, 8123), True, 'import numpy as np\n'), ((8203, 8212), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (8209, 8212), True, 'import numpy as np\n'), ((9703, 9718), 'numpy.transpose', 'np.transpose', (['x'], {}), '(x)\n', (9715, 9718), True, 'import numpy as np\n')] |
"""
Holt Winter Anomaly detection Example
In this example we will look into how to create holt winters model and build an anomaly detection model
in less than 4 steps.
"""
if __name__ == '__main__':
from pytsal import anomaly, forecasting
from pytsal.dataset import *
# 1. Load the dataset
ts_with_anomaly = load_airline_with_anomaly()
# 2. Forecasting model
# 2.a Load existing forecasting model
model = forecasting.load_model()
# 2.b Create new model
if model is None:
ts = load_airline()
model = forecasting.setup(ts, 'holtwinter', eda=False, validation=False, find_best_model=True,
plot_model_comparison=False)
trained_model = forecasting.finalize(ts, model)
forecasting.save_model(trained_model)
model = forecasting.load_model()
# 3. brutlag algorithm finds and returns the anomaly points
anomaly_points = anomaly.setup(ts_with_anomaly, model, 'brutlag')
print(anomaly_points)
| [
"pytsal.forecasting.setup",
"pytsal.forecasting.load_model",
"pytsal.forecasting.finalize",
"pytsal.anomaly.setup",
"pytsal.forecasting.save_model"
] | [((446, 470), 'pytsal.forecasting.load_model', 'forecasting.load_model', ([], {}), '()\n', (468, 470), False, 'from pytsal import anomaly, forecasting\n'), ((944, 992), 'pytsal.anomaly.setup', 'anomaly.setup', (['ts_with_anomaly', 'model', '"""brutlag"""'], {}), "(ts_with_anomaly, model, 'brutlag')\n", (957, 992), False, 'from pytsal import anomaly, forecasting\n'), ((565, 684), 'pytsal.forecasting.setup', 'forecasting.setup', (['ts', '"""holtwinter"""'], {'eda': '(False)', 'validation': '(False)', 'find_best_model': '(True)', 'plot_model_comparison': '(False)'}), "(ts, 'holtwinter', eda=False, validation=False,\n find_best_model=True, plot_model_comparison=False)\n", (582, 684), False, 'from pytsal import anomaly, forecasting\n'), ((739, 770), 'pytsal.forecasting.finalize', 'forecasting.finalize', (['ts', 'model'], {}), '(ts, model)\n', (759, 770), False, 'from pytsal import anomaly, forecasting\n'), ((779, 816), 'pytsal.forecasting.save_model', 'forecasting.save_model', (['trained_model'], {}), '(trained_model)\n', (801, 816), False, 'from pytsal import anomaly, forecasting\n'), ((833, 857), 'pytsal.forecasting.load_model', 'forecasting.load_model', ([], {}), '()\n', (855, 857), False, 'from pytsal import anomaly, forecasting\n')] |
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.conf import settings
from .models import ContentImage
import io
import os
import json
from .src.trans_model import TransGraph
from .src.recognition_model import predict_app
import base64
from PIL import Image
import numpy as np
import logging
import time
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/static-storage/images/'
def runfuc(args):
    """Run one style transfer; return the relative URL of the result.

    *args* is a ``(content_path, style_id)`` pair as queued by the view;
    the styled image is written under BASE_DIR/artist/.
    """
    content_path, style_id = args
    styled = TransGraph(content_path, style_id).run()
    styled.save(BASE_DIR + 'artist/style{}.jpg'.format(style_id))
    return 'images/artist/style{}.jpg'.format(style_id)
# Create your views here.
@csrf_exempt
def styleTransform(request):
    """
    style transform service

    POST: expects an 'image' upload plus up to six radios_* style choices;
    saves the content image, runs each selected style transfer in a thread
    pool and renders the gallery page.  GET: renders the empty form.
    """
    if request.method == 'POST':
        style_img_url = []
        try:
            # read styles
            styles = {}
            for key in ['radios_1', 'radios_2', 'radios_3', 'radios_4', 'radios_5', 'radios_6']:
                style = request.POST.get(key, '')
                if not style == '':
                    styles[key] = style
            # Timestamp used to build a (minute-resolution) unique file name.
            now = time.localtime()
            # load image
            img = request.FILES['image']
            name = '{}{}{}{}{}content.jpg'.format(now[1], now[2], now[3], now[4], now[5])
            #save to database
            # NOTE(review): this local `Image` shadows the module-level
            # PIL.Image import for the rest of the function -- consider
            # renaming the variable.
            Image = ContentImage()
            Image.name = 'static/images/artist/' + name
            Image.save()
            #save to disk
            addr = BASE_DIR + 'artist/' + name
            save_to_disk(addr, img)
            # appento url
            style_img_url.append('images/artist/' + name)
            # multiprocessing
            # NOTE(review): the pool is never closed/joined -- confirm the
            # threads should not be reaped per request.
            pool = ThreadPool(6)
            tasks_args = [(addr, styles[key]) for key in styles.keys()]
            style_img_url += pool.map(runfuc, tasks_args)
        except Exception as e:
            # Best-effort: any failure falls back to the empty form.
            print(e)
            return render(request, 'artistpainting/basic.html', {})
        return render(request, 'artistpainting/basic.html', {'style_img': style_img_url})
    if request.method == 'GET':
        return render(request, 'artistpainting/basic.html', {})
# Create your views here.
@csrf_exempt
def recognition(request):
    """Image-recognition view.

    POST: expects an uploaded ``image`` file; stores it, runs the
    recognition model and renders the page with the image URL and the
    model's predictions.
    GET: renders the bare upload page.

    (The original docstring said "style transform service" — a copy-paste
    from styleTransform.)
    """
    if request.method == 'POST':
        try:
            now = time.localtime()
            # Load the uploaded image and build a timestamped name.
            img = request.FILES['image']
            image_name = '{}{}{}{}{}object.jpg'.format(now[1], now[2], now[3], now[4], now[5])
            # Run the recognition model on the upload.
            predictions = predict_app(img)
            # Record the upload in the database.  NOTE: the original code
            # bound this record to ``Image``, shadowing the module-level
            # ``PIL.Image`` import; renamed to avoid the clash.
            content_image = ContentImage()
            content_image.name = 'static/images/predict/' + image_name
            content_image.save()
            # Persist the raw upload to disk.
            addr = BASE_DIR + 'predict/' + image_name
            save_to_disk(addr, img)
            image_url = 'images/predict/' + image_name
        except Exception as e:
            print(e)
            return render(request, 'recognition/basic.html', {})
        return render(request, 'recognition/basic.html', {'image_url': image_url, 'predictions': predictions})
    if request.method == 'GET':
        return render(request, 'recognition/basic.html', {})
def save_to_disk(addr, img):
    """Stream an uploaded file to *addr* via the default storage backend."""
    with default_storage.open(addr, 'wb+') as out:
        for piece in img.chunks():
            out.write(piece)
| [
"django.shortcuts.render",
"os.path.abspath",
"time.localtime",
"django.core.files.storage.default_storage.open",
"multiprocessing.dummy.Pool",
"PIL.Image.save"
] | [((2325, 2399), 'django.shortcuts.render', 'render', (['request', '"""artistpainting/basic.html"""', "{'style_img': style_img_url}"], {}), "(request, 'artistpainting/basic.html', {'style_img': style_img_url})\n", (2331, 2399), False, 'from django.shortcuts import render\n'), ((2448, 2496), 'django.shortcuts.render', 'render', (['request', '"""artistpainting/basic.html"""', '{}'], {}), "(request, 'artistpainting/basic.html', {})\n", (2454, 2496), False, 'from django.shortcuts import render\n'), ((3475, 3574), 'django.shortcuts.render', 'render', (['request', '"""recognition/basic.html"""', "{'image_url': image_url, 'predictions': predicitons}"], {}), "(request, 'recognition/basic.html', {'image_url': image_url,\n 'predictions': predicitons})\n", (3481, 3574), False, 'from django.shortcuts import render\n'), ((3618, 3663), 'django.shortcuts.render', 'render', (['request', '"""recognition/basic.html"""', '{}'], {}), "(request, 'recognition/basic.html', {})\n", (3624, 3663), False, 'from django.shortcuts import render\n'), ((3705, 3738), 'django.core.files.storage.default_storage.open', 'default_storage.open', (['addr', '"""wb+"""'], {}), "(addr, 'wb+')\n", (3725, 3738), False, 'from django.core.files.storage import default_storage\n'), ((589, 614), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (604, 614), False, 'import os\n'), ((1439, 1455), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1453, 1455), False, 'import time\n'), ((1747, 1759), 'PIL.Image.save', 'Image.save', ([], {}), '()\n', (1757, 1759), False, 'from PIL import Image\n'), ((2029, 2042), 'multiprocessing.dummy.Pool', 'ThreadPool', (['(6)'], {}), '(6)\n', (2039, 2042), True, 'from multiprocessing.dummy import Pool as ThreadPool\n'), ((2749, 2765), 'time.localtime', 'time.localtime', ([], {}), '()\n', (2763, 2765), False, 'import time\n'), ((3141, 3153), 'PIL.Image.save', 'Image.save', ([], {}), '()\n', (3151, 3153), False, 'from PIL import Image\n'), ((2260, 2308), 
'django.shortcuts.render', 'render', (['request', '"""artistpainting/basic.html"""', '{}'], {}), "(request, 'artistpainting/basic.html', {})\n", (2266, 2308), False, 'from django.shortcuts import render\n'), ((3413, 3458), 'django.shortcuts.render', 'render', (['request', '"""recognition/basic.html"""', '{}'], {}), "(request, 'recognition/basic.html', {})\n", (3419, 3458), False, 'from django.shortcuts import render\n')] |
#
# Unittests for nuskell.dsdcompiler.compiler
#
# Written by <NAME> (<EMAIL>).
#
import unittest
from nuskell.dsdcompiler.objects import clear_memory, NuskellComplex
from nuskell.dsdcompiler.compiler import translate
class Test_Workflow(unittest.TestCase):
    """End-to-end check of CRN-to-DSD translation with a scheme file."""

    def setUp(self):
        # Complex IDs are global state; restart numbering for every test.
        NuskellComplex.ID = 1

    def tearDown(self):
        # Drop all globally registered nuskell objects between tests.
        clear_memory()

    def test_compile(self):
        crn = 'A + B -> C + D; A + A <=> C + A; C @i 5'
        scheme = 'soloveichik2010.ts'
        solution, modules = translate(crn, scheme, modular=True)
        fuels = [name for name in solution if name[0] == 'f']
        others = [name for name in solution if name[0] != 'f']
        assert len(others) == 4
        assert len(fuels) == 9
        assert len(modules) == 2
        # Concentrations: unset for A, explicit initial for C (from "@i 5"),
        # default constant 100 nM for fuel species.
        assert solution['A'].concentration is None
        assert solution['C'].concentration[0] == 'initial'
        assert solution['C'].concentration[1] == 5
        assert solution['C'].concentration[2] == 'nM'
        assert solution['f1'].concentration[0] == 'constant'
        assert solution['f1'].concentration[1] == 100
        assert solution['f1'].concentration[2] == 'nM'
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| [
"unittest.main",
"nuskell.dsdcompiler.compiler.translate",
"nuskell.dsdcompiler.objects.clear_memory"
] | [((1147, 1162), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1160, 1162), False, 'import unittest\n'), ((344, 358), 'nuskell.dsdcompiler.objects.clear_memory', 'clear_memory', ([], {}), '()\n', (356, 358), False, 'from nuskell.dsdcompiler.objects import clear_memory, NuskellComplex\n'), ((507, 539), 'nuskell.dsdcompiler.compiler.translate', 'translate', (['crn', 'ts'], {'modular': '(True)'}), '(crn, ts, modular=True)\n', (516, 539), False, 'from nuskell.dsdcompiler.compiler import translate\n')] |
from utils import load_data
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
import math
import os
import time
from datetime import datetime
from py.predict import Predictor
from py.predict import ttypes
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import torch.nn as nn
import torch.nn.functional as F
import torch
# Model / training hyper-parameters.
# NOTE(review): most of these are not referenced in this module — the GCN
# instances below are built with hard-coded sizes (nfeat=13845, nhid=128,
# nclass=5) instead.
input_size = 52
hidden_size = 128
num_layers = 2
num_classes = 21
batch_size = 256
num_epochs = 2
learning_rate = 0.01
class GraphConvolution(Module):
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
class GCN(nn.Module):
    """Two-layer GCN: gc1 -> ReLU -> dropout -> gc2 -> log-softmax."""

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)
        self.dropout = dropout

    def forward(self, x, adj):
        hidden = F.relu(self.gc1(x, adj))
        # Dropout only fires in training mode.
        hidden = F.dropout(hidden, self.dropout, training=self.training)
        logits = self.gc2(hidden, adj)
        return F.log_softmax(logits, dim=1)
class PredictionHandler:
    """Thrift service handler that classifies graph nodes with a trained GCN."""

    def __init__(self):
        # Load the full graph dataset once; every predict() reuses it.
        self.adj, self.features, self.labels, self.idx_train, self.idx_val, self.idx_test = load_data()

    def ping(self):
        # Health-check endpoint.
        print('ping()')

    def pong(self, data):
        # Echo endpoint for connectivity tests; appends a marker value.
        print(data)
        data.append(1.5)
        return data

    def predict(self, i):
        """Return a thrift ``pred`` (type/confidence/timestamp) for test node *i*.

        NOTE(review): the model weights are re-loaded from disk and the full
        forward pass re-run on every call; caching the model/outputs would
        avoid the repeated work.
        """
        idx_test = torch.LongTensor(self.idx_test)
        print(datetime.now(), " Receive data successfully.")
        # Architecture sizes must match the checkpoint being loaded.
        model = GCN(nfeat=13845, nhid=128, nclass=5, dropout=0.5)
        model.eval()
        script_dir = os.path.dirname(__file__)
        model.load_state_dict(torch.load(
            os.path.join(script_dir, 'saved/model.pkl')))
        outputs = model(self.features, self.adj)
        # loss = F.nll_loss(outputs[idx_test], labels[idx_test])
        _, predicted = torch.max(outputs[idx_test], 1)
        # Log-probability of each node's predicted class.
        confidence = []
        for idx, item in enumerate(outputs[idx_test]):
            confidence.append(item[predicted[idx]])
        result = predicted[i]
        pred = ttypes.pred()
        pred.type = int(result)
        # log_softmax values are <= 0; negated so the reported value is positive.
        pred.confidence = float(-confidence[i])
        pred.timestamp = str(round(time.time() * 1000))
        print(pred)
        return pred
if __name__ == '__main__':
    # NOTE(review): this model is loaded but never used — PredictionHandler
    # loads its own copy inside predict(); consider removing this or passing
    # the loaded model into the handler.
    model = GCN(nfeat=13845, nhid=128, nclass=5, dropout=0.5)
    script_dir = os.path.dirname(__file__)
    model.load_state_dict(torch.load(
        os.path.join(script_dir, 'saved/model.pkl')))
    handler = PredictionHandler()
    processor = Predictor.Processor(handler)
    # Serve thrift RPCs over local TCP, one thread per connection.
    transport = TSocket.TServerSocket(host='127.0.0.1', port=9090)
    tfactory = TTransport.TBufferedTransportFactory()
    pfactory = TBinaryProtocol.TBinaryProtocolFactory()
    # server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
    # You could do one of these for a multithreaded server
    server = TServer.TThreadedServer(
        processor, transport, tfactory, pfactory)
    server.serve()
| [
"thrift.server.TServer.TThreadedServer",
"py.predict.ttypes.pred",
"utils.load_data",
"torch.LongTensor",
"torch.max",
"os.path.join",
"torch.FloatTensor",
"torch.nn.functional.dropout",
"torch.mm",
"os.path.dirname",
"datetime.datetime.now",
"thrift.transport.TTransport.TBufferedTransportFact... | [((3437, 3462), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3452, 3462), False, 'import os\n'), ((3605, 3633), 'py.predict.Predictor.Processor', 'Predictor.Processor', (['handler'], {}), '(handler)\n', (3624, 3633), False, 'from py.predict import Predictor\n'), ((3650, 3700), 'thrift.transport.TSocket.TServerSocket', 'TSocket.TServerSocket', ([], {'host': '"""127.0.0.1"""', 'port': '(9090)'}), "(host='127.0.0.1', port=9090)\n", (3671, 3700), False, 'from thrift.transport import TSocket\n'), ((3716, 3754), 'thrift.transport.TTransport.TBufferedTransportFactory', 'TTransport.TBufferedTransportFactory', ([], {}), '()\n', (3752, 3754), False, 'from thrift.transport import TTransport\n'), ((3770, 3810), 'thrift.protocol.TBinaryProtocol.TBinaryProtocolFactory', 'TBinaryProtocol.TBinaryProtocolFactory', ([], {}), '()\n', (3808, 3810), False, 'from thrift.protocol import TBinaryProtocol\n'), ((3963, 4028), 'thrift.server.TServer.TThreadedServer', 'TServer.TThreadedServer', (['processor', 'transport', 'tfactory', 'pfactory'], {}), '(processor, transport, tfactory, pfactory)\n', (3986, 4028), False, 'from thrift.server import TServer\n'), ((1335, 1363), 'torch.mm', 'torch.mm', (['input', 'self.weight'], {}), '(input, self.weight)\n', (1343, 1363), False, 'import torch\n'), ((1381, 1405), 'torch.spmm', 'torch.spmm', (['adj', 'support'], {}), '(adj, support)\n', (1391, 1405), False, 'import torch\n'), ((2007, 2057), 'torch.nn.functional.dropout', 'F.dropout', (['x', 'self.dropout'], {'training': 'self.training'}), '(x, self.dropout, training=self.training)\n', (2016, 2057), True, 'import torch.nn.functional as F\n'), ((2102, 2125), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (2115, 2125), True, 'import torch.nn.functional as F\n'), ((2269, 2280), 'utils.load_data', 'load_data', ([], {}), '()\n', (2278, 2280), False, 'from utils import 
load_data\n'), ((2464, 2495), 'torch.LongTensor', 'torch.LongTensor', (['self.idx_test'], {}), '(self.idx_test)\n', (2480, 2495), False, 'import torch\n'), ((2666, 2691), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2681, 2691), False, 'import os\n'), ((2930, 2961), 'torch.max', 'torch.max', (['outputs[idx_test]', '(1)'], {}), '(outputs[idx_test], 1)\n', (2939, 2961), False, 'import torch\n'), ((3139, 3152), 'py.predict.ttypes.pred', 'ttypes.pred', ([], {}), '()\n', (3150, 3152), False, 'from py.predict import ttypes\n'), ((841, 885), 'torch.FloatTensor', 'torch.FloatTensor', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (858, 885), False, 'import torch\n'), ((2511, 2525), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2523, 2525), False, 'from datetime import datetime\n'), ((3509, 3552), 'os.path.join', 'os.path.join', (['script_dir', '"""saved/model.pkl"""'], {}), "(script_dir, 'saved/model.pkl')\n", (3521, 3552), False, 'import os\n'), ((938, 969), 'torch.FloatTensor', 'torch.FloatTensor', (['out_features'], {}), '(out_features)\n', (955, 969), False, 'import torch\n'), ((2746, 2789), 'os.path.join', 'os.path.join', (['script_dir', '"""saved/model.pkl"""'], {}), "(script_dir, 'saved/model.pkl')\n", (2758, 2789), False, 'import os\n'), ((3268, 3279), 'time.time', 'time.time', ([], {}), '()\n', (3277, 3279), False, 'import time\n')] |
from django.conf.urls import url
from . import views
# Routes for the accounts app: auth flows, group listings, and per-user
# admin actions keyed by the numeric user id captured from the URL.
urlpatterns = [
    url(r'^register/$', views.register, name='register'),
    url(r'^login/$', views.user_login, name='login'),
    url(r'^logout/$', views.user_logout, name='logout'),
    url(r'^groups/$', views.groups, name='groups'),
    url(r'^sitemanager/$', views.sitemanager, name='sitemanager'),
    url(r'^(?P<user_id>[0-9]+)/user_view/$', views.user_view, name='user_view'),
    url(r'^(?P<user_id>[0-9]+)/deactivate/$', views.deactivate, name='deactivate'),
    url(r'^(?P<user_id>[0-9]+)/activate/$', views.activate, name='activate'),
    url(r'^(?P<user_id>[0-9]+)/makeSiteManager/$', views.makeSiteManager, name='makeSiteManager'),
    url(r'^(?P<user_id>[0-9]+)/unmakeSiteManager/$', views.unmakeSiteManager, name='unmakeSiteManager'),
    url(r'^groups/sitemanager/$', views.groupsSM, name='groupsSM'),
]
| [
"django.conf.urls.url"
] | [((75, 126), 'django.conf.urls.url', 'url', (['"""^register/$"""', 'views.register'], {'name': '"""register"""'}), "('^register/$', views.register, name='register')\n", (78, 126), False, 'from django.conf.urls import url\n'), ((133, 180), 'django.conf.urls.url', 'url', (['"""^login/$"""', 'views.user_login'], {'name': '"""login"""'}), "('^login/$', views.user_login, name='login')\n", (136, 180), False, 'from django.conf.urls import url\n'), ((187, 237), 'django.conf.urls.url', 'url', (['"""^logout/$"""', 'views.user_logout'], {'name': '"""logout"""'}), "('^logout/$', views.user_logout, name='logout')\n", (190, 237), False, 'from django.conf.urls import url\n'), ((244, 289), 'django.conf.urls.url', 'url', (['"""^groups/$"""', 'views.groups'], {'name': '"""groups"""'}), "('^groups/$', views.groups, name='groups')\n", (247, 289), False, 'from django.conf.urls import url\n'), ((296, 356), 'django.conf.urls.url', 'url', (['"""^sitemanager/$"""', 'views.sitemanager'], {'name': '"""sitemanager"""'}), "('^sitemanager/$', views.sitemanager, name='sitemanager')\n", (299, 356), False, 'from django.conf.urls import url\n'), ((363, 437), 'django.conf.urls.url', 'url', (['"""^(?P<user_id>[0-9]+)/user_view/$"""', 'views.user_view'], {'name': '"""user_view"""'}), "('^(?P<user_id>[0-9]+)/user_view/$', views.user_view, name='user_view')\n", (366, 437), False, 'from django.conf.urls import url\n'), ((444, 521), 'django.conf.urls.url', 'url', (['"""^(?P<user_id>[0-9]+)/deactivate/$"""', 'views.deactivate'], {'name': '"""deactivate"""'}), "('^(?P<user_id>[0-9]+)/deactivate/$', views.deactivate, name='deactivate')\n", (447, 521), False, 'from django.conf.urls import url\n'), ((528, 599), 'django.conf.urls.url', 'url', (['"""^(?P<user_id>[0-9]+)/activate/$"""', 'views.activate'], {'name': '"""activate"""'}), "('^(?P<user_id>[0-9]+)/activate/$', views.activate, name='activate')\n", (531, 599), False, 'from django.conf.urls import url\n'), ((606, 703), 'django.conf.urls.url', 'url', 
(['"""^(?P<user_id>[0-9]+)/makeSiteManager/$"""', 'views.makeSiteManager'], {'name': '"""makeSiteManager"""'}), "('^(?P<user_id>[0-9]+)/makeSiteManager/$', views.makeSiteManager, name=\n 'makeSiteManager')\n", (609, 703), False, 'from django.conf.urls import url\n'), ((705, 807), 'django.conf.urls.url', 'url', (['"""^(?P<user_id>[0-9]+)/unmakeSiteManager/$"""', 'views.unmakeSiteManager'], {'name': '"""unmakeSiteManager"""'}), "('^(?P<user_id>[0-9]+)/unmakeSiteManager/$', views.unmakeSiteManager,\n name='unmakeSiteManager')\n", (708, 807), False, 'from django.conf.urls import url\n'), ((810, 871), 'django.conf.urls.url', 'url', (['"""^groups/sitemanager/$"""', 'views.groupsSM'], {'name': '"""groupsSM"""'}), "('^groups/sitemanager/$', views.groupsSM, name='groupsSM')\n", (813, 871), False, 'from django.conf.urls import url\n')] |
from flask import Flask
from flask import render_template
from flask import Flask, flash, request, redirect, url_for
from werkzeug.utils import secure_filename
import os
import numpy as np
import tensorflow as tf
import PIL
from tensorflow import keras
# Flask application; uploads are staged under static/upload_folder.
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = "static/upload_folder"
# Pre-trained classifier, loaded once at startup.
model = tf.keras.models.load_model('ai/fingernail_model')
# Index order must match the model's output units.
class_names = ['long', 'short']
@app.route('/')
def home(name=None):
    """Serve the landing page.  (*name* is accepted but unused.)"""
    return render_template("index.html")
@app.route("/upload", methods = ['POST'])
def upload():
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file:
filename = secure_filename(file.filename)
file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(file_path)
img_array = tf.keras.preprocessing.image.load_img(file_path, target_size = (64, 64))
img_array = tf.expand_dims(img_array, 0)
predictions = model.predict(img_array)
score = tf.nn.softmax(predictions)
statement = "I am {:.2f} percent confident that your fingernails are {}".format(100 * np.max(score), class_names[np.argmax(score)])
os.remove(file_path)
return statement
if __name__ == "__main__":
app.run(debug=True)
app.run(host='0.0.0.0')
| [
"flask.render_template",
"tensorflow.keras.preprocessing.image.load_img",
"flask.flash",
"flask.Flask",
"os.path.join",
"numpy.argmax",
"numpy.max",
"flask.redirect",
"tensorflow.keras.models.load_model",
"werkzeug.utils.secure_filename",
"tensorflow.nn.softmax",
"tensorflow.expand_dims",
"o... | [((284, 299), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (289, 299), False, 'from flask import Flask, flash, request, redirect, url_for\n'), ((381, 430), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""ai/fingernail_model"""'], {}), "('ai/fingernail_model')\n", (407, 430), True, 'import tensorflow as tf\n'), ((511, 540), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (526, 540), False, 'from flask import render_template\n'), ((642, 663), 'flask.flash', 'flash', (['"""No file part"""'], {}), "('No file part')\n", (647, 663), False, 'from flask import Flask, flash, request, redirect, url_for\n'), ((679, 700), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (687, 700), False, 'from flask import Flask, flash, request, redirect, url_for\n'), ((775, 800), 'flask.flash', 'flash', (['"""No selected file"""'], {}), "('No selected file')\n", (780, 800), False, 'from flask import Flask, flash, request, redirect, url_for\n'), ((816, 837), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (824, 837), False, 'from flask import Flask, flash, request, redirect, url_for\n'), ((870, 900), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (885, 900), False, 'from werkzeug.utils import secure_filename\n'), ((921, 972), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (933, 972), False, 'import os\n'), ((1022, 1092), 'tensorflow.keras.preprocessing.image.load_img', 'tf.keras.preprocessing.image.load_img', (['file_path'], {'target_size': '(64, 64)'}), '(file_path, target_size=(64, 64))\n', (1059, 1092), True, 'import tensorflow as tf\n'), ((1115, 1143), 'tensorflow.expand_dims', 'tf.expand_dims', (['img_array', '(0)'], {}), '(img_array, 0)\n', (1129, 1143), True, 'import tensorflow as tf\n'), ((1208, 1234), 
'tensorflow.nn.softmax', 'tf.nn.softmax', (['predictions'], {}), '(predictions)\n', (1221, 1234), True, 'import tensorflow as tf\n'), ((1383, 1403), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (1392, 1403), False, 'import os\n'), ((1329, 1342), 'numpy.max', 'np.max', (['score'], {}), '(score)\n', (1335, 1342), True, 'import numpy as np\n'), ((1356, 1372), 'numpy.argmax', 'np.argmax', (['score'], {}), '(score)\n', (1365, 1372), True, 'import numpy as np\n')] |
import math
import os.path
from dataclasses import dataclass
from itertools import combinations
@dataclass
class Moon:
    """Position and velocity of one moon along the three axes."""
    x: int
    y: int
    z: int
    # Velocity components; every moon starts at rest.
    dx: int = 0
    dy: int = 0
    dz: int = 0
dz: int = 0
def _parse_moons(lines):
    """Parse lines like ``<x=-1, y=0, z=2>`` into Moon objects at rest."""
    moons = []
    for line in lines:
        coords = {}
        for chunk in line.strip().strip('<>').split(','):
            axis, value = chunk.split('=')
            coords[axis.strip()] = int(value)
        moons.append(Moon(coords['x'], coords['y'], coords['z']))
    return moons
def _read_input():
    """Read the puzzle input from the .txt file named after this module."""
    input_name = os.path.basename(__file__).replace('.py', '.txt')
    with open(input_name) as handle:
        return _parse_moons(handle.readlines())
def _apply_gravity(m1: Moon, m2: Moon):
if m1.x > m2.x:
m1.dx -= 1
m2.dx += 1
elif m1.x < m2.x:
m1.dx += 1
m2.dx -= 1
if m1.y > m2.y:
m1.dy -= 1
m2.dy += 1
elif m1.y < m2.y:
m1.dy += 1
m2.dy -= 1
if m1.z > m2.z:
m1.dz -= 1
m2.dz += 1
elif m1.z < m2.z:
m1.dz += 1
m2.dz -= 1
def _potential_energy(moons):
return [abs(m.x) + abs(m.y) + abs(m.z) for m in moons]
def _kinetic_energy(moons):
return [abs(m.dx) + abs(m.dy) + abs(m.dz) for m in moons]
def _time_step(moons, pairs):
    """Advance one step: apply pairwise gravity, then integrate velocity."""
    for left, right in pairs:
        _apply_gravity(left, right)
    for moon in moons:
        moon.x, moon.y, moon.z = (moon.x + moon.dx,
                                  moon.y + moon.dy,
                                  moon.z + moon.dz)
def part1(moons, n=1000):
    """Total energy (potential * kinetic per moon) after *n* steps."""
    pairs = list(combinations(moons, 2))
    for _ in range(n):
        _time_step(moons, pairs)
    energies = zip(_potential_energy(moons), _kinetic_energy(moons))
    return sum(p * k for p, k in energies)
def _lcm(a, b):
return abs(a * b) // math.gcd(a, b)
def part2(moons):
    """Number of steps until the whole system repeats a previous state.

    The three axes evolve independently, so the global period is the lcm
    of the per-axis periods.  Each axis is simulated until its combined
    (position, velocity) tuple repeats; since the dynamics are reversible
    (a known property of this puzzle), the first repeated state is the
    initial one, so the count of distinct states equals the axis period.
    """
    pairs = list(combinations(moons, 2))
    xs = set()
    ys = set()
    zs = set()
    found_x = False
    found_y = False
    found_z = False
    while True:
        # Per-axis snapshot of all moons' positions and velocities.
        x_state = tuple((m.x, m.dx) for m in moons)
        y_state = tuple((m.y, m.dy) for m in moons)
        z_state = tuple((m.z, m.dz) for m in moons)
        if x_state in xs:
            found_x = True
        else:
            xs.add(x_state)
        if y_state in ys:
            found_y = True
        else:
            ys.add(y_state)
        if z_state in zs:
            found_z = True
        else:
            zs.add(z_state)
        if found_x and found_y and found_z:
            break
        _time_step(moons, pairs)
    return _lcm(len(xs), _lcm(len(ys), len(zs)))
if __name__ == '__main__':
    # Each part mutates the moons in place, so re-read the input between them.
    moons = _read_input()
    print(part1(moons))
    moons = _read_input()
    print(part2(moons))
############
# Tests
# Sample systems from the puzzle statement, used by the pytest functions below.
example_1 = '''\
<x=-1, y=0, z=2>
<x=2, y=-10, z=-7>
<x=4, y=-8, z=8>
<x=3, y=5, z=-1>
'''.splitlines()
example_2 = '''\
<x=-8, y=-10, z=0>
<x=5, y=5, z=10>
<x=2, y=-7, z=3>
<x=9, y=-8, z=-3>
'''.splitlines()
def test_apply_gravity():
    """One gravity application nudges each velocity toward the other moon."""
    first = Moon(3, 0, 4)
    second = Moon(5, 0, 3)
    _apply_gravity(first, second)
    assert (first.dx, first.dy, first.dz) == (1, 0, -1)
    assert (second.dx, second.dy, second.dz) == (-1, 0, 1)
def test_example_1():
    # Expected values given in the puzzle statement for the first sample.
    assert part1(_parse_moons(example_1), n=10) == 179
    assert part2(_parse_moons(example_1)) == 2772
def test_example_2():
    # Expected values given in the puzzle statement for the second sample.
    assert part1(_parse_moons(example_2), n=100) == 1940
    assert part2(_parse_moons(example_2)) == 4686774924
def test_solutions():
    # Regression check against the accepted answers for this puzzle input.
    moons = _read_input()
    assert part1(moons) == 10635
    moons = _read_input()
    assert part2(moons) == 583523031727256
| [
"itertools.combinations",
"math.gcd"
] | [((1448, 1470), 'itertools.combinations', 'combinations', (['moons', '(2)'], {}), '(moons, 2)\n', (1460, 1470), False, 'from itertools import combinations\n'), ((1676, 1690), 'math.gcd', 'math.gcd', (['a', 'b'], {}), '(a, b)\n', (1684, 1690), False, 'import math\n'), ((1728, 1750), 'itertools.combinations', 'combinations', (['moons', '(2)'], {}), '(moons, 2)\n', (1740, 1750), False, 'from itertools import combinations\n')] |
from django.contrib.sitemaps import Sitemap
from django.shortcuts import reverse
class StaticViewSitemap(Sitemap):
    """Sitemap entries for the site's static views."""
    changefreq = "weekly"
    priority = 0.5
    def items(self):
        # Named URL patterns to include in the sitemap.
        return ['team_app:index']
    def location(self, item):
        # Resolve a URL-pattern name to its path.
        return reverse(item)
| [
"django.shortcuts.reverse"
] | [((266, 279), 'django.shortcuts.reverse', 'reverse', (['item'], {}), '(item)\n', (273, 279), False, 'from django.shortcuts import reverse\n')] |
import asyncio as aio
import argparse as ap
import sys
import os
from pathlib import Path
from typing import Optional
from .cfg import get_cfg
from .event import ConnectionMode, DaemonEvent
def get_args() -> ap.Namespace:
    """Build and parse the wcc command-line interface."""
    parser = ap.ArgumentParser(
        description="Send commands to a wcd instance, through a unix socket.")
    # Valid commands are the daemon's event names, lower-cased.
    command_names = [event.name.lower() for event in DaemonEvent]
    parser.add_argument("command", choices=command_names,
                        help="A command to be sent to the wcd server")
    parser.add_argument("-s", "--socket", default=None, type=Path,
                        help="The path to the unix socket over which to communicate with the server. If this option"
                        " is not supplied, wcc will look for this path inside wcd's config file.")
    return parser.parse_args()
# tbf this didnt need to be async, but ive never used synchronous unix sockets
async def wcc(command: str, socket: Optional[Path]) -> None:
    """Send *command* to a running wcd over its unix socket and print the reply.

    When *socket* is None the path comes from wcd's config file; that value
    may reference $TMPDIR, which is defaulted to /tmp here if unset.
    """
    if "TMPDIR" not in os.environ:
        os.environ["TMPDIR"] = "/tmp"
    socket_path = socket or os.path.expandvars(get_cfg()["socket_path"])
    try:
        r, w = await aio.open_unix_connection(socket_path)
    except ConnectionRefusedError:
        print(f"Couldn't connect to a wcd over '{socket_path}'")
        return
    # Wire format: two big-endian u32s — the connection mode, then the event.
    # NOTE(review): assumes ConnectionMode/DaemonEvent members are int-like
    # (they expose to_bytes); confirm in the .event module.
    w.write(ConnectionMode.ONE_SHOT.to_bytes(4, byteorder="big"))
    w.write(DaemonEvent[command.upper()].to_bytes(4, byteorder="big"))
    await w.drain()
    print((await r.read()).decode("utf-8"))
def main() -> None:
    """Entry point: parse the CLI and run the one-shot client to completion."""
    cli_args = vars(get_args())
    aio.run(wcc(**cli_args))
if __name__ == "__main__":
main()
| [
"asyncio.open_unix_connection",
"argparse.ArgumentParser"
] | [((240, 333), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {'description': '"""Send commands to a wcd instance, through a unix socket."""'}), "(description=\n 'Send commands to a wcd instance, through a unix socket.')\n", (257, 333), True, 'import argparse as ap\n'), ((1071, 1108), 'asyncio.open_unix_connection', 'aio.open_unix_connection', (['socket_path'], {}), '(socket_path)\n', (1095, 1108), True, 'import asyncio as aio\n')] |
import pygame
from pygame.locals import *
from paddle import Paddle
from ball import Ball
from inputs import handle_events, handle_input
from constants import SCREEN_WIDTH, SCREEN_HEIGHT, WHITE, RED
# Shared game objects, created in setup_game().
ball = None
left_paddle = None
right_paddle = None
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption("Python PONG")
clock = pygame.time.Clock()
# Single-element lists so the helpers can mutate these flags in place.
done = [False]
is_game_over = [False]
def setup_game():
    """Create the ball and both paddles; dock the right paddle to the edge."""
    global ball, left_paddle, right_paddle
    ball = Ball((SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2))
    left_paddle = Paddle()
    right_paddle = Paddle()
    # Flush the right paddle against the right screen border.
    right_paddle.rect.x = SCREEN_WIDTH - right_paddle.rect.width
def draw_game_over():
    """Render a centred red GAME OVER banner."""
    font = pygame.font.Font("freesansbold.ttf", 32)
    banner = font.render("GAME OVER", True, RED)
    banner_rect = banner.get_rect()
    banner_rect.center = (SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2)
    screen.blit(banner, banner_rect)
def draw_game():
    """Draw the paddles and the ball for an active round."""
    for sprite in (left_paddle, right_paddle, ball):
        sprite.draw(screen)
def draw():
    """Clear the screen, draw the current scene, and flip the display."""
    screen.fill(WHITE)
    # Pick the scene based on whether the round has ended.
    (draw_game_over if is_game_over[0] else draw_game)()
    pygame.display.flip()
def update():
    """Process events and, unless the game is over, advance the simulation."""
    handle_events(done)
    if is_game_over[0]:
        return
    handle_input(left_paddle, right_paddle)
    ball.update(left_paddle, right_paddle, is_game_over)
setup_game()
# Main loop: fixed 30 FPS tick, update the simulation, then render.
while not done[0]:
    clock.tick(30)
    update()
    draw()
pygame.quit()
| [
"pygame.display.set_caption",
"pygame.quit",
"pygame.init",
"pygame.display.set_mode",
"pygame.display.flip",
"inputs.handle_events",
"ball.Ball",
"inputs.handle_input",
"pygame.time.Clock",
"pygame.font.Font",
"paddle.Paddle"
] | [((252, 265), 'pygame.init', 'pygame.init', ([], {}), '()\n', (263, 265), False, 'import pygame\n'), ((275, 329), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(SCREEN_WIDTH, SCREEN_HEIGHT)'], {}), '((SCREEN_WIDTH, SCREEN_HEIGHT))\n', (298, 329), False, 'import pygame\n'), ((330, 371), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Python PONG"""'], {}), "('Python PONG')\n", (356, 371), False, 'import pygame\n'), ((381, 400), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (398, 400), False, 'import pygame\n'), ((1482, 1495), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1493, 1495), False, 'import pygame\n'), ((535, 580), 'ball.Ball', 'Ball', (['(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2)'], {}), '((SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2))\n', (539, 580), False, 'from ball import Ball\n'), ((599, 607), 'paddle.Paddle', 'Paddle', ([], {}), '()\n', (605, 607), False, 'from paddle import Paddle\n'), ((627, 635), 'paddle.Paddle', 'Paddle', ([], {}), '()\n', (633, 635), False, 'from paddle import Paddle\n'), ((736, 776), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(32)'], {}), "('freesansbold.ttf', 32)\n", (752, 776), False, 'import pygame\n'), ((1204, 1225), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (1223, 1225), False, 'import pygame\n'), ((1246, 1265), 'inputs.handle_events', 'handle_events', (['done'], {}), '(done)\n', (1259, 1265), False, 'from inputs import handle_events, handle_input\n'), ((1302, 1341), 'inputs.handle_input', 'handle_input', (['left_paddle', 'right_paddle'], {}), '(left_paddle, right_paddle)\n', (1314, 1341), False, 'from inputs import handle_events, handle_input\n')] |
import logging
import logging.config
import logging.handlers
import os
import sys
from oslo_config import cfg
import six
from six import moves
import tvrenamer
from tvrenamer import options
from tvrenamer import services
# Attach a NullHandler so importing this package never triggers
# "no handlers could be found" warnings before logging is configured.
logging.getLogger().addHandler(logging.NullHandler())
# Default log levels used to quiet chatty third-party libraries.
DEFAULT_LIBRARY_LOG_LEVEL = {'stevedore': logging.WARNING,
                             'requests': logging.WARNING,
                             'tvdbapi_client': logging.WARNING,
                             'trakt': logging.WARNING,
                             }
# Console output stays terse; the log file records timestamp/level/logger.
CONSOLE_MESSAGE_FORMAT = '%(message)s'
LOG_FILE_MESSAGE_FORMAT = '[%(asctime)s] %(levelname)-8s %(name)s %(message)s'
def _setup_logging():
    """Configure the root logger from cfg: level, optional file, console."""
    root = logging.getLogger()
    root.setLevel(cfg.CONF.loglevel.upper())
    if cfg.CONF.logfile:
        # Rotate at ~1 MB, keeping nine backups.
        file_handler = logging.handlers.RotatingFileHandler(
            filename=cfg.CONF.logfile, maxBytes=1000 * 1024, backupCount=9)
        file_handler.setFormatter(logging.Formatter(LOG_FILE_MESSAGE_FORMAT))
        root.addHandler(file_handler)
    if cfg.CONF.console_output_enabled:
        # Always send higher-level messages to the console via stderr
        console = logging.StreamHandler(sys.stderr)
        console.setFormatter(logging.Formatter(CONSOLE_MESSAGE_FORMAT))
        root.addHandler(console)
    # shut off logging from 3rd party frameworks
    for lib_name, lib_level in six.iteritems(DEFAULT_LIBRARY_LOG_LEVEL):
        logging.getLogger(lib_name).setLevel(lib_level)
def _configure(args):
    """Load oslo.config options from CLI args and discovered config files.

    Candidate <project>.conf files under an active virtualenv's etc/
    directories are tried in addition to oslo's standard search paths;
    only files that actually exist are passed on.
    """
    config_files = []
    virtual_path = os.getenv('VIRTUAL_ENV')
    cfg_file = '{0}.conf'.format(tvrenamer.PROJECT_NAME)
    # if virtualenv is active; then leverage <virtualenv>/etc
    # and <virtualenv>/etc/<project>
    if virtual_path:
        config_files.append(os.path.join(virtual_path, 'etc', cfg_file))
        config_files.append(os.path.join(virtual_path, 'etc',
                                         tvrenamer.PROJECT_NAME, cfg_file))
    config_files.extend(
        cfg.find_config_files(project=tvrenamer.PROJECT_NAME))
    # Parse the command line and the existing config files.
    cfg.CONF(args,
             project=tvrenamer.PROJECT_NAME,
             version=tvrenamer.__version__,
             default_config_files=list(moves.filter(os.path.isfile,
                                                     config_files)))
    # if no config_dir was provided then we will set it to the
    # path of the most specific config file found.
    if not cfg.CONF.config_dir and cfg.CONF.config_file:
        cfg.CONF.set_default('config_dir',
                             os.path.dirname(cfg.CONF.config_file[-1]))
def prepare_service(args=None):
    """Configure the application and set up logging.

    Registration and configuration must happen before logging setup,
    since _setup_logging reads log options from ``cfg.CONF``.
    """
    options.register_opts(cfg.CONF)
    services.load_service_opts(cfg.CONF)
    _configure(args)
    _setup_logging()
    # Dump every resolved option value at DEBUG for troubleshooting.
    cfg.CONF.log_opt_values(logging.getLogger(), logging.DEBUG)
| [
"logging.NullHandler",
"logging.getLogger",
"tvrenamer.services.load_service_opts",
"logging.StreamHandler",
"tvrenamer.options.register_opts",
"os.getenv",
"logging.Formatter",
"logging.handlers.RotatingFileHandler",
"oslo_config.cfg.find_config_files",
"os.path.join",
"oslo_config.cfg.CONF.log... | [((255, 276), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (274, 276), False, 'import logging\n'), ((707, 726), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (724, 726), False, 'import logging\n'), ((1505, 1545), 'six.iteritems', 'six.iteritems', (['DEFAULT_LIBRARY_LOG_LEVEL'], {}), '(DEFAULT_LIBRARY_LOG_LEVEL)\n', (1518, 1545), False, 'import six\n'), ((1688, 1712), 'os.getenv', 'os.getenv', (['"""VIRTUAL_ENV"""'], {}), "('VIRTUAL_ENV')\n", (1697, 1712), False, 'import os\n'), ((2814, 2845), 'tvrenamer.options.register_opts', 'options.register_opts', (['cfg.CONF'], {}), '(cfg.CONF)\n', (2835, 2845), False, 'from tvrenamer import options\n'), ((2850, 2886), 'tvrenamer.services.load_service_opts', 'services.load_service_opts', (['cfg.CONF'], {}), '(cfg.CONF)\n', (2876, 2886), False, 'from tvrenamer import services\n'), ((224, 243), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (241, 243), False, 'import logging\n'), ((752, 777), 'oslo_config.cfg.CONF.loglevel.upper', 'cfg.CONF.loglevel.upper', ([], {}), '()\n', (775, 777), False, 'from oslo_config import cfg\n'), ((859, 964), 'logging.handlers.RotatingFileHandler', 'logging.handlers.RotatingFileHandler', ([], {'filename': 'cfg.CONF.logfile', 'maxBytes': '(1000 * 1024)', 'backupCount': '(9)'}), '(filename=cfg.CONF.logfile, maxBytes=\n 1000 * 1024, backupCount=9)\n', (895, 964), False, 'import logging\n'), ((993, 1035), 'logging.Formatter', 'logging.Formatter', (['LOG_FILE_MESSAGE_FORMAT'], {}), '(LOG_FILE_MESSAGE_FORMAT)\n', (1010, 1035), False, 'import logging\n'), ((1255, 1288), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stderr'], {}), '(sys.stderr)\n', (1276, 1288), False, 'import logging\n'), ((1309, 1350), 'logging.Formatter', 'logging.Formatter', (['CONSOLE_MESSAGE_FORMAT'], {}), '(CONSOLE_MESSAGE_FORMAT)\n', (1326, 1350), False, 'import logging\n'), ((1565, 1588), 'logging.getLogger', 'logging.getLogger', (['xlib'], 
{}), '(xlib)\n', (1582, 1588), False, 'import logging\n'), ((2136, 2189), 'oslo_config.cfg.find_config_files', 'cfg.find_config_files', ([], {'project': 'tvrenamer.PROJECT_NAME'}), '(project=tvrenamer.PROJECT_NAME)\n', (2157, 2189), False, 'from oslo_config import cfg\n'), ((2958, 2977), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2975, 2977), False, 'import logging\n'), ((1919, 1962), 'os.path.join', 'os.path.join', (['virtual_path', '"""etc"""', 'cfg_file'], {}), "(virtual_path, 'etc', cfg_file)\n", (1931, 1962), False, 'import os\n'), ((1992, 2059), 'os.path.join', 'os.path.join', (['virtual_path', '"""etc"""', 'tvrenamer.PROJECT_NAME', 'cfg_file'], {}), "(virtual_path, 'etc', tvrenamer.PROJECT_NAME, cfg_file)\n", (2004, 2059), False, 'import os\n'), ((2680, 2721), 'os.path.dirname', 'os.path.dirname', (['cfg.CONF.config_file[-1]'], {}), '(cfg.CONF.config_file[-1])\n', (2695, 2721), False, 'import os\n'), ((2339, 2381), 'six.moves.filter', 'moves.filter', (['os.path.isfile', 'config_files'], {}), '(os.path.isfile, config_files)\n', (2351, 2381), False, 'from six import moves\n')] |
# Generated by Django 3.0.3 on 2020-08-04 22:57
from django.db import migrations
class Migration(migrations.Migration):
    # Applies on top of the previous 'ressources' migration.
    dependencies = [
        ('ressources', '0004_auto_20200731_1630'),
    ]
    # Change MeetingTime's default queryset ordering to the 'day' field.
    operations = [
        migrations.AlterModelOptions(
            name='meetingtime',
            options={'ordering': ('day',)},
        ),
    ]
| [
"django.db.migrations.AlterModelOptions"
] | [((230, 315), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""meetingtime"""', 'options': "{'ordering': ('day',)}"}), "(name='meetingtime', options={'ordering': ('day',)}\n )\n", (258, 315), False, 'from django.db import migrations\n')] |
import discord
from Script.clash_info import Bot
from Script.Const_variables.import_const import Login
# Bot instance; the members intent is enabled so the bot can see guild members.
intents = discord.Intents.default()
intents.members = True
Clash_info = Bot(intents=intents)
# Toggle between the production bot (1) and the beta/test bot (0):
# each uses its own token, command prefix, and application id.
main_bot = 1
if main_bot:
    Token = Login["discord"]["token"]
    Clash_info.default_prefix = "/"
    Clash_info.id = 704688212832026724
else:
    Token = Login["discord"]["beta"]
    Clash_info.default_prefix = ","
    Clash_info.id = 710119855348645888
| [
"Script.clash_info.Bot",
"discord.Intents.default"
] | [((115, 140), 'discord.Intents.default', 'discord.Intents.default', ([], {}), '()\n', (138, 140), False, 'import discord\n'), ((177, 197), 'Script.clash_info.Bot', 'Bot', ([], {'intents': 'intents'}), '(intents=intents)\n', (180, 197), False, 'from Script.clash_info import Bot\n')] |
import torch
import torch.nn.functional as F
from utils_shapley import *
class eval_Syn0():
    """Synthetic ground-truth evaluation function for Shapley-value experiments.

    Called with one coalition (feature row) per line, it returns

        v(x) = 5*x0 + x1 + x0*x1 - 8*x3 - 2*x3^2

    ``init_baseline`` can also configure the "interaction Shapley" (j only)
    and "Shapley excess" (i and j) variants, in which a held-out feature j
    is re-inserted into x before evaluation — mirroring the model-based
    evaluators in this module.
    """
    def __init__(self, c=5, **kwargs):
        # c is kept for API compatibility with the other synthetic
        # functions; the active formula below does not use it.
        self.c = c
        self.j = None  # held-out feature index (interaction Shapley)
        self.i = None  # paired feature index (Shapley excess)

    def init_baseline(self, x=None, c=5, j=None, i=None, fixed_present=True,
                      baseline_value=0, **kwargs):
        """Store the baseline sample and the i/j interaction configuration.

        Fix: the original signature used the mutable default
        ``x=np.ones((1, 3))``, a single array shared across every call and
        instance; a fresh array is now created when x is omitted.
        """
        self.x_baseline = np.ones((1, 3)) if x is None else x
        self.j = j
        self.i = i
        self.fixed_present = fixed_present
        self.baseline_value = baseline_value  # value representing an "absent" feature

    def __call__(self, x, **kwargs):
        # Shapley Excess: features i and j are assumed to be in the same
        # coalition, so j is present exactly when i is present.
        if self.i is not None:
            j_indicator = (self.x_baseline[:, self.i] == x[:, self.i]).reshape(-1, 1) * 1
            j_present = self.x_baseline[:, self.j].repeat(x.shape[0], axis=0).reshape(-1, 1)
            j_absent = (np.zeros((x.shape[0], self.x_baseline.shape[1])) + self.baseline_value)[:, self.j].reshape(-1, 1)
            j_vector = j_indicator * j_present + (1 - j_indicator) * j_absent
            x = np_insert(x, j_vector, index=self.j)
        # Interaction Shapley: re-insert feature j, either fixed to its
        # baseline value or fixed to the "absent" baseline_value.
        if (self.j is not None) and (self.i is None):
            if self.fixed_present:
                j_vector = self.x_baseline[:, self.j].repeat(x.shape[0], axis=0).reshape(x.shape[0], -1)
                x = np_insert(x, j_vector, index=self.j)
            else:
                j_vector = (np.zeros((x.shape[0], self.x_baseline.shape[1])) + self.baseline_value)[:, self.j].reshape(-1, 1)
                x = np_insert(x, j_vector, index=self.j)
        return 5*x[:,0] + x[:,1] + x[:,0] * x[:,1] - 8*x[:,3] - 2*x[:,3] * x[:,3]
class eval_glove():
    """Glove game: a coalition (row of x) is worth 1 iff it contains at
    least one left glove (encoded 0) and one right glove (encoded 1)."""
    def __init__(self, **kwargs):
        pass
    def init_baseline(self, **kwargs):
        pass
    def __call__(self, x, **kwargs):
        left_count = len(np.where(x == 0)[1])   # number of Left Gloves
        right_count = len(np.where(x == 1)[1])  # number of Right Gloves
        return 1 if left_count > 0 and right_count > 0 else 0
class eval_MLP():
    """Shapley-value evaluation wrapper around a PyTorch MLP classifier.

    Instances are called with a (possibly perturbed) batch of feature rows
    and score them relative to the baseline prediction recorded by
    ``init_baseline``.  ``j``/``i`` enable the "interaction Shapley" and
    "Shapley excess" variants, which re-insert a held-out feature before
    the forward pass.
    """
    def __init__(self, model, binary = True, reshape = False):
        self.model = model
        self.model.eval()  # inference mode
        self.baseline = None
        self.binary = binary  # True: sigmoid binary head; False: multi-class logits
        self.j = None
        self.i = None
        self.baseline_value = 0
        self.reshape = reshape  # True: __call__ returns [1-p, p] probability columns
    def init_baseline(self, x, j = None, i = None, fixed_present = True, baseline_value = 0, **kwargs):
        '''
        set baseline prediction for original non-perturbed x value
        args:
            x: single sample. numpy array. 1 x d
        '''
        _, self.d = x.shape
        if self.binary:
            self.baseline = torch.sigmoid(self.model(numpy2cuda(x)))
        else:
            self.baseline = self.model(numpy2cuda(x)).argmax(dim = 1)
        self.x_baseline = x
        self.j = j
        self.i = i
        self.fixed_present = fixed_present
        self.baseline_value = baseline_value # if baseline is not zero
    def forward(self, x):
        '''
        forward pass of model, returns predictions.
        args:
            data: list of np arrays (note, this is different __call__)
        return:
            P(y|x) and predictions
        '''
        x = numpy2cuda(x)
        x = x.type(dtype = torch.float32)
        with torch.no_grad():
            output = self.model(x)
        if self.binary:
            pred = output >= 0.0
        else:
            pred = output.max(1, keepdim=True)[1] # Calculate Predictions
        return tensor2numpy(output), tensor2numpy(pred)
    def eval_accy(self, x, label):
        '''
        given samples and labels, calculate accuracy
        args:
            x: np matrix
            label: np array of labels
        '''
        _, pred = self.forward(x)
        pred = numpy2cuda(pred)
        label = numpy2cuda(label)
        if self.binary:
            truth = label >= 0.5
            accy = pred.eq(truth).sum().item()
        else:
            accy = pred.eq(label.view_as(pred)).sum().item()
        return accy/len(label)
    def __call__(self, x, **kwargs):
        '''
        Note: The input to Shapley Function will be flattened. Therefore, it may be necessary to reshape x prior to a forward pass.
        '''
        if self.baseline is None: raise Exception('Need to first initialize baseline in evaluation function!')
        # Shapley Excess--------------------------------------
        # feature i and j are assumed to be in the same coalition, therefore j is present if i is present
        if self.i is not None:
            j_indicator = (self.x_baseline[:, self.i] == x[:,self.i]).reshape(-1,1)*1 # 1 if j should be present, 0 if j should be absent
            j_present = self.x_baseline[:, self.j].repeat(x.shape[0], axis = 0).reshape(-1, 1)
            j_absent = (np.zeros((x.shape[0], self.x_baseline.shape[1])) + self.baseline_value)[:,self.j].reshape(-1,1)
            j_vector = j_indicator * j_present + (1-j_indicator) * j_absent
            x = np_insert(x, j_vector, index = self.j)
        #-----------------------------------------------------
        # Interaction Shapley---------------------------------
        # re-insert feature j, either fixed to its baseline value or to baseline_value
        if (self.j is not None) and (self.i is None):
            if self.fixed_present:
                j_vector = self.x_baseline[:, self.j].repeat(x.shape[0], axis = 0).reshape(x.shape[0], -1)
                x = np_insert(x, j_vector, index = self.j)
            else:
                j_vector = (np.zeros((x.shape[0], self.x_baseline.shape[1])) + self.baseline_value)[:,self.j].reshape(-1,1)
                x = np_insert(x, j_vector, index = self.j)
        #-----------------------------------------------------
        with torch.no_grad():
            x = numpy2cuda(x).type(dtype=torch.float32)
            pred = self.model(x)
            if self.binary:
                pred = torch.sigmoid(pred)
                if self.reshape:
                    # Return two-column [P(y=0), P(y=1)] probabilities.
                    output = tensor2numpy(pred).reshape(-1, 1)
                    output = np.concatenate((np.ones_like(output) - output, output), axis = 1)
                    return output
                # Score probability of the baseline-predicted class.
                if self.baseline < 0.5: pred = 1-pred
            else:
                pred = torch.exp(-F.cross_entropy(pred, self.baseline.expand(pred.shape[0]), reduction = 'none'))
        return tensor2numpy(pred)
class eval_nlp_binary_rnn():
    '''
    Shapley evaluation wrapper for a binary-sentiment RNN whose forward
    pass requires a sequence-length tensor alongside the token ids.
    '''
    def __init__(self, model, reshape = False, **kwargs):
        self.model = model
        self.model.eval()  # inference mode
        self.baseline = None
        self.j = None  # held-out token index (interaction Shapley)
        self.i = None  # paired token index (Shapley excess)
        self.reshape = reshape  # True: __call__ returns [1-p, p] probability columns
    def init_baseline(self, x, j = None, i=None, fixed_present = True, baseline_value = 0, **kwargs):
        '''
        set baseline prediction for original non-perturbed x value
        '''
        self.x_baseline = x
        self.j = j
        self.i = i
        self.fixed_present = fixed_present
        # Sequence length of the baseline sample, reused for all perturbations.
        self.x_lens = torch.tensor(x.shape[1], dtype=torch.int64).reshape(1)
        self.baseline = torch.sigmoid(self.model(numpy2cuda(x), self.x_lens))
        self.baseline_value = baseline_value # if baseline is not zero
    def forward(self, data, data_lens = None):
        '''
        forward pass of model, returns predictions.
        args:
            data: list of np arrays (note, this is different __call__)
        return:
            P(y|x) and predictions
        '''
        if type(data) == list:
            # Pad/truncate every sequence to 400 tokens and track true lengths.
            x = []
            x_len = []
            for tkn_list in data:
                if len(tkn_list[0, :]) < 400:
                    tmp = np.concatenate((tkn_list[0, :], np.zeros(400 - len(tkn_list[0, :]), dtype = 'int')), axis = 0)
                    tmp_len = len(tkn_list[0, :])
                else:
                    tmp = tkn_list[0,:400]
                    tmp_len = 400
                x.append(tmp)
                x_len.append(tmp_len)
            x = np.array(x, dtype = 'int')
            x = numpy2cuda(x)
            x_len = list2cuda(x_len)
        elif type(data) == np.ndarray:
            x = numpy2cuda(data.astype('intc'))
            if data_lens is None:
                # No lengths supplied: assume full 400-token sequences.
                x_len = tensor2cuda(torch.zeros(x.shape[0], dtype = torch.int32)+400)
            else:
                x_len = numpy2cuda(data_lens)
        elif type(data) == torch.Tensor:
            x = tensor2cuda(data)
            if data_lens is None:
                x_len = tensor2cuda(torch.zeros(x.shape[0], dtype = torch.int32)+400)
            else:
                x_len = tensor2cuda(data_lens)
        with torch.no_grad():
            output = self.model(x, x_len)
        pred = output >= 0.0
        return tensor2numpy(torch.sigmoid(output)), tensor2numpy(pred)
    def eval_accy(self, data, label, data_lens = None):
        '''
        given samples and labels, calculate accuracy
        args:
            data: list of np arrays (note, this is different __call__)
            label: np array of labels
        '''
        _, pred = self.forward(data, data_lens)
        pred = numpy2cuda(pred)
        label = numpy2cuda(label)
        truth = label >= 0.5
        accy = pred.eq(truth).sum().item()
        return accy / len(label)
    def __call__(self, x, **kwargs):
        if self.baseline is None: raise Exception('Need to first initialize baseline in evaluation function!')
        with torch.no_grad():
            # Shapley Excess--------------------------------------
            # feature i and j are assumed to be in the same coalition, therefore j is present if i is present
            if self.i is not None:
                j_indicator = (self.x_baseline[:, self.i] == x[:,self.i]).reshape(-1,1)*1 # 1 if j should be present, 0 if j should be absent
                j_present = self.x_baseline[:, self.j].repeat(x.shape[0], axis = 0).reshape(-1, 1)
                j_absent = (np.zeros((x.shape[0], self.x_baseline.shape[1])) + self.baseline_value)[:,self.j].reshape(-1,1)
                j_vector = j_indicator * j_present + (1-j_indicator) * j_absent
                x = np_insert(x, j_vector, index = self.j)
            #-----------------------------------------------------
            # Interaction Shapley---------------------------------
            if (self.j is not None) and (self.i is None):
                if self.fixed_present:
                    j_vector = self.x_baseline[:, self.j].repeat(x.shape[0], axis = 0).reshape(x.shape[0], -1)
                    x = np_insert(x, j_vector, index = self.j)
                else:
                    j_vector = np.zeros((x.shape[0], 1))
                    x = np_insert(x, j_vector, index = self.j)
            #-----------------------------------------------------
            x = numpy2cuda(x.astype('int'))
            #X_lens = torch.tensor(X.shape[1], dtype=torch.int64).reshape(1)
            pred = self.model(x, self.x_lens.expand(x.shape[0]))
            pred = torch.sigmoid(pred)
            if self.reshape:
                # Return two-column [P(y=0), P(y=1)] probabilities.
                output = tensor2numpy(pred).reshape(-1, 1)
                output = np.concatenate((np.ones_like(output) - output, output), axis = 1)
                return output
            # Score probability of the baseline-predicted class.
            if self.baseline < 0.5: pred = 1-pred
        return pred.cpu().detach().numpy()
class eval_nlp_binary_cnn():
    """Shapley evaluation wrapper for a binary-sentiment CNN over token ids.

    Unlike the RNN wrapper, the model takes only the (padded) token matrix.
    """
    ##### NEEDS TO BE UPDATED ####
    def __init__(self, model, max_length = 400, **kwargs):
        self.model = model
        self.model.eval()  # inference mode
        # NOTE(review): max_length is stored but the 400-token limit below is
        # hard-coded; confirm whether these are meant to stay in sync.
        self.max_length = max_length
        self.baseline = None
        self.j = None
        self.i = None
    def init_baseline(self, x, j = None, fixed_present = True, baseline_value = 0, **kwargs):
        '''
        set baseline prediction for original non-perturbed x value
        '''
        self.x_baseline = x
        self.j = j
        self.fixed_present = fixed_present
        self.baseline = torch.sigmoid(self.model(numpy2cuda(x)))
        self.baseline_value = baseline_value # if baseline is not zero
    def forward(self, data):
        '''
        forward pass of model, returns predictions.
        args:
            data: list of np arrays (note, this is different __call__)
        return:
            P(y|x) and predictions
        '''
        # Pad/truncate every sequence to 400 tokens.
        x = []
        x_len = []
        for tkn_list in data:
            if len(tkn_list[0, :]) < 400:
                tmp = np.concatenate((tkn_list[0, :], np.zeros(400 - len(tkn_list[0, :]), dtype = 'int')), axis = 0)
                tmp_len = len(tkn_list[0, :])
            else:
                tmp = tkn_list[0,:400]
                tmp_len = 400
            x.append(tmp)
            x_len.append(tmp_len)
        x = np.array(x, dtype = 'int')
        x = numpy2cuda(x)
        x = x.long()
        with torch.no_grad():
            output = self.model(x)
        pred = (output >= 0.0)*1
        return tensor2numpy(torch.sigmoid(output)), tensor2numpy(pred)
    def eval_accy(self, data, label):
        '''
        given samples and labels, calculate accuracy
        args:
            data: list of np arrays (note, this is different __call__)
            label: np array of labels
        '''
        _, pred = self.forward(data)
        pred = numpy2cuda(pred)
        label = numpy2cuda(label)
        truth = label >= 0.5
        accy = pred.eq(truth).sum().item()
        return accy / len(label)
    def __call__(self, x, **kwargs):
        if self.baseline is None: raise Exception('Need to first initialize baseline in evaluation function!')
        with torch.no_grad():
            # Interaction Shapley---------------------------------
            # re-insert feature j, either fixed to its baseline value or to baseline_value
            if self.j is not None:
                if self.fixed_present:
                    j_vector = self.x_baseline[:, self.j].repeat(x.shape[0], axis = 0).reshape(x.shape[0], -1)
                    x = np_insert(x, j_vector, index = self.j)
                else:
                    j_vector = (np.zeros((x.shape[0], self.x_baseline.shape[1])) + self.baseline_value)[:,self.j]
                    x = np_insert(x, j_vector, index = self.j)
            #-----------------------------------------------------
            x = numpy2cuda(x.astype('int'))
            pred = self.model(x)
            pred = torch.sigmoid(pred)
        # Score probability of the baseline-predicted class.
        if self.baseline < 0.5: pred = 1-pred
        return tensor2numpy(pred)
class eval_image():
    """Shapley evaluation wrapper for an image classifier.

    __call__ receives flattened pixel rows and reshapes them back to the
    (c, h, w) geometry recorded by ``init_baseline``.
    """
    def __init__(self, model, binary = True, reshape = False):
        self.model = model
        self.model.eval()  # inference mode
        self.baseline = None
        self.binary = binary  # True: sigmoid binary head; False: multi-class logits
        self.reshape = reshape  # True: __call__ returns raw model outputs
    def init_baseline(self, x, **kwargs):
        '''
        set baseline prediction for original non-perturbed x value
        args:
            x: single sample. numpy array. 1 x c x h x w
        '''
        x = numpy2cuda(x)
        # Remember the image geometry so __call__ can un-flatten inputs.
        _, self.c, self.h, self.w = x.shape
        if self.binary:
            self.baseline = torch.sigmoid(self.model(x))
        else:
            self.baseline = self.model(x).argmax(dim = 1)
    def forward(self, x):
        '''
        forward pass of model, returns predictions.
        args:
            data: list of np arrays (note, this is different __call__)
        return:
            P(y|x) and predictions
        '''
        x = numpy2cuda(x)
        with torch.no_grad():
            output = self.model(x)
        if self.binary:
            pred = output >= 0.0
        else:
            pred = output.max(1, keepdim=True)[1] # Calculate Predictions
        return tensor2numpy(output), tensor2numpy(pred)
    def eval_accy(self, x, label):
        '''
        given samples and labels, calculate accuracy
        args:
            x: np matrix
            label: np array of labels
        '''
        _, pred = self.forward(x)
        pred = numpy2cuda(pred)
        label = numpy2cuda(label)
        if self.binary:
            truth = label >= 0.5
            accy = pred.eq(truth).sum().item()
        else:
            accy = pred.eq(label.view_as(pred)).sum().item()
        return accy/len(label)
    def __call__(self, x, **kwargs):
        '''
        Note: The input to Shapley Function will be flattened. Therefore, it may be necessary to reshape x prior to a forward pass.
        '''
        if self.baseline is None: raise Exception('Need to first initialize baseline in evaluation function!')
        with torch.no_grad():
            x = numpy2cuda(x)
            # Restore image geometry recorded in init_baseline.
            x = x.reshape(-1, self.c, self.h, self.w).type(dtype=torch.float32)
            pred = self.model(x)
            if self.reshape:
                pred = tensor2numpy(pred)
                return pred
            if self.binary:
                pred = torch.sigmoid(pred)
                # Score probability of the baseline-predicted class.
                if self.baseline < 0.5: pred = 1-pred
            else:
                pred = torch.exp(-F.cross_entropy(pred, self.baseline.expand(pred.shape[0]), reduction = 'none'))
        return pred.cpu().detach().numpy()
class eval_image_superpixel():
    """Shapley evaluation wrapper for an image classifier over superpixels.

    Coalitions are superpixel indicator vectors; present superpixels keep
    their baseline pixels, absent ones are replaced by the value in ``w``.
    """
    def __init__(self, model, binary = True):
        self.model = model
        self.model.eval()  # inference mode
        self.baseline = None
        self.binary = binary  # True: sigmoid binary head; False: multi-class logits
    def init_baseline(self, x, num_superpixels, sp_mapping, baseline_value = 0, **kwargs):
        '''
        set baseline prediction for original non-perturbed x value
        args:
            x: single sample. numpy array. 1 x c x h x w
            sp_mapping: superpixel to pixel decoder function
        '''
        x = numpy2cuda(x)
        _, self.c, self.h, self.w = x.shape
        self.x_baseline = x
        # Superpixel mapping
        self.sp_mapping = sp_mapping
        # Calculate superpixel map for current sample
        _, self.segment_mask = self.sp_mapping(torch.ones((1, num_superpixels)), x_orig = x)
        if self.binary:
            self.baseline = torch.sigmoid(self.model(x))
        else:
            self.baseline = self.model(x).argmax(dim = 1)
        self.baseline_value = baseline_value # if baseline is not zero
    def __call__(self, x, w, **kwargs):
        '''
        args:
            x: superpixel indicator: numpy array
            w: baseline value to set for "null" pixels.
        '''
        if self.baseline is None: raise Exception('Need to first initialize baseline in evaluation function!')
        w = numpy2cuda(w)
        if len(w[0, ...]) == len(x[0, ...]):
            # zero baseline
            w = torch.zeros((x.shape[0], self.c, self.h, self.w))
            w = tensor2cuda(w)
        else:
            # mean baseline
            w = w.reshape(-1, self.c, self.h, self.w)
        with torch.no_grad():
            x = numpy2cuda(x)
            # Expand the superpixel indicators to a per-pixel mask.
            mask, _ = self.sp_mapping(x, x_orig = self.x_baseline, segment_mask = self.segment_mask)
            mask = tensor2cuda(mask)
            # Present superpixels keep baseline pixels; absent ones take w.
            x = torch.mul(mask, self.x_baseline) + torch.mul(1-mask, w)
            pred = self.model(x)
            if self.binary:
                pred = torch.sigmoid(pred)
                # Score probability of the baseline-predicted class.
                if self.baseline < 0.5: pred = 1-pred
            else:
                pred = torch.exp(-F.cross_entropy(pred, self.baseline.expand(pred.shape[0]), reduction = 'none'))
        return pred.cpu().detach().numpy()
import xgboost as xgb
from sklearn import metrics
class eval_XGB():
    """Shapley evaluation wrapper around a trained XGBoost binary classifier."""
    def __init__(self, model, reshape = False):
        self.model = model
        self.j = None  # held-out feature index (interaction Shapley)
        self.i = None  # paired feature index (Shapley excess)
        self.reshape = reshape  # True: __call__ returns [1-p, p] probability columns
    def init_baseline(self, x, j = None, i=None, fixed_present = True, baseline_value = 0, **kwargs):
        '''
        set baseline prediction for original non-perturbed x value
        args:
            x: single sample. numpy array. 1 x d
        '''
        _, self.d = x.shape
        self.x_baseline = x
        self.j = j
        self.i = i
        self.fixed_present = fixed_present
        self.baseline = self.model.predict(xgb.DMatrix(x))
        self.baseline_value = baseline_value # if baseline is not zero
    def forward(self, x):
        '''
        forward pass of model, returns predictions.
        args:
            data: list of np arrays (note, this is different __call__)
        return:
            P(y|x) and predictions
        '''
        # Shapley Excess--------------------------------------
        # feature i and j are assumed to be in the same coalition, therefore j is present if i is present
        if self.i is not None:
            j_indicator = (self.x_baseline[:, self.i] == x[:,self.i]).reshape(-1,1)*1 # 1 if j should be present, 0 if j should be absent
            j_present = self.x_baseline[:, self.j].repeat(x.shape[0], axis = 0).reshape(-1, 1)
            j_absent = (np.zeros((x.shape[0], self.x_baseline.shape[1])) + self.baseline_value)[:,self.j].reshape(-1,1)
            j_vector = j_indicator * j_present + (1-j_indicator) * j_absent
            x = np_insert(x, j_vector, index = self.j)
        #-----------------------------------------------------
        # Interaction Shapley---------------------------------
        if (self.j is not None) and (self.i is None):
            if self.fixed_present:
                j_vector = self.x_baseline[:, self.j].repeat(x.shape[0], axis = 0).reshape(x.shape[0], -1)
                x = np_insert(x, j_vector, index = self.j)
            else:
                j_vector = np.zeros((x.shape[0], 1))
                x = np_insert(x, j_vector, index = self.j)
        #-----------------------------------------------------
        pred = self.model.predict(xgb.DMatrix(x))
        return pred, (pred > 0.5)*1
    def eval_accy(self, x, label):
        # Accuracy of hard 0/1 predictions against the given labels.
        _, pred = self.forward(x)
        return metrics.accuracy_score(label, pred)
    def __call__(self, x, **kwargs):
        '''
        Note: The input to Shapley Function will be flattened. Therefore, it may be necessary to reshape x prior to a forward pass.
        '''
        output, _ = self.forward(x)
        if self.reshape:
            # Return two-column [P(y=0), P(y=1)] probabilities.
            output = output.reshape(-1, 1)
            output = np.concatenate((np.ones_like(output) - output, output), axis = 1)
            return output
        # Score probability of the baseline-predicted class.
        if self.baseline < 0.5: output = 1-output
        return output
'''
class eval_XGB_cox():
def __init__(self, model):
self.model = model
def init_baseline(self, x, **kwargs):
_, self.d = x.shape
def forward(self, x):
pred = self.model.predict(xgb.DMatrix(x), ntree_limit = 5000)
return None, pred
def eval_accy(self, x, label):
# adapted from https://github.com/slundberg/shap
_, pred = self.forward(x)
total = 0
matches = 0
for i in range(len(label)):
for j in range(len(label)):
if label[j] > 0 and abs(label[i]) > label[j]:
total += 1
if pred[j] > pred[i]:
matches += 1
return matches/total
def __call__(self, x, **kwargs):
#x = pd.DataFrame(x)
pred = self.model.predict(xgb.DMatrix(x), ntree_limit = 5000)
return pred
'''
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
class eval_RF_binary():
    """Shapley evaluation wrapper around a scikit-learn random-forest
    (or any estimator exposing predict / predict_proba) binary classifier."""
    def __init__(self, model, binary = True, reshape = False):
        self.model = model
        self.baseline = None
        self.j = None  # held-out feature index (interaction Shapley)
        self.i = None  # paired feature index (Shapley excess)
        self.binary = binary
        self.reshape = reshape  # True: __call__ returns [1-p, p] probability columns
    def init_baseline(self, x, j = None, i = None, fixed_present = True, baseline_value = 0, **kwargs):
        '''
        set baseline prediction for original non-perturbed x value
        args:
            x: single sample. numpy array. 1 x d
        '''
        _, self.d = x.shape
        self.x_baseline = x
        self.j = j
        self.i = i
        self.fixed_present = fixed_present
        self.baseline = self.model.predict_proba(x)[:,1]
        self.baseline_value = baseline_value # if baseline is not zero
    def forward(self, x, logits = False):
        '''
        forward pass of model, returns predictions.
        args:
            data: list of np arrays (note, this is different __call__)
        return:
            P(y|x) and predictions
        '''
        # Shapley Excess--------------------------------------
        # feature i and j are assumed to be in the same coalition, therefore j is present if i is present
        if self.i is not None:
            j_indicator = (self.x_baseline[:, self.i] == x[:,self.i]).reshape(-1,1)*1 # 1 if j should be present, 0 if j should be absent
            j_present = self.x_baseline[:, self.j].repeat(x.shape[0], axis = 0).reshape(-1, 1)
            j_absent = (np.zeros((x.shape[0], self.x_baseline.shape[1])) + self.baseline_value)[:,self.j].reshape(-1,1)
            j_vector = j_indicator * j_present + (1-j_indicator) * j_absent
            x = np_insert(x, j_vector, index = self.j)
        #-----------------------------------------------------
        # Interaction Shapley---------------------------------
        if (self.j is not None) and (self.i is None):
            if self.fixed_present:
                j_vector = self.x_baseline[:, self.j].repeat(x.shape[0], axis = 0).reshape(x.shape[0], -1)
                x = np_insert(x, j_vector, index = self.j)
            else:
                j_vector = np.zeros((x.shape[0], 1))
                x = np_insert(x, j_vector, index = self.j)
        #-----------------------------------------------------
        x = tensor2numpy(x)
        output = self.model.predict_proba(x)[:, 1]
        # Clamp probabilities away from 0/1 before taking the log.
        if logits: output = np.log(np.minimum(np.maximum(output, 0.0001), 0.9999)) # logits
        return output, self.model.predict(x)
    def eval_accy(self, x, label):
        '''
        given samples and labels, calculate accuracy
        args:
            x: np matrix
            label: np array of labels
        '''
        _, pred = self.forward(x)
        return metrics.accuracy_score(label, pred)
    def __call__(self, x,**kwargs):
        '''
        Note: The input to Shapley Function will be flattened. Therefore, it may be necessary to reshape x prior to a forward pass.
        '''
        if self.baseline is None: raise Exception('Need to first initialize baseline in evaluation function!')
        output, _ = self.forward(x)
        if self.reshape:
            # Return two-column [P(y=0), P(y=1)] probabilities.
            output = output.reshape(-1, 1)
            output = np.concatenate((np.ones_like(output) - output, output), axis = 1)
            return output
        # Score probability of the baseline-predicted class.
        if self.baseline < 0.5: output = 1-output
        return output
| [
"sklearn.metrics.accuracy_score",
"torch.mul",
"torch.sigmoid",
"torch.tensor",
"torch.no_grad",
"xgboost.DMatrix",
"torch.zeros",
"torch.ones"
] | [((23458, 23493), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['label', 'pred'], {}), '(label, pred)\n', (23480, 23493), False, 'from sklearn import metrics\n'), ((27986, 28021), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['label', 'pred'], {}), '(label, pred)\n', (28008, 28021), False, 'from sklearn import metrics\n'), ((4071, 4086), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4084, 4086), False, 'import torch\n'), ((6684, 6699), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6697, 6699), False, 'import torch\n'), ((9679, 9694), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9692, 9694), False, 'import torch\n'), ((10513, 10528), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10526, 10528), False, 'import torch\n'), ((12225, 12244), 'torch.sigmoid', 'torch.sigmoid', (['pred'], {}), '(pred)\n', (12238, 12244), False, 'import torch\n'), ((14081, 14096), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14094, 14096), False, 'import torch\n'), ((14881, 14896), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14894, 14896), False, 'import torch\n'), ((15704, 15723), 'torch.sigmoid', 'torch.sigmoid', (['pred'], {}), '(pred)\n', (15717, 15723), False, 'import torch\n'), ((16784, 16799), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16797, 16799), False, 'import torch\n'), ((17924, 17939), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17937, 17939), False, 'import torch\n'), ((19308, 19340), 'torch.ones', 'torch.ones', (['(1, num_superpixels)'], {}), '((1, num_superpixels))\n', (19318, 19340), False, 'import torch\n'), ((20019, 20068), 'torch.zeros', 'torch.zeros', (['(x.shape[0], self.c, self.h, self.w)'], {}), '((x.shape[0], self.c, self.h, self.w))\n', (20030, 20068), False, 'import torch\n'), ((20216, 20231), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20229, 20231), False, 'import torch\n'), ((21507, 21521), 'xgboost.DMatrix', 'xgb.DMatrix', (['x'], {}), '(x)\n', (21518, 
21521), True, 'import xgboost as xgb\n'), ((23294, 23308), 'xgboost.DMatrix', 'xgb.DMatrix', (['x'], {}), '(x)\n', (23305, 23308), True, 'import xgboost as xgb\n'), ((6847, 6866), 'torch.sigmoid', 'torch.sigmoid', (['pred'], {}), '(pred)\n', (6860, 6866), False, 'import torch\n'), ((8023, 8066), 'torch.tensor', 'torch.tensor', (['x.shape[1]'], {'dtype': 'torch.int64'}), '(x.shape[1], dtype=torch.int64)\n', (8035, 8066), False, 'import torch\n'), ((9802, 9823), 'torch.sigmoid', 'torch.sigmoid', (['output'], {}), '(output)\n', (9815, 9823), False, 'import torch\n'), ((14201, 14222), 'torch.sigmoid', 'torch.sigmoid', (['output'], {}), '(output)\n', (14214, 14222), False, 'import torch\n'), ((18248, 18267), 'torch.sigmoid', 'torch.sigmoid', (['pred'], {}), '(pred)\n', (18261, 18267), False, 'import torch\n'), ((20423, 20455), 'torch.mul', 'torch.mul', (['mask', 'self.x_baseline'], {}), '(mask, self.x_baseline)\n', (20432, 20455), False, 'import torch\n'), ((20458, 20480), 'torch.mul', 'torch.mul', (['(1 - mask)', 'w'], {}), '(1 - mask, w)\n', (20467, 20480), False, 'import torch\n'), ((20568, 20587), 'torch.sigmoid', 'torch.sigmoid', (['pred'], {}), '(pred)\n', (20581, 20587), False, 'import torch\n'), ((9277, 9319), 'torch.zeros', 'torch.zeros', (['x.shape[0]'], {'dtype': 'torch.int32'}), '(x.shape[0], dtype=torch.int32)\n', (9288, 9319), False, 'import torch\n'), ((9542, 9584), 'torch.zeros', 'torch.zeros', (['x.shape[0]'], {'dtype': 'torch.int32'}), '(x.shape[0], dtype=torch.int32)\n', (9553, 9584), False, 'import torch\n')] |
import urllib.request
import random
import time
import os
from bs4 import BeautifulSoup
# Alphabet of characters imgur uses in its short image ids.
chars = list("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
def GenerateUrl_postfix():
    """Return a random 5-character alphanumeric imgur id."""
    return "".join(random.choice(chars) for _ in range(5))
def GenerateUrl():
    """Build a random imgur URL and probe it with a 1-second timeout.

    Returns the URL string if the request succeeds, or -1 on URLError
    (callers compare against -1 — note the mixed str/int return type).
    """
    url = 'https://imgur.com/' + GenerateUrl_postfix()
    try:
        urllib.request.urlopen(url, timeout=1)
    except urllib.error.URLError as e:
        print(url + " : " + str(e))
        return -1
    return url
def GetImg( url ):
    """Fetch an imgur page and download the first <img> it contains.

    NOTE(review): ``img_src[14:]`` assumes a fixed-length scheme-relative
    prefix (e.g. "//i.imgur.com/", 14 chars) — confirm against actual
    markup.  Files are saved as img/file_<suffix>, so two pages with the
    same suffix overwrite each other.
    """
    try:
        html = urllib.request.urlopen(url, timeout=1)
    except urllib.error.URLError as e:
        print(url + ' : ' + str(e))
        return
    soup = BeautifulSoup(html, "html.parser")
    img_src = soup.img['src']
    if len(img_src) > 0:
        ex = img_src[14:]
        try:
            # img_src is scheme-relative ("//..."), so prepend "https:".
            urllib.request.urlretrieve("https:" + img_src, "img/file_" + ex)
        except urllib.error.URLError as e:
            print("Some hell is going on: " + str(e))
def LoadProxy(path="./src/proxy.list"):
    """Read a proxy list (one "host:port" entry per line) from *path*.

    Fix: the original chopped a fixed two characters per line
    (``line[0:-2]``), which assumed CRLF line endings and corrupted
    entries in LF-terminated files (and on the final line without a
    newline).  ``strip()`` handles both, and blank lines are skipped.
    ``path`` is a new parameter whose default is the original hard-coded
    location, so existing callers are unaffected.
    """
    proxies = []
    with open(path, "r") as file:
        for line in file:
            proxy = line.strip()
            if proxy:
                proxies.append(proxy)
    return proxies
def ConnectToProxy( proxy ):
    """Install a process-wide urllib opener routing HTTP traffic via *proxy*."""
    handler = urllib.request.ProxyHandler({'http': proxy})
    urllib.request.install_opener(urllib.request.build_opener(handler))
    print("Proxy changed")
def main():
    """Endlessly probe random imgur URLs, rotating to the next proxy every
    10 requests, downloading any image found and appending the URL to
    log.txt.  Runs until interrupted."""
    i = 0      # requests made since the last proxy switch
    elem = 1   # index of the next proxy to rotate to
    Plist = LoadProxy()
    ConnectToProxy(Plist[0])
    while not False:  # i.e. loop forever
        if i >= 10:
            ConnectToProxy(Plist[elem])
            elem += 1
            if elem >= len(Plist):
                elem = 0  # wrap around to the first proxy
            i = 0
        url = GenerateUrl()
        if url != -1:  # -1 means the URL was unreachable
            GetImg(url)
            with open("log.txt", "a") as file:
                file.write(url + "\n")
        i += 1
        time.sleep(0.1)  # throttle the request rate
time.sleep(0.1)
if not os.path.exists("img"):
os.makedirs("img")
main()
| [
"os.path.exists",
"random.choice",
"os.makedirs",
"time.sleep",
"bs4.BeautifulSoup"
] | [((941, 975), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (954, 975), False, 'from bs4 import BeautifulSoup\n'), ((2096, 2117), 'os.path.exists', 'os.path.exists', (['"""img"""'], {}), "('img')\n", (2110, 2117), False, 'import os\n'), ((2123, 2141), 'os.makedirs', 'os.makedirs', (['"""img"""'], {}), "('img')\n", (2134, 2141), False, 'import os\n'), ((491, 511), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (504, 511), False, 'import random\n'), ((2071, 2086), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2081, 2086), False, 'import time\n'), ((455, 475), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (468, 475), False, 'import random\n'), ((432, 452), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (445, 452), False, 'import random\n'), ((386, 406), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (399, 406), False, 'import random\n'), ((409, 429), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (422, 429), False, 'import random\n')] |
#!/usr/bin/env python
import os
import sys
import ideal
from setuptools import setup, find_packages
def read_file(name):
    """Return the contents of *name*, resolved relative to this file's directory.

    Uses a ``with`` block so the file handle is closed deterministically
    (the original ``open(...).read()`` leaked the handle until GC).
    """
    path = os.path.join(os.path.dirname(__file__), name)
    with open(path) as f:
        return f.read()
# Read README/CHANGES so they can be joined into the PyPI long description.
readme = read_file('README.rst')
changes = read_file('CHANGES.rst')
# Runtime dependencies.
install_requires = [
    'requests>=1.2.0',
    'lxml',
    'python-dateutil',
    'pyOpenSSL'
]
# Dependencies needed only to run the test suite.
tests_require = [
    'nose',
    'unittest2',
    'mock',
]
setup(
    name='ideal',
    # Version lives on the package as a tuple of ints, e.g. (1, 0, 2).
    version='.'.join(map(str, ideal.__version__)),
    license='MIT',
    # Packaging.
    packages=find_packages(exclude=('tests', 'tests.*')),
    install_requires=install_requires,
    dependency_links=[],
    tests_require=tests_require,
    include_package_data=True,
    zip_safe=False,
    # Metadata for PyPI.
    description='Python iDEAL v3.3.1+ implementation.',
    long_description='\n\n'.join([readme, changes]),
    author='<NAME>, <NAME>',
    author_email='<EMAIL>',
    platforms=['any'],
    url='http://github.com/maykinmedia/python-ideal',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development',
    ],
)
| [
"os.path.dirname",
"setuptools.find_packages"
] | [((585, 628), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "('tests', 'tests.*')"}), "(exclude=('tests', 'tests.*'))\n", (598, 628), False, 'from setuptools import setup, find_packages\n'), ((160, 185), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (175, 185), False, 'import os\n')] |
import random
# Read three integers from the user and print their sum.
# int() raises ValueError on non-numeric input (no validation here).
value_a = int(input("enter the first number"))
value_b = int(input("enter the second number"))
value_c = int(input("enter the third number"))
print(value_b + value_a + value_c)
# Generate 100 random values in [1, 100] (randint is inclusive of both ends).
list_of_numbers = [random.randint(1, 100) for _ in range(100)]

# Single pass to find min, max and total.
# BUG FIX: the original had a second loop over the same list that added
# every element to `total` again (roughly doubling it), which corrupted
# the computed average. That redundant loop has been removed.
minimum_value = list_of_numbers[0]
maximum_value = list_of_numbers[0]
total = list_of_numbers[0]
for index in range(1, len(list_of_numbers)):
    if list_of_numbers[index] < minimum_value:
        minimum_value = list_of_numbers[index]
    if list_of_numbers[index] > maximum_value:
        maximum_value = list_of_numbers[index]
    total += list_of_numbers[index]
average = total / len(list_of_numbers)
print("Min value:", minimum_value)
print("Max value:", maximum_value)
print("average:", average)
# book
# number formatting from https://www.bing.com/search?q=python+string+format+number+decimal+places&cvid=c321953d925a4c3f99ca309d2a4eff65&aqs=edge.0.0j69i57.8149j0j1&pglt=43&FORM=ANNTA1&PC=W000
print("Min: {:d} - max: {:d} - average: {:.2f}".format(
    min(list_of_numbers), max(list_of_numbers), sum(list_of_numbers) / len(list_of_numbers)))
print("Min:", min(list_of_numbers), "- max:", max(list_of_numbers), "- average: ",
      sum(list_of_numbers) / len(list_of_numbers))
# Simple interactive grade lookup demo.
gradebook = {}
gradebook['Eric'] = 'A' # keys have to be unique
gradebook['Jeb'] = 'A' # will add new value if the key doesn't exist
gradebook['Eric'] = 'B' # changes the value
name = input("Enter the name of someone to get their grade")
# NOTE(review): the lookup uses `name` as typed but new entries are stored
# lower-cased below, so a second query with different casing can miss —
# presumably intentional because of the key-lowering loop; confirm.
if name in gradebook:
    print(gradebook[name])
else:
    print("they are not in the gradebook, let's add them, what is their grade?")
    grade = input()
    gradebook[name.lower()] = grade
# Rewrite every key in lower case; iterate over a snapshot of the keys
# because the dict is mutated inside the loop.
for key in list(gradebook.keys()):
    grade = gradebook[key]
    gradebook.pop(key)
    gradebook[key.lower()] = grade
print(gradebook)
| [
"random.randint"
] | [((269, 291), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (283, 291), False, 'import random\n')] |
import pygame
# https://stackoverflow.com/questions/28005641
# /how-to-add-a-background-image-into-pygame
class Background:
    """A positioned background image ready for blitting onto a surface."""

    def __init__(self, image_file, location=(0, 0)):
        """Load *image_file* and anchor its rect's top-left at *location*."""
        surface = pygame.image.load(image_file)
        frame = surface.get_rect()
        frame.left, frame.top = location
        self.image = surface
        self.rect = frame
# Set constants for game dimensions, background, title and icon
backgroundImage = Background('gameBackDrop.png')
display_width = 860
display_height = 760
gameDisplay = pygame.display.set_mode((display_width, display_height))
# set_caption / set_icon return None; these names only record that setup ran.
caption = pygame.display.set_caption('Twisted Towers')
icon = pygame.display.set_icon(pygame.image.load('gameBackDrop.png'))
# Initialize and set clock
# NOTE(review): pygame.init() is conventionally called before creating the
# display window — confirm this late call is intentional.
init = pygame.init()
clock = pygame.time.Clock()
| [
"pygame.display.set_caption",
"pygame.init",
"pygame.display.set_mode",
"pygame.time.Clock",
"pygame.image.load"
] | [((541, 597), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(display_width, display_height)'], {}), '((display_width, display_height))\n', (564, 597), False, 'import pygame\n'), ((609, 653), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Twisted Towers"""'], {}), "('Twisted Towers')\n", (635, 653), False, 'import pygame\n'), ((759, 772), 'pygame.init', 'pygame.init', ([], {}), '()\n', (770, 772), False, 'import pygame\n'), ((781, 800), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (798, 800), False, 'import pygame\n'), ((685, 722), 'pygame.image.load', 'pygame.image.load', (['"""gameBackDrop.png"""'], {}), "('gameBackDrop.png')\n", (702, 722), False, 'import pygame\n'), ((249, 278), 'pygame.image.load', 'pygame.image.load', (['image_file'], {}), '(image_file)\n', (266, 278), False, 'import pygame\n')] |
import pytest
import requests
import pandas_datareader.base as base
class TestBaseReader(object):
    """Unit tests for pandas_datareader's _BaseReader plumbing."""

    def test_requests_not_monkey_patched(self):
        # Importing the package must leave requests.Session untouched.
        session = requests.Session()
        assert not hasattr(session, 'stor')

    def test_valid_retry_count(self):
        # Both non-numeric and negative retry counts are rejected.
        for bad_value in ('stuff', -1):
            with pytest.raises(ValueError):
                base._BaseReader([], retry_count=bad_value)

    def test_invalid_url(self):
        # The base class leaves `url` abstract for subclasses.
        with pytest.raises(NotImplementedError):
            base._BaseReader([]).url

    def test_invalid_format(self):
        # An unrecognised _format must fail when reading.
        with pytest.raises(NotImplementedError):
            reader = base._BaseReader([])
            reader._format = 'IM_NOT_AN_IMPLEMENTED_TYPE'
            reader._read_one_data('a', None)
class TestDailyBaseReader(object):
    """Unit tests for _DailyBaseReader's abstract hooks."""

    def test_get_params(self):
        # _get_params is left abstract on the daily base reader.
        with pytest.raises(NotImplementedError):
            base._DailyBaseReader()._get_params()
| [
"pandas_datareader.base._DailyBaseReader",
"pytest.raises",
"requests.Session",
"pandas_datareader.base._BaseReader"
] | [((256, 281), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (269, 281), False, 'import pytest\n'), ((295, 336), 'pandas_datareader.base._BaseReader', 'base._BaseReader', (['[]'], {'retry_count': '"""stuff"""'}), "([], retry_count='stuff')\n", (311, 336), True, 'import pandas_datareader.base as base\n'), ((350, 375), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (363, 375), False, 'import pytest\n'), ((389, 425), 'pandas_datareader.base._BaseReader', 'base._BaseReader', (['[]'], {'retry_count': '(-1)'}), '([], retry_count=-1)\n', (405, 425), True, 'import pandas_datareader.base as base\n'), ((472, 506), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (485, 506), False, 'import pytest\n'), ((594, 628), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (607, 628), False, 'import pytest\n'), ((646, 666), 'pandas_datareader.base._BaseReader', 'base._BaseReader', (['[]'], {}), '([])\n', (662, 666), True, 'import pandas_datareader.base as base\n'), ((841, 875), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (854, 875), False, 'import pytest\n'), ((893, 916), 'pandas_datareader.base._DailyBaseReader', 'base._DailyBaseReader', ([], {}), '()\n', (914, 916), True, 'import pandas_datareader.base as base\n'), ((176, 194), 'requests.Session', 'requests.Session', ([], {}), '()\n', (192, 194), False, 'import requests\n'), ((520, 540), 'pandas_datareader.base._BaseReader', 'base._BaseReader', (['[]'], {}), '([])\n', (536, 540), True, 'import pandas_datareader.base as base\n')] |
from django.test import TestCase
from django.contrib.auth.models import Group
from hs_access_control.models import PrivilegeCodes
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set
class T09GroupPublic(MockIRODSTestCaseMixin, TestCase):
    """Tests that a group's public_resources reflects each resource's flags."""

    def setUp(self):
        """Create an admin, a user ('dog'), two resources and one group."""
        super(T09GroupPublic, self).setUp()
        global_reset()
        self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
        self.admin = hydroshare.create_account(
            '<EMAIL>',
            username='admin',
            first_name='administrator',
            last_name='couch',
            superuser=True,
            groups=[]
        )
        self.dog = hydroshare.create_account(
            '<EMAIL>',
            username='dog',
            first_name='<NAME>',
            last_name='last_name_dog',
            superuser=False,
            groups=[]
        )
        self.squirrels = hydroshare.create_resource(
            resource_type='GenericResource',
            owner=self.dog,
            title='all about chasing squirrels',
            metadata=[],
        )
        self.holes = hydroshare.create_resource(
            resource_type='GenericResource',
            owner=self.dog,
            title='all about storing bones in holes',
            metadata=[],
        )
        # dog owns canines group
        self.canines = self.dog.uaccess.create_group(
            title='canines', description="We are the canines")

    def test_public_resources(self):
        """ public resources contain those resources that are public and discoverable """
        # Nothing shared yet -> empty.
        res = self.canines.gaccess.public_resources
        self.assertTrue(is_equal_to_as_set(res, []))
        self.dog.uaccess.share_resource_with_group(self.squirrels, self.canines,
                                                   PrivilegeCodes.VIEW)
        self.dog.uaccess.share_resource_with_group(self.holes, self.canines,
                                                   PrivilegeCodes.VIEW)
        # Shared but neither public nor discoverable -> still empty.
        res = self.canines.gaccess.public_resources
        self.assertTrue(is_equal_to_as_set(res, []))
        self.holes.raccess.public = True
        self.holes.raccess.discoverable = True
        self.holes.raccess.save() # this avoids regular requirements for "public"
        # One public+discoverable resource is now listed.
        res = self.canines.gaccess.public_resources
        self.assertTrue(is_equal_to_as_set(res, [self.holes]))
        # The annotated attributes on each result must mirror raccess/group.
        for r in res:
            self.assertEqual(r.public, r.raccess.public)
            self.assertEqual(r.discoverable, r.raccess.discoverable)
            self.assertEqual(r.published, r.raccess.published)
            self.assertEqual(r.group_name, self.canines.name)
            self.assertEqual(r.group_id, self.canines.id)
        # Discoverable alone (not public) is sufficient for listing.
        self.squirrels.raccess.discoverable = True
        self.squirrels.raccess.save()
        res = self.canines.gaccess.public_resources
        self.assertTrue(is_equal_to_as_set(res, [self.holes, self.squirrels]))
        for r in res:
            self.assertEqual(r.public, r.raccess.public)
            self.assertEqual(r.discoverable, r.raccess.discoverable)
            self.assertEqual(r.published, r.raccess.published)
            self.assertEqual(r.group_name, self.canines.name)
            self.assertEqual(r.group_id, self.canines.id)
| [
"django.contrib.auth.models.Group.objects.get_or_create",
"hs_access_control.tests.utilities.is_equal_to_as_set",
"hs_core.hydroshare.create_resource",
"hs_core.hydroshare.create_account",
"hs_access_control.tests.utilities.global_reset"
] | [((426, 440), 'hs_access_control.tests.utilities.global_reset', 'global_reset', ([], {}), '()\n', (438, 440), False, 'from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set\n'), ((465, 518), 'django.contrib.auth.models.Group.objects.get_or_create', 'Group.objects.get_or_create', ([], {'name': '"""Hydroshare Author"""'}), "(name='Hydroshare Author')\n", (492, 518), False, 'from django.contrib.auth.models import Group\n'), ((540, 673), 'hs_core.hydroshare.create_account', 'hydroshare.create_account', (['"""<EMAIL>"""'], {'username': '"""admin"""', 'first_name': '"""administrator"""', 'last_name': '"""couch"""', 'superuser': '(True)', 'groups': '[]'}), "('<EMAIL>', username='admin', first_name=\n 'administrator', last_name='couch', superuser=True, groups=[])\n", (565, 673), False, 'from hs_core import hydroshare\n'), ((771, 903), 'hs_core.hydroshare.create_account', 'hydroshare.create_account', (['"""<EMAIL>"""'], {'username': '"""dog"""', 'first_name': '"""<NAME>"""', 'last_name': '"""last_name_dog"""', 'superuser': '(False)', 'groups': '[]'}), "('<EMAIL>', username='dog', first_name='<NAME>',\n last_name='last_name_dog', superuser=False, groups=[])\n", (796, 903), False, 'from hs_core import hydroshare\n'), ((1008, 1137), 'hs_core.hydroshare.create_resource', 'hydroshare.create_resource', ([], {'resource_type': '"""GenericResource"""', 'owner': 'self.dog', 'title': '"""all about chasing squirrels"""', 'metadata': '[]'}), "(resource_type='GenericResource', owner=self.dog,\n title='all about chasing squirrels', metadata=[])\n", (1034, 1137), False, 'from hs_core import hydroshare\n'), ((1215, 1349), 'hs_core.hydroshare.create_resource', 'hydroshare.create_resource', ([], {'resource_type': '"""GenericResource"""', 'owner': 'self.dog', 'title': '"""all about storing bones in holes"""', 'metadata': '[]'}), "(resource_type='GenericResource', owner=self.dog,\n title='all about storing bones in holes', metadata=[])\n", (1241, 1349), False, 'from 
hs_core import hydroshare\n'), ((1761, 1788), 'hs_access_control.tests.utilities.is_equal_to_as_set', 'is_equal_to_as_set', (['res', '[]'], {}), '(res, [])\n', (1779, 1788), False, 'from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set\n'), ((2168, 2195), 'hs_access_control.tests.utilities.is_equal_to_as_set', 'is_equal_to_as_set', (['res', '[]'], {}), '(res, [])\n', (2186, 2195), False, 'from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set\n'), ((2444, 2481), 'hs_access_control.tests.utilities.is_equal_to_as_set', 'is_equal_to_as_set', (['res', '[self.holes]'], {}), '(res, [self.holes])\n', (2462, 2481), False, 'from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set\n'), ((2979, 3032), 'hs_access_control.tests.utilities.is_equal_to_as_set', 'is_equal_to_as_set', (['res', '[self.holes, self.squirrels]'], {}), '(res, [self.holes, self.squirrels])\n', (2997, 3032), False, 'from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set\n')] |
import sys
import numpy as np
from PIL import Image
def spec_to_png(in_path, out_path):
    """Render a saved spectrogram (.npy) as a grayscale PNG column profile.

    Loads an array shaped (channels, bins, frames), keeps channel 0, sums
    the log2 magnitudes over frames, normalises to [0, 1], widens the
    column to 128 px, and writes the image with low bins at the bottom.
    """
    specgram = np.load(in_path)  # (channels, bins, frames)
    specgram = specgram[0]
    # NOTE(review): log2 of zero/negative bins yields -inf/NaN — assumes
    # strictly positive magnitudes upstream; confirm.
    specgram = np.log2(specgram)
    specgram = specgram.sum(1)[:, np.newaxis]
    specgram = np.repeat(specgram, 128, axis=1)
    smax, smin = np.max(specgram), np.min(specgram)
    specgram = (specgram - smin) / (smax - smin)
    # BUG FIX: scale by 255, not 256 — with 256 the maximum bin
    # (1.0 * 256 = 256) wrapped around to 0 on the uint8 cast, turning
    # the brightest row black.
    specgram = (specgram * 255).astype(np.uint8)
    specgram = np.flipud(specgram)
    Image.fromarray(specgram).save(out_path)
# CLI entry point: spec_to_png <input .npy path> <output .png path>
if __name__ == '__main__':
    spec_to_png(sys.argv[1], sys.argv[2])
| [
"PIL.Image.fromarray",
"numpy.repeat",
"numpy.flipud",
"numpy.max",
"numpy.min",
"numpy.log2",
"numpy.load"
] | [((105, 121), 'numpy.load', 'np.load', (['in_path'], {}), '(in_path)\n', (112, 121), True, 'import numpy as np\n'), ((192, 209), 'numpy.log2', 'np.log2', (['specgram'], {}), '(specgram)\n', (199, 209), True, 'import numpy as np\n'), ((271, 303), 'numpy.repeat', 'np.repeat', (['specgram', '(128)'], {'axis': '(1)'}), '(specgram, 128, axis=1)\n', (280, 303), True, 'import numpy as np\n'), ((469, 488), 'numpy.flipud', 'np.flipud', (['specgram'], {}), '(specgram)\n', (478, 488), True, 'import numpy as np\n'), ((321, 337), 'numpy.max', 'np.max', (['specgram'], {}), '(specgram)\n', (327, 337), True, 'import numpy as np\n'), ((339, 355), 'numpy.min', 'np.min', (['specgram'], {}), '(specgram)\n', (345, 355), True, 'import numpy as np\n'), ((493, 518), 'PIL.Image.fromarray', 'Image.fromarray', (['specgram'], {}), '(specgram)\n', (508, 518), False, 'from PIL import Image\n')] |
import uuid
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.dialects.postgresql import ARRAY
from api import app, db, bcrypt
from api.models.user import User
from api.models.recipe import Recipe
class Rating(db.Model):
    """Association model linking a User to a Recipe with a numeric score."""
    __tablename__ = 'ratings'
    # NOTE(review): the default produces uuid4().hex (a str) for a
    # UUID-typed column — confirm the driver coerces it as intended.
    id = db.Column(UUID(as_uuid=True), primary_key=True, default=lambda: uuid.uuid4().hex)
    user_id = db.Column(UUID(as_uuid=True), db.ForeignKey('users.id'))
    recipe_id = db.Column(UUID(as_uuid=True), db.ForeignKey('recipes.id'))
    value = db.Column(db.Float, nullable=False)
    # Relationships expose reverse collections on User and Recipe.
    user = db.relationship(User, backref='user_ratings')
    recipe = db.relationship(Recipe, backref='rating_users')

    def __init__(self, user=None, recipe=None, value=0):
        # Assigning the relationship attributes sets the FK columns too.
        self.user = user
        self.recipe = recipe
        self.value = value
| [
"api.db.ForeignKey",
"uuid.uuid4",
"sqlalchemy.dialects.postgresql.UUID",
"api.db.Column",
"api.db.relationship"
] | [((580, 615), 'api.db.Column', 'db.Column', (['db.Float'], {'nullable': '(False)'}), '(db.Float, nullable=False)\n', (589, 615), False, 'from api import app, db, bcrypt\n'), ((628, 673), 'api.db.relationship', 'db.relationship', (['User'], {'backref': '"""user_ratings"""'}), "(User, backref='user_ratings')\n", (643, 673), False, 'from api import app, db, bcrypt\n'), ((687, 734), 'api.db.relationship', 'db.relationship', (['Recipe'], {'backref': '"""rating_users"""'}), "(Recipe, backref='rating_users')\n", (702, 734), False, 'from api import app, db, bcrypt\n'), ((350, 368), 'sqlalchemy.dialects.postgresql.UUID', 'UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (354, 368), False, 'from sqlalchemy.dialects.postgresql import UUID\n'), ((446, 464), 'sqlalchemy.dialects.postgresql.UUID', 'UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (450, 464), False, 'from sqlalchemy.dialects.postgresql import UUID\n'), ((466, 491), 'api.db.ForeignKey', 'db.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (479, 491), False, 'from api import app, db, bcrypt\n'), ((519, 537), 'sqlalchemy.dialects.postgresql.UUID', 'UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (523, 537), False, 'from sqlalchemy.dialects.postgresql import UUID\n'), ((539, 566), 'api.db.ForeignKey', 'db.ForeignKey', (['"""recipes.id"""'], {}), "('recipes.id')\n", (552, 566), False, 'from api import app, db, bcrypt\n'), ((404, 416), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (414, 416), False, 'import uuid\n')] |
#Cognitive NPL (Natural Language Processing)
#Copyright 2020 <NAME> MIT License. READ LICENSE.
#Personality Profiling with a Restricted Botzmannm Machine (RBM)
import numpy as np
from random import randint
class RBM:
    """Bernoulli Restricted Boltzmann Machine trained with one-step
    contrastive divergence (CD-1)."""

    def __init__(self, num_visible, num_hidden):
        """Create an RBM with the given layer sizes and seeded weights."""
        self.num_hidden = num_hidden
        self.num_visible = num_visible
        self.debug_print = True
        # Fixed seed keeps the initial weight matrix reproducible.
        np_rng = np.random.RandomState(1234)
        # Uniform init in +/- 0.1*sqrt(6/(fan_in+fan_out)), mean 0.
        self.weights = np.asarray(np_rng.uniform(
            low=-0.1 * np.sqrt(6. / (num_hidden + num_visible)),
            high=0.1 * np.sqrt(6. / (num_hidden + num_visible)),
            size=(num_visible, num_hidden)))
        # First row and first column hold the bias weights.
        self.weights = np.insert(self.weights, 0, 0, axis=0)
        self.weights = np.insert(self.weights, 0, 0, axis=1)

    def train(self, data, max_epochs, learning_rate):
        """Fit the weights to *data* (one binary example per row) via CD-1."""
        n_rows = data.shape[0]
        # Prepend the always-on bias unit to every example.
        data = np.insert(data, 0, 1, axis=1)
        for epoch in range(max_epochs):
            # Positive ("reality") phase: drive hidden units from the data.
            hidden_probs = self._logistic(np.dot(data, self.weights))
            hidden_probs[:, 0] = 1  # bias unit stays clamped on
            hidden_states = hidden_probs > np.random.rand(n_rows, self.num_hidden + 1)
            positive_grad = np.dot(data.T, hidden_probs)
            # Negative ("daydream") phase: reconstruct visibles, resample hiddens.
            recon_probs = self._logistic(np.dot(hidden_states, self.weights.T))
            recon_probs[:, 0] = 1  # bias unit stays clamped on
            recon_hidden_probs = self._logistic(np.dot(recon_probs, self.weights))
            negative_grad = np.dot(recon_probs.T, recon_hidden_probs)
            # CD-1 weight update.
            self.weights += learning_rate * (positive_grad - negative_grad)
            error = np.sum((data - recon_probs) ** 2)
            energy = -np.sum(data) - np.sum(recon_hidden_probs) - np.sum(positive_grad * self.weights)
            z = np.sum(data) + np.sum(recon_hidden_probs)
            if z > 0:
                energy = np.exp(-energy) / z
            if self.debug_print:
                print("Epoch %s: error is %s" % (epoch, error), " Energy:", energy)

    def _logistic(self, x):
        """Element-wise sigmoid 1 / (1 + e^-x)."""
        return 1.0 / (1 + np.exp(-x))
if __name__ == '__main__':
  # Demo: profile "person X" from six 5-star-rated movies, each described
  # by the same six binary features.
  r = RBM(num_visible = 6, num_hidden = 2)
  training_data = np.array([[1,1,0,0,1,1],
                            [1,1,0,1,1,0],
                            [1,1,1,0,0,1],
                            [1,1,0,1,1,0],
                            [1,1,0,0,1,0],
                            [1,1,1,0,1,0]])
  F=["love","happiness","family","horizons","action","violence"]
  print(" A Restricted Boltzmann Machine(RBM)","\n","applied to profiling a person name X","\n","based on the movie ratings of X.","\n")
  print("The input data represents the features to be trained to learn about person X.")
  print("\n","Each colum represents a feature of X's potential pesonality and tastes.")
  print(F,"\n")
  print(" Each line is a movie X watched containing those 6 features")
  print(" and for which X gave a 5 star rating.","\n")
  print(training_data)
  print("\n")
  # Train with a small learning rate for many epochs.
  max_epochs=5000
  learning_rate = 0.001
  r.train(training_data, max_epochs,learning_rate)
  print("\n","The weights of the features have been trained for person X.","\n","The first line is the bias and examine column 2 and 3","\n","The following 6 lines are X's features.","\n")
  print("Weights:")
  print(r.weights)
  print("\n","The following array is a reminder of the features of X.")
  print(" The columns are the potential features of X.","\n", "The lines are the movies highly rated by X")
  print(F,"\n")
  print(training_data)
  print("\n")
  print("The results are only experimental results.","\n")
  # Row 0 of the weights is the bias; rows 1..6 map to the six features.
  # Each feature's score is the sum of its weights to both hidden units.
  for w in range(7):
    if(w>0):
      W=print(F[w-1],":",r.weights[w,1]+r.weights[w,2])
  print("\n")
  print("A value>0 is positive, close to 0 slightly positive")
  print("A value<0 is negative, close to 0 slightly negative","\n")
| [
"numpy.insert",
"numpy.sqrt",
"numpy.random.rand",
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.sum",
"numpy.random.RandomState"
] | [((3056, 3190), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 1, 0], [1, 1, 1, 0, 0, 1], [1, 1, 0, 1, 1,\n 0], [1, 1, 0, 0, 1, 0], [1, 1, 1, 0, 1, 0]]'], {}), '([[1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 1, 0], [1, 1, 1, 0, 0, 1], [1, 1,\n 0, 1, 1, 0], [1, 1, 0, 0, 1, 0], [1, 1, 1, 0, 1, 0]])\n', (3064, 3190), True, 'import numpy as np\n'), ((716, 743), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (737, 743), True, 'import numpy as np\n'), ((1086, 1123), 'numpy.insert', 'np.insert', (['self.weights', '(0)', '(0)'], {'axis': '(0)'}), '(self.weights, 0, 0, axis=0)\n', (1095, 1123), True, 'import numpy as np\n'), ((1146, 1183), 'numpy.insert', 'np.insert', (['self.weights', '(0)', '(0)'], {'axis': '(1)'}), '(self.weights, 0, 0, axis=1)\n', (1155, 1183), True, 'import numpy as np\n'), ((1344, 1373), 'numpy.insert', 'np.insert', (['data', '(0)', '(1)'], {'axis': '(1)'}), '(data, 0, 1, axis=1)\n', (1353, 1373), True, 'import numpy as np\n'), ((1581, 1607), 'numpy.dot', 'np.dot', (['data', 'self.weights'], {}), '(data, self.weights)\n', (1587, 1607), True, 'import numpy as np\n'), ((1855, 1887), 'numpy.dot', 'np.dot', (['data.T', 'pos_hidden_probs'], {}), '(data.T, pos_hidden_probs)\n', (1861, 1887), True, 'import numpy as np\n'), ((2069, 2110), 'numpy.dot', 'np.dot', (['pos_hidden_states', 'self.weights.T'], {}), '(pos_hidden_states, self.weights.T)\n', (2075, 2110), True, 'import numpy as np\n'), ((2265, 2304), 'numpy.dot', 'np.dot', (['neg_visible_probs', 'self.weights'], {}), '(neg_visible_probs, self.weights)\n', (2271, 2304), True, 'import numpy as np\n'), ((2396, 2441), 'numpy.dot', 'np.dot', (['neg_visible_probs.T', 'neg_hidden_probs'], {}), '(neg_visible_probs.T, neg_hidden_probs)\n', (2402, 2441), True, 'import numpy as np\n'), ((2563, 2602), 'numpy.sum', 'np.sum', (['((data - neg_visible_probs) ** 2)'], {}), '((data - neg_visible_probs) ** 2)\n', (2569, 2602), True, 'import numpy as np\n'), ((1779, 1828), 
'numpy.random.rand', 'np.random.rand', (['num_examples', '(self.num_hidden + 1)'], {}), '(num_examples, self.num_hidden + 1)\n', (1793, 1828), True, 'import numpy as np\n'), ((2658, 2697), 'numpy.sum', 'np.sum', (['(pos_associations * self.weights)'], {}), '(pos_associations * self.weights)\n', (2664, 2697), True, 'import numpy as np\n'), ((2707, 2719), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (2713, 2719), True, 'import numpy as np\n'), ((2720, 2744), 'numpy.sum', 'np.sum', (['neg_hidden_probs'], {}), '(neg_hidden_probs)\n', (2726, 2744), True, 'import numpy as np\n'), ((2951, 2961), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (2957, 2961), True, 'import numpy as np\n'), ((2633, 2657), 'numpy.sum', 'np.sum', (['neg_hidden_probs'], {}), '(neg_hidden_probs)\n', (2639, 2657), True, 'import numpy as np\n'), ((2767, 2782), 'numpy.exp', 'np.exp', (['(-energy)'], {}), '(-energy)\n', (2773, 2782), True, 'import numpy as np\n'), ((806, 847), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (num_hidden + num_visible))'], {}), '(6.0 / (num_hidden + num_visible))\n', (813, 847), True, 'import numpy as np\n'), ((884, 925), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (num_hidden + num_visible))'], {}), '(6.0 / (num_hidden + num_visible))\n', (891, 925), True, 'import numpy as np\n'), ((2618, 2630), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (2624, 2630), True, 'import numpy as np\n')] |
# DRF
from rest_framework import viewsets
# Models
from product.models.product_entry import ProductEntry
# Serializers
from product.serializers.product_entry_serializer import ProductEntrySerializer
class ProductEntryViewSet(viewsets.ModelViewSet):
    """Full CRUD API (list/retrieve/create/update/destroy) for ProductEntry."""
    queryset = ProductEntry.objects.all()
    serializer_class = ProductEntrySerializer
| [
"product.models.product_entry.ProductEntry.objects.all"
] | [((268, 294), 'product.models.product_entry.ProductEntry.objects.all', 'ProductEntry.objects.all', ([], {}), '()\n', (292, 294), False, 'from product.models.product_entry import ProductEntry\n')] |
from distutils.command.build import build
from floodsystem.utils import sorted_by_key
from floodsystem.stationdata import build_station_list
from floodsystem.plot import plot_water_levels
import datetime
from floodsystem.stationdata import update_water_levels
from floodsystem.datafetcher import fetch_measure_levels
from Task2C import return_name_and_level
def run(name, dt):
    """Fetch and plot the last *dt* days of water levels for station *name*.

    Prints a message and returns early when no station matches.
    """
    station_name = name
    target = None
    for candidate in build_station_list():
        if candidate.name == station_name:
            target = candidate
            break
    # Check that station could be found. Return if not found.
    if not target:
        print("Station {} could not be found".format(station_name))
        return
    dates, levels = fetch_measure_levels(target.measure_id,
                                         dt=datetime.timedelta(days=dt))
    plot_water_levels(target, dates, levels)
if __name__ == "__main__":
    # Plot the last `dt` days for the `limit` highest-level stations.
    dt = 10
    limit = 6 # letcomb bassett being problematic as usual
    stations = build_station_list()
    update_water_levels(stations)
    # Collect just the station names from the (name, level) pairs.
    it_list = []
    for i in return_name_and_level(stations, limit):
        it_list.append(i[0])
    for i in it_list:
        run(i,dt)
| [
"Task2C.return_name_and_level",
"floodsystem.plot.plot_water_levels",
"floodsystem.stationdata.build_station_list",
"datetime.timedelta",
"floodsystem.stationdata.update_water_levels"
] | [((395, 415), 'floodsystem.stationdata.build_station_list', 'build_station_list', ([], {}), '()\n', (413, 415), False, 'from floodsystem.stationdata import build_station_list\n'), ((858, 903), 'floodsystem.plot.plot_water_levels', 'plot_water_levels', (['station_cam', 'dates', 'levels'], {}), '(station_cam, dates, levels)\n', (875, 903), False, 'from floodsystem.plot import plot_water_levels\n'), ((1019, 1039), 'floodsystem.stationdata.build_station_list', 'build_station_list', ([], {}), '()\n', (1037, 1039), False, 'from floodsystem.stationdata import build_station_list\n'), ((1044, 1073), 'floodsystem.stationdata.update_water_levels', 'update_water_levels', (['stations'], {}), '(stations)\n', (1063, 1073), False, 'from floodsystem.stationdata import update_water_levels\n'), ((1105, 1143), 'Task2C.return_name_and_level', 'return_name_and_level', (['stations', 'limit'], {}), '(stations, limit)\n', (1126, 1143), False, 'from Task2C import return_name_and_level\n'), ((824, 851), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'dt'}), '(days=dt)\n', (842, 851), False, 'import datetime\n')] |
# Copyright 2019 BlueCat Networks (USA) Inc. and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import os
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler
from config import logger, LOG_PRIORITY
from .common import get_config_data
from .healthcheck import (get_name_servers, health_check_dns_server)
# APScheduler executors: a thread pool for normal jobs plus a small
# process pool.
executors = {
    "default": ThreadPoolExecutor(20),
    "processpool": ProcessPoolExecutor(5)
}
job_defaults = {
    "coalesce": False,
    "max_instances": 3
}
scheduler = BackgroundScheduler()
scheduler.configure(executors=executors, job_defaults=job_defaults)
# Module-level registry of currently raised TestQueryFailed alarms,
# keyed "source_target" (see set_or_clear_alarm_test_query_failed).
TestQueryFailed = {
}
class ExecutorManager(object):
    """Tracks registered jobs and removes those no longer current."""

    def __init__(self):
        # who -> job object; populated via register().
        self.subscribers = dict()
        # Ids of jobs that should stay alive; others get cleaned up.
        self.current_jobs = list()
        # Only the first field (polling interval in minutes) is used here.
        self.interval, _, _, _ = get_config_data()

    def register(self, who, job):
        """Remember *job* under the id *who*."""
        self.subscribers[who] = job

    def unregister(self, who):
        """Forget the job registered under *who* (KeyError if absent)."""
        del self.subscribers[who]

    def clear_up_job(self):
        """Unregister and remove every job not listed in current_jobs."""
        remove_jobs = list(set(self.subscribers.keys()) - set(self.current_jobs))
        for job_id in remove_jobs:
            job = self.subscribers.get(job_id)
            self.unregister(job_id)
            # NOTE(review): assumes the stored object exposes
            # remove_job(job_id) — confirm against the registration sites.
            job.remove_job(job_id)

manager = ExecutorManager()
def query_dns():
    """Health-check all name servers and raise/clear SNMP alarms via snmpv3.py.

    Runs periodically from the scheduler; a falsy interval disables the check.
    """
    interval, _, _, vm_host_name = get_config_data()
    if not interval:
        return
    name_servers = get_name_servers()
    data = health_check_dns_server(name_servers)
    logger.info(data)
    try:
        for server in data:
            source = vm_host_name
            if server["status"] == False:
                # SET ALARM
                case = "set"
            else:
                # CLEAR ALARM
                case = "clear"
            # Only a state *transition* yields a non-None (cond, keypair).
            cond, keypair = set_or_clear_alarm_test_query_failed(case, source, server["name_server"])
            logger.info("DNSHealth-case: {0} - {1} - {2}".format(cond, keypair, "TestQueryFailed"))
            if cond is not None and keypair is not None:
                # snmpv3.py lives one directory above this module, under Snmp/.
                basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
                snmpv3_file_path = os.path.join(basedir, "Snmp", "snmpv3.py")
                # NOTE(review): os.system builds a shell command from
                # interpolated values — safe only if server names/keys can
                # never contain shell metacharacters; confirm, or prefer
                # subprocess.run with an argument list.
                os.system('python "{0}" "{1}" "{2}" "{3}" "{4}" "{5}" "{6}"'.format(snmpv3_file_path, cond, LOG_PRIORITY['err'],
                                                                                    keypair,
                                                                                    "DNSHealth: {0}".format(server["status"]),
                                                                                    "TestQueryFailed", source))
                logger.info("DNSHealth-send successfully:{0} - {1} - {2}".format(cond, keypair, "TestQueryFailed"))
    except Exception as ex:
        logger.error(
            "DNSHealth-send:{}".format(ex)
        )
def set_or_clear_alarm_test_query_failed(case, source, target):
    """Track the TestQueryFailed alarm state for a (source, target) pair.

    "set" registers a pending alarm for the pair; "clear" removes it.
    Returns (action, key) when the state actually changed, otherwise
    (None, None) so callers can skip redundant SNMP traps.

    Cleanups vs the original: removed the unreachable duplicated
    ``return None, None`` after the if/elif (both branches already
    returned), membership tests use the dict directly instead of
    ``.keys()``, and the single-key ``dict.update`` is a plain assignment.
    """
    key = "{0}_{1}".format(source, target)
    action = case.lower()
    if action == "set":
        # Alarm already pending for this pair -> nothing to report.
        if key in TestQueryFailed:
            return None, None
        TestQueryFailed[key] = {}
        return "set", key
    if action == "clear":
        # Only report a clear when an alarm was actually pending.
        if key in TestQueryFailed:
            del TestQueryFailed[key]
            return "clear", key
    return None, None
# Poll the name servers every `interval` minutes in a background thread,
# and shut the scheduler down cleanly when the interpreter exits.
scheduler.add_job(query_dns, trigger='interval', minutes=manager.interval, id='manager')
scheduler.start()
atexit.register(lambda: scheduler.shutdown())
| [
"apscheduler.executors.pool.ThreadPoolExecutor",
"os.path.join",
"config.logger.info",
"apscheduler.executors.pool.ProcessPoolExecutor",
"os.path.dirname",
"apscheduler.schedulers.background.BackgroundScheduler"
] | [((1135, 1156), 'apscheduler.schedulers.background.BackgroundScheduler', 'BackgroundScheduler', ([], {}), '()\n', (1154, 1156), False, 'from apscheduler.schedulers.background import BackgroundScheduler\n'), ((979, 1001), 'apscheduler.executors.pool.ThreadPoolExecutor', 'ThreadPoolExecutor', (['(20)'], {}), '(20)\n', (997, 1001), False, 'from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor\n'), ((1023, 1045), 'apscheduler.executors.pool.ProcessPoolExecutor', 'ProcessPoolExecutor', (['(5)'], {}), '(5)\n', (1042, 1045), False, 'from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor\n'), ((2093, 2110), 'config.logger.info', 'logger.info', (['data'], {}), '(data)\n', (2104, 2110), False, 'from config import logger, LOG_PRIORITY\n'), ((2758, 2800), 'os.path.join', 'os.path.join', (['basedir', '"""Snmp"""', '"""snmpv3.py"""'], {}), "(basedir, 'Snmp', 'snmpv3.py')\n", (2770, 2800), False, 'import os\n'), ((2687, 2712), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2702, 2712), False, 'import os\n')] |
import requests
import selectolax.parser as sp
# Alpha Vantage API key used by the *_time_series methods below.
ALPHA_API_KEY = "<KEY>"
# str.translate table applied to scraped summary-table labels to turn them
# into dict keys: spaces become underscores, punctuation is dropped.
NAME_FILTER = {
    ord(" "): "_",
    ord("."): "",
    ord("("): "",
    ord(")"): "",
    ord("'"): "",
}
class Stock:
    """Stock information fetcher.

    ``profile()`` scrapes the Yahoo Finance quote page; the ``*_time_series``
    methods call the Alpha Vantage REST API and reshape each entry into a
    dict of floats keyed by timestamp.
    """

    def __init__(self, symbol):
        # Ticker symbols are case-insensitive; normalise once here.
        self.symbol = symbol.upper()

    def profile(self):
        """Scrape the Yahoo Finance page for this symbol.

        Returns a dict with the header fields (symbol, company_title,
        exchange, price, change_amount, change_percentage) plus the summary
        table entries, or {'error': 'Not Founded'} when the symbol cannot
        be resolved.
        """
        prof = dict()
        url = "https://finance.yahoo.com/quote/{}".format(self.symbol)
        req = requests.get(url)
        if req.status_code != 200:
            return {'error': 'Not Founded'}
        # Parse the document once and reuse the tree (the original rebuilt
        # the HTML parser for every CSS query).
        tree = sp.HTMLParser(req.content)
        # Yahoo answers unknown symbols with a search page showing "All (0)".
        exists_check = tree.css("section span")
        if len(exists_check) >= 2 and exists_check[1].text() == "All (0)":
            return {'error': 'Not Founded'}
        # The page <h1> is "SYMBOL - Company Name".
        company_title = tree.css("h1")[0].text()
        prof["symbol"] = self.symbol
        prof["company_title"] = company_title[company_title.find('-')+2:]
        # NOTE(review): the offsets below ([13:], the "+ 22" window) are tied
        # to Yahoo's current markup and will silently break when it changes.
        span_raw = tree.css("span")[13:]
        prof["exchange"] = span_raw[0].text()[0:span_raw[0].text().find('-')-1]
        prof["price"] = span_raw[1].text()
        prof["change_amount"] = span_raw[2].text()[0:span_raw[2].text().find(" ")]
        prof["change_percentage"] = span_raw[2].text()[span_raw[2].text().find("(")+1:span_raw[2].text().find("%")]
        # The summary table alternates label/value spans starting at
        # "Previous Close"; harvest 11 label/value pairs from there.
        for node in range(0, len(span_raw)):
            if span_raw[node].text() == "Previous Close":
                for ind in range(node, node + 22, 2):
                    key = span_raw[ind].text().lower().translate(NAME_FILTER)
                    val = span_raw[ind+1].text()
                    prof[key] = val
                break
        return prof

    @staticmethod
    def _fetch_json(url):
        """GET *url* and decode the JSON body.

        BUGFIX: the original methods ran eval() on the raw response text,
        which both executes untrusted remote input and crashes on the JSON
        literals null/true/false.
        """
        return requests.get(url).json()

    @staticmethod
    def _change_percentage(entry):
        # Relative open -> close change, rounded to 3 decimal places.
        opened = float(entry['1. open'])
        return round((float(entry['4. close']) - opened) / opened, 3)

    def intraday_time_series(self, interval="5min"):
        """Alpha Vantage intraday series (default 5-minute candles)."""
        url = ("https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol="
               + self.symbol + "&interval=" + interval + "&apikey=" + ALPHA_API_KEY)
        res = self._fetch_json(url)['Time Series (' + interval + ')']
        data = dict()
        for key, val in res.items():
            data[key] = {
                "Open": float(val['1. open']),
                "High": float(val['2. high']),
                "Low": float(val['3. low']),
                "Close": float(val['4. close']),
                "Change Percentage": self._change_percentage(val),
                "Volume": float(val['5. volume']),
            }
        return data

    def daily_time_series(self, amount="compact"):
        """Alpha Vantage daily adjusted series ('compact' or 'full')."""
        url = ("https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&outputsize="
               + amount + "&symbol=" + self.symbol + "&apikey=" + ALPHA_API_KEY)
        res = self._fetch_json(url)['Time Series (Daily)']
        data = dict()
        for key, val in res.items():
            data[key] = {
                "Open": float(val['1. open']),
                "High": float(val['2. high']),
                "Low": float(val['3. low']),
                "Close": float(val['4. close']),
                "Adjusted Close": float(val['5. adjusted close']),
                "Change Percentage": self._change_percentage(val),
                "Volume": float(val['6. volume']),
                "Dividend": float(val['7. dividend amount']),
            }
        return data

    def monthly_time_series(self):
        """Alpha Vantage monthly adjusted series."""
        url = ("https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY_ADJUSTED&symbol="
               + self.symbol + "&apikey=" + ALPHA_API_KEY)
        res = self._fetch_json(url)['Monthly Adjusted Time Series']
        data = dict()
        for key, val in res.items():
            data[key] = {
                "Open": float(val['1. open']),
                "High": float(val['2. high']),
                "Low": float(val['3. low']),
                "Close": float(val['4. close']),
                "Adjusted Close": float(val['5. adjusted close']),
                "Change Percentage": self._change_percentage(val),
                "Volume": float(val['6. volume']),
                "Dividend": float(val['7. dividend amount']),
            }
        return data
| [
"selectolax.parser.HTMLParser",
"requests.get"
] | [((403, 420), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (415, 420), False, 'import requests\n'), ((2002, 2019), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2014, 2019), False, 'import requests\n'), ((2875, 2892), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2887, 2892), False, 'import requests\n'), ((3826, 3843), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3838, 3843), False, 'import requests\n'), ((585, 603), 'selectolax.parser.HTMLParser', 'sp.HTMLParser', (['res'], {}), '(res)\n', (598, 603), True, 'import selectolax.parser as sp\n'), ((791, 809), 'selectolax.parser.HTMLParser', 'sp.HTMLParser', (['res'], {}), '(res)\n', (804, 809), True, 'import selectolax.parser as sp\n'), ((1024, 1042), 'selectolax.parser.HTMLParser', 'sp.HTMLParser', (['res'], {}), '(res)\n', (1037, 1042), True, 'import selectolax.parser as sp\n')] |
from uuid import uuid4
from sqlalchemy.orm import validates
from werkzeug.security import check_password_hash, generate_password_hash
from flaskr_carved_rock.login import login_manager
from flaskr_carved_rock.sqla import sqla
from flask_login import UserMixin
class User(UserMixin, sqla.Model):
    """Application user backed by SQLAlchemy, integrated with Flask-Login.

    Sessions are keyed on the random ``uuid`` (see get_id), not the numeric
    primary key, so leaking a session id never exposes row ids.
    """
    id = sqla.Column(sqla.Integer, primary_key=True)
    # Random identifier handed to Flask-Login instead of the integer pk.
    uuid = sqla.Column(sqla.String(64), nullable=False, default=lambda: str(uuid4()))
    username = sqla.Column(sqla.Text, nullable=False, unique=True)
    # Stores the werkzeug hash, never the plaintext (see validate_not_empty).
    password = sqla.Column(sqla.Text, nullable=False)
    api_key = sqla.Column(sqla.String(64), nullable=True)  # use a UUID
    @validates('username', 'password')
    def validate_not_empty(self, key, value):
        """SQLAlchemy validator: reject empty values, enforce unique
        usernames, and hash passwords before they reach the column."""
        if not value:
            raise ValueError(f'{key.capitalize()} is required.')
        if key == 'username':
            self.validate_unique(key, value, f'{value} already registered')
        if key == 'password':
            # Hash on assignment so plaintext is never persisted.
            value = generate_password_hash(value)
        return value
    def validate_unique(self, key, value, error_message=None):
        """Raise ValueError if another row already has *key* == *value*."""
        if (
            User.query.filter_by(**{key: value}).first()
            is not None
        ):
            if not error_message:
                error_message = f'{key} must be unique.'
            raise ValueError(error_message)
        return value
    def correct_password(self, plaintext):
        """Check *plaintext* against the stored password hash."""
        return check_password_hash(self.password, plaintext)
    def get_id(self):
        # Flask-Login hook: identify the session by uuid, not the pk.
        return self.uuid
    def __repr__(self):
        return self.username
@login_manager.user_loader
def load_user(user_uuid):
    """Flask-Login session callback: resolve a stored uuid to a User."""
    matches = User.query.filter_by(uuid=user_uuid)
    return matches.first()
@login_manager.request_loader
def load_user_from_request(request):
    """Flask-Login request callback: authenticate via the x-api-key header."""
    key = request.headers.get('x-api-key')
    if not key:
        return None
    # first() already yields None when no row matches.
    return User.query.filter_by(api_key=key).first()
"flaskr_carved_rock.sqla.sqla.String",
"sqlalchemy.orm.validates",
"uuid.uuid4",
"werkzeug.security.generate_password_hash",
"flaskr_carved_rock.sqla.sqla.Column",
"werkzeug.security.check_password_hash"
] | [((307, 350), 'flaskr_carved_rock.sqla.sqla.Column', 'sqla.Column', (['sqla.Integer'], {'primary_key': '(True)'}), '(sqla.Integer, primary_key=True)\n', (318, 350), False, 'from flaskr_carved_rock.sqla import sqla\n'), ((452, 503), 'flaskr_carved_rock.sqla.sqla.Column', 'sqla.Column', (['sqla.Text'], {'nullable': '(False)', 'unique': '(True)'}), '(sqla.Text, nullable=False, unique=True)\n', (463, 503), False, 'from flaskr_carved_rock.sqla import sqla\n'), ((519, 557), 'flaskr_carved_rock.sqla.sqla.Column', 'sqla.Column', (['sqla.Text'], {'nullable': '(False)'}), '(sqla.Text, nullable=False)\n', (530, 557), False, 'from flaskr_carved_rock.sqla import sqla\n'), ((635, 668), 'sqlalchemy.orm.validates', 'validates', (['"""username"""', '"""password"""'], {}), "('username', 'password')\n", (644, 668), False, 'from sqlalchemy.orm import validates\n'), ((374, 389), 'flaskr_carved_rock.sqla.sqla.String', 'sqla.String', (['(64)'], {}), '(64)\n', (385, 389), False, 'from flaskr_carved_rock.sqla import sqla\n'), ((584, 599), 'flaskr_carved_rock.sqla.sqla.String', 'sqla.String', (['(64)'], {}), '(64)\n', (595, 599), False, 'from flaskr_carved_rock.sqla import sqla\n'), ((1413, 1458), 'werkzeug.security.check_password_hash', 'check_password_hash', (['self.password', 'plaintext'], {}), '(self.password, plaintext)\n', (1432, 1458), False, 'from werkzeug.security import check_password_hash, generate_password_hash\n'), ((968, 997), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (['value'], {}), '(value)\n', (990, 997), False, 'from werkzeug.security import check_password_hash, generate_password_hash\n'), ((427, 434), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (432, 434), False, 'from uuid import uuid4\n')] |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: ./bilibili.ipynb
from nbexp_personal import sendEmail
def itemgetter(*keys):
    """Like operator.itemgetter, but the returned callable yields a dict
    mapping each requested key to its value instead of a bare tuple."""
    pick = operator.itemgetter(*keys)
    def as_dict(*objs):
        return dict(zip(keys, pick(*objs)))
    return as_dict
def write_json(filename, content):
    """Serialize *content* to *filename* as pretty-printed UTF-8 JSON."""
    with open(filename, mode='w', encoding='UTF-8') as fh:
        json.dump(content, fh, ensure_ascii=False, indent=4)
def read_json(filename):
    """Load and return the JSON document stored in *filename* (UTF-8)."""
    with open(filename, mode='r', encoding='UTF-8') as fh:
        return json.load(fh)
# "Copy as cURL (POSIX)" export taken from the browser dev tools
def read_code(code_path):
    """Return the first line of *code_path* (a saved cURL command)."""
    with open(code_path, encoding='UTF-8') as fh:
        first_line = fh.read().split('\n')[0]
    return first_line
# Module-level: the captured cURL command, read once at import time.
code =read_code('../bili_curl.txt')
import nbexp_uncurl
import requests
from functools import partial
def fetch_code(code):
    """Execute a captured cURL command and return the decoded JSON body.

    nbexp_uncurl.parse() turns the cURL string into the source text of a
    `requests` call (with a default timeout of five seconds), which is then
    eval()'d to perform the request.
    """
    # NOTE(review): eval() runs whatever uncurl generated from the saved cURL
    # string - acceptable only because the input file is local and
    # user-authored; never feed it untrusted content.
    c =nbexp_uncurl.parse(code, timeout=5)
    r = eval(c)
    j = r.json()
    return j
import operator
import json
import datetime
def get_time(timestamp):
    """Format a Unix timestamp as a local-time ISO-8601 string."""
    return datetime.datetime.fromtimestamp(timestamp).isoformat()
def cvt_cards(j):
    """Flatten a bilibili dynamic-feed payload into
    {video_id: {'content', 'url', 'time', 'uname'}}.

    *j* is the decoded JSON response; each entry in data.cards carries the
    uploader info in its envelope and a JSON-encoded 'card' string with the
    video metadata.
    """
    cards = j['data']['cards']
    # Uploader names live in the outer envelope, not the encoded card.
    unames = list(map(lambda card:card['desc']['user_profile']['info']['uname'], cards))
    # The 'card' field is itself a JSON-encoded string - decode it.
    cards = list(map(operator.itemgetter('card'), cards))
    cards = list(map(json.loads, cards))
    kl = ('title', 'desc', 'pic', 'stat', 'ctime')
    cards = list(map(itemgetter(*kl), cards))
    def cvt(tp):
        # Build one (video_id, summary) pair from a (card, uploader) tuple.
        card, uname = tp
        content_id = str(card['stat']['aid'])
        # Keep only title/desc/pic; stat and ctime are consumed below.
        content = itemgetter(*kl[:-2])(card)
        # Request a small 64x36 thumbnail variant of the cover image.
        pic = content['pic'] + '@64w_36h_1c.jpg'
        content['pic'] = pic
        d = get_time(card['ctime'])
        url = 'https://www.bilibili.com/video/av' + content_id
        return (content_id, {'content': content , "url":url , 'time':d, 'uname':uname } )
    cards = dict((map(cvt, zip(cards, unames))))
    return cards
def get_cards():
    """Fetch the live feed (using the module-level cURL command) and
    return it flattened via cvt_cards()."""
    return cvt_cards(fetch_code(code))
def render_div(v):
    """Render one feed entry as an HTML snippet (thumbnail, link, meta)."""
    info = v['content']
    # Long descriptions are shortened to their first 20 chars plus an ellipsis.
    desc = info['desc'] if len(info['desc']) <= 50 else info['desc'][:20] + '...'
    return f"""
    <div style="margin:10px">
        <img src='{info['pic']}'>
        <a href='{v['url']}'>{info['title']}</a>
        <span>{desc} {v['time']}</span>
    </div>
    """
def render_html(v_list):
    """Assemble the per-entry snippets into a complete HTML document."""
    divs = ''.join(render_div(entry) for entry in v_list)
    return f"""\
    <html>
    <head></head>
    <body>
    {divs}
    </body>
    </html>
    """
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Create the body of the message (a plain-text and an HTML version).
def render_msg(v_list, sub_name=""):
    """Build a multipart e-mail whose HTML part lists the new entries and
    return it serialized as a string."""
    entries = list(v_list)
    message = MIMEMultipart('alternative')
    # Subject carries the subscription name and the number of new items.
    message['Subject'] = sub_name + '订阅' + '+' + str(len(entries))
    message['From'] = sub_name
    message['To'] = ''
    # Attach the HTML rendering as the text/html alternative part.
    message.attach(MIMEText(render_html(entries), 'html'))
    return message.as_string()
def get_main(json_path, get_cards, sub_name=""):
    """Build the polling entry point.

    json_path -- JSON file holding the previously-seen cards; freshly
                 fetched cards are merged back into it after each run.
    get_cards -- zero-arg callable returning a {content_id: card} dict.
    sub_name  -- subscription label used in the e-mail subject.

    Returns a main() closure that fetches, diffs against the saved state,
    mails anything new, and persists the merged state.
    """
    def main():
        cards = get_cards()
        wj = partial( write_json, json_path,)
        rj = partial( read_json, json_path,)
        if not exists(json_path):
            # First run: seed an empty state file so every card counts as new.
            wj({})
        old_cards = rj()
        # Keep only cards whose id has not been seen before.
        new_cards = filter(lambda tp:tp[0] not in old_cards, cards.items())
        new_cards = map(operator.itemgetter(1), new_cards)
        new_cards = list(new_cards)
        if new_cards:
            msg = render_msg(new_cards, sub_name)
            sendEmail(msg)
        # Persist the merged view for the next poll.
        old_cards.update(cards)
        wj(old_cards)
    return main
def block_on_观视频工作室(tp):
    """Feed filter: keep every entry except 观视频工作室 uploads whose title
    does not mention 睡前消息."""
    _, card = tp
    from_channel = card['uname'] == '观视频工作室'
    is_wanted_show = '睡前消息' in card['content']['title']
    return (not from_channel) or is_wanted_show
def filter_get_cards():
    """get_cards() with the 观视频工作室 block-list filter applied."""
    return {k: v for k, v in get_cards().items() if block_on_观视频工作室((k, v))}
from os.path import exists
# State file tracking which cards have already been mailed.
json_path = './bili.json'
main = get_main(json_path, filter_get_cards, "bili")
if __name__ == '__main__': main()
"os.path.exists",
"datetime.datetime.fromtimestamp",
"nbexp_uncurl.parse",
"nbexp_personal.sendEmail",
"email.mime.multipart.MIMEMultipart",
"functools.partial",
"json.load",
"operator.itemgetter",
"json.dump",
"email.mime.text.MIMEText"
] | [((254, 280), 'operator.itemgetter', 'operator.itemgetter', (['*args'], {}), '(*args)\n', (273, 280), False, 'import operator\n'), ((954, 989), 'nbexp_uncurl.parse', 'nbexp_uncurl.parse', (['code'], {'timeout': '(5)'}), '(code, timeout=5)\n', (972, 989), False, 'import nbexp_uncurl\n'), ((1114, 1156), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (1145, 1156), False, 'import datetime\n'), ((2173, 2198), 'functools.partial', 'partial', (['fetch_code', 'code'], {}), '(fetch_code, code)\n', (2180, 2198), False, 'from functools import partial\n'), ((3065, 3093), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', (['"""alternative"""'], {}), "('alternative')\n", (3078, 3093), False, 'from email.mime.multipart import MIMEMultipart\n'), ((3284, 3306), 'email.mime.text.MIMEText', 'MIMEText', (['html', '"""html"""'], {}), "(html, 'html')\n", (3292, 3306), False, 'from email.mime.text import MIMEText\n'), ((453, 504), 'json.dump', 'json.dump', (['content', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(content, f, ensure_ascii=False, indent=4)\n', (462, 504), False, 'import json\n'), ((598, 610), 'json.load', 'json.load', (['f'], {}), '(f)\n', (607, 610), False, 'import json\n'), ((3543, 3573), 'functools.partial', 'partial', (['write_json', 'json_path'], {}), '(write_json, json_path)\n', (3550, 3573), False, 'from functools import partial\n'), ((3589, 3618), 'functools.partial', 'partial', (['read_json', 'json_path'], {}), '(read_json, json_path)\n', (3596, 3618), False, 'from functools import partial\n'), ((1494, 1521), 'operator.itemgetter', 'operator.itemgetter', (['"""card"""'], {}), "('card')\n", (1513, 1521), False, 'import operator\n'), ((3637, 3654), 'os.path.exists', 'exists', (['json_path'], {}), '(json_path)\n', (3643, 3654), False, 'from os.path import exists\n'), ((3821, 3843), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (3840, 3843), False, 'import 
operator\n'), ((3976, 3990), 'nbexp_personal.sendEmail', 'sendEmail', (['msg'], {}), '(msg)\n', (3985, 3990), False, 'from nbexp_personal import sendEmail\n')] |
from pyspider.libs.base_handler import *
from my import My
import os
from bs4 import BeautifulSoup
'''广州'''
class Handler(My):
    """pyspider crawler for Guangzhou ('GZ') planning / land / construction
    notices.  List pages are seeded daily; the pagination handlers enqueue
    individual notice pages for self.content_page (inherited from My and not
    visible in this file chunk)."""
    name = "GZ"
    @every(minutes=24 * 60)
    def on_start(self):
        # Seed the list pages once per day.  The save={'type': self.table_name[i],
        # 'source': ...} payload routes each feed to a destination table; the
        # table_name index meanings come from the My base class - TODO confirm.
        self.crawl('http://www.upo.gov.cn/WebApi/SzskgkApi.aspx?do=list&lb=004&area=all&page=1',
                   fetch_type='js', callback=self.plan_page,
                   age=1, save={'type':self.table_name[1], 'source':'GH'})
        self.crawl('http://www.upo.gov.cn/WebApi/SzskgkApi.aspx?do=list&lb=005&area=all&page=1',
                   fetch_type='js', callback=self.plan_page,
                   age=1, save={'type':self.table_name[0], 'source':'GH'})
        self.crawl('http://www.upo.gov.cn/WebApi/SzskgkApi.aspx?do=list&lb=006&area=all&page=1',
                   fetch_type='js', callback=self.plan_page,
                   age=1, save={'type':self.table_name[4], 'source':'GH'})
        self.crawl('http://www.upo.gov.cn/WebApi/SzskgkApi.aspx?do=list&lb=007&area=all&page=1',
                   fetch_type='js', callback=self.plan_page,
                   age=1, save={'type':self.table_name[2], 'source':'GH'})
        self.crawl('http://www.upo.gov.cn/WebApi/GsApi.aspx?do=phlist&lb=null&area=null&page=1',
                   fetch_type='js', callback=self.plan_page,
                   age=1, save={'type':self.table_name[7], 'source':'GH'})
        self.crawl('http://www.upo.gov.cn/WebApi/GsApi.aspx?do=pclist&lb=null&area=null&page=1',
                   fetch_type='js', callback=self.plan_page,
                   age=1, save={'type':self.table_name[6], 'source':'GH'})
        # Land-transaction index: js_script captures the page's nAllCount
        # total so land_page can compute the number of list pages.
        self.crawl('http://www.laho.gov.cn/ywpd/tdgl/zwxx/tdjyxx/cjgs/index.htm', age=1,
                   fetch_type='js', callback=self.land_page, headers={},
                   save={'type':self.table_name[14], 'source':'GT'},
                   js_script='''function(){return nAllCount}''')
        self.crawl('http://gzcc2012.gzcc.gov.cn/zwgk/jgys.aspx',
                   save={'type':self.table_name[15], 'source':'JS', 'page':'1'}, age=1,
                   fetch_type='js', callback=self.build_page)
    def build_page(self, response):
        """Walk an ASP.NET WebForms pager by replaying __VIEWSTATE postbacks,
        one POST per page, until the last page is reached."""
        soup = BeautifulSoup(response.text, 'html.parser')
        # Total page count is embedded in the last pager link's href arguments.
        page_count = int(soup('a', 'a1')[-1]['href'].split(',')[1].split('\'')[1])
        data = {}
        data['__VIEWSTATE'] = soup('input', {'name':'__VIEWSTATE'})[0]['value']
        data['__EVENTTARGET'] = 'ASNPager1'
        data['__EVENTVALIDATION'] = soup('input', {'name':'__EVENTVALIDATION'})[0]['value']
        data['__EVENTARGUMENT'] = response.save['page']
        # The page number is added to the query string so pyspider treats each
        # page as a distinct task (it dedupes on URL).
        params = {}
        params['page'] = response.save['page']
        self.crawl(response.url, data=data, method='POST', age=1, params=params,
                   save=response.save, fetch_type='js', callback=self.content_page)
        if response.save['page'] != str(page_count):
            # Queue the next page's postback until the pager is exhausted.
            response.save['page'] = str(int(response.save['page']) + 1)
            data['__EVENTARGUMENT'] = response.save['page']
            self.crawl(response.url, data=data, method='POST', age=1,
                       save=response.save, fetch_type='js', callback=self.build_page)
    def land_page(self, response):
        """First land-transaction index page: fan out to the numbered list
        pages and enqueue the entries already visible on this page."""
        soup = BeautifulSoup(response.text, 'html.parser')
        # js_script_result is the nAllCount total; 15 items per page, rounded up.
        page_count = int((int(response.js_script_result) + 14) / 15)
        # print(page_count)
        # Numbered pages follow the ..._<n>.htm naming convention.
        url = response.url
        url = url[:url.rfind('.')] + '_%s' + url[url.rfind('.'):]
        for i in range(1, page_count):
            link = url % str(i)
            self.crawl(link, callback=self.land_list_page, age=1, fetch_type='js',
                       headers={}, save=response.save)
        lists = soup('dl', 'f_clear marginT10')[0].find_all('a')
        for i in lists:
            link = self.real_path(response.url, i['href'])
            self.crawl(link, save=response.save, callback=self.content_page)
    def land_list_page(self, response):
        """A numbered land-transaction list page: enqueue every entry link."""
        soup = BeautifulSoup(response.text, 'html.parser')
        lists = soup('dl', 'f_clear marginT10')[0].find_all('a')
        for i in lists:
            link = self.real_path(response.url, i['href'])
            self.crawl(link, save=response.save, callback=self.content_page)
    def plan_page(self, response):
        """First page of a planning-API feed: enqueue its entries and fan out
        to the remaining pages reported by 'pagecount'."""
        soup = BeautifulSoup(response.text)
        json = soup.body.text
        # The API returns JSON-like text; bind the JSON literals as locals so
        # eval() of the payload succeeds.
        # NOTE(review): eval() on remote content is dangerous - json.loads
        # would be the safer decoder here.
        null = ''
        true = 'true'
        false = 'false'
        response_json = eval(json)
        json_list = response_json['list']
        domain = 'http://www.upo.gov.cn'
        content_list = [self.real_path(domain, i['Url']) for i in json_list]
        page_count = response_json['pagecount']
        page_count = int(page_count)
        for each in content_list:
            self.crawl(each, callback=self.content_page, save=response.save)
        # The seed URLs end in "page=1"; strip the digit and append 2..N.
        ajax_url = response.url[:-1]
        for i in range(2, page_count + 1):
            next_page = ajax_url + str(i)
            self.crawl(next_page, callback=self.plan_list_page,
                       age=1, save=response.save)
    def plan_list_page(self, response):
        """Subsequent planning-API pages: same decoding as plan_page, but no
        further pagination fan-out."""
        soup = BeautifulSoup(response.text)
        json = soup.body.text
        # Same JSON-literal shim + eval() decoding as plan_page (see the
        # security note there).
        null = ''
        true = 'true'
        false = 'false'
        response_json = eval(json)
        json_list = response_json['list']
        domain = 'http://www.upo.gov.cn'
        content_list = [self.real_path(domain, i['Url']) for i in json_list]
        for each in content_list:
            # NOTE(review): the trailing "| [" below is stray text from the
            # surrounding dataset dump, not Python source.
            self.crawl(each, callback=self.content_page, save=response.save) | [
"bs4.BeautifulSoup"
] | [((2053, 2096), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (2066, 2096), False, 'from bs4 import BeautifulSoup\n'), ((3090, 3133), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (3103, 3133), False, 'from bs4 import BeautifulSoup\n'), ((3809, 3852), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (3822, 3852), False, 'from bs4 import BeautifulSoup\n'), ((4129, 4157), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text'], {}), '(response.text)\n', (4142, 4157), False, 'from bs4 import BeautifulSoup\n'), ((4931, 4959), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text'], {}), '(response.text)\n', (4944, 4959), False, 'from bs4 import BeautifulSoup\n')] |
import numpy as np
from mandlebrot import mandelbrot
def test_mandelbrot_incorrect_test():
    """Probe named 'incorrect_test': asserts this grid yields an all-zero
    result (presumably written to demonstrate a failing expectation)."""
    xs = np.linspace(-1.5, -2.0, 10)
    ys = np.linspace(-1.25, 1.25, 10)
    grid = mandelbrot(xs, ys, 100, False)
    assert np.all(grid == 0.0)
"numpy.all",
"numpy.linspace",
"mandlebrot.mandelbrot"
] | [((100, 127), 'numpy.linspace', 'np.linspace', (['(-1.5)', '(-2.0)', '(10)'], {}), '(-1.5, -2.0, 10)\n', (111, 127), True, 'import numpy as np\n'), ((136, 164), 'numpy.linspace', 'np.linspace', (['(-1.25)', '(1.25)', '(10)'], {}), '(-1.25, 1.25, 10)\n', (147, 164), True, 'import numpy as np\n'), ((178, 206), 'mandlebrot.mandelbrot', 'mandelbrot', (['x', 'y', '(100)', '(False)'], {}), '(x, y, 100, False)\n', (188, 206), False, 'from mandlebrot import mandelbrot\n'), ((218, 239), 'numpy.all', 'np.all', (['(output == 0.0)'], {}), '(output == 0.0)\n', (224, 239), True, 'import numpy as np\n')] |
from django.conf.urls import patterns, url
from admins import views
# Route table for the admins app (pre-Django-1.10 patterns() helper; the
# first argument is the unused view-prefix string).
urlpatterns = patterns(
    '',
    # Control panels
    url(r'^admin/overview/$', views.overview, name='admin_overview'),
)
| [
"django.conf.urls.url"
] | [((128, 191), 'django.conf.urls.url', 'url', (['"""^admin/overview/$"""', 'views.overview'], {'name': '"""admin_overview"""'}), "('^admin/overview/$', views.overview, name='admin_overview')\n", (131, 191), False, 'from django.conf.urls import patterns, url\n')] |
def download_tle(outdir='./'):
    """Download the NuSTAR TLE archive.

    Parameters
    ----------
    outdir: Optional desired output location. Defaults to the working
        directory. The directory is created if it does not exist.

    Returns
    ----------
    The path of the downloaded file (<outdir>/NuSTAR.tle).
    """
    import os
    import wget

    # Make sure you've got a trailing slash...
    if not outdir.endswith('/'):
        # BUGFIX: the original evaluated "outdir+'/'" and discarded the
        # result, so slash-less paths fed '' into os.makedirs() below.
        outdir += '/'

    # Make sure the directory exists and create one if not.
    directory = os.path.dirname(outdir)
    if not os.path.exists(directory):
        os.makedirs(directory)

    url = 'http://www.srl.caltech.edu/NuSTAR_Public/NuSTAROperationSite/NuSTAR.tle'

    # Remove any stale copy so wget writes NuSTAR.tle rather than NuSTAR.tle.1
    fname = 'NuSTAR.tle'
    outfile = outdir + fname
    if os.path.isfile(outfile):
        os.remove(outfile)

    wget.download(url, out=outfile)
    return outfile
def read_tle_file(tlefile, **kwargs):
    """Read in a two-line-element (TLE) archive.

    Parameters
    ----------
    tlefile : path of the TLE file to parse.

    Returns
    -------
    (times, line1, line2): times[i] is the epoch datetime decoded from the
    year / day-of-year columns of each "line 1"; line1/line2 hold the
    stripped raw TLE lines.

    Raises
    ------
    FileNotFoundError when the file does not exist.  (BUGFIX: the original
    printed a message and then crashed with a NameError on the undefined
    file handle; the file was also never closed.)
    """
    from datetime import datetime

    times = []
    line1 = []
    line2 = []

    try:
        f = open(tlefile, 'r')
    except FileNotFoundError:
        print("Unable to open: "+tlefile)
        raise

    # TLE records alternate: even lines are "line 1" (carrying the epoch),
    # odd lines are "line 2".
    with f:
        ln = 0
        for line in f:
            if ln == 0:
                # Epoch columns: [18:20] two-digit year, [20:23] day of year.
                year = int(line[18:20])
                day = int(line[20:23])
                times.extend([datetime.strptime("{}:{}".format(year, day), "%y:%j")])
                line1.extend([line.strip()])
                ln = 1
            else:
                ln = 0
                line2.extend([line.strip()])
    return times, line1, line2
def get_epoch_tle(epoch, tlefile):
    """Pick the TLE entry whose epoch is closest to *epoch*.

    epoch is a datetime (astropy Time objects are accepted and converted);
    tlefile is the archive to search.  Returns (separation_days, line1, line2).
    """
    times, line1, line2 = read_tle_file(tlefile)

    from astropy.time import Time
    # Allow astropy Time objects
    if type(epoch) is Time:
        epoch = epoch.datetime

    # Seed the search with a 100-day ceiling; if no entry beats it the first
    # record is returned (matching the original behaviour).
    best_sep = 100.
    best_idx = 0
    for idx, tle_epoch in enumerate(times):
        sep = abs((epoch - tle_epoch).days)
        if sep < best_sep:
            best_sep, best_idx = sep, idx
    return best_sep, line1[best_idx], line2[best_idx]
def parse_occ(file):
    import pandas as pd
    from datetime import datetime
    '''Parse the occultation file that you generated using the orbit_model/occ script'''
    # Whitespace-delimited table, six header lines skipped; columns are the
    # ingress/midpoint/egress timestamps with their angles.
    df = pd.read_csv(file, delim_whitespace=True, header=None, skiprows=6,
                    names = ['ingress', 'ingress_ang', 'midpoint_eng', 'midpoint_ang',
                            'egress', 'egress_ang'])
    # Initialise the new columns with placeholder values; they are overwritten
    # row by row below.
    df['visible'] = df['egress']
    df['occulted'] = df['egress']
    for ind in range(len(df)):
        # The last row has no "next ingress", so it is skipped here and
        # dropped from the returned slice below.
        if ind == len(df) -1:
            break
        # An orbit is visible from its own egress ...
        df.loc[ind,('visible')] = datetime.strptime(
            df.loc[ind, ('egress')],
            '%Y:%j:%H:%M:%S')
        # ... until the NEXT row's ingress (start of the next occultation).
        df.loc[ind,('occulted')] = datetime.strptime(
            df.loc[ind+1, ('ingress')],
            '%Y:%j:%H:%M:%S')
    orbits = df.loc[0:len(df)-2, ('visible', 'occulted')]
    return orbits
def parse_pa(file):
    '''Parse the output PA string that you generated using the orbit_model/occ script.

    The file should have exactly one line of the format:
        Position angle: 43.608806 [deg]
    (if several lines are present the last one wins, as before).

    Returns the SKY position angle in degrees.
    '''
    # BUGFIX: the original leaked the file handle; a context manager now
    # guarantees it is closed.
    with open(file) as f:
        for line in f:
            fields = line.split()
            mps_pa = float(fields[2])
    # Remember that the mission planning PA is 180 degrees off from the SKY PA:
    sky_pa = 180 - mps_pa
    return sky_pa
| [
"wget.download",
"os.path.exists",
"pandas.read_csv",
"os.makedirs",
"datetime.datetime.strptime",
"os.path.isfile",
"os.path.dirname",
"os.remove"
] | [((534, 557), 'os.path.dirname', 'os.path.dirname', (['outdir'], {}), '(outdir)\n', (549, 557), False, 'import os\n'), ((867, 890), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (881, 890), False, 'import os\n'), ((929, 960), 'wget.download', 'wget.download', (['url'], {'out': 'outfile'}), '(url, out=outfile)\n', (942, 960), False, 'import wget\n'), ((2651, 2815), 'pandas.read_csv', 'pd.read_csv', (['file'], {'delim_whitespace': '(True)', 'header': 'None', 'skiprows': '(6)', 'names': "['ingress', 'ingress_ang', 'midpoint_eng', 'midpoint_ang', 'egress',\n 'egress_ang']"}), "(file, delim_whitespace=True, header=None, skiprows=6, names=[\n 'ingress', 'ingress_ang', 'midpoint_eng', 'midpoint_ang', 'egress',\n 'egress_ang'])\n", (2662, 2815), True, 'import pandas as pd\n'), ((569, 594), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (583, 594), False, 'import os\n'), ((604, 626), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (615, 626), False, 'import os\n'), ((901, 919), 'os.remove', 'os.remove', (['outfile'], {}), '(outfile)\n', (910, 919), False, 'import os\n'), ((3042, 3100), 'datetime.datetime.strptime', 'datetime.strptime', (["df.loc[ind, 'egress']", '"""%Y:%j:%H:%M:%S"""'], {}), "(df.loc[ind, 'egress'], '%Y:%j:%H:%M:%S')\n", (3059, 3100), False, 'from datetime import datetime\n'), ((3171, 3234), 'datetime.datetime.strptime', 'datetime.strptime', (["df.loc[ind + 1, 'ingress']", '"""%Y:%j:%H:%M:%S"""'], {}), "(df.loc[ind + 1, 'ingress'], '%Y:%j:%H:%M:%S')\n", (3188, 3234), False, 'from datetime import datetime\n')] |
import os
from pathlib import Path
from minder_utils.util.util import reformat_path
import yaml
# Ensure the package-local 'confidential' directory exists; credentials and
# data-location files below live inside it.
p = Path(os.path.join(os.path.dirname(__file__), 'confidential'))
if not p.exists():
    os.mkdir(reformat_path(p))
# Well-known locations of the confidential configuration files.
data_path = os.path.join(os.path.dirname(__file__), 'confidential', 'data_path.txt')
token_path = os.path.join(os.path.dirname(__file__), 'confidential', 'token_real.json')
dates_path = os.path.join(os.path.dirname(__file__), 'confidential', 'dates.json')
delta_path = os.path.join(os.path.dirname(__file__), 'confidential', 'delta.txt')
tihm_data_path = os.path.join(os.path.dirname(__file__), 'confidential', 'tihm_data_path.txt')
| [
"os.path.dirname",
"minder_utils.util.util.reformat_path"
] | [((239, 264), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (254, 264), False, 'import os\n'), ((325, 350), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (340, 350), False, 'import os\n'), ((413, 438), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (428, 438), False, 'import os\n'), ((496, 521), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (511, 521), False, 'import os\n'), ((582, 607), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (597, 607), False, 'import os\n'), ((119, 144), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (134, 144), False, 'import os\n'), ((195, 211), 'minder_utils.util.util.reformat_path', 'reformat_path', (['p'], {}), '(p)\n', (208, 211), False, 'from minder_utils.util.util import reformat_path\n')] |
import logging
from collections import Counter, namedtuple
from datetime import date
from typing import Optional, Tuple, Set
from core.logic.debug import log_memory
from logs.logic.validation import clean_and_validate_issn, ValidationError, normalize_isbn
from logs.models import ImportBatch
from nigiri.counter5 import CounterRecord
from organizations.models import Organization
from publications.models import Title, Platform, PlatformTitle
from ..models import ReportType, Metric, DimensionText, AccessLog
# Module-level logger for the ingest helpers below.
logger = logging.getLogger(__name__)
def get_or_create_with_map(model, mapping, attr_name, attr_value, other_attrs=None):
    """Cache-aside lookup: return mapping[attr_value] when present, otherwise
    create a *model* row from {attr_name: attr_value, **other_attrs}, cache
    it under attr_value and return it."""
    if attr_value in mapping:
        return mapping[attr_value]
    fields = {attr_name: attr_value}
    if other_attrs:
        fields.update(other_attrs)
    created = model.objects.create(**fields)
    mapping[attr_value] = created
    return created
# Lightweight value object describing a title before it is persisted.
TitleRec = namedtuple('TitleRec', ('name', 'pub_type', 'issn', 'eissn', 'isbn', 'doi'))
class TitleManager(object):
def __init__(self):
self.key_to_title_id_and_pub_type = {}
self.stats = Counter()
def prefetch_titles(self, records: [TitleRec]):
title_qs = Title.objects.all()
for attr_name in ('issn', 'eissn', 'isbn', 'doi', 'name'):
attr_values = {getattr(rec, attr_name) for rec in records}
title_qs = title_qs.filter(**{attr_name + '__in': attr_values})
self.key_to_title_id_and_pub_type = {
tuple(t[:5]): tuple(t[5:])
for t in title_qs.order_by().values_list(
'name', 'isbn', 'issn', 'eissn', 'doi', 'pk', 'pub_type'
)
}
logger.debug('Prefetched %d records', len(self.key_to_title_id_and_pub_type))
@classmethod
def normalize_title_rec(cls, record: TitleRec) -> TitleRec:
"""
Normalize specific fields in the record and return a new TitleRec with normalized data.
Should be run before one attempts to ingest the data into the database.
"""
# normalize issn, eissn and isbn - they are sometimes malformed by whitespace in the data
issn = record.issn
if issn:
issn = clean_and_validate_issn(issn, raise_error=False)
eissn = record.eissn
if eissn:
eissn = clean_and_validate_issn(eissn, raise_error=False)
isbn = normalize_isbn(record.isbn) if record.isbn else record.isbn
return TitleRec(
name=record.name,
isbn=isbn,
issn=issn,
eissn=eissn,
doi=record.doi,
pub_type=record.pub_type,
)
def get_or_create(self, record: TitleRec) -> Optional[int]:
if not record.name:
logger.warning(
'Record is missing or has empty title: ' 'ISBN: %s, ISSN: %s, eISSN: %s, DOI: %s',
record.isbn,
record.issn,
record.eissn,
record.doi,
)
return None
key = (record.name, record.isbn, record.issn, record.eissn, record.doi)
if key in self.key_to_title_id_and_pub_type:
title_pk, db_pub_type = self.key_to_title_id_and_pub_type[key]
# check if we need to improve the pub_type from UNKNOWN to something better
if db_pub_type == Title.PUB_TYPE_UNKNOWN and record.pub_type != Title.PUB_TYPE_UNKNOWN:
logger.info('Upgrading publication type from unknown to "%s"', record.pub_type)
Title.objects.filter(pk=title_pk).update(pub_type=record.pub_type)
self.stats['update'] += 1
else:
self.stats['existing'] += 1
return title_pk
title = Title.objects.create(
name=record.name,
pub_type=record.pub_type,
isbn=record.isbn,
issn=record.issn,
eissn=record.eissn,
doi=record.doi,
)
self.key_to_title_id_and_pub_type[key] = (title.pk, record.pub_type)
self.stats['created'] += 1
return title.pk
def counter_record_to_title_rec(self, record: CounterRecord) -> TitleRec:
title = record.title
isbn = None
issn = None
eissn = None
doi = None
for key, value in record.title_ids.items():
if key == 'DOI':
doi = value
elif key == 'Online_ISSN':
eissn = clean_and_validate_issn(value, raise_error=False) if value else value
elif key == 'Print_ISSN':
issn = clean_and_validate_issn(value, raise_error=False) if value else value
elif key == 'ISBN':
isbn = normalize_isbn(value) if value else value
pub_type = self.deduce_pub_type(eissn, isbn, issn, record)
# convert None values for the following attrs to empty strings
isbn = '' if isbn is None else isbn
issn = '' if issn is None else issn
eissn = '' if eissn is None else eissn
doi = '' if doi is None else doi
return TitleRec(name=title, pub_type=pub_type, isbn=isbn, issn=issn, eissn=eissn, doi=doi)
def deduce_pub_type(self, eissn, isbn, issn, record):
pub_type = Title.PUB_TYPE_UNKNOWN
if 'Data_Type' in record.dimension_data:
data_type = record.dimension_data['Data_Type']
pub_type = Title.data_type_to_pub_type(data_type)
if pub_type == Title.PUB_TYPE_UNKNOWN:
# we try harder - based on isbn, issn, etc.
if (issn is not None or eissn is not None) and isbn is None:
pub_type = Title.PUB_TYPE_JOURNAL
elif isbn is not None and issn is None:
pub_type = Title.PUB_TYPE_BOOK
return pub_type
def get_or_create_from_counter_record(self, record: CounterRecord) -> int:
    """Resolve *record* to a Title primary key, creating the Title if needed."""
    return self.get_or_create(self.counter_record_to_title_rec(record))
def import_counter_records(
    report_type: ReportType,
    organization: Organization,
    platform: Platform,
    records: [CounterRecord],
    import_batch: ImportBatch,
) -> Counter:
    """
    Import a batch of COUNTER records into AccessLog rows.

    Values of records with identical identity (report type, metric,
    organization, platform, title, date, dimensions) are summed; rows already
    present in the DB with the same value are skipped (a warning is logged on
    value clashes); new rows are bulk-inserted in batches of up to 100k and
    PlatformTitle links are created for every (title, date) pair seen.

    Returns a Counter with import statistics.
    """
    stats = Counter()
    # prepare all remaps
    metrics = {metric.short_name: metric for metric in Metric.objects.all()}
    text_to_int_remaps = {}
    log_memory('X-2')
    # pre-load every DimensionText so text dimension values can be remapped to ints
    for dim_text in DimensionText.objects.all():
        if dim_text.dimension_id not in text_to_int_remaps:
            text_to_int_remaps[dim_text.dimension_id] = {}
        text_to_int_remaps[dim_text.dimension_id][dim_text.text] = dim_text
    log_memory('X-1.5')
    tm = TitleManager()
    title_recs = [tm.counter_record_to_title_rec(rec) for rec in records]
    tm.prefetch_titles(title_recs)
    # prepare raw data to be inserted into the database
    dimensions = report_type.dimensions_sorted
    to_insert = {}
    seen_dates = set()
    log_memory('X-1')
    for title_rec, record in zip(title_recs, records):  # type: TitleRec, CounterRecord
        # attributes that define the identity of the log
        title_id = tm.get_or_create(title_rec)
        if title_id is None:
            # the title could not be found or created (probably missing required field like title)
            # NOTE(review): there is no `continue` here - the record is still
            # processed with target_id=None; presumably deliberate (title-less
            # logs), confirm with callers
            stats['warn missing title'] += 1
        if type(record.metric) is int:
            # we can pass a specific metric by numeric ID
            metric_id = record.metric
        else:
            metric_id = get_or_create_with_map(Metric, metrics, 'short_name', record.metric).pk
        # normalize the date to an ISO string so keys compare consistently
        start = record.start if not isinstance(record.start, date) else record.start.isoformat()
        id_attrs = {
            'report_type_id': report_type.pk,
            'metric_id': metric_id,
            'organization_id': organization.pk,
            'platform_id': platform.pk,
            'target_id': title_id,
            'date': start,
        }
        for i, dim in enumerate(dimensions):
            dim_value = record.dimension_data.get(dim.short_name)
            if dim.type != dim.TYPE_INT:
                # text dimension: remap the string to a DimensionText pk
                if dim_value is not None:
                    remap = text_to_int_remaps.get(dim.pk)
                    if not remap:
                        remap = {}
                        text_to_int_remaps[dim.pk] = remap
                    dim_text_obj = get_or_create_with_map(
                        DimensionText,
                        remap,
                        'text',
                        dim_value,
                        other_attrs={'dimension_id': dim.pk},
                    )
                    dim_value = dim_text_obj.pk
            else:
                dim_value = int(dim_value) if dim_value is not None else None
            id_attrs[f'dim{i+1}'] = dim_value
        # sorted items -> hashable, order-independent identity key
        key = tuple(sorted(id_attrs.items()))
        if key in to_insert:
            to_insert[key] += record.value
        else:
            to_insert[key] = record.value
        seen_dates.add(record.start)
    logger.info('Title statistics: %s', tm.stats)
    # compare the prepared data with current database content
    # get the candidates
    # NOTE(review): assumes `records` is non-empty - max()/min() below would
    # raise ValueError on an empty `seen_dates`
    log_memory('XX')
    to_check = AccessLog.objects.filter(
        organization=organization,
        platform=platform,
        report_type=report_type,
        date__lte=max(seen_dates),
        date__gte=min(seen_dates),
    )
    to_compare = {}
    for al_rec in to_check.values(
        'pk',
        'organization_id',
        'platform_id',
        'report_type_id',
        'date',
        'value',
        'target_id',
        'metric_id',
        *[f'dim{i+1}' for i, d in enumerate(dimensions)],
    ):
        pk = al_rec.pop('pk')
        value = al_rec.pop('value')
        # isoformat so DB rows use the same key shape as the prepared data
        al_rec['date'] = al_rec['date'].isoformat()
        key = tuple(sorted(al_rec.items()))
        to_compare[key] = (pk, value)
    # make the comparison
    log_memory('XX2')
    als_to_insert = []
    target_date_tuples = set()
    max_batch_size = 100_000
    for key, value in to_insert.items():
        db_pk, db_value = to_compare.get(key, (None, None))
        if db_pk:
            if value != db_value:
                # NOTE(review): counted as 'updated logs' but no DB update is
                # performed here - verify intent
                logger.warning(f'Clashing values between import and db: {db_value} x {value}')
                stats['updated logs'] += 1
            else:
                logger.info('Record already present with the same value from other import')
                stats['skipped logs'] += 1
        else:
            rec = dict(key)
            rec['value'] = value
            als_to_insert.append(AccessLog(import_batch=import_batch, **rec))
            if rec['target_id'] is not None:
                target_date_tuples.add((rec['target_id'], rec['date']))
            # flush in batches to keep memory bounded
            if len(als_to_insert) >= max_batch_size:
                log_memory('Batch create')
                AccessLog.objects.bulk_create(als_to_insert)
                stats['new logs'] += len(als_to_insert)
                als_to_insert = []
    # now insert the records that are clean to be inserted
    log_memory('XX3')
    AccessLog.objects.bulk_create(als_to_insert)
    stats['new logs'] += len(als_to_insert)
    log_memory('XX4')
    # and insert the PlatformTitle links
    stats.update(create_platformtitle_links(organization, platform, target_date_tuples))
    log_memory('XX5')
    return stats
def create_platformtitle_links(organization, platform, target_date_tuples: Set[Tuple]):
    """
    Create PlatformTitle rows for every (title_id, iso-date) pair that is not
    yet linked to the given organization/platform.

    Takes tuples collected while creating AccessLogs in
    `import_counter_records` and returns a dict with the number of newly
    created rows.
    """
    already_present = set()
    for pt in PlatformTitle.objects.filter(organization=organization, platform=platform):
        already_present.add((pt.title_id, pt.date.isoformat()))
    count_before = PlatformTitle.objects.count()
    new_links = [
        PlatformTitle(
            organization=organization, platform=platform, title_id=title_id, date=rec_date
        )
        for title_id, rec_date in target_date_tuples - already_present
    ]
    PlatformTitle.objects.bulk_create(new_links, ignore_conflicts=True)
    count_after = PlatformTitle.objects.count()
    return {'new platformtitles': count_after - count_before}
def create_platformtitle_links_from_accesslogs(accesslogs: [AccessLog]) -> [PlatformTitle]:
    """
    Derive PlatformTitle links from a list of accesslogs and bulk-create the
    ones that do not exist yet.
    :param accesslogs:
    :return:
    """
    wanted = {(al.organization_id, al.platform_id, al.target_id, al.date) for al in accesslogs}
    org_ids = {rec[0] for rec in wanted}
    platform_ids = {rec[1] for rec in wanted}
    title_ids = {rec[2] for rec in wanted}
    dates = {rec[3] for rec in wanted}
    existing = {
        (pt.organization_id, pt.platform_id, pt.target_id, pt.date)
        for pt in PlatformTitle.objects.filter(
            organization_id__in=org_ids,
            platform_id__in=platform_ids,
            title_id__in=title_ids,
            date__in=dates,
        )
    }
    new_objs = [
        PlatformTitle(organization_id=org, platform_id=plat, title_id=title, date=when)
        for org, plat, title, when in wanted - existing
    ]
    return PlatformTitle.objects.bulk_create(new_objs, ignore_conflicts=True)
| [
"logging.getLogger",
"logs.logic.validation.normalize_isbn",
"publications.models.Title.data_type_to_pub_type",
"collections.namedtuple",
"publications.models.PlatformTitle.objects.count",
"core.logic.debug.log_memory",
"logs.logic.validation.clean_and_validate_issn",
"collections.Counter",
"publica... | [((520, 547), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (537, 547), False, 'import logging\n'), ((923, 999), 'collections.namedtuple', 'namedtuple', (['"""TitleRec"""', "('name', 'pub_type', 'issn', 'eissn', 'isbn', 'doi')"], {}), "('TitleRec', ('name', 'pub_type', 'issn', 'eissn', 'isbn', 'doi'))\n", (933, 999), False, 'from collections import Counter, namedtuple\n'), ((6178, 6187), 'collections.Counter', 'Counter', ([], {}), '()\n', (6185, 6187), False, 'from collections import Counter, namedtuple\n'), ((6322, 6339), 'core.logic.debug.log_memory', 'log_memory', (['"""X-2"""'], {}), "('X-2')\n", (6332, 6339), False, 'from core.logic.debug import log_memory\n'), ((6588, 6607), 'core.logic.debug.log_memory', 'log_memory', (['"""X-1.5"""'], {}), "('X-1.5')\n", (6598, 6607), False, 'from core.logic.debug import log_memory\n'), ((6890, 6907), 'core.logic.debug.log_memory', 'log_memory', (['"""X-1"""'], {}), "('X-1')\n", (6900, 6907), False, 'from core.logic.debug import log_memory\n'), ((9081, 9097), 'core.logic.debug.log_memory', 'log_memory', (['"""XX"""'], {}), "('XX')\n", (9091, 9097), False, 'from core.logic.debug import log_memory\n'), ((9825, 9842), 'core.logic.debug.log_memory', 'log_memory', (['"""XX2"""'], {}), "('XX2')\n", (9835, 9842), False, 'from core.logic.debug import log_memory\n'), ((10931, 10948), 'core.logic.debug.log_memory', 'log_memory', (['"""XX3"""'], {}), "('XX3')\n", (10941, 10948), False, 'from core.logic.debug import log_memory\n'), ((11046, 11063), 'core.logic.debug.log_memory', 'log_memory', (['"""XX4"""'], {}), "('XX4')\n", (11056, 11063), False, 'from core.logic.debug import log_memory\n'), ((11198, 11215), 'core.logic.debug.log_memory', 'log_memory', (['"""XX5"""'], {}), "('XX5')\n", (11208, 11215), False, 'from core.logic.debug import log_memory\n'), ((11682, 11711), 'publications.models.PlatformTitle.objects.count', 'PlatformTitle.objects.count', ([], {}), '()\n', (11709, 11711), 
False, 'from publications.models import Title, Platform, PlatformTitle\n'), ((11943, 12004), 'publications.models.PlatformTitle.objects.bulk_create', 'PlatformTitle.objects.bulk_create', (['pts'], {'ignore_conflicts': '(True)'}), '(pts, ignore_conflicts=True)\n', (11976, 12004), False, 'from publications.models import Title, Platform, PlatformTitle\n'), ((12023, 12052), 'publications.models.PlatformTitle.objects.count', 'PlatformTitle.objects.count', ([], {}), '()\n', (12050, 12052), False, 'from publications.models import Title, Platform, PlatformTitle\n'), ((12977, 13044), 'publications.models.PlatformTitle.objects.bulk_create', 'PlatformTitle.objects.bulk_create', (['to_create'], {'ignore_conflicts': '(True)'}), '(to_create, ignore_conflicts=True)\n', (13010, 13044), False, 'from publications.models import Title, Platform, PlatformTitle\n'), ((1122, 1131), 'collections.Counter', 'Counter', ([], {}), '()\n', (1129, 1131), False, 'from collections import Counter, namedtuple\n'), ((1204, 1223), 'publications.models.Title.objects.all', 'Title.objects.all', ([], {}), '()\n', (1221, 1223), False, 'from publications.models import Title, Platform, PlatformTitle\n'), ((3743, 3884), 'publications.models.Title.objects.create', 'Title.objects.create', ([], {'name': 'record.name', 'pub_type': 'record.pub_type', 'isbn': 'record.isbn', 'issn': 'record.issn', 'eissn': 'record.eissn', 'doi': 'record.doi'}), '(name=record.name, pub_type=record.pub_type, isbn=\n record.isbn, issn=record.issn, eissn=record.eissn, doi=record.doi)\n', (3763, 3884), False, 'from publications.models import Title, Platform, PlatformTitle\n'), ((12826, 12917), 'publications.models.PlatformTitle', 'PlatformTitle', ([], {'organization_id': 'rec[0]', 'platform_id': 'rec[1]', 'title_id': 'rec[2]', 'date': 'rec[3]'}), '(organization_id=rec[0], platform_id=rec[1], title_id=rec[2],\n date=rec[3])\n', (12839, 12917), False, 'from publications.models import Title, Platform, PlatformTitle\n'), ((2203, 2251), 
'logs.logic.validation.clean_and_validate_issn', 'clean_and_validate_issn', (['issn'], {'raise_error': '(False)'}), '(issn, raise_error=False)\n', (2226, 2251), False, 'from logs.logic.validation import clean_and_validate_issn, ValidationError, normalize_isbn\n'), ((2319, 2368), 'logs.logic.validation.clean_and_validate_issn', 'clean_and_validate_issn', (['eissn'], {'raise_error': '(False)'}), '(eissn, raise_error=False)\n', (2342, 2368), False, 'from logs.logic.validation import clean_and_validate_issn, ValidationError, normalize_isbn\n'), ((2384, 2411), 'logs.logic.validation.normalize_isbn', 'normalize_isbn', (['record.isbn'], {}), '(record.isbn)\n', (2398, 2411), False, 'from logs.logic.validation import clean_and_validate_issn, ValidationError, normalize_isbn\n'), ((5402, 5440), 'publications.models.Title.data_type_to_pub_type', 'Title.data_type_to_pub_type', (['data_type'], {}), '(data_type)\n', (5429, 5440), False, 'from publications.models import Title, Platform, PlatformTitle\n'), ((10701, 10727), 'core.logic.debug.log_memory', 'log_memory', (['"""Batch create"""'], {}), "('Batch create')\n", (10711, 10727), False, 'from core.logic.debug import log_memory\n'), ((11569, 11643), 'publications.models.PlatformTitle.objects.filter', 'PlatformTitle.objects.filter', ([], {'organization': 'organization', 'platform': 'platform'}), '(organization=organization, platform=platform)\n', (11597, 11643), False, 'from publications.models import Title, Platform, PlatformTitle\n'), ((11805, 11903), 'publications.models.PlatformTitle', 'PlatformTitle', ([], {'organization': 'organization', 'platform': 'platform', 'title_id': 'title_id', 'date': 'rec_date'}), '(organization=organization, platform=platform, title_id=\n title_id, date=rec_date)\n', (11818, 11903), False, 'from publications.models import Title, Platform, PlatformTitle\n'), ((12544, 12742), 'publications.models.PlatformTitle.objects.filter', 'PlatformTitle.objects.filter', ([], {'organization_id__in': '{rec[0] for 
rec in data}', 'platform_id__in': '{rec[1] for rec in data}', 'title_id__in': '{rec[2] for rec in data}', 'date__in': '{rec[3] for rec in data}'}), '(organization_id__in={rec[0] for rec in data},\n platform_id__in={rec[1] for rec in data}, title_id__in={rec[2] for rec in\n data}, date__in={rec[3] for rec in data})\n', (12572, 12742), False, 'from publications.models import Title, Platform, PlatformTitle\n'), ((3528, 3561), 'publications.models.Title.objects.filter', 'Title.objects.filter', ([], {'pk': 'title_pk'}), '(pk=title_pk)\n', (3548, 3561), False, 'from publications.models import Title, Platform, PlatformTitle\n'), ((4459, 4508), 'logs.logic.validation.clean_and_validate_issn', 'clean_and_validate_issn', (['value'], {'raise_error': '(False)'}), '(value, raise_error=False)\n', (4482, 4508), False, 'from logs.logic.validation import clean_and_validate_issn, ValidationError, normalize_isbn\n'), ((4590, 4639), 'logs.logic.validation.clean_and_validate_issn', 'clean_and_validate_issn', (['value'], {'raise_error': '(False)'}), '(value, raise_error=False)\n', (4613, 4639), False, 'from logs.logic.validation import clean_and_validate_issn, ValidationError, normalize_isbn\n'), ((4715, 4736), 'logs.logic.validation.normalize_isbn', 'normalize_isbn', (['value'], {}), '(value)\n', (4729, 4736), False, 'from logs.logic.validation import clean_and_validate_issn, ValidationError, normalize_isbn\n')] |
import datetime
import feedparser
import json
import os
import shutil
import sys
import time
from .common import p, FEEDS_FILE_NAME
from .config import TIMEZONE
def do(target_category=None, log=False):
    """
    Fetch the configured RSS feeds and dump them as JSON files.

    For every category in the feeds file a ``rss_<category>.json`` file is
    written under ``p["path_data"]``; entries are keyed (and thus
    de-duplicated) by their publication timestamp and sorted newest first.

    :param target_category: when given, only this category is fetched and the
        parsed result dict is returned
    :param log: when True, progress is written to stdout
    """

    def getFeedFromRSS(category, urls, show_author=False, log=False):
        # Fetch every feed of one category and write the merged JSON file.
        rslt = {}
        for source, url in urls.items():
            try:
                if log:
                    sys.stdout.write(f"- {url}")
                d = feedparser.parse(url)
                if log:
                    sys.stdout.write(" - Done\n")
            except Exception:
                # was a bare ``except:`` -- keep the abort-on-failure
                # behaviour but let KeyboardInterrupt/SystemExit propagate
                sys.exit(" - Failed\n" if log else 0)
            for feed in d.entries:
                try:
                    at = datetime.datetime(*feed.published_parsed[:6]).replace(tzinfo=datetime.timezone.utc).astimezone(TIMEZONE)
                except Exception:
                    # entries without a parsable publication date are skipped
                    continue
                # today's entries show only the time, older ones the date too
                pubDate = at.strftime("%H:%M" if at.date() == datetime.date.today() else "%b %d, %H:%M")
                ts = int(time.mktime(feed.published_parsed))
                entries = {
                    "id": ts,
                    "sourceName": source if not show_author else feed.author,
                    "pubDate": pubDate,
                    "timestamp": ts,
                    "url": feed.link,
                    "title": feed.title,
                }
                rslt[entries["id"]] = entries
        # newest first; keying by timestamp above de-duplicates entries
        rslt = [val for key, val in sorted(rslt.items(), reverse=True)]
        rslt = {"entries": rslt, "created_at": int(time.time())}
        with open(os.path.join(p["path_data"], f"rss_{category}.json"), "w", encoding="utf-8") as f:
            f.write(json.dumps(rslt, ensure_ascii=False))
        return rslt

    if not os.path.isfile(FEEDS_FILE_NAME):
        # first run: seed the feeds file with the bundled default
        shutil.copyfile(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), "feeds.json"),
            FEEDS_FILE_NAME,
        )
    with open(FEEDS_FILE_NAME, "r") as fp:
        RSS = json.load(fp)
    if target_category:
        return getFeedFromRSS(
            target_category,
            RSS[target_category]["feeds"],
            show_author=RSS[target_category].get("show_author", False),
            log=log,
        )
    for category, d in RSS.items():
        getFeedFromRSS(category, d["feeds"], show_author=d.get("show_author", False), log=log)
# Allow running the module directly to refresh all feeds.
if __name__ == "__main__":
    do()
| [
"datetime.datetime",
"feedparser.parse",
"time.mktime",
"json.dumps",
"os.path.join",
"os.path.abspath",
"os.path.isfile",
"sys.exit",
"json.load",
"datetime.date.today",
"time.time",
"sys.stdout.write"
] | [((1718, 1749), 'os.path.isfile', 'os.path.isfile', (['FEEDS_FILE_NAME'], {}), '(FEEDS_FILE_NAME)\n', (1732, 1749), False, 'import os\n'), ((1922, 1935), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1931, 1935), False, 'import json\n'), ((447, 468), 'feedparser.parse', 'feedparser.parse', (['url'], {}), '(url)\n', (463, 468), False, 'import feedparser\n'), ((1511, 1522), 'time.time', 'time.time', ([], {}), '()\n', (1520, 1522), False, 'import time\n'), ((1544, 1596), 'os.path.join', 'os.path.join', (["p['path_data']", 'f"""rss_{category}.json"""'], {}), "(p['path_data'], f'rss_{category}.json')\n", (1556, 1596), False, 'import os\n'), ((1647, 1683), 'json.dumps', 'json.dumps', (['rslt'], {'ensure_ascii': '(False)'}), '(rslt, ensure_ascii=False)\n', (1657, 1683), False, 'import json\n'), ((397, 425), 'sys.stdout.write', 'sys.stdout.write', (['f"""- {url}"""'], {}), "(f'- {url}')\n", (413, 425), False, 'import sys\n'), ((514, 543), 'sys.stdout.write', 'sys.stdout.write', (['""" - Done\n"""'], {}), "(' - Done\\n')\n", (530, 543), False, 'import sys\n'), ((581, 618), 'sys.exit', 'sys.exit', (["(' - Failed\\n' if log else 0)"], {}), "(' - Failed\\n' if log else 0)\n", (589, 618), False, 'import sys\n'), ((992, 1026), 'time.mktime', 'time.mktime', (['feed.published_parsed'], {}), '(feed.published_parsed)\n', (1003, 1026), False, 'import time\n'), ((1804, 1829), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1819, 1829), False, 'import os\n'), ((923, 944), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (942, 944), False, 'import datetime\n'), ((702, 747), 'datetime.datetime', 'datetime.datetime', (['*feed.published_parsed[:6]'], {}), '(*feed.published_parsed[:6])\n', (719, 747), False, 'import datetime\n')] |
import math
import numpy as np
"""
This function calculates the roots of the quadratic inequality for the Rh reuse factor.
Parameters:
lx - list of input sizes of the lstms. The size of this list is equal to the number of layers.
lh - list of input sizes of the hidden layers. The size of this list is equal to the number of layers.
lt_sigma - the latency of the sigmoid/tanh functions.
lt_tail - the latency of the tail.
dsp_total - the total number of dsps
This returns the roots of the quadratic inequality.
"""
def reuse_factor(lx, lh, lt_sigma, lt_tail, dsp_total):
    """
    Solve the quadratic inequality for the Rh reuse factor of an LSTM stack.

    :param lx: input sizes of the LSTM layers (one entry per layer)
    :param lh: hidden-layer sizes (one entry per layer)
    :param lt_sigma: latency of the sigmoid/tanh functions
    :param lt_tail: latency of the tail
    :param dsp_total: total number of available DSPs
    :return: the two roots ``(r_plus, r_minus)`` of the quadratic
    :raises ValueError: if the discriminant is negative (no real roots)
    """
    lat = lt_sigma + lt_tail
    sum_lh = sum(lh)
    a = dsp_total - 4 * sum_lh
    b = dsp_total * lat - 4 * np.dot(lx, lh) - 4 * np.dot(lh, lh) - 4 * lat * sum_lh
    c = -4 * lat * np.dot(lh, lh)
    # math.sqrt raises ValueError when b**2 - 4ac < 0
    discriminant = math.sqrt(b ** 2 - 4 * a * c)
    return (-b + discriminant) / (2 * a), (-b - discriminant) / (2 * a)
# Demo runs: print the reuse-factor root pairs for example FPGA targets.
print("ZYNQ")
print(reuse_factor([1,9],[9,9], 3,8,220))
print("lstm_ae_small exmaple")
print(reuse_factor([1,9],[9,9], 3,8,900))
print("\n")
print("KU115")
print("mnist 1/2 layers examples")
print(reuse_factor([28],[32], 3,8,5520))
print(reuse_factor([28,16],[16,16], 3,8,5520))
print("\n")
print("U250")
print("lstm_ae exmaple")
print(reuse_factor([1,32,8,8],[32,8,8,32], 3,8,12200))
| [
"numpy.dot",
"math.sqrt"
] | [((815, 829), 'numpy.dot', 'np.dot', (['lh', 'lh'], {}), '(lh, lh)\n', (821, 829), True, 'import numpy as np\n'), ((889, 918), 'math.sqrt', 'math.sqrt', (['(b ** 2 - 4 * a * c)'], {}), '(b ** 2 - 4 * a * c)\n', (898, 918), False, 'import math\n'), ((938, 967), 'math.sqrt', 'math.sqrt', (['(b ** 2 - 4 * a * c)'], {}), '(b ** 2 - 4 * a * c)\n', (947, 967), False, 'import math\n'), ((726, 740), 'numpy.dot', 'np.dot', (['lh', 'lh'], {}), '(lh, lh)\n', (732, 740), True, 'import numpy as np\n'), ((705, 719), 'numpy.dot', 'np.dot', (['lx', 'lh'], {}), '(lx, lh)\n', (711, 719), True, 'import numpy as np\n')] |
# Fall back to identity functions when Kubeflow Pipelines is not installed,
# so the component can still be imported/executed outside a KFP context.
try:
    from kfp.components import InputPath
    from kfp.components import OutputPath
except ImportError:

    def InputPath(c):
        # identity stand-in for kfp.components.InputPath
        return c

    def OutputPath(c):
        # identity stand-in for kfp.components.OutputPath
        return c

# type-name string used for the KFP "Metrics" output artifact
metrics = "Metrics"
def test(
    prepro_dir: InputPath(str),
    prev_model_dir: InputPath(str),
    sent_size_th,
    ques_size_th,
    num_epochs,
    num_steps,
    eval_period,
    save_period,
    learning_rate,
    batch_size,
    hidden_size,
    var_decay,
    training_mode,
    device,
    device_type,
    num_gpus,
    mlpipeline_metrics_path: OutputPath(metrics),
    model_dir: OutputPath(str),
):
    """
    Kubeflow Pipelines component: evaluate a trained SQuAD model.

    Copies the model produced by the previous pipeline step from
    *prev_model_dir* into *model_dir*, builds the full TensorFlow flag
    configuration (mode="test"), runs the evaluation via ``basic.main.main``
    and writes accuracy/F1 (span mode) or accuracy/loss (classification mode)
    as KFP metrics to *mlpipeline_metrics_path*.

    NOTE(review): the numeric parameters are cast with int()/float() below, so
    they are presumably passed in as strings by the pipeline -- confirm.
    """
    # imports are local so the component body is self-contained under KFP
    import os
    import shutil
    import tensorflow as tf

    # reuse the model checkpoint trained in the previous pipeline step
    src = prev_model_dir + "/out/squad"
    dst = model_dir + "/out/squad"
    shutil.copytree(src, dst)
    # "span" mode evaluates span prediction, otherwise sentence classification
    model_name = "basic" if training_mode == "span" else "basic-class"
    data_dir = (
        prepro_dir + "/squad"
        if training_mode == "span"
        else prepro_dir + "/squad-class"
    )
    output_dir = model_dir + "/out/squad"
    flags = tf.app.flags
    # Names and directories
    flags.DEFINE_string("model_name", model_name, "Model name [basic | basic-class]")
    flags.DEFINE_string("data_dir", data_dir, "Data dir [data/squad]")
    flags.DEFINE_string("run_id", "0", "Run ID [0]")
    flags.DEFINE_string("out_base_dir", output_dir, "out base dir [out]")
    flags.DEFINE_string("forward_name", "single", "Forward name [single]")
    flags.DEFINE_string("answer_path", "", "Answer path []")
    flags.DEFINE_string("eval_path", "", "Eval path []")
    flags.DEFINE_string("load_path", "", "Load path []")
    flags.DEFINE_string("shared_path", "", "Shared path []")
    # Device placement
    flags.DEFINE_string(
        "device", device, "default device for summing gradients. [/cpu:0]"
    )
    flags.DEFINE_string(
        "device_type",
        device_type,
        "device for computing gradients (parallelization). cpu | gpu [gpu]",
    )
    flags.DEFINE_integer(
        "num_gpus", int(num_gpus), "num of gpus or cpus for computing gradients [1]"
    )
    # Essential training and test options
    flags.DEFINE_string("mode", "test", "train | test | forward [test]")
    flags.DEFINE_boolean("load", True, "load saved data? [True]")
    flags.DEFINE_bool("single", False, "supervise only the answer sentence? [False]")
    flags.DEFINE_boolean("debug", False, "Debugging mode? [False]")
    flags.DEFINE_bool(
        "load_ema", True, "load exponential average of variables when testing? [True]"
    )
    flags.DEFINE_bool("eval", True, "eval? [True]")
    flags.DEFINE_bool("train_only_output", False, "Train only output module?")
    flags.DEFINE_bool("load_trained_model", False, "Load SQUAD trained model")
    flags.DEFINE_bool("freeze_phrase_layer", False, "Freeze phrase layer")
    flags.DEFINE_bool("freeze_att_layer", False, "Freeze att layer")
    flags.DEFINE_bool(
        "freeze_span_modelling_layer", False, "Freeze modelling layer for span"
    )
    flags.DEFINE_bool("using_shared", False, "using pre-created shared.json")
    flags.DEFINE_bool("load_shared", False, "load shared.json for each batch")
    flags.DEFINE_string("dev_name", "test", "using dev or test?")
    flags.DEFINE_string("test_name", "dev", "using test or dev?")
    # Training / test parameters
    flags.DEFINE_integer("batch_size", int(batch_size), "Batch size [60]")
    flags.DEFINE_integer("val_num_batches", 100, "validation num batches [100]")
    flags.DEFINE_integer("test_num_batches", 0, "test num batches [0]")
    flags.DEFINE_integer(
        "num_epochs", int(num_epochs), "Total number of epochs for training [12]"
    )
    flags.DEFINE_integer("num_steps", int(num_steps), "Number of steps [20000]")
    flags.DEFINE_integer("load_step", 0, "load step [0]")
    flags.DEFINE_float("init_lr", float(learning_rate), "Initial learning rate [0.5]")
    flags.DEFINE_float(
        "input_keep_prob", 0.8, "Input keep prob for the dropout of LSTM weights [0.8]"
    )
    flags.DEFINE_float(
        "keep_prob", 0.8, "Keep prob for the dropout of Char-CNN weights [0.8]"
    )
    flags.DEFINE_float("wd", 0.0, "L2 weight decay for regularization [0.0]")
    flags.DEFINE_integer("hidden_size", int(hidden_size), "Hidden size [100]")
    flags.DEFINE_integer("char_out_size", 100, "char-level word embedding size [100]")
    flags.DEFINE_integer("char_emb_size", 8, "Char emb size [8]")
    flags.DEFINE_string(
        "out_channel_dims",
        "100",
        "Out channel dims of Char-CNN, separated by commas [100]",
    )
    flags.DEFINE_string(
        "filter_heights", "5", "Filter heights of Char-CNN, separated by commas [5]"
    )
    flags.DEFINE_bool("finetune", False, "Finetune word embeddings? [False]")
    flags.DEFINE_bool("highway", True, "Use highway? [True]")
    flags.DEFINE_integer("highway_num_layers", 2, "highway num layers [2]")
    flags.DEFINE_bool("share_cnn_weights", True, "Share Char-CNN weights [True]")
    flags.DEFINE_bool(
        "share_lstm_weights",
        True,
        "Share pre-processing (phrase-level) LSTM weights [True]",
    )
    flags.DEFINE_float(
        "var_decay",
        float(var_decay),
        "Exponential moving average decay for variables [0.999]",
    )
    flags.DEFINE_string("classifier", "maxpool", "[maxpool, sumpool, default]")
    # Optimizations
    flags.DEFINE_bool("cluster", True, "Cluster data for faster training [False]")
    flags.DEFINE_bool("len_opt", True, "Length optimization? [False]")
    flags.DEFINE_bool(
        "cpu_opt", False, "CPU optimization? GPU computation can be slower [False]"
    )
    # Logging and saving options
    flags.DEFINE_boolean("progress", True, "Show progress? [True]")
    flags.DEFINE_integer("log_period", 100, "Log period [100]")
    flags.DEFINE_integer("eval_period", int(eval_period), "Eval period [1000]")
    flags.DEFINE_integer("save_period", int(save_period), "Save Period [1000]")
    flags.DEFINE_integer("max_to_keep", 20, "Max recent saves to keep [20]")
    flags.DEFINE_bool("dump_eval", True, "dump eval? [True]")
    flags.DEFINE_bool("dump_answer", False, "dump answer? [True]")
    flags.DEFINE_bool("vis", False, "output visualization numbers? [False]")
    flags.DEFINE_bool("dump_pickle", True, "Dump pickle instead of json? [True]")
    flags.DEFINE_float(
        "decay", 0.9, "Exponential moving average decay for logging values [0.9]"
    )
    # Thresholds for speed and less memory usage
    flags.DEFINE_integer("word_count_th", 10, "word count th [100]")
    flags.DEFINE_integer("char_count_th", 50, "char count th [500]")
    flags.DEFINE_integer("sent_size_th", int(sent_size_th), "sent size th [64]")
    flags.DEFINE_integer("num_sents_th", 1, "num sents th [8]")
    flags.DEFINE_integer("ques_size_th", int(ques_size_th), "ques size th [32]")
    flags.DEFINE_integer("word_size_th", 16, "word size th [16]")
    flags.DEFINE_integer("para_size_th", 256, "para size th [256]")
    # Advanced training options
    flags.DEFINE_bool("lower_word", True, "lower word [True]")
    flags.DEFINE_bool("squash", False, "squash the sentences into one? [False]")
    flags.DEFINE_bool("swap_memory", True, "swap memory? [True]")
    flags.DEFINE_string("data_filter", "max", "max | valid | semi [max]")
    flags.DEFINE_bool("use_glove_for_unk", True, "use glove for unk [False]")
    flags.DEFINE_bool(
        "known_if_glove", True, "consider as known if present in glove [False]"
    )
    flags.DEFINE_string("logit_func", "tri_linear", "logit func [tri_linear]")
    flags.DEFINE_string("answer_func", "linear", "answer logit func [linear]")
    flags.DEFINE_string("sh_logit_func", "tri_linear", "sh logit func [tri_linear]")
    # Ablation options
    flags.DEFINE_bool("use_char_emb", True, "use char emb? [True]")
    flags.DEFINE_bool("use_word_emb", True, "use word embedding? [True]")
    flags.DEFINE_bool("q2c_att", True, "question-to-context attention? [True]")
    flags.DEFINE_bool("c2q_att", True, "context-to-question attention? [True]")
    flags.DEFINE_bool("dynamic_att", False, "Dynamic attention [False]")

    def main(_):
        # actual evaluation entry point, invoked by tf.app.run below
        from basic.main import main as m

        config = flags.FLAGS
        config.out_dir = os.path.join(
            config.out_base_dir, config.model_name, str(config.run_id).zfill(2)
        )
        evaluator = m(config)
        """Generating metrics for the squad model"""
        if training_mode == "span":
            metrics = {
                "metrics": [
                    {
                        "name": "accuracy-score",
                        "numberValue": str(evaluator.acc),
                        "format": "RAW",
                    },
                    {
                        "name": "f1-score",
                        "numberValue": str(evaluator.f1),
                        "format": "RAW",
                    },
                ]
            }
        else:
            metrics = {
                "metrics": [
                    {
                        "name": "accuracy-score",
                        "numberValue": str(evaluator.acc),
                        "format": "RAW",
                    },
                    {
                        "name": "loss",
                        "numberValue": str(evaluator.loss),
                        "format": "RAW",
                    },
                ]
            }
        import json

        # write the metrics artifact that the KFP UI picks up
        with open(mlpipeline_metrics_path, "w") as f:
            json.dump(metrics, f)

    tf.app.run(main)
| [
"basic.main.main",
"json.dump",
"kfp.components.InputPath",
"shutil.copytree",
"kfp.components.OutputPath",
"tensorflow.app.run"
] | [((748, 773), 'shutil.copytree', 'shutil.copytree', (['src', 'dst'], {}), '(src, dst)\n', (763, 773), False, 'import shutil\n'), ((9516, 9532), 'tensorflow.app.run', 'tf.app.run', (['main'], {}), '(main)\n', (9526, 9532), True, 'import tensorflow as tf\n'), ((239, 253), 'kfp.components.InputPath', 'InputPath', (['str'], {}), '(str)\n', (248, 253), False, 'from kfp.components import InputPath\n'), ((275, 289), 'kfp.components.InputPath', 'InputPath', (['str'], {}), '(str)\n', (284, 289), False, 'from kfp.components import InputPath\n'), ((550, 569), 'kfp.components.OutputPath', 'OutputPath', (['metrics'], {}), '(metrics)\n', (560, 569), False, 'from kfp.components import OutputPath\n'), ((586, 601), 'kfp.components.OutputPath', 'OutputPath', (['str'], {}), '(str)\n', (596, 601), False, 'from kfp.components import OutputPath\n'), ((8353, 8362), 'basic.main.main', 'm', (['config'], {}), '(config)\n', (8354, 8362), True, 'from basic.main import main as m\n'), ((9489, 9510), 'json.dump', 'json.dump', (['metrics', 'f'], {}), '(metrics, f)\n', (9498, 9510), False, 'import json\n')] |
import os
import time
from collections import deque
import functools
import itertools
from typing import Callable, Iterable
import numpy as np
import yaml
import gym
from box import Box
import torch
# torch.multiprocessing.set_start_method("forkserver")
import torch.nn as nn
from torch.utils.data import IterableDataset, DataLoader
from torch import optim
from a2c_ppo_acktr import algo, utils
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.utils import get_vec_normalize
from a2c_ppo_acktr.model import Policy
from a2c_ppo_acktr.storage import RolloutStorage
from evaluation import evaluate
def main():
    """Load the trained agent for the configured environment and evaluate it."""
    with open("seaadrl.yaml") as f:
        config = Box(yaml.load(f, Loader=yaml.FullLoader)["baseline"])
    device = utils.get_device()
    checkpoint_path = os.path.join(config.load_dir, config.env_name + "-vaxxed_2.pt")
    trained_agent, _ = torch.load(checkpoint_path, map_location=device)
    trained_agent.eval()
    evaluate(
        trained_agent,
        None,
        config.env_name,
        seed=1,
        num_processes=24,
        eval_log_dir='/tmp/gym',
        device=utils.get_device(),
    )
# Script entry point: run the evaluation when executed directly.
if __name__ == "__main__":
    main()
"yaml.load",
"os.path.join",
"a2c_ppo_acktr.utils.get_device"
] | [((750, 768), 'a2c_ppo_acktr.utils.get_device', 'utils.get_device', ([], {}), '()\n', (766, 768), False, 'from a2c_ppo_acktr import algo, utils\n'), ((971, 1034), 'os.path.join', 'os.path.join', (['config.load_dir', "(config.env_name + '-vaxxed_2.pt')"], {}), "(config.load_dir, config.env_name + '-vaxxed_2.pt')\n", (983, 1034), False, 'import os\n'), ((1254, 1272), 'a2c_ppo_acktr.utils.get_device', 'utils.get_device', ([], {}), '()\n', (1270, 1272), False, 'from a2c_ppo_acktr import algo, utils\n'), ((686, 722), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (695, 722), False, 'import yaml\n')] |
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function
import math
from ...common import dog
from ...common import nnef_shape_optimizer
from ...common import utils
# HELPERS
def calculate_padding_elem(upscaled_size, downscaled_size, filter_size, stride, dilation):
    """
    Compute the (left, right) padding for one dimension so that a filter of
    the given size/stride/dilation maps the upscaled extent onto the
    downscaled one; an odd total is split with the extra element on the right.
    """
    effective_filter = (filter_size - 1) * dilation + 1
    total = (downscaled_size - 1) * stride + effective_filter - upscaled_size
    half = total / 2
    return math.floor(half), math.ceil(half)
def calculate_padding(upscaled_shape, downscaled_shape, filter_shape, strides, dilations):
    """
    Per-dimension (left, right) paddings that map the upscaled shape onto the
    downscaled shape for the given filter sizes, strides and dilations.
    Odd totals put the extra padding element on the right.
    """
    paddings = []
    for up, down, filt, stride, dilation in zip(
            upscaled_shape, downscaled_shape, filter_shape, strides, dilations):
        effective_filter = (filt - 1) * dilation + 1
        total = (down - 1) * stride + effective_filter - up
        paddings.append((math.floor(total / 2), math.ceil(total / 2)))
    return paddings
def get_paddings(nnefop):
"""
Returns:
nnefpadding_separate: int[]|None
tfpadding_in_op: "SAME"|"VALID"
"""
are_args_spatial = "conv" in nnefop.name
nnefpadding = nnefop.args["padding"]
nnefborder = nnefop.args["border"].lower()
if nnefpadding == [] and (nnefborder == "constant" or nnefborder == "ignore"):
return None, "SAME"
elif not utils.has_greater_than_0(nnefpadding):
return None, "VALID"
else:
if nnefpadding == []:
if are_args_spatial:
nnefpadding = calculate_padding(
upscaled_shape=dog.get_shape_safe(nnefop.args["input"])[2:],
downscaled_shape=dog.get_shape_safe(nnefop.result)[2:],
filter_shape=dog.get_shape_safe(nnefop.args["filter"])[2:],
strides=nnefop.args["stride"],
dilations=nnefop.args["dilation"]
)
else:
nnefpadding = calculate_padding(
upscaled_shape=dog.get_shape_safe(nnefop.args["input"]),
downscaled_shape=dog.get_shape_safe(nnefop.result),
filter_shape=nnefop.args["size"],
strides=nnefop.args["stride"],
dilations=nnefop.args["dilation"]
)
if utils.has_greater_than_0(nnefpadding):
if are_args_spatial:
return [(0, 0), (0, 0)] + nnefpadding, "VALID"
else:
return nnefpadding, "VALID"
else:
return None, "VALID"
def is_varlike(nnefdn):
return nnefdn.producer.name in ["external", "variable", "constant"]
# TRANSFORMS
def transform_extract_padding(nnefdog):
nnefopnames = {"conv", "planewise_conv", "separable_conv", "argmax_pool", "max_pool_with_index", "max_pool",
"avg_pool", "rms_pool"}
new_nnefops = []
for nnefop in nnefdog.ops:
if nnefop.name in nnefopnames:
nnefpadding_separate, tfpadding_in_op = get_paddings(nnefop)
nnefop.add_arg("_tf_padding", tfpadding_in_op)
if nnefpadding_separate is not None:
nnefdn_input = nnefop.args["input"]
nnefdn_result = nnefop.results["index" if nnefop.name == "argmax_pool" else "output"]
nnefdn_new_result = dog.DataNode(nnefdn_result.name + "_pad__")
nnefdn_new_result.shape = [s + p + q for s, (p, q) in
zip(dog.get_shape_safe(nnefdn_input), nnefpadding_separate)]
nnefdog.dn_by_name[nnefdn_new_result.name] = nnefdn_new_result
nnefop_pad = dog.OperationNode("_nnef_pad")
nnefop_pad.add_arg("input", nnefdn_input)
nnefop_pad.add_arg("padding", nnefpadding_separate)
nnefop_pad.add_arg("border", nnefop.args["border"])
nnefop_pad.add_result("result", nnefdn_new_result)
nnefop_pad.extra["original_op"] = nnefop
nnefop.set_arg("input", nnefop_pad.result)
new_nnefops.append(nnefop_pad)
new_nnefops.append(nnefop)
nnefdog.ops = new_nnefops
def transform_extract_padding_for_grads(nnefdog):
# These are handled separately with calculate_tfpadding_for_deconv
# "conv_grad_input", "conv_grad_filter"
nnefopnames = {"max_pool_grad_with_index", "avg_pool_grad", "max_pool_grad"}
new_nnefops = []
for nnefop in nnefdog.ops:
if nnefop.name in nnefopnames:
nnefpadding_separate, tfpadding_in_op = get_paddings(nnefop)
nnefop.add_arg("_tf_padding", tfpadding_in_op)
if nnefpadding_separate is not None and "filter" not in nnefop.name:
utils.print_error("Separate padding not supported in grads, {}".format(nnefpadding_separate))
new_nnefops.append(nnefop)
nnefdog.ops = new_nnefops
def transform_extract_bias_add(nnefdog):
nnefopnames = {"conv", "planewise_conv", "separable_conv", "deconv", "planewise_deconv", "separable_deconv"}
new_nnefops = []
for nnefop in nnefdog.ops:
new_nnefops.append(nnefop)
if nnefop.name in nnefopnames and nnefop.args["bias"] != 0.0:
nnefdn_bias = nnefop.args["bias"]
nnefop.remove_arg("bias")
nnefdn_old_result = nnefop.result
nnefdn_new_result = dog.DataNode(nnefdn_old_result.name + "_conv__")
nnefdn_new_result.shape = list(nnefdn_old_result.shape)
nnefdog.dn_by_name[nnefdn_new_result.name] = nnefdn_new_result
nnefop.set_result("output", nnefdn_new_result)
nnefop_bias_add = dog.OperationNode("_nnef_bias_add")
nnefop_bias_add.add_arg("x", nnefdn_new_result)
nnefop_bias_add.add_arg("y", nnefdn_bias)
nnefop_bias_add.add_result("z", nnefdn_old_result)
nnefop_bias_add.extra["original_op"] = nnefop
new_nnefops.append(nnefop_bias_add)
nnefdog.ops = new_nnefops
def transform_extract_bias_add_for_grads(nnefdog):
nnefopnames = {"conv_grad_input", "conv_grad_filter"}
new_nnefops = []
for nnefop in nnefdog.ops:
new_nnefops.append(nnefop)
if nnefop.name in nnefopnames and nnefop.args["bias"] != 0.0:
utils.print_error("Bias not supported in grads")
#
# nnefdn_bias = nnefop.args["bias"]
# nnefop.remove_arg("bias")
#
# nnefdn_old_result = nnefop.result
# nnefdn_new_result = dog.DataNode(nnefdn_old_result.name + "_conv__")
# nnefdn_new_result.shape = list(nnefdn_old_result.shape)
# nnefdog.dn_by_name[nnefdn_new_result.name] = nnefdn_new_result
# nnefop.set_result("output", nnefdn_new_result)
#
# nnefop_bias_add = dog.OperationNode("_nnef_bias_add")
# nnefop_bias_add.add_arg("x", nnefdn_new_result)
# nnefop_bias_add.add_arg("y", nnefdn_bias)
# nnefop_bias_add.add_result("z", nnefdn_old_result)
# nnefop_bias_add.extra["original_op"] = nnefop
#
# new_nnefops.append(nnefop_bias_add)
nnefdog.ops = new_nnefops
def transform_transpose_to_target_lang(nnefdog):
global ctr
ctr = 0
# Tensorflow does broadcasting from right, nnef does it from left
# btw we handle rms_pool as non_atomic
conv_ops = ['conv', 'deconv']
pooling_ops = ['max_pool_with_index', 'max_pool', 'avg_pool', 'rms_pool']
up_down_sampling_ops = ['nearest_downsample', 'area_downsample', 'nearest_upsample', 'multilinear_upsample']
binary_ops = ['add', 'sub', 'mul', 'div', 'pow', 'lt', 'gt', 'le', 'ge', 'eq', 'ne', 'and', 'or',
'min', 'max']
new_ops = []
for op in nnefdog.ops:
if op.name in conv_ops:
input_channels = op.args['input'].shape[1]
add_transpose_to_arg(op, 'input', new_ops)
if op.args["groups"] == 1:
add_transpose_to_filter(op, 'filter', new_ops)
else:
if op.args["groups"] not in [0, op.result.shape[1]]:
utils.print_error("Unsupported groups value for {}: {}"
.format(op.result.name, op.args["groups"]))
add_transpose_reshape_to_planewise_filter(op, 'filter', new_ops, input_channels)
if op.args.get("output_shape"):
nchw_arg_to_nhwc(op, 'output_shape')
new_ops.append(op)
add_transpose_to_result(op, 'output', new_ops, nnefdog)
elif op.name == "conv_grad_input":
input_channels = op.args['orig_input_shape'][1]
if op.args["groups"] == 1:
add_transpose_to_filter(op, 'orig_filter', new_ops)
else:
if op.args["groups"] not in [0, op.result.shape[1]]:
utils.print_error("Unsupported groups value for {}: {}"
.format(op.result.name, op.args["groups"]))
add_transpose_reshape_to_planewise_filter(op, 'orig_filter', new_ops, input_channels)
add_transpose_to_arg(op, 'output_grad', new_ops)
nchw_arg_to_nhwc(op, 'orig_input_shape')
new_ops.append(op)
add_transpose_to_result(op, 'input_grad', new_ops, nnefdog)
elif op.name == "conv_grad_filter":
input_channels = op.args['orig_input'].shape[1]
add_transpose_to_arg(op, 'orig_input', new_ops)
add_transpose_to_arg(op, 'output_grad', new_ops)
if op.args["groups"] == 1:
nchw_arg_to_hwcn(op, 'orig_filter_shape')
else:
if op.args["groups"] not in [0, op.result.shape[1]]:
utils.print_error("Unsupported groups value for {}: {}"
.format(op.result.name, op.args["groups"]))
nchw_arg_to_hwcm(op, 'orig_filter_shape', input_channels)
new_ops.append(op)
if op.args["groups"] == 1:
add_transpose_to_result_filter(op, 'input_grad', new_ops, nnefdog)
else:
if op.args["groups"] not in [0, op.result.shape[1]]:
utils.print_error("Unsupported groups value for {}: {}"
.format(op.result.name, op.args["groups"]))
add_reshape_transpose_to_result_planewise_filter(op, 'input_grad', new_ops, nnefdog, input_channels)
elif op.name in pooling_ops:
add_transpose_to_arg(op, 'input', new_ops)
nchw_arg_to_nhwc(op, 'size')
nchw_arg_to_nhwc(op, 'padding')
nchw_arg_to_nhwc(op, 'stride')
nchw_arg_to_nhwc(op, 'dilation')
new_ops.append(op)
add_transpose_to_result(op, 'output', new_ops, nnefdog)
if 'index' in op.results.keys():
add_transpose_to_result(op, 'index', new_ops, nnefdog)
elif op.name in ["max_pool_grad", "max_pool_grad_with_index"]:
add_transpose_to_arg(op, 'orig_input', new_ops)
if 'index' in op.name:
add_transpose_to_arg(op, 'orig_index', new_ops)
else:
add_transpose_to_arg(op, 'orig_output', new_ops)
add_transpose_to_arg(op, 'output_grad', new_ops)
nchw_arg_to_nhwc(op, 'size')
nchw_arg_to_nhwc(op, 'padding')
nchw_arg_to_nhwc(op, 'stride')
nchw_arg_to_nhwc(op, 'dilation')
new_ops.append(op)
add_transpose_to_result(op, 'input_grad', new_ops, nnefdog)
elif op.name == "avg_pool_grad":
add_transpose_to_arg(op, 'output_grad', new_ops)
nchw_arg_to_nhwc(op, 'orig_input_shape')
nchw_arg_to_nhwc(op, 'size')
nchw_arg_to_nhwc(op, 'padding')
nchw_arg_to_nhwc(op, 'stride')
nchw_arg_to_nhwc(op, 'dilation')
new_ops.append(op)
add_transpose_to_result(op, 'input_grad', new_ops, nnefdog)
elif op.name == "_nnef_bias_add":
add_transpose_to_arg(op, 'x', new_ops)
add_squeeze_to_arg(op, 'y', new_ops)
new_ops.append(op)
add_transpose_to_result(op, 'z', new_ops, nnefdog)
elif op.name == "local_response_normalization":
if len(op.args["size"]) > 2 and op.args["size"][1] > 1:
add_transpose_to_arg(op, 'input', new_ops)
nchw_arg_to_nhwc(op, 'size')
new_ops.append(op)
add_transpose_to_result(op, 'output', new_ops, nnefdog)
else:
new_ops.append(op)
elif op.name == "batch_normalization":
add_transpose_to_arg(op, 'input', new_ops)
add_squeeze_or_transpose_to_arg(op, 'mean', new_ops)
add_squeeze_or_transpose_to_arg(op, 'variance', new_ops)
add_squeeze_or_transpose_to_arg(op, 'offset', new_ops)
add_squeeze_or_transpose_to_arg(op, 'scale', new_ops)
new_ops.append(op)
add_transpose_to_result(op, 'output', new_ops, nnefdog)
elif op.name in up_down_sampling_ops:
add_transpose_to_arg(op, 'input', new_ops)
new_ops.append(op)
add_transpose_to_result(op, 'output', new_ops, nnefdog)
elif op.name in binary_ops:
add_unsqueeze_to_arg_if_broadcast(op, 'x', 'y', new_ops)
add_unsqueeze_to_arg_if_broadcast(op, 'y', 'x', new_ops)
new_ops.append(op)
elif op.name == "clamp":
add_unsqueeze_to_arg_if_broadcast(op, 'a', 'x', new_ops)
add_unsqueeze_to_arg_if_broadcast(op, 'b', 'x', new_ops)
new_ops.append(op)
else:
new_ops.append(op)
nnefdog.ops = new_ops
ctr = 0 # TODO
def add_transpose_to_arg(op, arg_name, new_ops):
global ctr
dn = op.args[arg_name]
input_rank = dog.get_rank_safe(dn)
op_transpose = dog.OperationNode("transpose")
op_transpose.add_arg("input", dn)
op_transpose.add_arg("axes", utils.transpose_axes_nchw_to_nhwc(input_rank))
op_transpose.add_result("output", dog.DataNode("_nnef_nhwc_" + str(ctr)))
ctr += 1
op_transpose.result.shape = utils.shape_nchw_to_nhwc(dog.get_shape_safe(dn))
op_transpose.extra[nnef_shape_optimizer.EXTRA_GENERATED_TRANSPOSE] = True
op.set_arg(arg_name, op_transpose.result)
new_ops.append(op_transpose)
def add_squeeze_to_arg(op, arg_name, new_ops):
global ctr
dn = op.args[arg_name]
op_squeeze = dog.OperationNode("squeeze")
op_squeeze.add_arg("input", dn)
op_squeeze.add_arg("axes", [0])
op_squeeze.add_result("output", dog.DataNode("_nnef_squeeze_" + str(ctr)))
op_squeeze.extra[nnef_shape_optimizer.EXTRA_GENERATED_SQUEEZE] = True
ctr += 1
op_squeeze.result.shape = utils.apply_squeeze_shape(dog.get_shape_safe(dn), [0])
op.set_arg(arg_name, op_squeeze.result)
new_ops.append(op_squeeze)
def add_transpose_to_filter(op, arg_name, new_ops):
global ctr
dn = op.args[arg_name]
input_rank = dog.get_rank_safe(dn)
op_transpose = dog.OperationNode("transpose")
op_transpose.add_arg("input", dn)
op_transpose.add_arg("axes", utils.transpose_axes_nchw_to_hwcn(input_rank))
op_transpose.add_result("output", dog.DataNode("_nnef_hwcn_" + str(ctr)))
ctr += 1
op_transpose.result.shape = utils.shape_nchw_to_hwcn(dog.get_shape_safe(dn))
op_transpose.extra[nnef_shape_optimizer.EXTRA_GENERATED_TRANSPOSE] = True
op.set_arg(arg_name, op_transpose.result)
new_ops.append(op_transpose)
def add_transpose_reshape_to_planewise_filter(op, arg_name, new_ops, input_channels):
global ctr
dn = op.args[arg_name]
input_rank = dog.get_rank_safe(dn)
op_transpose = dog.OperationNode("transpose")
op_transpose.add_arg("input", dn)
op_transpose.add_arg("axes", utils.transpose_axes_nchw_to_hwcn(input_rank))
op_transpose.add_result("output", dog.DataNode("_nnef_hwcn_" + str(ctr)))
ctr += 1
op_transpose.result.shape = utils.shape_nchw_to_hwcn(dog.get_shape_safe(dn))
op_transpose.extra[nnef_shape_optimizer.EXTRA_GENERATED_TRANSPOSE] = True
new_ops.append(op_transpose)
reshape_shape = list(op_transpose.result.shape)
reshape_shape = reshape_shape[:-2] + [input_channels, reshape_shape[-1] // input_channels]
op_reshape = dog.OperationNode("reshape")
op_reshape.add_arg("input", op_transpose.result)
op_reshape.add_arg("shape", reshape_shape)
op_reshape.add_result("output", dog.DataNode("_nnef_reshape_" + str(ctr)))
ctr += 1
op_reshape.result.shape = reshape_shape
op_reshape.extra[nnef_shape_optimizer.EXTRA_GENERATED_RESHAPE] = True
new_ops.append(op_reshape)
op.set_arg(arg_name, op_reshape.result)
def swap_names(dn1, dn2):
n = dn1.name
dn1.name = dn2.name
dn2.name = n
def add_transpose_to_result(op, result_name, new_ops, dog_graph):
global ctr
dn = op.results[result_name]
orig_dn_shape = dn.shape
input_rank = dog.get_rank_safe(dn)
perm = utils.transpose_axes_nhwc_to_nchw(input_rank)
perm_inverse = utils.get_inverse_permutation(perm)
dn.extra[nnef_shape_optimizer.EXTRA_APPLIED_TRANSFORMATIONS] = [("transpose", perm_inverse)]
op_transpose = dog.OperationNode("transpose")
op_transpose.add_arg("input", dn)
op_transpose.add_arg("axes", perm)
op_transpose.add_result("output", dog.DataNode("_nnef_nchw_" + str(ctr)))
op_transpose.result.shape = orig_dn_shape
op_transpose.extra[nnef_shape_optimizer.EXTRA_GENERATED_TRANSPOSE] = True
dn.shape = utils.shape_nchw_to_nhwc(orig_dn_shape)
ctr += 1
new_ops.append(op_transpose)
replace_in_consumers(dn, op_transpose.result, dog_graph)
def add_transpose_to_result_filter(op, result_name, new_ops, dog_graph):
global ctr
dn = op.results[result_name]
orig_dn_shape = dn.shape
input_rank = dog.get_rank_safe(dn)
perm = utils.transpose_axes_hwcn_to_nchw(input_rank)
perm_inverse = utils.get_inverse_permutation(perm)
dn.extra[nnef_shape_optimizer.EXTRA_APPLIED_TRANSFORMATIONS] = [("transpose", perm_inverse)]
op_transpose = dog.OperationNode("transpose")
op_transpose.add_arg("input", dn)
op_transpose.add_arg("axes", perm)
op_transpose.add_result("output", dog.DataNode("_nnef_nchw" + str(ctr)))
op_transpose.result.shape = orig_dn_shape
op_transpose.extra[nnef_shape_optimizer.EXTRA_GENERATED_TRANSPOSE] = True
dn.shape = utils.shape_nchw_to_hwcn(orig_dn_shape)
ctr += 1
new_ops.append(op_transpose)
replace_in_consumers(dn, op_transpose.result, dog_graph)
def add_reshape_transpose_to_result_planewise_filter(op, result_name, new_ops, dog_graph, input_channels):
global ctr
dn = op.results[result_name]
orig_dn_shape = dn.shape
shape_hwcm = utils.shape_nchw_to_hwcm(orig_dn_shape, input_channels)
shape_hw1x = shape_hwcm[:-2] + [1, shape_hwcm[-2] * shape_hwcm[-1]]
input_rank = dog.get_rank_safe(dn)
perm = utils.transpose_axes_hwcn_to_nchw(input_rank)
perm_inverse = utils.get_inverse_permutation(perm)
dn.extra[nnef_shape_optimizer.EXTRA_APPLIED_TRANSFORMATIONS] = [
("transpose", perm_inverse),
("reshape", shape_hwcm)
]
op_reshape = dog.OperationNode("reshape")
op_reshape.add_arg("input", dn)
op_reshape.add_arg("shape", shape_hw1x)
op_reshape.add_result("output", dog.DataNode("_nnef_reshape_" + str(ctr)))
ctr += 1
op_reshape.result.shape = shape_hw1x
op_reshape.extra[nnef_shape_optimizer.EXTRA_GENERATED_RESHAPE] = True
new_ops.append(op_reshape)
op_transpose = dog.OperationNode("transpose")
op_transpose.add_arg("input", op_reshape.result)
op_transpose.add_arg("axes", perm)
op_transpose.add_result("output", dog.DataNode("_nnef_nchw_" + str(ctr)))
op_transpose.result.shape = orig_dn_shape
op_transpose.extra[nnef_shape_optimizer.EXTRA_GENERATED_TRANSPOSE] = True
dn.shape = shape_hwcm
ctr += 1
new_ops.append(op_transpose)
replace_in_consumers(dn, op_transpose.result, dog_graph)
def add_squeeze_or_transpose_to_arg(op, arg_name, new_ops):
if dog.get_rank_safe(op.args[arg_name]) == 2 and dog.get_shape_safe(op.args[arg_name])[0] == 1:
add_squeeze_to_arg(op, arg_name, new_ops)
else:
add_transpose_to_arg(op, arg_name, new_ops)
def add_unsqueeze_to_arg_if_broadcast(op, arg_name, other_arg_name, new_ops):
global ctr
dn = op.args[arg_name]
dn_other = op.args[other_arg_name]
if is_broadcast(dn, dn_other):
dn_rank = dog.get_rank_safe(dn)
rank_diff = dog.get_rank_safe(dn_other) - dog.get_rank_safe(dn)
axes = list(range(dn_rank, dn_rank + rank_diff))
op_unsqueeze = dog.OperationNode("unsqueeze")
op_unsqueeze.add_arg("input", dn)
op_unsqueeze.add_arg("axes", axes)
op_unsqueeze.add_result("output", dog.DataNode("_nnef_unsqueeze_" + str(ctr)))
ctr += 1
op_unsqueeze.extra[nnef_shape_optimizer.EXTRA_GENERATED_UNSQUEEZE] = True
op_unsqueeze.result.shape = utils.apply_unsqueeze_shape(dog.get_shape_safe(dn), axes)
op.set_arg(arg_name, op_unsqueeze.result)
new_ops.append(op_unsqueeze)
def is_ancestor_of2(dn, ancestor): # TODO nicer
if dn.producer is ancestor:
return True
for dn2 in dn.producer.get_arg_nodes():
if dn2.producer is ancestor:
return True
return False
def replace_in_consumers(dn_old, dn_new, dog_graph):
def replace(x):
if x == dn_old:
return dn_new
return x
for consumer in dn_old.consumers:
if not is_ancestor_of2(dn_new, consumer):
dn_new.consumers.append(consumer)
consumer.args = utils.recursive_transform(consumer.args, replace)
consumer.results = utils.recursive_transform(consumer.results, replace)
def nchw_arg_to_nhwc(op, arg_name):
arg = op.args[arg_name]
if arg:
arg = utils.shape_nchw_to_nhwc(arg)
op.set_arg(arg_name, arg)
def nchw_arg_to_hwcn(op, arg_name):
arg = op.args[arg_name]
if arg:
arg = utils.shape_nchw_to_hwcn(arg)
op.set_arg(arg_name, arg)
def nchw_arg_to_hwcm(op, arg_name, input_channels):
arg = op.args[arg_name]
if arg:
arg = utils.shape_nchw_to_hwcm(arg, input_channels)
op.set_arg(arg_name, arg)
# TODO rename
def is_broadcast(nnefdn_broadcasted, nnefdn_other):
return (isinstance(nnefdn_broadcasted, dog.DataNode)
and isinstance(nnefdn_other, dog.DataNode)
and nnefdn_broadcasted.shape is not None
and nnefdn_broadcasted.shape != []
and nnefdn_broadcasted.shape != [1]
and nnefdn_other.shape is not None
and len(nnefdn_other.shape) > len(nnefdn_broadcasted.shape)
and utils.can_broadcast_from_left(nnefdn_other.shape, nnefdn_broadcasted.shape))
| [
"math.ceil",
"math.floor"
] | [((1001, 1018), 'math.floor', 'math.floor', (['(t / 2)'], {}), '(t / 2)\n', (1011, 1018), False, 'import math\n'), ((1020, 1036), 'math.ceil', 'math.ceil', (['(t / 2)'], {}), '(t / 2)\n', (1029, 1036), False, 'import math\n')] |
# Import the SST module
import sst
# In Example0, two components send each other a number of events
# The simulation ends when the components have sent and
# received all expected events. While the eventSize is parameterized,
# it has no effect on simulation time because the components don't limit
# their link bandwidth
#
# Relevant code:
# simpleElementExample/example0.h
# simpleElementExample/example0.cc
# simpleElementExample/basicEvent.h
# Output:
# simpleElementExample/tests/refFiles/example0.out
#
### Create the components
component0 = sst.Component("c0", "simpleElementExample.example0")
component1 = sst.Component("c1", "simpleElementExample.example0")
### Parameterize the components.
# Run 'sst-info simpleElementExample.example0' at the command line
# to see parameter documentation
params = {
"eventsToSend" : 50, # Required parameter, error if not provided
"eventSize" : 32 # Optional parameter, defaults to 16 if not provided
}
component0.addParams(params)
component1.addParams(params)
# Link the components via their 'port' ports
link = sst.Link("component_link")
link.connect( (component0, "port", "1ns"), (component1, "port", "1ns") )
# Because the link latency is ~1ns and the components send one event
# per cycle on a 1GHz clock, the simulation time should be just over eventsToSend ns
| [
"sst.Link",
"sst.Component"
] | [((559, 611), 'sst.Component', 'sst.Component', (['"""c0"""', '"""simpleElementExample.example0"""'], {}), "('c0', 'simpleElementExample.example0')\n", (572, 611), False, 'import sst\n'), ((625, 677), 'sst.Component', 'sst.Component', (['"""c1"""', '"""simpleElementExample.example0"""'], {}), "('c1', 'simpleElementExample.example0')\n", (638, 677), False, 'import sst\n'), ((1098, 1124), 'sst.Link', 'sst.Link', (['"""component_link"""'], {}), "('component_link')\n", (1106, 1124), False, 'import sst\n')] |
import sphinx
from pkg_resources import parse_version
sphinx_version = sphinx.__version__
if parse_version(sphinx_version) >= parse_version("1.6"):
from sphinx.util import logging
else:
import logging
logging.basicConfig()
class BaseService:
def __init__(self, *args, **kwargs):
self.log = logging.getLogger(__name__)
def request(self, *args, **kwargs):
raise NotImplementedError('Must be implemented by the service!')
| [
"logging.basicConfig",
"pkg_resources.parse_version",
"logging.getLogger"
] | [((94, 123), 'pkg_resources.parse_version', 'parse_version', (['sphinx_version'], {}), '(sphinx_version)\n', (107, 123), False, 'from pkg_resources import parse_version\n'), ((127, 147), 'pkg_resources.parse_version', 'parse_version', (['"""1.6"""'], {}), "('1.6')\n", (140, 147), False, 'from pkg_resources import parse_version\n'), ((215, 236), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (234, 236), False, 'import logging\n'), ((319, 346), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (336, 346), False, 'import logging\n')] |
#System libraries
import tkinter as tk
import sys
sys.path.insert(1, '../common')
import socket
from cli import *
#User libraries
# from motorcommands import *
key_to_direction = {
38: "left",
25: "forward",
40: "right",
39: "back",
65: "stop",
}
numbers = {
19 : 0,
10 : 1,
11 : 2,
12 : 3,
13 : 4,
14 : 5,
15 : 6,
16 : 7,
17 : 8,
18 : 9
}
class gui:
def __init__(self, socket):
self._cli = cli(socket)
self._root = tk.Tk()
pass
def update_image(self):
self._cli.send("image")
self._root.after(10, self.update_image)
def key_press(self, event):
keycode = event.keycode
try:
print(keycode)
if keycode in key_to_direction:
input = key_to_direction[event.keycode]
elif keycode in numbers:
input = "set_bot %d" % numbers[event.keycode]
else:
input = "stop"
except:
input = "stop"
self._cli.send(input)
def key_release(self, event):
self._cli.send(input)
def run(self):
label = tk.Label(text = "GUI", width = 100, height = 40)
label.pack()
self.update_image()
self._root.bind('<KeyPress>', self.key_press)
#root.bind('<KeyRelease>', self.key_release)
self._root.mainloop()
| [
"sys.path.insert",
"tkinter.Label",
"tkinter.Tk"
] | [((51, 82), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../common"""'], {}), "(1, '../common')\n", (66, 82), False, 'import sys\n'), ((518, 525), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (523, 525), True, 'import tkinter as tk\n'), ((1184, 1226), 'tkinter.Label', 'tk.Label', ([], {'text': '"""GUI"""', 'width': '(100)', 'height': '(40)'}), "(text='GUI', width=100, height=40)\n", (1192, 1226), True, 'import tkinter as tk\n')] |
import numpy as np
import pytest
from tensorflow.keras import losses as losses_module
from tensorflow.keras import metrics as metrics_module
from scikeras.utils import loss_name, metric_name
class CustomLoss(losses_module.Loss):
pass
class CustomMetric(metrics_module.AUC):
pass
@pytest.mark.parametrize(
"obj",
[
"categorical_crossentropy",
"CategoricalCrossentropy",
losses_module.categorical_crossentropy,
losses_module.CategoricalCrossentropy,
losses_module.CategoricalCrossentropy(),
],
)
def test_loss_invariance(obj):
"""Test to make sure loss_name returns same string no matter which object
is passed (str, function, class, type)"""
assert loss_name(obj) == "categorical_crossentropy"
@pytest.mark.parametrize("obj", [CustomLoss, CustomLoss()])
def test_custom_loss(obj):
assert loss_name(obj) == "custom_loss"
@pytest.mark.parametrize(
"obj",
[
"categorical_crossentropy",
"CategoricalCrossentropy",
metrics_module.categorical_crossentropy,
metrics_module.CategoricalCrossentropy,
metrics_module.CategoricalCrossentropy(),
],
)
def test_metric_invariance(obj):
"""Test to make sure same metric returned no matter which object passed"""
assert metric_name(obj) == "categorical_crossentropy"
@pytest.mark.parametrize("loss", [object(), object, list()])
def test_loss_types(loss):
with pytest.raises(TypeError, match="``loss`` must be a"):
loss_name(loss)
def test_unknown_loss_raises():
with pytest.raises(ValueError, match="Unknown loss function"):
loss_name("unknown_loss")
@pytest.mark.parametrize("obj", [object(), object, list()])
def test_metric_types(obj):
with pytest.raises(TypeError, match="``metric`` must be a"):
metric_name(obj)
def test_unknown_metric():
with pytest.raises(ValueError, match="Unknown metric function"):
metric_name("unknown_metric")
@pytest.mark.parametrize("metric", [CustomMetric, CustomMetric()])
def test_custom_metric(metric):
assert metric_name(metric) == "custom_metric"
| [
"tensorflow.keras.metrics.CategoricalCrossentropy",
"scikeras.utils.metric_name",
"pytest.raises",
"scikeras.utils.loss_name",
"tensorflow.keras.losses.CategoricalCrossentropy"
] | [((728, 742), 'scikeras.utils.loss_name', 'loss_name', (['obj'], {}), '(obj)\n', (737, 742), False, 'from scikeras.utils import loss_name, metric_name\n'), ((512, 551), 'tensorflow.keras.losses.CategoricalCrossentropy', 'losses_module.CategoricalCrossentropy', ([], {}), '()\n', (549, 551), True, 'from tensorflow.keras import losses as losses_module\n'), ((873, 887), 'scikeras.utils.loss_name', 'loss_name', (['obj'], {}), '(obj)\n', (882, 887), False, 'from scikeras.utils import loss_name, metric_name\n'), ((1300, 1316), 'scikeras.utils.metric_name', 'metric_name', (['obj'], {}), '(obj)\n', (1311, 1316), False, 'from scikeras.utils import loss_name, metric_name\n'), ((1126, 1166), 'tensorflow.keras.metrics.CategoricalCrossentropy', 'metrics_module.CategoricalCrossentropy', ([], {}), '()\n', (1164, 1166), True, 'from tensorflow.keras import metrics as metrics_module\n'), ((1446, 1498), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""``loss`` must be a"""'}), "(TypeError, match='``loss`` must be a')\n", (1459, 1498), False, 'import pytest\n'), ((1508, 1523), 'scikeras.utils.loss_name', 'loss_name', (['loss'], {}), '(loss)\n', (1517, 1523), False, 'from scikeras.utils import loss_name, metric_name\n'), ((1567, 1623), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Unknown loss function"""'}), "(ValueError, match='Unknown loss function')\n", (1580, 1623), False, 'import pytest\n'), ((1633, 1658), 'scikeras.utils.loss_name', 'loss_name', (['"""unknown_loss"""'], {}), "('unknown_loss')\n", (1642, 1658), False, 'from scikeras.utils import loss_name, metric_name\n'), ((1758, 1812), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""``metric`` must be a"""'}), "(TypeError, match='``metric`` must be a')\n", (1771, 1812), False, 'import pytest\n'), ((1822, 1838), 'scikeras.utils.metric_name', 'metric_name', (['obj'], {}), '(obj)\n', (1833, 1838), False, 'from scikeras.utils import loss_name, metric_name\n'), ((1877, 1935), 
'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Unknown metric function"""'}), "(ValueError, match='Unknown metric function')\n", (1890, 1935), False, 'import pytest\n'), ((1945, 1974), 'scikeras.utils.metric_name', 'metric_name', (['"""unknown_metric"""'], {}), "('unknown_metric')\n", (1956, 1974), False, 'from scikeras.utils import loss_name, metric_name\n'), ((2087, 2106), 'scikeras.utils.metric_name', 'metric_name', (['metric'], {}), '(metric)\n', (2098, 2106), False, 'from scikeras.utils import loss_name, metric_name\n')] |
from spikeextractors import RecordingExtractor
import numpy as np
import h5py
import ctypes
class BiocamRecordingExtractor(RecordingExtractor):
def __init__(self, recording_file):
RecordingExtractor.__init__(self)
self._recording_file = recording_file
self._rf, self._nFrames, self._samplingRate, self._nRecCh, self._chIndices, self._file_format, self._signalInv, self._positions, self._read_function = openBiocamFile(
self._recording_file)
for m in range(self._nRecCh):
self.setChannelProperty(m, 'location', self._positions[m])
def getChannelIds(self):
return list(range(self._nRecCh))
def getNumFrames(self):
return self._nFrames
def getSamplingFrequency(self):
return self._samplingRate
def getTraces(self, channel_ids=None, start_frame=None, end_frame=None):
if start_frame is None:
start_frame = 0
if end_frame is None:
end_frame = self.getNumFrames()
if channel_ids is None:
channel_ids = range(self.getNumChannels())
data = self._read_function(
self._rf, start_frame, end_frame, self.getNumChannels())
return data.reshape((end_frame - start_frame,
self.getNumChannels())).T[channel_ids]
@staticmethod
def writeRecording(recording, save_path):
M = recording.getNumChannels()
N = recording.getNumFrames()
channel_ids = range(M)
raw = recording.getTraces()
if raw.dtype != int:
raise Exception('Cannot write dataset in the format with non-int datatype:', raw.dtype)
rf = h5py.File(save_path, 'w')
# writing out in 100 format: Time x Channels
g = rf.create_group('3BData')
d = rf.create_dataset('3BData/Raw', data=raw.T + 2048, dtype=int)
g.attrs['Version'] = 100
rf.create_dataset('3BRecInfo/3BRecVars/MinVolt', data=[0])
rf.create_dataset('3BRecInfo/3BRecVars/MaxVolt', data=[1])
rf.create_dataset('3BRecInfo/3BRecVars/NRecFrames', data=[N])
rf.create_dataset('3BRecInfo/3BRecVars/SamplingRate', data=[recording.getSamplingFrequency()])
rf.create_dataset('3BRecInfo/3BRecVars/SignalInversion', data=[1])
rf.create_dataset('3BRecInfo/3BMeaChip/NCols', data=[M])
rf.create_dataset('3BRecInfo/3BMeaStreams/Raw/Chs', data=np.vstack((np.arange(M), np.zeros(M))).T, dtype=int)
rf.close()
def openBiocamFile(filename):
"""Open a Biocam hdf5 file, read and return the recording info, pick te correct method to access raw data, and return this to the caller."""
rf = h5py.File(filename, 'r')
# Read recording variables
recVars = rf.require_group('3BRecInfo/3BRecVars/')
# bitDepth = recVars['BitDepth'].value[0]
# maxV = recVars['MaxVolt'].value[0]
# minV = recVars['MinVolt'].value[0]
nFrames = recVars['NRecFrames'].value[0]
samplingRate = recVars['SamplingRate'].value[0]
signalInv = recVars['SignalInversion'].value[0]
# Read chip variables
chipVars = rf.require_group('3BRecInfo/3BMeaChip/')
nCols = chipVars['NCols'].value[0]
# Get the actual number of channels used in the recording
file_format = rf['3BData'].attrs.get('Version')
if file_format == 100:
nRecCh = len(rf['3BData/Raw'][0])
# raise Warning('This may go wrong!')
elif file_format == 101:
nRecCh = int(1. * rf['3BData/Raw'].shape[0] / nFrames)
else:
raise Exception('Unknown data file format.')
print('# 3Brain data format:', file_format, 'signal inversion', signalInv)
print('# signal range: ', recVars['MinVolt'].value[0], '- ',
recVars['MaxVolt'].value[0])
# Compute indices
rawIndices = rf['3BRecInfo/3BMeaStreams/Raw/Chs'].value
# Name channels ([0..4095] for fullarray files)
chIndices = [(x - 1) + (y - 1) * nCols for (y, x) in rawIndices]
# chIndices = [(x-1) + (y-1)*nCols for (x,y) in rawIndices]
# Swap X and Y (old format)
# determine correct function to read data
print("# Signal inversion looks like " + str(signalInv) + ", guessing the "
"right method for data access.\n# If your results "
"look strange, signal polarity is wrong.")
if file_format == 100:
if signalInv == -1:
read_function = readHDF5t_100
else:
read_function = readHDF5t_100_i
else:
if signalInv == -1:
read_function = readHDF5t_101_i
else:
read_function = readHDF5t_101
return (rf, nFrames, samplingRate, nRecCh, chIndices, file_format, signalInv, rawIndices, read_function)
def readHDF5(rf, t0, t1):
"""In order to use the algorithms designed for the old format, the input data must be inverted."""
return 4095 - rf['3BData/Raw'][t0:t1].flatten().astype(ctypes.c_short)
def readHDF5t_100(rf, t0, t1, nch):
"""Transposed version for the interpolation method."""
if t0 <= t1:
d = 2048 - rf['3BData/Raw'][t0:t1].flatten('C').astype(ctypes.c_short)
d[np.where(np.abs(d) > 1500)[0]] = 0
return d
else: # Reversed read
raise Exception('Reading backwards? Not sure about this.')
return 2048 - rf['3BData/Raw'][t1:t0].flatten(
'F').astype(ctypes.c_short)
def readHDF5t_100_i(rf, t0, t1, nch):
    """Transposed read for the interpolation method (format 100, non-inverted).

    Returns frames ``t0:t1`` as C shorts offset by -2048, with samples whose
    magnitude exceeds 1500 zeroed out as artefacts.
    """
    if t0 > t1:
        # Reversed reads are unsupported; the original's fall-through return
        # after this raise was dead code.
        raise Exception('Reading backwards? Not sure about this.')
    offset_samples = rf['3BData/Raw'][t0:t1].flatten('C').astype(ctypes.c_short) - 2048
    offset_samples[np.abs(offset_samples) > 1500] = 0
    return offset_samples
def readHDF5t_101(rf, t0, t1, nch):
    """Transposed read for the interpolation method (format 101, non-inverted).

    Format 101 stores samples channel-interleaved, so ``nch`` samples per
    frame are read, reshaped to (frames, channels), flattened, offset by
    -2048, and artefacts (|value| > 1500) are zeroed.
    """
    if t0 > t1:
        # Reversed reads remain unsupported; everything after this raise in
        # the original was unreachable and has been removed.
        raise Exception('Reading backwards? Not sure about this.')
    block = rf['3BData/Raw'][nch * t0:nch * t1]
    frames = block.reshape((-1, nch), order='C').flatten('C')
    offset_samples = frames.astype(ctypes.c_short) - 2048
    offset_samples[np.where(np.abs(offset_samples) > 1500)[0]] = 0
    return offset_samples
def readHDF5t_101_i(rf, t0, t1, nch):
    """Transposed read for the interpolation method (format 101, inverted signal).

    Same layout handling as :func:`readHDF5t_101`, but with the polarity
    inverted around 2048; artefacts (|value| > 1500) are zeroed.
    """
    if t0 > t1:
        # Reversed reads remain unsupported; the original code after this
        # raise was dead and has been dropped.
        raise Exception('Reading backwards? Not sure about this.')
    frames = rf['3BData/Raw'][nch * t0:nch * t1].reshape((-1, nch), order='C')
    inverted = 2048 - frames.flatten('C').astype(ctypes.c_short)
    inverted[np.where(np.abs(inverted) > 1500)[0]] = 0
    return inverted
| [
"numpy.abs",
"h5py.File",
"numpy.zeros",
"spikeextractors.RecordingExtractor.__init__",
"numpy.arange"
] | [((2663, 2687), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (2672, 2687), False, 'import h5py\n'), ((195, 228), 'spikeextractors.RecordingExtractor.__init__', 'RecordingExtractor.__init__', (['self'], {}), '(self)\n', (222, 228), False, 'from spikeextractors import RecordingExtractor\n'), ((1669, 1694), 'h5py.File', 'h5py.File', (['save_path', '"""w"""'], {}), "(save_path, 'w')\n", (1678, 1694), False, 'import h5py\n'), ((6155, 6164), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (6161, 6164), True, 'import numpy as np\n'), ((5215, 5224), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (5221, 5224), True, 'import numpy as np\n'), ((5663, 5672), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (5669, 5672), True, 'import numpy as np\n'), ((6441, 6450), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (6447, 6450), True, 'import numpy as np\n'), ((6755, 6764), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (6761, 6764), True, 'import numpy as np\n'), ((7045, 7054), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (7051, 7054), True, 'import numpy as np\n'), ((2416, 2428), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (2425, 2428), True, 'import numpy as np\n'), ((2430, 2441), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (2438, 2441), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-08-17 09:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add filming-licence charge fields to the commercialoperator ``proposal`` model."""
    dependencies = [
        ('commercialoperator', '0085_proposaleventotherdetails_other_comments'),
    ]
    operations = [
        # Choice of charging scheme for a filming licence (default: full-day charge).
        migrations.AddField(
            model_name='proposal',
            name='filming_licence_charge_type',
            field=models.CharField(choices=[('half_day_charge', 'Half day charge'), ('full_day_charge', 'Full day charge'), ('2_days_charge', '2 days charge'), ('3_or_more_days_charge', '3 or more days charge')], default='full_day_charge', max_length=30, verbose_name='Filming Licence charge Type'),
        ),
        # Monetary amount for non-standard charging, stored with 2 decimal places.
        migrations.AddField(
            model_name='proposal',
            name='filming_non_standard_charge',
            field=models.DecimalField(decimal_places=2, default='0.00', max_digits=8),
        ),
    ]
| [
"django.db.models.DecimalField",
"django.db.models.CharField"
] | [((455, 753), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('half_day_charge', 'Half day charge'), ('full_day_charge',\n 'Full day charge'), ('2_days_charge', '2 days charge'), (\n '3_or_more_days_charge', '3 or more days charge')]", 'default': '"""full_day_charge"""', 'max_length': '(30)', 'verbose_name': '"""Filming Licence charge Type"""'}), "(choices=[('half_day_charge', 'Half day charge'), (\n 'full_day_charge', 'Full day charge'), ('2_days_charge',\n '2 days charge'), ('3_or_more_days_charge', '3 or more days charge')],\n default='full_day_charge', max_length=30, verbose_name=\n 'Filming Licence charge Type')\n", (471, 753), False, 'from django.db import migrations, models\n'), ((878, 945), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'default': '"""0.00"""', 'max_digits': '(8)'}), "(decimal_places=2, default='0.00', max_digits=8)\n", (897, 945), False, 'from django.db import migrations, models\n')] |
# autor : colson (<NAME>)
# https://www.github.com/YeongJunKim
from aiy.vision.leds import Leds
from time import sleep
from aiy_log import MyLogger
import logging
class MyLed:
    """Thin wrapper around the AIY Vision ``Leds`` driver.

    Holds an RGB colour as an ``(r, g, b)`` tuple and pushes it to the
    hardware via ``Leds.rgb_on``.  Usable as a context manager: the LED is
    switched off (all channels 0) on exit.
    """

    def __init__(self, led=(0x00, 0x00, 0x00)):
        """Initialise the LED driver and apply the initial colour.

        Args:
            led: initial ``(r, g, b)`` colour tuple; defaults to off.
        """
        self.logger = MyLogger(level=logging.INFO, get="LED")
        self.leds = Leds()
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("Init LED drivers")

    def set_color(self, led):
        """Set the LED to the given ``(r, g, b)`` colour tuple."""
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("set LED colors")

    def __enter__(self):
        """Support ``with MyLed() as led:`` usage."""
        return self

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        """Switch the LED off.

        Bug fix: the original signature took no exception arguments, so
        using the instance in a ``with`` statement raised ``TypeError``.
        The arguments default to ``None`` so existing explicit calls
        (``obj.__exit__()``) keep working.
        """
        led = (0x00, 0x00, 0x00)
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("exit LED drivers")
        # Never suppress exceptions raised inside the ``with`` block.
        return False
def main():
    """Cycle the LED through blue, green and red forever, 0.1 s per colour."""
    driver = MyLed()
    palette = [(0x00, 0x00, 0xFF),  # blue
               (0x00, 0xFF, 0x00),  # green
               (0xFF, 0x00, 0x00)]  # red
    while True:
        for color in palette:
            driver.set_color(led=color)
            sleep(0.1)
if __name__ == '__main__':
main()
| [
"aiy.vision.leds.Leds.rgb_on",
"aiy.vision.leds.Leds",
"time.sleep",
"aiy_log.MyLogger"
] | [((248, 287), 'aiy_log.MyLogger', 'MyLogger', ([], {'level': 'logging.INFO', 'get': '"""LED"""'}), "(level=logging.INFO, get='LED')\n", (256, 287), False, 'from aiy_log import MyLogger\n'), ((308, 314), 'aiy.vision.leds.Leds', 'Leds', ([], {}), '()\n', (312, 314), False, 'from aiy.vision.leds import Leds\n'), ((808, 818), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (813, 818), False, 'from time import sleep\n'), ((891, 901), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (896, 901), False, 'from time import sleep\n'), ((974, 984), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (979, 984), False, 'from time import sleep\n'), ((340, 356), 'aiy.vision.leds.Leds.rgb_on', 'Leds.rgb_on', (['led'], {}), '(led)\n', (351, 356), False, 'from aiy.vision.leds import Leds\n'), ((467, 483), 'aiy.vision.leds.Leds.rgb_on', 'Leds.rgb_on', (['led'], {}), '(led)\n', (478, 483), False, 'from aiy.vision.leds import Leds\n'), ((619, 635), 'aiy.vision.leds.Leds.rgb_on', 'Leds.rgb_on', (['led'], {}), '(led)\n', (630, 635), False, 'from aiy.vision.leds import Leds\n')] |
# Core
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
# Third party
from rest_framework import serializers
# Local
from .models import TorrentInfoHash
class TorrentInfoHashSerializer(serializers.Serializer):
    """Serializer for :class:`TorrentInfoHash` rows.

    ``info_hash`` is the current hash; ``prev_info_hash`` is the previously
    known hash, used for validation on create and for cleanup of stale rows.
    """

    info_hash = serializers.CharField()
    prev_info_hash = serializers.CharField()

    def create(self, validated_data):
        """Create a new ``TorrentInfoHash`` from the validated payload.

        ``prev_info_hash`` is excluded from the stored fields and must equal
        ``settings.NEW_INFO_HASH`` (the sentinel marking a brand-new hash).

        Raises:
            ValueError: if ``prev_info_hash`` is not the new-hash sentinel.
        """
        # Bug fix: the original used ``key is not 'prev_info_hash'`` — an
        # identity comparison against a string literal that only worked by
        # CPython string interning (SyntaxWarning on 3.8+). Use a value
        # comparison instead.
        req_data = {key: value for key, value in validated_data.items()
                    if key != 'prev_info_hash'}
        prev_info_hash = validated_data['prev_info_hash']
        if prev_info_hash != settings.NEW_INFO_HASH:
            raise ValueError(f"For method create, the prev_info_hash must be {settings.NEW_INFO_HASH}")
        return TorrentInfoHash.objects.create(**req_data)

    def update(self, instance, validated_data):
        """Update ``info_hash`` on an existing instance, save and return it."""
        instance.info_hash = validated_data.get('info_hash', instance.info_hash)
        instance.save()
        return instance

    def _ensure_deletion(self, prev_info_hash):
        """Delete every row whose ``prev_info_hash`` matches the given hash.

        A ``None`` hash is a no-op.
        """
        if prev_info_hash is not None:
            qs_torrent_info_hash = TorrentInfoHash.objects.filter(prev_info_hash=prev_info_hash)
            for torrent_info_hash in qs_torrent_info_hash:
                torrent_info_hash.delete()
| [
"rest_framework.serializers.CharField"
] | [((268, 291), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (289, 291), False, 'from rest_framework import serializers\n'), ((313, 336), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (334, 336), False, 'from rest_framework import serializers\n')] |
import gym
import argparse
import tensorflow as tf
from tf_rl.common.memory import HER_replay_buffer
from tf_rl.common.utils import eager_setup, her_sampler, create_log_model_directory, get_alg_name, RunningMeanStd
from tf_rl.common.params import ROBOTICS_ENV_LIST
from tf_rl.common.train import train_HER, train_HER_ray
from tf_rl.common.networks import HER_Actor as Actor, HER_Critic as Critic
from tf_rl.agents.HER import HER_DDPG as HER, HER_DDPG_debug as HER_debug
# Configure TensorFlow eager execution (tf_rl helper).
eager_setup()
"""
# defined in params.py
ROBOTICS_ENV_LIST = {
	"FetchPickAndPlace-v1"
	"FetchPush-v1"
	"FetchReach-v1"
	"FetchSlide-v1"
}
"""
# ---- Command-line hyper-parameters for HER(+DDPG) training ----
parser = argparse.ArgumentParser()
parser.add_argument("--mode", default="MuJoCo", help="Task mode")
parser.add_argument("--env_name", default="FetchPush-v1", help="Env title")
parser.add_argument("--seed", default=123, type=int, help="seed for randomness")
parser.add_argument("--num_epochs", default=200, type=int, help="number of epochs in a training")
parser.add_argument("--num_cycles", default=50, type=int, help="number of cycles in epoch")
parser.add_argument("--num_episodes", default=2, type=int, help="number of episodes in cycle")
parser.add_argument("--num_steps", default=50, type=int, help="number of steps in an episode")
parser.add_argument("--replay_strategy", default="future", help="replay_strategy")
parser.add_argument("--replay_k", default=4, type=int, help="number of replay strategy")
parser.add_argument("--num_updates", default=40, type=int, help="number of updates in cycle")
parser.add_argument("--memory_size", default=1000000, type=int, help="memory size in a training")
parser.add_argument("--batch_size", default=256, type=int, help="batch size of each iteration of update")
parser.add_argument("--gamma", default=0.98, type=float, help="discount factor")
parser.add_argument("--tau", default=0.05, type=float, help="soft-update tau")
parser.add_argument("--action_l2", default=1.0, type=float, help="magnitude of L2 regularisation")
parser.add_argument("--noise_eps", default=0.2, type=float, help="magnitude of noise")
parser.add_argument("--random_eps", default=0.3, type=float, help="magnitude of randomness")
parser.add_argument("--debug_flg", default=False, type=bool, help="debug mode or not")
parser.add_argument("--google_colab", default=False, type=bool, help="if you are executing this on GoogleColab")
params = parser.parse_args()
# Goal label for the chosen robotics env (mapping defined in params.py).
params.goal = ROBOTICS_ENV_LIST[params.env_name]
params.test_episodes = 10
env = gym.make(params.env_name)
# Action-space bound and dimensionality, consumed by the actor network
# and the exploration-noise logic.
params.max_action = env.action_space.high[0]
params.num_action = env.action_space.shape[0]
# set seed (both env and TF graph-level randomness)
env.seed(params.seed)
tf.random.set_random_seed(params.seed)
# create a directory for log/model (also sets params.log_dir used below)
params = create_log_model_directory(params, get_alg_name())
# get init obs for creating env_params
obs = env.reset()
# prep for basic stats: sizes used to build the networks and normalisers
env_params = {
	'obs': obs['observation'].shape[0],
	'goal': obs['desired_goal'].shape[0],
	'action': env.action_space.shape[0],
	'action_max': env.action_space.high[0],
	'max_timesteps': env._max_episode_steps
}
# HER transition sampler ("future" strategy by default) and replay buffer.
her_sample_func = her_sampler(params.replay_strategy, params.replay_k, env.compute_reward)
replay_buffer = HER_replay_buffer(env_params, params.memory_size, her_sample_func.sample_her_transitions)
summary_writer = tf.contrib.summary.create_file_writer(params.log_dir)
# Running mean/std normalisers for observations and goals.
o_norm = RunningMeanStd(env_params['obs'])
g_norm = RunningMeanStd(env_params['goal'])
# Debug agent adds extra instrumentation; both share the same interface.
if params.debug_flg:
	agent = HER_debug(Actor, Critic, env.action_space.shape[0], params, o_norm, g_norm)
else:
	agent = HER(Actor, Critic, env.action_space.shape[0], params, o_norm, g_norm)
train_HER(agent, env, replay_buffer, summary_writer)
# train_HER_ray(agent, env, replay_buffer, summary_writer)
| [
"tf_rl.common.memory.HER_replay_buffer",
"tf_rl.common.train.train_HER",
"argparse.ArgumentParser",
"tensorflow.random.set_random_seed",
"tf_rl.common.utils.her_sampler",
"tf_rl.common.utils.eager_setup",
"tf_rl.agents.HER.HER_DDPG_debug",
"tensorflow.contrib.summary.create_file_writer",
"tf_rl.comm... | [((471, 484), 'tf_rl.common.utils.eager_setup', 'eager_setup', ([], {}), '()\n', (482, 484), False, 'from tf_rl.common.utils import eager_setup, her_sampler, create_log_model_directory, get_alg_name, RunningMeanStd\n'), ((637, 662), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (660, 662), False, 'import argparse\n'), ((2486, 2511), 'gym.make', 'gym.make', (['params.env_name'], {}), '(params.env_name)\n', (2494, 2511), False, 'import gym\n'), ((2637, 2675), 'tensorflow.random.set_random_seed', 'tf.random.set_random_seed', (['params.seed'], {}), '(params.seed)\n', (2662, 2675), True, 'import tensorflow as tf\n'), ((3101, 3173), 'tf_rl.common.utils.her_sampler', 'her_sampler', (['params.replay_strategy', 'params.replay_k', 'env.compute_reward'], {}), '(params.replay_strategy, params.replay_k, env.compute_reward)\n', (3112, 3173), False, 'from tf_rl.common.utils import eager_setup, her_sampler, create_log_model_directory, get_alg_name, RunningMeanStd\n'), ((3190, 3284), 'tf_rl.common.memory.HER_replay_buffer', 'HER_replay_buffer', (['env_params', 'params.memory_size', 'her_sample_func.sample_her_transitions'], {}), '(env_params, params.memory_size, her_sample_func.\n sample_her_transitions)\n', (3207, 3284), False, 'from tf_rl.common.memory import HER_replay_buffer\n'), ((3297, 3350), 'tensorflow.contrib.summary.create_file_writer', 'tf.contrib.summary.create_file_writer', (['params.log_dir'], {}), '(params.log_dir)\n', (3334, 3350), True, 'import tensorflow as tf\n'), ((3361, 3394), 'tf_rl.common.utils.RunningMeanStd', 'RunningMeanStd', (["env_params['obs']"], {}), "(env_params['obs'])\n", (3375, 3394), False, 'from tf_rl.common.utils import eager_setup, her_sampler, create_log_model_directory, get_alg_name, RunningMeanStd\n'), ((3404, 3438), 'tf_rl.common.utils.RunningMeanStd', 'RunningMeanStd', (["env_params['goal']"], {}), "(env_params['goal'])\n", (3418, 3438), False, 'from tf_rl.common.utils import eager_setup, 
her_sampler, create_log_model_directory, get_alg_name, RunningMeanStd\n'), ((3638, 3690), 'tf_rl.common.train.train_HER', 'train_HER', (['agent', 'env', 'replay_buffer', 'summary_writer'], {}), '(agent, env, replay_buffer, summary_writer)\n', (3647, 3690), False, 'from tf_rl.common.train import train_HER, train_HER_ray\n'), ((2756, 2770), 'tf_rl.common.utils.get_alg_name', 'get_alg_name', ([], {}), '()\n', (2768, 2770), False, 'from tf_rl.common.utils import eager_setup, her_sampler, create_log_model_directory, get_alg_name, RunningMeanStd\n'), ((3473, 3548), 'tf_rl.agents.HER.HER_DDPG_debug', 'HER_debug', (['Actor', 'Critic', 'env.action_space.shape[0]', 'params', 'o_norm', 'g_norm'], {}), '(Actor, Critic, env.action_space.shape[0], params, o_norm, g_norm)\n', (3482, 3548), True, 'from tf_rl.agents.HER import HER_DDPG as HER, HER_DDPG_debug as HER_debug\n'), ((3567, 3636), 'tf_rl.agents.HER.HER_DDPG', 'HER', (['Actor', 'Critic', 'env.action_space.shape[0]', 'params', 'o_norm', 'g_norm'], {}), '(Actor, Critic, env.action_space.shape[0], params, o_norm, g_norm)\n', (3570, 3636), True, 'from tf_rl.agents.HER import HER_DDPG as HER, HER_DDPG_debug as HER_debug\n')] |
import collections
import pickle
import random
import h5py
import numpy as np
import tqdm
from nas_201_api import NASBench201API
def is_valid_arch(matrix):
    """Return True when the output node ``n-1`` is reachable from node 0.

    ``matrix`` is an upper-triangular operation matrix where a non-zero
    entry at ``(u, v)`` denotes an edge ``u -> v``.  Reachability is checked
    with an iterative depth-first walk starting at the input node 0.
    """
    num_nodes = matrix.shape[0]
    seen = {0}
    stack = [0]
    while stack:
        node = stack.pop()
        for succ in range(node + 1, num_nodes):
            # Only follow edges carrying a non-zero operation.
            if succ not in seen and matrix[node][succ] != 0:
                seen.add(succ)
                stack.append(succ)
    return (num_nodes - 1) in seen
# Seed only affects the fallback seed choice below, making the dump reproducible.
random.seed(0)
api = NASBench201API("/tmp/NAS-Bench-201-v1_1-096897.pth")
results = []
for arch_index in tqdm.tqdm(range(len(api))):
    # Operation matrix of the cell, transposed so entry (i, j) is the op id
    # on edge i -> j (upper triangle only).
    op_matrix = NASBench201API.str2matrix(api.arch(arch_index)).astype(np.uint8).T
    arch = {f"{i}_{j}": op_matrix[i, j].item() for i in range(op_matrix.shape[0]) for j in range(i + 1, op_matrix.shape[0])}
    result = {"arch": arch}
    # Drop cells whose output node is unreachable from the input node.
    if not is_valid_arch(op_matrix):
        continue
    for dataset in ["cifar10-valid", "cifar10", "cifar100", "ImageNet16-120"]:
        compute_data = api.query_by_index(arch_index, dataset)
        arch_index_data = []
        available_seeds = api.arch2infos_full[arch_index].get_dataset_seeds(dataset)
        # Collect metrics for exactly 3 runs; re-sample a seed at random when
        # fewer than 3 trained runs exist for this dataset.
        for k in range(3):
            seed = available_seeds[k] if k < len(available_seeds) else random.choice(available_seeds)
            # "cifar10" has no separate validation split, so test metrics
            # stand in for the valid-* slots to keep the column layout fixed.
            if dataset == "cifar10-valid":
                metrics_name = ["train-loss", "train-accuracy", "valid-loss", "valid-accuracy", "test-loss", "test-accuracy"]
            elif dataset == "cifar10":
                metrics_name = ["train-loss", "train-accuracy", "test-loss", "test-accuracy", "test-loss", "test-accuracy"]
            else:
                metrics_name = ["train-loss", "train-accuracy", "valid-loss", "valid-accuracy", "test-loss", "test-accuracy"]
            metrics = api.get_more_info(arch_index, dataset, is_random=seed)
            # Accuracies are reported in percent -> rescale to [0, 1].
            # NOTE: the comprehension variable ``k`` shadows the seed-loop
            # index here (harmless, but easy to trip over).
            data = [metrics[k] / 100 if "accuracy" in k else metrics[k] for k in metrics_name]
            data = [d[0] if isinstance(d, tuple) else d for d in data]
            data += [compute_data[seed].flop, compute_data[seed].params, compute_data[seed].get_latency()]
            if arch_index == 0 and k == 0:
                print(arch, dataset, metrics, data)
            arch_index_data.append(data)
        register_dataset_name = dataset
        if dataset == "ImageNet16-120":
            register_dataset_name = "imagenet-16-120"
        # Rows: 3 seeds; columns: 6 metrics + flop, params, latency.
        result[register_dataset_name] = np.array(arch_index_data)
    results.append(result)
print("Found %d valid architectures." % len(results))
with open("data/nb201/nb201.pkl", "wb") as fp:
    pickle.dump(results, fp)
| [
"random.choice",
"pickle.dump",
"collections.deque",
"nas_201_api.NASBench201API",
"random.seed",
"numpy.array"
] | [((488, 502), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (499, 502), False, 'import random\n'), ((509, 561), 'nas_201_api.NASBench201API', 'NASBench201API', (['"""/tmp/NAS-Bench-201-v1_1-096897.pth"""'], {}), "('/tmp/NAS-Bench-201-v1_1-096897.pth')\n", (523, 561), False, 'from nas_201_api import NASBench201API\n'), ((209, 231), 'collections.deque', 'collections.deque', (['[0]'], {}), '([0])\n', (226, 231), False, 'import collections\n'), ((2591, 2615), 'pickle.dump', 'pickle.dump', (['results', 'fp'], {}), '(results, fp)\n', (2602, 2615), False, 'import pickle\n'), ((2432, 2457), 'numpy.array', 'np.array', (['arch_index_data'], {}), '(arch_index_data)\n', (2440, 2457), True, 'import numpy as np\n'), ((1265, 1295), 'random.choice', 'random.choice', (['available_seeds'], {}), '(available_seeds)\n', (1278, 1295), False, 'import random\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#############
## Imports ##
#############
import os
import sys ; sys.path.append("/home/developer/workspace/rklearn-lib")
import time
import pickle
import numpy as np
from rklearn.tfoo_v1 import BaseDataGenerator
from rktools.monitors import ProgressBar
############################
## CIFAR10DataGenerator() ##
############################
class CIFAR10DataGenerator(BaseDataGenerator):
    """Data generator that loads and prepares the pickled CIFAR-10 batches.

    Reads the numbered training batch files and the single test batch from
    ``config.data["data_home"]``, then normalizes images to [0, 1], reshapes
    them from NCHW to NHWC and one-hot encodes the labels.
    """

    ################
    ## __init__() ##
    ################

    def __init__(self, config, logger=None):
        """Store config/logger and resolve the data directory and batch size.

        Raises:
            RuntimeError: wrapping any error raised while reading the config.
        """
        try:
            super().__init__(config, logger)
            self.logger = logger
            self.config = config
            self.data_top_dir = self.config.data["data_home"]
            self.batch_size = self.config.data["batch_size"]
        except Exception as e:
            # ``from e`` preserves the original traceback for debugging
            # (the original re-raise discarded the chain).
            raise self._wrap_error(e) from e

    @staticmethod
    def _wrap_error(e):
        """Build the project's standard RuntimeError for *e*.

        Must be called from inside an ``except`` block so ``sys.exc_info()``
        still refers to the active exception.
        """
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        return RuntimeError("error msg = {}, error type = {}, error file = {}, error line = {}".format(
            e, exc_type, fname, exc_tb.tb_lineno))

    #################
    ## load_data() ##
    #################

    def load_data(self):
        """Load both training and testing data into memory.

        Populates ``X_train``/``y_train`` from the numbered training batch
        files and ``X_test``/``y_test`` from the test batch. When
        ``config.data["dev_sample"] > 0`` only that fraction of the data is
        kept (quick development runs).

        Raises:
            FileNotFoundError: if the data directory does not exist.
            RuntimeError: wrapping any other error during loading.
        """
        if not os.path.exists(self.data_top_dir):
            raise FileNotFoundError("Directory {} is not valid!".format(self.data_top_dir))
        try:
            start = time.time()

            # Read CIFAR training data
            nb_files = self.config.data["train_data_nb_files"]
            progress_bar = ProgressBar(max_value=nb_files, desc="File: ", ascii=True)
            for file_index in range(nb_files):
                file_path = os.path.join(self.data_top_dir,
                                         self.config.data["train_data_batch_prefix"] + str(file_index + 1))
                if not os.path.exists(file_path):
                    raise FileNotFoundError("Batch file {} is missing!".format(file_path))
                # ``with`` guarantees the handle is closed even if unpickling
                # fails (the original leaked it on error).
                with open(file_path, "rb") as train_file:
                    train_dict = pickle.load(train_file, encoding="latin1")

                # NOTE(review): assumes BaseDataGenerator initialises
                # ``self.X_train`` to None — confirm in the base class.
                if self.X_train is None:
                    # First batch file initialises the arrays.
                    self.X_train = np.array(train_dict['data'], float)
                    self.y_train = train_dict['labels']
                else:
                    self.X_train = np.concatenate((self.X_train, train_dict["data"]), 0)
                    self.y_train = np.concatenate((self.y_train, train_dict["labels"]), 0)
                progress_bar.update(1)
            progress_bar.close()

            # Read CIFAR test data
            file_path = os.path.join(self.data_top_dir, self.config.data["test_data_batch_prefix"])
            if not os.path.exists(file_path):
                raise FileNotFoundError("Test batch file {} is missing!".format(file_path))
            with open(file_path, "rb") as test_file:
                test_dict = pickle.load(test_file, encoding="latin1")
            self.X_test = test_dict["data"]
            self.y_test = np.array(test_dict["labels"])

            # Optionally keep only a fraction of the data for development.
            if self.config.data["dev_sample"] > 0:
                train_sample_size = int(len(self.X_train) * self.config.data["dev_sample"])
                self.X_train = self.X_train[:train_sample_size]
                self.y_train = self.y_train[:train_sample_size]
                # NOTE(review): the test sample size is derived from the
                # (already truncated) training-set length, not from
                # len(self.X_test) — looks like a copy-paste slip; kept
                # as-is to preserve behaviour. TODO confirm intent.
                test_sample_size = int(len(self.X_train) * self.config.data["dev_sample"])
                self.X_test = self.X_test[:test_sample_size]
                self.y_test = self.y_test[:test_sample_size]

            end = time.time()
            if self.logger:
                self.logger.debug("CIFAR 10 data loaded in {} secs.".format(end - start))
        except Exception as e:
            raise self._wrap_error(e) from e

    ####################
    ## prepare_data() ##
    ####################

    def prepare_data(self):
        """Normalize, reshape (NCHW -> NHWC) and one-hot encode both splits."""
        start = time.time()
        self.X_train, self.y_train = self._preprocess(self.X_train, self.y_train)
        self.X_test, self.y_test = self._preprocess(self.X_test, self.y_test)
        end = time.time()
        if self.logger:
            self.logger.debug("Data prepared in {} secs.".format(end - start))

    def _preprocess(self, images, labels):
        """Scale images to [0, 1], reshape to NHWC and one-hot the labels.

        Shared by the train and test splits (the original duplicated this
        logic verbatim for each split).
        """
        images = images.astype(np.float32) / 255.0  # normalize to [0, 1]
        images = images.reshape([-1, self.config.data["num_channels"],
                                 self.config.data["image_size"],
                                 self.config.data["image_size"]])
        images = images.transpose([0, 2, 3, 1])  # NCHW -> NHWC
        labels = np.eye(self.config.data["num_categories"])[labels]
        return images, labels
| [
"os.path.exists",
"numpy.eye",
"os.path.join",
"rktools.monitors.ProgressBar",
"pickle.load",
"os.path.split",
"numpy.array",
"sys.exc_info",
"numpy.concatenate",
"time.time",
"sys.path.append"
] | [((113, 169), 'sys.path.append', 'sys.path.append', (['"""/home/developer/workspace/rklearn-lib"""'], {}), "('/home/developer/workspace/rklearn-lib')\n", (128, 169), False, 'import sys\n'), ((4090, 4101), 'time.time', 'time.time', ([], {}), '()\n', (4099, 4101), False, 'import time\n'), ((5106, 5117), 'time.time', 'time.time', ([], {}), '()\n', (5115, 5117), False, 'import time\n'), ((1297, 1330), 'os.path.exists', 'os.path.exists', (['self.data_top_dir'], {}), '(self.data_top_dir)\n', (1311, 1330), False, 'import os\n'), ((1459, 1470), 'time.time', 'time.time', ([], {}), '()\n', (1468, 1470), False, 'import time\n'), ((1602, 1660), 'rktools.monitors.ProgressBar', 'ProgressBar', ([], {'max_value': 'nb_files', 'desc': '"""File: """', 'ascii': '(True)'}), "(max_value=nb_files, desc='File: ', ascii=True)\n", (1613, 1660), False, 'from rktools.monitors import ProgressBar\n'), ((2609, 2684), 'os.path.join', 'os.path.join', (['self.data_top_dir', "self.config.data['test_data_batch_prefix']"], {}), "(self.data_top_dir, self.config.data['test_data_batch_prefix'])\n", (2621, 2684), False, 'import os\n'), ((2704, 2729), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (2718, 2729), False, 'import os\n'), ((2801, 2842), 'pickle.load', 'pickle.load', (['test_file'], {'encoding': '"""latin1"""'}), "(test_file, encoding='latin1')\n", (2812, 2842), False, 'import pickle\n'), ((2943, 2972), 'numpy.array', 'np.array', (["test_dict['labels']"], {}), "(test_dict['labels'])\n", (2951, 2972), True, 'import numpy as np\n'), ((3530, 3541), 'time.time', 'time.time', ([], {}), '()\n', (3539, 3541), False, 'import time\n'), ((4547, 4589), 'numpy.eye', 'np.eye', (["self.config.data['num_categories']"], {}), "(self.config.data['num_categories'])\n", (4553, 4589), True, 'import numpy as np\n'), ((5035, 5077), 'numpy.eye', 'np.eye', (["self.config.data['num_categories']"], {}), "(self.config.data['num_categories'])\n", (5041, 5077), True, 'import numpy as np\n'), 
((877, 891), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (889, 891), False, 'import sys\n'), ((1862, 1887), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1876, 1887), False, 'import os\n'), ((1969, 2011), 'pickle.load', 'pickle.load', (['train_file'], {'encoding': '"""latin1"""'}), "(train_file, encoding='latin1')\n", (1980, 2011), False, 'import pickle\n'), ((3734, 3748), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3746, 3748), False, 'import sys\n'), ((912, 961), 'os.path.split', 'os.path.split', (['exc_tb.tb_frame.f_code.co_filename'], {}), '(exc_tb.tb_frame.f_code.co_filename)\n', (925, 961), False, 'import os\n'), ((2151, 2186), 'numpy.array', 'np.array', (["train_dict['data']", 'float'], {}), "(train_dict['data'], float)\n", (2159, 2186), True, 'import numpy as np\n'), ((2301, 2354), 'numpy.concatenate', 'np.concatenate', (["(self.X_train, train_dict['data'])", '(0)'], {}), "((self.X_train, train_dict['data']), 0)\n", (2315, 2354), True, 'import numpy as np\n'), ((2390, 2445), 'numpy.concatenate', 'np.concatenate', (["(self.y_train, train_dict['labels'])", '(0)'], {}), "((self.y_train, train_dict['labels']), 0)\n", (2404, 2445), True, 'import numpy as np\n'), ((3769, 3818), 'os.path.split', 'os.path.split', (['exc_tb.tb_frame.f_code.co_filename'], {}), '(exc_tb.tb_frame.f_code.co_filename)\n', (3782, 3818), False, 'import os\n')] |
__author__ = "<NAME>"
__copyright__ = "MIT Licence 2021, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "1.0"
__update__ = "2021-05-11"
import glob
import os
import os.path
# Directory this script lives in; all paths below are derived from it.
dir_path = os.path.dirname(os.path.realpath(__file__))
print("start _run_py3.py...")
# Clear previously generated Turtle (.ttl) files from the three output dirs.
dir_path_ttl1 = dir_path.replace("\\py", "\\data_v1\\rdf\\ogham")
filelist1 = glob.glob(os.path.join(dir_path_ttl1, "*.ttl"))
for f in filelist1:
    os.remove(f)
dir_path_ttl2 = dir_path.replace("\\py", "\\data_v1\\rdf\\geodata")
filelist2 = glob.glob(os.path.join(dir_path_ttl2, "*.ttl"))
for f in filelist2:
    os.remove(f)
# NOTE(review): "crosstaböe" looks like a typo (presumably "crosstable");
# left untouched because the on-disk directory may actually carry this name.
dir_path_ttl3 = dir_path.replace("\\py", "\\data_v1\\rdf\\crosstaböe")
filelist3 = glob.glob(os.path.join(dir_path_ttl3, "*.ttl"))
for f in filelist3:
    os.remove(f)
print("removed all ttl files...")
# Each exec()'d script emits its triples and updates the shared ``_config``
# counter object (defined inside those scripts — not visible in this file).
# ogham
exec(open(dir_path + "/og_sites.py").read())
exec(open(dir_path + "/og_inscriptions.py").read())
exec(open(dir_path + "/og_locations.py").read())
exec(open(dir_path + "/og_persons.py").read())
exec(open(dir_path + "/og_readings.py").read())
exec(open(dir_path + "/og_stones.py").read())
exec(open(dir_path + "/og_words.py").read())
sum_ogham = int(_config.count(0))
# geodata
exec(open(dir_path + "/gs_baronies.py").read())
exec(open(dir_path + "/gs_counties.py").read())
exec(open(dir_path + "/gs_countries.py").read())
exec(open(dir_path + "/gs_ireland_island.py").read())
exec(open(dir_path + "/gs_provinces.py").read())
#exec(open(dir_path + "/gs_townlands.py").read())
step2 = int(_config.count(0))
# Counter is cumulative, so subtract the previous stage's total.
sum_geodata = step2 - sum_ogham
# crosstables
exec(open(dir_path + "/ct_barony_townland.py").read())
exec(open(dir_path + "/ct_country_province.py").read())
exec(open(dir_path + "/ct_county_barony.py").read())
exec(open(dir_path + "/ct_insc_read.py").read())
exec(open(dir_path + "/ct_site_barony.py").read())
exec(open(dir_path + "/ct_site_country.py").read())
exec(open(dir_path + "/ct_site_county.py").read())
exec(open(dir_path + "/ct_site_loc.py").read())
exec(open(dir_path + "/ct_site_province.py").read())
exec(open(dir_path + "/ct_site_townland.py").read())
exec(open(dir_path + "/ct_stone_insc.py").read())
exec(open(dir_path + "/ct_stone_person.py").read())
exec(open(dir_path + "/ct_stone_site.py").read())
exec(open(dir_path + "/ct_stone_squirrel.py").read())
exec(open(dir_path + "/ct_stone_word.py").read())
step3 = int(_config.count(0))
sum_crosstable = step3 - sum_ogham - sum_geodata
# Per-stage and overall triple counts.
print("SUM TRIPLES OGHAM: " + str(sum_ogham))
print("SUM TRIPLES GEODATA: " + str(sum_geodata))
print("SUM TRIPLES CROSSTABLES: " + str(sum_crosstable))
print("SUM TRIPLES: " + str(_config.count(0)))
| [
"os.path.realpath",
"os.path.join",
"os.remove"
] | [((289, 315), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (305, 315), False, 'import os\n'), ((435, 471), 'os.path.join', 'os.path.join', (['dir_path_ttl1', '"""*.ttl"""'], {}), "(dir_path_ttl1, '*.ttl')\n", (447, 471), False, 'import os\n'), ((497, 509), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (506, 509), False, 'import os\n'), ((600, 636), 'os.path.join', 'os.path.join', (['dir_path_ttl2', '"""*.ttl"""'], {}), "(dir_path_ttl2, '*.ttl')\n", (612, 636), False, 'import os\n'), ((662, 674), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (671, 674), False, 'import os\n'), ((768, 804), 'os.path.join', 'os.path.join', (['dir_path_ttl3', '"""*.ttl"""'], {}), "(dir_path_ttl3, '*.ttl')\n", (780, 804), False, 'import os\n'), ((830, 842), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (839, 842), False, 'import os\n')] |
# -*- encoding=UTF-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
import json
from . import projectsettings as fstsettings
import logging
logger = logging.getLogger('fastsettings_logger')
class Settings(models.Model):
    """A single named application setting with optional integer/string payloads.

    Each row is identified by ``name`` and may carry an integer value, a
    string value (often JSON), or both; the ``use_integer``/``use_string``
    flags record which payload(s) are meaningful. Saved rows are mirrored
    into Redis (see the ``post_save`` receiver below) so readers can avoid
    hitting the database.
    """
    # Common Settings #
    # ==========Fields============== #
    # Utility name; doubles as the primary key, so it must be unique. #
    name = models.CharField(primary_key=True, max_length=64, help_text="Name of setting, must be unique.")
    # Can be used for anything. (name='version' Current version of the app) #
    action_int = models.IntegerField(blank=True, null=True,
                                    help_text="The setting integer. NOTE: If only using integer, check use_integer box and uncheck use_string!")
    # Can be used for anything. #
    action_str = models.TextField(blank=True, null=True,
                                  help_text='The setting string. (json: {"key:"val","key2":2}) NOTE: If only using string, check use_string box and uncheck use_integer!')
    # Use integer part of this setting? #
    use_integer = models.BooleanField(default=True, help_text="Use integer part of this setting?")
    # Use string part of this setting? #
    use_string = models.BooleanField(default=False, help_text="Use string part of this setting?")
    # Description of setting. #
    description = models.CharField(blank=True, null=True, max_length=200, help_text="Description of setting.")
    # Game Name #
    game_name = models.CharField(default="testapp", max_length=12)

    class Meta:
        verbose_name = "Settings"
        verbose_name_plural = "Settings"
        app_label = "fastsettings"
# ==================================== #
# Handle save signals.
# e.g. When Settings gets saved, re-sync
# the settings key in redis.
# Redis is used to so the db isn't hit
# as often.
# ==================================== #
@receiver(post_save, sender=Settings)
def on_settings_save(sender, **kwargs):
    """Mirror a freshly saved Settings row into the Redis settings hash.

    Connected to Django's post_save signal for Settings so that reads can
    be served from Redis instead of hitting the database on every lookup.
    Any failure is logged and swallowed (saving must not break on a Redis
    outage).
    """
    instance = kwargs.get('instance', None)
    if instance is None:
        return
    payload = None
    try:
        # The hash name is derived from the configured application name.
        hash_name = "{0}_PROJECT_SETTINGS".format(fstsettings.get_from_settings_file("APP_NAME", ""))
        payload = {'action_int': instance.action_int, 'action_str': instance.action_str,
                   'use_integer': instance.use_integer, 'use_string': instance.use_string}
        payload = json.dumps(payload)
        redis_conn = fstsettings.get_redis_connection()
        redis_conn.hset(hash_name, instance.name, payload)
    except Exception as e:
        logger.error("on_settings_save: setting_obj:{0}".format(payload), exc_info=e)
| [
"logging.getLogger",
"django.db.models.TextField",
"django.db.models.IntegerField",
"json.dumps",
"django.db.models.BooleanField",
"django.dispatch.receiver",
"django.db.models.CharField"
] | [((260, 300), 'logging.getLogger', 'logging.getLogger', (['"""fastsettings_logger"""'], {}), "('fastsettings_logger')\n", (277, 300), False, 'import logging\n'), ((1934, 1970), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Settings'}), '(post_save, sender=Settings)\n', (1942, 1970), False, 'from django.dispatch import receiver\n'), ((428, 528), 'django.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(64)', 'help_text': '"""Name of setting, must be unique."""'}), "(primary_key=True, max_length=64, help_text=\n 'Name of setting, must be unique.')\n", (444, 528), False, 'from django.db import models\n'), ((619, 780), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""The setting integer. NOTE: If only using integer, check use_integer box and uncheck use_string!"""'}), "(blank=True, null=True, help_text=\n 'The setting integer. NOTE: If only using integer, check use_integer box and uncheck use_string!'\n )\n", (638, 780), False, 'from django.db import models\n'), ((859, 1045), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""The setting string. (json: {"key:"val","key2":2}) NOTE: If only using string, check use_string box and uncheck use_integer!"""'}), '(blank=True, null=True, help_text=\n \'The setting string. 
(json: {"key:"val","key2":2}) NOTE: If only using string, check use_string box and uncheck use_integer!\'\n )\n', (875, 1045), False, 'from django.db import models\n'), ((1130, 1215), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'help_text': '"""Use integer part of this setting?"""'}), "(default=True, help_text='Use integer part of this setting?'\n )\n", (1149, 1215), False, 'from django.db import models\n'), ((1269, 1354), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Use string part of this setting?"""'}), "(default=False, help_text='Use string part of this setting?'\n )\n", (1288, 1354), False, 'from django.db import models\n'), ((1400, 1497), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'null': '(True)', 'max_length': '(200)', 'help_text': '"""Description of setting."""'}), "(blank=True, null=True, max_length=200, help_text=\n 'Description of setting.')\n", (1416, 1497), False, 'from django.db import models\n'), ((1527, 1577), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""testapp"""', 'max_length': '(12)'}), "(default='testapp', max_length=12)\n", (1543, 1577), False, 'from django.db import models\n'), ((2505, 2528), 'json.dumps', 'json.dumps', (['the_setting'], {}), '(the_setting)\n', (2515, 2528), False, 'import json\n')] |
# coding: utf8
import json
import os
import time
import random
import socket
import hashlib
# Pandas is required; abort with install instructions when it is missing.
try:
    lib = __import__('pandas')
    globals()['pd'] = lib
except ImportError:
    pandas_import_error_msg = \
        '''
        Este script utiliza la libreria de Python Pandas.
        Por favor ejecuta:
        $ sudo -H pip install pandas
        o si se esta utilizando un VirtualEnv:
        (py_venv) $ pip install pandas.
        '''
    print (pandas_import_error_msg)
    exit(1)
# Bundled sample resources (resolved under LOCAL/samples by the loaders).
SAMPLE_CONF = 'config.sample.json'
SAMPLE_DATA = 'data.sample.csv'
# Python 2 only: accept byte strings and unicode wherever text is expected.
TEXT_TYPE = (str, unicode)
# Repository root; assumes this file lives in a '/helpers' subdirectory.
LOCAL = os.path.dirname(os.path.abspath(__file__))[:-len('/helpers')]
def clean_str(_somestr):
    """Strip every character that is not a lowercase hex digit (0-9, a-f).

    Used to sanitise hash values read from / written to the cache file
    (in particular it drops trailing newlines).

    :param _somestr: string to sanitise.
    :return: string containing only the allowed characters, in order.
    """
    # frozenset gives O(1) membership; the original string scan was O(len)
    # per character and built the result with quadratic += concatenation.
    allowed_chars = frozenset('abcdef0123456789')
    return ''.join(c for c in _somestr if c in allowed_chars)
def hash_cache(_origin, _hash):
    """Look up *_origin* in the on-disk hash cache, inserting *_hash* on a miss.

    The cache file stores one ``origin:hash`` pair per line.  Returns the
    cached (sanitised) hash for *_origin*; on a miss the cache file is
    rewritten with the new entry and the sanitised *_hash* is returned.
    (The original implementation implicitly returned None on a miss,
    forcing every caller to special-case the first lookup.)

    NOTE(review): keys containing ':' are truncated at the first colon on
    re-read, as in the original format — confirm origins never contain one.
    """
    cache_file = 'conversion_ids.keys'
    if isinstance(_origin, tuple):
        _origin = _origin[0]
    cache_lines = {}
    try:
        # ``with`` closes the handle (the original leaked it).
        with open(cache_file, 'r') as cache:
            for line in cache:
                # partition keeps values intact even if they contain ':'
                # (split(':')[1] silently dropped everything after a
                # second colon); lines without a separator are skipped.
                k, sep, v = line.partition(':')
                if sep:
                    cache_lines[k] = v
    except IOError:
        cache_lines = {}
    if _origin in cache_lines:
        return clean_str(cache_lines[_origin])
    # Miss: persist the new mapping, then return the sanitised hash.
    cache_lines[_origin] = _hash
    with open(cache_file, 'w') as cache:
        for k, v in cache_lines.items():
            cache.write('{}:{}\n'.format(k, clean_str(v)))
    return clean_str(_hash)
def resolve_kwargs(_conf, _kwargs):
    """Merge *_kwargs* over the defaults in *_conf*, recursively.

    A key from _kwargs is taken only when it also exists in _conf and its
    value has the same type as the default; nested dicts are merged
    recursively.  Everything else falls back to the default.

    Args:
        - _conf: dict of default configuration values.
        - _kwargs: dict of user-supplied overrides.
    Return:
        - Dict with the resolved configuration.
    Raises:
        - TypeError: when either argument is not a dict.
    """
    if not (isinstance(_conf, dict) and isinstance(_kwargs, dict)):
        raise TypeError('Argumentos no validos.')
    resolved = {}
    for key, default in _conf.items():
        override = _kwargs.get(key)
        if key not in _kwargs or not isinstance(override, default.__class__):
            # Missing or wrong-typed override: keep the default.
            resolved[key] = default
        elif isinstance(default, dict):
            resolved[key] = resolve_kwargs(default, override)
        else:
            resolved[key] = _kwargs[key]
    return resolved
def load_config(config_path=SAMPLE_CONF):
    """
    Load a JSON configuration file.

    :param config_path: path to the JSON config; the bundled sample name
        is resolved inside the ``samples`` directory.
    :return: the parsed configuration, or None on any failure (bad
        argument type, unreadable file, or invalid JSON).
    """
    if not isinstance(config_path, TEXT_TYPE):
        print ('config_path debe ser una instancia de STR o UNICODE.')
        return None
    if config_path == SAMPLE_CONF:
        # Load Sample
        config_full_path = os.path.join(LOCAL, 'samples', config_path)
    else:
        # Load Custom Config
        config_full_path = config_path
    try:
        # ``with`` guarantees the handle is closed (the original leaked it).
        with open(config_full_path, 'rb') as config_file:
            return json.load(config_file)
    except ValueError:
        print ('No es posible decodificar la configuracion: {}, no JSON parseable.'.format(config_path))
        return None
    except IOError:
        print ('No es posible localizar la configuracion: {}.'.format(config_path))
        return None
def anonymize_cols(_pddf=None, columns=None):
    """
    Replace the values of the listed DataFrame columns with hashes.

    Every cell in each listed column is mapped through generate_unique_id.
    Columns that fail to process are reported and skipped; a warning is
    printed when not all columns could be processed.

    :param _pddf: Pandas.DataFrame to anonymise (mutated in place).
    :param columns: list of column names to anonymise.
    :return: the mutated DataFrame, or None on invalid arguments.
    """
    if not isinstance(_pddf, pd.DataFrame):
        print ('_pddf debe ser una instancia de Pandas.DataFrame')
        return None
    if not isinstance(columns, list):
        print ('columns debe ser una instancia de LIST.')
        return None
    pending = len(columns)
    for column_name in columns:
        try:
            _pddf[column_name] = _pddf[column_name].apply(generate_unique_id)
            pending -= 1
        except Exception as e:
            print (e)
            print ('Fallo el procesamiento de la columna:\"{}\", err: NOT-FOUND.'.format(column_name))
    if pending > 0:
        print ('No fue posible procesar todas las columnas')
    return _pddf
def load_input(_input_filename=None):
    """
    Load and validate the input CSV.

    :param _input_filename: path to the CSV file; the bundled sample name
        is resolved inside the ``samples`` directory.
    Return:
        - Pandas.DataFrame: successful load.
        - None: the resource could not be loaded/parsed (read failures are
          swallowed silently by the bare ``except`` below).
    """
    if _input_filename == SAMPLE_DATA:
        # Load Sample
        _input_filename = os.path.join(LOCAL, 'samples', _input_filename)
    # Validate the input path (only warns; open() below still raises if the
    # file is really missing):
    if not os.path.exists(_input_filename):
        print ('No es posible localizar el archivo: {}.'.format(os.path.basename(_input_filename)))
    with open(_input_filename, 'rb') as tmp_f:
        tmp_lines = tmp_f.readlines()
        if len(tmp_lines) > 0:
            # First line is taken as the header row; spaces are stripped.
            # NOTE(review): the file is opened in binary mode — under
            # Python 3 these lines would be bytes and .replace('\n', ...)
            # would fail; this script appears to target Python 2 (see the
            # ``unicode`` usages elsewhere in this module).
            csv_headers = tmp_lines[0].replace('\n', '').replace(' ', '').split(',')
            try:
                return pd.read_csv(_input_filename, skipinitialspace=True, usecols=csv_headers)
            except:
                pass
def generate_unique_id(*args):
    """
    Generate an MD5-based identifier for *args*, memoised on disk.

    Combines the current time, random numbers, the host IP and *args* into
    an MD5 hexdigest, then consults/updates the on-disk cache via
    hash_cache so repeated calls with the same *args* keep the same id.
    source: StackOverFlow.
    """
    # NOTE(review): ``long`` and hashing a str make this Python 2 only;
    # Python 3 would need int() and .encode() before hashlib.md5().
    t = long(time.time() * 1000)
    r = long(random.random() * 100000000000000000)
    try:
        a = socket.gethostbyname(socket.gethostname())
    except Exception as e:
        print (e)
        # Fall back to a random value when the hostname cannot be resolved.
        a = random.random() * 100000000000000000
    _uid = str(t) + ' ' + str(r) + ' ' + str(a) + ' ' + str(args)
    _uid = hashlib.md5(_uid).hexdigest()
    # Prefer a previously cached id for these args, if any.
    cached_hash = hash_cache(args, _uid)
    if cached_hash:
        return cached_hash
    else:
        return _uid
def is_a_valid_conf(_conf=None):
    """
    Validate a configuration dict.

    A valid configuration is a dict containing a 'columns' key whose value
    is a list of strings (str/unicode).

    Args:
        - _conf: configuration to validate.
    Return:
        - bool: True when valid; False otherwise (the reason is printed).
    """
    if not isinstance(_conf, dict):
        print ('_conf debe ser una instancia de DICT.')
        return False
    key, expected_type, expected_content = 'columns', list, TEXT_TYPE
    # The required key must be present...
    if key not in _conf:
        print ('{} es requerida!'.format(key))
        return False
    # ...must hold a list...
    if not isinstance(_conf[key], expected_type):
        print ('{} debe contener {}'.format(key, expected_type))
        return False
    # ...and every entry of that list must be a string.
    if not all(isinstance(entry, expected_content) for entry in _conf['columns']):
        print ('_conf[\'columns\'] debe ser una {} de {}'.format(expected_type,
                                                                expected_content))
        return False
    return True
def write_csv(df, output_fn=None):
    """
    Write the processed output to a CSV file.

    Args:
        - df: Pandas.DataFrame.
            - Processed data.
        - output_fn: str or unicode
            - Name for the output CSV file; when omitted, a name is
              derived from a freshly generated unique id.
    Return:
        - Str: name of the output file (None implicitly on write failure).
    """
    if not output_fn:
        output_fn = '{}.csv'.format(generate_unique_id('abc')[0:15])
    # Force every cell to a decodable unicode value so to_csv cannot choke
    # on mixed encodings; cells that still fail are blanked out.
    # NOTE(review): DataFrame.get_value/set_value were removed in pandas
    # 1.0 and ``unicode`` only exists on Python 2 — this function targets
    # old Python 2 / pandas versions; confirm before porting.
    for column in df.columns:
        for idx in df[column].index:
            x = df.get_value(idx, column)
            try:
                x = unicode(x.encode('utf-8', 'ignore'),
                            errors='ignore') if type(x) == unicode else unicode(str(x), errors='ignore')
                df.set_value(idx, column, x)
            except Exception as e:
                print ('encoding error: {0} {1}'.format(idx, column))
                print ('Err Msg: \"{}\".'.format(e))
                df.set_value(idx, column, '')
                continue
    try:
        df.to_csv(output_fn, index=False)
        return output_fn
    except Exception as e:
        print ('Ocurrio un fallo al intentar grabar el archivo {}'.format(output_fn))
        print ('Err Msg: \"{}\".'.format(e))
| [
"os.path.exists",
"hashlib.md5",
"os.path.join",
"socket.gethostname",
"os.path.basename",
"os.path.abspath",
"random.random",
"time.time"
] | [((580, 605), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (595, 605), False, 'import os\n'), ((2679, 2722), 'os.path.join', 'os.path.join', (['LOCAL', '"""samples"""', 'config_path'], {}), "(LOCAL, 'samples', config_path)\n", (2691, 2722), False, 'import os\n'), ((4287, 4334), 'os.path.join', 'os.path.join', (['LOCAL', '"""samples"""', '_input_filename'], {}), "(LOCAL, 'samples', _input_filename)\n", (4299, 4334), False, 'import os\n'), ((4377, 4408), 'os.path.exists', 'os.path.exists', (['_input_filename'], {}), '(_input_filename)\n', (4391, 4408), False, 'import os\n'), ((4931, 4942), 'time.time', 'time.time', ([], {}), '()\n', (4940, 4942), False, 'import time\n'), ((4964, 4979), 'random.random', 'random.random', ([], {}), '()\n', (4977, 4979), False, 'import random\n'), ((5044, 5064), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (5062, 5064), False, 'import socket\n'), ((5237, 5254), 'hashlib.md5', 'hashlib.md5', (['_uid'], {}), '(_uid)\n', (5248, 5254), False, 'import hashlib\n'), ((4474, 4507), 'os.path.basename', 'os.path.basename', (['_input_filename'], {}), '(_input_filename)\n', (4490, 4507), False, 'import os\n'), ((5123, 5138), 'random.random', 'random.random', ([], {}), '()\n', (5136, 5138), False, 'import random\n')] |
"""Add anonymous flags
Revision ID: 4ea0403733dc
Revises: 5<PASSWORD>
Create Date: 2016-12-05 14:04:39.239593
"""
# revision identifiers, used by Alembic.
revision = '4ea0403733dc'
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a non-nullable ``is_anonymous`` flag to bundle and worksheet.

    NOTE(review): no server_default is supplied; on backends that enforce
    NOT NULL during ALTER this can fail for non-empty tables — confirm the
    target DB tolerates it (or that the tables are empty when migrating).
    """
    op.add_column('bundle', sa.Column('is_anonymous', sa.Boolean(), nullable=False))
    op.add_column('worksheet', sa.Column('is_anonymous', sa.Boolean(), nullable=False))
def downgrade():
    """Drop the ``is_anonymous`` columns added by ``upgrade``."""
    op.drop_column('worksheet', 'is_anonymous')
    op.drop_column('bundle', 'is_anonymous')
| [
"sqlalchemy.Boolean",
"alembic.op.drop_column"
] | [((469, 512), 'alembic.op.drop_column', 'op.drop_column', (['"""worksheet"""', '"""is_anonymous"""'], {}), "('worksheet', 'is_anonymous')\n", (483, 512), False, 'from alembic import op\n'), ((517, 557), 'alembic.op.drop_column', 'op.drop_column', (['"""bundle"""', '"""is_anonymous"""'], {}), "('bundle', 'is_anonymous')\n", (531, 557), False, 'from alembic import op\n'), ((327, 339), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (337, 339), True, 'import sqlalchemy as sa\n'), ((415, 427), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (425, 427), True, 'import sqlalchemy as sa\n')] |
#!/usr/bin/env python3
import dnsmonitor
import json
import os
from base64 import b64decode
import boto3
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def decrypt_environment(env=os.environ):
    """Decrypt every ``*_ENC`` entry in *env* in place via AWS KMS.

    For each key ending in ``_ENC``, the base64 ciphertext is decrypted
    with KMS and stored back under the same key without the ``_ENC``
    suffix.  Defaults to mutating the real process environment.
    """
    kms = boto3.client("kms")
    suffix = "_ENC"
    # Iterate over a snapshot of the keys since env is mutated in the loop.
    for key in env.copy().keys():
        if not key.endswith(suffix):
            continue
        plain_key = key[:-len(suffix)]
        print("Decrypting %s as %s:" % (key, plain_key))
        ciphertext = b64decode(env[key])
        env[plain_key] = kms.decrypt(CiphertextBlob=ciphertext)["Plaintext"].decode("utf-8")
def lambda_handler(event, context):
    """AWS Lambda entry point: diff current DNS state against the stored one.

    Fetches fresh DNS data, compares it with the previous snapshot stored
    in S3, persists the new snapshot, and ships any differences to
    Sumo Logic and/or Slack when those integrations are configured.

    :param event: Lambda invocation event (unused).
    :param context: Lambda runtime context (unused).
    :return: None
    """
    logger.info("Starting lambda...")
    # Decrypt encrypted environment vars
    env = os.environ.copy()
    decrypt_environment(env)
    new = dnsmonitor.DNSMonitor(env=env)
    new.run()
    old = dnsmonitor.DNSMonitor(env=env)
    try:
        old.load_from_s3(os.environ["AWS_BUCKET_NAME"], os.environ["AWS_OBJECT_PATH"])
    except Exception:
        # First run (or unreadable snapshot): diff against the fresh data
        # so no spurious changes are reported.  The original bare except
        # also swallowed SystemExit/KeyboardInterrupt; Exception is enough.
        logger.exception("Old dns file not found for lambda")
        old = new
    new.save_to_s3(os.environ["AWS_BUCKET_NAME"], os.environ["AWS_OBJECT_PATH"])
    # Check for changes
    differ = dnsmonitor.DNSMonitor_diff(new=new, old=old, env=env)
    differ.run()
    # Ship out the info!
    if os.environ.get("SUMO_HTTP_ENDPOINT") is not None:
        differ.to_sumologic()
    if os.environ.get("SLACK_WEBHOOK") is not None:
        differ.to_slack()
    print("Lambda done!")
    return None
def main():
    """CLI entry point: diff DNS state against the local JSON snapshot."""
    current = dnsmonitor.DNSMonitor()
    current.run()
    previous = dnsmonitor.DNSMonitor()
    previous.load_from_file("dnsmonitor.json")
    # Compute the delta between the two snapshots.
    differ = dnsmonitor.DNSMonitor_diff(new=current, old=previous)
    differ.run()
    # Debug output: echo every detected change.
    for change in differ.changes:
        print(json.loads(change))
    # Ship the results to any configured integrations.
    if os.environ.get("SUMO_HTTP_ENDPOINT") is not None:
        differ.to_sumologic()
    if os.environ.get("SLACK_WEBHOOK") is not None:
        differ.to_slack()
    # Persist this run for the next comparison.
    current.save_to_file("dnsmonitor.json")
if __name__ == "__main__":
main()
| [
"logging.getLogger",
"json.loads",
"boto3.client",
"os.environ.get",
"base64.b64decode",
"os.environ.copy",
"dnsmonitor.DNSMonitor",
"dnsmonitor.DNSMonitor_diff",
"logging.error"
] | [((130, 149), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (147, 149), False, 'import logging\n'), ((233, 252), 'boto3.client', 'boto3.client', (['"""kms"""'], {}), "('kms')\n", (245, 252), False, 'import boto3\n'), ((648, 665), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (663, 665), False, 'import os\n'), ((706, 736), 'dnsmonitor.DNSMonitor', 'dnsmonitor.DNSMonitor', ([], {'env': 'env'}), '(env=env)\n', (727, 736), False, 'import dnsmonitor\n'), ((762, 792), 'dnsmonitor.DNSMonitor', 'dnsmonitor.DNSMonitor', ([], {'env': 'env'}), '(env=env)\n', (783, 792), False, 'import dnsmonitor\n'), ((1098, 1151), 'dnsmonitor.DNSMonitor_diff', 'dnsmonitor.DNSMonitor_diff', ([], {'new': 'new', 'old': 'old', 'env': 'env'}), '(new=new, old=old, env=env)\n', (1124, 1151), False, 'import dnsmonitor\n'), ((1427, 1450), 'dnsmonitor.DNSMonitor', 'dnsmonitor.DNSMonitor', ([], {}), '()\n', (1448, 1450), False, 'import dnsmonitor\n'), ((1475, 1498), 'dnsmonitor.DNSMonitor', 'dnsmonitor.DNSMonitor', ([], {}), '()\n', (1496, 1498), False, 'import dnsmonitor\n'), ((1579, 1623), 'dnsmonitor.DNSMonitor_diff', 'dnsmonitor.DNSMonitor_diff', ([], {'new': 'new', 'old': 'old'}), '(new=new, old=old)\n', (1605, 1623), False, 'import dnsmonitor\n'), ((1202, 1238), 'os.environ.get', 'os.environ.get', (['"""SUMO_HTTP_ENDPOINT"""'], {}), "('SUMO_HTTP_ENDPOINT')\n", (1216, 1238), False, 'import os\n'), ((1289, 1320), 'os.environ.get', 'os.environ.get', (['"""SLACK_WEBHOOK"""'], {}), "('SLACK_WEBHOOK')\n", (1303, 1320), False, 'import os\n'), ((1707, 1725), 'json.loads', 'json.loads', (['change'], {}), '(change)\n', (1717, 1725), False, 'import json\n'), ((1776, 1812), 'os.environ.get', 'os.environ.get', (['"""SUMO_HTTP_ENDPOINT"""'], {}), "('SUMO_HTTP_ENDPOINT')\n", (1790, 1812), False, 'import os\n'), ((1863, 1894), 'os.environ.get', 'os.environ.get', (['"""SLACK_WEBHOOK"""'], {}), "('SLACK_WEBHOOK')\n", (1877, 1894), False, 'import os\n'), ((909, 959), 'logging.error', 
'logging.error', (['"""Old dns file not found for lambda"""'], {}), "('Old dns file not found for lambda')\n", (922, 959), False, 'import logging\n'), ((440, 460), 'base64.b64decode', 'b64decode', (['env[item]'], {}), '(env[item])\n', (449, 460), False, 'from base64 import b64decode\n')] |
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import plotly.express as px
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
from newspaper import Article
import sys
# Make the local question_answering package importable (relative to the
# working directory this app is launched from).
module_path = './question_answering'
if module_path not in sys.path:
    sys.path.append(module_path)
from question_answering_inference_utility import question_answering
# ------------------------------ LAYOUT CONTAINERS ------------------------------
# The main page will be divided in two columns.
# We'll define them right away.
# 1st column:
col_1 = html.Div(
    className='column-left',
    children=[
        html.Div([
            # Hint shown above the URL field:
            html.B(
                children='The article must be in English.'
            ),
            dcc.Input(
                id="input_url",
                type="url",
                placeholder="Source URL",
                required=True,
                style=dict(
                    width='100%'
                )
            )
        ]),
        # Radiobuttons for choosing parser:
        html.Div([
            html.Div('Parser:'),
            dcc.RadioItems(
                id='radiobuttons-parser',
                options=[
                    {'label': "lxml: newspaper3k's default parser", 'value': 'lxml'},
                    {'label': "html.parser: Python's default parser",
                     'value': 'html.parser'},
                    {'label': 'html5lib: Extremely lenient but quite slow',
                     'value': 'html5lib'},
                ],
                value='lxml'
            )
        ]),
        # Button to extract text from the given URL:
        html.Button(
            id='extract-button-state',
            n_clicks=0,
            children='Extract',
            style=dict(
                width='100%'
            )
        ),
        # Status/debug output of the extract callback:
        html.Div(id='output-state-extract')
    ]
)
# 2nd column:
col_2 = html.Div(
    className='column-right',
    children=[
        # Read-only text area showing the text extracted from the URL
        # (populated by the extract callback below):
        html.Div('Extracted text:'),
        dcc.Textarea(
            id='textarea-processed-url',
            className='extracted-text',
            value='Textarea content initialized\nwith multiple lines of text',
            persistence=True,
            readOnly=True,
        )]
)
# Input question container:
input_question_container = html.Div(
    children=[
        html.Div(
            style={'margin-bottom': '10px'},
            children=[
                # Free-text question over the extracted article:
                html.Div('Question:'),
                dcc.Input(
                    id="input-question",
                    type='text',
                    placeholder="Please enter your question.",
                    style=dict(
                        width='100%',
                    )
                )
            ]
        ),
        # Checkbutton for filtering numbers (passed through to the QA
        # helper as ``filter_numbers``):
        dcc.Checklist(
            id='checkbutton',
            options=[
                {'label': 'Filter numbers', 'value': '1'},
            ],
            value=['1']
        ),
    ]
)
# Submit button container:
submit_button_container = html.Div(
    children=[
        html.Div(
            className='center',
            children=[
                # Triggers the question-answering callback:
                html.Button(
                    id='submit-button-state',
                    className='submit-button',
                    n_clicks=0,
                    children='Submit')
            ],
            style=dict(
                margin='20px'
            )
        ),
        # Answer/debug output of the submit callback:
        html.Div(id='output-state-submit')
    ]
)
# ------------------------------ FINAL LAYOUT ------------------------------
# Full page layout: title, two-column extraction row, then the QA inputs.
# NOTE(review): ``layout`` is never assigned to ``app.layout`` in this
# chunk — presumably wired up where this module is imported; confirm.
layout = html.Div(
    className='main',
    children=[
        html.H1(
            children='T5 Text Extraction Demo',
            style={'textAlign': 'center'}
        ),
        html.Div(
            className="row",
            children=[
                col_1,
                col_2
            ]
        ),
        # Horizontal separator:
        html.Hr(),
        input_question_container,
        submit_button_container
    ]
)
# ------------------------------ CALLBACKS ------------------------------
# Callback for when the Extract button has been pressed:
@app.callback(
    [
        Output('output-state-extract', 'children'),
        Output('textarea-processed-url', 'value')
    ],
    [
        Input('extract-button-state', 'n_clicks')
    ],
    [
        State('input_url', 'value'),
        State('radiobuttons-parser', 'value')
    ]
)
def update_output_extract_button(n_clicks, url, parser):
    """Download *url* and return (status message, extracted article text).

    Validates the URL with a regex, then extracts the article either via
    newspaper3k (``lxml``) or by fetching the page and stripping markup
    with BeautifulSoup using the selected parser.

    NOTE(review): ``app`` is not defined/imported in this chunk —
    presumably provided by the module that assembles the Dash app; confirm.
    NOTE(review): this function shares its name with the submit callback
    below; Dash registers callbacks at decoration time so both still run,
    but the later definition shadows this one at module level.
    """
    children = ''
    # Web scrapping
    if url is None:
        return children, ''
    else:
        regex_url = r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)"
        if re.match(regex_url, url) is None:
            children += '\nIncorrect url!'
            return children, ''
        else:
            # newspaper3k handles download, parsing and keyword extraction.
            article = Article(url=url, language='en')
            article.download()
            article.parse()
            title = article.title
            article.nlp()
            keywords = article.keywords
            if parser=='lxml':
                extracted_txt = article.text
            else:
                # Non-default parser: fetch ourselves and strip the markup.
                resp = requests.get(url)
                html_doc = resp.text
                soup = BeautifulSoup(html_doc, 'html.parser')
                extracted_txt = re.sub(r'(\n| )+', ' ', soup.text)
            children += f'Title: {title}, keywords: {keywords}, parser: {parser}'
            return children, extracted_txt
# Callback for when the Submit button has been pressed:
@app.callback(
    Output('output-state-submit', 'children'),
    [
        Input('submit-button-state', 'n_clicks')
    ],
    [
        State('textarea-processed-url', 'value'),
        State('input-question', 'value'),
        State('checkbutton', 'value')
    ]
)
def update_output_extract_button(n_clicks, processed_txt, question, checkbutton_value):
    """Run question answering over the previously extracted text.

    Returns a list of Dash paragraphs with the question, answer, the
    supporting paragraph and the model probability; or a hint when the
    text/question is missing.

    NOTE(review): duplicates the name of the extract callback above —
    Dash registers at decoration time so both work, but renaming one
    would avoid the module-level shadowing.
    """
    # Question answering:
    if n_clicks >= 1:
        if len(processed_txt) == 0:
            return 'Please enter a proper URL.'
        elif question is None:
            return 'Please enter a question.'
        else:
            answer, paragraph, prob = question_answering(
                query=question, text=processed_txt, filter_numbers=checkbutton_value)
            children = [
                html.P(f'Asked question: "{question}"'),
                html.P(f'Answer: {answer}'),
                html.P(f'Paragraph: {paragraph}'),
                html.P(f'Probability: {prob}')
            ]
            return children
    else:
        return ''
| [
"dash_html_components.Button",
"dash_core_components.Textarea",
"dash.dependencies.Input",
"newspaper.Article",
"question_answering_inference_utility.question_answering",
"sys.path.append",
"dash_html_components.Div",
"dash.dependencies.Output",
"dash_html_components.Hr",
"dash.dependencies.State"... | [((373, 401), 'sys.path.append', 'sys.path.append', (['module_path'], {}), '(module_path)\n', (388, 401), False, 'import sys\n'), ((5883, 5924), 'dash.dependencies.Output', 'Output', (['"""output-state-submit"""', '"""children"""'], {}), "('output-state-submit', 'children')\n", (5889, 5924), False, 'from dash.dependencies import Input, Output, State\n'), ((4445, 4487), 'dash.dependencies.Output', 'Output', (['"""output-state-extract"""', '"""children"""'], {}), "('output-state-extract', 'children')\n", (4451, 4487), False, 'from dash.dependencies import Input, Output, State\n'), ((4497, 4538), 'dash.dependencies.Output', 'Output', (['"""textarea-processed-url"""', '"""value"""'], {}), "('textarea-processed-url', 'value')\n", (4503, 4538), False, 'from dash.dependencies import Input, Output, State\n'), ((4560, 4601), 'dash.dependencies.Input', 'Input', (['"""extract-button-state"""', '"""n_clicks"""'], {}), "('extract-button-state', 'n_clicks')\n", (4565, 4601), False, 'from dash.dependencies import Input, Output, State\n'), ((4623, 4650), 'dash.dependencies.State', 'State', (['"""input_url"""', '"""value"""'], {}), "('input_url', 'value')\n", (4628, 4650), False, 'from dash.dependencies import Input, Output, State\n'), ((4660, 4697), 'dash.dependencies.State', 'State', (['"""radiobuttons-parser"""', '"""value"""'], {}), "('radiobuttons-parser', 'value')\n", (4665, 4697), False, 'from dash.dependencies import Input, Output, State\n'), ((5940, 5980), 'dash.dependencies.Input', 'Input', (['"""submit-button-state"""', '"""n_clicks"""'], {}), "('submit-button-state', 'n_clicks')\n", (5945, 5980), False, 'from dash.dependencies import Input, Output, State\n'), ((6002, 6042), 'dash.dependencies.State', 'State', (['"""textarea-processed-url"""', '"""value"""'], {}), "('textarea-processed-url', 'value')\n", (6007, 6042), False, 'from dash.dependencies import Input, Output, State\n'), ((6052, 6084), 'dash.dependencies.State', 'State', 
(['"""input-question"""', '"""value"""'], {}), "('input-question', 'value')\n", (6057, 6084), False, 'from dash.dependencies import Input, Output, State\n'), ((6094, 6123), 'dash.dependencies.State', 'State', (['"""checkbutton"""', '"""value"""'], {}), "('checkbutton', 'value')\n", (6099, 6123), False, 'from dash.dependencies import Input, Output, State\n'), ((2016, 2051), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""output-state-extract"""'}), "(id='output-state-extract')\n", (2024, 2051), True, 'import dash_html_components as html\n'), ((2205, 2232), 'dash_html_components.Div', 'html.Div', (['"""Extracted text:"""'], {}), "('Extracted text:')\n", (2213, 2232), True, 'import dash_html_components as html\n'), ((2243, 2424), 'dash_core_components.Textarea', 'dcc.Textarea', ([], {'id': '"""textarea-processed-url"""', 'className': '"""extracted-text"""', 'value': '"""Textarea content initialized\nwith multiple lines of text"""', 'persistence': '(True)', 'readOnly': '(True)'}), '(id=\'textarea-processed-url\', className=\'extracted-text\', value\n ="""Textarea content initialized\nwith multiple lines of text""",\n persistence=True, readOnly=True)\n', (2255, 2424), True, 'import dash_core_components as dcc\n'), ((3049, 3150), 'dash_core_components.Checklist', 'dcc.Checklist', ([], {'id': '"""checkbutton"""', 'options': "[{'label': 'Filter numbers', 'value': '1'}]", 'value': "['1']"}), "(id='checkbutton', options=[{'label': 'Filter numbers',\n 'value': '1'}], value=['1'])\n", (3062, 3150), True, 'import dash_core_components as dcc\n'), ((3711, 3745), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""output-state-submit"""'}), "(id='output-state-submit')\n", (3719, 3745), True, 'import dash_html_components as html\n'), ((3899, 3973), 'dash_html_components.H1', 'html.H1', ([], {'children': '"""T5 Text Extraction Demo"""', 'style': "{'textAlign': 'center'}"}), "(children='T5 Text Extraction Demo', style={'textAlign': 'center'})\n", (3906, 3973), True, 
'import dash_html_components as html\n'), ((4018, 4068), 'dash_html_components.Div', 'html.Div', ([], {'className': '"""row"""', 'children': '[col_1, col_2]'}), "(className='row', children=[col_1, col_2])\n", (4026, 4068), True, 'import dash_html_components as html\n'), ((4191, 4200), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (4198, 4200), True, 'import dash_html_components as html\n'), ((4997, 5021), 're.match', 're.match', (['regex_url', 'url'], {}), '(regex_url, url)\n', (5005, 5021), False, 'import re\n'), ((5143, 5174), 'newspaper.Article', 'Article', ([], {'url': 'url', 'language': '"""en"""'}), "(url=url, language='en')\n", (5150, 5174), False, 'from newspaper import Article\n'), ((5482, 5499), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5494, 5499), False, 'import requests\n'), ((5560, 5598), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_doc', '"""html.parser"""'], {}), "(html_doc, 'html.parser')\n", (5573, 5598), False, 'from bs4 import BeautifulSoup\n'), ((5631, 5665), 're.sub', 're.sub', (['"""(\\\\n| )+"""', '""" """', 'soup.text'], {}), "('(\\\\n| )+', ' ', soup.text)\n", (5637, 5665), False, 'import re\n'), ((6499, 6592), 'question_answering_inference_utility.question_answering', 'question_answering', ([], {'query': 'question', 'text': 'processed_txt', 'filter_numbers': 'checkbutton_value'}), '(query=question, text=processed_txt, filter_numbers=\n checkbutton_value)\n', (6517, 6592), False, 'from question_answering_inference_utility import question_answering\n'), ((805, 855), 'dash_html_components.B', 'html.B', ([], {'children': '"""The article must be in English."""'}), "(children='The article must be in English.')\n", (811, 855), True, 'import dash_html_components as html\n'), ((1225, 1244), 'dash_html_components.Div', 'html.Div', (['"""Parser:"""'], {}), "('Parser:')\n", (1233, 1244), True, 'import dash_html_components as html\n'), ((1259, 1559), 'dash_core_components.RadioItems', 'dcc.RadioItems', ([], {'id': 
'"""radiobuttons-parser"""', 'options': '[{\'label\': "lxml: newspaper3k\'s default parser", \'value\': \'lxml\'}, {\'label\':\n "html.parser: Python\'s default parser", \'value\': \'html.parser\'}, {\n \'label\': \'html5lib: Extremely lenient but quite slow\', \'value\': \'html5lib\'}\n ]', 'value': '"""lxml"""'}), '(id=\'radiobuttons-parser\', options=[{\'label\':\n "lxml: newspaper3k\'s default parser", \'value\': \'lxml\'}, {\'label\':\n "html.parser: Python\'s default parser", \'value\': \'html.parser\'}, {\n \'label\': \'html5lib: Extremely lenient but quite slow\', \'value\':\n \'html5lib\'}], value=\'lxml\')\n', (1273, 1559), True, 'import dash_core_components as dcc\n'), ((6647, 6686), 'dash_html_components.P', 'html.P', (['f"""Asked question: "{question}\\""""'], {}), '(f\'Asked question: "{question}"\')\n', (6653, 6686), True, 'import dash_html_components as html\n'), ((6704, 6731), 'dash_html_components.P', 'html.P', (['f"""Answer: {answer}"""'], {}), "(f'Answer: {answer}')\n", (6710, 6731), True, 'import dash_html_components as html\n'), ((6749, 6782), 'dash_html_components.P', 'html.P', (['f"""Paragraph: {paragraph}"""'], {}), "(f'Paragraph: {paragraph}')\n", (6755, 6782), True, 'import dash_html_components as html\n'), ((6800, 6830), 'dash_html_components.P', 'html.P', (['f"""Probability: {prob}"""'], {}), "(f'Probability: {prob}')\n", (6806, 6830), True, 'import dash_html_components as html\n'), ((2672, 2693), 'dash_html_components.Div', 'html.Div', (['"""Question:"""'], {}), "('Question:')\n", (2680, 2693), True, 'import dash_html_components as html\n'), ((3407, 3506), 'dash_html_components.Button', 'html.Button', ([], {'id': '"""submit-button-state"""', 'className': '"""submit-button"""', 'n_clicks': '(0)', 'children': '"""Submit"""'}), "(id='submit-button-state', className='submit-button', n_clicks=0,\n children='Submit')\n", (3418, 3506), True, 'import dash_html_components as html\n')] |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from dataflow.uc.adapter.spark_code_operation_adapter import SparkCodeOperationAdapter
from dataflow.uc.adapter.spark_structured_streaming_code_operation_adapter import (
SparkStructuredStreamingCodeOperationAdapter,
)
from dataflow.uc.adapter.tensorflow_code_operation_adapter import TensorFlowCodeOperationAdapter
from dataflow.uc.exceptions.comp_exceptions import IllegalArgumentException
from dataflow.uc.settings import UnifiedComputingJobType
# Registry mapping unified-computing job-type values to adapter instances
# (the adapters are created once at import time and shared by all lookups).
OPERATION_ADAPTERS = {
    UnifiedComputingJobType.SPARK_STRUCTURED_STREAMING_CODE.value: SparkStructuredStreamingCodeOperationAdapter(),
    UnifiedComputingJobType.SPARK_CODE.value: SparkCodeOperationAdapter(),
    UnifiedComputingJobType.TENSORFLOW_CODE.value: TensorFlowCodeOperationAdapter(),
}
def get_job_operation_adapter(job_type):
    """Return the operation adapter registered for *job_type*.

    :param job_type: a job-type string matching a ``UnifiedComputingJobType`` value.
    :raises IllegalArgumentException: if no adapter is registered for the type.
    """
    adapter = OPERATION_ADAPTERS.get(job_type)
    if adapter:
        return adapter
    raise IllegalArgumentException("Not support the job type %s" % job_type)
| [
"dataflow.uc.adapter.spark_code_operation_adapter.SparkCodeOperationAdapter",
"dataflow.uc.adapter.tensorflow_code_operation_adapter.TensorFlowCodeOperationAdapter",
"dataflow.uc.adapter.spark_structured_streaming_code_operation_adapter.SparkStructuredStreamingCodeOperationAdapter",
"dataflow.uc.exceptions.co... | [((1918, 1964), 'dataflow.uc.adapter.spark_structured_streaming_code_operation_adapter.SparkStructuredStreamingCodeOperationAdapter', 'SparkStructuredStreamingCodeOperationAdapter', ([], {}), '()\n', (1962, 1964), False, 'from dataflow.uc.adapter.spark_structured_streaming_code_operation_adapter import SparkStructuredStreamingCodeOperationAdapter\n'), ((2012, 2039), 'dataflow.uc.adapter.spark_code_operation_adapter.SparkCodeOperationAdapter', 'SparkCodeOperationAdapter', ([], {}), '()\n', (2037, 2039), False, 'from dataflow.uc.adapter.spark_code_operation_adapter import SparkCodeOperationAdapter\n'), ((2092, 2124), 'dataflow.uc.adapter.tensorflow_code_operation_adapter.TensorFlowCodeOperationAdapter', 'TensorFlowCodeOperationAdapter', ([], {}), '()\n', (2122, 2124), False, 'from dataflow.uc.adapter.tensorflow_code_operation_adapter import TensorFlowCodeOperationAdapter\n'), ((2272, 2338), 'dataflow.uc.exceptions.comp_exceptions.IllegalArgumentException', 'IllegalArgumentException', (["('Not support the job type %s' % job_type)"], {}), "('Not support the job type %s' % job_type)\n", (2296, 2338), False, 'from dataflow.uc.exceptions.comp_exceptions import IllegalArgumentException\n')] |
import os
import numpy as np
from tqdm.notebook import tqdm
from deepnote import MusicRepr
from deepnote import DEFAULT_UNIT
from joblib import delayed, Parallel
import torch
from torch.utils.data import random_split, Dataset, DataLoader
def get_dataloaders(dataset,
                    n_jobs=2,
                    batch_size=64,
                    val_frac=0.2):
    """Split *dataset* into train/validation subsets and wrap both in DataLoaders.

    The dataset is expected to expose a ``fn`` method used as the collate
    function.  The training loader shuffles; the validation loader does not.
    Returns ``(train_loader, val_loader)``.
    """
    total = len(dataset)
    n_val = int(total * val_frac)
    n_train = total - n_val
    train_split, val_split = random_split(dataset, [n_train, n_val])
    loaders = [
        DataLoader(dataset=split, batch_size=batch_size, shuffle=shuffle,
                   num_workers=n_jobs, collate_fn=dataset.fn)
        for split, shuffle in ((train_split, True), (val_split, False))
    ]
    print('train dataset has {} samples and val dataset has {} samples.'.format(n_train, n_val))
    return loaders[0], loaders[1]
def load_midi(file, unit=DEFAULT_UNIT, instruments=None):
    """Load a MIDI *file* into a MusicRepr sequence.

    When *instruments* is None the whole sequence is returned.  Otherwise the
    sequence is filtered to the requested instruments; ``None`` is returned if
    none of them appear in the file.  The filtered result is a compound-word
    (cp) array terminated by an end-of-sequence row ``[2, 0, 0, 0, 0, 0, 0, 0]``.
    """
    seq = MusicRepr.from_file(file, unit=unit)
    if instruments is None:
        return seq
    present = set(seq.get_instruments())
    if not set(instruments) & present:
        return None
    tracks = seq.separate_tracks()
    selected = {inst: tracks[inst] for inst in instruments if inst in tracks}
    eos_row = np.array([[2] + [0] * 7])
    merged = MusicRepr.merge_tracks(selected).to_cp()
    return np.concatenate([merged, eos_row], axis=0)
class LMDataset(Dataset):
    """Sliding-window language-model dataset over MIDI files.

    Each file in *data_dir* ending in ``.mid`` is loaded (in parallel) via
    ``load_midi``; every sample then contributes ``max(1, len(sample) - max_len)``
    overlapping windows.  ``__getitem__`` returns ``(x, y)`` where *y* is *x*
    shifted by one position (next-token prediction).
    """
    def __init__(
        self,
        data_dir,
        max_files=100,
        unit=DEFAULT_UNIT,
        instruments:list=None,
        max_len=256,
        n_jobs=2,
        masked=False,
        p_mask=0.2
    ):
        super().__init__()
        ## load samples
        # Only .mid files, capped at max_files, loaded concurrently; load_midi
        # returns None for files with no matching instrument, filtered out here.
        files = list(filter(lambda x: x.endswith('.mid'), os.listdir(data_dir)))[:max_files]
        self.samples = list(
            filter(
                lambda x: x is not None,
                Parallel(n_jobs=n_jobs)(delayed(load_midi)(data_dir + file, unit, instruments) for file in tqdm(files))
            )
        )
        if instruments is None:
            # Collect the union of instruments actually present in the data.
            # NOTE(review): load_midi calls seq.get_instruments() (plural);
            # confirm samp.get_instrument() is the intended API here.
            instruments = set()
            for samp in self.samples:
                instruments.update(samp.get_instrument())
            self.instruments = list(instruments)
        else:
            self.instruments = instruments
        self.max_len = max_len
        self.masked = masked
        self.p_mask = p_mask
        # lens[i]: number of windows contributed by sample i (at least 1, even
        # for samples shorter than max_len).  cum_lens is the running total used
        # to map a flat dataset index back to (sample, offset).
        self.lens = [max(1, len(samp) - max_len) for samp in self.samples]
        self.cum_lens = [0] + [sum(self.lens[:i+1]) for i in range(len(self.samples))]
    def __len__(self):
        # Total number of windows across all samples.
        return self.cum_lens[-1]
    def get_idx(self, idx):
        """Map flat index *idx* to ``(sample_index, offset)``; ``(-1, -1)`` if out of range."""
        for i, cl in enumerate(self.cum_lens):
            if idx < cl:
                return i-1, idx - self.cum_lens[i-1]
        return -1, -1
    def __getitem__(self, idx):
        """Return the ``(input, target)`` window pair at flat index *idx*."""
        samp_idx, offset = self.get_idx(idx)
        if samp_idx > -1:
            # y is x shifted right by one step (may be one element shorter at
            # the end of a sample).
            x = np.array(self.samples[samp_idx][offset : offset + self.max_len])
            y = np.array(self.samples[samp_idx][offset + 1 : offset + self.max_len + 1])
            return x, y
        raise Exception('Wrong index for the dataset.')
    def mask(self, x):
        # Masking is advertised via the constructor but not implemented yet.
        if self.masked:
            raise NotImplementedError
        return x
    def fn(self, batch):
        """Collate: right-pad sequences to the batch max length and stack.

        Returns a dict with padded inputs ``X``, original lengths ``X_len`` and
        padded targets ``labels``.
        """
        X = []
        Y = []
        for b in batch:
            x, y = b
            X += [x]
            Y += [y]
        x_len = torch.tensor([x.shape[0] for x in X])
        M = max(x_len)
        res = {
            'X': torch.tensor([np.pad(x, ((0, M - x.shape[0]), (0,0))) for x in X]),
            'X_len': x_len,
            'labels': torch.tensor([np.pad(y, ((0, M - y.shape[0]), (0,0))) for y in Y])
        }
        return res
| [
"deepnote.MusicRepr.from_file",
"os.listdir",
"torch.utils.data.random_split",
"deepnote.MusicRepr.merge_tracks",
"joblib.Parallel",
"numpy.array",
"torch.tensor",
"torch.utils.data.DataLoader",
"joblib.delayed",
"numpy.pad",
"tqdm.notebook.tqdm"
] | [((453, 486), 'torch.utils.data.random_split', 'random_split', (['dataset', '[n - v, v]'], {}), '(dataset, [n - v, v])\n', (465, 486), False, 'from torch.utils.data import random_split, Dataset, DataLoader\n'), ((506, 623), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'n_jobs', 'collate_fn': 'dataset.fn'}), '(dataset=train_dataset, batch_size=batch_size, shuffle=True,\n num_workers=n_jobs, collate_fn=dataset.fn)\n', (516, 623), False, 'from torch.utils.data import random_split, Dataset, DataLoader\n'), ((637, 753), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'val_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'n_jobs', 'collate_fn': 'dataset.fn'}), '(dataset=val_dataset, batch_size=batch_size, shuffle=False,\n num_workers=n_jobs, collate_fn=dataset.fn)\n', (647, 753), False, 'from torch.utils.data import random_split, Dataset, DataLoader\n'), ((945, 981), 'deepnote.MusicRepr.from_file', 'MusicRepr.from_file', (['file'], {'unit': 'unit'}), '(file, unit=unit)\n', (964, 981), False, 'from deepnote import MusicRepr\n'), ((3412, 3449), 'torch.tensor', 'torch.tensor', (['[x.shape[0] for x in X]'], {}), '([x.shape[0] for x in X])\n', (3424, 3449), False, 'import torch\n'), ((1331, 1356), 'numpy.array', 'np.array', (['[[2] + [0] * 7]'], {}), '([[2] + [0] * 7])\n', (1339, 1356), True, 'import numpy as np\n'), ((2912, 2974), 'numpy.array', 'np.array', (['self.samples[samp_idx][offset:offset + self.max_len]'], {}), '(self.samples[samp_idx][offset:offset + self.max_len])\n', (2920, 2974), True, 'import numpy as np\n'), ((2994, 3064), 'numpy.array', 'np.array', (['self.samples[samp_idx][offset + 1:offset + self.max_len + 1]'], {}), '(self.samples[samp_idx][offset + 1:offset + self.max_len + 1])\n', (3002, 3064), True, 'import numpy as np\n'), ((1294, 1321), 'deepnote.MusicRepr.merge_tracks', 'MusicRepr.merge_tracks', (['res'], {}), 
'(res)\n', (1316, 1321), False, 'from deepnote import MusicRepr\n'), ((1741, 1761), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (1751, 1761), False, 'import os\n'), ((1883, 1906), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs'}), '(n_jobs=n_jobs)\n', (1891, 1906), False, 'from joblib import delayed, Parallel\n'), ((3520, 3560), 'numpy.pad', 'np.pad', (['x', '((0, M - x.shape[0]), (0, 0))'], {}), '(x, ((0, M - x.shape[0]), (0, 0)))\n', (3526, 3560), True, 'import numpy as np\n'), ((3638, 3678), 'numpy.pad', 'np.pad', (['y', '((0, M - y.shape[0]), (0, 0))'], {}), '(y, ((0, M - y.shape[0]), (0, 0)))\n', (3644, 3678), True, 'import numpy as np\n'), ((1907, 1925), 'joblib.delayed', 'delayed', (['load_midi'], {}), '(load_midi)\n', (1914, 1925), False, 'from joblib import delayed, Parallel\n'), ((1974, 1985), 'tqdm.notebook.tqdm', 'tqdm', (['files'], {}), '(files)\n', (1978, 1985), False, 'from tqdm.notebook import tqdm\n')] |
from algorithm_toolkit import Algorithm, AlgorithmChain
from sarpy.visualization import remap
class Main(Algorithm):
    """Remap a chip of complex SAR data to display values via sarpy's remap module."""

    # Dispatch table from the user-facing remap_type parameter to the sarpy
    # remap function implementing it.  Replaces a long if/elif chain that left
    # `pix` unbound (NameError) for any unrecognised remap_type.
    _REMAP_FUNCS = {
        'density': remap.density,
        'brighter': remap.brighter,
        'darker': remap.darker,
        'highcontrast': remap.highcontrast,
        'linear': remap.linear,
        'log': remap.log,
        'pedf': remap.pedf,
        'nrl': remap.nrl,
    }

    def run(self):
        """Read the requested (possibly decimated) chip, remap it and record it.

        Required params: ``sarpy_reader``, ``decimation``, ``remap_type``.
        Optional params ``ystart``/``yend``/``xstart``/``xend`` default to the
        full image extent taken from the reader's SICD metadata.
        """
        cl = self.cl  # type: AlgorithmChain.ChainLedger
        params = self.params  # type: dict

        ro = params['sarpy_reader']
        decimation = params['decimation']

        # Window bounds default to the full image when not supplied.
        ystart = params.get('ystart', 0)
        yend = params.get('yend', ro.sicdmeta.ImageData.NumRows)
        xstart = params.get('xstart', 0)
        xend = params.get('xend', ro.sicdmeta.ImageData.NumCols)
        remap_type = params['remap_type']

        cdata = ro.read_chip[ystart:yend:decimation, xstart:xend:decimation]

        try:
            remap_func = self._REMAP_FUNCS[remap_type]
        except KeyError:
            # Previously an unknown remap_type fell through every elif branch
            # and crashed with NameError on `pix`; fail explicitly instead.
            raise ValueError('Unsupported remap_type: %s' % remap_type) from None
        pix = remap_func(cdata)

        cl.add_to_metadata('remapped_data', pix)
        cl.add_to_metadata('decimation', decimation)

        # Do not edit below this line
        return cl
| [
"sarpy.visualization.remap.nrl",
"sarpy.visualization.remap.log",
"sarpy.visualization.remap.darker",
"sarpy.visualization.remap.pedf",
"sarpy.visualization.remap.brighter",
"sarpy.visualization.remap.highcontrast",
"sarpy.visualization.remap.linear",
"sarpy.visualization.remap.density"
] | [((1002, 1022), 'sarpy.visualization.remap.density', 'remap.density', (['cdata'], {}), '(cdata)\n', (1015, 1022), False, 'from sarpy.visualization import remap\n'), ((1080, 1101), 'sarpy.visualization.remap.brighter', 'remap.brighter', (['cdata'], {}), '(cdata)\n', (1094, 1101), False, 'from sarpy.visualization import remap\n'), ((1157, 1176), 'sarpy.visualization.remap.darker', 'remap.darker', (['cdata'], {}), '(cdata)\n', (1169, 1176), False, 'from sarpy.visualization import remap\n'), ((1238, 1263), 'sarpy.visualization.remap.highcontrast', 'remap.highcontrast', (['cdata'], {}), '(cdata)\n', (1256, 1263), False, 'from sarpy.visualization import remap\n'), ((1319, 1338), 'sarpy.visualization.remap.linear', 'remap.linear', (['cdata'], {}), '(cdata)\n', (1331, 1338), False, 'from sarpy.visualization import remap\n'), ((1391, 1407), 'sarpy.visualization.remap.log', 'remap.log', (['cdata'], {}), '(cdata)\n', (1400, 1407), False, 'from sarpy.visualization import remap\n'), ((1461, 1478), 'sarpy.visualization.remap.pedf', 'remap.pedf', (['cdata'], {}), '(cdata)\n', (1471, 1478), False, 'from sarpy.visualization import remap\n'), ((1531, 1547), 'sarpy.visualization.remap.nrl', 'remap.nrl', (['cdata'], {}), '(cdata)\n', (1540, 1547), False, 'from sarpy.visualization import remap\n')] |
from datetime import datetime, timedelta
from typing import List, Dict, Any
from zoneinfo import ZoneInfo
from tests.test_types_generator import create_account, create_audit
from src.compliance.iam_compliance import IamCompliance
from src.data.account import Account
from src.data.findings import Findings
# Finding text the compliance checker attaches to access keys past the 30-day limit.
EXPECTED_OLD_KEY_VIOLATION = "key is older than 30 days"
# Timezone used to build timezone-aware key timestamps throughout these tests.
UTC = ZoneInfo("UTC")
def test_no_violations() -> None:
    """Keys newer than 30 days yield findings with an empty violation set."""
    now = datetime.now(tz=UTC)
    good_keys = [
        {
            "id": "key1_id",
            "user_name": "test_user1",
            "created": now - timedelta(days=29),
        },
        {
            "id": "key2_id",
            "user_name": "test_user2",
            "created": now - timedelta(days=10),
            "last_used": now - timedelta(days=2),
        },
    ]
    account = create_account()
    audit = create_audit([create_account_report(account, good_keys)])
    notifications = IamCompliance().analyse(audit)
    expected = [
        Findings(
            account=account,
            compliance_item_type="iam_access_key",
            item="key1_id",
            findings=set(),
            description="this key is `29 days old` and belongs to `test_user1`",
        ),
        Findings(
            account=account,
            compliance_item_type="iam_access_key",
            item="key2_id",
            findings=set(),
            description="this key is `10 days old`, belongs to `test_user2` and was last used 2 days ago",
        ),
    ]
    assert sorted(notifications, key=lambda f: f.item) == expected
def test_keys_older_than_30_days() -> None:
    """Keys past the 30-day limit are flagged; newer ones are not."""
    now = datetime.now(tz=UTC)
    keys_account1 = [
        {
            "id": "key1_id",
            "user_name": "test_user1_old",
            "created": now - timedelta(days=31),
        },
        {
            "id": "key2_id",
            "user_name": "test_user2_old",
            "created": now - timedelta(days=100),
            "last_used": now,
        },
    ]
    keys_account2 = [
        {
            "id": "key3_id",
            "user_name": "test_user3_good",
            "created": now - timedelta(days=1),
        },
        {
            "id": "key4_id",
            "user_name": "test_user4_old",
            "created": now - timedelta(days=999),
            "last_used": now - timedelta(days=1),
        },
    ]
    account1 = create_account()
    account2 = create_account()
    reports = [
        create_account_report(account1, keys_account1),
        create_account_report(account2, keys_account2),
    ]
    audit = create_audit(reports)
    notifications = IamCompliance().analyse(audit)
    expected = [
        Findings(
            account=account1,
            compliance_item_type="iam_access_key",
            item="key1_id",
            findings={EXPECTED_OLD_KEY_VIOLATION},
            description="this key is `31 days old` and belongs to `test_user1_old`",
        ),
        Findings(
            account=account1,
            compliance_item_type="iam_access_key",
            item="key2_id",
            findings={EXPECTED_OLD_KEY_VIOLATION},
            description="this key is `100 days old`, belongs to `test_user2_old` and was last used today",
        ),
        Findings(
            account=account2,
            compliance_item_type="iam_access_key",
            item="key3_id",
            findings=set(),
            description="this key is `1 day old` and belongs to `test_user3_good`",
        ),
        Findings(
            account=account2,
            compliance_item_type="iam_access_key",
            item="key4_id",
            findings={EXPECTED_OLD_KEY_VIOLATION},
            description="this key is `999 days old`, belongs to `test_user4_old` and was last used yesterday",
        ),
    ]
    assert sorted(notifications, key=lambda f: f.item) == expected
def create_account_report(account: Account, access_keys: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Build an ``audit_iam`` report payload for *account*.

    Datetime values in each access-key dict are serialised to ISO-8601
    strings; ``last_used`` is included only when present.
    """
    serialised_keys = []
    for key_data in access_keys:
        entry = {
            "id": key_data["id"],
            "user_name": key_data["user_name"],
            "created": key_data["created"].isoformat(),
        }
        if "last_used" in key_data:
            entry["last_used"] = key_data["last_used"].isoformat()
        serialised_keys.append(entry)
    return {
        "account": {"identifier": account.identifier, "name": account.name},
        "description": "audit_iam",
        "results": {"iam_access_keys": serialised_keys},
    }
| [
"src.data.findings.Findings",
"src.compliance.iam_compliance.IamCompliance",
"zoneinfo.ZoneInfo",
"datetime.timedelta",
"datetime.datetime.now",
"tests.test_types_generator.create_account"
] | [((372, 387), 'zoneinfo.ZoneInfo', 'ZoneInfo', (['"""UTC"""'], {}), "('UTC')\n", (380, 387), False, 'from zoneinfo import ZoneInfo\n'), ((839, 855), 'tests.test_types_generator.create_account', 'create_account', ([], {}), '()\n', (853, 855), False, 'from tests.test_types_generator import create_account, create_audit\n'), ((2434, 2450), 'tests.test_types_generator.create_account', 'create_account', ([], {}), '()\n', (2448, 2450), False, 'from tests.test_types_generator import create_account, create_audit\n'), ((2466, 2482), 'tests.test_types_generator.create_account', 'create_account', ([], {}), '()\n', (2480, 2482), False, 'from tests.test_types_generator import create_account, create_audit\n'), ((949, 964), 'src.compliance.iam_compliance.IamCompliance', 'IamCompliance', ([], {}), '()\n', (962, 964), False, 'from src.compliance.iam_compliance import IamCompliance\n'), ((1966, 1986), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'UTC'}), '(tz=UTC)\n', (1978, 1986), False, 'from datetime import datetime, timedelta\n'), ((2645, 2660), 'src.compliance.iam_compliance.IamCompliance', 'IamCompliance', ([], {}), '()\n', (2658, 2660), False, 'from src.compliance.iam_compliance import IamCompliance\n'), ((2745, 2948), 'src.data.findings.Findings', 'Findings', ([], {'account': 'account1', 'compliance_item_type': '"""iam_access_key"""', 'item': '"""key1_id"""', 'findings': '{EXPECTED_OLD_KEY_VIOLATION}', 'description': '"""this key is `31 days old` and belongs to `test_user1_old`"""'}), "(account=account1, compliance_item_type='iam_access_key', item=\n 'key1_id', findings={EXPECTED_OLD_KEY_VIOLATION}, description=\n 'this key is `31 days old` and belongs to `test_user1_old`')\n", (2753, 2948), False, 'from src.data.findings import Findings\n'), ((3019, 3249), 'src.data.findings.Findings', 'Findings', ([], {'account': 'account1', 'compliance_item_type': '"""iam_access_key"""', 'item': '"""key2_id"""', 'findings': '{EXPECTED_OLD_KEY_VIOLATION}', 'description': 
'"""this key is `100 days old`, belongs to `test_user2_old` and was last used today"""'}), "(account=account1, compliance_item_type='iam_access_key', item=\n 'key2_id', findings={EXPECTED_OLD_KEY_VIOLATION}, description=\n 'this key is `100 days old`, belongs to `test_user2_old` and was last used today'\n )\n", (3027, 3249), False, 'from src.data.findings import Findings\n'), ((3565, 3799), 'src.data.findings.Findings', 'Findings', ([], {'account': 'account2', 'compliance_item_type': '"""iam_access_key"""', 'item': '"""key4_id"""', 'findings': '{EXPECTED_OLD_KEY_VIOLATION}', 'description': '"""this key is `999 days old`, belongs to `test_user4_old` and was last used yesterday"""'}), "(account=account2, compliance_item_type='iam_access_key', item=\n 'key4_id', findings={EXPECTED_OLD_KEY_VIOLATION}, description=\n 'this key is `999 days old`, belongs to `test_user4_old` and was last used yesterday'\n )\n", (3573, 3799), False, 'from src.data.findings import Findings\n'), ((543, 563), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'UTC'}), '(tz=UTC)\n', (555, 563), False, 'from datetime import datetime, timedelta\n'), ((566, 584), 'datetime.timedelta', 'timedelta', ([], {'days': '(29)'}), '(days=29)\n', (575, 584), False, 'from datetime import datetime, timedelta\n'), ((698, 718), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'UTC'}), '(tz=UTC)\n', (710, 718), False, 'from datetime import datetime, timedelta\n'), ((721, 739), 'datetime.timedelta', 'timedelta', ([], {'days': '(10)'}), '(days=10)\n', (730, 739), False, 'from datetime import datetime, timedelta\n'), ((766, 786), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'UTC'}), '(tz=UTC)\n', (778, 786), False, 'from datetime import datetime, timedelta\n'), ((789, 806), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (798, 806), False, 'from datetime import datetime, timedelta\n'), ((1738, 1758), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'UTC'}), '(tz=UTC)\n', 
(1750, 1758), False, 'from datetime import datetime, timedelta\n'), ((1761, 1779), 'datetime.timedelta', 'timedelta', ([], {'days': '(31)'}), '(days=31)\n', (1770, 1779), False, 'from datetime import datetime, timedelta\n'), ((1897, 1917), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'UTC'}), '(tz=UTC)\n', (1909, 1917), False, 'from datetime import datetime, timedelta\n'), ((1920, 1939), 'datetime.timedelta', 'timedelta', ([], {'days': '(100)'}), '(days=100)\n', (1929, 1939), False, 'from datetime import datetime, timedelta\n'), ((2133, 2153), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'UTC'}), '(tz=UTC)\n', (2145, 2153), False, 'from datetime import datetime, timedelta\n'), ((2156, 2173), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2165, 2173), False, 'from datetime import datetime, timedelta\n'), ((2291, 2311), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'UTC'}), '(tz=UTC)\n', (2303, 2311), False, 'from datetime import datetime, timedelta\n'), ((2314, 2333), 'datetime.timedelta', 'timedelta', ([], {'days': '(999)'}), '(days=999)\n', (2323, 2333), False, 'from datetime import datetime, timedelta\n'), ((2360, 2380), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'UTC'}), '(tz=UTC)\n', (2372, 2380), False, 'from datetime import datetime, timedelta\n'), ((2383, 2400), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2392, 2400), False, 'from datetime import datetime, timedelta\n')] |
import os
from natsort import natsorted
# Batch-rename every file in a directory to "<new_name><n><ext>", numbering
# the files in natural sort order.
path_to_directory = input("Enter path to directory: ") + "/"
new_name = input("Enter new name for files: ")
try:
    for i, file_name in enumerate(natsorted(os.listdir(path_to_directory)), start=1):
        # os.path.splitext keeps only the final extension (with its dot) and
        # returns "" for extensionless names.  The previous file.split(".")[1]
        # picked the wrong segment for names like "song.v1.mid" and raised
        # IndexError for files without any dot.
        extension = os.path.splitext(file_name)[1]
        os.rename(
            path_to_directory + file_name,
            path_to_directory + new_name + str(i) + extension,
        )
except FileNotFoundError:
    print("Got incorrect directory path")
| [
"os.listdir"
] | [((195, 224), 'os.listdir', 'os.listdir', (['path_to_directory'], {}), '(path_to_directory)\n', (205, 224), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/rcv_page.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Qt Designer-generated layout for a 400x300 dialog showing a QR code.

    Auto-generated by pyuic5 from ui/rcv_page.ui — regenerate rather than
    hand-edit; any manual change will be lost on the next build.
    """
    def setupUi(self, Dialog):
        # Widget tree: a Close button (bottom right), a centered QR-code
        # placeholder label, and an address label along the top.
        Dialog.setObjectName("Dialog")
        Dialog.resize(400, 300)
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(290, 260, 98, 27))
        self.pushButton.setObjectName("pushButton")
        self.qr_label = QtWidgets.QLabel(Dialog)
        self.qr_label.setGeometry(QtCore.QRect(55, 70, 311, 161))
        self.qr_label.setAlignment(QtCore.Qt.AlignCenter)
        self.qr_label.setObjectName("qr_label")
        self.address_label = QtWidgets.QLabel(Dialog)
        self.address_label.setGeometry(QtCore.QRect(20, 10, 371, 17))
        self.address_label.setObjectName("address_label")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        # Install the translatable display strings (placeholders until the
        # application sets the real address/QR content).
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.pushButton.setText(_translate("Dialog", "Close"))
        self.qr_label.setText(_translate("Dialog", "QR CODE"))
        self.address_label.setText(_translate("Dialog", "Address"))
| [
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtCore.QRect"
] | [((394, 423), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['Dialog'], {}), '(Dialog)\n', (415, 423), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((568, 592), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['Dialog'], {}), '(Dialog)\n', (584, 592), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((794, 818), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['Dialog'], {}), '(Dialog)\n', (810, 818), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((991, 1036), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Dialog'], {}), '(Dialog)\n', (1028, 1036), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((460, 490), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(290)', '(260)', '(98)', '(27)'], {}), '(290, 260, 98, 27)\n', (472, 490), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((627, 657), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(55)', '(70)', '(311)', '(161)'], {}), '(55, 70, 311, 161)\n', (639, 657), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((858, 887), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(10)', '(371)', '(17)'], {}), '(20, 10, 371, 17)\n', (870, 887), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Woeids(models.Model):
    """Location record pairing a place/country name with a numeric WOEID.

    All fields are optional (null/blank allowed), so partially-known
    locations can be stored.
    """
    country = models.CharField(max_length=200, null=True, blank=True)
    name = models.CharField(max_length=200, null=True, blank=True)
    # presumably a Yahoo "Where On Earth ID" — confirm with the data source.
    woeid = models.IntegerField(default=0, null=True, blank=True)
    def __str__(self):
        # Space-separated "country name woeid"; None fields render as "None".
        return '%s %s %s' % (
            self.country,
            self.name,
            str(self.woeid)
        )
| [
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((151, 206), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'null': '(True)', 'blank': '(True)'}), '(max_length=200, null=True, blank=True)\n', (167, 206), False, 'from django.db import models\n'), ((218, 273), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'null': '(True)', 'blank': '(True)'}), '(max_length=200, null=True, blank=True)\n', (234, 273), False, 'from django.db import models\n'), ((286, 339), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'blank': '(True)'}), '(default=0, null=True, blank=True)\n', (305, 339), False, 'from django.db import models\n')] |
# Copyright INRIM (https://www.inrim.eu)
# See LICENSE file for full licensing details.
import copy
from copy import deepcopy
from formiodata.builder import Builder
from formiodata.form import Form
import collections
from . import custom_components
import logging
import uuid
# Module-level logger, standard getLogger(__name__) pattern.
logger = logging.getLogger(__name__)
class CustomBuilder(Builder):
    """formiodata ``Builder`` extended with project-specific rendering state.

    Carries template-engine/theme configuration, security headers, form data
    and bookkeeping lists (tables, filters, uploaders, logic components) used
    while rendering a Formio schema.  A record is considered new when the
    incoming form data has no ``rec_name``.
    """

    def __init__(self, schema_json, **kwargs):
        """Initialise builder state from keyword options, then delegate to Builder.

        All options are optional; mutable defaults taken from kwargs are
        copied so callers' dicts/lists are never mutated.
        """
        self.tmpe = kwargs.get('template_engine', False)
        self.model = kwargs.get('model')
        self.action_url = kwargs.get('action_url', "")
        self.disabled = kwargs.get('disabled', False)
        self.authtoken = kwargs.get('authtoken')
        self.theme_cfg = kwargs.get('theme_cfg', "")
        self.editable_fields = kwargs.get('editable_fields', [])
        # self.components_base_path = kwargs.get('components_base_path', False)
        self.settings = kwargs.get('settings', {}).copy()
        self.context_data = kwargs.get('context', {})
        self.security_headers = kwargs.get('security_headers', {}).copy()
        # Default table columns (order marker, checkbox, owner) shown in lists.
        self.table_colums = collections.OrderedDict({
            'list_order': 'O',
            'check': "Chk",
            'owner_name': 'Operatore',
        })
        self.rec_name = kwargs.get('rec_name', "")
        self.is_mobile = kwargs.get("is_mobile", False)
        self.tables = []
        self.filters = []
        self.default_fields = kwargs.get('default_fields', []).copy()
        self.components_ext_data_src = []
        self.html_components = []
        self.form_data = kwargs.get('form_data', {})
        self.form_data_values = {}
        self.search_areas = []
        self.uploaders = []
        self.uploaders_keys = []
        self.components_logic = []
        self.filter_keys = []
        self.components_change_ext_data_src = []
        # A record without a rec_name has not been persisted yet.
        self.new_record = False
        if self.form_data.get("rec_name", "") == "":
            self.new_record = True
        # logger.info(f"builder with security {self.security_headers}")
        super(CustomBuilder, self).__init__(schema_json, **kwargs)

    def load_components(self):
        """Instantiate the schema's component tree and add the hidden model fields."""
        self._raw_components = self.schema.get('components')
        self.raw_components = deepcopy(self.schema.get('components'))
        # schema_type = self.schema.get('type')
        self.main = self.get_component_object(self.schema)
        self.main.eval_components()
        self.set_model_field()

    def load_data(self, data):
        """Load submitted *data* into the component tree and refresh the context."""
        self.form_data = copy.deepcopy(data)
        self.main.form_data = copy.deepcopy(data)
        if "data_value" not in self.main.form_data:
            self.main.form_data['data_value'] = {}
        self.main.load_data()
        self.context_data['form'] = self.main.form_data.copy()

    def compute_data(self):
        """Run component-level data computation; new records start with empty display values."""
        self.main.compute_data()
        if self.new_record:
            self.main.form_data['data_value'] = {}

    def set_model_field(self):
        """Inject hidden bookkeeping fields (data_model, action_url, rec_name) into the form."""
        component = {}
        component['type'] = 'textfield'
        component['key'] = 'data_model'
        component['label'] = 'Data Model'
        component['hidden'] = True
        component['defaultValue'] = self.model
        model_c = self.get_component_object(component.copy())
        model_c.id = component.get('id', str(uuid.uuid4()))
        model_c.parent = self.main
        # self.form_components[component.get('key')] = model_c
        self.components[component.get('key')] = model_c
        self.main.component_items.append(model_c)
        if self.action_url:
            component = {}
            component['type'] = 'textfield'
            component['key'] = 'action_url'
            component['label'] = 'Action Url'
            component['hidden'] = True
            component['defaultValue'] = self.action_url
            model_c = self.get_component_object(component.copy())
            model_c.id = component.get('id', str(uuid.uuid4()))
            model_c.parent = self.main
            # self.form_components[component.get('key')] = model_c
            self.components[component.get('key')] = model_c
            self.main.component_items.append(model_c)
        if not self.get_component_by_key("rec_name"):
            component = {}
            component['type'] = 'textfield'
            component['key'] = 'rec_name'
            component['label'] = 'Name'
            component['hidden'] = True
            component['defaultValue'] = ""
            rec_name_c = self.get_component_object(component.copy())
            rec_name_c.id = component.get('id', str(uuid.uuid4()))
            rec_name_c.parent = self.main
            # self.form_components[component.get('key')] = model_c
            self.components[component.get('key')] = rec_name_c
            self.main.component_items.append(rec_name_c)

    def get_component_object(self, component):
        """Build the concrete component object for a raw *component* dict.

        Resolves ``custom_components.<Type>Component`` by the component's
        ``type``; falls back to the generic ``CustomComponent`` when no
        specialised class exists.
        """
        component_type = component.get('type')
        # if not component_type == "components":
        try:
            cls_name = '%sComponent' % component_type
            cls = getattr(custom_components, cls_name)
            return cls(
                component, self, language=self.language,
                i18n=self.i18n, resources=self.resources,
                resources_ext=self.resources_ext
            )
        except AttributeError:
            # No specialised class for this component type: fall back to the
            # generic component implementation.
            return custom_components.CustomComponent(component, self)
        # else:
        #     return False

    def _get_component_by_key(self, node, key):
        """Depth-first search for the component with *key* under *node*.

        Returns None when the subtree contains no match.
        """
        if node.key == key:
            return node
        if node.component_items:
            for sub_node in node.component_items:
                # BUGFIX: previously recursed via self._compute_form_data,
                # a method that does not exist on this class (AttributeError
                # whenever this helper was called on a nested tree).
                comp = self._get_component_by_key(sub_node, key)
                if comp:
                    return comp

    def get_component_by_key(self, key):
        """Direct lookup of a component by its key in the flat registry."""
        return self.components.get(key)

    def compute_components_data(self, data):
        """Refresh the rendering context from the form and reset display values."""
        self.context_data['form'] = self.main.form_data.copy()
        self.main.form_data['data_value'] = {}

    def compute_form_data_table(self):
        """Compute table display values for the whole tree, dropping row metadata."""
        data_v = self.main.compute_data_table(self.main.form_data)
        for node in self.main.component_items:
            data_v = self._compute_form_data_table(node, data_v)
        # clean metadata: internal row/value bookkeeping keys are not displayed.
        to_pop = []
        for k, v in data_v.items():
            if any(x in k for x in ["data_value", "_surveyRow_", "_dataGridRow_"]):
                to_pop.append(k)
        for x in to_pop:
            data_v.pop(x)
        self.main.form_data['data_value'] = data_v.copy()

    def _compute_form_data_table(self, node, form_data):
        """Recursively fold each node's table values into *form_data*."""
        # TODO dataGrid
        data = node.compute_data_table(form_data)
        if node.component_items:
            for sub_node in node.component_items:
                data = self._compute_form_data_table(sub_node, data)
        return data

    def clean_record_for_table_value(self, data):
        """Alias of clean_record for table-value rendering."""
        return self.clean_record(data)

    def clean_record(self, data):
        """Return a copy of *data* without the framework's default fields."""
        res = {}
        for k, v in data.items():
            if k not in self.default_fields:
                res[k] = v
        return res.copy()
class CustomForm(Form):
    """Form subclass that seeds component values from the submitted data."""

    def load_components(self):
        """Load every top-level component of the builder's main form."""
        for item in self.builder.main.component_items:
            self._load_components(item)

    def _load_components(self, component):
        """Assign the submitted value to *component* and recurse appropriately."""
        component.value = self.form.get(component.key, component.defaultValue)
        is_plain_container = not (
            component.survey or component.multi_row or component.tabs
        )
        if is_plain_container and component.component_items:
            for child in component.component_items:
                self._load_components(child)
        if component.survey:
            component.eval_components()
        if component.tabs:
            # Tab components re-evaluate against their own copy of the form data.
            component.form = self.form.copy()
            component.eval_components()
# Default UI descriptor for a form component; copied and filled per component.
COMPONENT_UI_TEMPLATE = {
    "title": "",
    "key": "",
    "icon": "plus-square-o",
    "schema": {}
}


class FormioBuilderFields:
    """Collect a per-key UI descriptor for each usable component of a Formio schema.

    Buttons and key-less components are skipped; layout components get a
    type-specific icon instead of the template default.
    """

    # Icon overrides for layout component types.
    _LAYOUT_ICONS = {
        "columns": "columns",
        "panel": "list-alt",
        "tabs": "folder-o",
    }

    def __init__(self, schema_components):
        self.parent_model_components = {}
        self.schema_components = schema_components

    def load_components(self):
        """Populate ``parent_model_components`` from the stored schema components."""
        self._load_components(self.schema_components)

    def _load_components(self, components):
        for component in components:
            key = component.get('key')
            comp_type = component.get('type')
            if not key or comp_type in ("button",):
                continue
            descriptor = COMPONENT_UI_TEMPLATE.copy()
            descriptor['title'] = component['label']
            descriptor['key'] = key
            descriptor['schema'] = component.copy()
            if comp_type in self._LAYOUT_ICONS:
                descriptor['icon'] = self._LAYOUT_ICONS[comp_type]
            self.parent_model_components[key] = descriptor
| [
"logging.getLogger",
"collections.OrderedDict",
"uuid.uuid4",
"copy.deepcopy"
] | [((287, 314), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (304, 314), False, 'import logging\n'), ((1063, 1154), 'collections.OrderedDict', 'collections.OrderedDict', (["{'list_order': 'O', 'check': 'Chk', 'owner_name': 'Operatore'}"], {}), "({'list_order': 'O', 'check': 'Chk', 'owner_name':\n 'Operatore'})\n", (1086, 1154), False, 'import collections\n'), ((2449, 2468), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (2462, 2468), False, 'import copy\n'), ((2499, 2518), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (2512, 2518), False, 'import copy\n'), ((3223, 3235), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3233, 3235), False, 'import uuid\n'), ((3841, 3853), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3851, 3853), False, 'import uuid\n'), ((4474, 4486), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4484, 4486), False, 'import uuid\n')] |
from multiprocessing import Pool
def _calc_epa_func(thermostat):
""" Takes an individual thermostat and runs the
calculate_epa_field_savings_metrics method. This method is necessary for
the multiprocessing pool as map / imap need a function to run on.
Parameters
----------
thermostat : thermostat
Returns
-------
results : results from running calculate_epa_field_savings_metrics
"""
results = thermostat.calculate_epa_field_savings_metrics()
return results
def multiple_thermostat_calculate_epa_field_savings_metrics(thermostats):
""" Takes a list of thermostats and uses Python's Multiprocessing module to
run as many processes in parallel as the system will allow.
Parameters
----------
thermostats : thermostats iterator
A list of the thermostats run the calculate_epa_field_savings_metrics
upon.
Returns
-------
metrics : list
Returns a list of the metrics calculated for the thermostats
"""
# Convert the thermostats iterator to a list
thermostats_list = list(thermostats)
pool = Pool()
results = pool.imap(_calc_epa_func, thermostats_list)
pool.close()
pool.join()
metrics_dict = {}
for output in results:
thermostat_id = output[0]['ct_identifier']
metrics_dict[thermostat_id] = []
for individual_output in output:
metrics_dict[thermostat_id].append(individual_output)
# Get the order of the thermostats from the original input so the output
# matches the order that was sent in
thermostat_ids = \
[thermostat.thermostat_id for thermostat in thermostats_list]
metrics = []
for thermostat_id in thermostat_ids:
try:
for metric in metrics_dict[thermostat_id]:
metrics.append(metric)
# Prevent duplicate thermostat IDs from being double-counted
metrics_dict.pop(thermostat_id, None)
# Trap for missing keys
except KeyError:
pass
return metrics
| [
"multiprocessing.Pool"
] | [((1116, 1122), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (1120, 1122), False, 'from multiprocessing import Pool\n')] |
import mirtop.libs.logger as mylog
logger = mylog.getLogger(__name__)
def create(reads, database, sample, fn, header):
"""Read https://github.com/miRTop/mirtop/issues/9"""
seen = set()
lines = []
seen_ann = {}
# print >>out_handle, "seq\tname\tfreq\tchrom\tstart\tend\tmism\tadd\tt5\tt3\ts5\ts3\tDB\tprecursor\tambiguity\tName"
out_handle = open(fn, 'w')
print >>out_handle, header
filter_precursor = 0
filter_score = 0
for r, read in reads.iteritems():
hits = set()
[hits.add(mature.mirna) for mature in read.precursors.values() if mature.mirna]
hits = len(hits)
for p, iso in read.precursors.iteritems():
if len(iso.subs) > 3 or not iso.mirna:
continue
filter_precursor += 1
if (r, iso.mirna) not in seen:
seen.add((r, iso.mirna))
chrom = iso.mirna
if not chrom:
chrom = p
# count = _get_freq(r)
seq = reads[r].sequence
if iso.get_score(len(seq)) < 1:
continue
filter_score += 1
if iso.subs:
iso.subs = [] if "N" in iso.subs[0] else iso.subs
annotation = "%s.%s" % (chrom, iso.format_id(sep="."))
idseq = reads[r].idseq
source = "ref_miRNA" if not iso.is_iso() else "isomiR"
strand = iso.strand
start, end = iso.start, iso.end
score = iso.map_score
filter = iso.filter
mirName = iso.mirna
preName = p
Variant = iso.formatGFF()
Cigar = iso.cigar
counts = read.counts
Filter = iso.filter
attrb = ("Read {r}; UID {idseq}; Name {mirName}; Parent {preName}; Variant {Variant}; Cigar {Cigar}; Expression {counts}; Filter {Filter};").format(**locals())
res = ("{chrom}\t{database}\t{source}\t{start}\t{end}\t{score}\t{strand}\t.\t{attrb}").format(**locals())
if annotation in seen_ann and seq.find("N") < 0 and seen_ann[annotation].split("\t")[0].find("N") < 0:
logger.warning("Same isomir %s from different sequence: \n%s and \n%s" % (annotation, res, seen_ann[annotation]))
seen_ann[annotation] = res
lines.append([annotation, chrom, counts, sample])
logger.debug("GFF::%s" % res)
# lines_pre.append([annotation, chrom, p, count, sample, hits])
print >>out_handle, res
out_handle.close()
logger.info("GFF lines: %s" % len(lines))
logger.info("Filter by being outside mirna size: %s" % filter_precursor)
logger.info("Filter by being low score: %s" % filter_score)
return lines
def _merge(lines):
if lines:
dt = pd.DataFrame(lines)
dt.columns = ["isomir", "chrom", "counts", "sample", "hits"]
dt = dt[dt['hits']>0]
dt = dt.loc[:, "isomir":"sample"]
dt = dt.groupby(['isomir', 'chrom', 'sample'], as_index=False).sum()
dt.to_csv(out_file + "_summary")
dt_pre = pd.DataFrame(lines_pre)
dt_pre.columns = ["isomir", "mature", "chrom", "counts", "sample", "hits"]
dt_pre = dt_pre[dt_pre['hits']==1]
dt_pre = dt_pre.loc[:, "isomir":"sample"]
dt_pre = dt_pre.groupby(['isomir', 'chrom', 'mature', 'sample'], as_index=False).sum()
return out_file, dt, dt_pre
| [
"mirtop.libs.logger.getLogger"
] | [((44, 69), 'mirtop.libs.logger.getLogger', 'mylog.getLogger', (['__name__'], {}), '(__name__)\n', (59, 69), True, 'import mirtop.libs.logger as mylog\n')] |
# Generated by Django 3.0.4 on 2020-03-18 15:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('imdb', '0003_remove_cast_is_debut_movie'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='director',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='imdb.Director'),
),
]
| [
"django.db.models.ForeignKey"
] | [((375, 472), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""imdb.Director"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='imdb.Director')\n", (392, 472), False, 'from django.db import migrations, models\n')] |
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_daq as daq
import datetime as dt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier
# Estimators
from joblib import load
price_estimator = load('models/price_model.joblib')
cxl_estimator = load('models/cancellation_model.joblib')
# Imports from this application
from app import app
# 2 column layout. 1st column width = 4/12
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Reservations
Make a reservation. Then see an estimated price and the likelihood you will cancel. Change
your selections to see how that will impact the estimates.
"""
),
dcc.DatePickerSingle(
id='arrival_date',
min_date_allowed=dt.date.today(),
max_date_allowed=dt.date(2022, 12, 31),
initial_visible_month=dt.date.today(),
date=dt.date.today()
),
dcc.Markdown('Choose arrival date'),
dcc.Dropdown(
id='hotel',
options=[
{'label': 'City', 'value': 1},
{'label': 'Resort', 'value': 2}
],
value=1
),
dcc.Markdown('Choose which location'),
daq.NumericInput(
id='num_nights',
min=1,
max=30,
value=1
),
dcc.Markdown('Choose number of nights'),
daq.NumericInput(
id='num_adults',
min=1,
max=4,
value=2
),
dcc.Markdown('How many adults?'),
dcc.Dropdown(
id='meal_plan',
options=[
{'label': 'None', 'value': 0},
{'label': 'Breakfast', 'value': 1},
{'label': 'Breakfast and dinner', 'value': 2},
{'label': 'Full meal plan','value': 3 }
],
value=3
),
dcc.Markdown('Choose a meal plan'),
daq.NumericInput(
id='num_cars',
min=0,
max=4,
value=2
),
dcc.Markdown('Available parking spaces'),
daq.NumericInput(
id='num_sr',
min=0,
max=5,
value=2
),
dcc.Markdown('How many special requests will you have?'),
dcc.RadioItems(
id='prev_stay',
options=[
{'label': 'Yes', 'value': 1},
{'label': 'No', 'value': 0}
],
value=1,
labelStyle={'display': 'inline-block', 'padding': '5px'}
),
dcc.Markdown('Have you stayed with us before?'),
daq.NumericInput(
id='num_prev_cxl',
min=0,
max=10,
value=0
),
dcc.Markdown('Be honest, how many times you have cancelled reservations before?'),
dcc.RadioItems(
id='deposit_type',
options=[
{'label': 'None', 'value': 1},
{'label': 'Refundable', 'value': 2},
{'label': 'Non-refundable', 'value': 3}
],
value=1,
labelStyle={'display': 'inline-block', 'padding': '5px'}
),
dcc.Markdown('Will you make a deposit?')
#html.Button('Book it!', id='submit-reservation', n_clicks=0)
],
md=4,
)
column2 = dbc.Col(
[
html.Div(id='price-output'),
html.Div(id='cxl-output')
]
)
@app.callback(
[Output('price-output', 'children'), Output('cxl-output', 'children')],
[Input('arrival_date', 'date'),
Input('num_adults', 'value'),
Input('num_nights', 'value'),
Input('meal_plan', 'value'),
Input('hotel', 'value'),
Input('num_cars', 'value'),
Input('num_sr', 'value'),
Input('prev_stay', 'value'),
Input('num_prev_cxl', 'value'),
Input('deposit_type', 'value')],
)
def predict(arrival_date, num_adults, num_nights, meal_plan, hotel, num_cars,
num_sr, prev_stay, num_prev_cxl, deposit_type):
# Get the week of the year and lead time from the arrival date
arrival = dt.datetime.strptime(arrival_date, '%Y-%m-%d')
week = arrival.isocalendar()[1]
lead_time = (arrival.date() - dt.date.today()).days
# Features used in the model not exposed in this app, but will be hard coded
market_seg = 1
room_type_changed = False
customer_type = 1
days_in_waiting_list = 0
booking_changes = 0
# Input for the price model. Already encoded.
# ['arrival_date_week_number', 'hotel', 'adults', 'nights_stay']
input1 = [week, hotel, num_adults, num_nights]
# Get estimated price of this stay
adr = round(price_estimator.predict([input1])[0] + meal_plan * 10, 2)
total = round(adr * num_nights, 2)
# Generate the output
output1 = [html.P(f'Estimated nightly rate {adr} Euros', style={'text-align': 'center',
'margin-top': '40px'}),
html.P(f'Total cost {total} Euros', style={'text-align': 'center'})]
# Input for the cancellation model. Rolls in the ADR from previous step
input2 = [hotel, lead_time, week, num_adults, meal_plan, market_seg, prev_stay,
num_prev_cxl, booking_changes, deposit_type, days_in_waiting_list,
customer_type, adr, num_cars, num_sr, num_nights, room_type_changed]
# Predict probabilities of staying, and cancelling
probabilities = cxl_estimator.predict_proba([input2])
# Generate the output as guage
output2 = daq.Gauge(
showCurrentValue=True,
units="percentage points",
value= probabilities[0][1] * 100,
label=f'{round(probabilities[0][1] * 100, 2)} % probability of cancellation',
size=280,
labelPosition='bottom',
max=100,
min=0,
)
return output1, output2
layout = dbc.Row([column1, column2]) | [
"dash_bootstrap_components.Row",
"dash_core_components.RadioItems",
"datetime.datetime.strptime",
"dash_daq.NumericInput",
"dash.dependencies.Output",
"dash.dependencies.Input",
"dash_core_components.Dropdown",
"dash_core_components.Markdown",
"datetime.date",
"joblib.load",
"dash_html_component... | [((403, 436), 'joblib.load', 'load', (['"""models/price_model.joblib"""'], {}), "('models/price_model.joblib')\n", (407, 436), False, 'from joblib import load\n'), ((453, 493), 'joblib.load', 'load', (['"""models/cancellation_model.joblib"""'], {}), "('models/cancellation_model.joblib')\n", (457, 493), False, 'from joblib import load\n'), ((5902, 5929), 'dash_bootstrap_components.Row', 'dbc.Row', (['[column1, column2]'], {}), '([column1, column2])\n', (5909, 5929), True, 'import dash_bootstrap_components as dbc\n'), ((4096, 4142), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['arrival_date', '"""%Y-%m-%d"""'], {}), "(arrival_date, '%Y-%m-%d')\n", (4116, 4142), True, 'import datetime as dt\n'), ((702, 958), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n \n ## Reservations\n\n Make a reservation. Then see an estimated price and the likelihood you will cancel. Change\n your selections to see how that will impact the estimates.\n\n """'], {}), '(\n """\n \n ## Reservations\n\n Make a reservation. Then see an estimated price and the likelihood you will cancel. 
Change\n your selections to see how that will impact the estimates.\n\n """\n )\n', (714, 958), True, 'import dash_core_components as dcc\n'), ((1203, 1238), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""Choose arrival date"""'], {}), "('Choose arrival date')\n", (1215, 1238), True, 'import dash_core_components as dcc\n'), ((1245, 1356), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""hotel"""', 'options': "[{'label': 'City', 'value': 1}, {'label': 'Resort', 'value': 2}]", 'value': '(1)'}), "(id='hotel', options=[{'label': 'City', 'value': 1}, {'label':\n 'Resort', 'value': 2}], value=1)\n", (1257, 1356), True, 'import dash_core_components as dcc\n'), ((1422, 1459), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""Choose which location"""'], {}), "('Choose which location')\n", (1434, 1459), True, 'import dash_core_components as dcc\n'), ((1466, 1523), 'dash_daq.NumericInput', 'daq.NumericInput', ([], {'id': '"""num_nights"""', 'min': '(1)', 'max': '(30)', 'value': '(1)'}), "(id='num_nights', min=1, max=30, value=1)\n", (1482, 1523), True, 'import dash_daq as daq\n'), ((1569, 1608), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""Choose number of nights"""'], {}), "('Choose number of nights')\n", (1581, 1608), True, 'import dash_core_components as dcc\n'), ((1615, 1671), 'dash_daq.NumericInput', 'daq.NumericInput', ([], {'id': '"""num_adults"""', 'min': '(1)', 'max': '(4)', 'value': '(2)'}), "(id='num_adults', min=1, max=4, value=2)\n", (1631, 1671), True, 'import dash_daq as daq\n'), ((1715, 1747), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""How many adults?"""'], {}), "('How many adults?')\n", (1727, 1747), True, 'import dash_core_components as dcc\n'), ((1754, 1965), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""meal_plan"""', 'options': "[{'label': 'None', 'value': 0}, {'label': 'Breakfast', 'value': 1}, {\n 'label': 'Breakfast and dinner', 'value': 2}, {'label':\n 'Full meal plan', 'value': 
3}]", 'value': '(3)'}), "(id='meal_plan', options=[{'label': 'None', 'value': 0}, {\n 'label': 'Breakfast', 'value': 1}, {'label': 'Breakfast and dinner',\n 'value': 2}, {'label': 'Full meal plan', 'value': 3}], value=3)\n", (1766, 1965), True, 'import dash_core_components as dcc\n'), ((2050, 2084), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""Choose a meal plan"""'], {}), "('Choose a meal plan')\n", (2062, 2084), True, 'import dash_core_components as dcc\n'), ((2091, 2145), 'dash_daq.NumericInput', 'daq.NumericInput', ([], {'id': '"""num_cars"""', 'min': '(0)', 'max': '(4)', 'value': '(2)'}), "(id='num_cars', min=0, max=4, value=2)\n", (2107, 2145), True, 'import dash_daq as daq\n'), ((2189, 2229), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""Available parking spaces"""'], {}), "('Available parking spaces')\n", (2201, 2229), True, 'import dash_core_components as dcc\n'), ((2236, 2288), 'dash_daq.NumericInput', 'daq.NumericInput', ([], {'id': '"""num_sr"""', 'min': '(0)', 'max': '(5)', 'value': '(2)'}), "(id='num_sr', min=0, max=5, value=2)\n", (2252, 2288), True, 'import dash_daq as daq\n'), ((2332, 2388), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""How many special requests will you have?"""'], {}), "('How many special requests will you have?')\n", (2344, 2388), True, 'import dash_core_components as dcc\n'), ((2395, 2570), 'dash_core_components.RadioItems', 'dcc.RadioItems', ([], {'id': '"""prev_stay"""', 'options': "[{'label': 'Yes', 'value': 1}, {'label': 'No', 'value': 0}]", 'value': '(1)', 'labelStyle': "{'display': 'inline-block', 'padding': '5px'}"}), "(id='prev_stay', options=[{'label': 'Yes', 'value': 1}, {\n 'label': 'No', 'value': 0}], value=1, labelStyle={'display':\n 'inline-block', 'padding': '5px'})\n", (2409, 2570), True, 'import dash_core_components as dcc\n'), ((2639, 2686), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""Have you stayed with us before?"""'], {}), "('Have you stayed with us before?')\n", 
(2651, 2686), True, 'import dash_core_components as dcc\n'), ((2693, 2752), 'dash_daq.NumericInput', 'daq.NumericInput', ([], {'id': '"""num_prev_cxl"""', 'min': '(0)', 'max': '(10)', 'value': '(0)'}), "(id='num_prev_cxl', min=0, max=10, value=0)\n", (2709, 2752), True, 'import dash_daq as daq\n'), ((2798, 2884), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""Be honest, how many times you have cancelled reservations before?"""'], {}), "(\n 'Be honest, how many times you have cancelled reservations before?')\n", (2810, 2884), True, 'import dash_core_components as dcc\n'), ((2886, 3114), 'dash_core_components.RadioItems', 'dcc.RadioItems', ([], {'id': '"""deposit_type"""', 'options': "[{'label': 'None', 'value': 1}, {'label': 'Refundable', 'value': 2}, {\n 'label': 'Non-refundable', 'value': 3}]", 'value': '(1)', 'labelStyle': "{'display': 'inline-block', 'padding': '5px'}"}), "(id='deposit_type', options=[{'label': 'None', 'value': 1}, {\n 'label': 'Refundable', 'value': 2}, {'label': 'Non-refundable', 'value':\n 3}], value=1, labelStyle={'display': 'inline-block', 'padding': '5px'})\n", (2900, 3114), True, 'import dash_core_components as dcc\n'), ((3195, 3235), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""Will you make a deposit?"""'], {}), "('Will you make a deposit?')\n", (3207, 3235), True, 'import dash_core_components as dcc\n'), ((3366, 3393), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""price-output"""'}), "(id='price-output')\n", (3374, 3393), True, 'import dash_html_components as html\n'), ((3404, 3429), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""cxl-output"""'}), "(id='cxl-output')\n", (3412, 3429), True, 'import dash_html_components as html\n'), ((4808, 4911), 'dash_html_components.P', 'html.P', (['f"""Estimated nightly rate {adr} Euros"""'], {'style': "{'text-align': 'center', 'margin-top': '40px'}"}), "(f'Estimated nightly rate {adr} Euros', style={'text-align': 'center',\n 'margin-top': '40px'})\n", (4814, 
4911), True, 'import dash_html_components as html\n'), ((4992, 5059), 'dash_html_components.P', 'html.P', (['f"""Total cost {total} Euros"""'], {'style': "{'text-align': 'center'}"}), "(f'Total cost {total} Euros', style={'text-align': 'center'})\n", (4998, 5059), True, 'import dash_html_components as html\n'), ((3459, 3493), 'dash.dependencies.Output', 'Output', (['"""price-output"""', '"""children"""'], {}), "('price-output', 'children')\n", (3465, 3493), False, 'from dash.dependencies import Input, Output\n'), ((3495, 3527), 'dash.dependencies.Output', 'Output', (['"""cxl-output"""', '"""children"""'], {}), "('cxl-output', 'children')\n", (3501, 3527), False, 'from dash.dependencies import Input, Output\n'), ((3535, 3564), 'dash.dependencies.Input', 'Input', (['"""arrival_date"""', '"""date"""'], {}), "('arrival_date', 'date')\n", (3540, 3564), False, 'from dash.dependencies import Input, Output\n'), ((3572, 3600), 'dash.dependencies.Input', 'Input', (['"""num_adults"""', '"""value"""'], {}), "('num_adults', 'value')\n", (3577, 3600), False, 'from dash.dependencies import Input, Output\n'), ((3607, 3635), 'dash.dependencies.Input', 'Input', (['"""num_nights"""', '"""value"""'], {}), "('num_nights', 'value')\n", (3612, 3635), False, 'from dash.dependencies import Input, Output\n'), ((3642, 3669), 'dash.dependencies.Input', 'Input', (['"""meal_plan"""', '"""value"""'], {}), "('meal_plan', 'value')\n", (3647, 3669), False, 'from dash.dependencies import Input, Output\n'), ((3676, 3699), 'dash.dependencies.Input', 'Input', (['"""hotel"""', '"""value"""'], {}), "('hotel', 'value')\n", (3681, 3699), False, 'from dash.dependencies import Input, Output\n'), ((3706, 3732), 'dash.dependencies.Input', 'Input', (['"""num_cars"""', '"""value"""'], {}), "('num_cars', 'value')\n", (3711, 3732), False, 'from dash.dependencies import Input, Output\n'), ((3739, 3763), 'dash.dependencies.Input', 'Input', (['"""num_sr"""', '"""value"""'], {}), "('num_sr', 'value')\n", (3744, 3763), 
False, 'from dash.dependencies import Input, Output\n'), ((3770, 3797), 'dash.dependencies.Input', 'Input', (['"""prev_stay"""', '"""value"""'], {}), "('prev_stay', 'value')\n", (3775, 3797), False, 'from dash.dependencies import Input, Output\n'), ((3804, 3834), 'dash.dependencies.Input', 'Input', (['"""num_prev_cxl"""', '"""value"""'], {}), "('num_prev_cxl', 'value')\n", (3809, 3834), False, 'from dash.dependencies import Input, Output\n'), ((3841, 3871), 'dash.dependencies.Input', 'Input', (['"""deposit_type"""', '"""value"""'], {}), "('deposit_type', 'value')\n", (3846, 3871), False, 'from dash.dependencies import Input, Output\n'), ((4213, 4228), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (4226, 4228), True, 'import datetime as dt\n'), ((1051, 1066), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (1064, 1066), True, 'import datetime as dt\n'), ((1093, 1114), 'datetime.date', 'dt.date', (['(2022)', '(12)', '(31)'], {}), '(2022, 12, 31)\n', (1100, 1114), True, 'import datetime as dt\n'), ((1146, 1161), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (1159, 1161), True, 'import datetime as dt\n'), ((1176, 1191), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (1189, 1191), True, 'import datetime as dt\n')] |
from django.core.management.base import BaseCommand
from wagtail.core.models import Page
from events.models import EventIndexPage, Event
from home.models import HomePage
from pathlib import Path
import json
import os
import sys
json_directory_path = Path("legacy_site_data/old_site_data_events.json")
with open(json_directory_path) as f:
event_json = json.load(f)
class Command(BaseCommand):
help = 'Seeds the pages'
def handle(self, *args, **kwargs):
try:
print('getting home')
home_page = HomePage.objects.all()[0]
home_page.title = "CIPUG home page"
home_page.body = "Hello from the Channel Island Python User Group"
home_page.seo_title = "CIPUG home page"
home_page.save()
self.stdout.write(self.style.SUCCESS('Home page updated!'))
except IndexError:
self.stdout.write(self.style.ERROR('Home page needs to be created in wagtail admin first'))
try:
event_index_page = EventIndexPage(
title = "Events",
intro = "All the CIPUG events",
slug = "jersey-events",
)
home_page.add_child(instance=event_index_page)
self.stdout.write(self.style.SUCCESS('Event index page created!'))
number_of_events = 8
for n in range(number_of_events):
event = Event(
slug = str(n+1),
title = 'Meet up no. {}'.format(str(n+1)),
date = event_json["date"][str(n)],
place = event_json["place"][str(n)],
agenda = event_json["agenda"][str(n)],
notes = event_json["notes"][str(n)],
resources = event_json["resources"][str(n)],
attended = event_json["attended"][str(n)],
)
event_index_page.add_child(instance=event)
self.stdout.write(self.style.SUCCESS('Event detail page no. {} created!'.format(str(n))))
self.stdout.write(self.style.SUCCESS('Woop woop!'))
except:
self.stdout.write(self.style.ERROR(f'An error occured: \n {sys.exc_info()[0]}\n{sys.exc_info()[1]}'))
| [
"home.models.HomePage.objects.all",
"events.models.EventIndexPage",
"pathlib.Path",
"sys.exc_info",
"json.load"
] | [((252, 302), 'pathlib.Path', 'Path', (['"""legacy_site_data/old_site_data_events.json"""'], {}), "('legacy_site_data/old_site_data_events.json')\n", (256, 302), False, 'from pathlib import Path\n'), ((358, 370), 'json.load', 'json.load', (['f'], {}), '(f)\n', (367, 370), False, 'import json\n'), ((1036, 1123), 'events.models.EventIndexPage', 'EventIndexPage', ([], {'title': '"""Events"""', 'intro': '"""All the CIPUG events"""', 'slug': '"""jersey-events"""'}), "(title='Events', intro='All the CIPUG events', slug=\n 'jersey-events')\n", (1050, 1123), False, 'from events.models import EventIndexPage, Event\n'), ((540, 562), 'home.models.HomePage.objects.all', 'HomePage.objects.all', ([], {}), '()\n', (560, 562), False, 'from home.models import HomePage\n'), ((2245, 2259), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2257, 2259), False, 'import sys\n'), ((2266, 2280), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2278, 2280), False, 'import sys\n')] |
import json
import urllib.request
import pprint
import webbrowser
URL = "https://mars.jpl.nasa.gov/msl-raw-images/image/images_sol2320.json"
jsonFILE = json.loads(urllib.request.urlopen(URL).read())
#pprint.pprint(jsonFILE)
camera = jsonFILE['images'][0]['cameraModelType']
sol = jsonFILE['images'][0]['sol']
link = jsonFILE['images'][0]['urlList']
print('Camera type: {0}, Sol #{1}'.format(camera, sol))
webbrowser.open(link)
| [
"webbrowser.open"
] | [((409, 430), 'webbrowser.open', 'webbrowser.open', (['link'], {}), '(link)\n', (424, 430), False, 'import webbrowser\n')] |