code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
#from utils import ordinal
import re
from urllib.parse import quote as uriquote
import asyncio
from bs4 import BeautifulSoup
import collections
from utils.context import MoreContext
from utils.context import Location
from utils.paginator import Paginator
from utils.units import units
def ordinal(n):
    """Return *n* with its English ordinal suffix, e.g. 1 -> '1st', 11 -> '11th'."""
    # Suffix table trick: "tsnrhtdd" read with stride 4 yields th/st/nd/rd;
    # the boolean products pick 'th' for 11-13 and for last digits >= 4.
    # (Was a lambda assigned to a name -- PEP 8 E731 -- now a proper def.)
    return "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])

# Compiled once at import time; non-greedily matches any HTML/XML tag.
tagregex = re.compile(r'<.*?>')

def remove_html_tags(data):
    """Strip every HTML/XML tag from *data* and return the cleaned string."""
    return tagregex.sub('', data)
async def google_for_urls(bot, search_term, *, url_regex=None, return_full_data=False):
    """Search Google Custom Search for *search_term* and return result URLs.

    Args:
        bot: bot object providing ``config`` (API credentials) and an aiohttp
            ``session``.
        search_term: the query string (URL-quoted before sending).
        url_regex: optional regex; only result links matching it are kept.
        return_full_data: if True, return the raw ``items`` list from the API
            response instead of a list of links.

    Returns:
        A list of result links (with '%25' unescaped to '%'), the raw items
        when *return_full_data* is set, or None when there are no results /
        the request fails.
    """
    url = 'https://www.googleapis.com/customsearch/v1?key={}&cx={}&q={}'
    url = url.format(bot.config.gsearchapi,
                     bot.config.gsearchcx, uriquote(search_term))
    async with bot.session.get(url) as resp:
        json = await resp.json()
        if resp.status != 200:
            print(resp, json)
            return
        # Bug fix: check for 'items' BEFORE dereferencing it, so an empty
        # result set no longer raises KeyError when return_full_data is set.
        if 'items' not in json:
            return None
        if return_full_data:
            return json['items']
        results = []
        for result in json['items']:
            if url_regex:
                check = re.search(url_regex, result['link'])
                if not check:
                    continue
            results.append(result['link'].replace('%25', '%'))
        return results
async def bs_from_url(bot, url, return_url=False):
    """Fetch *url* with the bot's HTTP session and parse it with BeautifulSoup.

    Returns the parsed page, or a (page, final_url) pair when *return_url* is
    true (the final URL reflects any redirects the request followed).
    """
    request_headers = {'User-Agent': 'Mozilla/5.0 PalBot'}
    async with bot.session.get(url, headers=request_headers) as response:
        # Refuse to parse non-text payloads (images, binaries, ...).
        assert "text" in response.headers['Content-Type']
        raw = await response.read()
        soup = BeautifulSoup(raw, 'lxml')
        return (soup, response.url) if return_url else soup
def dict_merge(dct, merge_dct):
    """Recursively merge *merge_dct* into *dct* in place.

    Values that are dict-like on both sides are merged key-by-key; any other
    value from *merge_dct* overwrites the corresponding value in *dct*.
    """
    # Bug fix: collections.Mapping was removed in Python 3.10; the ABC lives
    # in collections.abc. Imported locally to keep this edit self-contained.
    from collections.abc import Mapping

    for key, value in merge_dct.items():
        if key in dct and isinstance(dct[key], dict) and isinstance(value, Mapping):
            dict_merge(dct[key], value)
        else:
            dct[key] = value
| [
"bs4.BeautifulSoup",
"re.search",
"urllib.parse.quote",
"re.compile"
] | [((375, 394), 're.compile', 're.compile', (['"""<.*?>"""'], {}), "('<.*?>')\n", (385, 394), False, 'import re\n'), ((747, 768), 'urllib.parse.quote', 'uriquote', (['search_term'], {}), '(search_term)\n', (755, 768), True, 'from urllib.parse import quote as uriquote\n'), ((1618, 1645), 'bs4.BeautifulSoup', 'BeautifulSoup', (['data', '"""lxml"""'], {}), "(data, 'lxml')\n", (1631, 1645), False, 'from bs4 import BeautifulSoup\n'), ((1168, 1204), 're.search', 're.search', (['url_regex', "result['link']"], {}), "(url_regex, result['link'])\n", (1177, 1204), False, 'import re\n')] |
from django.forms import ModelForm, inlineformset_factory, BaseInlineFormSet
from . import models
class AuthorContainerForm(ModelForm):
    """ModelForm for an AuthorContainer; exposes every model field except ``id``."""

    class Meta:
        model = models.AuthorContainer
        exclude = ('id',)
class AuthorForm(ModelForm):
    """ModelForm editing an Author's first and last name."""

    class Meta:
        model = models.Author
        fields = ('first_name', 'last_name')
class BookForm(ModelForm):
    """ModelForm editing a Book's title and ISBN."""

    class Meta:
        model = models.Book
        fields = ('title', 'isbn',)
# Inline formset tying Book rows to their Author; rows may be deleted and no
# blank extra forms are rendered by default (extra=0).
BookFormset = inlineformset_factory(models.Author, models.Book, form=BookForm, can_delete=True, extra=0)
class BaseAuthorFormset(BaseInlineFormSet):
    """Author formset in which every author form carries a nested Book formset.

    This enables a two-level edit page: AuthorContainer -> Authors -> Books.
    Validation and saving are cascaded to the nested formsets.
    """

    def add_fields(self, form, index):
        # Attach a Book formset to each author form. Data/files are bound
        # only when the outer form is bound, and the prefix is namespaced
        # with the parent form's prefix so POST keys stay unique per author.
        super(BaseAuthorFormset, self).add_fields(form, index)
        form.nested_book = BookFormset(
            instance=form.instance,
            data=form.data if form.is_bound else None,
            files=form.files if form.is_bound else None,
            prefix='nested_book-%s-%s' % (
                form.prefix,
                BookFormset.get_default_prefix()))

    def is_valid(self):
        # The outer formset is valid only if every nested Book formset is too.
        result = super(BaseAuthorFormset, self).is_valid()
        if self.is_bound:
            for form in self.forms:
                if hasattr(form, 'nested_book'):
                    result = result and form.nested_book.is_valid()
        return result

    def save(self, commit=True):
        # Save the authors first, then cascade to each surviving author's
        # books; forms marked for deletion skip the nested save.
        result = super(BaseAuthorFormset, self).save(commit=commit)
        for form in self.forms:
            if hasattr(form, 'nested_book'):
                if not self._should_delete_form(form):
                    form.nested_book.save(commit=commit)
        return result
# Top-level formset: Authors nested inside an AuthorContainer, with each
# author form carrying its own Book formset (see BaseAuthorFormset).
AuthorsFormset = inlineformset_factory(models.AuthorContainer, models.Author, formset=BaseAuthorFormset,
                                      form=AuthorForm, extra=0)
| [
"django.forms.inlineformset_factory"
] | [((467, 562), 'django.forms.inlineformset_factory', 'inlineformset_factory', (['models.Author', 'models.Book'], {'form': 'BookForm', 'can_delete': '(True)', 'extra': '(0)'}), '(models.Author, models.Book, form=BookForm, can_delete\n =True, extra=0)\n', (488, 562), False, 'from django.forms import ModelForm, inlineformset_factory, BaseInlineFormSet\n'), ((1637, 1755), 'django.forms.inlineformset_factory', 'inlineformset_factory', (['models.AuthorContainer', 'models.Author'], {'formset': 'BaseAuthorFormset', 'form': 'AuthorForm', 'extra': '(0)'}), '(models.AuthorContainer, models.Author, formset=\n BaseAuthorFormset, form=AuthorForm, extra=0)\n', (1658, 1755), False, 'from django.forms import ModelForm, inlineformset_factory, BaseInlineFormSet\n')] |
from collections import Counter
from math import log
from tqdm import tqdm
import re
from evaluation import evaluateSet
def build_model(train_set):
    """Train a 4-state (S/B/M/E) HMM word segmenter from a BMES-tagged corpus.

    *train_set* is a UTF-8 text file with one "<char> <tag>" pair per line
    and blank lines separating sentences.

    Returns:
        hmm_model: dict mapping each state to a Counter of character
            emission counts.
        trans: dict mapping each legal state transition (e.g. 'SB') to its
            log-probability.
        log_total: dict mapping each state to log of its total emission
            count (used to normalise emissions in hmm_cut).

    Raises:
        ZeroDivisionError: if some state has no outgoing transitions in the
            corpus (unchanged from the original behaviour).
    """
    hmm_model = {state: Counter() for state in 'SBME'}
    trans = {'SS': 0,
             'SB': 0,
             'BM': 0,
             'BE': 0,
             'MM': 0,
             'ME': 0,
             'ES': 0,
             'EB': 0
             }
    with open(train_set, 'r', encoding='utf-8') as f:
        cha = []
        tag = []
        for l in f:
            l = l.split()
            if (len(l) == 0):
                # Sentence boundary: record a space so no cross-sentence
                # transition gets counted below.
                cha += " "
                tag += " "
            else:
                cha += l[0]
                tag += l[1]
    for i in range(len(tag)):
        if tag[i] != ' ':
            hmm_model[tag[i]][cha[i]] += 1
            if i + 1 < len(tag) and tag[i + 1] != ' ':
                trans[tag[i] + tag[i + 1]] += 1
    # Normalise each state's two outgoing transitions into probabilities
    # (previously four copy-pasted normalisation stanzas).
    for first, second in (('SS', 'SB'), ('BM', 'BE'), ('MM', 'ME'), ('ES', 'EB')):
        total = trans[first] + trans[second]
        trans[first] /= total
        trans[second] /= total
    log_total = {state: log(sum(hmm_model[state].values())) for state in 'SBME'}
    trans = {pair: log(p) for pair, p in trans.items()}
    return hmm_model, trans, log_total
def viterbi(nodes):
    """Return the most probable tag sequence through *nodes*.

    *nodes* is a list of {tag: log-score} dicts, one per character; legal
    transitions and their log-probabilities come from the module-level
    ``trans`` table.
    """
    paths = nodes[0]
    for step in range(1, len(nodes)):
        previous = paths
        paths = {}
        for state in nodes[step]:
            candidates = {}
            for path in previous:
                # Extend only along transitions the model allows.
                if path[-1] + state in trans:
                    candidates[path + state] = (previous[path]
                                                + nodes[step][state]
                                                + trans[path[-1] + state])
            # Keep only the best path ending in this state.
            best = max(candidates, key=candidates.get)
            paths[best] = candidates[best]
    return max(paths, key=paths.get)
def hmm_cut(s):
    """Segment the string *s* into a list of words.

    Uses the module-level ``hmm_model`` / ``log_total`` emission tables and
    Viterbi decoding; add-one smoothing handles unseen characters.
    """
    nodes = [{state: log(counts[ch] + 1) - log_total[state]
              for state, counts in hmm_model.items()}
             for ch in s]
    tags = viterbi(nodes)
    words = [s[0]]
    # 'B' (begin) and 'S' (single) open a new word; 'M'/'E' extend the last.
    for ch, tg in zip(s[1:], tags[1:]):
        if tg in ('B', 'S'):
            words.append(ch)
        else:
            words[-1] += ch
    return words
def changenum(ustring):
    """Normalise *ustring* for segmentation.

    Full-width ASCII characters are folded to their half-width forms and the
    ideographic space (U+3000) to a plain space; then every number becomes
    "0" and every letter run directly followed by '/' becomes "1/".
    """
    chars = []
    for ch in ustring:
        code = ord(ch)
        if code == 12288:
            # Ideographic space -> ASCII space.
            code = 32
        elif (65296 <= code <= 65305) or (65345 <= code <= 65370) or (65313 <= code <= 65338):
            # Full-width digit/letter -> half-width equivalent.
            code -= 65248
        chars.append(chr(code))
    normalized = "".join(chars)
    # 所有数字改为 0
    normalized = re.sub(r"\d+\.?\d*", "0", normalized)
    # 所有英文单词改为 1
    return re.sub(r"[a-zA-Z]+\/", "1/", normalized)
if __name__ == '__main__':
    def run_experiment(title, train_path, test_path, replace_numbers=False, drop_empty=False):
        """Train on *train_path*, segment *test_path*, and print evaluation.

        Args:
            title: banner printed before the run.
            train_path: BMES-tagged training corpus.
            test_path: gold-segmented test file (words separated by spaces).
            replace_numbers: apply changenum() normalisation to test lines.
            drop_empty: discard blank test lines after tokenisation.
        """
        # hmm_cut/viterbi read these tables from module globals, so each
        # experiment must rebind them at module level.
        global hmm_model, trans, log_total
        print(title)
        hmm_model, trans, log_total = build_model(train_path)
        # 'with' replaces the manual open()/close() pairs of the original.
        with open(test_path, 'r', encoding='utf-8') as test_f:
            lines = test_f.readlines()
        if replace_numbers:
            lines = [changenum(line) for line in lines]
        lines = [line.strip().split() for line in lines]
        if drop_empty:
            lines = [line for line in lines if len(line)]
        # Test with Simple 2-gram model
        results = []
        for line in tqdm(lines):
            ori_line = ''.join(line)
            results.append(hmm_cut(ori_line))
        evaluateSet(results, lines)

    # The five experiments below were previously five near-identical
    # copy-pasted stanzas; behaviour (order, corpora, options) is unchanged.
    run_experiment("Train Set: PKU; Test Set: Weibo, w/o re-replacement",
                   "BMES_corpus/rmrb_BMES.txt",
                   'data/nlpcc2016-wordseg-dev.dat')
    run_experiment("Train Set: PKU; Test Set: Weibo, w/ re-replacement",
                   "BMES_corpus/rmrb_BMES_nonum.txt",
                   'data/nlpcc2016-wordseg-dev.dat',
                   replace_numbers=True)
    # NOTE(review): the banner says "Test Set: PKU" but the nlpcc dev file is
    # used, exactly as in the original -- confirm which is intended.
    run_experiment("Train Set: MSR; Test Set: PKU, w/ re-replacement",
                   "BMES_corpus/msr_BMES_nonum.txt",
                   'data/nlpcc2016-wordseg-dev.dat',
                   replace_numbers=True)
    run_experiment("Train Set: PKU; Test Set: PKU, w/ re-replacement",
                   "BMES_corpus/rmrb_BMES_nonum.txt",
                   'BMES_corpus/pku_training.utf8',
                   replace_numbers=True, drop_empty=True)
    run_experiment("Train Set: MSR; Test Set: MSR, w/ re-replacement",
                   "BMES_corpus/msr_BMES_nonum.txt",
                   'BMES_corpus/msr_training.utf8',
                   replace_numbers=True, drop_empty=True)
| [
"tqdm.tqdm",
"evaluation.evaluateSet",
"math.log",
"collections.Counter",
"re.sub"
] | [((2597, 2630), 're.sub', 're.sub', (['"""\\\\d+\\\\.?\\\\d*"""', '"""0"""', 'rstr'], {}), "('\\\\d+\\\\.?\\\\d*', '0', rstr)\n", (2603, 2630), False, 'import re\n'), ((2661, 2695), 're.sub', 're.sub', (['"""[a-zA-Z]+\\\\/"""', '"""1/"""', 'rstr'], {}), "('[a-zA-Z]+\\\\/', '1/', rstr)\n", (2667, 2695), False, 'import re\n'), ((3208, 3219), 'tqdm.tqdm', 'tqdm', (['lines'], {}), '(lines)\n', (3212, 3219), False, 'from tqdm import tqdm\n'), ((3322, 3349), 'evaluation.evaluateSet', 'evaluateSet', (['results', 'lines'], {}), '(results, lines)\n', (3333, 3349), False, 'from evaluation import evaluateSet\n'), ((3872, 3883), 'tqdm.tqdm', 'tqdm', (['lines'], {}), '(lines)\n', (3876, 3883), False, 'from tqdm import tqdm\n'), ((3986, 4013), 'evaluation.evaluateSet', 'evaluateSet', (['results', 'lines'], {}), '(results, lines)\n', (3997, 4013), False, 'from evaluation import evaluateSet\n'), ((4535, 4546), 'tqdm.tqdm', 'tqdm', (['lines'], {}), '(lines)\n', (4539, 4546), False, 'from tqdm import tqdm\n'), ((4649, 4676), 'evaluation.evaluateSet', 'evaluateSet', (['results', 'lines'], {}), '(results, lines)\n', (4660, 4676), False, 'from evaluation import evaluateSet\n'), ((5249, 5260), 'tqdm.tqdm', 'tqdm', (['lines'], {}), '(lines)\n', (5253, 5260), False, 'from tqdm import tqdm\n'), ((5363, 5390), 'evaluation.evaluateSet', 'evaluateSet', (['results', 'lines'], {}), '(results, lines)\n', (5374, 5390), False, 'from evaluation import evaluateSet\n'), ((5962, 5973), 'tqdm.tqdm', 'tqdm', (['lines'], {}), '(lines)\n', (5966, 5973), False, 'from tqdm import tqdm\n'), ((6076, 6103), 'evaluation.evaluateSet', 'evaluateSet', (['results', 'lines'], {}), '(results, lines)\n', (6087, 6103), False, 'from evaluation import evaluateSet\n'), ((179, 188), 'collections.Counter', 'Counter', ([], {}), '()\n', (186, 188), False, 'from collections import Counter\n'), ((1358, 1364), 'math.log', 'log', (['j'], {}), '(j)\n', (1361, 1364), False, 'from math import log\n'), ((1996, 2009), 'math.log', 
'log', (['(j[t] + 1)'], {}), '(j[t] + 1)\n', (1999, 2009), False, 'from math import log\n')] |
# Copyright 2018 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Classes for reading and writing BedGraph files.
The BedGraph format is described at
https://genome.ucsc.edu/goldenpath/help/bedgraph.html
API for reading:
```python
from third_party.nucleus.io import bedgraph
# Iterate through all records.
with bed.BedGraphReader(input_path) as reader:
for record in reader:
print(record)
```
where `record` is a `nucleus.genomics.v1.BedGraphRecord` protocol buffer.
API for writing:
```python
from third_party.nucleus.io import bedgraph
from third_party.nucleus.protos import bedgraph_pb2
# records is an iterable of nucleus.genomics.v1.BedGraphRecord protocol buffers.
records = ...
# Write all records to the desired output path.
with bed.BedGraphWriter(output_path) as writer:
for record in records:
writer.write(record)
```
For both reading and writing, if the path provided to the constructor contains
'.tfrecord' as an extension, a `TFRecord` file is assumed and attempted to be
read or written. Otherwise, the filename is treated as a true BedGraph file.
Files that end in a '.gz' suffix cause the file to be treated as compressed
(with BGZF if it is a BedGraph file, and with gzip if it is a TFRecord file).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from third_party.nucleus.io import genomics_reader
from third_party.nucleus.io import genomics_writer
from third_party.nucleus.io.python import bedgraph_reader
from third_party.nucleus.io.python import bedgraph_writer
from third_party.nucleus.protos import bedgraph_pb2
class NativeBedGraphReader(genomics_reader.GenomicsReader):
  """Class for reading from native BedGraph files.

  Most users will want to use BedGraphReader instead, because it dynamically
  dispatches between reading native BedGraph files and TFRecord files based on
  the filename's extension.
  """

  def __init__(self, input_path, num_fields=0):
    """Initializes a NativeBedGraphReader.

    Args:
      input_path: string. A path to a resource containing BedGraph records.
      num_fields: int. The number of fields to read in the BedGraph. If unset or
        set to zero, all fields in the input are read.
    """
    super(NativeBedGraphReader, self).__init__()
    # The C++ reader expects a bytes path.
    bedgraph_path = input_path.encode('utf8')
    # NOTE(review): num_fields is accepted but not forwarded to from_file
    # here -- confirm whether it should be.
    self._reader = bedgraph_reader.BedGraphReader.from_file(bedgraph_path)

  def query(self):
    """Returns an iterator for going through the records in the region.

    NOTE: This function is not currently implemented by NativeBedGraphReader
    though it could be implemented for sorted, tabix-indexed BedGraph files.

    Raises:
      NotImplementedError: always; querying is not supported.
    """
    raise NotImplementedError('Can not currently query a BedGraph file')

  def iterate(self):
    """Returns an iterable of BedGraphRecord protos in the file."""
    return self._reader.iterate()

  def __exit__(self, exit_type, exit_value, exit_traceback):
    # Delegate cleanup to the underlying native reader.
    self._reader.__exit__(exit_type, exit_value, exit_traceback)
class BedGraphReader(genomics_reader.DispatchingGenomicsReader):
  """Class for reading BedGraphRecord protos from BedGraph or TFRecord files."""

  def _native_reader(self, input_path, **kwargs):
    # Used by the dispatching base class for non-TFRecord paths.
    return NativeBedGraphReader(input_path, **kwargs)

  def _record_proto(self):
    # Record proto type used when reading from TFRecord files.
    return bedgraph_pb2.BedGraphRecord
class NativeBedGraphWriter(genomics_writer.GenomicsWriter):
  """Class for writing to native BedGraph files.

  Most users will want BedGraphWriter, which will write to either native
  BedGraph files or TFRecord files, based on the output filename's extension.
  """

  def __init__(self, output_path, header=None):
    """Initializer for NativeBedGraphWriter.

    Args:
      output_path: str. The path to which to write the BedGraph file.
      header: unused here; accepted for interface compatibility with the
        dispatching writer.
    """
    super(NativeBedGraphWriter, self).__init__()
    self._writer = bedgraph_writer.BedGraphWriter.to_file(output_path)

  def write(self, proto):
    """Writes *proto*, a BedGraphRecord, to the output file."""
    self._writer.write(proto)

  def __exit__(self, exit_type, exit_value, exit_traceback):
    # Delegate cleanup (flush/close) to the underlying native writer.
    self._writer.__exit__(exit_type, exit_value, exit_traceback)
class BedGraphWriter(genomics_writer.DispatchingGenomicsWriter):
  """Class for writing BedGraphRecord protos to BedGraph or TFRecord files."""

  def _native_writer(self, output_path):
    # Used by the dispatching base class for non-TFRecord paths.
    return NativeBedGraphWriter(output_path)
| [
"third_party.nucleus.io.python.bedgraph_writer.BedGraphWriter.to_file",
"third_party.nucleus.io.python.bedgraph_reader.BedGraphReader.from_file"
] | [((3826, 3881), 'third_party.nucleus.io.python.bedgraph_reader.BedGraphReader.from_file', 'bedgraph_reader.BedGraphReader.from_file', (['bedgraph_path'], {}), '(bedgraph_path)\n', (3866, 3881), False, 'from third_party.nucleus.io.python import bedgraph_reader\n'), ((5301, 5352), 'third_party.nucleus.io.python.bedgraph_writer.BedGraphWriter.to_file', 'bedgraph_writer.BedGraphWriter.to_file', (['output_path'], {}), '(output_path)\n', (5339, 5352), False, 'from third_party.nucleus.io.python import bedgraph_writer\n')] |
from ConfigSpace import ConfigurationSpace, CategoricalHyperparameter
import time
import warnings
import os
import numpy as np
import pickle as pkl
from sklearn.metrics.scorer import balanced_accuracy_scorer
from solnml.utils.logging_utils import get_logger
from solnml.components.evaluators.base_evaluator import _BaseEvaluator
from solnml.components.evaluators.evaluate_func import validation
from solnml.components.feature_engineering.task_space import get_task_hyperparameter_space
from solnml.components.feature_engineering.parse import parse_config, construct_node
from solnml.components.utils.topk_saver import CombinedTopKModelSaver
from solnml.components.utils.class_loader import get_combined_candidtates
from solnml.components.models.regression import _regressors, _addons
from solnml.components.utils.constants import *
def get_estimator(config, estimator_id):
    """Instantiate the regressor named *estimator_id* from a flat HPO config.

    Config keys have the form '<estimator_id>:<hyperparameter>'; only keys
    belonging to the requested estimator are forwarded to its constructor,
    with the random state pinned to 1 for reproducibility.

    Returns:
        A (estimator_id, estimator) pair; n_jobs is forced to 1 when the
        estimator supports it.
    """
    regressor_type = estimator_id
    config_ = config.copy()
    config_['%s:random_state' % regressor_type] = 1
    # Keep only this estimator's hyperparameters, stripped of their prefix.
    hpo_config = {
        key.split(':')[1]: value
        for key, value in config_.items()
        if key.split(':')[0] == regressor_type
    }
    _candidates = get_combined_candidtates(_regressors, _addons)
    estimator = _candidates[regressor_type](**hpo_config)
    if hasattr(estimator, 'n_jobs'):
        setattr(estimator, 'n_jobs', 1)
    return regressor_type, estimator
def get_hpo_cs(estimator_id, task_type=REGRESSION):
    """Return the hyperparameter ConfigurationSpace for one regressor.

    Raises:
        ValueError: if *estimator_id* is not a known regressor.
    """
    _candidates = get_combined_candidtates(_regressors, _addons)
    if estimator_id not in _candidates:
        raise ValueError("Algorithm %s not supported!" % estimator_id)
    rgs_class = _candidates[estimator_id]
    return rgs_class.get_hyperparameter_search_space()
def get_cash_cs(include_algorithms=None, task_type=REGRESSION):
    """Build the combined algorithm-selection (CASH) configuration space.

    A top-level 'algorithm' categorical selects the regressor; each
    regressor's own hyperparameter space is attached as a conditional
    sub-space.

    Raises:
        ValueError: if *include_algorithms* matches no known regressor.
    """
    _candidates = get_combined_candidtates(_regressors, _addons)
    if include_algorithms is not None:
        _candidates = set(include_algorithms) & set(_candidates)
        if not _candidates:
            raise ValueError("No algorithms included! Please check the spelling of the included algorithms!")
    cs = ConfigurationSpace()
    algo = CategoricalHyperparameter('algorithm', list(_candidates))
    cs.add_hyperparameter(algo)
    for estimator_id in _candidates:
        sub_space = get_hpo_cs(estimator_id)
        parent = {'parent': algo, 'value': estimator_id}
        cs.add_configuration_space(estimator_id, sub_space, parent_hyperparameter=parent)
    return cs
def get_fe_cs(task_type=REGRESSION, include_image=False, include_text=False, include_preprocessors=None):
    """Return the feature-engineering configuration space for *task_type*.

    Thin wrapper around get_task_hyperparameter_space that forwards the
    modality flags and optional preprocessor whitelist unchanged.
    """
    return get_task_hyperparameter_space(task_type=task_type,
                                         include_image=include_image,
                                         include_text=include_text,
                                         include_preprocessors=include_preprocessors)
def get_combined_cs(task_type=REGRESSION, include_image=False, include_text=False,
                    include_preprocessors=None):
    """Build the joint CASH + feature-engineering configuration space.

    The CASH space (algorithm choice + per-algorithm hyperparameters) is
    merged with the feature-engineering space's hyperparameters, conditions
    and forbidden clauses into one ConfigurationSpace.
    """
    # Bug fix: task_type was previously passed positionally and therefore
    # bound to get_cash_cs's *include_algorithms* parameter, silently
    # filtering the candidate set by the task-type constant.
    cash_cs = get_cash_cs(task_type=task_type)
    fe_cs = get_fe_cs(task_type,
                      include_image=include_image, include_text=include_text,
                      include_preprocessors=include_preprocessors)
    for hp in fe_cs.get_hyperparameters():
        cash_cs.add_hyperparameter(hp)
    for cond in fe_cs.get_conditions():
        cash_cs.add_condition(cond)
    for bid in fe_cs.get_forbiddens():
        cash_cs.add_forbidden_clause(bid)
    return cash_cs
class RegressionEvaluator(_BaseEvaluator):
    """Evaluates a joint (algorithm + feature-engineering) configuration on a
    regression data node.

    Supports 'holdout', 'cv' and 'partial' resampling strategies; the best
    fitted model per configuration is persisted to *output_dir*. Calling the
    evaluator returns the negated validation score (a minimisation objective).
    """

    def __init__(self, fixed_config=None, scorer=None, data_node=None, task_type=REGRESSION, resampling_strategy='cv',
                 resampling_params=None, timestamp=None, output_dir=None, seed=1):
        # NOTE(review): the fallback scorer is balanced_accuracy_scorer, a
        # classification metric, inside a *regression* evaluator -- confirm
        # this default is intended.
        self.resampling_strategy = resampling_strategy
        self.resampling_params = resampling_params
        self.fixed_config = fixed_config
        self.scorer = scorer if scorer is not None else balanced_accuracy_scorer
        self.task_type = task_type
        self.data_node = data_node
        self.output_dir = output_dir
        self.seed = seed
        self.onehot_encoder = None
        self.logger = get_logger(self.__module__ + "." + self.__class__.__name__)
        self.continue_training = False
        # Working copies; their .data is overwritten per split below.
        self.train_node = data_node.copy_()
        self.val_node = data_node.copy_()
        self.timestamp = timestamp

    def __call__(self, config, **kwargs):
        """Evaluate *config* under the configured resampling strategy.

        Returns:
            float: the negated validation score (lower is better).

        Raises:
            ValueError: if the resampling strategy is unrecognised.
        """
        start_time = time.time()
        return_dict = dict()
        self.seed = 1
        # Fraction of training data used under the 'partial' strategy.
        downsample_ratio = kwargs.get('resource_ratio', 1.0)
        # Convert Configuration into dictionary
        if not isinstance(config, dict):
            config = config.get_dictionary().copy()
        else:
            config = config.copy()
        if self.fixed_config is not None:
            config.update(self.fixed_config)
        self.estimator_id = config['algorithm']

        if 'holdout' in self.resampling_strategy:
            # Prepare data node.
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                if self.resampling_params is None or 'test_size' not in self.resampling_params:
                    test_size = 0.33
                else:
                    test_size = self.resampling_params['test_size']
                from sklearn.model_selection import ShuffleSplit
                ss = ShuffleSplit(n_splits=1, test_size=test_size, random_state=self.seed)
                for train_index, test_index in ss.split(self.data_node.data[0], self.data_node.data[1]):
                    _x_train, _x_val = self.data_node.data[0][train_index], self.data_node.data[0][test_index]
                    _y_train, _y_val = self.data_node.data[1][train_index], self.data_node.data[1][test_index]
                self.train_node.data = [_x_train, _y_train]
                self.val_node.data = [_x_val, _y_val]
                # Fit feature engineering on the train split, then replay the
                # recorded operations on the validation split.
                data_node, op_list = parse_config(self.train_node, config, record=True)
                _val_node = self.val_node.copy_()
                _val_node = construct_node(_val_node, op_list)
            _x_train, _y_train = data_node.data
            _x_val, _y_val = _val_node.data
            config_dict = config.copy()
            # regression gadgets
            regressor_id, clf = get_estimator(config_dict, self.estimator_id)
            score = validation(clf, self.scorer, _x_train, _y_train, _x_val, _y_val,
                               random_state=self.seed)
            # Persist the model if it is the best seen for this config path.
            if np.isfinite(score):
                model_path = CombinedTopKModelSaver.get_path_by_config(self.output_dir, config, self.timestamp)
                if not os.path.exists(model_path):
                    with open(model_path, 'wb') as f:
                        pkl.dump([op_list, clf, score], f)
                else:
                    with open(model_path, 'rb') as f:
                        _, _, perf = pkl.load(f)
                    if score > perf:
                        with open(model_path, 'wb') as f:
                            pkl.dump([op_list, clf, score], f)
                self.logger.info("Model saved to %s" % model_path)
        elif 'cv' in self.resampling_strategy:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                if 'cv' in self.resampling_strategy:
                    if self.resampling_params is None or 'folds' not in self.resampling_params:
                        folds = 5
                    else:
                        folds = self.resampling_params['folds']
                from sklearn.model_selection import KFold
                kfold = KFold(n_splits=folds, random_state=self.seed, shuffle=False)
                scores = list()
                # Feature engineering is re-fit inside every fold to avoid
                # leaking validation data into the transformations.
                for train_index, test_index in kfold.split(self.data_node.data[0], self.data_node.data[1]):
                    _x_train, _x_val = self.data_node.data[0][train_index], self.data_node.data[0][test_index]
                    _y_train, _y_val = self.data_node.data[1][train_index], self.data_node.data[1][test_index]
                    self.train_node.data = [_x_train, _y_train]
                    self.val_node.data = [_x_val, _y_val]
                    data_node, op_list = parse_config(self.train_node, config, record=True)
                    _val_node = self.val_node.copy_()
                    _val_node = construct_node(_val_node, op_list)
                    _x_train, _y_train = data_node.data
                    _x_val, _y_val = _val_node.data
                    config_dict = config.copy()
                    # regressor gadgets
                    regressor_id, clf = get_estimator(config_dict, self.estimator_id)
                    _score = validation(clf, self.scorer, _x_train, _y_train, _x_val, _y_val,
                                        random_state=self.seed)
                    scores.append(_score)
                score = np.mean(scores)
        elif 'partial' in self.resampling_strategy:
            # Prepare data node.
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                if self.resampling_params is None or 'test_size' not in self.resampling_params:
                    test_size = 0.33
                else:
                    test_size = self.resampling_params['test_size']
                from sklearn.model_selection import ShuffleSplit
                ss = ShuffleSplit(n_splits=1, test_size=test_size, random_state=self.seed)
                for train_index, test_index in ss.split(self.data_node.data[0], self.data_node.data[1]):
                    _x_train, _x_val = self.data_node.data[0][train_index], self.data_node.data[0][test_index]
                    _y_train, _y_val = self.data_node.data[1][train_index], self.data_node.data[1][test_index]
                self.train_node.data = [_x_train, _y_train]
                self.val_node.data = [_x_val, _y_val]
                data_node, op_list = parse_config(self.train_node, config, record=True)
                _val_node = self.val_node.copy_()
                _val_node = construct_node(_val_node, op_list)
            _x_train, _y_train = data_node.data
            # Optionally train on a random subset sized by downsample_ratio.
            if downsample_ratio != 1:
                down_ss = ShuffleSplit(n_splits=1, test_size=downsample_ratio,
                                          random_state=self.seed)
                for _, _val_index in down_ss.split(_x_train, _y_train):
                    _act_x_train, _act_y_train = _x_train[_val_index], _y_train[_val_index]
            else:
                _act_x_train, _act_y_train = _x_train, _y_train
                _val_index = list(range(len(_x_train)))
            _x_val, _y_val = _val_node.data
            config_dict = config.copy()
            # Regressor gadgets
            regressor_id, clf = get_estimator(config_dict, self.estimator_id)
            score = validation(clf, self.scorer, _act_x_train, _act_y_train, _x_val, _y_val,
                               random_state=self.seed)
            # Only full-data (ratio == 1) models are persisted.
            if np.isfinite(score) and downsample_ratio == 1:
                model_path = CombinedTopKModelSaver.get_path_by_config(self.output_dir, config, self.timestamp)
                if not os.path.exists(model_path):
                    with open(model_path, 'wb') as f:
                        pkl.dump([op_list, clf, score], f)
                else:
                    with open(model_path, 'rb') as f:
                        _, _, perf = pkl.load(f)
                    if score > perf:
                        with open(model_path, 'wb') as f:
                            pkl.dump([op_list, clf, score], f)
                self.logger.info("Model saved to %s" % model_path)
        else:
            raise ValueError('Invalid resampling strategy: %s!' % self.resampling_strategy)
        # Best-effort logging only; never let logging break evaluation.
        try:
            self.logger.info('Evaluation<%s> | Score: %.4f | Time cost: %.2f seconds | Shape: %s' %
                             (regressor_id,
                              self.scorer._sign * score,
                              time.time() - start_time, _x_train.shape))
        except:
            pass
        # Turn it into a minimization problem.
        return_dict['objective_value'] = -score
        return -score
| [
"solnml.components.feature_engineering.task_space.get_task_hyperparameter_space",
"os.path.exists",
"solnml.components.feature_engineering.parse.parse_config",
"numpy.mean",
"pickle.dump",
"warnings.catch_warnings",
"pickle.load",
"sklearn.model_selection.ShuffleSplit",
"solnml.components.utils.clas... | [((1220, 1266), 'solnml.components.utils.class_loader.get_combined_candidtates', 'get_combined_candidtates', (['_regressors', '_addons'], {}), '(_regressors, _addons)\n', (1244, 1266), False, 'from solnml.components.utils.class_loader import get_combined_candidtates\n'), ((1511, 1557), 'solnml.components.utils.class_loader.get_combined_candidtates', 'get_combined_candidtates', (['_regressors', '_addons'], {}), '(_regressors, _addons)\n', (1535, 1557), False, 'from solnml.components.utils.class_loader import get_combined_candidtates\n'), ((1872, 1918), 'solnml.components.utils.class_loader.get_combined_candidtates', 'get_combined_candidtates', (['_regressors', '_addons'], {}), '(_regressors, _addons)\n', (1896, 1918), False, 'from solnml.components.utils.class_loader import get_combined_candidtates\n'), ((2195, 2215), 'ConfigSpace.ConfigurationSpace', 'ConfigurationSpace', ([], {}), '()\n', (2213, 2215), False, 'from ConfigSpace import ConfigurationSpace, CategoricalHyperparameter\n'), ((2746, 2907), 'solnml.components.feature_engineering.task_space.get_task_hyperparameter_space', 'get_task_hyperparameter_space', ([], {'task_type': 'task_type', 'include_image': 'include_image', 'include_text': 'include_text', 'include_preprocessors': 'include_preprocessors'}), '(task_type=task_type, include_image=\n include_image, include_text=include_text, include_preprocessors=\n include_preprocessors)\n', (2775, 2907), False, 'from solnml.components.feature_engineering.task_space import get_task_hyperparameter_space\n'), ((4223, 4282), 'solnml.utils.logging_utils.get_logger', 'get_logger', (["(self.__module__ + '.' + self.__class__.__name__)"], {}), "(self.__module__ + '.' 
+ self.__class__.__name__)\n", (4233, 4282), False, 'from solnml.utils.logging_utils import get_logger\n'), ((4509, 4520), 'time.time', 'time.time', ([], {}), '()\n', (4518, 4520), False, 'import time\n'), ((6427, 6519), 'solnml.components.evaluators.evaluate_func.validation', 'validation', (['clf', 'self.scorer', '_x_train', '_y_train', '_x_val', '_y_val'], {'random_state': 'self.seed'}), '(clf, self.scorer, _x_train, _y_train, _x_val, _y_val,\n random_state=self.seed)\n', (6437, 6519), False, 'from solnml.components.evaluators.evaluate_func import validation\n'), ((6563, 6581), 'numpy.isfinite', 'np.isfinite', (['score'], {}), '(score)\n', (6574, 6581), True, 'import numpy as np\n'), ((5060, 5085), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (5083, 5085), False, 'import warnings\n'), ((5103, 5136), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (5126, 5136), False, 'import warnings\n'), ((5448, 5517), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(1)', 'test_size': 'test_size', 'random_state': 'self.seed'}), '(n_splits=1, test_size=test_size, random_state=self.seed)\n', (5460, 5517), False, 'from sklearn.model_selection import ShuffleSplit\n'), ((5997, 6047), 'solnml.components.feature_engineering.parse.parse_config', 'parse_config', (['self.train_node', 'config'], {'record': '(True)'}), '(self.train_node, config, record=True)\n', (6009, 6047), False, 'from solnml.components.feature_engineering.parse import parse_config, construct_node\n'), ((6126, 6160), 'solnml.components.feature_engineering.parse.construct_node', 'construct_node', (['_val_node', 'op_list'], {}), '(_val_node, op_list)\n', (6140, 6160), False, 'from solnml.components.feature_engineering.parse import parse_config, construct_node\n'), ((6612, 6699), 'solnml.components.utils.topk_saver.CombinedTopKModelSaver.get_path_by_config', 'CombinedTopKModelSaver.get_path_by_config', (['self.output_dir', 
'config', 'self.timestamp'], {}), '(self.output_dir, config, self.\n timestamp)\n', (6653, 6699), False, 'from solnml.components.utils.topk_saver import CombinedTopKModelSaver\n'), ((6719, 6745), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (6733, 6745), False, 'import os\n'), ((7276, 7301), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (7299, 7301), False, 'import warnings\n'), ((7319, 7352), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (7342, 7352), False, 'import warnings\n'), ((7710, 7770), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'folds', 'random_state': 'self.seed', 'shuffle': '(False)'}), '(n_splits=folds, random_state=self.seed, shuffle=False)\n', (7715, 7770), False, 'from sklearn.model_selection import KFold\n'), ((8979, 8994), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (8986, 8994), True, 'import numpy as np\n'), ((10948, 11048), 'solnml.components.evaluators.evaluate_func.validation', 'validation', (['clf', 'self.scorer', '_act_x_train', '_act_y_train', '_x_val', '_y_val'], {'random_state': 'self.seed'}), '(clf, self.scorer, _act_x_train, _act_y_train, _x_val, _y_val,\n random_state=self.seed)\n', (10958, 11048), False, 'from solnml.components.evaluators.evaluate_func import validation\n'), ((6825, 6859), 'pickle.dump', 'pkl.dump', (['[op_list, clf, score]', 'f'], {}), '([op_list, clf, score], f)\n', (6833, 6859), True, 'import pickle as pkl\n'), ((6973, 6984), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (6981, 6984), True, 'import pickle as pkl\n'), ((8298, 8348), 'solnml.components.feature_engineering.parse.parse_config', 'parse_config', (['self.train_node', 'config'], {'record': '(True)'}), '(self.train_node, config, record=True)\n', (8310, 8348), False, 'from solnml.components.feature_engineering.parse import parse_config, construct_node\n'), ((8435, 8469), 
'solnml.components.feature_engineering.parse.construct_node', 'construct_node', (['_val_node', 'op_list'], {}), '(_val_node, op_list)\n', (8449, 8469), False, 'from solnml.components.feature_engineering.parse import parse_config, construct_node\n'), ((8784, 8876), 'solnml.components.evaluators.evaluate_func.validation', 'validation', (['clf', 'self.scorer', '_x_train', '_y_train', '_x_val', '_y_val'], {'random_state': 'self.seed'}), '(clf, self.scorer, _x_train, _y_train, _x_val, _y_val,\n random_state=self.seed)\n', (8794, 8876), False, 'from solnml.components.evaluators.evaluate_func import validation\n'), ((9098, 9123), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (9121, 9123), False, 'import warnings\n'), ((9141, 9174), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (9164, 9174), False, 'import warnings\n'), ((9486, 9555), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(1)', 'test_size': 'test_size', 'random_state': 'self.seed'}), '(n_splits=1, test_size=test_size, random_state=self.seed)\n', (9498, 9555), False, 'from sklearn.model_selection import ShuffleSplit\n'), ((10035, 10085), 'solnml.components.feature_engineering.parse.parse_config', 'parse_config', (['self.train_node', 'config'], {'record': '(True)'}), '(self.train_node, config, record=True)\n', (10047, 10085), False, 'from solnml.components.feature_engineering.parse import parse_config, construct_node\n'), ((10164, 10198), 'solnml.components.feature_engineering.parse.construct_node', 'construct_node', (['_val_node', 'op_list'], {}), '(_val_node, op_list)\n', (10178, 10198), False, 'from solnml.components.feature_engineering.parse import parse_config, construct_node\n'), ((10313, 10389), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(1)', 'test_size': 'downsample_ratio', 'random_state': 'self.seed'}), '(n_splits=1, test_size=downsample_ratio, random_state=self.seed)\n', 
(10325, 10389), False, 'from sklearn.model_selection import ShuffleSplit\n'), ((11092, 11110), 'numpy.isfinite', 'np.isfinite', (['score'], {}), '(score)\n', (11103, 11110), True, 'import numpy as np\n'), ((11167, 11254), 'solnml.components.utils.topk_saver.CombinedTopKModelSaver.get_path_by_config', 'CombinedTopKModelSaver.get_path_by_config', (['self.output_dir', 'config', 'self.timestamp'], {}), '(self.output_dir, config, self.\n timestamp)\n', (11208, 11254), False, 'from solnml.components.utils.topk_saver import CombinedTopKModelSaver\n'), ((7108, 7142), 'pickle.dump', 'pkl.dump', (['[op_list, clf, score]', 'f'], {}), '([op_list, clf, score], f)\n', (7116, 7142), True, 'import pickle as pkl\n'), ((11274, 11300), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (11288, 11300), False, 'import os\n'), ((12118, 12129), 'time.time', 'time.time', ([], {}), '()\n', (12127, 12129), False, 'import time\n'), ((11380, 11414), 'pickle.dump', 'pkl.dump', (['[op_list, clf, score]', 'f'], {}), '([op_list, clf, score], f)\n', (11388, 11414), True, 'import pickle as pkl\n'), ((11528, 11539), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (11536, 11539), True, 'import pickle as pkl\n'), ((11663, 11697), 'pickle.dump', 'pkl.dump', (['[op_list, clf, score]', 'f'], {}), '([op_list, clf, score], f)\n', (11671, 11697), True, 'import pickle as pkl\n')] |
#
# MLDB-1104-input-data-spec.py
# mldb.ai inc, 2015
# This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
#
import unittest
import datetime
import random
from mldb import mldb, ResponseException
class InputDataSpecTest(unittest.TestCase):
    """Exercise the 'trainingData' input specification accepted by MLDB
    training procedures (kmeans.train, svd.train, classifier.train).

    Both the SQL-string form and the structured-dict form of the input
    data spec are tested, along with invalid forms (GROUP BY / HAVING),
    which must be rejected with an error response.
    """

    @classmethod
    def setUpClass(cls):
        # Build both fixture datasets once for the whole test class.
        cls.load_kmeans_dataset()
        cls.load_classifier_dataset()

    @classmethod
    def load_kmeans_dataset(cls):
        """Create dataset 'kmeans_example': 100 rows of random integer (x, y) points."""
        kmeans_example = mldb.create_dataset({
            "type": "sparse.mutable",
            'id' : 'kmeans_example'
        })

        now = datetime.datetime.now()

        for i in range(100):
            val_x = float(random.randint(-5, 5))
            val_y = float(random.randint(-5, 5))
            row = [['x', val_x, now], ['y', val_y, now]]
            kmeans_example.record_row('row_%d' % i, row)
        kmeans_example.commit()

    def train_kmeans(self, training_data):
        """Run a kmeans.train procedure over *training_data* (SQL string or dict)."""
        metric = "euclidean"
        mldb.put("/v1/procedures/kmeans", {
            'type' : 'kmeans.train',
            'params' : {
                'trainingData' : training_data,
                'centroidsDataset' : {
                    'id' : 'kmeans_centroids',
                    'type' : 'embedding',
                    'params': {
                        'metric': metric
                    }
                },
                'numClusters' : 2,
                'metric': metric
            }
        })

    def train_svd(self, training_data):
        """Run an svd.train procedure over *training_data* (SQL string or dict)."""
        mldb.put("/v1/procedures/svd", {
            'type' : 'svd.train',
            'params' : {
                'trainingData' : training_data,
                'runOnCreation' : True
            }
        })

    @classmethod
    def load_classifier_dataset(cls):
        """Create dataset 'iris_dataset' from the bundled iris.data CSV file.

        Column names deliberately collide with special-looking names
        ('label', 'labels', 'features') to stress input-data-spec parsing.
        """
        dataset = mldb.create_dataset({
            "type": "sparse.mutable",
            "id": "iris_dataset"
        })

        with open("./mldb/testing/dataset/iris.data") as f:
            for i, line in enumerate(f):
                cols = []
                line_split = line.split(',')
                # Skip malformed lines (the iris CSV has 5 fields per row).
                if len(line_split) != 5:
                    continue
                # What if a feature is named 'label'? (intentional name clash)
                cols.append(["label", float(line_split[0]), 0]) # sepal length
                cols.append(["labels", float(line_split[1]), 0]) # sepal width
                cols.append(["petal length", float(line_split[2]), 0])
                cols.append(["petal width", float(line_split[3]), 0])
                cols.append(["features", line_split[4].strip('\n"'), 0]) #class
                dataset.record_row(str(i+1), cols)
        dataset.commit()

    def train_classifier(self, training_data):
        """Run a categorical classifier.train over *training_data*; return the JSON response."""
        result = mldb.put("/v1/procedures/classifier", {
            'type' : 'classifier.train',
            'params' : {
                'trainingData' : training_data,
                "configuration": {
                    "type": "decision_tree",
                    "max_depth": 8,
                    "verbosity": 3,
                    "update_alg": "prob"
                },
                "modelFileUrl": "file://tmp/MLDB-1104.cls",
                "mode": "categorical",
                "functionName": "classifier_apply",
                'runOnCreation' : True
            }
        })
        return result.json()

    def test_train_kmeans(self):
        # KMEANS TRAIN PROCEDURE WITH BOTH TYPE OF INPUT DATA
        self.train_kmeans('select * from kmeans_example')
        self.train_kmeans('select x + y as x, y + x as y from kmeans_example')
        self.train_kmeans({'select' : '*', 'from' : {'id' : 'kmeans_example'}})

        # TEST ERROR CASE: GROUP BY / HAVING are not valid input data specs
        with self.assertRaises(ResponseException):
            self.train_kmeans(
                'select x, y from kmeans_example group by x')

        with self.assertRaises(ResponseException):
            self.train_kmeans(
                'select x, y from kmeans_example group by x having y > 2')

    def test_train_svd(self):
        # svd.train must accept plain selects, renames, exclusions,
        # structured dicts and expressions.
        self.train_svd('select * from kmeans_example')
        self.train_svd('select x, y from kmeans_example')
        self.train_svd('select x AS z, y from kmeans_example')
        self.train_svd('select * EXCLUDING(x) from kmeans_example')
        self.train_svd({'select' : '*', 'from' : {'id' : 'kmeans_example'}})
        self.train_svd('select x + 1, y from kmeans_example')

        # Error cases: GROUP BY / HAVING must be rejected.
        with self.assertRaises(ResponseException):
            self.train_svd('select x, y from kmeans_example group by x')

        with self.assertRaises(ResponseException):
            self.train_svd(
                'select x, y from kmeans_example group by x having y > 2')

    def test_train_classifier(self):
        # Train using the clashing column names, then score the training set.
        mldb.log(self.train_classifier(
            "select {label, labels} as features, features as label "
            "from iris_dataset"))

        result = mldb.get(
            "/v1/query",
            q="SELECT classifier_apply({{label, labels} as features}) as *, features from iris_dataset")

        rows = result.json()
        mldb.log("-------------------------------");
        mldb.log(rows)

        # compare the classifier results on the train data with the original
        # label
        count = 0
        for row in rows:
            _max = 0
            category = ""
            # Columns 1..3 hold the per-class scores; pick the arg-max class.
            for column in row['columns'][1:4]:
                if column[1] > _max:
                    _max = column[1]
                    # remove the leading scores. and quotation marks
                    category = column[0][10:-3]
            if category != row['columns'][0][1]:
                count += 1

        # misclassified result should be a small fraction
        self.assertTrue(
            float(count) / len(rows) < 0.2,
            'the classifier results on the train data are strangely low')
if __name__ == '__main__':
    # MLDB's in-process test harness entry point (collects the TestCase above).
    mldb.run_tests()
| [
"mldb.mldb.create_dataset",
"random.randint",
"mldb.mldb.put",
"mldb.mldb.run_tests",
"datetime.datetime.now",
"mldb.mldb.log",
"mldb.mldb.get"
] | [((5813, 5829), 'mldb.mldb.run_tests', 'mldb.run_tests', ([], {}), '()\n', (5827, 5829), False, 'from mldb import mldb, ResponseException\n'), ((458, 529), 'mldb.mldb.create_dataset', 'mldb.create_dataset', (["{'type': 'sparse.mutable', 'id': 'kmeans_example'}"], {}), "({'type': 'sparse.mutable', 'id': 'kmeans_example'})\n", (477, 529), False, 'from mldb import mldb, ResponseException\n'), ((579, 602), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (600, 602), False, 'import datetime\n'), ((957, 1207), 'mldb.mldb.put', 'mldb.put', (['"""/v1/procedures/kmeans"""', "{'type': 'kmeans.train', 'params': {'trainingData': training_data,\n 'centroidsDataset': {'id': 'kmeans_centroids', 'type': 'embedding',\n 'params': {'metric': metric}}, 'numClusters': 2, 'metric': metric}}"], {}), "('/v1/procedures/kmeans', {'type': 'kmeans.train', 'params': {\n 'trainingData': training_data, 'centroidsDataset': {'id':\n 'kmeans_centroids', 'type': 'embedding', 'params': {'metric': metric}},\n 'numClusters': 2, 'metric': metric}})\n", (965, 1207), False, 'from mldb import mldb, ResponseException\n'), ((1487, 1611), 'mldb.mldb.put', 'mldb.put', (['"""/v1/procedures/svd"""', "{'type': 'svd.train', 'params': {'trainingData': training_data,\n 'runOnCreation': True}}"], {}), "('/v1/procedures/svd', {'type': 'svd.train', 'params': {\n 'trainingData': training_data, 'runOnCreation': True}})\n", (1495, 1611), False, 'from mldb import mldb, ResponseException\n'), ((1765, 1834), 'mldb.mldb.create_dataset', 'mldb.create_dataset', (["{'type': 'sparse.mutable', 'id': 'iris_dataset'}"], {}), "({'type': 'sparse.mutable', 'id': 'iris_dataset'})\n", (1784, 1834), False, 'from mldb import mldb, ResponseException\n'), ((2693, 3043), 'mldb.mldb.put', 'mldb.put', (['"""/v1/procedures/classifier"""', "{'type': 'classifier.train', 'params': {'trainingData': training_data,\n 'configuration': {'type': 'decision_tree', 'max_depth': 8, 'verbosity':\n 3, 'update_alg': 'prob'}, 
'modelFileUrl': 'file://tmp/MLDB-1104.cls',\n 'mode': 'categorical', 'functionName': 'classifier_apply',\n 'runOnCreation': True}}"], {}), "('/v1/procedures/classifier', {'type': 'classifier.train', 'params':\n {'trainingData': training_data, 'configuration': {'type':\n 'decision_tree', 'max_depth': 8, 'verbosity': 3, 'update_alg': 'prob'},\n 'modelFileUrl': 'file://tmp/MLDB-1104.cls', 'mode': 'categorical',\n 'functionName': 'classifier_apply', 'runOnCreation': True}})\n", (2701, 3043), False, 'from mldb import mldb, ResponseException\n'), ((4836, 4960), 'mldb.mldb.get', 'mldb.get', (['"""/v1/query"""'], {'q': '"""SELECT classifier_apply({{label, labels} as features}) as *, features from iris_dataset"""'}), "('/v1/query', q=\n 'SELECT classifier_apply({{label, labels} as features}) as *, features from iris_dataset'\n )\n", (4844, 4960), False, 'from mldb import mldb, ResponseException\n'), ((5013, 5056), 'mldb.mldb.log', 'mldb.log', (['"""-------------------------------"""'], {}), "('-------------------------------')\n", (5021, 5056), False, 'from mldb import mldb, ResponseException\n'), ((5066, 5080), 'mldb.mldb.log', 'mldb.log', (['rows'], {}), '(rows)\n', (5074, 5080), False, 'from mldb import mldb, ResponseException\n'), ((658, 679), 'random.randint', 'random.randint', (['(-5)', '(5)'], {}), '(-5, 5)\n', (672, 679), False, 'import random\n'), ((707, 728), 'random.randint', 'random.randint', (['(-5)', '(5)'], {}), '(-5, 5)\n', (721, 728), False, 'import random\n')] |
import os.path
from pi3d import *
from pi3d.Buffer import Buffer
from pi3d.Shape import Shape
from pi3d.Texture import Texture
# Face order for environment-cube textures; BOTTOM_INDEX is the position of
# the 'bottom' face within CUBE_PARTS (used to skip it when nobottom is set).
CUBE_PARTS = ['front', 'right', 'top', 'bottom', 'left', 'back']
BOTTOM_INDEX = 3
def loadECfiles(path, fname, suffix='jpg', nobottom=False):
  """Load the six (or five) face textures of an environment cube.

  TODO nobottom will redraw the top on the bottom of cube. It really should
  substitute a blank (black) texture instead!

  Arguments:
    *path*
      to the image files relative to the top directory.
    *fname*
      The stem of the file name without the _top, _bottom, _right etc.

  Keyword arguments:
    *suffix*
      String to add after the '_top','_bottom' has been added to the stem.
    *nobottom*
      If True then only load five parts into array the bottom will be
      drawn with the previous image i.e. top.
  """
  # Pick which face names to load; drop 'bottom' when requested.
  if nobottom:
    face_names = [face for face in CUBE_PARTS if face != 'bottom']
  else:
    face_names = CUBE_PARTS
  # Build one Texture per face file, e.g. <path>/<fname>_top.<suffix>
  textures = []
  for face in face_names:
    face_path = os.path.join(path, '%s_%s.%s' % (fname, face, suffix))
    textures.append(Texture(face_path))
  return textures
class EnvironmentCube(Shape):
  """ 3d model inherits from Shape: an inward-facing textured cube used as
  a skybox / environment background."""
  def __init__(self, camera=None, light=None, size=500.0, maptype="HALFCROSS", name="", x=0.0, y=0.0, z=0.0,
               rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0, nobottom=False):
    """uses standard constructor for Shape extra Keyword arguments:

      *size*
        Dimensions of the cube
      *maptype*
        HALFCROSS (default) or CROSS any other defaults to CUBE type
        and will require 6 (or 5 with nobottom) image files to render it
    """
    super(EnvironmentCube,self).__init__(camera, light, name, x, y, z, rx, ry, rz,
                                         1.0, 1.0, 1.0, cx, cy, cz)

    self.width = size
    self.height = size
    self.depth = size
    self.ssize = 36
    self.ttype = GL_TRIANGLES
    self.nobottom = nobottom

    # Half-extents: vertices are placed symmetrically about the origin.
    ww = size / 2.0
    hh = size / 2.0
    dd = size / 2.0

    #cuboid data - faces are separated out for texturing..
    # 24 vertices: 4 per face, in CUBE_PARTS order
    # (front, right, top, bottom, left, back).
    self.vertices = ((-ww, hh, dd), (ww, hh, dd), (ww,-hh, dd), (-ww, -hh, dd),
        (ww, hh, dd), (ww, hh, -dd), (ww, -hh, -dd), (ww, -hh, dd),
        (-ww, hh, dd), (-ww, hh, -dd), (ww, hh, -dd), (ww, hh, dd),
        (ww, -hh, dd), (ww, -hh, -dd), (-ww, -hh, -dd),(-ww, -hh, dd),
        (-ww, -hh, dd),(-ww, -hh, -dd),(-ww, hh, -dd), (-ww, hh, dd),
        (-ww, hh, -dd),(ww, hh, -dd), (ww, -hh, -dd), (-ww,-hh,-dd))
    # One axis-aligned normal per vertex, constant within each face.
    self.normals = ((0.0, 0.0, 1), (0.0, 0.0, 1), (0.0, 0.0, 1), (0.0, 0.0, 1),
        (1, 0.0, 0), (1, 0.0, 0), (1, 0.0, 0), (1, 0.0, 0),
        (0.0, 1, 0), (0.0, 1, 0), (0.0, 1, 0), (0.0, 1, 0),
        (0.0, -1, 0), (0,- 1, 0), (0.0, -1, 0), (0.0, -1, 0),
        (-1, 0.0, 0), (-1, 0.0, 0), (-1, 0.0, 0), (-1, 0.0, 0),
        (0.0, 0.0, -1),(0.0, 0.0, -1),(0.0, 0.0, -1), (0.0, 0.0, -1))
    # Two triangles per face, winding chosen so faces point inwards.
    self.indices = ((3, 0, 1), (2, 3, 1), (7, 4, 5), (6, 7, 5),
        (11, 8, 9), (10, 11, 9), (15, 12, 13), (14, 15, 13),
        (17, 18, 19),(16, 17, 19),(22, 21, 20), (23, 22, 20))

    if maptype == "CROSS":
      # Single image laid out as an unfolded cross; one Buffer, one texture.
      self.tex_coords = ((1.0, 0.34), (0.75, 0.34), (0.75, 0.661), (1.0, 0.661), #front
        (0.75, 0.34), (0.5, 0.34), (0.5, 0.661), (0.75, 0.661), #right
        (0.251, 0.0), (0.251, 0.34), (0.498, 0.34), (0.498, 0.0), #top
        (0.498, 0.998), (0.498, 0.66), (0.251, 0.66), (0.251, 0.998), #bottom
        (0.0, 0.661), (0.25, 0.661), (0.25, 0.34), (0.0, 0.34), #left
        (0.25, 0.34), (0.5, 0.34), (0.5, 0.661), (0.25, 0.661)) #back

      self.buf = []
      self.buf.append(Buffer(self, self.vertices, self.tex_coords, self.indices, self.normals))

    elif maptype == "HALFCROSS":
      # Single image, half-cross layout (some coords deliberately out of
      # [0,1] and rely on texture wrapping); one Buffer, one texture.
      self.tex_coords = ((0.25,0.25), (0.25,0.75), (-0.25,0.75), (-0.25,0.25), #front
        (0.25,0.75), (0.75,0.75), (0.75,1.25), (0.25,1.25), #right
        (0.25,0.25), (0.75,0.25), (0.75,0.75), (0.25,0.75), #top
        (0,0), (1,0), (1,1), (0,1), #bottom
        (0.25,-0.25), (0.75,-0.25), (0.75,0.25), (0.25,0.25), #left
        (0.75,0.25), (0.75,0.75), (1.25,0.75), (1.25,0.25)) #back

      self.buf = []
      self.buf.append(Buffer(self, self.vertices, self.tex_coords, self.indices, self.normals))

    else:
      # CUBE type: each face gets its own Buffer so a separate image can be
      # bound per face (see set_draw_details below).
      self.tex_coords = ((0.002,0.002), (0.998,0.002), (0.998,0.998),(0.002,0.998),
        (0.002,0.002), (0.998,0.002), (0.998,0.998), (0.002,0.998),
        (0.002,0.998), (0.002,0.002), (0.998,0.002), (0.998,0.998),
        (0.998,0.002), (0.998,0.998), (0.002,0.998), (0.002,0.002),
        (0.998,0.998), (0.002,0.998), (0.002,0.002), (0.998,0.002),
        (0.998,0.002), (0.002,0.002), (0.002,0.998), (0.998,0.998))

      self.buf = []
      self.buf.append(Buffer(self, self.vertices[0:4], self.tex_coords[0:4], ((3,0,1), (2,3,1)), self.normals[0:4])) #front
      self.buf.append(Buffer(self, self.vertices[4:8], self.tex_coords[4:8], ((3,0,1), (2,3,1)), self.normals[4:8])) #right
      self.buf.append(Buffer(self, self.vertices[8:12], self.tex_coords[8:12], ((3,0,1), (2,3,1)), self.normals[8:12])) #top
      self.buf.append(Buffer(self, self.vertices[12:16], self.tex_coords[12:16], ((3,0,1), (2,3,1)), self.normals[12:16])) #bottom
      self.buf.append(Buffer(self, self.vertices[16:20], self.tex_coords[16:20], ((3,0,1), (2,3,1)), self.normals[16:20])) #left
      self.buf.append(Buffer(self, self.vertices[20:24], self.tex_coords[20:24], ((3,1,0), (2,1,3)), self.normals[20:24])) #back

  def set_draw_details(self, shader, textures, ntiles=0.0, shiny=0.0, umult=1.0, vmult=1.0):
    """overrides this method in Shape to cope with nobottom option"""
    if not (type(textures) is list):
      textures = [textures]
    elif len(textures) == 5:
      # this should be the only circumstance. Saves setting it in the constructor
      self.nobottom = True
    for i, b in enumerate(self.buf):
      # With nobottom, the bottom face (index BOTTOM_INDEX) re-uses the
      # previous texture in the list (i.e. the top image).
      j = i - 1 if (self.nobottom and i >= BOTTOM_INDEX) else i
      b.set_draw_details(shader, [textures[j]], ntiles, shiny, umult, vmult)
| [
"pi3d.Texture.Texture",
"pi3d.Buffer.Buffer"
] | [((1046, 1056), 'pi3d.Texture.Texture', 'Texture', (['f'], {}), '(f)\n', (1053, 1056), False, 'from pi3d.Texture import Texture\n'), ((3594, 3666), 'pi3d.Buffer.Buffer', 'Buffer', (['self', 'self.vertices', 'self.tex_coords', 'self.indices', 'self.normals'], {}), '(self, self.vertices, self.tex_coords, self.indices, self.normals)\n', (3600, 3666), False, 'from pi3d.Buffer import Buffer\n'), ((4141, 4213), 'pi3d.Buffer.Buffer', 'Buffer', (['self', 'self.vertices', 'self.tex_coords', 'self.indices', 'self.normals'], {}), '(self, self.vertices, self.tex_coords, self.indices, self.normals)\n', (4147, 4213), False, 'from pi3d.Buffer import Buffer\n'), ((4713, 4815), 'pi3d.Buffer.Buffer', 'Buffer', (['self', 'self.vertices[0:4]', 'self.tex_coords[0:4]', '((3, 0, 1), (2, 3, 1))', 'self.normals[0:4]'], {}), '(self, self.vertices[0:4], self.tex_coords[0:4], ((3, 0, 1), (2, 3, 1\n )), self.normals[0:4])\n', (4719, 4815), False, 'from pi3d.Buffer import Buffer\n'), ((4837, 4939), 'pi3d.Buffer.Buffer', 'Buffer', (['self', 'self.vertices[4:8]', 'self.tex_coords[4:8]', '((3, 0, 1), (2, 3, 1))', 'self.normals[4:8]'], {}), '(self, self.vertices[4:8], self.tex_coords[4:8], ((3, 0, 1), (2, 3, 1\n )), self.normals[4:8])\n', (4843, 4939), False, 'from pi3d.Buffer import Buffer\n'), ((4961, 5065), 'pi3d.Buffer.Buffer', 'Buffer', (['self', 'self.vertices[8:12]', 'self.tex_coords[8:12]', '((3, 0, 1), (2, 3, 1))', 'self.normals[8:12]'], {}), '(self, self.vertices[8:12], self.tex_coords[8:12], ((3, 0, 1), (2, 3,\n 1)), self.normals[8:12])\n', (4967, 5065), False, 'from pi3d.Buffer import Buffer\n'), ((5086, 5194), 'pi3d.Buffer.Buffer', 'Buffer', (['self', 'self.vertices[12:16]', 'self.tex_coords[12:16]', '((3, 0, 1), (2, 3, 1))', 'self.normals[12:16]'], {}), '(self, self.vertices[12:16], self.tex_coords[12:16], ((3, 0, 1), (2, \n 3, 1)), self.normals[12:16])\n', (5092, 5194), False, 'from pi3d.Buffer import Buffer\n'), ((5217, 5325), 'pi3d.Buffer.Buffer', 'Buffer', (['self', 
'self.vertices[16:20]', 'self.tex_coords[16:20]', '((3, 0, 1), (2, 3, 1))', 'self.normals[16:20]'], {}), '(self, self.vertices[16:20], self.tex_coords[16:20], ((3, 0, 1), (2, \n 3, 1)), self.normals[16:20])\n', (5223, 5325), False, 'from pi3d.Buffer import Buffer\n'), ((5346, 5454), 'pi3d.Buffer.Buffer', 'Buffer', (['self', 'self.vertices[20:24]', 'self.tex_coords[20:24]', '((3, 1, 0), (2, 1, 3))', 'self.normals[20:24]'], {}), '(self, self.vertices[20:24], self.tex_coords[20:24], ((3, 1, 0), (2, \n 1, 3)), self.normals[20:24])\n', (5352, 5454), False, 'from pi3d.Buffer import Buffer\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
He copiado y modificado software ajeno. Gran parte de este script es una
modificación y/o mejora del original, por ello doy los debidos créditos al
autor del software original:
MIT License
Copyright (c) 2016 - 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
ORIGINAL SCRIPT: []
"""
import discord
from discord.ext import commands
import kiwi_config
from .scpUtils import UTC_TIME
class Mod:
    '''Comandos generales para administradores y moderadores'''
    # NOTE: the command docstrings below are user-facing help text shown by
    # the discord.py help command, so they are kept in Spanish on purpose.

    def __init__(self, bot):
        self.bot = bot
        # Channel object used as the moderation audit log.
        self.log = kiwi_config.__log_channel__

    async def _recording(self, msg):
        """Send a moderation-log entry (UTC timestamp, then message) to the log channel."""
        await self.log.send('**{}**'.format(UTC_TIME.get_time()))
        await self.log.send(msg)

    @commands.group(hidden=True, aliases=['purgame'],
                    description="Elimina mis odiosos mensajes (MOD)")
    @commands.has_any_role('AT Mod', 'AT Admin')  # fix: role names are varargs, not a list
    async def eliminar(self, ctx, limit: int):
        """Ahora podrás hacer la limpieza del chat mucho más rápida
        y sencilla con este comando. ¡Si! Borro tonebytes de mensajes
        por ti, bag@-sempai! ^^

        **::Sintaxis::**
        -----------
        keliminar <n de mensajes> (Véase Subcomandos)

        **::Ejemplo::**
        ---------
        >>> keliminar 200

        Esto debería eliminar los primeros 200 mensajes del canal en
        donde se haya activado el comando.

        **::Subcomandos::**
        ---------
        * Con "keliminar me <n de mensajes>" eliminaras mis mensajes.
        * Con "keliminar a <miembro> <n de mensajes>" eliminaras los mensajes de
        esas odiosas ratas llamadas miembros.
        """
        if ctx.invoked_subcommand is None:
            # Fix: purge() takes no positional channel argument and its return
            # value (the deleted messages) was never captured.
            deleted = await ctx.channel.purge(limit=limit)
            await ctx.send('Fueron eliminados {} mensaje(s)'.format(len(deleted)))

    @eliminar.command(name='me', description='Borra mis mensajes')
    async def me(self, ctx, limit: int):
        '''Con esto borraras mis odiosos mensajes.
        Misma sintaxis que eliminar.'''
        def is_me(m):
            # Only messages authored by the bot itself.
            return m.author.id == ctx.bot.user.id

        deleted = await ctx.channel.purge(limit=limit, check=is_me)
        await ctx.send('Fueron eliminados {} mensaje(s)'.format(len(deleted)))

    @eliminar.command(name='a', description='Borra los mensajes de un miembro')
    async def a(self, ctx, member: discord.Member, limit: int):
        '''Con esto borraras los mensajes de aquellos hijos del averno que nunca
        debieron pisar un pie en la tierra.
        Misma sintaxis que eliminar, sólo que agregando una mención miembro al
        que le deseas borrar los mensajes.'''
        def is_member(m):
            # Only messages authored by the targeted member.
            return m.author.id == member.id

        deleted = await ctx.channel.purge(limit=limit, check=is_member)
        await ctx.send('Fueron eliminados {} mensaje(s)'.format(len(deleted)))

    @commands.command(hidden=True, aliases=['patear'],
                      description="Kickea a tus enemigos-nya (MOD)")
    @commands.has_any_role('AT Mod', 'AT Admin')
    async def kick(self, ctx, member: discord.Member = None, *, reason=None):
        """
        (COMANDO DE MODERACIÓN)

        **::Sintaxis::**
        ---------
        kkick <mención al usuario> <razón>

        **::Ejemplo::**
        ---------
        >>> kkick <@kiwi> Por ser mala

        Esto debería enviarme un Mensaje Privado con la razón del kick.
        """
        # Guard: member defaults to None when no mention could be resolved.
        if member is None:
            await ctx.send('Nyaa~ necesito una mención al usuario ^^')
            return

        if member in ctx.message.guild.members:
            if reason:
                await member.kick(reason=reason)
                re = '{} ha sido kickeado del {} debido a: {}'
                await self._recording(re.format(member.name, ctx.message.guild.name, reason))
            else:
                await member.kick()
                # Fix: the original message had three placeholders for two args.
                re = '{} ha sido kickeado del {}, las razones no fueron especificadas'
                await self._recording(re.format(member.name, ctx.message.guild.name))
        else:
            # Fix: '{1.name.mention}' called .mention on a str (AttributeError).
            msg = "Jeje {0.message.author.mention}-tan, "
            msg += "{1.name} no es miembro de este servidor, tal vez ni existe ^^"
            await ctx.send(msg.format(ctx, member))

    @commands.command(hidden=True, description="DESTROZA a tus enemigos-nya (MOD)")
    @commands.has_any_role('AT Mod', 'AT Admin')
    async def ban(self, ctx, member: discord.Member, delete_message_days = 0, *, reason=None):
        """
        (COMANDO DE MODERACIÓN)

        **::Sintaxis::**
        ---------
        kban <mención al usuario> <mensajes a eliminar> <razón>

        <mensajes a eliminar> es igual al número de días desde cuando se quiere borrar los mensajes
        del usuario a banear.

        **::Ejemplo::**
        ---------
        >>> kban <@kiwi> 2 Por ser mala

        Borrará los mensajes desde hace 2 días al presente y me enviará un MP con la razón del baneo.
        """
        if member in ctx.message.guild.members:
            if delete_message_days != 0:
                if reason:
                    await member.ban(delete_message_days=int(delete_message_days), reason=reason)
                    # Fix: log message said 'kickeado' for a ban.
                    re = '{} ha sido banneado del {} por {}'
                    await self._recording(re.format(member.name, ctx.message.guild.name, reason))
                else:
                    await member.ban(delete_message_days=int(delete_message_days))
                    re = '{} ha sido banneado del {}, las razones no fueron especificadas'
                    await self._recording(re.format(member.name, ctx.message.guild.name))
            else:
                # Fix: '{1.name.mention}' called .mention on a str (AttributeError).
                msg = "{0.message.author.mention}-tan, ¿quieres borrar de la existencia a {1.name}-nya? "
                msg += "Si es así, dígame de 0 a 7 desde cuantos días atrás empiezo a borrar sus mensajes."
                await ctx.send(msg.format(ctx, member))
        else:
            msg = "Jeje {0.message.author.mention}-tan, "
            msg += "{1.name} no es miembro de este servidor, tal vez ni existe ^^"
            await ctx.send(msg.format(ctx, member))

    @commands.command(hidden=True, description="Renombra a tus sirvientes o3o")
    @commands.has_any_role('AT Mod', 'AT Admin')
    async def renombrar(self, ctx, member: discord.Member, *, new_name):
        # Fix: the command callback was missing the mandatory ctx parameter,
        # so discord.py would have passed the Context as `member`.
        old_name = member.name
        log_msg = "{0.message.author} a renombrado a {1} como {2}"
        await self._recording(log_msg.format(ctx, old_name, new_name))
        # Fix: bot.change_nickname does not exist in the rewrite API used by
        # the rest of this cog; nicknames are changed via Member.edit().
        await member.edit(nick=new_name)

    @commands.command(hidden=True, aliases=['darr'],
                      description='Dale roles a tus compañeros')
    @commands.has_any_role('AT Mod', 'AT Admin')
    async def darRol(self, ctx, member: discord.Member=None, *, rankName: str):
        '''¡Con este comando puedes darle roles a los miembros!
        Esto sin mover mucho las manos ^.^

        **::Sintaxis::**
        -----------
        kdarRol <mención al usuario> <rango a dar>

        **::Ejemplo::**
        ---------
        >>> kdarRol <@kiwi> Kiwi-sempai

        ¡Esto me dará el rango Kiwi-sempai si tu servidor lo tiene! OuO
        '''
        rank = discord.utils.get(ctx.guild.roles, name=rankName)
        # Guard: utils.get returns None when no role has that name.
        if rank is None:
            await ctx.send('¿Eh? No encuentro ese rango')
            return
        if member is not None:
            await member.add_roles(rank)
            # Fix: these messages were plain strings, not f-strings, so the
            # placeholders were sent literally.
            await ctx.send(f'¡**{member.mention}**! Has obtenido el rango **{rank.name}**')
        else:
            await ctx.send('¿Eh? No encuentro a ese miembro')

    @commands.command(hidden=True, aliases=['rmrole'],
                      description='Quitale los roles a quienes no se los merecen')
    @commands.has_any_role('AT Mod', 'AT Admin')
    async def quitarRol(self, ctx, member: discord.Member=None, *, rankName: str):
        '''Con este comando podrás desterrar de sus escaños a los miembros con roles
        prestigiosos! Igual que con darRol, no moverás mucho las manos ^.^

        **::Sintaxis::**
        -----------
        kquitarRol <mención al usuario> <rango a quitar>

        **::Ejemplo::**
        ---------
        >>> kquitarRol <@kiwi> Kiwi-sempai

        Ya no seré tu sempai T.T
        '''
        rank = discord.utils.get(ctx.guild.roles, name=rankName)
        # Guard: utils.get returns None when no role has that name.
        if rank is None:
            await ctx.send('¿Eh? No encuentro ese rango')
            return
        if member is not None:
            await member.remove_roles(rank)
            # Fix: missing f-prefix (placeholders were sent literally).
            await ctx.send(f'¡**{member.mention}**! Ya no tienes el rango **{rank.name}**')
        else:
            await ctx.send('¿Eh? No encuentro a ese miembro')
def setup(bot):
    # discord.py extension entry point: register the Mod cog when the
    # extension is loaded with bot.load_extension().
    bot.add_cog(Mod(bot))
| [
"discord.ext.commands.group",
"discord.utils.get",
"discord.ext.commands.has_any_role",
"discord.ext.commands.command"
] | [((1823, 1926), 'discord.ext.commands.group', 'commands.group', ([], {'hidden': '(True)', 'aliases': "['purgame']", 'description': '"""Elimina mis odiosos mensajes (MOD)"""'}), "(hidden=True, aliases=['purgame'], description=\n 'Elimina mis odiosos mensajes (MOD)')\n", (1837, 1926), False, 'from discord.ext import commands\n'), ((1932, 1977), 'discord.ext.commands.has_any_role', 'commands.has_any_role', (["['AT Mod', 'AT Admin']"], {}), "(['AT Mod', 'AT Admin'])\n", (1953, 1977), False, 'from discord.ext import commands\n'), ((3788, 3889), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'aliases': "['patear']", 'description': '"""Kickea a tus enemigos-nya (MOD)"""'}), "(hidden=True, aliases=['patear'], description=\n 'Kickea a tus enemigos-nya (MOD)')\n", (3804, 3889), False, 'from discord.ext import commands\n'), ((3897, 3942), 'discord.ext.commands.has_any_role', 'commands.has_any_role', (["['AT Mod', 'AT Admin']"], {}), "(['AT Mod', 'AT Admin'])\n", (3918, 3942), False, 'from discord.ext import commands\n'), ((4875, 4953), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'description': '"""DESTROZA a tus enemigos-nya (MOD)"""'}), "(hidden=True, description='DESTROZA a tus enemigos-nya (MOD)')\n", (4891, 4953), False, 'from discord.ext import commands\n'), ((4957, 5002), 'discord.ext.commands.has_any_role', 'commands.has_any_role', (["['AT Mod', 'AT Admin']"], {}), "(['AT Mod', 'AT Admin'])\n", (4978, 5002), False, 'from discord.ext import commands\n'), ((6546, 6620), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'description': '"""Renombra a tus sirvientes o3o"""'}), "(hidden=True, description='Renombra a tus sirvientes o3o')\n", (6562, 6620), False, 'from discord.ext import commands\n'), ((6624, 6669), 'discord.ext.commands.has_any_role', 'commands.has_any_role', (["['AT Mod', 'AT Admin']"], {}), "(['AT Mod', 'AT Admin'])\n", (6645, 6669), False, 'from discord.ext 
import commands\n'), ((6953, 7048), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'aliases': "['darr']", 'description': '"""Dale roles a tus compañeros"""'}), "(hidden=True, aliases=['darr'], description=\n 'Dale roles a tus compañeros')\n", (6969, 7048), False, 'from discord.ext import commands\n'), ((7056, 7101), 'discord.ext.commands.has_any_role', 'commands.has_any_role', (["['AT Mod', 'AT Admin']"], {}), "(['AT Mod', 'AT Admin'])\n", (7077, 7101), False, 'from discord.ext import commands\n'), ((7780, 7895), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'aliases': "['rmrole']", 'description': '"""Quitale los roles a quienes no se los merecen"""'}), "(hidden=True, aliases=['rmrole'], description=\n 'Quitale los roles a quienes no se los merecen')\n", (7796, 7895), False, 'from discord.ext import commands\n'), ((7903, 7948), 'discord.ext.commands.has_any_role', 'commands.has_any_role', (["['AT Mod', 'AT Admin']"], {}), "(['AT Mod', 'AT Admin'])\n", (7924, 7948), False, 'from discord.ext import commands\n'), ((7520, 7569), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.roles'], {'name': 'rankName'}), '(ctx.guild.roles, name=rankName)\n', (7537, 7569), False, 'import discord\n'), ((8386, 8435), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.roles'], {'name': 'rankName'}), '(ctx.guild.roles, name=rankName)\n', (8403, 8435), False, 'import discord\n')] |
#!/usr/bin/env python3
import argparse
import gc
import numpy as np
import os
import pandas as pd
import pysam
# Number of SVs to process before resetting pysam (close and re-open the
# alignment file). Works around a memory leak in pysam during long query runs.
PYSAM_RESET_INTERVAL = 1000
def get_read_depth(df_subset, bam_file_name, mapq, ref_filename=None):
    """
    Get read depths over one or more breakpoints.

    :param df_subset: Subset dataframe with a column for contigs (first column) and one or more columns for the
        location of breakpoints to quantify.
    :param bam_file_name: Name of alignment file to query.
    :param mapq: Minimum mapping quality.
    :param ref_filename: Optional reference FASTA file name passed to pysam (needed for CRAM input).

    :return: A Series with one element for each row of `df_subset` containing the average of read depths over
        the breakpoints for each variant.
    """
    # Init pysam query count (for memory leak prevention); the file is closed and
    # re-opened every PYSAM_RESET_INTERVAL fetches.
    pysam_count = 0
    bam_file = pysam.AlignmentFile(bam_file_name, 'r', reference_filename=ref_filename)
    # Work on a copy so the caller's dataframe is not renamed/modified.
    df_subset = df_subset.copy()
    n_loc_cols = df_subset.shape[1] - 1  # Number of location columns; depth is averaged for each
    df_subset.columns = ['CONTIG'] + ['LOC_{}'.format(col) for col in range(n_loc_cols)]
    # Init count column (accumulates reads over all location columns)
    df_subset['N'] = np.zeros(df_subset.shape[0], np.float64)
    n_index = df_subset.shape[1] - 1
    # Count reads over each location column for each variant row
    for subset_index in range(n_loc_cols):
        # Use numeric index, skip chromosome column
        subset_index += 1
        for row_index in range(df_subset.shape[0]):
            n_reads = 0
            # Get position
            contig = df_subset.iloc[row_index, 0]
            pos = df_subset.iloc[row_index, subset_index]
            # Reset pysam periodically (avoids memory leak)
            pysam_count += 1
            if pysam_count >= PYSAM_RESET_INTERVAL:
                if bam_file is not None:
                    bam_file.close()
                gc.collect()
                bam_file = pysam.AlignmentFile(bam_file_name, 'r', reference_filename=ref_filename)
                pysam_count = 0
            # Count proper-pair reads at sufficient mapping quality overlapping the 1 bp window
            for segment in bam_file.fetch(str(contig), pos, pos + 1):
                if segment.mapping_quality >= mapq and segment.is_proper_pair:
                    n_reads += 1
            df_subset.iloc[row_index, n_index] += n_reads
    # Return mean of depths (divide by the number of locations)
    return df_subset['N'] / n_loc_cols
def get_ref_contig_sizes(altref_file):
    """
    Get a Series of contig lengths. Includes primary and alt contigs.

    :param altref_file: BED file of contig information where each record spans the whole contig. Must contain
        columns "#CHROM" and "END".

    :return: Series of contig lengths indexed by the contig name.
    """
    # Each record spans the whole contig, so END is the contig length.
    contig_table = pd.read_csv(altref_file, sep='\t', header=0)
    contig_table = contig_table.set_index('#CHROM', drop=False)
    return contig_table['END']
def annotate_variant_info(variant_table, ref_len_series, flank):
    """
    Annotate variant info with locations reads will be extracted from.

    :param variant_table: Variant info table (modified in place and returned).
    :param ref_len_series: Series of contig sizes.
    :param flank: Number of bases from variant breakpoints.

    :return: `variant_table` with additional fields.
    """
    # Length of the reference chromosome and assembly contig each record maps to
    # (re-indexed to the variant table; raises KeyError on missing contigs).
    chrom_len = pd.Series(ref_len_series.loc[variant_table['#CHROM']].values, index=variant_table.index)
    contig_len = pd.Series(ref_len_series.loc[variant_table['CONTIG']].values, index=variant_table.index)

    # Flank windows on the reference, truncated to [0, chromosome length]
    variant_table['FLANK_L_REF'] = (variant_table['POS'] - flank).clip(lower=0)
    variant_table['FLANK_R_REF'] = np.minimum(variant_table['END'] + flank, chrom_len)

    # Flank windows on the contig, truncated to [0, contig length]
    variant_table['FLANK_L_CTG'] = (variant_table['CONTIG_START'] - flank).clip(lower=0)
    variant_table['FLANK_R_CTG'] = np.minimum(variant_table['CONTIG_END'] + flank, contig_len)

    # Midpoint of the variant sequence: deletions are located on the reference,
    # all other types on the contig.
    is_del = variant_table['SVTYPE'] == 'DEL'
    variant_table['VAR_CONTIG'] = variant_table['CONTIG'].where(~is_del, variant_table['#CHROM'])
    midpoint = ((variant_table['CONTIG_START'] + variant_table['CONTIG_END']) / 2).where(
        ~is_del, (variant_table['POS'] + variant_table['END']) / 2
    )
    variant_table['VAR_MIDPOINT'] = midpoint.astype(np.int64)

    return variant_table
# Main: parse arguments, compute breakpoint read depths, write the feature table.
if __name__ == '__main__':
    # Get arguments
    arg_parser = argparse.ArgumentParser(description='Get insert size deltas on the reference over the SV breakpoints.')
    arg_parser.add_argument('bam', help='BAM file of short read alignments.')
    arg_parser.add_argument('bed', help='SV info BED file with columns "#CHROM", "POS", "END", "SVTYPE", "CONTIG", '
                                        '"CONTIG_START", and "CONTIG_END", including a header line.')
    arg_parser.add_argument('alt_info', help='BED file of contigs in the reference.')
    arg_parser.add_argument('out', help='Output file.')
    arg_parser.add_argument('--out_stats',
                            help='Output depth distribution statistics.')
    arg_parser.add_argument('--mapq', type=int, default=20,
                            help='Minimum mapping quality of aligned reads.')
    arg_parser.add_argument('--flank', type=int, default=100,
                            help='Number of reference bases on each side of the SV for flanking regions.')
    arg_parser.add_argument('--ref', nargs='?',
                            default=None, help='Reference for records are aligned against.')
    args = arg_parser.parse_args()
    # Check arguments (fail fast before any expensive work)
    if not os.path.isfile(args.bam):
        raise RuntimeError('Input BAM file does not exist or is not a regular file: {}'.format(args.bam))
    if args.mapq < 0:
        raise RuntimeError('Mapping quality is negative: {}'.format(args.mapq))
    if args.flank < 0:
        raise RuntimeError('Flank is negative: {}'.format(args.flank))
    args.out = args.out.strip()
    if not args.out:
        raise RuntimeError('Output file name is empty.')
    # Get variant info
    df_bed = pd.read_table(args.bed, header=0)
    # Get reference chromosome sizes
    ref_len = get_ref_contig_sizes(args.alt_info)
    # Annotate variant info with locations reads are extracted from
    df_bed = annotate_variant_info(df_bed, ref_len, args.flank)
    # Count reads over variant midpoint
    df_bed['DP_N_VAR'] =\
        get_read_depth(df_bed.loc[:, ['VAR_CONTIG', 'VAR_MIDPOINT']], args.bam, args.mapq, ref_filename=args.ref)
    # Count reads over reference flank
    df_bed['DP_N_PROX_REF'] =\
        get_read_depth(df_bed.loc[:, ['#CHROM', 'FLANK_L_REF', 'FLANK_R_REF']], args.bam, args.mapq, ref_filename=args.ref)
    # Count reads over contig flank
    df_bed['DP_N_PROX_CTG'] =\
        get_read_depth(df_bed.loc[:, ['CONTIG', 'FLANK_L_CTG', 'FLANK_R_CTG']], args.bam, args.mapq, ref_filename=args.ref)
    # Get global stats over proximal reference depths
    ref_mean = np.mean(df_bed['DP_N_PROX_REF'])
    ref_sd = np.std(df_bed['DP_N_PROX_REF'])
    if ref_mean == 0:
        raise RuntimeError('Cannot compute global depth stats: Global mean of proximal reference breakpoint depths is 0')
    # Combine total depths
    df_bed['DP_N_VAR_PROX_REF'] = df_bed['DP_N_VAR'] + df_bed['DP_N_PROX_REF']
    df_bed['DP_N_VAR_PROX_CTG'] = df_bed['DP_N_VAR'] + df_bed['DP_N_PROX_CTG']
    # Set relative ratios (guard against zero denominators)
    df_bed['DP_VAR_REF'] = df_bed.apply(
        lambda row: row['DP_N_VAR'] / row['DP_N_VAR_PROX_REF'] if row['DP_N_VAR_PROX_REF'] > 0 else 0,
        axis=1
    )
    df_bed['DP_VAR_CTG'] = df_bed.apply(
        lambda row: row['DP_N_VAR'] / row['DP_N_VAR_PROX_CTG'] if row['DP_N_VAR_PROX_CTG'] > 0 else 0,
        axis=1
    )
    df_bed['DP_VAR_GLOBAL'] = df_bed['DP_N_VAR'] / ref_mean
    # Write feature table
    df_features = df_bed.loc[
        :, ('INDEX', 'DP_VAR_REF', 'DP_VAR_CTG', 'DP_VAR_GLOBAL', 'DP_N_VAR', 'DP_N_PROX_REF', 'DP_N_PROX_CTG')
    ]
    df_features.to_csv(
        args.out, sep='\t', index=False
    )
    # Write stats (optional)
    if args.out_stats:
        with open(args.out_stats, 'w') as stats_out:
            stats_out.write('ref_mean\t{:.6f}\n'.format(ref_mean))
            stats_out.write('ref_sd\t{:.6f}\n'.format(ref_sd))
| [
"numpy.mean",
"argparse.ArgumentParser",
"pysam.AlignmentFile",
"os.path.isfile",
"numpy.zeros",
"pandas.read_table",
"numpy.std",
"gc.collect"
] | [((899, 971), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['bam_file_name', '"""r"""'], {'reference_filename': 'ref_filename'}), "(bam_file_name, 'r', reference_filename=ref_filename)\n", (918, 971), False, 'import pysam\n'), ((1255, 1295), 'numpy.zeros', 'np.zeros', (['df_subset.shape[0]', 'np.float64'], {}), '(df_subset.shape[0], np.float64)\n', (1263, 1295), True, 'import numpy as np\n'), ((2834, 2870), 'pandas.read_table', 'pd.read_table', (['altref_file'], {'header': '(0)'}), '(altref_file, header=0)\n', (2847, 2870), True, 'import pandas as pd\n'), ((4749, 4857), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get insert size deltas on the reference over the SV breakpoints."""'}), "(description=\n 'Get insert size deltas on the reference over the SV breakpoints.')\n", (4772, 4857), False, 'import argparse\n'), ((6414, 6447), 'pandas.read_table', 'pd.read_table', (['args.bed'], {'header': '(0)'}), '(args.bed, header=0)\n', (6427, 6447), True, 'import pandas as pd\n'), ((7276, 7308), 'numpy.mean', 'np.mean', (["df_bed['DP_N_PROX_REF']"], {}), "(df_bed['DP_N_PROX_REF'])\n", (7283, 7308), True, 'import numpy as np\n'), ((7322, 7353), 'numpy.std', 'np.std', (["df_bed['DP_N_PROX_REF']"], {}), "(df_bed['DP_N_PROX_REF'])\n", (7328, 7353), True, 'import numpy as np\n'), ((5935, 5959), 'os.path.isfile', 'os.path.isfile', (['args.bam'], {}), '(args.bam)\n', (5949, 5959), False, 'import os\n'), ((1920, 1932), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1930, 1932), False, 'import gc\n'), ((1961, 2033), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['bam_file_name', '"""r"""'], {'reference_filename': 'ref_filename'}), "(bam_file_name, 'r', reference_filename=ref_filename)\n", (1980, 2033), False, 'import pysam\n')] |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# load in the image
#
pnmReader = vtk.vtkTIFFReader()
pnmReader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/beach.tif")
# "beach.tif" image contains ORIENTATION tag which is
# ORIENTATION_TOPLEFT (row 0 top, col 0 lhs) type. The TIFF
# reader parses this tag and sets the internal TIFF image
# orientation accordingly. To overwrite this orientation with a vtk
# convention of ORIENTATION_BOTLEFT (row 0 bottom, col 0 lhs ), invoke
# SetOrientationType method with parameter value of 4.
pnmReader.SetOrientationType(4)
# Collapse the RGB image to a single greyscale (luminance) channel.
lum = vtk.vtkImageLuminance()
lum.SetInputConnection(pnmReader.GetOutputPort())
ia = vtk.vtkImageActor()
ia.GetMapper().SetInputConnection(lum.GetOutputPort())
# Add the actors to the renderer, set the background and size
ren1.AddActor(ia)
ren1.SetBackground(0.1,0.2,0.4)
renWin.SetSize(400,400)
# render the image
renWin.Render()
# switch from greyscale input to RGB to test against an old bug
ia.GetMapper().SetInputConnection(pnmReader.GetOutputPort())
# Tilt the camera so the re-rendered image differs from the first pass.
cam1 = ren1.GetActiveCamera()
cam1.Elevation(-30)
cam1.Roll(-20)
ren1.ResetCameraClippingRange()
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
| [
"vtk.util.misc.vtkGetDataRoot",
"vtk.vtkImageLuminance",
"vtk.vtkImageActor",
"vtk.vtkRenderWindowInteractor",
"vtk.vtkRenderWindow",
"vtk.vtkRenderer",
"vtk.vtkTIFFReader"
] | [((119, 135), 'vtk.util.misc.vtkGetDataRoot', 'vtkGetDataRoot', ([], {}), '()\n', (133, 135), False, 'from vtk.util.misc import vtkGetDataRoot\n'), ((196, 213), 'vtk.vtkRenderer', 'vtk.vtkRenderer', ([], {}), '()\n', (211, 213), False, 'import vtk\n'), ((223, 244), 'vtk.vtkRenderWindow', 'vtk.vtkRenderWindow', ([], {}), '()\n', (242, 244), False, 'import vtk\n'), ((277, 308), 'vtk.vtkRenderWindowInteractor', 'vtk.vtkRenderWindowInteractor', ([], {}), '()\n', (306, 308), False, 'import vtk\n'), ((372, 391), 'vtk.vtkTIFFReader', 'vtk.vtkTIFFReader', ([], {}), '()\n', (389, 391), False, 'import vtk\n'), ((867, 890), 'vtk.vtkImageLuminance', 'vtk.vtkImageLuminance', ([], {}), '()\n', (888, 890), False, 'import vtk\n'), ((946, 965), 'vtk.vtkImageActor', 'vtk.vtkImageActor', ([], {}), '()\n', (963, 965), False, 'import vtk\n')] |
#!/usr/local/anaconda/bin/python
#copyright: <NAME>, <EMAIL>
from random import random
from tkinter import *
from copy import deepcopy
class Boggle():
    '''
    Boggle-style word game with a Tkinter UI.

    Attributes:
        F (dict): letter -> sampling weight, estimated from the word list.
        T (dict): nested trie of 5-letter words; the innermost level maps the
            last letter to the full word string.
        size (int): board dimension (size x size cells).
        cellWidth (int): pixel width of one board cell.
        clone: reference to the board kept for resetting.
        soln (list): list of (row, col) cells picked so far by the player.
        ActionNow: last clicked cell as (row, col), or None.
        ActionNow_correct (bool): whether the last click extended a valid path.
    '''
    def __init__(self, file='words.dat'):
        self.readData(file)
        self.size=5
        self.cellWidth=30
        self.newGame()
    def readData(self, file):
        """Read the word list, building the letter weights F and the trie T."""
        self.F=dict()
        self.T=dict()
        obj=open(file)
        lines=obj.readlines()
        words_number=len(lines)
        for word in lines:
            word=word.strip()
            for c in word:
                if c in self.F.keys(): # tally total occurrences of each character
                    self.F[c]+=1
                else:
                    self.F[c]=1
            t=self.T # build the trie T, one level per letter
            for i in range(4):
                if word[i] not in t.keys():
                    t[word[i]]=dict()
                t=t[word[i]]
            t[word[4]]=word
        for key in self.F.keys():
            self.F[key]/=words_number*5
    def ckSoln(self,soln): # returns False if the path is invalid; the word string if the path spells a word; otherwise the sub-trie for the remaining path
        for index in range(len(soln)-1):
            if abs(soln[index][0]-soln[index+1][0])+abs(soln[index][1]-soln[index+1][1])>1:
                return False
        t=self.T
        for (x,y) in soln:
            c=self.board[x][y]
            if c not in t:
                return False
            t=t[c]
        return t
    def resetGame(self):
        # NOTE(review): self.clone aliases the same list object as self.board
        # (newGame does no deepcopy), so this restores a reference only.
        # Harmless while the board itself is never mutated — confirm if that changes.
        self.board=self.clone
        self.soln=list()
        self.ActionNow=None
        self.ActionNow_correct=None
    def newGame(self):
        """Generate a fresh random board and clear all selection state."""
        self.board=list()
        for i in range(self.size):
            t=list()
            self.board.append(t)
            for j in range(self.size):
                t.append(self.randChoice())
        #self.board=[['b','y','u','u','n'],['s','x','o','y','r'],['h','s','l','o','r'],['t','y','a','f','n'],['b','c','r','o','c']] # fixed board for testing
        self.soln=list()
        self.ActionNow=None
        self.ActionNow_correct=None
        self.clone=self.board
    # Return a letter sampled according to the weights in F.
    # NOTE(review): if floating-point rounding leaves the cumulative sum below z,
    # the loop can fall through and return None — verify against the word list.
    def randChoice(self):
        z=random()
        p=0
        for (key,value) in self.F.items():
            p+=value
            if p>=z:
                return key
    def playTK(self):
        """Build the UI, print the control help, and enter the Tk main loop."""
        self.initTK()
        print("<Left Click>: Choose character\n<Mid Click>: New Game\n<Right Click>: Reset Game\n<Triple Click>: Show all solutions")
        self.win.mainloop()
    def initTK(self):
        # create the window object
        self.win =Tk()
        self.win.title('Boggle')
        # create the canvas
        self.canvas=Canvas(self.win,width=self.size*self.cellWidth,height=self.size*self.cellWidth,bg='white')
        self.canvas.pack()
        self.drawCanvas()
        # bind mouse events
        self.canvas.bind("<Button-1>",self.extend)
        self.canvas.bind("<Button-2>",self.new)
        self.canvas.bind("<Button-3>",self.reset)
        self.canvas.bind("<Triple-Button-1>",self.getAllSolutions)
        self.canvas.focus_set()
        self.updateTK()
    # draw the game board
    def drawCanvas(self):
        self.canvas.create_rectangle(0,0,self.size*self.cellWidth,self.size*self.cellWidth,fill='white')
        # draw the grid cells
        for i in range(self.size):
            for j in range(self.size):
                self.canvas.create_rectangle(i*self.cellWidth,j*self.cellWidth,(i+1)*self.cellWidth,(j+1)*self.cellWidth)
        for i in range(self.size):
            for j in range(self.size):
                self.canvas.create_text(j*self.cellWidth+self.cellWidth/2,
                                        i*self.cellWidth+self.cellWidth/2,
                                        text=self.board[i][j].upper(),
                                        fill='black')
    # redraw the last clicked cell with a green (valid) or red (invalid) circle
    def updateTK(self):
        if self.ActionNow != None:
            if self.ActionNow_correct:
                color='green'
            else:
                color='red'
            x=self.ActionNow[0]
            y=self.ActionNow[1]
            self.canvas.create_oval(y*self.cellWidth+1,
                                    x*self.cellWidth+1,
                                    (y+1)*self.cellWidth-1,
                                    (x+1)*self.cellWidth-1,
                                    fill=color)
            self.canvas.create_text(y*self.cellWidth+self.cellWidth/2,
                                    x*self.cellWidth+self.cellWidth/2,
                                    text=self.board[x][y].upper(),
                                    fill='black')
    # select a cell (left click handler)
    def extend(self,event):
        row=event.y//self.cellWidth
        col=event.x//self.cellWidth
        if (row,col) in self.soln:
            return
        self.ActionNow=(row,col)
        self.soln.append((row,col))
        result=self.ckSoln(self.soln)
        if type(result)==type(False):
            self.soln.pop()
            self.ActionNow_correct=False
        else:
            self.ActionNow_correct=True
        self.updateTK()
        if type(result)==type(''):
            self.findASolution(result)
    # start a new game (middle click handler)
    def new(self,event):
        self.newGame()
        self.drawCanvas()
    # reset the current game (right click handler)
    def reset(self,event):
        self.resetGame()
        self.drawCanvas()
    # print all solutions of the current board (triple click handler)
    def getAllSolutions(self,event):
        self.solve()
    # called when the player completes a word
    def findASolution(self,result):
        print("You find a word:",result)
        self.resetGame()
        self.drawCanvas()
    # enumerate every solution, starting a search from each cell
    def solve(self):
        self.allSolutions=list()
        for i in range(self.size):
            for j in range(self.size):
                self.solutionPath=list()
                self.findAllSolutions((i,j))
        print("All solutions:"+str(self.allSolutions)+'\n')
    def findAllSolutions(self,coor):
        """Depth-first search over adjacent, unvisited cells, extending self.solutionPath."""
        x=coor[0]
        y=coor[1]
        self.solutionPath.append(coor)
        result=self.ckSoln(self.solutionPath)
        if type(result)==type(False):
            return
        if type(result)==type(''):
            self.allSolutions.append(result)
            return
        if(x>0 and (x-1,y) not in self.solutionPath):
            self.findAllSolutions((x-1,y))
            self.solutionPath.pop()
        if(y>0 and (x,y-1) not in self.solutionPath):
            self.findAllSolutions((x,y-1))
            self.solutionPath.pop()
        if(y<4 and (x,y+1) not in self.solutionPath):
            self.findAllSolutions((x,y+1))
            self.solutionPath.pop()
        if(x<4 and (x+1,y) not in self.solutionPath):
            self.findAllSolutions((x+1,y))
            self.solutionPath.pop()
if __name__ == "__main__":
    # Launch an interactive game when run as a script.
    game = Boggle()
    game.playTK()
| [
"random.random"
] | [((2303, 2311), 'random.random', 'random', ([], {}), '()\n', (2309, 2311), False, 'from random import random\n')] |
# Copyright 2021 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vectorized embedding pairwise distances computation functions"""
from abc import ABC, abstractmethod
from typing import Union, List
import tensorflow as tf
from .types import FloatTensor
class Distance(ABC):
    """Abstract base class for pairwise distance functions over embeddings.

    Note: don't forget to add your distance to the DISTANCES list
    and add alias names in it.
    """

    def __init__(self, name: str, aliases: Union[List[str], None] = None):
        """Record the canonical name and lookup aliases of the distance.

        Args:
            name: Canonical name used to look the distance up.
            aliases: Optional alternative names that also resolve to this
                distance. Defaults to no aliases.
        """
        self.name = name
        # Use None as the default instead of a shared mutable default
        # argument ([] would be one list object shared by every instance).
        self.aliases = aliases if aliases is not None else []

    @abstractmethod
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch.
        Args:
            embeddings: Embeddings to compute the pairwise one.
        Returns:
            FloatTensor: Pairwise distance tensor.
        """

    def __call__(self, embeddings: FloatTensor):
        return self.call(embeddings)

    def __str__(self) -> str:
        return self.name

    def get_config(self):
        # No serializable hyper-parameters by default.
        return {}
@tf.keras.utils.register_keras_serializable(package="Similarity")
class InnerProductSimilarity(Distance):
    """Pairwise inner-product similarity between embeddings.

    The [Inner product](https://en.wikipedia.org/wiki/Inner_product_space)
    is a *similarity*, not a distance: more similar vectors produce the
    largest values.

    NOTE! Because the sign convention is flipped relative to a distance,
    this is likely not what you want with the built-in losses (at the very
    least it flips the sign on the margin in many of them). It is meant for
    custom loss functions that expect a similarity instead of a distance.
    """

    def __init__(self):
        "Init Inner product similarity"
        super().__init__('inner_product', ['ip'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise similarities for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise similarity tensor.
        """
        # Gram matrix: entry (i, j) is the inner product <e_i, e_j>.
        gram: FloatTensor = tf.linalg.matmul(embeddings, embeddings, transpose_b=True)
        return gram
@tf.keras.utils.register_keras_serializable(package="Similarity")
class CosineDistance(Distance):
    """Pairwise cosine distances between embeddings.

    The [Cosine Distance](https://en.wikipedia.org/wiki/Cosine_similarity)
    is an angular distance that varies from 0 (similar) to 1 (dissimilar).
    """

    def __init__(self):
        "Init Cosine distance"
        super().__init__('cosine')

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one. The
                embeddings are expected to be normalized.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # For unit-norm vectors the inner product is the cosine similarity,
        # so distance is 1 - <e_i, e_j>.
        cosine_sim = tf.linalg.matmul(embeddings, embeddings, transpose_b=True)
        raw_distances = 1 - cosine_sim
        # Clamp tiny negatives introduced by floating-point error.
        clipped: FloatTensor = tf.math.maximum(raw_distances, 0.0)
        return clipped
@tf.keras.utils.register_keras_serializable(package="Similarity")
class EuclideanDistance(Distance):
    """Pairwise euclidean distances between embeddings.

    The [Euclidean Distance](https://en.wikipedia.org/wiki/Euclidean_distance)
    is the standard distance measuring the line segment between two
    embeddings in Cartesian space. The larger the distance the more
    dissimilar the embeddings are.

    **Alias**: L2 Norm, Pythagorean
    """

    def __init__(self):
        "Init Euclidean distance"
        super().__init__('euclidean', ['l2', 'pythagorean'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # ||a - b||^2 = ||a||^2 - 2<a, b> + ||b||^2, computed batched.
        sq_norms = tf.math.reduce_sum(tf.math.square(embeddings), axis=1, keepdims=True)
        cross = tf.linalg.matmul(embeddings, embeddings, transpose_b=True)
        sq_distances: FloatTensor = sq_norms - 2.0 * cross + tf.transpose(sq_norms)

        # Avoid NaN and inf gradients when back propagating through the
        # sqrt: values smaller than 1e-18 produce inf for the gradient and
        # 0.0 produces NaN, so clamp before the sqrt and zero out the
        # clamped entries afterwards.
        valid_mask = tf.math.greater_equal(sq_distances, 1e-18)
        sq_distances = tf.math.maximum(sq_distances, 1e-18)
        distances = tf.math.sqrt(sq_distances) * tf.cast(valid_mask, tf.float32)
        return distances
@tf.keras.utils.register_keras_serializable(package="Similarity")
class ModifiedEuclideanDistance(Distance):
    """Pairwise euclidean distances between embeddings.

    Assumes the last element of each embedding vector is the output of an
    adjustment network. The larger the distance the more dissimilar the
    embeddings are.
    """

    def __init__(self):
        "Init Modified Euclidean distance"
        super().__init__('modified_euclidean', ['modl2', 'modeuclidean'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # NOTE: the squared norms include the final (adjustment) element
        # while the cross term is computed on embeddings[:, :-1] only —
        # this asymmetry is what makes the distance "modified".
        sq_norms = tf.math.reduce_sum(tf.math.square(embeddings), axis=1, keepdims=True)
        cross = tf.linalg.matmul(embeddings[:, :-1], embeddings[:, :-1], transpose_b=True)
        sq_distances: FloatTensor = sq_norms - 2.0 * cross + tf.transpose(sq_norms)

        # Avoid NaN and inf gradients when back propagating through the
        # sqrt: clamp before the sqrt and zero out the clamped entries
        # afterwards.
        valid_mask = tf.math.greater_equal(sq_distances, 1e-18)
        sq_distances = tf.math.maximum(sq_distances, 1e-18)
        distances = tf.math.sqrt(sq_distances) * tf.cast(valid_mask, tf.float32)
        return distances
@tf.keras.utils.register_keras_serializable(package="Similarity")
class SquaredEuclideanDistance(Distance):
    """Pairwise squared Euclidean distance.

    The [Squared Euclidean Distance](https://en.wikipedia.org/wiki/Euclidean_distance#Squared_Euclidean_distance)
    varies from 0 (similar) to infinity (dissimilar).
    """

    def __init__(self):
        super().__init__('squared_euclidean', ['sql2', 'sqeuclidean'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # ||a - b||^2 = ||a||^2 - 2<a, b> + ||b||^2, computed batched.
        sq_norms = tf.math.reduce_sum(tf.math.square(embeddings), axis=1, keepdims=True)
        cross = tf.linalg.matmul(embeddings, embeddings, transpose_b=True)
        sq_distances: FloatTensor = sq_norms - 2.0 * cross + tf.transpose(sq_norms)
        # Clamp tiny negatives introduced by floating-point error.
        return tf.math.maximum(sq_distances, 0.0)
@tf.keras.utils.register_keras_serializable(package="Similarity")
class ManhattanDistance(Distance):
    """Pairwise Manhattan distances between embeddings.

    The [Manhattan Distance](https://en.wikipedia.org/wiki/Euclidean_distance)
    is the sum of the lengths of the projections of the line segment
    between two embeddings onto the Cartesian axes. The larger the
    distance the more dissimilar the embeddings are.
    """

    def __init__(self):
        "Init Manhattan distance"
        super().__init__('manhattan', ['l1', 'taxicab'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        flat = tf.reshape(embeddings, shape=[tf.shape(embeddings)[0], -1])
        # Broadcast a (B, 1, D) view against a (1, B, D) view to obtain all
        # pairwise element deltas, then take the L1 norm over features.
        pairwise_delta = tf.expand_dims(flat, axis=1) - tf.expand_dims(flat, axis=0)
        distances: FloatTensor = tf.norm(pairwise_delta, 1, axis=2)
        return distances
@tf.keras.utils.register_keras_serializable(package="Similarity")
class SNRDistance(Distance):
    """
    Computes pairwise SNR distances between embeddings.

    The [Signal-to-Noise Ratio distance](https://arxiv.org/abs/1904.02616)
    is the ratio of noise variance to the feature variance.
    """

    def __init__(self):
        "Init SNR distance"
        super().__init__('snr')

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise snr distances for a given batch of embeddings.

        SNR(i, j): anchor i and compared feature j.
        SNR(i, j) may not be equal to SNR(j, i).

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # Per-example feature variance: E[x^2] - E[x]^2 over the feature axis.
        feat_mean = tf.math.reduce_mean(embeddings, axis=1)
        feat_sq_mean = tf.math.reduce_mean(tf.math.square(embeddings), axis=1)
        anchor_var = feat_sq_mean - tf.square(feat_mean)

        # Pairwise noise variance of the difference vectors.
        flat = tf.reshape(embeddings, shape=[tf.shape(embeddings)[0], -1])
        diff = tf.expand_dims(flat, axis=1) - tf.expand_dims(flat, axis=0)
        diff_mean = tf.math.reduce_mean(diff, axis=2)
        diff_sq_mean = tf.math.reduce_mean(tf.math.square(diff), axis=2)
        noise_var = diff_sq_mean - tf.square(diff_mean)

        # SNR distance: noise variance normalized by the anchor's variance.
        distances: FloatTensor = tf.divide(noise_var,
                                           tf.expand_dims(anchor_var, axis=1))
        return distances
# List of implemented distances. distance_canonicalizer() resolves names and
# aliases against this registry, so every new Distance subclass must be
# instantiated here.
DISTANCES = [
    InnerProductSimilarity(),
    EuclideanDistance(),
    ModifiedEuclideanDistance(),
    SquaredEuclideanDistance(),
    ManhattanDistance(),
    CosineDistance(),
    SNRDistance()
]
def distance_canonicalizer(user_distance: Union[Distance, str]) -> Distance:
    """Normalize user requested distance to its matching Distance object.

    Args:
        user_distance: Requested distance either by name/alias or by object.

    Returns:
        Distance: Requested object name.
    """
    if isinstance(user_distance, Distance):
        # Already a distance object supplied by the user: pass through.
        return user_distance

    # Build a name/alias -> canonical name map and a canonical name ->
    # instance map from the registry.
    alias2name = {}
    name2distance = {}
    for dist in DISTANCES:
        # self reference
        alias2name[dist.name] = dist.name
        name2distance[dist.name] = dist
        # aliasing
        for alias in dist.aliases:
            alias2name[alias] = dist.name

    if isinstance(user_distance, str):
        canonical = user_distance.lower().strip()
        if canonical not in alias2name:
            raise ValueError('Metric not supported by the framework')
        return name2distance[alias2name[canonical]]

    raise ValueError('Unknown distance: must either be a MetricDistance\
                         or a known distance function')
| [
"tensorflow.expand_dims",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.math.sqrt",
"tensorflow.math.square",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.math.maximum",
"tensorflow.math.reduce_mean",
"tensorflow.math.greater_equal",
"tensorflow.math.reduce_sum",
"... | [((1521, 1585), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""Similarity"""'}), "(package='Similarity')\n", (1563, 1585), True, 'import tensorflow as tf\n'), ((2697, 2761), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""Similarity"""'}), "(package='Similarity')\n", (2739, 2761), True, 'import tensorflow as tf\n'), ((3682, 3746), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""Similarity"""'}), "(package='Similarity')\n", (3724, 3746), True, 'import tensorflow as tf\n'), ((5361, 5425), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""Similarity"""'}), "(package='Similarity')\n", (5403, 5425), True, 'import tensorflow as tf\n'), ((7125, 7189), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""Similarity"""'}), "(package='Similarity')\n", (7167, 7189), True, 'import tensorflow as tf\n'), ((8280, 8344), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""Similarity"""'}), "(package='Similarity')\n", (8322, 8344), True, 'import tensorflow as tf\n'), ((9385, 9449), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""Similarity"""'}), "(package='Similarity')\n", (9427, 9449), True, 'import tensorflow as tf\n'), ((2615, 2673), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['embeddings', 'embeddings'], {'transpose_b': '(True)'}), '(embeddings, embeddings, transpose_b=True)\n', (2631, 2673), True, 'import tensorflow as tf\n'), ((3613, 3644), 'tensorflow.math.maximum', 'tf.math.maximum', (['distances', '(0.0)'], {}), '(distances, 0.0)\n', (3628, 3644), True, 'import 
tensorflow as tf\n'), ((4598, 4624), 'tensorflow.math.square', 'tf.math.square', (['embeddings'], {}), '(embeddings)\n', (4612, 4624), True, 'import tensorflow as tf\n'), ((4648, 4703), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['squared_norm'], {'axis': '(1)', 'keepdims': '(True)'}), '(squared_norm, axis=1, keepdims=True)\n', (4666, 4703), True, 'import tensorflow as tf\n'), ((5161, 5200), 'tensorflow.math.greater_equal', 'tf.math.greater_equal', (['distances', '(1e-18)'], {}), '(distances, 1e-18)\n', (5182, 5200), True, 'import tensorflow as tf\n'), ((5221, 5254), 'tensorflow.math.maximum', 'tf.math.maximum', (['distances', '(1e-18)'], {}), '(distances, 1e-18)\n', (5236, 5254), True, 'import tensorflow as tf\n'), ((6348, 6374), 'tensorflow.math.square', 'tf.math.square', (['embeddings'], {}), '(embeddings)\n', (6362, 6374), True, 'import tensorflow as tf\n'), ((6398, 6453), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['squared_norm'], {'axis': '(1)', 'keepdims': '(True)'}), '(squared_norm, axis=1, keepdims=True)\n', (6416, 6453), True, 'import tensorflow as tf\n'), ((6925, 6964), 'tensorflow.math.greater_equal', 'tf.math.greater_equal', (['distances', '(1e-18)'], {}), '(distances, 1e-18)\n', (6946, 6964), True, 'import tensorflow as tf\n'), ((6985, 7018), 'tensorflow.math.maximum', 'tf.math.maximum', (['distances', '(1e-18)'], {}), '(distances, 1e-18)\n', (7000, 7018), True, 'import tensorflow as tf\n'), ((7907, 7933), 'tensorflow.math.square', 'tf.math.square', (['embeddings'], {}), '(embeddings)\n', (7921, 7933), True, 'import tensorflow as tf\n'), ((7957, 8012), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['squared_norm'], {'axis': '(1)', 'keepdims': '(True)'}), '(squared_norm, axis=1, keepdims=True)\n', (7975, 8012), True, 'import tensorflow as tf\n'), ((8219, 8250), 'tensorflow.math.maximum', 'tf.math.maximum', (['distances', '(0.0)'], {}), '(distances, 0.0)\n', (8234, 8250), True, 'import tensorflow as tf\n'), ((9330, 9356), 
'tensorflow.norm', 'tf.norm', (['deltas', '(1)'], {'axis': '(2)'}), '(deltas, 1, axis=2)\n', (9337, 9356), True, 'import tensorflow as tf\n'), ((10259, 10298), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['embeddings'], {'axis': '(1)'}), '(embeddings, axis=1)\n', (10278, 10298), True, 'import tensorflow as tf\n'), ((10322, 10348), 'tensorflow.math.square', 'tf.math.square', (['embeddings'], {}), '(embeddings)\n', (10336, 10348), True, 'import tensorflow as tf\n'), ((10373, 10414), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['embed_square'], {'axis': '(1)'}), '(embed_square, axis=1)\n', (10392, 10414), True, 'import tensorflow as tf\n'), ((10694, 10728), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['delta'], {'axis': '(2)'}), '(delta, axis=2)\n', (10713, 10728), True, 'import tensorflow as tf\n'), ((10748, 10769), 'tensorflow.math.square', 'tf.math.square', (['delta'], {}), '(delta)\n', (10762, 10769), True, 'import tensorflow as tf\n'), ((10794, 10831), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['delta_sq'], {'axis': '(2)'}), '(delta_sq, axis=2)\n', (10813, 10831), True, 'import tensorflow as tf\n'), ((3495, 3553), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['embeddings', 'embeddings'], {'transpose_b': '(True)'}), '(embeddings, embeddings, transpose_b=True)\n', (3511, 3553), True, 'import tensorflow as tf\n'), ((4744, 4802), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['embeddings', 'embeddings'], {'transpose_b': '(True)'}), '(embeddings, embeddings, transpose_b=True)\n', (4760, 4802), True, 'import tensorflow as tf\n'), ((4863, 4889), 'tensorflow.transpose', 'tf.transpose', (['squared_norm'], {}), '(squared_norm)\n', (4875, 4889), True, 'import tensorflow as tf\n'), ((5275, 5298), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['distances'], {}), '(distances)\n', (5287, 5298), True, 'import tensorflow as tf\n'), ((5301, 5331), 'tensorflow.cast', 'tf.cast', (['dist_mask', 'tf.float32'], {}), '(dist_mask, 
tf.float32)\n', (5308, 5331), True, 'import tensorflow as tf\n'), ((6494, 6568), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['embeddings[:, :-1]', 'embeddings[:, :-1]'], {'transpose_b': '(True)'}), '(embeddings[:, :-1], embeddings[:, :-1], transpose_b=True)\n', (6510, 6568), True, 'import tensorflow as tf\n'), ((6627, 6653), 'tensorflow.transpose', 'tf.transpose', (['squared_norm'], {}), '(squared_norm)\n', (6639, 6653), True, 'import tensorflow as tf\n'), ((7039, 7062), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['distances'], {}), '(distances)\n', (7051, 7062), True, 'import tensorflow as tf\n'), ((7065, 7095), 'tensorflow.cast', 'tf.cast', (['dist_mask', 'tf.float32'], {}), '(dist_mask, tf.float32)\n', (7072, 7095), True, 'import tensorflow as tf\n'), ((8053, 8111), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['embeddings', 'embeddings'], {'transpose_b': '(True)'}), '(embeddings, embeddings, transpose_b=True)\n', (8069, 8111), True, 'import tensorflow as tf\n'), ((8172, 8198), 'tensorflow.transpose', 'tf.transpose', (['squared_norm'], {}), '(squared_norm)\n', (8184, 8198), True, 'import tensorflow as tf\n'), ((9237, 9265), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_rs'], {'axis': '(1)'}), '(x_rs, axis=1)\n', (9251, 9265), True, 'import tensorflow as tf\n'), ((9268, 9296), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_rs'], {'axis': '(0)'}), '(x_rs, axis=0)\n', (9282, 9296), True, 'import tensorflow as tf\n'), ((10452, 10473), 'tensorflow.square', 'tf.square', (['embed_mean'], {}), '(embed_mean)\n', (10461, 10473), True, 'import tensorflow as tf\n'), ((10613, 10641), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_rs'], {'axis': '(1)'}), '(x_rs, axis=1)\n', (10627, 10641), True, 'import tensorflow as tf\n'), ((10644, 10672), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_rs'], {'axis': '(0)'}), '(x_rs, axis=0)\n', (10658, 10672), True, 'import tensorflow as tf\n'), ((10868, 10889), 'tensorflow.square', 'tf.square', (['delta_mean'], {}), 
'(delta_mean)\n', (10877, 10889), True, 'import tensorflow as tf\n'), ((10988, 11022), 'tensorflow.expand_dims', 'tf.expand_dims', (['anchor_var'], {'axis': '(1)'}), '(anchor_var, axis=1)\n', (11002, 11022), True, 'import tensorflow as tf\n'), ((9190, 9210), 'tensorflow.shape', 'tf.shape', (['embeddings'], {}), '(embeddings)\n', (9198, 9210), True, 'import tensorflow as tf\n'), ((10567, 10587), 'tensorflow.shape', 'tf.shape', (['embeddings'], {}), '(embeddings)\n', (10575, 10587), True, 'import tensorflow as tf\n')] |
import hashlib
import hmac
from satang_pro_signer import preparer
class Signer:
    """Produces HMAC-SHA512 signatures over canonically encoded payloads."""
    def __init__(self, secret: bytes):
        # Raw shared secret used as the HMAC key.
        self.secret = secret
    def sign(self, obj) -> bytes:
        """Return the HMAC-SHA512 digest of obj's canonical encoding.

        The payload is first canonicalised by preparer.Preparer so that
        equal objects always sign identically.
        """
        parsed = preparer.Preparer(obj).encode()
        msg = bytes(parsed, encoding='utf-8')
        try:
            # better performance
            return hmac.digest(self.secret, msg, 'sha512')
        except AttributeError:
            # compatible with Python 3.6
            m = hmac.new(self.secret, msg, hashlib.sha512)
            return m.digest() | [
"satang_pro_signer.preparer.Preparer",
"hmac.digest",
"hmac.new"
] | [((347, 386), 'hmac.digest', 'hmac.digest', (['self.secret', 'msg', '"""sha512"""'], {}), "(self.secret, msg, 'sha512')\n", (358, 386), False, 'import hmac\n'), ((203, 225), 'satang_pro_signer.preparer.Preparer', 'preparer.Preparer', (['obj'], {}), '(obj)\n', (220, 225), False, 'from satang_pro_signer import preparer\n'), ((475, 517), 'hmac.new', 'hmac.new', (['self.secret', 'msg', 'hashlib.sha512'], {}), '(self.secret, msg, hashlib.sha512)\n', (483, 517), False, 'import hmac\n')] |
# pylint:disable=missing-module-docstring,missing-class-docstring,missing-function-docstring
from django import forms
#-
from .base import compare_template, SimpleTestCase
class DummyForm(forms.Form):
    """Fixture form exposing two optional choice fields for the tests.

    ``choice1`` carries no options at all; ``choice2`` carries five options,
    the last one deliberately long, so the rendered markup can be asserted.
    """
    # No choices: renders an empty multi-select menu.
    choice1 = forms.ChoiceField(
        help_text="Optional helper text here",
        required=False)
    # Five options; the final label is intentionally very long.
    choice2 = forms.ChoiceField(
        help_text="Optional helper text here",
        required=False,
        choices=(
            ('one', 'Option 1'),
            ('two', 'Option 2'),
            ('three', 'Option 3'),
            ('four', 'Option 4'),
            ('five', "An example option that is really long to show what "
                "should be done to handle long text"),
        ))
class MultiSelectHtmlTest(SimpleTestCase):
maxDiff = None
def test_filterable(self):
form = DummyForm(data={'choice2': ['one']})
context = {'form': form}
template = """
{% load carbondesign %}
{% MultiSelect form.choice1 mode="filterable" label="Multi-Select label" %}
"""
expected = """
<div class="bx--form-item">
<div class="bx--list-box__wrapper ">
<label class="bx--label">
Multi-Select label
</label>
<div class="bx--multi-select bx--list-box bx--combo-box bx--multi-select--filterable">
<div role="button" class="bx--list-box__field" tabindex="0"
aria-label="Open menu" aria-haspopup="true" aria-expanded="false">
<input class="bx--text-input" placeholder="Filter...">
<div class="bx--list-box__menu-icon">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
aria-label="Open menu" width="16" height="16" viewBox="0 0 16 16"
role="img">
<path d="M8 11L3 6 3.7 5.3 8 9.6 12.3 5.3 13 6z"></path>
</svg>
</div>
</div>
<fieldset class="bx--list-box__menu" role="listbox">
<legend class="bx--assistive-text">
Multi-Select label
</legend>
</fieldset>
</div>
<div id="hint-id_choice1" class="bx--form__helper-text">
Optional helper text here
</div>
</div>
</div>
"""
rendered = compare_template(template, expected, context)
self.assertEqual(*rendered)
def test_filterable_expanded(self):
form = DummyForm(data={'choice2': ['one']})
context = {'form': form}
template = """
{% load carbondesign %}
{% MultiSelect form.choice2 mode="filterable" label="Multi-Select label" expanded=True %}
"""
expected = """
<div class="bx--form-item">
<div class="bx--list-box__wrapper">
<label class="bx--label">
Multi-Select label
</label>
<div class="bx--multi-select bx--list-box bx--combo-box bx--multi-select--filterable bx--list-box--expanded bx--multi-select--selected">
<div role="button" class="bx--list-box__field" tabindex="0"
aria-label="Close menu" aria-haspopup="true" aria-expanded="true">
<div role="button"
class="bx--list-box__selection bx--list-box__selection--multi bx--tag--filter"
tabindex="0" title="Clear all selected items">
1
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
aria-label="Clear selection" width="16" height="16"
viewBox="0 0 32 32" role="img">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</div>
<input class="bx--text-input" placeholder="Filter...">
<div class="bx--list-box__menu-icon">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
aria-label="Close menu" width="16" height="16" viewBox="0 0 16 16"
role="img">
<path d="M8 5L13 10 12.3 10.7 8 6.4 3.7 10.7 3 10z"></path>
</svg>
</div>
</div>
<fieldset class="bx--list-box__menu" role="listbox">
<legend class="bx--assistive-text">
Multi-Select label
</legend>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 1" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-1" value="one" checked>
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 1
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 2" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-2" value="two">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 2
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 3" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-3" value="three">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 3
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 4" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-4" value="four">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 4
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="An example option that is really long to show what should be done to handle long text" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-5" value="five" >
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
An example option that is really long to show what should be done to handle long text
</span>
</label>
</div>
</div>
</div>
</fieldset>
</div>
<div id="hint-id_choice2" class="bx--form__helper-text">
Optional helper text here
</div>
</div>
</div>
"""
rendered = compare_template(template, expected, context)
self.assertEqual(*rendered)
def test_inline(self):
form = DummyForm(data={'choice2': ['one']})
context = {'form': form}
template = """
{% load carbondesign %}
{% MultiSelect form.choice1 mode="inline" label="Multi-Select label" %}
"""
expected = """
<div class="bx--form-item">
<div class="bx--list-box__wrapper bx--list-box__wrapper--inline">
<label class="bx--label">
Multi-Select label
</label>
<div class="bx--multi-select bx--list-box bx--list-box--inline">
<div role="button" class="bx--list-box__field" tabindex="0"
aria-label="Open menu" aria-haspopup="true" aria-expanded="false">
<span class="bx--list-box__label">Multi select options</span>
<div class="bx--list-box__menu-icon">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
aria-label="Open menu" width="16" height="16" viewBox="0 0 16 16"
role="img">
<path d="M8 11L3 6 3.7 5.3 8 9.6 12.3 5.3 13 6z"></path>
</svg>
</div>
</div>
<fieldset class="bx--list-box__menu" role="listbox">
<legend class="bx--assistive-text">
Multi-Select label
</legend>
</fieldset>
</div>
<div id="hint-id_choice1" class="bx--form__helper-text">
Optional helper text here
</div>
</div>
</div>
"""
rendered = compare_template(template, expected, context)
self.assertEqual(*rendered)
def test_inline_expanded(self):
form = DummyForm(data={'choice2': ['one']})
context = {'form': form}
template = """
{% load carbondesign %}
{% MultiSelect form.choice2 mode="inline" label="Multi-Select label" expanded=True %}
"""
expected = """
<div class="bx--form-item">
<div class="bx--list-box__wrapper bx--list-box__wrapper--inline">
<label class="bx--label">
Multi-Select label
</label>
<div class="bx--multi-select bx--list-box bx--list-box--inline bx--list-box--expanded bx--multi-select--selected">
<div role="button" class="bx--list-box__field" tabindex="0"
aria-label="Close menu" aria-haspopup="true" aria-expanded="true">
<div role="button"
class="bx--list-box__selection bx--list-box__selection--multi bx--tag--filter"
tabindex="0" title="Clear all selected items">
1
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
aria-label="Clear selection" width="16" height="16"
viewBox="0 0 32 32" role="img">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</div>
<span class="bx--list-box__label">Multi select options</span>
<div class="bx--list-box__menu-icon">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
aria-label="Close menu" width="16" height="16" viewBox="0 0 16 16"
role="img">
<path d="M8 5L13 10 12.3 10.7 8 6.4 3.7 10.7 3 10z"></path>
</svg>
</div>
</div>
<fieldset class="bx--list-box__menu" role="listbox">
<legend class="bx--assistive-text">
Multi-Select label
</legend>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 1" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-1" value="one" checked>
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 1
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 2" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-2" value="two">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 2
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 3" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-3" value="three">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 3
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 4" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-4" value="four">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 4
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="An example option that is really long to show what should be done to handle long text" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-5" value="five">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
An example option that is really long to show what should be done to handle long text
</span>
</label>
</div>
</div>
</div>
</fieldset>
</div>
<div id="hint-id_choice2" class="bx--form__helper-text">
Optional helper text here
</div>
</div>
</div>
"""
rendered = compare_template(template, expected, context)
self.assertEqual(*rendered)
def test_light(self):
form = DummyForm(data={'choice2': ['one']})
context = {'form': form}
template = """
{% load carbondesign %}
{% MultiSelect form.choice1 label="Multi-Select label" light=True %}
"""
expected = """
<div class="bx--form-item">
<div class="bx--list-box__wrapper">
<label class="bx--label">
Multi-Select label
</label>
<div class="bx--multi-select bx--list-box bx--list-box--light">
<div role="button" class="bx--list-box__field" tabindex="0"
aria-label="Open menu" aria-haspopup="true" aria-expanded="false">
<span class="bx--list-box__label">Multi select options</span>
<div class="bx--list-box__menu-icon">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
aria-label="Open menu" width="16" height="16" viewBox="0 0 16 16"
role="img">
<path d="M8 11L3 6 3.7 5.3 8 9.6 12.3 5.3 13 6z"></path>
</svg>
</div>
</div>
<fieldset class="bx--list-box__menu" role="listbox">
<legend class="bx--assistive-text">
Multi-Select label
</legend>
</fieldset>
</div>
<div id="hint-id_choice1" class="bx--form__helper-text">
Optional helper text here
</div>
</div>
</div>
"""
rendered = compare_template(template, expected, context)
self.assertEqual(*rendered)
def test_light_expanded(self):
form = DummyForm(data={'choice2': ['one']})
context = {'form': form}
template = """
{% load carbondesign %}
{% MultiSelect form.choice2 label="Multi-Select label" expanded=True light=True %}
"""
expected = """
<div class="bx--form-item">
<div class="bx--list-box__wrapper">
<label class="bx--label">
Multi-Select label
</label>
<div class="bx--multi-select bx--list-box bx--list-box--light bx--list-box--expanded bx--multi-select--selected">
<div role="button" class="bx--list-box__field" tabindex="0"
aria-label="Close menu" aria-haspopup="true" aria-expanded="true">
<div role="button"
class="bx--list-box__selection bx--list-box__selection--multi bx--tag--filter"
tabindex="0" title="Clear all selected items">
1
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
aria-label="Clear selection" width="16" height="16"
viewBox="0 0 32 32" role="img">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</div>
<span class="bx--list-box__label">Multi select options</span>
<div class="bx--list-box__menu-icon">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
aria-label="Close menu" width="16" height="16" viewBox="0 0 16 16"
role="img">
<path d="M8 5L13 10 12.3 10.7 8 6.4 3.7 10.7 3 10z"></path>
</svg>
</div>
</div>
<fieldset class="bx--list-box__menu" role="listbox">
<legend class="bx--assistive-text">
Multi-Select label
</legend>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 1" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-1" value="one" checked>
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 1
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 2" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-2" value="two">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 2
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 3" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-3" value="three">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 3
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 4" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-4" value="four">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 4
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="An example option that is really long to show what should be done to handle long text" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-5" value="five" >
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
An example option that is really long to show what should be done to handle long text
</span>
</label>
</div>
</div>
</div>
</fieldset>
</div>
<div id="hint-id_choice2" class="bx--form__helper-text">
Optional helper text here
</div>
</div>
</div>
"""
rendered = compare_template(template, expected, context)
self.assertEqual(*rendered)
def test_default(self):
form = DummyForm(data={'choice2': ['one']})
context = {'form': form}
template = """
{% load carbondesign %}
{% MultiSelect form.choice1 label="Multi-Select label" %}
"""
expected = """
<div class="bx--form-item">
<div class="bx--list-box__wrapper ">
<label class="bx--label">
Multi-Select label
</label>
<div class="bx--multi-select bx--list-box">
<div role="button" class="bx--list-box__field" tabindex="0"
aria-label="Open menu" aria-haspopup="true" aria-expanded="false">
<span class="bx--list-box__label">Multi select options</span>
<div class="bx--list-box__menu-icon">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
aria-label="Open menu" width="16" height="16" viewBox="0 0 16 16"
role="img">
<path d="M8 11L3 6 3.7 5.3 8 9.6 12.3 5.3 13 6z"></path>
</svg>
</div>
</div>
<fieldset class="bx--list-box__menu" role="listbox">
<legend class="bx--assistive-text">
Multi-Select label
</legend>
</fieldset>
</div>
<div id="hint-id_choice1" class="bx--form__helper-text">
Optional helper text here
</div>
</div>
</div>
"""
rendered = compare_template(template, expected, context)
self.assertEqual(*rendered)
def test_default_expanded(self):
form = DummyForm(data={'choice2': ['one']})
context = {'form': form}
template = """
{% load carbondesign %}
{% MultiSelect form.choice2 label="Multi-Select label" expanded=True %}
"""
expected = """
<div class="bx--form-item">
<div class="bx--list-box__wrapper">
<label class="bx--label">
Multi-Select label
</label>
<div class="bx--multi-select bx--list-box bx--list-box--expanded bx--multi-select--selected">
<div role="button" class="bx--list-box__field" tabindex="0"
aria-label="Close menu" aria-haspopup="true" aria-expanded="true">
<div role="button"
class="bx--list-box__selection bx--list-box__selection--multi bx--tag--filter"
tabindex="0" title="Clear all selected items">
1
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
aria-label="Clear selection" width="16" height="16"
viewBox="0 0 32 32" role="img">
<path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
</svg>
</div>
<span class="bx--list-box__label">Multi select options</span>
<div class="bx--list-box__menu-icon">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
aria-label="Close menu" width="16" height="16" viewBox="0 0 16 16"
role="img">
<path d="M8 5L13 10 12.3 10.7 8 6.4 3.7 10.7 3 10z"></path>
</svg>
</div>
</div>
<fieldset class="bx--list-box__menu" role="listbox">
<legend class="bx--assistive-text">
Multi-Select label
</legend>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 1" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-1" value="one" checked>
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 1
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 2" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-2" value="two">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 2
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 3" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-3" value="three">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 3
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="Option 4" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-4" value="four">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
Option 4
</span>
</label>
</div>
</div>
</div>
<div class="bx--list-box__menu-item">
<div class="bx--list-box__menu-item__option">
<div class="bx--form-item bx--checkbox-wrapper">
<label title="An example option that is really long to show what should be done to handle long text" class="bx--checkbox-label">
<input type="checkbox" name="choice2" readonly class="bx--checkbox"
id="id_choice2-5" value="five">
<span class="bx--checkbox-appearance"></span>
<span class="bx--checkbox-label-text">
An example option that is really long to show what should be done to handle long text
</span>
</label>
</div>
</div>
</div>
</fieldset>
</div>
<div id="hint-id_choice2" class="bx--form__helper-text">
Optional helper text here
</div>
</div>
</div>
"""
rendered = compare_template(template, expected, context)
self.assertEqual(*rendered)
| [
"django.forms.ChoiceField"
] | [((216, 288), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'required': '(False)', 'help_text': '"""Optional helper text here"""'}), "(required=False, help_text='Optional helper text here')\n", (233, 288), False, 'from django import forms\n'), ((328, 609), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'required': '(False)', 'choices': "(('one', 'Option 1'), ('two', 'Option 2'), ('three', 'Option 3'), ('four',\n 'Option 4'), ('five',\n 'An example option that is really long to show what should be done to handle long text'\n ))", 'help_text': '"""Optional helper text here"""'}), "(required=False, choices=(('one', 'Option 1'), ('two',\n 'Option 2'), ('three', 'Option 3'), ('four', 'Option 4'), ('five',\n 'An example option that is really long to show what should be done to handle long text'\n )), help_text='Optional helper text here')\n", (345, 609), False, 'from django import forms\n')] |
from requests import post
from random import randint
from json import loads, dumps
import asyncio,base64,glob,json,math,urllib3,os,pathlib,random,sys,concurrent.futures,time
from tqdm import tqdm
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class encryption:
    """AES-CBC helper used to wrap/unwrap the messenger API payloads.

    The 32-byte cipher key is derived from the account auth string by
    ``secret``; the IV is all zeros.
    # NOTE(review): a fixed all-zero IV with a static per-account key is
    # required by the remote API but is not semantically secure.
    """
    def __init__(self, auth):
        self.key = bytearray(self.secret(auth), "UTF-8")
        self.iv = bytearray.fromhex('00000000000000000000000000000000')
    def replaceCharAt(self, e, t, i):
        """Return ``e`` with the slice starting at index ``t`` replaced by ``i``."""
        return e[0:t] + i + e[t + len(i):]
    def secret(self, e):
        """Derive the 32-char AES key from the 32-char auth string.

        The four 8-char quarters are reordered, then every character is
        rotated: digits by +5 (mod 10), lowercase letters by +9 (mod 26).
        """
        shuffled = e[16:24] + e[0:8] + e[24:32] + e[8:16]
        rotated = []
        for ch in shuffled:
            if '0' <= ch <= '9':
                rotated.append(chr((ord(ch) - ord('0') + 5) % 10 + ord('0')))
            else:
                rotated.append(chr((ord(ch) - ord('a') + 9) % 26 + ord('a')))
        return ''.join(rotated)
    def encrypt(self, text):
        """Encrypt UTF-8 ``text`` and return it as standard base64."""
        cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
        padded = pad(text.encode('UTF-8'), AES.block_size)
        return base64.b64encode(cipher.encrypt(padded)).decode('UTF-8')
    def decrypt(self, text):
        """Decrypt URL-safe base64 ``text`` back to a UTF-8 string."""
        cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
        plain = cipher.decrypt(base64.urlsafe_b64decode(text.encode('UTF-8')))
        return unpad(plain, AES.block_size).decode('UTF-8')
class Bot:
def __init__(self, auth):
self.auth = auth
self.enc = encryption(auth)
def sendMessage(self, chat_id, text, message_id=None):
if message_id == None:
return post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"text":text,
"reply_to_message_id":message_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/")
else:
return post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"text":text,
"reply_to_message_id":message_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/")
    def deleteMessages(self, chat_id, message_ids):
        """Delete `message_ids` (list of ids) from chat guid `chat_id`.

        "type": "Global" removes the messages for every participant, not
        just this account. Returns the raw `requests.Response`.
        """
        return post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
            "method":"deleteMessages",
            "input":{
                "object_guid":chat_id,
                "message_ids":message_ids,
                "type":"Global"
            },
            "client":{
                "app_name":"Main",
                "app_version":"3.2.1",
                "platform":"Web",
                "package":"web.rubika.ir",
                "lang_code":"fa"
            }
        }))},url="https://messengerg2c66.iranlms.ir/")
    def getUserInfo(self, chat_id):
        """Fetch profile info for user guid `chat_id`.

        Decrypts the response's `data_enc` envelope and returns the parsed
        dict.
        """
        return loads(self.enc.decrypt(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
            "method":"getUserInfo",
            "input":{
                "user_guid":chat_id
            },
            "client":{
                "app_name":"Main",
                "app_version":"3.2.1",
                "platform":"Web",
                "package":"web.rubika.ir",
                "lang_code":"fa"
            }
        }))},url="https://messengerg2c37.iranlms.ir/").json()["data_enc"]))
def getMessages(self, chat_id,min_id):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMessagesInterval",
"input":{
"object_guid":chat_id,
"middle_message_id":min_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c67.iranlms.ir/").json().get("data_enc"))).get("data").get("messages")
def getInfoByUsername(self, username):
''' username should be without @ '''
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getObjectByUsername",
"input":{
"username":username
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c23.iranlms.ir/").json().get("data_enc")))
def banGroupMember(self, chat_id, user_id):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"banGroupMember",
"input":{
"group_guid": chat_id,
"member_guid": user_id,
"action":"Set"
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c21.iranlms.ir/")
def invite(self, chat_id, user_ids):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"addGroupMembers",
"input":{
"group_guid": chat_id,
"member_guids": user_ids
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c22.iranlms.ir/")
def getGroupAdmins(self, chat_id):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"client":{
"app_name":"Main",
"app_version":"2.9.5",
"lang_code":"fa",
"package":"ir.resaneh1.iptv",
"platform":"Android"
},
"input":{
"group_guid":chat_id
},
"method":"getGroupAdminMembers"
}))},url="https://messengerg2c22.iranlms.ir/").json().get("data_enc")))
def getMessagesInfo(self, chat_id, message_ids):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMessagesByID",
"input":{
"object_guid": chat_id,
"message_ids": message_ids
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))}, url="https://messengerg2c24.iranlms.ir/").json()["data_enc"])).get("data").get("messages")
def setMembersAccess(self, chat_id, access_list):
return post(json={
"api_version": "4",
"auth": self.auth,
"client": {
"app_name": "Main",
"app_version": "2.9.5",
"lang_code": "fa",
"package": "ir.resaneh1.iptv",
"platform": "Android"
},
"data_enc": self.enc.encrypt(dumps({
"access_list": access_list,
"group_guid": chat_id
})),
"method": "setGroupDefaultAccess"
}, url="https://messengerg2c24.iranlms.ir/")
def getGroupMembers(self, chat_id):
    """Return every in-chat member record of the group."""
    payload = {
        "method": "getGroupAllMembers",
        "input": {"group_guid": chat_id},
        "client": {
            "app_name": "Main",
            "app_version": "3.2.1",
            "platform": "Web",
            "package": "web.rubika.ir",
            "lang_code": "fa"
        }
    }
    response = post(json={"api_version": "5", "auth": self.auth,
                      "data_enc": self.enc.encrypt(dumps(payload))},
                url="https://messengerg2c17.iranlms.ir/")
    decrypted = loads(self.enc.decrypt(response.json()["data_enc"]))
    return decrypted["data"]["in_chat_members"]
def getGroupInfo(self, chat_id):
    """Return the full decrypted info payload of a group."""
    payload = {
        "method": "getGroupInfo",
        "input": {"group_guid": chat_id},
        "client": {
            "app_name": "Main",
            "app_version": "3.2.1",
            "platform": "Web",
            "package": "web.rubika.ir",
            "lang_code": "fa"
        }
    }
    response = post(json={"api_version": "5", "auth": self.auth,
                      "data_enc": self.enc.encrypt(dumps(payload))},
                url="https://messengerg2c24.iranlms.ir/")
    return loads(self.enc.decrypt(response.json()["data_enc"]))
def getGroupLink(self, chat_id):
    """Return the invite (join) link of a group."""
    payload = {
        "method": "getGroupLink",
        "input": {"group_guid": chat_id},
        "client": {
            "app_name": "Main",
            "app_version": "3.2.1",
            "platform": "Web",
            "package": "web.rubika.ir",
            "lang_code": "fa"
        }
    }
    response = post(json={"api_version": "5", "auth": self.auth,
                      "data_enc": self.enc.encrypt(dumps(payload))},
                url="https://messengerg2c67.iranlms.ir/")
    decrypted = loads(self.enc.decrypt(response.json().get("data_enc")))
    return decrypted.get("data").get("join_link")
# thanks for <NAME>
def get_updates_all_chats(self):
    """Return chats that received updates in roughly the last 200 seconds.

    Fix: floor the (positive) epoch timestamp with the builtin int()
    instead of the private, unsupported helper `random._floor`.
    """
    time_stamp = str(int(datetime.datetime.today().timestamp()) - 200)
    payload = {
        "method": "getChatsUpdates",
        "input": {
            "state": time_stamp,
        },
        "client": {
            "app_name": "Main",
            "app_version": "3.2.1",
            "platform": "Web",
            "package": "web.rubika.ir",
            "lang_code": "fa"
        }
    }
    response = post(json={"api_version": "5", "auth": self.auth,
                      "data_enc": self.enc.encrypt(dumps(payload))},
                url="https://messengerg2c67.iranlms.ir/")
    return loads(self.enc.decrypt(response.json().get("data_enc"))).get("data").get("chats")
def get_updates_chat(self, chat_id):
    """Return messages of `chat_id` updated in roughly the last 200 seconds.

    Fix: floor the (positive) epoch timestamp with the builtin int()
    instead of the private, unsupported helper `random._floor`.
    """
    time_stamp = str(int(datetime.datetime.today().timestamp()) - 200)
    payload = {
        "method": "getMessagesUpdates",
        "input": {
            "object_guid": chat_id,
            "state": time_stamp
        },
        "client": {
            "app_name": "Main",
            "app_version": "3.2.1",
            "platform": "Web",
            "package": "web.rubika.ir",
            "lang_code": "fa"
        }
    }
    response = post(json={"api_version": "5", "auth": self.auth,
                      "data_enc": self.enc.encrypt(dumps(payload))},
                url="https://messengerg2c67.iranlms.ir/")
    return loads(self.enc.decrypt(response.json().get("data_enc"))).get("data").get("updated_messages")
def my_sticker_set(self):
    """Return the current user's sticker sets.

    Fix: dropped the unused `time_stamp` local (which also depended on the
    private helper `random._floor`); the getMyStickerSets request never
    used it.
    """
    payload = {
        "method": "getMyStickerSets",
        "input": {},
        "client": {
            "app_name": "Main",
            "app_version": "3.2.1",
            "platform": "Web",
            "package": "web.rubika.ir",
            "lang_code": "fa"
        }
    }
    response = post(json={"api_version": "5", "auth": self.auth,
                      "data_enc": self.enc.encrypt(dumps(payload))},
                url="https://messengerg2c67.iranlms.ir/")
    return loads(self.enc.decrypt(response.json().get("data_enc"))).get("data")
def requestFile(name, size, mime):
    """Reserve an upload slot on the file server and return its descriptor.

    Keeps re-sending the requestSendFile call until the HTTP response is
    200 AND the decrypted body reports status OK / status_det OK.

    Fixes over the original: `k` could be referenced unbound (NameError)
    when the first response failed to decrypt, and a bad status only set a
    dead local instead of re-sending the request.

    NOTE(review): uses `self`, `post`, `loads`, `dumps` from the enclosing
    scope — presumably a nested helper inside a method; confirm callers.
    """
    while True:
        response = post(json={"api_version": "5", "auth": self.auth,
                          "data_enc": self.enc.encrypt(dumps({
                              "method": "requestSendFile",
                              "input": {
                                  "file_name": name,
                                  "size": size,
                                  "mime": mime
                              },
                              "client": {
                                  "app_name": "Main",
                                  "app_version": "3.2.1",
                                  "platform": "Web",
                                  "package": "web.rubika.ir",
                                  "lang_code": "fa"
                              }
                          }))}, url="https://messengerg2c66.iranlms.ir/")
        # the original compared against repr(); keep the same success test
        if str(response) != '<Response [200]>':
            continue
        try:
            result = loads(self.enc.decrypt(response.json()["data_enc"]))
        except Exception:
            continue  # undecryptable / malformed body: retry
        if result['status'] == 'OK' and result['status_det'] == 'OK':
            return result['data']
def fileUpload(bytef, hash_send, file_id, url):
    """Upload `bytef` to `url` in 128 KiB parts; return access_hash_rec.

    Fixes over the original: the duplicated header dicts are built by one
    helper, the private `random._floor` is replaced by int() (identical for
    positive values), and the bare `except:` clauses no longer swallow
    KeyboardInterrupt.

    NOTE(review): uses `self`, `post`, `loads` from the enclosing scope.
    NOTE(review): the part count int(len/131072)+1 is kept from the
    original; when len(bytef) is an exact multiple of 131072 the final
    part is empty — presumably tolerated by the server, verify.
    """
    chunk_size = 131072

    def _headers(part_number, total_parts, length):
        # Per-part upload headers expected by the file server.
        return {
            'auth': self.auth,
            'chunk-size': str(length),
            'file-id': str(file_id),
            'access-hash-send': hash_send,
            'total-part': str(total_parts),
            'part-number': str(part_number)
        }

    def _retry_post(data, headers):
        # POST until the response body parses; return its 'data' payload.
        while True:
            try:
                return loads(post(data=data, url=url, headers=headers).text)['data']
            except Exception:
                pass

    if len(bytef) <= chunk_size:
        # Small file: a single part carries everything.
        result = _retry_post(bytef, _headers(1, 1, len(bytef)))
        return result['access_hash_rec']

    total = int(len(bytef) / chunk_size) + 1
    total_kb = round(len(bytef) / 1024)
    for part in range(1, total + 1):
        start = (part - 1) * chunk_size
        if part != total:
            _retry_post(bytef[start:start + chunk_size],
                        _headers(part, total, chunk_size))
            # progress report, same format as before
            print(str(round((start + chunk_size) / 1024)) + 'kb / ' + str(total_kb) + ' kb')
        else:
            # Only the final part's response carries the access hash.
            tail = bytef[start:]
            access_hash_rec = _retry_post(tail, _headers(part, total, len(tail)))['access_hash_rec']
            print(str(total_kb) + 'kb / ' + str(total_kb) + ' kb')
    return access_hash_rec
def sendFile(chat_id, file_id, mime, dc_id, access_hash_rec, file_name, size, text=None, message_id=None):
    """Send a previously-uploaded file as a message to `chat_id`.

    Optional `text` becomes the caption; optional `message_id` makes the
    message a reply.  Returns the decrypted server response.

    Fixes over the original: the request was duplicated four times (once
    per text/message_id combination) and only the plain branch retried on
    failure — the payload is now built once and every send retries until
    it succeeds.

    NOTE(review): uses `self`, `post`, `loads`, `dumps`, `randint` from
    the enclosing scope — presumably a nested helper inside a method.
    """
    inp = {
        "object_guid": chat_id,
        "rnd": f"{randint(100000,900000)}",  # client-side dedup nonce
    }
    if text is not None:
        inp["text"] = text
    if message_id is not None:
        inp["reply_to_message_id"] = message_id
    inp["file_inline"] = {
        "dc_id": str(dc_id),
        "file_id": str(file_id),
        "type": "File",
        "file_name": file_name,
        "size": size,
        "mime": mime,
        "access_hash_rec": access_hash_rec
    }
    while True:
        try:
            return loads(self.enc.decrypt(loads(post(json={
                "api_version": "5",
                "auth": self.auth,
                "data_enc": self.enc.encrypt(dumps({
                    "method": "sendMessage",
                    "input": inp,
                    "client": {
                        "app_name": "Main",
                        "app_version": "3.2.1",
                        "platform": "Web",
                        "package": "web.rubika.ir",
                        "lang_code": "fa"
                    }
                }))
            }, url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
        except Exception:
            continue  # transient transport/decrypt failure: retry
"json.loads",
"requests.post",
"base64.b64encode",
"json.dumps",
"urllib3.disable_warnings",
"Crypto.Cipher.AES.new",
"Crypto.Util.Padding.unpad",
"random.randint",
"random._floor"
] | [((270, 337), 'urllib3.disable_warnings', 'urllib3.disable_warnings', (['urllib3.exceptions.InsecureRequestWarning'], {}), '(urllib3.exceptions.InsecureRequestWarning)\n', (294, 337), False, 'import asyncio, base64, glob, json, math, urllib3, os, pathlib, random, sys, concurrent.futures, time\n'), ((1188, 1228), 'Crypto.Cipher.AES.new', 'AES.new', (['self.key', 'AES.MODE_CBC', 'self.iv'], {}), '(self.key, AES.MODE_CBC, self.iv)\n', (1195, 1228), False, 'from Crypto.Cipher import AES\n'), ((1381, 1421), 'Crypto.Cipher.AES.new', 'AES.new', (['self.key', 'AES.MODE_CBC', 'self.iv'], {}), '(self.key, AES.MODE_CBC, self.iv)\n', (1388, 1421), False, 'from Crypto.Cipher import AES\n'), ((11182, 11198), 'random._floor', 'random._floor', (['t'], {}), '(t)\n', (11195, 11198), False, 'import asyncio, base64, glob, json, math, urllib3, os, pathlib, random, sys, concurrent.futures, time\n'), ((1277, 1298), 'base64.b64encode', 'base64.b64encode', (['enc'], {}), '(enc)\n', (1293, 1298), False, 'import asyncio, base64, glob, json, math, urllib3, os, pathlib, random, sys, concurrent.futures, time\n'), ((1513, 1539), 'Crypto.Util.Padding.unpad', 'unpad', (['dec', 'AES.block_size'], {}), '(dec, AES.block_size)\n', (1518, 1539), False, 'from Crypto.Util.Padding import pad, unpad\n'), ((2820, 3070), 'json.dumps', 'dumps', (["{'method': 'deleteMessages', 'input': {'object_guid': chat_id,\n 'message_ids': message_ids, 'type': 'Global'}, 'client': {'app_name':\n 'Main', 'app_version': '3.2.1', 'platform': 'Web', 'package':\n 'web.rubika.ir', 'lang_code': 'fa'}}"], {}), "({'method': 'deleteMessages', 'input': {'object_guid': chat_id,\n 'message_ids': message_ids, 'type': 'Global'}, 'client': {'app_name':\n 'Main', 'app_version': '3.2.1', 'platform': 'Web', 'package':\n 'web.rubika.ir', 'lang_code': 'fa'}})\n", (2825, 3070), False, 'from json import loads, dumps\n'), ((4711, 4955), 'json.dumps', 'dumps', (["{'method': 'banGroupMember', 'input': {'group_guid': chat_id, 'member_guid':\n 
user_id, 'action': 'Set'}, 'client': {'app_name': 'Main', 'app_version':\n '3.2.1', 'platform': 'Web', 'package': 'web.rubika.ir', 'lang_code': 'fa'}}"], {}), "({'method': 'banGroupMember', 'input': {'group_guid': chat_id,\n 'member_guid': user_id, 'action': 'Set'}, 'client': {'app_name': 'Main',\n 'app_version': '3.2.1', 'platform': 'Web', 'package': 'web.rubika.ir',\n 'lang_code': 'fa'}})\n", (4716, 4955), False, 'from json import loads, dumps\n'), ((5157, 5388), 'json.dumps', 'dumps', (["{'method': 'addGroupMembers', 'input': {'group_guid': chat_id,\n 'member_guids': user_ids}, 'client': {'app_name': 'Main', 'app_version':\n '3.2.1', 'platform': 'Web', 'package': 'web.rubika.ir', 'lang_code': 'fa'}}"], {}), "({'method': 'addGroupMembers', 'input': {'group_guid': chat_id,\n 'member_guids': user_ids}, 'client': {'app_name': 'Main', 'app_version':\n '3.2.1', 'platform': 'Web', 'package': 'web.rubika.ir', 'lang_code': 'fa'}}\n )\n", (5162, 5388), False, 'from json import loads, dumps\n'), ((6743, 6801), 'json.dumps', 'dumps', (["{'access_list': access_list, 'group_guid': chat_id}"], {}), "({'access_list': access_list, 'group_guid': chat_id})\n", (6748, 6801), False, 'from json import loads, dumps\n'), ((10991, 11027), 'requests.post', 'post', ([], {'data': 'bytef', 'url': 'url', 'headers': 'h'}), '(data=bytef, url=url, headers=h)\n', (10995, 11027), False, 'from requests import post\n'), ((10185, 10414), 'json.dumps', 'dumps', (["{'method': 'requestSendFile', 'input': {'file_name': name, 'size': size,\n 'mime': mime}, 'client': {'app_name': 'Main', 'app_version': '3.2.1',\n 'platform': 'Web', 'package': 'web.rubika.ir', 'lang_code': 'fa'}}"], {}), "({'method': 'requestSendFile', 'input': {'file_name': name, 'size':\n size, 'mime': mime}, 'client': {'app_name': 'Main', 'app_version':\n '3.2.1', 'platform': 'Web', 'package': 'web.rubika.ir', 'lang_code': 'fa'}}\n )\n", (10190, 10414), False, 'from json import loads, dumps\n'), ((11040, 11048), 'json.loads', 'loads', 
(['j'], {}), '(j)\n', (11045, 11048), False, 'from json import loads, dumps\n'), ((11594, 11602), 'json.loads', 'loads', (['o'], {}), '(o)\n', (11599, 11602), False, 'from json import loads, dumps\n'), ((12149, 12157), 'json.loads', 'loads', (['p'], {}), '(p)\n', (12154, 12157), False, 'from json import loads, dumps\n'), ((3287, 3482), 'json.dumps', 'dumps', (["{'method': 'getUserInfo', 'input': {'user_guid': chat_id}, 'client': {\n 'app_name': 'Main', 'app_version': '3.2.1', 'platform': 'Web',\n 'package': 'web.rubika.ir', 'lang_code': 'fa'}}"], {}), "({'method': 'getUserInfo', 'input': {'user_guid': chat_id}, 'client':\n {'app_name': 'Main', 'app_version': '3.2.1', 'platform': 'Web',\n 'package': 'web.rubika.ir', 'lang_code': 'fa'}})\n", (3292, 3482), False, 'from json import loads, dumps\n'), ((7578, 7775), 'json.dumps', 'dumps', (["{'method': 'getGroupInfo', 'input': {'group_guid': chat_id}, 'client': {\n 'app_name': 'Main', 'app_version': '3.2.1', 'platform': 'Web',\n 'package': 'web.rubika.ir', 'lang_code': 'fa'}}"], {}), "({'method': 'getGroupInfo', 'input': {'group_guid': chat_id}, 'client':\n {'app_name': 'Main', 'app_version': '3.2.1', 'platform': 'Web',\n 'package': 'web.rubika.ir', 'lang_code': 'fa'}})\n", (7583, 7775), False, 'from json import loads, dumps\n'), ((1925, 1948), 'random.randint', 'randint', (['(100000)', '(900000)'], {}), '(100000, 900000)\n', (1932, 1948), False, 'from random import randint\n'), ((2398, 2421), 'random.randint', 'randint', (['(100000)', '(900000)'], {}), '(100000, 900000)\n', (2405, 2421), False, 'from random import randint\n'), ((4278, 4481), 'json.dumps', 'dumps', (["{'method': 'getObjectByUsername', 'input': {'username': username}, 'client':\n {'app_name': 'Main', 'app_version': '3.2.1', 'platform': 'Web',\n 'package': 'web.rubika.ir', 'lang_code': 'fa'}}"], {}), "({'method': 'getObjectByUsername', 'input': {'username': username},\n 'client': {'app_name': 'Main', 'app_version': '3.2.1', 'platform':\n 'Web', 'package': 
'web.rubika.ir', 'lang_code': 'fa'}})\n", (4283, 4481), False, 'from json import loads, dumps\n'), ((5607, 5820), 'json.dumps', 'dumps', (["{'client': {'app_name': 'Main', 'app_version': '2.9.5', 'lang_code': 'fa',\n 'package': 'ir.resaneh1.iptv', 'platform': 'Android'}, 'input': {\n 'group_guid': chat_id}, 'method': 'getGroupAdminMembers'}"], {}), "({'client': {'app_name': 'Main', 'app_version': '2.9.5', 'lang_code':\n 'fa', 'package': 'ir.resaneh1.iptv', 'platform': 'Android'}, 'input': {\n 'group_guid': chat_id}, 'method': 'getGroupAdminMembers'})\n", (5612, 5820), False, 'from json import loads, dumps\n'), ((7061, 7264), 'json.dumps', 'dumps', (["{'method': 'getGroupAllMembers', 'input': {'group_guid': chat_id}, 'client':\n {'app_name': 'Main', 'app_version': '3.2.1', 'platform': 'Web',\n 'package': 'web.rubika.ir', 'lang_code': 'fa'}}"], {}), "({'method': 'getGroupAllMembers', 'input': {'group_guid': chat_id},\n 'client': {'app_name': 'Main', 'app_version': '3.2.1', 'platform':\n 'Web', 'package': 'web.rubika.ir', 'lang_code': 'fa'}})\n", (7066, 7264), False, 'from json import loads, dumps\n'), ((9735, 9915), 'json.dumps', 'dumps', (["{'method': 'getMyStickerSets', 'input': {}, 'client': {'app_name': 'Main',\n 'app_version': '3.2.1', 'platform': 'Web', 'package': 'web.rubika.ir',\n 'lang_code': 'fa'}}"], {}), "({'method': 'getMyStickerSets', 'input': {}, 'client': {'app_name':\n 'Main', 'app_version': '3.2.1', 'platform': 'Web', 'package':\n 'web.rubika.ir', 'lang_code': 'fa'}})\n", (9740, 9915), False, 'from json import loads, dumps\n'), ((6077, 6310), 'json.dumps', 'dumps', (["{'method': 'getMessagesByID', 'input': {'object_guid': chat_id,\n 'message_ids': message_ids}, 'client': {'app_name': 'Main',\n 'app_version': '3.2.1', 'platform': 'Web', 'package': 'web.rubika.ir',\n 'lang_code': 'fa'}}"], {}), "({'method': 'getMessagesByID', 'input': {'object_guid': chat_id,\n 'message_ids': message_ids}, 'client': {'app_name': 'Main',\n 'app_version': '3.2.1', 
'platform': 'Web', 'package': 'web.rubika.ir',\n 'lang_code': 'fa'}})\n", (6082, 6310), False, 'from json import loads, dumps\n'), ((3726, 3964), 'json.dumps', 'dumps', (["{'method': 'getMessagesInterval', 'input': {'object_guid': chat_id,\n 'middle_message_id': min_id}, 'client': {'app_name': 'Main',\n 'app_version': '3.2.1', 'platform': 'Web', 'package': 'web.rubika.ir',\n 'lang_code': 'fa'}}"], {}), "({'method': 'getMessagesInterval', 'input': {'object_guid': chat_id,\n 'middle_message_id': min_id}, 'client': {'app_name': 'Main',\n 'app_version': '3.2.1', 'platform': 'Web', 'package': 'web.rubika.ir',\n 'lang_code': 'fa'}})\n", (3731, 3964), False, 'from json import loads, dumps\n'), ((8039, 8236), 'json.dumps', 'dumps', (["{'method': 'getGroupLink', 'input': {'group_guid': chat_id}, 'client': {\n 'app_name': 'Main', 'app_version': '3.2.1', 'platform': 'Web',\n 'package': 'web.rubika.ir', 'lang_code': 'fa'}}"], {}), "({'method': 'getGroupLink', 'input': {'group_guid': chat_id}, 'client':\n {'app_name': 'Main', 'app_version': '3.2.1', 'platform': 'Web',\n 'package': 'web.rubika.ir', 'lang_code': 'fa'}})\n", (8044, 8236), False, 'from json import loads, dumps\n'), ((8608, 8806), 'json.dumps', 'dumps', (["{'method': 'getChatsUpdates', 'input': {'state': time_stamp}, 'client': {\n 'app_name': 'Main', 'app_version': '3.2.1', 'platform': 'Web',\n 'package': 'web.rubika.ir', 'lang_code': 'fa'}}"], {}), "({'method': 'getChatsUpdates', 'input': {'state': time_stamp},\n 'client': {'app_name': 'Main', 'app_version': '3.2.1', 'platform':\n 'Web', 'package': 'web.rubika.ir', 'lang_code': 'fa'}})\n", (8613, 8806), False, 'from json import loads, dumps\n'), ((9157, 9387), 'json.dumps', 'dumps', (["{'method': 'getMessagesUpdates', 'input': {'object_guid': chat_id, 'state':\n time_stamp}, 'client': {'app_name': 'Main', 'app_version': '3.2.1',\n 'platform': 'Web', 'package': 'web.rubika.ir', 'lang_code': 'fa'}}"], {}), "({'method': 'getMessagesUpdates', 'input': {'object_guid': 
chat_id,\n 'state': time_stamp}, 'client': {'app_name': 'Main', 'app_version':\n '3.2.1', 'platform': 'Web', 'package': 'web.rubika.ir', 'lang_code': 'fa'}}\n )\n", (9162, 9387), False, 'from json import loads, dumps\n'), ((13545, 13568), 'random.randint', 'randint', (['(100000)', '(900000)'], {}), '(100000, 900000)\n', (13552, 13568), False, 'from random import randint\n'), ((14312, 14335), 'random.randint', 'randint', (['(100000)', '(900000)'], {}), '(100000, 900000)\n', (14319, 14335), False, 'from random import randint\n'), ((15033, 15056), 'random.randint', 'randint', (['(100000)', '(900000)'], {}), '(100000, 900000)\n', (15040, 15056), False, 'from random import randint\n'), ((12750, 12773), 'random.randint', 'randint', (['(100000)', '(900000)'], {}), '(100000, 900000)\n', (12757, 12773), False, 'from random import randint\n')] |
# -*- coding: utf-8 -*-
"""
Functions to install dependencies for non-standard models (e.g., Centermask2)
and get compatible Detectron2 configs for them.
"""
import sys
import subprocess
try:
from detectron2.config import get_cfg
except ModuleNotFoundError:
print('WARNING: Detectron2 not installed on (virtual?) machine;',
'colab_zirc_dims model loading functions unavailable')
__all__ = ['get_czd_swint_cfg',
'get_czd_centermask2_cfg']
def get_czd_swint_cfg():
    """Install dependencies for swint_detectron2 and/or get a Swin-T Mask RCNN
    Detectron2 cfg.
    Returns
    -------
    out_cfg : Detectron2 Config instance
        A D2 config for a MaskRCNN model with a Swin-T (see swint_detectron2)
        backbone. Lacks usable weights path; this must be added in main
        Notebook.
    """
    from detectron2.config import get_cfg

    def _shell(cmd):
        # Run a command, echoing its stdout; on failure print the error.
        try:
            completed = subprocess.run(cmd, capture_output=True, check=True)
            print(str(completed.stdout.decode('UTF-8')))
        except subprocess.CalledProcessError as err:
            print(err)

    try:
        import timm.utils as test_timm  # presence check only
    except ModuleNotFoundError:
        print('Installing module: timm')
        _shell(["pip", "install", "timm"])
    try:
        import swint
    except ModuleNotFoundError:
        print('Cloning module: Swint_detectron2')
        _shell(["git", "clone",
                "https://github.com/xiaohu2015/SwinT_detectron2",
                "swinT_repo"])
        sys.path.insert(0, '/content/swinT_repo')
        import swint
    cfg = get_cfg()
    swint.add_swint_config(cfg)
    cfg.merge_from_file('/content/swinT_repo/configs/SwinT/mask_rcnn_swint_T_FPN_3x.yaml')
    return cfg
def get_czd_centermask2_cfg():
    """Clone dependency for Centermask2 and/or get a Centermask2 VoVNet2-backbone
    Detectron2 cfg.
    Returns
    -------
    out_cfg : Detectron2 Config instance
        A D2 config for a Centermask2 model with VoVNet2 backbone. Lacks usable
        weights path; this must be added in main Notebook.
    """
    try:
        import centermask
    except ModuleNotFoundError:
        # Not importable yet: clone it and put the checkout on sys.path.
        print('Cloning module: Centermask2')
        try:
            completed = subprocess.run(["git", "clone",
                                       "https://github.com/youngwanLEE/centermask2.git",
                                       "centermask"], capture_output=True,
                                      check=True)
            print(str(completed.stdout.decode('UTF-8')))
        except subprocess.CalledProcessError as err:
            print(err)
        sys.path.insert(0, '/content/centermask')
        import centermask
    from centermask.config import get_cfg
    cfg = get_cfg()
    cfg.merge_from_file('/content/centermask/configs/centermask/centermask_V_99_eSE_FPN_ms_3x.yaml')
    return cfg
| [
"subprocess.run",
"swint.add_swint_config",
"sys.path.insert",
"centermask.config.get_cfg"
] | [((1896, 1905), 'centermask.config.get_cfg', 'get_cfg', ([], {}), '()\n', (1903, 1905), False, 'from centermask.config import get_cfg\n'), ((1910, 1941), 'swint.add_swint_config', 'swint.add_swint_config', (['out_cfg'], {}), '(out_cfg)\n', (1932, 1941), False, 'import swint\n'), ((3067, 3076), 'centermask.config.get_cfg', 'get_cfg', ([], {}), '()\n', (3074, 3076), False, 'from centermask.config import get_cfg\n'), ((1819, 1860), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/content/swinT_repo"""'], {}), "(0, '/content/swinT_repo')\n", (1834, 1860), False, 'import sys\n'), ((2943, 2984), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/content/centermask"""'], {}), "(0, '/content/centermask')\n", (2958, 2984), False, 'import sys\n'), ((1038, 1113), 'subprocess.run', 'subprocess.run', (["['pip', 'install', 'timm']"], {'capture_output': '(True)', 'check': '(True)'}), "(['pip', 'install', 'timm'], capture_output=True, check=True)\n", (1052, 1113), False, 'import subprocess\n'), ((1433, 1570), 'subprocess.run', 'subprocess.run', (["['git', 'clone', 'https://github.com/xiaohu2015/SwinT_detectron2', 'swinT_repo'\n ]"], {'capture_output': '(True)', 'check': '(True)'}), "(['git', 'clone',\n 'https://github.com/xiaohu2015/SwinT_detectron2', 'swinT_repo'],\n capture_output=True, check=True)\n", (1447, 1570), False, 'import subprocess\n'), ((2555, 2692), 'subprocess.run', 'subprocess.run', (["['git', 'clone', 'https://github.com/youngwanLEE/centermask2.git', 'centermask'\n ]"], {'capture_output': '(True)', 'check': '(True)'}), "(['git', 'clone',\n 'https://github.com/youngwanLEE/centermask2.git', 'centermask'],\n capture_output=True, check=True)\n", (2569, 2692), False, 'import subprocess\n')] |
#!/usr/bin/env python
from preprocess import get_negative_samples, get_positive_samples
from utils import init_spark
from preprocess import get_dataset_df
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit, \
CrossValidator
from pyspark.ml import Pipeline
from class_weighter import ClassWeighter
from random_forest import get_feature_importances
from export_results import *
# Balanced-random-forest ("brf") training run; results land in this directory.
result_dir = create_result_dir('brf')
spark = init_spark()
# Downsample the (larger) negative class to 50% before training.
neg_samples = get_negative_samples(spark).sample(0.5)
pos_samples = get_positive_samples(spark)
# Negatives-per-positive ratio; used below to weight the minority class.
imbalance_ratio = (neg_samples.count()/pos_samples.count())
train_set, test_set = get_dataset_df(spark, pos_samples, neg_samples)
# Cache both splits: each is traversed more than once (fit + transform).
train_set, test_set = train_set.persist(), test_set.persist()
brf = RandomForestClassifier(labelCol="label",
                             featuresCol="features",
                             cacheNodeIds=True,
                             maxDepth=25,
                             impurity='entropy',
                             featureSubsetStrategy='13',
                             weightCol='weight',
                             minInstancesPerNode=10,
                             numTrees=100,
                             subsamplingRate=1.0,
                             maxMemoryInMB=256)
# Per-class weights [1/imbalance_ratio, 1.0] — presumably ordered
# [negative, positive]; verify against ClassWeighter's convention.
cw = ClassWeighter().setClassWeight([1/imbalance_ratio, 1.0])
pipeline = Pipeline().setStages([cw, brf])
model = pipeline.fit(train_set)
# Score both splits (test for evaluation, train to gauge overfitting).
predictions = model.transform(test_set).persist()
train_predictions = model.transform(train_set).persist()
write_params(model, result_dir)
write_results(predictions, train_predictions, result_dir)
# Write feature importances
feature_importances = get_feature_importances(model.stages[1])
feature_importances.to_csv(result_dir + '/feature_importances.csv')
| [
"preprocess.get_positive_samples",
"pyspark.ml.Pipeline",
"class_weighter.ClassWeighter",
"pyspark.ml.classification.RandomForestClassifier",
"utils.init_spark",
"preprocess.get_negative_samples",
"preprocess.get_dataset_df",
"random_forest.get_feature_importances"
] | [((532, 544), 'utils.init_spark', 'init_spark', ([], {}), '()\n', (542, 544), False, 'from utils import init_spark\n'), ((613, 640), 'preprocess.get_positive_samples', 'get_positive_samples', (['spark'], {}), '(spark)\n', (633, 640), False, 'from preprocess import get_negative_samples, get_positive_samples\n'), ((725, 772), 'preprocess.get_dataset_df', 'get_dataset_df', (['spark', 'pos_samples', 'neg_samples'], {}), '(spark, pos_samples, neg_samples)\n', (739, 772), False, 'from preprocess import get_dataset_df\n'), ((842, 1096), 'pyspark.ml.classification.RandomForestClassifier', 'RandomForestClassifier', ([], {'labelCol': '"""label"""', 'featuresCol': '"""features"""', 'cacheNodeIds': '(True)', 'maxDepth': '(25)', 'impurity': '"""entropy"""', 'featureSubsetStrategy': '"""13"""', 'weightCol': '"""weight"""', 'minInstancesPerNode': '(10)', 'numTrees': '(100)', 'subsamplingRate': '(1.0)', 'maxMemoryInMB': '(256)'}), "(labelCol='label', featuresCol='features',\n cacheNodeIds=True, maxDepth=25, impurity='entropy',\n featureSubsetStrategy='13', weightCol='weight', minInstancesPerNode=10,\n numTrees=100, subsamplingRate=1.0, maxMemoryInMB=256)\n", (864, 1096), False, 'from pyspark.ml.classification import RandomForestClassifier\n'), ((1761, 1801), 'random_forest.get_feature_importances', 'get_feature_importances', (['model.stages[1]'], {}), '(model.stages[1])\n', (1784, 1801), False, 'from random_forest import get_feature_importances\n'), ((559, 586), 'preprocess.get_negative_samples', 'get_negative_samples', (['spark'], {}), '(spark)\n', (579, 586), False, 'from preprocess import get_negative_samples, get_positive_samples\n'), ((1380, 1395), 'class_weighter.ClassWeighter', 'ClassWeighter', ([], {}), '()\n', (1393, 1395), False, 'from class_weighter import ClassWeighter\n'), ((1448, 1458), 'pyspark.ml.Pipeline', 'Pipeline', ([], {}), '()\n', (1456, 1458), False, 'from pyspark.ml import Pipeline\n')] |
import torch as th
from unittest import TestCase
from pro_gan_pytorch import CustomLayers as cL
device = th.device("cuda" if th.cuda.is_available() else "cpu")
class Test_equalized_conv2d(TestCase):
    """Tests for the equalized-learning-rate 2D convolution layer."""

    def setUp(self):
        # layer under test: 21 -> 3 channels, 3x3 kernel, padding 1
        self.conv_block = cL._equalized_conv2d(21, 3, k_size=(3, 3), pad=1)
        print("\nEqualized conv block:\n%s" % str(self.conv_block))

    def test_forward(self):
        sample = th.randn(32, 21, 16, 16).to(device)
        output = self.conv_block(sample)
        # spatial size is preserved; channels go 21 -> 3
        self.assertEqual(output.shape, (32, 3, 16, 16))
        nan_count = th.isnan(output).sum().item()
        inf_count = th.isinf(output).sum().item()
        self.assertEqual(nan_count, 0)
        self.assertEqual(inf_count, 0)
        # equalized layers keep the raw weights near unit std
        self.assertAlmostEqual(self.conv_block.weight.data.std(), 1, delta=1e-1)

    def tearDown(self):
        # free the layer created in setUp
        del self.conv_block
class Test_equalized_deconv2d(TestCase):
    """Tests for the equalized-learning-rate 2D transposed convolution layer."""

    def setUp(self):
        self.deconv_block = cL._equalized_deconv2d(21, 3, k_size=(3, 3), pad=1)
        # Fix: the printed label said "conv" (copy-paste from the conv test);
        # this is the deconv block.
        print("\nEqualized deconv block:\n%s" % str(self.deconv_block))

    def test_forward(self):
        mock_in = th.randn(32, 21, 16, 16).to(device)
        mock_out = self.deconv_block(mock_in)
        # check output: spatial size preserved, channels 21 -> 3
        self.assertEqual(mock_out.shape, (32, 3, 16, 16))
        self.assertEqual(th.isnan(mock_out).sum().item(), 0)
        self.assertEqual(th.isinf(mock_out).sum().item(), 0)
        # check the weight's scale (equalized layers keep raw weights ~ std 1)
        self.assertAlmostEqual(self.deconv_block.weight.data.std(), 1, delta=1e-1)

    def tearDown(self):
        # delete the computational resources
        del self.deconv_block
class Test_equalized_linear(TestCase):
    """Tests for the equalized-learning-rate fully-connected layer."""

    def setUp(self):
        # layer under test: 13 -> 52 features
        self.lin_block = cL._equalized_linear(13, 52)
        print("\nEqualized linear block:\n%s" % str(self.lin_block))

    def test_forward(self):
        sample = th.randn(32, 13).to(device)
        output = self.lin_block(sample)
        # output carries the configured 52 features per row
        self.assertEqual(output.shape, (32, 52))
        nan_count = th.isnan(output).sum().item()
        inf_count = th.isinf(output).sum().item()
        self.assertEqual(nan_count, 0)
        self.assertEqual(inf_count, 0)
        # equalized layers keep the raw weights near unit std
        self.assertAlmostEqual(self.lin_block.weight.data.std(), 1, delta=1e-1)

    def tearDown(self):
        # free the layer created in setUp
        del self.lin_block
class Test_PixelwiseNorm(TestCase):
    """Tests for the pixel-wise feature normalization layer."""

    def setUp(self):
        self.normalizer = cL.PixelwiseNorm()

    def test_forward(self):
        sample = th.randn(1, 13, 1, 1).to(device)
        output = self.normalizer(sample)
        # normalization must not change the tensor's shape
        self.assertEqual(output.shape, sample.shape)
        for finite_check in (th.isnan, th.isinf):
            self.assertEqual(finite_check(output).sum().item(), 0)
        # we cannot assert that the output norm is always smaller than the
        # input norm, so no further checking is possible here

    def tearDown(self):
        # free the layer created in setUp
        del self.normalizer
class Test_MinibatchStdDev(TestCase):
    """Tests for the minibatch standard-deviation layer."""

    def setUp(self):
        self.minStdD = cL.MinibatchStdDev()

    def test_forward(self):
        sample = th.randn(1, 13, 16, 16).to(device)
        output = self.minStdD(sample)
        # the layer appends exactly one statistics channel
        self.assertEqual(output.shape[1], sample.shape[1] + 1)
        for finite_check in (th.isnan, th.isinf):
            self.assertEqual(finite_check(output).sum().item(), 0)

    def tearDown(self):
        # free the layer created in setUp
        del self.minStdD
| [
"pro_gan_pytorch.CustomLayers._equalized_deconv2d",
"torch.isnan",
"pro_gan_pytorch.CustomLayers._equalized_linear",
"pro_gan_pytorch.CustomLayers.MinibatchStdDev",
"torch.cuda.is_available",
"pro_gan_pytorch.CustomLayers._equalized_conv2d",
"torch.isinf",
"pro_gan_pytorch.CustomLayers.PixelwiseNorm",... | [((127, 149), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (147, 149), True, 'import torch as th\n'), ((251, 300), 'pro_gan_pytorch.CustomLayers._equalized_conv2d', 'cL._equalized_conv2d', (['(21)', '(3)'], {'k_size': '(3, 3)', 'pad': '(1)'}), '(21, 3, k_size=(3, 3), pad=1)\n', (271, 300), True, 'from pro_gan_pytorch import CustomLayers as cL\n'), ((1050, 1101), 'pro_gan_pytorch.CustomLayers._equalized_deconv2d', 'cL._equalized_deconv2d', (['(21)', '(3)'], {'k_size': '(3, 3)', 'pad': '(1)'}), '(21, 3, k_size=(3, 3), pad=1)\n', (1072, 1101), True, 'from pro_gan_pytorch import CustomLayers as cL\n'), ((1854, 1882), 'pro_gan_pytorch.CustomLayers._equalized_linear', 'cL._equalized_linear', (['(13)', '(52)'], {}), '(13, 52)\n', (1874, 1882), True, 'from pro_gan_pytorch import CustomLayers as cL\n'), ((2659, 2677), 'pro_gan_pytorch.CustomLayers.PixelwiseNorm', 'cL.PixelwiseNorm', ([], {}), '()\n', (2675, 2677), True, 'from pro_gan_pytorch import CustomLayers as cL\n'), ((3357, 3377), 'pro_gan_pytorch.CustomLayers.MinibatchStdDev', 'cL.MinibatchStdDev', ([], {}), '()\n', (3375, 3377), True, 'from pro_gan_pytorch import CustomLayers as cL\n'), ((458, 482), 'torch.randn', 'th.randn', (['(32)', '(21)', '(16)', '(16)'], {}), '(32, 21, 16, 16)\n', (466, 482), True, 'import torch as th\n'), ((1261, 1285), 'torch.randn', 'th.randn', (['(32)', '(21)', '(16)', '(16)'], {}), '(32, 21, 16, 16)\n', (1269, 1285), True, 'import torch as th\n'), ((2092, 2108), 'torch.randn', 'th.randn', (['(32)', '(13)'], {}), '(32, 13)\n', (2100, 2108), True, 'import torch as th\n'), ((2725, 2746), 'torch.randn', 'th.randn', (['(1)', '(13)', '(1)', '(1)'], {}), '(1, 13, 1, 1)\n', (2733, 2746), True, 'import torch as th\n'), ((3425, 3448), 'torch.randn', 'th.randn', (['(1)', '(13)', '(16)', '(16)'], {}), '(1, 13, 16, 16)\n', (3433, 3448), True, 'import torch as th\n'), ((645, 663), 'torch.isnan', 'th.isnan', (['mock_out'], {}), 
'(mock_out)\n', (653, 663), True, 'import torch as th\n'), ((706, 724), 'torch.isinf', 'th.isinf', (['mock_out'], {}), '(mock_out)\n', (714, 724), True, 'import torch as th\n'), ((1450, 1468), 'torch.isnan', 'th.isnan', (['mock_out'], {}), '(mock_out)\n', (1458, 1468), True, 'import torch as th\n'), ((1511, 1529), 'torch.isinf', 'th.isinf', (['mock_out'], {}), '(mock_out)\n', (1519, 1529), True, 'import torch as th\n'), ((2263, 2281), 'torch.isnan', 'th.isnan', (['mock_out'], {}), '(mock_out)\n', (2271, 2281), True, 'import torch as th\n'), ((2324, 2342), 'torch.isinf', 'th.isinf', (['mock_out'], {}), '(mock_out)\n', (2332, 2342), True, 'import torch as th\n'), ((2907, 2925), 'torch.isnan', 'th.isnan', (['mock_out'], {}), '(mock_out)\n', (2915, 2925), True, 'import torch as th\n'), ((2968, 2986), 'torch.isinf', 'th.isinf', (['mock_out'], {}), '(mock_out)\n', (2976, 2986), True, 'import torch as th\n'), ((3616, 3634), 'torch.isnan', 'th.isnan', (['mock_out'], {}), '(mock_out)\n', (3624, 3634), True, 'import torch as th\n'), ((3677, 3695), 'torch.isinf', 'th.isinf', (['mock_out'], {}), '(mock_out)\n', (3685, 3695), True, 'import torch as th\n')] |
import os
from serif.model.relation_mention_model import RelationMentionModel
from serif.theory.enumerated_type import Tense, Modality
# Modified from DogFoodFinderRelationMentionModel
class AIDARelationMentionModel(RelationMentionModel):
    """Adds TACRED relations to TACRED entities.

    Reads a mapping file that pairs each document with a TACRED-style
    annotation file and exposes the gold relation for every sentence.
    """
    def __init__(self, mapping_file, **kwargs):
        super(AIDARelationMentionModel, self).__init__(**kwargs)
        self.words2anno = self.load_words2anno_dict(mapping_file)
        self.anno_dict = self.load_anno_dict(self.words2anno)
        # Permit the model to accept an annotations file as argument.
        self.external_tag_file = True

    def get_relation_mention_info(self, sentence):
        """Return the gold relation tuple for ``sentence``.

        Each TACRED sentence is expected to contain exactly two entity
        mentions; the single annotated relation links them.
        """
        annotations = self.anno_dict[sentence.document.docid][sentence.sent_no]
        # Annotation fields: subj_start, subj_end, subj_type,
        # obj_start, obj_end, obj_type, relation.
        relation = annotations[6]
        l_mention = sentence.mention_set[0]
        r_mention = sentence.mention_set[1]
        return [(relation, l_mention, r_mention, Tense.Unspecified, Modality.Asserted)]

    def load_words2anno_dict(self, mapping_file):
        """Map each document basename to its annotations file.

        :param mapping_file: tab-separated file of "doc[.words] doc.annotations"
            lines, one per document to be processed
        :return: {doc basename: annotations file path}
        """
        words2anno = dict()
        with open(mapping_file, 'r') as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) < 2:
                    continue  # skip blank or malformed lines
                words2anno[os.path.basename(parts[0])] = parts[1]
        return words2anno

    def load_anno_dict(self, mapping_dict):
        """Load per-sentence annotation rows for every mapped document.

        :param mapping_dict: created by :meth:`load_words2anno_dict`
        :return: {doc basename: list of 7-field annotation rows, one per sentence}
        """
        anno_dict = dict()
        for words_file, anno_file in mapping_dict.items():
            anno_dict[os.path.basename(words_file)] = self.preprocess_anno_file(anno_file)
        return anno_dict

    def preprocess_anno_file(self, anno_file):
        """Split the supplementary .annotations file into per-sentence field lists."""
        # Use a context manager so the file handle is always closed.
        with open(anno_file) as f:
            return [s.strip().split() for s in f]
| [
"os.path.basename"
] | [((2648, 2676), 'os.path.basename', 'os.path.basename', (['words_file'], {}), '(words_file)\n', (2664, 2676), False, 'import os\n'), ((1895, 1923), 'os.path.basename', 'os.path.basename', (['words_file'], {}), '(words_file)\n', (1911, 1923), False, 'import os\n')] |
import math
import gmpy2
# How many results you want to find (the search stops at this expected count).
MAX_COUNT = 500
# Empirical calibration constants (assumed machine-specific — re-measure on new hardware).
K_COUNT = 3.7 # d = 1000 yields ~264
# for the parallel C++ tester
K_COST = 4.14 * 1e-11 # d = 5000 takes ~400s
K_FILTER_COST = 1.0 * 1e-9 # d = 5000, sieve = 30M takes 10.3s
def optimal_sieve(d, expected_cost):
    """Choose how far to sieve d-digit candidates before primality testing.

    Extends the sieve prime-by-prime (in groups once the primes get large)
    until the marginal sieving cost exceeds the testing cost it saves.

    :param d: number of decimal digits of the candidates.
    :param expected_cost: estimated cost of primality-testing all
        unsieved candidates.
    :return: (sieve_cost, cost_of_testing_survivors, sieve_limit,
        prime_pi_at_limit, expected_candidate_count_after_sieve)
    """
    non_trivial_a_b = d * 23 # removes 2, 3, 5,
    expected_after_sieve = non_trivial_a_b
    sieve_cost = 0
    # Sentinel strictly above any first-iteration cost so the loop runs at least once.
    best_cost = expected_cost + 1.0
    prime_pi = 3
    current_prime = gmpy2.mpz(5)
    while True:
        if current_prime < 1e5:
            # Small primes: step exactly one prime at a time.
            group_size = 1
            current_prime = int(gmpy2.next_prime(current_prime))
        else:
            # do groups of primes at the same time
            # (average prime gap near p is ~log(p), so this skips ~group_size primes)
            group_size = int(current_prime / 10000)
            current_prime += group_size * math.log(current_prime)
        prime_pi += group_size
        # Each prime p removes roughly a 0.99/p fraction of the survivors
        # (0.99 appears to be an empirical fudge factor — TODO confirm).
        filter_rate = (1 - (0.99 / current_prime)) ** group_size
        expected_after_sieve *= filter_rate
        calc_cost = group_size * d * K_FILTER_COST
        sieve_cost += calc_cost
        filter_ratio = expected_after_sieve / non_trivial_a_b
        new_cost = sieve_cost + filter_ratio * expected_cost
        # Stop once sieving further costs more than the testing time it saves.
        if new_cost > best_cost:
            break
        best_cost = new_cost
    return (sieve_cost,
            expected_cost * filter_ratio,
            int(current_prime),
            prime_pi,
            int(expected_after_sieve))
def cost_test_d(d):
    """Estimate (total cost, expected hit count) for testing d-digit candidates.

    The per-candidate test cost scales like log(N)^2 in theory but
    log(N)^2.3 in practice, and the hit density falls off as 1/log(N).
    24 (a, b) pairs are valid, so both figures are scaled by 24.
    """
    # log of the number under test; the log(a) term is negligible next to d*log(10).
    log_num = d * math.log(10)
    per_cost = log_num ** 2.3
    per_count = 1 / log_num
    cost = 24 * K_COST * per_cost
    count = 24 * K_COUNT * per_count
    return cost, count
def maybe_M(n):
    """Return n unchanged below 10M, otherwise an M/B/T-suffixed string."""
    if n < 1e7:
        return n
    for divisor, suffix in ((1e6, "M"), (1e9, "B"), (1e12, "T")):
        # Use this unit if n stays under the next one (T is the last resort).
        if n < divisor * 1e3 or suffix == "T":
            return "{:.1f}{}".format(n / divisor, suffix)
def maybe_H(n):
    """Render a duration of n seconds using the largest readable unit."""
    scales = (
        (3 * 3600, 1.0, "seconds"),
        (2 * 86400, 3600.0, "hours"),
        (365 * 86400, 86400.0, "days"),
    )
    for upper_bound, unit, label in scales:
        if n < upper_bound:
            return "{:.1f} {}".format(n / unit, label)
    return "{:.1f} years".format(n / 86400.0 / 365.0)
# Running totals for the sweep over digit counts d.
expected_count = 170 # count below a googol
expected_cost = 0
last_print_count = 0
# paired with expected_count = 170 this helps with the initial
# not-quite-so normal zone of the function.
d = 100
while expected_count < MAX_COUNT:
    # Step several digit sizes at once when d is large, to bound the sweep time.
    mult = 1 if d < 1000 else int(math.sqrt(d))
    t_cost, t_count = cost_test_d(d)
    expected_cost += mult * t_cost
    expected_count += mult * t_count
    # Report only when the integer part of the expected count advances.
    if int(expected_count) > int(last_print_count):
        sieve_cost, post_sieve_cost, sieve_limit, prime_pi, to_check = \
            optimal_sieve(d, expected_cost)
        sieve_stats = "optimal sieve: PrimePi({}) ~= {}, leaves {} cost ~~{}".format(
            maybe_M(sieve_limit), maybe_M(prime_pi),
            to_check,
            maybe_H(sieve_cost))
        print ("expect {:.0f} around 10^{} ({}) cost: ~~{}".format(
            expected_count, d, sieve_stats, maybe_H(post_sieve_cost)))
        last_print_count = expected_count
    d += mult
| [
"gmpy2.next_prime",
"math.sqrt",
"gmpy2.mpz",
"math.log"
] | [((468, 480), 'gmpy2.mpz', 'gmpy2.mpz', (['(5)'], {}), '(5)\n', (477, 480), False, 'import gmpy2\n'), ((1339, 1351), 'math.log', 'math.log', (['(10)'], {}), '(10)\n', (1347, 1351), False, 'import math\n'), ((2397, 2409), 'math.sqrt', 'math.sqrt', (['d'], {}), '(d)\n', (2406, 2409), False, 'import math\n'), ((570, 601), 'gmpy2.next_prime', 'gmpy2.next_prime', (['current_prime'], {}), '(current_prime)\n', (586, 601), False, 'import gmpy2\n'), ((740, 763), 'math.log', 'math.log', (['current_prime'], {}), '(current_prime)\n', (748, 763), False, 'import math\n')] |
import pandas as pd
import numpy as np
function2idx = {"negative": 0, "ferritin": 1, "gpcr": 2, "p450": 3, "protease": 4}
input_dir = '../data/raw/'
data_dir = '../data/processed/'
max_seq_len = 800
def _load_family(filename, function_name):
    """Load one reviewed UniProt tab export and label it with a family class.

    Returns a frame with columns ``id``, ``sequence``, ``function``.
    """
    df = pd.read_csv(input_dir + filename, sep='\t', skiprows=(0), header=(0))
    df.drop(['Entry name', "Status"], axis=1, inplace=True)
    df.columns = ['id', 'sequence']
    df['function'] = function2idx[function_name]
    return df


def read_and_concat_data():
    """Build the labelled dataset: positive protein families + negative background.

    The five positive family exports are concatenated, the reviewed UniProt
    background is labelled negative, and duplicate ids are resolved in favour
    of the positive label (sort descending by function index, keep first).
    """
    # Positive families, in the same order as the original pipeline.
    df_positive = pd.concat(
        [
            _load_family('uniprot-cysteine+protease+AND+reviewed_yes.tab', 'protease'),
            _load_family('uniprot-serine+protease+AND+reviewed_yes.tab', 'protease'),
            _load_family('uniprot-ferritin-filtered-reviewed_yes.tab', 'ferritin'),
            _load_family('uniprot-gpcr+AND+reviewed_yes.tab', 'gpcr'),
            _load_family('uniprot-p450+AND+reviewed_yes.tab', 'p450'),
        ],
        ignore_index=True,
    )
    # Ids that appear more than once within the positives.
    duplicates = list(df_positive[df_positive.duplicated('id')].id)

    df_uniprot = pd.read_csv(input_dir + 'uniprot-reviewed_yes.tab', sep='\t', skiprows=(0), header=(0))
    df_uniprot = df_uniprot.drop(["Entry name", "Status", "Gene names", "Gene ontology (molecular function)",
                                  "Gene ontology IDs", "Gene ontology (cellular component)",
                                  "Gene ontology (biological process)", "Gene ontology (GO)"], axis=1)
    df_uniprot['function'] = function2idx['negative']
    df_uniprot.columns = ['id', 'sequence', 'function']
    # BUG FIX: the result of this filter was previously discarded (no
    # assignment); keep it so background rows whose id is duplicated within
    # the positives are dropped as intended.
    df_uniprot = df_uniprot[~df_uniprot.id.isin(duplicates)]

    df_all = pd.concat([df_uniprot, df_positive], ignore_index=True)
    # Positives have higher function indices, so sorting descending makes
    # drop_duplicates keep the positively-labelled copy of each id.
    df_all.sort_values(by='function', inplace=True, ascending=False)
    df_all = df_all.drop_duplicates(subset='id').reset_index(drop=True)
    print("Finished reading raw data and concating")
    return df_all
def clean_sequence_length(dataframe, max_len=None):
    """Trim all sequences to ``max_len`` characters, keeping C-terminal context.

    For proteins longer than ``max_len`` an extra reversed copy (id suffixed
    with ``_r``) is added so that the C-terminal ``max_len`` residues also
    appear in the data; afterwards every sequence is truncated and the frame
    is sorted by sequence length.

    :param dataframe: frame with ``id``, ``sequence`` and ``function`` columns
    :param max_len: truncation length; defaults to the module-level
        ``max_seq_len`` so existing callers are unaffected
    :return: the cleaned, length-sorted frame (with a new ``length`` column)
    """
    if max_len is None:
        max_len = max_seq_len
    # Add reversed copies of the longest proteins (C-terminus first).
    reverse_rows = []
    for index, row in dataframe[dataframe.sequence.apply(len) > max_len].iterrows():
        reverse_rows.append([row.id + '_r', row.sequence[::-1], row.function])
    reverse_rows = pd.DataFrame(reverse_rows, columns=['id', 'sequence', 'function'])
    dataframe = pd.concat([dataframe, reverse_rows], ignore_index=True)
    # Cut all sequences to max_len characters.
    dataframe['sequence'] = dataframe.sequence.apply(lambda x: x[:max_len])
    dataframe['length'] = dataframe.sequence.apply(len)
    dataframe = dataframe.sort_values(by='length').reset_index(drop=True)
    print("Finished cleaning sequences by length")
    return dataframe
# Build the labelled dataset, normalise sequence lengths, then dump the
# sequence and label columns as parallel text files for downstream training.
df = read_and_concat_data()
df = clean_sequence_length(df)
np.savetxt(data_dir + 'sequence.txt', df.sequence.values, fmt='%s')
np.savetxt(data_dir + 'function.txt', df.function.values, fmt='%s')
print("Saved sequence and function to txt")
"pandas.DataFrame",
"pandas.concat",
"numpy.savetxt",
"pandas.read_csv"
] | [((3531, 3598), 'numpy.savetxt', 'np.savetxt', (["(data_dir + 'sequence.txt')", 'df.sequence.values'], {'fmt': '"""%s"""'}), "(data_dir + 'sequence.txt', df.sequence.values, fmt='%s')\n", (3541, 3598), True, 'import numpy as np\n'), ((3599, 3666), 'numpy.savetxt', 'np.savetxt', (["(data_dir + 'function.txt')", 'df.function.values'], {'fmt': '"""%s"""'}), "(data_dir + 'function.txt', df.function.values, fmt='%s')\n", (3609, 3666), True, 'import numpy as np\n'), ((249, 358), 'pandas.read_csv', 'pd.read_csv', (["(input_dir + 'uniprot-cysteine+protease+AND+reviewed_yes.tab')"], {'sep': '"""\t"""', 'skiprows': '(0)', 'header': '(0)'}), "(input_dir + 'uniprot-cysteine+protease+AND+reviewed_yes.tab',\n sep='\\t', skiprows=0, header=0)\n", (260, 358), True, 'import pandas as pd\n'), ((575, 683), 'pandas.read_csv', 'pd.read_csv', (["(input_dir + 'uniprot-serine+protease+AND+reviewed_yes.tab')"], {'sep': '"""\t"""', 'skiprows': '(0)', 'header': '(0)'}), "(input_dir + 'uniprot-serine+protease+AND+reviewed_yes.tab', sep\n ='\\t', skiprows=0, header=0)\n", (586, 683), True, 'import pandas as pd\n'), ((889, 985), 'pandas.read_csv', 'pd.read_csv', (["(input_dir + 'uniprot-gpcr+AND+reviewed_yes.tab')"], {'sep': '"""\t"""', 'skiprows': '(0)', 'header': '(0)'}), "(input_dir + 'uniprot-gpcr+AND+reviewed_yes.tab', sep='\\t',\n skiprows=0, header=0)\n", (900, 985), True, 'import pandas as pd\n'), ((1154, 1250), 'pandas.read_csv', 'pd.read_csv', (["(input_dir + 'uniprot-p450+AND+reviewed_yes.tab')"], {'sep': '"""\t"""', 'skiprows': '(0)', 'header': '(0)'}), "(input_dir + 'uniprot-p450+AND+reviewed_yes.tab', sep='\\t',\n skiprows=0, header=0)\n", (1165, 1250), True, 'import pandas as pd\n'), ((1416, 1522), 'pandas.read_csv', 'pd.read_csv', (["(input_dir + 'uniprot-ferritin-filtered-reviewed_yes.tab')"], {'sep': '"""\t"""', 'skiprows': '(0)', 'header': '(0)'}), "(input_dir + 'uniprot-ferritin-filtered-reviewed_yes.tab', sep=\n '\\t', skiprows=0, header=0)\n", (1427, 1522), True, 
'import pandas as pd\n'), ((1689, 1767), 'pandas.concat', 'pd.concat', (['[df_cysteine, df_serine, df_f, df_gpcr, df_p450]'], {'ignore_index': '(True)'}), '([df_cysteine, df_serine, df_f, df_gpcr, df_p450], ignore_index=True)\n', (1698, 1767), True, 'import pandas as pd\n'), ((1854, 1941), 'pandas.read_csv', 'pd.read_csv', (["(input_dir + 'uniprot-reviewed_yes.tab')"], {'sep': '"""\t"""', 'skiprows': '(0)', 'header': '(0)'}), "(input_dir + 'uniprot-reviewed_yes.tab', sep='\\t', skiprows=0,\n header=0)\n", (1865, 1941), True, 'import pandas as pd\n'), ((2420, 2475), 'pandas.concat', 'pd.concat', (['[df_uniprot, df_positive]'], {'ignore_index': '(True)'}), '([df_uniprot, df_positive], ignore_index=True)\n', (2429, 2475), True, 'import pandas as pd\n'), ((3007, 3073), 'pandas.DataFrame', 'pd.DataFrame', (['reverse_rows'], {'columns': "['id', 'sequence', 'function']"}), "(reverse_rows, columns=['id', 'sequence', 'function'])\n", (3019, 3073), True, 'import pandas as pd\n'), ((3090, 3145), 'pandas.concat', 'pd.concat', (['[dataframe, reverse_rows]'], {'ignore_index': '(True)'}), '([dataframe, reverse_rows], ignore_index=True)\n', (3099, 3145), True, 'import pandas as pd\n')] |
import typing
import re
from .CD_relations import cardinal_relation, inverse_directions
from .regions import Region, region_union
from .expression_walker import PatternWalker
from .expressions import Constant
REFINE_OVERLAPPING = True
class RegionSolver(PatternWalker[Region]):
    """Pattern walker that resolves cardinal-relation predicates over Regions.

    ``__new__`` installs one ``function_<relation>`` method per cardinal
    operation, plus stricter ``function_anatomical_<relation>`` variants
    for the four anatomically-directed relations.
    """
    type_name = 'Region'
    def __new__(cls, *args, **kwargs):
        # Relation name -> single-letter code understood by cardinal_relation.
        cardinal_operations = {
            'inferior_of': 'I', 'superior_of': 'S',
            'posterior_of': 'P', 'anterior_of': 'A',
            'left_of': 'L', 'right_of': 'R',
            'overlapping': 'O'
        }
        refine_overlapping = kwargs.get(
            'refine_overlapping',
            REFINE_OVERLAPPING
        )
        # Depth bound forwarded to cardinal_relation via stop_at; None = unbounded.
        max_tree_depth_level = kwargs.get(
            'max_tree_depth_level',
            None
        )
        def build_function(relation, refine_overlapping=False):
            # Factory: plain relation check; closes over max_tree_depth_level.
            def fun(self, x: Region, y: Region) -> bool:
                return bool(cardinal_relation(
                    x, y, relation,
                    refine_overlapping=refine_overlapping,
                    stop_at=max_tree_depth_level
                ))
            return fun
        def anatomical_direction_function(relation, refine_overlapping=False):
            # Factory: strict directional check — the relation must hold while
            # neither its inverse nor an overlap holds.
            def func(self, x: Region, y: Region) -> bool:
                return bool(
                    cardinal_relation(
                        x, y, relation,
                        refine_overlapping=refine_overlapping,
                        stop_at=max_tree_depth_level
                    ) and not (
                        cardinal_relation(
                            x, y, inverse_directions[relation],
                            refine_overlapping=refine_overlapping,
                            stop_at=max_tree_depth_level
                        ) or
                        cardinal_relation(
                            x, y, cardinal_operations['overlapping'],
                            refine_overlapping=refine_overlapping,
                            stop_at=max_tree_depth_level
                        )
                    )
                )
            return func
        # Install function_<relation> for every cardinal operation.
        for key, value in cardinal_operations.items():
            setattr(
                cls, f'function_{key}',
                build_function(value, refine_overlapping=refine_overlapping)
            )
        # Only the four anatomically-directed relations get strict variants.
        anatomical_correct_operations = {
            k: cardinal_operations[k] for k in (
                'inferior_of', 'superior_of',
                'posterior_of', 'anterior_of'
            )
        }
        for key, value in anatomical_correct_operations.items():
            setattr(
                cls, f'function_anatomical_{key}',
                anatomical_direction_function(
                    value, refine_overlapping=refine_overlapping
                )
            )
        return PatternWalker.__new__(cls)
    def function_regexp(
        self, regexp: typing.Text
    ) -> typing.AbstractSet[Region]:
        """Return the frozenset of Region symbols whose name matches ``regexp``."""
        regions = []
        for k in self.symbol_table.symbols_by_type(Region):
            if re.search(regexp, k.name):
                regions.append(k)
        return frozenset(regions)
    def function_region_union(
        self, region_set: typing.AbstractSet[Region]
    ) -> Region:
        """Union a set of regions; every element must walk to a Constant.

        :raises ValueError: if any region is still unresolved after walking.
        """
        new_region_set = []
        for region in region_set:
            region = self.walk(region)
            if not isinstance(region, Constant):
                raise ValueError(
                    "Region union can only be evaluated on resolved regions"
                )
            new_region_set.append(region.value)
        return region_union(new_region_set)
| [
"re.search"
] | [((3071, 3096), 're.search', 're.search', (['regexp', 'k.name'], {}), '(regexp, k.name)\n', (3080, 3096), False, 'import re\n')] |
# -----------------------------------------------------------------------------
#
# P A G E B O T E X A M P L E S
#
# Copyright (c) 2017 <NAME> <https://github.com/thomgb>
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# conditionsHierarchy.py
#
import sys, inspect
from pagebot import getContext
from pagebot.toolbox.units import *
from pagebot.toolbox.color import Color, blackColor, blueColor, greenColor
from pagebot.fonttoolbox.objects.font import findFont
from pagebot.conditions import *
context = getContext()
# Canvas origin and size (points).
X0 = 100
Y0 = 100
WIDTH = 1600
HEIGHT = 1400
# Class-box geometry and spacing.
HBOX = 34
WBOX = 170
GAP = 20
HGAP = 60
# Text padding inside a box.
P = 15
TEXTSIZE = pt(12)
# Horizontal pull applied to bezier control points of connectors.
OFFSET = 9
titleFont = findFont('BungeeInline-Regular')
font = findFont('Roboto-Regular')
boldFont = findFont('BungeeOutline-Regular')
def drawClassHierarchy(obj, colorRange, i):
    """Draw obj's class hierarchy bottom-up (``object`` first), one box per class.

    Consecutive classes in the MRO are connected; ``i`` walks through
    ``colorRange`` (wrapping at the end) and the next color index is returned.
    """
    x, y = X0, Y0
    previous = None
    for klass in reversed(obj.__mro__):
        name = klass.__name__
        if i >= len(colorRange):
            i = 0  # wrap the palette
        drawClass(name, x, y, colorRange[i])
        if previous is not None:
            drawConnection(name, previous)
        previous = name
        y += HGAP
        i += 1
    return i
def drawConnection(current, previous):
    """Draw a bezier connector between the boxes of two adjacent classes.

    Connections are deduplicated via the module-level ``connections`` list,
    so each unordered class pair is drawn at most once.
    """
    if sorted([current, previous]) in connections:
        return
    p0x, p0y = drawnclasses[current]
    p1x, p1y = drawnclasses[previous]
    context.stroke(blueColor)
    # Determine box entry / exit points from the boxes' relative positions.
    if p0y > p1y:
        p0x += WBOX / 2
        p1x += WBOX / 2
        p1y += HBOX
    elif p0y < p1y:
        # BUG FIX: the two x offsets below were bare expressions
        # (`p0x + WBOX / 2`) and therefore no-ops; they now assign.
        p0x += WBOX / 2
        p1x += WBOX / 2
        p0y += HBOX
    elif p0y == p1y:
        p0y += HBOX / 2
        p1y += HBOX / 2
        if p1x > p0x:
            p0x += WBOX
        elif p1x < p0x:
            p1x += WBOX
    # TODO: draw only once for any location.
    context.circle(p0x, p0y, 3)
    context.circle(p1x, p1y, 3)
    # Bezier curve: control points pulled a third of the way vertically and
    # OFFSET points horizontally towards each other.
    path = context.newPath()
    context.moveTo((p0x, p0y))
    cp0x = p0x - OFFSET
    cp0y = p0y - (p0y - p1y) / 3
    cp1x = p1x + OFFSET
    cp1y = p1y + (p0y - p1y) / 3
    context.fill(None)
    context.stroke((1, 0, 1, 0.5))
    context.curveTo((cp0x, cp0y), (cp1x, cp1y), (p1x, p1y))
    # BUG FIX: `drawPath` was an undefined bare name; qualify it with context.
    context.drawPath(path)
    connections.append(sorted([current, previous]))
def drawClass(name, x, y, color):
    """Draw one class box at the first free grid slot starting from (x, y)."""
    if name in drawnclasses:
        return  # each class is drawn only once
    # Scan right, wrapping to a new row when the right margin is reached,
    # until an unoccupied position is found.
    boxx, boxy = x, y
    while (boxx, boxy) in positions:
        boxx = boxx + GAP + WBOX
        if boxx >= WIDTH - WBOX - GAP:
            boxx = X0
            boxy += HGAP
    context.fill(blackColor)
    context.fontSize(TEXTSIZE)
    context.stroke(None)
    context.fill(color)
    context.roundedRect(boxx, boxy, WBOX, HBOX)
    context.fill(blackColor)
    label = context.newString(
        name, style=dict(font=font.path, fontSize=TEXTSIZE, textFill=0.1))
    context.text(label, (boxx + P, boxy + P))
    drawnclasses[name] = (boxx, boxy)
    positions.append((boxx, boxy))
def getColorRange(l):
    """Return ``l`` colors sweeping the blue channel from 0 up to (l-1)/l."""
    return [Color(0.7, 0.7, i * 1.0 / l) for i in range(l)]
def drawClasses(inspected):
    """Draw the hierarchy of every class found among (name, object) pairs."""
    classes = [obj for _, obj in inspected if inspect.isclass(obj)]
    colorRange = getColorRange(len(classes))
    i = 0
    for klass in classes:
        # Carry the color index across hierarchies so the palette keeps cycling.
        i = drawClassHierarchy(klass, colorRange, i)
# One page for the whole diagram.
context.newPage(pt(WIDTH), pt(HEIGHT))
# Module-level drawing state shared by the draw* helpers above.
connections = []   # unordered class pairs already connected
drawnclasses = {}  # class name -> (x, y) of its drawn box
positions = []     # occupied box positions
classes = []
classes.extend(inspect.getmembers(sys.modules['pagebot.conditions']))
drawClasses(classes)
# Title: grey title face followed by a black outline face.
context.fill(0)
context.stroke(None)
context.fontSize(42)
msg = 'PageBot Alignment Conditions '
msg1 = 'Object Hierarchy'
style = dict(font=titleFont.path, fontSize=36, textFill=0.5)
boldStyle = dict(font=boldFont.path, fontSize=36, textFill=0)
bs = context.newString(msg, style=style)
bs += context.newString(msg1, style=boldStyle)
context.text(bs, (100, HEIGHT - 100))
# Export raster and vector versions.
context.saveDrawing('_export/conditionObjectHierarchy.png')
context.saveDrawing('_export/conditionObjectHierarchy.pdf')
| [
"inspect.getmembers",
"pagebot.getContext",
"pagebot.toolbox.color.Color",
"pagebot.fonttoolbox.objects.font.findFont",
"inspect.isclass"
] | [((695, 707), 'pagebot.getContext', 'getContext', ([], {}), '()\n', (705, 707), False, 'from pagebot import getContext\n'), ((843, 875), 'pagebot.fonttoolbox.objects.font.findFont', 'findFont', (['"""BungeeInline-Regular"""'], {}), "('BungeeInline-Regular')\n", (851, 875), False, 'from pagebot.fonttoolbox.objects.font import findFont\n'), ((883, 909), 'pagebot.fonttoolbox.objects.font.findFont', 'findFont', (['"""Roboto-Regular"""'], {}), "('Roboto-Regular')\n", (891, 909), False, 'from pagebot.fonttoolbox.objects.font import findFont\n'), ((921, 954), 'pagebot.fonttoolbox.objects.font.findFont', 'findFont', (['"""BungeeOutline-Regular"""'], {}), "('BungeeOutline-Regular')\n", (929, 954), False, 'from pagebot.fonttoolbox.objects.font import findFont\n'), ((4294, 4347), 'inspect.getmembers', 'inspect.getmembers', (["sys.modules['pagebot.conditions']"], {}), "(sys.modules['pagebot.conditions'])\n", (4312, 4347), False, 'import sys, inspect\n'), ((3810, 3828), 'pagebot.toolbox.color.Color', 'Color', (['(0.7)', '(0.7)', 'v'], {}), '(0.7, 0.7, v)\n', (3815, 3828), False, 'from pagebot.toolbox.color import Color, blackColor, blueColor, greenColor\n'), ((3971, 3991), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (3986, 3991), False, 'import sys, inspect\n')] |
import os
import enum
import hashlib
from urllib.parse import urljoin
from flask import url_for, current_app as app
from mcarch.app import db, get_b2bucket
class StoredFile(db.Model):
    """Represents a file stored in some sort of storage medium."""
    __tablename__ = 'stored_file'
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Original file name.
    name = db.Column(db.String(80), nullable=False)
    # Hex SHA-256 digest of the file contents.
    sha256 = db.Column(db.String(130), nullable=False)
    # User who uploaded the file, if known.
    upload_by_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=True)
    upload_by = db.relationship('User')
    # Path to this file within the B2 bucket. Null if file is not on B2.
    b2_path = db.Column(db.String(300), nullable=True)
    def b2_download_url(self):
        """Gets the URL to download this file from the archive's B2 bucket.

        Returns None (implicitly) when the file has no B2 path.
        """
        if self.b2_path:
            return urljoin(app.config['B2_PUBLIC_URL'], self.b2_path)
def gen_b2_path(filename, sha):
    """Generate the B2 bucket path for a file: ``<sha256>/<filename>``.

    Uses an explicit '/' separator rather than ``os.path.join`` so the
    object key is platform-independent (``os.path.join`` would insert a
    backslash on Windows, producing an invalid bucket key).
    """
    return '{}/{}'.format(sha, filename)
def sha256_file(path):
    """Return the hex SHA-256 digest of the file at ``path``, read in chunks."""
    chunk_size = 65536
    digest = hashlib.sha256()
    with open(path, 'rb') as fp:
        # iter() with a sentinel yields fixed-size chunks until EOF.
        for chunk in iter(lambda: fp.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
def upload_b2_file(path, name, user=None):
    """Uploads a local file to B2, adds it to the DB, and returns the StoredFile.

    This adds the StoredFile to the database and does a commit.

    @param path: path to the file on disk
    @param name: name of the file as it should be in B2
    @param user: user to associate the stored file with. Can be None
    """
    digest = sha256_file(path)
    dest = gen_b2_path(name, digest)
    get_b2bucket().upload_local_file(path, dest)
    record = StoredFile(name=name, sha256=digest, b2_path=dest, upload_by=user)
    db.session.add(record)
    db.session.commit()
    return record
| [
"hashlib.sha256",
"mcarch.app.db.relationship",
"mcarch.app.db.String",
"os.path.join",
"mcarch.app.db.session.add",
"urllib.parse.urljoin",
"mcarch.app.get_b2bucket",
"mcarch.app.db.session.commit",
"mcarch.app.db.ForeignKey",
"mcarch.app.db.Column"
] | [((297, 336), 'mcarch.app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (306, 336), False, 'from mcarch.app import db, get_b2bucket\n'), ((543, 566), 'mcarch.app.db.relationship', 'db.relationship', (['"""User"""'], {}), "('User')\n", (558, 566), False, 'from mcarch.app import db, get_b2bucket\n'), ((1035, 1062), 'os.path.join', 'os.path.join', (['sha', 'filename'], {}), '(sha, filename)\n', (1047, 1062), False, 'import os\n'), ((1114, 1130), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (1128, 1130), False, 'import hashlib\n'), ((1676, 1690), 'mcarch.app.get_b2bucket', 'get_b2bucket', ([], {}), '()\n', (1688, 1690), False, 'from mcarch.app import db, get_b2bucket\n'), ((1889, 1911), 'mcarch.app.db.session.add', 'db.session.add', (['stored'], {}), '(stored)\n', (1903, 1911), False, 'from mcarch.app import db, get_b2bucket\n'), ((1916, 1935), 'mcarch.app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1933, 1935), False, 'from mcarch.app import db, get_b2bucket\n'), ((358, 371), 'mcarch.app.db.String', 'db.String', (['(80)'], {}), '(80)\n', (367, 371), False, 'from mcarch.app import db, get_b2bucket\n'), ((412, 426), 'mcarch.app.db.String', 'db.String', (['(130)'], {}), '(130)\n', (421, 426), False, 'from mcarch.app import db, get_b2bucket\n'), ((486, 510), 'mcarch.app.db.ForeignKey', 'db.ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (499, 510), False, 'from mcarch.app import db, get_b2bucket\n'), ((665, 679), 'mcarch.app.db.String', 'db.String', (['(300)'], {}), '(300)\n', (674, 679), False, 'from mcarch.app import db, get_b2bucket\n'), ((851, 901), 'urllib.parse.urljoin', 'urljoin', (["app.config['B2_PUBLIC_URL']", 'self.b2_path'], {}), "(app.config['B2_PUBLIC_URL'], self.b2_path)\n", (858, 901), False, 'from urllib.parse import urljoin\n')] |
# Generated by Django 2.2 on 2020-03-08 16:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow the five clue paragraph fields to be blank/null."""

    dependencies = [
        ('mainapp', '0005_auto_20200308_1735'),
    ]

    # All five paragraph fields receive the identical alteration; build the
    # operations in a comprehension instead of repeating the block five times.
    operations = [
        migrations.AlterField(
            model_name='clue',
            name=field_name,
            field=models.TextField(blank=True, null=True, verbose_name=field_name),
        )
        for field_name in (
            'paragraph1', 'paragraph2', 'paragraph3', 'paragraph4', 'paragraph5'
        )
    ]
| [
"django.db.models.TextField"
] | [((336, 402), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""paragraph1"""'}), "(blank=True, null=True, verbose_name='paragraph1')\n", (352, 402), False, 'from django.db import migrations, models\n'), ((526, 592), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""paragraph2"""'}), "(blank=True, null=True, verbose_name='paragraph2')\n", (542, 592), False, 'from django.db import migrations, models\n'), ((716, 782), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""paragraph3"""'}), "(blank=True, null=True, verbose_name='paragraph3')\n", (732, 782), False, 'from django.db import migrations, models\n'), ((906, 972), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""paragraph4"""'}), "(blank=True, null=True, verbose_name='paragraph4')\n", (922, 972), False, 'from django.db import migrations, models\n'), ((1096, 1162), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""paragraph5"""'}), "(blank=True, null=True, verbose_name='paragraph5')\n", (1112, 1162), False, 'from django.db import migrations, models\n')] |
import subprocess
import scipy.io.wavfile as wav
import sys
import numpy as np
# import pyaudio
import time
import wave
import os
from pydub import AudioSegment
import pafy
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_dl import YoutubeDL
# Make sibling modules importable when this file is run as a script.
dirname = os.path.dirname(os.path.abspath(__file__))
sys.path.append(dirname)
def get_youtube_cc(url):
    """Fetch German/English captions for a YouTube watch URL.

    Returns (captions_text, True) on success, or an error message paired
    with False when the transcript cannot be retrieved.
    """
    try:
        video_id = url.split('?v=')[1]
        cc = YouTubeTranscriptApi.get_transcripts([video_id], languages=['de', 'en'])
        # cc[0] maps video id -> list of {'text': ...} caption entries.
        captions = ''.join(' ' + entry['text'] for entry in cc[0][video_id])
        return (captions, True)
    except Exception:
        return ("Can't fetch from youtube captions", False)
def get_youtube_audio(url):
    """Download the audio track of a YouTube video as MP3 into ../temp.

    Returns (video_id, True) on success or (exception, False) on failure.
    """
    try:
        base_dir = os.path.dirname(os.path.dirname(__file__))
        video_ids = [url.split('?v=')[1]]
        ydl_opts = {
            'format': 'bestaudio/best',
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '192',
            }],
            # BUG FIX: the template field was misspelled '%(etx)s';
            # youtube_dl only substitutes the documented '%(ext)s' field.
            'outtmpl': base_dir + '/temp/%(id)s.%(ext)s',
            'quiet': False
        }
        YoutubeDL(ydl_opts).download(video_ids)
        return (video_ids[0], True)
    except Exception as e:
        return (e, False)
| [
"youtube_transcript_api.YouTubeTranscriptApi.get_transcripts",
"youtube_dl.YoutubeDL",
"os.path.dirname",
"os.path.abspath",
"sys.path.append"
] | [((319, 343), 'sys.path.append', 'sys.path.append', (['dirname'], {}), '(dirname)\n', (334, 343), False, 'import sys\n'), ((292, 317), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (307, 317), False, 'import os\n'), ((486, 557), 'youtube_transcript_api.YouTubeTranscriptApi.get_transcripts', 'YouTubeTranscriptApi.get_transcripts', (['video_ids'], {'languages': "['de', 'en']"}), "(video_ids, languages=['de', 'en'])\n", (522, 557), False, 'from youtube_transcript_api import YouTubeTranscriptApi\n'), ((1229, 1248), 'youtube_dl.YoutubeDL', 'YoutubeDL', (['ydl_opts'], {}), '(ydl_opts)\n', (1238, 1248), False, 'from youtube_dl import YoutubeDL\n'), ((821, 846), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (836, 846), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
    """Add [EN] body/title fields to HomePage and relabel the [UA] body."""
    dependencies = [
        ('cms_pages', '0005_auto_20150829_1516'),
    ]
    operations = [
        # New English rich-text body for the blue panel.
        migrations.AddField(
            model_name='homepage',
            name='body_en',
            field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[EN] \u0422\u0435\u043a\u0441\u0442 \u043d\u0430 \u0431\u043b\u0430\u043a\u0438\u0442\u043d\u0456\u0439 \u043f\u0430\u043d\u0435\u043b\u0456'),
        ),
        # New English title.
        migrations.AddField(
            model_name='homepage',
            name='title_en',
            field=models.CharField(default='', max_length=255),
        ),
        # Relabel the existing Ukrainian body with a [UA] prefix.
        migrations.AlterField(
            model_name='homepage',
            name='body',
            field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[UA] \u0422\u0435\u043a\u0441\u0442 \u043d\u0430 \u0431\u043b\u0430\u043a\u0438\u0442\u043d\u0456\u0439 \u043f\u0430\u043d\u0435\u043b\u0456'),
        ),
    ]
| [
"django.db.models.CharField"
] | [((722, 766), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(255)'}), "(default='', max_length=255)\n", (738, 766), False, 'from django.db import migrations, models\n')] |
from autorop import PwnState, arutil
from pwn import ROP
def puts(state: PwnState) -> PwnState:
    """Leak the libc addresses of ``__libc_start_main`` and ``puts``.

    For each target symbol a rop chain calling ``puts`` on the symbol's
    address is built, and the leaked values are stored in ``state.leaks``.

    Arguments:
        state: The current ``PwnState`` with the following set
            - ``target``: What we want to exploit.
            - ``_elf``: pwntools ``ELF`` of ``state.binary_name``.
            - ``overwriter``: Function which writes rop chain to the "right place".
            - ``vuln_function``: Name of vulnerable function in binary,
                which we can return to repeatedly.

    Returns:
        Mutated ``PwnState``, with ``target`` updated to the instance that
        produced the leak, and ``leaks`` extended with ``"symbol": address``
        pairs for each leaked libc function.
    """
    symbols_to_leak = ["__libc_start_main", "puts"]

    def append_puts_call(rop: ROP, address: int) -> ROP:
        # Emit an aligned call to puts(address) on the chain being built.
        arutil.align_call(rop, "puts", [address])
        return rop

    return arutil.leak_helper(state, append_puts_call, symbols_to_leak)
| [
"autorop.arutil.align_call",
"autorop.arutil.leak_helper"
] | [((1209, 1254), 'autorop.arutil.leak_helper', 'arutil.leak_helper', (['state', 'leaker', 'LEAK_FUNCS'], {}), '(state, leaker, LEAK_FUNCS)\n', (1227, 1254), False, 'from autorop import PwnState, arutil\n'), ((1136, 1177), 'autorop.arutil.align_call', 'arutil.align_call', (['rop', '"""puts"""', '[address]'], {}), "(rop, 'puts', [address])\n", (1153, 1177), False, 'from autorop import PwnState, arutil\n')] |
# Copyright 2013-2014 Massachusetts Open Cloud Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Unit tests for VLAN helper functions"""
from functools import wraps
from haas import model, api
from haas.test_common import *
import pytest
from haas.config import cfg
from haas.drivers.simple_vlan import *
def vlan_test(vlan_list):
    """Decorator for tests of the simple_vlan driver.

    ``vlan_list`` is the string used for the ``vlans`` configuration
    option, which determines which vlans may be used for networking.
    """
    def dec(f):
        def config_initialize():
            # Configure the simple_vlan driver with a null switch and
            # dry-run mode, section by section, in a fixed order.
            settings = [
                ('general', 'driver', 'simple_vlan'),
                ('vlan', 'vlans', vlan_list),
                ('driver simple_vlan', 'switch', '{"switch":"null"}'),
                ('devel', 'dry_run', 'True'),
            ]
            for section, option, value in settings:
                cfg.add_section(section)
                cfg.set(section, option, value)
        @wraps(f)
        @clear_configuration
        def wrapped(self):
            # Fresh config and database for every test invocation.
            config_initialize()
            db = newDB()
            f(self, db)
            releaseDB(db)
        return wrapped
    return dec
class TestSimpleVLAN:
    """Tests basic operation of Simple VLAN driver"""
    @vlan_test('84')
    def test_simple_vlan_network_operations(self, db):
        """Exercise the full network lifecycle on vlan 84.

        Creates a project and network, registers three nodes (node-97..99)
        each with one NIC and port, then detaches one node and attaches
        another to the network.
        """
        api.project_create('anvil-nextgen')
        network_create_simple('hammernet', 'anvil-nextgen')
        # Register three nodes, each with a NIC wired to a port.
        for k in range(97,100):
            nodename = 'node-' + str(k)
            api.node_register(nodename, 'ipmihost', 'root', 'tapeworm')
            api.node_register_nic(nodename, 'eth0', 'DE:AD:BE:EF:20:14')
            api.project_connect_node('anvil-nextgen', nodename)
            api.port_register(nodename)
            api.port_connect_nic(nodename, nodename, 'eth0')
        # Detaching one node must not interfere with connecting another.
        api.project_detach_node('anvil-nextgen', 'node-97')
        api.node_connect_network('node-98', 'eth0', 'hammernet')
| [
"haas.api.node_register_nic",
"haas.api.node_register",
"haas.api.node_connect_network",
"haas.api.project_detach_node",
"haas.config.cfg.add_section",
"functools.wraps",
"haas.config.cfg.set",
"haas.api.port_register",
"haas.api.project_create",
"haas.api.project_connect_node",
"haas.api.port_c... | [((1538, 1546), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (1543, 1546), False, 'from functools import wraps\n'), ((1912, 1947), 'haas.api.project_create', 'api.project_create', (['"""anvil-nextgen"""'], {}), "('anvil-nextgen')\n", (1930, 1947), False, 'from haas import model, api\n'), ((2398, 2449), 'haas.api.project_detach_node', 'api.project_detach_node', (['"""anvil-nextgen"""', '"""node-97"""'], {}), "('anvil-nextgen', 'node-97')\n", (2421, 2449), False, 'from haas import model, api\n'), ((2458, 2514), 'haas.api.node_connect_network', 'api.node_connect_network', (['"""node-98"""', '"""eth0"""', '"""hammernet"""'], {}), "('node-98', 'eth0', 'hammernet')\n", (2482, 2514), False, 'from haas import model, api\n'), ((1153, 1179), 'haas.config.cfg.add_section', 'cfg.add_section', (['"""general"""'], {}), "('general')\n", (1168, 1179), False, 'from haas.config import cfg\n'), ((1192, 1235), 'haas.config.cfg.set', 'cfg.set', (['"""general"""', '"""driver"""', '"""simple_vlan"""'], {}), "('general', 'driver', 'simple_vlan')\n", (1199, 1235), False, 'from haas.config import cfg\n'), ((1248, 1271), 'haas.config.cfg.add_section', 'cfg.add_section', (['"""vlan"""'], {}), "('vlan')\n", (1263, 1271), False, 'from haas.config import cfg\n'), ((1284, 1319), 'haas.config.cfg.set', 'cfg.set', (['"""vlan"""', '"""vlans"""', 'vlan_list'], {}), "('vlan', 'vlans', vlan_list)\n", (1291, 1319), False, 'from haas.config import cfg\n'), ((1332, 1369), 'haas.config.cfg.add_section', 'cfg.add_section', (['"""driver simple_vlan"""'], {}), "('driver simple_vlan')\n", (1347, 1369), False, 'from haas.config import cfg\n'), ((1382, 1442), 'haas.config.cfg.set', 'cfg.set', (['"""driver simple_vlan"""', '"""switch"""', '"""{"switch":"null"}"""'], {}), '(\'driver simple_vlan\', \'switch\', \'{"switch":"null"}\')\n', (1389, 1442), False, 'from haas.config import cfg\n'), ((1455, 1479), 'haas.config.cfg.add_section', 'cfg.add_section', (['"""devel"""'], {}), 
"('devel')\n", (1470, 1479), False, 'from haas.config import cfg\n'), ((1492, 1527), 'haas.config.cfg.set', 'cfg.set', (['"""devel"""', '"""dry_run"""', '"""True"""'], {}), "('devel', 'dry_run', 'True')\n", (1499, 1527), False, 'from haas.config import cfg\n'), ((2092, 2151), 'haas.api.node_register', 'api.node_register', (['nodename', '"""ipmihost"""', '"""root"""', '"""tapeworm"""'], {}), "(nodename, 'ipmihost', 'root', 'tapeworm')\n", (2109, 2151), False, 'from haas import model, api\n'), ((2164, 2224), 'haas.api.node_register_nic', 'api.node_register_nic', (['nodename', '"""eth0"""', '"""DE:AD:BE:EF:20:14"""'], {}), "(nodename, 'eth0', 'DE:AD:BE:EF:20:14')\n", (2185, 2224), False, 'from haas import model, api\n'), ((2237, 2288), 'haas.api.project_connect_node', 'api.project_connect_node', (['"""anvil-nextgen"""', 'nodename'], {}), "('anvil-nextgen', nodename)\n", (2261, 2288), False, 'from haas import model, api\n'), ((2301, 2328), 'haas.api.port_register', 'api.port_register', (['nodename'], {}), '(nodename)\n', (2318, 2328), False, 'from haas import model, api\n'), ((2341, 2389), 'haas.api.port_connect_nic', 'api.port_connect_nic', (['nodename', 'nodename', '"""eth0"""'], {}), "(nodename, nodename, 'eth0')\n", (2361, 2389), False, 'from haas import model, api\n')] |
import numpy as np
import matplotlib.pyplot as plt
from extract import HurricaneExtraction
#npy_file = './Data/NpyData/LIDIA/20172450002.npz'
npy_file = './Data/NpyData/IRMA/20172531622.npz'
# Load the extracted hurricane data and apply the physics normalization.
data = HurricaneExtraction.read_extraction_data(npy_file)
data = HurricaneExtraction.normalize_using_physics(data)
# Display each 2-D slice as a grayscale image, one blocking window at a time.
for d in data:
    fig = plt.figure()
    im = plt.imshow(d, cmap='Greys_r')
    plt.show()
| [
"matplotlib.pyplot.imshow",
"extract.HurricaneExtraction.normalize_using_physics",
"matplotlib.pyplot.figure",
"extract.HurricaneExtraction.read_extraction_data",
"matplotlib.pyplot.show"
] | [((201, 251), 'extract.HurricaneExtraction.read_extraction_data', 'HurricaneExtraction.read_extraction_data', (['npy_file'], {}), '(npy_file)\n', (241, 251), False, 'from extract import HurricaneExtraction\n'), ((260, 309), 'extract.HurricaneExtraction.normalize_using_physics', 'HurricaneExtraction.normalize_using_physics', (['data'], {}), '(data)\n', (303, 309), False, 'from extract import HurricaneExtraction\n'), ((336, 348), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (346, 348), True, 'import matplotlib.pyplot as plt\n'), ((358, 387), 'matplotlib.pyplot.imshow', 'plt.imshow', (['d'], {'cmap': '"""Greys_r"""'}), "(d, cmap='Greys_r')\n", (368, 387), True, 'import matplotlib.pyplot as plt\n'), ((392, 402), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (400, 402), True, 'import matplotlib.pyplot as plt\n')] |
from urllib.request import urlopen
import json
import re
# Fetch the Material Design Icons metadata and keep (name, codepoint) pairs
# for every weather-* icon.  The HTTP response is closed deterministically
# (the original leaked it) and parsed straight from the stream.
with urlopen("https://raw.githubusercontent.com/Templarian/MaterialDesign/master/meta.json") as resp:
    meta = [(i['name'], i['codepoint']) for i in json.load(resp) if i['name'].startswith('weather-')]
# Emit the esphome font/glyphs YAML snippet on stdout.
print('''---
esphome:
  # ...
  includes:
    - weather_icon_map.h
# ...
font:
  - file: fonts/materialdesignicons-webfont.ttf
    id: ...
    size: ...
    glyphs:''')
for name, codepoint in meta:
    print('      - "\\U000%s" # %s' % (codepoint, name))
# Write the C++ map from icon name (without the "weather-" prefix) to glyph.
with open('weather_icon_map.h', 'w') as h:
    h.write('#include <map>\nstd::map<std::string, std::string> weather_icon_map\n')
    h.write('  {\n')
    for name, codepoint in meta:
        h.write('    {"%s", "\\U000%s"},\n' % (name.replace('weather-', ''), codepoint))
    h.write('  };\n')
# Finally list the bare icon names as a YAML sequence.
print('---')
for name, codepoint in meta:
    print('  "%s",' % (name.replace('weather-', '')))
| [
"urllib.request.urlopen",
"re.search"
] | [((64, 161), 'urllib.request.urlopen', 'urlopen', (['"""https://raw.githubusercontent.com/Templarian/MaterialDesign/master/meta.json"""'], {}), "(\n 'https://raw.githubusercontent.com/Templarian/MaterialDesign/master/meta.json'\n )\n", (71, 161), False, 'from urllib.request import urlopen\n'), ((223, 256), 're.search', 're.search', (['"""^weather-"""', "i['name']"], {}), "('^weather-', i['name'])\n", (232, 256), False, 'import re\n')] |
# common functions
import sys
import json
# taken from sp_lib
def read_json_file(file_path):
    """Read ``file_path`` and return its parsed JSON content.

    The file is decoded as UTF-8, matching the output of
    :func:`write_json_file` (the original relied on the locale default
    encoding, which breaks round-tripping of non-ASCII data on some
    systems).  A failure to open or read the file is logged and the
    ``OSError`` re-raised; malformed JSON propagates as
    ``json.JSONDecodeError``.
    """
    try:
        with open(file_path, 'r', encoding='utf8') as json_file:
            # json.load streams from the handle; no need for read()+loads().
            return json.load(json_file)
    except OSError as e:
        print('Unable to read url json file', e)
        raise
def write_json_file(src_dict, target_file, sort_keys=False):
    """Serialize ``src_dict`` to ``target_file`` as pretty-printed UTF-8 JSON.

    Non-ASCII characters are written verbatim (``ensure_ascii=False``) and a
    trailing newline is appended, since ``json.dump`` does not emit one.
    ``OSError`` from opening or writing propagates to the caller unchanged
    (the original wrapped this in ``except OSError: raise`` — a no-op
    re-raise that has been removed).

    Args:
        src_dict: JSON-serializable object to write.
        target_file: Path of the output file.
        sort_keys: Sort object keys alphabetically when True.
    """
    with open(target_file, 'w', encoding='utf8') as json_file:
        json.dump(src_dict, json_file, ensure_ascii=False, indent=2, sort_keys=sort_keys)
        json_file.write("\n")  # Add newline cause Py JSON does not
def write_list(data, file):
    """Write every string in ``data`` to ``file``, one item per line."""
    with open(file, 'w') as out:
        out.writelines(item + '\n' for item in data)
def read_file(file_path, mode='rt'):
    """Return the entire contents of ``file_path`` opened with ``mode``.

    Defaults to text mode; pass ``'rb'`` for bytes.  Any ``OSError``
    (missing file, permissions, ...) is logged and re-raised.
    """
    try:
        with open(file_path, mode) as handle:
            return handle.read()
    except OSError as err:
        print('Unable to read file', err)
        raise
"json.loads",
"json.dump"
] | [((214, 233), 'json.loads', 'json.loads', (['readstr'], {}), '(readstr)\n', (224, 233), False, 'import json\n'), ((497, 583), 'json.dump', 'json.dump', (['src_dict', 'json_file'], {'ensure_ascii': '(False)', 'indent': '(2)', 'sort_keys': 'sort_keys'}), '(src_dict, json_file, ensure_ascii=False, indent=2, sort_keys=\n sort_keys)\n', (506, 583), False, 'import json\n')] |
# -*- encoding: utf-8 -*-
import dsl
from shapely.wkt import loads as wkt_loads
from . import FixtureTest
class SuppressHistoricalClosed(FixtureTest):
    """POIs named "... (closed)" should be hidden at low zooms and emitted
    late with kind=closed."""
    def test_cartoon_museum(self):
        # Cartoon Art Museum (closed)
        self.generate_fixtures(dsl.way(368173967, wkt_loads('POINT (-122.400856246311 37.78696485494709)'), {u'name': u'Cartoon Art Museum (closed)', u'gnis:reviewed': u'no', u'addr:state': u'CA', u'ele': u'7',
                               u'source': u'openstreetmap.org', u'wikidata': u'Q1045990', u'gnis:import_uuid': u'57871b70-0100-4405-bb30-88b2e001a944', u'gnis:feature_id': u'1657282', u'tourism': u'museum', u'gnis:county_name': u'San Francisco'}))
        # POI shouldn't be visible early
        self.assert_no_matching_feature(
            15, 5242, 12664, 'pois',
            {'id': 368173967})
        # but POI should be present at z17 and marked as closed
        self.assert_has_feature(
            16, 10485, 25328, 'pois',
            {'id': 368173967, 'kind': 'closed', 'min_zoom': 17})
| [
"shapely.wkt.loads"
] | [((278, 334), 'shapely.wkt.loads', 'wkt_loads', (['"""POINT (-122.400856246311 37.78696485494709)"""'], {}), "('POINT (-122.400856246311 37.78696485494709)')\n", (287, 334), True, 'from shapely.wkt import loads as wkt_loads\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 13 11:03:51 2019
@author: ivanpauno
"""
import matplotlib.pyplot as plt
import numpy as np
def main():
    """Plot the required filter order versus attenuation ``A`` for
    Butterworth (black) and Chebyshev (red) designs, one figure per
    normalized stopband edge ``ws``.
    """
    # A = sqrt(10^(.1*alpha_min-1)/10^(.1*alpha_max-1))
    attenuation = np.logspace(np.log10(2), np.log10(100), num=200)
    stopband_edges = [1.1, 1.5, 2, 3]
    # Rounded-up (integer) orders for each stopband edge.
    butter_orders = np.ceil([np.log(attenuation) / np.log(ws) for ws in stopband_edges])
    cheby_orders = np.ceil([np.arccosh(attenuation) / np.arccosh(ws) for ws in stopband_edges])
    for ws, n_butter, n_cheby in zip(stopband_edges, butter_orders, cheby_orders):
        fig, ax = plt.subplots()
        ax.ticklabel_format(useOffset=False)
        ax.set_xlabel('A')
        ax.set_ylabel('n')
        ax.grid(True)
        ax.plot(attenuation, n_butter, 'k')
        ax.plot(attenuation, n_cheby, 'r')
        title = 'Order comparison ws={}'.format(ws)
        fig.suptitle(title)
        fig.canvas.set_window_title(title)
    plt.show()
if __name__ == '__main__':
    # Generate the order-comparison plots when run as a script.
    main()
| [
"numpy.ceil",
"numpy.log10",
"numpy.log",
"numpy.arccosh",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((526, 543), 'numpy.ceil', 'np.ceil', (['n_butter'], {}), '(n_butter)\n', (533, 543), True, 'import numpy as np\n'), ((558, 574), 'numpy.ceil', 'np.ceil', (['n_cheby'], {}), '(n_cheby)\n', (565, 574), True, 'import numpy as np\n'), ((973, 983), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (981, 983), True, 'import matplotlib.pyplot as plt\n'), ((253, 264), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (261, 264), True, 'import numpy as np\n'), ((266, 279), 'numpy.log10', 'np.log10', (['(100)'], {}), '(100)\n', (274, 279), True, 'import numpy as np\n'), ((628, 642), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (640, 642), True, 'import matplotlib.pyplot as plt\n'), ((338, 347), 'numpy.log', 'np.log', (['A'], {}), '(A)\n', (344, 347), True, 'import numpy as np\n'), ((348, 358), 'numpy.log', 'np.log', (['ws'], {}), '(ws)\n', (354, 358), True, 'import numpy as np\n'), ((394, 407), 'numpy.arccosh', 'np.arccosh', (['A'], {}), '(A)\n', (404, 407), True, 'import numpy as np\n'), ((408, 422), 'numpy.arccosh', 'np.arccosh', (['ws'], {}), '(ws)\n', (418, 422), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
_____________________________________________________________________________
Created By : <NAME> - Bacnv6
Created Date: Mon November 03 10:00:00 VNT 2020
Project : AkaOCR core
_____________________________________________________________________________
This file contain runtime utilities
_____________________________________________________________________________
"""
import sys
import signal
import torch, time, gc
from contextlib import contextmanager
class Color:  # pylint: disable=W0232
    """ANSI SGR foreground color codes (30-38), used by colorize()."""
    (GRAY, RED, GREEN, YELLOW, BLUE,
     MAGENTA, CYAN, WHITE, CRIMSON) = range(30, 39)
def colorize(num, string, bold=False, highlight=False):
    """Wrap ``string`` in ANSI escape codes for color ``num``.

    ``highlight`` shifts the code into the background range (+10);
    ``bold`` appends the bold attribute.
    """
    assert isinstance(num, int)
    code = num + 10 if highlight else num
    attrs = [str(code)]
    if bold:
        attrs.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attrs), string)
def colorprint(colorcode, text, o=sys.stdout, bold=False):
    """Write ``text`` colorized with ``colorcode`` to stream ``o`` (no newline)."""
    colored = colorize(colorcode, text, bold=bold)
    o.write(colored)
def warn(msg):
    """Print ``msg`` to stdout in yellow."""
    print(colorize(Color.YELLOW, msg))
def error(msg):
    """Print ``msg`` to stdout in red."""
    print(colorize(Color.RED, msg))
# http://stackoverflow.com/questions/366682/how-to-limit-execution-time-of-a-function-call-in-python
class TimeoutException(Exception): pass
@contextmanager
def time_limit(seconds):
    """Context manager raising :class:`TimeoutException` if the managed
    block runs longer than ``seconds`` (whole seconds).

    Uses SIGALRM, so it only works on Unix and in the main thread.  The
    previous SIGALRM handler is restored on exit (the original code
    leaked its handler override).
    """
    def signal_handler(signum, frame):
        raise TimeoutException(colorize(Color.RED, " *** Timed out!", highlight=True))
    # signal.signal() returns the handler it replaces.
    old_handler = signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)  # cancel any pending alarm
        signal.signal(signal.SIGALRM, old_handler)
# Timing utilities
# Module-level timestamp set by start_timer() and read by end_timer_and_print().
start_time = None
def start_timer():
    """Reset CUDA memory statistics and record a synchronized start time.

    Requires a CUDA device; the torch.cuda calls fail without one.
    """
    global start_time
    gc.collect()  # drop unreferenced objects before measuring
    torch.cuda.empty_cache()  # release cached GPU memory
    torch.cuda.reset_max_memory_allocated()  # zero the peak-allocation counter
    torch.cuda.synchronize()  # wait for pending kernels before timing
    start_time = time.time()
def end_timer_and_print(local_msg):
    """Print ``local_msg`` plus elapsed wall time and peak GPU memory
    since the last start_timer() call."""
    torch.cuda.synchronize()
    elapsed = time.time() - start_time
    peak_bytes = torch.cuda.max_memory_allocated()
    print("\n" + local_msg)
    print("Total execution time = {:.3f} sec".format(elapsed))
    print("Max memory used by tensors = {} bytes".format(peak_bytes))
"signal.signal",
"torch.cuda.max_memory_allocated",
"torch.cuda.reset_max_memory_allocated",
"torch.cuda.synchronize",
"gc.collect",
"signal.alarm",
"time.time",
"torch.cuda.empty_cache"
] | [((1536, 1581), 'signal.signal', 'signal.signal', (['signal.SIGALRM', 'signal_handler'], {}), '(signal.SIGALRM, signal_handler)\n', (1549, 1581), False, 'import signal\n'), ((1587, 1608), 'signal.alarm', 'signal.alarm', (['seconds'], {}), '(seconds)\n', (1599, 1608), False, 'import signal\n'), ((1766, 1778), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1776, 1778), False, 'import torch, time, gc\n'), ((1784, 1808), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1806, 1808), False, 'import torch, time, gc\n'), ((1814, 1853), 'torch.cuda.reset_max_memory_allocated', 'torch.cuda.reset_max_memory_allocated', ([], {}), '()\n', (1851, 1853), False, 'import torch, time, gc\n'), ((1859, 1883), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1881, 1883), False, 'import torch, time, gc\n'), ((1902, 1913), 'time.time', 'time.time', ([], {}), '()\n', (1911, 1913), False, 'import torch, time, gc\n'), ((1958, 1982), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1980, 1982), False, 'import torch, time, gc\n'), ((1999, 2010), 'time.time', 'time.time', ([], {}), '()\n', (2008, 2010), False, 'import torch, time, gc\n'), ((1657, 1672), 'signal.alarm', 'signal.alarm', (['(0)'], {}), '(0)\n', (1669, 1672), False, 'import signal\n'), ((2176, 2209), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (2207, 2209), False, 'import torch, time, gc\n')] |
# -*- coding: utf-8 -*-
'''
@time: 2019/9/8 18:45
@ author: javis
'''
import os
class Config:
    """Static configuration for the ECG training/inference pipeline.

    All values are class attributes; a shared ``config`` instance is
    created at module level below.
    """
    # for data_process.py
    #root = r'D:\ECG'
    root = r'data'
    train_dir = os.path.join(root, 'ecg_data/')
    # test_dir = os.path.join(root, 'ecg_data/testA')
    # train_label = os.path.join(root, 'hf_round1_label.txt')
    # test_label = os.path.join(root, 'hf_round1_subA.txt')
    # arrythmia = os.path.join(root, 'hf_round1_arrythmia.txt')
    train_data = os.path.join(root, 'ecg_data')
    # for train
    # Name of the model architecture to train
    model_name = 'resnet50'
    # Epochs at which training advances to the next stage and the lr is adjusted
    stage_epoch = [32, 64,128]
    # Batch size used during training
    batch_size = 128
    # Number of label classes
    num_classes = 18
    # Maximum number of training epochs
    max_epoch = 128
    # Target resampled signal length
    target_point_num = 2048 * 5
    # Folder where model checkpoints are saved
    ckpt = 'ckpt/'
    # Folder where submission files are saved
    sub_dir = 'submit'
    # Initial learning rate
    lr = 1e-3
    # Checkpoint names for the current-epoch weights
    kfold = ""  # presumably a fold suffix for checkpoint paths — verify against trainer
    current_w = 'current_w.pth'
    # Checkpoint name for the best weights
    best_w = 'best_w.pth'
    # Learning-rate decay: lr /= lr_decay
    lr_decay = 10
    #for test
    temp_dir=os.path.join(root,'temp')
    # SiT (transformer) hyper-parameters
    patch_size = 8
    dim = 256
    mlp_dim = 512
    dropout = 0.3
    head_num = 8
    depth = 12
    heads = 8
# Shared module-level configuration instance.
config = Config()
| [
"os.path.join"
] | [((180, 211), 'os.path.join', 'os.path.join', (['root', '"""ecg_data/"""'], {}), "(root, 'ecg_data/')\n", (192, 211), False, 'import os\n'), ((469, 499), 'os.path.join', 'os.path.join', (['root', '"""ecg_data"""'], {}), "(root, 'ecg_data')\n", (481, 499), False, 'import os\n'), ((1057, 1083), 'os.path.join', 'os.path.join', (['root', '"""temp"""'], {}), "(root, 'temp')\n", (1069, 1083), False, 'import os\n')] |
import cv2
import matplotlib.pyplot as plt
import easyocr
# OCR demo: read text from a scanned page with EasyOCR and draw the results.
reader = easyocr.Reader(['en'], gpu=False)
image = cv2.imread('results/JK_21_05/page_1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # NOTE(review): computed but unused below
dilated = cv2.dilate(image, None, iterations=1)  # NOTE(review): also unused; OCR runs on `eroded`
eroded = cv2.erode(image, None, iterations=1)
# Each result is a (bounding_box, text, confidence) triple.
res = reader.readtext(eroded)
# Preview the preprocessed image before annotating it.
cv2.imshow('s', eroded)
cv2.waitKey(0)
cv2.destroyAllWindows()
# for response in res:
#     print(res)
for (bbox, text, prob) in res:
    # unpack the bounding box
    (tl, tr, br, bl) = bbox
    tl = (int(tl[0]), int(tl[1]))
    tr = (int(tr[0]), int(tr[1]))
    br = (int(br[0]), int(br[1]))
    bl = (int(bl[0]), int(bl[1]))
    # Draw the detection rectangle and the recognized text just above it.
    cv2.rectangle(eroded, tl, br, (0, 255, 0), 2)
    cv2.putText(eroded, text, (tl[0], tl[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
# Show the annotated image until a key is pressed.
cv2.imshow("Image", eroded)
cv2.waitKey(0)
"cv2.rectangle",
"cv2.erode",
"cv2.imshow",
"easyocr.Reader",
"cv2.putText",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.dilate",
"cv2.imread"
] | [((67, 100), 'easyocr.Reader', 'easyocr.Reader', (["['en']"], {'gpu': '(False)'}), "(['en'], gpu=False)\n", (81, 100), False, 'import easyocr\n'), ((109, 150), 'cv2.imread', 'cv2.imread', (['"""results/JK_21_05/page_1.jpg"""'], {}), "('results/JK_21_05/page_1.jpg')\n", (119, 150), False, 'import cv2\n'), ((159, 198), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (171, 198), False, 'import cv2\n'), ((209, 246), 'cv2.dilate', 'cv2.dilate', (['image', 'None'], {'iterations': '(1)'}), '(image, None, iterations=1)\n', (219, 246), False, 'import cv2\n'), ((256, 292), 'cv2.erode', 'cv2.erode', (['image', 'None'], {'iterations': '(1)'}), '(image, None, iterations=1)\n', (265, 292), False, 'import cv2\n'), ((324, 347), 'cv2.imshow', 'cv2.imshow', (['"""s"""', 'eroded'], {}), "('s', eroded)\n", (334, 347), False, 'import cv2\n'), ((348, 362), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (359, 362), False, 'import cv2\n'), ((363, 386), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (384, 386), False, 'import cv2\n'), ((798, 825), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'eroded'], {}), "('Image', eroded)\n", (808, 825), False, 'import cv2\n'), ((826, 840), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (837, 840), False, 'import cv2\n'), ((654, 699), 'cv2.rectangle', 'cv2.rectangle', (['eroded', 'tl', 'br', '(0, 255, 0)', '(2)'], {}), '(eroded, tl, br, (0, 255, 0), 2)\n', (667, 699), False, 'import cv2\n'), ((704, 802), 'cv2.putText', 'cv2.putText', (['eroded', 'text', '(tl[0], tl[1] - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(0, 255, 0)', '(2)'], {}), '(eroded, text, (tl[0], tl[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, \n 0.8, (0, 255, 0), 2)\n', (715, 802), False, 'import cv2\n')] |
from PyQt5.QtCore import QAbstractTableModel, QAbstractItemModel
from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot
class LoadTypesProcess(QAbstractTableModel):
    """Editable Qt table model over a 2-D list of strings (``csv_values``).

    Rows/columns grow on demand in setData; a companion HeaderModel holds
    the column headers.
    """
    def __init__(self):
        super().__init__()
        # csv_values is a list of rows; each row is a list of cell strings.
        self.csv_values = []
        self.header_model = HeaderModel()
    # NOTE(review): the QModelIndex() defaults below are evaluated once and
    # shared across calls — harmless for an (immutable) invalid index, but
    # worth confirming; the usual PyQt idiom is parent=QModelIndex().
    def index(self, row, column, parent=QModelIndex()):
        return self.createIndex(row, column)
    def rowCount(self, parent=QModelIndex()):
        return len(self.csv_values)
    def columnCount(self, parent=QModelIndex()):
        # Column count is taken from the first row (0 when empty).
        count = 0
        if len(self.csv_values):
            count = len(self.csv_values[0])
        return count
    def data(self, index, role=Qt.DisplayRole):
        # Only display data is provided; other roles return None implicitly.
        if role == Qt.DisplayRole:
            return self.csv_values[index.row()][index.column()]
    def setData(self, index, value, role=Qt.EditRole):
        """Store ``value`` at ``index``, growing the table if needed."""
        if index.isValid() and role == Qt.EditRole:
            if index.column() >= self.columnCount():
                self.insertColumns(index.column(), 1)
            if index.row() >= self.rowCount():
                self.insertRows(index.row(), 1)
            self.csv_values[index.row()][index.column()] = value
            self.dataChanged.emit(index, index)
            return True
        return False
    def flags(self, index):
        # Every cell is editable in addition to the base model flags.
        return QAbstractTableModel.flags(self, index) | Qt.ItemIsEditable
    def insertRows(self, position, rows, index=QModelIndex()):
        """Append ``rows`` empty rows (each padded to the current width)."""
        self.beginInsertRows(index, position, position+rows-1)
        for _ in range(rows):
            new_row = []
            for _ in range(self.columnCount()):
                new_row.append("")
            self.csv_values.append(new_row)
        self.endInsertRows()
        return True
    def insertColumns(self, position, columns, index=QModelIndex()):
        """Append ``columns`` empty cells to every existing row."""
        self.beginInsertColumns(index, position, position+columns-1)
        for row in self.csv_values:
            for _ in range(columns):
                row.append("")
        self.endInsertColumns()
        return True
    def removeRows(self, position, rows, index=QModelIndex()):
        self.beginRemoveRows(index, position, position+rows-1)
        # Pop from the highest index down so earlier pops don't shift later ones.
        for row in reversed(range(position, position+rows)):
            self.csv_values.pop(row)
        self.endRemoveRows()
    def removeColumns(self, position, columns, index=QModelIndex()):
        self.beginRemoveColumns(index, position, position+columns-1)
        # NOTE(review): endRemoveColumns() is never called here — confirm
        # whether views relying on the remove notification misbehave.
        for row in self.csv_values:
            for column in reversed(range(position, position+columns)):
                row.pop(column)
class HeaderModel(QAbstractItemModel):
    """Single-row Qt model holding the column header strings (``values``)."""
    def __init__(self):
        super().__init__()
        # One header string per column.
        self.values = []
    def index(self, row, column, parent=QModelIndex()):
        return self.createIndex(row, column)
    def columnCount(self, parent=QModelIndex()):
        return len(self.values)
    def rowCount(self, parent=QModelIndex()):
        # Headers occupy exactly one row.
        return 1
    def headerData(self, section, orientation, role=Qt.DisplayRole):
        if role == Qt.DisplayRole:
            return self.values[section]
    def setHeaderData(self, section, orientation, value, role=Qt.EditRole):
        if role == Qt.EditRole:
            self.values[section] = value
    def removeColumn(self, column, index=QModelIndex()):
        self.beginRemoveColumns(index, column, column)
        self.values.pop(column)
        self.endRemoveColumns()
    def insertColumns(self, column, amount, index=QModelIndex()):
        """Append ``amount`` columns, defaulting each header to its index."""
        self.beginInsertColumns(index, column, column+amount-1)
        for idx in range(amount):
            self.values.append(str(self.columnCount()+idx))
        self.endInsertColumns()
| [
"PyQt5.QtCore.QAbstractTableModel.flags",
"PyQt5.QtCore.QModelIndex"
] | [((326, 339), 'PyQt5.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (337, 339), False, 'from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot\n'), ((418, 431), 'PyQt5.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (429, 431), False, 'from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot\n'), ((504, 517), 'PyQt5.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (515, 517), False, 'from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot\n'), ((1407, 1420), 'PyQt5.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (1418, 1420), False, 'from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot\n'), ((1771, 1784), 'PyQt5.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (1782, 1784), False, 'from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot\n'), ((2060, 2073), 'PyQt5.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (2071, 2073), False, 'from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot\n'), ((2320, 2333), 'PyQt5.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (2331, 2333), False, 'from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot\n'), ((2702, 2715), 'PyQt5.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (2713, 2715), False, 'from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot\n'), ((2797, 2810), 'PyQt5.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (2808, 2810), False, 'from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot\n'), ((2876, 2889), 'PyQt5.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (2887, 2889), False, 'from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot\n'), ((3246, 3259), 'PyQt5.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (3257, 3259), False, 'from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot\n'), ((3432, 3445), 'PyQt5.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (3443, 3445), False, 'from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot\n'), ((1300, 1338), 'PyQt5.QtCore.QAbstractTableModel.flags', 'QAbstractTableModel.flags', (['self', 'index'], {}), '(self, index)\n', (1325, 1338), False, 'from 
PyQt5.QtCore import QAbstractTableModel, QAbstractItemModel\n')] |
#!/usr/bin/env python
# mix of:
# https://www.programcreek.com/python/example/88577/gi.repository.Gst.Pipeline
# https://github.com/GStreamer/gst-python/blob/master/examples/helloworld.py
# http://lifestyletransfer.com/how-to-launch-gstreamer-pipeline-in-python/
import sys
import collections
from pprint import pprint
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, GLib
import pdb
'''
gst-launch-1.0 \
videotestsrc is-live=true ! \
queue ! videoconvert ! x264enc byte-stream=true ! \
h264parse config-interval=1 ! queue ! matroskamux ! queue leaky=2 ! \
tcpserversink port=7001 host=0.0.0.0 recover-policy=keyframe sync-method=latest-keyframe sync=false
'''
def main(args):
    """Build and run a GStreamer pipeline serving an H.264/Matroska test
    stream over TCP on port 7001 (equivalent to the gst-launch-1.0 command
    in the module docstring), then clean up and exit.
    """
    # deprecated but still found in many tutorials!
    #GObject.threads_init()
    Gst.init(None)
    # namedtuple gives the pipeline description readable fields.
    Element = collections.namedtuple('Element', ['type', 'attributes'])
    # Ordered pipeline: element factory name -> property dict.
    elements = [
        Element('videotestsrc', { "is-live": True}),
        Element('queue', {}),
        Element('videoconvert', {}),
        Element('x264enc', {"byte-stream": True}),
        Element('h264parse', {"config-interval":1}),
        Element('queue', {}),
        Element('matroskamux', {}),
        Element('queue', {"leaky": 2}),
        Element('tcpserversink', {"port": 7001, "host": "0.0.0.0", "recover-policy": "keyframe", "sync-method":"latest-keyframe", "sync": False}),
    ]
    pipeline = Gst.Pipeline()
    message_bus = pipeline.get_bus()
    message_bus.add_signal_watch()
    message_bus.connect('message', bus_call, None)
    elements_created= dict()
    # Instantiate each element (names are type+index), apply its
    # properties, and add it to the pipeline.
    for index, item in enumerate(elements):
        name = item.type+str(index)
        elements_created[name] = Gst.ElementFactory.make(item.type, name)
        for key, value in item.attributes.items():
            #pdb.set_trace()
            elements_created[name].set_property(key, value)
        pipeline.add(elements_created[name])
    # Link each element to the next one in declaration order.
    length = len(elements)
    i = 0
    # Iterating to connect the elements
    while i < length-1:
        pprint(elements[i].type+str(i))
        current_name_in_created= elements[i].type+str(i)
        next_name_in_created= elements[i+1].type+str(i+1)
        ## now link them!
        print(current_name_in_created+"->"+next_name_in_created)
        elements_created[current_name_in_created].link(elements_created[next_name_in_created])
        i += 1
    pprint(elements_created)
    #pdb.set_trace()
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    # create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    try:
        loop.run()
    # NOTE(review): bare except also catches KeyboardInterrupt so the
    # cleanup below still runs on Ctrl-C — narrowing it would change that.
    except:
        loop.quit()
    # cleanup
    print("cleaning up")
    pipeline.set_state(Gst.State.NULL)
    sys.exit()
# http://lifestyletransfer.com/how-to-launch-gstreamer-pipeline-in-python/
def bus_call(bus: Gst.Bus, message: Gst.Message, loop: GLib.MainLoop):
    """GStreamer bus watch callback.

    Quits ``loop`` on end-of-stream or error, logs warnings to stderr, and
    returns True so the watch stays installed.
    """
    t = message.type
    if t == Gst.MessageType.EOS:
        sys.stdout.write("End-of-stream\n")
        loop.quit()
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write("Error: %s: %s\n" % (err, debug))
        loop.quit()
    elif t == Gst.MessageType.WARNING:
        # Handle warnings: report them but keep the loop running.
        err, debug = message.parse_warning()
        sys.stderr.write("Warning: %s: %s\n" % (err, debug))
    return True
if __name__ == '__main__':
    #done in main!
    #sys.exit(main(sys.argv))
    #https://stackoverflow.com/questions/4205317/capture-keyboardinterrupt-in-python-without-try-except
    # Ctrl-C exits quietly; main() performs its own cleanup and sys.exit().
    try:
        main(sys.argv)
    except KeyboardInterrupt:
        # do nothing here
        pass
| [
"collections.namedtuple",
"gi.repository.Gst.init",
"gi.repository.Gst.Pipeline",
"gi.repository.Gst.ElementFactory.make",
"gi.repository.GLib.MainLoop",
"gi.require_version",
"sys.stderr.write",
"sys.exit",
"pprint.pprint",
"sys.stdout.write"
] | [((332, 364), 'gi.require_version', 'gi.require_version', (['"""Gst"""', '"""1.0"""'], {}), "('Gst', '1.0')\n", (350, 364), False, 'import gi\n'), ((804, 818), 'gi.repository.Gst.init', 'Gst.init', (['None'], {}), '(None)\n', (812, 818), False, 'from gi.repository import GObject, Gst, GLib\n'), ((912, 969), 'collections.namedtuple', 'collections.namedtuple', (['"""Element"""', "['type', 'attributes']"], {}), "('Element', ['type', 'attributes'])\n", (934, 969), False, 'import collections\n'), ((1445, 1459), 'gi.repository.Gst.Pipeline', 'Gst.Pipeline', ([], {}), '()\n', (1457, 1459), False, 'from gi.repository import GObject, Gst, GLib\n'), ((2545, 2569), 'pprint.pprint', 'pprint', (['elements_created'], {}), '(elements_created)\n', (2551, 2569), False, 'from pprint import pprint\n'), ((2742, 2757), 'gi.repository.GLib.MainLoop', 'GLib.MainLoop', ([], {}), '()\n', (2755, 2757), False, 'from gi.repository import GObject, Gst, GLib\n'), ((2881, 2891), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2889, 2891), False, 'import sys\n'), ((1842, 1882), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['item.type', 'name'], {}), '(item.type, name)\n', (1865, 1882), False, 'from gi.repository import GObject, Gst, GLib\n'), ((3096, 3131), 'sys.stdout.write', 'sys.stdout.write', (['"""End-of-stream\n"""'], {}), "('End-of-stream\\n')\n", (3112, 3131), False, 'import sys\n'), ((3232, 3282), 'sys.stderr.write', 'sys.stderr.write', (["('Error: %s: %s\\n' % (err, debug))"], {}), "('Error: %s: %s\\n' % (err, debug))\n", (3248, 3282), False, 'import sys\n'), ((3420, 3472), 'sys.stderr.write', 'sys.stderr.write', (["('Warning: %s: %s\\n' % (err, debug))"], {}), "('Warning: %s: %s\\n' % (err, debug))\n", (3436, 3472), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
u"""Test simulationSerial
:copyright: Copyright (c) 2016 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
# Skip this whole test module unless the SRW backend (srwl_bl) is importable.
pytest.importorskip('srwl_bl')
#: Used for a sanity check on serial numbers
_MIN_SERIAL = 10000000
def test_1_serial_stomp():
    """Verify simulationSerial optimistic-locking behavior.

    Saving with a stale serial must fail with 'invalidSerial'; each
    successful save must strictly increase the serial.
    """
    from pykern.pkdebug import pkdp, pkdpretty
    from pykern.pkunit import pkfail, pkok
    from sirepo import sr_unit
    import copy
    fc = sr_unit.flask_client()
    sim_type = 'srw'
    data = fc.sr_post('listSimulations', {'simulationType': sim_type})
    # Locate the example simulation this test mutates.
    for youngs in data:
        if youngs['name'] == "Young's Double Slit Experiment":
            break
    else:
        pkfail("{}: Young's not found", pkdpretty(data))
    data = fc.sr_get(
        'simulationData',
        {
            'simulation_type': sim_type,
            'pretty': '0',
            'simulation_id': youngs['simulationId'],
        },
    )
    prev_serial = data['models']['simulation']['simulationSerial']
    # Keep a copy with the original (soon to be stale) serial.
    prev_data = copy.deepcopy(data)
    pkok(
        prev_serial > _MIN_SERIAL,
        '{}: serial must be greater than {}',
        prev_serial,
        _MIN_SERIAL,
    )
    data['models']['beamline'][4]['position'] = '61'
    curr_data = fc.sr_post('saveSimulationData', data)
    curr_serial = curr_data['models']['simulation']['simulationSerial']
    # A successful save must bump the serial.
    pkok(
        prev_serial < curr_serial,
        '{}: serial not incremented, still < {}',
        prev_serial,
        curr_serial,
    )
    # Saving the stale copy must be rejected.
    prev_data['models']['beamline'][4]['position'] = '60.5'
    failure = fc.sr_post('saveSimulationData', prev_data)
    pkok(
        failure['error'] == 'invalidSerial',
        '{}: unexpected status, expected serial failure',
        failure,
    )
    # Saving with the current serial succeeds and bumps it again.
    curr_data['models']['beamline'][4]['position'] = '60.5'
    curr_serial = curr_data['models']['simulation']['simulationSerial']
    new_data = fc.sr_post('saveSimulationData', curr_data)
    new_serial = new_data['models']['simulation']['simulationSerial']
    pkok(
        curr_serial < new_serial,
        '{}: serial not incremented, still < {}',
        new_serial,
        curr_serial,
    )
def test_oauth():
from pykern import pkconfig
pkconfig.reset_state_for_testing({
'SIREPO_SERVER_OAUTH_LOGIN': '1',
'SIREPO_OAUTH_GITHUB_KEY': 'n/a',
'SIREPO_OAUTH_GITHUB_SECRET': 'n/a',
'SIREPO_OAUTH_GITHUB_CALLBACK_URI': 'n/a',
})
from pykern.pkunit import pkfail, pkok
from sirepo import server
from sirepo import sr_unit
import re
sim_type = 'srw'
fc = sr_unit.flask_client()
fc.sr_post('listSimulations', {'simulationType': sim_type})
text = fc.sr_get(
'oauthLogin',
{
'simulation_type': sim_type,
'oauth_type': 'github',
},
raw_response=True,
).data
state = re.search(r'state=(.*?)"', text).group(1)
#TODO(pjm): causes a forbidden error due to missing variables, need to mock-up an oauth test type
text = fc.get('/oauth-authorized/github')
text = fc.sr_get(
'oauthLogout',
{
'simulation_type': sim_type,
},
raw_response=True,
).data
pkok(
text.find('Redirecting') > 0,
'missing redirect',
)
pkok(
text.find('"/{}"'.format(sim_type)) > 0,
'missing redirect target',
)
| [
"pykern.pkdebug.pkdpretty",
"sirepo.sr_unit.flask_client",
"pykern.pkunit.pkok",
"pytest.importorskip",
"pykern.pkconfig.reset_state_for_testing",
"copy.deepcopy",
"re.search"
] | [((260, 290), 'pytest.importorskip', 'pytest.importorskip', (['"""srwl_bl"""'], {}), "('srwl_bl')\n", (279, 290), False, 'import pytest\n'), ((536, 558), 'sirepo.sr_unit.flask_client', 'sr_unit.flask_client', ([], {}), '()\n', (556, 558), False, 'from sirepo import sr_unit\n'), ((1102, 1121), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (1115, 1121), False, 'import copy\n'), ((1126, 1225), 'pykern.pkunit.pkok', 'pkok', (['(prev_serial > _MIN_SERIAL)', '"""{}: serial must be greater than {}"""', 'prev_serial', '_MIN_SERIAL'], {}), "(prev_serial > _MIN_SERIAL, '{}: serial must be greater than {}',\n prev_serial, _MIN_SERIAL)\n", (1130, 1225), False, 'from pykern.pkunit import pkfail, pkok\n'), ((1445, 1548), 'pykern.pkunit.pkok', 'pkok', (['(prev_serial < curr_serial)', '"""{}: serial not incremented, still < {}"""', 'prev_serial', 'curr_serial'], {}), "(prev_serial < curr_serial, '{}: serial not incremented, still < {}',\n prev_serial, curr_serial)\n", (1449, 1548), False, 'from pykern.pkunit import pkfail, pkok\n'), ((1706, 1810), 'pykern.pkunit.pkok', 'pkok', (["(failure['error'] == 'invalidSerial')", '"""{}: unexpected status, expected serial failure"""', 'failure'], {}), "(failure['error'] == 'invalidSerial',\n '{}: unexpected status, expected serial failure', failure)\n", (1710, 1810), False, 'from pykern.pkunit import pkfail, pkok\n'), ((2103, 2204), 'pykern.pkunit.pkok', 'pkok', (['(curr_serial < new_serial)', '"""{}: serial not incremented, still < {}"""', 'new_serial', 'curr_serial'], {}), "(curr_serial < new_serial, '{}: serial not incremented, still < {}',\n new_serial, curr_serial)\n", (2107, 2204), False, 'from pykern.pkunit import pkfail, pkok\n'), ((2296, 2486), 'pykern.pkconfig.reset_state_for_testing', 'pkconfig.reset_state_for_testing', (["{'SIREPO_SERVER_OAUTH_LOGIN': '1', 'SIREPO_OAUTH_GITHUB_KEY': 'n/a',\n 'SIREPO_OAUTH_GITHUB_SECRET': 'n/a', 'SIREPO_OAUTH_GITHUB_CALLBACK_URI':\n 'n/a'}"], {}), 
"({'SIREPO_SERVER_OAUTH_LOGIN': '1',\n 'SIREPO_OAUTH_GITHUB_KEY': 'n/a', 'SIREPO_OAUTH_GITHUB_SECRET': 'n/a',\n 'SIREPO_OAUTH_GITHUB_CALLBACK_URI': 'n/a'})\n", (2328, 2486), False, 'from pykern import pkconfig\n'), ((2668, 2690), 'sirepo.sr_unit.flask_client', 'sr_unit.flask_client', ([], {}), '()\n', (2688, 2690), False, 'from sirepo import sr_unit\n'), ((806, 821), 'pykern.pkdebug.pkdpretty', 'pkdpretty', (['data'], {}), '(data)\n', (815, 821), False, 'from pykern.pkdebug import pkdp, pkdpretty\n'), ((2947, 2978), 're.search', 're.search', (['"""state=(.*?)\\""""', 'text'], {}), '(\'state=(.*?)"\', text)\n', (2956, 2978), False, 'import re\n')] |
import unittest
from kubragen import KubraGen
from kubragen.jsonpatch import FilterJSONPatches_Apply, ObjectFilter, FilterJSONPatch
from kubragen.provider import Provider_Generic
from kg_nodeexporter import NodeExporterBuilder, NodeExporterOptions
class TestBuilder(unittest.TestCase):
def setUp(self):
self.kg = KubraGen(provider=Provider_Generic())
def test_empty(self):
nodeexporter_config = NodeExporterBuilder(kubragen=self.kg)
self.assertEqual(nodeexporter_config.object_name('daemonset'), 'node-exporter')
def test_basedata(self):
nodeexporter_config = NodeExporterBuilder(kubragen=self.kg, options=NodeExporterOptions({
'namespace': 'myns',
'basename': 'mynodeexporter',
}))
self.assertEqual(nodeexporter_config.object_name('daemonset'), 'mynodeexporter')
FilterJSONPatches_Apply(items=nodeexporter_config.build(nodeexporter_config.BUILD_SERVICE), jsonpatches=[
FilterJSONPatch(filters=ObjectFilter(names=[nodeexporter_config.BUILDITEM_DAEMONSET]), patches=[
{'op': 'check', 'path': '/metadata/name', 'cmp': 'equals', 'value': 'mynodeexporter'},
{'op': 'check', 'path': '/metadata/namespace', 'cmp': 'equals', 'value': 'myns'},
]),
])
| [
"kg_nodeexporter.NodeExporterBuilder",
"kubragen.jsonpatch.ObjectFilter",
"kg_nodeexporter.NodeExporterOptions",
"kubragen.provider.Provider_Generic"
] | [((424, 461), 'kg_nodeexporter.NodeExporterBuilder', 'NodeExporterBuilder', ([], {'kubragen': 'self.kg'}), '(kubragen=self.kg)\n', (443, 461), False, 'from kg_nodeexporter import NodeExporterBuilder, NodeExporterOptions\n'), ((347, 365), 'kubragen.provider.Provider_Generic', 'Provider_Generic', ([], {}), '()\n', (363, 365), False, 'from kubragen.provider import Provider_Generic\n'), ((656, 728), 'kg_nodeexporter.NodeExporterOptions', 'NodeExporterOptions', (["{'namespace': 'myns', 'basename': 'mynodeexporter'}"], {}), "({'namespace': 'myns', 'basename': 'mynodeexporter'})\n", (675, 728), False, 'from kg_nodeexporter import NodeExporterBuilder, NodeExporterOptions\n'), ((1005, 1066), 'kubragen.jsonpatch.ObjectFilter', 'ObjectFilter', ([], {'names': '[nodeexporter_config.BUILDITEM_DAEMONSET]'}), '(names=[nodeexporter_config.BUILDITEM_DAEMONSET])\n', (1017, 1066), False, 'from kubragen.jsonpatch import FilterJSONPatches_Apply, ObjectFilter, FilterJSONPatch\n')] |
import os
import json
import time
import torch
# Called when the deployed service starts
def init():
global model
global device
# Get the path where the deployed model can be found.
model_filename = 'obj_segmentation.pkl'
model_path = os.path.join(os.environ['AZUREML_MODEL_DIR'], model_filename)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = torch.load(model_path, map_location=device)
# Handle requests to the service
def run(data):
try:
start_at = time.time()
inputs = json.loads(data)
img_data_list = inputs["instances"]
img_tensor_list = [torch.tensor(item) for item in img_data_list]
model.eval()
with torch.no_grad():
predictions = model([item.to(device) for item in img_tensor_list])
pred_data_list = [{
"masks": prediction['masks'][0, 0].mul(255).byte().cpu().numpy().tolist(),
"boxes": prediction['boxes'].numpy().tolist(),
"labels": prediction['labels'].numpy().tolist(),
"scores": prediction['scores'].numpy().tolist(),
} for prediction in predictions]
return {"predictions": pred_data_list,
"elapsed_time": time.time() - start_at}
except Exception as e:
error = str(e)
return error
| [
"json.loads",
"torch.load",
"os.path.join",
"torch.tensor",
"torch.cuda.is_available",
"torch.no_grad",
"time.time",
"torch.device"
] | [((257, 318), 'os.path.join', 'os.path.join', (["os.environ['AZUREML_MODEL_DIR']", 'model_filename'], {}), "(os.environ['AZUREML_MODEL_DIR'], model_filename)\n", (269, 318), False, 'import os\n'), ((420, 463), 'torch.load', 'torch.load', (['model_path'], {'map_location': 'device'}), '(model_path, map_location=device)\n', (430, 463), False, 'import torch\n'), ((357, 382), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (380, 382), False, 'import torch\n'), ((333, 353), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (345, 353), False, 'import torch\n'), ((388, 407), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (400, 407), False, 'import torch\n'), ((541, 552), 'time.time', 'time.time', ([], {}), '()\n', (550, 552), False, 'import time\n'), ((570, 586), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (580, 586), False, 'import json\n'), ((658, 676), 'torch.tensor', 'torch.tensor', (['item'], {}), '(item)\n', (670, 676), False, 'import torch\n'), ((738, 753), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (751, 753), False, 'import torch\n'), ((1253, 1264), 'time.time', 'time.time', ([], {}), '()\n', (1262, 1264), False, 'import time\n')] |
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
This module contains (and isolates) logic used to find entities based on entity type,
list selection criteria and search terms.
"""
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2014, <NAME>"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import re
from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException
from pyparsing import alphas, alphanums
from utils.py3porting import is_string, to_unicode
from annalist import layout
from annalist.util import valid_id, extract_entity_id
from annalist.models.recordtype import RecordType
from annalist.models.recordtypedata import RecordTypeData
from annalist.models.entitytypeinfo import EntityTypeInfo
# -------------------------------------------------------------------
# Auxilliary functions
# -------------------------------------------------------------------
def order_entity_key(entity):
"""
Function returns sort key for ordering entities by type and entity id
Use with `sorted`, thus:
sorted(entities, order_entity_key)
"""
type_id = entity.get_type_id()
entity_id = entity.get_id()
key = ( 0 if type_id.startswith('_') else 1, type_id,
0 if entity_id.startswith('_') else 1, entity_id
)
return key
# -------------------------------------------------------------------
# EntityFinder
# -------------------------------------------------------------------
class EntityFinder(object):
"""
Logic for enumerating entities matching a supplied type, selector and/or search string.
"""
def __init__(self, coll, selector=None):
"""
Initialize entity finder for collection and selector.
"""
super(EntityFinder, self).__init__()
self._coll = coll
self._site = coll.get_site()
self._selector = EntitySelector(selector, FieldComparison(coll))
# self._subtypes = None
return
def get_collection_type_ids(self, altscope):
"""
Returns iterator over possible type ids in current collection.
Each type is returned as a candidate type identifier string
"""
return self._coll.cache_get_all_type_ids(altscope=altscope)
def get_collection_subtype_ids(self, supertype_id, altscope):
"""
Returns a iterator of type ids for all subtypes of the supplied type
accessible in the indicated scope from the current collection, including
the identified type itself.
"""
if not valid_id(supertype_id):
log.warning("EntityFinder.get_collection_subtype_ids: invalid type_id %s"%(supertype_id,))
return
supertype_info = EntityTypeInfo(self._coll, supertype_id)
supertype_uri = supertype_info.get_type_uri()
if supertype_uri is not None:
for try_subtype_id in self.get_collection_type_ids(altscope):
try_subtype = self._coll.cache_get_type(try_subtype_id)
if try_subtype:
try_subtype_uri = try_subtype.get_uri()
if ( ( supertype_uri == try_subtype_uri ) or
( supertype_uri in self._coll.cache_get_supertype_uris(try_subtype_uri) ) ):
yield try_subtype_id
else:
log.warning("EntityFinder.get_collection_subtype_ids: no type_uri for %s"%(supertype_id,))
def get_type_entities(self, type_id, user_permissions, altscope):
"""
Iterate over entities from collection matching the supplied type.
'altscope' is used to determine the extent of data to be included in the listing:
a value of 'all' means that site-wide entyities are icnluded in the listing.
Otherwise only collection entities are included.
"""
#@@
# log.info("get_type_entities: type_id %s, user_permissions %r"%(type_id,user_permissions))
#@@
entitytypeinfo = EntityTypeInfo(self._coll, type_id)
for e in entitytypeinfo.enum_entities_with_implied_values(
user_permissions, altscope=altscope
):
if e.get_id() != layout.INITIAL_VALUES_ID:
#@@
# log.info(" yield: %s"%(e.get_id(),))
#@@
yield e
return
def get_subtype_entities(self, type_id, user_permissions, altscope):
"""
Iterate over entities from collection that are of the indicated type
or any of its subtypes.
'altscope' is used to determine the extent of data to be included in the listing:
a value of 'all' means that site-wide entities are included in the listing.
Otherwise only collection entities are included.
"""
for subtype_id in self.get_collection_subtype_ids(type_id, "all"):
subtype_info = EntityTypeInfo(self._coll, subtype_id)
es = subtype_info.enum_entities_with_implied_values(
user_permissions, altscope=altscope
)
#@@
# es = list(es) #@@ Force strict eval
# log.info("get_subtype_entities: %r"%([e.get_id() for e in es],))
#@@
for e in es:
if e.get_id() != layout.INITIAL_VALUES_ID:
yield e
return
def get_all_types_entities(self, types, user_permissions, altscope):
"""
Iterate over all entities of all types from a supplied type iterator
"""
#@@
# log.info("@@@@ get_all_types_entities")
#@@
for t in types:
for e in self.get_type_entities(t, user_permissions, altscope):
#@@
# log.info("get_all_types_entities: type %s/%s"%(t,e.get_id()))
#@@
yield e
return
def get_base_entities(self, type_id=None, user_permissions=None, altscope=None):
"""
Iterate over base entities from collection, matching the supplied type id if supplied.
If a type_id is supplied, site data values are included.
"""
entities = None
if type_id:
entities = self.get_subtype_entities(type_id, user_permissions, altscope)
# return self.get_type_entities(type_id, user_permissions, scope)
else:
entities = self.get_all_types_entities(
self.get_collection_type_ids(altscope="all"), user_permissions, altscope
)
#@@
# entities = list(entities) #@@ Force strict eval
# log.info("get_base_entities: %r"%([(e.get_type_id(), e.get_id()) for e in entities],))
#@@
return entities
def search_entities(self, entities, search):
"""
Iterate over entities from supplied iterator containing supplied search term.
"""
for e in entities:
if self.entity_contains(e, search):
yield e
return
def get_entities(self,
user_permissions=None, type_id=None, altscope=None, context=None, search=None
):
"""
Iterates over entities of the specified type, matching search term and visible to
supplied user permissions.
"""
entities = self._selector.filter(
self.get_base_entities(type_id, user_permissions, altscope), context=context
)
if search:
entities = self.search_entities(entities, search)
return entities
def get_entities_sorted(self,
user_permissions=None, type_id=None, altscope=None, context={}, search=None
):
"""
Get sorted list of entities of the specified type, matching search term and
visible to supplied user permissions.
"""
entities = self.get_entities(
user_permissions, type_id=type_id, altscope=altscope,
context=context, search=search
)
#@@
# entities = list(entities) #@@ Force strict eval
# log.info("get_entities_sorted: %r"%([e.get_id() for e in entities],))
#@@
return sorted(entities, key=order_entity_key)
@classmethod
def entity_contains(cls, e, search):
"""
Returns True if entity contains/matches search term, else False.
Search term None (or blank) matches all entities.
>>> e1 = { 'p:a': '1', 'p:b': '2', 'p:c': '3', 'annal:property_uri': 'annal:member' }
>>> EntityFinder.entity_contains(e1, "1")
True
>>> EntityFinder.entity_contains(e1, "3")
True
>>> EntityFinder.entity_contains(e1, "nothere")
False
>>> EntityFinder.entity_contains(e1, "annal:member")
True
>>> e2 = { 'list': ['l1', 'l2', 'l3'] \
, 'dict': {'p:a': 'd1', 'p:b': 'd2', 'p:c': 'd3'} \
}
>>> EntityFinder.entity_contains(e2, "l1")
True
>>> EntityFinder.entity_contains(e2, "d3")
True
>>> EntityFinder.entity_contains(e2, "nothere")
False
"""
if search:
# Entity is not a dict, so scan entity keys for search
for key in e:
val = e[key]
if cls.value_contains(val, search):
return True
return False
return True
@classmethod
def value_contains(cls, val, search):
"""
Helper function tests for search term in dictionary, list or string values.
Other values are not searched.
"""
if isinstance(val, dict):
for k in val:
if cls.value_contains(val[k], search):
return True
elif isinstance(val, list):
for e in val:
if cls.value_contains(e, search):
return True
elif is_string(val):
return search in val
return False
# -------------------------------------------------------------------
# EntitySelector
# -------------------------------------------------------------------
class EntitySelector(object):
"""
This class implements a selector filter. It is initialized with a selector
expression, and may be invoked as a filter applied to an entity generator,
or as a predicate applied to a single entity.
>>> e = { 'p:a': '1', 'p:b': '2', 'p:c': '3', '@type': ["http://example.com/type", "foo:bar"] }
>>> c = { 'view': { 'v:a': '1', 'v:b': ['2', '3'] } }
>>> f1 = "'1' == [p:a]"
>>> f2 = "[p:a]=='2'"
>>> f3 = ""
>>> f4 = "'http://example.com/type' in [@type]"
>>> f5 = "'foo:bar' in [@type]"
>>> f6 = "'bar:foo' in [@type]"
>>> f7 = "[p:a] in view[v:a]"
>>> f8 = "[p:b] in view[v:b]"
>>> f9 = "[p:a] in view[v:b]"
>>> f10 = "[annal:field_entity_type] in view[annal:view_entity_type]"
>>> f11 = "foo:bar in [@type]"
>>> f12 = "bar:foo in [@type]"
>>> EntitySelector(f1).select_entity(e, c)
True
>>> EntitySelector(f2).select_entity(e, c)
False
>>> EntitySelector(f3).select_entity(e, c)
True
>>> EntitySelector(f4).select_entity(e, c)
True
>>> EntitySelector(f5).select_entity(e, c)
True
>>> EntitySelector(f6).select_entity(e, c)
False
>>> EntitySelector(f7).select_entity(e, c)
True
>>> EntitySelector(f8).select_entity(e, c)
True
>>> EntitySelector(f9).select_entity(e, c)
False
>>> EntitySelector(f10).select_entity(e, c)
True
>>> EntitySelector(f11).select_entity(e, c)
True
>>> EntitySelector(f12).select_entity(e, c)
False
"""
def __init__(self, selector, fieldcomp=None):
self._fieldcomp = fieldcomp
# Returns None if no filter is applied, otherwise a predcicate function
self._selector = self.compile_selector_filter(selector)
return
def filter(self, entities, context=None):
"""
Iterate over selection of entities from supplied iterator, using the
selection specification supplied to the constructor of the current object.
entities is an iterator over entities from which selection is made
context is a dictionary of context values that may be referenced by
the selector in choosing entities to be returned.
If no filtering is applied, the supplied iterator is returned as-is.
"""
if self._selector:
entities = self._filter(entities, context)
return entities
def _filter(self, entities, context):
"""
Internal helper applies selector to entity iterator, returning a new iterator.
"""
for e in entities:
if self._selector(e, context):
yield e
return
def select_entity(self, entity, context={}):
"""
Apply selector to an entity, and returns True if the entity is selected
"""
if self._selector:
return self._selector(entity, context)
return True
@classmethod #@@ @staticmethod, no cls?
def parse_selector(cls, selector):
"""
Parse a selector and return list of tokens
Selector formats:
ALL (or blank) match any entity
<val1> == <val2> values are same
<val1> in <val2> second value is list containing 1st value,
or values are same, or val1 is None.
<val1> <name> <val2> invoke comparison method from supplied
FieldComparison object
<val1> and <val2> may be:
[<field-id>] refers to field in entity under test
<name>[<field-id>] refers to field of context value, or None if the
indicated context value or field is not defined.
"<string>" literal string value. Quotes within are escaped.
<field_id> values are URIs or CURIEs, using characters defined by RFC3986,
except "[" and "]"
RFC3986:
unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
reserved = gen-delims / sub-delims
gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
/ "*" / "+" / "," / ";" / "="
Parser uses pyparsing combinators (cf. http://pyparsing.wikispaces.com).
"""
def get_value(val_list):
if len(val_list) == 1:
return { 'type': 'literal', 'name': None, 'field_id': None, 'value': val_list[0] }
elif val_list[0] == '[':
return { 'type': 'entity', 'name': None, 'field_id': val_list[1], 'value': None }
elif val_list[1] == '[':
return { 'type': 'context', 'name': val_list[0], 'field_id': val_list[2], 'value': None }
else:
return { 'type': 'unknown', 'name': None, 'field_id': None, 'value': None }
p_name = Word(alphas+"_", alphanums+"_")
p_id = Word(alphas+"_@", alphanums+"_-.~:/?#@!$&'()*+,;=)")
p_val = ( Group( Literal("[") + p_id + Literal("]") )
| Group( p_name + Literal("[") + p_id + Literal("]") )
| Group( QuotedString('"', "\\") )
| Group( QuotedString("'", "\\") )
| Group( p_id )
)
p_comp = ( Literal("==") | Literal("in") | p_name )
p_selector = ( p_val + p_comp + p_val + StringEnd() )
try:
resultlist = p_selector.parseString(selector).asList()
except ParseException:
return None
resultdict = {}
if resultlist:
resultdict['val1'] = get_value(resultlist[0])
resultdict['comp'] = resultlist[1]
resultdict['val2'] = get_value(resultlist[2])
return resultdict
def compile_selector_filter(self, selector):
"""
Return filter for for testing entities matching a supplied selector.
Returns None if no selection is performed; i.e. all possible entities are selected.
Selector formats: see `parse_selector` above.
This function returns a filter function compiled from the supplied selector.
"""
def get_entity(field_id):
"Get field from entity tested by filter"
def get_entity_f(e, c):
return e.get(field_id, None)
return get_entity_f
#
def get_context(name, field_id):
"Get field from named value in current display context"
def get_context_f(e, c):
if name in c and c[name]:
return c[name].get(field_id, None)
return None
return get_context_f
#
def get_literal(value):
"Get literal value specified directly in selector string"
def get_literal_f(e, c):
return value
return get_literal_f
#
def get_val_f(selval):
if selval['type'] == "entity":
return get_entity(selval['field_id'])
elif selval['type'] == "context":
return get_context(selval['name'], selval['field_id'])
elif selval['type'] == "literal":
return get_literal(selval['value'])
else:
msg = "Unrecognized value type from selector (%s)"%selval['type']
raise ValueError(msg)
assert False, "Unrecognized value type from selector"
#
def match_eq(v1f, v2f):
def match_eq_f(e, c):
return v1f(e, c) == v2f(e, c)
return match_eq_f
#
def match_in(v1f, v2f):
def match_in_f(e, c):
v1 = v1f(e, c)
if not v1: return True
v2 = v2f(e, c)
if isinstance(v2, list):
return v1 in v2
return v1 == v2
return match_in_f
#
def match_subtype(v1f, v2f):
def match_subtype_f(e, c):
return self._fieldcomp.subtype(v1f(e, c), v2f(e, c))
return match_subtype_f
#
if selector in {None, "", "ALL"}:
return None
sel = self.parse_selector(selector)
if not sel:
msg = "Unrecognized selector syntax (%s)"%selector
raise ValueError(msg)
v1f = get_val_f(sel['val1'])
v2f = get_val_f(sel['val2'])
if sel['comp'] == "==":
return match_eq(v1f, v2f)
if sel['comp'] == "in":
return match_in(v1f, v2f)
if sel['comp'] == "subtype":
return match_subtype(v1f, v2f)
# Drop through: raise error
msg = "Unrecognized entity selector (%s)"%selector
raise ValueError(msg)
# -------------------------------------------------------------------
# FieldComparison
# -------------------------------------------------------------------
class FieldComparison(object):
"""
Logic for comparing fields using additional context information not available
directly to 'EntitySelector'
"""
def __init__(self, coll):
super(FieldComparison, self).__init__()
self._coll = coll
self._site = coll.get_site()
return
def get_uri_type_info(self, type_uri):
"""
Return typeinfo corresponding to the supplied type URI
"""
t = self._coll.get_uri_type(type_uri)
return t and EntityTypeInfo(self._coll, t.get_id())
def subtype(self, type1_uri, type2_uri):
"""
Returns True if the first type is a subtype of the second type, where both
types are supplied as type URIs. Returns True if both URIs are the same.
If type1_uri is not specified, assume no restriction.
If type2_uri is not specified, assume it does not satisfy the restriction.
"""
# log.info("FieldComparison.subtype(%s, %s)"%(type1_uri, type2_uri))
if not type2_uri or (type1_uri == type2_uri):
return True
if not type1_uri:
return False
type1_info = self.get_uri_type_info(type1_uri)
type1_supertype_uris = (type1_info and type1_info.get_all_type_uris()) or []
# log.info("FieldComparison.subtype: type1_uris (supertypes) %r"%(type1_uris,))
return type2_uri in type1_supertype_uris
if __name__ == "__main__":
import doctest
doctest.testmod()
# End.
| [
"logging.getLogger",
"pyparsing.QuotedString",
"annalist.models.entitytypeinfo.EntityTypeInfo",
"annalist.util.valid_id",
"pyparsing.Group",
"pyparsing.Word",
"utils.py3porting.is_string",
"doctest.testmod",
"pyparsing.Literal",
"pyparsing.StringEnd"
] | [((406, 433), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (423, 433), False, 'import logging\n'), ((20952, 20969), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (20967, 20969), False, 'import doctest\n'), ((2941, 2981), 'annalist.models.entitytypeinfo.EntityTypeInfo', 'EntityTypeInfo', (['self._coll', 'supertype_id'], {}), '(self._coll, supertype_id)\n', (2955, 2981), False, 'from annalist.models.entitytypeinfo import EntityTypeInfo\n'), ((4201, 4236), 'annalist.models.entitytypeinfo.EntityTypeInfo', 'EntityTypeInfo', (['self._coll', 'type_id'], {}), '(self._coll, type_id)\n', (4215, 4236), False, 'from annalist.models.entitytypeinfo import EntityTypeInfo\n'), ((15403, 15438), 'pyparsing.Word', 'Word', (["(alphas + '_')", "(alphanums + '_')"], {}), "(alphas + '_', alphanums + '_')\n", (15407, 15438), False, 'from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException\n'), ((15456, 15512), 'pyparsing.Word', 'Word', (["(alphas + '_@')", '(alphanums + "_-.~:/?#@!$&\'()*+,;=)")'], {}), '(alphas + \'_@\', alphanums + "_-.~:/?#@!$&\'()*+,;=)")\n', (15460, 15512), False, 'from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException\n'), ((2770, 2792), 'annalist.util.valid_id', 'valid_id', (['supertype_id'], {}), '(supertype_id)\n', (2778, 2792), False, 'from annalist.util import valid_id, extract_entity_id\n'), ((5114, 5152), 'annalist.models.entitytypeinfo.EntityTypeInfo', 'EntityTypeInfo', (['self._coll', 'subtype_id'], {}), '(self._coll, subtype_id)\n', (5128, 5152), False, 'from annalist.models.entitytypeinfo import EntityTypeInfo\n'), ((15787, 15798), 'pyparsing.Group', 'Group', (['p_id'], {}), '(p_id)\n', (15792, 15798), False, 'from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException\n'), ((15936, 15947), 'pyparsing.StringEnd', 'StringEnd', ([], {}), '()\n', (15945, 15947), False, 'from pyparsing import Word, QuotedString, 
Literal, Group, Empty, StringEnd, ParseException\n'), ((10108, 10122), 'utils.py3porting.is_string', 'is_string', (['val'], {}), '(val)\n', (10117, 10122), False, 'from utils.py3porting import is_string, to_unicode\n'), ((15847, 15860), 'pyparsing.Literal', 'Literal', (['"""=="""'], {}), "('==')\n", (15854, 15860), False, 'from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException\n'), ((15863, 15876), 'pyparsing.Literal', 'Literal', (['"""in"""'], {}), "('in')\n", (15870, 15876), False, 'from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException\n'), ((15738, 15761), 'pyparsing.QuotedString', 'QuotedString', (['"""\'"""', '"""\\\\"""'], {}), '("\'", \'\\\\\')\n', (15750, 15761), False, 'from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException\n'), ((15682, 15705), 'pyparsing.QuotedString', 'QuotedString', (['"""\\""""', '"""\\\\"""'], {}), '(\'"\', \'\\\\\')\n', (15694, 15705), False, 'from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException\n'), ((15561, 15573), 'pyparsing.Literal', 'Literal', (['"""]"""'], {}), "(']')\n", (15568, 15573), False, 'from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException\n'), ((15637, 15649), 'pyparsing.Literal', 'Literal', (['"""]"""'], {}), "(']')\n", (15644, 15649), False, 'from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException\n'), ((15539, 15551), 'pyparsing.Literal', 'Literal', (['"""["""'], {}), "('[')\n", (15546, 15551), False, 'from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException\n'), ((15615, 15627), 'pyparsing.Literal', 'Literal', (['"""["""'], {}), "('[')\n", (15622, 15627), False, 'from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException\n')] |
import argparse
import h5py
import sys
import os
from savu.version import __version__
class NXcitation(object):
def __init__(self, description, doi, endnote, bibtex):
self.description = description.decode('UTF-8')
self.doi = doi.decode('UTF-8')
self.endnote = endnote.decode('UTF-8')
self.bibtex = bibtex.decode('UTF-8')
def get_bibtex_ref(self):
return self.bibtex.split(',')[0].split('{')[1] \
if self.bibtex else ""
def get_first_author(self):
parts = self.endnote.split('\n')
for part in parts:
if part.startswith("%A"):
return part.replace("%A", "").strip()
def get_date(self):
parts = self.endnote.split('\n')
for part in parts:
if part.startswith("%D"):
return part.replace("%D", "").strip()
def get_description_with_author(self):
return "%s \\ref{%s}(%s, %s)" % (self.description,
self.get_bibtex_ref(),
self.get_first_author(),
self.get_date())
class NXcitation_manager(object):
def __init__(self):
self.NXcite_list = []
def add_citation(self, citation):
self.NXcite_list.append(citation)
def get_full_endnote(self):
return "\n\n".join([cite.endnote for cite in self.NXcite_list])
def get_full_bibtex(self):
return "\n".join([cite.bibtex for cite in self.NXcite_list])
def get_description_with_citations(self):
return ". ".join([cite.get_description_with_author() for cite in
self.NXcite_list])
def __str__(self):
return "\nDESCRIPTION\n%s\n\nBIBTEX\n%s\n\nENDNOTE\n%s" % \
(self.get_description_with_citations(), self.get_full_bibtex(),
self.get_full_endnote())
class NXciteVisitor(object):
def __init__(self):
self.citation_manager = NXcitation_manager()
def _visit_NXcite(self, name, obj):
if "NX_class" in list(obj.attrs.keys()):
if obj.attrs["NX_class"] in ["NXcite"]:
citation = NXcitation(obj['description'][0],
obj['doi'][0],
obj['endnote'][0],
obj['bibtex'][0])
self.citation_manager.add_citation(citation)
def get_citation_manager(self, nx_file, entry):
nx_file[entry].visititems(self._visit_NXcite)
return self.citation_manager
def __check_input_params(args):
""" Check for required input arguments.
"""
if len(args) != 2:
print("Input and output filename need to be specified")
print("Exiting with error code 1 - incorrect number of inputs")
sys.exit(1)
if not os.path.exists(args[0]):
print(("Input file '%s' does not exist" % args[0]))
print("Exiting with error code 2 - Input file missing")
sys.exit(2)
def __option_parser(doc=True):
""" Option parser for command line arguments.
"""
version = "%(prog)s " + __version__
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='Input data file.')
parser.add_argument('out_file', help='Output file to extract citation \
information to.')
parser.add_argument('--version', action='version', version=version)
return parser if doc==True else parser.parse_args()
def main(in_file=None, quiet=False):
    """Extract citation data from *in_file*, falling back to CLI arguments."""
    if in_file:
        # Called programmatically (e.g. from tomo_recon.py): write next to input.
        log_folder = os.path.join(os.path.dirname(in_file), "run_log")
        out_file = os.path.join(log_folder, "citations.txt")
    else:
        # Called from the command line: take both paths from argv.
        args = __option_parser(doc=False)
        in_file, out_file = args.in_file, args.out_file
    infile = h5py.File(in_file, 'r')
    citation_manager = NXciteVisitor().get_citation_manager(infile, "/")
    if citation_manager is not None:
        with open(out_file, 'w') as outfile:
            outfile.write(str(citation_manager))
    if not quiet:
        print("Extraction complete")
if __name__ == '__main__':
    main()
| [
"os.path.exists",
"argparse.ArgumentParser",
"os.path.join",
"h5py.File",
"os.path.dirname",
"sys.exit"
] | [((3175, 3200), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3198, 3200), False, 'import argparse\n'), ((3878, 3901), 'h5py.File', 'h5py.File', (['in_file', '"""r"""'], {}), "(in_file, 'r')\n", (3887, 3901), False, 'import h5py\n'), ((2838, 2849), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2846, 2849), False, 'import sys\n'), ((2862, 2885), 'os.path.exists', 'os.path.exists', (['args[0]'], {}), '(args[0])\n', (2876, 2885), False, 'import os\n'), ((3019, 3030), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (3027, 3030), False, 'import sys\n'), ((3698, 3739), 'os.path.join', 'os.path.join', (['log_folder', '"""citations.txt"""'], {}), "(log_folder, 'citations.txt')\n", (3710, 3739), False, 'import os\n'), ((3643, 3667), 'os.path.dirname', 'os.path.dirname', (['in_file'], {}), '(in_file)\n', (3658, 3667), False, 'import os\n')] |
import time
import pymysql # for pulling UCSC data
import pandas as pd
from pathlib import Path
import logging
# app
from .progress_bar import * # tqdm, context-friendly
# Module-level logger; numexpr chatter is silenced to WARNING.
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
logging.getLogger('numexpr').setLevel(logging.WARNING)
# these login stats for the public database should not change.
HOST = 'genome-mysql.soe.ucsc.edu'
USER = 'genome'
DB = 'hg38'
# cpg related table schema: http://genome.ucsc.edu/cgi-bin/hgTables?db=hg38&hgta_group=regulation&hgta_track=cpgIslandExt&hgta_table=cpgIslandExt&hgta_doSchema=describe+table+schema
# UCSC gene-annotation tables accepted by fetch_genes() for the `ref` kwarg.
possible_tables = [
    'refGene', # cruzdb used this in examples -- 88,819 genes
    'knownGene', # 232,184 -- genes and pseudo genes too (use TranscriptType == 'coding_protein')
    'ncbiRefSeq', # 173,733 genes -- won't have matching descriptions; no kgXref shared key.
    # 'wgEncodeGencodeBasicV38', # 177k genes -- doesn't work
]
# Maps UCSC column names to the DMR-region column they are compared against.
table_mapper = {
    'txStart': 'chromStart', # knownGene transcription start, refGene start, ncbiRefSeq start
    'txEnd': 'chromStart',
}
# Module-level MySQL connection; created lazily in fetch_genes() and reused.
conn = None
def fetch_genes(dmr_regions_file=None, tol=250, ref=None, tissue=None, sql=None,
    save=True, verbose=False, use_cached=True, no_sync=False, genome_build=None,
    host=HOST, user=USER, password='', db=DB):
    """find genes that are adjacent to significantly different CpG regions provided.
    Summary:
        fetch_genes() annotates the DMR region output file, using the UCSC Genome Browser database as a reference
        as to what genes are nearby. This is an exploratory tool, as there are many versions of the human genome
        that map genes to slightly different locations.
    fetch_genes() is an EXPLORATORY tool and makes a number of simplicifications:
        * the DMR regions file saves one CpG probe name and location, even though clusters of probes may map to
        that nearby area.
        * it measures the distance from the start position of the one representative probe per region to any nearby
        genes, using the `tol`erance parameter as the cutoff. Tolerance is the max number of base pairs of separation
        between the probe sequence start and the gene sequence start for it to be considered as a match.
        * The default `tol`erance is 250, but that is arbitrary. Increase it to expand the search area, or decrease it
        to be more conservative. Remember that Illumina CpG probe sequences are 50 base pairs long, so 100 is nearly
        overlapping. 300 or 500 would also be reasonable.
        * "Adjacent" in the linear sequence may not necessarily mean that the CpG island is FUNCTIONALLY coupled to the
        regulatory or coding region of the nearby protein. DNA superstructure can position regulatory elements near to
        a coding region that are far upstream or downstream from the mapped position, and there is no easy way to identify
        "adjacent" in this sense.
        * Changing the `tol`erance, or the reference database will result major differences in the output, and thus
        one's interpretation of the same data.
        * Before interpreting these "associations" you should also consider filtering candidate genes by
        specific cell types where they are expressed. You should know the tissue from which your samples originated.
        And filter candidate genes to exclude those that are only expressed in your tissue during development,
        if your samples are from adults, and vice versa.
    Arguments:
        dmr_regions_file:
            pass in the output file DataFrame or FILEPATH from DMR function.
            Omit if you specify the `sql` kwarg instead.
        ref: default is `refGene`
            use one of possible_tables for lookup:
            - 'refGene' -- 88,819 genes -- default table used in comb-b and cruzdb packages.
            - 'knownGene' -- 232,184 genes -- pseudo genes too (the "WHere TranscriptType == 'coding_protein'" clause would work, but these fields are missing from the data returned.)
            - 'ncbiRefSeq' -- 173,733 genes -- this table won't have gene descriptions, because it cannot be joined with the 'kgXref' (no shared key).
            Additionally, 'gtexGeneV8' is used for tissue-expression levels. Pseudogenes are ommited using the "WHERE score > 0" clause in the SQL.
        tol: default 250
            +/- this many base pairs consistutes a gene "related" to a CpG region provided.
        tissue: str
            if specified, adds additional columns to output with the expression levels for identified genes
            in any/all tissue(s) that match the keyword. (e.g. if your methylation samples are whole blood,
            specify `tissue=blood`) For all 54 tissues, use `tissue=all`
        genome_build: (None, NEW, OLD)
            Only the default human genome build, hg38, is currently supported. Even though many other builds are available
            in the UCSC database, most tables do not join together in the same way.
        use_cached:
            If True, the first time it downloads a dataset from UCSC Genome Browser, it will save to disk
            and use that local copy thereafter. To force it to use the online copy, set to False.
        no_sync:
            methylize ships with a copy of the relevant UCSC gene browser tables, and will auto-update these
            every month. If you want to run this function without accessing this database, you can avoid updating
            using the `no_sync=True` kwarg.
        host, user, password, db:
            Internal database connections for UCSC server. You would only need to mess with these of the server domain changes
            from the current hardcoded value {HOST}. Necessary for tables to be updated and for `tissue` annotation.
        sql:
            a DEBUG mode that bypasses the function and directly queries the database for any information the user wants.
            Be sure to specify the complete SQL statement, including the ref-table (e.g. refGene or ncbiRefSeq).
    .. note::
       This method flushes cache periodically. After 30 days, it deletes cached reference gene tables and re-downloads.
    """
    if verbose:
        logging.basicConfig(level=logging.INFO)
    # Accept either an in-memory DataFrame or a CSV path for the DMR regions.
    if isinstance(dmr_regions_file, pd.DataFrame):
        regions = dmr_regions_file
        reqd_regions = set(['name', 'chromStart'])
        if set(regions.columns) & reqd_regions != reqd_regions:
            raise KeyError(f"Your file of CpG regions must have these columns, at a minimum: {reqd_regions}")
        LOGGER.info(f"Loaded {regions.shape[0]} CpG regions.")
    elif not sql and dmr_regions_file is None:
        raise Exception("Either provide a path to the DMR stats file or a sql query.")
    elif not sql:
        regions = pd.read_csv(dmr_regions_file) #.sort_values('z_p')
        reqd_regions = set(['name', 'chromStart'])
        if set(regions.columns) & reqd_regions != reqd_regions:
            raise KeyError(f"Your file of CpG regions must have these columns, at a minimum: {reqd_regions}")
        LOGGER.info(f"Loaded {regions.shape[0]} CpG regions from {dmr_regions_file}.")
    if not ref:
        ref = possible_tables[0] # refGene
    global conn # allows function to reuse the same connection
    if conn is None and no_sync is False:
        conn = pymysql.connect(host=host, user=user, password=password, db=db, cursorclass=pymysql.cursors.DictCursor)
    # DEBUG passthrough: run the raw query and return the rows directly.
    # NOTE(review): if `sql` is given together with no_sync=True and no prior
    # connection exists, `conn` is still None here and .cursor() would fail.
    if sql:
        with conn.cursor() as cur:
            cur.execute(sql)
            return list(cur.fetchall())
    # these will be packed into the output CSV saved, but a nested dataframe is returned.
    matches = {i:[] for i in regions.name} # cpg name --> [gene names]
    distances = {i:[] for i in regions.name}
    descriptions = {i:[] for i in regions.name}
    # fetch WHOLE table needed, unless using cache
    package_path = Path(__file__).parent
    cache_file = Path(package_path, 'data', f"{ref}.pkl")
    cache_available = cache_file.exists()
    # don't use cache if over 1 month old (2629746 s ~= 1 month):
    if use_cached and cache_available and no_sync is False:
        last_download = cache_file.stat().st_ctime
        if time.time() - last_download > 2629746:
            LOGGER.info(f"Cached genome table is over 1 month old; re-downloading from UCSC.")
            cache_file.unlink()
            cache_available = False
    if use_cached and cache_available:
        genes = pd.read_pickle(cache_file)
        LOGGER.info(f"""Using cached `{ref}`: {Path(package_path, 'data', f"{ref}.pkl")} with ({len(genes)}) genes""")
    elif no_sync is False: # download it
        LOGGER.info(f"Downloading {ref}")
        # chrom, txStart, txEnd; all 3 tables have name, but knownGene lacks a name2.
        if ref == 'knownGene':
            sql = f"""SELECT name as name2, txStart, txEnd, description FROM {ref} LEFT JOIN kgXref ON kgXref.kgID = {ref}.name;"""
        else:
            sql = f"""SELECT name, name2, txStart, txEnd, description FROM {ref} LEFT JOIN kgXref ON kgXref.refseq = {ref}.name;"""
        with conn.cursor() as cur:
            cur.execute(sql)
            genes = list(cur.fetchall())
        if use_cached:
            import pickle
            with open(Path(package_path, 'data', f"{ref}.pkl"),'wb') as f:
                pickle.dump(genes, f)
            LOGGER.info(f"Cached {Path(package_path, 'data', f'{ref}.pkl')} on first use, with {len(genes)} genes")
    else:
        # NOTE(review): on this path (no_sync=True without a usable cache)
        # `genes` was never assigned, so len(genes) raises NameError -- confirm.
        LOGGER.info(f"Using {ref} with {len(genes)} genes")
    # compare two dataframes and calc diff.
    # need to loop here: but prob some matrix way of doing this faster
    done = 0
    for gene in tqdm(genes, total=len(genes), desc="Mapping genes"):
        # All regions whose representative probe start is within `tol` bp of
        # this gene's transcription start.
        closeby = regions[ abs(regions.chromStart - gene['txStart']) < tol ]
        if len(closeby) > 0:
            for idx,item in closeby.iterrows():
                matches[item['name']].append(gene['name2'])
                dist = item['chromStart'] - gene['txStart']
                distances[item['name']].append(dist)
                desc = gene['description'].decode('utf8') if gene['description'] != None else ''
                descriptions[item['name']].append(desc)
                done += 1
                #if done % 1000 == 0:
                #    LOGGER.info(f"[{done} matches]")
    # also, remove duplicate gene matches for the same region (it happens a lot)
    matches = {k: ','.join(set(v)) for k,v in matches.items()}
    distances = {k: ','.join(set([str(j) for j in v])) for k,v in distances.items()}
    descriptions = {k: ' | '.join(set(v)) for k,v in descriptions.items()}
    # tidying up some of the deduping
    def _tidy(desc):
        # Strip a stray leading/trailing pipe left over from the ' | ' join.
        if desc.startswith('|'):
            desc = desc.lstrip('|')
        if desc.endswith('|'):
            desc = desc.rstrip('|')
        return desc
    descriptions = {k: _tidy(desc) for k,desc in descriptions.items()}
    regions['genes'] = regions['name'].map(matches)
    regions['distances'] = regions['name'].map(distances)
    regions['descriptions'] = regions['name'].map(descriptions)
    # add column(s) for gene tissue expression
    if tissue != None:
        # tissue == 'all'
        # Recursive calls below use the raw-`sql` passthrough mode.
        tissues = fetch_genes(sql="select * from hgFixed.gtexTissueV8;")
        sorted_tissues = [i['name'] for i in tissues]
        gene_names = [i.split(',') for i in list(regions['genes']) if i != '']
        N_regions_with_multiple_genes = len([i for i in gene_names if len(i) > 1])
        if N_regions_with_multiple_genes > 0:
            LOGGER.warning(f"{N_regions_with_multiple_genes} of the {len(gene_names)} regions have multiple genes matching in the same region, and output won't show tissue expression levels.")
        # NOTE(review): a single-gene tuple renders as ('X',) in the IN clause
        # (trailing comma) -- verify MySQL accepts that syntax.
        gene_names = tuple([item for sublist in gene_names for item in sublist])
        gtex = fetch_genes(sql=f"select name, expScores from gtexGeneV8 WHERE name in {gene_names} and score > 0;")
        if len(gtex) > 0:
            # convert to a lookup dict of gene name: list of tissue scores
            gtex = {item['name']: [float(i) for i in item['expScores'].decode().split(',') if i != ''] for item in gtex}
            # add tissue names
            if len(tissues) != len(list(gtex.values())[0]):
                LOGGER.error(f"GTEx tissue names and expression levels mismatch.")
            else:
                for gene, expScores in gtex.items():
                    labeled_scores = dict(zip(sorted_tissues, expScores))
                    gtex[gene] = labeled_scores
                # to merge, create a new dataframe with matching genes names as index.
                tissue_df = pd.DataFrame.from_dict(data=gtex, orient='index')
                if tissue != 'all':
                    # Case-insensitive keyword match against tissue column names.
                    matchable = dict(zip([k.lower() for k in list(tissue_df.columns)], list(tissue_df.columns)))
                    keep_columns = [col_name for item,col_name in matchable.items() if tissue.lower() in item]
                    if keep_columns == []:
                        LOGGER.warning(f"No GTEx tissue types matched: {tissue}; returning all tissues instead.")
                    else:
                        tissue_df = tissue_df[keep_columns]
                # this merge will ONLY WORK if there is just one gene listed in the gene column
                regions = regions.merge(tissue_df, how='left', left_on='genes', right_index=True)
    #finaly, add column to file and save
    if save:
        dmr_regions_stem = str(dmr_regions_file).replace('.csv','')
        outfile = f"{dmr_regions_stem}_genes.csv"
        regions.to_csv(Path(outfile))
        LOGGER.info(f"Wrote {outfile}")
    return regions
"""
tissue='all' (for big table) or tissue='blood' for one extra column
TODO -- incorporate the GTEx tables (expression by tissue) if user specifies one of 54 tissue types covered.
gtexGeneV8 x gtexTissue
"hgFixed.gtexTissue lists each of the 53 tissues in alphabetical order, corresponding to the comma separated expression values in gtexGene."
works: tissue_lookup = m.fetch_genes('', sql="select * from hgFixed.gtexTissueV8;")
then match tissue keyword kwarg against 'description' field and use 'name' for colname
note that expScores is a list of 54 numbers (expression levels).
chrom chromStart chromEnd name score strand geneId geneType expCount expScores
{'chrom': 'chr1',
'chromEnd': 29806,
'chromStart': 14969,
'expCount': 53,
'expScores': b'6.886,6.083,4.729,5.91,6.371,6.007,8.768,4.202,4.455,4.64,10'
b'.097,10.619,6.108,5.037,5.018,4.808,4.543,4.495,5.576,4.57,8'
b'.275,4.707,2.55,9.091,9.885,8.17,7.392,7.735,5.353,7.124,8.6'
b'17,3.426,2.375,7.669,3.826,7.094,6.365,3.263,10.723,10.507,4'
b'.843,9.193,13.25,11.635,11.771,8.641,10.448,6.522,9.313,10.3'
b'04,9.987,9.067,6.12,',
'geneId': 'ENSG00000227232.4',
'geneType': 'unprocessed_pseudogene',
'name': 'WASH7P',
'score': 427,
'strand': '-'},
"""
| [
"logging.getLogger",
"pandas.read_pickle",
"logging.basicConfig",
"pickle.dump",
"pandas.read_csv",
"pathlib.Path",
"pymysql.connect",
"pandas.DataFrame.from_dict",
"time.time"
] | [((180, 207), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (197, 207), False, 'import logging\n'), ((208, 247), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (227, 247), False, 'import logging\n'), ((7686, 7726), 'pathlib.Path', 'Path', (['package_path', '"""data"""', 'f"""{ref}.pkl"""'], {}), "(package_path, 'data', f'{ref}.pkl')\n", (7690, 7726), False, 'from pathlib import Path\n'), ((248, 276), 'logging.getLogger', 'logging.getLogger', (['"""numexpr"""'], {}), "('numexpr')\n", (265, 276), False, 'import logging\n'), ((5974, 6013), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (5993, 6013), False, 'import logging\n'), ((7101, 7209), 'pymysql.connect', 'pymysql.connect', ([], {'host': 'host', 'user': 'user', 'password': 'password', 'db': 'db', 'cursorclass': 'pymysql.cursors.DictCursor'}), '(host=host, user=user, password=password, db=db, cursorclass\n =pymysql.cursors.DictCursor)\n', (7116, 7209), False, 'import pymysql\n'), ((7647, 7661), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (7651, 7661), False, 'from pathlib import Path\n'), ((8191, 8217), 'pandas.read_pickle', 'pd.read_pickle', (['cache_file'], {}), '(cache_file)\n', (8205, 8217), True, 'import pandas as pd\n'), ((13310, 13323), 'pathlib.Path', 'Path', (['outfile'], {}), '(outfile)\n', (13314, 13323), False, 'from pathlib import Path\n'), ((6558, 6587), 'pandas.read_csv', 'pd.read_csv', (['dmr_regions_file'], {}), '(dmr_regions_file)\n', (6569, 6587), True, 'import pandas as pd\n'), ((7934, 7945), 'time.time', 'time.time', ([], {}), '()\n', (7943, 7945), False, 'import time\n'), ((12367, 12416), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', ([], {'data': 'gtex', 'orient': '"""index"""'}), "(data=gtex, orient='index')\n", (12389, 12416), True, 'import pandas as pd\n'), ((8265, 8305), 'pathlib.Path', 'Path', (['package_path', 
'"""data"""', 'f"""{ref}.pkl"""'], {}), "(package_path, 'data', f'{ref}.pkl')\n", (8269, 8305), False, 'from pathlib import Path\n'), ((9060, 9081), 'pickle.dump', 'pickle.dump', (['genes', 'f'], {}), '(genes, f)\n', (9071, 9081), False, 'import pickle\n'), ((8991, 9031), 'pathlib.Path', 'Path', (['package_path', '"""data"""', 'f"""{ref}.pkl"""'], {}), "(package_path, 'data', f'{ref}.pkl')\n", (8995, 9031), False, 'from pathlib import Path\n'), ((9120, 9160), 'pathlib.Path', 'Path', (['package_path', '"""data"""', 'f"""{ref}.pkl"""'], {}), "(package_path, 'data', f'{ref}.pkl')\n", (9124, 9160), False, 'from pathlib import Path\n')] |
# encoding: utf-8
"""
@author: BrikerMan
@contact: <EMAIL>
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: test.py.py
@time: 2019-01-25 14:43
"""
import unittest
from tests import *
from kashgari.utils.logger import init_logger
# Configure kashgari's logging once at import time.
init_logger()
if __name__ == '__main__':
    # Discover and run the test cases pulled in via `from tests import *`.
    unittest.main()
| [
"unittest.main",
"kashgari.utils.logger.init_logger"
] | [((255, 268), 'kashgari.utils.logger.init_logger', 'init_logger', ([], {}), '()\n', (266, 268), False, 'from kashgari.utils.logger import init_logger\n'), ((301, 316), 'unittest.main', 'unittest.main', ([], {}), '()\n', (314, 316), False, 'import unittest\n')] |
from itertools import count, tee
class Bouncy:
    """Finds and prints the first number at which the running proportion of
    "bouncy" numbers (digits neither non-decreasing nor non-increasing)
    equals ``porcentage`` percent.
    """

    def __init__(self, porcentage):
        """
        print the number bouncy
        :type porcentage: int -> this is porcentage of the bouncy
        """
        target = porcentage / 100
        candidates = count(1)
        bouncy_totals = self.sum_number(
            float(self.is_rebound(value)) for value in count(1))
        answer = next(
            candidate
            for candidate, bouncy_so_far in zip(candidates, bouncy_totals)
            if bouncy_so_far / candidate == target
        )
        print(answer)

    def pairs(self, iterable):
        """Yield consecutive (previous, current) pairs from *iterable*."""
        first, second = tee(iterable)   # two independent iterators
        next(second, None)               # advance the second by one
        return zip(first, second)

    def digits(self, number):
        """Split *number* into the list of its decimal digits."""
        return [int(ch) for ch in str(number)]

    def increase(self, number):
        """True when the digits never go down from left to right."""
        return all(a <= b for a, b in self.pairs(self.digits(number)))

    def decrease(self, number):
        """True when the digits never go up from left to right."""
        return all(a >= b for a, b in self.pairs(self.digits(number)))

    def is_rebound(self, number):
        """True when *number* is bouncy: neither increasing nor decreasing."""
        return not (self.increase(number) or self.decrease(number))

    def sum_number(self, iterable):
        """Yield the running total of *iterable*."""
        running = 0
        for value in iterable:
            running += value
            yield running
test = Bouncy(99)  # runs the search at 99% on import -- prints the answer
| [
"itertools.count",
"itertools.tee"
] | [((222, 230), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (227, 230), False, 'from itertools import count, tee\n'), ((767, 780), 'itertools.tee', 'tee', (['iterable'], {}), '(iterable)\n', (770, 780), False, 'from itertools import count, tee\n'), ((316, 324), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (321, 324), False, 'from itertools import count, tee\n')] |
import tkinter as tk
class View():
    """Minimal tkinter view that renders a 2-D grid of values as labels."""

    def __init__(self):
        # Keep a reference to the root window so it cannot be garbage collected.
        self.window = tk.Tk()
        self.frame = tk.Frame(master=self.window, width=200, height=200)
        self.frame.pack()

    def show_grid(self, grid):
        """Place one label per cell of *grid* at 50px spacing.

        Fix: the loop bounds were hard-coded to 4x4, which raised IndexError
        for smaller grids and silently truncated larger ones; iterate the
        actual grid instead (backward compatible for 4x4 input).
        """
        for i, row in enumerate(grid):
            for j, value in enumerate(row):
                label = tk.Label(master=self.frame, text=value)
                label.place(x=(50 * j) + 20, y=(50 * i) + 20)
| [
"tkinter.Tk",
"tkinter.Frame",
"tkinter.Label"
] | [((77, 84), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (82, 84), True, 'import tkinter as tk\n'), ((106, 152), 'tkinter.Frame', 'tk.Frame', ([], {'master': 'window', 'width': '(200)', 'height': '(200)'}), '(master=window, width=200, height=200)\n', (114, 152), True, 'import tkinter as tk\n'), ((293, 337), 'tkinter.Label', 'tk.Label', ([], {'master': 'self.frame', 'text': 'grid[i][j]'}), '(master=self.frame, text=grid[i][j])\n', (301, 337), True, 'import tkinter as tk\n')] |
import numpy as np
def eval_rerr(X, X_hat, X0=None):
"""
:param X: tensor, X0 or X0+noise
:param X_hat: output for apporoximation
:param X0: true signal, tensor
:return: the relative error = ||X- X_hat||_F/ ||X_0||_F
"""
if X0 is not None:
error = X0 - X_hat
return np.linalg.norm(error.reshape(np.size(error), 1), 'fro') / \
np.linalg.norm(X0.reshape(np.size(X0), 1), 'fro')
error = X - X_hat
return np.linalg.norm(error.reshape(np.size(error), 1), 'fro') / \
np.linalg.norm(X0.reshape(np.size(X), 1), 'fro') | [
"numpy.size"
] | [((493, 507), 'numpy.size', 'np.size', (['error'], {}), '(error)\n', (500, 507), True, 'import numpy as np\n'), ((561, 571), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (568, 571), True, 'import numpy as np\n'), ((339, 353), 'numpy.size', 'np.size', (['error'], {}), '(error)\n', (346, 353), True, 'import numpy as np\n'), ((407, 418), 'numpy.size', 'np.size', (['X0'], {}), '(X0)\n', (414, 418), True, 'import numpy as np\n')] |
"""helpers module
"""
import json
import pcap
import yaml
def get_adapters_names():
    """Finds all adapters on the system

    :return: A list of the network adapters available on the system
    """
    # Thin wrapper over libpcap's device enumeration.
    return pcap.findalldevs()
def config_loader_yaml(config_name):
    """Loads a .yml configuration file

    :param config_name: The path name of the yml configuration file
    :return: A dictionary of the configuration
    """
    with open(config_name, 'r') as f:
        # yaml.load() without an explicit Loader is deprecated (PyYAML >= 5.1)
        # and can construct arbitrary Python objects; safe_load builds only
        # standard YAML types, which is all a config file needs.
        return yaml.safe_load(f)
def log_message(queue, level, module_name, class_name, function_name, message):
    """Sends a message to a log worker process

    :param queue: A queue to send the message to
    :param level: A string identifying the level of the message (Either DEBUG, INFO, WARNING, ERROR, CRITICAL)
    :param module_name: A string identifying the source module of the message
    :param class_name: A string identifying the source class of the message
    :param function_name: A string identifying the source function of the message
    :param message: A string representing the message
    :return: None
    """
    # The queue transports a plain tuple so any process can consume it.
    queue.put((level, module_name, class_name, function_name, message))
| [
"pcap.findalldevs",
"yaml.load"
] | [((216, 234), 'pcap.findalldevs', 'pcap.findalldevs', ([], {}), '()\n', (232, 234), False, 'import pcap\n'), ((516, 537), 'yaml.load', 'yaml.load', (['config_yml'], {}), '(config_yml)\n', (525, 537), False, 'import yaml\n')] |
import spacy
def find_entities(input_phrase, language):
    """Run spaCy NER over *input_phrase* with the small model for *language*.

    Returns a list of dicts holding each entity's text, character offsets
    and label. Raises FileNotFoundError for an unsupported language code.
    """
    models = {
        'en': 'en_core_web_sm',
        'pl': 'pl_core_news_sm',
        'fr': 'fr_core_news_sm',
        'de': 'de_core_news_sm',
        'it': 'it_core_news_sm',
    }
    if language not in models:
        raise FileNotFoundError('model %s not found, please download' % language)
    nlp = spacy.load(models[language])
    doc = nlp(input_phrase)
    return [
        {'text': ent.text,
         'start_pos': ent.start_char,
         'end_pos': ent.end_char,
         'type': ent.label_}
        for ent in doc.ents
    ]
if __name__ == "__main__":
print(find_entities("As I had only one hour to write this on my old Dell computer, I am aware there is space for improvement.", 'en')) | [
"spacy.load"
] | [((264, 292), 'spacy.load', 'spacy.load', (['models[language]'], {}), '(models[language])\n', (274, 292), False, 'import spacy\n')] |
# -*- coding:utf-8 -*-
# Usage : python ~~.py
import sys
import os
import pickle
import collections
import pandas as pd
import numpy as np
from itertools import chain
from itertools import combinations
from itertools import compress
from itertools import product
from sklearn.metrics import accuracy_score
from multiprocessing import Pool
from multiprocessing import freeze_support
# Global Setting
DIR_UCI = '/mnt/data/uci'
# ------------------------------------------------------
# Rule Class
# ------------------------------------------------------
class Rule:
    """Plain data holder for one association rule and its statistics."""

    def __init__(self):
        self.value = list()        # antecedent items
        self.consequent = list()   # consequent (decision class) items
        self.strength = float()
        self.support = list()      # indices of training records the rule covers
        self.support_v = float()   # support as a fraction of the training set
        self.conf = float()        # rule confidence

    def setValue(self, values):
        self.value = values

    def setConsequent(self, consequents):
        self.consequent = consequents

    def setStrength(self, strength):
        self.strength = strength

    def setSupport(self, supports):
        self.support = supports

    def setSupportV(self, support_v):
        self.support_v = support_v

    def setConf(self, confidence):
        self.conf = confidence

    def getValue(self):
        return self.value

    def getConsequent(self):
        return self.consequent

    def getStrength(self):
        return self.strength

    def getSupport(self):
        return self.support

    def getSupportV(self):
        return self.support_v

    def getSupportD(self):
        # NOTE: self.support is a list, so this replicates it len(self.value)
        # times rather than multiplying numbers -- behavior preserved as-is.
        return self.support * len(self.value)

    def getConf(self):
        return self.conf

    def output(self):
        """Dump every field to stdout (debug helper)."""
        for label, field in (("value", self.value),
                             ("consequent", self.consequent),
                             ("strength", self.strength),
                             ("support", self.support),
                             ("support_v", self.support_v),
                             ("conf", self.conf)):
            print(label + ":" + str(field))
# ======================================================
# rules load and save
# ======================================================
def loadPickleRules(fullpath_filename):
    """Deserialise a list of Rule objects from *fullpath_filename*."""
    with open(fullpath_filename, mode='rb') as inputfile:
        return pickle.load(inputfile)
def savePickleRules(rules, fullpath_filename):
    """Serialise *rules* to *fullpath_filename* with the newest pickle protocol."""
    protocol = pickle.HIGHEST_PROTOCOL
    with open(fullpath_filename, mode='wb') as outfile:
        pickle.dump(rules, outfile, protocol)
# ========================================
# Load the saved rules, then recompute confidence and the set of
# training records that each rule covers.
# ========================================
def updateConfidenceSupport(FILENAME, iter1, iter2, min_sup):
    """Recompute confidence/support for one (dataset, fold, min_sup) rule file.

    Reads the pickled rules and the matching training split from DIR_UCI,
    updates each Rule in place, then overwrites the same pickle file.
    """
    # rules load
    fullpath_rulename = DIR_UCI+'/'+FILENAME+'/FPGrowth/rules/rules-'+str(min_sup)+'_'+str(iter1)+'-'+str(iter2)+'.pkl'
    rules = loadPickleRules(fullpath_rulename)
    # train data load: one space-separated record per line
    fullpath_train = DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.txt'
    data = []
    with open(fullpath_train) as inputfile:
        for line in inputfile:
            data.append(line.strip().split(' '))
    # confidence and support and support_v
    for rule in rules:
        # bunshi ("numerator"): records matching antecedent AND consequent
        bunshi = [rule.getConsequent() in record and all(x in record for x in rule.getValue()) for record in data]
        # bunbo ("denominator"): records matching the antecedent only
        bunbo = [all(x in record for x in rule.getValue()) for record in data]
        confidence = sum(bunshi) / sum(bunbo)
        rule.setConf(confidence)
        # indices of covered records; support_v is their training-set fraction
        support = [i for i, x in enumerate(bunshi) if x]
        rule.setSupport(support)
        support_v = len(support) / len(data)
        rule.setSupportV(support_v)
    # update save
    savePickleRules(rules, fullpath_rulename)
# ========================================
# Fan a task function out over worker processes
# ========================================
def multi_main(proc, FILENAME, FUN, **kargs):
    """Dispatch FUN over a Pool of *proc* workers, building its argument list."""
    pool = Pool(proc)
    # Only the FPGrowth_LERS update task is known to this dispatcher.
    if FUN != updateConfidenceSupport:
        print("I dont' know the function.")
        return
    min_sup_range = kargs['min_sup_range']
    multiargs = [(FILENAME, iter1, iter2, min_sup)
                 for iter1, iter2, min_sup
                 in product(range(1, 2), range(1, 11), min_sup_range)]
    print(multiargs)
    pool.starmap(FUN, multiargs)
# ======================================================
# main
# ======================================================
if __name__ == "__main__":
    # Dataset selection
    FILENAME = "adult_cleansing2"
    #FILENAME = "default_cleansing"
    #FILENAME = "german_credit_categorical"
    # Decision-class labels (kept for reference, unused here)
    #classes = ['D1', 'D2']
    # support range
    min_sup_range = [0.05, 0.10, 0.15, 0.20, 0.25]
    # Evaluate all folds in parallel across 32 worker processes
    proc = 32
    freeze_support()
    FUN = updateConfidenceSupport
    multi_main(proc, FILENAME, FUN, min_sup_range = min_sup_range)
| [
"pickle.dump",
"multiprocessing.freeze_support",
"pickle.load",
"multiprocessing.Pool"
] | [((3712, 3722), 'multiprocessing.Pool', 'Pool', (['proc'], {}), '(proc)\n', (3716, 3722), False, 'from multiprocessing import Pool\n'), ((4564, 4580), 'multiprocessing.freeze_support', 'freeze_support', ([], {}), '()\n', (4578, 4580), False, 'from multiprocessing import freeze_support\n'), ((2121, 2143), 'pickle.load', 'pickle.load', (['inputfile'], {}), '(inputfile)\n', (2132, 2143), False, 'import pickle\n'), ((2275, 2327), 'pickle.dump', 'pickle.dump', (['rules', 'outfile', 'pickle.HIGHEST_PROTOCOL'], {}), '(rules, outfile, pickle.HIGHEST_PROTOCOL)\n', (2286, 2327), False, 'import pickle\n')] |
from directory_forms_api_client.actions import PardotAction
from directory_forms_api_client.helpers import Sender
from django.conf import settings
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.generic import TemplateView
from formtools.wizard.views import NamedUrlSessionWizardView
from contact.views import BaseNotifyFormView
from core import mixins
from core.datastructures import NotifySettings
from domestic.forms import (
CompanyDetailsForm,
HelpForm,
PersonalDetailsForm,
UKEFContactForm,
)
class UKEFHomeView(TemplateView):
    """Landing page for the UKEF section; adds the bullet lists to context."""

    template_name = 'domestic/ukef/home_page.html'

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        context.update(
            trade_finance_bullets=[
                'working capital support',
                'bond support',
                'credit insurance',
            ],
            project_finance_bullets=[
                'UKEF buyer credit guarantees',
                'direct lending',
                'credit and bond insurance',
            ],
        )
        return context
class ContactView(BaseNotifyFormView):
    """UKEF contact form: notifies agent and user, remembers the sender."""

    template_name = 'domestic/ukef/contact_form.html'
    form_class = UKEFContactForm
    success_url = reverse_lazy('domestic:uk-export-contact-success')
    notify_settings = NotifySettings(
        agent_template=settings.UKEF_CONTACT_AGENT_NOTIFY_TEMPLATE_ID,
        agent_email=settings.UKEF_CONTACT_AGENT_EMAIL_ADDRESS,
        user_template=settings.UKEF_CONTACT_USER_NOTIFY_TEMPLATE_ID,
    )

    def form_valid(self, form):
        # Stash the address so the success page can display/require it.
        self.request.session['user_email'] = form.cleaned_data['email']
        return super().form_valid(form)
class SuccessPageView(TemplateView):
    """Shown after a successful UKEF contact submission."""

    template_name = 'domestic/ukef/contact_form_success.html'

    def get(self, *args, **kwargs):
        # Visitors who did not just submit the form are bounced back to it.
        if self.request.session.get('user_email'):
            return super().get(*args, **kwargs)
        return HttpResponseRedirect(reverse_lazy('domestic:uk-export-contact'))

    def get_context_data(self, **kwargs):
        kwargs['user_email'] = self.request.session.get('user_email')
        return super().get_context_data(**kwargs)
@method_decorator(never_cache, name='dispatch')
class GetFinanceLeadGenerationFormView(
    mixins.PrepopulateFormMixin,
    mixins.PreventCaptchaRevalidationMixin,
    NamedUrlSessionWizardView,
):
    """Three-step wizard (personal -> company -> help) that submits the
    collected data to Pardot on completion. Steps are prepopulated from the
    authenticated user's profile/company where available."""
    success_url = reverse_lazy(
        'domestic:uk-export-finance-lead-generation-form-success',
    )
    # Wizard step slugs (also used as URL fragments).
    PERSONAL_DETAILS = 'your-details'
    COMPANY_DETAILS = 'company-details'
    HELP = 'help'
    form_list = (
        (PERSONAL_DETAILS, PersonalDetailsForm),
        (COMPANY_DETAILS, CompanyDetailsForm),
        (HELP, HelpForm),
    )
    templates = {
        PERSONAL_DETAILS: 'domestic/finance/lead_generation_form/step-personal.html',
        COMPANY_DETAILS: 'domestic/finance/lead_generation_form/step-company.html',
        HELP: 'domestic/finance/lead_generation_form/step-help.html',
    }
    def get_form_kwargs(self, *args, **kwargs):
        # skipping `PrepopulateFormMixin.get_form_kwargs` -- super() is called
        # past that mixin in the MRO on purpose.
        return super(mixins.PrepopulateFormMixin, self).get_form_kwargs(*args, **kwargs)
    def get_form_initial(self, step):
        """Seed each step's initial data from the logged-in user's profile."""
        initial = super().get_form_initial(step)
        if self.request.user.is_authenticated:
            if step == self.PERSONAL_DETAILS and self.request.user.company:
                initial.update(
                    {
                        'email': self.request.user.email,
                        'phone': getattr(self.request.user.company, 'mobile_number', ''),
                        'firstname': self.guess_given_name,
                        'lastname': self.guess_family_name,
                    }
                )
            elif step == self.COMPANY_DETAILS and self.request.user.company:
                company = self.request.user.company
                # First listed sector is used as the default industry, if any.
                _sectors = getattr(company, 'sectors', [])
                _industry = _sectors[0] if _sectors else None
                initial.update(
                    {
                        'not_companies_house': False,
                        'company_number': getattr(company, 'number', ''),
                        'trading_name': getattr(company, 'name', ''),
                        'address_line_one': getattr(company, 'address_line_1', ''),
                        'address_line_two': getattr(company, 'address_line_2', ''),
                        'address_town_city': getattr(company, 'locality', ''),
                        'address_post_code': getattr(company, 'postal_code', ''),
                        'industry': _industry,
                    }
                )
        return initial
    def get_template_names(self):
        # One template per wizard step.
        return [self.templates[self.steps.current]]
    def done(self, form_list, **kwargs):
        """Submit the merged wizard data to Pardot, then redirect to success."""
        form_data = self.serialize_form_list(form_list)
        sender = Sender(email_address=form_data['email'], country_code=None)
        action = PardotAction(
            pardot_url=settings.UKEF_FORM_SUBMIT_TRACKER_URL,
            form_url=reverse('domestic:uk-export-finance-lead-generation-form', kwargs={'step': self.PERSONAL_DETAILS}),
            sender=sender,
        )
        response = action.save(form_data)
        # Surface any Pardot submission failure instead of silently redirecting.
        response.raise_for_status()
        return redirect(self.success_url)
    @staticmethod
    def serialize_form_list(form_list):
        """Merge every step's cleaned_data into a single flat dict."""
        data = {}
        for form in form_list:
            data.update(form.cleaned_data)
        return data
| [
"django.urls.reverse",
"django.utils.decorators.method_decorator",
"django.urls.reverse_lazy",
"django.shortcuts.redirect",
"directory_forms_api_client.helpers.Sender",
"core.datastructures.NotifySettings"
] | [((2376, 2422), 'django.utils.decorators.method_decorator', 'method_decorator', (['never_cache'], {'name': '"""dispatch"""'}), "(never_cache, name='dispatch')\n", (2392, 2422), False, 'from django.utils.decorators import method_decorator\n'), ((1414, 1464), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""domestic:uk-export-contact-success"""'], {}), "('domestic:uk-export-contact-success')\n", (1426, 1464), False, 'from django.urls import reverse, reverse_lazy\n'), ((1487, 1695), 'core.datastructures.NotifySettings', 'NotifySettings', ([], {'agent_template': 'settings.UKEF_CONTACT_AGENT_NOTIFY_TEMPLATE_ID', 'agent_email': 'settings.UKEF_CONTACT_AGENT_EMAIL_ADDRESS', 'user_template': 'settings.UKEF_CONTACT_USER_NOTIFY_TEMPLATE_ID'}), '(agent_template=settings.\n UKEF_CONTACT_AGENT_NOTIFY_TEMPLATE_ID, agent_email=settings.\n UKEF_CONTACT_AGENT_EMAIL_ADDRESS, user_template=settings.\n UKEF_CONTACT_USER_NOTIFY_TEMPLATE_ID)\n', (1501, 1695), False, 'from core.datastructures import NotifySettings\n'), ((2592, 2663), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""domestic:uk-export-finance-lead-generation-form-success"""'], {}), "('domestic:uk-export-finance-lead-generation-form-success')\n", (2604, 2663), False, 'from django.urls import reverse, reverse_lazy\n'), ((5101, 5160), 'directory_forms_api_client.helpers.Sender', 'Sender', ([], {'email_address': "form_data['email']", 'country_code': 'None'}), "(email_address=form_data['email'], country_code=None)\n", (5107, 5160), False, 'from directory_forms_api_client.helpers import Sender\n'), ((5505, 5531), 'django.shortcuts.redirect', 'redirect', (['self.success_url'], {}), '(self.success_url)\n', (5513, 5531), False, 'from django.shortcuts import redirect\n'), ((2122, 2164), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""domestic:uk-export-contact"""'], {}), "('domestic:uk-export-contact')\n", (2134, 2164), False, 'from django.urls import reverse, reverse_lazy\n'), ((5275, 5377), 'django.urls.reverse', 
'reverse', (['"""domestic:uk-export-finance-lead-generation-form"""'], {'kwargs': "{'step': self.PERSONAL_DETAILS}"}), "('domestic:uk-export-finance-lead-generation-form', kwargs={'step':\n self.PERSONAL_DETAILS})\n", (5282, 5377), False, 'from django.urls import reverse, reverse_lazy\n')] |
import unicodedata
import warnings
import logging
import re
import argparse
import sbol3
import openpyxl
import tyto
from .helper_functions import toplevel_named, strip_sbol2_version, is_plasmid, url_to_identity, strip_filetype_suffix
from .workarounds import type_to_standard_extension
# Display IDs of the four standard sbol3.Collection objects created in the output document
BASIC_PARTS_COLLECTION = 'BasicParts'
COMPOSITE_PARTS_COLLECTION = 'CompositeParts'
LINEAR_PRODUCTS_COLLECTION = 'LinearDNAProducts'
FINAL_PRODUCTS_COLLECTION = 'FinalProducts'
def expand_configuration(values: dict) -> dict:
    """Build the complete sheet-parsing configuration.

    :param values: dictionary of overrides for the defaults (may be None)
    :return: configuration dictionary with every setting filled in
    :raises ValueError: if an override names a setting that does not exist
    """
    config = {
        'basic_sheet': 'Basic Parts',
        'basic_parts_name': 'B1',
        'basic_parts_description': 'A11',
        'basic_first_row': 20,
        'basic_name_col': 0,
        'basic_role_col': 1,
        'basic_notes_col': 2,
        'basic_description_col': 4,
        'basic_source_prefix_col': 5,
        'basic_source_id_col': 6,
        'basic_final_col': 9,
        'basic_circular_col': 10,
        'basic_length_col': 11,
        'basic_sequence_col': 12,
        'composite_sheet': 'Composite Parts',
        'composite_parts_name': 'B1',
        'composite_parts_description': 'A11',
        'composite_first_row': 24,
        'composite_name_col': 0,
        'composite_notes_col': 1,
        'composite_description_col': 2,
        'composite_final_col': 3,
        'composite_strain_col': 4,
        'composite_context_col': 5,
        'composite_constraints_col': 6,
        'composite_first_part_col': 7,
        'sources_sheet': 'data_source',
        'sources_first_row': 2,
        'source_name_col': 1,
        'source_uri_col': 2,
        'source_literal_col': 6
    }
    if values is not None:
        # Reject any key that is not a known setting before applying overrides
        unknown = [k for k in values if k not in config]
        if unknown:
            raise ValueError(f'Sheet configuration has no setting "{unknown[0]}"')
        config.update(values)
    return config
def read_metadata(wb: openpyxl.Workbook, doc: sbol3.Document, config: dict):
    """Extract sheet-level metadata and create the four standard collections.

    :param wb: Excel workbook to extract material from
    :param doc: SBOL document in which the collections are created
    :param config: dictionary of sheet parsing configuration variables
    :return: tuple (basic, composite, linear, final, source_table)
    """
    # Read the name/description cells from the basic-parts sheet
    basic_ws = wb[config['basic_sheet']]
    bp_name = basic_ws[config['basic_parts_name']].value
    bp_description = basic_ws[config['basic_parts_description']].value
    # The composite sheet may carry its own name/description; otherwise reuse the basic sheet's
    composite_ws = wb[config['composite_sheet']]
    if config['composite_parts_name']:
        cp_name = composite_ws[config['composite_parts_name']].value
        cp_description = composite_ws[config['composite_parts_description']].value
    else:
        cp_name, cp_description = bp_name, bp_description
    # Create the collections and add them to the document in a stable order
    basic_parts = sbol3.Collection(BASIC_PARTS_COLLECTION, name=bp_name, description=bp_description)
    composite_parts = sbol3.Collection(COMPOSITE_PARTS_COLLECTION, name=cp_name, description=cp_description)
    linear_products = sbol3.Collection(LINEAR_PRODUCTS_COLLECTION, name='Linear DNA Products',
                                      description='Linear DNA constructs to be fabricated')
    final_products = sbol3.Collection(FINAL_PRODUCTS_COLLECTION, name='Final Products',
                                     description='Final products desired for actual fabrication')
    for collection in (basic_parts, composite_parts, linear_products, final_products):
        doc.add(collection)
    # Map data-source names to namespace URIs, keeping only rows flagged as literal sources
    source_table = {}
    for row in wb[config['sources_sheet']].iter_rows(min_row=config['sources_first_row']):
        if row[config['source_literal_col']].value:
            source_table[row[config['source_name_col']].value] = row[config['source_uri_col']].value
    return basic_parts, composite_parts, linear_products, final_products, source_table
def row_to_basic_part(doc: sbol3.Document, row, basic_parts: sbol3.Collection, linear_products: sbol3.Collection,
                      final_products: sbol3.Collection, config: dict, source_table: dict):
    """
    Read a row for a basic part and turn it into SBOL Component
    :param doc: Document to add parts to
    :param row: Excel row to be processed
    :param basic_parts: collection of parts to add to
    :param linear_products: collection of linear parts to add to
    :param final_products: collection of final parts to add to
    :param config: dictionary of sheet parsing configuration variables
    :param source_table: dictionary mapping source names to namespaces
    :raises ValueError: if the stated length does not match the actual sequence length
    :return: None
    """
    # Parse material from sheet row
    name = row[config['basic_name_col']].value
    if name is None:
        return  # skip lines without names
    else:
        name = name.strip()  # make sure we're discarding whitespace
    raw_role = row[config['basic_role_col']].value
    try:  # look up with tyto; if fail, leave blank or add to description
        role = (tyto.SO.get_uri_by_term(raw_role) if raw_role else None)
    except LookupError:
        logging.warning(f'Role "{raw_role}" could not be found in Sequence Ontology')
        role = None
    design_notes = (row[config['basic_notes_col']].value if row[config['basic_notes_col']].value else "")
    description = (row[config['basic_description_col']].value if row[config['basic_description_col']].value else "")
    source_prefix = row[config['basic_source_prefix_col']].value
    source_id = row[config['basic_source_id_col']].value
    final_product = row[config['basic_final_col']].value  # boolean
    circular = row[config['basic_circular_col']].value  # boolean
    length = row[config['basic_length_col']].value
    raw_sequence = row[config['basic_sequence_col']].value
    # Normalize the sequence: NFKD-fold any odd Unicode, uppercase, and strip all whitespace
    sequence = (None if raw_sequence is None else "".join(unicodedata.normalize("NFKD", raw_sequence).upper().split()))
    # NOTE(review): if sequence is None while length != 0, len(None) raises TypeError
    # rather than the intended ValueError — confirm whether that case can occur.
    if not ((sequence is None and length == 0) or len(sequence) == length):
        raise ValueError(f'Part "{name}" has mismatched sequence length: check for bad characters and extra whitespace')
    # identity comes from source if set to a literal table, from display_id if not set
    identity = None
    display_id = None
    was_derived_from = None
    namespace = sbol3.get_namespace()
    if source_id and source_prefix:
        source_prefix = source_prefix.strip()
        if source_prefix in source_table:
            if source_table[source_prefix]:
                # Known source with a namespace: identity = <namespace>/<sanitized id>
                display_id = sbol3.string_to_display_id(source_id.strip())
                identity = f'{source_table[source_prefix]}/{display_id}'
                namespace = source_table[source_prefix]
            else:  # when there is no prefix, use the bare value (in SBOL3 format)
                raw_url = source_id.strip()
                identity = url_to_identity(strip_filetype_suffix(strip_sbol2_version(raw_url)))
                was_derived_from = raw_url
                namespace = identity.rsplit('/',1)[0]  # TODO: use a helper function
        else:
            logging.info(f'Part "{name}" ignoring non-literal source: {source_prefix}')
    elif source_id:
        logging.warning(f'Part "{name}" has source ID specified but not prefix: {source_id}')
    elif source_prefix:
        logging.warning(f'Part "{name}" has source prefix specified but not ID: {source_prefix}')
    if not identity:
        # No usable source: derive the display ID from the part name in the default namespace
        display_id = sbol3.string_to_display_id(name)
    # build a component from the material
    logging.debug(f'Creating basic part "{name}"')
    component = sbol3.Component(identity or display_id, sbol3.SBO_DNA, name=name, namespace=namespace,
                                description=f'{design_notes}\n{description}'.strip())
    if was_derived_from:
        component.derived_from.append(was_derived_from)
    doc.add(component)
    if role:
        component.roles.append(role)
    if circular:
        component.types.append(sbol3.SO_CIRCULAR)
    if sequence:
        sbol_seq = sbol3.Sequence(f'{component.identity}_sequence', namespace=namespace,
                                  encoding=sbol3.IUPAC_DNA_ENCODING, elements=sequence)
        doc.add(sbol_seq)
        component.sequences.append(sbol_seq.identity)
    # add the component to the appropriate collections
    basic_parts.members.append(component.identity)
    if final_product:
        linear_products.members.append(component.identity)
        final_products.members.append(component.identity)
##########################################
# Functions for parsing sub-components
# form of a sub-component:
#  X: identifies a component or set thereof
#  RC(X): X is reversed

# Matches a reverse-complement marker such as "RC(partname)".
# Fix: use a raw string — '\(' in a plain literal is an invalid escape
# (SyntaxWarning on Python 3.12+, and fragile in general).
reverse_complement_pattern = re.compile(r'RC\(.+\)')
def strip_RC(name):
    """Return the name stripped of whitespace and, if the whole string is an
    RC(...) wrapper, of that reverse-complement marker as well."""
    sanitized = name.strip()
    # fullmatch is equivalent to match + "spans the whole string" length check
    if reverse_complement_pattern.fullmatch(sanitized):
        return sanitized[3:-1]
    return sanitized
def is_RC(name):
    """True iff the specification is wrapped in a reverse-complement RC(...) marker."""
    candidate = name.strip()
    # Stripping the RC wrapper shortens the string exactly when a wrapper was present
    return strip_RC(candidate) != candidate
def part_names(specification):
    """Split a (possibly RC-wrapped) specification into its comma-separated part names."""
    inner = strip_RC(str(specification))
    return [token.strip() for token in inner.split(',')]
def unresolved_subparts(doc: sbol3.Document, row, config):
    """List the part names referenced by this row that cannot yet be found in the document."""
    missing = []
    for spec in part_specifications(row, config):
        for name in part_names(spec):
            if not partname_to_part(doc, name):
                missing.append(name)
    return missing
def part_specifications(row, config):
    """Yield the non-empty part-specification cell values of a composite row, left to right."""
    for cell in row[config['composite_first_part_col']:]:
        if cell.value:
            yield cell.value
def partname_to_part(doc: sbol3.Document, name_or_display_id: str):
    """Look up a part by its displayID or its name, searching first by displayID, then by name
    :param doc: SBOL document to search
    :param name_or_display_id: string to look up
    :return: object if found, None if not
    """
    found = doc.find(name_or_display_id)
    if found:
        return found
    return toplevel_named(doc, name_or_display_id)
###############################################################
# Functions for making composites, combinatorials, and libraries
def make_composite_component(display_id, part_lists, reverse_complements):
    """Assemble a fixed (non-combinatorial) composite part.

    Each entry of part_lists must contain exactly one part; the parts are
    chained in order with meets-constraints and oriented per reverse_complements.
    """
    composite_part = sbol3.Component(display_id, sbol3.SBO_DNA)
    composite_part.roles.append(sbol3.SO_ENGINEERED_REGION)
    previous = None
    for part_list, rc in zip(part_lists, reverse_complements):
        if len(part_list) != 1:
            raise ValueError(f'Part list should have precisely one element, but is {part_list}')
        sub = sbol3.SubComponent(part_list[0])
        sub.orientation = sbol3.SBOL_REVERSE_COMPLEMENT if rc else sbol3.SBOL_INLINE
        composite_part.features.append(sub)
        # Chain each feature to its predecessor so the parts abut in sequence
        if previous is not None:
            composite_part.constraints.append(sbol3.Constraint(sbol3.SBOL_MEETS, previous, sub))
        previous = sub
    return composite_part
# Matches sheet constraints of the form "Part <x> <relation> Part <y>".
# Fix: raw string — '\d' in a plain literal is an invalid escape (SyntaxWarning on 3.12+).
constraint_pattern = re.compile(r'Part (\d+) (.+) Part (\d+)')
# Maps the relation phrases allowed in the constraints column to SBOL restriction URIs.
# Fix: 'different orientation from' previously mapped to SBOL_SAME_ORIENTATION_AS
# (copy-paste error), making it indistinguishable from 'same orientation as'.
constraint_dict = {'same as': sbol3.SBOL_VERIFY_IDENTICAL,
                   'different from': sbol3.SBOL_DIFFERENT_FROM,
                   'same orientation as': sbol3.SBOL_SAME_ORIENTATION_AS,
                   'different orientation from': sbol3.SBOL_DIFFERENT_ORIENTATION_AS}
def make_constraint(constraint, part_list):
    """Parse one "Part X <relation> Part Y" constraint into an sbol3.Constraint.

    :param constraint: textual constraint from the sheet
    :param part_list: template features, 1-indexed by the part numbers in the text
    :return: sbol3.Constraint relating the two referenced features
    :raises ValueError: on malformed text, unknown relation, a self-constraint,
        or a part number outside 1..len(part_list)
    """
    m = constraint_pattern.match(constraint)
    if not m:
        raise ValueError(f'Constraint "{constraint}" does not match pattern "Part X relation Part Y"')
    try:
        restriction = constraint_dict[m.group(2)]
    except KeyError:
        raise ValueError(f'Do not recognize constraint relation in "{constraint}"')
    x = int(m.group(1))
    y = int(m.group(3))
    # Fix: was `x is y`, which tests int *identity* and is only reliable for
    # CPython's small-int cache (-5..256); equality is the intended comparison.
    if x == y:
        raise ValueError(f'A part cannot constrain itself: {constraint}')
    for n in [x, y]:
        if not (0 < n <= len(part_list)):
            raise ValueError(f'Part number "{str(n)}" is not between 1 and {len(part_list)}')
    return sbol3.Constraint(restriction, part_list[x-1], part_list[y-1])
def make_combinatorial_derivation(document, display_id,part_lists,reverse_complements,constraints):
    """Build a CombinatorialDerivation (library) plus its template Component.

    Each part list with several options (or a nested derivation) becomes a
    template variable; singleton Component lists become fixed SubComponents.
    Returns the CombinatorialDerivation (the template is added to the document).
    """
    # Make the combinatorial derivation and its template
    template = sbol3.Component(display_id + "_template", sbol3.SBO_DNA)
    document.add(template)
    cd = sbol3.CombinatorialDerivation(display_id, template)
    cd.strategy = sbol3.SBOL_ENUMERATE
    # for each part, make a SubComponent or LocalSubComponent in the template and link them together in sequence
    template_part_list = []
    for part_list,rc in zip(part_lists,reverse_complements):
        # it's a variable if there are multiple values or if there's a single value that's a combinatorial derivation
        if len(part_list)>1 or not isinstance(part_list[0],sbol3.Component):
            sub = sbol3.LocalSubComponent({sbol3.SBO_DNA})  # make a template variable
            sub.name = "Part "+str(len(template_part_list)+1)
            template.features.append(sub)
            var = sbol3.VariableFeature(cardinality=sbol3.SBOL_ONE, variable=sub)
            cd.variable_features.append(var)
            # add all of the parts as variables
            for part in part_list:
                if isinstance(part,sbol3.Component): var.variants.append(part)
                elif isinstance(part,sbol3.CombinatorialDerivation): var.variant_derivations.append(part)
                else: raise ValueError("Don't know how to make library element for "+part.name+", a "+str(part))
        else:  # otherwise it's a fixed element of the template
            sub = sbol3.SubComponent(part_list[0])
            template.features.append(sub)
        # in either case, orient and order the template elements
        sub.orientation = (sbol3.SBOL_REVERSE_COMPLEMENT if rc else sbol3.SBOL_INLINE)
        if template_part_list: template.constraints.append(sbol3.Constraint(sbol3.SBOL_MEETS,template_part_list[-1],sub))
        template_part_list.append(sub)
    # next, add all of the constraints to the template
    #template.constraints = (make_constraint(c.strip(),template_part_list) for c in (constraints.split(',') if constraints else [])) # impacted by pySBOL3 appending
    c_list = (make_constraint(c.strip(),template_part_list) for c in (constraints.split(',') if constraints else []))
    for c in c_list: template.constraints.append(c)
    # return the completed part
    return cd
def make_composite_part(document, row, composite_parts, linear_products, final_products, config):
    """
    Create a composite part from a row in the composites sheet
    :param document: Document to add parts to
    :param row: Excel row to be processed
    :param composite_parts: collection of parts to add to
    :param linear_products: collection of linear parts to add to
    :param final_products: collection of final parts to add to
    :param config: dictionary of sheet parsing configuration variables
    """
    # Parse material from sheet row
    name = row[config['composite_name_col']].value
    if name is None:
        return  # skip lines without names
    else:
        name = name.strip()  # make sure we're discarding whitespace
    display_id = sbol3.string_to_display_id(name)
    design_notes = (row[config['composite_notes_col']].value if row[config['composite_notes_col']].value else "")
    description = \
        (row[config['composite_description_col']].value if row[config['composite_description_col']].value else "")
    final_product = row[config['composite_final_col']].value  # boolean
    transformed_strain = row[config['composite_strain_col']].value if config['composite_strain_col'] else None
    backbone_or_locus_raw = row[config['composite_context_col']].value if config['composite_context_col'] else None
    backbone_or_locus = part_names(backbone_or_locus_raw) if backbone_or_locus_raw else []
    constraints = row[config['composite_constraints_col']].value if config['composite_constraints_col'] else None
    reverse_complements = [is_RC(spec) for spec in part_specifications(row,config)]
    # Resolve every named subpart; a row is combinatorial if any slot has multiple
    # options or refers to a library (CombinatorialDerivation)
    part_lists = \
        [[partname_to_part(document, name) for name in part_names(spec)] for spec in part_specifications(row, config)]
    combinatorial = any(x for x in part_lists if len(x) > 1 or isinstance(x[0], sbol3.CombinatorialDerivation))
    # Build the composite
    logging.debug(f'Creating {"library" if combinatorial else "composite part"} "{name}"')
    # When a backbone/locus is given, the linear construct is the "_ins" insert
    linear_dna_display_id = (f'{display_id}_ins' if backbone_or_locus else display_id)
    if combinatorial:
        composite_part = make_combinatorial_derivation(document, linear_dna_display_id, part_lists, reverse_complements,
                                                      constraints)
    else:
        composite_part = make_composite_component(linear_dna_display_id, part_lists, reverse_complements)
    composite_part.name = (f'{name} insert' if backbone_or_locus else name)
    composite_part.description = f'{design_notes}\n{description}'.strip()
    # add the component to the appropriate collections
    document.add(composite_part)
    composite_parts.members.append(composite_part.identity)
    if final_product:
        linear_products.members.append(composite_part.identity)
    ###############
    # Consider strain and locus information
    if transformed_strain:
        warnings.warn("Not yet handling strain information: "+transformed_strain)
    if backbone_or_locus:
        # TODO: handle integration locuses as well as plasmid backbones
        backbones = [partname_to_part(document,name) for name in backbone_or_locus]
        if any(b is None for b in backbones):
            raise ValueError(f'Could not find specified backbone(s) "{backbone_or_locus}"')
        if any(not is_plasmid(b) for b in backbones):
            raise ValueError(f'Specified backbones "{backbone_or_locus}" are not all plasmids')
        if combinatorial:
            # Library insert: wrap the derivation in a plasmid-level derivation
            logging.debug(f"Embedding library '{composite_part.name}' in plasmid backbone(s) '{backbone_or_locus}'")
            plasmid = sbol3.Component(f'{display_id}_template', sbol3.SBO_DNA)
            document.add(plasmid)
            part_sub = sbol3.LocalSubComponent([sbol3.SBO_DNA], name="Inserted Construct")
            plasmid.features.append(part_sub)
            plasmid_cd = sbol3.CombinatorialDerivation(display_id, plasmid, name=name)
            document.add(plasmid_cd)
            part_var = sbol3.VariableFeature(cardinality=sbol3.SBOL_ONE, variable=part_sub)
            plasmid_cd.variable_features.append(part_var)
            part_var.variant_derivations.append(composite_part)
            if final_product:
                # NOTE(review): appends the object rather than .identity as done
                # elsewhere — confirm pySBOL3 coerces this consistently
                final_products.members.append(plasmid_cd)
        else:
            if len(backbones) == 1:
                # Fixed insert, single backbone: a plain plasmid Component
                logging.debug(f'Embedding part "{composite_part.name}" in plasmid backbone "{backbone_or_locus}"')
                plasmid = sbol3.Component(display_id, sbol3.SBO_DNA, name=name)
                document.add(plasmid)
                part_sub = sbol3.SubComponent(composite_part)
                plasmid.features.append(part_sub)
                if final_product:
                    # NOTE(review): `+= {plasmid}` adds a set of objects, unlike the
                    # .append(identity) style used elsewhere — verify intent
                    final_products.members += {plasmid}
            else:
                # Fixed insert, multiple candidate backbones: backbone becomes a variable
                logging.debug(f'Embedding part "{composite_part.name}" in plasmid library "{backbone_or_locus}"')
                plasmid = sbol3.Component(f'{display_id}_template', sbol3.SBO_DNA)
                document.add(plasmid)
                part_sub = sbol3.SubComponent(composite_part)
                plasmid.features.append(part_sub)
                plasmid_cd = sbol3.CombinatorialDerivation(display_id, plasmid, name=name)
                document.add(plasmid_cd)
                if final_product:
                    final_products.members.append(plasmid_cd)
        # Attach the backbone: fixed SubComponent for one backbone, variable for several
        if len(backbones) == 1:
            backbone_sub = sbol3.SubComponent(backbones[0])
            plasmid.features.append(backbone_sub)
        else:
            backbone_sub = sbol3.LocalSubComponent([sbol3.SBO_DNA])
            backbone_sub.name = "Vector"
            plasmid.features.append(backbone_sub)
            backbone_var = sbol3.VariableFeature(cardinality=sbol3.SBOL_ONE, variable=backbone_sub)
            plasmid_cd.variable_features.append(backbone_var)
            backbone_var.variants += backbones
        # Insert and backbone abut on both sides, closing the circular plasmid
        plasmid.constraints.append(sbol3.Constraint(sbol3.SBOL_MEETS, part_sub, backbone_sub))
        plasmid.constraints.append(sbol3.Constraint(sbol3.SBOL_MEETS, backbone_sub, part_sub))
def excel_to_sbol(wb: openpyxl.Workbook, config: dict = None) -> sbol3.Document:
    """
    Take an open Excel file, return an SBOL document
    :param wb: openpyxl pointer to an Excel file
    :param config: dictionary of sheet parsing configuration variables
    :return: Document containing all SBOL extracted from Excel sheet
    """
    config = expand_configuration(config)
    doc = sbol3.Document()
    logging.info('Reading metadata for collections')
    basic_parts, composite_parts, linear_products, final_products, source_table = read_metadata(wb, doc, config)
    logging.info('Reading basic parts')
    for row in wb[config['basic_sheet']].iter_rows(min_row=config['basic_first_row']):
        row_to_basic_part(doc, row, basic_parts, linear_products, final_products, config, source_table)
    logging.info(f'Created {len(basic_parts.members)} basic parts')
    logging.info('Reading composite parts and libraries')
    # first collect all rows with names
    pending_parts = [row for row in wb[config['composite_sheet']].iter_rows(min_row=config['composite_first_row'])
                     if row[config['composite_name_col']].value]
    # Fix-point loop: composites may reference other composites, so repeatedly
    # build every row whose subparts are all resolved until none remain (or no
    # progress can be made, which is a hard error listing the missing names).
    while pending_parts:
        ready = [row for row in pending_parts if not unresolved_subparts(doc, row, config)]
        if not ready:
            raise ValueError("Could not resolve subparts" + ''.join(
                (f"\n in '{row[config['composite_name_col']].value}':" +
                 ''.join(f" '{x}'" for x in unresolved_subparts(doc, row, config)))
                for row in pending_parts))
        for row in ready:
            make_composite_part(doc, row, composite_parts, linear_products, final_products, config)
        pending_parts = [p for p in pending_parts if p not in ready]  # subtract parts from stable list
    logging.info(f'Created {len(composite_parts.members)} composite parts or libraries')
    logging.info(f'Count {len(basic_parts.members)} basic parts, {len(composite_parts.members)} composites/libraries')
    report = doc.validate()
    logging.info(f'Validation of document found {len(report.errors)} errors and {len(report.warnings)} warnings')
    return doc
def main():
    """Command-line wrapper: parse arguments, run excel_to_sbol, write the result."""
    parser = argparse.ArgumentParser()
    parser.add_argument('excel_file', help="Excel file used as input")
    parser.add_argument('-n', '--namespace', dest='namespace',
                        help="Namespace for Components in output file")
    parser.add_argument('-l', '--local', dest='local', default=None,
                        help="Local path for Components in output file")
    parser.add_argument('-o', '--output', dest='output_file', default='out',
                        help="Name of SBOL file to be written")
    parser.add_argument('-t', '--file-type', dest='file_type', default=sbol3.SORTED_NTRIPLES,
                        help="Name of SBOL file to output to (excluding type)")
    parser.add_argument('--verbose', '-v', dest='verbose', action='count', default=0,
                        help="Print running explanation of conversion process")
    args = vars(parser.parse_args())

    # Verbosity: 0 -> WARN, 1 -> INFO, 2+ -> DEBUG
    verbosity = args['verbose']
    if verbosity == 0:
        log_level = logging.WARN
    elif verbosity == 1:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG
    logging.getLogger().setLevel(level=log_level)

    # Ensure the output file name carries the extension for the chosen SBOL serialization
    extension = type_to_standard_extension[args['file_type']]
    outfile_name = args['output_file'] if args['output_file'].endswith(extension) else args['output_file'] + extension

    sbol3.set_namespace(args['namespace'])
    # TODO: unkludge after resolution of https://github.com/SynBioDex/pySBOL3/issues/288
    if args['local']:
        sbol3.set_namespace(f"{args['namespace']}/{args['local']}")

    # Read file, convert, and write resulting document
    logging.info('Accessing Excel file ' + args['excel_file'])
    sbol_document = excel_to_sbol(openpyxl.load_workbook(args['excel_file'], data_only=True))
    sbol_document.write(outfile_name, args['file_type'])
    logging.info('SBOL file written to ' + outfile_name)


if __name__ == '__main__':
    main()
| [
"logging.getLogger",
"sbol3.SubComponent",
"logging.debug",
"re.compile",
"sbol3.string_to_display_id",
"sbol3.CombinatorialDerivation",
"logging.info",
"tyto.SO.get_uri_by_term",
"argparse.ArgumentParser",
"sbol3.VariableFeature",
"sbol3.Document",
"sbol3.Collection",
"sbol3.LocalSubCompone... | [((8982, 9006), 're.compile', 're.compile', (['"""RC\\\\(.+\\\\)"""'], {}), "('RC\\\\(.+\\\\)')\n", (8992, 9006), False, 'import re\n'), ((11408, 11450), 're.compile', 're.compile', (['"""Part (\\\\d+) (.+) Part (\\\\d+)"""'], {}), "('Part (\\\\d+) (.+) Part (\\\\d+)')\n", (11418, 11450), False, 'import re\n'), ((3107, 3194), 'sbol3.Collection', 'sbol3.Collection', (['BASIC_PARTS_COLLECTION'], {'name': 'bp_name', 'description': 'bp_description'}), '(BASIC_PARTS_COLLECTION, name=bp_name, description=\n bp_description)\n', (3123, 3194), False, 'import sbol3\n'), ((3238, 3329), 'sbol3.Collection', 'sbol3.Collection', (['COMPOSITE_PARTS_COLLECTION'], {'name': 'cp_name', 'description': 'cp_description'}), '(COMPOSITE_PARTS_COLLECTION, name=cp_name, description=\n cp_description)\n', (3254, 3329), False, 'import sbol3\n'), ((3377, 3507), 'sbol3.Collection', 'sbol3.Collection', (['LINEAR_PRODUCTS_COLLECTION'], {'name': '"""Linear DNA Products"""', 'description': '"""Linear DNA constructs to be fabricated"""'}), "(LINEAR_PRODUCTS_COLLECTION, name='Linear DNA Products',\n description='Linear DNA constructs to be fabricated')\n", (3393, 3507), False, 'import sbol3\n'), ((3594, 3725), 'sbol3.Collection', 'sbol3.Collection', (['FINAL_PRODUCTS_COLLECTION'], {'name': '"""Final Products"""', 'description': '"""Final products desired for actual fabrication"""'}), "(FINAL_PRODUCTS_COLLECTION, name='Final Products',\n description='Final products desired for actual fabrication')\n", (3610, 3725), False, 'import sbol3\n'), ((6594, 6615), 'sbol3.get_namespace', 'sbol3.get_namespace', ([], {}), '()\n', (6613, 6615), False, 'import sbol3\n'), ((7799, 7845), 'logging.debug', 'logging.debug', (['f"""Creating basic part "{name}\\""""'], {}), '(f\'Creating basic part "{name}"\')\n', (7812, 7845), False, 'import logging\n'), ((10632, 10674), 'sbol3.Component', 'sbol3.Component', (['display_id', 'sbol3.SBO_DNA'], {}), '(display_id, sbol3.SBO_DNA)\n', (10647, 10674), 
False, 'import sbol3\n'), ((12399, 12464), 'sbol3.Constraint', 'sbol3.Constraint', (['restriction', 'part_list[x - 1]', 'part_list[y - 1]'], {}), '(restriction, part_list[x - 1], part_list[y - 1])\n', (12415, 12464), False, 'import sbol3\n'), ((12635, 12691), 'sbol3.Component', 'sbol3.Component', (["(display_id + '_template')", 'sbol3.SBO_DNA'], {}), "(display_id + '_template', sbol3.SBO_DNA)\n", (12650, 12691), False, 'import sbol3\n'), ((12728, 12779), 'sbol3.CombinatorialDerivation', 'sbol3.CombinatorialDerivation', (['display_id', 'template'], {}), '(display_id, template)\n', (12757, 12779), False, 'import sbol3\n'), ((15590, 15622), 'sbol3.string_to_display_id', 'sbol3.string_to_display_id', (['name'], {}), '(name)\n', (15616, 15622), False, 'import sbol3\n'), ((16741, 16841), 'logging.debug', 'logging.debug', (['f"""Creating {\'library\' if combinatorial else \'composite part\'} "{name}\\""""'], {}), '(\n f\'Creating {\\\'library\\\' if combinatorial else \\\'composite part\\\'} "{name}"\'\n )\n', (16754, 16841), False, 'import logging\n'), ((21277, 21293), 'sbol3.Document', 'sbol3.Document', ([], {}), '()\n', (21291, 21293), False, 'import sbol3\n'), ((21299, 21347), 'logging.info', 'logging.info', (['"""Reading metadata for collections"""'], {}), "('Reading metadata for collections')\n", (21311, 21347), False, 'import logging\n'), ((21466, 21501), 'logging.info', 'logging.info', (['"""Reading basic parts"""'], {}), "('Reading basic parts')\n", (21478, 21501), False, 'import logging\n'), ((21766, 21819), 'logging.info', 'logging.info', (['"""Reading composite parts and libraries"""'], {}), "('Reading composite parts and libraries')\n", (21778, 21819), False, 'import logging\n'), ((23175, 23200), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (23198, 23200), False, 'import argparse\n'), ((24561, 24604), 'sbol3.set_namespace', 'sbol3.set_namespace', (["args_dict['namespace']"], {}), "(args_dict['namespace'])\n", (24580, 24604), False, 
'import sbol3\n'), ((24859, 24909), 'logging.info', 'logging.info', (["('Accessing Excel file ' + excel_file)"], {}), "('Accessing Excel file ' + excel_file)\n", (24871, 24909), False, 'import logging\n'), ((25047, 25099), 'logging.info', 'logging.info', (["('SBOL file written to ' + outfile_name)"], {}), "('SBOL file written to ' + outfile_name)\n", (25059, 25099), False, 'import logging\n'), ((7719, 7751), 'sbol3.string_to_display_id', 'sbol3.string_to_display_id', (['name'], {}), '(name)\n', (7745, 7751), False, 'import sbol3\n'), ((8292, 8419), 'sbol3.Sequence', 'sbol3.Sequence', (['f"""{component.identity}_sequence"""'], {'namespace': 'namespace', 'encoding': 'sbol3.IUPAC_DNA_ENCODING', 'elements': 'sequence'}), "(f'{component.identity}_sequence', namespace=namespace,\n encoding=sbol3.IUPAC_DNA_ENCODING, elements=sequence)\n", (8306, 8419), False, 'import sbol3\n'), ((11037, 11069), 'sbol3.SubComponent', 'sbol3.SubComponent', (['part_list[0]'], {}), '(part_list[0])\n', (11055, 11069), False, 'import sbol3\n'), ((17727, 17802), 'warnings.warn', 'warnings.warn', (["('Not yet handling strain information: ' + transformed_strain)"], {}), "('Not yet handling strain information: ' + transformed_strain)\n", (17740, 17802), False, 'import warnings\n'), ((24729, 24798), 'sbol3.set_namespace', 'sbol3.set_namespace', (['f"""{args_dict[\'namespace\']}/{args_dict[\'local\']}"""'], {}), '(f"{args_dict[\'namespace\']}/{args_dict[\'local\']}")\n', (24748, 24798), False, 'import sbol3\n'), ((24942, 24992), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['excel_file'], {'data_only': '(True)'}), '(excel_file, data_only=True)\n', (24964, 24992), False, 'import openpyxl\n'), ((5327, 5360), 'tyto.SO.get_uri_by_term', 'tyto.SO.get_uri_by_term', (['raw_role'], {}), '(raw_role)\n', (5350, 5360), False, 'import tyto\n'), ((5416, 5493), 'logging.warning', 'logging.warning', (['f"""Role "{raw_role}" could not be found in Sequence Ontology"""'], {}), '(f\'Role "{raw_role}" could not 
be found in Sequence Ontology\')\n', (5431, 5493), False, 'import logging\n'), ((7365, 7440), 'logging.info', 'logging.info', (['f"""Part "{name}" ignoring non-literal source: {source_prefix}"""'], {}), '(f\'Part "{name}" ignoring non-literal source: {source_prefix}\')\n', (7377, 7440), False, 'import logging\n'), ((7469, 7559), 'logging.warning', 'logging.warning', (['f"""Part "{name}" has source ID specified but not prefix: {source_id}"""'], {}), '(\n f\'Part "{name}" has source ID specified but not prefix: {source_id}\')\n', (7484, 7559), False, 'import logging\n'), ((13234, 13274), 'sbol3.LocalSubComponent', 'sbol3.LocalSubComponent', (['{sbol3.SBO_DNA}'], {}), '({sbol3.SBO_DNA})\n', (13257, 13274), False, 'import sbol3\n'), ((13424, 13487), 'sbol3.VariableFeature', 'sbol3.VariableFeature', ([], {'cardinality': 'sbol3.SBOL_ONE', 'variable': 'sub'}), '(cardinality=sbol3.SBOL_ONE, variable=sub)\n', (13445, 13487), False, 'import sbol3\n'), ((13995, 14027), 'sbol3.SubComponent', 'sbol3.SubComponent', (['part_list[0]'], {}), '(part_list[0])\n', (14013, 14027), False, 'import sbol3\n'), ((18309, 18423), 'logging.debug', 'logging.debug', (['f"""Embedding library \'{composite_part.name}\' in plasmid backbone(s) \'{backbone_or_locus}\'"""'], {}), '(\n f"Embedding library \'{composite_part.name}\' in plasmid backbone(s) \'{backbone_or_locus}\'"\n )\n', (18322, 18423), False, 'import logging\n'), ((18436, 18492), 'sbol3.Component', 'sbol3.Component', (['f"""{display_id}_template"""', 'sbol3.SBO_DNA'], {}), "(f'{display_id}_template', sbol3.SBO_DNA)\n", (18451, 18492), False, 'import sbol3\n'), ((18550, 18617), 'sbol3.LocalSubComponent', 'sbol3.LocalSubComponent', (['[sbol3.SBO_DNA]'], {'name': '"""Inserted Construct"""'}), "([sbol3.SBO_DNA], name='Inserted Construct')\n", (18573, 18617), False, 'import sbol3\n'), ((18689, 18750), 'sbol3.CombinatorialDerivation', 'sbol3.CombinatorialDerivation', (['display_id', 'plasmid'], {'name': 'name'}), '(display_id, plasmid, 
name=name)\n', (18718, 18750), False, 'import sbol3\n'), ((18811, 18879), 'sbol3.VariableFeature', 'sbol3.VariableFeature', ([], {'cardinality': 'sbol3.SBOL_ONE', 'variable': 'part_sub'}), '(cardinality=sbol3.SBOL_ONE, variable=part_sub)\n', (18832, 18879), False, 'import sbol3\n'), ((20228, 20260), 'sbol3.SubComponent', 'sbol3.SubComponent', (['backbones[0]'], {}), '(backbones[0])\n', (20246, 20260), False, 'import sbol3\n'), ((20352, 20392), 'sbol3.LocalSubComponent', 'sbol3.LocalSubComponent', (['[sbol3.SBO_DNA]'], {}), '([sbol3.SBO_DNA])\n', (20375, 20392), False, 'import sbol3\n'), ((20511, 20583), 'sbol3.VariableFeature', 'sbol3.VariableFeature', ([], {'cardinality': 'sbol3.SBOL_ONE', 'variable': 'backbone_sub'}), '(cardinality=sbol3.SBOL_ONE, variable=backbone_sub)\n', (20532, 20583), False, 'import sbol3\n'), ((20729, 20787), 'sbol3.Constraint', 'sbol3.Constraint', (['sbol3.SBOL_MEETS', 'part_sub', 'backbone_sub'], {}), '(sbol3.SBOL_MEETS, part_sub, backbone_sub)\n', (20745, 20787), False, 'import sbol3\n'), ((20824, 20882), 'sbol3.Constraint', 'sbol3.Constraint', (['sbol3.SBOL_MEETS', 'backbone_sub', 'part_sub'], {}), '(sbol3.SBOL_MEETS, backbone_sub, part_sub)\n', (20840, 20882), False, 'import sbol3\n'), ((24241, 24260), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (24258, 24260), False, 'import logging\n'), ((7587, 7681), 'logging.warning', 'logging.warning', (['f"""Part "{name}" has source prefix specified but not ID: {source_prefix}"""'], {}), '(\n f\'Part "{name}" has source prefix specified but not ID: {source_prefix}\')\n', (7602, 7681), False, 'import logging\n'), ((11256, 11305), 'sbol3.Constraint', 'sbol3.Constraint', (['sbol3.SBOL_MEETS', 'last_sub', 'sub'], {}), '(sbol3.SBOL_MEETS, last_sub, sub)\n', (11272, 11305), False, 'import sbol3\n'), ((14281, 14344), 'sbol3.Constraint', 'sbol3.Constraint', (['sbol3.SBOL_MEETS', 'template_part_list[-1]', 'sub'], {}), '(sbol3.SBOL_MEETS, template_part_list[-1], sub)\n', (14297, 14344), 
False, 'import sbol3\n'), ((19156, 19264), 'logging.debug', 'logging.debug', (['f"""Embedding part "{composite_part.name}" in plasmid backbone "{backbone_or_locus}\\""""'], {}), '(\n f\'Embedding part "{composite_part.name}" in plasmid backbone "{backbone_or_locus}"\'\n )\n', (19169, 19264), False, 'import logging\n'), ((19281, 19334), 'sbol3.Component', 'sbol3.Component', (['display_id', 'sbol3.SBO_DNA'], {'name': 'name'}), '(display_id, sbol3.SBO_DNA, name=name)\n', (19296, 19334), False, 'import sbol3\n'), ((19400, 19434), 'sbol3.SubComponent', 'sbol3.SubComponent', (['composite_part'], {}), '(composite_part)\n', (19418, 19434), False, 'import sbol3\n'), ((19609, 19716), 'logging.debug', 'logging.debug', (['f"""Embedding part "{composite_part.name}" in plasmid library "{backbone_or_locus}\\""""'], {}), '(\n f\'Embedding part "{composite_part.name}" in plasmid library "{backbone_or_locus}"\'\n )\n', (19622, 19716), False, 'import logging\n'), ((19733, 19789), 'sbol3.Component', 'sbol3.Component', (['f"""{display_id}_template"""', 'sbol3.SBO_DNA'], {}), "(f'{display_id}_template', sbol3.SBO_DNA)\n", (19748, 19789), False, 'import sbol3\n'), ((19855, 19889), 'sbol3.SubComponent', 'sbol3.SubComponent', (['composite_part'], {}), '(composite_part)\n', (19873, 19889), False, 'import sbol3\n'), ((19969, 20030), 'sbol3.CombinatorialDerivation', 'sbol3.CombinatorialDerivation', (['display_id', 'plasmid'], {'name': 'name'}), '(display_id, plasmid, name=name)\n', (19998, 20030), False, 'import sbol3\n'), ((6161, 6204), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKD"""', 'raw_sequence'], {}), "('NFKD', raw_sequence)\n", (6182, 6204), False, 'import unicodedata\n')] |
import copy
import json
import os
from collections import UserDict
from signalworks.tracking import Event, Partition, TimeValue, Value, Wave
class MultiTrack(UserDict):
"""
A dictionary containing time-synchronous tracks of equal duration and fs
"""
def __init__(self, mapping=None):
if mapping is None:
mapping = UserDict()
UserDict.__init__(self, mapping)
if __debug__: # long assert - TODO: do this on mapping, and then assign
self.check()
def check(self):
if len(self) > 1:
for i, (key, track) in enumerate(self.items()):
if track.fs != self.fs:
raise AssertionError(
f"all fs' must be equal, track #{i} ('{key}) does not match track #1"
)
if track.duration != next(iter(self.values())).duration:
raise AssertionError(
f"all durations must be equal, track #{i} ('{key}'') does not match track #1"
)
def get_fs(self):
if len(self):
return next(iter(self.values())).fs
else:
return 0 # or raise?
def set_fs(self, fs):
raise Exception("Cannot change fs, try resample()")
fs = property(get_fs, set_fs, doc="sampling frequency")
def get_duration(self):
if len(self):
if __debug__: # long assert - TODO: do this on mapping, and then assign
self.check()
return next(iter(self.values())).duration
else:
return 0
def set_duration(self, duration):
raise Exception("The duration cannot be set, it is derived from its conents")
duration = property(
get_duration, set_duration, doc="duration, as defined by its content"
)
def __eq__(self, other):
# excluding wav from comparison as long as wav writing/reading is erroneous
if (set(self.keys()) - {"wav"}) != (set(other.keys()) - {"wav"}):
return False
for k in self.keys():
if k != "wav" and self[k] != other[k]:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __setitem__(self, key, value):
if len(self):
if value.duration != self.duration:
raise AssertionError("duration does not match")
if value.fs != self.fs:
raise AssertionError("fs does not match")
UserDict.__setitem__(self, key, value)
def __str__(self):
s = ""
for key, track in self.items():
s += "%s: %s\n" % (key, track)
return s
def __add__(self, other):
if self is other:
other = copy.deepcopy(other)
obj = type(self)()
for k in self: # .iterkeys():
obj[k] = self[k] + other[k]
return obj
def resample(self, fs):
multiTrack = type(self)()
for key, track in self.items():
multiTrack[key] = track.resample(fs)
return multiTrack
def crossfade(self, other, length):
"""
append multiTrack to self, using a crossfade of a specified length in samples
"""
assert type(self) == type(other)
assert self.keys() == other.keys()
assert self.fs == other.fs
assert isinstance(length, int)
assert length > 0
assert other.duration >= length
assert self.duration >= length
multiTrack = type(self)()
for key, _ in self.items():
multiTrack[key] = self[key].crossfade(other[key], length)
return multiTrack
def select(self, a, b, keys=None):
assert a >= 0
assert a < b # or a <= b?
assert b <= self.duration
"""return a new multitrack object with all track views from time a to b"""
if keys is None:
keys = self.keys()
multiTrack = type(self)()
for key in keys:
multiTrack[key] = self[key].select(a, b)
return multiTrack
# TODO: should this be deprecated in favor of / should this call - the more general time_warp function?
def scale_duration(self, factor):
if factor != 1:
for t in self.values():
if isinstance(t, Partition):
t.time *= (
factor
) # last time parameter IS duration, so no worries about duration
elif isinstance(t, TimeValue) or isinstance(t, Event):
if factor > 1: # make room for expanded times
t.duration = int(t.duration * factor)
t.time *= factor
else:
t.time *= factor
t.duration = int(t.duration * factor)
else:
raise NotImplementedError # wave?
def time_warp(self, x, y):
"""in-place"""
for track in iter(self.values()):
track.time_warp(x, y)
default_suffix = ".mtt"
@classmethod
def read(cls, name):
"""Loads info about stored tracks from name, adding extension if missing,
and loads tracks by calling read(<name without extension>) for them.
"""
name_wo_ext = os.path.splitext(name)[
0
] # TODO: upgrade all path stuff to pathlib
if name == name_wo_ext:
name += cls.default_suffix
with open(name, "rb") as mtt_file:
track_infos = json.load(mtt_file)
self = cls()
for track_type_name, track_info_list in track_infos:
track_type = globals()[track_type_name]
track_info: UserDict = UserDict(track_info_list)
track = track_type.read(name_wo_ext, **track_info)
self[track_info["track_name"]] = track
return self
@classmethod
def read_edf(cls, path):
raise NotImplementedError
# TODO: adapt
# the following is copied from elsewhere and won't work as is
import pyedflib
with pyedflib.EdfReader(str(path)) as f:
labels = f.getSignalLabels()
for label in labels:
index = labels.index(label)
wav = Wave(f.readSignal(index), f.getSampleFrequency(index))
wav.label = label
wav.path = f.with_name(f.stem + "-" + label + ".wav")
wav.min = f.getPhysicalMinimum(index)
wav.max = f.getPhysicalMaximum(index)
wav.unit = f.getPhysicalDimension(index)
# self.add_view(wav, panel_index=panel_index, y_min=wav.min, y_max=wav.max)
@classmethod
def read_xdf(cls, path):
raise NotImplementedError
import openxdf
# TODO: below is a place holder and needs to be finalize
xdf = openxdf.OpenXDF(path)
signals = openxdf.Signal(xdf, path.with_suffix(".nkamp"))
# TODO: automate this, why are the xdf.header names different from signals.list_channels?
for label in ["ECG", "Chin"]:
# logger.info(f'reading {label} channel')
sig = signals.read_file(label)[label]
wav = Wave(sig.ravel(), 200)
wav.label = label
# wav.path = file.with_name(file.stem + '-' + label + '.wav')
wav.min = -3200
wav.max = 3200
wav.unit = "1"
# self.add_view(wav, panel_index=panel_index, y_min=wav.min, y_max=wav.max)
def write(self, name):
"""Saves info about stored tracks to name, adding extension if missing,
and calls write(<name without extension>) for the contained tracks.
Note!: not saving wav as long as wav writing/reading is erroneous
"""
name_wo_ext = os.path.splitext(name)[0]
if name == name_wo_ext:
name += self.default_suffix
track_infos = [] # list of dicts storing track info
for track_name, track in sorted(self.items()):
if track_name == "wav":
continue
track_info = {
"track_name": track_name,
"fs": int(track.get_fs()),
"duration": int(track.get_duration()),
}
if type(track) == Value:
track_info.update({"value_type": type(track.get_value()).__name__})
track.write(name_wo_ext, **track_info)
track_infos.append((type(track).__name__, sorted(track_info.items())))
with open(name, "wt") as mtt_file:
json.dump(track_infos, mtt_file)
| [
"openxdf.OpenXDF",
"collections.UserDict",
"os.path.splitext",
"json.load",
"collections.UserDict.__setitem__",
"copy.deepcopy",
"collections.UserDict.__init__",
"json.dump"
] | [((373, 405), 'collections.UserDict.__init__', 'UserDict.__init__', (['self', 'mapping'], {}), '(self, mapping)\n', (390, 405), False, 'from collections import UserDict\n'), ((2522, 2560), 'collections.UserDict.__setitem__', 'UserDict.__setitem__', (['self', 'key', 'value'], {}), '(self, key, value)\n', (2542, 2560), False, 'from collections import UserDict\n'), ((6901, 6922), 'openxdf.OpenXDF', 'openxdf.OpenXDF', (['path'], {}), '(path)\n', (6916, 6922), False, 'import openxdf\n'), ((354, 364), 'collections.UserDict', 'UserDict', ([], {}), '()\n', (362, 364), False, 'from collections import UserDict\n'), ((2777, 2797), 'copy.deepcopy', 'copy.deepcopy', (['other'], {}), '(other)\n', (2790, 2797), False, 'import copy\n'), ((5334, 5356), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (5350, 5356), False, 'import os\n'), ((5565, 5584), 'json.load', 'json.load', (['mtt_file'], {}), '(mtt_file)\n', (5574, 5584), False, 'import json\n'), ((5754, 5779), 'collections.UserDict', 'UserDict', (['track_info_list'], {}), '(track_info_list)\n', (5762, 5779), False, 'from collections import UserDict\n'), ((7836, 7858), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (7852, 7858), False, 'import os\n'), ((8602, 8634), 'json.dump', 'json.dump', (['track_infos', 'mtt_file'], {}), '(track_infos, mtt_file)\n', (8611, 8634), False, 'import json\n')] |
import cv2
import matplotlib.pyplot as plt
import glob
import os
filepath ="afm_dataset4/20211126/"
files = [line.rstrip() for line in open((filepath+"sep_trainlist.txt"))]
files = glob.glob("orig_img/20211112/*")
def variance_of_laplacian(image):
# compute the Laplacian of the image and then return the focus
# measure, which is simply the variance of the Laplacian
return cv2.Laplacian(image, cv2.CV_64F).var()
gt_fm = []
input_fm = []
for i, file in enumerate(files):
image = cv2.imread(file)
# image = cv2.imread(filepath + file)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fm = variance_of_laplacian(image)
input_fm.append(fm)
# file_gt = "/".join(file.split("/")[:-1] + ["gt.png"])
# image = cv2.imread(filepath + file_gt)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# fm = variance_of_laplacian(image)
# gt_fm.append(fm)
# if (i+1)%25==0:
if fm < 500:
text = "Blurry"
elif fm>2000:
text = "Noisy"
else:
text = "Not blurry"
# show the image
os.makedirs("blur/"+file[:-9], exist_ok=True)
cv2.putText(image, "{}: {:.2f}".format(text, fm), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)
cv2.imwrite("blur/"+file, image)
# fig = plt.figure()
# plt.imshow(image)
# fig.savefig("blur/"+file)
print("iter", i)
# print("gt:", sum(gt_fm)/len(gt_fm))
print("input:", sum(input_fm)/len(input_fm))
fig = plt.figure()
plt.scatter(list(range(len(input_fm))), input_fm)
# plt.scatter(list(range(len(gt_fm))), gt_fm)
fig.savefig("img_1126.png")
# print("gt:", sum(gt_fm)/len(gt_fm))
# print("input:", sum(input_fm)/len(input_fm))
# print(len(gt_fm)) | [
"cv2.imwrite",
"cv2.Laplacian",
"os.makedirs",
"matplotlib.pyplot.figure",
"cv2.cvtColor",
"cv2.imread",
"glob.glob"
] | [((184, 216), 'glob.glob', 'glob.glob', (['"""orig_img/20211112/*"""'], {}), "('orig_img/20211112/*')\n", (193, 216), False, 'import glob\n'), ((1434, 1446), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1444, 1446), True, 'import matplotlib.pyplot as plt\n'), ((492, 508), 'cv2.imread', 'cv2.imread', (['file'], {}), '(file)\n', (502, 508), False, 'import cv2\n'), ((563, 602), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (575, 602), False, 'import cv2\n'), ((1052, 1099), 'os.makedirs', 'os.makedirs', (["('blur/' + file[:-9])"], {'exist_ok': '(True)'}), "('blur/' + file[:-9], exist_ok=True)\n", (1063, 1099), False, 'import os\n'), ((1213, 1247), 'cv2.imwrite', 'cv2.imwrite', (["('blur/' + file)", 'image'], {}), "('blur/' + file, image)\n", (1224, 1247), False, 'import cv2\n'), ((381, 413), 'cv2.Laplacian', 'cv2.Laplacian', (['image', 'cv2.CV_64F'], {}), '(image, cv2.CV_64F)\n', (394, 413), False, 'import cv2\n')] |
import os
def check_path(path):
if not path or not path.strip() or os.path.exists(path):
return
os.makedirs(path)
pass | [
"os.path.exists",
"os.makedirs"
] | [((113, 130), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (124, 130), False, 'import os\n'), ((72, 92), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (86, 92), False, 'import os\n')] |
# Author: <NAME>, <NAME>, <NAME>
# Date: 2020/11/27
"""Compare the performance of different classifier and train the best model given cross_validate results .
Usage: src/clf_comparison.py <input_file> <input_file1> <output_file> <output_file1>
Options:
<input_file> Path (including filename and file extension) to transformed train file
<input_file1> Path (including filename and file extension) to transformed test file
<output_file> Path (including filename and file extension) to cross validate result file
<output_file1> Path (including filename and file extension) to store untuned model predictions
"""
#import packages
from docopt import docopt
import pandas as pd
import sys
import os
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import (
cross_validate,
GridSearchCV,
RandomizedSearchCV
)
from joblib import dump, load
from sklearn.metrics import f1_score, make_scorer
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
opt = docopt(__doc__)
def main(input_file, input_file1, output_file, output_file1):
# read train_df.csv
train = pd.read_csv(input_file)
test = pd.read_csv(input_file1)
# create split the train_df
X_train, y_train = train.drop(columns=["quality_level"]), train["quality_level"]
X_test, y_test = test.drop(columns=["quality_level"]), test["quality_level"]
# check if target folder exists
try:
os.makedirs(os.path.dirname(output_file))
except FileExistsError:
pass
# define classifiers
classifiers = {
"Logistic_Regression": LogisticRegression(random_state = 123, class_weight = 'balanced'),
"Random_Forest": RandomForestClassifier(random_state = 123, class_weight = 'balanced'),
"DummyClassifier": DummyClassifier(random_state = 123),
"SVC" : SVC(random_state = 123, class_weight = 'balanced'),
"K_Nearest_Neighbors": KNeighborsClassifier()
}
f1 = make_scorer(f1_score, average = 'weighted', labels = ['Excellent'])
def score_with_metrics(models, scoring=f1):
"""
Return cross-validation scores for given models as a dataframe.
Parameters
----------
models : dict
a dictionary with names and scikit-learn models
scoring : list/dict/string
scoring parameter values for cross-validation
Returns
----------
None
"""
results_df = {}
for (name, model) in models.items():
clf = model
scores = cross_validate(
clf, X_train, y_train, return_train_score=True, scoring=scoring
)
df = pd.DataFrame(scores)
results_df[name] = df.mean()
clf.fit(X_train, y_train)
# save the model
dump(clf, 'results/'+name+'.joblib')
return pd.DataFrame(results_df)
res = score_with_metrics(classifiers)
res = res.transpose()
best_model = res.idxmax()['test_score']
best_clf = classifiers[best_model]
best_clf.fit(X_train, y_train)
pred = best_clf.predict(X_test)
test_scores = f1_score(y_test, pred, average = 'weighted', labels = ['Excellent'])
best_score = pd.DataFrame({'Model': [best_model], 'Test_Score':[test_scores]})
res.to_csv(output_file, index = True)
best_score.to_csv(output_file1, index = False)
# perform hyperparameter tuning on two of the best models
param_RF = {'n_estimators':[int(i) for i in np.linspace(start = 100, stop = 1000, num = 10).tolist()],
'max_depth':[int(i) for i in np.linspace(start = 10, stop = 1000, num = 100).tolist()]}
param_log = {
"C": [0.0001, 0.001, 0.01, 0.1, 1.0, 10, 100, 1000]}
rf_search = RandomizedSearchCV(classifiers['Random_Forest'],
param_RF, cv = 5,
n_jobs = -1,
scoring = f1,
n_iter = 20, random_state = 123)
log_search = GridSearchCV(classifiers['Logistic_Regression'],
param_log, cv = 5,
n_jobs = -1,
scoring = f1
)
rf_search.fit(X_train, y_train)
log_search.fit(X_train, y_train)
rf_best = rf_search.best_estimator_
log_best = log_search.best_estimator_
tuned_results = {}
rf_score = cross_validate(rf_best, X_train, y_train, return_train_score=True, scoring=f1)
log_score = cross_validate(log_best, X_train, y_train, return_train_score=True, scoring=f1)
tuned_results['Random Forest'] = pd.DataFrame(rf_score).mean()
tuned_results['Logistic Regression'] = pd.DataFrame(log_score).mean()
tuned_results = pd.DataFrame(tuned_results).transpose()
tuned_results.to_csv('results/tuned_cv_results.csv', index = True)
rf_best.fit(X_train, y_train)
dump(rf_best, 'results/Bestrfmodel.joblib')
pred = rf_best.predict(X_test)
best_f1 = f1_score(y_test, pred, average = 'weighted', labels = ['Excellent'])
best_tuned_model_test = pd.DataFrame({'Model': ['Random Forest'], 'Test_Score':[best_f1]})
best_tuned_model_test.to_csv('results/best_tuned_model.csv', index = False)
if __name__ == "__main__":
main(opt["<input_file>"], opt["<input_file1>"], opt["<output_file>"], opt["<output_file1>"])
| [
"sklearn.model_selection.GridSearchCV",
"sklearn.svm.SVC",
"sklearn.metrics.f1_score",
"pandas.read_csv",
"pandas.DataFrame",
"sklearn.model_selection.cross_validate",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.metrics.make_scorer",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.linea... | [((1218, 1233), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (1224, 1233), False, 'from docopt import docopt\n'), ((1179, 1210), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1200, 1210), False, 'import warnings\n'), ((1341, 1364), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {}), '(input_file)\n', (1352, 1364), True, 'import pandas as pd\n'), ((1376, 1400), 'pandas.read_csv', 'pd.read_csv', (['input_file1'], {}), '(input_file1)\n', (1387, 1400), True, 'import pandas as pd\n'), ((2169, 2232), 'sklearn.metrics.make_scorer', 'make_scorer', (['f1_score'], {'average': '"""weighted"""', 'labels': "['Excellent']"}), "(f1_score, average='weighted', labels=['Excellent'])\n", (2180, 2232), False, 'from sklearn.metrics import f1_score, make_scorer\n'), ((3352, 3416), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'pred'], {'average': '"""weighted"""', 'labels': "['Excellent']"}), "(y_test, pred, average='weighted', labels=['Excellent'])\n", (3360, 3416), False, 'from sklearn.metrics import f1_score, make_scorer\n'), ((3438, 3504), 'pandas.DataFrame', 'pd.DataFrame', (["{'Model': [best_model], 'Test_Score': [test_scores]}"], {}), "({'Model': [best_model], 'Test_Score': [test_scores]})\n", (3450, 3504), True, 'import pandas as pd\n'), ((3966, 4086), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (["classifiers['Random_Forest']", 'param_RF'], {'cv': '(5)', 'n_jobs': '(-1)', 'scoring': 'f1', 'n_iter': '(20)', 'random_state': '(123)'}), "(classifiers['Random_Forest'], param_RF, cv=5, n_jobs=-1,\n scoring=f1, n_iter=20, random_state=123)\n", (3984, 4086), False, 'from sklearn.model_selection import cross_validate, GridSearchCV, RandomizedSearchCV\n'), ((4271, 4363), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (["classifiers['Logistic_Regression']", 'param_log'], {'cv': '(5)', 'n_jobs': '(-1)', 'scoring': 'f1'}), "(classifiers['Logistic_Regression'], 
param_log, cv=5, n_jobs=-1,\n scoring=f1)\n", (4283, 4363), False, 'from sklearn.model_selection import cross_validate, GridSearchCV, RandomizedSearchCV\n'), ((4716, 4794), 'sklearn.model_selection.cross_validate', 'cross_validate', (['rf_best', 'X_train', 'y_train'], {'return_train_score': '(True)', 'scoring': 'f1'}), '(rf_best, X_train, y_train, return_train_score=True, scoring=f1)\n', (4730, 4794), False, 'from sklearn.model_selection import cross_validate, GridSearchCV, RandomizedSearchCV\n'), ((4811, 4890), 'sklearn.model_selection.cross_validate', 'cross_validate', (['log_best', 'X_train', 'y_train'], {'return_train_score': '(True)', 'scoring': 'f1'}), '(log_best, X_train, y_train, return_train_score=True, scoring=f1)\n', (4825, 4890), False, 'from sklearn.model_selection import cross_validate, GridSearchCV, RandomizedSearchCV\n'), ((5201, 5244), 'joblib.dump', 'dump', (['rf_best', '"""results/Bestrfmodel.joblib"""'], {}), "(rf_best, 'results/Bestrfmodel.joblib')\n", (5205, 5244), False, 'from joblib import dump, load\n'), ((5294, 5358), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'pred'], {'average': '"""weighted"""', 'labels': "['Excellent']"}), "(y_test, pred, average='weighted', labels=['Excellent'])\n", (5302, 5358), False, 'from sklearn.metrics import f1_score, make_scorer\n'), ((5391, 5458), 'pandas.DataFrame', 'pd.DataFrame', (["{'Model': ['Random Forest'], 'Test_Score': [best_f1]}"], {}), "({'Model': ['Random Forest'], 'Test_Score': [best_f1]})\n", (5403, 5458), True, 'import pandas as pd\n'), ((1817, 1878), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(123)', 'class_weight': '"""balanced"""'}), "(random_state=123, class_weight='balanced')\n", (1835, 1878), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1909, 1974), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(123)', 'class_weight': '"""balanced"""'}), "(random_state=123, 
class_weight='balanced')\n", (1931, 1974), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2003, 2036), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'random_state': '(123)'}), '(random_state=123)\n', (2018, 2036), False, 'from sklearn.dummy import DummyClassifier\n'), ((2052, 2098), 'sklearn.svm.SVC', 'SVC', ([], {'random_state': '(123)', 'class_weight': '"""balanced"""'}), "(random_state=123, class_weight='balanced')\n", (2055, 2098), False, 'from sklearn.svm import SVC\n'), ((2131, 2153), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (2151, 2153), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3084, 3108), 'pandas.DataFrame', 'pd.DataFrame', (['results_df'], {}), '(results_df)\n', (3096, 3108), True, 'import pandas as pd\n'), ((1670, 1698), 'os.path.dirname', 'os.path.dirname', (['output_file'], {}), '(output_file)\n', (1685, 1698), False, 'import os\n'), ((2764, 2843), 'sklearn.model_selection.cross_validate', 'cross_validate', (['clf', 'X_train', 'y_train'], {'return_train_score': '(True)', 'scoring': 'scoring'}), '(clf, X_train, y_train, return_train_score=True, scoring=scoring)\n', (2778, 2843), False, 'from sklearn.model_selection import cross_validate, GridSearchCV, RandomizedSearchCV\n'), ((2891, 2911), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {}), '(scores)\n', (2903, 2911), True, 'import pandas as pd\n'), ((3032, 3072), 'joblib.dump', 'dump', (['clf', "('results/' + name + '.joblib')"], {}), "(clf, 'results/' + name + '.joblib')\n", (3036, 3072), False, 'from joblib import dump, load\n'), ((4928, 4950), 'pandas.DataFrame', 'pd.DataFrame', (['rf_score'], {}), '(rf_score)\n', (4940, 4950), True, 'import pandas as pd\n'), ((5001, 5024), 'pandas.DataFrame', 'pd.DataFrame', (['log_score'], {}), '(log_score)\n', (5013, 5024), True, 'import pandas as pd\n'), ((5052, 5079), 'pandas.DataFrame', 'pd.DataFrame', (['tuned_results'], {}), '(tuned_results)\n', (5064, 
5079), True, 'import pandas as pd\n'), ((3712, 3753), 'numpy.linspace', 'np.linspace', ([], {'start': '(100)', 'stop': '(1000)', 'num': '(10)'}), '(start=100, stop=1000, num=10)\n', (3723, 3753), True, 'import numpy as np\n'), ((3811, 3852), 'numpy.linspace', 'np.linspace', ([], {'start': '(10)', 'stop': '(1000)', 'num': '(100)'}), '(start=10, stop=1000, num=100)\n', (3822, 3852), True, 'import numpy as np\n')] |
import glob
import os
import numpy as np
import nibabel as nb
import argparse
def get_dir_list(train_path):
fnames = glob.glob(train_path)
list_train = []
for k, f in enumerate(fnames):
list_train.append(os.path.split(f)[0])
return list_train
def ParseData(list_data):
'''
Creates a list of all the slices
'''
data_instance = []
for dir_name in list_data:
fname = glob.glob(os.path.join(dir_name, '*seg.nii.gz'))
f = nb.load(fname[0])
img = f.get_fdata().astype('float32')
h, w, d = f.shape # sag, cor, ax
for slc in range(h):
if np.sum(img[slc, :, :]) != 0:
data_instance.append([dir_name, 'sag', slc])
for slc in range(w):
if np.sum(img[:, slc, :]) != 0:
data_instance.append([dir_name, 'cor', slc])
for slc in range(d):
if np.sum(img[:, :, slc]) != 0:
data_instance.append([dir_name, 'ax', slc])
print('Number of images: ', len(data_instance))
return data_instance
def get_slice(dir_name, orient, slc, cont, isNorm=True):
'''
takes the directory name, orientation, slice number and reads a slice, zero pad/crop and normalize
'''
# ---- get slice for given contrast image ---- #
fname = glob.glob(os.path.join(dir_name, cont))
f = nb.load(fname[0])
img = np.squeeze(f.get_fdata()).astype('float32')
if orient == 'sag':
x = img[slc, :, :]
elif orient == 'cor':
x = img[:, slc, :]
else:
x = img[:, :, slc]
return np.expand_dims(x, 0)
def get_batchsize_one(dir_name, orient, slc):
'''
takes index and generates one sample of input data
'''
# ---- get images ---- #
x_t1 = get_slice(dir_name, orient, slc, '*flair.nii.gz')
x_t2 = get_slice(dir_name, orient, slc, '*t1.nii.gz')
x_t1ce = get_slice(dir_name, orient, slc, '*t2.nii.gz')
x_flair = get_slice(dir_name, orient, slc, '*t1ce.nii.gz')
x_seg = get_slice(dir_name, orient, slc, '*seg.nii.gz', isNorm=False).astype('int')
x_seg[x_seg==4] = 3
x_inp = np.concatenate((x_t1, x_t2, x_t1ce, x_flair, x_seg), 0)
# (flair, t1, t2, t1ce)
return x_inp
def generate_data(src_path, dst_path):
data_instance = ParseData(get_dir_list(src_path))
for k, data in enumerate(data_instance):
print(k, ' of ', len(data_instance))
dir_name, orient, slc = data[0], data[1], data[2]
x_inp = get_batchsize_one(dir_name, orient, slc)
fname = os.path.join(dst_path, str(k)+'.npy')
np.save(fname, x_inp)
# ---- Arguments ---- #
ap = argparse.ArgumentParser()
ap.add_argument("-sp", "--src_path", type=str, default='./data/nifti/train/*/*seg.nii.gz')
ap.add_argument("-dp", "--dst_path", type=str, default='./data/np/train/')
args = vars(ap.parse_args())
if __name__ == '__main__':
'''
Script to convert nifti images to numpy array for faster loading
'''
src_path = args['src_path']
dst_path = args['dst_path']
generate_data(src_path, dst_path)
| [
"argparse.ArgumentParser",
"nibabel.load",
"os.path.join",
"os.path.split",
"numpy.sum",
"numpy.concatenate",
"numpy.expand_dims",
"numpy.save",
"glob.glob"
] | [((2640, 2665), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2663, 2665), False, 'import argparse\n'), ((122, 143), 'glob.glob', 'glob.glob', (['train_path'], {}), '(train_path)\n', (131, 143), False, 'import glob\n'), ((1359, 1376), 'nibabel.load', 'nb.load', (['fname[0]'], {}), '(fname[0])\n', (1366, 1376), True, 'import nibabel as nb\n'), ((1583, 1603), 'numpy.expand_dims', 'np.expand_dims', (['x', '(0)'], {}), '(x, 0)\n', (1597, 1603), True, 'import numpy as np\n'), ((2118, 2173), 'numpy.concatenate', 'np.concatenate', (['(x_t1, x_t2, x_t1ce, x_flair, x_seg)', '(0)'], {}), '((x_t1, x_t2, x_t1ce, x_flair, x_seg), 0)\n', (2132, 2173), True, 'import numpy as np\n'), ((484, 501), 'nibabel.load', 'nb.load', (['fname[0]'], {}), '(fname[0])\n', (491, 501), True, 'import nibabel as nb\n'), ((1321, 1349), 'os.path.join', 'os.path.join', (['dir_name', 'cont'], {}), '(dir_name, cont)\n', (1333, 1349), False, 'import os\n'), ((2588, 2609), 'numpy.save', 'np.save', (['fname', 'x_inp'], {}), '(fname, x_inp)\n', (2595, 2609), True, 'import numpy as np\n'), ((433, 470), 'os.path.join', 'os.path.join', (['dir_name', '"""*seg.nii.gz"""'], {}), "(dir_name, '*seg.nii.gz')\n", (445, 470), False, 'import os\n'), ((229, 245), 'os.path.split', 'os.path.split', (['f'], {}), '(f)\n', (242, 245), False, 'import os\n'), ((633, 655), 'numpy.sum', 'np.sum', (['img[slc, :, :]'], {}), '(img[slc, :, :])\n', (639, 655), True, 'import numpy as np\n'), ((767, 789), 'numpy.sum', 'np.sum', (['img[:, slc, :]'], {}), '(img[:, slc, :])\n', (773, 789), True, 'import numpy as np\n'), ((901, 923), 'numpy.sum', 'np.sum', (['img[:, :, slc]'], {}), '(img[:, :, slc])\n', (907, 923), True, 'import numpy as np\n')] |
#moduleForShowingJudges
#cmd /K "$(FULL_CURRENT_PATH)"
#cd ~/Documents/GitHub/Keyboard-Biometric-Project/Project_Tuples
#sudo python -m pip install statistics
#python analyzeData.py
"""
Author: <NAME> and <NAME>
Date: 3/09/2018
Program Description: This code can record the
Press Time and Flight Time of a tuple as a user
types a passage and it saves a matrix to a file.
"""
__version__ = '1.0'
__author__ = '<NAME>'
"""STANDARD LIBRARY IMPORTS"""
import json
import platform
import os
"""LOCAL LIBRARY IMPORTS"""
import moduleForSavingTimelines as ST
import moduleForRecordingWithGUI as GUI
import moduleForCreatingPasswordSentence as PS
import moduleForDeconstructingTimelines as DT
import moduleForAuthenticatingUsers as AU
import moduleForFindingTuples as FT
import moduleForGettingSentence as GS
import moduleForPlotting as P
"""FOLDER IMPORTS"""
infile = "data/451.txt"# passage for training people.
#tupleList = FT.allPeople()
tupleList = ["his", "the","ing"]
location = ""
if(platform.system() == "Windows"):#WINDOWS
name = input("What is your name: ")
while(not(location in ["y","n","z","c"])):
location = input("Is this training data?(y/n) ")
if(location == "n"):
location = "Applying/"
passage = ("The thing likes learning his history.There the thing sings.This is what the thing sings.").split(".")
elif(location == "z"):
os.chdir("judgeslib")
P.plot(tupleList)
elif(location == "c"):
os.chdir("judgeslib")
DT.clearAll()
else:
location = "Database/"
passages = open(infile,"r").read().split(".")
passage2 = passages[1].split(",")
passage = passages + passage2
passage.remove(passages[1])
"""TYPE THE PASSAGE AND RECORD THE TIME LINE"""
pressTimeLine,pressCharTimeLine,releaseTimeLine,releaseCharTimeLine = GUI.start_recording(passage)
os.chdir("judgeslib/")
ST.saveTimeLine(pressTimeLine,pressCharTimeLine,name,location)
DT.userSummary(name,location)
if(location == "Applying/"):
#AU.newData(tupleList)
print("Now to verify")
AU.verify(tupleList,name)
#IMPLIMENT MATPLOTLIB
#IMPLIMENT CLEAR FEATURE
| [
"moduleForDeconstructingTimelines.userSummary",
"moduleForDeconstructingTimelines.clearAll",
"moduleForAuthenticatingUsers.verify",
"moduleForRecordingWithGUI.start_recording",
"os.chdir",
"platform.system",
"moduleForSavingTimelines.saveTimeLine",
"moduleForPlotting.plot"
] | [((1745, 1773), 'moduleForRecordingWithGUI.start_recording', 'GUI.start_recording', (['passage'], {}), '(passage)\n', (1764, 1773), True, 'import moduleForRecordingWithGUI as GUI\n'), ((1774, 1796), 'os.chdir', 'os.chdir', (['"""judgeslib/"""'], {}), "('judgeslib/')\n", (1782, 1796), False, 'import os\n'), ((1797, 1862), 'moduleForSavingTimelines.saveTimeLine', 'ST.saveTimeLine', (['pressTimeLine', 'pressCharTimeLine', 'name', 'location'], {}), '(pressTimeLine, pressCharTimeLine, name, location)\n', (1812, 1862), True, 'import moduleForSavingTimelines as ST\n'), ((1860, 1890), 'moduleForDeconstructingTimelines.userSummary', 'DT.userSummary', (['name', 'location'], {}), '(name, location)\n', (1874, 1890), True, 'import moduleForDeconstructingTimelines as DT\n'), ((992, 1009), 'platform.system', 'platform.system', ([], {}), '()\n', (1007, 1009), False, 'import platform\n'), ((1969, 1995), 'moduleForAuthenticatingUsers.verify', 'AU.verify', (['tupleList', 'name'], {}), '(tupleList, name)\n', (1978, 1995), True, 'import moduleForAuthenticatingUsers as AU\n'), ((1351, 1372), 'os.chdir', 'os.chdir', (['"""judgeslib"""'], {}), "('judgeslib')\n", (1359, 1372), False, 'import os\n'), ((1374, 1391), 'moduleForPlotting.plot', 'P.plot', (['tupleList'], {}), '(tupleList)\n', (1380, 1391), True, 'import moduleForPlotting as P\n'), ((1416, 1437), 'os.chdir', 'os.chdir', (['"""judgeslib"""'], {}), "('judgeslib')\n", (1424, 1437), False, 'import os\n'), ((1439, 1452), 'moduleForDeconstructingTimelines.clearAll', 'DT.clearAll', ([], {}), '()\n', (1450, 1452), True, 'import moduleForDeconstructingTimelines as DT\n')] |
import unittest
def get_formatted_name(first, last, middle = ""):
"""生成整洁的姓名"""
if middle:
full_name = f"{first} {middle} {last}"
else:
full_name = f"{first} {last}"
return full_name.title()
class NamesTestCase(unittest.TestCase): #创建一个测试类,继承于unittest.TestCase 这样才能Python自动测试
"""测试get_formatted_name函数"""
def test_first_last_name(self): # 具体的测试方法 运行这个测试案例时,所有以test开头的方法都会被自动执行
"""能够正确的处理像<NAME>这样的姓名吗"""
formatted_name = get_formatted_name("jains", "jpolin") # 测试方法的具体实现
self.assertEqual(formatted_name, "<NAME>") # 断言,执行结果是否个期望的结果一致
def test_first_last_middle_name(self): # test开头
"""能够正确的处理像<NAME>这样的姓名吗"""
formatted_name = get_formatted_name("wolfgang", "mozart", "amadeus")
self.assertEqual(formatted_name, "<NAME>")
if __name__ == "__main__": # __name__是一个程序执行时的特殊变量,如果作为主程序执行时,这个值就是__main__
unittest.main() # 运行测试案例 | [
"unittest.main"
] | [((903, 918), 'unittest.main', 'unittest.main', ([], {}), '()\n', (916, 918), False, 'import unittest\n')] |
from __future__ import unicode_literals
from django.db import models
from hospital.models import Hospital
# Create your models here.
class Donor(models.Model):
name = models.CharField(max_length = 200)
username = models.CharField(max_length = 200)
password = models.CharField(max_length = 200)
gender = models.CharField(max_length = 1)
blood_type = models.CharField(max_length=3)
linking_agent = models.ForeignKey(Hospital, on_delete = models.CASCADE)
DOB = models.DateField()
address = models.CharField(max_length = 200)
phone = models.CharField(max_length = 200)
last_verified = models.DateField()
latitude = models.DecimalField(decimal_places = 2, max_digits = 5)
longitude = models.DecimalField(decimal_places = 2, max_digits = 5)
| [
"django.db.models.DecimalField",
"django.db.models.DateField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((173, 205), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (189, 205), False, 'from django.db import models\n'), ((223, 255), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (239, 255), False, 'from django.db import models\n'), ((273, 305), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (289, 305), False, 'from django.db import models\n'), ((321, 351), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)'}), '(max_length=1)\n', (337, 351), False, 'from django.db import models\n'), ((371, 401), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)'}), '(max_length=3)\n', (387, 401), False, 'from django.db import models\n'), ((422, 475), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Hospital'], {'on_delete': 'models.CASCADE'}), '(Hospital, on_delete=models.CASCADE)\n', (439, 475), False, 'from django.db import models\n'), ((488, 506), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (504, 506), False, 'from django.db import models\n'), ((521, 553), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (537, 553), False, 'from django.db import models\n'), ((568, 600), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (584, 600), False, 'from django.db import models\n'), ((623, 641), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (639, 641), False, 'from django.db import models\n'), ((657, 708), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(5)'}), '(decimal_places=2, max_digits=5)\n', (676, 708), False, 'from django.db import models\n'), ((729, 780), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': 
'(5)'}), '(decimal_places=2, max_digits=5)\n', (748, 780), False, 'from django.db import models\n')] |
from unittest import TestCase
from lf3py.test.helper import data_provider
from tests.helper.example.flowapi import perform_api
class TestHandler(TestCase):
@data_provider([
(
{
'path': '/models',
'httpMethod': 'GET',
'headers': {},
'queryStringParameters': {},
},
{
'statusCode': 200,
'headers': {'Content-Type': 'application/json'},
'body': {
'models': [
{'id': 1234},
],
},
},
),
])
def test_index(self, event: dict, expected: dict):
self.assertEqual(perform_api(event), expected)
| [
"lf3py.test.helper.data_provider",
"tests.helper.example.flowapi.perform_api"
] | [((165, 383), 'lf3py.test.helper.data_provider', 'data_provider', (["[({'path': '/models', 'httpMethod': 'GET', 'headers': {},\n 'queryStringParameters': {}}, {'statusCode': 200, 'headers': {\n 'Content-Type': 'application/json'}, 'body': {'models': [{'id': 1234}]}})]"], {}), "([({'path': '/models', 'httpMethod': 'GET', 'headers': {},\n 'queryStringParameters': {}}, {'statusCode': 200, 'headers': {\n 'Content-Type': 'application/json'}, 'body': {'models': [{'id': 1234}]}})])\n", (178, 383), False, 'from lf3py.test.helper import data_provider\n'), ((733, 751), 'tests.helper.example.flowapi.perform_api', 'perform_api', (['event'], {}), '(event)\n', (744, 751), False, 'from tests.helper.example.flowapi import perform_api\n')] |
from anndata import AnnData
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from joblib import delayed
from tqdm import tqdm
import sys
import igraph
from .utils import ProgressParallel
from .. import logging as logg
from .. import settings
def pseudotime(adata: AnnData, n_jobs: int = 1, n_map: int = 1, copy: bool = False):
"""\
Compute pseudotime.
Projects cells onto the tree, and uses distance from the root as a pseudotime value.
Parameters
----------
adata
Annotated data matrix.
n_jobs
Number of cpu processes to use in case of performing multiple mapping.
n_map
number of probabilistic mapping of cells onto the tree to use. If n_map=1 then likelihood cell mapping is used.
copy
Return a copy instead of writing to adata.
Returns
-------
adata : anndata.AnnData
if `copy=True` it returns or else add fields to `adata`:
`.obs['edge']`
assigned edge.
`.obs['t']`
assigned pseudotime value.
`.obs['seg']`
assigned segment of the tree.
`.obs['milestone']`
assigned region surrounding forks and tips.
`.uns['pseudotime_list']`
list of cell projection from all mappings.
"""
if "root" not in adata.uns["graph"]:
raise ValueError(
"You need to run `tl.root` or `tl.roots` before projecting cells."
)
adata = adata.copy() if copy else adata
graph = adata.uns["graph"]
reassign, recolor = False, False
if "milestones" in adata.obs:
if adata.obs.milestones.dtype.name == "category":
tmp_mil = adata.obs.milestones.cat.categories.copy()
reassign = True
if "milestones_colors" in adata.uns:
tmp_mil_col = adata.uns["milestones_colors"].copy()
recolor = True
logg.info("projecting cells onto the principal graph", reset=True)
if n_map == 1:
df_l = [map_cells(graph, multi=False)]
else:
df_l = ProgressParallel(
n_jobs=n_jobs, total=n_map, file=sys.stdout, desc=" mappings"
)(delayed(map_cells)(graph=graph, multi=True) for m in range(n_map))
# formatting cell projection data
df_summary = df_l[0]
df_summary["seg"] = df_summary["seg"].astype("category")
df_summary["edge"] = df_summary["edge"].astype("category")
# remove pre-existing palette to avoid errors with plotting
if "seg_colors" in adata.uns:
del adata.uns["seg_colors"]
if set(df_summary.columns.tolist()).issubset(adata.obs.columns):
adata.obs[df_summary.columns] = df_summary
else:
adata.obs = pd.concat([adata.obs, df_summary], axis=1)
# list(map(lambda x: x.column))
# todict=list(map(lambda x: dict(zip(["cells"]+["_"+s for s in x.columns.tolist()],
# [x.index.tolist()]+x.to_numpy().T.tolist())),df_l))
names = np.arange(len(df_l)).astype(str).tolist()
# vals = todict
dictionary = dict(zip(names, df_l))
adata.uns["pseudotime_list"] = dictionary
if n_map > 1:
adata.obs["t_sd"] = (
pd.concat(
list(
map(
lambda x: pd.Series(x["t"]),
list(adata.uns["pseudotime_list"].values()),
)
),
axis=1,
)
.apply(np.std, axis=1)
.values
)
milestones = pd.Series(index=adata.obs_names)
for seg in graph["pp_seg"].n:
cell_seg = adata.obs.loc[adata.obs["seg"] == seg, "t"]
if len(cell_seg) > 0:
milestones[
cell_seg.index[
(cell_seg - min(cell_seg) - (max(cell_seg - min(cell_seg)) / 2) < 0)
]
] = graph["pp_seg"].loc[int(seg), "from"]
milestones[
cell_seg.index[
(cell_seg - min(cell_seg) - (max(cell_seg - min(cell_seg)) / 2) > 0)
]
] = graph["pp_seg"].loc[int(seg), "to"]
adata.obs["milestones"] = milestones
adata.obs.milestones = (
adata.obs.milestones.astype(int).astype("str").astype("category")
)
adata.uns["graph"]["milestones"] = dict(
zip(
adata.obs.milestones.cat.categories,
adata.obs.milestones.cat.categories.astype(int),
)
)
while reassign:
if "tmp_mil_col" not in locals():
break
if len(tmp_mil_col) != len(adata.obs.milestones.cat.categories):
break
rename_milestones(adata, tmp_mil)
if recolor:
adata.uns["milestones_colors"] = tmp_mil_col
reassign = False
logg.info(" finished", time=True, end=" " if settings.verbosity > 2 else "\n")
logg.hint(
"added\n"
" .obs['edge'] assigned edge.\n"
" .obs['t'] pseudotime value.\n"
" .obs['seg'] segment of the tree assigned.\n"
" .obs['milestones'] milestone assigned.\n"
" .uns['pseudotime_list'] list of cell projection from all mappings."
)
return adata if copy else None
def map_cells(graph, multi=False):
import igraph
g = igraph.Graph.Adjacency((graph["B"] > 0).tolist(), mode="undirected")
# Add edge weights and node labels.
g.es["weight"] = graph["B"][graph["B"].nonzero()]
if multi:
rrm = (
np.apply_along_axis(
lambda x: np.random.choice(np.arange(len(x)), size=1, p=x),
axis=1,
arr=graph["R"],
)
).T.flatten()
else:
rrm = np.apply_along_axis(np.argmax, axis=1, arr=graph["R"])
def map_on_edges(v):
vcells = np.argwhere(rrm == v)
if vcells.shape[0] > 0:
nv = np.array(g.neighborhood(v, order=1))
nvd = np.array(g.shortest_paths(v, nv)[0])
spi = np.apply_along_axis(np.argmax, axis=1, arr=graph["R"][vcells, nv[1:]])
ndf = pd.DataFrame(
{
"cell": vcells.flatten(),
"v0": v,
"v1": nv[1:][spi],
"d": nvd[1:][spi],
}
)
p0 = graph["R"][vcells, v].flatten()
p1 = np.array(
list(
map(lambda x: graph["R"][vcells[x], ndf.v1[x]], range(len(vcells)))
)
).flatten()
alpha = np.random.uniform(size=len(vcells))
f = np.abs(
(np.sqrt(alpha * p1 ** 2 + (1 - alpha) * p0 ** 2) - p0) / (p1 - p0)
)
ndf["t"] = (
graph["pp_info"].loc[ndf.v0, "time"].values
+ (
graph["pp_info"].loc[ndf.v1, "time"].values
- graph["pp_info"].loc[ndf.v0, "time"].values
)
* alpha
)
ndf["seg"] = 0
isinfork = (graph["pp_info"].loc[ndf.v0, "PP"].isin(graph["forks"])).values
ndf.loc[isinfork, "seg"] = (
graph["pp_info"].loc[ndf.loc[isinfork, "v1"], "seg"].values
)
ndf.loc[~isinfork, "seg"] = (
graph["pp_info"].loc[ndf.loc[~isinfork, "v0"], "seg"].values
)
return ndf
else:
return None
df = list(map(map_on_edges, range(graph["B"].shape[1])))
df = pd.concat(df)
df.sort_values("cell", inplace=True)
df.index = graph["cells_fitted"]
df["edge"] = df.apply(lambda x: str(int(x[1])) + "|" + str(int(x[2])), axis=1)
df.drop(["cell", "v0", "v1", "d"], axis=1, inplace=True)
return df
def rename_milestones(adata, new, copy: bool = False):
adata = adata.copy() if copy else adata
adata.uns["graph"]["milestones"] = dict(
zip(new, list(adata.uns["graph"]["milestones"].values()))
)
adata.obs.milestones = adata.obs.milestones.cat.rename_categories(new)
return adata if copy else None
| [
"pandas.Series",
"numpy.sqrt",
"numpy.apply_along_axis",
"numpy.argwhere",
"joblib.delayed",
"pandas.concat"
] | [((3533, 3565), 'pandas.Series', 'pd.Series', ([], {'index': 'adata.obs_names'}), '(index=adata.obs_names)\n', (3542, 3565), True, 'import pandas as pd\n'), ((7500, 7513), 'pandas.concat', 'pd.concat', (['df'], {}), '(df)\n', (7509, 7513), True, 'import pandas as pd\n'), ((2709, 2751), 'pandas.concat', 'pd.concat', (['[adata.obs, df_summary]'], {'axis': '(1)'}), '([adata.obs, df_summary], axis=1)\n', (2718, 2751), True, 'import pandas as pd\n'), ((5702, 5756), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.argmax'], {'axis': '(1)', 'arr': "graph['R']"}), "(np.argmax, axis=1, arr=graph['R'])\n", (5721, 5756), True, 'import numpy as np\n'), ((5800, 5821), 'numpy.argwhere', 'np.argwhere', (['(rrm == v)'], {}), '(rrm == v)\n', (5811, 5821), True, 'import numpy as np\n'), ((5983, 6053), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.argmax'], {'axis': '(1)', 'arr': "graph['R'][vcells, nv[1:]]"}), "(np.argmax, axis=1, arr=graph['R'][vcells, nv[1:]])\n", (6002, 6053), True, 'import numpy as np\n'), ((2166, 2184), 'joblib.delayed', 'delayed', (['map_cells'], {}), '(map_cells)\n', (2173, 2184), False, 'from joblib import delayed\n'), ((6616, 6664), 'numpy.sqrt', 'np.sqrt', (['(alpha * p1 ** 2 + (1 - alpha) * p0 ** 2)'], {}), '(alpha * p1 ** 2 + (1 - alpha) * p0 ** 2)\n', (6623, 6664), True, 'import numpy as np\n'), ((3283, 3300), 'pandas.Series', 'pd.Series', (["x['t']"], {}), "(x['t'])\n", (3292, 3300), True, 'import pandas as pd\n')] |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow.utils.data import Sampler
class CyclicSampler(Sampler):
"""
This sampler supports cyclic sampling, and it is also compatible with
non-data parallelism and data parallelism.
Arguments:
dataset: dataset to be sampled.
micro_batch_size: batch size for per model instance.
global_batch_size is micro_batch_size times data_parallel_size.
shuffle: whether to shuffle the dataset.
consumed_samples: the number of samples that have been trained at the current time,
used for resuming training (default: ``0``).
data_parallel_rank: local rank for data parallelism.
data_parallel_size: the size of data parallelism.
seed: random seed, used for reproducing experiments (default: ``0``).
"""
def __init__(
self,
dataset,
micro_batch_size,
shuffle=False,
consumed_samples=0,
data_parallel_rank=0,
data_parallel_size=1,
seed=0,
):
self.dataset = dataset
self.data_size = len(self.dataset)
self.shuffle = shuffle
self.data_parallel_rank = data_parallel_rank
self.data_parallel_size = data_parallel_size
self.micro_batch_size = micro_batch_size
self.actual_batch_size = self.micro_batch_size * self.data_parallel_size
self.data_size_per_epoch = self.data_size // self.actual_batch_size * self.micro_batch_size
self.consumed_samples = consumed_samples
self.seed = seed
def __iter__(self):
"""divide the data into data_parallel_size buckets,
and shuffle it if `shuffle` is set to `True`.
Each processor samples from its own buckets and data_loader
will load the corresponding data.
"""
epoch = self.consumed_samples // self.data_size_per_epoch
current_epoch_samples = self.consumed_samples % self.data_size_per_epoch
batch = []
while True:
bucket_offset = current_epoch_samples // self.data_parallel_size
start_idx = self.data_parallel_rank * self.data_size_per_epoch
if self.shuffle:
generator = flow.Generator()
generator.manual_seed(self.seed + epoch)
random_idx = flow.randperm(self.data_size_per_epoch, generator=generator).tolist()
indices = [start_idx + x for x in random_idx[bucket_offset:]]
else:
seq_idx = flow.arange(self.data_size_per_epoch).tolist()
indices = [start_idx + x for x in seq_idx[bucket_offset:]]
epoch += 1
if hasattr(self.dataset, "supports_prefetch") and self.dataset.supports_prefetch:
self.dataset.prefetch(indices)
for idx in indices:
batch.append(idx)
if len(batch) == self.micro_batch_size:
self.consumed_samples += self.actual_batch_size
yield batch
batch = []
current_epoch_samples = 0
def __len__(self):
return self.data_size
def set_consumed_samples(self, consumed_samples):
"""You can recover the training iteration by setting `consumed_samples`."""
self.consumed_samples = consumed_samples
def set_epoch(self, epoch):
"""Used for restoring training status."""
self.epoch = epoch
class SingleRoundSampler(Sampler):
"""
This sampler supports single round sampling, and it is also compatible with
non data parallelism and data parallelism.
Arguments:
dataset: dataset to be sampled.
micro_batch_size: batch size for per model instance, global_batch_size
is micro_batch_size times data_parallel_size.
shuffle: whether to shuffle the dataset.
data_parallel_rank: local rank for data parallelism.
data_parallel_size: the size of data parallelism.
seed: random seed, used for reproducing experiments (default: ``0``).
drop_last: whether to drop the remaining data (default: ``False``).
"""
def __init__(
self,
dataset,
micro_batch_size,
shuffle=False,
data_parallel_rank=0,
data_parallel_size=1,
seed=0,
drop_last=False,
):
self.dataset = dataset
self.data_size = len(self.dataset)
self.shuffle = shuffle
self.data_parallel_rank = data_parallel_rank
self.data_parallel_size = data_parallel_size
self.micro_batch_size = micro_batch_size
self.seed = seed
self.drop_last = drop_last
def __iter__(self):
bucket_size = self.data_size // self.data_parallel_size
remain = self.data_size % self.data_parallel_size
start_idx = self.data_parallel_rank * bucket_size
if self.data_parallel_rank < remain:
bucket_size += 1
start_idx += min(self.data_parallel_rank, remain)
if self.shuffle:
generator = flow.Generator()
generator.manual_seed(self.seed)
random_idx = flow.randperm(bucket_size, generator=generator).tolist()
indices = [start_idx + x for x in random_idx]
else:
seq_idx = flow.arange(bucket_size).tolist()
indices = [start_idx + x for x in seq_idx]
if hasattr(self.dataset, "supports_prefetch") and self.dataset.supports_prefetch:
self.dataset.prefetch(indices)
batch = []
for idx in indices:
batch.append(idx)
if len(batch) == self.micro_batch_size:
yield batch
batch = []
if not self.drop_last:
if self.data_parallel_rank >= remain and remain > 0:
batch.append(0)
if len(batch) > 0:
yield batch
def __len__(self):
global_batch_size = self.micro_batch_size * self.data_parallel_size
if self.drop_last:
return self.data_size // global_batch_size
else:
return (self.data_size + global_batch_size - 1) // global_batch_size
| [
"oneflow.arange",
"oneflow.Generator",
"oneflow.randperm"
] | [((5675, 5691), 'oneflow.Generator', 'flow.Generator', ([], {}), '()\n', (5689, 5691), True, 'import oneflow as flow\n'), ((2821, 2837), 'oneflow.Generator', 'flow.Generator', ([], {}), '()\n', (2835, 2837), True, 'import oneflow as flow\n'), ((5762, 5809), 'oneflow.randperm', 'flow.randperm', (['bucket_size'], {'generator': 'generator'}), '(bucket_size, generator=generator)\n', (5775, 5809), True, 'import oneflow as flow\n'), ((5913, 5937), 'oneflow.arange', 'flow.arange', (['bucket_size'], {}), '(bucket_size)\n', (5924, 5937), True, 'import oneflow as flow\n'), ((2924, 2984), 'oneflow.randperm', 'flow.randperm', (['self.data_size_per_epoch'], {'generator': 'generator'}), '(self.data_size_per_epoch, generator=generator)\n', (2937, 2984), True, 'import oneflow as flow\n'), ((3116, 3153), 'oneflow.arange', 'flow.arange', (['self.data_size_per_epoch'], {}), '(self.data_size_per_epoch)\n', (3127, 3153), True, 'import oneflow as flow\n')] |
from os import path
from setuptools import find_namespace_packages, setup
this_directory = path.abspath(path.dirname(__file__))
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(
name='lambda-learner',
namespace_packages=['linkedin'],
version='0.0.1',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=['Programming Language :: Python :: 3',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved'],
license='BSD-2-CLAUSE',
keywords='lambda-learner incremental training',
package_dir={'': 'src'},
packages=find_namespace_packages(where='src', exclude=['test*', 'doc']),
url='https://github.com/linkedin/lambda-learner',
project_urls={
'Documentation': 'https://github.com/linkedin/lambda-learner/blob/main/README.md',
'Source': 'https://github.com/linkedin/lambda-learner',
'Tracker': 'https://github.com/linkedin/lambda-learner/issues',
},
include_package_data=True,
python_requires='>=3.6',
install_requires=[
'numpy >= 1.14',
'scipy >= 1.0.0',
'scikit-learn >= 0.18.1',
'typing-extensions >= 3.7.4',
],
tests_require=[
'pytest',
]
)
| [
"setuptools.find_namespace_packages",
"os.path.dirname"
] | [((104, 126), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (116, 126), False, 'from os import path\n'), ((723, 785), 'setuptools.find_namespace_packages', 'find_namespace_packages', ([], {'where': '"""src"""', 'exclude': "['test*', 'doc']"}), "(where='src', exclude=['test*', 'doc'])\n", (746, 785), False, 'from setuptools import find_namespace_packages, setup\n')] |
# -*- coding: utf-8 -*-
"""
Independent model based on Geodesic Regression model R_G
"""
import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.nn.functional as F
from dataGenerators import ImagesAll, TestImages, my_collate
from axisAngle import get_error2, geodesic_loss
from poseModels import model_3layer
from helperFunctions import classes
from featureModels import resnet_model
import numpy as np
import scipy.io as spio
import gc
import os
import time
import progressbar
import argparse
from tensorboardX import SummaryWriter
parser = argparse.ArgumentParser(description='Pure Regression Models')
parser.add_argument('--gpu_id', type=str, default='0')
parser.add_argument('--render_path', type=str, default='data/renderforcnn/')
parser.add_argument('--augmented_path', type=str, default='data/augmented2/')
parser.add_argument('--pascal3d_path', type=str, default='data/flipped_new/test/')
parser.add_argument('--save_str', type=str)
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--feature_network', type=str, default='resnet')
parser.add_argument('--N0', type=int, default=2048)
parser.add_argument('--N1', type=int, default=1000)
parser.add_argument('--N2', type=int, default=500)
parser.add_argument('--init_lr', type=float, default=1e-4)
parser.add_argument('--num_epochs', type=int, default=3)
args = parser.parse_args()
print(args)
# assign GPU
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
# save stuff here
results_file = os.path.join('results', args.save_str)
model_file = os.path.join('models', args.save_str + '.tar')
plots_file = os.path.join('plots', args.save_str)
log_dir = os.path.join('logs', args.save_str)
# relevant variables
ydata_type = 'axis_angle'
ndim = 3
num_classes = len(classes)
mse_loss = nn.MSELoss().cuda()
gve_loss = geodesic_loss().cuda()
ce_loss = nn.CrossEntropyLoss().cuda()
# DATA
# datasets
real_data = ImagesAll(args.augmented_path, 'real', ydata_type)
render_data = ImagesAll(args.render_path, 'render', ydata_type)
test_data = TestImages(args.pascal3d_path, ydata_type)
# setup data loaders
real_loader = DataLoader(real_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)
render_loader = DataLoader(render_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)
test_loader = DataLoader(test_data, batch_size=32)
print('Real: {0} \t Render: {1} \t Test: {2}'.format(len(real_loader), len(render_loader), len(test_loader)))
max_iterations = min(len(real_loader), len(render_loader))
# my_model
class IndependentModel(nn.Module):
def __init__(self):
super().__init__()
self.num_classes = num_classes
self.feature_model = resnet_model('resnet50', 'layer4').cuda()
self.pose_model = model_3layer(args.N0, args.N1, args.N2, ndim).cuda()
def forward(self, x):
x = self.feature_model(x)
x = self.pose_model(x)
x = np.pi*F.tanh(x)
return x
model = IndependentModel()
# print(model)
# loss and optimizer
optimizer = optim.Adam(model.parameters(), lr=args.init_lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
# store stuff
writer = SummaryWriter(log_dir)
count = 0
val_loss = []
# OPTIMIZATION functions
def training_init():
global count, val_loss
model.train()
bar = progressbar.ProgressBar(max_value=max_iterations)
for i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):
# forward steps
xdata_real = Variable(sample_real['xdata'].cuda())
ydata_real = Variable(sample_real['ydata'].cuda())
output_real = model(xdata_real)
xdata_render = Variable(sample_render['xdata'].cuda())
ydata_render = Variable(sample_render['ydata'].cuda())
output_render = model(xdata_render)
output_pose = torch.cat((output_real, output_render))
gt_pose = torch.cat((ydata_real, ydata_render))
loss = mse_loss(output_pose, gt_pose)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# store
count += 1
writer.add_scalar('train_loss', loss.item(), count)
if i % 1000 == 0:
ytest, yhat_test, test_labels = testing()
spio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})
tmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)
writer.add_scalar('val_loss', tmp_val_loss, count)
val_loss.append(tmp_val_loss)
# cleanup
del xdata_real, xdata_render, ydata_real, ydata_render
del output_real, output_render, sample_real, sample_render, loss, output_pose, gt_pose
bar.update(i)
# stop
if i == max_iterations:
break
render_loader.dataset.shuffle_images()
real_loader.dataset.shuffle_images()
def training():
global count, val_loss
model.train()
bar = progressbar.ProgressBar(max_value=max_iterations)
for i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):
# forward steps
xdata_real = Variable(sample_real['xdata'].cuda())
ydata_real = Variable(sample_real['ydata'].cuda())
output_real = model(xdata_real)
xdata_render = Variable(sample_render['xdata'].cuda())
ydata_render = Variable(sample_render['ydata'].cuda())
output_render = model(xdata_render)
output_pose = torch.cat((output_real, output_render))
gt_pose = torch.cat((ydata_real, ydata_render))
loss = gve_loss(output_pose, gt_pose)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# store
count += 1
writer.add_scalar('train_loss', loss.item(), count)
if i % 1000 == 0:
ytest, yhat_test, test_labels = testing()
spio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})
tmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)
writer.add_scalar('val_loss', tmp_val_loss, count)
val_loss.append(tmp_val_loss)
# cleanup
del xdata_real, xdata_render, ydata_real, ydata_render
del output_real, output_render, sample_real, sample_render, loss, output_pose, gt_pose
bar.update(i)
# stop
if i == max_iterations:
break
render_loader.dataset.shuffle_images()
real_loader.dataset.shuffle_images()
def testing():
model.eval()
ypred = []
ytrue = []
labels = []
for i, sample in enumerate(test_loader):
xdata = Variable(sample['xdata'].cuda())
label = Variable(sample['label'].cuda())
output = model(xdata)
ypred.append(output.data.cpu().numpy())
ytrue.append(sample['ydata'].numpy())
labels.append(sample['label'].numpy())
del xdata, label, output, sample
gc.collect()
ypred = np.concatenate(ypred)
ytrue = np.concatenate(ytrue)
labels = np.concatenate(labels)
model.train()
return ytrue, ypred, labels
def save_checkpoint(filename):
    """Write the model weights to ``filename`` (state_dict only; optimizer state is not persisted)."""
    torch.save(model.state_dict(), filename)
# initialization: warm-up pass, then a baseline evaluation before training
training_init()
ytest, yhat_test, test_labels = testing()
print('\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))
for epoch in range(args.num_epochs):
    tic = time.time()
    # NOTE(review): stepping the scheduler *before* training() decays the LR
    # at the start of every epoch (including epoch 0); PyTorch >= 1.1
    # recommends calling scheduler.step() after the optimizer updates --
    # confirm this ordering is intended.
    scheduler.step()
    # training step
    training()
    # save model at end of epoch
    save_checkpoint(model_file)
    # validation
    ytest, yhat_test, test_labels = testing()
    print('\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))
    # time and output
    toc = time.time() - tic
    print('Epoch: {0} done in time {1}s'.format(epoch, toc))
    # cleanup
    gc.collect()
writer.close()
# persist the per-validation loss curve, then the final predictions
val_loss = np.stack(val_loss)
spio.savemat(plots_file, {'val_loss': val_loss})
# evaluate the model
ytest, yhat_test, test_labels = testing()
print('\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))
spio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})
| [
"poseModels.model_3layer",
"scipy.io.savemat",
"torch.nn.CrossEntropyLoss",
"torch.nn.MSELoss",
"progressbar.ProgressBar",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"axisAngle.geodesic_loss",
"numpy.stack",
"numpy.concatenate",
"featureModels.resnet_model",
"torch.nn.functional.... | [((620, 681), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Pure Regression Models"""'}), "(description='Pure Regression Models')\n", (643, 681), False, 'import argparse\n'), ((1552, 1590), 'os.path.join', 'os.path.join', (['"""results"""', 'args.save_str'], {}), "('results', args.save_str)\n", (1564, 1590), False, 'import os\n'), ((1604, 1650), 'os.path.join', 'os.path.join', (['"""models"""', "(args.save_str + '.tar')"], {}), "('models', args.save_str + '.tar')\n", (1616, 1650), False, 'import os\n'), ((1664, 1700), 'os.path.join', 'os.path.join', (['"""plots"""', 'args.save_str'], {}), "('plots', args.save_str)\n", (1676, 1700), False, 'import os\n'), ((1711, 1746), 'os.path.join', 'os.path.join', (['"""logs"""', 'args.save_str'], {}), "('logs', args.save_str)\n", (1723, 1746), False, 'import os\n'), ((1967, 2017), 'dataGenerators.ImagesAll', 'ImagesAll', (['args.augmented_path', '"""real"""', 'ydata_type'], {}), "(args.augmented_path, 'real', ydata_type)\n", (1976, 2017), False, 'from dataGenerators import ImagesAll, TestImages, my_collate\n'), ((2032, 2081), 'dataGenerators.ImagesAll', 'ImagesAll', (['args.render_path', '"""render"""', 'ydata_type'], {}), "(args.render_path, 'render', ydata_type)\n", (2041, 2081), False, 'from dataGenerators import ImagesAll, TestImages, my_collate\n'), ((2094, 2136), 'dataGenerators.TestImages', 'TestImages', (['args.pascal3d_path', 'ydata_type'], {}), '(args.pascal3d_path, ydata_type)\n', (2104, 2136), False, 'from dataGenerators import ImagesAll, TestImages, my_collate\n'), ((2172, 2310), 'torch.utils.data.DataLoader', 'DataLoader', (['real_data'], {'batch_size': 'args.num_workers', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)', 'collate_fn': 'my_collate'}), '(real_data, batch_size=args.num_workers, shuffle=True,\n num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)\n', (2182, 2310), False, 'from torch.utils.data 
import DataLoader\n'), ((2323, 2463), 'torch.utils.data.DataLoader', 'DataLoader', (['render_data'], {'batch_size': 'args.num_workers', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)', 'collate_fn': 'my_collate'}), '(render_data, batch_size=args.num_workers, shuffle=True,\n num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)\n', (2333, 2463), False, 'from torch.utils.data import DataLoader\n'), ((2474, 2510), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': '(32)'}), '(test_data, batch_size=32)\n', (2484, 2510), False, 'from torch.utils.data import DataLoader\n'), ((3188, 3248), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(1)', 'gamma': '(0.1)'}), '(optimizer, step_size=1, gamma=0.1)\n', (3213, 3248), False, 'from torch import nn, optim\n'), ((3272, 3294), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['log_dir'], {}), '(log_dir)\n', (3285, 3294), False, 'from tensorboardX import SummaryWriter\n'), ((7405, 7423), 'numpy.stack', 'np.stack', (['val_loss'], {}), '(val_loss)\n', (7413, 7423), True, 'import numpy as np\n'), ((7424, 7472), 'scipy.io.savemat', 'spio.savemat', (['plots_file', "{'val_loss': val_loss}"], {}), "(plots_file, {'val_loss': val_loss})\n", (7436, 7472), True, 'import scipy.io as spio\n'), ((7623, 7723), 'scipy.io.savemat', 'spio.savemat', (['results_file', "{'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels}"], {}), "(results_file, {'ytest': ytest, 'yhat_test': yhat_test,\n 'test_labels': test_labels})\n", (7635, 7723), True, 'import scipy.io as spio\n'), ((3413, 3462), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'max_value': 'max_iterations'}), '(max_value=max_iterations)\n', (3436, 3462), False, 'import progressbar\n'), ((4826, 4875), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'max_value': 'max_iterations'}), '(max_value=max_iterations)\n', (4849, 4875), False, 'import 
progressbar\n'), ((6577, 6598), 'numpy.concatenate', 'np.concatenate', (['ypred'], {}), '(ypred)\n', (6591, 6598), True, 'import numpy as np\n'), ((6608, 6629), 'numpy.concatenate', 'np.concatenate', (['ytrue'], {}), '(ytrue)\n', (6622, 6629), True, 'import numpy as np\n'), ((6640, 6662), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (6654, 6662), True, 'import numpy as np\n'), ((6990, 7001), 'time.time', 'time.time', ([], {}), '()\n', (6999, 7001), False, 'import time\n'), ((7366, 7378), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7376, 7378), False, 'import gc\n'), ((1843, 1855), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1853, 1855), False, 'from torch import nn, optim\n'), ((1874, 1889), 'axisAngle.geodesic_loss', 'geodesic_loss', ([], {}), '()\n', (1887, 1889), False, 'from axisAngle import get_error2, geodesic_loss\n'), ((1907, 1928), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1926, 1928), False, 'from torch import nn, optim\n'), ((3873, 3912), 'torch.cat', 'torch.cat', (['(output_real, output_render)'], {}), '((output_real, output_render))\n', (3882, 3912), False, 'import torch\n'), ((3925, 3962), 'torch.cat', 'torch.cat', (['(ydata_real, ydata_render)'], {}), '((ydata_real, ydata_render))\n', (3934, 3962), False, 'import torch\n'), ((5286, 5325), 'torch.cat', 'torch.cat', (['(output_real, output_render)'], {}), '((output_real, output_render))\n', (5295, 5325), False, 'import torch\n'), ((5338, 5375), 'torch.cat', 'torch.cat', (['(ydata_real, ydata_render)'], {}), '((ydata_real, ydata_render))\n', (5347, 5375), False, 'import torch\n'), ((6555, 6567), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6565, 6567), False, 'import gc\n'), ((6888, 6942), 'axisAngle.get_error2', 'get_error2', (['ytest', 'yhat_test', 'test_labels', 'num_classes'], {}), '(ytest, yhat_test, test_labels, num_classes)\n', (6898, 6942), False, 'from axisAngle import get_error2, geodesic_loss\n'), ((7278, 7289), 'time.time', 
'time.time', ([], {}), '()\n', (7287, 7289), False, 'import time\n'), ((7566, 7620), 'axisAngle.get_error2', 'get_error2', (['ytest', 'yhat_test', 'test_labels', 'num_classes'], {}), '(ytest, yhat_test, test_labels, num_classes)\n', (7576, 7620), False, 'from axisAngle import get_error2, geodesic_loss\n'), ((3030, 3039), 'torch.nn.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (3036, 3039), True, 'import torch.nn.functional as F\n'), ((4209, 4309), 'scipy.io.savemat', 'spio.savemat', (['results_file', "{'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels}"], {}), "(results_file, {'ytest': ytest, 'yhat_test': yhat_test,\n 'test_labels': test_labels})\n", (4221, 4309), True, 'import scipy.io as spio\n'), ((4324, 4378), 'axisAngle.get_error2', 'get_error2', (['ytest', 'yhat_test', 'test_labels', 'num_classes'], {}), '(ytest, yhat_test, test_labels, num_classes)\n', (4334, 4378), False, 'from axisAngle import get_error2, geodesic_loss\n'), ((5622, 5722), 'scipy.io.savemat', 'spio.savemat', (['results_file', "{'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels}"], {}), "(results_file, {'ytest': ytest, 'yhat_test': yhat_test,\n 'test_labels': test_labels})\n", (5634, 5722), True, 'import scipy.io as spio\n'), ((5737, 5791), 'axisAngle.get_error2', 'get_error2', (['ytest', 'yhat_test', 'test_labels', 'num_classes'], {}), '(ytest, yhat_test, test_labels, num_classes)\n', (5747, 5791), False, 'from axisAngle import get_error2, geodesic_loss\n'), ((7195, 7249), 'axisAngle.get_error2', 'get_error2', (['ytest', 'yhat_test', 'test_labels', 'num_classes'], {}), '(ytest, yhat_test, test_labels, num_classes)\n', (7205, 7249), False, 'from axisAngle import get_error2, geodesic_loss\n'), ((2826, 2860), 'featureModels.resnet_model', 'resnet_model', (['"""resnet50"""', '"""layer4"""'], {}), "('resnet50', 'layer4')\n", (2838, 2860), False, 'from featureModels import resnet_model\n'), ((2888, 2933), 'poseModels.model_3layer', 'model_3layer', 
(['args.N0', 'args.N1', 'args.N2', 'ndim'], {}), '(args.N0, args.N1, args.N2, ndim)\n', (2900, 2933), False, 'from poseModels import model_3layer\n')] |
# from django.shortcuts import render, redirect, get_object_or_404
from .forms import CharacterForm
from rick_and_morty_app.models import Character
from django.views.generic import ListView, CreateView, UpdateView, DetailView, DeleteView
from django.urls import reverse_lazy # new
# Create your views here.
class HomePageView(ListView):
    """List all Character objects using the character_list.html template."""
    model = Character
    template_name = 'character_list.html'
class CreateCharacterView(CreateView):
    """Create a Character through CharacterForm; redirect to the list on success."""
    model = Character
    form_class = CharacterForm
    template_name = 'character_form.html'
    success_url = reverse_lazy('character_list')
class CharacterDetailView(DetailView):
    """Show a single Character using the character_details.html template."""
    model = Character
    template_name = 'character_details.html'
class CharacterUpdate(UpdateView):
    """Edit a Character; only name and lastEpisode are editable here."""
    model = Character
    fields = ['name', 'lastEpisode']
    template_name = 'character_update.html'
    success_url = reverse_lazy('character_list')
class DeleteCharacter(DeleteView):
    """Confirm-and-delete view for a Character; redirect to the list on success."""
    model = Character
    template_name = 'character_delete.html'
    success_url = reverse_lazy('character_list')
| [
"django.urls.reverse_lazy"
] | [((556, 586), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""character_list"""'], {}), "('character_list')\n", (568, 586), False, 'from django.urls import reverse_lazy\n'), ((851, 881), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""character_list"""'], {}), "('character_list')\n", (863, 881), False, 'from django.urls import reverse_lazy\n'), ((1002, 1032), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""character_list"""'], {}), "('character_list')\n", (1014, 1032), False, 'from django.urls import reverse_lazy\n')] |
from apps.flow.settings import config
# Monkey-patch the stdlib for gevent cooperative I/O everywhere except the
# local 'dev' environment (where plain blocking behavior is easier to debug).
if config.SERVER_ENV != 'dev':
    from gevent import monkey
    monkey.patch_all()
from apps.flow.views.deploy import deploy
from apps.flow.views.flow import flow
from library.api.tFlask import tflask
def create_app():
    """Build and return the Flask application with all blueprints attached."""
    application = tflask(config)
    register_blueprints(application)
    return application
def register_blueprints(app):
    """Mount the v1 API blueprints on ``app`` under their URL prefixes."""
    for blueprint, prefix in ((flow, "/v1/flow"), (deploy, "/v1/deploy")):
        app.register_blueprint(blueprint, url_prefix=prefix)
# Run the development server when executed directly.
if __name__ == '__main__':
    create_app().run(port=config.PORT)
| [
"library.api.tFlask.tflask",
"gevent.monkey.patch_all"
] | [((105, 123), 'gevent.monkey.patch_all', 'monkey.patch_all', ([], {}), '()\n', (121, 123), False, 'from gevent import monkey\n'), ((288, 302), 'library.api.tFlask.tflask', 'tflask', (['config'], {}), '(config)\n', (294, 302), False, 'from library.api.tFlask import tflask\n')] |
""" File to house a requester connection """
from logging import getLogger
import zmq
from service_framework.utils.connection_utils import BaseConnection
from service_framework.utils.msgpack_utils import msg_pack, msg_unpack
from service_framework.utils.socket_utils import get_requester_socket
LOG = getLogger(__name__)
class Requester(BaseConnection):
    """
    Needed to automatically generate all connection functions/sockets so external
    calls will be properly handled.
    """

    def __init__(self, model, addresses):
        super().__init__(model, addresses)
        self.addresses = addresses
        self.context = None
        self.socket = None

    def __del__(self):
        # Release the socket's file descriptor when the connection is collected.
        if hasattr(self, 'socket') and self.socket:
            self.socket.close()

    @staticmethod
    def get_addresses_model():
        """
        This is needed so the BaseConnector can validate the
        provided addresses and throw an error if any are missing.
        As well as automatically generate documentation.
        NOTE: types must always be "str"
        return = {
            'required_addresses': {
                'req_address_name_1': str,
                'req_address_name_2': str,
            },
            'optional_addresses': {
                'opt_address_name_1': str,
                'opt_address_name_2': str,
            },
        }
        """
        return {
            'required_addresses': {'requester': str},
            'optional_addresses': {},
        }

    @staticmethod
    def get_connection_arguments_model():
        """
        This is needed so the BaseConnection can validate the provided
        model explicitly state the arguments to be passed on each
        send message.
        return = {
            'required_connection_arguments': {
                'required_connection_arg_1': type,
                'required_connection_arg_2': type,
            },
            'optional_connection_arguments': {
                'optional_connection_arg_1': type,
                'optional_connection_arg_2': type,
            },
        }
        """
        return {
            'required_connection_arguments': {},
            'optional_connection_arguments': {},
        }

    @staticmethod
    def get_creation_arguments_model():
        """
        This is needed so the BaseConnection can validate the provided
        creation arguments as well as for auto documentation.
        return = {
            'required_creation_arguments': {
                'required_creation_arg_1': type,
                'required_creation_arg_2': type,
            },
            'optional_creation_arguments': {
                'optional_creation_arg_1': type,
                'optional_creation_arg_2': type,
            },
        }
        """
        return {
            'required_creation_arguments': {},
            'optional_creation_arguments': {},
        }

    def _setup_requester_socket(self):
        """
        (Re)create the zmq context and requester socket. Any previously
        created socket is closed first so repeated setup calls (e.g.
        runtime_setup followed by
        get_inbound_sockets_and_triggered_functions) do not leak sockets.
        """
        if self.socket is not None:
            self.socket.close()
        self.context = zmq.Context()
        self.socket = get_requester_socket(
            self.addresses['requester'],
            self.context
        )

    def get_inbound_sockets_and_triggered_functions(self):
        """
        Method needed so the service framework knows which sockets to listen
        for new messages and what functions to call when a message appears.
        A requester has no inbound sockets, so this only (re)creates the
        outbound socket and returns an empty list.
        return [{
            'inbound_socket': zmq.Context.Socket,
            'decode_message': def(bytes) -> payload,
            'arg_validator': def(args),
            'connection_function': def(args) -> args or None,
            'model_function': def(args, to_send, conifg) -> return_args or None,
            'return_validator': def(return_args)
            'return_function': def(return_args),
        }]
        """
        self._setup_requester_socket()
        return []

    def runtime_setup(self):
        """
        Method called directly after instantiation to conduct all
        runtime required setup. I.E. Setting up a zmq.Context().
        """
        self._setup_requester_socket()

    def send(self, payload):
        """
        This is needed to wrap socket calls. So all calls to the connection
        will be properly formatted.  Performs a blocking request/reply:
        msgpack-encodes the payload, sends it, and decodes the response.
        """
        self.socket.send(msg_pack(payload))
        return msg_unpack(self.socket.recv())
| [
"logging.getLogger",
"service_framework.utils.socket_utils.get_requester_socket",
"zmq.Context",
"service_framework.utils.msgpack_utils.msg_pack"
] | [((304, 323), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (313, 323), False, 'from logging import getLogger\n'), ((3578, 3591), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (3589, 3591), False, 'import zmq\n'), ((3615, 3678), 'service_framework.utils.socket_utils.get_requester_socket', 'get_requester_socket', (["self.addresses['requester']", 'self.context'], {}), "(self.addresses['requester'], self.context)\n", (3635, 3678), False, 'from service_framework.utils.socket_utils import get_requester_socket\n'), ((3940, 3953), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (3951, 3953), False, 'import zmq\n'), ((3977, 4040), 'service_framework.utils.socket_utils.get_requester_socket', 'get_requester_socket', (["self.addresses['requester']", 'self.context'], {}), "(self.addresses['requester'], self.context)\n", (3997, 4040), False, 'from service_framework.utils.socket_utils import get_requester_socket\n'), ((4266, 4283), 'service_framework.utils.msgpack_utils.msg_pack', 'msg_pack', (['payload'], {}), '(payload)\n', (4274, 4283), False, 'from service_framework.utils.msgpack_utils import msg_pack, msg_unpack\n')] |
# from sklearn.cluster._kmeans import *
import copy
from typing import Union
import torch
import torch.nn as nn
from sklearn.cluster._robustq import *
from .quantizer import Quantizer
__all__ = ['MiniBatchRobustqTorch', 'RobustqTorch']
class ClusterQuantizerBase(Quantizer):
    """Base class for cluster-based weight quantizers.

    The cluster assignments (``labels_``) and quantization levels
    (``cluster_centers_``) are registered as buffers so they are saved and
    restored with the model's state_dict.
    """

    def __init__(self, n_feature=1, n_clusters=8, name='',
                 quant_fun=lambda x: x):
        super(ClusterQuantizerBase, self).__init__()
        self.n_clusters = n_clusters
        self.name = name
        # empty labels buffer: lets loading code detect "not fitted yet"
        self.register_buffer("labels_", torch.zeros((0, ), dtype=torch.long))
        # all-zero centers: lets init code detect "not initialized yet"
        self.register_buffer("cluster_centers_", torch.zeros(n_clusters, n_feature))
        self.quant_fun = quant_fun

    def reset(self):
        super().reset()
        # re-register an empty label buffer and reset the quantization
        # levels to a uniform grid over [-1, 1]
        self.register_buffer("labels_", torch.zeros((0, ), dtype=torch.long))
        self.cluster_centers_.data.copy_(torch.linspace(-1, 1, steps=self.n_clusters).view(-1, 1))

    def forward(self, inputs):
        # BUG FIX: the attribute set in __init__ is ``quant_fun``; the
        # original called the undefined ``self.quant_func`` which raised
        # AttributeError.  (Assumes the base Quantizer does not itself
        # define ``quant_func`` -- nothing else in this file does.)
        return self.quant_fun(inputs)

    def extra_repr(self) -> str:
        return 'name={},cluster={}'.format(self.name, self.n_clusters)

    @staticmethod
    def quant_calib(net, wrapped_modules, calib_loader):
        """Run the multi-step calibration pass over all wrapped modules,
        then switch every module to QAT forward mode."""
        calib_layers = []
        n_calibration_steps = 1
        for name, module in wrapped_modules.items():
            module.mode = 'calibration_forward'
            calib_layers.append(name)
            n_calibration_steps = max(n_calibration_steps, module.quantizer.n_calibration_steps)
        print(f"prepare calibration for {calib_layers}\n n_calibration_steps={n_calibration_steps}")
        for step in range(n_calibration_steps):
            print(f"Start calibration step={step+1}")
            for name, module in wrapped_modules.items():
                module.quantizer.calibration_step = step + 1
            # forward-only passes; the quantizers observe the activations
            with torch.no_grad():
                for inp, target in calib_loader:
                    inp = inp.cuda()
                    net(inp)
        for name, module in wrapped_modules.items():
            print(f"{name}: {module.quantizer}")
            module.mode = 'qat_forward'
        print("calibration finished")
print("calibration finished")
class RobustqTorch(ClusterQuantizerBase):
    """Robust k-means weight quantizer backed by ``RobustQ``.

    ``alpha`` is forwarded to the estimator as ``var_std`` and ``gamma`` as
    ``var_weight``; remaining ``kwargs`` go straight to the ``RobustQ``
    constructor.
    """
    def __init__(self, # data_or_size,
                 n_feature=1, n_clusters=8, name='',
                 alpha=0.1, gamma=1.0, q_level_init='uniform', **kwargs):
        super(RobustqTorch, self).__init__(n_feature, n_clusters=n_clusters, name=name)
        self.alpha = alpha
        self.gamma = gamma
        self.kmeans = RobustQ(n_clusters=n_clusters, **kwargs)
        # no data available at construction time, so levels start uniform
        self.init_layer_cluster_center(None, n_clusters, q_level_init)
    def init_layer_cluster_center(self, data, n_clusters, method="uniform"):
        # Without data (or with method == "uniform") start from a uniform grid
        # over [-1, 1]; otherwise fit the estimator with a loose tolerance.
        if method == "uniform" or data is None:
            self.cluster_centers_.data.copy_(torch.linspace(-1, 1, steps=n_clusters).view(-1, 1))
            self.kmeans.cluster_centers_ = self.cluster_centers_.data.cpu().numpy()
        else:
            self.fit(data, tol=1e-2)
    def reset(self):
        super().reset()
        # keep the sklearn-side centers in sync with the torch buffer
        self.kmeans.cluster_centers_ = self.cluster_centers_.data.cpu().numpy()
    def fit(self, X: torch.Tensor, y=None, sample_weight=None, n_init=None, init=None, tol=None):
        """Fit the robust estimator on the flattened tensor ``X`` and copy the
        resulting labels/centers into this module's buffers.

        ``n_init``/``init``/``tol`` temporarily override the estimator's
        settings for this fit only.
        """
        # 210626 data copy optimization: view instead of clone
        data = X.view(-1, 1)
        if X.requires_grad:
            data = data.detach()
        data = data.cpu().numpy()
        # back up estimator settings, override, fit, then restore below
        bak = copy.deepcopy([self.kmeans.n_init, self.kmeans.init, self.kmeans.tol])
        self.kmeans.n_init, self.kmeans.init, self.kmeans.tol = [new if new is not None else old
                                                                 for new, old in zip((n_init, init, tol), bak)]
        self.kmeans.fit(data, y=y, sample_weight=sample_weight, var_std=self.alpha, var_weight=self.gamma)
        # re-register (not copy_) because the number of labels can change
        self.register_buffer("labels_", torch.as_tensor(self.kmeans.labels_,dtype=torch.long))
        self.cluster_centers_.data.copy_(torch.from_numpy(self.kmeans.cluster_centers_))
        self.kmeans.n_init, self.kmeans.init, self.kmeans.tol = bak
    def predict(self, X, sample_weight=None):
        """Predict the closest cluster each sample in X belongs to.
        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data to predict.
        sample_weight : array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None).
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        # 210626 data copy optimization: view instead of clone
        data = X.view(-1, 1)
        if X.requires_grad:
            data = data.detach()
        data = data.cpu().numpy()
        return self.kmeans.predict(data, sample_weight, var_std=self.alpha, var_weight=self.gamma)
    def forward(self, inputs):
        if self.calibration and not self.calibrated:
            # calibration pass: fit on this tensor, quantize with the stored labels
            self.fit(inputs)
            labels = self.labels_
            weight_quan = self.cluster_centers_[:, 0][labels].view(inputs.shape)
        elif self.training:
            # label should change as weights are updated
            labels = self.predict(inputs)
            weight_quan_temp = self.cluster_centers_[:, 0][labels].view(inputs.shape)
            # straight-through estimator: forward yields quantized values,
            # backward passes gradients through ``inputs`` unchanged
            weight_quan = inputs - inputs.detach() + weight_quan_temp
        else:
            # eval mode: refuse to run if the model was loaded without a prior fit
            assert len(self.labels_.data)
            labels = self.labels_
            weight_quan_temp = self.cluster_centers_[:, 0][labels].view(inputs.shape)
            weight_quan = weight_quan_temp
        return weight_quan
    def extra_repr(self) -> str:
        return super(RobustqTorch, self).extra_repr() + " gamma:{}, alpha:{} )".format(self.gamma, self.alpha)
class MiniBatchRobustqTorch(RobustqTorch):
    """Mini-batch variant of :class:`RobustqTorch` backed by ``MiniBatchRobustQ``.

    Accepts an optional ``batch_size`` keyword which applies only to the
    mini-batch estimator (the parent builds a plain ``RobustQ`` that does
    not take it).
    """
    def __init__(self, # batch_size, # data_or_size,
                 n_feature=1, n_clusters=8, name='',
                 alpha=0.1, gamma=1.0, q_level_init='uniform', **kwargs):
        # ``batch_size`` must not reach the parent (RobustQ rejects it), but
        # it must be forwarded to MiniBatchRobustQ below.
        batch_size = kwargs.pop("batch_size", None)
        super().__init__(n_feature=n_feature, n_clusters=n_clusters, name=name,
                         alpha=alpha, gamma=gamma, q_level_init=q_level_init, **kwargs)
        mb_kwargs = dict(kwargs)
        if batch_size is not None:
            # BUG FIX: the original popped ``batch_size`` and never forwarded
            # it, so MiniBatchRobustQ always ran with its default batch size
            # (callers such as insert_robust_quntizer compute one explicitly).
            mb_kwargs["batch_size"] = batch_size
        self.kmeans = MiniBatchRobustQ(n_clusters=n_clusters, **mb_kwargs)
        self.init_layer_cluster_center(None, n_clusters, q_level_init)
    def partial_fit(self, X, y=None, sample_weight=None):
        """Update the mini-batch estimate on a single batch ``X``.

        ``X`` is flattened to (n_samples, 1); labels and centers are copied
        back into this module's buffers after the update.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Coordinates of the data points to cluster.
        y : Ignored
            Not used, present here for API consistency by convention.
        sample_weight : array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None).
        """
        # view (not clone) to avoid a data copy; detach if grad is tracked
        data = X.view(-1, 1)
        if X.requires_grad:
            data = data.detach()
        data = data.cpu().numpy()
        self.kmeans.partial_fit(data, y, sample_weight, var_std=self.alpha, var_weight=self.gamma)
        # re-register since the number of labels can change between batches
        self.register_buffer("labels_", torch.as_tensor(self.kmeans.labels_, dtype=torch.long))
        self.cluster_centers_.data.copy_(torch.from_numpy(self.kmeans.cluster_centers_))
    def extra_repr(self) -> str:
        return super(MiniBatchRobustqTorch, self).extra_repr() + " gamma:{}, alpha:{} )".format(self.gamma, self.alpha)
# TODO: Use close package
def insert_robust_quntizer(module: nn.Module, quantizer: Union[RobustqTorch, MiniBatchRobustqTorch], alpha, gamma):
    """Install a robust weight quantizer on every Conv2d/Linear in ``module``.

    ``quantizer`` is a template instance whose concrete type selects which
    quantizer class to install; ``alpha``/``gamma`` are forwarded to each new
    quantizer.  Each layer gets 2**w_bit - 1 quantization levels.
    (Assumes every Conv2d/Linear has been wrapped with a ``quantizer``
    attribute exposing ``w_bit`` and ``w_quantizer`` -- as used elsewhere in
    this file.)
    """
    batch_factor = 800
    for _name, layer in module.named_modules():
        if not isinstance(layer, (nn.Conv2d, nn.Linear)):
            continue
        n_samples = layer.weight.numel()
        # BUG FIX: the original read ``m.quanizer.w_bit`` (typo) while
        # writing ``m.quantizer.w_quantizer`` below -- use one attribute.
        n_clusters = 2 ** layer.quantizer.w_bit - 1
        # Check the mini-batch subclass first (it is also a RobustqTorch).
        if isinstance(quantizer, MiniBatchRobustqTorch):
            # cap the mini-batch size relative to the layer's weight count
            batch_size = (n_clusters * batch_factor
                          if n_clusters * batch_factor < int(0.3 * n_samples)
                          else int(0.2 * n_samples))
            layer.quantizer.w_quantizer = MiniBatchRobustqTorch(
                n_feature=1,
                n_clusters=n_clusters,
                alpha=alpha, gamma=gamma,
                batch_size=batch_size,
                n_init=1, max_iter=30, random_state=0,
                q_level_init="uniform",
            )
        elif isinstance(quantizer, RobustqTorch):
            layer.quantizer.w_quantizer = RobustqTorch(
                n_feature=1,
                n_clusters=n_clusters,
                alpha=alpha, gamma=gamma,
                n_init=1, max_iter=30, random_state=0,
                q_level_init="uniform",
            )
if __name__ == '__main__':
import numpy as np
np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
torch.set_printoptions(3)
import sklearn
sklearn.show_versions()
a = {}
# vgg = models.vgg11(pretrained=True)
# if torch.cuda.is_available():
# vgg.cuda()
# a['state_dict'] = vgg.state_dict()
a = torch.load("plot/checkpoints/resnet18_batch256_imagenet_20200708-34ab8f90.pth",
map_location=torch.device('cpu') if not torch.cuda.is_available() else torch.device('cuda'))
num_class = 7
batch_factor = 800
gamma = 0.
train_flg = False
robustq_torch_batch = []
robustq_sklean_batch = []
robustq_torch = []
robustq_sklean = []
kmeans_sklean = []
kmeans_sklean_batch = []
for n, v in a['state_dict'].items():
if "weight" in n:
n_samples = v.numel()
if n_samples > 1024:
print(n_samples)
# from sklearn
kmeans_sklean.append(
KMeans(n_clusters=num_class, n_init=1, max_iter=30, random_state=0, algorithm="full"))
kmeans_sklean_batch.append(
MiniBatchKMeans(n_clusters=num_class, n_init=1, max_iter=30, random_state=0, # tol=1e-4,
batch_size=num_class * batch_factor if num_class * 300 < int(
0.3 * n_samples) else int(0.2 * n_samples)))
# from Robustq
robustq_sklean.append(
RobustQ(n_clusters=num_class, n_init=1, max_iter=30, random_state=0, algorithm="full"))
robustq_sklean_batch.append(MiniBatchRobustQ(n_clusters=num_class,
n_init=1, max_iter=30, random_state=0, # tol=1e-4,
batch_size=num_class * batch_factor
if num_class * batch_factor < int(0.3 * n_samples)
else int(0.2 * n_samples)))
# from clusterq
robustq_torch_batch_t = MiniBatchRobustqTorch(n_feature=1,
n_clusters=num_class,
alpha=0.12, gamma=gamma,
batch_size=num_class * batch_factor
if num_class * batch_factor < int(0.3 * n_samples)
else int(0.2 * n_samples),
n_init=1, max_iter=30, random_state=0,
q_level_init="uniform"
)
if not train_flg:
robustq_torch_batch_t.eval()
robustq_torch_t = RobustqTorch(n_feature=1,
n_clusters=num_class,
alpha=0.12, gamma=gamma,
n_init=1, max_iter=30, random_state=0,
q_level_init="uniform"
)
if not train_flg:
robustq_torch_t.eval()
if torch.cuda.is_available():
robustq_torch_batch_t.cuda()
robustq_torch_t.cuda()
robustq_torch.append(robustq_torch_t)
robustq_torch_batch.append(robustq_torch_batch_t)
import sys
sys.path.append("../")
from utee.misc import time_measurement
    @time_measurement(False, 0, 0)
    def f1(quantizer_list, is_np=False):
        """Fit each quantizer on the matching large weight tensor of checkpoint
        ``a`` and verify that fitting did not modify the source data."""
        print("start\n")
        ix = 0
        for n, v in a['state_dict'].items():
            if "weight" in n:
                n_samples = v.numel()
                if n_samples > 1024:
                    data_o = v.detach().view(-1, 1)
                    if is_np:
                        data = data_o.cpu().numpy()
                    else:
                        data = data_o.cuda()
                    quantizer_list[ix].fit(data)
                    # re-read the weights and compare against what was fitted
                    data_o = v.detach().view(-1, 1)
                    if is_np:
                        datac = data_o.cpu().numpy()
                        t = (datac != data)
                        tt = t if not isinstance(t, np.ndarray) else t.any()
                        # print("data is modified:", tt)
                    else:
                        datac = data_o.cuda()
                        t = (datac != data)
                        tt = t.any().item()
                        # print("data is modified:", tt)
                    if tt:
                        # NOTE(review): in the is_np branch ``datac`` is a numpy
                        # array but ``data_o`` is a torch tensor -- confirm this
                        # subtraction behaves as intended.
                        print("max difference:", ((datac - data_o)[t]).max())
                    ix += 1
# import visdom
#
# vis = visdom.Visdom()
    class Visdom():
        """No-op stand-in for ``visdom.Visdom`` so the plotting calls below
        run without a visdom server."""
        def bar(self, *args, **kwargs):
            pass
        def line(self, *args, **kwargs):
            pass
vis = Visdom()
    def plot(quantizer, name="None", is_np=False):
        """Print a quantizer's labels/centers and send a level histogram plus
        its quantization transfer function to ``vis``."""
        print(quantizer.labels_)
        print(quantizer.cluster_centers_)
        # ------------- visdom draw --------------
        # histogram of weight distribution
        qw = quantizer.cluster_centers_[:, 0][quantizer.labels_]  # .view(weight.shape)
        qw_hist = []
        if is_np:
            qw_v = np.unique(qw)
            for v in qw_v:
                qw_hist.append((qw == v).sum())
        else:
            qw_v = qw.unique()
            for v in qw_v:
                qw_hist.append((qw == v).sum().item())
        vis.bar(torch.tensor(qw_hist), qw_v, win=name + " hist",
                opts=dict(title=name + " hist" + ' gamma={}'.format(gamma)))
        # transform function: quantized output over a dense sweep of [-1, 1)
        x = torch.arange(-1., 1., 0.01)
        print(x.shape)
        if is_np:
            x = x.view(-1, 1).cpu().numpy()
        elif torch.cuda.is_available():
            x = x.view(-1, 1).cuda()
        else:
            x = x.view(-1, 1)
        level1 = quantizer.cluster_centers_[:, 0][quantizer.predict(x)]
        # print(level1.shape, x.shape)
        vis.line(Y=level1, X=x.reshape(-1),
                 win=name,
                 opts=dict(title=name))
    @time_measurement(False, 0, 0)
    def get_q_loss(quantizer_list, is_np=False):
        """Accumulate and print the total squared quantization error of the
        (already fitted) quantizers over the large weight tensors of ``a``."""
        ix = 0
        loss = 0
        for n, v in a['state_dict'].items():
            if "weight" in n:
                n_samples = v.numel()
                if n_samples > 1024:
                    if is_np:
                        # sklearn-style quantizer: predict labels, look up centers
                        data = v.detach().view(-1, 1)
                        data = data.cpu().numpy()
                        q_data = quantizer_list[ix].cluster_centers_[:, 0][quantizer_list[ix].predict(data)].reshape(
                            data.shape)
                    else:
                        # torch quantizer module: call it directly on the tensor
                        data = v
                        q_data = quantizer_list[ix](data).reshape(data.shape)
                    loss += ((q_data - data) ** 2).sum()
                    # print(n)
                    ix += 1
        print(loss)
print("=======test kmeans_sklean======\n")
f1(kmeans_sklean, True)
get_q_loss(kmeans_sklean, True)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# q_data = kmeans_sklean[ix].cluster_centers_[:, 0][kmeans_sklean[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
print("=======test kmeans_sklean_batch======\n")
f1(kmeans_sklean_batch, True)
get_q_loss(kmeans_sklean_batch, True)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# q_data = kmeans_sklean_batch[ix].cluster_centers_[:, 0][kmeans_sklean_batch[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
print("=======test robustq_sklean======\n")
f1(robustq_sklean, True)
get_q_loss(robustq_sklean, True)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# q_data = robustq_sklean[ix].cluster_centers_[:, 0][robustq_sklean[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
plot(robustq_sklean[0], 'robustq_sklean', True)
print("=======test robustq_sklean_batch======\n")
f1(robustq_sklean_batch, True)
get_q_loss(robustq_sklean_batch, True)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# q_data = robustq_sklean_batch[ix].cluster_centers_[:, 0][robustq_sklean_batch[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
plot(robustq_sklean_batch[0], 'robustq_sklean_batch', True)
print("=======test robustq_torch======\n")
f1(robustq_torch)
get_q_loss(robustq_torch)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v
# q_data = robustq_torch[ix].cluster_centers_[:, 0][robustq_torch[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
plot(robustq_torch[0], 'robustq_torch')
print("=======test robustq_torch_batch======\n")
f1(robustq_torch_batch)
get_q_loss(robustq_torch_batch)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v
# q_data = robustq_torch_batch[ix].cluster_centers_[:, 0][robustq_torch_batch[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
plot(robustq_torch_batch[0], 'robustq_torch_batch')
# print("======= cudalib ======\n")
# from libKMCUDA import kmeans_cuda
# clq_temp = []
# import time
# t_s = time.monotonic()
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# samples = data.cpu().numpy()
# centroids, assignments = kmeans_cuda(samples, num_class, )
# clq_temp.append([centroids, assignments])
# t_e = time.monotonic()
# s, ms = divmod((t_e - t_s) * 1000, 1000)
# m, s = divmod(s, 60)
# h, m = divmod(m, 60)
# print("%d:%02d:%02d:%03d" % (h, m, s, ms))
#
# t_s = time.monotonic()
# ix = 0
# loss=0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# centroids, assignments = clq_temp[ix]
# q_data = centroids[:, 0][assignments].reshape(data.shape)
# loss += ((q_data - data) ** 2).sum()
# ix +=1
# t_e = time.monotonic()
# s, ms = divmod((t_e - t_s) * 1000, 1000)
# m, s = divmod(s, 60)
# h, m = divmod(m, 60)
# print("%d:%02d:%02d:%03d" % (h, m, s, ms))
# print(loss)
print("=======test uniform======\n")
from module.quantization.quant_functions import linear_quantize, compute_integral_part
bits = 3
print("start\n")
ix = 0
q2_loss = 0
q2_list = []
for n, v in a['state_dict'].items():
if "weight" in n:
n_samples = v.numel()
if n_samples > 1024:
w = v.detach()
sf = bits - 1. - compute_integral_part(w, overflow_rate=0)
q2 = linear_quantize(w, sf, bits=bits)
q2_list.append(q2)
q2_loss += ((q2 - w)**2).sum()
ix += 1
print(q2_loss)
# vis.histogram(q2_list[0].view(-1), win='uniform'+" hist",
# opts=dict(title='uniform'+" hist"))
qw = q2_list[0]
qw_v = qw.unique()
qw_hist = []
for v in qw_v:
qw_hist.append((qw == v).sum().item())
vis.bar(torch.tensor(qw_hist), qw_v, win='uniform' + " hist",
opts=dict(title='uniform' + " hist"))
# 2021/08/31: remove dulplicated code of MiniBatchRobustqTorch and RobustqTorch,
# 2021/08/31: MiniBatchRobustqTorch inherits functions from RobustqTorch. | [
"utee.misc.time_measurement",
"module.quantization.quant_functions.linear_quantize",
"module.quantization.quant_functions.compute_integral_part",
"torch.as_tensor",
"numpy.unique",
"torch.set_printoptions",
"torch.device",
"sklearn.show_versions",
"torch.from_numpy",
"torch.tensor",
"torch.cuda.... | [((11625, 11684), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'formatter': "{'float': '{: 0.3f}'.format}"}), "(formatter={'float': '{: 0.3f}'.format})\n", (11644, 11684), True, 'import numpy as np\n'), ((11689, 11714), 'torch.set_printoptions', 'torch.set_printoptions', (['(3)'], {}), '(3)\n', (11711, 11714), False, 'import torch\n'), ((11740, 11763), 'sklearn.show_versions', 'sklearn.show_versions', ([], {}), '()\n', (11761, 11763), False, 'import sklearn\n'), ((15380, 15402), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (15395, 15402), False, 'import sys\n'), ((15453, 15482), 'utee.misc.time_measurement', 'time_measurement', (['(False)', '(0)', '(0)'], {}), '(False, 0, 0)\n', (15469, 15482), False, 'from utee.misc import time_measurement\n'), ((18207, 18236), 'utee.misc.time_measurement', 'time_measurement', (['(False)', '(0)', '(0)'], {}), '(False, 0, 0)\n', (18223, 18236), False, 'from utee.misc import time_measurement\n'), ((3984, 4054), 'copy.deepcopy', 'copy.deepcopy', (['[self.kmeans.n_init, self.kmeans.init, self.kmeans.tol]'], {}), '([self.kmeans.n_init, self.kmeans.init, self.kmeans.tol])\n', (3997, 4054), False, 'import copy\n'), ((17742, 17771), 'torch.arange', 'torch.arange', (['(-1.0)', '(1.0)', '(0.01)'], {}), '(-1.0, 1.0, 0.01)\n', (17754, 17771), False, 'import torch\n'), ((25415, 25436), 'torch.tensor', 'torch.tensor', (['qw_hist'], {}), '(qw_hist)\n', (25427, 25436), False, 'import torch\n'), ((594, 629), 'torch.zeros', 'torch.zeros', (['(0,)'], {'dtype': 'torch.long'}), '((0,), dtype=torch.long)\n', (605, 629), False, 'import torch\n'), ((738, 772), 'torch.zeros', 'torch.zeros', (['n_clusters', 'n_feature'], {}), '(n_clusters, n_feature)\n', (749, 772), False, 'import torch\n'), ((926, 961), 'torch.zeros', 'torch.zeros', (['(0,)'], {'dtype': 'torch.long'}), '((0,), dtype=torch.long)\n', (937, 961), False, 'import torch\n'), ((4484, 4538), 'torch.as_tensor', 'torch.as_tensor', 
(['self.kmeans.labels_'], {'dtype': 'torch.long'}), '(self.kmeans.labels_, dtype=torch.long)\n', (4499, 4538), False, 'import torch\n'), ((4580, 4626), 'torch.from_numpy', 'torch.from_numpy', (['self.kmeans.cluster_centers_'], {}), '(self.kmeans.cluster_centers_)\n', (4596, 4626), False, 'import torch\n'), ((9440, 9494), 'torch.as_tensor', 'torch.as_tensor', (['self.kmeans.labels_'], {'dtype': 'torch.long'}), '(self.kmeans.labels_, dtype=torch.long)\n', (9455, 9494), False, 'import torch\n'), ((9536, 9582), 'torch.from_numpy', 'torch.from_numpy', (['self.kmeans.cluster_centers_'], {}), '(self.kmeans.cluster_centers_)\n', (9552, 9582), False, 'import torch\n'), ((17216, 17229), 'numpy.unique', 'np.unique', (['qw'], {}), '(qw)\n', (17225, 17229), True, 'import numpy as np\n'), ((17448, 17469), 'torch.tensor', 'torch.tensor', (['qw_hist'], {}), '(qw_hist)\n', (17460, 17469), False, 'import torch\n'), ((17868, 17893), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (17891, 17893), False, 'import torch\n'), ((1953, 1968), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1966, 1968), False, 'import torch\n'), ((12036, 12055), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (12048, 12055), False, 'import torch\n'), ((12094, 12114), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (12106, 12114), False, 'import torch\n'), ((15121, 15146), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15144, 15146), False, 'import torch\n'), ((24998, 25031), 'module.quantization.quant_functions.linear_quantize', 'linear_quantize', (['w', 'sf'], {'bits': 'bits'}), '(w, sf, bits=bits)\n', (25013, 25031), False, 'from module.quantization.quant_functions import linear_quantize, compute_integral_part\n'), ((1004, 1048), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)'], {'steps': 'self.n_clusters'}), '(-1, 1, steps=self.n_clusters)\n', (1018, 1048), False, 'import torch\n'), ((12063, 12088), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12086, 12088), False, 'import torch\n'), ((24935, 24976), 'module.quantization.quant_functions.compute_integral_part', 'compute_integral_part', (['w'], {'overflow_rate': '(0)'}), '(w, overflow_rate=0)\n', (24956, 24976), False, 'from module.quantization.quant_functions import linear_quantize, compute_integral_part\n'), ((3345, 3384), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)'], {'steps': 'n_clusters'}), '(-1, 1, steps=n_clusters)\n', (3359, 3384), False, 'import torch\n')] |
import time
import os
import spidev as SPI
import SSD1306
from PIL import Image, ImageDraw, ImageFont # 调用相关库文件
from datetime import datetime
# Directory containing this script; image assets are loaded relative to it.
PATH = os.path.dirname(__file__)
RST = 19  # reset pin number handed to the SSD1306 driver
DC = 16  # data/command pin number handed to the SSD1306 driver
bus = 0  # SPI bus index
device = 0  # SPI device (chip-select) index -- Raspberry Pi pin configuration
# Bring up the OLED display over SPI and start from a blank screen.
disp = SSD1306.SSD1306(rst=RST, dc=DC, spi=SPI.SpiDev(bus, device))
disp.begin()
disp.clear()
def gettime(dt=None):
    """Return a clock time formatted as zero-padded ``HH:MM:SS``.

    Args:
        dt: Optional ``datetime`` to format.  Defaults to the current
            local time, which keeps the original zero-argument call
            ``gettime()`` working unchanged.

    Returns:
        str: the time as ``HH:MM:SS`` (each field zero-padded to 2 digits).
    """
    if dt is None:
        dt = datetime.now()
    # strftime zero-pads every field, replacing the manual
    # "'0' + str(...) if single digit" logic of the original.
    return dt.strftime('%H:%M:%S')
def disp1():
    """Render a static 'Hello World!' message on the OLED panel."""
    # Start from an all-black RGB canvas, then reduce it to the panel's
    # 1-bit colour mode.
    canvas = Image.new('RGB', (disp.width, disp.height), 'black').convert('1')
    pen = ImageDraw.Draw(canvas)
    pen.bitmap((0, 0), canvas, fill=1)
    message_font = ImageFont.truetype("comicsansms.ttf", 20)
    pen.text((10, 20), 'Hello World!', font=message_font, fill=255)
    # Push the finished frame to the display.
    disp.image(canvas)
    disp.display()
def disp2():
    """Continuously show a clock plus a countdown to the mid-term exam.

    Redraws the whole frame roughly ten times per second until the
    process is interrupted.
    """
    # Loop-invariant resources: the original reloaded the logo PNG and the
    # font file on every single frame; load them once instead.
    logo = Image.open(os.path.join(PATH, 'p128.png')).resize(
        (32, 32), Image.ANTIALIAS).convert('1')  # logo
    font = ImageFont.truetype("comicsansms.ttf", 13)
    tardate = datetime(2020, 11, 9)  # date being counted down to
    while True:
        nowtime = gettime()
        img = Image.new('1', (disp.width, disp.height), 'black')  # final frame
        img.paste(logo, (0, 0, logo.size[0], logo.size[1]))
        draw = ImageDraw.Draw(img)
        draw.bitmap((0, 0), img, fill=1)
        draw.text((64, 0), nowtime, font=font, fill=255)
        draw.text((32, 15), "Count down of ", font=font, fill=255)
        draw.text((50, 30), "mid-term:", font=font, fill=255)
        # Split the remaining time into days / hours / minutes / seconds.
        delta = tardate - datetime.now()
        days = delta.days
        hours, seconds = divmod(delta.seconds, 3600)
        minutes, seconds = divmod(seconds, 60)
        draw.text((0, 45), f"{days}d {hours}hour {minutes}min {seconds}s",
                  font=font, fill=255)
        disp.clear()
        disp.image(img)
        disp.display()
        time.sleep(0.1)
time.sleep(0.1)
if __name__ == "__main__":
    # Run the clock/countdown screen when executed as a script.
    disp2()
| [
"datetime.datetime",
"spidev.SpiDev",
"PIL.Image.new",
"os.path.join",
"PIL.ImageFont.truetype",
"time.sleep",
"os.path.dirname",
"PIL.ImageDraw.Draw",
"datetime.datetime.now"
] | [((152, 177), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (167, 177), False, 'import os\n'), ((346, 360), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (358, 360), False, 'from datetime import datetime\n'), ((704, 745), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""comicsansms.ttf"""', '(20)'], {}), "('comicsansms.ttf', 20)\n", (722, 745), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((835, 856), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (849, 856), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((268, 291), 'spidev.SpiDev', 'SPI.SpiDev', (['bus', 'device'], {}), '(bus, device)\n', (278, 291), True, 'import spidev as SPI\n'), ((1222, 1272), 'PIL.Image.new', 'Image.new', (['"""1"""', '(disp.width, disp.height)', '"""black"""'], {}), "('1', (disp.width, disp.height), 'black')\n", (1231, 1272), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1362, 1403), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""comicsansms.ttf"""', '(13)'], {}), "('comicsansms.ttf', 13)\n", (1380, 1403), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1419, 1438), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (1433, 1438), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1685, 1706), 'datetime.datetime', 'datetime', (['(2020)', '(11)', '(9)'], {}), '(2020, 11, 9)\n', (1693, 1706), False, 'from datetime import datetime\n'), ((1725, 1739), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1737, 1739), False, 'from datetime import datetime\n'), ((2138, 2153), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2148, 2153), False, 'import time\n'), ((758, 810), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(disp.width, disp.height)', '"""black"""'], {}), "('RGB', (disp.width, disp.height), 'black')\n", (767, 810), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1108, 1138), 'os.path.join', 'os.path.join', 
(['PATH', '"""p128.png"""'], {}), "(PATH, 'p128.png')\n", (1120, 1138), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import torch
import argparse
import numpy as np
from model import PointCloudNet
from code.utils import fp_sampling, knn_patch, helper_function
import os
# Command-line interface: patch size, sampling ratio, model dir, input file.
parser = argparse.ArgumentParser()
parser.add_argument('--num_points', default=1024, type=int,
                    help='Number of points per patch')
# NOTE(review): the help text below duplicates --num_points; it presumably
# should describe the patch-number ratio instead.
parser.add_argument('--patch_num_ratio', default=4, type=int,
                    help='Number of points per patch')
parser.add_argument('--trained_model', type=str,
                    help='Trained model directory')
parser.add_argument('--test_file', type=str,
                    help='XYZ file for testing')
FLAGS = parser.parse_args()
# Ensure the output directory exists before writing results into it.
if not os.path.exists("../results"):
    os.mkdir("../results")
NUM_POINTS = FLAGS.num_points
PATCH_NUM_RATIO = FLAGS.patch_num_ratio
TRAINED_MODEL = FLAGS.trained_model
TEST_FILE = FLAGS.test_file
f_name = TEST_FILE.split("/")[-1]  # output reuses the input's base name
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# normalize data and extract patches
# (assumes the XYZ text file holds one point per row with the coordinates in
# the first three columns -- TODO confirm against the data format)
pc = torch.tensor(np.loadtxt(TEST_FILE)).float().to(device)
# Oversample patch centres by PATCH_NUM_RATIO so the patches overlap.
num_patches = int(pc.shape[0] / NUM_POINTS * PATCH_NUM_RATIO)
fps_idx = fp_sampling.furthest_point_sample(torch.unsqueeze(pc[:, 0:3], dim=0).contiguous(), num_patches)
# For every sampled centre, gather a NUM_POINTS-sized neighbourhood patch.
patches = torch.tensor(knn_patch.extract_knn_patch(pc[torch.squeeze(fps_idx, dim=0).cpu().numpy(), 0:3].cpu().numpy(), pc.cpu().numpy(), NUM_POINTS)).to(device)
print(patches.shape)
# Normalize every patch to a unit sphere centred at the origin; keep the
# centroid and scale so the network output can be mapped back afterwards.
centroid = torch.mean(patches[:, :, 0:3], dim=1, keepdim=True)
patches[:, :, 0:3] = patches[:, :, 0:3] - centroid
furthest_distance = torch.max(torch.sqrt(torch.sum(patches[:, :, 0:3] ** 2, dim=-1)), dim=1,keepdim=True).values
patches[:, :, 0:3] = patches[:, :, 0:3] / torch.unsqueeze(furthest_distance, dim=-1)
# read best epoch from trained model
trained_model_state = open("{0}/state.txt".format(TRAINED_MODEL), "r")
best_epoch, read_min_loss = helper_function.get_best_epoch(trained_model_state)
print(best_epoch, read_min_loss)
print("Best epoch (i.e., minimum loss) for {0}".format(read_min_loss))
# initialize model and restore the weights of the best epoch
net = PointCloudNet(3, 6, True, NUM_POINTS).to(device)
model = torch.load("{0}/epoch_{1}.pt".format(TRAINED_MODEL, best_epoch))
net.load_state_dict(model["model_state_dict"])
net.eval()
up_patches = net(patches)
# denormalize and merge patches
up_patches[:, :, 0:3] = up_patches[:, :, 0:3] * torch.unsqueeze(furthest_distance, dim=-1) + centroid
up_points = torch.cat([p for p in up_patches], dim=0)
# Resample the merged (overlapping) cloud down to 4x the input point count
# with farthest-point sampling, then save as a text file.
fps_idx = fp_sampling.furthest_point_sample(torch.unsqueeze(up_points[:, 0:3], dim=0).contiguous(), pc.shape[0] * 4)
up_points = up_points[torch.squeeze(fps_idx, dim=0).cpu().numpy(), :].detach().cpu().numpy()
np.savetxt("../results/{0}".format(f_name), up_points, fmt='%.6f', delimiter=" ", newline="\n")
| [
"os.path.exists",
"argparse.ArgumentParser",
"torch.mean",
"torch.unsqueeze",
"model.PointCloudNet",
"code.utils.helper_function.get_best_epoch",
"torch.cuda.is_available",
"torch.sum",
"os.mkdir",
"torch.squeeze",
"numpy.loadtxt",
"torch.cat"
] | [((190, 215), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (213, 215), False, 'import argparse\n'), ((1444, 1495), 'torch.mean', 'torch.mean', (['patches[:, :, 0:3]'], {'dim': '(1)', 'keepdim': '(True)'}), '(patches[:, :, 0:3], dim=1, keepdim=True)\n', (1454, 1495), False, 'import torch\n'), ((1884, 1935), 'code.utils.helper_function.get_best_epoch', 'helper_function.get_best_epoch', (['trained_model_state'], {}), '(trained_model_state)\n', (1914, 1935), False, 'from code.utils import fp_sampling, knn_patch, helper_function\n'), ((2425, 2466), 'torch.cat', 'torch.cat', (['[p for p in up_patches]'], {'dim': '(0)'}), '([p for p in up_patches], dim=0)\n', (2434, 2466), False, 'import torch\n'), ((684, 712), 'os.path.exists', 'os.path.exists', (['"""../results"""'], {}), "('../results')\n", (698, 712), False, 'import os\n'), ((718, 740), 'os.mkdir', 'os.mkdir', (['"""../results"""'], {}), "('../results')\n", (726, 740), False, 'import os\n'), ((1702, 1744), 'torch.unsqueeze', 'torch.unsqueeze', (['furthest_distance'], {'dim': '(-1)'}), '(furthest_distance, dim=-1)\n', (1717, 1744), False, 'import torch\n'), ((945, 970), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (968, 970), False, 'import torch\n'), ((2066, 2103), 'model.PointCloudNet', 'PointCloudNet', (['(3)', '(6)', '(True)', 'NUM_POINTS'], {}), '(3, 6, True, NUM_POINTS)\n', (2079, 2103), False, 'from model import PointCloudNet\n'), ((2359, 2401), 'torch.unsqueeze', 'torch.unsqueeze', (['furthest_distance'], {'dim': '(-1)'}), '(furthest_distance, dim=-1)\n', (2374, 2401), False, 'import torch\n'), ((1187, 1221), 'torch.unsqueeze', 'torch.unsqueeze', (['pc[:, 0:3]'], {'dim': '(0)'}), '(pc[:, 0:3], dim=0)\n', (1202, 1221), False, 'import torch\n'), ((1588, 1630), 'torch.sum', 'torch.sum', (['(patches[:, :, 0:3] ** 2)'], {'dim': '(-1)'}), '(patches[:, :, 0:3] ** 2, dim=-1)\n', (1597, 1630), False, 'import torch\n'), ((2511, 2552), 'torch.unsqueeze', 
'torch.unsqueeze', (['up_points[:, 0:3]'], {'dim': '(0)'}), '(up_points[:, 0:3], dim=0)\n', (2526, 2552), False, 'import torch\n'), ((1039, 1060), 'numpy.loadtxt', 'np.loadtxt', (['TEST_FILE'], {}), '(TEST_FILE)\n', (1049, 1060), True, 'import numpy as np\n'), ((2606, 2635), 'torch.squeeze', 'torch.squeeze', (['fps_idx'], {'dim': '(0)'}), '(fps_idx, dim=0)\n', (2619, 2635), False, 'import torch\n'), ((1303, 1332), 'torch.squeeze', 'torch.squeeze', (['fps_idx'], {'dim': '(0)'}), '(fps_idx, dim=0)\n', (1316, 1332), False, 'import torch\n')] |
import importlib
from uvicorn.workers import UvicornWorker
class DynamicUvicornWorker(UvicornWorker):
    """Uvicorn worker whose event-loop implementation is chosen at import time.

    If the ``uvloop`` module is installed the worker runs on it, otherwise
    uvicorn's automatic loop selection is used.  The ASGI ``lifespan``
    protocol is always turned ``off``.
    """

    # find_spec returns None when the module cannot be located, which lets us
    # probe for uvloop without importing it.  The probe result is used inline
    # instead of leaking throwaway names (the original kept `spam_spec` /
    # `found`, copied from the importlib docs example, as class attributes).
    if importlib.util.find_spec("uvloop") is not None:
        CONFIG_KWARGS = {"loop": "uvloop", "http": "auto", "lifespan": "off"}
    else:
        CONFIG_KWARGS = {"loop": "auto", "http": "auto", "lifespan": "off"}
| [
"importlib.util.find_spec"
] | [((316, 350), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""uvloop"""'], {}), "('uvloop')\n", (340, 350), False, 'import importlib\n')] |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reader class for tfdbg v2 debug events."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import threading
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
class DebugEventsReader(object):
  """Reader class for a tfdbg v2 DebugEvents directory."""
  def __init__(self, dump_root):
    """Constructor of DebugEventsReader.

    Args:
      dump_root: Path to the directory holding the tfdbg v2 debug-event
        files.  Exactly one `*.metadata` file must be present.

    Raises:
      ValueError: If `dump_root` is not a directory, or if it contains no
        (or more than one) `*.metadata` file.
    """
    if not os.path.isdir(dump_root):
      raise ValueError("Specified dump_root is not a directory: %s" % dump_root)
    metadata_paths = glob.glob(os.path.join(dump_root, "*.metadata"))
    if not metadata_paths:
      raise ValueError("Cannot find any metadata file in directory: %s" %
                       dump_root)
    elif len(metadata_paths) > 1:
      raise ValueError(
          "Unexpected: Found multiple (%d) metadata in directory: %s" %
          (len(metadata_paths), dump_root))
    self._metadata_path = compat.as_bytes(metadata_paths[0])
    self._metadata_reader = None
    # All per-kind files share the metadata file's name minus its extension.
    prefix = metadata_paths[0][:-len(".metadata")]
    self._source_files_path = compat.as_bytes("%s.source_files" % prefix)
    self._stack_frames_path = compat.as_bytes("%s.stack_frames" % prefix)
    self._graphs_path = compat.as_bytes("%s.graphs" % prefix)
    self._execution_path = compat.as_bytes("%s.execution" % prefix)
    self._graph_execution_traces_path = compat.as_bytes(
        "%s.graph_execution_traces" % prefix)
    self._readers = dict()  # A map from file path to reader.
    self._readers_lock = threading.Lock()
  def _generic_iterator(self, file_path):
    """A helper method that makes an iterator given a debug-events file path.

    Yields:
      `DebugEvent` protos parsed from the records of the file, stopping at
      the first incomplete/partial record.
    """
    # The following code uses the double-checked locking pattern to optimize
    # the common case (where the reader is already initialized).
    if file_path not in self._readers: # 1st check, without lock.
      with self._readers_lock:
        if file_path not in self._readers: # 2nd check, with lock.
          with errors.raise_exception_on_not_ok_status() as status:
            self._readers[file_path] = pywrap_tensorflow.PyRecordReader_New(
                compat.as_bytes(file_path), 0, b"", status)
    reader = self._readers[file_path]
    while True:
      try:
        reader.GetNext()
      except (errors.DataLossError, errors.OutOfRangeError):
        # We ignore partial read exceptions, because a record may be truncated.
        # PyRecordReader holds the offset prior to the failed read, so retrying
        # will succeed.
        break
      yield debug_event_pb2.DebugEvent.FromString(reader.record())
  def metadata_iterator(self):
    """Returns an iterator over DebugEvent protos in the .metadata file."""
    return self._generic_iterator(self._metadata_path)
  def source_files_iterator(self):
    """Returns an iterator over DebugEvent protos in the .source_files file."""
    return self._generic_iterator(self._source_files_path)
  def stack_frames_iterator(self):
    """Returns an iterator over DebugEvent protos in the .stack_frames file."""
    return self._generic_iterator(self._stack_frames_path)
  def graphs_iterator(self):
    """Returns an iterator over DebugEvent protos in the .graphs file."""
    return self._generic_iterator(self._graphs_path)
  def execution_iterator(self):
    """Returns an iterator over DebugEvent protos in the .execution file."""
    return self._generic_iterator(self._execution_path)
  def graph_execution_traces_iterator(self):
    """Returns an iterator over the .graph_execution_traces file."""
    return self._generic_iterator(self._graph_execution_traces_path)
| [
"tensorflow.python.framework.errors.raise_exception_on_not_ok_status",
"threading.Lock",
"tensorflow.python.util.compat.as_bytes",
"os.path.join",
"os.path.isdir"
] | [((1727, 1761), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['metadata_paths[0]'], {}), '(metadata_paths[0])\n', (1742, 1761), False, 'from tensorflow.python.util import compat\n'), ((1877, 1920), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (["('%s.source_files' % prefix)"], {}), "('%s.source_files' % prefix)\n", (1892, 1920), False, 'from tensorflow.python.util import compat\n'), ((1951, 1994), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (["('%s.stack_frames' % prefix)"], {}), "('%s.stack_frames' % prefix)\n", (1966, 1994), False, 'from tensorflow.python.util import compat\n'), ((2019, 2056), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (["('%s.graphs' % prefix)"], {}), "('%s.graphs' % prefix)\n", (2034, 2056), False, 'from tensorflow.python.util import compat\n'), ((2084, 2124), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (["('%s.execution' % prefix)"], {}), "('%s.execution' % prefix)\n", (2099, 2124), False, 'from tensorflow.python.util import compat\n'), ((2165, 2218), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (["('%s.graph_execution_traces' % prefix)"], {}), "('%s.graph_execution_traces' % prefix)\n", (2180, 2218), False, 'from tensorflow.python.util import compat\n'), ((2315, 2331), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (2329, 2331), False, 'import threading\n'), ((1215, 1239), 'os.path.isdir', 'os.path.isdir', (['dump_root'], {}), '(dump_root)\n', (1228, 1239), False, 'import os\n'), ((1353, 1390), 'os.path.join', 'os.path.join', (['dump_root', '"""*.metadata"""'], {}), "(dump_root, '*.metadata')\n", (1365, 1390), False, 'import os\n'), ((2779, 2820), 'tensorflow.python.framework.errors.raise_exception_on_not_ok_status', 'errors.raise_exception_on_not_ok_status', ([], {}), '()\n', (2818, 2820), False, 'from tensorflow.python.framework import errors\n'), ((2925, 2951), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', 
(['file_path'], {}), '(file_path)\n', (2940, 2951), False, 'from tensorflow.python.util import compat\n')] |
"""
bridge-like integrator for amuse
the bridge class provides a bridge like coupling between different
gravitational integrators. In this way a system composed of multiple
components can be evolved taking account of the self gravity of the whole
system self consistently, while choosing the most appropriate integrator
for the self-gravity of the component systems. This is mainly useful for
systems consisting of two or more components that are either well separated
spatially or have different scales (otherwise using a single integrator is
more efficient)
The main idea is that systems experience each others gravity through
periodic velocity kicks with ordinary evolution in between - the evolution
is thus described by an alternation of drift (D) and kick (K) operators,
here chosen as:
K(1/2 dt) D(dt) K(1/2 dt)
K(dt) denotes a kick of the velocities over a timestep dt, while D(dt)
denotes a drift, meaning secular evolution using self gravity of the
system, over dt.
implementation notes:
In order to use bridge the component systems should be initialized as usual,
then a bridge systems is initialized, after which one or more systems are
added:
from amuse.ext.bridge import bridge
bridgesys=bridge(verbose=False)
bridgesys.add_system(galaxy, (cluster,), False)
bridgesys.add_system(cluster, (galaxy,), True )
bridge builds on the full gravity interface, so unit handling etc is
guaranteed. Bridge itself is a (somewhat incomplete) gravity interface,
so the usual evolve, get_potential methods work (and bridge can be a
component in a bridge systems). Note that a single coordinate system should
be used at the moment for all the components systems (different units are
allowed though). The call to add systems, for example:
bridgesys.add_system(galaxy, (cluster,), False)
has three arguments: the system, a set with *interaction* partners and
a flag to specify whether synchronization is needed . The
interaction partners indicate which systems will kick the system. In the
most simple case these would be the set of other systems that are added,
but usually this is not what you want to get good performance. In some
cases you want to ignore one direction of interaction (eg. in a combined
simulation of a galaxy and a comet orbiting around a star you may want to
ignore the gravity of the comet), in other cases you want to use a
different force calculator (eg integrating a cluster in a galaxy where
the galaxy is evolved with a tree code and the cluster with a direct sum
code, one also would want to use a tree code to calculate the cluster
gravity for the galaxy. In such a case one can derive a skeleton gravity
interface from the cluster system. A module is provided with some
examples of such *derived* systems, derived_grav_systems.py
Hints for good use:
The bridgesys is flexible but care should be taken in order to obtain
valid results. For one thing, there is no restriction or check on the
validity of the assumption of well separated dynamics: for example any
system could be split up and put together in bridge, but if the timestep
is chosen to be larger than the timestep criterion of the code, the
integration will show errors.
For good performance one should use derived systems to reduce the
complexity where possible.
There is an issue with the synchronization: some codes do not end on the
exact time of an evolve, or need an explicit sync call. In these cases it
is up to the user to determine whether bridge can be used (an explicit
sync call may induce extra errors that degrade the order of the
integrator).
"""
# issues:
# - for now, units in si
# - a common coordinate system is used for all systems
# - sync of systems should be checked
# - timestepping: adaptive dt?
import threading
from amuse.units import quantities
from amuse.units import units, constants, generic_unit_system, nbody_system
from amuse import datamodel
from amuse.support.exceptions import AmuseException
class AbstractCalculateFieldForCodes(object):
    """
    Calculates gravity and potential fields at arbitrary points using the
    particles of other codes, evaluated by a field code whose life cycle is
    controlled by the subclass hooks _setup_code/_cleanup_code.
    """
    def __init__(self, input_codes, verbose=False, required_attributes=None):
        """
        'verbose' indicates whether to output some run info

        'required_attributes' specifies which particle attributes need to be
        transferred from the input_codes to the code that will calculate the
        field. For example, some codes don't need the velocity. Other codes
        may (wrongly) interpret the radius of the input code as gravitational
        softening. In the latter case
        required_attributes=['mass', 'x','y','z', 'vx','vy','vz']
        should prevent the radius of the input codes from being used.
        """
        self.codes_to_calculate_field_for = input_codes
        self.verbose = verbose
        # Stored as a filter predicate compatible with particles.copy().
        if required_attributes is None:
            self.required_attributes = lambda p, attribute_name: True
        else:
            self.required_attributes = lambda p, attribute_name: attribute_name in required_attributes

    def evolve_model(self, tend, timestep=None):
        """A pure field calculator has no dynamics of its own; no-op."""

    def get_potential_at_point(self, radius, x, y, z):
        """Return the potential at the given points due to all input codes."""
        return self._evaluate_field('get_potential_at_point', radius, x, y, z)

    def get_gravity_at_point(self, radius, x, y, z):
        """Return the gravitational acceleration at the given points."""
        return self._evaluate_field('get_gravity_at_point', radius, x, y, z)

    def _evaluate_field(self, method_name, radius, x, y, z):
        # Shared implementation for both field queries (the two public
        # methods were previously near-duplicates): load the particles of
        # every input code into a freshly set-up field code, query it, and
        # always clean up -- even when the query raises.
        code = self._setup_code()
        try:
            for input_code in self.codes_to_calculate_field_for:
                particles = input_code.particles.copy(filter_attributes = self.required_attributes)
                code.particles.add_particles(particles)
            code.commit_particles()
            return getattr(code, method_name)(radius, x, y, z)
        finally:
            self._cleanup_code(code)

    def _setup_code(self):
        """Hook: create and return the code used for the field evaluation."""
        pass

    def _cleanup_code(self, code):
        """Hook: dispose of the code returned by _setup_code."""
        pass
class CalculateFieldForCodes(AbstractCalculateFieldForCodes):
    """
    Calculates gravity and potential fields from the particles of other
    codes, using a field code produced by a factory function.

    A fresh code instance is created for every evaluation and stopped
    again afterwards.
    """

    def __init__(self, code_factory_function, input_codes, *args, **kwargs):
        super(CalculateFieldForCodes, self).__init__(input_codes, *args, **kwargs)
        self.code_factory_function = code_factory_function

    def _setup_code(self):
        # A brand-new field code for each evaluation.
        return self.code_factory_function()

    def _cleanup_code(self, code):
        # We created the instance ourselves, so shut it down completely.
        code.stop()
class CalculateFieldForCodesUsingReinitialize(AbstractCalculateFieldForCodes):
    """
    Calculates gravity and potential fields from the particles of other
    codes, using one pre-constructed field code.

    The same code instance is reused for every evaluation and reset
    afterwards.
    """

    def __init__(self, code, input_codes, *args, **kwargs):
        super(CalculateFieldForCodesUsingReinitialize, self).__init__(input_codes, *args, **kwargs)
        self.code = code

    def _setup_code(self):
        # Reuse the single code instance handed to the constructor.
        return self.code

    def _cleanup_code(self, code):
        # Reset rather than stop, so the instance can be reused next time.
        code.reset()
class CalculateFieldForCodesUsingRemove(AbstractCalculateFieldForCodes):
    """
    Calculates gravity and potential fields from the particles of other
    codes, using one pre-constructed field code.

    The same code instance is reused for every evaluation; its particle
    set is emptied again afterwards.
    """

    def __init__(self, code, input_codes, *args, **kwargs):
        super(CalculateFieldForCodesUsingRemove, self).__init__(input_codes, *args, **kwargs)
        self.code = code

    def _setup_code(self):
        # Reuse the single code instance handed to the constructor.
        return self.code

    def _cleanup_code(self, code):
        # Empty the code's particle set so the next evaluation starts clean.
        code.particles.remove_particles(code.particles)
class CalculateFieldForParticles(object):
    """
    Calculates a field for a set of particles, the set
    of particles can be from another code.
    """
    def __init__(self, particles = None, gravity_constant = None,
            softening_mode="shared", G = None):
        """
        :argument particles: source particle set (default: new empty set)
        :argument gravity_constant: value of the gravitational constant;
            'G' is accepted as an alias, but only one of the two may be given
        :argument softening_mode: 'individual'/'radius' uses each particle's
            radius, 'h_smooth' uses its h_smooth attribute, anything else
            uses the shared smoothing_length_squared
        """
        if particles is None:
            self.particles=datamodel.Particles()
        else:
            self.particles = particles
        # 'G' is an alias for 'gravity_constant'; giving both is an error.
        if gravity_constant is None:
            gravity_constant = G
        elif not G is None:
            raise Exception("both the parameter 'gravity_constant'({0}) and the parameter 'G'({1}) are given, please specify only one!".format(gravity_constant, G))
        if gravity_constant is None:
            # No constant given: fall back to constants.G, which only works
            # when the masses carry SI units; generic units need an explicit
            # value, and an empty set gives us nothing to probe.
            if len(particles) and hasattr(particles, 'mass'):
                try:
                    particles[0].mass.value_in(units.kg)
                    self.gravity_constant = constants.G
                except:
                    raise AmuseException("For generic units the gravity_constant must be specified")
            else:
                raise AmuseException("Particle data not yet available, so the gravity_constant must be specified")
        else:
            self.gravity_constant = gravity_constant
        # Select how softening lengths are looked up during field evaluation.
        if softening_mode == "individual" or softening_mode == "radius":
            self._softening_lengths_squared = self._softening_lengths_squared_individual
        elif softening_mode == "h_smooth":
            self._softening_lengths_squared = self._softening_lengths_squared_h_smooth
        else:
            self._softening_lengths_squared = self._softening_lengths_squared_shared
        self.smoothing_length_squared = quantities.zero
    def _softening_lengths_squared_individual(self):
        # Per-particle softening taken from the radius attribute.
        return self.particles.radius**2
    def _softening_lengths_squared_h_smooth(self):
        # Per-particle softening taken from the h_smooth attribute.
        return self.particles.h_smooth**2
    def _softening_lengths_squared_shared(self):
        # One shared softening value for all particles.
        return self.smoothing_length_squared#.as_vector_with_length(len(self.particles))
    def cleanup_code(self):
        """Drop the stored particles by replacing them with an empty set."""
        self.particles = datamodel.Particles()
    def evolve_model(self,tend,timestep=None):
        """
        A pure field calculator has no dynamics of its own; no-op.
        """
    def get_potential_at_point(self,radius,x,y,z):
        """Return the softened gravitational potential at each point."""
        positions = self.particles.position
        result = quantities.AdaptingVectorQuantity()
        for i in range(len(x)):
            dx = x[i] - positions.x
            dy = y[i] - positions.y
            dz = z[i] - positions.z
            dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
            dr = (dr_squared + self._softening_lengths_squared()).sqrt()
            # Softened point-mass potential: -G * sum(m / r).
            energy_of_this_particle = (self.particles.mass / dr).sum()
            result.append(-self.gravity_constant * energy_of_this_particle)
        return result
    def get_gravity_at_point(self,radius,x,y,z):
        """Return the acceleration components (ax, ay, az) at each point.

        NOTE(review): the per-point 'radius' argument is added to the
        softening here, while get_potential_at_point ignores it -- confirm
        this asymmetry is intended.
        """
        positions = self.particles.position
        m1 = self.particles.mass
        result_ax = quantities.AdaptingVectorQuantity()
        result_ay = quantities.AdaptingVectorQuantity()
        result_az = quantities.AdaptingVectorQuantity()
        for i in range(len(x)):
            dx = x[i] - positions.x
            dy = y[i] - positions.y
            dz = z[i] - positions.z
            dr_squared = ((dx * dx) + (dy * dy) + (dz * dz) +
                self._softening_lengths_squared() + radius[i]**2)
            # Softened inverse-square law: a = -G * m * dr / |dr|^3.
            ax = -self.gravity_constant * (m1*dx/dr_squared**1.5).sum()
            ay = -self.gravity_constant * (m1*dy/dr_squared**1.5).sum()
            az = -self.gravity_constant * (m1*dz/dr_squared**1.5).sum()
            result_ax.append(ax)
            result_ay.append(ay)
            result_az.append(az)
        return result_ax, result_ay, result_az
class GravityCodeInField(object):
def __init__(self, code, field_codes, do_sync=True, verbose=False, radius_is_eps=False, h_smooth_is_eps=False, zero_smoothing=False):
"""
verbose indicates whether to output some run info
"""
self.code = code
self.field_codes = field_codes
if hasattr(self.code, 'model_time'):
self.time = self.code.model_time
else:
self.time = quantities.zero
self.do_sync=do_sync
self.verbose=verbose
self.timestep=None
self.radius_is_eps = radius_is_eps
self.h_smooth_is_eps = h_smooth_is_eps
required_attributes = ['mass', 'x', 'y', 'z', 'vx', 'vy', 'vz']
if self.radius_is_eps:
required_attributes.append('radius')
elif self.h_smooth_is_eps:
required_attributes.append('h_smooth')
self.required_attributes = lambda p, x : x in required_attributes
if not hasattr(self.code,"parameters"):
self.zero_smoothing=True
elif not hasattr(self.code.parameters,"epsilon_squared"):
self.zero_smoothing=True
else:
self.zero_smoothing=zero_smoothing
def evolve_model(self,tend,timestep=None):
"""
evolve combined system to tend, timestep fixes timestep
"""
if timestep is None:
timestep = self.timestep
first=True
while self.time < (tend-timestep/2.):
if first:
self.kick(timestep/2.)
first=False
else:
self.kick(timestep)
self.drift(self.time+timestep)
self.time+=timestep
if not first:
self.kick(timestep/2.)
def synchronize_model(self):
"""
explicitly synchronize all components
"""
if hasattr(self.code,"synchronize_model"):
if(self.verbose):
print(self.code.__class__.__name__,"is synchronizing", end=' ')
self.code.synchronize_model()
if(self.verbose):
print(".. done")
def get_potential_at_point(self,radius,x,y,z):
return self.code.get_potential_at_point(radius,x,y,z)
def get_gravity_at_point(self,radius,x,y,z):
return self.code.get_gravity_at_point(radius,x,y,z)
@property
def model_time(self):
return self.time
@property
def potential_energy(self):
if not hasattr(self.code, 'particles'):
return quantities.zero
result = self.code.potential_energy
particles = self.code.particles.copy(filter_attributes = self.required_attributes)
for y in self.field_codes:
energy = self.get_potential_energy_in_field_code(particles, y)
result += energy
return result
@property
def kinetic_energy(self):
return self.code.kinetic_energy
@property
def thermal_energy(self):
if hasattr(self.code,'thermal_energy'):
return self.code.thermal_energy
else:
return quantities.zero
@property
def particles(self):
return self.code.particles
@property
def gas_particles(self):
if hasattr(self.code, "gas_particles"):
return self.code.gas_particles
else:
raise AttributeError
@property
def dm_particles(self):
if hasattr(self.code, "dm_particles"):
return self.code.dm_particles
else:
raise AttributeError
def drift(self, tend):
if not hasattr(self.code,"evolve_model"):
return
if (self.verbose):
print(self.code.__class__.__name__, "is evolving to", tend)
self.code.evolve_model(tend)
if(self.verbose):
print(".. done")
def cannot_kick(self):
"""
check if the code is capable of kicking other particles,
please do not try to optimize this, I know it is called every kick but
only calculating it at the start causes an annoying bug in certain uses of the code.
"""
return len(self.code.particles)==0 or not (hasattr(self, 'particles') and 'vx' in self.particles.get_attribute_names_defined_in_store())
def kick(self, dt):
if self.cannot_kick():
return quantities.zero
particles = self.code.particles.copy(filter_attributes = self.required_attributes)
kinetic_energy_before = particles.kinetic_energy()
for field_code in self.field_codes:
if(self.verbose):
print(self.code.__class__.__name__,"receives kick from",field_code.__class__.__name__, end=' ')
self.kick_with_field_code(
particles,
field_code,
dt
)
if(self.verbose):
print(".. done")
channel=particles.new_channel_to(self.code.particles)
channel.copy_attributes(["vx","vy","vz"])
kinetic_energy_after = particles.kinetic_energy()
return kinetic_energy_after - kinetic_energy_before
def _softening_lengths(self, particles):
if self.radius_is_eps:
return particles.radius
elif self.h_smooth_is_eps:
return particles.h_smooth
elif self.zero_smoothing:
return 0.*particles.x
else:
return (self.code.parameters.epsilon_squared**0.5).as_vector_with_length(len(particles))
def get_potential_energy_in_field_code(self, particles, field_code):
pot=field_code.get_potential_at_point(
self._softening_lengths(particles),
particles.x,
particles.y,
particles.z
)
return (pot*particles.mass).sum() / 2
def kick_with_field_code(self, particles, field_code, dt):
ax,ay,az=field_code.get_gravity_at_point(
self._softening_lengths(particles),
particles.x,
particles.y,
particles.z
)
self.update_velocities(particles, dt, ax, ay, az)
def update_velocities(self,particles, dt, ax, ay, az):
particles.vx += dt * ax
particles.vy += dt * ay
particles.vz += dt * az
def stop(self):
self.code.stop()
class Bridge(object):
def __init__(self, timestep = None, verbose=False, use_threading=True,method=None):
"""
verbose indicates whether to output some run info
"""
self.codes=[]
self.time=quantities.zero
self.verbose=verbose
self.timestep=timestep
self.kick_energy = quantities.zero
self.use_threading = use_threading
self.time_offsets = dict()
self.method=method
self.channels = datamodel.Channels()
def add_system(self, interface, partners=set(), do_sync=True,
radius_is_eps=False, h_smooth_is_eps=False, zero_smoothing=False):
"""
add a system to bridge integrator
"""
if hasattr(interface, "particles"):
code = GravityCodeInField(interface, partners, do_sync, self.verbose,
radius_is_eps, h_smooth_is_eps, zero_smoothing)
self.add_code(code)
else:
if len(partners):
raise Exception("You added a code without particles, but with partners, this is not supported!")
self.add_code(interface)
def add_code(self, code):
self.codes.append(code)
if hasattr(code,"model_time"):
self.time_offsets[code]=(self.time-code.model_time)
else:
self.time_offsets[code]=quantities.zero
def evolve_model(self, tend, timestep=None):
"""
evolve combined system to tend, timestep fixes timestep
"""
if timestep is None:
if self.timestep is None:
timestep=tend-self.time
else:
timestep = self.timestep
if self.method is None:
return self.evolve_joined_leapfrog(tend,timestep)
else:
return self.evolve_simple_steps(tend,timestep)
def evolve_simple_steps(self,tend,timestep):
while self.time < (tend-timestep/2):
self._drift_time=self.time
self.method(self.kick_codes,self.drift_codes_dt, timestep)
self.channels.copy()
self.time=self.time+timestep
def evolve_joined_leapfrog(self,tend,timestep):
first=True
while self.time < (tend-timestep/2.):
if first:
self.kick_codes(timestep/2.)
first=False
else:
self.kick_codes(timestep)
self.drift_codes(self.time+timestep)
self.channels.copy()
self.time += timestep
if not first:
self.kick_codes(timestep/2.)
def synchronize_model(self):
"""
explicitly synchronize all components
"""
for x in self.codes:
if hasattr(x,"synchronize_model"):
if(self.verbose): print(x.__class__.__name__,"is synchronizing", end=' ')
x.synchronize_model()
if(self.verbose): print(".. done")
def stop(self):
for one_code in self.codes:
if hasattr(one_code, "stop"):
one_code.stop()
def get_potential_at_point(self,radius,x,y,z):
pot=quantities.zero
for code in self.codes:
_pot=code.get_potential_at_point(radius,x,y,z)
pot=pot+_pot
return pot
def get_gravity_at_point(self,radius,x,y,z):
ax=quantities.zero
ay=quantities.zero
az=quantities.zero
for code in self.codes:
_ax,_ay,_az=code.get_gravity_at_point(radius,x,y,z)
ax=ax+_ax
ay=ay+_ay
az=az+_az
return ax,ay,az
@property
def model_time(self):
return self.time
@property
def potential_energy(self):
result=quantities.zero
for x in self.codes:
result+=x.potential_energy
return result
@property
def kinetic_energy(self):
result=quantities.zero
for x in self.codes:
result+=x.kinetic_energy
return result #- self.kick_energy
@property
def thermal_energy(self):
result=quantities.zero
for x in self.codes:
if hasattr(x,'thermal_energy'):
result+=x.thermal_energy
return result
@property
def particles(self):
array=[]
for x in self.codes:
if hasattr(x,"particles"):
array.append(x.particles)
if len(array) == 0:
raise AttributeError
elif len(array) == 1:
return array[0]
return datamodel.ParticlesSuperset(array)
@property
def gas_particles(self):
array=[]
for x in self.codes:
if hasattr(x,"gas_particles"):
array.append(x.gas_particles)
if len(array) == 0:
raise AttributeError
elif len(array) == 1:
return array[0]
return datamodel.ParticlesSuperset(array)
@property
def dm_particles(self):
array=[]
for x in self.codes:
if hasattr(x,"dm_particles"):
array.append(x.dm_particles)
elif hasattr(x,"particles"):
array.append(x.particles)
if len(array) == 0:
raise AttributeError
elif len(array) == 1:
return array[0]
return datamodel.ParticlesSuperset(array)
# 'private' functions
def drift_codes_dt(self,dt):
self._drift_time+=dt
self.drift_codes(self._drift_time)
def drift_codes(self,tend):
threads=[]
for x in self.codes:
offset=self.time_offsets[x]
if hasattr(x,"drift"):
threads.append(threading.Thread(target=x.drift, args=(tend-offset,)) )
elif hasattr(x,"evolve_model"):
threads.append(threading.Thread(target=x.evolve_model, args=(tend-offset,)) )
if self.use_threading:
for x in threads:
x.start()
for x in threads:
x.join()
else:
for x in threads:
x.run()
def kick_codes(self,dt):
de = quantities.zero
for x in self.codes:
if hasattr(x,"kick"):
de += x.kick(dt)
self.kick_energy += de
| [
"amuse.datamodel.Channels",
"amuse.units.quantities.AdaptingVectorQuantity",
"amuse.datamodel.ParticlesSuperset",
"threading.Thread",
"amuse.datamodel.Particles",
"amuse.support.exceptions.AmuseException"
] | [((10042, 10063), 'amuse.datamodel.Particles', 'datamodel.Particles', ([], {}), '()\n', (10061, 10063), False, 'from amuse import datamodel\n'), ((10249, 10284), 'amuse.units.quantities.AdaptingVectorQuantity', 'quantities.AdaptingVectorQuantity', ([], {}), '()\n', (10282, 10284), False, 'from amuse.units import quantities\n'), ((10875, 10910), 'amuse.units.quantities.AdaptingVectorQuantity', 'quantities.AdaptingVectorQuantity', ([], {}), '()\n', (10908, 10910), False, 'from amuse.units import quantities\n'), ((10931, 10966), 'amuse.units.quantities.AdaptingVectorQuantity', 'quantities.AdaptingVectorQuantity', ([], {}), '()\n', (10964, 10966), False, 'from amuse.units import quantities\n'), ((10987, 11022), 'amuse.units.quantities.AdaptingVectorQuantity', 'quantities.AdaptingVectorQuantity', ([], {}), '()\n', (11020, 11022), False, 'from amuse.units import quantities\n'), ((18460, 18480), 'amuse.datamodel.Channels', 'datamodel.Channels', ([], {}), '()\n', (18478, 18480), False, 'from amuse import datamodel\n'), ((22504, 22538), 'amuse.datamodel.ParticlesSuperset', 'datamodel.ParticlesSuperset', (['array'], {}), '(array)\n', (22531, 22538), False, 'from amuse import datamodel\n'), ((22852, 22886), 'amuse.datamodel.ParticlesSuperset', 'datamodel.ParticlesSuperset', (['array'], {}), '(array)\n', (22879, 22886), False, 'from amuse import datamodel\n'), ((23280, 23314), 'amuse.datamodel.ParticlesSuperset', 'datamodel.ParticlesSuperset', (['array'], {}), '(array)\n', (23307, 23314), False, 'from amuse import datamodel\n'), ((8306, 8327), 'amuse.datamodel.Particles', 'datamodel.Particles', ([], {}), '()\n', (8325, 8327), False, 'from amuse import datamodel\n'), ((9055, 9157), 'amuse.support.exceptions.AmuseException', 'AmuseException', (['"""Particle data not yet available, so the gravity_constant must be specified"""'], {}), "(\n 'Particle data not yet available, so the gravity_constant must be specified'\n )\n", (9069, 9157), False, 'from amuse.support.exceptions 
import AmuseException\n'), ((23631, 23686), 'threading.Thread', 'threading.Thread', ([], {'target': 'x.drift', 'args': '(tend - offset,)'}), '(target=x.drift, args=(tend - offset,))\n', (23647, 23686), False, 'import threading\n'), ((8940, 9014), 'amuse.support.exceptions.AmuseException', 'AmuseException', (['"""For generic units the gravity_constant must be specified"""'], {}), "('For generic units the gravity_constant must be specified')\n", (8954, 9014), False, 'from amuse.support.exceptions import AmuseException\n'), ((23762, 23824), 'threading.Thread', 'threading.Thread', ([], {'target': 'x.evolve_model', 'args': '(tend - offset,)'}), '(target=x.evolve_model, args=(tend - offset,))\n', (23778, 23824), False, 'import threading\n')] |
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from PIL import Image
import pandas as pd
from sklearn.decomposition import PCA
from scipy.spatial.distance import pdist, squareform
from scipy.sparse.linalg import eigs
from numpy import linalg as LA
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.pyplot import figure
numImages = 60
# fig = plt.figure(figsize = (8,8))
X = np.zeros(shape = (numImages, 490*490))
for i in range(1, numImages + 1):
filename = str(i)+'.jpg'
img = mpimg.imread(filename)
img = img[:, :, 0]*0.299 + img[:, :, 1]*0.587 + img[:, :, 2]*0.114
X[i-1] = np.array(img.flatten()).reshape(1, img.shape[0]*img.shape[1])
numComponents = 60
pca = PCA(n_components=numComponents)
pca.fit(X)
Z = pca.transform(X)
fig1, ax = plt.subplots()
ax.scatter(Z[0:5, 0], Z[0:5, 1], s = 25, marker = 'x', c = 'r', label = '$NaCl\; 10mM,\; CaCl_2\; 3.0mM,\; MgCl_2\; 1.5mM$')
ax.scatter(Z[5:10, 0], Z[5:10, 1], s = 25, marker = 'o', facecolors = 'none', edgecolors='r',
label = '$NaCl\; 5.0mM,\; CaCl_2\; 3.0mM,\; MgCl_2\; 1.5mM$')
ax.scatter(Z[10:15, 0], Z[10:15, 1], s = 25, marker = 's', facecolors = 'none', edgecolors='r',
label = '$NaCl\; 2.5mM,\; CaCl_2\; 3.0mM,\; MgCl_2\; 1.5mM$')
ax.scatter(Z[15:20, 0], Z[15:20, 1], s = 25, marker = 'x', c = 'g', label = '$NaHCO_3\; 10mM,\; CaCl_2\; 0.5mM,\; MgCl_2\; 0.25mM$')
ax.scatter(Z[20:25, 0], Z[20:25, 1], s = 25, marker = 'o', facecolors = 'none', edgecolors='g',
label = '$NaHCO_3\; 5.0mM,\; CaCl_2\; 0.5mM,\; MgCl_2\; 0.25mM$')
ax.scatter(Z[25:30, 0], Z[25:30, 1], s = 25, marker = 's', facecolors = 'none', edgecolors='g',
label = '$NaHCO_3\; 2.5mM,\; CaCl_2\; 0.5mM,\; MgCl_2\; 0.25mM$')
ax.scatter(Z[30:35, 0], Z[30:35, 1], s = 25, marker = 'x', c = 'b', label = '$Na_2SO_4\; 10mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[35:40, 0], Z[35:40, 1], s = 25, marker = 'o', facecolors = 'none', edgecolors='b',
label = '$Na_2SO_4\; 5.0mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[40:45, 0], Z[40:45, 1], s = 25, marker = 's', facecolors = 'none', edgecolors='b',
label = '$Na_2SO_4\; 2.5mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[45:50, 0], Z[45:50, 1], s = 25, marker = 'x', c = 'y', label = '$NaHCO_3\; 10mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[50:55, 0], Z[50:55, 1], s = 25, marker = 'o', facecolors = 'none', edgecolors='y',
label = '$NaHCO_3\; 5.0mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[55:60, 0], Z[55:60, 1], s = 25, marker = 's', facecolors = 'none', edgecolors='y',
label = '$NaHCO_3\; 2.5mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
plt.xlabel('First component')
plt.ylabel('Second component')
ax.set_yticklabels([])
ax.set_xticklabels([])
# plt.title('PCA Image analysis for all samples')
ax.legend(loc='upper right', prop={'size': 7}, handletextpad = 0, labelspacing = 0)
plt.show()
fig1.savefig('PCA_all_images_2_components_1_plot.jpg', dpi = 1000)
# # use component 3 and 4
# fig2, ax = plt.subplots()
# ax.scatter(Z[0:5, 2], Z[0:5, 3], s = 100, marker = 'x', c = 'r', label = 'NaCl 10mM, CaCl2 3.0mM, MgCl2 1.5mM')
# ax.scatter(Z[5:10, 2], Z[5:10, 3], s = 100, marker = 'o', facecolors = 'none', edgecolors='r',
# label = 'NaCl 5.0mM, CaCl2 3.0mM, MgCl2 1.5mM')
# ax.scatter(Z[10:15, 2], Z[10:15, 3], s = 100, marker = 's', facecolors = 'none', edgecolors='r',
# label = 'NaCl 2.5mM, CaCl2 3.0mM, MgCl2 1.5mM')
#
# ax.scatter(Z[15:20, 2], Z[15:20, 3], s = 100, marker = 'x', c = 'g', label = 'NaHCO3 10mM, CaCl2 0.5mM, MgCl2 0.25mM')
# ax.scatter(Z[20:25, 2], Z[20:25, 3], s = 100, marker = 'o', facecolors = 'none', edgecolors='g',
# label = 'NaHCO3 5.0mM, CaCl2 0.5mM, MgCl2 0.25mM')
# ax.scatter(Z[25:30, 2], Z[25:30, 3], s = 100, marker = 's', facecolors = 'none', edgecolors='g',
# label = 'NaHCO3 2.5mM, CaCl2 0.5mM, MgCl2 0.25mM')
#
# ax.scatter(Z[30:35, 2], Z[30:35, 3], s = 100, marker = 'x', c = 'b', label = 'Na2SO4 5.0mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[35:40, 2], Z[35:40, 3], s = 100, marker = 'o', facecolors = 'none', edgecolors='b',
# label = 'Na2SO4 2.5mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[40:45, 2], Z[40:45, 3], s = 100, marker = 's', facecolors = 'none', edgecolors='b',
# label='Na2SO4 1.25mM, CaSO4 0.5mM, MgSO4 0.25mM')
#
# ax.scatter(Z[45:50, 2], Z[45:50, 3], s = 100, marker = 'x', c = 'y', label = 'NaHCO3 10mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[50:55, 2], Z[50:55, 3], s = 100, marker = 'o', facecolors = 'none', edgecolors='y',
# label = 'NaHCO3 5.0mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[55:60, 2], Z[55:60, 3], s = 100, marker = 's', facecolors = 'none', edgecolors='y',
# label = 'NaHCO3 2.5mM, CaSO4 0.5mM, MgSO4 0.25mM')
#
# plt.xlabel('Third component', fontsize = 20)
# plt.ylabel('Fourth component', fontsize = 20)
# plt.title('PCA Image analysis for all samples', fontsize = 20)
# ax.legend(loc = 'upper right', prop={'size': 7})
# plt.show()
#
# eigenvalues = pca.explained_variance_
# variance = []
# for i in range(len(eigenvalues)):
# if i == 0:
# variance.append(eigenvalues[0])
# else:
# variance.append(variance[i-1] + eigenvalues[i])
# variance = variance/variance[-1]
#
# fig3, ax = plt.subplots()
# plt.plot(variance, 'ro-', linewidth=1)
# plt.title('Scree Plot for all 60 images', fontsize=20)
# plt.xlabel('Principal Component', fontsize=20)
# plt.ylabel('Cumulative Eigenvalue', fontsize=20)
# fig3.savefig('Scree Plot for all 60 images.png')
# # 3d image
# # fig = plt.figure(num=None, figsize=(4, 3), dpi=80, facecolor='w', edgecolor='k')
# fig = plt.figure()
# # figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
# # fig, axs = plt.subplots(nrows=1, ncols=1, constrained_layout=True)
# ax = Axes3D(fig)
# ax.scatter(Z[0:5, 0], Z[0:5, 1], Z[0:5, 2], s = 100, marker = 'x', c = 'r', label = 'NaCl 10mM, CaCl2 3.0mM, MgCl2 1.5mM')
# ax.scatter(Z[5:10, 0], Z[5:10, 1], Z[5:10, 2], s = 100, marker = 's', c = 'r', label = 'NaCl 5.0mM, CaCl2 3.0mM, MgCl2 1.5mM')
# ax.scatter(Z[10:15, 0], Z[10:15, 1], Z[10:15, 2], s = 100, marker = 'o', c ='r', label = 'NaCl 2.5mM, CaCl2 3.0mM, MgCl2 1.5mM')
#
# ax.scatter(Z[15:20, 0], Z[15:20, 1], Z[15:20, 2], s = 100, marker = 'x', c = 'g', label = 'NaHCO3 10mM, CaCl2 0.5mM, MgCl2 0.25mM')
# ax.scatter(Z[20:25, 0], Z[20:25, 1], Z[20:25, 2], s = 100, marker = 's', c = 'g', label = 'NaHCO3 5.0mM, CaCl2 0.5mM, MgCl2 0.25mM')
# ax.scatter(Z[25:30, 0], Z[25:30, 1], Z[25:30, 2], s = 100, marker = 'o', c = 'g', label = 'NaHCO3 2.5mM, CaCl2 0.5mM, MgCl2 0.25mM')
#
# ax.scatter(Z[30:35, 0], Z[30:35, 1], Z[30:35, 2], s = 100, marker = 'x', c = 'b', label = 'Na2SO4 5.0mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[35:40, 0], Z[35:40, 1], Z[35:40, 2], s = 100, marker = 's', c = 'b', label = 'Na2SO4 2.5mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[40:45, 0], Z[40:45, 1], Z[40:45, 2], s = 100, marker = 'o', c = 'b', label='Na2SO4 1.25mM, CaSO4 0.5mM, MgSO4 0.25mM')
#
# ax.scatter(Z[45:50, 0], Z[45:50, 1], Z[45:50, 2], s = 100, marker = 'x', c = 'y', label = 'NaHCO3 10mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[50:55, 0], Z[50:55, 1], Z[50:55, 2], s = 100, marker = 's', c = 'y', label = 'NaHCO3 5.0mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[55:60, 0], Z[55:60, 1], Z[55:60, 2], s = 100, marker = 'o', c = 'y', label = 'NaHCO3 2.5mM, CaSO4 0.5mM, MgSO4 0.25mM')
#
# ax.set_xlabel('First component', fontsize = 15)
# ax.set_ylabel('Second component', fontsize = 15)
# ax.set_zlabel('Third component', fontsize = 15)
# ax.set_title('PCA image analysis for all samples \n with three components', fontsize = 20)
# ax.legend(loc = 'upper right', prop={'size': 7})
# plt.show()
# plt.close(fig) | [
"matplotlib.pyplot.ylabel",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabel",
"matplotlib.image.imread",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((419, 457), 'numpy.zeros', 'np.zeros', ([], {'shape': '(numImages, 490 * 490)'}), '(shape=(numImages, 490 * 490))\n', (427, 457), True, 'import numpy as np\n'), ((727, 758), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'numComponents'}), '(n_components=numComponents)\n', (730, 758), False, 'from sklearn.decomposition import PCA\n'), ((803, 817), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (815, 817), True, 'import matplotlib.pyplot as plt\n'), ((2726, 2755), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""First component"""'], {}), "('First component')\n", (2736, 2755), True, 'import matplotlib.pyplot as plt\n'), ((2756, 2786), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Second component"""'], {}), "('Second component')\n", (2766, 2786), True, 'import matplotlib.pyplot as plt\n'), ((2968, 2978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2976, 2978), True, 'import matplotlib.pyplot as plt\n'), ((532, 554), 'matplotlib.image.imread', 'mpimg.imread', (['filename'], {}), '(filename)\n', (544, 554), True, 'import matplotlib.image as mpimg\n')] |
from pydub import AudioSegment
from pydub.silence import split_on_silence
def segment(filename,foldername):
"""
filename : str
foldername: str folder to put all the chunks
"""
sound_file = AudioSegment.from_file(filename)
sound_file = sound_file.set_channels(1)
sound_file = sound_file.set_frame_rate(16000)
audio_chunks = split_on_silence(sound_file,min_silence_len=1000,silence_thresh=-60)
for i, chunk in enumerate(audio_chunks):
out_file = foldername+"/chunk{0}.wav".format(i)
print("exporting", out_file)
chunk.export(out_file, format="wav", bitrate="128")
| [
"pydub.AudioSegment.from_file",
"pydub.silence.split_on_silence"
] | [((198, 230), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['filename'], {}), '(filename)\n', (220, 230), False, 'from pydub import AudioSegment\n'), ((335, 405), 'pydub.silence.split_on_silence', 'split_on_silence', (['sound_file'], {'min_silence_len': '(1000)', 'silence_thresh': '(-60)'}), '(sound_file, min_silence_len=1000, silence_thresh=-60)\n', (351, 405), False, 'from pydub.silence import split_on_silence\n')] |
# coding: utf-8
import os
size = os.path.getsize("test.txt")
with open("test.txt", mode="r") as f:
print(f.read(size))
| [
"os.path.getsize"
] | [((35, 62), 'os.path.getsize', 'os.path.getsize', (['"""test.txt"""'], {}), "('test.txt')\n", (50, 62), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# !/usr/bin/python
__author__ = 'ma_keling'
# Version : 1.0.0
# Start Time : 2018-12-20
# Update Time :
# Change Log :
## 1.
## 2.
## 3.
import arcpy
import CalculateLods
def execute():
in_map = arcpy.GetParameter(0)
arcpy.AddMessage("Input map : {0}.".format(in_map))
in_layers = arcpy.GetParameter(1)
field_name = "lod"
CalculateLods.calculate_lods_for_feature(in_layers, field_name)
execute()
| [
"arcpy.GetParameter",
"CalculateLods.calculate_lods_for_feature"
] | [((263, 284), 'arcpy.GetParameter', 'arcpy.GetParameter', (['(0)'], {}), '(0)\n', (281, 284), False, 'import arcpy\n'), ((359, 380), 'arcpy.GetParameter', 'arcpy.GetParameter', (['(1)'], {}), '(1)\n', (377, 380), False, 'import arcpy\n'), ((414, 477), 'CalculateLods.calculate_lods_for_feature', 'CalculateLods.calculate_lods_for_feature', (['in_layers', 'field_name'], {}), '(in_layers, field_name)\n', (454, 477), False, 'import CalculateLods\n')] |
import io
import logging
import time
from typing import List, Optional
from custom_components.xiaomi_cloud_map_extractor.common.map_data import MapData
from custom_components.xiaomi_cloud_map_extractor.types import Colors, Drawables, ImageConfig, Sizes, Texts
try:
from miio import RoborockVacuum, DeviceException
except ImportError:
from miio import Vacuum as RoborockVacuum, DeviceException
import PIL.Image as Image
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_TOKEN, CONF_USERNAME
from custom_components.xiaomi_cloud_map_extractor.common.map_data_parser import MapDataParser
from custom_components.xiaomi_cloud_map_extractor.common.xiaomi_cloud_connector import XiaomiCloudConnector
from custom_components.xiaomi_cloud_map_extractor.const import *
from custom_components.xiaomi_cloud_map_extractor.dreame.vacuum import DreameVacuum
from custom_components.xiaomi_cloud_map_extractor.enums import CameraStatus
from custom_components.xiaomi_cloud_map_extractor.roidmi.vacuum import RoidmiVacuum
from custom_components.xiaomi_cloud_map_extractor.unsupported.vacuum import UnsupportedVacuum
from custom_components.xiaomi_cloud_map_extractor.viomi.vacuum import ViomiVacuum
from custom_components.xiaomi_cloud_map_extractor.xiaomi.vacuum import XiaomiVacuum
_LOGGER = logging.getLogger(__name__)
DEVICE_MAPPING = {
CONF_AVAILABLE_API_XIAOMI: XiaomiVacuum,
CONF_AVAILABLE_API_VIOMI: ViomiVacuum,
CONF_AVAILABLE_API_ROIDMI: RoidmiVacuum,
CONF_AVAILABLE_API_DREAME: DreameVacuum,
}
STATUS_LOG_LEVEL = {
CameraStatus.FAILED_TO_RETRIEVE_DEVICE: _LOGGER.error,
CameraStatus.UNABLE_TO_PARSE_MAP: _LOGGER.warning,
CameraStatus.UNABLE_TO_RETRIEVE_MAP: _LOGGER.warning
}
class VacuumManager:
def __init__(self, config):
host: str = config[CONF_HOST]
token: str = config[CONF_TOKEN]
username: str = config[CONF_USERNAME]
password: str = config[CONF_PASSWORD]
drawables = config.get(CONF_DRAW, [])
room_colors = config.get(CONF_ROOM_COLORS, {})
colors: Colors = config.get(CONF_COLORS, {})
for room, color in room_colors.items():
colors[f"{COLOR_ROOM_PREFIX}{room}"] = color
self._vacuum = RoborockVacuum(host, token)
self._connector = XiaomiCloudConnector(username, password)
self._name: str = config.get(CONF_NAME, DEFAULT_NAME)
self._should_poll: bool = config.get(CONF_AUTO_UPDATE, True)
self._image_config: ImageConfig = config.get(CONF_MAP_TRANSFORM, DEFAULT_MAP_TRANSFORM)
self._colors: Colors = colors
self._drawables: Drawables = CONF_AVAILABLE_DRAWABLES[1:] if DRAWABLE_ALL in drawables else drawables
self._sizes: Sizes = config.get(CONF_SIZES, DEFAULT_SIZES)
self._texts: Texts = config.get(CONF_TEXTS, [])
self._country: str = config.get(CONF_COUNTRY)
self._allowed_attributes: List[str] = config.get(CONF_ATTRIBUTES, [])
self._store_map_raw: bool = config.get(CONF_STORE_MAP_RAW, False)
self._store_map_image: bool = config.get(CONF_STORE_MAP_IMAGE)
self._store_map_path: str = config.get(CONF_STORE_MAP_PATH, DEFAULT_STORE_MAP_PATH)
self._forced_api: str = config.get(CONF_FORCE_API)
self._device = None
self._used_api = None
self._map_saved = None
self._image = None
self._map_data = None
self._logged_in = False
self._logged_in_previously = True
self._received_map_name_previously = True
self._attributes = {}
self._status = CameraStatus.INITIALIZING
@property
def image(self) -> Optional[bytes]:
return self._image
@property
def name(self):
return self._name
@property
def attributes(self):
return self._attributes
@property
def should_poll(self):
return self._should_poll
def turn_on(self):
self._should_poll = True
def turn_off(self):
self._should_poll = False
def _get_attributes_data(self):
map_data = self._map_data
rooms = []
if self._map_data.rooms is not None:
rooms = dict(
filter(lambda x: x[0] is not None, map(lambda x: (x[0], x[1].name), self._map_data.rooms.items())))
if len(rooms) == 0:
rooms = list(self._map_data.rooms.keys())
attributes = {
ATTRIBUTE_CALIBRATION: map_data.calibration(),
ATTRIBUTE_CHARGER: map_data.charger,
ATTRIBUTE_CLEANED_ROOMS: map_data.cleaned_rooms,
ATTRIBUTE_COUNTRY: self._country,
ATTRIBUTE_GOTO: map_data.goto,
ATTRIBUTE_GOTO_PATH: map_data.goto_path,
ATTRIBUTE_GOTO_PREDICTED_PATH: map_data.predicted_path,
ATTRIBUTE_IGNORED_OBSTACLES: map_data.ignored_obstacles,
ATTRIBUTE_IGNORED_OBSTACLES_WITH_PHOTO: map_data.ignored_obstacles_with_photo,
ATTRIBUTE_IMAGE: map_data.image,
ATTRIBUTE_IS_EMPTY: map_data.image.is_empty,
ATTRIBUTE_MAP_NAME: map_data.map_name,
ATTRIBUTE_NO_GO_AREAS: map_data.no_go_areas,
ATTRIBUTE_NO_MOPPING_AREAS: map_data.no_mopping_areas,
ATTRIBUTE_OBSTACLES: map_data.obstacles,
ATTRIBUTE_OBSTACLES_WITH_PHOTO: map_data.obstacles_with_photo,
ATTRIBUTE_PATH: map_data.path,
ATTRIBUTE_ROOM_NUMBERS: rooms,
ATTRIBUTE_ROOMS: map_data.rooms,
ATTRIBUTE_VACUUM_POSITION: map_data.vacuum_position,
ATTRIBUTE_VACUUM_ROOM: map_data.vacuum_room,
ATTRIBUTE_VACUUM_ROOM_NAME: map_data.vacuum_room_name,
ATTRIBUTE_WALLS: map_data.walls,
ATTRIBUTE_ZONES: map_data.zones
}
return attributes
def _update_attributes(self):
attributes = {}
if self._map_data is not None:
data = self._get_attributes_data()
for name, value in data.items():
if name in self._allowed_attributes:
attributes[name] = value
if self._store_map_raw:
attributes[ATTRIBUTE_MAP_SAVED] = self._map_saved
if self._device is not None:
attributes[ATTR_MODEL] = self._device.model
attributes[ATTR_USED_API] = self._used_api
if self._connector.two_factor_auth_url is not None:
attributes[ATTR_TWO_FACTOR_AUTH] = self._connector.two_factor_auth_url
self._attributes = attributes
def update(self, now):
counter = 10
if self._status != CameraStatus.TWO_FACTOR_AUTH_REQUIRED and not self._logged_in:
self._handle_login()
if self._device is None and self._logged_in:
self._handle_device()
map_name = self._handle_map_name(counter)
if map_name == "retry" and self._device is not None:
self._set_status(CameraStatus.FAILED_TO_RETRIEVE_MAP_FROM_VACUUM)
self._received_map_name_previously = map_name != "retry"
if self._logged_in and map_name != "retry" and self._device is not None:
self._handle_map_data(map_name)
else:
exists = self._device is not None
_LOGGER.debug(
f"Unable to retrieve map ({now}), "
f"Logged in: {self._logged_in} | "
f"Map name: {map_name} | "
f"Device retrieved: {exists}"
)
message = str(self._status)
map_data = MapDataParser.create_empty(self._colors, message)
self._set_map_data(map_data)
self._logged_in_previously = self._logged_in
self._update_attributes()
def _handle_login(self):
_LOGGER.debug("Logging in...")
self._logged_in = self._connector.login()
if self._logged_in is None:
self._set_status(CameraStatus.TWO_FACTOR_AUTH_REQUIRED)
elif self._logged_in:
self._set_status(CameraStatus.LOGGED_IN)
else:
self._set_status(CameraStatus.FAILED_LOGIN)
if self._logged_in_previously:
_LOGGER.error("Unable to log in, check credentials")
def _handle_device(self):
_LOGGER.debug(f"Retrieving device info, country: {self._country}")
country, user_id, device_id, model = self._connector.get_device_details(self._vacuum.token, self._country)
if model is not None:
self._country = country
_LOGGER.debug(f"Retrieved device model: {model}")
self._used_api = self._detect_api(model)
device_init = DEVICE_MAPPING.get(self._used_api, UnsupportedVacuum)
self._device = device_init(self._connector, self._country, user_id, device_id, model)
_LOGGER.debug(f"Created device, used api: {self._used_api}")
else:
self._set_status(CameraStatus.FAILED_TO_RETRIEVE_DEVICE)
def _handle_map_name(self, counter):
map_name = "retry"
if self._device is not None and not self._device.should_get_map_from_vacuum():
map_name = "0"
while map_name == "retry" and counter > 0:
_LOGGER.debug("Retrieving map name from device")
time.sleep(0.1)
try:
map_name = self._vacuum.map()[0]
_LOGGER.debug("Map name %s", map_name)
except OSError as exc:
_LOGGER.error(f"Got OSError while fetching the state: {str(exc)}")
except DeviceException as exc:
if self._received_map_name_previously:
_LOGGER.warning(f"Got exception while fetching the state: {str(exc)}")
self._received_map_name_previously = False
finally:
counter = counter - 1
return map_name
def _handle_map_data(self, map_name):
_LOGGER.debug("Retrieving map from Xiaomi cloud")
store_map_path = self._store_map_path if self._store_map_raw else None
map_data, map_stored = self._device.get_map(map_name, self._colors, self._drawables, self._texts,
self._sizes, self._image_config, store_map_path)
if map_data is not None:
# noinspection PyBroadException
try:
_LOGGER.debug("Map data retrieved")
self._set_map_data(map_data)
self._map_saved = map_stored
if self._map_data.image.is_empty:
self._set_status(CameraStatus.EMPTY_MAP)
if self._map_data is None or self._map_data.image.is_empty:
self._set_map_data(map_data)
else:
self._set_map_data(map_data)
self._set_status(CameraStatus.OK)
except Exception as ex:
self._set_status(CameraStatus.UNABLE_TO_PARSE_MAP, ex)
else:
self._logged_in = False
self._set_status(CameraStatus.UNABLE_TO_RETRIEVE_MAP)
def _set_status(self, status, ex: Optional[Exception] = None):
log = STATUS_LOG_LEVEL.get(status, _LOGGER.debug)
log_message = status
if ex is not None:
log_message = f"{status}, Error: {str(ex)}"
self._status = status
log(log_message)
def _set_map_data(self, map_data: MapData):
img_byte_arr = io.BytesIO()
map_data.image.data.save(img_byte_arr, format='PNG')
self._image = img_byte_arr.getvalue()
self._map_data = map_data
self._store_image()
def _detect_api(self, model: str):
if self._forced_api is not None:
return self._forced_api
if model in API_EXCEPTIONS:
return API_EXCEPTIONS[model]
def list_contains_model(prefixes):
return len(list(filter(lambda x: model.startswith(x), prefixes))) > 0
filtered = list(filter(lambda x: list_contains_model(x[1]), AVAILABLE_APIS.items()))
if len(filtered) > 0:
return filtered[0][0]
return CONF_AVAILABLE_API_XIAOMI
def _store_image(self):
if self._store_map_image:
try:
if self._image is not None:
image = Image.open(io.BytesIO(self._image))
image.save(f"{self._store_map_path}/map_image_{self._device.model}.png")
except Exception as ex:
_LOGGER.warning(f"Error while saving image, Error: {str(ex)}")
| [
"logging.getLogger",
"io.BytesIO",
"custom_components.xiaomi_cloud_map_extractor.common.map_data_parser.MapDataParser.create_empty",
"custom_components.xiaomi_cloud_map_extractor.common.xiaomi_cloud_connector.XiaomiCloudConnector",
"time.sleep",
"miio.Vacuum"
] | [((1310, 1337), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1327, 1337), False, 'import logging\n'), ((2244, 2271), 'miio.Vacuum', 'RoborockVacuum', (['host', 'token'], {}), '(host, token)\n', (2258, 2271), True, 'from miio import Vacuum as RoborockVacuum, DeviceException\n'), ((2298, 2338), 'custom_components.xiaomi_cloud_map_extractor.common.xiaomi_cloud_connector.XiaomiCloudConnector', 'XiaomiCloudConnector', (['username', 'password'], {}), '(username, password)\n', (2318, 2338), False, 'from custom_components.xiaomi_cloud_map_extractor.common.xiaomi_cloud_connector import XiaomiCloudConnector\n'), ((11425, 11437), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (11435, 11437), False, 'import io\n'), ((7525, 7574), 'custom_components.xiaomi_cloud_map_extractor.common.map_data_parser.MapDataParser.create_empty', 'MapDataParser.create_empty', (['self._colors', 'message'], {}), '(self._colors, message)\n', (7551, 7574), False, 'from custom_components.xiaomi_cloud_map_extractor.common.map_data_parser import MapDataParser\n'), ((9245, 9260), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (9255, 9260), False, 'import time\n'), ((12290, 12313), 'io.BytesIO', 'io.BytesIO', (['self._image'], {}), '(self._image)\n', (12300, 12313), False, 'import io\n')] |
from Locators.checkout_overview_locator import CheckoutOverviewLocator
from Objects.product import Product
from Pages.base_page_object import BasePage
from Utils.utility import Utils
class CheckoutOverViewPage(BasePage):
def __init__(self, driver):
super().__init__(driver)
def get_product_overview_info(self, index):
name = self.get_text(CheckoutOverviewLocator.PRODUCT_NAME_LABEL(index))
desc = self.get_text(CheckoutOverviewLocator.PRODUCT_DESC_LABEL(index))
price = self.get_text(CheckoutOverviewLocator.PRODUCT_PRICE_LABEL(index))
qty = self.get_text(CheckoutOverviewLocator.PRODUCT_QTY_LABEL(index))
return Product(name, desc, price, qty)
def get_product_price(self):
price_lbl = self.get_text(CheckoutOverviewLocator.TOTAL_ITEM_PRICE_LABEL)
price = Utils.convert_string_to_float(self, price_lbl)
return float(price)
def get_tax(self):
tax_lbl = self.get_text(CheckoutOverviewLocator.TAX_LABEL)
tax = Utils.convert_string_to_float(self, tax_lbl)
return float(tax)
def get_total_price(self):
total_price_lbl = self.get_text(CheckoutOverviewLocator.TOTAL_PRICE_LABEL)
total_price = Utils.convert_string_to_float(self, total_price_lbl)
return float(total_price)
def click_finish_button(self):
return self.click(CheckoutOverviewLocator.FINISH_BUTTON)
| [
"Locators.checkout_overview_locator.CheckoutOverviewLocator.PRODUCT_PRICE_LABEL",
"Locators.checkout_overview_locator.CheckoutOverviewLocator.PRODUCT_NAME_LABEL",
"Locators.checkout_overview_locator.CheckoutOverviewLocator.PRODUCT_DESC_LABEL",
"Locators.checkout_overview_locator.CheckoutOverviewLocator.PRODUC... | [((645, 676), 'Objects.product.Product', 'Product', (['name', 'desc', 'price', 'qty'], {}), '(name, desc, price, qty)\n', (652, 676), False, 'from Objects.product import Product\n'), ((799, 845), 'Utils.utility.Utils.convert_string_to_float', 'Utils.convert_string_to_float', (['self', 'price_lbl'], {}), '(self, price_lbl)\n', (828, 845), False, 'from Utils.utility import Utils\n'), ((965, 1009), 'Utils.utility.Utils.convert_string_to_float', 'Utils.convert_string_to_float', (['self', 'tax_lbl'], {}), '(self, tax_lbl)\n', (994, 1009), False, 'from Utils.utility import Utils\n'), ((1159, 1211), 'Utils.utility.Utils.convert_string_to_float', 'Utils.convert_string_to_float', (['self', 'total_price_lbl'], {}), '(self, total_price_lbl)\n', (1188, 1211), False, 'from Utils.utility import Utils\n'), ((355, 404), 'Locators.checkout_overview_locator.CheckoutOverviewLocator.PRODUCT_NAME_LABEL', 'CheckoutOverviewLocator.PRODUCT_NAME_LABEL', (['index'], {}), '(index)\n', (397, 404), False, 'from Locators.checkout_overview_locator import CheckoutOverviewLocator\n'), ((431, 480), 'Locators.checkout_overview_locator.CheckoutOverviewLocator.PRODUCT_DESC_LABEL', 'CheckoutOverviewLocator.PRODUCT_DESC_LABEL', (['index'], {}), '(index)\n', (473, 480), False, 'from Locators.checkout_overview_locator import CheckoutOverviewLocator\n'), ((508, 558), 'Locators.checkout_overview_locator.CheckoutOverviewLocator.PRODUCT_PRICE_LABEL', 'CheckoutOverviewLocator.PRODUCT_PRICE_LABEL', (['index'], {}), '(index)\n', (551, 558), False, 'from Locators.checkout_overview_locator import CheckoutOverviewLocator\n'), ((584, 632), 'Locators.checkout_overview_locator.CheckoutOverviewLocator.PRODUCT_QTY_LABEL', 'CheckoutOverviewLocator.PRODUCT_QTY_LABEL', (['index'], {}), '(index)\n', (625, 632), False, 'from Locators.checkout_overview_locator import CheckoutOverviewLocator\n')] |
# -*- coding: utf-8 -*-
# https://github.com/Kodi-vStream/venom-xbmc-addons
import xbmcaddon, xbmcgui, xbmc
"""System d'importation
from resources.lib.comaddon import addon, dialog, VSlog, xbmcgui, xbmc
"""
"""
from resources.lib.comaddon import addon
addons = addon() en haut de page.
utiliser une fonction comaddon ou xbmcaddon
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcaddon.html
addons.VSlang(30305)
addons.getLocalizedString(30305)
addons.openSettings()
utiliser la fonction avec un autre addon
addons2 = addon('plugin.video.youtube')
addons2.openSettings()
"""
class addon(xbmcaddon.Addon):
#def __init__(self, id='plugin.video.vstream'):
# xbmcaddon.__init__(id)
# pass
def VSlang(self, lang):
return xbmc.translatePath(self.getLocalizedString(lang))
#xbmcaddon.Addon('plugin.video.vstream').getLocalizedString(lang))
#Bug avec accent xbmc.translatePath(xbmcaddon.Addon('plugin.video.vstream').getLocalizedString(lang)).decode('utf-8')
#deprecier utiliser addons.setSetting et addons.getSetting
def VSsetting(self, name, value = False):
#addons = addon()
#use addons.setting('name') pour getsetting
#use addons.setting('name', 'value) pour setsetting
if value:
return self.setSetting(name, value)
else:
return self.getSetting(name)
"""
from resources.lib.comaddon import dialog
ne peux pas utiliser les autres fonction que dialog
dialogs = dialog()
dialogs.VSinfo('test')
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#Dialog
"""
class dialog(xbmcgui.Dialog):
#def __init__(self):
# xbmcgui.__init__('')
# pass
def VSok(self, desc, title = 'vStream'):
dialog = self.ok(title, desc)
return dialog
def VSyesno(self, desc, title = 'vStream'):
dialog = self.yesno(title, desc)
return dialog
def VSselect(self, desc, title = 'vStream'):
ret = self.select(title, desc)
return ret
def VSselectqual(self, list_qual, list_url):
if len(list_url) == 0:
return ''
if len(list_url) == 1:
return list_url[0]
ret = self.select(addon().VSlang(30448), list_qual)
if ret > -1:
return list_url[ret]
return ''
def VSinfo(self, desc, title = 'vStream', iseconds = 0, sound = False):
if (iseconds == 0):
iseconds = 1000
else:
iseconds = iseconds * 1000
if (addon().getSetting('Block_Noti_sound') == 'true'):
sound = True
return self.notification(str(title), str(desc), xbmcgui.NOTIFICATION_INFO, iseconds, sound)
def VSerror(self, e):
return self.notification('vStream', 'Erreur: ' + str(e), xbmcgui.NOTIFICATION_ERROR, 2000), VSlog('Erreur: ' + str(e))
"""
from resources.lib.comaddon import progress
progress_ = progress()
progress_.VScreate(SITE_NAME)
progress_.VSupdate(progress_, total)
if progress_.iscanceled():
break
progress_.VSclose(progress_)
dialog = progress() non recommander
progress = progress() non recommander
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#DialogProgress
"""
COUNT = 0
DIALOG2 = None
class empty():
def VSupdate(self, dialog, total, text = '', search = False):
pass
def iscanceled(self):
pass
def VSclose(self, dialog):
pass
class progress(xbmcgui.DialogProgress):
def VScreate(self, title = 'vStream', desc = ''):
global DIALOG2
currentWindow = xbmcgui.getCurrentWindowId()
if currentWindow == 10000:
return empty()
if DIALOG2 == None:
self.create(title, desc)
VSlog('create dialog')
DIALOG2 = self
return self
else:
return DIALOG2
def VSupdate(self, dialog, total, text = '', search = False):
if not search and window(10101).getProperty('search') == 'true':
return
global COUNT
COUNT += 1
iPercent = int(float(COUNT * 100) / total)
dialog.update(iPercent, 'Loading: ' + str(COUNT) + '/' + str(total), text)
def VSclose(self, dialog = ''):
if not dialog and DIALOG2:
dialog = DIALOG2
if not dialog:
return
if window(10101).getProperty('search') == 'true':
return
dialog.close()
VSlog('close dialog')
del dialog
return False
"""
from resources.lib.comaddon import window
window(10101).getProperty('test')
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#Window
"""
class window(xbmcgui.Window):
def __init__(self, id):
pass
"""
from resources.lib.comaddon import listitem
listitem.setLabel('test')
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#ListItem
"""
class listitem(xbmcgui.ListItem):
#ListItem([label, label2, iconImage, thumbnailImage, path])
def __init__(self, label = '', label2 = '', iconImage = '', thumbnailImage = '', path = ''):
pass
"""
from resources.lib.comaddon import VSlog
VSlog('testtttttttttttt')
ou
xbmc.log
"""
#xbmc des fonctions pas des class
def VSlog(e, level = xbmc.LOGDEBUG):
#rapelle l'ID de l'addon pour être appelé hors addon
if (addon('plugin.video.vstream').getSetting('debug') == 'true'):
level = xbmc.LOGNOTICE
return xbmc.log('\t[PLUGIN] vStream: ' + str(e), level)
def VSupdate():
return xbmc.executebuiltin('Container.Refresh')
def VSshow_busy():
xbmc.executebuiltin('ActivateWindow(busydialog)')
def VShide_busy():
xbmc.executebuiltin('Dialog.Close(busydialog)')
while xbmc.getCondVisibility('Window.IsActive(busydialog)'):
xbmc.sleep(100)
def isKrypton():
try:
version = xbmc.getInfoLabel('system.buildversion')
if version[0:2] >= '17':
return True
else:
return False
except:
return False
def VSread(sHtmlContent):
import xbmcvfs
file = 'special://userdata/addon_data/plugin.video.vstream/html.txt'
if xbmcvfs.exists(file):
xbmcvfs.delete(file)
f = xbmcvfs.File (file, 'w')
result = f.write(sHtmlContent)
f.close()
#use cGui.showKeyBoard
def VSkeyboard(sDefaultText = ''):
return False
| [
"xbmc.getCondVisibility",
"xbmcvfs.exists",
"xbmc.getInfoLabel",
"xbmcvfs.delete",
"xbmc.sleep",
"xbmc.executebuiltin",
"xbmcgui.getCurrentWindowId",
"xbmcvfs.File"
] | [((5509, 5549), 'xbmc.executebuiltin', 'xbmc.executebuiltin', (['"""Container.Refresh"""'], {}), "('Container.Refresh')\n", (5528, 5549), False, 'import xbmcaddon, xbmcgui, xbmc\n'), ((5574, 5623), 'xbmc.executebuiltin', 'xbmc.executebuiltin', (['"""ActivateWindow(busydialog)"""'], {}), "('ActivateWindow(busydialog)')\n", (5593, 5623), False, 'import xbmcaddon, xbmcgui, xbmc\n'), ((5648, 5695), 'xbmc.executebuiltin', 'xbmc.executebuiltin', (['"""Dialog.Close(busydialog)"""'], {}), "('Dialog.Close(busydialog)')\n", (5667, 5695), False, 'import xbmcaddon, xbmcgui, xbmc\n'), ((5706, 5759), 'xbmc.getCondVisibility', 'xbmc.getCondVisibility', (['"""Window.IsActive(busydialog)"""'], {}), "('Window.IsActive(busydialog)')\n", (5728, 5759), False, 'import xbmcaddon, xbmcgui, xbmc\n'), ((6126, 6146), 'xbmcvfs.exists', 'xbmcvfs.exists', (['file'], {}), '(file)\n', (6140, 6146), False, 'import xbmcvfs\n'), ((6186, 6209), 'xbmcvfs.File', 'xbmcvfs.File', (['file', '"""w"""'], {}), "(file, 'w')\n", (6198, 6209), False, 'import xbmcvfs\n'), ((3576, 3604), 'xbmcgui.getCurrentWindowId', 'xbmcgui.getCurrentWindowId', ([], {}), '()\n', (3602, 3604), False, 'import xbmcaddon, xbmcgui, xbmc\n'), ((5769, 5784), 'xbmc.sleep', 'xbmc.sleep', (['(100)'], {}), '(100)\n', (5779, 5784), False, 'import xbmcaddon, xbmcgui, xbmc\n'), ((5830, 5870), 'xbmc.getInfoLabel', 'xbmc.getInfoLabel', (['"""system.buildversion"""'], {}), "('system.buildversion')\n", (5847, 5870), False, 'import xbmcaddon, xbmcgui, xbmc\n'), ((6156, 6176), 'xbmcvfs.delete', 'xbmcvfs.delete', (['file'], {}), '(file)\n', (6170, 6176), False, 'import xbmcvfs\n')] |
# type: ignore[attr-defined]
from solids import example_one_solid # pylint: disable=import-error
from dagster import pipeline
@pipeline
def example_one_pipeline():
example_one_solid()
| [
"solids.example_one_solid"
] | [((172, 191), 'solids.example_one_solid', 'example_one_solid', ([], {}), '()\n', (189, 191), False, 'from solids import example_one_solid\n')] |
import random
import torch
from game import Game
from agent import RLAgent
from moves import Moves
game=Game()
agent=RLAgent()
moves=Moves()
num_win=0 #initialize no. of win by human
num_lose=0 #initialize no. of win by ai but loss by human
num_tie=0
random.seed(1000)
def check_board_and_may_update_state_values():
global num_win, num_lose, num_tie
win_or_tie=True
if game.who_wins()==-1: #human win
print("YOU WIN!!")
agent.update_state_values(0)
num_win+=1
elif game.who_wins()==1: #ai win
print("YOU LOSE!!")
agent.update_state_values(1)
num_lose+=1
elif game.who_wins()==2: #tie
print("TIE!!")
num_tie+=1
else:
win_or_tie=False
if win_or_tie:
game.clear()
agent.clear_history()
return win_or_tie
while True:
print("The number of wins are : {}\nThe number of loses : {}\nThe number of ties is : {}".format(num_win, num_lose, num_tie))
if (num_win+num_lose+num_tie) == 30000:
break
#moves
x,y=moves.random_move(game.board)
game.take_move(x,y,-1)
print(game)
#check
win_or_tie=check_board_and_may_update_state_values()
if win_or_tie:
continue
#RL AI move
x,y=agent.next_move(game.board)
game.take_move(x,y,1)
agent.cache_move(game.board)
print(game)
#check
check_board_and_may_update_state_values()
torch.save(agent.state_values,'tic_tac_toe.pth')
| [
"agent.RLAgent",
"moves.Moves",
"random.seed",
"torch.save",
"game.Game"
] | [((106, 112), 'game.Game', 'Game', ([], {}), '()\n', (110, 112), False, 'from game import Game\n'), ((119, 128), 'agent.RLAgent', 'RLAgent', ([], {}), '()\n', (126, 128), False, 'from agent import RLAgent\n'), ((135, 142), 'moves.Moves', 'Moves', ([], {}), '()\n', (140, 142), False, 'from moves import Moves\n'), ((255, 272), 'random.seed', 'random.seed', (['(1000)'], {}), '(1000)\n', (266, 272), False, 'import random\n'), ((1276, 1325), 'torch.save', 'torch.save', (['agent.state_values', '"""tic_tac_toe.pth"""'], {}), "(agent.state_values, 'tic_tac_toe.pth')\n", (1286, 1325), False, 'import torch\n')] |
# receive_msg.py
#
# SPDX-FileCopyrightText: Copyright 2021 <NAME>
#
# SPDX-License-Identifier: MIT
#
# Receive message from IOTA tangle
#
import iota_client
import os
import pprint
# Config
msg_meta = False
env_node_address = 'HORNET_NODE_ADDRESS'
# Print Message data
def show_message(message, meta=False):
if meta:
show = 'Message meta'
else:
show = 'Message'
print(
'''
{} data:
'''.format(show))
pprint.pprint(message)
# Connect to node and retrieve message
def main():
import argparse
parser = argparse.ArgumentParser(description='Receive message from IOTA tangle.')
parser.add_argument('--msg_id', dest='msg_id',
default='497c1b68e5480d07819bbd9c989c8d245fa748667a89fdf7dac884741f493326',
help='Id of message stored on tangle')
parser.add_argument('--node_info', dest='node_info',
default=False,
help='Print node information')
args = parser.parse_args()
message_id = args.msg_id
node_info = args.node_info
# Get node address out of environment
NODE_URL = os.getenv(env_node_address)
if not NODE_URL:
raise Exception("Please define environment variable with node URL.")
try:
# Initialize client
client = iota_client.Client(
nodes_name_password=[[NODE_URL]], node_sync_disabled=True)
except:
raise Exception('Node not found.')
# Check node status
if not client.get_health():
print('''
------------------
Node not healthy.
------------------''')
# Get node information
if node_info:
print('Node Information:')
pprint.pprint(client.get_info())
# Retrieve message from Tangle
message = client.get_message_data(message_id)
# Show results
show_message(message)
if msg_meta:
message_meta = client.get_message_metadata(message_id)
show_message(message_meta, True)
# Decode message
msg_str = bytes(message['payload']['indexation'][0]['data']).decode('utf-8')
print('''
Decoded message:
{}
'''.format(msg_str))
if __name__ == "__main__":
main() | [
"iota_client.Client",
"pprint.pprint",
"argparse.ArgumentParser",
"os.getenv"
] | [((452, 474), 'pprint.pprint', 'pprint.pprint', (['message'], {}), '(message)\n', (465, 474), False, 'import pprint\n'), ((566, 638), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Receive message from IOTA tangle."""'}), "(description='Receive message from IOTA tangle.')\n", (589, 638), False, 'import argparse\n'), ((1154, 1181), 'os.getenv', 'os.getenv', (['env_node_address'], {}), '(env_node_address)\n', (1163, 1181), False, 'import os\n'), ((1335, 1412), 'iota_client.Client', 'iota_client.Client', ([], {'nodes_name_password': '[[NODE_URL]]', 'node_sync_disabled': '(True)'}), '(nodes_name_password=[[NODE_URL]], node_sync_disabled=True)\n', (1353, 1412), False, 'import iota_client\n')] |
# -*- coding: utf-8 -*-
"""
Turn on and off systemd suspend inhibitor.
Configuration parameters:
format: display format for this module
(default '[\?color=state SUSPEND [\?if=state OFF|ON]]')
lock_types: specify state to inhibit, comma separated list
https://www.freedesktop.org/wiki/Software/systemd/inhibit/
(default ['handle-lid-switch', 'idle', 'sleep'])
thresholds: specify color thresholds to use
(default [(True, 'bad'), (False, 'good')])
Format placeholders:
{state} systemd suspend inhibitor state, eg True, False
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
@author Cyrinux https://github.com/cyrinux
@license BSD
SAMPLE OUTPUT
[{'full_text': 'SUSPEND ON', 'color': '#00FF00'}]
off
[{'full_text': 'SUSPEND OFF', 'color': '#FF0000'}]
"""
from dbus import SystemBus
from os import close
STRING_DBUS_EXCEPTION = "DBUS error, systemd-logind not started?"
STRING_BAD_LOCK_TYPES = "DBUS error, bad lock types used"
class Py3status:
"""
"""
# available configuration parameters
format = "[\?color=state SUSPEND [\?if=state OFF|ON]]"
lock_types = ["handle-lid-switch", "idle", "sleep"]
thresholds = [(True, "bad"), (False, "good")]
def post_config_hook(self):
try:
self.login1 = SystemBus().get_object(
"org.freedesktop.login1", "/org/freedesktop/login1"
)
except Exception:
raise Exception(STRING_DBUS_EXCEPTION)
self.lock = None
self.lock_types = ":".join(self.lock_types)
self.thresholds_init = self.py3.get_color_names_list(self.format)
def systemd_suspend_inhibitor(self):
suspend_data = {"state": bool(self.lock)}
for x in self.thresholds_init:
if x in suspend_data:
self.py3.threshold_get_color(suspend_data[x], x)
return {
"cached_until": self.py3.CACHE_FOREVER,
"full_text": self.py3.safe_format(self.format, suspend_data),
}
def on_click(self, event):
if self.lock is None:
self.lock = self.login1.Inhibit(
self.lock_types,
"Py3Status",
"Systemd suspend inhibitor module",
"block",
dbus_interface="org.freedesktop.login1.Manager",
).take()
else:
close(self.lock)
self.lock = None
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| [
"os.close",
"dbus.SystemBus",
"py3status.module_test.module_test"
] | [((2578, 2600), 'py3status.module_test.module_test', 'module_test', (['Py3status'], {}), '(Py3status)\n', (2589, 2600), False, 'from py3status.module_test import module_test\n'), ((2403, 2419), 'os.close', 'close', (['self.lock'], {}), '(self.lock)\n', (2408, 2419), False, 'from os import close\n'), ((1326, 1337), 'dbus.SystemBus', 'SystemBus', ([], {}), '()\n', (1335, 1337), False, 'from dbus import SystemBus\n')] |
import peeweedb
import astropy.units as u
def get_by_basename(db, table, basename):
"""Get data from SQL database by basename. Returns a list of dict"""
if isinstance(table, str):
assert table in db.get_tables(), "Sanity Check Failed: Table queried does not exist"
table = peeweedb.tables[table]
else:
table = table
query = table.select().where(table.basename == basename)
print(query.sql())
data = list(query.dicts())
return data
def get_by_radec(db, table, ra, dec, radius):
"""
Get data from SQL database within a square area of the sky determined by ra, dec, radius.
Returns a list of dict
"""
radius = radius*u.arcmin.to(u.deg)
if isinstance(table, str):
assert table in db.get_tables(), "Sanity Check Failed: Table queried does not exist"
table = peeweedb.tables[table]
else:
table = table
query = table.select().where(
table.centerRa.between(ra - radius, ra + radius),
table.centerDec.between(dec - radius, dec + radius)
)
print(query.sql())
data = list(query.dicts())
return data
| [
"astropy.units.arcmin.to"
] | [((716, 734), 'astropy.units.arcmin.to', 'u.arcmin.to', (['u.deg'], {}), '(u.deg)\n', (727, 734), True, 'import astropy.units as u\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ilo_redfish_info
short_description: Gathers server information through iLO using Redfish APIs
version_added: 4.2.0
description:
- Builds Redfish URIs locally and sends them to iLO to
get information back.
- For use with HPE iLO operations that require Redfish OEM extensions.
options:
category:
required: true
description:
- List of categories to execute on iLO.
type: list
elements: str
command:
required: true
description:
- List of commands to execute on iLO.
type: list
elements: str
baseuri:
required: true
description:
- Base URI of iLO.
type: str
username:
description:
- User for authentication with iLO.
type: str
password:
description:
- Password for authentication with iLO.
type: str
auth_token:
description:
- Security token for authentication with iLO.
type: str
timeout:
description:
- Timeout in seconds for URL requests to iLO.
default: 10
type: int
author:
- "<NAME> (@bhavya06)"
'''
EXAMPLES = '''
- name: Get iLO Sessions
community.general.ilo_redfish_info:
category: Sessions
command: GetiLOSessions
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result_sessions
'''
RETURN = '''
ilo_redfish_info:
description: Returns iLO sessions.
type: dict
contains:
GetiLOSessions:
description: Returns the iLO session msg and whether the function executed successfully.
type: dict
contains:
ret:
description: Check variable to see if the information was succesfully retrived.
type: bool
msg:
description: Information of all active iLO sessions.
type: list
elements: dict
contains:
Description:
description: Provides a description of the resource.
type: str
Id:
description: The sessionId.
type: str
Name:
description: The name of the resource.
type: str
UserName:
description: Name to use to log in to the management processor.
type: str
returned: always
'''
CATEGORY_COMMANDS_ALL = {
"Sessions": ["GetiLOSessions"]
}
CATEGORY_COMMANDS_DEFAULT = {
"Sessions": "GetiLOSessions"
}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils
def main():
result = {}
category_list = []
module = AnsibleModule(
argument_spec=dict(
category=dict(required=True, type='list', elements='str'),
command=dict(required=True, type='list', elements='str'),
baseuri=dict(required=True),
username=dict(),
password=dict(no_log=True),
auth_token=dict(no_log=True),
timeout=dict(type='int', default=10)
),
required_together=[
('username', 'password'),
],
required_one_of=[
('username', 'auth_token'),
],
mutually_exclusive=[
('username', 'auth_token'),
],
supports_check_mode=True
)
creds = {"user": module.params['username'],
"pswd": module.params['password'],
"token": module.params['auth_token']}
timeout = module.params['timeout']
root_uri = "https://" + module.params['baseuri']
rf_utils = iLORedfishUtils(creds, root_uri, timeout, module)
# Build Category list
if "all" in module.params['category']:
for entry in CATEGORY_COMMANDS_ALL:
category_list.append(entry)
else:
# one or more categories specified
category_list = module.params['category']
for category in category_list:
command_list = []
# Build Command list for each Category
if category in CATEGORY_COMMANDS_ALL:
if not module.params['command']:
# True if we don't specify a command --> use default
command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
elif "all" in module.params['command']:
for entry in CATEGORY_COMMANDS_ALL[category]:
command_list.append(entry)
# one or more commands
else:
command_list = module.params['command']
# Verify that all commands are valid
for cmd in command_list:
# Fail if even one command given is invalid
if cmd not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg="Invalid Command: %s" % cmd)
else:
# Fail if even one category given is invalid
module.fail_json(msg="Invalid Category: %s" % category)
# Organize by Categories / Commands
if category == "Sessions":
for command in command_list:
if command == "GetiLOSessions":
result[command] = rf_utils.get_ilo_sessions()
module.exit_json(ilo_redfish_info=result)
if __name__ == '__main__':
main()
| [
"ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils.iLORedfishUtils"
] | [((4197, 4246), 'ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils.iLORedfishUtils', 'iLORedfishUtils', (['creds', 'root_uri', 'timeout', 'module'], {}), '(creds, root_uri, timeout, module)\n', (4212, 4246), False, 'from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils\n')] |
import torch
import torch.nn.functional as F
from torch import nn
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate,
is_dist_avail_and_initialized)
from .backbone import build_backbone
from .matcher import build_matcher_crowd
import numpy as np
import time
# the network frmawork of the regression branch
class RegressionModel(nn.Module):
def __init__(self, num_features_in, num_anchor_points=4, feature_size=256):
super(RegressionModel, self).__init__()
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchor_points * 2, kernel_size=3, padding=1)
# sub-branch forward
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.output(out)
out = out.permute(0, 2, 3, 1)
return out.contiguous().view(out.shape[0], -1, 2)
# the network frmawork of the classification branch
class ClassificationModel(nn.Module):
def __init__(self, num_features_in, num_anchor_points=4, num_classes=80, prior=0.01, feature_size=256):
super(ClassificationModel, self).__init__()
self.num_classes = num_classes
self.num_anchor_points = num_anchor_points
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchor_points * num_classes, kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
# sub-branch forward
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.output(out)
out1 = out.permute(0, 2, 3, 1)
batch_size, width, height, _ = out1.shape
out2 = out1.view(batch_size, width, height, self.num_anchor_points, self.num_classes)
return out2.contiguous().view(x.shape[0], -1, self.num_classes)
# generate the reference points in grid layout
def generate_anchor_points(stride=16, row=3, line=3):
row_step = stride / row
line_step = stride / line
shift_x = (np.arange(1, line + 1) - 0.5) * line_step - stride / 2
shift_y = (np.arange(1, row + 1) - 0.5) * row_step - stride / 2
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
anchor_points = np.vstack((
shift_x.ravel(), shift_y.ravel()
)).transpose()
return anchor_points
# shift the meta-anchor to get an acnhor points
def shift(shape, stride, anchor_points):
shift_x = (np.arange(0, shape[1]) + 0.5) * stride
shift_y = (np.arange(0, shape[0]) + 0.5) * stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((
shift_x.ravel(), shift_y.ravel()
)).transpose()
A = anchor_points.shape[0]
K = shifts.shape[0]
all_anchor_points = (anchor_points.reshape((1, A, 2)) + shifts.reshape((1, K, 2)).transpose((1, 0, 2)))
all_anchor_points = all_anchor_points.reshape((K * A, 2))
return all_anchor_points
# this class generate all reference points on all pyramid levels
class AnchorPoints(nn.Module):
    """Generates all reference (anchor) points for the requested pyramid levels."""

    def __init__(self, pyramid_levels=None, strides=None, row=3, line=3):
        """
        Args:
            pyramid_levels: list of pyramid levels p (default [3, 4, 5, 6, 7]).
            strides: per-level pixel strides; defaults to 2**p for each level.
            row, line: rows / columns of reference points per cell.
        """
        super(AnchorPoints, self).__init__()
        if pyramid_levels is None:
            self.pyramid_levels = [3, 4, 5, 6, 7]
        else:
            self.pyramid_levels = pyramid_levels
        if strides is None:
            # Default: stride of level p is 2**p pixels.
            self.strides = [2 ** x for x in self.pyramid_levels]
        else:
            # BUGFIX: an explicitly passed `strides` used to be ignored,
            # leaving self.strides unset and crashing forward() with
            # AttributeError.
            self.strides = strides
        self.row = row
        self.line = line

    def forward(self, image):
        """Return all reference points for `image`, shape (1, total_points, 2)."""
        image_shape = image.shape[2:]
        image_shape = np.array(image_shape)
        # Feature-map size at each level (ceil division by 2**p).
        image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in self.pyramid_levels]
        all_anchor_points = np.zeros((0, 2)).astype(np.float32)
        # Accumulate the shifted reference points of every pyramid level.
        for idx, p in enumerate(self.pyramid_levels):
            anchor_points = generate_anchor_points(2 ** p, row=self.row, line=self.line)
            shifted_anchor_points = shift(image_shapes[idx], self.strides[idx], anchor_points)
            all_anchor_points = np.append(all_anchor_points, shifted_anchor_points, axis=0)
        all_anchor_points = np.expand_dims(all_anchor_points, axis=0)
        # Move the reference points to the GPU when one is available.
        if torch.cuda.is_available():
            return torch.from_numpy(all_anchor_points.astype(np.float32)).cuda()
        else:
            return torch.from_numpy(all_anchor_points.astype(np.float32))
class Decoder(nn.Module):
    """FPN-style decoder: merges backbone maps C3/C4/C5 into P3/P4/P5."""

    def __init__(self, C3_size, C4_size, C5_size, feature_size=256):
        super(Decoder, self).__init__()
        # Per level: a 1x1 lateral conv projects the backbone map to
        # `feature_size`, a nearest-neighbour upsample carries coarse context
        # down, and a 3x3 conv smooths the merged result.
        self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
        self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
        self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P3_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

    def forward(self, inputs):
        """Return [P3, P4, P5] for backbone feature maps (C3, C4, C5)."""
        c3, c4, c5 = inputs
        # Coarsest level: project, keep an upsampled copy, smooth.
        lat5 = self.P5_1(c5)
        up5 = self.P5_upsampled(lat5)
        p5 = self.P5_2(lat5)
        # Middle level: top-down sum with the lateral projection.
        lat4 = up5 + self.P4_1(c4)
        up4 = self.P4_upsampled(lat4)
        p4 = self.P4_2(lat4)
        # Finest level.
        lat3 = self.P3_1(c3) + up4
        p3 = self.P3_2(lat3)
        return [p3, p4, p5]
# the defenition of the P2PNet model
class P2PNet(nn.Module):
    """Point-to-point crowd network: backbone + FPN + point regression and
    classification heads over a fixed grid of reference points."""

    def __init__(self, backbone, row=2, line=2):
        super().__init__()
        self.backbone = backbone
        self.num_classes = 2
        # Each feature-map cell carries row * line reference points.
        num_anchor_points = row * line
        self.regression = RegressionModel(num_features_in=256, num_anchor_points=num_anchor_points)
        self.classification = ClassificationModel(num_features_in=256,
                                                  num_classes=self.num_classes,
                                                  num_anchor_points=num_anchor_points)
        self.anchor_points = AnchorPoints(pyramid_levels=[3], row=row, line=line)
        self.fpn = Decoder(256, 512, 512)

    def forward(self, samples: NestedTensor):
        """Return {'pred_logits', 'pred_points'} for a batch of images."""
        # Backbone features, then the feature pyramid built on top of them.
        features = self.backbone(samples)
        features_fpn = self.fpn([features[1], features[2], features[3]])
        batch_size = features[0].shape[0]
        # Both heads run on the single FPN level this model uses (index 1);
        # raw regression offsets are scaled (original note: "8x").
        regression = self.regression(features_fpn[1]) * 100
        classification = self.classification(features_fpn[1])
        anchor_points = self.anchor_points(samples).repeat(batch_size, 1, 1)
        # Predicted points are offsets added to the fixed reference points.
        return {
            'pred_logits': classification,
            'pred_points': regression + anchor_points,
        }
class SetCriterion_Crowd(nn.Module):
    """Loss for the crowd model: Hungarian-matched classification (CE) plus
    point-regression (MSE) terms, combined according to `weight_dict`."""
    def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
        """ Create the criterion.
        Parameters:
            num_classes: number of object categories, omitting the special no-object category
            matcher: module able to compute a matching between targets and proposals
            weight_dict: dict containing as key the names of the losses and as values their relative weight.
            eos_coef: relative classification weight applied to the no-object category
            losses: list of all the losses to be applied. See get_loss for list of available losses.
        """
        super().__init__()
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.eos_coef = eos_coef
        self.losses = losses
        # Per-class cross-entropy weights; class 0 (no-object/background)
        # is down-weighted by eos_coef.
        empty_weight = torch.ones(self.num_classes + 1)
        empty_weight[0] = self.eos_coef
        # Buffer (not a parameter) so it follows the module across devices.
        self.register_buffer('empty_weight', empty_weight)
    def loss_labels(self, outputs, targets, indices, num_points):
        """Classification loss (NLL)
        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
        """
        assert 'pred_logits' in outputs
        src_logits = outputs['pred_logits']
        # (batch indices, prediction indices) of the matched predictions.
        idx = self._get_src_permutation_idx(indices)
        target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
        # Default every prediction to class 0 (no-object), then overwrite
        # the matched positions with their ground-truth labels.
        target_classes = torch.full(src_logits.shape[:2], 0,
                                    dtype=torch.int64, device=src_logits.device)
        target_classes[idx] = target_classes_o
        loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
        losses = {'loss_ce': loss_ce}
        return losses
    def loss_points(self, outputs, targets, indices, num_points):
        """MSE between matched predicted points and their ground-truth points,
        normalised by `num_points` (the global target-point count)."""
        assert 'pred_points' in outputs
        idx = self._get_src_permutation_idx(indices)
        src_points = outputs['pred_points'][idx]
        target_points = torch.cat([t['point'][i] for t, (_, i) in zip(targets, indices)], dim=0)
        loss_bbox = F.mse_loss(src_points, target_points, reduction='none')
        losses = {}
        losses['loss_point'] = loss_bbox.sum() / num_points
        return losses
    def _get_src_permutation_idx(self, indices):
        # permute predictions following indices
        # batch_idx repeats each sample's index once per matched prediction.
        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
        src_idx = torch.cat([src for (src, _) in indices])
        return batch_idx, src_idx
    def _get_tgt_permutation_idx(self, indices):
        # permute targets following indices
        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
        return batch_idx, tgt_idx
    def get_loss(self, loss, outputs, targets, indices, num_points, **kwargs):
        # Dispatch table from loss name to its implementation.
        loss_map = {
            'labels': self.loss_labels,
            'points': self.loss_points,
        }
        assert loss in loss_map, f'do you really want to compute {loss} loss?'
        return loss_map[loss](outputs, targets, indices, num_points, **kwargs)
    def forward(self, outputs, targets):
        """ This performs the loss computation.
        Parameters:
             outputs: dict of tensors, see the output specification of the model for the format
             targets: list of dicts, such that len(targets) == batch_size.
                      The expected keys in each dict depends on the losses applied, see each loss' doc
        """
        output1 = {'pred_logits': outputs['pred_logits'], 'pred_points': outputs['pred_points']}
        # Hungarian matching between predictions and ground-truth points.
        indices1 = self.matcher(output1, targets)
        num_points = sum(len(t["labels"]) for t in targets)
        num_points = torch.as_tensor([num_points], dtype=torch.float, device=next(iter(output1.values())).device)
        # Average the target-point count across distributed workers so the
        # normalisation is consistent on every rank.
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_points)
        num_boxes = torch.clamp(num_points / get_world_size(), min=1).item()
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, output1, targets, indices1, num_boxes))
        return losses
# create the P2PNet model
def build(args, training):
    """Build the P2PNet model, and its loss criterion when `training` is truthy.

    Returns just the model for inference, otherwise (model, criterion).
    """
    # treats persons as a single class
    num_classes = 1
    backbone = build_backbone(args)
    model = P2PNet(backbone, args.row, args.line)
    if not training:
        return model
    # Loss weights: classification fixed at 1, point loss taken from args.
    weight_dict = {'loss_ce': 1, 'loss_points': args.point_loss_coef}
    losses = ['labels', 'points']
    matcher = build_matcher_crowd(args)
    criterion = SetCriterion_Crowd(num_classes, \
                                matcher=matcher, weight_dict=weight_dict, \
                                eos_coef=args.eos_coef, losses=losses)
return model, criterion | [
"torch.nn.ReLU",
"torch.full_like",
"numpy.array",
"torch.cuda.is_available",
"numpy.arange",
"torch.nn.Sigmoid",
"util.misc.get_world_size",
"numpy.meshgrid",
"torch.nn.functional.mse_loss",
"util.misc.is_dist_avail_and_initialized",
"torch.distributed.all_reduce",
"torch.nn.Upsample",
"tor... | [((3171, 3200), 'numpy.meshgrid', 'np.meshgrid', (['shift_x', 'shift_y'], {}), '(shift_x, shift_y)\n', (3182, 3200), True, 'import numpy as np\n'), ((3541, 3570), 'numpy.meshgrid', 'np.meshgrid', (['shift_x', 'shift_y'], {}), '(shift_x, shift_y)\n', (3552, 3570), True, 'import numpy as np\n'), ((596, 662), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_features_in', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(num_features_in, feature_size, kernel_size=3, padding=1)\n', (605, 662), False, 'from torch import nn\n'), ((683, 692), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (690, 692), False, 'from torch import nn\n'), ((715, 778), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, padding=1)\n', (724, 778), False, 'from torch import nn\n'), ((799, 808), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (806, 808), False, 'from torch import nn\n'), ((831, 894), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, padding=1)\n', (840, 894), False, 'from torch import nn\n'), ((915, 924), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (922, 924), False, 'from torch import nn\n'), ((947, 1010), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, padding=1)\n', (956, 1010), False, 'from torch import nn\n'), ((1031, 1040), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1038, 1040), False, 'from torch import nn\n'), ((1064, 1136), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', '(num_anchor_points * 2)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, num_anchor_points * 2, kernel_size=3, padding=1)\n', (1073, 1136), False, 'from torch import nn\n'), ((1799, 1865), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_features_in', 'feature_size'], 
{'kernel_size': '(3)', 'padding': '(1)'}), '(num_features_in, feature_size, kernel_size=3, padding=1)\n', (1808, 1865), False, 'from torch import nn\n'), ((1886, 1895), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1893, 1895), False, 'from torch import nn\n'), ((1918, 1981), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, padding=1)\n', (1927, 1981), False, 'from torch import nn\n'), ((2002, 2011), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2009, 2011), False, 'from torch import nn\n'), ((2034, 2097), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, padding=1)\n', (2043, 2097), False, 'from torch import nn\n'), ((2118, 2127), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2125, 2127), False, 'from torch import nn\n'), ((2150, 2213), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, padding=1)\n', (2159, 2213), False, 'from torch import nn\n'), ((2234, 2243), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2241, 2243), False, 'from torch import nn\n'), ((2267, 2353), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', '(num_anchor_points * num_classes)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, num_anchor_points * num_classes, kernel_size=3,\n padding=1)\n', (2276, 2353), False, 'from torch import nn\n'), ((2376, 2388), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2386, 2388), False, 'from torch import nn\n'), ((4512, 4533), 'numpy.array', 'np.array', (['image_shape'], {}), '(image_shape)\n', (4520, 4533), True, 'import numpy as np\n'), ((5095, 5136), 'numpy.expand_dims', 'np.expand_dims', (['all_anchor_points'], {'axis': '(0)'}), '(all_anchor_points, axis=0)\n', (5109, 5136), True, 'import numpy as np\n'), ((5190, 5215), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5213, 5215), False, 'import torch\n'), ((5594, 5662), 'torch.nn.Conv2d', 'nn.Conv2d', (['C5_size', 'feature_size'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(C5_size, feature_size, kernel_size=1, stride=1, padding=0)\n', (5603, 5662), False, 'from torch import nn\n'), ((5691, 5734), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (5702, 5734), False, 'from torch import nn\n'), ((5755, 5828), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n', (5764, 5828), False, 'from torch import nn\n'), ((5885, 5953), 'torch.nn.Conv2d', 'nn.Conv2d', (['C4_size', 'feature_size'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(C4_size, feature_size, kernel_size=1, stride=1, padding=0)\n', (5894, 5953), False, 'from torch import nn\n'), ((5982, 6025), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (5993, 6025), False, 'from torch import nn\n'), ((6046, 6119), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n', (6055, 6119), False, 'from torch import nn\n'), ((6176, 6244), 'torch.nn.Conv2d', 'nn.Conv2d', (['C3_size', 'feature_size'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(C3_size, feature_size, kernel_size=1, stride=1, padding=0)\n', (6185, 6244), False, 'from torch import nn\n'), ((6273, 6316), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (6284, 6316), False, 'from torch import nn\n'), ((6337, 6410), 'torch.nn.Conv2d', 
'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n', (6346, 6410), False, 'from torch import nn\n'), ((9249, 9281), 'torch.ones', 'torch.ones', (['(self.num_classes + 1)'], {}), '(self.num_classes + 1)\n', (9259, 9281), False, 'import torch\n'), ((9851, 9936), 'torch.full', 'torch.full', (['src_logits.shape[:2]', '(0)'], {'dtype': 'torch.int64', 'device': 'src_logits.device'}), '(src_logits.shape[:2], 0, dtype=torch.int64, device=src_logits.device\n )\n', (9861, 9936), False, 'import torch\n'), ((10502, 10557), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['src_points', 'target_points'], {'reduction': '"""none"""'}), "(src_points, target_points, reduction='none')\n", (10512, 10557), True, 'import torch.nn.functional as F\n'), ((10873, 10911), 'torch.cat', 'torch.cat', (['[src for src, _ in indices]'], {}), '([src for src, _ in indices])\n', (10882, 10911), False, 'import torch\n'), ((11155, 11193), 'torch.cat', 'torch.cat', (['[tgt for _, tgt in indices]'], {}), '([tgt for _, tgt in indices])\n', (11164, 11193), False, 'import torch\n'), ((12309, 12340), 'util.misc.is_dist_avail_and_initialized', 'is_dist_avail_and_initialized', ([], {}), '()\n', (12338, 12340), False, 'from util.misc import NestedTensor, nested_tensor_from_tensor_list, accuracy, get_world_size, interpolate, is_dist_avail_and_initialized\n'), ((3424, 3446), 'numpy.arange', 'np.arange', (['(0)', 'shape[1]'], {}), '(0, shape[1])\n', (3433, 3446), True, 'import numpy as np\n'), ((3478, 3500), 'numpy.arange', 'np.arange', (['(0)', 'shape[0]'], {}), '(0, shape[0])\n', (3487, 3500), True, 'import numpy as np\n'), ((5006, 5065), 'numpy.append', 'np.append', (['all_anchor_points', 'shifted_anchor_points'], {'axis': '(0)'}), '(all_anchor_points, shifted_anchor_points, axis=0)\n', (5015, 5065), True, 'import numpy as np\n'), ((12354, 12394), 'torch.distributed.all_reduce', 
'torch.distributed.all_reduce', (['num_points'], {}), '(num_points)\n', (12382, 12394), False, 'import torch\n'), ((3024, 3046), 'numpy.arange', 'np.arange', (['(1)', '(line + 1)'], {}), '(1, line + 1)\n', (3033, 3046), True, 'import numpy as np\n'), ((3094, 3115), 'numpy.arange', 'np.arange', (['(1)', '(row + 1)'], {}), '(1, row + 1)\n', (3103, 3115), True, 'import numpy as np\n'), ((4656, 4672), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (4664, 4672), True, 'import numpy as np\n'), ((10791, 10814), 'torch.full_like', 'torch.full_like', (['src', 'i'], {}), '(src, i)\n', (10806, 10814), False, 'import torch\n'), ((11073, 11096), 'torch.full_like', 'torch.full_like', (['tgt', 'i'], {}), '(tgt, i)\n', (11088, 11096), False, 'import torch\n'), ((12440, 12456), 'util.misc.get_world_size', 'get_world_size', ([], {}), '()\n', (12454, 12456), False, 'from util.misc import NestedTensor, nested_tensor_from_tensor_list, accuracy, get_world_size, interpolate, is_dist_avail_and_initialized\n')] |
import datetime
from data_sqlalchemy.modelbase import SqlAlchemyBase
import sqlalchemy as sa
class Word(SqlAlchemyBase):
    """A stored word, keyed by the word itself, with an optional hint."""
    __tablename__ = "words"

    id = sa.Column(sa.String, primary_key=True)
    created_date = sa.Column(sa.DateTime, default=datetime.datetime.now, index=True)
    # BUGFIX: `sa.String()` alone is a bare type instance, not a mapped
    # column, so `hint` was never persisted. Map it as a nullable column.
    hint = sa.Column(sa.String, nullable=True)

    def __repr__(self):  # for more useful debug messages
        # BUGFIX: repr previously said "<Package ...>" (copy/paste from
        # another model) although this class is Word.
        return f"<Word {self.id}>"
| [
"sqlalchemy.String",
"sqlalchemy.Column"
] | [((232, 270), 'sqlalchemy.Column', 'sa.Column', (['sa.String'], {'primary_key': '(True)'}), '(sa.String, primary_key=True)\n', (241, 270), True, 'import sqlalchemy as sa\n'), ((290, 355), 'sqlalchemy.Column', 'sa.Column', (['sa.DateTime'], {'default': 'datetime.datetime.now', 'index': '(True)'}), '(sa.DateTime, default=datetime.datetime.now, index=True)\n', (299, 355), True, 'import sqlalchemy as sa\n'), ((367, 378), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (376, 378), True, 'import sqlalchemy as sa\n')] |
import bpy
from bpy.props import *
from ...preferences import get_pref
def update_node(self, context):
    """Property-update callback: push this socket's value into its node.

    Stores ``self.value`` under the socket's name in the owning node's
    ``node_dict`` and asks the node to re-evaluate its parameters. Any
    failure is printed instead of raised, because update callbacks run
    from the UI and must not propagate exceptions.
    """
    try:
        owner = self.node
        owner.node_dict[self.name] = self.value
        owner.update_parms()  # re-evaluate the node tree
    except Exception as exc:
        print(exc)
class RenderNodeSocketInterface(bpy.types.NodeSocketInterface):
    """Node-group interface counterpart for RenderNodeSocket (cyan)."""
    bl_socket_idname = 'RenderNodeSocket'
    def draw(self, context, layout):
        # No extra UI for the interface itself.
        pass
    def draw_color(self, context):
        return (0, 1, 1, 1)
class RenderNodeSocket(bpy.types.NodeSocket):
    """Base socket: shows an int `value` property (or just a label when
    linked) and pushes edits into the node via `update_node`."""
    bl_idname = 'RenderNodeSocket'
    bl_label = 'RenderNodeSocket'
    text: StringProperty(default='custom text')
    value: IntProperty(default=0, update=update_node)
    def draw(self, context, layout, node, text):
        row = layout.row(align=1)
        # Linked sockets get their value from upstream, so only show a label.
        if self.is_linked:
            row.label(text=self.text)
        else:
            row.prop(self, 'value', text=self.text)
    def draw_color(self, context, node):
        return 0.5, 0.5, 0.5, 1
class RenderNodeSocketBool(RenderNodeSocket):
    """Boolean-valued socket (light purple)."""
    bl_idname = 'RenderNodeSocketBool'
    bl_label = 'RenderNodeSocketBool'
    value: BoolProperty(default=False, update=update_node)
    def draw_color(self, context, node):
        return 0.9, 0.7, 1.0, 1
class RenderNodeSocketInt(RenderNodeSocket):
    """Integer-valued socket (green)."""
    bl_idname = 'RenderNodeSocketInt'
    bl_label = 'RenderNodeSocketInt'
    value: IntProperty(default=0, update=update_node)
    def draw_color(self, context, node):
        return 0, 0.9, 0.1, 1
class RenderNodeSocketFloat(RenderNodeSocket):
    """Float-valued socket (grey)."""
    bl_idname = 'RenderNodeSocketFloat'
    bl_label = 'RenderNodeSocketFloat'
    value: FloatProperty(default=0, update=update_node)
    def draw_color(self, context, node):
        return 0.5, 0.5, 0.5, 1
class RenderNodeSocketString(RenderNodeSocket):
    """String-valued socket (blue)."""
    bl_idname = 'RenderNodeSocketString'
    bl_label = 'RenderNodeSocketString'
    value: StringProperty(default='', update=update_node)
    def draw_color(self, context, node):
        return 0.2, 0.7, 1.0, 1
# Vector and Subtype
####################
class RenderNodeSocketVector(RenderNodeSocket):
    """3-component vector socket; subclasses only change the value subtype."""
    bl_idname = 'RenderNodeSocketVector'
    bl_label = 'RenderNodeSocketVector'
    value: FloatVectorProperty(name='Vector', default=(0, 0, 0), subtype='NONE',
                               update=update_node)
    def draw_color(self, context, node):
        return 0.5, 0.3, 1.0, 1
    def draw(self, context, layout, node, text):
        # A column (not a row) so the three components stack vertically.
        col = layout.column(align=1)
        if self.is_linked:
            col.label(text=self.text)
        else:
            col.prop(self, 'value', text=self.text)
class RenderNodeSocketXYZ(RenderNodeSocketVector):
    """Vector socket displayed with the XYZ subtype (e.g. scale-like values)."""
    bl_idname = 'RenderNodeSocketXYZ'
    bl_label = 'RenderNodeSocketXYZ'
    value: FloatVectorProperty(name='Vector', default=(1.0, 1.0, 1.0), subtype='XYZ',
                               update=update_node)
class RenderNodeSocketTranslation(RenderNodeSocketVector):
    """Vector socket displayed as a translation (uses scene units)."""
    bl_idname = 'RenderNodeSocketTranslation'
    bl_label = 'RenderNodeSocketTranslation'
    value: FloatVectorProperty(name='Vector', default=(0, 0, 0), subtype='TRANSLATION',
                               update=update_node)
class RenderNodeSocketEuler(RenderNodeSocketVector):
    """Vector socket displayed as Euler rotation angles."""
    bl_idname = 'RenderNodeSocketEuler'
    bl_label = 'RenderNodeSocketEuler'
    value: FloatVectorProperty(name='Vector', default=(0, 0, 0), subtype='EULER',
                               update=update_node)
class RenderNodeSocketColor(RenderNodeSocketVector):
    """RGB colour socket with a colour-picker widget, clamped to [0, 1]."""
    bl_idname = 'RenderNodeSocketColor'
    bl_label = 'RenderNodeSocketColor'
    value: FloatVectorProperty(update=update_node, subtype='COLOR',
                               default=(1.0, 1.0, 1.0),
                               min=0.0, max=1.0)
    def draw_color(self, context, node):
        return 0.9, 0.9, 0.3, 1
# Object and subtype
##################
class RenderNodeSocketObject(RenderNodeSocket):
    """Socket holding a pointer to any Blender object (orange)."""
    bl_idname = 'RenderNodeSocketObject'
    bl_label = 'RenderNodeSocketObject'
    value: PointerProperty(type=bpy.types.Object, update=update_node)
    def draw(self, context, layout, node, text):
        row = layout.row(align=1)
        if self.is_linked:
            row.label(text=self.text)
        else:
            row.prop(self, 'value', text=self.text)
            # Extra button to select the referenced object in the viewport.
            if self.value:
                row.operator('rsn.select_object', icon='RESTRICT_SELECT_OFF', text='').name = self.value.name
    def draw_color(self, context, node):
        return 1, 0.6, 0.3, 1
def poll_camera(self, object):
    # Pointer-property poll: only allow camera objects to be picked.
    # NOTE(review): the parameter name shadows the builtin `object`.
    return object.type == 'CAMERA'
class RenderNodeSocketCamera(RenderNodeSocket):
    """Object-pointer socket restricted to cameras via `poll_camera` (orange)."""
    bl_idname = 'RenderNodeSocketCamera'
    bl_label = 'RenderNodeSocketCamera'
    value: PointerProperty(type=bpy.types.Object, update=update_node, poll=poll_camera)
    def draw(self, context, layout, node, text):
        row = layout.row(align=1)
        if self.is_linked:
            row.label(text=self.text)
        else:
            row.prop(self, 'value', text='')
            # Extra button to select the referenced camera in the viewport.
            if self.value:
                row.operator('rsn.select_object', icon='RESTRICT_SELECT_OFF', text='').name = self.value.name
    def draw_color(self, context, node):
        return 1, 0.6, 0.3, 1
# other pointer property
###############
class RenderNodeSocketMaterial(RenderNodeSocket):
    """Socket holding a pointer to a material (red)."""
    bl_idname = 'RenderNodeSocketMaterial'
    bl_label = 'RenderNodeSocketMaterial'
    value: PointerProperty(type=bpy.types.Material, update=update_node)
    def draw_color(self, context, node):
        return 1, 0.4, 0.4, 1
class RenderNodeSocketWorld(RenderNodeSocket):
    """Socket holding a pointer to a world datablock (red)."""
    bl_idname = 'RenderNodeSocketWorld'
    bl_label = 'RenderNodeSocketWorld'
    value: PointerProperty(type=bpy.types.World, update=update_node)
    def draw_color(self, context, node):
        return 1, 0.4, 0.4, 1
class RenderNodeSocketViewLayer(RenderNodeSocket):
    """Socket holding a view-layer name, picked from the scene's layers (blue)."""
    bl_idname = 'RenderNodeSocketViewLayer'
    bl_label = 'RenderNodeSocketViewLayer'
    value: StringProperty(update=update_node)
    def draw(self, context, layout, node, text):
        row = layout.row(align=1)
        if self.is_linked:
            row.label(text=self.text)
        else:
            # Searchable dropdown over the current scene's view layers.
            row.prop_search(self, "value", context.scene, "view_layers", text='')
    def draw_color(self, context, node):
        return 0.2, 0.7, 1.0, 1
### old types ###
#################
class RSNodeSocketTaskSettings(bpy.types.NodeSocket):
    """Legacy task-settings socket; unlinked sockets show a search-and-link
    button instead of a plain label."""
    bl_idname = 'RSNodeSocketTaskSettings'
    bl_label = 'RSNodeSocketTaskSettings'
    def draw(self, context, layout, node, text):
        if not self.is_linked:
            io = layout.operator('rsn.search_and_link', text=text, icon='ADD')
            io.node_name = node.name
            # Encode which socket slot the operator should link; 666 marks
            # the unused side. The slot index is parsed out of path_from_id()
            # (the digit inside the trailing "[N]").
            if self.is_output:
                io.output_id = int(self.path_from_id()[-2:-1])
                io.input_id = 666
            else:
                io.input_id = int(self.path_from_id()[-2:-1])
                io.output_id = 666
        else:
            layout.label(text=text)
    def draw_color(self, context, node):
        return 0.6, 0.6, 0.6, 1.0
class RSNodeSocketCamera(bpy.types.NodeSocket):
    """Legacy camera socket (grey); draws only its label."""
    bl_idname = 'RSNodeSocketCamera'
    bl_label = 'RSNodeSocketCamera'
    def draw(self, context, layout, node, text):
        layout.label(text=text)
    def draw_color(self, context, node):
        return 0.6, 0.6, 0.6, 1.0
class RSNodeSocketRenderSettings(bpy.types.NodeSocket):
    """Legacy render-settings socket (green); draws only its label."""
    bl_idname = 'RSNodeSocketRenderSettings'
    bl_label = 'RSNodeSocketRenderSettings'
    def draw(self, context, layout, node, text):
        layout.label(text=text)
    def draw_color(self, context, node):
        return 0, 1, 0.5, 1.0
class RSNodeSocketOutputSettings(bpy.types.NodeSocket):
    """Legacy output-settings socket (yellow); draws only its label."""
    bl_idname = 'RSNodeSocketOutputSettings'
    # BUGFIX: the label contained a stray space ('RSNod eSocketOutputSettings').
    bl_label = 'RSNodeSocketOutputSettings'

    def draw(self, context, layout, node, text):
        layout.label(text=text)

    def draw_color(self, context, node):
        return 1, 0.8, 0.2, 1.0
class RSNodeSocketRenderList(bpy.types.NodeSocket):
    """Legacy render-list socket (near-white); draws only its label."""
    bl_idname = 'RSNodeSocketRenderList'
    bl_label = 'RSNodeSocketRenderList'
    def draw(self, context, layout, node, text):
        layout.label(text=text)
    def draw_color(self, context, node):
        return 0.95, 0.95, 0.95, 1.0
# All socket classes (un)registered with Blender below: legacy RSN sockets
# first, then the newer RenderNodeSocket family.
classes = (
    RSNodeSocketCamera,
    RSNodeSocketRenderSettings,
    RSNodeSocketOutputSettings,
    RSNodeSocketTaskSettings,
    RSNodeSocketRenderList,
    # new
    RenderNodeSocketInterface,
    RenderNodeSocket,
    RenderNodeSocketObject,
    RenderNodeSocketCamera,
    RenderNodeSocketMaterial,
    RenderNodeSocketWorld,
    RenderNodeSocketViewLayer,
    RenderNodeSocketBool,
    RenderNodeSocketInt,
    RenderNodeSocketFloat,
    RenderNodeSocketString,
    RenderNodeSocketVector,
    RenderNodeSocketXYZ,
    RenderNodeSocketTranslation,
    RenderNodeSocketEuler,
    RenderNodeSocketColor,
)
def register():
    """Register every socket class with Blender (add-on enable)."""
    for socket_cls in classes:
        bpy.utils.register_class(socket_cls)


def unregister():
    """Unregister every socket class again (add-on disable)."""
    for socket_cls in classes:
        bpy.utils.unregister_class(socket_cls)
| [
"bpy.utils.unregister_class",
"bpy.utils.register_class"
] | [((8909, 8938), 'bpy.utils.register_class', 'bpy.utils.register_class', (['cls'], {}), '(cls)\n', (8933, 8938), False, 'import bpy\n'), ((8991, 9022), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['cls'], {}), '(cls)\n', (9017, 9022), False, 'import bpy\n')] |
from keras.models import load_model
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
from sklearn.cluster import KMeans
from time import time
# Takes a pandas dataframe containing the cluster assignment and ground truth for each data point
# and returns the purity of the cluster results
def clustering_purity(cluster_results, index_to_name):
    """Compute the purity of a clustering.

    Args:
        cluster_results: DataFrame with one row per data point and columns
            'cluster' (assigned cluster id) and 'class' (ground-truth label).
        index_to_name: mapping from class label to a human-readable name.

    Returns:
        (total_purity, results_table) where results_table has one row per
        cluster with its size, most common class name and purity.
    """
    clusters = cluster_results['cluster'].unique()
    m = cluster_results.shape[0]
    # Per-cluster statistics, collected in cluster order.
    cluster_purities = []
    cluster_sizes = []
    most_common_classes = []
    for j in clusters:
        cluster_j = cluster_results[cluster_results['cluster'] == j]
        m_j = cluster_j.shape[0]
        cluster_sizes.append(m_j)
        classes = cluster_j['class'].unique()
        # Class probability distribution for this cluster, aligned with `classes`.
        class_probabilities = [(cluster_j['class'] == i).sum() / m_j for i in classes]
        best = int(np.argmax(class_probabilities))
        # Purity of a cluster is its maximum class probability.
        cluster_purities.append(class_probabilities[best])
        # BUGFIX: the most common class must be looked up via its *label*
        # (classes[best]); the old code indexed index_to_name with the argmax
        # *position* in the probability list, yielding the wrong name (or a
        # KeyError) whenever label != position.
        most_common_classes.append(index_to_name[classes[best]])
    # Total purity is the size-weighted average of per-cluster purities.
    total_purity = sum((size / m) * p for size, p in zip(cluster_sizes, cluster_purities))
    # Pandas dataframe containing per cluster results
    results_table = pd.DataFrame({'cluster': clusters,
                                  'cluster_size': cluster_sizes,
                                  'most_common_class': most_common_classes,
                                  'purity': cluster_purities,
                                  'total_purity': total_purity})
    return total_purity, results_table
# Takes a pandas dataframe containing the cluster assignment and ground truth for each data point
# and returns the entropy of the cluster results
def clustering_entropy(cluster_results, index_to_name):
    """Compute the (size-weighted) entropy of a clustering; lower is better.

    Args:
        cluster_results: DataFrame with one row per data point and columns
            'cluster' (assigned cluster id) and 'class' (ground-truth label).
        index_to_name: mapping from class label to a human-readable name.

    Returns:
        (total_entropy, results_table) where results_table has one row per
        cluster with its size, most common class name and entropy.
    """
    clusters = cluster_results['cluster'].unique()
    m = cluster_results.shape[0]
    # Per-cluster statistics, collected in cluster order.
    cluster_entropies = []
    cluster_sizes = []
    most_common_classes = []
    for j in clusters:
        cluster_j = cluster_results[cluster_results['cluster'] == j]
        m_j = cluster_j.shape[0]
        cluster_sizes.append(m_j)
        classes = cluster_j['class'].unique()
        # Class probability distribution for this cluster, aligned with `classes`.
        class_probabilities = [(cluster_j['class'] == i).sum() / m_j for i in classes]
        # Shannon entropy of the class distribution inside this cluster.
        cluster_entropy = -sum(p * np.log2(p) for p in class_probabilities)
        cluster_entropies.append(cluster_entropy)
        # BUGFIX: the most common class must be looked up via its *label*
        # (classes[best]); the old code indexed index_to_name with the argmax
        # *position* in the probability list, yielding the wrong name (or a
        # KeyError) whenever label != position.
        best = int(np.argmax(class_probabilities))
        most_common_classes.append(index_to_name[classes[best]])
    # Total entropy is the size-weighted average of per-cluster entropies.
    total_entropy = sum((size / m) * e for size, e in zip(cluster_sizes, cluster_entropies))
    # Pandas dataframe containing per cluster results
    results_table = pd.DataFrame({'cluster': clusters,
                                  'cluster_size': cluster_sizes,
                                  'most_common_class': most_common_classes,
                                  'entropy': cluster_entropies,
                                  'total_entropy': total_entropy})
    return total_entropy, results_table
def main():
    """Encode Caltech-256 images with a saved autoencoder, K-means-cluster the
    codes, and report/store purity and entropy of the clustering."""
    model_name = 'encoder_caltech256.h5'
    encoder = load_model(model_name)
    encode_datagen = ImageDataGenerator(rescale=1. / 255)
    predict_generator = encode_datagen.flow_from_directory(
        'data/256_ObjectCategories',
        target_size=(128, 128),
        batch_size=1,
        class_mode='input', shuffle=False)
    n_images = 29780
    # Encode all images
    encoded_imgs = encoder.predict_generator(predict_generator, n_images, verbose=1)
    # Flatten encoded images to create feature vector for clustering
    encoded_imgs_feature_vecs = encoded_imgs.reshape(n_images, 8 * 8 * 600)
    # Perform K-means clustering on flattened feature vector
    print('Starting K-means..')
    t0 = time()
    kmeans = KMeans(n_clusters=256, n_init=2, n_jobs=-1)
    clusters = kmeans.fit_predict(encoded_imgs_feature_vecs)
    duration = time() - t0
    print("done in %fs" % (duration))
    print()
    # Prepare data for evaluation functions
    cluster_results = pd.DataFrame({'cluster': clusters, 'class': predict_generator.classes})
    # Save cluster results
    cluster_results.to_csv(model_name[:-3] + 'cluster_results.csv', index=False)
    class_index_to_name = {v: k for k, v in predict_generator.class_indices.items()}
    print('Evaluating entropy..')
    t0 = time()
    total_entropy, entropy_per_cluster = clustering_entropy(cluster_results, index_to_name=class_index_to_name)
    duration = time() - t0
    print("done in %fs" % (duration))
    print()
    print('Evaluating purity..')
    # BUGFIX: t0 was not reset here, so the reported purity duration also
    # included the entropy evaluation time.
    t0 = time()
    total_purity, purity_per_cluster = clustering_purity(cluster_results, index_to_name=class_index_to_name)
    duration = time() - t0
    print("done in %fs" % (duration))
    print()
    print('Entropy:')
    print(str(total_entropy))
    print(entropy_per_cluster.to_string())
    print('\n\n\nPurity: ')
    print(str(total_purity))
    print(purity_per_cluster.to_string())
    # Persist the per-cluster detail tables next to the cluster assignments.
    entropy_per_cluster.to_csv(model_name[:-3] + 'entropy_details.csv', index=False)
    purity_per_cluster.to_csv(model_name[:-3] + 'purity_details.csv', index=False)
if __name__ == '__main__':
    main()
"sklearn.cluster.KMeans",
"keras.models.load_model",
"keras.preprocessing.image.ImageDataGenerator",
"numpy.array",
"pandas.DataFrame",
"numpy.log2",
"time.time"
] | [((1556, 1730), 'pandas.DataFrame', 'pd.DataFrame', (["{'cluster': clusters, 'cluster_size': cluster_sizes, 'most_common_class':\n most_common_classes, 'purity': cluster_purities, 'total_purity':\n total_purity}"], {}), "({'cluster': clusters, 'cluster_size': cluster_sizes,\n 'most_common_class': most_common_classes, 'purity': cluster_purities,\n 'total_purity': total_purity})\n", (1568, 1730), True, 'import pandas as pd\n'), ((3346, 3524), 'pandas.DataFrame', 'pd.DataFrame', (["{'cluster': clusters, 'cluster_size': cluster_sizes, 'most_common_class':\n most_common_classes, 'entropy': cluster_entropies, 'total_entropy':\n total_entropy}"], {}), "({'cluster': clusters, 'cluster_size': cluster_sizes,\n 'most_common_class': most_common_classes, 'entropy': cluster_entropies,\n 'total_entropy': total_entropy})\n", (3358, 3524), True, 'import pandas as pd\n'), ((3765, 3787), 'keras.models.load_model', 'load_model', (['model_name'], {}), '(model_name)\n', (3775, 3787), False, 'from keras.models import load_model\n'), ((3810, 3847), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (3828, 3847), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((4422, 4428), 'time.time', 'time', ([], {}), '()\n', (4426, 4428), False, 'from time import time\n'), ((4442, 4485), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(256)', 'n_init': '(2)', 'n_jobs': '(-1)'}), '(n_clusters=256, n_init=2, n_jobs=-1)\n', (4448, 4485), False, 'from sklearn.cluster import KMeans\n'), ((4691, 4762), 'pandas.DataFrame', 'pd.DataFrame', (["{'cluster': clusters, 'class': predict_generator.classes}"], {}), "({'cluster': clusters, 'class': predict_generator.classes})\n", (4703, 4762), True, 'import pandas as pd\n'), ((5002, 5008), 'time.time', 'time', ([], {}), '()\n', (5006, 5008), False, 'from time import time\n'), ((4562, 4568), 'time.time', 'time', ([], {}), '()\n', (4566, 4568), False, 'from 
time import time\n'), ((5136, 5142), 'time.time', 'time', ([], {}), '()\n', (5140, 5142), False, 'from time import time\n'), ((5356, 5362), 'time.time', 'time', ([], {}), '()\n', (5360, 5362), False, 'from time import time\n'), ((1139, 1168), 'numpy.array', 'np.array', (['class_probabilities'], {}), '(class_probabilities)\n', (1147, 1168), True, 'import numpy as np\n'), ((2921, 2931), 'numpy.log2', 'np.log2', (['p'], {}), '(p)\n', (2928, 2931), True, 'import numpy as np\n'), ((3110, 3139), 'numpy.array', 'np.array', (['class_probabilities'], {}), '(class_probabilities)\n', (3118, 3139), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""Evohome RF - Opentherm processor."""
import logging
import struct
from typing import Any
from .const import __dev_mode__
DEV_MODE = __dev_mode__
_LOGGER = logging.getLogger(__name__)
if DEV_MODE:
_LOGGER.setLevel(logging.DEBUG)
# Data structure shamelessly copied, with thanks to @nlrb, from:
# github.com/nlrb/com.tclcode.otgw (node_modules/otg-api/lib/ot_msg.js),
# Other code shamelessly copied, with thanks to @mvn23, from:
# github.com/mvn23/pyotgw (pyotgw/protocol.py),
READ_WRITE = "RW"
READ_ONLY = "R-"
WRITE_ONLY = "-W"
EN = "en"
FLAGS = "flags"
DIR = "dir"
NL = "nl"
SENSOR = "sensor"
VAL = "val"
VAR = "var"
FLAG8 = "flag8"
FLAG = "flag"
U8 = "u8"
S8 = "s8"
F8_8 = "f8.8"
U16 = "u16"
S16 = "s16"
HB = "hb"
LB = "lb"
VALUE = "value"
COUNTER = "counter"
HUMIDITY = "humidity"
PERCENTAGE = "percentage"
PRESSURE = "pressure"
TEMPERATURE = "temperature"
# Decode table for the 3-bit OpenTherm message-type field (keys 0b000-0b111).
OPENTHERM_MSG_TYPE = {
    0b000: "Read-Data",
    0b001: "Write-Data",
    0b010: "Invalid-Data",
    0b011: "-reserved-",
    0b100: "Read-Ack",
    0b101: "Write-Ack",
    0b110: "Data-Invalid",
    0b111: "Unknown-DataId",
}
# These must have either a FLAGS (preferred) or a VAR for their message name
OPENTHERM_MESSAGES = {
# OpenTherm status flags [ID 0: Master status (HB) & Slave status (LB)]
"status_flags": {
"0x0100": {
EN: "Central heating enable",
NL: "Centrale verwarming aan",
VAR: "StatusCHEnabled",
},
"0x0200": {
EN: "DHW enable",
NL: "Tapwater aan",
VAR: "StatusDHWEnabled",
},
"0x0400": {
EN: "Cooling enable",
NL: "Koeling aan",
VAR: "StatusCoolEnabled",
},
"0x0800": {
EN: "Outside temp. comp. active",
NL: "Compenseren buitentemp.",
VAR: "StatusOTCActive",
},
"0x1000": {
EN: "Central heating 2 enable",
NL: "Centrale verwarming 2 aan",
VAR: "StatusCH2Enabled",
},
"0x2000": {
EN: "Summer/winter mode",
NL: "Zomer/winter mode",
VAR: "StatusSummerWinter",
},
"0x4000": {
EN: "DHW blocking",
NL: "Tapwater blokkade",
VAR: "StatusDHWBlocked",
},
"0x0001": {
EN: "Fault indication",
NL: "Fout indicatie",
VAR: "StatusFault",
}, # no fault/fault
"0x0002": {
EN: "Central heating mode",
NL: "Centrale verwarming mode",
VAR: "StatusCHMode",
}, # not active/active
"0x0004": {
EN: "DHW mode",
NL: "Tapwater mode",
VAR: "StatusDHWMode",
}, # not active/active
"0x0008": {
EN: "Flame status",
NL: "Vlam status",
VAR: "StatusFlame",
}, # flame off/on
"0x0010": {
EN: "Cooling status",
NL: "Status koelen",
VAR: "StatusCooling",
}, # not active/active
"0x0020": {
EN: "Central heating 2 mode",
NL: "Centrale verwarming 2 mode",
VAR: "StatusCH2Mode",
}, # not active/active
"0x0040": {
EN: "Diagnostic indication",
NL: "Diagnose indicatie",
VAR: "StatusDiagnostic",
}, # no diagnostics/diagnostics event
},
# OpenTherm Master configuration flags [ID 2: master config flags (HB)]
"Master_config_flags": {
"0x0100": {
EN: "Smart Power",
VAR: "ConfigSmartPower",
},
},
# OpenTherm Slave configuration flags [ID 3: slave config flags (HB)]
"Slave_Config_flags": {
"0x0100": {
EN: "DHW present",
VAR: "ConfigDHWpresent",
},
"0x0200": {
EN: "Control type (modulating on/off)",
VAR: "ConfigControlType",
},
"0x0400": {
EN: "Cooling supported",
VAR: "ConfigCooling",
},
"0x0800": {
EN: "DHW storage tank",
VAR: "ConfigDHW",
},
"0x1000": {
EN: "Master low-off & pump control allowed",
VAR: "ConfigMasterPump",
},
"0x2000": {
EN: "Central heating 2 present",
VAR: "ConfigCH2",
},
},
# OpenTherm fault flags [ID 5: Application-specific fault flags (HB)]
"fault_flags": {
"0x0100": {
EN: "Service request",
NL: "Onderhoudsvraag",
VAR: "FaultServiceRequest",
},
"0x0200": {
EN: "Lockout-reset",
NL: "Geen reset op afstand",
VAR: "FaultLockoutReset",
},
"0x0400": {
EN: "Low water pressure",
NL: "Waterdruk te laag",
VAR: "FaultLowWaterPressure",
},
"0x0800": {
EN: "Gas/flame fault",
NL: "Gas/vlam fout",
VAR: "FaultGasFlame",
},
"0x1000": {
EN: "Air pressure fault",
NL: "Luchtdruk fout",
VAR: "FaultAirPressure",
},
"0x2000": {
EN: "Water over-temperature",
NL: "Water te heet",
VAR: "FaultOverTemperature",
},
},
# OpenTherm remote flags [ID 6: Remote parameter flags (HB)]
"Remote_flags": {
"0x0100": {
EN: "DHW setpoint enable",
VAR: "RemoteDHWEnabled",
},
"0x0200": {
EN: "Max. CH setpoint enable",
VAR: "RemoteMaxCHEnabled",
},
"0x0001": {
EN: "DHW setpoint read/write",
VAR: "RemoteDHWReadWrite",
},
"0x0002": {
EN: "Max. CH setpoint read/write",
VAR: "RemoteMaxCHReadWrite",
},
},
# OpenTherm messages
"messages": {
0x00: { # 0, Status
EN: "Status",
DIR: READ_ONLY,
VAL: FLAG8,
FLAGS: "StatusFlags",
},
0x01: { # 1, Control Setpoint
EN: "Control setpoint",
NL: "Ketel doeltemperatuur",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "ControlSetpoint",
SENSOR: TEMPERATURE,
},
0x02: { # 2, Master Member ID
EN: "Master configuration",
DIR: WRITE_ONLY,
VAL: {HB: FLAG8, LB: U8},
FLAGS: "MasterConfigFlags",
VAR: {LB: "MasterMemberId"},
},
0x03: { # 3, Slave Member ID
EN: "Slave configuration",
DIR: READ_ONLY,
VAL: {HB: FLAG8, LB: U8},
FLAGS: "SlaveConfigFlags",
VAR: {LB: "SlaveMemberId"},
},
0x04: { # 4, Remote Command
EN: "Remote command",
DIR: WRITE_ONLY,
VAL: U8,
VAR: "RemoteCommand",
},
0x05: { # 5, OEM Fault Code
EN: "Fault flags & OEM fault code",
DIR: READ_ONLY,
VAL: {HB: FLAG8, LB: U8},
VAR: {LB: "OEMFaultCode"},
FLAGS: "FaultFlags",
},
0x06: { # 6, Remote Flags
EN: "Remote parameter flags",
DIR: READ_ONLY,
VAL: FLAG8,
FLAGS: "RemoteFlags",
},
0x07: { # 7, Cooling Control Signal
EN: "Cooling control signal",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "CoolingControlSignal",
SENSOR: PERCENTAGE,
},
0x08: { # 8, CH2 Control Setpoint
EN: "Control setpoint for 2nd CH circuit",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "CH2ControlSetpoint",
SENSOR: TEMPERATURE,
},
0x09: { # 9, Remote Override Room Setpoint
EN: "Remote override room setpoint",
NL: "Overschreven kamer doeltemperatuur",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "RemoteOverrideRoomSetpoint",
SENSOR: TEMPERATURE,
},
0x0A: { # 10, TSP Number
EN: "Number of transparent slave parameters supported by slave",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "TSPNumber"},
},
0x0B: { # 11, TSP Entry
EN: "Index number/value of referred-to transparent slave parameter",
DIR: READ_WRITE,
VAL: U8,
VAR: {HB: "TSPIndex", LB: "TSPValue"},
},
0x0C: { # 12, FHB Size
EN: "Size of fault history buffer supported by slave",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "FHBSize"},
},
0x0D: { # 13, FHB Entry
EN: "Index number/value of referred-to fault history buffer entry",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "FHBIndex", LB: "FHBValue"},
},
0x0E: { # 14, Max Relative Modulation Level
EN: "Max. relative modulation level",
NL: "Max. relatief modulatie-niveau",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "MaxRelativeModulationLevel",
SENSOR: PERCENTAGE,
},
0x0F: { # 15, Max Boiler Capacity & Min Modulation Level
EN: "Max. boiler capacity (kW) and modulation level setting (%)",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "MaxBoilerCapacity", LB: "MinModulationLevel"},
},
0x10: { # 16, Current Setpoint
EN: "Room setpoint",
NL: "Kamer doeltemperatuur",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "CurrentSetpoint",
SENSOR: TEMPERATURE,
},
0x11: { # 17, Relative Modulation Level
EN: "Relative modulation level",
NL: "Relatief modulatie-niveau",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "RelativeModulationLevel",
SENSOR: PERCENTAGE,
},
0x12: { # 18, CH Water Pressure
EN: "Central heating water pressure",
NL: "Keteldruk",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "CHWaterPressure",
SENSOR: PRESSURE,
},
0x13: { # 19, DHW Flow Rate
EN: "DHW flow rate (litres/minute)",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "DHWFlowRate",
SENSOR: "flow",
},
0x14: { # 20, Day/Time
EN: "Day of week & time of day",
DIR: READ_WRITE,
VAR: "DayTime",
},
0x15: { # 21, Date
EN: "Date",
DIR: READ_WRITE,
VAL: U8,
VAR: "Date",
},
0x16: { # 22, Year
EN: "Year",
DIR: READ_WRITE,
VAL: U16,
VAR: "Year",
},
0x17: { # 23, CH2 Current Setpoint
EN: "Room setpoint for 2nd CH circuit",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "CH2CurrentSetpoint",
SENSOR: TEMPERATURE,
},
0x18: { # 24, Current Room Temperature
EN: "Room temperature",
NL: "Kamertemperatuur",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "CurrentTemperature",
SENSOR: TEMPERATURE,
},
0x19: { # 25, Boiler Water Temperature
EN: "Boiler water temperature",
NL: "Ketelwatertemperatuur",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "BoilerWaterTemperature",
SENSOR: TEMPERATURE,
},
0x1A: { # 26, DHW Temperature
EN: "DHW temperature",
NL: "Tapwatertemperatuur",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "DHWTemperature",
SENSOR: TEMPERATURE,
},
0x1B: { # 27, Outside Temperature
EN: "Outside temperature",
NL: "Buitentemperatuur",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "OutsideTemperature",
SENSOR: TEMPERATURE,
},
0x1C: { # 28, Return Water Temperature
EN: "Return water temperature",
NL: "Retourtemperatuur",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "ReturnWaterTemperature",
SENSOR: TEMPERATURE,
},
0x1D: { # 29, Solar Storage Temperature
EN: "Solar storage temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "SolarStorageTemperature",
SENSOR: TEMPERATURE,
},
0x1E: { # 30, Solar Collector Temperature
EN: "Solar collector temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "SolarCollectorTemperature",
SENSOR: TEMPERATURE,
},
0x1F: { # 31, CH2 Flow Temperature
EN: "Flow temperature for 2nd CH circuit",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "CH2FlowTemperature",
SENSOR: TEMPERATURE,
},
0x20: { # 32, DHW2 Temperature
EN: "DHW 2 temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "DHW2Temperature",
SENSOR: TEMPERATURE,
},
0x21: { # 33, Boiler Exhaust Temperature
EN: "Boiler exhaust temperature",
DIR: READ_ONLY,
VAL: S16,
VAR: "BoilerExhaustTemperature",
SENSOR: TEMPERATURE,
},
0x30: { # 48, DHW Boundaries
EN: "DHW setpoint boundaries",
DIR: READ_ONLY,
VAL: S8,
VAR: "DHWBoundaries",
SENSOR: TEMPERATURE,
},
0x31: { # 49, CH Boundaries
EN: "Max. central heating setpoint boundaries",
DIR: READ_ONLY,
VAL: S8,
VAR: "CHBoundaries",
SENSOR: TEMPERATURE,
},
0x32: { # 50, OTC Boundaries
EN: "OTC heat curve ratio upper & lower bounds",
DIR: READ_ONLY,
VAL: S8,
VAR: "OTCBoundaries",
},
0x38: { # 56, DHW Setpoint
EN: "DHW setpoint",
NL: "Tapwater doeltemperatuur",
DIR: READ_WRITE,
VAL: F8_8,
VAR: "DHWSetpoint",
SENSOR: TEMPERATURE,
},
0x39: { # 57, Max CH Water Setpoint
EN: "Max. central heating water setpoint",
NL: "Max. ketel doeltemperatuur",
DIR: READ_WRITE,
VAL: F8_8,
VAR: "MaxCHWaterSetpoint",
SENSOR: TEMPERATURE,
},
0x3A: { # 58, OTC Heat Curve Ratio
EN: "OTC heat curve ratio",
DIR: READ_WRITE,
VAL: F8_8,
VAR: "OTCHeatCurveRatio",
SENSOR: TEMPERATURE,
},
# OpenTherm 2.3 IDs (70-91) for ventilation/heat-recovery applications
0x46: { # 70, VH Status
EN: "Status ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: FLAG8,
VAR: "VHStatus",
},
0x47: { # 71, VH Control Setpoint
EN: "Control setpoint ventilation/heat-recovery",
DIR: WRITE_ONLY,
VAL: U8,
VAR: {HB: "VHControlSetpoint"},
},
0x48: { # 72, VH Fault Code
EN: "Fault flags/code ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: {HB: FLAG, LB: U8},
VAR: {LB: "VHFaultCode"},
},
0x49: { # 73, VH Diagnostic Code
EN: "Diagnostic code ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: U16,
VAR: "VHDiagnosticCode",
},
0x4A: { # 74, VH Member ID
EN: "Config/memberID ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: {HB: FLAG, LB: U8},
VAR: {LB: "VHMemberId"},
},
0x4B: { # 75, VH OpenTherm Version
EN: "OpenTherm version ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "VHOpenThermVersion",
},
0x4C: { # 76, VH Product Type/Version
EN: "Version & type ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "VHProductType", LB: "VHProductVersion"},
},
0x4D: { # 77, Relative Ventilation
EN: "Relative ventilation",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "RelativeVentilation"},
},
0x4E: { # 78, Relative Humidity
EN: "Relative humidity",
NL: "Luchtvochtigheid",
DIR: READ_WRITE,
VAL: U8,
VAR: {HB: "RelativeHumidity"},
SENSOR: HUMIDITY,
},
0x4F: { # 79, CO2 Level
EN: "CO2 level",
NL: "CO2 niveau",
DIR: READ_WRITE,
VAL: U16,
VAR: "CO2Level",
SENSOR: "co2",
},
0x50: { # 80, Supply Inlet Temperature
EN: "Supply inlet temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "SupplyInletTemperature",
SENSOR: TEMPERATURE,
},
0x51: { # 81, Supply Outlet Temperature
EN: "Supply outlet temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "SupplyOutletTemperature",
SENSOR: TEMPERATURE,
},
0x52: { # 82, Exhaust Inlet Temperature
EN: "Exhaust inlet temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "ExhaustInletTemperature",
SENSOR: TEMPERATURE,
},
0x53: { # 83, Exhaust Outlet Temperature
EN: "Exhaust outlet temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "ExhaustOutletTemperature",
SENSOR: TEMPERATURE,
},
0x54: { # 84, Exhaust Fan Speed
EN: "Actual exhaust fan speed",
DIR: READ_ONLY,
VAL: U16,
VAR: "ExhaustFanSpeed",
},
0x55: { # 85, Inlet Fan Speed
EN: "Actual inlet fan speed",
DIR: READ_ONLY,
VAL: U16,
VAR: "InletFanSpeed",
},
0x56: { # 86, VH Remote Parameter
EN: "Remote parameter settings ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: FLAG8,
VAR: "VHRemoteParameter",
},
0x57: { # 87, Nominal Ventilation
EN: "Nominal ventilation value",
DIR: READ_WRITE,
VAL: U8,
VAR: "NominalVentilation",
},
0x58: { # 88, VH TSP Size
EN: "TSP number ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "VHTSPSize"},
},
0x59: { # 89, VH TSP Entry
EN: "TSP entry ventilation/heat-recovery",
DIR: READ_WRITE,
VAL: U8,
VAR: {HB: "VHTSPIndex", LB: "VHTSPValue"},
},
0x5A: { # 90, VH FHB Size
EN: "Fault buffer size ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "VHFHBSize"},
},
0x5B: { # 91, VH FHB Entry
EN: "Fault buffer entry ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "VHFHBIndex", LB: "VHFHBValue"},
},
# OpenTherm 2.2 IDs
0x64: { # 100, Remote Override Function
EN: "Remote override function",
DIR: READ_ONLY,
VAL: {HB: FLAG8, LB: U8},
VAR: {HB: "RemoteOverrideFunction"},
},
0x73: { # 115, OEM Diagnostic Code
EN: "OEM diagnostic code",
DIR: READ_ONLY,
VAL: U16,
VAR: "OEMDiagnosticCode",
},
0x74: { # 116, Starts Burner
EN: "Number of starts burner",
DIR: READ_WRITE,
VAL: U16,
VAR: "StartsBurner",
SENSOR: COUNTER,
},
0x75: { # 117, Starts CH Pump
EN: "Number of starts central heating pump",
DIR: READ_WRITE,
VAL: U16,
VAR: "StartsCHPump",
SENSOR: COUNTER,
},
0x76: { # 118, Starts DHW Pump
EN: "Number of starts DHW pump/valve",
DIR: READ_WRITE,
VAL: U16,
VAR: "StartsDHWPump",
SENSOR: COUNTER,
},
0x77: { # 119, Starts Burner DHW
EN: "Number of starts burner during DHW mode",
DIR: READ_WRITE,
VAL: U16,
VAR: "StartsBurnerDHW",
SENSOR: COUNTER,
},
0x78: { # 120, Hours Burner
EN: "Number of hours burner is in operation (i.e. flame on)",
DIR: READ_WRITE,
VAL: U16,
VAR: "HoursBurner",
SENSOR: COUNTER,
},
0x79: { # 121, Hours CH Pump
EN: "Number of hours central heating pump has been running",
DIR: READ_WRITE,
VAL: U16,
VAR: "HoursCHPump",
SENSOR: COUNTER,
},
0x7A: { # 122, Hours DHW Pump
EN: "Number of hours DHW pump has been running/valve has been opened",
DIR: READ_WRITE,
VAL: U16,
VAR: "HoursDHWPump",
SENSOR: COUNTER,
},
0x7B: { # 123, Hours DHW Burner
EN: "Number of hours DHW burner is in operation during DHW mode",
DIR: READ_WRITE,
VAL: U16,
VAR: "HoursDHWBurner",
SENSOR: COUNTER,
},
0x7C: { # 124, Master OpenTherm Version
EN: "Opentherm version Master",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "MasterOpenThermVersion",
},
0x7D: { # 125, Slave OpenTherm Version
EN: "Opentherm version Slave",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "SlaveOpenThermVersion",
},
0x7E: { # 126, Master Product Type/Version
EN: "Master product version and type",
DIR: WRITE_ONLY,
VAL: U8,
VAR: {HB: "MasterProductType", LB: "MasterProductVersion"},
},
0x7F: { # 127, Slave Product Type/Version
EN: "Slave product version and type",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "SlaveProductType", LB: "SlaveProductVersion"},
},
# ZX-DAVB extras
0x71: { # 113, Bad Starts Burner
EN: "Number of un-successful burner starts",
DIR: READ_WRITE,
VAL: U16,
VAR: "BadStartsBurner?",
SENSOR: COUNTER,
},
0x72: { # 114, Low Signals Flame
EN: "Number of times flame signal was too low",
DIR: READ_WRITE,
VAL: U16,
VAR: "LowSignalsFlame?",
SENSOR: COUNTER,
},
# https://www.domoticaforum.eu/viewtopic.php?f=70&t=10893
# 0x23: { # 35, Boiler Fan Speed (rpm/60?)?
# },
0x24: { # 36, Electrical current through burner flame (µA)
EN: "Electrical current through burner flame (µA)",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "BurnerCurrent",
},
0x25: { # 37, CH2 Room Temperature
EN: "Room temperature for 2nd CH circuit",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "CH2CurrentTemperature",
SENSOR: TEMPERATURE,
},
0x26: { # 38, Relative Humidity, c.f. 0x4E
EN: "Relative humidity",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "RelativeHumidity"}, # TODO: or LB?
SENSOR: HUMIDITY,
},
},
}
def parity(x: int) -> int:
    """Return the bit-parity of *x*.

    :param x: a non-negative integer
    :return: 1 if ``x`` has an odd number of set bits, else 0

    Folds the upper half of the value onto the lower half with XOR at
    each step, so the loop runs O(log bits) times rather than per bit.
    """
    shiftamount = 1
    while x >> shiftamount:  # keep folding while any higher bits remain
        x ^= x >> shiftamount
        shiftamount <<= 1
    return x & 1
def ot_msg_value(val_seqx, val_type) -> Any:
    """Decode an OpenTherm data field from its hex-string representation.

    :param val_seqx: 4-char hex string of the 16-bit data field, high byte
        first; single-byte types only use the first two characters
    :param val_type: one of the data-type keys (flag8, u8, s8, f8.8, u16, s16)
    :return: the decoded value (list of bits for flag8, int or float
        otherwise), or ``val_seqx`` unchanged for an unknown ``val_type``
    """

    def _get_flag8(byte, *args) -> list:
        """Split a byte (as a str) into a list of 8 bits (1/0), LSB first."""
        ret = [0] * 8
        byte = bytes.fromhex(byte)[0]
        for i in range(0, 8):
            ret[i] = byte & 1
            byte = byte >> 1
        return ret

    def _get_u8(byte, *args) -> int:
        """Convert a byte (as a str) into an unsigned int."""
        return struct.unpack(">B", bytes.fromhex(byte))[0]

    def _get_s8(byte, *args) -> int:
        """Convert a byte (as a str) into a signed int."""
        return struct.unpack(">b", bytes.fromhex(byte))[0]

    def _get_f8_8(msb, lsb) -> float:
        """Convert 2 bytes (as strs) into an OpenTherm f8_8 (float) value."""
        return float(_get_s16(msb, lsb) / 256)

    def _get_u16(msb, lsb) -> int:
        """Convert 2 bytes (as strs) into an unsigned int."""
        buf = struct.pack(">BB", _get_u8(msb), _get_u8(lsb))
        return int(struct.unpack(">H", buf)[0])

    def _get_s16(msb, lsb) -> int:
        """Convert 2 bytes (as strs) into a signed int."""
        buf = struct.pack(">bB", _get_s8(msb), _get_u8(lsb))
        return int(struct.unpack(">h", buf)[0])

    # Dispatch table: data-type key -> decoder (NB: "flag" and compound
    # HB/LB types are not decoded here and fall through unchanged).
    DATA_TYPES = {
        FLAG8: _get_flag8,
        U8: _get_u8,
        S8: _get_s8,
        F8_8: _get_f8_8,
        U16: _get_u16,
        S16: _get_s16,
    }

    if val_type in DATA_TYPES:
        return DATA_TYPES[val_type](val_seqx[:2], val_seqx[2:])
    return val_seqx
# See: https://www.opentherm.eu/request-details/?post_ids=2944
#
# ID0:HB0: Master status: CH enable
# ID0:HB1: Master status: DHW enable
# ID0:HB2: Master status: Cooling enable
# ID0:HB3: Master status: OTC active
# ID0:HB5: Master status: Summer/winter mode
# ID0:HB6: Master status: DHW blocking
# ID0:LB0: Slave Status: Fault indication
# ID0:LB1: Slave Status: CH mode
# ID0:LB2: Slave Status: DHW mode
# ID0:LB3: Slave Status: Flame status
# ID1: Control Setpoint i.e. CH water temperature Setpoint (°C)
# ID2:HB0: Master configuration: Smart power
# ID2:LB: Master MemberID Code
# ID3:HB0: Slave configuration: DHW present
# ID3:HB1: Slave configuration: Control type
# ID3:HB4: Slave configuration: Master low-off&pump control
# ID5:HB0: Service request
# ID5:HB1: Lockout-reset
# ID5:HB2: Low water pressure
# ID5:HB3: Gas/flame fault
# ID5:HB4: Air pressure fault
# ID5:HB5: Water over-temperature
# ID5:LB: OEM fault code
# ID6:HB0: Remote boiler parameter transfer-enable: DHW setpoint
# ID6:HB1: Remote boiler parameter transfer-enable: max. CH setpoint
# ID6:LB0: Remote boiler parameter read/write: DHW setpoint
# ID6:LB1: Remote boiler parameter read/write: max. CH setpoint
# ID9: Remote override room Setpoint
# ID10: Number of Transparent-Slave-Parameters supported by slave
# ID12: Size of Fault-History-Buffer supported by slave
# ID14: Maximum relative modulation level setting (%)
# ID16: Room Setpoint (°C)
# ID17: Relative Modulation Level (%)
# ID18: Water pressure in CH circuit (bar)
# ID19: Water flow rate in DHW circuit. (litres/minute)
# ID24: Room temperature (°C)
# ID25: Boiler flow water temperature (°C)
# ID26: DHW temperature (°C)
# ID27: Outside temperature (°C)
# ID28: Return water temperature (°C)
# ID48: DHW Setpoint upper & lower bounds for adjustment (°C)
# ID49: Max CH water Setpoint upper & lower bounds for adjustment (°C)
# ID56: DHW Setpoint (°C) (Remote parameter 1)
# ID57: Max CH water Setpoint (°C) (Remote parameters 2)
# ID126: Master product version number and type
# ID127: Slave product version number and type
# https://github.com/rvdbreemen/OTGW-firmware/blob/main/Specification/New%20OT%20data-ids.txt # noqa
"""
New OT Data-ID's - Found two new ID's at this device description:
http://www.opentherm.eu/product/view/18/feeling-d201-ot
ID 98: For a specific RF sensor the RF strength and battery level is written
ID 99: Operating Mode HC1, HC2/ Operating Mode DHW
Found new data-id's at this page:
https://www.opentherm.eu/request-details/?post_ids=1833
ID 109: Electricity producer starts
ID 110: Electricity producer hours
ID 111: Electricity production
ID 112: Cumulative Electricity production
Found new Data-ID's at this page:
https://www.opentherm.eu/request-details/?post_ids=1833
ID 36: {f8.8} "Electrical current through burner flame" (µA)
ID 37: {f8.8} "Room temperature for 2nd CH circuit"
ID 38: {u8 u8} "Relative Humidity"
For Data-ID's 37 and 38 I assumed their data types, for Data ID 36 I determined it by
matching qSense value with the correct data-type.
I also analysed OT Remeha qSense <-> Remeha Tzerra communication.
ID 131: {u8 u8} "Remeha dF-/dU-codes"
ID 132: {u8 u8} "Remeha Service message"
ID 133: {u8 u8} "Remeha detection connected SCU’s"
"Remeha dF-/dU-codes": Should match the dF-/dU-codes written on boiler nameplate.
Read-Data Request (0 0) returns the data. Also accepts Write-Data Requests (dF dU),
this returns the boiler to its factory defaults.
"Remeha Service message" Read-Data Request (0 0), boiler returns (0 2) in case of no
boiler service. Write-Data Request (1 255) clears the boiler service message.
boiler returns (1 1) = next service type is "A"
boiler returns (1 2) = next service type is "B"
boiler returns (1 3) = next service type is "C"
"Remeha detection connected SCU’s": Write-Data Request (255 1) enables detection of
connected SCU prints, correct response is (Write-Ack 255 1).
Other Remeha info:
ID 5: corresponds with the Remeha E:xx fault codes
ID 11: corresponds with the Remeha Pxx parameter codes
ID 35: reported value is fan speed in rpm/60
ID 115: corresponds with the Remeha Status and Sub-status numbers, {u8 u8} data-type
"""
| [
"logging.getLogger",
"struct.unpack"
] | [((211, 238), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (228, 238), False, 'import logging\n'), ((25371, 25395), 'struct.unpack', 'struct.unpack', (['""">H"""', 'buf'], {}), "('>H', buf)\n", (25384, 25395), False, 'import struct\n'), ((25575, 25599), 'struct.unpack', 'struct.unpack', (['""">h"""', 'buf'], {}), "('>h', buf)\n", (25588, 25599), False, 'import struct\n')] |
import bokeh.io
import bokeh.plotting
import bokeh.layouts
import bokeh.palettes
import seaborn as sns
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib
def plotting_style(grid: bool = True):
    """
    Set the global matplotlib/seaborn style to the publication style.

    Parameters
    ----------
    grid : bool, default True
        Whether to draw the background grid on the axes.

    Notes
    -----
    Mutates global rc state via ``plt.rc`` and ``sns.set_style``; every
    figure created after this call inherits the look.
    """
    # Publication rc overrides: beige axes, white grid lines, inward ticks.
    rc = {'axes.facecolor': '#E3DCD0',
          'font.family': 'Lucida Sans Unicode',
          'grid.linestyle': '-',
          'grid.linewidth': 0.5,
          'grid.alpha': 0.75,
          'grid.color': '#ffffff',
          'axes.grid': grid,
          'ytick.direction': 'in',
          'xtick.direction': 'in',
          'xtick.gridOn': True,
          'ytick.gridOn': True,
          'ytick.major.width':5,
          'xtick.major.width':5,
          'ytick.major.size': 5,
          'xtick.major.size': 5,
          'mathtext.fontset': 'stixsans',
          'mathtext.sf': 'sans',
          'legend.frameon': True,
          'legend.facecolor': '#FFEDCE',
          'figure.dpi': 150,
          'xtick.color': 'k',
          'ytick.color': 'k'}
    # Sans-serif math text, both for LaTeX rendering and builtin mathtext.
    plt.rc('text.latex', preamble=r'\usepackage{sfmath}')
    plt.rc('mathtext', fontset='stixsans', sf='sans')
    sns.set_style('darkgrid', rc=rc)
def color_selector(style):
    """
    Select the color palette of your choice.

    Parameters
    ----------
    style : str, "mut" or "pboc"
        A string identifier for the palette (case-insensitive).
        "mut" gives colors for the wild type plus single and double
        mutants; "pboc" returns the PBoC2e color palette.

    Returns
    -------
    colors : dict
        Dictionary mapping names to hex color strings. For "mut" the
        keys are mutant names in upper case (double mutants keyed as
        "DNA-IND", e.g. "Y20I-F164T") plus "WT". For "pboc" the keys
        are the usual color descriptors (e.g. "green", "pale_yellow").

    Raises
    ------
    ValueError
        If `style` is neither "mut" nor "pboc".
    """
    # Normalise once so the validation and the dispatch below agree.
    key = style.lower()

    # Ensure the provided style name makes sense.
    if key not in ['mut', 'pboc']:
        raise ValueError("Provided style must be 'pboc' or 'mut'. {} provided.".format(style))

    # Set the color styles and return.
    if key == 'mut':
        colors = {'Y20I': '#738FC1', 'Q21A': '#7AA974', 'Q21M': '#AB85AC',
                  'F164T': '#A97C50', 'Q294K': '#5D737E', 'Q294V': '#D56C55',
                  'Q294R': '#B2AF58', 'Y20I-F164T': '#2d98da', 'Y20I-Q294K': '#34495e',
                  'Y20I-Q294V': '#8854d0', 'Q21A-F164T': '#4b6584', 'Q21A-Q294K': '#EE5A24',
                  'Q21A-Q294V': '#009432', 'Q21M-F164T': '#1289A7', 'Q21M-Q294K': '#6F1E51',
                  'Q21M-Q294V': '#006266', 'WT': '#3C3C3C'}
    else:  # key == 'pboc', guaranteed by the validation above
        colors = {'green': '#7AA974', 'light_green': '#BFD598',
                  'pale_green': '#DCECCB', 'yellow': '#EAC264',
                  'light_yellow': '#F3DAA9', 'pale_yellow': '#FFEDCE',
                  'blue': '#738FC1', 'light_blue': '#A9BFE3',
                  'pale_blue': '#C9D7EE', 'red': '#D56C55', 'light_red': '#E8B19D',
                  'pale_red': '#F1D4C9', 'purple': '#AB85AC',
                  'light_purple': '#D4C2D9', 'dark_green':'#7E9D90', 'dark_brown':'#905426'}
    return colors
| [
"seaborn.set_style",
"matplotlib.pyplot.rc"
] | [((1032, 1085), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text.latex"""'], {'preamble': '"""\\\\usepackage{sfmath}"""'}), "('text.latex', preamble='\\\\usepackage{sfmath}')\n", (1038, 1085), True, 'import matplotlib.pyplot as plt\n'), ((1090, 1139), 'matplotlib.pyplot.rc', 'plt.rc', (['"""mathtext"""'], {'fontset': '"""stixsans"""', 'sf': '"""sans"""'}), "('mathtext', fontset='stixsans', sf='sans')\n", (1096, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1144, 1176), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {'rc': 'rc'}), "('darkgrid', rc=rc)\n", (1157, 1176), True, 'import seaborn as sns\n')] |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from reno import create
from reno.tests import base
class TestPickFileName(base.TestCase):
    """Tests for ``create._pick_note_file_name`` collision handling."""

    @mock.patch('os.path.exists')
    def test_not_random_enough(self, exists):
        # Every candidate name already "exists" on disk, so the picker
        # must raise ValueError rather than return a colliding name.
        exists.return_value = True
        self.assertRaises(
            ValueError,
            create._pick_note_file_name,
            'somepath',
            'someslug',
        )

    @mock.patch('os.path.exists')
    def test_random_enough(self, exists):
        # With no collisions, the chosen name embeds both the target
        # directory and the requested slug.
        exists.return_value = False
        result = create._pick_note_file_name('somepath', 'someslug')
        self.assertIn('somepath', result)
        self.assertIn('someslug', result)
class TestCreate(base.TestCase):
    """Tests for note-file creation and editor invocation."""

    def setUp(self):
        super(TestCreate, self).setUp()
        # Work in a throwaway directory so created note files are
        # cleaned up automatically by the fixture.
        self.tmpdir = self.useFixture(fixtures.TempDir()).path

    def test_create_from_template(self):
        # The created note file is seeded with the template text verbatim.
        filename = create._pick_note_file_name(self.tmpdir, 'theslug')
        create._make_note_file(filename, 'i-am-a-template')
        with open(filename, 'r') as f:
            body = f.read()
        self.assertEqual('i-am-a-template', body)

    def test_edit(self):
        # With $EDITOR set, _edit_file succeeds and launches that editor
        # on the given path.
        self.useFixture(fixtures.EnvironmentVariable('EDITOR', 'myeditor'))
        with mock.patch('subprocess.call') as call_mock:
            self.assertTrue(create._edit_file('somepath'))
        call_mock.assert_called_once_with(['myeditor', 'somepath'])

    def test_edit_without_editor_env_var(self):
        # With $EDITOR unset, _edit_file reports failure and spawns nothing.
        self.useFixture(fixtures.EnvironmentVariable('EDITOR'))
        with mock.patch('subprocess.call') as call_mock:
            self.assertFalse(create._edit_file('somepath'))
        call_mock.assert_not_called()
| [
"mock.patch",
"reno.create._pick_note_file_name",
"reno.create._edit_file",
"fixtures.EnvironmentVariable",
"reno.create._make_note_file",
"fixtures.TempDir"
] | [((699, 727), 'mock.patch', 'mock.patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (709, 727), False, 'import mock\n'), ((965, 993), 'mock.patch', 'mock.patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (975, 993), False, 'import mock\n'), ((1089, 1140), 'reno.create._pick_note_file_name', 'create._pick_note_file_name', (['"""somepath"""', '"""someslug"""'], {}), "('somepath', 'someslug')\n", (1116, 1140), False, 'from reno import create\n'), ((1446, 1497), 'reno.create._pick_note_file_name', 'create._pick_note_file_name', (['self.tmpdir', '"""theslug"""'], {}), "(self.tmpdir, 'theslug')\n", (1473, 1497), False, 'from reno import create\n'), ((1506, 1557), 'reno.create._make_note_file', 'create._make_note_file', (['filename', '"""i-am-a-template"""'], {}), "(filename, 'i-am-a-template')\n", (1528, 1557), False, 'from reno import create\n'), ((1725, 1775), 'fixtures.EnvironmentVariable', 'fixtures.EnvironmentVariable', (['"""EDITOR"""', '"""myeditor"""'], {}), "('EDITOR', 'myeditor')\n", (1753, 1775), False, 'import fixtures\n'), ((1790, 1819), 'mock.patch', 'mock.patch', (['"""subprocess.call"""'], {}), "('subprocess.call')\n", (1800, 1819), False, 'import mock\n'), ((2038, 2076), 'fixtures.EnvironmentVariable', 'fixtures.EnvironmentVariable', (['"""EDITOR"""'], {}), "('EDITOR')\n", (2066, 2076), False, 'import fixtures\n'), ((2091, 2120), 'mock.patch', 'mock.patch', (['"""subprocess.call"""'], {}), "('subprocess.call')\n", (2101, 2120), False, 'import mock\n'), ((1360, 1378), 'fixtures.TempDir', 'fixtures.TempDir', ([], {}), '()\n', (1376, 1378), False, 'import fixtures\n'), ((1862, 1891), 'reno.create._edit_file', 'create._edit_file', (['"""somepath"""'], {}), "('somepath')\n", (1879, 1891), False, 'from reno import create\n'), ((2164, 2193), 'reno.create._edit_file', 'create._edit_file', (['"""somepath"""'], {}), "('somepath')\n", (2181, 2193), False, 'from reno import create\n')] |
import math
import torch.nn as nn
from .modules import QConv2d, QLinear
def make_layers(cfg, batch_norm=False, wbit=4, abit=4):
    """Build the convolutional feature extractor described by *cfg*.

    Each integer entry adds a quantised 3x3 convolution (optionally
    followed by BatchNorm2d) plus an in-place ReLU; an 'M' entry adds
    2x2 max-pooling and an 'A' entry adds 2x2 average-pooling.

    Args:
        cfg: sequence of ints and 'M'/'A' markers.
        batch_norm: insert BatchNorm2d after each convolution when True.
        wbit: weight bit-width forwarded to QConv2d.
        abit: activation bit-width forwarded to QConv2d.

    Returns:
        nn.Sequential wrapping the constructed layers.
    """
    layers = []
    channels = 3  # network input is assumed to be RGB
    for entry in cfg:
        if entry == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        elif entry == 'A':
            layers.append(nn.AvgPool2d(kernel_size=2, stride=2))
        else:
            layers.append(
                QConv2d(channels, entry, kernel_size=3, padding=1, wbit=wbit, abit=abit)
            )
            if batch_norm:
                layers.append(nn.BatchNorm2d(entry))
            layers.append(nn.ReLU(inplace=True))
            channels = entry
    return nn.Sequential(*layers)
# Layer configurations keyed by network depth: integers are conv output
# channels, 'M' marks a 2x2 max-pool stage (consumed by make_layers).
cfg = {
    7: [128, 128, 'M', 256, 256, 'M', 512, 512, 'M'],
    16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
         512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
    """VGG-style classifier built from quantised conv/linear layers.

    Args:
        num_classes: number of output classes.
        depth: key into ``cfg`` selecting the architecture (7, 16 or 19).
        batch_norm: insert BatchNorm2d after each convolution when True.
        wbit: weight bit-width forwarded to QConv2d/QLinear.
        abit: activation bit-width forwarded to QConv2d/QLinear.
        channel_wise: accepted but not used in this class -- TODO confirm
            whether callers depend on it being honoured.
    """

    def __init__(self, num_classes=10, depth=16, batch_norm=False, wbit=4, abit=4, channel_wise=0):
        super(VGG, self).__init__()
        self.features = make_layers(cfg[depth], batch_norm, wbit=wbit, abit=abit)
        if depth == 7:
            # Shallow variant: quantised two-layer head. 8192 is the
            # flattened feature size (presumably 512 x 4 x 4; depends on
            # the input resolution -- verify against the caller).
            self.classifier = nn.Sequential(
                QLinear(8192, 1024, wbit=wbit, abit=abit),
                nn.ReLU(True),
                QLinear(1024, num_classes, wbit=wbit, abit=abit),
            )
        else:
            # Deeper variants keep a full-precision VGG-style head.
            self.classifier = nn.Sequential(
                nn.Dropout(),
                nn.Linear(512, 512),
                nn.ReLU(True),
                nn.Dropout(),
                nn.Linear(512, 512),
                nn.ReLU(True),
                nn.Linear(512, num_classes),
            )

        # Kaiming/He-style normal init (fan-out) for conv weights; applies
        # to any module that is an nn.Conv2d instance (incl. subclasses).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                # m.bias.data.zero_()

    def forward(self, x):
        """Extract features, flatten per-sample, then classify."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
class vgg7_Q:
    """Model-registry entry: 7-layer quantized VGG with batch norm."""
    base = VGG
    args = []
    kwargs = {'depth': 7, 'batch_norm': True}
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Sequential",
"math.sqrt",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d"
] | [((682, 704), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (695, 704), True, 'import torch.nn as nn\n'), ((232, 269), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (244, 269), True, 'import torch.nn as nn\n'), ((1375, 1388), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1382, 1388), True, 'import torch.nn as nn\n'), ((1545, 1557), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (1555, 1557), True, 'import torch.nn as nn\n'), ((1575, 1594), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(512)'], {}), '(512, 512)\n', (1584, 1594), True, 'import torch.nn as nn\n'), ((1612, 1625), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1619, 1625), True, 'import torch.nn as nn\n'), ((1643, 1655), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (1653, 1655), True, 'import torch.nn as nn\n'), ((1673, 1692), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(512)'], {}), '(512, 512)\n', (1682, 1692), True, 'import torch.nn as nn\n'), ((1710, 1723), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1717, 1723), True, 'import torch.nn as nn\n'), ((1741, 1768), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'num_classes'], {}), '(512, num_classes)\n', (1750, 1768), True, 'import torch.nn as nn\n'), ((317, 354), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (329, 354), True, 'import torch.nn as nn\n'), ((1973, 1991), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (1982, 1991), False, 'import math\n'), ((525, 542), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['v'], {}), '(v)\n', (539, 542), True, 'import torch.nn as nn\n'), ((544, 565), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (551, 565), True, 'import torch.nn as nn\n'), ((620, 641), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (627, 641), True, 'import 
torch.nn as nn\n')] |
#!/usr/bin/python
'''
Central Templates Ansible Module
'''
# MIT License
#
# Copyright (c) 2020 Aruba, a Hewlett Packard Enterprise company
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Module maturity/ownership metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = """
---
module: central_templates
version_added: 2.9.0
short_description: REST API module for templates on Aruba Central
description: This module provides a mechanism to interact with or upload
configuration templates that are used for group-level and
device-level configuration on Aruba Central
options:
action:
description:
- Action to be performed on the template(s)
- "get_template_text" gets the contents of a template
- "get_all" gets info on all templates in a group
- "update" updates attributes of an existing template
- "create" creates a new template in a group
- "delete" deletes an existing template from a group
required: true
type: str
choices:
- get_template_text
- get_all
- update
- create
- delete
group_name:
description:
- Name of the group
required: true
type: str
template_name:
description:
- Name of the template on Aruba Central
- Used with actions "get_template_text", "create", "update", and
"delete"
required: false
type: str
device_type:
description:
- Type of device for which the template file is applicable
- Used with action "create"
- Used optionally with actions "get_all" and "update"
required: false
type: str
choices:
- IAP
- ArubaSwitch
- CX
- MobilityController
version:
description:
- Firmware version property of template
- Used with action "create"
- Used optionally with actions "get_all" and "update"
required: false
type: str
default: ALL
model:
description:
- Model property of template
- Used with action "create"
- Used optionally with actions "get_all" and "update"
- For the "ArubaSwitch" device_type (i.e. AOS-S switches),
the part number (J number) can be used
- e.g. 2920, J9727A, etc.
required: false
type: str
default: ALL
local_file_path:
description:
- Full local path of template file to be uploaded
- For HP Switches, the template text should include the following
commands to maintain connection with Central:
- aruba-central enable
- aruba-central url https://< URL | IP >/ws
- Used with actions "create" and "update"
required: false
type: str
limit:
description:
- Maximum number of records to be returned
- Used optionally as a filter parameter for "get_all"
required: false
type: int
default: 20
offset:
description:
- Number of items to be skipped before returning the data, which is
useful for pagination
- Used optionally as a filter parameter for get_all
required: false
type: int
default: 0
"""
EXAMPLES = """
#Usage Examples
- name: Get all templates in a given group
central_templates:
action: get_all
group_name: new-group
limit: 20
offset: 0
- name: Get templates in a given group for a particular device type
central_templates:
action: get_all
group_name: new-group
device_type: IAP
limit: 20
offset: 0
version: ALL
model: ALL
- name: Get template text
central_templates:
action: get_template_text
group_name: new-group
template_name: iap-temp
- name: Upload a new template file and create a new template for a given device type # NOQA
central_templates:
action: create
group_name: new-group
template_name: iap-temp
device_type: IAP
version: ALL
model: ALL
local_file_path: /home/iap_template.txt
- name: Update an existing template
central_templates:
action: update
group_name: new-group
template_name: iap-temp
device_type: IAP
version: ALL
model: ALL
local_file_path: /home/modified_iap_template.txt
- name: Delete an existing template
central_templates:
action: delete
group_name: new-group
template_name: iap-temp
"""
import json # NOQA
from ansible.module_utils.basic import AnsibleModule # NOQA
from ansible.module_utils.central_http import CentralApi # NOQA
def error_msg(action):
    """Build the error payload returned when required playbook
    parameters for *action* are missing.

    :param action: name of the attempted API action
    :returns: dict with 'resp' (error message) and 'code' (always 400)
    """
    if action in ("get_template_text", "delete"):
        resp = "Template name is not present in the playbook"
    elif action in ("create", "update"):
        resp = "Template name, device type, or local file path is not" \
               " present in the playbook"
    else:
        # Defensive default: the original left `resp` unbound for any
        # other action, raising UnboundLocalError while reporting an error.
        resp = "Required parameters are not present in the playbook"
    return {"resp": resp, "code": 400}
def get_all_templates(central_api, group_name, **kwargs):
    """Fetch metadata for every template in *group_name*.

    Keyword arguments whose value is ``None`` are dropped before being
    sent as query-string filters (limit, offset, device_type, ...).
    """
    endpoint = "/configuration/v1/groups/" + str(group_name) + "/templates"
    headers = central_api.get_headers(False, "get")
    query_params = {key: val for key, val in kwargs.items() if val is not None}
    path = central_api.get_url(endpoint, query_params)
    return central_api.get(path=path, headers=headers)
def get_template_text(central_api, group_name, template_name):
    """Return the configuration text of *template_name* in *group_name*.

    Produces a 400 error payload when no template name was supplied.
    """
    if template_name is None:
        return error_msg("get_template_text")
    path = ("/configuration/v1/groups/" + str(group_name) +
            "/templates/" + str(template_name))
    headers = central_api.get_headers(True, "get")
    return central_api.get(path=path, headers=headers)
def create_update_template(central_api, group_name, template_name, **kwargs):
    """Upload a template file to create or modify a group template.

    Expects ``device_type``, ``version``, ``model``, ``action``
    ('create'/'update') and ``file`` (local path) in *kwargs*; returns a
    400 error payload when the template name or any kwarg is missing.
    """
    if template_name is None or None in kwargs.values():
        return error_msg("create")
    endpoint = "/configuration/v1/groups/" + str(group_name) + "/templates"
    query_params = {"name": template_name,
                    "device_type": kwargs['device_type'],
                    "version": kwargs['version'],
                    "model": kwargs['model']}
    headers = central_api.get_headers(True, "post")
    path = central_api.get_url(endpoint, query_params)
    filepath = kwargs['file']
    action = kwargs['action']
    if action == "create":
        result = central_api.post(path=path, headers=headers, filename=filepath)
    elif action == "update":
        result = central_api.patch(path=path, headers=headers, filename=filepath)
    return result
def delete_template(central_api, group_name, template_name):
    """Delete *template_name* from group *group_name*.

    Produces a 400 error payload when no template name was supplied.
    """
    if template_name is None:
        return error_msg("delete")
    headers = central_api.get_headers(False, "delete")
    path = ("/configuration/v1/groups/" + str(group_name) +
            "/templates/" + str(template_name))
    return central_api.delete(path=path, headers=headers)
def api_call(module):
    """Translate playbook parameters into the matching Central API request.

    Dispatches on the 'action' parameter, forwards the relevant filters
    to the helper for that action, and returns the raw API result dict.
    Fails the module for unknown or missing actions.
    """
    central_api = CentralApi(module)
    params = module.params
    action = params.get('action').lower()
    group_name = params.get('group_name')
    template_name = params.get('template_name')

    if action == "get_template_text":
        result = get_template_text(central_api, group_name, template_name)
    elif action == "get_all":
        result = get_all_templates(central_api=central_api,
                                  group_name=group_name,
                                  limit=params.get('limit'),
                                  offset=params.get('offset'),
                                  template=template_name,
                                  device_type=params.get('device_type'),
                                  version=params.get('version'),
                                  model=params.get('model'))
    elif action in ("create", "update"):
        result = create_update_template(central_api=central_api,
                                        group_name=group_name,
                                        template_name=template_name,
                                        device_type=params.get('device_type'),
                                        version=params.get('version'),
                                        model=params.get('model'),
                                        action=action,
                                        file=params.get('local_file_path'))
    elif action == "delete":
        result = delete_template(central_api, group_name, template_name)
    else:
        # fail_json exits the module, so the trailing return is unreachable here.
        module.fail_json(changed=False, msg="Unsupported or no action provided"
                         " in playbook")
    return result
def main():
    """Entry point: define the module argument spec, issue the requested
    API call, and map the HTTP response code onto Ansible exit status.
    """
    module = AnsibleModule(
        argument_spec=dict(
            action=dict(required=True, type='str',
                        choices=["get_template_text", "get_all", "update",
                                 "create", "delete"]),
            group_name=dict(required=True, type='str'),
            limit=dict(required=False, type='int', default=20),
            offset=dict(required=False, type='int', default=0),
            template_name=dict(required=False, type='str'),
            device_type=dict(required=False, type='str',
                             choices=["IAP", "ArubaSwitch", "CX",
                                      "MobilityController"]),
            version=dict(required=False, type='str', default="ALL"),
            # Bug fix: keyword was misspelled "defaul", so the documented
            # default of "ALL" was never applied to the model parameter.
            model=dict(required=False, type='str', default="ALL"),
            local_file_path=dict(required=False, type='path', default=None)
        ))

    success_codes = [200, 201]
    exit_codes = [304, 400, 404]

    # Only mutating actions (create/update/delete) report "changed".
    changed = "get" not in module.params.get('action')

    result = api_call(module)

    # API responses may arrive as JSON text; decode when possible.
    try:
        result['resp'] = json.loads(result['resp'])
    except (TypeError, ValueError):
        pass

    if result['code'] and result['code'] in success_codes:
        module.exit_json(changed=changed, msg=result['resp'],
                         response_code=result['code'])
    elif result['code'] and result['code'] in exit_codes:
        module.exit_json(changed=False, msg=result['resp'],
                         response_code=result['code'])
    else:
        module.fail_json(changed=False, msg=result['resp'],
                         response_code=result['code'])
# Standard module entry point when executed by Ansible.
if __name__ == '__main__':
    main()
| [
"json.loads",
"ansible.module_utils.central_http.CentralApi"
] | [((9181, 9199), 'ansible.module_utils.central_http.CentralApi', 'CentralApi', (['module'], {}), '(module)\n', (9191, 9199), False, 'from ansible.module_utils.central_http import CentralApi\n'), ((12141, 12167), 'json.loads', 'json.loads', (["result['resp']"], {}), "(result['resp'])\n", (12151, 12167), False, 'import json\n')] |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from habitat.timezone import get_timezone
# Shared habitat timezone helper used by the field labels/defaults below.
timezone = get_timezone()
class MissionDate(models.Model):
    """Abstract mixin adding a mission ``date`` text field.

    Verbose name, help text and default all come from the habitat
    timezone helper.  NOTE(review): ``timezone.date`` is presumably a
    callable or string default -- confirm against ``habitat.timezone``.
    """
    date = models.CharField(
        verbose_name=_(timezone.DATE_VERBOSE_NAME),
        help_text=_(timezone.DATE_HELP_TEXT),
        max_length=15,
        default=timezone.date)
    class Meta:
        abstract = True
class MissionTime(models.Model):
    """Abstract mixin adding a mission ``time`` field.

    Labels and default are supplied by the habitat timezone helper.
    NOTE(review): ``timezone.time`` is presumably a callable or time
    default -- confirm against ``habitat.timezone``.
    """
    time = models.TimeField(
        verbose_name=_(timezone.TIME_VERBOSE_NAME),
        help_text=_(timezone.TIME_HELP_TEXT),
        default=timezone.time)
    class Meta:
        abstract = True
class MissionDateTime(MissionDate, MissionTime):
    """Abstract mixin combining date and time with an admin-friendly
    combined ``datetime`` display column."""
    def datetime(self):
        # Admin column callable; allow_tags/short_description below
        # configure how Django admin renders it.
        # NOTE(review): returns the shared timezone helper's datetime,
        # not a value built from this record's date/time fields -- confirm.
        return timezone.datetime
    datetime.allow_tags = False
    datetime.short_description = _(timezone.DATETIME_VERBOSE_NAME)
    class Meta:
        abstract = True
| [
"django.utils.translation.ugettext_lazy",
"habitat.timezone.get_timezone"
] | [((140, 154), 'habitat.timezone.get_timezone', 'get_timezone', ([], {}), '()\n', (152, 154), False, 'from habitat.timezone import get_timezone\n'), ((822, 855), 'django.utils.translation.ugettext_lazy', '_', (['timezone.DATETIME_VERBOSE_NAME'], {}), '(timezone.DATETIME_VERBOSE_NAME)\n', (823, 855), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((241, 270), 'django.utils.translation.ugettext_lazy', '_', (['timezone.DATE_VERBOSE_NAME'], {}), '(timezone.DATE_VERBOSE_NAME)\n', (242, 270), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((290, 316), 'django.utils.translation.ugettext_lazy', '_', (['timezone.DATE_HELP_TEXT'], {}), '(timezone.DATE_HELP_TEXT)\n', (291, 316), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((498, 527), 'django.utils.translation.ugettext_lazy', '_', (['timezone.TIME_VERBOSE_NAME'], {}), '(timezone.TIME_VERBOSE_NAME)\n', (499, 527), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((547, 573), 'django.utils.translation.ugettext_lazy', '_', (['timezone.TIME_HELP_TEXT'], {}), '(timezone.TIME_HELP_TEXT)\n', (548, 573), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
#!/usr/bin/env python
from __future__ import print_function
import sys
import serial
import time
from math import sin, cos, pi
import argparse
import ast
from comms import *
from boards import *
from livegraph import livegraph
if __name__ == '__main__':
    # CLI: serial device, optional baud rate, target board id(s), control
    # mode, and the actuation setpoint(s) in that mode's units.
    parser = argparse.ArgumentParser(description='Drive motor module(s) with a given control mode and plot current measurements.')
    parser.add_argument('serial', type=str, help='Serial port')
    parser.add_argument('--baud_rate', type=int, help='Serial baud rate')
    parser.add_argument('board_ids', type=str, help='Board ID (separate with comma)')
    parser.add_argument('mode', type=str, help='Control mode: \
                                                current (Id[A], Iq[A]), \
                                                phase (dc,dc,dc), \
                                                torque (N*m), \
                                                velocity (rad/s), \
                                                position (rad), \
                                                pos_vel (rad,rad/s), \
                                                pos_ff (rad,ff[A]), \
                                                pwm (dc)')
    parser.add_argument('actuations', type=str, help='Actuation amount in the units of the selected mode (if requires multiple args, separate by comma)')
    parser.set_defaults(baud_rate=COMM_DEFAULT_BAUD_RATE, offset=COMM_BOOTLOADER_OFFSET)
    args = parser.parse_args()
    # Normalize the literal-eval'd CLI values: a single id/actuation or a
    # comma-separated tuple both become flat lists.
    make_list = lambda x: list(x) if (type(x) == list or type(x) == tuple) else [x]
    make_int = lambda x: [int(y) for y in x]
    board_ids = make_int(make_list(ast.literal_eval(args.board_ids)))
    actuations = make_list(ast.literal_eval(args.actuations))
    mode = args.mode
    # Short timeout keeps the sampling loop responsive if a board stops replying.
    ser = serial.Serial(port=args.serial, baudrate=args.baud_rate, timeout=0.05)
    client = BLDCControllerClient(ser)
    initialized = initBoards(client, board_ids)  # NOTE(review): result unused -- no retry/abort on failure
    client.leaveBootloader(board_ids)
    client.resetInputBuffer()
    initMotor(client, board_ids)
    def updateCurrent(i):
        # Sampling callback passed to livegraph; `i` is presumably the
        # frame index supplied by the grapher and is unused here.
        data = []
        for board_id in board_ids:
            try:
                driveMotor(client, board_ids, actuations, mode)
                # Read the iq calculated
                read = struct.unpack('<f', client.readRegisters([board_id], [0x3003], [1])[0])
                data.append(read)
                # Read the iq command
                read = struct.unpack('<f', client.readRegisters([board_id], [0x3020], [1])[0])
                data.append(read)
            except (ProtocolError, struct.error):
                # On comms failure, substitute zeros so the plot keeps its shape.
                #print("Failed to communicate with board: ", board_id)
                data.append([0.0])
                data.append([0.0])
        return time.time(), data
    # Two traces per board: measured iq and commanded iq (PID output).
    flatten = lambda l: [item for sublist in l for item in sublist]
    labels = []
    labels.extend([[str(bid) + '\'s iq Reading', str(bid) + '\'s iq PID output'] for bid in board_ids])
    labels = flatten(labels)
    graph = livegraph(updateCurrent, labels, sample_interval=1, window_size = 2000)
    graph.start()
| [
"argparse.ArgumentParser",
"ast.literal_eval",
"serial.Serial",
"livegraph.livegraph",
"time.time"
] | [((270, 397), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Drive motor module(s) with a given control mode and plot current measurements."""'}), "(description=\n 'Drive motor module(s) with a given control mode and plot current measurements.'\n )\n", (293, 397), False, 'import argparse\n'), ((1786, 1856), 'serial.Serial', 'serial.Serial', ([], {'port': 'args.serial', 'baudrate': 'args.baud_rate', 'timeout': '(0.05)'}), '(port=args.serial, baudrate=args.baud_rate, timeout=0.05)\n', (1799, 1856), False, 'import serial\n'), ((3006, 3075), 'livegraph.livegraph', 'livegraph', (['updateCurrent', 'labels'], {'sample_interval': '(1)', 'window_size': '(2000)'}), '(updateCurrent, labels, sample_interval=1, window_size=2000)\n', (3015, 3075), False, 'from livegraph import livegraph\n'), ((1718, 1751), 'ast.literal_eval', 'ast.literal_eval', (['args.actuations'], {}), '(args.actuations)\n', (1734, 1751), False, 'import ast\n'), ((1656, 1688), 'ast.literal_eval', 'ast.literal_eval', (['args.board_ids'], {}), '(args.board_ids)\n', (1672, 1688), False, 'import ast\n'), ((2757, 2768), 'time.time', 'time.time', ([], {}), '()\n', (2766, 2768), False, 'import time\n')] |