code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import object_database
from object_database import Schema, Indexed, Index, core_schema, SubscribeLazilyByDefault
from typed_python import *
import threading
from object_database.service_manager.ServiceSchema import service_schema
from typed_python.Codebase import Codebase as TypedPythonCodebase
# Singleton state objects for the codebase cache.
# _codebase_lock guards all access to the two module globals below.
_codebase_lock = threading.Lock()
# Maps codebase hash -> instantiated TypedPythonCodebase (populated lazily by Codebase.instantiate).
_codebase_cache = {}
# Absolute directory where codebases are written to disk; set once via
# setCodebaseInstantiationDirectory and then immutable unless forceReset is used.
_codebase_instantiation_dir = None
def setCodebaseInstantiationDirectory(directory, forceReset=False):
    """Record the directory in which codebases may be instantiated on disk.

    Intended to be called once at program startup. Passing forceReset=True
    clears any previously configured directory and empties the codebase
    cache before applying the new location. Asserts if a different location
    was already configured.
    """
    global _codebase_instantiation_dir
    global _codebase_cache
    with _codebase_lock:
        if forceReset:
            _codebase_cache = {}
            _codebase_instantiation_dir = None
        if _codebase_instantiation_dir == directory:
            # Already configured to this exact path; nothing to do.
            return
        assert _codebase_instantiation_dir is None, \
            "Can't modify the codebase instantiation location. (%s != %s)" % (_codebase_instantiation_dir, directory)
        _codebase_instantiation_dir = os.path.abspath(directory)
# A single file's contents, stored in the object database and deduplicated
# by content hash. Subscribed lazily because contents may be large.
@service_schema.define
@SubscribeLazilyByDefault
class File:
    # Content hash, indexed so lookupAny(hash=...) is efficient.
    hash = Indexed(str)
    # Raw file contents.
    contents = str
    @staticmethod
    def create(contents):
        """Return the existing File with these contents, or create a new one.

        Deduplicates on the content hash so identical contents map to a
        single File object in the database.
        """
        # NOTE(review): sha_hash comes in via `from typed_python import *`;
        # `.hexdigest` is accessed as an attribute here, not called — confirm
        # that is the intended typed_python API.
        hash = sha_hash(contents).hexdigest
        f = File.lookupAny(hash=hash)
        if f:
            return f
        else:
            return File(hash=hash, contents=contents)
# A content-addressed collection of files that can be instantiated on disk
# and imported as Python modules. Subscribed lazily because file contents
# may be large.
@service_schema.define
@SubscribeLazilyByDefault
class Codebase:
    # Hash over the codebase's files, indexed for deduplicated lookup.
    hash = Indexed(str)
    #filename (at root of project import) to contents
    files = ConstDict(str, service_schema.File)
    @staticmethod
    def createFromRootlevelPath(rootPath):
        """Create (or look up) a Codebase from a root-level project path on disk."""
        return Codebase.createFromCodebase(
            TypedPythonCodebase.FromRootlevelPath(rootPath)
        )
    @staticmethod
    def createFromCodebase(codebase:TypedPythonCodebase):
        """Create (or look up) a Codebase from an in-memory TypedPythonCodebase."""
        return Codebase.createFromFiles(codebase.filesToContents)
    @staticmethod
    def createFromFiles(files):
        """Create (or look up) a Codebase from a mapping of filename -> contents.

        Values may be raw strings or existing File objects; raw strings are
        deduplicated through File.create. If a Codebase with the same hash
        already exists it is returned instead of creating a duplicate.
        """
        assert files
        files = {k: File.create(v) if not isinstance(v, File) else v for k,v in files.items()}
        hashval = sha_hash(files).hexdigest
        c = Codebase.lookupAny(hash=hashval)
        if c:
            return c
        return Codebase(hash=hashval, files=files)
    def instantiate(self, module_name=None):
        """Instantiate a codebase on disk and load it.

        Returns the instantiated TypedPythonCodebase, or, when module_name
        is given, that module from within it. Results are cached per
        codebase hash in the module-level _codebase_cache, so disk
        instantiation happens at most once per hash.
        """
        with _codebase_lock:
            assert _codebase_instantiation_dir is not None
            if self.hash not in _codebase_cache:
                try:
                    if not os.path.exists(_codebase_instantiation_dir):
                        os.makedirs(_codebase_instantiation_dir)
                except Exception as e:
                    # Best-effort: instantiation below may still succeed if the
                    # directory exists despite the error (e.g. a creation race).
                    logger = logging.getLogger(__name__)
                    # BUGFIX: Logger.warn is a deprecated alias of Logger.warning.
                    logger.warning(
                        "Exception trying to make directory '%s'", _codebase_instantiation_dir)
                    logger.warning(
                        "Exception: %s", e)
                disk_path = os.path.join(_codebase_instantiation_dir, self.hash)
                #preload the files, since they're lazy.
                object_database.current_transaction().db().requestLazyObjects(set(self.files.values()))
                fileContents = {fpath: fileObject.contents for fpath, fileObject in self.files.items()}
                _codebase_cache[self.hash] = TypedPythonCodebase.Instantiate(fileContents, disk_path)
            if module_name is None:
                return _codebase_cache[self.hash]
            return _codebase_cache[self.hash].getModuleByName(module_name)
|
[
"os.path.abspath",
"typed_python.Codebase.Codebase.FromRootlevelPath",
"os.makedirs",
"object_database.Indexed",
"os.path.exists",
"threading.Lock",
"os.path.join",
"typed_python.Codebase.Codebase.Instantiate",
"logging.getLogger",
"object_database.current_transaction"
] |
[((980, 996), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (994, 996), False, 'import threading\n'), ((1821, 1833), 'object_database.Indexed', 'Indexed', (['str'], {}), '(str)\n', (1828, 1833), False, 'from object_database import Schema, Indexed, Index, core_schema, SubscribeLazilyByDefault\n'), ((2160, 2172), 'object_database.Indexed', 'Indexed', (['str'], {}), '(str)\n', (2167, 2172), False, 'from object_database import Schema, Indexed, Index, core_schema, SubscribeLazilyByDefault\n'), ((1721, 1747), 'os.path.abspath', 'os.path.abspath', (['directory'], {}), '(directory)\n', (1736, 1747), False, 'import os\n'), ((2394, 2441), 'typed_python.Codebase.Codebase.FromRootlevelPath', 'TypedPythonCodebase.FromRootlevelPath', (['rootPath'], {}), '(rootPath)\n', (2431, 2441), True, 'from typed_python.Codebase import Codebase as TypedPythonCodebase\n'), ((3657, 3709), 'os.path.join', 'os.path.join', (['_codebase_instantiation_dir', 'self.hash'], {}), '(_codebase_instantiation_dir, self.hash)\n', (3669, 3709), False, 'import os\n'), ((4010, 4066), 'typed_python.Codebase.Codebase.Instantiate', 'TypedPythonCodebase.Instantiate', (['fileContents', 'disk_path'], {}), '(fileContents, disk_path)\n', (4041, 4066), True, 'from typed_python.Codebase import Codebase as TypedPythonCodebase\n'), ((3231, 3274), 'os.path.exists', 'os.path.exists', (['_codebase_instantiation_dir'], {}), '(_codebase_instantiation_dir)\n', (3245, 3274), False, 'import os\n'), ((3300, 3340), 'os.makedirs', 'os.makedirs', (['_codebase_instantiation_dir'], {}), '(_codebase_instantiation_dir)\n', (3311, 3340), False, 'import os\n'), ((3400, 3427), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3417, 3427), False, 'import logging\n'), ((3550, 3577), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3567, 3577), False, 'import logging\n'), ((3783, 3820), 'object_database.current_transaction', 'object_database.current_transaction', ([], {}), 
'()\n', (3818, 3820), False, 'import object_database\n')]
|
import requests
from insurance_claims.record_types import *
# Base URL of the local claims service; all client calls below are built on it.
base_url = 'http://127.0.0.1:5000/'
class App:
    """HTTP client facade for the insurance-claims service.

    Each private method wraps one ``claim-request`` endpoint under
    ``base_url``; ``evaluate`` runs the full claim-processing pipeline.
    """
    def evaluate(self, save_dataset=False):
        """Run the full pipeline and return the parsed claim payouts.

        When save_dataset is True, the computed payouts are also persisted
        via the save_claims endpoint before being returned.
        """
        calculated_claims_value = self._calculate_claims_value()
        classified_claims_value = self._classify_claims_value(calculated_claims_value)
        classified_claims_complexity = self._classify_claims_complexity(classified_claims_value)
        self._update_claims_complexity(classified_claims_complexity)
        claim_payouts = self._calculate_payments(classified_claims_complexity)
        if save_dataset:
            self._save_claims(claim_payouts)
        return self.get_outputs(claim_payouts)

    # Client to calculate claims value
    def _calculate_claims_value(self):
        url = base_url + 'claim-request/calculate_claims_value'
        response = requests.post(url, json={})
        return response.json()

    # Client to classify claims by value
    def _classify_claims_value(self, claims):
        url = base_url + 'claim-request/classify_claims_value'
        response = requests.post(url, json=claims)
        return response.json()

    # Client to classify claims by complexity
    def _classify_claims_complexity(self, classified_claims_value):
        url = base_url + 'claim-request/classify_claims_complexity'
        response = requests.post(url, json=classified_claims_value)
        return response.json()

    # Client to update claims by complexity (fire-and-forget; response ignored)
    def _update_claims_complexity(self, classified_claims_complexity):
        url = base_url + 'claim-request/update_claims_complexity'
        requests.post(url, json=classified_claims_complexity)

    # Client to calculate payments
    def _calculate_payments(self, classified_claims_complexity):
        url = base_url + 'claim-request/calculate_payments'
        response = requests.post(url, json=classified_claims_complexity)
        return response.json()

    # Client to save claims
    def _save_claims(self, claim_payouts):
        url = base_url + 'claim-request/save_claims'
        response = requests.post(url, json=claim_payouts)
        return response.json()

    def add_data(self, input_records):
        """Upload raw input claim records to the service."""
        self._add_claims_requests(input_records)

    # Client to add claims data
    def _add_claims_requests(self, input_records):
        # Only call the endpoint when there is something to add.
        if input_records:
            claims = list(input_records)
            url = base_url + 'claim-request/add_claims'
            requests.post(url, json=claims)

    # Parsing data for main program
    def get_outputs(self, claim_payouts):
        """Convert raw payout dicts into ClaimPayout records."""
        return self._parse_claim_payouts(claim_payouts)

    # Parses payouts
    def _parse_claim_payouts(self, claim_payouts):
        return [ClaimPayout.from_dict(claim) for claim in claim_payouts]
# Script entry point. NOTE(review): only constructs the client; neither
# evaluate() nor add_data() is invoked here — confirm this is intentional.
if __name__ == "__main__":
    app = App()
|
[
"requests.post"
] |
[((834, 861), 'requests.post', 'requests.post', (['url'], {'json': '{}'}), '(url, json={})\n', (847, 861), False, 'import requests\n'), ((1121, 1152), 'requests.post', 'requests.post', (['url'], {'json': 'claims'}), '(url, json=claims)\n', (1134, 1152), False, 'import requests\n'), ((1444, 1492), 'requests.post', 'requests.post', (['url'], {'json': 'classified_claims_value'}), '(url, json=classified_claims_value)\n', (1457, 1492), False, 'import requests\n'), ((1782, 1835), 'requests.post', 'requests.post', (['url'], {'json': 'classified_claims_complexity'}), '(url, json=classified_claims_complexity)\n', (1795, 1835), False, 'import requests\n'), ((2016, 2069), 'requests.post', 'requests.post', (['url'], {'json': 'classified_claims_complexity'}), '(url, json=classified_claims_complexity)\n', (2029, 2069), False, 'import requests\n'), ((2283, 2321), 'requests.post', 'requests.post', (['url'], {'json': 'claim_payouts'}), '(url, json=claim_payouts)\n', (2296, 2321), False, 'import requests\n'), ((2781, 2812), 'requests.post', 'requests.post', (['url'], {'json': 'claims'}), '(url, json=claims)\n', (2794, 2812), False, 'import requests\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE(review): `os` appears unused in this script — confirm before removing.
import os
# BUGFIX: the flask.ext.* import namespace was removed in Flask 1.0;
# extensions are imported under their own package names instead.
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from yabab import app, db

# Load app configuration, then wire up the migration helper and the
# CLI manager's 'db' command group.
app.config.from_object('app_conf')
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
|
[
"yabab.app.config.from_object",
"flask.ext.migrate.Migrate",
"flask.ext.script.Manager"
] |
[((176, 210), 'yabab.app.config.from_object', 'app.config.from_object', (['"""app_conf"""'], {}), "('app_conf')\n", (198, 210), False, 'from yabab import app, db\n'), ((222, 238), 'flask.ext.migrate.Migrate', 'Migrate', (['app', 'db'], {}), '(app, db)\n', (229, 238), False, 'from flask.ext.migrate import Migrate, MigrateCommand\n'), ((249, 261), 'flask.ext.script.Manager', 'Manager', (['app'], {}), '(app)\n', (256, 261), False, 'from flask.ext.script import Manager\n')]
|
from .joint_representation import Joint_Representaion_Learner
from .seq2seq import Seq2Seq
from .rnn import Hierarchical_Encoder#Encoder_Baseline, LSTM_Decoder
from .bert import BertEncoder, BertDecoder, NVADecoder, DirectDecoder, APDecoder, SignalDecoder, Signal3Decoder, Signal2Decoder, NVDecoder, MSDecoder, ARDecoder_with_attribute_generation, BeamDecoder
from .bert_pytorch import BertDecoder as BD
from .decoder import LSTM_Decoder, LSTM_GCC_Decoder, LSTM_Decoder_2stream, Top_Down_Decoder
from .encoder import Encoder_Baseline, Progressive_Encoder, SVD_Encoder, Input_Embedding_Layer, Semantics_Enhanced_IEL, HighWay_IEL, Encoder_HighWay, LEL
from .rnn import ENSEMBLE_Decoder
import torch
import torch.nn as nn
def get_preEncoder(opt, input_size):
    """Build the optional pre-encoder for the given options.

    Returns a (pre_encoder, output_size) pair where pre_encoder may be None
    and output_size lists the per-modality feature sizes after pre-encoding
    (a fresh list; the input list is never mutated).
    """
    pre_encoder = None
    output_size = list(input_size)

    if opt.get('use_preEncoder', False):
        iel_dim = opt.get('dim_iel', opt['dim_hidden'])
        selected = opt.get('preEncoder_modality', '')
        if selected:
            # Only the explicitly listed modalities pass through the
            # embedding layer; the rest are skipped.
            skip_info = [1] * len(opt['modality'])
            for modality_char in selected:
                idx = opt['modality'].index(modality_char)
                skip_info[idx] = 0
                output_size[idx] = opt['dim_hidden']
        else:
            # No explicit selection: embed every modality.
            skip_info = [0] * len(opt['modality'])
            output_size = [iel_dim] * len(input_size)
        pre_encoder = Input_Embedding_Layer(
            input_size=input_size,
            hidden_size=iel_dim,
            skip_info=skip_info,
            name=opt['modality'].upper()
        )

    if opt.get('use_SEIEL', False):
        # The semantics-enhanced IEL supersedes any plain embedding layer.
        output_size = [opt['num_factor']] * len(input_size)
        pre_encoder = Semantics_Enhanced_IEL(
            input_size=input_size,
            semantics_size=opt['dim_s'],
            nf=opt['num_factor'],
            name=opt['modality'],
            multiply=opt.get('SEIEL_multiply', False)
        )

    return pre_encoder, output_size
def get_encoder(opt, input_size, mapping, modality):
    """Build the encoder selected by opt['encoder_type'].

    Falls back to BertEncoder (single-modality only) when the encoder type
    is not one of IPE / IEL / LEL / MME / GRU. May mutate opt in the GRU
    branch (writes opt['skip_info']).
    """
    hidden_size = [opt['dim_hidden']] * len(modality)
    if opt['encoder_type'] == 'IPE':
        if opt.get('MLP', False):
            from .rnn import MLP
            encoder = MLP(sum(input_size), opt['dim_hidden'], 'a' in modality)
        elif opt.get('MSLSTM', False):
            from .rnn import Encoder_Baseline
            encoder = Encoder_Baseline(input_size=input_size, output_size=hidden_size, name=modality.upper(), encoder_type='mslstm')
        else:
            encoder = Hierarchical_Encoder(input_size = input_size, hidden_size = hidden_size, opt = opt)
    elif opt['encoder_type'] == 'IEL':
        encoder = HighWay_IEL(
            input_size=input_size,
            hidden_size=hidden_size,
            name=modality.upper(),
            dropout=opt['encoder_dropout']
        )
    elif opt['encoder_type'] == 'LEL':
        encoder = LEL(
            input_size=input_size,
            hidden_size=hidden_size,
            name=modality.upper(),
            dropout=opt['encoder_dropout']
        )
    elif opt['encoder_type'] == 'MME':
        # NOTE(review): MultiModalEncoder is not in this module's visible
        # imports — confirm where it is defined; this branch would raise
        # NameError as shown.
        encoder = MultiModalEncoder(
            input_size=input_size,
            hidden_size=opt['dim_hidden'],
            dropout=opt['encoder_dropout'],
            name=opt['modality'].upper(),
            multimodal_fusion_type=opt.get('multimodal_fusion_type', 'mean'),
            num_heads=opt.get('num_heads', 8),
            att_dropout=opt.get('att_dropout', 0.0),
            with_layernorm=opt.get('with_norm', True),
            shared_layernorm=opt.get('shared_layernorm', False),
            with_residual=opt.get('with_residual', True),
            pivot_idx=0,
            include_pivot=opt.get('include_pivot', False),
            n_frames=opt['n_frames'],
            watch=opt.get('mm_watch', 1)
        )
    elif opt['encoder_type'] == 'GRU':
        if opt.get('use_chain', False):
            encoder = Progressive_Encoder(
                input_size=input_size,
                output_size=hidden_size,
                opt=opt,
                return_gate_info=opt.get('return_gate_info', False)
            )
        else:
            # For each modality, collect the indices of the modalities that
            # serve as its auxiliary inputs (from opt['auxiliary_for_<m>']).
            auxiliary_pos = []
            for char in modality:
                auxiliary_for_this_input = opt.get('auxiliary_for_%s'%char, '')
                pos = []
                for c in auxiliary_for_this_input:
                    pos.append(modality.index(c))
                auxiliary_pos.append(pos)
            skip_info = opt.get('skip_info', [])
            if not len(skip_info):
                skip_info = [0] * len(modality)
            # Side effect: the normalized skip_info is written back into opt
            # (read later by get_joint_representation_learner).
            opt['skip_info'] = skip_info
            from models.encoder import Encoder_Baseline_TwoStream
            if opt.get('two_stream', False):
                E = Encoder_Baseline_TwoStream
                # The audio stream may use its own hidden size.
                if 'a' in modality:
                    hidden_size[modality.index('a')] = opt.get('dim_hidden_a', opt['dim_hidden'])
            else:
                E = Encoder_Baseline
            #E = Encoder_HighWay
            if opt.get('use_svd', False):
                encoder = SVD_Encoder(
                    input_size=input_size,
                    output_size=hidden_size,
                    name=modality.upper(),
                    auxiliary_pos=auxiliary_pos,
                    skip_info=skip_info,
                    return_gate_info=opt.get('return_gate_info', False),
                    num_factor=opt['num_factor']
                )
            else:
                encoder = E(
                    input_size=input_size,
                    output_size=hidden_size,
                    name=modality.upper(),
                    auxiliary_pos=auxiliary_pos,
                    skip_info=skip_info,
                    return_gate_info=opt.get('return_gate_info', False),
                    opt=opt
                )
    else:
        # BertEncoder handles exactly one modality; mapping gives its raw size.
        assert len(modality) == 1
        encoder = BertEncoder(feats_size = mapping[modality], config = opt)
    return encoder
def get_joint_representation_learner(opt):
    """Build the joint-representation learner.

    Computes the per-branch feature sizes implied by the encoder
    configuration and hands them to Joint_Representaion_Learner.
    """
    modality = opt['modality'].lower()
    dim = opt['dim_hidden']
    encoder_type = opt['encoder_type']

    if encoder_type == 'GRU':
        if opt.get('use_chain', False):
            # Chain encoders emit one feature, or two when both ends are used.
            feats_size = [dim, dim] if opt.get('chain_both') else [dim]
        elif ((opt['multi_scale_context_attention'] and not opt.get('query_all', False))
                or opt.get('addition', False)
                or opt.get('gated_sum', False)
                or opt.get('temporal_concat', False)):
            # These fusion variants collapse to a single hidden-size feature.
            feats_size = [dim]
        elif opt.get('two_stream', False):
            if 'a' in opt['modality']:
                # Audio stream may carry its own hidden size.
                feats_size = [dim, opt.get('dim_hidden_a', dim)]
            else:
                feats_size = [dim]
        else:
            # One feature per non-skipped modality, doubled when bidirectional.
            width = dim * (2 if opt.get('bidirectional', False) else 1)
            feats_size = [width] * (len(modality) - sum(opt['skip_info']))
    elif encoder_type in ('IEL', 'LEL'):
        feats_size = [dim] * len(modality)
    else:
        feats_size = [dim]

    return Joint_Representaion_Learner(feats_size, opt)
def get_decoder(opt):
    """Construct the caption decoder selected by opt['decoder_type'] / opt['method'].

    Raises:
        ValueError: if the (decoder_type, method) combination is not
            recognized. Previously an unknown method in the final branch
            fell through and surfaced as an UnboundLocalError on `decoder`.
    """
    if opt['decoder_type'] == 'LSTM':
        if opt.get('decoder_gcc', False):
            decoder = LSTM_GCC_Decoder(opt)
        elif opt.get('two_stream', False):
            decoder = LSTM_Decoder_2stream(opt)
        elif opt.get('top_down', False):
            decoder = Top_Down_Decoder(opt)
        else:
            decoder = LSTM_Decoder(opt)
    elif opt['decoder_type'] == 'ENSEMBLE':
        decoder = ENSEMBLE_Decoder(opt)
    elif opt['decoder_type'] == 'ARFormer':
        if opt['method'] == 'ag':
            decoder = ARDecoder_with_attribute_generation(config=opt)
        else:
            decoder = BertDecoder(config=opt)
    else:
        # Non-autoregressive decoder family, dispatched on 'method'.
        if opt['method'] == 'mp':
            decoder = BertDecoder(config=opt)
        elif opt['method'] == 'nva':
            decoder = NVADecoder(config=opt)
        elif opt['method'] == 'direct':
            decoder = DirectDecoder(config=opt)
        elif opt['method'] == 'ap':
            decoder = APDecoder(config=opt)
        elif opt['method'] == 'signal':
            decoder = SignalDecoder(config=opt)
        elif opt['method'] == 'signal3':
            decoder = Signal3Decoder(config=opt)
        elif opt['method'] == 'signal2':
            decoder = Signal2Decoder(config=opt)
        elif opt['method'] == 'nv':
            decoder = NVDecoder(config=opt)
        elif opt['method'] == 'ms':
            decoder = MSDecoder(config=opt)
        else:
            # Fail fast with a clear message instead of UnboundLocalError.
            raise ValueError(
                "Unknown method '%s' for decoder_type '%s'" % (opt['method'], opt['decoder_type']))
    return decoder
def get_beam_decoder(opt, embedding):
    """Return a BeamDecoder when beam decoding is enabled, otherwise None."""
    if not opt.get('use_beam_decoder', False):
        return None
    return BeamDecoder(opt, embedding)
def get_model(opt):
    """Assemble the full Seq2Seq captioning model from the option dict.

    Builds pre-encoder, encoder, joint-representation learner, auxiliary
    task predictor, decoder, output projection and (optionally) a beam
    decoder, then wires them into a Seq2Seq instance.
    """
    modality = opt['modality'].lower()
    input_size = []
    # Raw per-modality feature dimensions, keyed by modality character.
    mapping = {
        'i': opt['dim_i'],
        'm': opt['dim_m'],
        'a': opt['dim_a']
    }
    for char in modality:
        assert char in mapping.keys()
        input_size.append(mapping[char])
    # The pre-encoder may change the effective per-modality sizes.
    preEncoder, input_size = get_preEncoder(opt, input_size)
    encoder = get_encoder(opt, input_size, mapping, modality)
    if opt.get('intra_triplet', False) or opt['encoder_type'] == 'MME':
        joint_representation_learner = None
    else:
        joint_representation_learner = get_joint_representation_learner(opt)
    if len(opt['crit']) == 1:
        # only the main task: language generation
        if not opt.get('use_beam_decoder', False) and not opt.get('use_rl', False):
            assert opt['crit'][0] == 'lang'
    # Count criteria other than 'lang'/'tag'; any such criterion requires
    # an auxiliary task predictor.
    have_auxiliary_tasks = sum([(1 if item not in ['lang', 'tag'] else 0) for item in opt['crit']])
    # NOTE(review): Auxiliary_Task_Predictor is not in this module's visible
    # imports — confirm where it is defined before relying on this path.
    auxiliary_task_predictor = Auxiliary_Task_Predictor(opt) if have_auxiliary_tasks else None
    decoder = get_decoder(opt)
    # Projection from hidden state to vocabulary logits (no bias).
    tgt_word_prj = nn.Linear(opt["dim_hidden"], opt["vocab_size"], bias=False)
    # Beam decoder shares the main decoder's embedding table.
    beam_decoder = get_beam_decoder(opt, decoder.embedding)
    model = Seq2Seq(
        preEncoder = preEncoder,
        encoder = encoder,
        joint_representation_learner = joint_representation_learner,
        auxiliary_task_predictor = auxiliary_task_predictor,
        decoder = decoder,
        tgt_word_prj = tgt_word_prj,
        beam_decoder = beam_decoder,
        opt = opt
    )
    return model
|
[
"torch.nn.Linear"
] |
[((9905, 9964), 'torch.nn.Linear', 'nn.Linear', (["opt['dim_hidden']", "opt['vocab_size']"], {'bias': '(False)'}), "(opt['dim_hidden'], opt['vocab_size'], bias=False)\n", (9914, 9964), True, 'import torch.nn as nn\n')]
|
import logging
from .wrapper import PipelineWrapper
import os
import json
# Configure root logging for the pipeline run (INFO and above).
logging.basicConfig(level=logging.INFO)
def main():
    """Run the pipeline once on a fixed payload and persist its output."""
    pipeline = PipelineWrapper()
    config = pipeline.get_config()
    payload = json.dumps({"data": "hello"})
    result = pipeline.run(payload)
    # Output location comes from the wrapper's own configuration.
    output_file = os.path.join(config["output_path"], "output.txt")
    with open(output_file, 'w') as f:
        f.write(result["output"])
    logging.info(result)


if __name__ == "__main__":
    main()
|
[
"logging.info",
"os.path.join",
"logging.basicConfig",
"json.dumps"
] |
[((74, 113), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (93, 113), False, 'import logging\n'), ((363, 383), 'logging.info', 'logging.info', (['output'], {}), '(output)\n', (375, 383), False, 'import logging\n'), ((218, 247), 'json.dumps', 'json.dumps', (["{'data': 'hello'}"], {}), "({'data': 'hello'})\n", (228, 247), False, 'import json\n'), ((263, 312), 'os.path.join', 'os.path.join', (["config['output_path']", '"""output.txt"""'], {}), "(config['output_path'], 'output.txt')\n", (275, 312), False, 'import os\n')]
|
import sys
from copy import copy
import numpy as np
from moviepy.audio.io.ffmpeg_audiowriter import ffmpeg_audiowrite
from moviepy.decorators import requires_duration
from moviepy.Clip import Clip
# optimize range in function of Python's version
# optimize range in function of Python's version:
# on Python 2, rebind range to the lazy xrange so loops don't build lists.
if sys.version_info < (3,):
    range = xrange
class AudioClip(Clip):
    """Base class for audio clips.

    See ``SoundClip`` and ``CompositeSoundClip`` for usable classes.

    An AudioClip is a Clip with a ``get_frame`` attribute of
    the form `` t -> [ f_t ]`` for mono sound and
    ``t-> [ f1_t, f2_t ]`` for stereo sound (the arrays are Numpy arrays).

    The `f_t` are floats between -1 and 1. These bounds can be
    trespassed without problems (the program will put the
    sound back into the bounds at conversion time, without much impact).

    Parameters
    -----------

    get_frame
      A function `t-> frame at time t`. The frame does not mean much
      for a sound, it is just a float. What 'makes' the sound are
      the variations of that float in the time.

    nchannels
      Number of channels (one or two for mono or stereo).

    Examples
    ---------

    >>> # Plays the note A (a sine wave of frequency 404HZ)
    >>> import numpy as np
    >>> gf = lambda t : 2*[ np.sin(404 * 2 * np.pi * t) ]
    >>> clip = AudioClip().set_get_frame(gf)
    >>> clip.set_duration(5).preview()
    """

    def __init__(self, get_frame = None):
        Clip.__init__(self)
        if get_frame:
            self.get_frame = get_frame
            # Infer the channel count from the frame at t=0:
            # iterable -> one channel per element, scalar -> mono.
            frame0 = self.get_frame(0)
            if hasattr(frame0, '__iter__'):
                self.nchannels = len(list(frame0))
            else:
                self.nchannels = 1

    @requires_duration
    def to_soundarray(self,tt=None,fps=None, nbytes=2):
        """
        Transforms the sound into an array that can be played by pygame
        or written in a wav file. See ``AudioClip.preview``.

        Parameters
        ------------

        tt
          Optional array of sample times; when None, it is derived from
          the clip duration and ``fps``.

        fps
          Frame rate of the sound for the conversion.
          44100 for top quality. NOTE(review): required when ``tt`` is
          None — otherwise ``1.0/fps`` below raises TypeError.

        nbytes
          Number of bytes to encode the sound: 1 for 8bit sound,
          2 for 16bit, 4 for 32bit sound.
        """
        if tt is None:
            tt = np.arange(0,self.duration, 1.0/fps)
        snd_array = self.get_frame(tt)
        # Clamp samples into (-0.99, 0.99) before scaling to integers so the
        # conversion below cannot overflow the target integer type.
        snd_array = np.maximum(-0.99,
                               np.minimum(0.99,snd_array))
        inttype = {1:'int8',2:'int16', 4:'int32'}[nbytes]
        # Scale [-1, 1] floats to the full signed integer range.
        return (2**(8*nbytes-1)*snd_array).astype(inttype)

    @requires_duration
    def to_audiofile(self,filename, fps=44100, nbytes=2,
                     buffersize=2000, codec='libvorbis',
                     bitrate=None, verbose=True):
        """Write the clip to an audio file via ffmpeg.

        codecs = { 'libmp3lame': 'mp3',
                   'libvorbis':'ogg',
                   'libfdk_aac':'m4a',
                   'pcm_s16le':'wav',
                   'pcm_s32le': 'wav'}
        """
        return ffmpeg_audiowrite(self,filename, fps, nbytes, buffersize,
                                 codec, bitrate, verbose)
class AudioArrayClip(AudioClip):
    """
    An audio clip made from a sound array.

    Parameters
    -----------

    array
      A Numpy array representing the sound, of size Nx1 for mono,
      Nx2 for stereo.

    fps
      Frames per second : speed at which the sound is supposed to be
      played.
    """

    def __init__(self, array, fps):
        Clip.__init__(self)
        self.array = array
        self.fps = fps
        self.duration = 1.0 * len(array) / fps

        def get_frame(t):
            """ complicated, but must be able to handle the case where t
            is a list of the form sin(t) """
            if isinstance(t, np.ndarray):
                array_inds = (self.fps*t).astype(int)
                # BUGFIX: use >= 0 so the sample at index 0 is included;
                # the original `array_inds > 0` silently zeroed out the
                # very first sample of every vectorized read.
                in_array = (array_inds >= 0) & (array_inds < len(self.array))
                # BUGFIX: allocate one column per actual channel instead of
                # a hard-coded stereo (len(t), 2) buffer, so mono clips
                # return mono-shaped frames.
                result = np.zeros((len(t), self.nchannels))
                result[in_array] = self.array[array_inds[in_array]]
                return result
            else:
                # Scalar time: index directly, returning silence of the
                # right shape when t falls outside the array.
                i = int(self.fps * t)
                if i < 0 or i >= len(self.array):
                    return 0*self.array[0]
                else:
                    return self.array[i]
        self.get_frame = get_frame
        # nchannels is derived from the scalar path of get_frame above, so
        # it must be set after get_frame is installed.
        self.nchannels = len(list(self.get_frame(0)))
class CompositeAudioClip(AudioClip):
    """ Clip made by composing several AudioClips.

    An audio clip made by putting together several audio clips.

    Parameters
    ------------

    clips
      List of audio clips, which may start playing at different times or
      together. If all have their ``duration`` attribute set, the
      duration of the composite clip is computed automatically.
    """

    def __init__(self, clips):
        Clip.__init__(self)
        self.clips = clips
        ends = [c.end for c in self.clips]
        # Composite channel count is the widest of its components.
        self.nchannels = max([c.nchannels for c in self.clips])
        # Only set duration/end when every component clip has an end time.
        if not any([(e is None) for e in ends]):
            self.duration = max(ends)
            self.end = max(ends)

        def get_frame(t):
            # buggy
            # Mix frames of all clips that are playing at time t, shifted
            # into each clip's own time base (t - c.start).
            played_parts = [c.is_playing(t) for c in self.clips]
            sounds= [c.get_frame(t - c.start)*np.array([part]).T
                     for c,part in zip(self.clips, played_parts)
                     if (part is not False) ]
            # Zero base so summation works even when no clip is playing;
            # shape depends on whether t is a scalar or an array of times.
            if isinstance(t,np.ndarray):
                zero = np.zeros((len(t),self.nchannels))
            else:
                zero = np.zeros(self.nchannels)
            return zero + sum(sounds)
        self.get_frame = get_frame
|
[
"numpy.minimum",
"moviepy.Clip.Clip.__init__",
"numpy.zeros",
"numpy.arange",
"numpy.array",
"moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowrite"
] |
[((1488, 1507), 'moviepy.Clip.Clip.__init__', 'Clip.__init__', (['self'], {}), '(self)\n', (1501, 1507), False, 'from moviepy.Clip import Clip\n'), ((3164, 3251), 'moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowrite', 'ffmpeg_audiowrite', (['self', 'filename', 'fps', 'nbytes', 'buffersize', 'codec', 'bitrate', 'verbose'], {}), '(self, filename, fps, nbytes, buffersize, codec, bitrate,\n verbose)\n', (3181, 3251), False, 'from moviepy.audio.io.ffmpeg_audiowriter import ffmpeg_audiowrite\n'), ((3669, 3688), 'moviepy.Clip.Clip.__init__', 'Clip.__init__', (['self'], {}), '(self)\n', (3682, 3688), False, 'from moviepy.Clip import Clip\n'), ((5063, 5082), 'moviepy.Clip.Clip.__init__', 'Clip.__init__', (['self'], {}), '(self)\n', (5076, 5082), False, 'from moviepy.Clip import Clip\n'), ((2332, 2370), 'numpy.arange', 'np.arange', (['(0)', 'self.duration', '(1.0 / fps)'], {}), '(0, self.duration, 1.0 / fps)\n', (2341, 2370), True, 'import numpy as np\n'), ((2533, 2560), 'numpy.minimum', 'np.minimum', (['(0.99)', 'snd_array'], {}), '(0.99, snd_array)\n', (2543, 2560), True, 'import numpy as np\n'), ((5851, 5875), 'numpy.zeros', 'np.zeros', (['self.nchannels'], {}), '(self.nchannels)\n', (5859, 5875), True, 'import numpy as np\n'), ((5543, 5559), 'numpy.array', 'np.array', (['[part]'], {}), '([part])\n', (5551, 5559), True, 'import numpy as np\n')]
|
import os
import swamp
import unittest
import joblib
from operator import itemgetter
from swamp.utils import remove, create_tempfile
from swamp.search.searchtarget import SearchTarget
# Test fixture: dummy TOPCONS topology prediction. In the topology string,
# 'M' marks predicted transmembrane segments, 'i'/'o' inside/outside loops.
TOPCONS_DUMY = """TOPCONS predicted topology:
iiiiiiiiiiiiiiMMMMMMMMMMMMMMMMMMMMMooooooMMMMMMMMMMMMMMMMMMMMMiiiiiiiiiiMMMMMMMMMMMMMMMMMMMMMooooooMMMMMMMMMMMMMMMMMMMMMiiiiiiiiiiiMMMMMMMMMMMMMMMMMMMMMoooooooooooMMMMMMMMMMMMMMMMMMMMMiiiiiiiiiiiiiiMMMMMMMMMMMMMMMMMMMMMoooooooMMMMMMMMMMMMMMMMMMMMMiiiiiiiiiiiiiiiiiiiiiiMMMMMMMMMMMMMMMMMMMMMoMMMMMMMMMMMMMMMMMMMMMiiiiiiiiiiiiiiiiiiii
"""
PDB_DUMY = """CRYST1 73.330 73.330 163.520 90.00 90.00 90.00 P 41 2 2 8
REMARK 465
REMARK 465 MISSING RESIDUES
REMARK 465 THE FOLLOWING RESIDUES WERE NOT LOCATED IN THE
REMARK 465 EXPERIMENT. (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN
REMARK 465 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)
REMARK 465
REMARK 465 M RES C SSSEQI
REMARK 465 MET A -4
REMARK 465 VAL A -3
REMARK 465 ALA A -2
REMARK 465 ALA A -1
REMARK 465 SER A 0
REMARK 465 MET A 1
REMARK 465 GLY A 98
REMARK 465 LYS A 99
REMARK 465 HIS A 212
REMARK 465 LYS A 215
ATOM 760 N VAL A 100 17.668 61.385 96.142 1.00 36.12 N
ANISOU 760 N VAL A 100 4189 5832 3703 370 -20 96 N
ATOM 761 CA VAL A 100 16.510 62.175 95.720 1.00 34.76 C
ANISOU 761 CA VAL A 100 3981 5676 3550 300 62 84 C
ATOM 762 C VAL A 100 16.924 63.214 94.641 1.00 39.15 C
ANISOU 762 C VAL A 100 4461 6274 4139 307 77 -9 C
ATOM 763 O VAL A 100 16.205 63.379 93.656 1.00 38.11 O
ANISOU 763 O VAL A 100 4288 6134 4059 275 108 -15 O
ATOM 764 CB VAL A 100 15.715 62.769 96.916 1.00 37.75 C
ANISOU 764 CB VAL A 100 4379 6111 3852 257 129 130 C
ATOM 765 CG1 VAL A 100 14.623 63.727 96.450 1.00 36.89 C
ANISOU 765 CG1 VAL A 100 4216 6025 3776 215 217 110 C
ATOM 766 CG2 VAL A 100 15.112 61.661 97.786 1.00 38.05 C
ANISOU 766 CG2 VAL A 100 4485 6113 3858 228 124 244 C
ATOM 767 N GLY A 101 18.105 63.825 94.809 1.00 36.09 N
ANISOU 767 N GLY A 101 4052 5944 3718 343 50 -70 N
ATOM 768 CA GLY A 101 18.670 64.791 93.867 1.00 34.68 C
ANISOU 768 CA GLY A 101 3805 5805 3566 340 63 -145 C
ATOM 769 C GLY A 101 18.998 64.193 92.514 1.00 37.41 C
ANISOU 769 C GLY A 101 4110 6137 3967 361 26 -177 C
ATOM 770 O GLY A 101 18.818 64.843 91.481 1.00 35.74 O
ANISOU 770 O GLY A 101 3843 5954 3784 335 57 -198 O
ATOM 771 N VAL A 102 19.463 62.931 92.513 1.00 34.96 N
ANISOU 771 N VAL A 102 3830 5784 3671 410 -36 -177 N
ATOM 772 CA VAL A 102 19.819 62.187 91.297 1.00 34.18 C
ANISOU 772 CA VAL A 102 3699 5666 3623 436 -67 -233 C
ATOM 773 C VAL A 102 18.531 61.710 90.593 1.00 37.41 C
ANISOU 773 C VAL A 102 4118 6023 4073 373 -37 -212 C
ATOM 774 O VAL A 102 18.409 61.831 89.370 1.00 35.53 O
ANISOU 774 O VAL A 102 3822 5829 3850 347 -28 -263 O
ATOM 775 CB VAL A 102 20.820 61.047 91.624 1.00 38.45 C
ANISOU 775 CB VAL A 102 4268 6161 4180 528 -137 -249 C
ATOM 776 CG1 VAL A 102 21.126 60.185 90.399 1.00 38.40 C
ANISOU 776 CG1 VAL A 102 4237 6117 4236 561 -155 -331 C
ATOM 777 CG2 VAL A 102 22.111 61.608 92.229 1.00 37.89 C
ANISOU 777 CG2 VAL A 102 4155 6188 4054 582 -172 -273 C
ATOM 778 N ILE A 103 17.542 61.236 91.381 1.00 34.36 N
ANISOU 778 N ILE A 103 3794 5569 3692 337 -19 -133 N
ATOM 779 CA ILE A 103 16.260 60.794 90.844 1.00 33.66 C
ANISOU 779 CA ILE A 103 3704 5449 3636 259 10 -110 C
ATOM 780 C ILE A 103 15.544 61.966 90.187 1.00 37.70 C
ANISOU 780 C ILE A 103 4134 6061 4131 214 61 -102 C
ATOM 781 O ILE A 103 15.031 61.813 89.070 1.00 37.84 O
ANISOU 781 O ILE A 103 4097 6118 4163 169 63 -129 O
ATOM 782 CB ILE A 103 15.417 60.020 91.896 1.00 37.07 C
ANISOU 782 CB ILE A 103 4214 5798 4074 220 22 -17 C
ATOM 783 CG1 ILE A 103 16.062 58.633 92.170 1.00 37.34 C
ANISOU 783 CG1 ILE A 103 4330 5703 4156 266 -32 -15 C
ATOM 784 CG2 ILE A 103 13.920 59.876 91.451 1.00 37.66 C
ANISOU 784 CG2 ILE A 103 4258 5883 4167 115 67 14 C
ATOM 785 CD1 ILE A 103 15.598 57.949 93.432 1.00 47.42 C
ANISOU 785 CD1 ILE A 103 5694 6900 5425 246 -28 105 C
ATOM 786 N LEU A 104 15.594 63.153 90.831 1.00 33.68 N
ANISOU 786 N LEU A 104 3611 5596 3591 231 101 -71 N
ATOM 787 CA LEU A 104 14.977 64.376 90.307 1.00 33.36 C
ANISOU 787 CA LEU A 104 3499 5623 3552 211 157 -47 C
ATOM 788 C LEU A 104 15.511 64.746 88.917 1.00 34.07 C
ANISOU 788 C LEU A 104 3518 5780 3648 214 139 -90 C
ATOM 789 O LEU A 104 14.708 65.012 88.027 1.00 32.90 O
ANISOU 789 O LEU A 104 3303 5692 3505 182 158 -56 O
ATOM 790 CB LEU A 104 15.136 65.530 91.310 1.00 33.69 C
ANISOU 790 CB LEU A 104 3558 5667 3576 234 207 -34 C
ATOM 791 CG LEU A 104 14.360 66.816 91.054 1.00 38.02 C
ANISOU 791 CG LEU A 104 4051 6243 4152 231 283 6 C
ATOM 792 CD1 LEU A 104 12.849 66.546 90.864 1.00 37.78 C
ANISOU 792 CD1 LEU A 104 3981 6236 4138 203 317 79 C
ATOM 793 CD2 LEU A 104 14.564 67.790 92.218 1.00 40.00 C
ANISOU 793 CD2 LEU A 104 4341 6468 4390 248 340 -14 C
ATOM 794 N VAL A 105 16.858 64.715 88.727 1.00 31.14 N
ANISOU 794 N VAL A 105 3147 5419 3264 251 101 -159 N
ATOM 795 CA VAL A 105 17.526 64.969 87.443 1.00 30.96 C
ANISOU 795 CA VAL A 105 3053 5478 3231 250 87 -207 C
ATOM 796 C VAL A 105 17.042 63.944 86.411 1.00 35.03 C
ANISOU 796 C VAL A 105 3543 6022 3746 218 59 -243 C
ATOM 797 O VAL A 105 16.709 64.332 85.295 1.00 35.34 O
ANISOU 797 O VAL A 105 3507 6161 3761 183 71 -233 O
ATOM 798 CB VAL A 105 19.074 64.920 87.574 1.00 35.01 C
ANISOU 798 CB VAL A 105 3565 6008 3728 296 51 -284 C
ATOM 799 CG1 VAL A 105 19.755 64.761 86.208 1.00 34.91 C
ANISOU 799 CG1 VAL A 105 3479 6090 3696 293 33 -351 C
ATOM 800 CG2 VAL A 105 19.596 66.143 88.285 1.00 34.55 C
ANISOU 800 CG2 VAL A 105 3509 5958 3662 295 82 -268 C
ATOM 801 N GLY A 106 17.024 62.660 86.802 1.00 31.90 N
ANISOU 801 N GLY A 106 3210 5538 3374 226 26 -284 N
ATOM 802 CA GLY A 106 16.603 61.553 85.954 1.00 32.79 C
ANISOU 802 CA GLY A 106 3316 5644 3498 184 4 -348 C
ATOM 803 C GLY A 106 15.165 61.656 85.488 1.00 38.66 C
ANISOU 803 C GLY A 106 4015 6444 4230 96 28 -295 C
ATOM 804 O GLY A 106 14.840 61.246 84.373 1.00 39.30 O
ANISOU 804 O GLY A 106 4041 6603 4287 40 16 -354 O
ATOM 805 N CYS A 107 14.292 62.202 86.336 1.00 34.66 N
ANISOU 805 N CYS A 107 3520 5918 3732 83 64 -190 N
ATOM 806 CA CYS A 107 12.871 62.327 86.029 1.00 33.96 C
ANISOU 806 CA CYS A 107 3372 5898 3635 11 89 -126 C
ATOM 807 C CYS A 107 12.559 63.546 85.180 1.00 36.79 C
ANISOU 807 C CYS A 107 3622 6399 3958 17 112 -65 C
ATOM 808 O CYS A 107 11.462 63.650 84.641 1.00 34.93 O
ANISOU 808 O CYS A 107 3306 6262 3703 -34 121 -13 O
ATOM 809 CB CYS A 107 12.047 62.300 87.309 1.00 34.50 C
ANISOU 809 CB CYS A 107 3487 5894 3726 0 126 -44 C
ATOM 810 SG CYS A 107 12.085 60.707 88.159 1.00 39.40 S
ANISOU 810 SG CYS A 107 4227 6357 4386 -36 99 -71 S
ATOM 811 N CYS A 108 13.515 64.471 85.058 1.00 35.29 N
ANISOU 811 N CYS A 108 3424 6224 3761 77 122 -62 N
ATOM 812 CA CYS A 108 13.303 65.682 84.256 1.00 35.81 C
ANISOU 812 CA CYS A 108 3398 6402 3805 88 148 20 C
ATOM 813 C CYS A 108 13.248 65.386 82.748 1.00 39.76 C
ANISOU 813 C CYS A 108 3808 7060 4239 38 113 -8 C
ATOM 814 O CYS A 108 13.805 64.369 82.295 1.00 39.23 O
ANISOU 814 O CYS A 108 3760 7001 4146 8 73 -131 O
ATOM 815 CB CYS A 108 14.373 66.725 84.577 1.00 35.44 C
ANISOU 815 CB CYS A 108 3377 6313 3777 143 174 28 C
ATOM 816 SG CYS A 108 14.063 67.645 86.106 1.00 38.80 S
ANISOU 816 SG CYS A 108 3867 6613 4263 189 240 86 S
ATOM 817 N PRO A 109 12.626 66.278 81.941 1.00 36.74 N
ANISOU 817 N PRO A 109 3324 6811 3825 33 128 102 N
ATOM 818 CA PRO A 109 12.651 66.072 80.487 1.00 36.54 C
ANISOU 818 CA PRO A 109 3203 6973 3707 -19 92 82 C
ATOM 819 C PRO A 109 14.051 66.355 79.917 1.00 39.90 C
ANISOU 819 C PRO A 109 3632 7433 4096 -2 88 25 C
ATOM 820 O PRO A 109 14.984 66.720 80.652 1.00 38.31 O
ANISOU 820 O PRO A 109 3500 7110 3946 47 109 3 O
ATOM 821 CB PRO A 109 11.626 67.086 79.984 1.00 38.91 C
ANISOU 821 CB PRO A 109 3395 7399 3989 -7 112 256 C
ATOM 822 CG PRO A 109 11.709 68.211 80.953 1.00 43.16 C
ANISOU 822 CG PRO A 109 3985 7792 4623 78 173 354 C
ATOM 823 CD PRO A 109 11.921 67.533 82.291 1.00 38.62 C
ANISOU 823 CD PRO A 109 3527 7039 4109 85 182 254 C
ATOM 824 N GLY A 110 14.178 66.209 78.606 1.00 36.96 N
ANISOU 824 N GLY A 110 3172 7251 3622 -51 63 1 N
ATOM 825 CA GLY A 110 15.412 66.500 77.896 1.00 36.79 C
ANISOU 825 CA GLY A 110 3125 7311 3543 -47 66 -43 C
ATOM 826 C GLY A 110 15.754 67.977 77.871 1.00 39.68 C
ANISOU 826 C GLY A 110 3468 7678 3930 -11 108 116 C
ATOM 827 O GLY A 110 14.932 68.832 78.237 1.00 37.90 O
ANISOU 827 O GLY A 110 3235 7404 3761 20 137 269 O
ATOM 828 N GLY A 111 16.997 68.255 77.492 1.00 37.57 N
ANISOU 828 N GLY A 111 3192 7452 3630 -14 120 73 N
ATOM 829 CA GLY A 111 17.524 69.610 77.421 1.00 38.32 C
ANISOU 829 CA GLY A 111 3273 7535 3751 -3 166 207 C
ATOM 830 C GLY A 111 17.587 70.139 76.005 1.00 45.38 C
ANISOU 830 C GLY A 111 4057 8656 4530 -51 169 312 C
ATOM 831 O GLY A 111 17.838 69.373 75.069 1.00 44.83 O
ANISOU 831 O GLY A 111 3925 8771 4339 -97 138 211 O
ATOM 832 N THR A 112 17.360 71.460 75.849 1.00 44.03 N
ANISOU 832 N THR A 112 3865 8469 4397 -38 211 517 N
ATOM 833 CA THR A 112 17.408 72.178 74.572 1.00 46.06 C
ANISOU 833 CA THR A 112 4021 8928 4552 -78 221 676 C
ATOM 834 C THR A 112 18.755 71.978 73.857 1.00 50.30 C
ANISOU 834 C THR A 112 4519 9608 4985 -142 226 577 C
ATOM 835 O THR A 112 18.772 71.719 72.654 1.00 51.95 O
ANISOU 835 O THR A 112 4629 10072 5039 -195 206 587 O
ATOM 836 CB THR A 112 17.000 73.648 74.806 1.00 62.92 C
ANISOU 836 CB THR A 112 6172 10938 6798 -35 277 915 C
ATOM 837 OG1 THR A 112 15.573 73.736 74.755 1.00 69.65 O
ANISOU 837 OG1 THR A 112 6981 11815 7666 17 260 1043 O
ATOM 838 CG2 THR A 112 17.610 74.618 73.802 1.00 64.44 C
ANISOU 838 CG2 THR A 112 6303 11251 6931 -82 311 1083 C
ATOM 839 N ALA A 113 19.873 72.042 74.608 1.00 44.39 N
ANISOU 839 N ALA A 113 3836 8720 4311 -139 253 467 N
ATOM 840 CA ALA A 113 21.222 71.889 74.071 1.00 44.43 C
ANISOU 840 CA ALA A 113 3795 8854 4234 -190 266 364 C
ATOM 841 C ALA A 113 21.413 70.625 73.231 1.00 47.29 C
ANISOU 841 C ALA A 113 4088 9428 4452 -211 225 186 C
ATOM 842 O ALA A 113 22.263 70.622 72.346 1.00 49.14 O
ANISOU 842 O ALA A 113 4242 9859 4571 -263 243 147 O
ATOM 843 CB ALA A 113 22.253 71.957 75.192 1.00 44.42 C
ANISOU 843 CB ALA A 113 3868 8672 4339 -172 285 250 C
ATOM 844 N SER A 114 20.592 69.580 73.459 1.00 42.02 N
ANISOU 844 N SER A 114 3449 8728 3789 -181 179 77 N
ATOM 845 CA SER A 114 20.643 68.338 72.676 1.00 42.09 C
ANISOU 845 CA SER A 114 3406 8907 3678 -208 146 -113 C
ATOM 846 C SER A 114 20.318 68.595 71.190 1.00 48.23 C
ANISOU 846 C SER A 114 4059 9997 4271 -285 144 -27 C
ATOM 847 O SER A 114 20.812 67.864 70.334 1.00 49.03 O
ANISOU 847 O SER A 114 4094 10295 4241 -326 141 -189 O
ATOM 848 CB SER A 114 19.694 67.294 73.251 1.00 42.07 C
ANISOU 848 CB SER A 114 3468 8782 3735 -182 104 -217 C
ATOM 849 OG SER A 114 18.338 67.653 73.046 1.00 40.48 O
ANISOU 849 OG SER A 114 3237 8626 3517 -204 85 -61 O
ATOM 850 N ASN A 115 19.509 69.644 70.892 1.00 45.56 N
ANISOU 850 N ASN A 115 3683 9708 3919 -299 148 228 N
ATOM 851 CA ASN A 115 19.145 70.032 69.522 1.00 47.16 C
ANISOU 851 CA ASN A 115 3760 10222 3938 -367 140 364 C
ATOM 852 C ASN A 115 20.388 70.481 68.749 1.00 52.79 C
ANISOU 852 C ASN A 115 4408 11108 4543 -421 186 376 C
ATOM 853 O ASN A 115 20.575 70.064 67.608 1.00 53.35 O
ANISOU 853 O ASN A 115 4376 11476 4418 -488 179 307 O
ATOM 854 CB ASN A 115 18.077 71.140 69.504 1.00 44.25 C
ANISOU 854 CB ASN A 115 3369 9833 3610 -341 138 667 C
ATOM 855 CG ASN A 115 16.800 70.842 70.247 1.00 49.80 C
ANISOU 855 CG ASN A 115 4113 10392 4415 -288 102 684 C
ATOM 856 OD1 ASN A 115 16.532 69.712 70.684 1.00 42.29 O
ANISOU 856 OD1 ASN A 115 3203 9378 3486 -290 71 477 O
ATOM 857 ND2 ASN A 115 15.969 71.870 70.401 1.00 36.54 N
ANISOU 857 ND2 ASN A 115 2419 8656 2807 -236 113 942 N
ATOM 858 N VAL A 116 21.239 71.306 69.392 1.00 50.44 N
ANISOU 858 N VAL A 116 4165 10635 4366 -402 236 447 N
ATOM 859 CA VAL A 116 22.505 71.841 68.853 1.00 52.16 C
ANISOU 859 CA VAL A 116 4326 10981 4511 -463 290 468 C
ATOM 860 C VAL A 116 23.544 70.715 68.752 1.00 54.64 C
ANISOU 860 C VAL A 116 4613 11384 4763 -465 293 166 C
ATOM 861 O VAL A 116 24.294 70.673 67.783 1.00 55.35 O
ANISOU 861 O VAL A 116 4601 11734 4694 -529 323 124 O
ATOM 862 CB VAL A 116 23.071 73.022 69.706 1.00 56.89 C
ANISOU 862 CB VAL A 116 4998 11337 5279 -457 344 609 C
ATOM 863 CG1 VAL A 116 24.071 73.845 68.896 1.00 58.16 C
ANISOU 863 CG1 VAL A 116 5080 11672 5348 -553 403 725 C
ATOM 864 CG2 VAL A 116 21.955 73.923 70.239 1.00 56.78 C
ANISOU 864 CG2 VAL A 116 5052 11120 5402 -409 344 840 C
ATOM 865 N MET A 117 23.610 69.824 69.766 1.00 49.13 N
ANISOU 865 N MET A 117 4004 10473 4191 -388 266 -33 N
ATOM 866 CA MET A 117 24.552 68.701 69.767 1.00 48.45 C
ANISOU 866 CA MET A 117 3899 10430 4078 -358 268 -314 C
ATOM 867 C MET A 117 24.254 67.711 68.648 1.00 52.17 C
ANISOU 867 C MET A 117 4293 11152 4378 -393 252 -474 C
ATOM 868 O MET A 117 25.194 67.215 68.028 1.00 52.50 O
ANISOU 868 O MET A 117 4257 11371 4319 -405 284 -646 O
ATOM 869 CB MET A 117 24.629 68.000 71.128 1.00 49.33 C
ANISOU 869 CB MET A 117 4128 10247 4370 -260 239 -453 C
ATOM 870 CG MET A 117 25.150 68.879 72.238 1.00 52.66 C
ANISOU 870 CG MET A 117 4613 10460 4935 -235 258 -352 C
ATOM 871 SD MET A 117 26.786 69.586 71.949 1.00 59.01 S
ANISOU 871 SD MET A 117 5326 11401 5696 -287 317 -356 S
ATOM 872 CE MET A 117 26.451 71.275 72.409 1.00 55.97 C
ANISOU 872 CE MET A 117 4993 10867 5406 -350 352 -71 C
ATOM 873 N THR A 118 22.954 67.453 68.369 1.00 48.22 N
ANISOU 873 N THR A 118 3801 10684 3838 -416 208 -425 N
ATOM 874 CA THR A 118 22.510 66.589 67.261 1.00 49.35 C
ANISOU 874 CA THR A 118 3864 11087 3801 -477 189 -573 C
ATOM 875 C THR A 118 22.875 67.211 65.915 1.00 54.31 C
ANISOU 875 C THR A 118 4351 12081 4202 -570 221 -475 C
ATOM 876 O THR A 118 23.249 66.476 64.997 1.00 55.68 O
ANISOU 876 O THR A 118 4443 12501 4211 -616 237 -677 O
ATOM 877 CB THR A 118 21.012 66.325 67.336 1.00 50.39 C
ANISOU 877 CB THR A 118 4022 11182 3942 -497 131 -513 C
ATOM 878 OG1 THR A 118 20.724 65.778 68.612 1.00 43.18 O
ANISOU 878 OG1 THR A 118 3238 9934 3233 -419 110 -590 O
ATOM 879 CG2 THR A 118 20.537 65.369 66.270 1.00 47.96 C
ANISOU 879 CG2 THR A 118 3636 11134 3454 -578 107 -697 C
ATOM 880 N TYR A 119 22.760 68.554 65.795 1.00 51.06 N
ANISOU 880 N TYR A 119 3914 11704 3783 -598 236 -166 N
ATOM 881 CA TYR A 119 23.117 69.271 64.566 1.00 53.65 C
ANISOU 881 CA TYR A 119 4114 12372 3900 -691 271 -16 C
ATOM 882 C TYR A 119 24.628 69.129 64.296 1.00 59.16 C
ANISOU 882 C TYR A 119 4757 13180 4541 -710 338 -173 C
ATOM 883 O TYR A 119 25.034 68.875 63.162 1.00 59.37 O
ANISOU 883 O TYR A 119 4665 13547 4347 -783 366 -255 O
ATOM 884 CB TYR A 119 22.679 70.751 64.632 1.00 55.51 C
ANISOU 884 CB TYR A 119 4355 12551 4185 -703 279 366 C
ATOM 885 CG TYR A 119 23.166 71.571 63.454 1.00 60.61 C
ANISOU 885 CG TYR A 119 4880 13516 4633 -800 322 559 C
ATOM 886 CD1 TYR A 119 22.548 71.478 62.209 1.00 63.65 C
ANISOU 886 CD1 TYR A 119 5144 14271 4768 -873 293 637 C
ATOM 887 CD2 TYR A 119 24.279 72.401 63.569 1.00 62.92 C
ANISOU 887 CD2 TYR A 119 5170 13763 4972 -833 393 656 C
ATOM 888 CE1 TYR A 119 23.026 72.193 61.109 1.00 66.91 C
ANISOU 888 CE1 TYR A 119 5441 15003 4978 -968 334 824 C
ATOM 889 CE2 TYR A 119 24.753 73.132 62.484 1.00 65.74 C
ANISOU 889 CE2 TYR A 119 5416 14418 5143 -936 440 843 C
ATOM 890 CZ TYR A 119 24.127 73.022 61.255 1.00 76.19 C
ANISOU 890 CZ TYR A 119 6624 16110 6213 -999 411 933 C
ATOM 891 OH TYR A 119 24.601 73.748 60.192 1.00 84.36 O
ANISOU 891 OH TYR A 119 7548 17454 7051 -1105 458 1138 O
ATOM 892 N LEU A 120 25.443 69.243 65.355 1.00 56.13 N
ANISOU 892 N LEU A 120 4451 12528 4349 -644 363 -228 N
ATOM 893 CA LEU A 120 26.885 69.097 65.249 1.00 57.63 C
ANISOU 893 CA LEU A 120 4579 12807 4510 -648 422 -379 C
ATOM 894 C LEU A 120 27.279 67.646 64.978 1.00 60.25 C
ANISOU 894 C LEU A 120 4879 13229 4783 -599 423 -735 C
ATOM 895 O LEU A 120 28.129 67.411 64.125 1.00 61.78 O
ANISOU 895 O LEU A 120 4956 13698 4819 -639 477 -862 O
ATOM 896 CB LEU A 120 27.592 69.691 66.475 1.00 57.46 C
ANISOU 896 CB LEU A 120 4636 12495 4700 -601 440 -323 C
ATOM 897 CG LEU A 120 27.363 71.209 66.696 1.00 63.48 C
ANISOU 897 CG LEU A 120 5431 13154 5533 -661 461 9 C
ATOM 898 CD1 LEU A 120 27.684 71.611 68.123 1.00 62.41 C
ANISOU 898 CD1 LEU A 120 5407 12675 5630 -606 459 21 C
ATOM 899 CD2 LEU A 120 28.144 72.063 65.682 1.00 68.31 C
ANISOU 899 CD2 LEU A 120 5923 14043 5990 -782 529 157 C
ATOM 1976 N ALA A 270 17.395 71.560 89.366 1.00 35.38 N
ANISOU 1976 N ALA A 270 3622 5894 3926 228 397 -64 N
ATOM 1977 CA ALA A 270 17.973 72.447 90.379 1.00 35.08 C
ANISOU 1977 CA ALA A 270 3634 5791 3903 205 443 -136 C
ATOM 1978 C ALA A 270 16.885 73.370 90.924 1.00 38.15 C
ANISOU 1978 C ALA A 270 4048 6088 4360 230 542 -100 C
ATOM 1979 O ALA A 270 16.822 73.575 92.129 1.00 38.62 O
ANISOU 1979 O ALA A 270 4161 6109 4402 225 578 -172 O
ATOM 1980 CB ALA A 270 19.128 73.259 89.793 1.00 35.65 C
ANISOU 1980 CB ALA A 270 3680 5870 3995 157 449 -165 C
ATOM 1981 N ALA A 271 15.997 73.864 90.048 1.00 34.42 N
ANISOU 1981 N ALA A 271 3528 5596 3955 265 584 11 N
ATOM 1982 CA ALA A 271 14.866 74.726 90.424 1.00 34.94 C
ANISOU 1982 CA ALA A 271 3597 5575 4102 317 682 61 C
ATOM 1983 C ALA A 271 13.844 73.999 91.306 1.00 38.84 C
ANISOU 1983 C ALA A 271 4101 6101 4557 346 691 53 C
ATOM 1984 O ALA A 271 13.371 74.567 92.293 1.00 38.92 O
ANISOU 1984 O ALA A 271 4146 6047 4593 369 773 7 O
ATOM 1985 CB ALA A 271 14.180 75.270 89.179 1.00 35.46 C
ANISOU 1985 CB ALA A 271 3588 5648 4238 361 705 211 C
ATOM 1986 N LEU A 272 13.492 72.752 90.934 1.00 35.78 N
ANISOU 1986 N LEU A 272 3680 5809 4104 337 616 91 N
ATOM 1987 CA LEU A 272 12.516 71.931 91.663 1.00 35.50 C
ANISOU 1987 CA LEU A 272 3648 5811 4029 343 620 101 C
ATOM 1988 C LEU A 272 13.015 71.552 93.053 1.00 39.08 C
ANISOU 1988 C LEU A 272 4184 6252 4414 316 617 5 C
ATOM 1989 O LEU A 272 12.231 71.522 93.998 1.00 40.15 O
ANISOU 1989 O LEU A 272 4334 6392 4529 325 673 0 O
ATOM 1990 CB LEU A 272 12.119 70.681 90.845 1.00 34.66 C
ANISOU 1990 CB LEU A 272 3494 5793 3881 317 540 155 C
ATOM 1991 CG LEU A 272 11.233 70.919 89.606 1.00 38.06 C
ANISOU 1991 CG LEU A 272 3821 6290 4350 336 544 263 C
ATOM 1992 CD1 LEU A 272 10.998 69.623 88.830 1.00 36.79 C
ANISOU 1992 CD1 LEU A 272 3620 6223 4134 283 461 273 C
ATOM 1993 CD2 LEU A 272 9.888 71.527 89.980 1.00 39.50 C
ANISOU 1993 CD2 LEU A 272 3951 6476 4583 389 629 334 C
ATOM 1994 N ALA A 273 14.325 71.313 93.181 1.00 35.62 N
ANISOU 1994 N ALA A 273 3787 5818 3930 284 555 -67 N
ATOM 1995 CA ALA A 273 14.977 70.980 94.446 1.00 35.61 C
ANISOU 1995 CA ALA A 273 3851 5833 3846 260 535 -149 C
ATOM 1996 C ALA A 273 15.034 72.213 95.357 1.00 41.32 C
ANISOU 1996 C ALA A 273 4608 6511 4582 252 627 -233 C
ATOM 1997 O ALA A 273 14.832 72.086 96.566 1.00 41.96 O
ANISOU 1997 O ALA A 273 4729 6623 4590 240 654 -281 O
ATOM 1998 CB ALA A 273 16.385 70.473 94.176 1.00 35.68 C
ANISOU 1998 CB ALA A 273 3867 5876 3813 240 441 -195 C
ATOM 1999 N ALA A 274 15.306 73.400 94.773 1.00 37.99 N
ANISOU 1999 N ALA A 274 4172 6014 4250 253 679 -252 N
ATOM 2000 CA ALA A 274 15.361 74.677 95.495 1.00 38.62 C
ANISOU 2000 CA ALA A 274 4289 6011 4372 241 781 -348 C
ATOM 2001 C ALA A 274 13.976 75.054 96.014 1.00 40.49 C
ANISOU 2001 C ALA A 274 4522 6213 4650 298 887 -328 C
ATOM 2002 O ALA A 274 13.870 75.527 97.136 1.00 42.06 O
ANISOU 2002 O ALA A 274 4764 6399 4817 285 958 -435 O
ATOM 2003 CB ALA A 274 15.902 75.778 94.584 1.00 39.90 C
ANISOU 2003 CB ALA A 274 4439 6076 4645 227 814 -342 C
ATOM 2004 N ALA A 275 12.917 74.808 95.219 1.00 35.17 N
ANISOU 2004 N ALA A 275 3785 5545 4034 358 895 -198 N
ATOM 2005 CA ALA A 275 11.534 75.127 95.578 1.00 35.50 C
ANISOU 2005 CA ALA A 275 3792 5576 4119 425 993 -162 C
ATOM 2006 C ALA A 275 10.891 74.133 96.544 1.00 41.37 C
ANISOU 2006 C ALA A 275 4539 6428 4750 407 987 -168 C
ATOM 2007 O ALA A 275 10.161 74.562 97.441 1.00 41.44 O
ANISOU 2007 O ALA A 275 4550 6440 4754 435 1088 -219 O
ATOM 2008 CB ALA A 275 10.682 75.243 94.323 1.00 35.94 C
ANISOU 2008 CB ALA A 275 3756 5631 4267 491 994 -11 C
ATOM 2009 N HIS A 276 11.151 72.806 96.365 1.00 37.48 N
ANISOU 2009 N HIS A 276 4048 6022 4172 360 877 -116 N
ATOM 2010 CA HIS A 276 10.514 71.766 97.173 1.00 36.82 C
ANISOU 2010 CA HIS A 276 3970 6029 3991 331 867 -88 C
ATOM 2011 C HIS A 276 11.327 71.079 98.243 1.00 39.86 C
ANISOU 2011 C HIS A 276 4431 6468 4246 274 814 -146 C
ATOM 2012 O HIS A 276 10.741 70.470 99.133 1.00 40.60 O
ANISOU 2012 O HIS A 276 4537 6634 4256 250 835 -122 O
ATOM 2013 CB HIS A 276 9.784 70.774 96.275 1.00 37.80 C
ANISOU 2013 CB HIS A 276 4031 6199 4131 322 810 33 C
ATOM 2014 CG HIS A 276 8.828 71.460 95.363 1.00 41.63 C
ANISOU 2014 CG HIS A 276 4421 6676 4720 382 864 104 C
ATOM 2015 ND1 HIS A 276 7.594 71.905 95.817 1.00 44.10 N
ANISOU 2015 ND1 HIS A 276 4674 7023 5060 428 967 129 N
ATOM 2016 CD2 HIS A 276 8.988 71.835 94.071 1.00 43.54 C
ANISOU 2016 CD2 HIS A 276 4613 6894 5037 410 830 160 C
ATOM 2017 CE1 HIS A 276 7.034 72.512 94.787 1.00 44.28 C
ANISOU 2017 CE1 HIS A 276 4609 7036 5180 492 987 207 C
ATOM 2018 NE2 HIS A 276 7.819 72.471 93.702 1.00 44.20 N
ANISOU 2018 NE2 HIS A 276 4602 6997 5195 478 903 236 N
"""
CONPRED_DUMMY = """PFRMAT RR
TARGET 536987
AUTHOR RaptorX-Contact
METHOD deep dilated residual networks (one variant of deep CNN). Consult <EMAIL> for details.
MODEL 1
MVAASMNILSKISSFIGKTFSLWAALFAAAAFFAPDTFKWAGPYIPWLLG
IIMFGMGLTLKPSDFDILFKHPKVVIIGVIAQFAIMPATAWCLSKLLNLP
AEIAVGVILVGCCPGGTASNVMTYLARGNVALSVAVTSVSTLTSPLLTPA
IFLMLAGEMLEIQAAGMLMSIVKMVLLPIVLGLIVHKVLGSKTEKLTDAL
PLVSVAAIVLIIGAVVGASKGKIMESGLLIFAVVVLHNGIGYLLGFFAAK
WTGLPYDAQKALTIEVGMQNSGLAAALAAAHFAAAPVVAVPGALFSVWHN
ISGSLLATYWAAKAGKHKKPLDRAGSENLYFQ
53 178 0 8 0.9999614
57 182 0 8 0.9999346
58 182 0 8 0.9999014
54 181 0 8 0.9998163
54 182 0 8 0.9997769
54 178 0 8 0.9996910
249 259 0 8 0.9989253
58 185 0 8 0.9979285
58 186 0 8 0.9977884
249 262 0 8 0.9974785
94 104 0 8 0.9972718
123 133 0 8 0.9972159
57 179 0 8 0.9963613
246 263 0 8 0.9962631
50 178 0 8 0.9946589
106 288 0 8 0.9932054
57 183 0 8 0.9925978
123 261 0 8 0.9922032
102 288 0 8 0.9917381
27 212 0 8 0.9908113
103 291 0 8 0.9907801
75 136 0 8 0.9905434
31 216 0 8 0.9904293
89 240 0 8 0.9902470
27 213 0 8 0.9900678
110 292 0 8 0.9887912
85 244 0 8 0.9886514
90 108 0 8 0.9883336
109 278 0 8 0.9877242
94 107 0 8 0.9875522
78 262 0 8 0.9875078
48 207 0 8 0.9874308
74 262 0 8 0.9874212
28 216 0 8 0.9870313
245 263 0 8 0.9866461
78 136 0 8 0.9865698
106 291 0 8 0.9861109
79 139 0 8 0.9859405
133 265 0 8 0.9857825
77 252 0 8 0.9857346
109 274 0 8 0.9857225
110 295 0 8 0.9855377
81 248 0 8 0.9851450
81 266 0 8 0.9848748
74 258 0 8 0.9841593
106 292 0 8 0.9837796
31 213 0 8 0.9835263
68 135 0 8 0.9834397
48 211 0 8 0.9833449
113 274 0 8 0.9828007
52 207 0 8 0.9818235
128 261 0 8 0.9814836
90 107 0 8 0.9814461
119 265 0 8 0.9814367
105 288 0 8 0.9791791
271 296 0 8 0.9788657
90 111 0 8 0.9781752
31 217 0 8 0.9776807
53 175 0 8 0.9772123
77 262 0 8 0.9764582
129 258 0 8 0.9764170
234 298 0 8 0.9763948
133 261 0 8 0.9759184
79 140 0 8 0.9759070
55 182 0 8 0.9758528
246 259 0 8 0.9756561
27 209 0 8 0.9746038
234 295 0 8 0.9741930
112 148 0 8 0.9737659
102 287 0 8 0.9732612
132 258 0 8 0.9728087
82 266 0 8 0.9718467
242 263 0 8 0.9710815
245 266 0 8 0.9700539
91 108 0 8 0.9698529
75 139 0 8 0.9698042
48 210 0 8 0.9697683
24 212 0 8 0.9695854
107 233 0 8 0.9683198
136 262 0 8 0.9669924
107 291 0 8 0.9663849
79 136 0 8 0.9657449
94 108 0 8 0.9650769
125 307 0 8 0.9650706
77 248 0 8 0.9650462
120 133 0 8 0.9647374
93 233 0 8 0.9635152
51 207 0 8 0.9634590
"""
class SearchTargetTestCase(unittest.TestCase):
    """Unit tests for swamp.search.searchtarget.SearchTarget.

    Both tests create their working directories under $CCP4_SCR and register
    addCleanup callbacks to remove every file/directory they create.
    """

    def test_1(self):
        # Throw-away fixture files (removed again via addCleanup).
        pdb_fname = create_tempfile(PDB_DUMY)
        self.addCleanup(remove, pdb_fname)
        conpred_fname = create_tempfile(CONPRED_DUMMY)
        self.addCleanup(remove, conpred_fname)
        topcons_fname = create_tempfile(TOPCONS_DUMY)
        self.addCleanup(remove, topcons_fname)
        # NOTE(review): target_pdb_benchmark is passed the raw PDB_DUMY string,
        # not the pdb_fname temp file created above (which is otherwise unused)
        # -- presumably it should be pdb_fname; confirm against SearchTarget.
        search = SearchTarget(workdir=os.path.join(os.environ['CCP4_SCR'], 'test'), conpred=conpred_fname,
                             sspred=topcons_fname, target_pdb_benchmark=PDB_DUMY, queue_environment='environ',
                             platform='local', queue_name='queue', n_contacts_threshold=0)
        self.addCleanup(remove, os.path.join(os.environ['CCP4_SCR'], 'test'))
        # Instantiation is expected to create the working directory eagerly.
        self.assertTrue(os.path.isdir(os.path.join(os.environ['CCP4_SCR'], 'test')))
        self.assertEqual(search.search_header, """**********************************************************************
*****************                SWAMP SEARCH                *****************
**********************************************************************
""")
        # Template attributes ({} placeholders filled in per sub-search).
        self.assertEqual(os.path.join(os.environ['CCP4_SCR'], 'test', "tmp_cmap_{}.map"), search._tmp_cmap)
        self.assertEqual(os.path.join(os.environ['CCP4_SCR'], 'test', "search_{}"), search._search_workdir)
        self.assertIsNone(search._tmp_pdb)
        search.target.split()
        self.assertFalse(search.target.error)
        self.assertEqual(swamp.FRAG_MAPALIGN_DB, search.template_library)
        self.assertEqual('mapalign', search.library_format)
        self.assertDictEqual({'directory': os.path.join(os.environ['CCP4_SCR'], 'test'), 'shell': '/bin/bash',
                              'name': 'swamp', 'queue': 'queue', 'environment': 'environ', 'processes': 1},
                             search._other_task_info)
        self.assertListEqual(["SUBTRGT_RANK", "SUBTRGT_ID", "N_CON_MAP_A", "MAP_A", "MAP_B", "CON_SCO", "GAP_SCO",
                              "TOTAL_SCO", "ALI_LEN", "QSCORE", "RMSD", "SEQ_ID", "N_ALIGN"], search._column_reference)
        self.assertIsNone(search.scripts)
        self.assertIsNone(search.search_pickle_dict)
        search._create_scripts()
        # One script per sub-search; the pickle dict keys are lexicographically
        # sorted, hence search_10/11/12 appearing before search_2.
        self.assertEqual(12, len(search.scripts))
        self.assertListEqual(
            ['%s/search_1/search_1_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_10/search_10_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_11/search_11_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_12/search_12_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_2/search_2_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_3/search_3_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_4/search_4_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_5/search_5_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_6/search_6_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_7/search_7_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_8/search_8_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_9/search_9_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test')],
            sorted(list(search.search_pickle_dict.keys())))

    def test_2(self):
        pdb_fname = create_tempfile(PDB_DUMY)
        self.addCleanup(remove, pdb_fname)
        conpred_fname = create_tempfile(CONPRED_DUMMY)
        self.addCleanup(remove, conpred_fname)
        topcons_fname = create_tempfile(TOPCONS_DUMY)
        self.addCleanup(remove, topcons_fname)
        # NOTE(review): pdb_fname is created but never used here (no
        # target_pdb_benchmark is passed) -- confirm this is deliberate.
        search = SearchTarget(workdir=os.path.join(os.environ['CCP4_SCR'], 'test_2'), conpred=conpred_fname,
                             sspred=topcons_fname, platform='sge', n_contacts_threshold=0,
                             alignment_algorithm_name='mapalign')
        self.addCleanup(remove, os.path.join(os.environ['CCP4_SCR'], 'test_2'))
        search.target.split()
        self.assertDictEqual({'directory': os.path.join(os.environ['CCP4_SCR'], 'test_2'), 'shell': '/bin/bash',
                              'name': 'swamp', 'max_array_size': 1}, search._other_task_info)
        search._create_scripts()
        # Fabricate one mock result pickle per expected sub-search so that
        # recover_results() has something to load; the idx tag in each field
        # lets the assertions below trace every row back to its pickle.
        for idx, pickle in enumerate(sorted(search.search_pickle_dict.keys())):
            if not os.path.isdir(os.path.dirname(pickle)):
                os.makedirs(os.path.dirname(pickle))
            self.addCleanup(remove, os.path.dirname(pickle))
            joblib.dump([["MAP_A_%s" % idx, "MAP_B_%s" % idx, "CON_SCO_%s" % idx, "GAP_SCO_%s" % idx,
                          "TOTAL_SCO_%s" % idx, "ALI_LEN_%s" % idx, "QSCORE_%s" % idx, "RMSD_%s" % idx,
                          "SEQ_ID_%s" % idx, "N_ALIGN_%s" % idx]],
                        pickle)
        self.assertIsNone(search.results)
        search.results = search.recover_results()
        # recover_results() prepends rank and subtarget-id columns (cols 0-2).
        self.assertListEqual([[1, '2_6', 11, 'MAP_A_0', 'MAP_B_0', 'CON_SCO_0', 'GAP_SCO_0', 'TOTAL_SCO_0', 'ALI_LEN_0',
                               'QSCORE_0', 'RMSD_0', 'SEQ_ID_0', 'N_ALIGN_0'],
                              [2, '1_7', 8, 'MAP_A_4', 'MAP_B_4', 'CON_SCO_4', 'GAP_SCO_4', 'TOTAL_SCO_4', 'ALI_LEN_4',
                               'QSCORE_4', 'RMSD_4', 'SEQ_ID_4', 'N_ALIGN_4'],
                              [3, '4_9', 7, 'MAP_A_5', 'MAP_B_5', 'CON_SCO_5', 'GAP_SCO_5', 'TOTAL_SCO_5', 'ALI_LEN_5',
                               'QSCORE_5', 'RMSD_5', 'SEQ_ID_5', 'N_ALIGN_5'],
                              [4, '3_5', 6, 'MAP_A_6', 'MAP_B_6', 'CON_SCO_6', 'GAP_SCO_6', 'TOTAL_SCO_6', 'ALI_LEN_6',
                               'QSCORE_6', 'RMSD_6', 'SEQ_ID_6', 'N_ALIGN_6'],
                              [5, '2_7', 5, 'MAP_A_7', 'MAP_B_7', 'CON_SCO_7', 'GAP_SCO_7', 'TOTAL_SCO_7', 'ALI_LEN_7',
                               'QSCORE_7', 'RMSD_7', 'SEQ_ID_7', 'N_ALIGN_7'],
                              [6, '3_4', 4, 'MAP_A_8', 'MAP_B_8', 'CON_SCO_8', 'GAP_SCO_8', 'TOTAL_SCO_8', 'ALI_LEN_8',
                               'QSCORE_8', 'RMSD_8', 'SEQ_ID_8', 'N_ALIGN_8'],
                              [7, '3_8', 3, 'MAP_A_9', 'MAP_B_9', 'CON_SCO_9', 'GAP_SCO_9', 'TOTAL_SCO_9', 'ALI_LEN_9',
                               'QSCORE_9', 'RMSD_9', 'SEQ_ID_9', 'N_ALIGN_9'],
                              [8, '4_10', 3, 'MAP_A_10', 'MAP_B_10', 'CON_SCO_10', 'GAP_SCO_10', 'TOTAL_SCO_10',
                               'ALI_LEN_10', 'QSCORE_10', 'RMSD_10', 'SEQ_ID_10', 'N_ALIGN_10'],
                              [9, '4_5', 2, 'MAP_A_11', 'MAP_B_11', 'CON_SCO_11', 'GAP_SCO_11', 'TOTAL_SCO_11',
                               'ALI_LEN_11', 'QSCORE_11', 'RMSD_11', 'SEQ_ID_11', 'N_ALIGN_11'],
                              [10, '8_10', 2, 'MAP_A_1', 'MAP_B_1', 'CON_SCO_1', 'GAP_SCO_1', 'TOTAL_SCO_1',
                               'ALI_LEN_1', 'QSCORE_1', 'RMSD_1', 'SEQ_ID_1', 'N_ALIGN_1'],
                              [11, '4_8', 1, 'MAP_A_2', 'MAP_B_2', 'CON_SCO_2', 'GAP_SCO_2', 'TOTAL_SCO_2', 'ALI_LEN_2',
                               'QSCORE_2', 'RMSD_2', 'SEQ_ID_2', 'N_ALIGN_2'],
                              [12, '9_10', 1, 'MAP_A_3', 'MAP_B_3', 'CON_SCO_3', 'GAP_SCO_3', 'TOTAL_SCO_3',
                               'ALI_LEN_3', 'QSCORE_3', 'RMSD_3', 'SEQ_ID_3', 'N_ALIGN_3']],
                             sorted(search.results, key=itemgetter(0)))
        # Overwrite CON_SCO (col 5) with the numeric contact count (col 2) so
        # that rank() has numeric scores to sort by.
        for result in search.results:
            result[5] = result[2]
        search._make_dataframe(search.results)
        search.rank(consco_threshold=0)
        self.assertListEqual([11, 8, 7, 6, 5, 4, 3, 3, 2, 2, 1, 1], search.ranked_searchmodels.consco.tolist())
        search.rank(consco_threshold=0, combine_searchmodels=True)
        self.assertListEqual([4.416666666666667], search.ranked_searchmodels.consco.tolist())
|
[
"swamp.utils.create_tempfile",
"os.path.dirname",
"joblib.dump",
"operator.itemgetter",
"os.path.join"
] |
[((34178, 34203), 'swamp.utils.create_tempfile', 'create_tempfile', (['PDB_DUMY'], {}), '(PDB_DUMY)\n', (34193, 34203), False, 'from swamp.utils import remove, create_tempfile\n'), ((34271, 34301), 'swamp.utils.create_tempfile', 'create_tempfile', (['CONPRED_DUMMY'], {}), '(CONPRED_DUMMY)\n', (34286, 34301), False, 'from swamp.utils import remove, create_tempfile\n'), ((34373, 34402), 'swamp.utils.create_tempfile', 'create_tempfile', (['TOPCONS_DUMY'], {}), '(TOPCONS_DUMY)\n', (34388, 34402), False, 'from swamp.utils import remove, create_tempfile\n'), ((37657, 37682), 'swamp.utils.create_tempfile', 'create_tempfile', (['PDB_DUMY'], {}), '(PDB_DUMY)\n', (37672, 37682), False, 'from swamp.utils import remove, create_tempfile\n'), ((37750, 37780), 'swamp.utils.create_tempfile', 'create_tempfile', (['CONPRED_DUMMY'], {}), '(CONPRED_DUMMY)\n', (37765, 37780), False, 'from swamp.utils import remove, create_tempfile\n'), ((37852, 37881), 'swamp.utils.create_tempfile', 'create_tempfile', (['TOPCONS_DUMY'], {}), '(TOPCONS_DUMY)\n', (37867, 37881), False, 'from swamp.utils import remove, create_tempfile\n'), ((34794, 34838), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (34806, 34838), False, 'import os\n'), ((35220, 35283), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""', '"""tmp_cmap_{}.map"""'], {}), "(os.environ['CCP4_SCR'], 'test', 'tmp_cmap_{}.map')\n", (35232, 35283), False, 'import os\n'), ((35328, 35385), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""', '"""search_{}"""'], {}), "(os.environ['CCP4_SCR'], 'test', 'search_{}')\n", (35340, 35385), False, 'import os\n'), ((38231, 38277), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test_2"""'], {}), "(os.environ['CCP4_SCR'], 'test_2')\n", (38243, 38277), False, 'import os\n'), ((38817, 39047), 'joblib.dump', 'joblib.dump', (["[['MAP_A_%s' % idx, 'MAP_B_%s' % idx, 'CON_SCO_%s' % 
idx, 'GAP_SCO_%s' %\n idx, 'TOTAL_SCO_%s' % idx, 'ALI_LEN_%s' % idx, 'QSCORE_%s' % idx, \n 'RMSD_%s' % idx, 'SEQ_ID_%s' % idx, 'N_ALIGN_%s' % idx]]", 'pickle'], {}), "([['MAP_A_%s' % idx, 'MAP_B_%s' % idx, 'CON_SCO_%s' % idx, \n 'GAP_SCO_%s' % idx, 'TOTAL_SCO_%s' % idx, 'ALI_LEN_%s' % idx, \n 'QSCORE_%s' % idx, 'RMSD_%s' % idx, 'SEQ_ID_%s' % idx, 'N_ALIGN_%s' %\n idx]], pickle)\n", (38828, 39047), False, 'import joblib\n'), ((34489, 34533), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (34501, 34533), False, 'import os\n'), ((34878, 34922), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (34890, 34922), False, 'import os\n'), ((35707, 35751), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (35719, 35751), False, 'import os\n'), ((37968, 38014), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test_2"""'], {}), "(os.environ['CCP4_SCR'], 'test_2')\n", (37980, 38014), False, 'import os\n'), ((38353, 38399), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test_2"""'], {}), "(os.environ['CCP4_SCR'], 'test_2')\n", (38365, 38399), False, 'import os\n'), ((38780, 38803), 'os.path.dirname', 'os.path.dirname', (['pickle'], {}), '(pickle)\n', (38795, 38803), False, 'import os\n'), ((36434, 36478), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (36446, 36478), False, 'import os\n'), ((36533, 36577), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (36545, 36577), False, 'import os\n'), ((36632, 36676), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (36644, 36676), False, 'import os\n'), ((36731, 36775), 'os.path.join', 
'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (36743, 36775), False, 'import os\n'), ((36828, 36872), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (36840, 36872), False, 'import os\n'), ((36925, 36969), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (36937, 36969), False, 'import os\n'), ((37022, 37066), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (37034, 37066), False, 'import os\n'), ((37119, 37163), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (37131, 37163), False, 'import os\n'), ((37216, 37260), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (37228, 37260), False, 'import os\n'), ((37313, 37357), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (37325, 37357), False, 'import os\n'), ((37410, 37454), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (37422, 37454), False, 'import os\n'), ((37507, 37551), 'os.path.join', 'os.path.join', (["os.environ['CCP4_SCR']", '"""test"""'], {}), "(os.environ['CCP4_SCR'], 'test')\n", (37519, 37551), False, 'import os\n'), ((38665, 38688), 'os.path.dirname', 'os.path.dirname', (['pickle'], {}), '(pickle)\n', (38680, 38688), False, 'import os\n'), ((38719, 38742), 'os.path.dirname', 'os.path.dirname', (['pickle'], {}), '(pickle)\n', (38734, 38742), False, 'import os\n'), ((41676, 41689), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (41686, 41689), False, 'from operator import itemgetter\n')]
|
import frappe
from frappe.utils import cstr, unique
@frappe.whitelist()
def title_field(doctype, name):
    """Return the human-readable title of a document.

    Uses the DocType's configured ``title_field`` when one exists;
    otherwise falls back to the document name itself.
    """
    meta = frappe.get_meta(doctype)
    if not meta.title_field:
        return name
    return frappe.db.get_value(doctype, name, meta.title_field)
|
[
"frappe.whitelist",
"frappe.db.get_value",
"frappe.get_meta"
] |
[((57, 75), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (73, 75), False, 'import frappe\n'), ((118, 142), 'frappe.get_meta', 'frappe.get_meta', (['doctype'], {}), '(doctype)\n', (133, 142), False, 'import frappe\n'), ((176, 238), 'frappe.db.get_value', 'frappe.db.get_value', (['doctype', 'name', "(meta.title_field or 'name')"], {}), "(doctype, name, meta.title_field or 'name')\n", (195, 238), False, 'import frappe\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections
import threading
import sys
import os
import stat
import shutil
import fcntl
import termios
import struct
import copy
import signal
import time
import fcntl
from functools import partial
# Self-reference to this module so the helpers below can mutate module-level
# state through an explicit object instead of `global` statements.
this = sys.modules[__name__]
# True while a transient progress line (no trailing newline yet) is on screen.
this.__progress_running = False
# Last message printed by progress(); used to suppress duplicate updates.
this.__last_progress = ""
# Interactive output only when stdout is a real terminal and we are not in CI.
TTY = sys.stdout.isatty() and (str(os.environ.get('CI', 'false')) == 'false')
def interrupt_stdout() -> None:
    """Terminate an in-flight progress line so later output starts on a clean line."""
    if not this.__progress_running:
        return
    sys.stdout.write('\n')
    sys.stdout.flush()
    this.__progress_running = False
def debug(msg) -> None:
    """Print a debug message.

    A plain string is printed on one line; a non-empty sized iterable is
    printed one element per line under a single "debug |" header.
    """
    this.__progress_running = False
    if isinstance(msg, str):
        sys.stdout.write('\033[97m debug | \033[0m{0}\033[K\n'.format(msg))
        sys.stdout.flush()
    # Bug fix: the `collections.Iterable` alias was deprecated since Python 3.3
    # and removed in Python 3.10 — the ABC lives in `collections.abc`.
    elif isinstance(msg, collections.abc.Iterable) and len(msg):
        # NOTE(review): `len(msg)` additionally assumes the iterable is Sized,
        # matching the original behavior — confirm callers only pass sequences.
        sys.stdout.write('\033[97m debug | \033[0m{0}\033[K\n'.format(msg[0]))
        for chunk in msg[1:]:
            sys.stdout.write('\033[97m | \033[0m{0}\033[K\n'.format(chunk))
        sys.stdout.flush()
def info(msg) -> None:
    """Print an informational line and clear any pending progress state."""
    this.__progress_running = False
    line = '\033[95m info | \033[0m{0}\033[K\n'.format(msg)
    sys.stdout.write(line)
    sys.stdout.flush()
def progress(msg) -> None:
    """Render a progress line, skipping consecutive duplicate messages.

    On a TTY the line is rewritten in place with a carriage return; otherwise
    each update is emitted as a normal newline-terminated line.
    """
    if this.__last_progress == msg:
        return
    this.__last_progress = msg
    text = msg.rstrip()
    if TTY:
        this.__progress_running = True
        sys.stdout.write('\033[94m | {0}\033[K\r'.format(text))
    else:
        sys.stdout.write('\033[94m | {0}\033[K\n'.format(text))
    sys.stdout.flush()
def error(msg) -> None:
    """Print an error line in red and clear any pending progress state."""
    this.__progress_running = False
    # Bug fix: the erase-to-end-of-line sequence was written as a literal "[K"
    # (the ESC byte was missing); emit a proper \033[K like the sibling helpers.
    sys.stdout.write('\033[91m! error | {0}\033[0m\033[K\n'.format(msg))
    sys.stdout.flush()
def success(msg) -> None:
    """Print a green success line and clear any pending progress state."""
    this.__progress_running = False
    line = '\033[92m pass | {0}\033[0m\033[K\n'.format(msg)
    sys.stdout.write(line)
    sys.stdout.flush()
def warn(msg) -> None:
    """Print a yellow warning line and clear any pending progress state."""
    this.__progress_running = False
    line = '\033[93m warn | {0}\033[0m\033[K\n'.format(msg)
    sys.stdout.write(line)
    sys.stdout.flush()
class timeit():
    """Report the wall-clock duration of a labelled operation.

    Usable either as a context manager (``with timeit("x"): ...``) or as a
    one-shot callable wrapper (``timeit("x")(fn, *args)``).
    """
    def __init__(self, label):
        self.__label = label
    def __call__(self, f, *args, **kwargs):
        self.__enter__()
        result = f(*args, **kwargs)
        # Bug fix: __exit__ requires the (type, value, traceback) triple;
        # the original called self.__exit__() with no arguments, which raised
        # TypeError whenever timeit was used as a callable wrapper.
        self.__exit__(None, None, None)
        return result
    def __enter__(self):
        # Record the start time; note __enter__ returns None, so
        # `with timeit(...) as t` binds None.
        self.ts = time.time()
        sys.stdout.write('\033[95m info | \033[0mstarting {0}\033[K\n'.format(self.__label))
        sys.stdout.flush()
    def __exit__(self, exception_type, exception_value, traceback):
        # On Ctrl-C just reset terminal colors; the exception propagates.
        if exception_type == KeyboardInterrupt:
            sys.stdout.write('\033[0m')
            sys.stdout.flush()
            return
        te = time.time()
        sys.stdout.write('\033[90m {0} took {1}\033[0m\n'.format(self.__label, human_readable_duration((te - self.ts)*1e3)))
        sys.stdout.flush()
def human_readable_duration(ms):
    """Format a millisecond duration as at most two coarse units, e.g. "1 h 1 m".

    Durations under one millisecond render as "0 ms". The result keeps only
    the two most significant non-zero units (four joined tokens).
    """
    if ms < 1:
        return "0 ms"
    s, ms = divmod(ms, 1e3)
    m, s = divmod(s, 60)
    h, m = divmod(m, 60)
    tokens = []
    for value, unit in ((int(h), 'h'), (int(m), 'm'), (int(s), 's'), (int(ms), 'ms')):
        if value > 0:
            tokens.append(str(value))
            tokens.append(unit)
    return ' '.join(tokens[:4])
class with_deadline():
    """Decorator / context manager that enforces a wall-clock deadline via SIGALRM.

    Usage: ``@with_deadline(timeout=5)`` — the wrapped call raises
    ``TimeoutError`` if it runs longer than *timeout* seconds.
    NOTE(review): relies on signal.SIGALRM/alarm, so this is Unix-only and must
    run on the main thread — confirm for the deployment target.
    """
    def __init__(self, timeout=None):
        # bool is an int subclass and would pass this check; timeout must be
        # a whole number of seconds because signal.alarm only takes ints.
        if not isinstance(timeout, int):
            raise ValueError("invalid timeout")
        self.__timeout = timeout
        # False until the first __call__ captures the wrapped function.
        self.__ready = False
        self.__fn = lambda *args: None
    def __get__(self, instance, *args):
        # Descriptor protocol: bind the owning instance so the decorator also
        # works on methods (instance becomes the first argument).
        return partial(self.__call__, instance)
    def __call__(self, *args, **kwargs):
        if not self.__ready:
            # Decoration phase: args[0] is the function being wrapped.
            self.__fn = args[0]
            self.__ready = True
            return self
        # Invocation phase: run the wrapped function under the alarm.
        with self:
            return self.__fn(*args, **kwargs)
    def __enter__(self):
        def handler(signum, frame):
            raise TimeoutError()
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(self.__timeout)
    def __exit__(self, *args):
        # Cancel any pending alarm regardless of how the body exited.
        signal.alarm(0)
|
[
"sys.stdout.write",
"functools.partial",
"time.time",
"os.environ.get",
"sys.stdout.isatty",
"sys.stdout.flush",
"signal.alarm",
"signal.signal"
] |
[((351, 370), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (368, 370), False, 'import sys\n'), ((1145, 1163), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1161, 1163), False, 'import sys\n'), ((1643, 1661), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1659, 1661), False, 'import sys\n'), ((1796, 1814), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1812, 1814), False, 'import sys\n'), ((1946, 1964), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1962, 1964), False, 'import sys\n'), ((491, 513), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (507, 513), False, 'import sys\n'), ((518, 536), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (534, 536), False, 'import sys\n'), ((734, 752), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (750, 752), False, 'import sys\n'), ((1390, 1408), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1406, 1408), False, 'import sys\n'), ((1496, 1514), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1512, 1514), False, 'import sys\n'), ((2210, 2221), 'time.time', 'time.time', ([], {}), '()\n', (2219, 2221), False, 'import time\n'), ((2317, 2335), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2333, 2335), False, 'import sys\n'), ((2529, 2540), 'time.time', 'time.time', ([], {}), '()\n', (2538, 2540), False, 'import time\n'), ((2675, 2693), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2691, 2693), False, 'import sys\n'), ((3397, 3429), 'functools.partial', 'partial', (['self.__call__', 'instance'], {}), '(self.__call__, instance)\n', (3404, 3429), False, 'from functools import partial\n'), ((3709, 3747), 'signal.signal', 'signal.signal', (['signal.SIGALRM', 'handler'], {}), '(signal.SIGALRM, handler)\n', (3722, 3747), False, 'import signal\n'), ((3752, 3780), 'signal.alarm', 'signal.alarm', (['self.__timeout'], {}), '(self.__timeout)\n', (3764, 3780), False, 'import signal\n'), 
((3815, 3830), 'signal.alarm', 'signal.alarm', (['(0)'], {}), '(0)\n', (3827, 3830), False, 'import signal\n'), ((380, 409), 'os.environ.get', 'os.environ.get', (['"""CI"""', '"""false"""'], {}), "('CI', 'false')\n", (394, 409), False, 'import os\n'), ((995, 1013), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1011, 1013), False, 'import sys\n'), ((2453, 2480), 'sys.stdout.write', 'sys.stdout.write', (['"""\x1b[0m"""'], {}), "('\\x1b[0m')\n", (2469, 2480), False, 'import sys\n'), ((2487, 2505), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2503, 2505), False, 'import sys\n')]
|
#!/usr/bin/env python
# This Software (Dioptra) is being made available as a public service by the
# National Institute of Standards and Technology (NIST), an Agency of the United
# States Department of Commerce. This software was developed in part by employees of
# NIST and in part by NIST contractors. Copyright in portions of this software that
# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
# to Title 17 United States Code Section 105, works of NIST employees are not
# subject to copyright protection in the United States. However, NIST may hold
# international copyright in software created by its employees and domestic
# copyright (or licensing rights) in portions of software that were assigned or
# licensed to NIST. To the extent that NIST holds copyright in this software, it is
# being made available under the Creative Commons Attribution 4.0 International
# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
# of the software developed or licensed by NIST.
#
# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
# https://creativecommons.org/licenses/by/4.0/legalcode
import os
from pathlib import Path
from typing import Dict, List, Tuple

import click
import mlflow
import numpy as np
import structlog
from prefect import Flow, Parameter
from prefect.utilities.logging import get_logger as get_prefect_logger
from structlog.stdlib import BoundLogger

from mitre.securingai import pyplugs
from mitre.securingai.sdk.utilities.contexts import plugin_dirs
from mitre.securingai.sdk.utilities.logging import (
    StderrLogStream,
    StdoutLogStream,
    attach_stdout_stream_handler,
    clear_logger_handlers,
    configure_structlog,
    set_logging_level,
)
# Import-path prefixes under which pyplugs resolves task plugins:
# user-supplied custom plugins vs. the framework's built-ins.
_CUSTOM_PLUGINS_IMPORT_PATH: str = "securingai_custom"
_PLUGINS_IMPORT_PATH: str = "securingai_builtins"
# Registry of distance metrics ("name" is the reported label, "func" the
# plugin function name). NOTE(review): not referenced anywhere in this entry
# point's visible code — presumably consumed by a shared metrics plugin;
# confirm before removing.
DISTANCE_METRICS: List[Dict[str, str]] = [
    {"name": "l_infinity_norm", "func": "l_inf_norm"},
    {"name": "l_1_norm", "func": "l_1_norm"},
    {"name": "l_2_norm", "func": "l_2_norm"},
    {"name": "cosine_similarity", "func": "paired_cosine_similarities"},
    {"name": "euclidean_distance", "func": "paired_euclidean_distances"},
    {"name": "manhattan_distance", "func": "paired_manhattan_distances"},
    {"name": "wasserstein_distance", "func": "paired_wasserstein_distances"},
]
# Module-level structured logger for this entry point.
LOGGER: BoundLogger = structlog.stdlib.get_logger()
def _map_norm(ctx, param, value):
norm_mapping: Dict[str, float] = {"inf": np.inf, "1": 1, "2": 2}
processed_norm: float = norm_mapping[value]
return processed_norm
def _coerce_comma_separated_ints(ctx, param, value):
return tuple(int(x.strip()) for x in value.split(","))
def _coerce_int_to_bool(ctx, param, value):
return bool(int(value))
@click.command()
@click.option(
    "--data-dir",
    type=click.Path(
        exists=True, file_okay=False, dir_okay=True, resolve_path=True, readable=True
    ),
    help="Root directory for NFS mounted datasets (in container)",
)
@click.option(
    "--image-size",
    type=click.STRING,
    callback=_coerce_comma_separated_ints,
    help="Dimensions for the input images",
)
@click.option(
    "--adv-tar-name",
    type=click.STRING,
    default="adversarial_patch.tar.gz",
    help="Name to give to tarfile artifact containing patches",
)
@click.option(
    "--adv-data-dir",
    type=click.STRING,
    default="adv_patches",
    help="Directory for saving adversarial patches",
)
@click.option(
    "--model-name",
    type=click.STRING,
    help="Name of model to load from registry",
)
@click.option(
    "--model-version",
    type=click.STRING,
    help="Version of model to load from registry",
)
@click.option(
    "--rotation-max",
    type=click.FLOAT,
    help="The maximum rotation applied to random patches. \
The value is expected to be in the range `[0, 180]` ",
    default=22.5,
)
@click.option(
    "--scale-min",
    type=click.FLOAT,
    help="The minimum scaling applied to random patches. \
The value should be in the range `[0, 1]`, but less than `scale_max` ",
    default=0.1,
)
@click.option(
    "--scale-max",
    type=click.FLOAT,
    help="The maximum scaling applied to random patches. \
The value should be in the range `[0, 1]`, but larger than `scale_min.` ",
    default=1.0,
)
@click.option(
    "--learning-rate",
    type=click.FLOAT,
    help="The learning rate of the patch attack optimization procedure. ",
    default=5.0,
)
@click.option(
    "--max-iter",
    type=click.INT,
    help=" The number of patch optimization steps. ",
    default=500,
)
@click.option(
    "--patch-target",
    type=click.INT,
    help=" The target class index of the generated patch. Negative numbers will generate randomized id labels.",
    default=-1,
)
@click.option(
    "--num-patch",
    type=click.INT,
    help=" The number of patches generated. Each adversarial image recieves one patch. ",
    default=1,
)
@click.option(
    "--num-patch-gen-samples",
    type=click.INT,
    help=" The number of sample images used to generate each patch. ",
    default=10,
)
@click.option(
    "--imagenet-preprocessing",
    type=click.BOOL,
    help="If true, initializes model with Imagenet image preprocessing settings.",
    default=False,
)
@click.option(
    "--seed",
    type=click.INT,
    help="Set the entry point rng seed",
    default=-1,
)
def patch_attack(
    data_dir,
    image_size,
    adv_tar_name,
    adv_data_dir,
    rotation_max,
    scale_min,
    scale_max,
    learning_rate,
    max_iter,
    patch_target,
    num_patch,
    num_patch_gen_samples,
    model_name,
    model_version,
    imagenet_preprocessing,
    seed,
    patch_shape=None,
):
    """Generate adversarial patches for a registered Keras classifier.

    Logs the entry-point parameters, then builds the Prefect flow from
    ``init_gen_patch_flow`` and runs it inside an MLflow run.

    Returns:
        The final Prefect ``State`` of the executed flow.
    """
    LOGGER.info(
        "Execute MLFlow entry point",
        entry_point="gen_patch",
        data_dir=data_dir,
        image_size=image_size,
        adv_tar_name=adv_tar_name,
        adv_data_dir=adv_data_dir,
        model_name=model_name,
        model_version=model_version,
        patch_target=patch_target,
        num_patch=num_patch,
        num_patch_gen_samples=num_patch_gen_samples,
        rotation_max=rotation_max,
        scale_min=scale_min,
        scale_max=scale_max,
        learning_rate=learning_rate,
        max_iter=max_iter,
        imagenet_preprocessing=imagenet_preprocessing,
        seed=seed,
    )
    # Three-channel inputs are treated as 8-bit images; anything else is assumed
    # to be already normalized to [0, 1]. (Fix: ``Tuple`` was referenced here
    # without being imported from typing; the import block now provides it.)
    clip_values: Tuple[float, float] = (0, 255) if image_size[2] == 3 else (0, 1)
    # The Imagenet preprocessing path does its own scaling; otherwise map
    # 8-bit pixel values into [0, 1].
    rescale = 1.0 if imagenet_preprocessing else 1.0 / 255
    with mlflow.start_run() as active_run:  # noqa: F841
        flow: Flow = init_gen_patch_flow()
        state = flow.run(
            parameters=dict(
                testing_dir=Path(data_dir),
                image_size=image_size,
                rescale=rescale,
                clip_values=clip_values,
                adv_tar_name=adv_tar_name,
                adv_data_dir=(Path.cwd() / adv_data_dir).resolve(),
                model_name=model_name,
                model_version=model_version,
                patch_target=patch_target,
                num_patch=num_patch,
                num_patch_gen_samples=num_patch_gen_samples,
                rotation_max=rotation_max,
                scale_min=scale_min,
                scale_max=scale_max,
                learning_rate=learning_rate,
                max_iter=max_iter,
                patch_shape=patch_shape,
                imagenet_preprocessing=imagenet_preprocessing,
                seed=seed,
            )
        )
    return state
def init_gen_patch_flow() -> Flow:
    """Build the Prefect flow wiring RNG setup, model loading, and patch generation.

    Task ordering is driven by data dependencies plus explicit
    ``upstream_tasks`` lists; the unpacking order of the Parameter tuple below
    must match the Parameter names exactly.
    NOTE(review): the flow is named "Fast Gradient Method" but generates
    adversarial patches — looks copied from another entry point; confirm.
    """
    with Flow("Fast Gradient Method") as flow:
        (
            testing_dir,
            image_size,
            rescale,
            clip_values,
            adv_tar_name,
            adv_data_dir,
            model_name,
            model_version,
            rotation_max,
            scale_min,
            scale_max,
            learning_rate,
            max_iter,
            patch_target,
            num_patch,
            num_patch_gen_samples,
            imagenet_preprocessing,
            patch_shape,
            seed,
        ) = (
            Parameter("testing_dir"),
            Parameter("image_size"),
            Parameter("rescale"),
            Parameter("clip_values"),
            Parameter("adv_tar_name"),
            Parameter("adv_data_dir"),
            Parameter("model_name"),
            Parameter("model_version"),
            Parameter("rotation_max"),
            Parameter("scale_min"),
            Parameter("scale_max"),
            Parameter("learning_rate"),
            Parameter("max_iter"),
            Parameter("patch_target"),
            Parameter("num_patch"),
            Parameter("num_patch_gen_samples"),
            Parameter("imagenet_preprocessing"),
            Parameter("patch_shape"),
            Parameter("seed"),
        )
        # Seed the entry-point RNG, then draw independent seeds for TensorFlow
        # and the dataset from it.
        seed, rng = pyplugs.call_task(
            f"{_PLUGINS_IMPORT_PATH}.random", "rng", "init_rng", seed=seed
        )
        tensorflow_global_seed = pyplugs.call_task(
            f"{_PLUGINS_IMPORT_PATH}.random", "sample", "draw_random_integer", rng=rng
        )
        dataset_seed = pyplugs.call_task(
            f"{_PLUGINS_IMPORT_PATH}.random", "sample", "draw_random_integer", rng=rng
        )
        init_tensorflow_results = pyplugs.call_task(
            f"{_PLUGINS_IMPORT_PATH}.backend_configs",
            "tensorflow",
            "init_tensorflow",
            seed=tensorflow_global_seed,
        )
        # Output directory for generated patches must exist before the attack runs.
        make_directories_results = pyplugs.call_task(
            f"{_PLUGINS_IMPORT_PATH}.artifacts",
            "utils",
            "make_directories",
            dirs=[adv_data_dir],
        )
        # Record the resolved seeds in MLflow for reproducibility.
        log_mlflow_params_result = pyplugs.call_task(  # noqa: F841
            f"{_PLUGINS_IMPORT_PATH}.tracking",
            "mlflow",
            "log_parameters",
            parameters=dict(
                entry_point_seed=seed,
                tensorflow_global_seed=tensorflow_global_seed,
                dataset_seed=dataset_seed,
            ),
        )
        # Load the registered model only after TensorFlow is initialized.
        keras_classifier = pyplugs.call_task(
            f"{_CUSTOM_PLUGINS_IMPORT_PATH}.custom_patch_plugins",
            "registry_art",
            "load_wrapped_tensorflow_keras_classifier",
            name=model_name,
            version=model_version,
            clip_values=clip_values,
            imagenet_preprocessing=imagenet_preprocessing,
            upstream_tasks=[init_tensorflow_results],
        )
        patch_dir = pyplugs.call_task(
            f"{_CUSTOM_PLUGINS_IMPORT_PATH}.custom_patch_plugins",
            "attacks_patch",
            "create_adversarial_patches",
            data_dir=testing_dir,
            keras_classifier=keras_classifier,
            adv_data_dir=adv_data_dir,
            image_size=image_size,
            rescale=rescale,
            patch_target=patch_target,
            num_patch=num_patch,
            num_patch_samples=num_patch_gen_samples,
            rotation_max=rotation_max,
            scale_min=scale_min,
            scale_max=scale_max,
            learning_rate=learning_rate,
            max_iter=max_iter,
            patch_shape=patch_shape,
            upstream_tasks=[make_directories_results],
        )
        # Tar up the generated patches and upload them as an MLflow artifact.
        log_evasion_dataset_result = pyplugs.call_task(  # noqa: F841
            f"{_PLUGINS_IMPORT_PATH}.artifacts",
            "mlflow",
            "upload_directory_as_tarball_artifact",
            source_dir=adv_data_dir,
            tarball_filename=adv_tar_name,
            upstream_tasks=[patch_dir],
        )
    return flow
if __name__ == "__main__":
log_level: str = os.getenv("AI_JOB_LOG_LEVEL", default="INFO")
as_json: bool = True if os.getenv("AI_JOB_LOG_AS_JSON") else False
clear_logger_handlers(get_prefect_logger())
attach_stdout_stream_handler(as_json)
set_logging_level(log_level)
configure_structlog()
with plugin_dirs(), StdoutLogStream(as_json), StderrLogStream(as_json):
_ = patch_attack()
|
[
"mlflow.start_run",
"pathlib.Path.cwd",
"mitre.securingai.sdk.utilities.logging.configure_structlog",
"mitre.securingai.sdk.utilities.logging.StderrLogStream",
"click.option",
"prefect.utilities.logging.get_logger",
"mitre.securingai.sdk.utilities.logging.set_logging_level",
"click.command",
"mitre.securingai.pyplugs.call_task",
"prefect.Parameter",
"pathlib.Path",
"mitre.securingai.sdk.utilities.contexts.plugin_dirs",
"prefect.Flow",
"click.Path",
"mitre.securingai.sdk.utilities.logging.StdoutLogStream",
"os.getenv",
"mitre.securingai.sdk.utilities.logging.attach_stdout_stream_handler",
"structlog.stdlib.get_logger"
] |
[((2350, 2379), 'structlog.stdlib.get_logger', 'structlog.stdlib.get_logger', ([], {}), '()\n', (2377, 2379), False, 'import structlog\n'), ((2751, 2766), 'click.command', 'click.command', ([], {}), '()\n', (2764, 2766), False, 'import click\n'), ((2984, 3115), 'click.option', 'click.option', (['"""--image-size"""'], {'type': 'click.STRING', 'callback': '_coerce_comma_separated_ints', 'help': '"""Dimensions for the input images"""'}), "('--image-size', type=click.STRING, callback=\n _coerce_comma_separated_ints, help='Dimensions for the input images')\n", (2996, 3115), False, 'import click\n'), ((3131, 3286), 'click.option', 'click.option', (['"""--adv-tar-name"""'], {'type': 'click.STRING', 'default': '"""adversarial_patch.tar.gz"""', 'help': '"""Name to give to tarfile artifact containing patches"""'}), "('--adv-tar-name', type=click.STRING, default=\n 'adversarial_patch.tar.gz', help=\n 'Name to give to tarfile artifact containing patches')\n", (3143, 3286), False, 'import click\n'), ((3297, 3422), 'click.option', 'click.option', (['"""--adv-data-dir"""'], {'type': 'click.STRING', 'default': '"""adv_patches"""', 'help': '"""Directory for saving adversarial patches"""'}), "('--adv-data-dir', type=click.STRING, default='adv_patches',\n help='Directory for saving adversarial patches')\n", (3309, 3422), False, 'import click\n'), ((3439, 3535), 'click.option', 'click.option', (['"""--model-name"""'], {'type': 'click.STRING', 'help': '"""Name of model to load from registry"""'}), "('--model-name', type=click.STRING, help=\n 'Name of model to load from registry')\n", (3451, 3535), False, 'import click\n'), ((3547, 3649), 'click.option', 'click.option', (['"""--model-version"""'], {'type': 'click.STRING', 'help': '"""Version of model to load from registry"""'}), "('--model-version', type=click.STRING, help=\n 'Version of model to load from registry')\n", (3559, 3649), False, 'import click\n'), ((3661, 3854), 'click.option', 'click.option', (['"""--rotation-max"""'], 
{'type': 'click.FLOAT', 'help': '"""The maximum rotation applied to random patches. The value is expected to be in the range `[0, 180]` """', 'default': '(22.5)'}), "('--rotation-max', type=click.FLOAT, help=\n 'The maximum rotation applied to random patches. The value is expected to be in the range `[0, 180]` '\n , default=22.5)\n", (3673, 3854), False, 'import click\n'), ((3867, 4072), 'click.option', 'click.option', (['"""--scale-min"""'], {'type': 'click.FLOAT', 'help': '"""The minimum scaling applied to random patches. The value should be in the range `[0, 1]`, but less than `scale_max` """', 'default': '(0.1)'}), "('--scale-min', type=click.FLOAT, help=\n 'The minimum scaling applied to random patches. The value should be in the range `[0, 1]`, but less than `scale_max` '\n , default=0.1)\n", (3879, 4072), False, 'import click\n'), ((4085, 4293), 'click.option', 'click.option', (['"""--scale-max"""'], {'type': 'click.FLOAT', 'help': '"""The maximum scaling applied to random patches. The value should be in the range `[0, 1]`, but larger than `scale_min.` """', 'default': '(1.0)'}), "('--scale-max', type=click.FLOAT, help=\n 'The maximum scaling applied to random patches. The value should be in the range `[0, 1]`, but larger than `scale_min.` '\n , default=1.0)\n", (4097, 4293), False, 'import click\n'), ((4306, 4448), 'click.option', 'click.option', (['"""--learning-rate"""'], {'type': 'click.FLOAT', 'help': '"""The learning rate of the patch attack optimization procedure. """', 'default': '(5.0)'}), "('--learning-rate', type=click.FLOAT, help=\n 'The learning rate of the patch attack optimization procedure. ',\n default=5.0)\n", (4318, 4448), False, 'import click\n'), ((4460, 4570), 'click.option', 'click.option', (['"""--max-iter"""'], {'type': 'click.INT', 'help': '""" The number of patch optimization steps. """', 'default': '(500)'}), "('--max-iter', type=click.INT, help=\n ' The number of patch optimization steps. 
', default=500)\n", (4472, 4570), False, 'import click\n'), ((4586, 4763), 'click.option', 'click.option', (['"""--patch-target"""'], {'type': 'click.INT', 'help': '""" The target class index of the generated patch. Negative numbers will generate randomized id labels."""', 'default': '(-1)'}), "('--patch-target', type=click.INT, help=\n ' The target class index of the generated patch. Negative numbers will generate randomized id labels.'\n , default=-1)\n", (4598, 4763), False, 'import click\n'), ((4774, 4924), 'click.option', 'click.option', (['"""--num-patch"""'], {'type': 'click.INT', 'help': '""" The number of patches generated. Each adversarial image recieves one patch. """', 'default': '(1)'}), "('--num-patch', type=click.INT, help=\n ' The number of patches generated. Each adversarial image recieves one patch. '\n , default=1)\n", (4786, 4924), False, 'import click\n'), ((4935, 5074), 'click.option', 'click.option', (['"""--num-patch-gen-samples"""'], {'type': 'click.INT', 'help': '""" The number of sample images used to generate each patch. """', 'default': '(10)'}), "('--num-patch-gen-samples', type=click.INT, help=\n ' The number of sample images used to generate each patch. 
', default=10)\n", (4947, 5074), False, 'import click\n'), ((5090, 5250), 'click.option', 'click.option', (['"""--imagenet-preprocessing"""'], {'type': 'click.BOOL', 'help': '"""If true, initializes model with Imagenet image preprocessing settings."""', 'default': '(False)'}), "('--imagenet-preprocessing', type=click.BOOL, help=\n 'If true, initializes model with Imagenet image preprocessing settings.',\n default=False)\n", (5102, 5250), False, 'import click\n'), ((5262, 5353), 'click.option', 'click.option', (['"""--seed"""'], {'type': 'click.INT', 'help': '"""Set the entry point rng seed"""', 'default': '(-1)'}), "('--seed', type=click.INT, help='Set the entry point rng seed',\n default=-1)\n", (5274, 5353), False, 'import click\n'), ((11625, 11670), 'os.getenv', 'os.getenv', (['"""AI_JOB_LOG_LEVEL"""'], {'default': '"""INFO"""'}), "('AI_JOB_LOG_LEVEL', default='INFO')\n", (11634, 11670), False, 'import os\n'), ((11795, 11832), 'mitre.securingai.sdk.utilities.logging.attach_stdout_stream_handler', 'attach_stdout_stream_handler', (['as_json'], {}), '(as_json)\n', (11823, 11832), False, 'from mitre.securingai.sdk.utilities.logging import StderrLogStream, StdoutLogStream, attach_stdout_stream_handler, clear_logger_handlers, configure_structlog, set_logging_level\n'), ((11837, 11865), 'mitre.securingai.sdk.utilities.logging.set_logging_level', 'set_logging_level', (['log_level'], {}), '(log_level)\n', (11854, 11865), False, 'from mitre.securingai.sdk.utilities.logging import StderrLogStream, StdoutLogStream, attach_stdout_stream_handler, clear_logger_handlers, configure_structlog, set_logging_level\n'), ((11870, 11891), 'mitre.securingai.sdk.utilities.logging.configure_structlog', 'configure_structlog', ([], {}), '()\n', (11889, 11891), False, 'from mitre.securingai.sdk.utilities.logging import StderrLogStream, StdoutLogStream, attach_stdout_stream_handler, clear_logger_handlers, configure_structlog, set_logging_level\n'), ((6516, 6534), 'mlflow.start_run', 
'mlflow.start_run', ([], {}), '()\n', (6532, 6534), False, 'import mlflow\n'), ((2809, 2902), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(False)', 'dir_okay': '(True)', 'resolve_path': '(True)', 'readable': '(True)'}), '(exists=True, file_okay=False, dir_okay=True, resolve_path=True,\n readable=True)\n', (2819, 2902), False, 'import click\n'), ((7570, 7598), 'prefect.Flow', 'Flow', (['"""Fast Gradient Method"""'], {}), "('Fast Gradient Method')\n", (7574, 7598), False, 'from prefect import Flow, Parameter\n'), ((8873, 8959), 'mitre.securingai.pyplugs.call_task', 'pyplugs.call_task', (['f"""{_PLUGINS_IMPORT_PATH}.random"""', '"""rng"""', '"""init_rng"""'], {'seed': 'seed'}), "(f'{_PLUGINS_IMPORT_PATH}.random', 'rng', 'init_rng', seed\n =seed)\n", (8890, 8959), False, 'from mitre.securingai import pyplugs\n'), ((9010, 9107), 'mitre.securingai.pyplugs.call_task', 'pyplugs.call_task', (['f"""{_PLUGINS_IMPORT_PATH}.random"""', '"""sample"""', '"""draw_random_integer"""'], {'rng': 'rng'}), "(f'{_PLUGINS_IMPORT_PATH}.random', 'sample',\n 'draw_random_integer', rng=rng)\n", (9027, 9107), False, 'from mitre.securingai import pyplugs\n'), ((9149, 9246), 'mitre.securingai.pyplugs.call_task', 'pyplugs.call_task', (['f"""{_PLUGINS_IMPORT_PATH}.random"""', '"""sample"""', '"""draw_random_integer"""'], {'rng': 'rng'}), "(f'{_PLUGINS_IMPORT_PATH}.random', 'sample',\n 'draw_random_integer', rng=rng)\n", (9166, 9246), False, 'from mitre.securingai import pyplugs\n'), ((9299, 9425), 'mitre.securingai.pyplugs.call_task', 'pyplugs.call_task', (['f"""{_PLUGINS_IMPORT_PATH}.backend_configs"""', '"""tensorflow"""', '"""init_tensorflow"""'], {'seed': 'tensorflow_global_seed'}), "(f'{_PLUGINS_IMPORT_PATH}.backend_configs', 'tensorflow',\n 'init_tensorflow', seed=tensorflow_global_seed)\n", (9316, 9425), False, 'from mitre.securingai import pyplugs\n'), ((9516, 9624), 'mitre.securingai.pyplugs.call_task', 'pyplugs.call_task', 
(['f"""{_PLUGINS_IMPORT_PATH}.artifacts"""', '"""utils"""', '"""make_directories"""'], {'dirs': '[adv_data_dir]'}), "(f'{_PLUGINS_IMPORT_PATH}.artifacts', 'utils',\n 'make_directories', dirs=[adv_data_dir])\n", (9533, 9624), False, 'from mitre.securingai import pyplugs\n'), ((10076, 10380), 'mitre.securingai.pyplugs.call_task', 'pyplugs.call_task', (['f"""{_CUSTOM_PLUGINS_IMPORT_PATH}.custom_patch_plugins"""', '"""registry_art"""', '"""load_wrapped_tensorflow_keras_classifier"""'], {'name': 'model_name', 'version': 'model_version', 'clip_values': 'clip_values', 'imagenet_preprocessing': 'imagenet_preprocessing', 'upstream_tasks': '[init_tensorflow_results]'}), "(f'{_CUSTOM_PLUGINS_IMPORT_PATH}.custom_patch_plugins',\n 'registry_art', 'load_wrapped_tensorflow_keras_classifier', name=\n model_name, version=model_version, clip_values=clip_values,\n imagenet_preprocessing=imagenet_preprocessing, upstream_tasks=[\n init_tensorflow_results])\n", (10093, 10380), False, 'from mitre.securingai import pyplugs\n'), ((10490, 11035), 'mitre.securingai.pyplugs.call_task', 'pyplugs.call_task', (['f"""{_CUSTOM_PLUGINS_IMPORT_PATH}.custom_patch_plugins"""', '"""attacks_patch"""', '"""create_adversarial_patches"""'], {'data_dir': 'testing_dir', 'keras_classifier': 'keras_classifier', 'adv_data_dir': 'adv_data_dir', 'image_size': 'image_size', 'rescale': 'rescale', 'patch_target': 'patch_target', 'num_patch': 'num_patch', 'num_patch_samples': 'num_patch_gen_samples', 'rotation_max': 'rotation_max', 'scale_min': 'scale_min', 'scale_max': 'scale_max', 'learning_rate': 'learning_rate', 'max_iter': 'max_iter', 'patch_shape': 'patch_shape', 'upstream_tasks': '[make_directories_results]'}), "(f'{_CUSTOM_PLUGINS_IMPORT_PATH}.custom_patch_plugins',\n 'attacks_patch', 'create_adversarial_patches', data_dir=testing_dir,\n keras_classifier=keras_classifier, adv_data_dir=adv_data_dir,\n image_size=image_size, rescale=rescale, patch_target=patch_target,\n num_patch=num_patch, 
num_patch_samples=num_patch_gen_samples,\n rotation_max=rotation_max, scale_min=scale_min, scale_max=scale_max,\n learning_rate=learning_rate, max_iter=max_iter, patch_shape=patch_shape,\n upstream_tasks=[make_directories_results])\n", (10507, 11035), False, 'from mitre.securingai import pyplugs\n'), ((11272, 11468), 'mitre.securingai.pyplugs.call_task', 'pyplugs.call_task', (['f"""{_PLUGINS_IMPORT_PATH}.artifacts"""', '"""mlflow"""', '"""upload_directory_as_tarball_artifact"""'], {'source_dir': 'adv_data_dir', 'tarball_filename': 'adv_tar_name', 'upstream_tasks': '[patch_dir]'}), "(f'{_PLUGINS_IMPORT_PATH}.artifacts', 'mlflow',\n 'upload_directory_as_tarball_artifact', source_dir=adv_data_dir,\n tarball_filename=adv_tar_name, upstream_tasks=[patch_dir])\n", (11289, 11468), False, 'from mitre.securingai import pyplugs\n'), ((11699, 11730), 'os.getenv', 'os.getenv', (['"""AI_JOB_LOG_AS_JSON"""'], {}), "('AI_JOB_LOG_AS_JSON')\n", (11708, 11730), False, 'import os\n'), ((11769, 11789), 'prefect.utilities.logging.get_logger', 'get_prefect_logger', ([], {}), '()\n', (11787, 11789), True, 'from prefect.utilities.logging import get_logger as get_prefect_logger\n'), ((11902, 11915), 'mitre.securingai.sdk.utilities.contexts.plugin_dirs', 'plugin_dirs', ([], {}), '()\n', (11913, 11915), False, 'from mitre.securingai.sdk.utilities.contexts import plugin_dirs\n'), ((11917, 11941), 'mitre.securingai.sdk.utilities.logging.StdoutLogStream', 'StdoutLogStream', (['as_json'], {}), '(as_json)\n', (11932, 11941), False, 'from mitre.securingai.sdk.utilities.logging import StderrLogStream, StdoutLogStream, attach_stdout_stream_handler, clear_logger_handlers, configure_structlog, set_logging_level\n'), ((11943, 11967), 'mitre.securingai.sdk.utilities.logging.StderrLogStream', 'StderrLogStream', (['as_json'], {}), '(as_json)\n', (11958, 11967), False, 'from mitre.securingai.sdk.utilities.logging import StderrLogStream, StdoutLogStream, attach_stdout_stream_handler, clear_logger_handlers, 
configure_structlog, set_logging_level\n'), ((8126, 8150), 'prefect.Parameter', 'Parameter', (['"""testing_dir"""'], {}), "('testing_dir')\n", (8135, 8150), False, 'from prefect import Flow, Parameter\n'), ((8164, 8187), 'prefect.Parameter', 'Parameter', (['"""image_size"""'], {}), "('image_size')\n", (8173, 8187), False, 'from prefect import Flow, Parameter\n'), ((8201, 8221), 'prefect.Parameter', 'Parameter', (['"""rescale"""'], {}), "('rescale')\n", (8210, 8221), False, 'from prefect import Flow, Parameter\n'), ((8235, 8259), 'prefect.Parameter', 'Parameter', (['"""clip_values"""'], {}), "('clip_values')\n", (8244, 8259), False, 'from prefect import Flow, Parameter\n'), ((8273, 8298), 'prefect.Parameter', 'Parameter', (['"""adv_tar_name"""'], {}), "('adv_tar_name')\n", (8282, 8298), False, 'from prefect import Flow, Parameter\n'), ((8312, 8337), 'prefect.Parameter', 'Parameter', (['"""adv_data_dir"""'], {}), "('adv_data_dir')\n", (8321, 8337), False, 'from prefect import Flow, Parameter\n'), ((8351, 8374), 'prefect.Parameter', 'Parameter', (['"""model_name"""'], {}), "('model_name')\n", (8360, 8374), False, 'from prefect import Flow, Parameter\n'), ((8388, 8414), 'prefect.Parameter', 'Parameter', (['"""model_version"""'], {}), "('model_version')\n", (8397, 8414), False, 'from prefect import Flow, Parameter\n'), ((8428, 8453), 'prefect.Parameter', 'Parameter', (['"""rotation_max"""'], {}), "('rotation_max')\n", (8437, 8453), False, 'from prefect import Flow, Parameter\n'), ((8467, 8489), 'prefect.Parameter', 'Parameter', (['"""scale_min"""'], {}), "('scale_min')\n", (8476, 8489), False, 'from prefect import Flow, Parameter\n'), ((8503, 8525), 'prefect.Parameter', 'Parameter', (['"""scale_max"""'], {}), "('scale_max')\n", (8512, 8525), False, 'from prefect import Flow, Parameter\n'), ((8539, 8565), 'prefect.Parameter', 'Parameter', (['"""learning_rate"""'], {}), "('learning_rate')\n", (8548, 8565), False, 'from prefect import Flow, Parameter\n'), ((8579, 8600), 
'prefect.Parameter', 'Parameter', (['"""max_iter"""'], {}), "('max_iter')\n", (8588, 8600), False, 'from prefect import Flow, Parameter\n'), ((8614, 8639), 'prefect.Parameter', 'Parameter', (['"""patch_target"""'], {}), "('patch_target')\n", (8623, 8639), False, 'from prefect import Flow, Parameter\n'), ((8653, 8675), 'prefect.Parameter', 'Parameter', (['"""num_patch"""'], {}), "('num_patch')\n", (8662, 8675), False, 'from prefect import Flow, Parameter\n'), ((8689, 8723), 'prefect.Parameter', 'Parameter', (['"""num_patch_gen_samples"""'], {}), "('num_patch_gen_samples')\n", (8698, 8723), False, 'from prefect import Flow, Parameter\n'), ((8737, 8772), 'prefect.Parameter', 'Parameter', (['"""imagenet_preprocessing"""'], {}), "('imagenet_preprocessing')\n", (8746, 8772), False, 'from prefect import Flow, Parameter\n'), ((8786, 8810), 'prefect.Parameter', 'Parameter', (['"""patch_shape"""'], {}), "('patch_shape')\n", (8795, 8810), False, 'from prefect import Flow, Parameter\n'), ((8824, 8841), 'prefect.Parameter', 'Parameter', (['"""seed"""'], {}), "('seed')\n", (8833, 8841), False, 'from prefect import Flow, Parameter\n'), ((6690, 6704), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (6694, 6704), False, 'from pathlib import Path\n'), ((6892, 6902), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (6900, 6902), False, 'from pathlib import Path\n')]
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import (InputTextMessageContent, InlineQueryResultLocation, InlineKeyboardButton,
InlineQueryResultVoice, InlineKeyboardMarkup)
@pytest.fixture(scope='class')
def inline_query_result_location():
    """Build a class-scoped ``InlineQueryResultLocation`` from the test-class constants."""
    tc = TestInlineQueryResultLocation
    return InlineQueryResultLocation(
        tc.id,
        tc.latitude,
        tc.longitude,
        tc.title,
        live_period=tc.live_period,
        thumb_url=tc.thumb_url,
        thumb_width=tc.thumb_width,
        thumb_height=tc.thumb_height,
        input_message_content=tc.input_message_content,
        reply_markup=tc.reply_markup)
class TestInlineQueryResultLocation(object):
    """Tests for :class:`telegram.InlineQueryResultLocation`."""
    # Expected attribute values shared by the fixture and the assertions.
    id = 'id'
    type = 'location'
    latitude = 0.0
    longitude = 1.0
    title = 'title'
    live_period = 70
    thumb_url = 'thumb url'
    thumb_width = 10
    thumb_height = 15
    input_message_content = InputTextMessageContent('input_message_content')
    reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('reply_markup')]])
    def test_expected_values(self, inline_query_result_location):
        """Every constructor argument must land on the matching attribute."""
        result = inline_query_result_location
        assert result.id == self.id
        assert result.type == self.type
        assert result.latitude == self.latitude
        assert result.longitude == self.longitude
        assert result.title == self.title
        assert result.live_period == self.live_period
        assert result.thumb_url == self.thumb_url
        assert result.thumb_width == self.thumb_width
        assert result.thumb_height == self.thumb_height
        assert (result.input_message_content.to_dict()
                == self.input_message_content.to_dict())
        assert result.reply_markup.to_dict() == self.reply_markup.to_dict()
    def test_to_dict(self, inline_query_result_location):
        """``to_dict`` must mirror every attribute under its own key."""
        result_dict = inline_query_result_location.to_dict()
        assert isinstance(result_dict, dict)
        for attr in ('id', 'type', 'latitude', 'longitude', 'title',
                     'live_period', 'thumb_url', 'thumb_width', 'thumb_height'):
            assert result_dict[attr] == getattr(inline_query_result_location, attr)
        assert (result_dict['input_message_content']
                == inline_query_result_location.input_message_content.to_dict())
        assert (result_dict['reply_markup']
                == inline_query_result_location.reply_markup.to_dict())
    def test_equality(self):
        """Equality/hash must ignore some fields but respect id and type."""
        a = InlineQueryResultLocation(self.id, self.longitude, self.latitude, self.title)
        b = InlineQueryResultLocation(self.id, self.longitude, self.latitude, self.title)
        c = InlineQueryResultLocation(self.id, 0, self.latitude, self.title)
        d = InlineQueryResultLocation('', self.longitude, self.latitude, self.title)
        e = InlineQueryResultVoice(self.id, '', '')
        assert a == b and hash(a) == hash(b)
        assert a is not b
        assert a == c and hash(a) == hash(c)
        assert a != d and hash(a) != hash(d)
        assert a != e and hash(a) != hash(e)
|
[
"telegram.InputTextMessageContent",
"telegram.InlineQueryResultVoice",
"telegram.InlineKeyboardButton",
"pytest.fixture",
"telegram.InlineQueryResultLocation"
] |
[((954, 983), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (968, 983), False, 'import pytest\n'), ((1031, 1597), 'telegram.InlineQueryResultLocation', 'InlineQueryResultLocation', (['TestInlineQueryResultLocation.id', 'TestInlineQueryResultLocation.latitude', 'TestInlineQueryResultLocation.longitude', 'TestInlineQueryResultLocation.title'], {'live_period': 'TestInlineQueryResultLocation.live_period', 'thumb_url': 'TestInlineQueryResultLocation.thumb_url', 'thumb_width': 'TestInlineQueryResultLocation.thumb_width', 'thumb_height': 'TestInlineQueryResultLocation.thumb_height', 'input_message_content': 'TestInlineQueryResultLocation.input_message_content', 'reply_markup': 'TestInlineQueryResultLocation.reply_markup'}), '(TestInlineQueryResultLocation.id,\n TestInlineQueryResultLocation.latitude, TestInlineQueryResultLocation.\n longitude, TestInlineQueryResultLocation.title, live_period=\n TestInlineQueryResultLocation.live_period, thumb_url=\n TestInlineQueryResultLocation.thumb_url, thumb_width=\n TestInlineQueryResultLocation.thumb_width, thumb_height=\n TestInlineQueryResultLocation.thumb_height, input_message_content=\n TestInlineQueryResultLocation.input_message_content, reply_markup=\n TestInlineQueryResultLocation.reply_markup)\n', (1056, 1597), False, 'from telegram import InputTextMessageContent, InlineQueryResultLocation, InlineKeyboardButton, InlineQueryResultVoice, InlineKeyboardMarkup\n'), ((1902, 1950), 'telegram.InputTextMessageContent', 'InputTextMessageContent', (['"""input_message_content"""'], {}), "('input_message_content')\n", (1925, 1950), False, 'from telegram import InputTextMessageContent, InlineQueryResultLocation, InlineKeyboardButton, InlineQueryResultVoice, InlineKeyboardMarkup\n'), ((4530, 4607), 'telegram.InlineQueryResultLocation', 'InlineQueryResultLocation', (['self.id', 'self.longitude', 'self.latitude', 'self.title'], {}), '(self.id, self.longitude, self.latitude, self.title)\n', (4555, 
4607), False, 'from telegram import InputTextMessageContent, InlineQueryResultLocation, InlineKeyboardButton, InlineQueryResultVoice, InlineKeyboardMarkup\n'), ((4620, 4697), 'telegram.InlineQueryResultLocation', 'InlineQueryResultLocation', (['self.id', 'self.longitude', 'self.latitude', 'self.title'], {}), '(self.id, self.longitude, self.latitude, self.title)\n', (4645, 4697), False, 'from telegram import InputTextMessageContent, InlineQueryResultLocation, InlineKeyboardButton, InlineQueryResultVoice, InlineKeyboardMarkup\n'), ((4710, 4774), 'telegram.InlineQueryResultLocation', 'InlineQueryResultLocation', (['self.id', '(0)', 'self.latitude', 'self.title'], {}), '(self.id, 0, self.latitude, self.title)\n', (4735, 4774), False, 'from telegram import InputTextMessageContent, InlineQueryResultLocation, InlineKeyboardButton, InlineQueryResultVoice, InlineKeyboardMarkup\n'), ((4787, 4859), 'telegram.InlineQueryResultLocation', 'InlineQueryResultLocation', (['""""""', 'self.longitude', 'self.latitude', 'self.title'], {}), "('', self.longitude, self.latitude, self.title)\n", (4812, 4859), False, 'from telegram import InputTextMessageContent, InlineQueryResultLocation, InlineKeyboardButton, InlineQueryResultVoice, InlineKeyboardMarkup\n'), ((4872, 4911), 'telegram.InlineQueryResultVoice', 'InlineQueryResultVoice', (['self.id', '""""""', '""""""'], {}), "(self.id, '', '')\n", (4894, 4911), False, 'from telegram import InputTextMessageContent, InlineQueryResultLocation, InlineKeyboardButton, InlineQueryResultVoice, InlineKeyboardMarkup\n'), ((1993, 2029), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""reply_markup"""'], {}), "('reply_markup')\n", (2013, 2029), False, 'from telegram import InputTextMessageContent, InlineQueryResultLocation, InlineKeyboardButton, InlineQueryResultVoice, InlineKeyboardMarkup\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME>
# https://www.tu-ilmenau.de/it-ems/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Setup script for installation of chefkoch package
Usecases:
- install package system-wide on your machine (needs su privileges)
EXAMPLE: 'python setup.py install'
- install package for your local user only (no privileges needed)
EXAMPLE: 'python setup.py install --user'
'''
# import modules
import platform
import sys
import os
import re
import subprocess
from distutils import sysconfig
def WARNING(string):
    """Print *string* to stdout, prefixed with an ANSI-red 'WARNING:' tag."""
    print("\033[91mWARNING:\033[0m {0}".format(string))
def INFO(string):
    """Print *string* to stdout, prefixed with an ANSI-cyan 'INFO:' tag."""
    print("\033[96mINFO:\033[0m {0}".format(string))
# load setup and extensions from setuptools. If that fails, try distutils
try:
    from setuptools import setup, Extension
except ImportError:
    # setuptools is mandatory for this setup script; warn loudly and abort.
    WARNING("Could not import setuptools.")
    raise
# global package constants
packageName = 'chefkoch'
packageVersion = '0.0' # provide a version tag as fallback
# fullVersion may additionally carry git metadata (commit hash, -dirty flag);
# both are overwritten by getCurrentVersion() below.
fullVersion = packageVersion
# path of the generated version module inside the package
strVersionFile = "%s/version.py" %(packageName)
# template written to strVersionFile; '%s' is filled with fullVersion
VERSION_PY = """
# -*- coding: utf-8 -*-
# This file carries the module's version information which will be updated
# during execution of the installation script, setup.py. Distribution tarballs
# contain a pre-generated copy of this file.
__version__ = '%s'
"""
##############################################################################
### function and class declaration section. DO NOT PUT SCRIPT CODE IN BETWEEN
##############################################################################
def getCurrentVersion():
    '''
    Determine package version and put it in the signatures.

    Resolution order:
      1. a manual override read from a local `.version` file,
      2. the output of `git describe` when run inside a git checkout,
      3. otherwise the hard-coded fallback version is kept.

    Updates the module-level globals `fullVersion` (full tag string) and
    `packageVersion` (leading numeric part extracted from `fullVersion`).
    '''
    global packageVersion
    global fullVersion
    # check if there is a manual version override
    if os.path.isfile(".version"):
        with open(".version", "r") as f:
            stdout = f.read().split('\n')[0]
        print("Override of version string to '%s' (from .version file )" % (
            stdout))
        fullVersion = stdout
    else:
        # check if source directory is a git repository
        if not os.path.exists(".git"):
            print(("Installing from something other than a Git repository; " +
                   "Version file '%s' untouched.") % (strVersionFile))
            return
        # fetch current tag and commit description from git
        try:
            p = subprocess.Popen(
                ["git", "describe", "--tags", "--dirty", "--always"],
                stdout=subprocess.PIPE
            )
        except EnvironmentError:
            print("Not a git repository; Version file '%s' not touched." % (
                strVersionFile))
            return
        stdout = p.communicate()[0].strip()
        # Popen.communicate() returns bytes on Python 3; decode to text.
        # Fixed: the original tested `stdout is not str`, which compares the
        # value against the *type object* by identity and is always True.
        if not isinstance(stdout, str):
            stdout = stdout.decode()
        if p.returncode != 0:
            print(("Unable to fetch version from git repository; " +
                   "leaving version file '%s' untouched.") % (strVersionFile))
            return
        fullVersion = stdout
    # output results to version string, extract package version number from
    # `fullVersion` as this string might also contain additional tags (e.g.
    # commit hashes or `-dirty` flags from git tags)
    versionMatch = re.match(r"[.+\d+]+\d*[abr]\d*", fullVersion)
    if versionMatch:
        packageVersion = versionMatch.group(0)
        print("Fetched package version number from git tag (%s)." % (
            packageVersion))
# determine requirements for install and setup
def checkRequirement(lstRequirements, importName, requirementName):
    """Conditionally append ``requirementName`` to ``lstRequirements``.

    A requirement is recorded when the package cannot be imported at all,
    or -- even though it is importable -- when a wheel is being built, so
    that the wheel metadata always carries the full dependency list.
    Installed packages are otherwise left alone to avoid forced upgrades
    that might break dependencies.
    """
    try:
        __import__(importName)
    except ImportError:
        # not installed at all: definitely required
        needed = True
    else:
        # importable: only pin it when producing a distribution wheel
        needed = 'bdist_wheel' in sys.argv[1:]
    if needed:
        lstRequirements.append(requirementName)
def doc_opts():
    """Return the ``cmdclass`` entry implementing the sphinx doc target.

    Yields an empty dict when sphinx is not installed, so the 'build_doc'
    command is silently unavailable instead of crashing setup().
    """
    try:
        from sphinx.setup_command import BuildDoc
    except ImportError:
        # no sphinx available: expose no doc-building command
        return {}
    else:
        class OwnDoc(BuildDoc):
            def __init__(self, *args, **kwargs):
                super(OwnDoc, self).__init__(*args, **kwargs)
        return OwnDoc
##############################################################################
### The actual script. KEEP THE `import filter` ALIVE AT ALL TIMES
##############################################################################
if __name__ == '__main__':
    # get version from git and update chefkoch/__init__.py accordingly
    getCurrentVersion()
    # make sure there exists a version.py file in the project;
    # VERSION_PY is the template, fullVersion was just determined above
    with open(strVersionFile, "w") as f:
        f.write(VERSION_PY % (fullVersion))
    print("Set %s to '%s'" % (strVersionFile, fullVersion))
    # get the long description from the README file.
    # CAUTION: Python2/3 utf encoding shit calls needs some adjustments
    fileName = os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        'README.md'
    )
    # Python 2's open() has no `encoding` keyword, hence the version branch
    f = (open(fileName, 'r') if sys.version_info < (3, 0)
         else open(fileName, 'r', encoding='utf-8'))
    longDescription = f.read()
    f.close()
    print("Building %s v%s" % (
        packageName,
        packageVersion
    ))
    # check if all requirements are met prior to actually calling setup()
    setupRequires = []
    installRequires = []
    checkRequirement(setupRequires, 'setuptools', 'setuptools>=18.0')
    checkRequirement(installRequires, 'dask', 'dask>=1.0.0')
    checkRequirement(installRequires, 'six', 'six')
    checkRequirement(installRequires, 'dask-jobqueue', 'dask-jobqueue>=0.4.1')
    print("Requirements for setup: %s" % (setupRequires))
    print("Requirements for install: %s" % (installRequires))
    # everything's set. Fire in the hole.
    setup(
        name=packageName,
        version=packageVersion,
        description=('A compute cluster cuisine for distributed scientific ' +
                     'computing in python'),
        long_description=longDescription,
        author='<NAME>, EMS group TU Ilmenau',
        author_email='<EMAIL>',
        url='https://ems-tu-ilmenau.github.io/ćhefkoch/',
        license='Apache Software License',
        classifiers=[
            'Development Status :: 1 - Planning',
            'Intended Audience :: Developers',
            'Intended Audience :: Education',
            'Intended Audience :: Science/Research',
            'Intended Audience :: Information Technology',
            'License :: OSI Approved :: Apache Software License',
            'Natural Language :: English',
            'Operating System :: Microsoft :: Windows',
            'Operating System :: POSIX :: Linux',
            'Operating System :: MacOS :: MacOS X',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Topic :: Scientific/Engineering',
            'Topic :: Software Development',
            'Topic :: System :: Distributed Computing'
        ],
        keywords=('compute cluster HPC LSF dask parallel computing ' +
                  'scheduler framework'),
        setup_requires=setupRequires,
        install_requires=installRequires,
        packages=[
            'chefkoch'
        ],
        # 'build_doc' target only exists when sphinx is installed (doc_opts)
        cmdclass={'build_doc': doc_opts()},
        command_options={
            'build_doc': {
                'project': ('setup.py', packageName),
                'version': ('setup.py', packageVersion),
                'release': ('setup.py', fullVersion),
                'copyright': ('setup.py', '2019, ' + packageName)
            }}
    )
|
[
"subprocess.Popen",
"os.path.dirname",
"os.path.exists",
"re.match",
"os.path.isfile"
] |
[((2334, 2360), 'os.path.isfile', 'os.path.isfile', (['""".version"""'], {}), "('.version')\n", (2348, 2360), False, 'import os\n'), ((3809, 3856), 're.match', 're.match', (['"""[.+\\\\d+]+\\\\d*[abr]\\\\d*"""', 'fullVersion'], {}), "('[.+\\\\d+]+\\\\d*[abr]\\\\d*', fullVersion)\n", (3817, 3856), False, 'import re\n'), ((2658, 2680), 'os.path.exists', 'os.path.exists', (['""".git"""'], {}), "('.git')\n", (2672, 2680), False, 'import os\n'), ((2941, 3039), 'subprocess.Popen', 'subprocess.Popen', (["['git', 'describe', '--tags', '--dirty', '--always']"], {'stdout': 'subprocess.PIPE'}), "(['git', 'describe', '--tags', '--dirty', '--always'],\n stdout=subprocess.PIPE)\n", (2957, 3039), False, 'import subprocess\n'), ((5784, 5809), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5799, 5809), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# <NAME>, <EMAIL>.
# All rights reserved
#
# db/sqlpsql.py
#
from .sqlnone import SqlNone
from ppmessage.core.constant import SQL
from ppmessage.core.singleton import singleton
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy import create_engine
import logging
import traceback
class SqlInstance(SqlNone):
    """PostgreSQL-backed SQL instance configured from a db-config dict."""

    def __init__(self, _db_config):
        """Read connection settings from ``_db_config`` and init the base.

        Missing keys yield ``None`` via ``dict.get``.
        """
        self.pgsql_config = _db_config
        self.db_name = self.pgsql_config.get("db_name")
        self.db_pass = self.pgsql_config.get("db_pass")
        self.db_user = self.pgsql_config.get("db_user")
        self.db_host = self.pgsql_config.get("db_host")
        self.db_port = self.pgsql_config.get("db_port")
        super(SqlInstance, self).__init__()
        return

    def name(self):
        """Return the SQL backend identifier (PostgreSQL)."""
        return SQL.PGSQL

    def createEngine(self):
        """Lazily create and return the SQLAlchemy engine for this database.

        The engine is cached on ``self.dbengine`` (presumably initialised
        to None by the ``SqlNone`` base class -- confirm) so repeated calls
        reuse one connection pool.
        """
        db_string = "postgresql+psycopg2://%s:%s@%s:%s/%s" % (
            self.db_user,
            self.db_pass,
            self.db_host,
            self.db_port,
            self.db_name
        )
        # Fixed: use "is None" instead of "== None" (PEP 8 idiom); also
        # avoids invoking __eq__ on an already-created engine object.
        if self.dbengine is None:
            self.dbengine = create_engine(db_string, echo_pool=True)
        # it will create a thread local session for every single web request
        return self.dbengine
|
[
"sqlalchemy.create_engine"
] |
[((1214, 1254), 'sqlalchemy.create_engine', 'create_engine', (['db_string'], {'echo_pool': '(True)'}), '(db_string, echo_pool=True)\n', (1227, 1254), False, 'from sqlalchemy import create_engine\n')]
|
# see https://stackoverflow.com/a/40749716
from xml.dom.minidom import parseString

html_string = """
<!DOCTYPE html>
<html><head><title>title</title></head><body><p>test</p></body></html>
"""

# pull the text node out of the document's first <p> element
doc = parseString(html_string)
paragraph = doc.getElementsByTagName("p")[0]
content = paragraph.childNodes[0].data
print(content)
# NOTE: parseString is a strict XML parser -- it raises on common HTML
# entities such as &nbsp; or &reg; that are not defined in plain XML.
|
[
"xml.dom.minidom.parseString"
] |
[((257, 281), 'xml.dom.minidom.parseString', 'parseString', (['html_string'], {}), '(html_string)\n', (268, 281), False, 'from xml.dom.minidom import parseString\n')]
|
import joblib
import numpy as np
import pandas as pd
# fixed seed so the validation split is reproducible
np.random.seed(0)
# preprocessed playlist/track frames produced by an earlier pipeline step
df_tracks = pd.read_hdf('df_data/df_tracks.hdf')
df_playlists = pd.read_hdf('df_data/df_playlists.hdf')
df_playlists_info = pd.read_hdf('df_data/df_playlists_info.hdf')
df_playlists_test = pd.read_hdf('df_data/df_playlists_test.hdf')
df_playlists_test_info = pd.read_hdf('df_data/df_playlists_test_info.hdf')
# map each playlist length to the array of training pids with that length
num_tracks = df_playlists_info.groupby('num_tracks').pid.apply(np.array)
# for every length present in the test set, sample twice as many training
# playlists of the same length (they feed two validation sets below)
validation_playlists = {}
for i, j in df_playlists_test_info.num_tracks.value_counts().reset_index().values:
    validation_playlists[i] = np.random.choice(num_tracks.loc[i], 2 * j, replace=False)
# split the sampled pids into two disjoint validation sets, stratified by
# the number of seed samples (0/1/5/10/25/100) used in the challenge
val1_playlist = {}
val2_playlist = {}
for i in [0, 1, 5, 10, 25, 100]:
    val1_playlist[i] = []
    val2_playlist[i] = []
    value_counts = df_playlists_test_info.query('num_samples==@i').num_tracks.value_counts()
    for j, k in value_counts.reset_index().values:
        # consume k pids for val1, then the next k for val2
        val1_playlist[i] += list(validation_playlists[j][:k])
        validation_playlists[j] = validation_playlists[j][k:]
        val2_playlist[i] += list(validation_playlists[j][:k])
        validation_playlists[j] = validation_playlists[j][k:]
# build row masks: for num_samples == 0 the whole playlist is held out,
# otherwise only the tracks after the first i seed positions
val1_index = df_playlists.pid.isin(val1_playlist[0])
val2_index = df_playlists.pid.isin(val2_playlist[0])
for i in [1, 5, 10, 25, 100]:
    val1_index = val1_index | (df_playlists.pid.isin(val1_playlist[i]) & (df_playlists.pos >= i))
    val2_index = val2_index | (df_playlists.pid.isin(val2_playlist[i]) & (df_playlists.pos >= i))
train = df_playlists[~(val1_index | val2_index)]
val1 = df_playlists[val1_index]
val2 = df_playlists[val2_index]
# flat arrays of all pids belonging to each validation set
val1_pids = np.hstack([val1_playlist[i] for i in val1_playlist])
val2_pids = np.hstack([val2_playlist[i] for i in val2_playlist])
# the official test playlists are always part of training
train = pd.concat([train, df_playlists_test])
# persist the split for the downstream model-training steps
train.to_hdf('df_data/train.hdf', key='abc')
val1.to_hdf('df_data/val1.hdf', key='abc')
val2.to_hdf('df_data/val2.hdf', key='abc')
joblib.dump(val1_pids, 'df_data/val1_pids.pkl')
joblib.dump(val2_pids, 'df_data/val2_pids.pkl')
|
[
"numpy.random.seed",
"pandas.read_hdf",
"joblib.dump",
"numpy.hstack",
"numpy.random.choice",
"pandas.concat"
] |
[((54, 71), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (68, 71), True, 'import numpy as np\n'), ((85, 121), 'pandas.read_hdf', 'pd.read_hdf', (['"""df_data/df_tracks.hdf"""'], {}), "('df_data/df_tracks.hdf')\n", (96, 121), True, 'import pandas as pd\n'), ((137, 176), 'pandas.read_hdf', 'pd.read_hdf', (['"""df_data/df_playlists.hdf"""'], {}), "('df_data/df_playlists.hdf')\n", (148, 176), True, 'import pandas as pd\n'), ((197, 241), 'pandas.read_hdf', 'pd.read_hdf', (['"""df_data/df_playlists_info.hdf"""'], {}), "('df_data/df_playlists_info.hdf')\n", (208, 241), True, 'import pandas as pd\n'), ((262, 306), 'pandas.read_hdf', 'pd.read_hdf', (['"""df_data/df_playlists_test.hdf"""'], {}), "('df_data/df_playlists_test.hdf')\n", (273, 306), True, 'import pandas as pd\n'), ((332, 381), 'pandas.read_hdf', 'pd.read_hdf', (['"""df_data/df_playlists_test_info.hdf"""'], {}), "('df_data/df_playlists_test_info.hdf')\n", (343, 381), True, 'import pandas as pd\n'), ((1636, 1688), 'numpy.hstack', 'np.hstack', (['[val1_playlist[i] for i in val1_playlist]'], {}), '([val1_playlist[i] for i in val1_playlist])\n', (1645, 1688), True, 'import numpy as np\n'), ((1701, 1753), 'numpy.hstack', 'np.hstack', (['[val2_playlist[i] for i in val2_playlist]'], {}), '([val2_playlist[i] for i in val2_playlist])\n', (1710, 1753), True, 'import numpy as np\n'), ((1763, 1800), 'pandas.concat', 'pd.concat', (['[train, df_playlists_test]'], {}), '([train, df_playlists_test])\n', (1772, 1800), True, 'import pandas as pd\n'), ((1933, 1980), 'joblib.dump', 'joblib.dump', (['val1_pids', '"""df_data/val1_pids.pkl"""'], {}), "(val1_pids, 'df_data/val1_pids.pkl')\n", (1944, 1980), False, 'import joblib\n'), ((1981, 2028), 'joblib.dump', 'joblib.dump', (['val2_pids', '"""df_data/val2_pids.pkl"""'], {}), "(val2_pids, 'df_data/val2_pids.pkl')\n", (1992, 2028), False, 'import joblib\n'), ((596, 653), 'numpy.random.choice', 'np.random.choice', (['num_tracks.loc[i]', '(2 * j)'], {'replace': 
'(False)'}), '(num_tracks.loc[i], 2 * j, replace=False)\n', (612, 653), True, 'import numpy as np\n')]
|
"""
This module is special.
``Reader`` does not produce ``ReaderBasedN`` interface as other containers.
Because ``Reader`` can be used with two or three type arguments:
- ``RequiresContext[value, env]``
- ``RequiresContextResult[value, error, env]``
Because the second type argument changes its meaning
based on the used ``KindN`` instance,
we need to have two separate interfaces for two separate use-cases:
- ``ReaderBased2`` is used for types where the second type argument is ``env``
- ``ReaderBased3`` is used for types where the third type argument is ``env``
We also have two methods and two pointfree helpers
for ``bind_context`` composition: one for each interface.
Furthermore, ``Reader`` cannot have ``ReaderBased1`` type,
because we need both ``value`` and ``env`` types at all cases.
See also:
https://github.com/dry-python/returns/issues/485
"""
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Callable, Type, TypeVar
from returns.interfaces import container, iterable
from returns.primitives.hkt import Kind2, Kind3
if TYPE_CHECKING:
from returns.context import RequiresContext, NoDeps # noqa: WPS433
# Generic placeholders shared by both interfaces below.
_FirstType = TypeVar('_FirstType')
_SecondType = TypeVar('_SecondType')
_ThirdType = TypeVar('_ThirdType')
_UpdatedType = TypeVar('_UpdatedType')
# Value / error / environment placeholders for ``Reader``-like containers.
_ValueType = TypeVar('_ValueType')
_ErrorType = TypeVar('_ErrorType')
_EnvType = TypeVar('_EnvType')
# Self-types bound to each interface, so methods can return their own kind.
_ReaderBased2Type = TypeVar('_ReaderBased2Type', bound='ReaderBased2')
_ReaderBased3Type = TypeVar('_ReaderBased3Type', bound='ReaderBased3')
class ReaderBased2(
    container.Container2[_FirstType, _SecondType],
    iterable.Iterable2[_FirstType, _SecondType],
):
    """
    Reader interface for ``Kind2`` based types.
    It has two type arguments and treats the second type argument as env type.
    """
    @abstractmethod
    def __call__(self, deps: _SecondType) -> _FirstType:
        """Calls the reader with the env to get the result back."""
    @property
    @abstractmethod
    def empty(self: _ReaderBased2Type) -> 'NoDeps':
        """Is required to call ``Reader`` with explicit empty argument."""
    @abstractmethod
    def bind_context(
        self: _ReaderBased2Type,
        function: Callable[
            [_FirstType],
            'RequiresContext[_UpdatedType, _SecondType]',
        ],
    ) -> Kind2[_ReaderBased2Type, _UpdatedType, _SecondType]:
        """Allows to apply a wrapped function over a ``Reader`` container."""
    @abstractmethod
    def modify_env(
        self: _ReaderBased2Type,
        function: Callable[[_UpdatedType], _SecondType],
    ) -> Kind2[_ReaderBased2Type, _FirstType, _UpdatedType]:
        """Transforms the environment before calling the container."""
    @classmethod
    @abstractmethod
    def ask(
        cls: Type[_ReaderBased2Type],
    ) -> Kind2[_ReaderBased2Type, _SecondType, _SecondType]:
        """Returns the dependencies inside the container."""
    @classmethod
    @abstractmethod
    def from_context(
        cls: Type[_ReaderBased2Type],  # noqa: N805
        inner_value: 'RequiresContext[_ValueType, _EnvType]',
    ) -> Kind2[_ReaderBased2Type, _ValueType, _EnvType]:
        """Unit method to create new containers from successful ``Reader``."""
class ReaderBased3(
    container.Container3[_FirstType, _SecondType, _ThirdType],
    iterable.Iterable3[_FirstType, _SecondType, _ThirdType],
):
    """
    Reader interface for ``Kind3`` based types.
    It has three type arguments and treats the third type argument as env type.
    The second type argument is not used here.
    """
    @abstractmethod
    def __call__(self, deps: _ThirdType) -> Any:
        """
        Calls the reader with the env to get the result back.
        Returns ``Any``, because we cannot know in advance
        what combination of ``_FirstType`` and ``_SecondType`` would be used.
        It can be ``Union[_FirstType, _SecondType]`` or ``Tuple`` or ``Result``.
        Or any other type.
        """
    @property
    @abstractmethod
    def empty(self: _ReaderBased3Type) -> 'NoDeps':
        """Is required to call ``Reader`` with explicit empty argument."""
    @abstractmethod
    def bind_context(
        self: _ReaderBased3Type,
        function: Callable[
            [_FirstType],
            'RequiresContext[_UpdatedType, _ThirdType]',
        ],
    ) -> Kind3[_ReaderBased3Type, _UpdatedType, _SecondType, _ThirdType]:
        """Allows to apply a wrapped function over a ``Reader`` container."""
    @abstractmethod
    def modify_env(
        self: _ReaderBased3Type,
        function: Callable[[_UpdatedType], _ThirdType],
    ) -> Kind3[_ReaderBased3Type, _FirstType, _SecondType, _UpdatedType]:
        """Transforms the environment before calling the container."""
    @classmethod
    @abstractmethod
    def ask(
        cls: Type[_ReaderBased3Type],
    ) -> Kind3[_ReaderBased3Type, _ThirdType, _SecondType, _ThirdType]:
        """Returns the dependencies inside the container."""
    @classmethod
    @abstractmethod
    def from_context(
        cls: Type[_ReaderBased3Type],  # noqa: N805
        inner_value: 'RequiresContext[_ValueType, _EnvType]',
    ) -> Kind3[_ReaderBased3Type, _ValueType, _SecondType, _EnvType]:
        """Unit method to create new containers from successful ``Reader``."""
|
[
"typing.TypeVar"
] |
[((1170, 1191), 'typing.TypeVar', 'TypeVar', (['"""_FirstType"""'], {}), "('_FirstType')\n", (1177, 1191), False, 'from typing import TYPE_CHECKING, Any, Callable, Type, TypeVar\n'), ((1206, 1228), 'typing.TypeVar', 'TypeVar', (['"""_SecondType"""'], {}), "('_SecondType')\n", (1213, 1228), False, 'from typing import TYPE_CHECKING, Any, Callable, Type, TypeVar\n'), ((1242, 1263), 'typing.TypeVar', 'TypeVar', (['"""_ThirdType"""'], {}), "('_ThirdType')\n", (1249, 1263), False, 'from typing import TYPE_CHECKING, Any, Callable, Type, TypeVar\n'), ((1279, 1302), 'typing.TypeVar', 'TypeVar', (['"""_UpdatedType"""'], {}), "('_UpdatedType')\n", (1286, 1302), False, 'from typing import TYPE_CHECKING, Any, Callable, Type, TypeVar\n'), ((1317, 1338), 'typing.TypeVar', 'TypeVar', (['"""_ValueType"""'], {}), "('_ValueType')\n", (1324, 1338), False, 'from typing import TYPE_CHECKING, Any, Callable, Type, TypeVar\n'), ((1352, 1373), 'typing.TypeVar', 'TypeVar', (['"""_ErrorType"""'], {}), "('_ErrorType')\n", (1359, 1373), False, 'from typing import TYPE_CHECKING, Any, Callable, Type, TypeVar\n'), ((1385, 1404), 'typing.TypeVar', 'TypeVar', (['"""_EnvType"""'], {}), "('_EnvType')\n", (1392, 1404), False, 'from typing import TYPE_CHECKING, Any, Callable, Type, TypeVar\n'), ((1426, 1476), 'typing.TypeVar', 'TypeVar', (['"""_ReaderBased2Type"""'], {'bound': '"""ReaderBased2"""'}), "('_ReaderBased2Type', bound='ReaderBased2')\n", (1433, 1476), False, 'from typing import TYPE_CHECKING, Any, Callable, Type, TypeVar\n'), ((1497, 1547), 'typing.TypeVar', 'TypeVar', (['"""_ReaderBased3Type"""'], {'bound': '"""ReaderBased3"""'}), "('_ReaderBased3Type', bound='ReaderBased3')\n", (1504, 1547), False, 'from typing import TYPE_CHECKING, Any, Callable, Type, TypeVar\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 8 17:03:07 2018
@author: jeremiasknoblauch
Description: Plots pics from Air Pollution Data London
"""
import csv
import numpy as np
from Evaluation_tool import EvaluationTool
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
import datetime
import matplotlib
#ensure that we have type 1 fonts (for ICML publishing guiedlines)
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
"""""STEP 1: DATA TRANSFOMRATIONS"""""
normalize = True
deseasonalize_2h = True
deseasonalize_day = True #only one of the two deseasonalizations should be chosen
shortened, shortened_to = False, 500
daily_avg = True
if daily_avg:
deseasonalize_2h = False
data_dir = ("//Users//jeremiasknoblauch//Documents//OxWaSP//BOCPDMS//" +
"//Code//SpatialBOCD//Data//AirPollutionData")
cp_type = "CongestionChargeData"
dist_file_road = (data_dir + "//" + cp_type + "//" +
"RoadDistanceMatrix_")
dist_file_euclid = (data_dir + "//" + cp_type + "//" +
"EuclideanDistanceMatrix_")
results_file = ("/Users//jeremiasknoblauch//Documents////OxWaSP//BOCPDMS//Code//" +
"SpatialBOCD//Paper//AirPollutionData//" +
"results_daily.txt")
res_path = ("/Users//jeremiasknoblauch//Documents////OxWaSP//BOCPDMS//Code//" +
"SpatialBOCD//Paper//AirPollutionData//")
frequency = "2h" #2h, daily (=every 15 min),
mode = "bigger" #bigger, smaller (bigger contains more filled-in values)
if mode == "bigger":
stationIDs = ["BT1", "BX1", "BX2", "CR2", "CR4",
"EA1", "EA2", "EN1", "GR4", "GR5",
"HG1", "HG2", "HI0", "HI1", "HR1",
"HS2", "HV1", "HV3", "KC1", "KC2",
"LH2", "MY1", "RB3", "RB4", "TD0",
"TH1", "TH2", "WA2", "WL1"]
elif mode == "smaller":
stationIDs = ["BT1", "BX2", "CR2", "EA2", "EN1", "GR4",
"GR5", "HG1", "HG2", "HI0", "HR1", "HV1",
"HV3", "KC1", "LH2", "RB3", "TD0", "WA2"]
num_stations = len(stationIDs)
"""STEP 1: Read in distances"""
"""STEP 1.1: Read in road distances (as strings)"""
pw_distances_road = []
station_IDs = []
count = 0
with open(dist_file_road + mode + ".csv") as csvfile:
reader = csv.reader(csvfile)
for row in reader:
pw_distances_road += row
"""STEP 1.2: Read in euclidean distances (as strings)"""
pw_distances_euclid = []
station_IDs = []
count = 0
with open(dist_file_euclid + mode + ".csv") as csvfile:
reader = csv.reader(csvfile)
for row in reader:
pw_distances_euclid += row
"""STEP 1.3: Convert both distance lists to floats and matrices"""
pw_d_r, pw_d_e = [], []
for r,e in zip(pw_distances_road, pw_distances_euclid):
pw_d_r.append(float(r))
pw_d_e.append(float(e))
pw_distances_road = np.array(pw_d_r).reshape(num_stations, num_stations)
pw_distances_euclid = np.array(pw_d_e).reshape(num_stations, num_stations)
"""STEP 2: Convert distance matrices to nbhs"""
cutoffs = [0.0, 10.0, 20.0, 30.0, 40.0, 100.0]
num_nbhs = len(cutoffs) - 1
"""STEP 2.1: road distances"""
road_nbhs = []
for location in range(0, num_stations):
location_nbh = []
for i in range(0, num_nbhs):
larger_than, smaller_than = cutoffs[i], cutoffs[i+1]
indices = np.intersect1d(
np.where(pw_distances_road[location,:] > larger_than),
np.where(pw_distances_road[location,:] < smaller_than)).tolist()
location_nbh.append(indices.copy())
road_nbhs.append(location_nbh.copy())
"""STEP 2.2: euclidean distances"""
euclid_nbhs =[]
for location in range(0, num_stations):
location_nbh = []
for i in range(0, num_nbhs):
larger_than, smaller_than = cutoffs[i], cutoffs[i+1]
indices = np.intersect1d(
np.where(pw_distances_euclid[location,:] > larger_than),
np.where(pw_distances_euclid[location,:] < smaller_than)).tolist()
location_nbh.append(indices.copy())
euclid_nbhs.append(location_nbh.copy())
"""STEP 3: Read in station data for each station"""
station_data = []
for id_ in stationIDs:
file_name = (data_dir + "//" + cp_type + "//" +
id_ + "_081702-081703_" + frequency + ".txt")
"""STEP 3.1: Read in raw data"""
#NOTE: Skip the header
data_raw = []
count = 0
with open(file_name) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if count > 0:
data_raw += row
count += 1
"""STEP 3.2: Convert to floats"""
#NOTE: We have row names, so skip every second
dat = []
for entry in data_raw:
dat += [float(entry)]
"""STEP 3.3: Append to station_data list"""
station_data.append(dat.copy())
"""STEP 4: Format the station data into a matrix"""
T, S1, S2 = len(station_data[0]), num_stations, 1
data = np.zeros((T, num_stations))
for i in range(0, num_stations):
data[:,i] = np.array(station_data[i])
intercept_priors = np.mean(data,axis=0)
hyperpar_opt = "caron"
"""STEP 5: Transformation if necessary"""
if shortened:
T = shortened_to
data = data[:T,:]
if daily_avg:
"""average 12 consecutive values until all have been processed"""
new_data = np.zeros((int(T/12), num_stations))
for station in range(0, num_stations):
new_data[:, station] = np.mean(data[:,station].
reshape(int(T/12), 12),axis=1)
data= new_data
T = data.shape[0]
if deseasonalize_day:
if deseasonalize_2h:
print("CAREFUL! You want to deseasonalize twice, so deseasonalizing " +
"was aborted!")
elif not daily_avg:
mean_day = np.zeros((7, num_stations))
#deseasonalize
for station in range(0, num_stations):
"""get the daily average. Note that we have 12 obs/day for a year"""
for day in range(0, 7):
selection_week = [False]*day + [True]*12 + [False]*(6-day)
selection = (selection_week * int(T/(7*12)) +
selection_week[:(T-int(T/(7*12))*7*12)])
mean_day[day, station] = np.mean(data[selection,station])
data[selection,station] = (data[selection,station] -
mean_day[day, station])
if deseasonalize_day and daily_avg:
mean_day = np.zeros((7, num_stations))
#deseasonalize
for station in range(0, num_stations):
"""get the daily average. Note that we have 12 obs/day for a year"""
#Also note that T will already have changed to the #days
for day in range(0, 7):
selection_week = [False]*day + [True] + [False]*(6-day)
selection = (selection_week * int(T/7) +
selection_week[:(T-int(T/7)*7)])
mean_day[day, station] = np.mean(data[selection,station])
data[selection,station] = (data[selection,station] -
mean_day[day, station])
T = data.shape[0]
if deseasonalize_2h:
if deseasonalize_day:
print("CAREFUL! You want to deseasonalize twice, so deseasonalizing " +
"was aborted!")
else:
mean_2h = np.zeros((12*7, num_stations))
for station in range(0, num_stations):
"""get the average for each 2h-interval for each weekday"""
for _2h in range(0, 12*7):
selection_2h = [False]*_2h + [True] + [False]*(12*7-1-_2h)
selection = (selection_2h * int(T/(7*12)) +
selection_2h[:(T-int(T/(7*12))*7*12)])
mean_2h[_2h, station] = np.mean(data[selection,station])
data[selection,station] = (data[selection,station] -
mean_2h[_2h, station])
if normalize:
data = (data - np.mean(data, axis=0))/np.sqrt(np.var(data,axis=0))
intercept_priors = np.mean(data,axis=0)
"""""STEP 2: READ RESULTS"""""
EvT = EvaluationTool()
EvT.build_EvaluationTool_via_results(results_file)
segmentation = EvT.results[EvT.names.index("MAP CPs")][-2]
model_labels = EvT.results[EvT.names.index("model labels")]
num_models = len(np.union1d(model_labels, model_labels))
relevant_models = np.union1d([seg[1] for seg in segmentation],[seg[1] for seg in segmentation])
#mods = [8,11,13,17,18]
all_models = [e for e in range(0, len(model_labels))] #np.linspace(0, len(model_labels)-1, len(model_labels), dtype = int)
"""Get dates"""
def perdelta(start, end, delta, date_list):
curr = start
while curr < end:
#yield curr
date_list.append(curr)
curr += delta
all_dates = []
#start_year, start_month, start_day, start_hour = 2002, 8, 17, 0
#start_datetime = datetime.datetime(year = 2002, month = 8, day = 17, hour = 0)
#stop_datetime = datetime.datetime(year=2003, month = 8, day = 18, hour = 0)
#perdelta(start_datetime, stop_datetime, datetime.timedelta(hours = 2), all_dates)
start_year, start_month, start_day, start_hour = 2002, 8, 17, 0
start_datetime = datetime.date(year = 2002, month = 8, day = 17)
stop_datetime = datetime.date(year=2003, month = 8, day = 18)
perdelta(start_datetime, stop_datetime, datetime.timedelta(days = 1), all_dates)
"""""STEP 3: Plot"""""
index_selection = [0,5,9,13,17,21,30]
#location, color
true_CPs = [[datetime.date(year = 2003, month = 2, day = 17), "red", 4.0]]
#paper: height_ratio, num_subplots = [4,3,5],3
#poster: height_ratio, num_subplots = [4,3,4],3
height_ratio, num_subplots = [4,3,5],3
#paper: ylabel_coords = [-0.085, 0.5]
#poster: [-0.06, 0.5]
ylabel_coords = [-0.085, 0.5]
#paper: figsize = (8,5) #for poster: 12,5
fig, ax_array = plt.subplots(num_subplots, sharex = True,
gridspec_kw = {'height_ratios':height_ratio},
figsize=(8,5))
plt.subplots_adjust(hspace = .2, left = None, bottom = None, right = None, top = None)
off = 5
time_range = np.linspace(10,T-2, T-2-off,dtype = int)
all_dates = all_dates[-len(time_range):]
fig_1 = EvT.plot_raw_TS(data[-len(time_range):,:].reshape(len(time_range), 29), all_dates = all_dates, ax = ax_array[0],
time_range = time_range,
custom_colors_series = ["black"]*10,
ylab_fontsize = 14,
yticks_fontsize = 14,
ylab = "NOX",
xlab=None,
ylabel_coords = ylabel_coords,
true_CPs = true_CPs)
mod = [17,21]
EvT.plot_model_posterior(indices=mod, #mods, #mods, #relevant_models,
plot_type = "trace", #"MAPVariance1_trace",
#y_axis_labels = [str(e) for e in all_models],#[#"AR(1)",
#"M(5+)", "M(6)",
# "M(6+)",
# "M(7)", "M(7+)"],#relevant_models],
time_range = time_range,
y_axis_labels = [],
log_format=False, aspect = 'auto',
show_MAP_CPs = False,
#start_plot = 2002.75, stop_plot = 2003.75,
custom_colors = ["blue", "orange"], # ["orange"], #custom_colors_models,
ax = ax_array[1] ,#ax_array[1], #None, #ax_array[1],
xlab = None, #ylab = None, #trace",period_time_list = None,
number_offset = 1.0, #datetime.timedelta(days = 1),#0.75,
number_fontsize = 20,
period_line_thickness = 7.0,
xlab_fontsize = 14, ylab_fontsize = 14,
xticks_fontsize = 14, yticks_fontsize = 14,
ylabel_coords = ylabel_coords,
#ylab = None, #"Model posterior max",
#period_time_list = [
# [datetime.datetime(year = 2003, month = 2, day = 17, hour = 0),
# datetime.datetime(year = 2003, month = 2, day = 18, hour = 0)]],
#label_list = [["1"]],
#window_len = int(12*7*1),
period_time_list = None, #[[datetime.datetime(year = 2003, month = 2, day = 17, hour = 0),
#datetime.datetime(year = 2003, month = 2, day = 18, hour = 0)]],
label_list = None, #[["1"]],
SGV = True,
log_det = True,
all_dates = all_dates,
true_CPs = true_CPs)
fig_4 = EvT.plot_model_posterior(indices=mod, #mods, #mods, #relevant_models,
plot_type = "BF", #"MAPVariance1_trace",
#y_axis_labels = [str(e) for e in all_models],#[#"AR(1)",
#"M(5+)", "M(6)",
# "M(6+)",
# "M(7)", "M(7+)"],#relevant_models],
time_range = time_range,
log_format=True, aspect = 'auto',
show_MAP_CPs = False,
#start_plot = 2002.7, stop_plot = 2003.7,
custom_colors = ["green"], #custom_colors_models,
ax = ax_array[2], xlab = None, ylab = "log(BF)", #trace",
period_time_list = None,
label_list =None,
number_offset = 0.75,
number_fontsize = 20,
period_line_thickness = 7.0,
xlab_fontsize = 14, ylab_fontsize = 14,
xticks_fontsize = 14, yticks_fontsize = 14,
ylabel_coords = ylabel_coords,
window_len = int(12*7*2),
SGV = False,
log_det = True,
all_dates = all_dates,
true_CPs = true_CPs
)
fig.savefig(res_path + "APData.pdf")
|
[
"Evaluation_tool.EvaluationTool",
"csv.reader",
"numpy.zeros",
"datetime.date",
"matplotlib.pyplot.subplots",
"numpy.var",
"numpy.mean",
"numpy.array",
"datetime.timedelta",
"numpy.linspace",
"numpy.where",
"matplotlib.pyplot.subplots_adjust",
"numpy.union1d"
] |
[((4958, 4985), 'numpy.zeros', 'np.zeros', (['(T, num_stations)'], {}), '((T, num_stations))\n', (4966, 4985), True, 'import numpy as np\n'), ((5080, 5101), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (5087, 5101), True, 'import numpy as np\n'), ((8052, 8068), 'Evaluation_tool.EvaluationTool', 'EvaluationTool', ([], {}), '()\n', (8066, 8068), False, 'from Evaluation_tool import EvaluationTool\n'), ((8316, 8394), 'numpy.union1d', 'np.union1d', (['[seg[1] for seg in segmentation]', '[seg[1] for seg in segmentation]'], {}), '([seg[1] for seg in segmentation], [seg[1] for seg in segmentation])\n', (8326, 8394), True, 'import numpy as np\n'), ((9125, 9166), 'datetime.date', 'datetime.date', ([], {'year': '(2002)', 'month': '(8)', 'day': '(17)'}), '(year=2002, month=8, day=17)\n', (9138, 9166), False, 'import datetime\n'), ((9189, 9230), 'datetime.date', 'datetime.date', ([], {'year': '(2003)', 'month': '(8)', 'day': '(18)'}), '(year=2003, month=8, day=18)\n', (9202, 9230), False, 'import datetime\n'), ((9758, 9862), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num_subplots'], {'sharex': '(True)', 'gridspec_kw': "{'height_ratios': height_ratio}", 'figsize': '(8, 5)'}), "(num_subplots, sharex=True, gridspec_kw={'height_ratios':\n height_ratio}, figsize=(8, 5))\n", (9770, 9862), True, 'from matplotlib import pyplot as plt\n'), ((9921, 9998), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.2)', 'left': 'None', 'bottom': 'None', 'right': 'None', 'top': 'None'}), '(hspace=0.2, left=None, bottom=None, right=None, top=None)\n', (9940, 9998), True, 'from matplotlib import pyplot as plt\n'), ((10030, 10076), 'numpy.linspace', 'np.linspace', (['(10)', '(T - 2)', '(T - 2 - off)'], {'dtype': 'int'}), '(10, T - 2, T - 2 - off, dtype=int)\n', (10041, 10076), True, 'import numpy as np\n'), ((2320, 2339), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (2330, 2339), False, 'import csv\n'), ((2577, 2596), 
'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (2587, 2596), False, 'import csv\n'), ((5035, 5060), 'numpy.array', 'np.array', (['station_data[i]'], {}), '(station_data[i])\n', (5043, 5060), True, 'import numpy as np\n'), ((6437, 6464), 'numpy.zeros', 'np.zeros', (['(7, num_stations)'], {}), '((7, num_stations))\n', (6445, 6464), True, 'import numpy as np\n'), ((7988, 8009), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (7995, 8009), True, 'import numpy as np\n'), ((8258, 8296), 'numpy.union1d', 'np.union1d', (['model_labels', 'model_labels'], {}), '(model_labels, model_labels)\n', (8268, 8296), True, 'import numpy as np\n'), ((9275, 9301), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (9293, 9301), False, 'import datetime\n'), ((2879, 2895), 'numpy.array', 'np.array', (['pw_d_r'], {}), '(pw_d_r)\n', (2887, 2895), True, 'import numpy as np\n'), ((2954, 2970), 'numpy.array', 'np.array', (['pw_d_e'], {}), '(pw_d_e)\n', (2962, 2970), True, 'import numpy as np\n'), ((4457, 4476), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (4467, 4476), False, 'import csv\n'), ((7296, 7328), 'numpy.zeros', 'np.zeros', (['(12 * 7, num_stations)'], {}), '((12 * 7, num_stations))\n', (7304, 7328), True, 'import numpy as np\n'), ((9411, 9452), 'datetime.date', 'datetime.date', ([], {'year': '(2003)', 'month': '(2)', 'day': '(17)'}), '(year=2003, month=2, day=17)\n', (9424, 9452), False, 'import datetime\n'), ((5758, 5785), 'numpy.zeros', 'np.zeros', (['(7, num_stations)'], {}), '((7, num_stations))\n', (5766, 5785), True, 'import numpy as np\n'), ((6918, 6951), 'numpy.mean', 'np.mean', (['data[selection, station]'], {}), '(data[selection, station])\n', (6925, 6951), True, 'import numpy as np\n'), ((7913, 7934), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (7920, 7934), True, 'import numpy as np\n'), ((7944, 7964), 'numpy.var', 'np.var', (['data'], {'axis': '(0)'}), 
'(data, axis=0)\n', (7950, 7964), True, 'import numpy as np\n'), ((7729, 7762), 'numpy.mean', 'np.mean', (['data[selection, station]'], {}), '(data[selection, station])\n', (7736, 7762), True, 'import numpy as np\n'), ((3382, 3436), 'numpy.where', 'np.where', (['(pw_distances_road[location, :] > larger_than)'], {}), '(pw_distances_road[location, :] > larger_than)\n', (3390, 3436), True, 'import numpy as np\n'), ((3449, 3504), 'numpy.where', 'np.where', (['(pw_distances_road[location, :] < smaller_than)'], {}), '(pw_distances_road[location, :] < smaller_than)\n', (3457, 3504), True, 'import numpy as np\n'), ((3864, 3920), 'numpy.where', 'np.where', (['(pw_distances_euclid[location, :] > larger_than)'], {}), '(pw_distances_euclid[location, :] > larger_than)\n', (3872, 3920), True, 'import numpy as np\n'), ((3933, 3990), 'numpy.where', 'np.where', (['(pw_distances_euclid[location, :] < smaller_than)'], {}), '(pw_distances_euclid[location, :] < smaller_than)\n', (3941, 3990), True, 'import numpy as np\n'), ((6222, 6255), 'numpy.mean', 'np.mean', (['data[selection, station]'], {}), '(data[selection, station])\n', (6229, 6255), True, 'import numpy as np\n')]
|
#! usr/bin/env python
from math import sqrt
for run in range(6):
file1 = open("../data/times_just_C_run_{}.txt".format(run))
file2 = open("../data/times_poy_processor_{}.txt".format(run))
C_times_str = file1.readlines()
POY_times_str = file2.readlines()
if len(C_times_str) < 11 or len(POY_times_str) < 11:
continue
file1.close()
file2.close()
variance = 0
mean = 0
expectation = 0
POY_count = 0
C_count = 0
C_times = list(map(lambda x: int(float(x.split()[-1])), C_times_str))
POY_times = list(map(lambda x: int(float(x.split()[-1])), POY_times_str))
print("Run {}:".format(run))
for c, poy in zip(C_times[:-1], POY_times[:-1]): # remember, last line is average
print("{:>7}{:>7}{:>7}".format(c, poy, c - poy))
# print((c - poy) * (c - poy))
mean += abs(c - poy)
if c < poy:
C_count += 1
else:
POY_count += 1
mean /= 10
for c, poy in zip(C_times[:-1], POY_times[:-1]):
difference = abs(c - poy)
rel_diff = c - poy
variance += (difference - mean) * (difference - mean)
expectation += rel_diff
expectation /= 10
variance /= 10
print("\nC is faster: {:>8}".format(C_count))
print("Poy is faster: {:>8}".format(POY_count))
print("Mean time diff: {:>8}".format(int(mean)))
print("Expectation: {:>8}".format(int(expectation)))
print("Variance: {:>8}".format(int(variance)))
print("Std. Dev: {:>8}".format(int(sqrt(variance))))
print()
|
[
"math.sqrt"
] |
[((1569, 1583), 'math.sqrt', 'sqrt', (['variance'], {}), '(variance)\n', (1573, 1583), False, 'from math import sqrt\n')]
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import re
from . import SwedishLegalStore, Trips
class KommitteStore(SwedishLegalStore):
def basefile_to_pathfrag(self, basefile):
# "Ju 2012:01" => "Ju/2012/01"
return basefile.replace(" ", "/").replace(":", "/")
def pathfrag_to_basefile(self, pathfrag):
# "Ju/2012/01" => "2012:152"
return pathfrag.replace("/", " ", 1).replace("/", ":")
class Kommitte(Trips):
documentstore_class = KommitteStore
alias = "komm"
app = "komm"
base = "KOMM"
download_params = [{'maxpage': 101, 'app': app, 'base': base}]
basefile_regex = "(?P<basefile>\w+ \d{4}:\w+)$"
re_basefile = re.compile(r'(\w+ \d{4}:\w+)', re.UNICODE)
def parse_from_soup(self, soup, basefile):
pre = soup.findAll("pre")[-1]
text = ''.join(pre.findAll(text=True))
print(text)
# End result something like this
#
# <http://rinfo.lagrummet.se/komm/a/1991:03> a :Kommittebeskrivning
# dcterms:identifier "A 1991:03" ;
# :tillkalladAr "1991" ;
# :lopnummer "03";
# :kommittestatus "Avslutad";
# :avslutadAr "1993";
# :departement <http://rinfo.lagrummet.se/publ/org/Arbetsmarknadsdepartementet>;
# :kommittedirektiv <http://rinfo.lagrummet.se/publ/dir/1991:75> ,
# <http://rinfo.lagrummet.se/publ/dir/1992:33> ,
# :betankanden <http://rinfo.lagrummet.se/publ/bet/sou/1993:81> .
#
# <http://rinfo.lagrummet.se/publ/bet/sou/1993:81> dcterms:title "Översyn av arbetsmiljölagen";
|
[
"re.compile"
] |
[((805, 849), 're.compile', 're.compile', (['"""(\\\\w+ \\\\d{4}:\\\\w+)"""', 're.UNICODE'], {}), "('(\\\\w+ \\\\d{4}:\\\\w+)', re.UNICODE)\n", (815, 849), False, 'import re\n')]
|
from wordsalad.input import split_germanic, group_words
import unittest
class TestTokenisation(unittest.TestCase):
def test_split_germanic_punctuation_treated_like_one_word(self):
txt = "abc. def.,"
res = list(split_germanic(txt))
self.assertListEqual(["abc", ".", "def", ".", ","], res)
def test_split_germanic_fails_on_empty_whitespace(self):
txt = "A high powered mutant"
with self.assertRaises(ValueError):
list(split_germanic(txt, whitespace=""))
def test_split_germanic_whitespace_not_a_string(self):
txt = "ab1c d1ef"
lst = list(split_germanic(txt, whitespace=1))
self.assertListEqual(["ab", "c d", "ef"], lst)
def test_split_germanic_start_words(self):
s = "Hello my name is <NAME>. How very nice to meet you! :) What is your name?"
start_words = []
list(split_germanic(s, start_words=start_words, punctuation=".!", sentence_end=".?!"))
self.assertEqual(["Hello", "How", ":)"], start_words)
def test_split_germanic(self):
cases = [
(
"Swiss cheese is a type of dairy product.[5]",
["Swiss", "cheese", "is", "a", "type", "of", "dairy", "product", ".", "[", "5", "]"]
),
(
"Who are you... he said.",
["Who", "are", "you", ".", ".", ".", "he", "said", "."]
),
(
"A list of (approved) items follows:",
["A", "list", "of", "(", "approved", ")", "items", "follows", ":"]
)
]
for s, expected in cases:
actual = list(split_germanic(s))
self.assertListEqual(expected, actual)
class TestGroupWords(unittest.TestCase):
def test_group_words_words_must_be_iterable(self):
with self.assertRaises(TypeError):
list(group_words(1))
def test_group_words_size_must_be_int(self):
with self.assertRaises(ValueError):
list(group_words([1,2], size="abc"))
def test_group_words_fills_with_empty(self):
words = [1,2,3,4,5,6,7]
res = group_words(words, size=3, empty="E")
self.assertIsNotNone(res)
self.assertEqual(
list(res),
[
(1,2,3),
(4,5,6),
(7,"E", "E")
])
def test_group_words_empty_sequence_gives_empty(self):
res = group_words([])
self.assertEqual(list(res), [])
def test_group_words_size_larger_than_1(self):
with self.assertRaises(ValueError):
list(group_words([1,2,3], size=1))
|
[
"wordsalad.input.split_germanic",
"wordsalad.input.group_words"
] |
[((2194, 2231), 'wordsalad.input.group_words', 'group_words', (['words'], {'size': '(3)', 'empty': '"""E"""'}), "(words, size=3, empty='E')\n", (2205, 2231), False, 'from wordsalad.input import split_germanic, group_words\n'), ((2498, 2513), 'wordsalad.input.group_words', 'group_words', (['[]'], {}), '([])\n', (2509, 2513), False, 'from wordsalad.input import split_germanic, group_words\n'), ((237, 256), 'wordsalad.input.split_germanic', 'split_germanic', (['txt'], {}), '(txt)\n', (251, 256), False, 'from wordsalad.input import split_germanic, group_words\n'), ((636, 669), 'wordsalad.input.split_germanic', 'split_germanic', (['txt'], {'whitespace': '(1)'}), '(txt, whitespace=1)\n', (650, 669), False, 'from wordsalad.input import split_germanic, group_words\n'), ((908, 993), 'wordsalad.input.split_germanic', 'split_germanic', (['s'], {'start_words': 'start_words', 'punctuation': '""".!"""', 'sentence_end': '""".?!"""'}), "(s, start_words=start_words, punctuation='.!', sentence_end='.?!'\n )\n", (922, 993), False, 'from wordsalad.input import split_germanic, group_words\n'), ((490, 524), 'wordsalad.input.split_germanic', 'split_germanic', (['txt'], {'whitespace': '""""""'}), "(txt, whitespace='')\n", (504, 524), False, 'from wordsalad.input import split_germanic, group_words\n'), ((1687, 1704), 'wordsalad.input.split_germanic', 'split_germanic', (['s'], {}), '(s)\n', (1701, 1704), False, 'from wordsalad.input import split_germanic, group_words\n'), ((1931, 1945), 'wordsalad.input.group_words', 'group_words', (['(1)'], {}), '(1)\n', (1942, 1945), False, 'from wordsalad.input import split_germanic, group_words\n'), ((2062, 2093), 'wordsalad.input.group_words', 'group_words', (['[1, 2]'], {'size': '"""abc"""'}), "([1, 2], size='abc')\n", (2073, 2093), False, 'from wordsalad.input import split_germanic, group_words\n'), ((2671, 2701), 'wordsalad.input.group_words', 'group_words', (['[1, 2, 3]'], {'size': '(1)'}), '([1, 2, 3], size=1)\n', (2682, 2701), False, 'from 
wordsalad.input import split_germanic, group_words\n')]
|
r"""Summary objects at the end of training procedures."""
import numpy as np
import pickle
import torch
class TrainingSummary:
def __init__(self,
model_best,
model_final,
epochs,
epoch_best,
losses_train,
losses_test=None,
identifier=None):
self.identifier = identifier
self.epochs = epochs
self.model_best = model_best
self.model_final = model_final
self.epoch_best = epoch_best
self.losses_train = losses_train
self.losses_test = losses_test
def save(self, path):
summary = {
"identifier": self.identifier,
"best_model": self.model_best,
"final_model": self.model_final,
"epochs": self.epochs,
"best_epoch": self.epoch_best,
"training_losses": self.losses_train,
"testing_losses": self.losses_test}
torch.save(summary, path)
def load(self, path):
summary = torch.load(path)
self.identifier = summary["identifier"]
self.model_best = summary["best_model"]
self.model_final = summary["final_model"]
self.epochs = summary["epochs"]
self.epoch_best = summary["best_epoch"]
self.losses_train = summary["training_losses"]
self.losses_test = summary["testing_losses"]
def test_losses_available(self):
return self.losses_test is not None and len(self.losses_test) > 0
def identifier_available(self):
return self.identifier is not None
def num_epochs(self):
return self.epochs
def best_epoch(self):
return self.epoch_best
def best_model(self):
return self.model_best
def final_model(self):
return self.model_final
def test_losses(self, log=False):
if log:
losses = np.log(self.losses_test)
else:
losses = self.losses_test
return losses
def train_losses(self, log=False):
if log:
losses = np.log(self.losses_train)
else:
losses = self.losses_train
return losses
def __str__(self):
representation = ""
if self.identifier_available():
representation = "Identifier:\t\t{}\n".format(self.identifier)
representation = representation + "Total epochs:\t\t{}\n".format(self.epochs) + \
"Best training loss:\t{}\n".format(self.losses_train.min()) + \
"Final training loss:\t{}".format(self.losses_train[-1])
if self.test_losses_available():
representation = representation + \
"\nBest testing loss:\t{}\n".format(self.losses_test.min()) + \
"Best test epoch:\t{}\n".format(self.epoch_best) + \
"Final test loss:\t{}".format(self.losses_test[-1])
return representation
|
[
"torch.save",
"torch.load",
"numpy.log"
] |
[((927, 952), 'torch.save', 'torch.save', (['summary', 'path'], {}), '(summary, path)\n', (937, 952), False, 'import torch\n'), ((998, 1014), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (1008, 1014), False, 'import torch\n'), ((1855, 1879), 'numpy.log', 'np.log', (['self.losses_test'], {}), '(self.losses_test)\n', (1861, 1879), True, 'import numpy as np\n'), ((2032, 2057), 'numpy.log', 'np.log', (['self.losses_train'], {}), '(self.losses_train)\n', (2038, 2057), True, 'import numpy as np\n')]
|
from decimal import Decimal
from django.db import models
# Create your models here.
class Orders(models.Model):
order_id = models.CharField( max_length=200, blank=False, null=True, unique=True)
ship_date = models.DateField(auto_now_add=False, auto_now=False, null=True)
customer = models.CharField("Company Name",max_length=200, blank=False, null=True)
currency= models.CharField(max_length=200, blank=False, null=True)
creater = models.CharField(max_length=200, blank=False, null=True)
order_amount= models.DecimalField(max_digits=10, decimal_places=2)
paid_amount= models.DecimalField(max_digits=10, decimal_places=2, default=Decimal('0.00'))
create_date = models.DateField(auto_now_add=False, auto_now=False, null=True)
pay_term = models.CharField("Payment Term",max_length=200, blank=False, null=True)
updater = models.CharField(max_length=200, blank=False, null=True)
def __unicode__(self):
# return u'%s, %s' %(self.company.companyprofile_name, self.reference_id)
#return u'%s' %(self.company.companyprofile_name)
return u'%s, %s, %s $%s, %s' %(self.order_id, self.customer, self.currency, self.order_amount, self.creater)
|
[
"django.db.models.CharField",
"django.db.models.DecimalField",
"django.db.models.DateField",
"decimal.Decimal"
] |
[((127, 196), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(False)', 'null': '(True)', 'unique': '(True)'}), '(max_length=200, blank=False, null=True, unique=True)\n', (143, 196), False, 'from django.db import models\n'), ((211, 274), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(False)', 'auto_now': '(False)', 'null': '(True)'}), '(auto_now_add=False, auto_now=False, null=True)\n', (227, 274), False, 'from django.db import models\n'), ((287, 359), 'django.db.models.CharField', 'models.CharField', (['"""Company Name"""'], {'max_length': '(200)', 'blank': '(False)', 'null': '(True)'}), "('Company Name', max_length=200, blank=False, null=True)\n", (303, 359), False, 'from django.db import models\n'), ((370, 426), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(False)', 'null': '(True)'}), '(max_length=200, blank=False, null=True)\n', (386, 426), False, 'from django.db import models\n'), ((438, 494), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(False)', 'null': '(True)'}), '(max_length=200, blank=False, null=True)\n', (454, 494), False, 'from django.db import models\n'), ((510, 562), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)'}), '(max_digits=10, decimal_places=2)\n', (529, 562), False, 'from django.db import models\n'), ((670, 733), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(False)', 'auto_now': '(False)', 'null': '(True)'}), '(auto_now_add=False, auto_now=False, null=True)\n', (686, 733), False, 'from django.db import models\n'), ((746, 818), 'django.db.models.CharField', 'models.CharField', (['"""Payment Term"""'], {'max_length': '(200)', 'blank': '(False)', 'null': '(True)'}), "('Payment Term', max_length=200, blank=False, null=True)\n", (762, 818), False, 'from django.db import models\n'), ((829, 885), 
'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(False)', 'null': '(True)'}), '(max_length=200, blank=False, null=True)\n', (845, 885), False, 'from django.db import models\n'), ((638, 653), 'decimal.Decimal', 'Decimal', (['"""0.00"""'], {}), "('0.00')\n", (645, 653), False, 'from decimal import Decimal\n')]
|
#!/usr/bin/env python
#
# Copyright 2021 CRS4 - Center for Advanced Studies, Research and Development
# in Sardinia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from lorem_text import lorem
import random
def random_org_name(prefix='pykeyrock unittest', words=2):
return f"{prefix} {lorem.words(words)}".capitalize()
def random_org_description(words=5):
return lorem.words(words).capitalize() + '.'
def random_app_name(prefix='pykeyrock unittest', words=2):
return f"{prefix} {lorem.words(words)}".capitalize()
def random_app_description(words=5):
return lorem.words(words).capitalize() + '.'
def random_role_name(prefix='pykeyrock unittest', words=2):
_w = prefix.split() + lorem.words(2).split()
return ''.join(list(map(str.capitalize, _w)))
def random_user_email(prefix='pykeyrock_unittest'):
_login, _domain, _tld = lorem.words(3).split()
return f"{prefix}_{_login}@{_domain}.{_tld[0:2]}".lower()
def random_user_password():
return '.'.join(lorem.words(3).split()).lower()
def random_user_name(prefix='pykeyrock', words=2):
return f"{prefix} {lorem.words(words)}".capitalize()
def random_permission_name(prefix='pykeyrock unittest', words=2):
return f"{prefix} {lorem.words(words)}".capitalize()
def random_permission_action(prefix='pykeyrock unittest', words=2):
_actions = ["GET", "POST", "PUT", "PATCH", "DELETE"]
return random.choice(_actions)
def random_permission_resource(prefix='pykeyrock unittest', words=1):
_w = prefix.split() + lorem.words(words).split()
return '/'.join(list(map(str.lower, _w)))
|
[
"lorem_text.lorem.words",
"random.choice"
] |
[((1914, 1937), 'random.choice', 'random.choice', (['_actions'], {}), '(_actions)\n', (1927, 1937), False, 'import random\n'), ((1374, 1388), 'lorem_text.lorem.words', 'lorem.words', (['(3)'], {}), '(3)\n', (1385, 1388), False, 'from lorem_text import lorem\n'), ((887, 905), 'lorem_text.lorem.words', 'lorem.words', (['words'], {}), '(words)\n', (898, 905), False, 'from lorem_text import lorem\n'), ((1093, 1111), 'lorem_text.lorem.words', 'lorem.words', (['words'], {}), '(words)\n', (1104, 1111), False, 'from lorem_text import lorem\n'), ((1219, 1233), 'lorem_text.lorem.words', 'lorem.words', (['(2)'], {}), '(2)\n', (1230, 1233), False, 'from lorem_text import lorem\n'), ((2036, 2054), 'lorem_text.lorem.words', 'lorem.words', (['words'], {}), '(words)\n', (2047, 2054), False, 'from lorem_text import lorem\n'), ((803, 821), 'lorem_text.lorem.words', 'lorem.words', (['words'], {}), '(words)\n', (814, 821), False, 'from lorem_text import lorem\n'), ((1009, 1027), 'lorem_text.lorem.words', 'lorem.words', (['words'], {}), '(words)\n', (1020, 1027), False, 'from lorem_text import lorem\n'), ((1617, 1635), 'lorem_text.lorem.words', 'lorem.words', (['words'], {}), '(words)\n', (1628, 1635), False, 'from lorem_text import lorem\n'), ((1742, 1760), 'lorem_text.lorem.words', 'lorem.words', (['words'], {}), '(words)\n', (1753, 1760), False, 'from lorem_text import lorem\n'), ((1509, 1523), 'lorem_text.lorem.words', 'lorem.words', (['(3)'], {}), '(3)\n', (1520, 1523), False, 'from lorem_text import lorem\n')]
|
# -*- coding: UTF-8 -*-
# vim: set expandtab sw=4 ts=4 sts=4:
#
# phpMyAdmin web site
#
# Copyright (C) 2008 - 2016 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from django.contrib import admin
from news.models import Post, Planet
class PostAdmin(admin.ModelAdmin):
list_display = ('title', 'date', 'author')
list_filter = ('author',)
date_hierarchy = 'date'
prepopulated_fields = {'slug': ('title',)}
search_fields = ['title', 'slug']
def save_model(self, request, obj, form, change):
if getattr(obj, 'author', None) is None:
obj.author = request.user
obj.save()
class PlanetAdmin(admin.ModelAdmin):
list_display = ('title', 'date', 'url')
date_hierarchy = 'date'
search_fields = ['title', 'url']
admin.site.register(Planet, PlanetAdmin)
admin.site.register(Post, PostAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((1449, 1489), 'django.contrib.admin.site.register', 'admin.site.register', (['Planet', 'PlanetAdmin'], {}), '(Planet, PlanetAdmin)\n', (1468, 1489), False, 'from django.contrib import admin\n'), ((1490, 1526), 'django.contrib.admin.site.register', 'admin.site.register', (['Post', 'PostAdmin'], {}), '(Post, PostAdmin)\n', (1509, 1526), False, 'from django.contrib import admin\n')]
|
from functools import reduce
import scipy.ndimage as nd
import numpy as np
def convert_to_img_frame(img, node_position, mesh, borders, settings):
    """Crop ``img`` to the element borders and express the nodal
    positions in the coordinate system of the cropped frame.
    """
    # Crop the padded sub-image covered by this element.
    image_frame = extract_subframe(img, borders, settings.pad)

    # Shift the global nodal coordinates into the cropped frame.
    pad = settings.pad
    local_node_pos = np.zeros((2, mesh.element_def.n_nodes), dtype=settings.precision)
    local_node_pos[0, :] = node_position[0] + pad - borders[0, :]
    local_node_pos[1, :] = node_position[1] + pad - borders[2, :]
    return image_frame, local_node_pos
def generate_edge_coordinates(seed):
    """Return the (e, n) coordinates of the points lying on the edge of
    the unit square, sampled from a ``seed`` x ``seed`` regular grid.

    Returns two flat arrays (e-coords, n-coords), one entry per edge point.
    """
    seeding = np.linspace(0., 1., seed)
    es, ns = np.meshgrid(seeding, seeding)
    # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `bool` is the supported spelling (it is what np.bool aliased).
    mask = np.ones_like(es, dtype=bool)
    mask[1:-1, 1:-1] = 0  # keep only the border of the grid
    return es[mask], ns[mask]
def find_element_borders(node_position, mesh, seed=20):
    """Find the integer pixel bounding box of the element(s).

    Rows of the returned [4, n_elms] array are laid out as
    [x_min, x_max, y_min, y_max] per element.
    """
    e, n = generate_edge_coordinates(seed)
    N_at_borders = mesh.element_def.Nn(e.flatten(), n.flatten())
    # Global pixel coordinates of the sampled element-edge points.
    pixel_x = np.einsum("jk,k->j", N_at_borders, node_position[0])
    pixel_y = np.einsum("jk,k->j", N_at_borders, node_position[1])
    axis = None
    # [Xmin, Xmax, Ymin, Ymax] per element.
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `int` is the supported spelling (it is what np.int aliased).
    borders = np.zeros((4, mesh.n_elms), dtype=int)
    borders[0, :] = np.min(pixel_x, axis=axis)
    borders[1, :] = np.max(pixel_x, axis=axis)
    borders[2, :] = np.min(pixel_y, axis=axis)
    borders[3, :] = np.max(pixel_y, axis=axis)
    return borders
def extract_subframe(img, borders, pad):
    """Slice out the padded bounding box of the first element from ``img``."""
    y_lo, y_hi = borders[2, 0] - pad, borders[3, 0] + pad
    x_lo, x_hi = borders[0, 0] - pad, borders[1, 0] + pad
    return img[y_lo:y_hi, x_lo:x_hi]
def find_borders(coord):
    """Return integer (floor(min), ceil(max)) bounds of ``coord``."""
    lower = np.floor(np.min(coord))
    upper = np.ceil(np.max(coord))
    return int(lower), int(upper)
def find_inconsistent(ep, ny):
    """Return the indices where (ep, ny) fall outside the unit square [0, 1]^2."""
    out_of_range = [
        np.where(ep > 1.)[0],
        np.where(ep < 0.)[0],
        np.where(ny > 1.)[0],
        np.where(ny < 0.)[0],
    ]
    return reduce(np.union1d, out_of_range)
def extract_points_from_image(image, coordinates):
    """Sample ``image`` at (possibly fractional) ``coordinates``
    using cubic spline interpolation.
    """
    return nd.map_coordinates(image, coordinates, prefilter=True, order=3)
def image_coordinates(image):
    """Return meshgrid coordinate arrays spanning the image axes."""
    rows, cols = image.shape[0], image.shape[1]
    xs, ys = np.meshgrid(np.arange(rows), np.arange(cols))
    return xs, ys
|
[
"numpy.meshgrid",
"numpy.ones_like",
"numpy.ceil",
"numpy.floor",
"numpy.zeros",
"numpy.einsum",
"numpy.min",
"numpy.max",
"numpy.where",
"numpy.arange",
"numpy.linspace",
"functools.reduce",
"scipy.ndimage.map_coordinates"
] |
[((169, 234), 'numpy.zeros', 'np.zeros', (['(2, mesh.element_def.n_nodes)'], {'dtype': 'settings.precision'}), '((2, mesh.element_def.n_nodes), dtype=settings.precision)\n', (177, 234), True, 'import numpy as np\n'), ((624, 651), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'seed'], {}), '(0.0, 1.0, seed)\n', (635, 651), True, 'import numpy as np\n'), ((663, 692), 'numpy.meshgrid', 'np.meshgrid', (['seeding', 'seeding'], {}), '(seeding, seeding)\n', (674, 692), True, 'import numpy as np\n'), ((704, 735), 'numpy.ones_like', 'np.ones_like', (['es'], {'dtype': 'np.bool'}), '(es, dtype=np.bool)\n', (716, 735), True, 'import numpy as np\n'), ((1014, 1066), 'numpy.einsum', 'np.einsum', (['"""jk,k->j"""', 'N_at_borders', 'node_position[0]'], {}), "('jk,k->j', N_at_borders, node_position[0])\n", (1023, 1066), True, 'import numpy as np\n'), ((1081, 1133), 'numpy.einsum', 'np.einsum', (['"""jk,k->j"""', 'N_at_borders', 'node_position[1]'], {}), "('jk,k->j', N_at_borders, node_position[1])\n", (1090, 1133), True, 'import numpy as np\n'), ((1200, 1240), 'numpy.zeros', 'np.zeros', (['(4, mesh.n_elms)'], {'dtype': 'np.int'}), '((4, mesh.n_elms), dtype=np.int)\n', (1208, 1240), True, 'import numpy as np\n'), ((1261, 1287), 'numpy.min', 'np.min', (['pixel_x'], {'axis': 'axis'}), '(pixel_x, axis=axis)\n', (1267, 1287), True, 'import numpy as np\n'), ((1308, 1334), 'numpy.max', 'np.max', (['pixel_x'], {'axis': 'axis'}), '(pixel_x, axis=axis)\n', (1314, 1334), True, 'import numpy as np\n'), ((1355, 1381), 'numpy.min', 'np.min', (['pixel_y'], {'axis': 'axis'}), '(pixel_y, axis=axis)\n', (1361, 1381), True, 'import numpy as np\n'), ((1402, 1428), 'numpy.max', 'np.max', (['pixel_y'], {'axis': 'axis'}), '(pixel_y, axis=axis)\n', (1408, 1428), True, 'import numpy as np\n'), ((1729, 1747), 'numpy.where', 'np.where', (['(ep > 1.0)'], {}), '(ep > 1.0)\n', (1737, 1747), True, 'import numpy as np\n'), ((1758, 1776), 'numpy.where', 'np.where', (['(ep < 0.0)'], {}), '(ep < 0.0)\n', 
(1766, 1776), True, 'import numpy as np\n'), ((1787, 1805), 'numpy.where', 'np.where', (['(ny > 1.0)'], {}), '(ny > 1.0)\n', (1795, 1805), True, 'import numpy as np\n'), ((1816, 1834), 'numpy.where', 'np.where', (['(ny < 0.0)'], {}), '(ny < 0.0)\n', (1824, 1834), True, 'import numpy as np\n'), ((1845, 1901), 'functools.reduce', 'reduce', (['np.union1d', '[rem1[0], rem2[0], rem3[0], rem4[0]]'], {}), '(np.union1d, [rem1[0], rem2[0], rem3[0], rem4[0]])\n', (1851, 1901), False, 'from functools import reduce\n'), ((1965, 2028), 'scipy.ndimage.map_coordinates', 'nd.map_coordinates', (['image', 'coordinates'], {'order': '(3)', 'prefilter': '(True)'}), '(image, coordinates, order=3, prefilter=True)\n', (1983, 2028), True, 'import scipy.ndimage as nd\n'), ((2085, 2110), 'numpy.arange', 'np.arange', (['image.shape[0]'], {}), '(image.shape[0])\n', (2094, 2110), True, 'import numpy as np\n'), ((2112, 2137), 'numpy.arange', 'np.arange', (['image.shape[1]'], {}), '(image.shape[1])\n', (2121, 2137), True, 'import numpy as np\n'), ((1638, 1653), 'numpy.floor', 'np.floor', (['coord'], {}), '(coord)\n', (1646, 1653), True, 'import numpy as np\n'), ((1668, 1682), 'numpy.ceil', 'np.ceil', (['coord'], {}), '(coord)\n', (1675, 1682), True, 'import numpy as np\n')]
|
import speech_recognition as sr
import re
# Map spoken tokens from the recognizer to integers 0-5, including
# common homophones/mis-hearings ('to'/'too' -> 2, 'tree' -> 3, 'for' -> 4).
TEXT_TO_NUMBER = {
    '0': 0,
    'zero': 0,
    '1': 1,
    'one': 1,
    '2': 2,
    'two': 2,
    'to': 2,
    'too': 2,
    '3': 3,
    'three': 3,
    'tree': 3,
    '4': 4,
    'four': 4,
    'for': 4,
    '5': 5,
    'five': 5,
    }
def text_to_number(text, keyword):
    """Map the spoken number that follows ``keyword`` in ``text`` to an int.

    Raises KeyError when the spoken token is not in TEXT_TO_NUMBER.
    """
    print(text)
    print(keyword)
    spoken = get_number_from_text(text, keyword)
    return TEXT_TO_NUMBER[spoken]
def get_number_from_text(text, keyword):
    """Return the (stripped) text that follows the first ``keyword`` in ``text``.

    Raises IndexError when ``keyword`` is not followed by any text.
    """
    matches = re.findall(r'%s(.+)' % keyword, text)
    return matches[0].strip()
class VoiceRecognition:
    """Thin wrapper around speech_recognition for short voice commands."""

    def __init__(self):
        self.r = sr.Recognizer()
        self.mic = sr.Microphone()

    def recognize_command(self):
        """Record up to three seconds of audio and transcribe it via Google.

        Returns a dict with key 'success' plus either 'recording'
        (the transcription) or 'exception' (a human-readable error).
        """
        response = {}
        try:
            with self.mic as source:
                self.r.adjust_for_ambient_noise(source)
                audio = self.r.listen(source, phrase_time_limit=3)
            command = self.r.recognize_google(audio)
        except sr.UnknownValueError:
            response["exception"] = "Google Speech Recognition could not understand audio"
            response["success"] = False
        except sr.RequestError as e:
            response["exception"] = f"Could not request results from Google Speech Recognition service; {e}"
            response["success"] = False
        else:
            response["recording"] = command
            response["success"] = True
        return response
|
[
"re.findall",
"speech_recognition.Recognizer",
"speech_recognition.Microphone"
] |
[((610, 625), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (623, 625), True, 'import speech_recognition as sr\n'), ((645, 660), 'speech_recognition.Microphone', 'sr.Microphone', ([], {}), '()\n', (658, 660), True, 'import speech_recognition as sr\n'), ((494, 530), 're.findall', 're.findall', (["('%s(.+)' % keyword)", 'text'], {}), "('%s(.+)' % keyword, text)\n", (504, 530), False, 'import re\n')]
|
from urllib.parse import urlparse
import requests
import re
def url_syntax(url_changes):
    """Ensure ``url_changes`` carries an explicit HTTP scheme.

    Returns the URL unchanged when it already starts with 'http://' or
    'https://'; otherwise returns it prefixed with 'http://'.
    """
    # str.startswith on the actual prefixes fixes the old
    # re.search("http", ...) check, which wrongly treated any URL merely
    # *containing* "http" (e.g. "example.com/http-docs") as already
    # having a scheme.
    if url_changes.startswith(("http://", "https://")):
        return url_changes
    return "http://" + url_changes
def api_call(inurl, seurl):
    """Query the phishbuster web API for a verdict on a suspect URL.

    ``inurl`` is the suspected phishing URL; ``seurl`` is the domain
    name of the genuine site to compare against.
    """
    normalized = url_syntax(inurl)
    domain = urlparse(normalized).netloc
    endpoint = "https://phishbuster-web.herokuapp.com/api/" + domain + '+' + seurl
    response = requests.get(url=endpoint)
    return response.json()
if __name__ == '__main__':
    # Smoke test: a crafted URL that visually spoofs microsoft.com.
    print(api_call('https://www.microsoft.com~@www.google.com/wsgrye/ruygfbryu/gijgnuf','google.com'))
|
[
"urllib.parse.urlparse",
"re.search",
"requests.get"
] |
[((117, 147), 're.search', 're.search', (['"""http"""', 'url_changes'], {}), "('http', url_changes)\n", (126, 147), False, 'import re\n'), ((734, 755), 'requests.get', 'requests.get', ([], {'url': 'URL'}), '(url=URL)\n', (746, 755), False, 'import requests\n'), ((623, 638), 'urllib.parse.urlparse', 'urlparse', (['check'], {}), '(check)\n', (631, 638), False, 'from urllib.parse import urlparse\n')]
|
import pytest
from mixer.backend.django import mixer
from projects.models import Project, ProjectMembership
from users.models import User
@pytest.mark.django_db
class TestProject:
    """Unit tests for the Project model."""
    def test_project_create(self):
        # A freshly blended project keeps the owner it was given.
        user = mixer.blend(User, username='test')
        proj = mixer.blend(Project, owner = user)
        assert proj.owner == user
    def test_project_str(self):
        # __str__ is expected to render as the project's title.
        proj = mixer.blend(Project)
        assert str(proj) == proj.title
@pytest.mark.django_db
class TestProjectMembers:
    """Unit tests for the ProjectMembership through-model."""
    def test_member(self):
        # A membership row links the user into project.members.
        proj = mixer.blend(Project)
        user = mixer.blend(User, username='test')
        mixer.blend(ProjectMembership, member=user, project=proj)
        assert proj.members.get(username='test') == user
    def test_proj_member_str(self):
        # __str__ is expected to be "<member full name> , <project title>".
        pmem = mixer.blend(ProjectMembership)
        assert str(pmem) == f'{pmem.member.full_name} , {pmem.project.title}'
|
[
"mixer.backend.django.mixer.blend"
] |
[((232, 266), 'mixer.backend.django.mixer.blend', 'mixer.blend', (['User'], {'username': '"""test"""'}), "(User, username='test')\n", (243, 266), False, 'from mixer.backend.django import mixer\n'), ((282, 314), 'mixer.backend.django.mixer.blend', 'mixer.blend', (['Project'], {'owner': 'user'}), '(Project, owner=user)\n', (293, 314), False, 'from mixer.backend.django import mixer\n'), ((399, 419), 'mixer.backend.django.mixer.blend', 'mixer.blend', (['Project'], {}), '(Project)\n', (410, 419), False, 'from mixer.backend.django import mixer\n'), ((551, 571), 'mixer.backend.django.mixer.blend', 'mixer.blend', (['Project'], {}), '(Project)\n', (562, 571), False, 'from mixer.backend.django import mixer\n'), ((587, 621), 'mixer.backend.django.mixer.blend', 'mixer.blend', (['User'], {'username': '"""test"""'}), "(User, username='test')\n", (598, 621), False, 'from mixer.backend.django import mixer\n'), ((630, 687), 'mixer.backend.django.mixer.blend', 'mixer.blend', (['ProjectMembership'], {'member': 'user', 'project': 'proj'}), '(ProjectMembership, member=user, project=proj)\n', (641, 687), False, 'from mixer.backend.django import mixer\n'), ((802, 832), 'mixer.backend.django.mixer.blend', 'mixer.blend', (['ProjectMembership'], {}), '(ProjectMembership)\n', (813, 832), False, 'from mixer.backend.django import mixer\n')]
|
from .Crop import Crop
from .Gaussian_blur import Gaussian_blur
from .Gaussian_noise import Gaussian_noise
from .Jpeg_compression import JpegCompression
from .Combination import Combination_attack
import torch
def attack_initializer(attack_method, is_train):
    """Build the watermark-attack (augmentation) module named by ``attack_method``.

    Supported values: 'Crop', 'Noise', 'Blur', 'Jpeg', 'Combination',
    'Combination_with_pillow'. Raises ValueError for anything else.
    """
    # Hoisted: the device was previously computed with an identical
    # expression in two separate branches. Jpeg compression runs on GPU
    # when one is available.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    if attack_method == 'Crop':
        attack = Crop([0.8, 1], is_train)
    elif attack_method == 'Noise':
        attack = Gaussian_noise([0, 0.3], is_train)
    elif attack_method == 'Blur':
        # "kernel_size" is a list of candidate (odd) kernel sizes.
        attack = Gaussian_blur(kernel_size=[1, 3, 5, 7, 9], is_train=is_train)
    elif attack_method == 'Jpeg':
        attack = JpegCompression(device)
    elif attack_method == 'Combination':
        attacks = [
            Gaussian_blur(kernel_size=[1, 3, 5, 7, 9], is_train=is_train),
            Crop([0.8, 1], is_train),
            Gaussian_noise([0, 0.1], is_train),
            JpegCompression(device),
        ]
        attack = Combination_attack(attacks, is_train)
    elif attack_method == 'Combination_with_pillow':
        # Same as 'Combination' but without Jpeg: the caller saves PNGs
        # and applies Pillow JPEG (quality 75) after the other attacks.
        # img -> 3 attacks -> save PNG -> Pillow 75 -> load
        attacks = [
            Gaussian_blur(kernel_size=[1, 3, 5, 7, 9], is_train=is_train),
            Crop([0.8, 1], is_train),
            Gaussian_noise([0, 0.1], is_train),
        ]
        attack = Combination_attack(attacks, is_train)
    else:
        raise ValueError("Not available Attacks")
    return attack
|
[
"torch.cuda.is_available"
] |
[((672, 697), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (695, 697), False, 'import torch\n'), ((839, 864), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (862, 864), False, 'import torch\n')]
|
import zipfile
import pandas as pd
"""
Lay of the land:
"""
class UserHandler(object):
    # This Object is used to access the directory that saves the results
    def __init__(self, user_directory_path, pathToMixture):
        """Hold the paths needed to locate a user's result artifacts."""
        self.user_directory_path = user_directory_path
        # Current user; must be set by the caller before addZip() is used.
        self.username = None
        self.pathToMixture = pathToMixture
    def addZip(self, zipfile_path):
        """Validate and open a results zip for the current user.

        NOTE(review): this method looks unfinished -- it reads the zip's
        top-level directory name and pathToMixture into locals but never
        uses them; confirm the intended behavior.
        """
        if self.username is None:
            print("[userHandler.py][addZip] Error")
        else:
            if zipfile.is_zipfile(zipfile_path):
                with zipfile.ZipFile(zipfile_path, 'r') as zipObj:
                    topDirName = zipObj.infolist()[0].filename
                    var = self.pathToMixture
|
[
"zipfile.is_zipfile",
"zipfile.ZipFile"
] |
[((505, 537), 'zipfile.is_zipfile', 'zipfile.is_zipfile', (['zipfile_path'], {}), '(zipfile_path)\n', (523, 537), False, 'import zipfile\n'), ((560, 594), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zipfile_path', '"""r"""'], {}), "(zipfile_path, 'r')\n", (575, 594), False, 'import zipfile\n')]
|
# -*- coding: utf-8 -*-
import os
import random
from torch.utils.data import Dataset
from PIL import Image
import numpy as np
from datasets.data_io import get_transform, read_all_lines
from datasets.data_io import *
import torchvision.transforms as transforms
import torch
import torch.nn as nn
class LapaPngPng(Dataset):
    """PNG-image / PNG-label segmentation dataset (LaPa-style).

    Each line of ``list_filename`` holds "<image_path> <label_path>",
    both relative to ``datapath``. Training samples get a random
    synchronized crop; evaluation samples are cropped from the
    bottom-right corner and also carry the image file name.
    """
    def __init__(self, datapath, list_filename, training, crop_h, crop_w, channel):
        self.datapath = datapath
        self.ori_filenames, self.gt_filenames = self.load_path(list_filename)
        self.training = training
        self.crop_h = crop_h
        self.crop_w = crop_w
        self.channel = channel  # 3 -> RGB input, 1 -> grayscale input
        self.transform_img = transforms.Compose(
            [
                transforms.Resize(size=(self.crop_h, self.crop_w)),  # resize to (h, w); scales up or down as needed
            ]
        )
        # NOTE(review): Normalize is commented out below -- it previously
        # crashed with "Floating point exception (core dumped)".
        self.processed_color = transforms.Compose([transforms.ColorJitter(brightness=0.5, contrast=0.5),
                                            transforms.ToTensor(), #transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                                            ])
    def load_path(self, list_filename):
        """Parse the list file into parallel (image paths, label paths) lists."""
        lines = read_all_lines(list_filename)
        splits = [line.split() for line in lines]
        ori_images = [x[0] for x in splits]
        gt_images = [x[1] for x in splits]
        return ori_images, gt_images
    # Load the input image as RGB (channel == 3) or grayscale (channel == 1).
    def load_image(self, filename):
        if self.channel == 3:
            return Image.open(filename).convert('RGB')
        elif self.channel == 1:
            return Image.open(filename).convert('L')
    def load_img(self, filename):
        """Load a label map as a single-channel (grayscale) PIL image."""
        return Image.open(filename).convert('L')
    def __len__(self):
        return len(self.ori_filenames)
    # Augmentation helper (currently unused -- see the commented-out calls below).
    def augment_image_pair(self, left_image):
        """Randomly perturb gamma, brightness and (for RGB) per-channel color."""
        # randomly shift gamma
        random_gamma = torch.rand(1).numpy()[0] * 0.4 + 0.8  # random.uniform(0.8, 1.2)
        left_image_aug = left_image ** random_gamma
        # randomly shift brightness
        random_brightness = torch.rand(1).numpy()[0] * 1.5 + 0.5  # random.uniform(0.5, 2.0)
        left_image_aug = left_image_aug * random_brightness
        # randomly shift color (RGB only)
        if self.channel == 3:
            random_colors = (torch.rand(1).numpy()[0] * 0.4 + 0.8, torch.rand(1).numpy()[0] * 0.4 + 0.8,
                             torch.rand(1).numpy()[0] * 0.4 + 0.8)
            white = torch.ones(left_image.shape[1], left_image.shape[2])
            color_image = torch.stack((white * random_colors[0], white * random_colors[1], white * random_colors[2]),dim=0)
            left_image_aug *= color_image
        # saturate back into [0, 1]
        left_image_aug = torch.clamp(left_image_aug, 0, 1)
        return left_image_aug
    def __getitem__(self, index):
        """Return {'ori': image tensor, 'gt': int64 label tensor[, 'img_name']}."""
        ori_img = self.load_image(os.path.join(self.datapath, self.ori_filenames[index]))
        # img_np = np.asarray(ori_img, dtype=float)
        # print(img_np.shape)
        # print(img_np.dtype)
        # ori_img = Image.fromarray(img_np, mode='RGB')
        gt_img = self.load_img(os.path.join(self.datapath, self.gt_filenames[index]))
        # Keep the image file name for evaluation output.
        ori_pathname = self.ori_filenames[index]
        if self.training:
            w, h = ori_img.size
            if w < self.crop_w or h < self.crop_h:
                # Image smaller than the crop size: resize image and label together first.
                ori_img = self.transform_img(ori_img)
                gt_img = self.transform_img(gt_img)
                # Image only: color jitter + ToTensor ([0, 1] scaling).
                ori_img = self.processed_color(ori_img)
                gt_img = np.array(gt_img, dtype='int64')
                gt_img = torch.from_numpy(gt_img)
                gt_img = torch.squeeze(gt_img).long()
                # gt_img = gt_img.squeeze(0) # (h, w)
                # randomly images
                # do_augment = torch.rand(1).numpy()[0]
                # if do_augment > 0.5:
                #     ori_img = self.augment_image_pair(ori_img)
                return {"ori": ori_img,
                        "gt": gt_img}
            # Random crop, applied identically to image and label.
            x1 = random.randint(0, w - self.crop_w)
            y1 = random.randint(0, h - self.crop_h)
            ori_img = ori_img.crop((x1, y1, x1 + self.crop_w, y1 + self.crop_h))
            gt_img = gt_img.crop((x1, y1, x1 + self.crop_w, y1 + self.crop_h))
            # Image only: color jitter + ToTensor ([0, 1] scaling).
            ori_img = self.processed_color(ori_img)
            # The label is converted to tensor without normalization.
            gt_img = np.array(gt_img, dtype='int64')
            gt_img = torch.from_numpy(gt_img)
            gt_img = torch.squeeze(gt_img).long()
            # gt_img = gt_img.squeeze(0) # (h, w)
            # randomly images
            # do_augment = torch.rand(1).numpy()[0]
            # if do_augment > 0.5:
            #     ori_img = self.augment_image_pair(ori_img)
            return {"ori": ori_img,
                    "gt": gt_img}
        else:
            w, h = ori_img.size
            if w < self.crop_w or h < self.crop_h:
                # Image smaller than the crop size: resize image and label together first.
                ori_img = self.transform_img(ori_img)
                gt_img = self.transform_img(gt_img)
                # Image only: color jitter + ToTensor ([0, 1] scaling).
                ori_img = self.processed_color(ori_img)
                gt_img = np.array(gt_img, dtype='int64')
                gt_img = torch.from_numpy(gt_img)
                gt_img = torch.squeeze(gt_img).long()
                # gt_img = gt_img.squeeze(0) # (h, w)
                # randomly images
                # do_augment = torch.rand(1).numpy()[0]
                # if do_augment > 0.5:
                #     ori_img = self.augment_image_pair(ori_img)
                return {"ori": ori_img,
                        "gt": gt_img,
                        "img_name": ori_pathname}
            # Deterministic bottom-right crop for evaluation.
            x1 = w - self.crop_w
            y1 = h - self.crop_h
            ori_img = ori_img.crop((x1, y1, x1 + self.crop_w, y1 + self.crop_h))
            gt_img = gt_img.crop((x1, y1, x1 + self.crop_w, y1 + self.crop_h))
            # Image only: color jitter + ToTensor ([0, 1] scaling).
            ori_img = self.processed_color(ori_img)
            # The label is converted to tensor without normalization.
            gt_img = np.array(gt_img, dtype='int64')
            gt_img = torch.from_numpy(gt_img)
            gt_img = torch.squeeze(gt_img).long()
            return {"ori": ori_img,
                    "gt": gt_img,
                    "img_name": ori_pathname}
|
[
"torchvision.transforms.ColorJitter",
"torch.ones",
"torch.stack",
"random.randint",
"datasets.data_io.read_all_lines",
"torchvision.transforms.ToTensor",
"PIL.Image.open",
"torch.squeeze",
"torch.clamp",
"numpy.array",
"torch.rand",
"torchvision.transforms.Resize",
"os.path.join",
"torch.from_numpy"
] |
[((1218, 1247), 'datasets.data_io.read_all_lines', 'read_all_lines', (['list_filename'], {}), '(list_filename)\n', (1232, 1247), False, 'from datasets.data_io import get_transform, read_all_lines\n'), ((2742, 2775), 'torch.clamp', 'torch.clamp', (['left_image_aug', '(0)', '(1)'], {}), '(left_image_aug, 0, 1)\n', (2753, 2775), False, 'import torch\n'), ((2478, 2530), 'torch.ones', 'torch.ones', (['left_image.shape[1]', 'left_image.shape[2]'], {}), '(left_image.shape[1], left_image.shape[2])\n', (2488, 2530), False, 'import torch\n'), ((2557, 2659), 'torch.stack', 'torch.stack', (['(white * random_colors[0], white * random_colors[1], white * random_colors[2])'], {'dim': '(0)'}), '((white * random_colors[0], white * random_colors[1], white *\n random_colors[2]), dim=0)\n', (2568, 2659), False, 'import torch\n'), ((2877, 2931), 'os.path.join', 'os.path.join', (['self.datapath', 'self.ori_filenames[index]'], {}), '(self.datapath, self.ori_filenames[index])\n', (2889, 2931), False, 'import os\n'), ((3135, 3188), 'os.path.join', 'os.path.join', (['self.datapath', 'self.gt_filenames[index]'], {}), '(self.datapath, self.gt_filenames[index])\n', (3147, 3188), False, 'import os\n'), ((4178, 4212), 'random.randint', 'random.randint', (['(0)', '(w - self.crop_w)'], {}), '(0, w - self.crop_w)\n', (4192, 4212), False, 'import random\n'), ((4230, 4264), 'random.randint', 'random.randint', (['(0)', '(h - self.crop_h)'], {}), '(0, h - self.crop_h)\n', (4244, 4264), False, 'import random\n'), ((4578, 4609), 'numpy.array', 'np.array', (['gt_img'], {'dtype': '"""int64"""'}), "(gt_img, dtype='int64')\n", (4586, 4609), True, 'import numpy as np\n'), ((4631, 4655), 'torch.from_numpy', 'torch.from_numpy', (['gt_img'], {}), '(gt_img)\n', (4647, 4655), False, 'import torch\n'), ((6288, 6319), 'numpy.array', 'np.array', (['gt_img'], {'dtype': '"""int64"""'}), "(gt_img, dtype='int64')\n", (6296, 6319), True, 'import numpy as np\n'), ((6341, 6365), 'torch.from_numpy', 'torch.from_numpy', 
(['gt_img'], {}), '(gt_img)\n', (6357, 6365), False, 'import torch\n'), ((724, 774), 'torchvision.transforms.Resize', 'transforms.Resize', ([], {'size': '(self.crop_h, self.crop_w)'}), '(size=(self.crop_h, self.crop_w))\n', (741, 774), True, 'import torchvision.transforms as transforms\n'), ((932, 984), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.5)', 'contrast': '(0.5)'}), '(brightness=0.5, contrast=0.5)\n', (954, 984), True, 'import torchvision.transforms as transforms\n'), ((1022, 1043), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1041, 1043), True, 'import torchvision.transforms as transforms\n'), ((1700, 1720), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (1710, 1720), False, 'from PIL import Image\n'), ((3661, 3692), 'numpy.array', 'np.array', (['gt_img'], {'dtype': '"""int64"""'}), "(gt_img, dtype='int64')\n", (3669, 3692), True, 'import numpy as np\n'), ((3718, 3742), 'torch.from_numpy', 'torch.from_numpy', (['gt_img'], {}), '(gt_img)\n', (3734, 3742), False, 'import torch\n'), ((5392, 5423), 'numpy.array', 'np.array', (['gt_img'], {'dtype': '"""int64"""'}), "(gt_img, dtype='int64')\n", (5400, 5423), True, 'import numpy as np\n'), ((5449, 5473), 'torch.from_numpy', 'torch.from_numpy', (['gt_img'], {}), '(gt_img)\n', (5465, 5473), False, 'import torch\n'), ((1529, 1549), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (1539, 1549), False, 'from PIL import Image\n'), ((4677, 4698), 'torch.squeeze', 'torch.squeeze', (['gt_img'], {}), '(gt_img)\n', (4690, 4698), False, 'import torch\n'), ((6387, 6408), 'torch.squeeze', 'torch.squeeze', (['gt_img'], {}), '(gt_img)\n', (6400, 6408), False, 'import torch\n'), ((1616, 1636), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (1626, 1636), False, 'from PIL import Image\n'), ((3768, 3789), 'torch.squeeze', 'torch.squeeze', (['gt_img'], {}), '(gt_img)\n', (3781, 3789), False, 'import 
torch\n'), ((5499, 5520), 'torch.squeeze', 'torch.squeeze', (['gt_img'], {}), '(gt_img)\n', (5512, 5520), False, 'import torch\n'), ((1917, 1930), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (1927, 1930), False, 'import torch\n'), ((2099, 2112), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2109, 2112), False, 'import torch\n'), ((2315, 2328), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2325, 2328), False, 'import torch\n'), ((2353, 2366), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2363, 2366), False, 'import torch\n'), ((2420, 2433), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2430, 2433), False, 'import torch\n')]
|
import os
import logging
import argparse
# Shared scratch directory for every artifact this script produces.
TMP_ARTIFACTS = '/tmp_artifacts'
# Output artifact locations: arrays, dataframes, reports and logs.
X_TRAIN_FILENAME = os.path.join(TMP_ARTIFACTS, 'x_train.npy')
TRAIN_DF_FILENAME = os.path.join(TMP_ARTIFACTS, 'train.pkl')
TRAIN_DF_HTML_FILENAME = os.path.join(TMP_ARTIFACTS, 'train.html')
TEST_PRED_DF_FILENAME = os.path.join(TMP_ARTIFACTS, 'test.pkl')
TEST_PRED_DF_HTML_FILENAME = os.path.join(TMP_ARTIFACTS, 'test.html')
CONFUSION_MATRIX_FILENAME = os.path.join(TMP_ARTIFACTS, 'confusion_matrix.jpg')
STDOUT_LOG_FILENAME = os.path.join(TMP_ARTIFACTS, 'stdout_log.txt')
LOGGING_FILENAME = os.path.join(TMP_ARTIFACTS, 'log.txt')
# Log records go to a file so stdout can optionally be redirected too.
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO, filename=LOGGING_FILENAME, filemode='w')
# Command-line interface for the training script.
parser = argparse.ArgumentParser(description='Train a multi-layer perceptron classifier.')
parser.add_argument('train_path', type=str, help='File path or URL to the training data')
parser.add_argument('test_path', type=str, help='File path or URL to the test data')
parser.add_argument('features', nargs='+', type=str, help='column name(s) of the features to use.')
parser.add_argument('--verbose', default=1, type=int, choices=(0, 1), help='redirect stdout to file?')
|
[
"os.path.join",
"argparse.ArgumentParser",
"logging.basicConfig"
] |
[((96, 138), 'os.path.join', 'os.path.join', (['TMP_ARTIFACTS', '"""x_train.npy"""'], {}), "(TMP_ARTIFACTS, 'x_train.npy')\n", (108, 138), False, 'import os\n'), ((160, 200), 'os.path.join', 'os.path.join', (['TMP_ARTIFACTS', '"""train.pkl"""'], {}), "(TMP_ARTIFACTS, 'train.pkl')\n", (172, 200), False, 'import os\n'), ((226, 267), 'os.path.join', 'os.path.join', (['TMP_ARTIFACTS', '"""train.html"""'], {}), "(TMP_ARTIFACTS, 'train.html')\n", (238, 267), False, 'import os\n'), ((293, 332), 'os.path.join', 'os.path.join', (['TMP_ARTIFACTS', '"""test.pkl"""'], {}), "(TMP_ARTIFACTS, 'test.pkl')\n", (305, 332), False, 'import os\n'), ((362, 402), 'os.path.join', 'os.path.join', (['TMP_ARTIFACTS', '"""test.html"""'], {}), "(TMP_ARTIFACTS, 'test.html')\n", (374, 402), False, 'import os\n'), ((432, 483), 'os.path.join', 'os.path.join', (['TMP_ARTIFACTS', '"""confusion_matrix.jpg"""'], {}), "(TMP_ARTIFACTS, 'confusion_matrix.jpg')\n", (444, 483), False, 'import os\n'), ((507, 552), 'os.path.join', 'os.path.join', (['TMP_ARTIFACTS', '"""stdout_log.txt"""'], {}), "(TMP_ARTIFACTS, 'stdout_log.txt')\n", (519, 552), False, 'import os\n'), ((572, 610), 'os.path.join', 'os.path.join', (['TMP_ARTIFACTS', '"""log.txt"""'], {}), "(TMP_ARTIFACTS, 'log.txt')\n", (584, 610), False, 'import os\n'), ((611, 731), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:%(message)s"""', 'level': 'logging.INFO', 'filename': 'LOGGING_FILENAME', 'filemode': '"""w"""'}), "(format='%(levelname)s:%(message)s', level=logging.INFO,\n filename=LOGGING_FILENAME, filemode='w')\n", (630, 731), False, 'import logging\n'), ((738, 824), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train a multi-layer perceptron classifier."""'}), "(description=\n 'Train a multi-layer perceptron classifier.')\n", (761, 824), False, 'import argparse\n')]
|
# Confidential, Copyright 2020, Sony Corporation of America, All rights reserved.
from typing import List, Optional, Sequence, Union
import numpy as np
from tqdm import trange
from .setup_sim_env import make_gym_env
from ..data.interfaces import ExperimentDataSaver, StageSchedule
from ..environment import PandemicSimOpts, PandemicSimNonCLIOpts, NoPandemicDone, PandemicRegulation, austin_regulations
from ..utils import shallow_asdict
__all__ = ['experiment_main', 'seeded_experiment_main']
def seeded_experiment_main(exp_id: int,
                           sim_opts: PandemicSimOpts,
                           sim_non_cli_opts: PandemicSimNonCLIOpts,
                           data_saver: ExperimentDataSaver,
                           pandemic_regulations: Optional[List[PandemicRegulation]] = None,
                           stages_to_execute: Union[int, Sequence[StageSchedule]] = 0,
                           enable_warm_up: bool = False,
                           max_episode_length: int = 120,
                           random_seed: int = 0) -> bool:
    """A helper that runs an experiment with the given seed and records data.

    Returns whatever ``data_saver.finalize`` returns -- presumably whether
    the run's data was saved successfully (TODO confirm against the saver).
    """
    rng = np.random.RandomState(random_seed)
    env = make_gym_env(sim_opts, sim_non_cli_opts,
                       pandemic_regulations=pandemic_regulations or austin_regulations,
                       done_fn=NoPandemicDone(30), numpy_rng=rng)
    env.reset()
    # Normalize the stage spec to a list of StageSchedule entries; a bare
    # int means "hold this single stage with no end day".
    stages = ([StageSchedule(stage=stages_to_execute, end_day=None)]
              if isinstance(stages_to_execute, int) else stages_to_execute)
    # Open-ended schedules are recorded with end_day == -1.
    stage_dict = {f'stage_{i}': (s.stage, s.end_day if s.end_day is not None else -1)
                  for i, s in enumerate(stages)}
    data_saver.begin(env.observation)
    stage_idx = 0
    warm_up_done = not enable_warm_up
    for i in trange(max_episode_length, desc='Simulating day'):
        # During warm-up, hold stage 0 until infection crosses the threshold.
        if not env.observation.infection_above_threshold and not warm_up_done:
            stage = 0
        else:
            warm_up_done = True
            cur_stage = stages[stage_idx]
            stage = cur_stage.stage
            # Advance to the next scheduled stage once its end day is reached.
            if cur_stage.end_day is not None and cur_stage.end_day <= i:
                stage_idx += 1
        obs, reward, done, aux = env.step(stage)
        data_saver.record(obs, reward)
        if done:
            print('done')
            break
    return data_saver.finalize(exp_id=exp_id,
                               seed=random_seed,
                               num_stages_to_execute=len(stages),
                               num_persons=sim_non_cli_opts.population_params.num_persons,
                               **stage_dict,
                               **shallow_asdict(sim_opts))
def experiment_main(exp_id: int,
                    sim_opts: PandemicSimOpts,
                    sim_non_cli_opts: PandemicSimNonCLIOpts,
                    data_saver: ExperimentDataSaver,
                    pandemic_regulations: Optional[List[PandemicRegulation]] = None,
                    stages_to_execute: Union[int, Sequence[StageSchedule]] = 0,
                    enable_warm_up: bool = False,
                    max_episode_length: int = 120,
                    num_random_seeds: int = 5) -> None:
    """A helper that runs multi-seeded experiments and records data.

    Keeps drawing fresh seeds until ``num_random_seeds`` runs have
    succeeded; failed runs are reported and skipped.
    """
    seed_rng = np.random.RandomState(seed=0)
    successes = 0
    while successes < num_random_seeds:
        seed = seed_rng.randint(0, 100000)
        print(f'Running experiment seed: {seed} - {successes + 1}/{num_random_seeds}')
        succeeded = seeded_experiment_main(exp_id=exp_id,
                                           sim_opts=sim_opts,
                                           sim_non_cli_opts=sim_non_cli_opts,
                                           data_saver=data_saver,
                                           pandemic_regulations=pandemic_regulations,
                                           stages_to_execute=stages_to_execute,
                                           enable_warm_up=enable_warm_up,
                                           max_episode_length=max_episode_length,
                                           random_seed=seed)
        if succeeded:
            successes += 1
        else:
            print(f'Experiment with seed {seed} did not succeed. Skipping...')
|
[
"numpy.random.RandomState",
"tqdm.trange"
] |
[((1163, 1197), 'numpy.random.RandomState', 'np.random.RandomState', (['random_seed'], {}), '(random_seed)\n', (1184, 1197), True, 'import numpy as np\n'), ((1810, 1859), 'tqdm.trange', 'trange', (['max_episode_length'], {'desc': '"""Simulating day"""'}), "(max_episode_length, desc='Simulating day')\n", (1816, 1859), False, 'from tqdm import trange\n'), ((3297, 3326), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(0)'}), '(seed=0)\n', (3318, 3326), True, 'import numpy as np\n')]
|
import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import global_add_pool
class DeepMultisets(torch.nn.Module):
    """Deep Sets style graph classifier: per-vertex MLP, sum pooling, readout MLP."""

    def __init__(self, dim_features, dim_target, config):
        super().__init__()
        hidden = config['hidden_units']
        self.fc_vertex = Linear(dim_features, hidden)
        self.fc_global1 = Linear(hidden, hidden)
        self.fc_global2 = Linear(hidden, dim_target)

    def forward(self, data):
        x, batch = data.x, data.batch
        h = F.relu(self.fc_vertex(x))
        # Sum all vertex embeddings belonging to the same graph.
        pooled = global_add_pool(h, batch)
        out = F.relu(self.fc_global1(pooled))
        return self.fc_global2(out)
|
[
"torch_geometric.nn.global_add_pool",
"torch.nn.Linear"
] |
[((338, 372), 'torch.nn.Linear', 'Linear', (['dim_features', 'hidden_units'], {}), '(dim_features, hidden_units)\n', (344, 372), False, 'from torch.nn import Linear\n'), ((399, 433), 'torch.nn.Linear', 'Linear', (['hidden_units', 'hidden_units'], {}), '(hidden_units, hidden_units)\n', (405, 433), False, 'from torch.nn import Linear\n'), ((460, 492), 'torch.nn.Linear', 'Linear', (['hidden_units', 'dim_target'], {}), '(hidden_units, dim_target)\n', (466, 492), False, 'from torch.nn import Linear\n'), ((612, 637), 'torch_geometric.nn.global_add_pool', 'global_add_pool', (['x', 'batch'], {}), '(x, batch)\n', (627, 637), False, 'from torch_geometric.nn import global_add_pool\n')]
|
from pytest import fail, mark, yield_fixture, raises
try:
# in case of PyPI installation, this will work:
from giftgrab.tests.utils import FileChecker
except ImportError:
# in case of installation from source, this will work:
from utils import FileChecker
from time import sleep
from pygiftgrab import VideoSourceFactory
# Module-level test state, populated once by the peri_test session fixture.
factory = None
video_duration = 0 # inferred, in sec
quarter_video_duration = 0 # inferred, in sec
@yield_fixture(scope='session')
def peri_test(colour_space, filepath,
              frame_rate, frame_count,
              frame_width, frame_height):
    """Session-wide setup: grab the source factory singleton and infer
    the video's total and quarter durations (in seconds) from its
    frame count and frame rate."""
    global factory
    factory = VideoSourceFactory.get_instance()
    global video_duration, quarter_video_duration
    video_duration = frame_count / frame_rate
    quarter_video_duration = video_duration / 4
    yield
@mark.observer_pattern
@mark.usefixtures('peri_test')
def test_valid_filepath_returns_raii_reader(
filepath, colour_space,
frame_rate, frame_count,
frame_width, frame_height
):
source = None
global factory
global video_duration, quarter_video_duration
source = factory.create_file_reader(
filepath, colour_space
)
assert source is not None
file_checker = FileChecker(source)
file_checker.attach()
sleep(video_duration)
file_checker.detach()
assert file_checker.assert_colour(colour_space)
assert file_checker.assert_frame_rate(frame_rate)
assert file_checker.assert_frame_dimensions(
frame_width, frame_height)
assert file_checker.assert_data()
assert file_checker.assert_frame_data_lengths(
colour_space, frame_width, frame_height)
@mark.observer_pattern
@mark.usefixtures('peri_test')
def test_reader_releases_file_on_destruction(
filepath, colour_space,
frame_rate, frame_count,
frame_width, frame_height
):
source = None
global factory
global video_duration, quarter_video_duration
source = factory.create_file_reader(
filepath, colour_space
)
assert source is not None
file_checker_1 = FileChecker(source)
file_checker_1.attach()
sleep(quarter_video_duration)
file_checker_1.detach()
assert file_checker_1.assert_data()
del file_checker_1
del source
source = None
source = factory.create_file_reader(
filepath, colour_space
)
file_checker_2 = FileChecker(source)
file_checker_2.attach()
sleep(video_duration)
file_checker_2.detach()
assert file_checker_2.assert_colour(colour_space)
assert file_checker_2.assert_frame_rate(frame_rate)
assert file_checker_2.assert_frame_dimensions(
frame_width, frame_height)
assert file_checker_2.assert_data()
assert file_checker_2.assert_frame_data_lengths(
colour_space, frame_width, frame_height)
@mark.observer_pattern
@mark.usefixtures('peri_test')
def test_invalid_filepath_throws_exception(colour_space):
source = None
global factory
with raises(RuntimeError):
source = factory.create_file_reader(
'/this/path/should/never/exist.video',
colour_space
)
assert source is None
@mark.observer_pattern
@mark.usefixtures('peri_test')
def test_set_sub_frame(
filepath, colour_space, frame_width, frame_height
):
global factory
source = None
source = factory.create_file_reader(
filepath, colour_space
)
sub_x = frame_width // 4
sub_y = frame_height // 4
sub_width = frame_width // 2
sub_height = frame_height // 2
assert sub_x > 0 and sub_x + sub_width < frame_width
assert sub_y > 0 and sub_y + sub_height < frame_height
source.set_sub_frame(sub_x, sub_y,
sub_width, sub_height)
file_checker = FileChecker(source)
file_checker.attach()
global video_duration
sleep(video_duration)
file_checker.detach()
assert file_checker.assert_frame_dimensions(
sub_width, sub_height)
@mark.observer_pattern
@mark.usefixtures('peri_test')
def test_get_full_frame(
filepath, colour_space, frame_width, frame_height
):
global factory
source = None
source = factory.create_file_reader(
filepath, colour_space
)
sub_x = frame_width // 4
sub_y = frame_height // 4
sub_width = frame_width // 2
sub_height = frame_height // 2
assert sub_x > 0 and sub_x + sub_width < frame_width
assert sub_y > 0 and sub_y + sub_height < frame_height
source.set_sub_frame(sub_x, sub_y,
sub_width, sub_height)
global quarter_video_duration
sleep(quarter_video_duration)
source.get_full_frame()
file_checker = FileChecker(source)
file_checker.attach()
global video_duration
sleep(video_duration)
file_checker.detach()
assert file_checker.assert_frame_dimensions(
frame_width, frame_height)
|
[
"utils.FileChecker",
"pytest.yield_fixture",
"pygiftgrab.VideoSourceFactory.get_instance",
"time.sleep",
"pytest.raises",
"pytest.mark.usefixtures"
] |
[((443, 473), 'pytest.yield_fixture', 'yield_fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (456, 473), False, 'from pytest import fail, mark, yield_fixture, raises\n'), ((840, 869), 'pytest.mark.usefixtures', 'mark.usefixtures', (['"""peri_test"""'], {}), "('peri_test')\n", (856, 869), False, 'from pytest import fail, mark, yield_fixture, raises\n'), ((1672, 1701), 'pytest.mark.usefixtures', 'mark.usefixtures', (['"""peri_test"""'], {}), "('peri_test')\n", (1688, 1701), False, 'from pytest import fail, mark, yield_fixture, raises\n'), ((2826, 2855), 'pytest.mark.usefixtures', 'mark.usefixtures', (['"""peri_test"""'], {}), "('peri_test')\n", (2842, 2855), False, 'from pytest import fail, mark, yield_fixture, raises\n'), ((3165, 3194), 'pytest.mark.usefixtures', 'mark.usefixtures', (['"""peri_test"""'], {}), "('peri_test')\n", (3181, 3194), False, 'from pytest import fail, mark, yield_fixture, raises\n'), ((3970, 3999), 'pytest.mark.usefixtures', 'mark.usefixtures', (['"""peri_test"""'], {}), "('peri_test')\n", (3986, 3999), False, 'from pytest import fail, mark, yield_fixture, raises\n'), ((626, 659), 'pygiftgrab.VideoSourceFactory.get_instance', 'VideoSourceFactory.get_instance', ([], {}), '()\n', (657, 659), False, 'from pygiftgrab import VideoSourceFactory\n'), ((1220, 1239), 'utils.FileChecker', 'FileChecker', (['source'], {}), '(source)\n', (1231, 1239), False, 'from utils import FileChecker\n'), ((1270, 1291), 'time.sleep', 'sleep', (['video_duration'], {}), '(video_duration)\n', (1275, 1291), False, 'from time import sleep\n'), ((2054, 2073), 'utils.FileChecker', 'FileChecker', (['source'], {}), '(source)\n', (2065, 2073), False, 'from utils import FileChecker\n'), ((2106, 2135), 'time.sleep', 'sleep', (['quarter_video_duration'], {}), '(quarter_video_duration)\n', (2111, 2135), False, 'from time import sleep\n'), ((2360, 2379), 'utils.FileChecker', 'FileChecker', (['source'], {}), '(source)\n', (2371, 2379), False, 'from utils import 
FileChecker\n'), ((2412, 2433), 'time.sleep', 'sleep', (['video_duration'], {}), '(video_duration)\n', (2417, 2433), False, 'from time import sleep\n'), ((3740, 3759), 'utils.FileChecker', 'FileChecker', (['source'], {}), '(source)\n', (3751, 3759), False, 'from utils import FileChecker\n'), ((3816, 3837), 'time.sleep', 'sleep', (['video_duration'], {}), '(video_duration)\n', (3821, 3837), False, 'from time import sleep\n'), ((4565, 4594), 'time.sleep', 'sleep', (['quarter_video_duration'], {}), '(quarter_video_duration)\n', (4570, 4594), False, 'from time import sleep\n'), ((4642, 4661), 'utils.FileChecker', 'FileChecker', (['source'], {}), '(source)\n', (4653, 4661), False, 'from utils import FileChecker\n'), ((4718, 4739), 'time.sleep', 'sleep', (['video_duration'], {}), '(video_duration)\n', (4723, 4739), False, 'from time import sleep\n'), ((2960, 2980), 'pytest.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (2966, 2980), False, 'from pytest import fail, mark, yield_fixture, raises\n')]
|
VERSION = "1.0.0"
HOST = "192.168.1.217"
PORT = 4578
CERT = "cert.pem"
KEY = "priv.key"
RESTARTS = 5
TARGET_SERVER = "mc.koolkidz.club"
API_URL = "https://api.mcsrvstat.us/2/" + TARGET_SERVER
print("\n----------------------------------------------")
print("Command SSL Socket Server", VERSION)
print("----------------------------------------------")
print("HOST: ", HOST)
print("PORT: ", PORT)
print("TARGET SERVER: ", TARGET_SERVER)
print("----------------------------------------------\n")
import logging
import logging.handlers
import logging.config
import socket
import ssl
import time
from datetime import datetime
from enum import Enum
from threading import Thread
import IPR
logging.config.fileConfig(fname="log_config.conf", disable_existing_loggers=False)
log = logging.getLogger("root")
class Status(Enum):
ON = 10
TURNING_ON = 5
TURNING_OFF = 2
OFF = 0
statusString = {
Status.ON: "ON",
Status.TURNING_ON: "TURNING ON",
Status.TURNING_OFF: "TURNING OFF",
Status.OFF: "OFF",
}
SERVERSTATUS = Status.OFF
class Command(Enum):
triggerOn = 15
triggerOff = 12 # is this a good idea?
check = 8
def SERVERON():
SERVERSTATUS == Status.TURNING_ON
return
def SERVEROFF():
SERVERSTATUS == Status.TURNING_OFF
return
def order(cmd):
log.debug("Recieved command: " + str(cmd))
if cmd == Command.check.value:
# return statusString[SERVERSTATUS]
return Status(SERVERSTATUS)
if SERVERSTATUS == Status.ON:
if cmd == Command.triggerOn.value:
return "Server already on!"
elif cmd == Command.triggerOff.value:
SERVEROFF()
return "Turning off server!"
elif SERVERSTATUS == Status.OFF:
if cmd == Command.triggerOn.value:
SERVERON()
return "Turning on server!"
elif cmd == Command.triggerOff.value:
return "Server already off!"
elif cmd == Command.triggerOn.value or Command.triggerOn.value:
if SERVERSTATUS == Status.TURNING_OFF:
return "Server is Turning off!"
elif SERVERSTATUS == Status.TURNING_ON:
return "Server is Turning on!"
return "Unknown command"
class client(Thread):
def __init__(self, socket, address):
Thread.__init__(self)
self.sock = socket
self.addr = address
self.straddr = str(self.addr[0]) + ":" + str(self.addr[1]) + " : "
log.debug("New client thread: " + str(self.addr[0]) + ":" + str(self.addr[1]))
self.start()
def msg(self, message):
self.sock.send(message.encode())
log.debug(self.straddr + str(message))
def end(self):
log.debug(self.straddr + "Closing socket and thread")
self.sock.close()
self._running = False
def run(self):
recieve = self.sock.recv(256).decode()
try:
recieve = int(recieve)
except:
self.msg("Bad Command")
log.warning(self.straddr + "Bad command recieved: " + recieve)
self.end()
return
self.msg(order(recieve))
self.end()
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain("cert.pem", "priv.key")
def socketListen():
log.info("Command server started")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
sock.bind((HOST, PORT))
sock.listen(5)
with context.wrap_socket(sock, server_side=True) as ssock:
while not (
SERVERSTATUS == Status.TURNING_OFF or SERVERSTATUS == Status.TURNING_ON
):
try:
clientsocket, address = ssock.accept()
if IPR.checkIP(address[0]):
client(clientsocket, address)
else:
log.warning("Blocked: " + address[0])
except OSError as e:
print(e)
def newThread(trgt):
thread = Thread(target=trgt)
thread.daemon = True
thread.start()
return thread
while True:
time.sleep(60)
socketThread = newThread(socketListen)
if not socketThread.is_alive():
if RESTARTS == 0:
log.critical("Max restarts has been hit, server must be manually restared")
break
RESTARTS -= 1
log.error("Server seems to have crashed... Attempting restart")
# TODO: how are we actually turning the server on?
|
[
"threading.Thread",
"threading.Thread.__init__",
"ssl.SSLContext",
"socket.socket",
"IPR.checkIP",
"time.sleep",
"logging.config.fileConfig",
"logging.getLogger"
] |
[((685, 772), 'logging.config.fileConfig', 'logging.config.fileConfig', ([], {'fname': '"""log_config.conf"""', 'disable_existing_loggers': '(False)'}), "(fname='log_config.conf', disable_existing_loggers\n =False)\n", (710, 772), False, 'import logging\n'), ((774, 799), 'logging.getLogger', 'logging.getLogger', (['"""root"""'], {}), "('root')\n", (791, 799), False, 'import logging\n'), ((3148, 3187), 'ssl.SSLContext', 'ssl.SSLContext', (['ssl.PROTOCOL_TLS_SERVER'], {}), '(ssl.PROTOCOL_TLS_SERVER)\n', (3162, 3187), False, 'import ssl\n'), ((3989, 4008), 'threading.Thread', 'Thread', ([], {'target': 'trgt'}), '(target=trgt)\n', (3995, 4008), False, 'from threading import Thread\n'), ((4089, 4103), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (4099, 4103), False, 'import time\n'), ((2284, 2305), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (2299, 2305), False, 'from threading import Thread\n'), ((3306, 3358), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM', '(0)'], {}), '(socket.AF_INET, socket.SOCK_STREAM, 0)\n', (3319, 3358), False, 'import socket\n'), ((3720, 3743), 'IPR.checkIP', 'IPR.checkIP', (['address[0]'], {}), '(address[0])\n', (3731, 3743), False, 'import IPR\n')]
|
import numpy as np
from math import *
from interpolation import InterpVec
class Target(object):
@classmethod
def get_simple_target(cls, pos, vel):
velocity_vectors = [[0, np.array(vel)]]
vel_interp = InterpVec(velocity_vectors)
target = cls(vel_interp=vel_interp)
parameters_of_target = np.array([pos[0], pos[1], 0])
target.set_init_cond(parameters_of_target=parameters_of_target)
return target
def __init__(self, *args, **kwargs):
self.g = kwargs.get('g', 9.80665)
self.dt = kwargs.get('dt', 0.001)
self.vel_interp = kwargs['vel_interp']
def set_init_cond(self, parameters_of_target=None):
if parameters_of_target is None:
parameters_of_target = self.get_standart_parameters_of_target()
self.state = np.array(parameters_of_target)
self.state_0 = np.array(parameters_of_target)
def reset(self):
self.set_state(self.state_0)
def set_state(self, state):
self.state = np.array(state)
def get_state(self):
return self.state
def get_state_0(self):
return self.state_0
def step(self, tau):
x, y, t = self.state
t_end = t + tau
flag = True
while flag:
if t_end - t > self.dt:
dt = self.dt
else:
dt = t_end - t
flag = False
t += dt
vx, vy = self.vel_interp(t)
x += vx * dt
y += vy * dt
self.set_state([x, y, t])
@property
def pos(self):
return self.state[:2]
@property
def vel(self):
return self.vel_interp(self.t)
@property
def t(self):
return self.state[-1]
@property
def Q(self):
vx, vy = self.vel_interp(self.t)
return np.sqrt(vx ** 2 + vy ** 2)
@property
def v(self):
vx, vy = self.vel_interp(self.t)
return np.sqrt(vx ** 2 + vy ** 2)
@property
def x(self):
return self.pos[0]
@property
def y(self):
return self.pos[1]
def get_summary(self):
return {
't': self.t,
'v': self.v,
'x': self.x,
'y': self.y,
'Q': np.degrees(self.Q)
}
|
[
"interpolation.InterpVec",
"numpy.array",
"numpy.degrees",
"numpy.sqrt"
] |
[((233, 260), 'interpolation.InterpVec', 'InterpVec', (['velocity_vectors'], {}), '(velocity_vectors)\n', (242, 260), False, 'from interpolation import InterpVec\n'), ((336, 365), 'numpy.array', 'np.array', (['[pos[0], pos[1], 0]'], {}), '([pos[0], pos[1], 0])\n', (344, 365), True, 'import numpy as np\n'), ((831, 861), 'numpy.array', 'np.array', (['parameters_of_target'], {}), '(parameters_of_target)\n', (839, 861), True, 'import numpy as np\n'), ((885, 915), 'numpy.array', 'np.array', (['parameters_of_target'], {}), '(parameters_of_target)\n', (893, 915), True, 'import numpy as np\n'), ((1029, 1044), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (1037, 1044), True, 'import numpy as np\n'), ((1860, 1886), 'numpy.sqrt', 'np.sqrt', (['(vx ** 2 + vy ** 2)'], {}), '(vx ** 2 + vy ** 2)\n', (1867, 1886), True, 'import numpy as np\n'), ((1975, 2001), 'numpy.sqrt', 'np.sqrt', (['(vx ** 2 + vy ** 2)'], {}), '(vx ** 2 + vy ** 2)\n', (1982, 2001), True, 'import numpy as np\n'), ((2283, 2301), 'numpy.degrees', 'np.degrees', (['self.Q'], {}), '(self.Q)\n', (2293, 2301), True, 'import numpy as np\n'), ((196, 209), 'numpy.array', 'np.array', (['vel'], {}), '(vel)\n', (204, 209), True, 'import numpy as np\n')]
|
import unittest
import json
from tornado.websocket import websocket_connect
from tornado import gen
from malcolm.core import Process, call_with_params, Queue, Context, \
ResponseError
from malcolm.modules.builtin.blocks import proxy_block
from malcolm.modules.demo.blocks import hello_block, counter_block
from malcolm.modules.web.blocks import web_server_block, websocket_client_block
class TestSystemWSCommsServerOnly(unittest.TestCase):
socket = 8881
def setUp(self):
self.process = Process("proc")
self.hello = call_with_params(hello_block, self.process, mri="hello")
self.server = call_with_params(
web_server_block, self.process, mri="server", port=self.socket)
self.result = Queue()
self.process.start()
def tearDown(self):
self.process.stop(timeout=1)
@gen.coroutine
def send_message(self):
conn = yield websocket_connect("ws://localhost:%s/ws" % self.socket)
req = dict(
typeid="malcolm:core/Post:1.0",
id=0,
path=["hello", "greet"],
parameters=dict(
name="me"
)
)
conn.write_message(json.dumps(req))
resp = yield conn.read_message()
resp = json.loads(resp)
self.result.put(resp)
conn.close()
def test_server_and_simple_client(self):
self.server._loop.add_callback(self.send_message)
resp = self.result.get(timeout=2)
assert resp == dict(
typeid="malcolm:core/Return:1.0",
id=0,
value=dict(
typeid='malcolm:core/Map:1.0',
greeting="Hello me",
)
)
class TestSystemWSCommsServerAndClient(unittest.TestCase):
socket = 8883
def setUp(self):
self.process = Process("proc")
self.hello = call_with_params(hello_block, self.process, mri="hello")
self.counter = call_with_params(
counter_block, self.process, mri="counter")
self.server = call_with_params(
web_server_block, self.process, mri="server", port=self.socket)
self.process.start()
self.process2 = Process("proc2")
self.client = call_with_params(
websocket_client_block, self.process2, mri="client",
port=self.socket)
self.process2.start()
def tearDown(self):
self.socket += 1
self.process.stop(timeout=1)
self.process2.stop(timeout=1)
def test_server_hello_with_malcolm_client(self):
call_with_params(
proxy_block, self.process2, mri="hello", comms="client")
block2 = self.process2.block_view("hello")
ret = block2.greet("me2")
assert ret == dict(greeting="Hello me2")
with self.assertRaises(ResponseError):
block2.error()
def test_server_counter_with_malcolm_client(self):
call_with_params(
proxy_block, self.process2, mri="counter", comms="client")
block2 = self.process2.block_view("counter")
assert block2.counter.value == 0
block2.increment()
assert block2.counter.value == 1
block2.zero()
assert block2.counter.value == 0
assert self.client.remote_blocks.value == (
"hello", "counter", "server")
|
[
"malcolm.core.call_with_params",
"json.loads",
"json.dumps",
"malcolm.core.Queue",
"malcolm.core.Process",
"tornado.websocket.websocket_connect"
] |
[((511, 526), 'malcolm.core.Process', 'Process', (['"""proc"""'], {}), "('proc')\n", (518, 526), False, 'from malcolm.core import Process, call_with_params, Queue, Context, ResponseError\n'), ((548, 604), 'malcolm.core.call_with_params', 'call_with_params', (['hello_block', 'self.process'], {'mri': '"""hello"""'}), "(hello_block, self.process, mri='hello')\n", (564, 604), False, 'from malcolm.core import Process, call_with_params, Queue, Context, ResponseError\n'), ((627, 712), 'malcolm.core.call_with_params', 'call_with_params', (['web_server_block', 'self.process'], {'mri': '"""server"""', 'port': 'self.socket'}), "(web_server_block, self.process, mri='server', port=self.socket\n )\n", (643, 712), False, 'from malcolm.core import Process, call_with_params, Queue, Context, ResponseError\n'), ((743, 750), 'malcolm.core.Queue', 'Queue', ([], {}), '()\n', (748, 750), False, 'from malcolm.core import Process, call_with_params, Queue, Context, ResponseError\n'), ((1265, 1281), 'json.loads', 'json.loads', (['resp'], {}), '(resp)\n', (1275, 1281), False, 'import json\n'), ((1828, 1843), 'malcolm.core.Process', 'Process', (['"""proc"""'], {}), "('proc')\n", (1835, 1843), False, 'from malcolm.core import Process, call_with_params, Queue, Context, ResponseError\n'), ((1865, 1921), 'malcolm.core.call_with_params', 'call_with_params', (['hello_block', 'self.process'], {'mri': '"""hello"""'}), "(hello_block, self.process, mri='hello')\n", (1881, 1921), False, 'from malcolm.core import Process, call_with_params, Queue, Context, ResponseError\n'), ((1945, 2005), 'malcolm.core.call_with_params', 'call_with_params', (['counter_block', 'self.process'], {'mri': '"""counter"""'}), "(counter_block, self.process, mri='counter')\n", (1961, 2005), False, 'from malcolm.core import Process, call_with_params, Queue, Context, ResponseError\n'), ((2041, 2126), 'malcolm.core.call_with_params', 'call_with_params', (['web_server_block', 'self.process'], {'mri': '"""server"""', 'port': 
'self.socket'}), "(web_server_block, self.process, mri='server', port=self.socket\n )\n", (2057, 2126), False, 'from malcolm.core import Process, call_with_params, Queue, Context, ResponseError\n'), ((2188, 2204), 'malcolm.core.Process', 'Process', (['"""proc2"""'], {}), "('proc2')\n", (2195, 2204), False, 'from malcolm.core import Process, call_with_params, Queue, Context, ResponseError\n'), ((2227, 2319), 'malcolm.core.call_with_params', 'call_with_params', (['websocket_client_block', 'self.process2'], {'mri': '"""client"""', 'port': 'self.socket'}), "(websocket_client_block, self.process2, mri='client', port=\n self.socket)\n", (2243, 2319), False, 'from malcolm.core import Process, call_with_params, Queue, Context, ResponseError\n'), ((2557, 2630), 'malcolm.core.call_with_params', 'call_with_params', (['proxy_block', 'self.process2'], {'mri': '"""hello"""', 'comms': '"""client"""'}), "(proxy_block, self.process2, mri='hello', comms='client')\n", (2573, 2630), False, 'from malcolm.core import Process, call_with_params, Queue, Context, ResponseError\n'), ((2916, 2991), 'malcolm.core.call_with_params', 'call_with_params', (['proxy_block', 'self.process2'], {'mri': '"""counter"""', 'comms': '"""client"""'}), "(proxy_block, self.process2, mri='counter', comms='client')\n", (2932, 2991), False, 'from malcolm.core import Process, call_with_params, Queue, Context, ResponseError\n'), ((911, 966), 'tornado.websocket.websocket_connect', 'websocket_connect', (["('ws://localhost:%s/ws' % self.socket)"], {}), "('ws://localhost:%s/ws' % self.socket)\n", (928, 966), False, 'from tornado.websocket import websocket_connect\n'), ((1192, 1207), 'json.dumps', 'json.dumps', (['req'], {}), '(req)\n', (1202, 1207), False, 'import json\n')]
|
"""
IPYthon Magics Extension to play audio without displaying the audio widget.
"""
from yaserver import QUOTES_LOCATION, YASERVER_URI
import os
import random
import pathlib
import inspect
from typing import Optional
from IPython import get_ipython
from IPython.display import Audio, display
from IPython.core.magic import line_cell_magic, Magics, magics_class
from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring
# https://stackoverflow.com/questions/61176900/jupyter-colab-play-sound-with-any-error-in-any-cell-play-sound-after-compl/61176901
from IPython.core.ultratb import AutoFormattedTB
# Catch any Exception, play error sound and re-raise the Exception
# -------------------------------------------------
# initialize the formatter for making the tracebacks into strings
itb = AutoFormattedTB(mode='Plain', tb_offset=1)
all_choices = []
included_extensions = ['mp3']
class _InvisibleAudio(Audio):
"""
An invisible (`display: none`) `Audio` element which removes itself when finished playing.
Original sample based on https://stackoverflow.com/a/50648266.
"""
def _repr_html_(self) -> str:
audio = super()._repr_html_()
audio = audio.replace(
"<audio", '<audio onended="this.parentNode.removeChild(this)"'
)
return f'<div style="display:none">{audio}</div>'
# return f'<div">{audio}</div>'
@magics_class
class NotificationMagics(Magics):
"""
IPython extension implementing the magic.
"""
@magic_arguments()
@argument(
"-u",
"--url",
default="quote1.mp3",
help="URL of audio file to play.",
)
@argument(
"line_code",
nargs="*",
help="Other code on the line will be executed, unless this is called as a cell magic.",
)
@line_cell_magic
def yamoment(self, line: str, cell: Optional[str] = None):
args = parse_argstring(self.yamoment, line)
MOMENTDEBUG = False
if line and line == '#MOMENTDEBUG':
MOMENTDEBUG = True
code = cell if cell else " ".join(args.line_code)
try:
ret = self.shell.ex(code)
finally:
quote_url = random.choice(all_choices)
audio = _InvisibleAudio(
url='{}/{}'.format(YASERVER_URI, quote_url), autoplay=True)
if MOMENTDEBUG:
print("[MomentAudio]:{}".format(quote_url))
display(audio)
return ret
def load_ipython_extension(ipython):
ipython.register_magics(NotificationMagics)
file_names = [fn for fn in os.listdir(QUOTES_LOCATION) if any(
fn.endswith(ext) for ext in included_extensions)]
all_choices.extend(file_names)
# ipython.shell.set_custom_exc((Exception,), custom_exc)
# get_ipython().register_magics(NotificationMagics)
|
[
"random.choice",
"IPython.display.display",
"IPython.core.magic_arguments.magic_arguments",
"IPython.core.magic_arguments.parse_argstring",
"IPython.core.ultratb.AutoFormattedTB",
"IPython.core.magic_arguments.argument",
"os.listdir"
] |
[((821, 863), 'IPython.core.ultratb.AutoFormattedTB', 'AutoFormattedTB', ([], {'mode': '"""Plain"""', 'tb_offset': '(1)'}), "(mode='Plain', tb_offset=1)\n", (836, 863), False, 'from IPython.core.ultratb import AutoFormattedTB\n'), ((1528, 1545), 'IPython.core.magic_arguments.magic_arguments', 'magic_arguments', ([], {}), '()\n', (1543, 1545), False, 'from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring\n'), ((1551, 1636), 'IPython.core.magic_arguments.argument', 'argument', (['"""-u"""', '"""--url"""'], {'default': '"""quote1.mp3"""', 'help': '"""URL of audio file to play."""'}), "('-u', '--url', default='quote1.mp3', help='URL of audio file to play.'\n )\n", (1559, 1636), False, 'from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring\n'), ((1676, 1806), 'IPython.core.magic_arguments.argument', 'argument', (['"""line_code"""'], {'nargs': '"""*"""', 'help': '"""Other code on the line will be executed, unless this is called as a cell magic."""'}), "('line_code', nargs='*', help=\n 'Other code on the line will be executed, unless this is called as a cell magic.'\n )\n", (1684, 1806), False, 'from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring\n'), ((1927, 1963), 'IPython.core.magic_arguments.parse_argstring', 'parse_argstring', (['self.yamoment', 'line'], {}), '(self.yamoment, line)\n', (1942, 1963), False, 'from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring\n'), ((2219, 2245), 'random.choice', 'random.choice', (['all_choices'], {}), '(all_choices)\n', (2232, 2245), False, 'import random\n'), ((2459, 2473), 'IPython.display.display', 'display', (['audio'], {}), '(audio)\n', (2466, 2473), False, 'from IPython.display import Audio, display\n'), ((2612, 2639), 'os.listdir', 'os.listdir', (['QUOTES_LOCATION'], {}), '(QUOTES_LOCATION)\n', (2622, 2639), False, 'import os\n')]
|
import tensorflow as tf
import readcifar10
slim = tf.contrib.slim
import os
import resnet
# 定义网络结构
# image:一张图像
# 返回10维的向量
def model(image, keep_prob=0.8, is_training=True):
batch_norm_params = {
"is_training": is_training,
"epsilon": 1e-5, # 防止除以0
"decay": 0.997, # 衰减系数
'scale': True,
'updates_collections': tf.GraphKeys.UPDATE_OPS
}
with slim.arg_scope(
[slim.conv2d], # 设置卷积默认参数的初始化
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=tf.nn.relu, # 激活函数初始化
weights_regularizer=slim.l2_regularizer(0.0001), # l2正则
normalizer_fn=slim.batch_norm, # batch norm层参数初始化
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.max_pool2d], padding="SAME"): # 设置默认的max pooling的初始化
net = slim.conv2d(image, 32, [3, 3], scope='conv1')
net = slim.conv2d(net, 32, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1') # 进行2倍下采样
net = slim.conv2d(net, 64, [3, 3], scope='conv3')
net = slim.conv2d(net, 64, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool2')
net = slim.conv2d(net, 128, [3, 3], scope='conv5')
net = slim.conv2d(net, 128, [3, 3], scope='conv6')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool3')
net = slim.conv2d(net, 256, [3, 3], scope='conv7')
net = tf.reduce_mean(net, axis=[1, 2]) # nhwc--n11c 对特征图求均值
net = slim.flatten(net)
net = slim.fully_connected(net, 1024)
slim.dropout(net, keep_prob) # 添加dropout层
net = slim.fully_connected(net, 10)
return net # 10 dim vec
def loss(logits, label):
one_hot_label = slim.one_hot_encoding(label, 10) # 对label进行one hot编码
slim.losses.softmax_cross_entropy(logits, one_hot_label) # 交叉熵损失
# 正则化的损失
reg_set = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) # 正则化的loss集合
l2_loss = tf.add_n(reg_set)
slim.losses.add_loss(l2_loss)
totalloss = slim.losses.get_total_loss()
return totalloss, l2_loss
# 优化器
def func_optimal(batchsize, loss_val):
# 从0开始
global_step = tf.Variable(0, trainable=False)
# 定义指数衰减率的学习率
lr = tf.train.exponential_decay(0.01,
global_step,
decay_steps=50000 // batchsize, # 衰减步长
decay_rate=0.95, #
staircase=False) # 以平滑的形式
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
op = tf.train.AdamOptimizer(lr).minimize(loss_val, global_step)
return global_step, op, lr
def train():
"""
训练的函数
:return:
"""
batchsize = 64
folder_log = 'logdirs' # 日志存放的路径
folder_model = 'model' # Model存放的路径
if not os.path.exists(folder_log):
os.mkdir(folder_log)
if not os.path.exists(folder_model):
os.mkdir(folder_model)
tr_summary = set()
te_summary = set()
# data
tr_im, tr_label = readcifar10.read(batchsize, 0, 1) # 训练样本
te_im, te_label = readcifar10.read(batchsize, 1, 0) # 测试样本
# 定义网络
# 定义数据
input_data = tf.placeholder(tf.float32, shape=[None, 32, 32, 3],
name='input_data')
# 定义标签
input_label = tf.placeholder(tf.int64, shape=[None],
name='input_label')
keep_prob = tf.placeholder(tf.float32, shape=None,
name='keep_prob')
is_training = tf.placeholder(tf.bool, shape=None,
name='is_training')
# logits = resnet.model_resnet(input_data, keep_prob=keep_prob, is_training=is_training)
logits = model(input_data, keep_prob=keep_prob, is_training=is_training)
# 定义loss
total_loss, l2_loss = loss(logits, input_label)
tr_summary.add(tf.summary.scalar('train total loss', total_loss))
te_summary.add(tf.summary.scalar('test l2_loss', l2_loss))
tr_summary.add(tf.summary.scalar('train total loss', total_loss))
te_summary.add(tf.summary.scalar('test l2_loss', l2_loss))
# 获取accurancy
pred_max = tf.argmax(logits, 1)
correct = tf.equal(pred_max, input_label)
accurancy = tf.reduce_mean(tf.cast(correct, tf.float32))
tr_summary.add(tf.summary.scalar('train accurancy', accurancy))
te_summary.add(tf.summary.scalar('test accurancy', accurancy))
# op
global_step, op, lr = func_optimal(batchsize, total_loss)
tr_summary.add(tf.summary.scalar('train lr', lr))
te_summary.add(tf.summary.scalar('test lr', lr))
tr_summary.add(tf.summary.image('train image', input_data * 128 + 128)) # 这里的图片数据是处理过的
te_summary.add(tf.summary.image('test image', input_data * 128 + 128))
with tf.Session() as sess:
# 对全局变量和局部变量进行初始化
sess.run(tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer()))
# 启动多线程管理器
tf.train.start_queue_runners(sess=sess,
coord=tf.train.Coordinator())
saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
ckpt = tf.train.latest_checkpoint(folder_model)
if ckpt:
saver.restore(sess, ckpt)
epoch_val = 100
# 合并日志信息
tr_summary_op = tf.summary.merge(list(tr_summary))
te_summary_op = tf.summary.merge(list(te_summary))
summary_writer = tf.summary.FileWriter(folder_log, sess.graph)
for i in range(50000 * epoch_val):
# 每次获取一个batch size的数据
train_im_batch, train_label_batch = \
sess.run([tr_im, tr_label])
feed_dict = {
input_data: train_im_batch,
input_label: train_label_batch,
keep_prob: 0.8,
is_training: True
}
_, global_step_val, \
lr_val, \
total_loss_val, \
accurancy_val, tr_summary_str = sess.run([op,
global_step,
lr,
total_loss,
accurancy, tr_summary_op],
feed_dict=feed_dict)
summary_writer.add_summary(tr_summary_str, global_step_val)
# 每隔100次打印
if i % 100 == 0:
print("{},{},{},{}".format(global_step_val,
lr_val, total_loss_val,
accurancy_val))
if i % (50000 // batchsize) == 0:
test_loss = 0
test_acc = 0
for ii in range(10000//batchsize):
test_im_batch, test_label_batch = \
sess.run([te_im, te_label])
feed_dict = {
input_data: test_im_batch,
input_label: test_label_batch,
keep_prob: 1.0,
is_training: False
}
total_loss_val, global_step_val, \
accurancy_val, te_summary_str = sess.run([total_loss,global_step,
accurancy, te_summary_op],
feed_dict=feed_dict)
summary_writer.add_summary(te_summary_str, global_step_val)
test_loss += total_loss_val
test_acc += accurancy_val
print('test:', test_loss * batchsize / 10000,
test_acc * batchsize / 10000)
if i % 1000 == 0:
saver.save(sess, "{}/model.ckpt{}".format(folder_model, str(global_step_val)))
return
if __name__ == '__main__':
train()
|
[
"os.mkdir",
"tensorflow.train.Coordinator",
"tensorflow.get_collection",
"tensorflow.local_variables_initializer",
"tensorflow.global_variables",
"tensorflow.Variable",
"tensorflow.train.latest_checkpoint",
"tensorflow.add_n",
"os.path.exists",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.summary.FileWriter",
"tensorflow.equal",
"readcifar10.read",
"tensorflow.control_dependencies",
"tensorflow.summary.image",
"tensorflow.summary.scalar",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.reduce_mean",
"tensorflow.train.exponential_decay",
"tensorflow.argmax",
"tensorflow.train.AdamOptimizer"
] |
[((1977, 2030), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.REGULARIZATION_LOSSES'], {}), '(tf.GraphKeys.REGULARIZATION_LOSSES)\n', (1994, 2030), True, 'import tensorflow as tf\n'), ((2059, 2076), 'tensorflow.add_n', 'tf.add_n', (['reg_set'], {}), '(reg_set)\n', (2067, 2076), True, 'import tensorflow as tf\n'), ((2264, 2295), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (2275, 2295), True, 'import tensorflow as tf\n'), ((2323, 2438), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['(0.01)', 'global_step'], {'decay_steps': '(50000 // batchsize)', 'decay_rate': '(0.95)', 'staircase': '(False)'}), '(0.01, global_step, decay_steps=50000 //\n batchsize, decay_rate=0.95, staircase=False)\n', (2349, 2438), True, 'import tensorflow as tf\n'), ((2617, 2659), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (2634, 2659), True, 'import tensorflow as tf\n'), ((3185, 3218), 'readcifar10.read', 'readcifar10.read', (['batchsize', '(0)', '(1)'], {}), '(batchsize, 0, 1)\n', (3201, 3218), False, 'import readcifar10\n'), ((3249, 3282), 'readcifar10.read', 'readcifar10.read', (['batchsize', '(1)', '(0)'], {}), '(batchsize, 1, 0)\n', (3265, 3282), False, 'import readcifar10\n'), ((3331, 3401), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 32, 32, 3]', 'name': '"""input_data"""'}), "(tf.float32, shape=[None, 32, 32, 3], name='input_data')\n", (3345, 3401), True, 'import tensorflow as tf\n'), ((3463, 3521), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '[None]', 'name': '"""input_label"""'}), "(tf.int64, shape=[None], name='input_label')\n", (3477, 3521), True, 'import tensorflow as tf\n'), ((3570, 3626), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None', 'name': '"""keep_prob"""'}), "(tf.float32, shape=None, name='keep_prob')\n", (3584, 
3626), True, 'import tensorflow as tf\n'), ((3678, 3733), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': 'None', 'name': '"""is_training"""'}), "(tf.bool, shape=None, name='is_training')\n", (3692, 3733), True, 'import tensorflow as tf\n'), ((4304, 4324), 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (4313, 4324), True, 'import tensorflow as tf\n'), ((4339, 4370), 'tensorflow.equal', 'tf.equal', (['pred_max', 'input_label'], {}), '(pred_max, input_label)\n', (4347, 4370), True, 'import tensorflow as tf\n'), ((2670, 2705), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (2693, 2705), True, 'import tensorflow as tf\n'), ((2974, 3000), 'os.path.exists', 'os.path.exists', (['folder_log'], {}), '(folder_log)\n', (2988, 3000), False, 'import os\n'), ((3010, 3030), 'os.mkdir', 'os.mkdir', (['folder_log'], {}), '(folder_log)\n', (3018, 3030), False, 'import os\n'), ((3043, 3071), 'os.path.exists', 'os.path.exists', (['folder_model'], {}), '(folder_model)\n', (3057, 3071), False, 'import os\n'), ((3081, 3103), 'os.mkdir', 'os.mkdir', (['folder_model'], {}), '(folder_model)\n', (3089, 3103), False, 'import os\n'), ((4022, 4071), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train total loss"""', 'total_loss'], {}), "('train total loss', total_loss)\n", (4039, 4071), True, 'import tensorflow as tf\n'), ((4092, 4134), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""test l2_loss"""', 'l2_loss'], {}), "('test l2_loss', l2_loss)\n", (4109, 4134), True, 'import tensorflow as tf\n'), ((4156, 4205), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train total loss"""', 'total_loss'], {}), "('train total loss', total_loss)\n", (4173, 4205), True, 'import tensorflow as tf\n'), ((4226, 4268), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""test l2_loss"""', 'l2_loss'], {}), "('test l2_loss', l2_loss)\n", (4243, 4268), True, 'import tensorflow as 
tf\n'), ((4402, 4430), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (4409, 4430), True, 'import tensorflow as tf\n'), ((4451, 4498), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train accurancy"""', 'accurancy'], {}), "('train accurancy', accurancy)\n", (4468, 4498), True, 'import tensorflow as tf\n'), ((4519, 4565), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""test accurancy"""', 'accurancy'], {}), "('test accurancy', accurancy)\n", (4536, 4565), True, 'import tensorflow as tf\n'), ((4657, 4690), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train lr"""', 'lr'], {}), "('train lr', lr)\n", (4674, 4690), True, 'import tensorflow as tf\n'), ((4711, 4743), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""test lr"""', 'lr'], {}), "('test lr', lr)\n", (4728, 4743), True, 'import tensorflow as tf\n'), ((4765, 4820), 'tensorflow.summary.image', 'tf.summary.image', (['"""train image"""', '(input_data * 128 + 128)'], {}), "('train image', input_data * 128 + 128)\n", (4781, 4820), True, 'import tensorflow as tf\n'), ((4857, 4911), 'tensorflow.summary.image', 'tf.summary.image', (['"""test image"""', '(input_data * 128 + 128)'], {}), "('test image', input_data * 128 + 128)\n", (4873, 4911), True, 'import tensorflow as tf\n'), ((4923, 4935), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4933, 4935), True, 'import tensorflow as tf\n'), ((5315, 5355), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['folder_model'], {}), '(folder_model)\n', (5341, 5355), True, 'import tensorflow as tf\n'), ((5599, 5644), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['folder_log', 'sess.graph'], {}), '(folder_log, sess.graph)\n', (5620, 5644), True, 'import tensorflow as tf\n'), ((1505, 1537), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['net'], {'axis': '[1, 2]'}), '(net, axis=[1, 2])\n', (1519, 1537), True, 'import tensorflow as tf\n'), ((5261, 5282), 
'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (5280, 5282), True, 'import tensorflow as tf\n'), ((2720, 2746), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (2742, 2746), True, 'import tensorflow as tf\n'), ((4997, 5030), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5028, 5030), True, 'import tensorflow as tf\n'), ((5058, 5090), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (5088, 5090), True, 'import tensorflow as tf\n'), ((5204, 5226), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (5224, 5226), True, 'import tensorflow as tf\n')]
|
from django.contrib import admin
from . import models
class UserFieldFilter(admin.ModelAdmin):
fields = ['role']
admin.site.register(models.User, UserFieldFilter)
admin.site.register(models.Role)
|
[
"django.contrib.admin.site.register"
] |
[((121, 170), 'django.contrib.admin.site.register', 'admin.site.register', (['models.User', 'UserFieldFilter'], {}), '(models.User, UserFieldFilter)\n', (140, 170), False, 'from django.contrib import admin\n'), ((171, 203), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Role'], {}), '(models.Role)\n', (190, 203), False, 'from django.contrib import admin\n')]
|
import pytest
from gitlabform import EXIT_INVALID_INPUT
from gitlabform.configuration.projects_and_groups import ConfigurationProjectsAndGroups
from gitlabform.filter import NonEmptyConfigsProvider
def test_error_on_missing_key():
config_yaml = """
---
# no key at all
"""
with pytest.raises(SystemExit) as e:
configuration = ConfigurationProjectsAndGroups(config_string=config_yaml)
NonEmptyConfigsProvider(configuration, None, None)
assert e.value.code == EXIT_INVALID_INPUT
def test_error_on_empty_key():
config_yaml = """
---
projects_and_groups:
"""
with pytest.raises(SystemExit) as e:
configuration = ConfigurationProjectsAndGroups(config_string=config_yaml)
NonEmptyConfigsProvider(configuration, None, None)
assert e.value.code == EXIT_INVALID_INPUT
|
[
"pytest.raises",
"gitlabform.configuration.projects_and_groups.ConfigurationProjectsAndGroups",
"gitlabform.filter.NonEmptyConfigsProvider"
] |
[((302, 327), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (315, 327), False, 'import pytest\n'), ((358, 415), 'gitlabform.configuration.projects_and_groups.ConfigurationProjectsAndGroups', 'ConfigurationProjectsAndGroups', ([], {'config_string': 'config_yaml'}), '(config_string=config_yaml)\n', (388, 415), False, 'from gitlabform.configuration.projects_and_groups import ConfigurationProjectsAndGroups\n'), ((424, 474), 'gitlabform.filter.NonEmptyConfigsProvider', 'NonEmptyConfigsProvider', (['configuration', 'None', 'None'], {}), '(configuration, None, None)\n', (447, 474), False, 'from gitlabform.filter import NonEmptyConfigsProvider\n'), ((627, 652), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (640, 652), False, 'import pytest\n'), ((683, 740), 'gitlabform.configuration.projects_and_groups.ConfigurationProjectsAndGroups', 'ConfigurationProjectsAndGroups', ([], {'config_string': 'config_yaml'}), '(config_string=config_yaml)\n', (713, 740), False, 'from gitlabform.configuration.projects_and_groups import ConfigurationProjectsAndGroups\n'), ((749, 799), 'gitlabform.filter.NonEmptyConfigsProvider', 'NonEmptyConfigsProvider', (['configuration', 'None', 'None'], {}), '(configuration, None, None)\n', (772, 799), False, 'from gitlabform.filter import NonEmptyConfigsProvider\n')]
|
import logging
from cliff import command
from smiley import db
from smiley import output
class Show(command.Command):
"""Show the details of one run.
Includes summaries of the thread resource consumption, when
multiple threads are present.
"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Show, self).get_parser(prog_name)
parser.add_argument(
'--database',
default='smiley.db',
help='filename for the database (%(default)s)',
)
parser.add_argument(
'run_id',
help='identifier for the run',
)
return parser
def take_action(self, parsed_args):
self.db = db.DB(parsed_args.database)
run = self.db.get_run(parsed_args.run_id)
details = {
'id': run.id,
'cwd': run.cwd,
'description': run.description,
'start_time': run.start_time.isoformat(),
'end_time': run.end_time.isoformat(),
'error_message': run.error_message,
'traceback': run.traceback,
}
output.dump_dictionary(details, self.log.info, 0)
threads = list(self.db.get_thread_details(parsed_args.run_id))
if len(threads) > 1:
for thread in threads:
td = {
'id': thread.id,
'start_time': thread.start_time.isoformat(),
'end_time': thread.end_time.isoformat(),
}
output.dump_dictionary(td, self.log.info, 0)
return
|
[
"smiley.db.DB",
"smiley.output.dump_dictionary",
"logging.getLogger"
] |
[((276, 303), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (293, 303), False, 'import logging\n'), ((742, 769), 'smiley.db.DB', 'db.DB', (['parsed_args.database'], {}), '(parsed_args.database)\n', (747, 769), False, 'from smiley import db\n'), ((1148, 1197), 'smiley.output.dump_dictionary', 'output.dump_dictionary', (['details', 'self.log.info', '(0)'], {}), '(details, self.log.info, 0)\n', (1170, 1197), False, 'from smiley import output\n'), ((1553, 1597), 'smiley.output.dump_dictionary', 'output.dump_dictionary', (['td', 'self.log.info', '(0)'], {}), '(td, self.log.info, 0)\n', (1575, 1597), False, 'from smiley import output\n')]
|
# encoding:utf-8
"""
@Time : 2020-05-22 21:14
@Author : <EMAIL>
@File : rnn-start.py
@Software: PyCharm
"""
import d2lzh as d2l
import math
from mxnet import autograd, nd
from mxnet.gluon import loss as gloss
import time
def to_onehot(X, size):
return [nd.one_hot(x, size) for x in X.T]
def get_params():
def _one(shape):
return nd.random.normal(scale=0.01, shape=shape, ctx=ctx)
W_xh = _one((num_inputs, num_hiddens))
W_hh = _one((num_hiddens, num_hiddens))
b_h = nd.zeros(num_hiddens, ctx=ctx)
W_hq = _one((num_hiddens, num_outputs))
b_q = nd.zeros(num_outputs, ctx=ctx)
params = [W_xh, W_hh, b_h, W_hq, b_q]
for param in params:
param.attach_grad()
return params
def init_rnn_state(batch_size, num_hiddens, ctx):
return (nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx),)
def rnn(inputs, state, params):
W_xh, W_hh, b_h, W_hq, b_q = params
H, = state
outputs = []
for X in inputs:
H = nd.tanh(nd.dot(X, W_xh) + nd.dot(H, W_hh) + b_h)
Y = nd.dot(H, W_hq) + b_q
outputs.append(Y)
return outputs, (H,)
def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,
num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx):
"""
预测函数
:param prefix:
:param num_chars:
:param rnn:
:param params:
:param init_rnn_state:
:param num_hiddens:
:param vocab_size:
:param ctx:
:param idx_to_char:
:param char_to_idx:
:return:
"""
state = init_rnn_state(1, num_hiddens, ctx)
output = [char_to_idx[prefix[0]]]
for t in range(num_chars + len(prefix) - 1):
X = to_onehot(nd.array([output[-1]], ctx=ctx), vocab_size)
(Y, state) = rnn(X, state, params)
if t < len(prefix) - 1:
output.append(char_to_idx[prefix[t + 1]])
else:
output.append(int(Y[0].argmax(axis=1).asscalar()))
return ''.join([idx_to_char[i] for i in output])
def grad_clipping(params, theta, ctx):
"""
裁剪梯度
:param params:
:param theta:
:param ctx:
:return:
"""
norm = nd.array([0], ctx)
for param in params:
norm += (param.grad ** 2).sum()
norm = norm.sqrt().asscalar()
if norm > theta:
for param in params:
param.grad[:] *= theta / norm
def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
vocab_size, ctx, corpus, idx_to_char, char_to_idx, is_random_iter, num_epochs,
num_steps, lr, clipping_theta, batch_size, pred_period, pred_len, prefixes):
"""
训练并预测
:param rnn:
:param get_params:
:param init_rnn_state:
:param num_hiddens:
:param vocab_size:
:param ctx:
:param corpus:
:param idx_to_char:
:param char_to_idx:
:param is_random_iter:
:param num_epochs:
:param num_steps:
:param lr:
:param clipping_theta:
:param batch_size:
:param pred_period:
:param pred_len:
:param prefixes:
:return:
"""
if is_random_iter:
data_iter_fn = d2l.data_iter_random
else:
data_iter_fn = d2l.data_iter_consecutive
params = get_params()
loss = gloss.SoftmaxCrossEntropyLoss()
for epoch in range(num_epochs):
if not is_random_iter:
state = init_rnn_state(batch_size, num_hiddens, ctx)
l_sum, n, start = 0.0, 0, time.time()
data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, ctx)
for X, Y in data_iter:
if is_random_iter:
state = init_rnn_state(batch_size, num_hiddens, ctx)
else:
for s in state:
s.detach()
with autograd.record():
inputs = to_onehot(X, vocab_size)
(outputs, state) = rnn(inputs, state, params)
outputs = nd.concat(*outputs, dim=0)
y = Y.T.reshape((-1,))
l = loss(outputs, y).mean()
l.backward()
grad_clipping(params, clipping_theta, ctx)
d2l.sgd(params, lr, 1)
l_sum += l.asscalar() * y.size
n += y.size
if (epoch + 1) % pred_period == 0:
print('epoch %d,perplexity %f,time %.2f sec' %
(epoch + 1, math.exp(l_sum / n), time.time() - start))
for prefix in prefixes:
print(' -', predict_rnn(prefix, pred_len, rnn, params,
init_rnn_state, num_hiddens, vocab_size, ctx,
idx_to_char, char_to_idx))
if __name__ == '__main__':
(corpus_indices, char_to_idx, idx_to_char, vocab_size) \
= d2l.load_data_jay_lyrics()
X = nd.arange(10).reshape((2, 5))
# print(X.T)
inputs = to_onehot(X, vocab_size)
# 定义模型
num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size
ctx = d2l.try_gpu()
# print('will use', ctx)
state = init_rnn_state(X.shape[0], num_hiddens, ctx)
inputs = to_onehot(X.as_in_context(ctx), vocab_size)
params = get_params()
outputs, state_new = rnn(inputs, state, params)
print(len(outputs), outputs[0].shape, state_new[0].shape)
print(predict_rnn('分开', 10, rnn, params, init_rnn_state, num_hiddens,
vocab_size, ctx, idx_to_char, char_to_idx))
num_epochs, num_steps, batch_size, lr, clipping_theta = 250, 35, 32, 1e2, 1e-2
pred_period, pred_len, prefixes = 50, 50, ['分开', '不分开']
train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
vocab_size, ctx, corpus_indices, idx_to_char,
char_to_idx, True, num_epochs, num_steps, lr,
clipping_theta, batch_size, pred_period, pred_len, prefixes)
train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
vocab_size, ctx, corpus_indices, idx_to_char,
char_to_idx, False, num_epochs, num_steps, lr,
clipping_theta, batch_size, pred_period, pred_len,
prefixes)
|
[
"math.exp",
"mxnet.autograd.record",
"d2lzh.load_data_jay_lyrics",
"mxnet.gluon.loss.SoftmaxCrossEntropyLoss",
"mxnet.nd.zeros",
"mxnet.nd.one_hot",
"mxnet.nd.random.normal",
"time.time",
"mxnet.nd.arange",
"mxnet.nd.concat",
"mxnet.nd.array",
"mxnet.nd.dot",
"d2lzh.sgd",
"d2lzh.try_gpu"
] |
[((505, 535), 'mxnet.nd.zeros', 'nd.zeros', (['num_hiddens'], {'ctx': 'ctx'}), '(num_hiddens, ctx=ctx)\n', (513, 535), False, 'from mxnet import autograd, nd\n'), ((591, 621), 'mxnet.nd.zeros', 'nd.zeros', (['num_outputs'], {'ctx': 'ctx'}), '(num_outputs, ctx=ctx)\n', (599, 621), False, 'from mxnet import autograd, nd\n'), ((2121, 2139), 'mxnet.nd.array', 'nd.array', (['[0]', 'ctx'], {}), '([0], ctx)\n', (2129, 2139), False, 'from mxnet import autograd, nd\n'), ((3214, 3245), 'mxnet.gluon.loss.SoftmaxCrossEntropyLoss', 'gloss.SoftmaxCrossEntropyLoss', ([], {}), '()\n', (3243, 3245), True, 'from mxnet.gluon import loss as gloss\n'), ((4716, 4742), 'd2lzh.load_data_jay_lyrics', 'd2l.load_data_jay_lyrics', ([], {}), '()\n', (4740, 4742), True, 'import d2lzh as d2l\n'), ((4928, 4941), 'd2lzh.try_gpu', 'd2l.try_gpu', ([], {}), '()\n', (4939, 4941), True, 'import d2lzh as d2l\n'), ((266, 285), 'mxnet.nd.one_hot', 'nd.one_hot', (['x', 'size'], {}), '(x, size)\n', (276, 285), False, 'from mxnet import autograd, nd\n'), ((356, 406), 'mxnet.nd.random.normal', 'nd.random.normal', ([], {'scale': '(0.01)', 'shape': 'shape', 'ctx': 'ctx'}), '(scale=0.01, shape=shape, ctx=ctx)\n', (372, 406), False, 'from mxnet import autograd, nd\n'), ((800, 850), 'mxnet.nd.zeros', 'nd.zeros', ([], {'shape': '(batch_size, num_hiddens)', 'ctx': 'ctx'}), '(shape=(batch_size, num_hiddens), ctx=ctx)\n', (808, 850), False, 'from mxnet import autograd, nd\n'), ((1053, 1068), 'mxnet.nd.dot', 'nd.dot', (['H', 'W_hq'], {}), '(H, W_hq)\n', (1059, 1068), False, 'from mxnet import autograd, nd\n'), ((1674, 1705), 'mxnet.nd.array', 'nd.array', (['[output[-1]]'], {'ctx': 'ctx'}), '([output[-1]], ctx=ctx)\n', (1682, 1705), False, 'from mxnet import autograd, nd\n'), ((3413, 3424), 'time.time', 'time.time', ([], {}), '()\n', (3422, 3424), False, 'import time\n'), ((4090, 4112), 'd2lzh.sgd', 'd2l.sgd', (['params', 'lr', '(1)'], {}), '(params, lr, 1)\n', (4097, 4112), True, 'import d2lzh as d2l\n'), ((4751, 
4764), 'mxnet.nd.arange', 'nd.arange', (['(10)'], {}), '(10)\n', (4760, 4764), False, 'from mxnet import autograd, nd\n'), ((3731, 3748), 'mxnet.autograd.record', 'autograd.record', ([], {}), '()\n', (3746, 3748), False, 'from mxnet import autograd, nd\n'), ((3888, 3914), 'mxnet.nd.concat', 'nd.concat', (['*outputs'], {'dim': '(0)'}), '(*outputs, dim=0)\n', (3897, 3914), False, 'from mxnet import autograd, nd\n'), ((1000, 1015), 'mxnet.nd.dot', 'nd.dot', (['X', 'W_xh'], {}), '(X, W_xh)\n', (1006, 1015), False, 'from mxnet import autograd, nd\n'), ((1018, 1033), 'mxnet.nd.dot', 'nd.dot', (['H', 'W_hh'], {}), '(H, W_hh)\n', (1024, 1033), False, 'from mxnet import autograd, nd\n'), ((4313, 4332), 'math.exp', 'math.exp', (['(l_sum / n)'], {}), '(l_sum / n)\n', (4321, 4332), False, 'import math\n'), ((4334, 4345), 'time.time', 'time.time', ([], {}), '()\n', (4343, 4345), False, 'import time\n')]
|
from dataclasses import dataclass, field
from decimal import Decimal
from enum import Enum
from typing import Dict, List, Optional, Union
from xsdata.models.datatype import XmlDate
from models.xlink import TypeType
from models.xml import LangValue
__NAMESPACE__ = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
class XdmFiltering(Enum):
"""<div>
<p>
Clause 1.2 of validation rule
<a href="http://www.w3.org/TR/xmlschema11-1/#sec-cvc-assertion">Assertion satisifed</a> (in Structures sec. 3.13.4.1) says
</p>
<blockquote>
<p>By default, comments and processing instructions are
excluded from the partial post-schema-validation infoset,
but at user option processors may retain comments and
processing instructions instead of excluding them.</p>
</blockquote>
<p>
The value "<tt>comments-and-PIs-excluded</tt>" denotes the default
situation: comments and processing instructions are suppressed
before creating the XDM instance and thus cannot be examined
by assertions.
</p>
<p>
The value "<tt>comments-and-PIs-included</tt>" denotes the opposite:
comments and processing instructions are included in the XDM
instance and thus can be examined by assertions. (Since this is
required to be "at user option", any processor that supports this
token must also be available in a configuration that supports the
other token.)
</p>
<p>
(The user option was added in November 2012 to address bug
<a href="http://www.w3.org/Bugs/Public/show_bug.cgi?id=13935">13935
xsd 1.1 assertions testing comment nodes</a>.
These token values were added 20 January 2012 to allow both
configurations to be tested.)
</p>
</div>
"""
COMMENTS_AND_PIS_EXCLUDED = "comments-and-PIs-excluded"
COMMENTS_AND_PIS_INCLUDED = "comments-and-PIs-included"
@dataclass
class Appinfo:
class Meta:
name = "appinfo"
namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
source: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
}
)
other_attributes: Dict[str, str] = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
content: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
"mixed": True,
}
)
class ExpectedOutcome(Enum):
"""<div>
<p>
Enumerates the possible values for the prescribed outcome
of a test. Values include both (a) the possible values of
type <a href="#type_test-outcome">ts:test-outcome</a> and
the following additional values:
</p>
<dl>
<dt>
<tt>implementation-defined</tt>
</dt>
<dd>(For instance tests) The value of the
<tt>[validity]</tt> property on the validation root
depends upon some property or behavior which is
explicitly described in the relevant version of the spec
as "implementation-defined", or for which the spec explicitly
imposes a requirement that implementations specify their
behavior. (It follows that this
value should never occur for 1.0 tests.)</dd>
<dd>(For schema tests) The conformance of the schema
depends upon some property or behavior explicitly
described in the spec as "implementation-defined",
or for which the spec explicitly
imposes a requirement that implementations specify their
behavior.</dd>
</dl>
<p>Note: in most cases of implementation-defined behaviors,
as a matter of test suite design it is better to analyse
the set of possible implementation behaviors, define
version tokens for the possible behaviors, and specify
more informative results based on those tokens. The value
<tt>implementation-defined</tt> is provided for situations
where this is not feasible for whatever reason.
</p>
<dl>
<dt>
<tt>implementation-dependent</tt>
</dt>
<dd>(For instance tests) The value of the
<tt>[validity]</tt> property on the validation root
depends upon some property or behavior which is
explicitly described in the relevant version of the spec
as "implementation-dependent", or otherwise explicitly
described as varying among implementations but not
"implementation-defined". (For XSD 1.0, this will often
take the form of a normative "<span class="rfc">may</span>" in the text.)
</dd>
<dd>(For schema tests) The conformance of the schema
depends upon some property or behavior explicitly
described in the spec as "implementation-dependent" or
as varying among implementations, but not described as
"implementation-defined".</dd>
<dt>
<tt>indeterminate</tt>
</dt>
<dd>The intended result is indeterminate for one of the
following reasons, or for other reasons:<ul><li>The result is under-determined (the spec is vague
or underspecified), but not described explicitly as
varying among conforming implementations.
</li><li>The spec imposed contradictory requirements on the
result. (I.e. the result is
<em>over-determined.)</em></li><li>
There is an unresolved dispute within the working
group as to what the spec requires the result to be.
(This includes cases where the working group cannot
agree on whether the spec explicitly labels the
result as implementation-dependent or
implementation-defined or not, as well as cases
where the group cannot agree on how to apply the
spec to the case in hand.)
</li></ul></dd>
</dl>
<p>N.B. the values <tt>implementation-dependent</tt> and
<tt>implementation-defined</tt> should be used only when
the spec is explicit about the implementation-dependence
of the result and it is thus clear that the
implementation-dependence is a design choice consciously
made by the working group. They must not be used in cases
where the spec simply appeals to some concept which it
turns out not to define: such cases are to be marked
<tt>indeterminate</tt>.
</p>
<p>Note: in most cases, as a matter of language design
it is better for the language specification to prescribe
clearly a particular result for a test, or to identify the
result explicitly as implementation-defined or
implementation-dependent. The value
<tt>indeterminate</tt> is provided for situations where
this has not been done for whatever reason.
</p>
<p class="note">The value <tt>invalid-latent</tt> described
in earlier drafts of this schema document is no longer
needed; the version keywords for complex-type restriction
behaviors can be used to describe the relevant cases
more precisely.
</p>
</div>
"""
VALID = "valid"
INVALID = "invalid"
NOT_KNOWN = "notKnown"
RUNTIME_SCHEMA_ERROR = "runtime-schema-error"
IMPLEMENTATION_DEFINED = "implementation-defined"
IMPLEMENTATION_DEPENDENT = "implementation-dependent"
INDETERMINATE = "indeterminate"
INVALID_LATENT = "invalid-latent"
class KnownToken(Enum):
"""<div>
<p>Tokens to denote well-known (i.e. documented) versions, features,
or implementation-defined behaviors, of XSD.</p> <p>The <tt>known-
token</tt> type is a union of several other types, each with an
enumeration of values. Each sub-type defines keywords for a set of
mutually exclusive versions, features, or behaviors, such that in
any given schema validation episode, at most one keyword in any
subtype will apply. For examples, see the various subtypes defined
immediately below. </p> </div>
"""
VALUE_1_0 = "1.0"
VALUE_1_1 = "1.1"
VALUE_1_0_1E = "1.0-1e"
VALUE_1_0_2E = "1.0-2e"
XML_1_0 = "XML-1.0"
XML_1_0_1E_4E = "XML-1.0-1e-4e"
XML_1_0_5E = "XML-1.0-5e"
XML_1_1 = "XML-1.1"
UNICODE_4_0_0 = "Unicode_4.0.0"
UNICODE_6_0_0 = "Unicode_6.0.0"
CTR_ALL_COMPILE = "CTR-all-compile"
CTR_ALL_RUNTIME = "CTR-all-runtime"
CTR_ALL_IDEP = "CTR-all-idep"
RESTRICTED_XPATH_IN_CTA = "restricted-xpath-in-CTA"
FULL_XPATH_IN_CTA = "full-xpath-in-CTA"
COMMENTS_AND_PIS_EXCLUDED = "comments-and-PIs-excluded"
COMMENTS_AND_PIS_INCLUDED = "comments-and-PIs-included"
class KnownXsdVersion(Enum):
"""<div>
<p> Tokens to denote specific known versions of XSD. </p> <p> Each
token denotes the version of the XSD language identified by the
<tt>ts:standard-version-id</tt> attribute on the
<tt>xsd:enumeration</tt> element. That is, "<tt>1.0</tt>" denotes
XSD 1.0 (without reference to a particular edition), and
"<tt>1.1</tt>" denotes XSD 1.1 (without referece to a particular
edition). </p> </div>
"""
VALUE_1_0 = "1.0"
VALUE_1_1 = "1.1"
class RuntimeSchemaError(Enum):
"""<div>
<p>
Tokens to denote different implementation-defined
behavior in the presence of faulty restriction in
a complex-type definition.
</p>
<p>
A full explanation of this token and its meaning
is needed, but not yet available. For the moment let it
suffice to say that if an <tt>all</tt>-group
in a restriction allows content not allowed by
the base type, the processor is not required
to detect the problem by inspection of the schema
in isolation. Three behaviors are allowed; the choice
among them is implementation-defined. The values
denoting the three behaviors are these.
</p>
<dl>
<dt>
<tt>CTR-all-compile</tt>
</dt>
<dd>Compile-time detection: the processor always
detects the problem by examining the schema in
isolation; it warrants that no non-conforming
schema will ever be used in validation.
</dd>
<dt>
<tt>CTR-all-runtime</tt>
</dt>
<dd>Run-time detection: the processor never
detects the problem by examining the schema in
isolation; it detects it always and only when
an instance of the type is valid against the
restriction but not against the base type.
If no instance of the type forces the recognition
of the fault, then a non-conforming schema will
have been used in validation. The results, however,
will always be the same as for a schema in
which the error had been corrected.
(Processors that don't always check the declaration
in isolation will need to validate each instance
both against its governing type and against the
base type.)
</dd>
<dt>
<tt>CTR-all-idep</tt>
</dt>
<dd>Implementation-dependent detection: the processor
sometimes detects the problem by examining the schema in
isolation, sometimes when examining an instance.
No guarantees.
</dd>
</dl>
<p>Note, 20 January 2012. Is this distinction still required,
or has it been overtaken by the change made to resolve
<a href="https://www.w3.org/Bugs/Public/show_bug.cgi?id=12185">bug 12185 Conditional Type Assignment and substitutability</a>
(or other late changes)?</p>
</div>
"""
CTR_ALL_COMPILE = "CTR-all-compile"
CTR_ALL_RUNTIME = "CTR-all-runtime"
CTR_ALL_IDEP = "CTR-all-idep"
class Status(Enum):
ACCEPTED = "accepted"
STABLE = "stable"
QUERIED = "queried"
DISPUTED_TEST = "disputed-test"
DISPUTED_SPEC = "disputed-spec"
class TestOutcome(Enum):
    """<div>
    <p>
    Enumerates the possible outcomes of running a test.
    Usually, these are values of the <tt>[validity]</tt>
    property on the validation root.
    </p>
    <p>The most common values are:</p>
    <dl>
    <dt>
    <tt>valid</tt>
    </dt>
    <dd>(For instance tests) The value of the <tt>[validity]</tt>
    property on the validation root is <tt>valid</tt>.</dd>
    <dd>(For schema tests) The schema is a conforming schema.</dd>
    <dt>
    <tt>invalid</tt>
    </dt>
    <dd>(For instance tests) The value of the <tt>[validity]</tt>
    property on the validation root is <tt>invalid</tt>.</dd>
    <dd>(For schema tests) The schema is <em>not</em> a
    conforming schema.</dd>
    <dt>
    <tt>notKnown</tt>
    </dt>
    <dd>(For instance tests) The value of the <tt>[validity]</tt>
    property on the validation root is <tt>notKnown</tt>.</dd>
    <dd>(For schema tests, this value is meaningless.)</dd>
    </dl>
    <p>Note: processors built as <a href="http://www.w3.org/TR/xmlschema11-1/#key-validator">instance validators</a> are not required by XSD to
    distinguish between invalid documents and documents with
    unknown validity; it is thus not an absolute requirement
    (although it is desirable for clarity)
    that a test result distinguish <tt>invalid</tt>
    from <tt>notKnown</tt> outcomes.
    </p>
    <p>One further value is needed only in fairly specialized
    circumstances (but is essential there):</p>
    <dl>
    <dt>
    <tt>runtime-schema-error</tt>
    </dt>
    <dd>
    <p>(For instance tests) The instance has a schema with
    a latent error (see description below in the documentation
    for type <a href="#type_expected-outcome">ts:expected-outcome</a>);
    the processor did not detect the latent error on the
    corresponding schema test, but the instance document
    has exposed the error (by including content
    which is valid against the apparent content model of the
    governing type, but not valid against the base type)
    and the processor has detected the schema error in the
    course of instance validation.
    </p>
    <p>Note: processors are encouraged, though not required, to
    distinguish this outcome from <tt>invalid</tt>, since
    on an instance test <tt>invalid</tt> normally means that
    the processor has found an invalid instance, not a
    non-conforming schema.
    </p>
    </dd>
    <dd>
    <p>(For schema tests) The value <tt>runtime-schema-error</tt>
    is meaningless for schema tests and should not be used for
    them. (It would be a contradiction in terms.)</p>
    </dd>
    </dl>
    </div>
    """
    # Per the docstring above: for schema tests, valid/invalid mean
    # conforming/non-conforming; the other two apply to instance tests.
    VALID = "valid"
    INVALID = "invalid"
    NOT_KNOWN = "notKnown"  # meaningless for schema tests
    RUNTIME_SCHEMA_ERROR = "runtime-schema-error"  # latent schema error exposed during instance validation
class TestSuiteResultsPublicationPermission(Enum):
    """Values for the ``publicationPermission`` attribute of
    ``testSuiteResults``: how widely the W3C may disseminate the
    reported results (absence of the attribute grants no permission).
    """
    W3_C_MEMBERS = "W3C members"
    PUBLIC = "public"
class UnicodeVersions(Enum):
    """<div>
    <p> Tokens to denote specific known versions of Unicode. </p> <p>
    Each token denotes the version of the Unicode specification. The
    list is not complete; in the only cases where results are known to
    vary between Unicode versions, results are published for version
    4.0.0 and 6.0.0. Implementors wishing to provide reference results
    for other versions of Unicode are welcome to submit such results.
    </p> </div>
    """
    # Only the two versions with published reference results are listed.
    UNICODE_4_0_0 = "Unicode_4.0.0"
    UNICODE_6_0_0 = "Unicode_6.0.0"
class XmlSubstrate(Enum):
    """<div>
    <p>
    Tokens to denote different versions of XML-dependent
    datatypes. Conforming XSD 1.1 processors may support
    XML 1.0-based datatypes, XML 1.1-based datatypes,
    or both. There is dispute in the working group over
    whether conforming XSD 1.0 processors are allowed to
    suport XML 1.1-based datatypes or not.
    </p>
    <p>
    The value "<tt>XML-1.0</tt>" denotes processor support
    for, or test applicability to, XSD datatypes based on XML
    1.0, without specifying a particular edition. (This value
    is retained for backward compatibility of this schema, but
    it should be avoided unless there is no difference, for a
    given test or test result, between editions 1-4 and
    edition 5 of XML 1.0. Where there is a difference, the
    values "<tt>XML-1.0-1e-4e</tt>" and "<tt>XML-1.0-5e</tt>"
    should be used in preference.
    (XSD 1.1 describes XML 1.0 Fifth Edition as the base
    version in its normative reference, so in theory the
    distinction between "<tt>XML-1.0-1e-4e</tt>" and
    "<tt>XML-1.0-5e</tt>" is only relevant to XSD 1.0
    processors. In practice, it may also be relevant for some
    XSD 1.1 processors.
    </p>
    <p>
    The value "<tt>XML-1.0-1e-4e</tt>" denotes processor support
    for, or test applicability to, XSD datatypes based on XML
    1.0 First Edition through Fourth Edition.
    </p>
    <p>
    The value "<tt>XML-1.0-5e</tt>" denotes processor support
    for, or test applicability to, XSD datatypes based on XML
    1.0 Fifth Edition.
    </p>
    <p>
    The value "<tt>XML-1.1</tt>" denotes processor support
    for, or test applicability to, XSD datatypes based on XML
    1.1 (for which at the moment there is only one edition).
    </p>
    <p>
    In most cases, of course, "<tt>XML-1.0-5e</tt>" and
    "<tt>XML-1.1</tt>" will describe the same behaviors.
    </p>
    </div>
    """
    XML_1_0 = "XML-1.0"              # edition unspecified (legacy; prefer the two below)
    XML_1_0_1E_4E = "XML-1.0-1e-4e"  # XML 1.0 First through Fourth Edition
    XML_1_0_5E = "XML-1.0-5e"        # XML 1.0 Fifth Edition
    XML_1_1 = "XML-1.1"              # XML 1.1
class XpathInCta(Enum):
    """<div>
    <p>
    Tokens to distinguish tests which use only the "required
    subset" of XPath in conditional type assignment
    from tests which use full XPath (or: any XPath outside
    the subset) in conditional type assignment.
    See "3.12.6 Constraints on Type Alternative Schema Components"
    of the Structures spec, which reads in part
    </p>
    <blockquote>
    <p>A conforming processor must accept and process any XPath
    expression conforming to the "required subset" of [XPath 2.0]
    defined by the following grammar.</p>
    <p style="margin-left: 2em;">
    Note: Any XPath expression containing no static errors as
    defined in [XPath 2.0] may appear in a conforming schema.
    Conforming processors may but are not required to support
    XPath expressions not belonging to the required subset of
    XPath.</p>
    </blockquote>
    <p>
    The value "<tt>restricted-xpath-in-CTA</tt>" denotes processor support
    for, or test applicability to, the minimal subset of XPath
    required of all conforming 1.1 processors. All 1.1 processors
    should support this feature and run tests marked with it.
    </p>
    <p>
    The value "<tt>full-xpath-in-CTA</tt>" denotes processor support
    for, or test applicability to, full XPath in conditional type
    assignment expressions.
    </p>
    <p>
    (These token values were added 29 July 2011 to address bug
    <a href="http://www.w3.org/Bugs/Public/show_bug.cgi?id=13455">13455
    XPath subset causes problem</a>.)
    </p>
    </div>
    """
    RESTRICTED_XPATH_IN_CTA = "restricted-xpath-in-CTA"  # required-subset XPath only
    FULL_XPATH_IN_CTA = "full-xpath-in-CTA"              # full XPath in conditional type assignment
class Xsd10Editions(Enum):
    """<div>
    <p>
    Tokens to denote specific editions of XSD 1.0.
    </p>
    <p>
    Each token denotes the version of the XSD language
    identified by the <tt>ts:standard-version-id</tt>
    attribute on the <tt>xsd:enumeration</tt> element.
    That is,
    "<tt>1.0-1e</tt>" and "<tt>1.0-2e</tt>" represent
    1.0 First Edition and 1.0 Second Edition,
    respectively.
    </p>
    <p>Outside the context of XSD 1.0, these edition
    identifiers have no meaning or applicability.
    </p>
    </div>
    """
    VALUE_1_0_1E = "1.0-1e"  # XSD 1.0 First Edition
    VALUE_1_0_2E = "1.0-2e"  # XSD 1.0 Second Edition
@dataclass
class Documentation:
    """Binding for the ``ts:documentation`` element: mixed wildcard
    content plus optional ``source`` and ``xml:lang`` attributes.
    """
    class Meta:
        name = "documentation"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    # Optional URI-like reference for the documentation text.
    source: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
        }
    )
    # xml:lang of the documentation text (xml namespace attribute).
    lang: Optional[Union[str, LangValue]] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "namespace": "http://www.w3.org/XML/1998/namespace",
        }
    )
    # Arbitrary attributes from other namespaces, kept as raw strings.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
    # Mixed content: interleaved text and arbitrary child elements.
    content: List[object] = field(
        default_factory=list,
        metadata={
            "type": "Wildcard",
            "namespace": "##any",
            "mixed": True,
        }
    )
@dataclass
class Expected:
    """<div>
    <p>The validation outcome prescribed by the spec
    for a test in the XSTS.</p>
    <p>This element has one optional attribute:</p>
    <ul>
    <li>
    <p><tt>version</tt> - a list of version tokens.
    The result specified is applicable to processor
    configurations supporting <em>all</em> of the
    indicated versions or features of XSD.
    See the definition of the
    <a href="#type_version-info"><tt>version-info</tt></a>
    type.
    </p>
    <p>It is an error for more than one <tt>expected</tt>
    element to be applicable to any given processor
    configuration; this is most easily avoided by
    making sure that any two sibling <tt>expected</tt>
    elements have <tt>version</tt> attributes containing
    mutually exclusive tokens.
    </p>
    </li>
    </ul>
    <p class="note">Note: The meaning of the <tt>version</tt></p>
    <p>
    On tests and elements for groups of
    tests (<tt>testGroup</tt> etc.), a <tt>version</tt>
    attribute of the form <code>version="<i>x</i><i>y</i><i>z</i>"</code> means "If <strong>any</strong> of
    <tt>x</tt>, <tt>y</tt>, or <tt>z</tt> are supported, tests
    in this group are applicable."
    </p>
    <p>On the <tt>expected</tt> element, the
    meaning changes in a crucial way: the tokens are connected
    with an implicit <tt>and</tt>, not an <tt>or</tt>. So
    <code>version="<i>x</i><i>y</i><i>z</i>"</code> means
    "If <strong>all</strong> of <tt>x</tt>, <tt>y</tt>, or
    <tt>z</tt> are supported, the prescribed outcome is as
    described. So on a test group, <code>version="1.0
    1.1"</code> means tests for both versions are included.
    On an <tt>expected</tt> element, <code>version="1.0
    1.1"</code> would mean the expected result holds only if a
    given processor is using both version 1.0 and version 1.1
    in the same validation episode. Since the two tokens are
    defined as mutually exclusive, this would be a
    contradiction.
    </p>
    <p class="note">As a matter of test suite design, it
    is a good idea to keep <tt>version</tt> attributes
    on <tt>expected</tt> elements to a single token if
    possible, to minimize opportunities for confusion.
    </p>
    <p>And one required attribute:</p>
    <ul>
    <li>
    <p><tt>validity</tt> - indicates the expected outcome
    of the test, using a value of type
    <a href="#type_expected-outcome">ts:expected-outcome</a>.</p>
    <p>
    For an instance test, this typically indicates the expected
    value of the <code>[validity]</code> property on the
    root element of the instance document, or indicates
    that the value may vary among processors.
    </p>
    <p>
    For a schema test, this indicates whether the
    schema created from the schema documents in the test
    is expected to be a conforming schema (<code>valid</code>)
    or a non-conforming schema (<code>invalid</code>).
    The value <code>notKnown</code> has no meaning
    for a schema test.
    </p>
    </li>
    </ul>
    </div>
    """
    class Meta:
        name = "expected"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    # Prescribed outcome; required by the schema.
    validity: Optional[ExpectedOutcome] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # Version tokens; on this element they are implicitly ANDed
    # (see the docstring), unlike on tests/test groups.
    version: List[Union[KnownToken, Decimal, str]] = field(
        default_factory=list,
        metadata={
            "type": "Attribute",
            "tokens": True,
        }
    )
    # Arbitrary attributes from other namespaces.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class Annotation:
    """<div>
    <p> This is an exact copy of the <tt>annotation</tt> element defined
    in the Schema Recommendation. It is duplicated here in order to
    replicate the functionality of the <tt>xsd:annotation</tt> element
    and because the Schema for Schemas cannot be imported. </p> </div>
    """
    class Meta:
        name = "annotation"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    # Structured (machine-oriented) annotation children.
    appinfo: List[Appinfo] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    # Human-readable annotation children.
    documentation: List[Documentation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    # Arbitrary attributes from other namespaces.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class Ref:
    """Base binding for reference elements carrying XLink locator
    attributes (``xlink:type``/``xlink:href``) plus optional
    annotations; subclassed by the document/test-set reference
    elements in this module.
    """
    class Meta:
        name = "ref"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://www.w3.org/XML/2004/xml-schema-test-suite/",
        }
    )
    # xlink:type, fixed default "locator".
    type: TypeType = field(
        default=TypeType.LOCATOR,
        metadata={
            "type": "Attribute",
            "namespace": "http://www.w3.org/1999/xlink",
        }
    )
    # xlink:href — the target of the reference.
    href: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "namespace": "http://www.w3.org/1999/xlink",
        }
    )
    # Arbitrary attributes from other namespaces.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class StatusEntry:
    """Base binding for test-status entries: a required ``status`` and
    ``date``, an optional Bugzilla cross-reference, and annotations.
    Subclassed by the ``current`` and ``prior`` elements.
    """
    class Meta:
        name = "statusEntry"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://www.w3.org/XML/2004/xml-schema-test-suite/",
        }
    )
    # Status token (accepted/stable/...); required.
    status: Optional[Status] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # Date of the last change of the test or its metadata; required.
    date: Optional[XmlDate] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # Must match a W3C Bugzilla show_bug URL (see the "pattern" key).
    bugzilla: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "pattern": r"http://www\.w3\.org/Bugs/Public/show_bug\.cgi\?id=[0-9]*",
        }
    )
    # Arbitrary attributes from other namespaces.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class TestResult:
    """<div>
    <p>
    The result of an individual instance test or a schema test.
    </p>
    <p>
    This element has four required attributes:
    </p>
    <ul>
    <li><tt>validity</tt> - the validition outcome of the test.
    A value of type <a href="#type_expected-outcome">ts:expected-outcome</a>,
    i.e.
    one of "<tt>valid</tt>", "<tt>invalid</tt>",
    "<tt>notKnown</tt>", or "<tt>runtime-schema-error</tt>".
    </li>
    <li><tt>set</tt> - the value of the "<tt>name</tt>"
    attribute of the test set to which the test belongs.
    </li>
    <li><tt>group</tt> - the value of the "<tt>name</tt>"
    attribute of the test group to which the test belongs.
    </li>
    <li><tt>test</tt> - the value of the "<tt>name</tt>"
    attribute of the schema test or instance test, the
    validation outcome of which this result reports.
    </li>
    </ul>
    <p>
    NOTE: The "<tt>set</tt>", "<tt>group</tt>" and
    "<tt>test</tt>" attributes are used to uniquely identify
    the test within the XSTS for which this result reports the
    validation outcome. Each matches the "<tt>name</tt>"
    attribute of the respective element in the test suite.
    </p>
    <p>
    This element has one optional attribute:
    </p>
    <ul>
    <li><tt>normalizedLoad</tt> - a relative load value, intended as an indicator
    of the resource requirements of an individual
    test. Values may be based on processing time,
    memory usage or a combination of the two.
    Values should be in the vicinity of 1.0.
    </li>
    </ul>
    <p>The element has one optional element:</p>
    <ul>
    <li><tt>annotation</tt> - zero or more instances of more detailed
    (<tt>ts:documentation</tt>) or structured (<tt>ts:appinfo</tt>)
    information or commentary regarding the individual
    test result. Reporters are encouraged to use
    <tt>annotation/appinfo</tt> to report more detailed outcome
    information, such as error and warning messages.
    </li>
    </ul>
    </div>
    """
    class Meta:
        name = "testResult"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    # Outcome reported by the processor; required.
    validity: Optional[TestOutcome] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # set/group/test together uniquely identify the test in the XSTS
    # (each matches the "name" attribute of the corresponding element).
    set: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    group: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    test: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # Relative resource-usage indicator; values near 1.0 per the docstring.
    normalized_load: Optional[Decimal] = field(
        default=None,
        metadata={
            "name": "normalizedLoad",
            "type": "Attribute",
        }
    )
    # Arbitrary attributes from other namespaces.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class Current(StatusEntry):
    """<div>
    <p>The current status of a test in the XSTS.</p>
    <p>This element has two attributes, both of which are
    required:</p>
    <ul>
    <li><tt>status</tt> - the status of the test. One of
    "<tt>accepted</tt>", "<tt>stable</tt>",
    "<tt>disputed-test</tt>" or "<tt>disputed-spec</tt>"
    (see the XSTS website for an explanation of these values).
    </li>
    <li><tt>date</tt> - the date on which the test or the
    metadata (including the value in the
    <tt>status</tt> attribute, but also anything else
    of importance) was last changed.
    </li>
    </ul>
    </div>
    """
    # All fields (annotation, status, date, bugzilla, other_attributes)
    # are inherited from StatusEntry; only element name/namespace differ.
    class Meta:
        name = "current"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
@dataclass
class DocumentationReference(Ref):
    """<div>
    <p> A link to documentation relevant to a test, such as a link to
    the Recommendation, an erratum, an archived email discussion, etc.
    </p> </div>
    """
    # All fields (annotation, type, href, other_attributes) are
    # inherited from Ref; only element name/namespace differ.
    class Meta:
        name = "documentationReference"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
@dataclass
class InstanceDocument(Ref):
    """Binding for ``ts:instanceDocument`` — a link (via the xlink
    attributes inherited from Ref) to a file containing the instance
    document of an instance test.
    """
    class Meta:
        name = "instanceDocument"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
@dataclass
class Prior(StatusEntry):
    """<div>
    <p>A former status of a test in the XSTS.</p>
    <p>This element has two attributes, both of which are
    required:</p>
    <ul>
    <li><tt>status</tt> - the former status of the test. One of
    "<tt>accepted</tt>", "<tt>stable</tt>",
    "<tt>disputed-test</tt>" or "<tt>disputed-spec</tt>"
    (see the XSTS website for an explanation of these values).
    </li>
    <li><tt>date</tt> - the date on which the test or the
    metadata (including the value in the
    <tt>status</tt> attribute, but also anything else
    of importance) was last changed.
    </li>
    </ul>
    </div>
    """
    # All fields (annotation, status, date, bugzilla, other_attributes)
    # are inherited from StatusEntry; only element name/namespace differ.
    class Meta:
        name = "prior"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
@dataclass
class SchemaDocument(Ref):
    """Binding for ``ts:schemaDocument`` — a link (via the xlink
    attributes inherited from Ref) to a file containing a schema
    document of a schema test.
    """
    class Meta:
        name = "schemaDocument"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
@dataclass
class TestSetRef(Ref):
    """Binding for ``ts:testSetRef`` — a reference (via the xlink
    attributes inherited from Ref) to one of the test sets making up
    a test suite.
    """
    class Meta:
        name = "testSetRef"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
@dataclass
class TestSuiteResults:
    """<div>
    <p>
    This is the root element of a document containing a test
    result report. The report takes the form of a set of test
    results returned by a processor/validator when run against
    the XSTS.
    </p>
    <p>
    It has three required attributes:
    </p>
    <ul>
    <li><tt>suite</tt> - the name of the test suite to which
    these results correspond. This should be the value of
    the <tt>name</tt> attribute of the <tt>testSuite</tt>
    element at the root of the test suite document
    describing the tests to which these results correspond.
    </li>
    <li><tt>processor</tt> - some identifying information for
    the processor/ validator which produced the reported
    results. The value of this attribute is left to the
    discretion of the reporter.
    </li>
    <li><tt>submitDate</tt> - the date on which these results
    were submitted to the XSTS Task Force.
    </li>
    </ul>
    <p>The element also has one optional attribute:</p>
    <ul>
    <li><tt>publicationPermission</tt> - the degree to which the
    result reporter authorizes the W3C to disseminate the
    reported results. One of "<tt>W3C members</tt>" or
    "<tt>public</tt>" (see the XSTS website for an explanation
    of these values). If this attribute is absent, no
    permission to publish is granted.
    </li>
    </ul>
    <p>This element has two optional elements:</p>
    <ul>
    <li><tt>annotation</tt> - zero or more instances of more
    detailed (<tt>ts:documentation</tt>) or structured
    (<tt>ts:appinfo</tt>) information or commentary
    regarding the enclosed test results.
    </li>
    <li><tt>testResult</tt> - any number of reports of the
    results of individual tests. Any results may be omitted,
    particularly those for tests of features for which the
    processor claims no support.
    </li>
    </ul>
    </div>
    """
    class Meta:
        name = "testSuiteResults"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    # Individual test results; any may be omitted (see docstring).
    test_result: List[TestResult] = field(
        default_factory=list,
        metadata={
            "name": "testResult",
            "type": "Element",
        }
    )
    # Name of the test suite these results correspond to; required.
    suite: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # Free-form processor/validator identification; required.
    processor: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # Date the results were submitted to the XSTS Task Force; required.
    submit_date: Optional[XmlDate] = field(
        default=None,
        metadata={
            "name": "submitDate",
            "type": "Attribute",
            "required": True,
        }
    )
    # Publication authorization; absence grants no permission.
    publication_permission: Optional[TestSuiteResultsPublicationPermission] = field(
        default=None,
        metadata={
            "name": "publicationPermission",
            "type": "Attribute",
        }
    )
    # Arbitrary attributes from other namespaces.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class InstanceTest:
    """<div>
    <p>
    This element groups together information about an instance
    document which should be validated against the schema
    referenced in the enclosing <tt>testGroup</tt>.
    </p>
    <p>
    Note: per section 5.2 "Assessing Schema-Validity" of the
    Recommendation "XML Schema Part 1: Structures", validation
    may be started in a variety of ways. For the purposes of
    the XSTS, only the third method shall be used:
    </p>
    <blockquote>
    <p>
    The processor starts from Schema-Validity Assessment
    (Element) (3.3.4) with no stipulated declaration or
    definition.
    </p>
    </blockquote>
    <p>The validation root is the outermost element in the
    instance document.</p>
    <p>
    The <tt>instanceTest</tt> element has one required
    attribute:
    </p>
    <ul>
    <li><tt>name</tt> - the name of the instance document, which
    must differ from the name of any other
    <tt>schemaTest</tt> or <tt>instanceTest</tt> element
    within the enclosing <tt>testGroup</tt></li>
    </ul>
    <p>
    and one attribute which is optional, for signaling
    that the test is applicable only to a particular set of
    versions of XSD:
    </p>
    <ul>
    <li>
    <p><tt>version</tt> - Tests which only apply to certain
    versions of XML Schema list those versions in the
    <tt>version</tt> attribute.
    </p>
    <p>Processors supporting <em>any</em> version or feature
    indicated by a keyword in the attribute should run the
    test. (Or, more declaratively: the test is meaningful
    to any processor which supports any of the features or
    versions listed.) If no value is specified, all
    processors which haven't already skipped the enclosing
    test group, test set, or test suite should run the
    test.
    </p>
    <p>
    The value is a list of version tokens. See the
    definition of the <a href="#type_version-info"><tt>version-info</tt></a>
    type.</p>
    <p class="note">Note: running instance tests with a
    processor for an inapplicable version may produce an
    failure owing to non-conformant constructs in the
    schema document; if the processor does not detect the
    problem or continues anyway, the results are certain
    to be meaningless.
    </p>
    </li>
    </ul>
    <p>
    One child element is required:
    </p>
    <ul>
    <li><tt>instanceDocument</tt> - a link to a file containing
    the instance document.
    </li>
    </ul>
    <p>
    Four child elements may optionally be present:
    </p>
    <ul>
    <li><tt>annotation</tt> - zero or more instances of general
    documentation</li>
    <li><tt>expected</tt> - the prescribed validation outcome for
    the instance document. Optional, and repeatable.
    Each <tt>expected</tt> element indicates the result
    on this test for a particular set of versions of the
    language.
    </li>
    <li><tt>current</tt> - the current status of this test in
    the XSTS (an indication of the test's accuracy in testing
    the feature it is intended to test).
    </li>
    <li><tt>prior</tt> - the history of any changes in the
    status of this test.
    </li>
    </ul>
    <p>
    The elements "<tt>expected</tt>" and "<tt>current</tt>" may
    be absent when tests are contributed, but will always be
    present for tests included in the XSTS.
    </p>
    <p>The <tt>current</tt> and <tt>prior</tt> elements should
    be used to keep a change history of the test; see
    discussion under the <a href="#elem_schemaTest"><tt>schemaTest</tt></a> element.
    </p>
    </div>
    """
    class Meta:
        name = "instanceTest"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    # Required link to the instance document file.
    instance_document: Optional[InstanceDocument] = field(
        default=None,
        metadata={
            "name": "instanceDocument",
            "type": "Element",
            "required": True,
        }
    )
    # Prescribed outcomes, one per applicable set of language versions.
    expected: List[Expected] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    # Current status of the test in the XSTS.
    current: Optional[Current] = field(
        default=None,
        metadata={
            "type": "Element",
        }
    )
    # History of prior status changes.
    prior: List[Prior] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    # Unique within the enclosing testGroup; required.
    name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # Version tokens: test applies if ANY listed version/feature is
    # supported (ORed here, unlike on the "expected" element).
    version: List[Union[KnownToken, Decimal, str]] = field(
        default_factory=list,
        metadata={
            "type": "Attribute",
            "tokens": True,
        }
    )
    # Arbitrary attributes from other namespaces.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class SchemaTest:
    """<div>
    <p>
    This element groups together information about the schema
    for a particular test group.
    </p>
    <p>
    It has one attribute which is required:
    </p>
    <ul>
    <li><tt>name</tt> - the name of the schema test, which must be
    unique within the enclosing <tt>testGroup</tt> (i.e. it must
    differ from the name(s) of any associated <tt>instanceTest</tt>
    elements).
    </li>
    </ul>
    <p>
    and one attribute which is optional, for identifying a subset
    of versions and/or editions for which the test is valid:
    </p>
    <ul>
    <li>
    <p><tt>version</tt> - Tests which only apply to certain
    versions of XML Schema list those versions in the
    <tt>version</tt> attribute. Processors supporting
    <em>any</em> version or feature indicated by a keyword
    in the attribute should run the test. (Or, phrased
    more declaratively: the test is meaningful to any
    processor which supports any of the features or
    versions listed.)
    </p>
    <p>If no value is specified, all processors which
    haven't already skipped the enclosing test group,
    test set, or test suite should run the test.
    </p>
    <p>
    The value is a list of version tokens. See the
    definition of the <a href="#type_version-info"><tt>version-info</tt></a>
    type.</p>
    <p>Note that the omission of a version token on a schema
    test is in some sense strictly advisory: any schema
    test is meaningful for any processor in any
    configuration. For processor configurations not
    supporting any of the features or versions named, the
    expected result is that the schema is not a conforming
    schema. This will <em>not</em> be indicated with an
    explicit <tt>expected</tt> element.
    </p>
    </li>
    </ul>
    <p>
    One child element is required:
    </p>
    <ul>
    <li><tt>schemaDocument</tt> - at least one link to a file
    containing a schema document. The schema for the test is
    constructed from the set (or from other schemas via
    import).
    </li>
    </ul>
    <p>Four child elements may optionally be present:</p>
    <ul>
    <li><tt>annotation</tt> - zero or more instances of general
    documentation</li>
    <li><tt>expected</tt> - indicates the conformance or
    non-conformance of the schema described by the schema
    document(s)
    (<tt>valid</tt> = conformant, <tt>invalid</tt> =
    non-conformant).
    </li>
    <li><tt>current</tt> - the current status of this test in
    the XSTS (an indication of the test's accuracy in testing
    the feature it is intended to test).
    </li>
    <li><tt>prior</tt> - the history of any changes in the
    status of this test.
    </li>
    </ul>
    <p>
    The elements "<tt>expected</tt>" and "<tt>current</tt>"
    may be absent when tests are contributed, but will always
    be present for tests included in the XSTS.
    </p>
    <p>
    The <tt>current</tt> and <tt>prior</tt> elements were originally
    designed for tracking changes of status in tests; they can and
    should be used to keep a general change history of the test.
    Whenever anything changes that may be of importance for users
    of the test suite, it is appropriate to clone the existing
    <tt>current</tt> element into a pair of similar elements, then
    rename the second one <tt>prior</tt>. In the new <tt>current</tt>
    element, the change made should be described in the
    <tt>annotation</tt> children, and the date of the change
    should be recorded.
    </p>
    <p>
    Examples: The status of the test changes. The expected
    result is questions and reaffirmed. The expected result is
    changed, or multiple expected results are given for different
    processor configurations.
    </p>
    <p>
    For status changes involving bug reports, the relevant status
    entries should have a Bugzilla cross-reference.
    </p>
    </div>
    """
    class Meta:
        name = "schemaTest"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    # At least one schema document link (min_occurs=1); the test's
    # schema is constructed from the whole set.
    schema_document: List[SchemaDocument] = field(
        default_factory=list,
        metadata={
            "name": "schemaDocument",
            "type": "Element",
            "min_occurs": 1,
        }
    )
    # Prescribed conformance outcome(s) for the schema.
    expected: List[Expected] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    # Current status of the test in the XSTS.
    current: Optional[Current] = field(
        default=None,
        metadata={
            "type": "Element",
        }
    )
    # History of prior status changes.
    prior: List[Prior] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    # Unique within the enclosing testGroup; required.
    name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # Version tokens: test applies if ANY listed version/feature is
    # supported; omission is advisory (see docstring).
    version: List[Union[KnownToken, Decimal, str]] = field(
        default_factory=list,
        metadata={
            "type": "Attribute",
            "tokens": True,
        }
    )
    # Arbitrary attributes from other namespaces.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class TestSuite:
    """<div>
    <p>
    The root element of a document describing a set of tests for one
    or more versions of W3C XML Schema.
    </p>
    <p>
    The element has three attributes, each of which is required:
    </p>
    <ul>
    <li>
    <p><tt>name</tt> - the name of this test suite.
    </p>
    </li>
    <li>
    <p><tt>releaseDate</tt> - the date on which this test
    suite was released. This value serves to identify the
    version of the test suite.
    </p>
    </li>
    <li>
    <p><tt>schemaVersion</tt> - the versions of XSD for which
    the tests are designed. This has documentary function
    only, and is intended for human readers. The
    machine-processable version information is handled by
    the <tt>version</tt> attribute.
    </p>
    </li>
    <li>
    <p><tt>version</tt> - a list of version tokens indicating
    versions and features for which at least some tests in the
    test suite are applicable.
    </p>
    <p>Any processor or processor configuration which
    supports <em>any</em> of the tokens given should run
    the tests. Processors which support none of the named
    features can skip the entire test suite without loss.
    If no <tt>version</tt> value is given, or if the value
    is the empty string, all processors should run the
    tests.</p>
    <p>For example <code>version="1.1"</code> on a test suite
    element indicates that XSD 1.1 processors will find
    relevant tests, and XSD 1.0 processors will not,
    while <code>version="1.0 1.1"</code>, or no
    <code>version</code> attribute at all, indicates
    that the test suite contains tests relevant to both.
    </p>
    <p>Logically, the <tt>version</tt> attribute on
    the <tt>testSuite</tt> element, if given explicitly,
    should include all the tokens used on any
    <tt>testSet</tt>, <tt>testGroup</tt>,
    <tt>schemaTest</tt>, or <tt>instanceTest</tt> in the
    test suite, and no others. This is not necessarily
    enforced, however, by the schema for this
    vocabulary.</p>
    </li>
    </ul>
    <p>
    Two child elements may optionally be present:
    </p>
    <ul>
    <li><tt>annotation</tt> - zero or more instances of
    general documentation.</li>
    <li><tt>testSetRef</tt> - a set of references to the sets
    of tests which make up this test suite. No two test sets
    referenced may have the same name.</li>
    </ul>
    </div>
    """
    class Meta:
        name = "testSuite"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    # References to the test sets making up this suite.
    test_set_ref: List[TestSetRef] = field(
        default_factory=list,
        metadata={
            "name": "testSetRef",
            "type": "Element",
        }
    )
    # Name of the test suite; required.
    name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # Release date; identifies the version of the test suite. Required.
    release_date: Optional[XmlDate] = field(
        default=None,
        metadata={
            "name": "releaseDate",
            "type": "Attribute",
            "required": True,
        }
    )
    # Human-readable XSD version description (documentary only); required.
    schema_version: Optional[str] = field(
        default=None,
        metadata={
            "name": "schemaVersion",
            "type": "Attribute",
            "required": True,
        }
    )
    # Machine-processable version tokens (ORed; empty means all).
    version: List[Union[KnownToken, Decimal, str]] = field(
        default_factory=list,
        metadata={
            "type": "Attribute",
            "tokens": True,
        }
    )
    # Arbitrary attributes from other namespaces.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class TestGroup:
    """A group of closely related tests (the ``testGroup`` element).

    All instance tests in a group are validated against the same schema:
    if a ``schemaTest`` is present, the schema produced for that test is
    used; otherwise the instance tests are validated against a schema
    consisting only of the built-in components.

    The required ``name`` attribute must differ from the name of any
    other ``testGroup`` in the enclosing ``testSet``.  The optional
    ``version`` attribute lists version/feature tokens; a processor
    supporting any of them should run the tests, and the tokens should
    logically also appear on the enclosing ``testSet``.

    Optional children: ``annotation`` (general documentation),
    ``documentationReference`` (links to the Recommendation, errata,
    etc.), at most one ``schemaTest`` (whose schema documents, when
    several, should be loaded one by one in order), and any number of
    ``instanceTest`` elements.
    """
    # xsdata binding metadata for the <testGroup> element.
    class Meta:
        name = "testGroup"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    # Zero or more general documentation entries.
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    # References to external documentation the test is based on.
    documentation_reference: List[DocumentationReference] = field(
        default_factory=list,
        metadata={
            "name": "documentationReference",
            "type": "Element",
        }
    )
    # At most one schema test; its schema is shared by all instance tests.
    schema_test: Optional[SchemaTest] = field(
        default=None,
        metadata={
            "name": "schemaTest",
            "type": "Element",
        }
    )
    # Instance documents to be validated against the group's schema.
    instance_test: List[InstanceTest] = field(
        default_factory=list,
        metadata={
            "name": "instanceTest",
            "type": "Element",
        }
    )
    # Required identifier, unique within the enclosing testSet.
    name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # Whitespace-separated version/feature tokens; empty means all.
    version: List[Union[KnownToken, Decimal, str]] = field(
        default_factory=list,
        metadata={
            "type": "Attribute",
            "tokens": True,
        }
    )
    # Attributes from foreign namespaces are collected here.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class TestSet:
    """Root element of a document describing a set of tests (``testSet``),
    normally from a single contributor.

    A contributor may supply any number of ``testSet`` files; large
    collections are ideally broken into documents of about a megabyte
    each so the suite stays browsable.

    Required attributes: ``contributor`` (human-readable contributor
    name) and ``name`` (unique among the ``testSet`` elements within the
    enclosing ``testSuite``).  The optional ``version`` attribute lists
    version/feature tokens; a processor supporting any of them should
    run the tests, and the tokens should logically cover those used on
    any contained ``testGroup``, ``schemaTest`` or ``instanceTest`` and
    be included on the enclosing ``testSuite``.

    Optional children: ``annotation`` (general documentation) and
    ``testGroup`` elements, no two of which may share a name within the
    same ``testSet``.
    """
    # xsdata binding metadata for the <testSet> element.
    class Meta:
        name = "testSet"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    # Zero or more general documentation entries.
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    # Groups of closely related tests; names unique within this set.
    test_group: List[TestGroup] = field(
        default_factory=list,
        metadata={
            "name": "testGroup",
            "type": "Element",
        }
    )
    # Required, human-readable name of the contributor.
    contributor: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # Required identifier, unique within the enclosing testSuite.
    name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # Whitespace-separated version/feature tokens; empty means all.
    version: List[Union[KnownToken, Decimal, str]] = field(
        default_factory=list,
        metadata={
            "type": "Attribute",
            "tokens": True,
        }
    )
    # Attributes from foreign namespaces are collected here.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
|
[
"dataclasses.field"
] |
[((2042, 2093), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute'}"}), "(default=None, metadata={'type': 'Attribute'})\n", (2047, 2093), False, 'from dataclasses import dataclass, field\n'), ((2178, 2266), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'metadata': "{'type': 'Attributes', 'namespace': '##other'}"}), "(default_factory=dict, metadata={'type': 'Attributes', 'namespace':\n '##other'})\n", (2183, 2266), False, 'from dataclasses import dataclass, field\n'), ((2348, 2447), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Wildcard', 'namespace': '##any', 'mixed': True}"}), "(default_factory=list, metadata={'type': 'Wildcard', 'namespace':\n '##any', 'mixed': True})\n", (2353, 2447), False, 'from dataclasses import dataclass, field\n'), ((19396, 19447), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute'}"}), "(default=None, metadata={'type': 'Attribute'})\n", (19401, 19447), False, 'from dataclasses import dataclass, field\n'), ((19537, 19645), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'namespace': 'http://www.w3.org/XML/1998/namespace'}"}), "(default=None, metadata={'type': 'Attribute', 'namespace':\n 'http://www.w3.org/XML/1998/namespace'})\n", (19542, 19645), False, 'from dataclasses import dataclass, field\n'), ((19738, 19826), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'metadata': "{'type': 'Attributes', 'namespace': '##other'}"}), "(default_factory=dict, metadata={'type': 'Attributes', 'namespace':\n '##other'})\n", (19743, 19826), False, 'from dataclasses import dataclass, field\n'), ((19908, 20007), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Wildcard', 'namespace': '##any', 'mixed': True}"}), "(default_factory=list, metadata={'type': 'Wildcard', 'namespace':\n '##any', 'mixed': True})\n", (19913, 20007), False, 
'from dataclasses import dataclass, field\n'), ((23275, 23344), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (23280, 23344), False, 'from dataclasses import dataclass, field\n'), ((23455, 23530), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Attribute', 'tokens': True}"}), "(default_factory=list, metadata={'type': 'Attribute', 'tokens': True})\n", (23460, 23530), False, 'from dataclasses import dataclass, field\n'), ((23627, 23715), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'metadata': "{'type': 'Attributes', 'namespace': '##other'}"}), "(default_factory=dict, metadata={'type': 'Attributes', 'namespace':\n '##other'})\n", (23632, 23715), False, 'from dataclasses import dataclass, field\n'), ((24251, 24308), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element'}"}), "(default_factory=list, metadata={'type': 'Element'})\n", (24256, 24308), False, 'from dataclasses import dataclass, field\n'), ((24395, 24452), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element'}"}), "(default_factory=list, metadata={'type': 'Element'})\n", (24400, 24452), False, 'from dataclasses import dataclass, field\n'), ((24537, 24625), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'metadata': "{'type': 'Attributes', 'namespace': '##other'}"}), "(default_factory=dict, metadata={'type': 'Attributes', 'namespace':\n '##other'})\n", (24542, 24625), False, 'from dataclasses import dataclass, field\n'), ((24776, 24903), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element', 'namespace':\n 'http://www.w3.org/XML/2004/xml-schema-test-suite/'}"}), "(default_factory=list, metadata={'type': 'Element', 'namespace':\n 'http://www.w3.org/XML/2004/xml-schema-test-suite/'})\n", 
(24781, 24903), False, 'from dataclasses import dataclass, field\n'), ((24978, 25090), 'dataclasses.field', 'field', ([], {'default': 'TypeType.LOCATOR', 'metadata': "{'type': 'Attribute', 'namespace': 'http://www.w3.org/1999/xlink'}"}), "(default=TypeType.LOCATOR, metadata={'type': 'Attribute', 'namespace':\n 'http://www.w3.org/1999/xlink'})\n", (24983, 25090), False, 'from dataclasses import dataclass, field\n'), ((25170, 25270), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'namespace': 'http://www.w3.org/1999/xlink'}"}), "(default=None, metadata={'type': 'Attribute', 'namespace':\n 'http://www.w3.org/1999/xlink'})\n", (25175, 25270), False, 'from dataclasses import dataclass, field\n'), ((25363, 25451), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'metadata': "{'type': 'Attributes', 'namespace': '##other'}"}), "(default_factory=dict, metadata={'type': 'Attributes', 'namespace':\n '##other'})\n", (25368, 25451), False, 'from dataclasses import dataclass, field\n'), ((25618, 25745), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element', 'namespace':\n 'http://www.w3.org/XML/2004/xml-schema-test-suite/'}"}), "(default_factory=list, metadata={'type': 'Element', 'namespace':\n 'http://www.w3.org/XML/2004/xml-schema-test-suite/'})\n", (25623, 25745), False, 'from dataclasses import dataclass, field\n'), ((25830, 25899), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (25835, 25899), False, 'from dataclasses import dataclass, field\n'), ((25987, 26056), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (25992, 26056), False, 'from dataclasses import dataclass, field\n'), ((26144, 26274), 
'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'pattern':\n 'http://www\\\\.w3\\\\.org/Bugs/Public/show_bug\\\\.cgi\\\\?id=[0-9]*'}"}), "(default=None, metadata={'type': 'Attribute', 'pattern':\n 'http://www\\\\.w3\\\\.org/Bugs/Public/show_bug\\\\.cgi\\\\?id=[0-9]*'})\n", (26149, 26274), False, 'from dataclasses import dataclass, field\n'), ((26364, 26452), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'metadata': "{'type': 'Attributes', 'namespace': '##other'}"}), "(default_factory=dict, metadata={'type': 'Attributes', 'namespace':\n '##other'})\n", (26369, 26452), False, 'from dataclasses import dataclass, field\n'), ((28692, 28749), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element'}"}), "(default_factory=list, metadata={'type': 'Element'})\n", (28697, 28749), False, 'from dataclasses import dataclass, field\n'), ((28833, 28902), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (28838, 28902), False, 'from dataclasses import dataclass, field\n'), ((28985, 29054), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (28990, 29054), False, 'from dataclasses import dataclass, field\n'), ((29139, 29208), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (29144, 29208), False, 'from dataclasses import dataclass, field\n'), ((29292, 29361), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (29297, 29361), False, 'from dataclasses import dataclass, 
field\n'), ((29460, 29537), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'name': 'normalizedLoad', 'type': 'Attribute'}"}), "(default=None, metadata={'name': 'normalizedLoad', 'type': 'Attribute'})\n", (29465, 29537), False, 'from dataclasses import dataclass, field\n'), ((29634, 29722), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'metadata': "{'type': 'Attributes', 'namespace': '##other'}"}), "(default_factory=dict, metadata={'type': 'Attributes', 'namespace':\n '##other'})\n", (29639, 29722), False, 'from dataclasses import dataclass, field\n'), ((34218, 34275), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element'}"}), "(default_factory=list, metadata={'type': 'Element'})\n", (34223, 34275), False, 'from dataclasses import dataclass, field\n'), ((34357, 34436), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'name': 'testResult', 'type': 'Element'}"}), "(default_factory=list, metadata={'name': 'testResult', 'type': 'Element'})\n", (34362, 34436), False, 'from dataclasses import dataclass, field\n'), ((34521, 34590), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (34526, 34590), False, 'from dataclasses import dataclass, field\n'), ((34679, 34748), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (34684, 34748), False, 'from dataclasses import dataclass, field\n'), ((34843, 34938), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'name': 'submitDate', 'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'name': 'submitDate', 'type': 'Attribute',\n 'required': True})\n", (34848, 34938), False, 'from dataclasses import dataclass, field\n'), ((35082, 
35170), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'name': 'publicationPermission', 'type': 'Attribute'}"}), "(default=None, metadata={'name': 'publicationPermission', 'type':\n 'Attribute'})\n", (35087, 35170), False, 'from dataclasses import dataclass, field\n'), ((35263, 35351), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'metadata': "{'type': 'Attributes', 'namespace': '##other'}"}), "(default_factory=dict, metadata={'type': 'Attributes', 'namespace':\n '##other'})\n", (35268, 35351), False, 'from dataclasses import dataclass, field\n'), ((39160, 39217), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element'}"}), "(default_factory=list, metadata={'type': 'Element'})\n", (39165, 39217), False, 'from dataclasses import dataclass, field\n'), ((39315, 39414), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'name': 'instanceDocument', 'type': 'Element', 'required': True}"}), "(default=None, metadata={'name': 'instanceDocument', 'type': 'Element',\n 'required': True})\n", (39320, 39414), False, 'from dataclasses import dataclass, field\n'), ((39511, 39568), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element'}"}), "(default_factory=list, metadata={'type': 'Element'})\n", (39516, 39568), False, 'from dataclasses import dataclass, field\n'), ((39647, 39696), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element'}"}), "(default=None, metadata={'type': 'Element'})\n", (39652, 39696), False, 'from dataclasses import dataclass, field\n'), ((39767, 39824), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element'}"}), "(default_factory=list, metadata={'type': 'Element'})\n", (39772, 39824), False, 'from dataclasses import dataclass, field\n'), ((39896, 39965), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': 
True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (39901, 39965), False, 'from dataclasses import dataclass, field\n'), ((40076, 40151), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Attribute', 'tokens': True}"}), "(default_factory=list, metadata={'type': 'Attribute', 'tokens': True})\n", (40081, 40151), False, 'from dataclasses import dataclass, field\n'), ((40248, 40336), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'metadata': "{'type': 'Attributes', 'namespace': '##other'}"}), "(default_factory=dict, metadata={'type': 'Attributes', 'namespace':\n '##other'})\n", (40253, 40336), False, 'from dataclasses import dataclass, field\n'), ((44490, 44547), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element'}"}), "(default_factory=list, metadata={'type': 'Element'})\n", (44495, 44547), False, 'from dataclasses import dataclass, field\n'), ((44637, 44741), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'name': 'schemaDocument', 'type': 'Element', 'min_occurs': 1}"}), "(default_factory=list, metadata={'name': 'schemaDocument', 'type':\n 'Element', 'min_occurs': 1})\n", (44642, 44741), False, 'from dataclasses import dataclass, field\n'), ((44838, 44895), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element'}"}), "(default_factory=list, metadata={'type': 'Element'})\n", (44843, 44895), False, 'from dataclasses import dataclass, field\n'), ((44974, 45023), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Element'}"}), "(default=None, metadata={'type': 'Element'})\n", (44979, 45023), False, 'from dataclasses import dataclass, field\n'), ((45094, 45151), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element'}"}), "(default_factory=list, metadata={'type': 'Element'})\n", (45099, 45151), False, 'from dataclasses 
import dataclass, field\n'), ((45223, 45292), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (45228, 45292), False, 'from dataclasses import dataclass, field\n'), ((45403, 45478), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Attribute', 'tokens': True}"}), "(default_factory=list, metadata={'type': 'Attribute', 'tokens': True})\n", (45408, 45478), False, 'from dataclasses import dataclass, field\n'), ((45575, 45663), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'metadata': "{'type': 'Attributes', 'namespace': '##other'}"}), "(default_factory=dict, metadata={'type': 'Attributes', 'namespace':\n '##other'})\n", (45580, 45663), False, 'from dataclasses import dataclass, field\n'), ((48302, 48359), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element'}"}), "(default_factory=list, metadata={'type': 'Element'})\n", (48307, 48359), False, 'from dataclasses import dataclass, field\n'), ((48442, 48521), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'name': 'testSetRef', 'type': 'Element'}"}), "(default_factory=list, metadata={'name': 'testSetRef', 'type': 'Element'})\n", (48447, 48521), False, 'from dataclasses import dataclass, field\n'), ((48605, 48674), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (48610, 48674), False, 'from dataclasses import dataclass, field\n'), ((48770, 48866), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'name': 'releaseDate', 'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'name': 'releaseDate', 'type': 'Attribute',\n 'required': True})\n", (48775, 48866), False, 'from dataclasses import dataclass, field\n'), 
((48968, 49066), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'name': 'schemaVersion', 'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'name': 'schemaVersion', 'type': 'Attribute',\n 'required': True})\n", (48973, 49066), False, 'from dataclasses import dataclass, field\n'), ((49185, 49260), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Attribute', 'tokens': True}"}), "(default_factory=list, metadata={'type': 'Attribute', 'tokens': True})\n", (49190, 49260), False, 'from dataclasses import dataclass, field\n'), ((49357, 49445), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'metadata': "{'type': 'Attributes', 'namespace': '##other'}"}), "(default_factory=dict, metadata={'type': 'Attributes', 'namespace':\n '##other'})\n", (49362, 49445), False, 'from dataclasses import dataclass, field\n'), ((53961, 54018), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element'}"}), "(default_factory=list, metadata={'type': 'Element'})\n", (53966, 54018), False, 'from dataclasses import dataclass, field\n'), ((54124, 54219), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'name': 'documentationReference', 'type': 'Element'}"}), "(default_factory=list, metadata={'name': 'documentationReference',\n 'type': 'Element'})\n", (54129, 54219), False, 'from dataclasses import dataclass, field\n'), ((54313, 54384), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'name': 'schemaTest', 'type': 'Element'}"}), "(default=None, metadata={'name': 'schemaTest', 'type': 'Element'})\n", (54318, 54384), False, 'from dataclasses import dataclass, field\n'), ((54482, 54567), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'name': 'instanceTest', 'type': 'Element'}"}), "(default_factory=list, metadata={'name': 'instanceTest', 'type':\n 'Element'})\n", (54487, 54567), False, 'from 
dataclasses import dataclass, field\n'), ((54647, 54716), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (54652, 54716), False, 'from dataclasses import dataclass, field\n'), ((54827, 54902), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Attribute', 'tokens': True}"}), "(default_factory=list, metadata={'type': 'Attribute', 'tokens': True})\n", (54832, 54902), False, 'from dataclasses import dataclass, field\n'), ((54999, 55087), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'metadata': "{'type': 'Attributes', 'namespace': '##other'}"}), "(default_factory=dict, metadata={'type': 'Attributes', 'namespace':\n '##other'})\n", (55004, 55087), False, 'from dataclasses import dataclass, field\n'), ((57947, 58004), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Element'}"}), "(default_factory=list, metadata={'type': 'Element'})\n", (57952, 58004), False, 'from dataclasses import dataclass, field\n'), ((58084, 58162), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'name': 'testGroup', 'type': 'Element'}"}), "(default_factory=list, metadata={'name': 'testGroup', 'type': 'Element'})\n", (58089, 58162), False, 'from dataclasses import dataclass, field\n'), ((58253, 58322), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (58258, 58322), False, 'from dataclasses import dataclass, field\n'), ((58406, 58475), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'type': 'Attribute', 'required': True}"}), "(default=None, metadata={'type': 'Attribute', 'required': True})\n", (58411, 58475), False, 'from dataclasses import dataclass, field\n'), ((58586, 58661), 'dataclasses.field', 
'field', ([], {'default_factory': 'list', 'metadata': "{'type': 'Attribute', 'tokens': True}"}), "(default_factory=list, metadata={'type': 'Attribute', 'tokens': True})\n", (58591, 58661), False, 'from dataclasses import dataclass, field\n'), ((58758, 58846), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'metadata': "{'type': 'Attributes', 'namespace': '##other'}"}), "(default_factory=dict, metadata={'type': 'Attributes', 'namespace':\n '##other'})\n", (58763, 58846), False, 'from dataclasses import dataclass, field\n')]
|
import sublime
# Settings variables
try:
from . import settings as S
except:
import settings as S
def load_project_values():
    """Cache the project's Xdebug settings in S.CONFIG_PROJECT.

    Reads the S.KEY_XDEBUG dictionary from the active view's settings.
    Any failure (e.g. no active window/view yet) leaves the cache as-is.
    """
    try:
        settings = sublime.active_window().active_view().settings()
        # Use 'xdebug' as key which contains dictionary with project values for package
        S.CONFIG_PROJECT = settings.get(S.KEY_XDEBUG)
    except Exception:
        # Narrowed from a bare 'except:', which would also swallow
        # SystemExit/KeyboardInterrupt; behavior stays best-effort.
        pass
def load_package_values():
    """Rebuild the package settings cache S.CONFIG_PACKAGE.

    Copies every recognized configuration key that is present in the
    default/user package settings file into an in-memory dictionary.
    The cache is replaced even when loading fails (clearing stale data).
    """
    # Clear previous settings
    config = {}
    try:
        # Load default/user package settings
        settings = sublime.load_settings(S.FILE_PACKAGE_SETTINGS)
        # Loop through all configuration keys
        for key in S.CONFIG_KEYS:
            # Set in config if available
            if settings and settings.has(key):
                config[key] = settings.get(key)
    except Exception:
        # Narrowed from a bare 'except:'; keep best-effort semantics.
        pass
    # Set settings in memory
    S.CONFIG_PACKAGE = config
def get_value(key, default_value=None):
    """
    Get value from package/project configuration settings.

    Project configuration takes precedence over the package settings;
    *default_value* is returned when neither source has the key.
    """
    # Project settings first, then the package settings as a fallback.
    for lookup in (get_project_value, get_package_value):
        found = lookup(key)
        if found is not None:
            return found
    return default_value
def get_package_value(key, default_value=None):
    """
    Get value from default/user package configuration settings.
    """
    try:
        settings = sublime.load_settings(S.FILE_PACKAGE_SETTINGS)
        if settings and settings.has(key):
            return settings.get(key)
    except RuntimeError:
        # Settings API unavailable right now; schedule a cache refresh
        # and answer from the in-memory copy instead.
        sublime.set_timeout(lambda: load_package_values(), 0)
        if S.CONFIG_PACKAGE and key in S.CONFIG_PACKAGE:
            return S.CONFIG_PACKAGE[key]
    return default_value
def get_project_value(key, default_value=None):
    """
    Get value from project configuration settings.
    """
    # Refresh the project configuration cache; when the settings API is
    # unavailable, schedule the refresh for later instead.
    try:
        load_project_values()
    except RuntimeError:
        sublime.set_timeout(lambda: load_project_values(), 0)
    # Answer from the cached project configuration, if present.
    config = S.CONFIG_PROJECT
    if config and key in config:
        return config[key]
    return default_value
def get_window_value(key, default_value=None):
    """
    Get value from window session settings.

    NOTE: Window object in Sublime Text 2 has no Settings; the resulting
    AttributeError is absorbed and *default_value* is returned.
    """
    try:
        settings = sublime.active_window().settings()
        if settings.has(S.KEY_XDEBUG):
            xdebug = settings.get(S.KEY_XDEBUG)
            # Membership test directly on the dict (no .keys() needed).
            if isinstance(xdebug, dict) and key in xdebug:
                return xdebug[key]
    except Exception:
        # Narrowed from a bare 'except:'; keep best-effort semantics.
        pass
    return default_value
def set_package_value(key, value=None):
    """
    Set value in package configuration settings.

    Passing value=None removes the key instead of storing it.
    """
    try:
        config = sublime.load_settings(S.FILE_PACKAGE_SETTINGS)
        if value is not None:
            config.set(key, value)
        elif config and config.has(key):
            # erase() has no meaningful return value; dropping the
            # previous 'return' keeps both branches consistent.
            config.erase(key)
    except Exception:
        # Narrowed from a bare 'except:'; keep best-effort semantics.
        pass
def set_project_value(key, value=None):
    """
    Set value in project configuration settings.

    Passing value=None removes the key. Returns False when no project
    file is open (nothing to persist), True otherwise.
    """
    window = sublime.active_window()
    # Unable to set project value if no project file
    if not window.project_file_name():
        return False
    # Start from the current project data, coercing it to a dictionary.
    project = window.project_data()
    if not isinstance(project, dict):
        project = {}
    # Make sure the nested settings/xdebug dictionaries exist.
    if not isinstance(project.get(S.KEY_SETTINGS), dict):
        project[S.KEY_SETTINGS] = {}
    settings = project[S.KEY_SETTINGS]
    if not isinstance(settings.get(S.KEY_XDEBUG), dict):
        settings[S.KEY_XDEBUG] = {}
    xdebug = settings[S.KEY_XDEBUG]
    # Update Xdebug settings
    if value is not None:
        xdebug[key] = value
    elif key in xdebug:
        del xdebug[key]
    # Save project data
    window.set_project_data(project)
    return True
def set_window_value(key, value=None):
    """Set (or remove, when *value* is None) *key* in the window's Xdebug
    session settings.

    NOTE: Window object in Sublime Text 2 has no Settings, which is why
    the whole body is wrapped in a try/except.
    """
    try:
        settings = sublime.active_window().settings()
        # Start from the stored dict, or a fresh one on first use.
        if settings.has(S.KEY_XDEBUG):
            xdebug = settings.get(S.KEY_XDEBUG)
        else:
            xdebug = {}
        if value is not None:
            xdebug[key] = value
        elif key in xdebug:
            del xdebug[key]
        settings.set(S.KEY_XDEBUG, xdebug)
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # propagate; missing-Settings failures are still ignored as before.
        pass
|
[
"sublime.active_window",
"sublime.load_settings"
] |
[((528, 574), 'sublime.load_settings', 'sublime.load_settings', (['S.FILE_PACKAGE_SETTINGS'], {}), '(S.FILE_PACKAGE_SETTINGS)\n', (549, 574), False, 'import sublime\n'), ((1490, 1536), 'sublime.load_settings', 'sublime.load_settings', (['S.FILE_PACKAGE_SETTINGS'], {}), '(S.FILE_PACKAGE_SETTINGS)\n', (1511, 1536), False, 'import sublime\n'), ((2930, 2976), 'sublime.load_settings', 'sublime.load_settings', (['S.FILE_PACKAGE_SETTINGS'], {}), '(S.FILE_PACKAGE_SETTINGS)\n', (2951, 2976), False, 'import sublime\n'), ((3427, 3450), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (3448, 3450), False, 'import sublime\n'), ((4185, 4208), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (4206, 4208), False, 'import sublime\n'), ((2524, 2547), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (2545, 2547), False, 'import sublime\n'), ((3316, 3339), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (3337, 3339), False, 'import sublime\n'), ((4438, 4461), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (4459, 4461), False, 'import sublime\n'), ((164, 187), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (185, 187), False, 'import sublime\n')]
|
import argparse
import codecs

# Merge parallel token / POS / dependency-relation files into a single
# file whose columns are joined with the '-|-' separator.
parser = argparse.ArgumentParser(description='Conveter of the Google sentence compression dataset')
parser.add_argument("-s", "--sent-file", dest="file_sent", type=str, help="path to the sentence file")
parser.add_argument("-p", "--pos-file", dest="file_pos", type=str, help="path to the pos file")
parser.add_argument("-r", "--rel-file", dest="file_rel", type=str, help="path to the relation file")
parser.add_argument("-o", "--out-file", dest="file_out", type=str, help="path to the output file")
opts = parser.parse_args()

f_sent = codecs.open(opts.file_sent, "r", encoding="utf8")
f_pos = codecs.open(opts.file_pos, "r", encoding="utf8")
f_rel = codecs.open(opts.file_rel, "r", encoding="utf8")
f_out = codecs.open(opts.file_out, "w", encoding="utf8")
with f_sent, f_pos, f_rel, f_out:
    for line_sent in f_sent:
        # Advance the POS and relation files in lockstep with the sentences.
        col_sent = line_sent.rstrip().split(" ")
        col_pos = f_pos.readline().rstrip().split(" ")
        col_rel = f_rel.readline().rstrip().split(" ")
        # A column-count mismatch means the inputs are corrupt: fail loudly.
        # (A bare ``assert`` would be stripped under ``python -O``.)
        if not (len(col_sent) == len(col_pos) == len(col_rel)):
            raise ValueError("POS, Rel and Tokens are not correctly aligned.")
        # token-|-pos-|-rel triples, space separated; built with join to
        # avoid quadratic string concatenation.
        body = " ".join("-|-".join(triple) for triple in zip(col_sent, col_pos, col_rel))
        f_out.write(body + "\n")
|
[
"codecs.open",
"argparse.ArgumentParser"
] |
[((40, 135), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Conveter of the Google sentence compression dataset"""'}), "(description=\n 'Conveter of the Google sentence compression dataset')\n", (63, 135), False, 'import argparse\n'), ((567, 616), 'codecs.open', 'codecs.open', (['opts.file_sent', '"""r"""'], {'encoding': '"""utf8"""'}), "(opts.file_sent, 'r', encoding='utf8')\n", (578, 616), False, 'import codecs\n'), ((623, 671), 'codecs.open', 'codecs.open', (['opts.file_pos', '"""r"""'], {'encoding': '"""utf8"""'}), "(opts.file_pos, 'r', encoding='utf8')\n", (634, 671), False, 'import codecs\n'), ((678, 726), 'codecs.open', 'codecs.open', (['opts.file_rel', '"""r"""'], {'encoding': '"""utf8"""'}), "(opts.file_rel, 'r', encoding='utf8')\n", (689, 726), False, 'import codecs\n'), ((733, 781), 'codecs.open', 'codecs.open', (['opts.file_out', '"""w"""'], {'encoding': '"""utf8"""'}), "(opts.file_out, 'w', encoding='utf8')\n", (744, 781), False, 'import codecs\n')]
|
from django.shortcuts import render
from accounts.models import Member
from search.models import Image, Tag, Category
import datetime
def parse_tags(tag, img):
    """Attach each comma-separated tag name in *tag* to *img*.

    Reuses an existing ``Tag`` matching the name case-insensitively, and
    creates a new one through the image's related manager otherwise.
    """
    for name in tag.split(','):
        # Single query instead of the original count()+get() pair; also
        # avoids MultipleObjectsReturned when duplicate tags exist.
        existing = Tag.objects.filter(tag_name__iexact=name).first()
        if existing is None:
            # Creates the Tag and links it to the image in one step.
            img.tag.create(tag_name=name)
        else:
            img.tag.add(existing)
def upload_image(request):
    """Render the image upload page and handle POSTed uploads.

    Enforces a per-day quota reset, a 10-tag limit, and a jpg-only rule.
    ``state`` is 'T' on success, 'F' on failure, '' when nothing was posted.
    """
    member = Member.objects.get(username=request.user.username) # e.g. Member instance with username 'nat'
    all_cat = Category.CATEGORY
    message = state = ''
    d = datetime.date.today()
    daily_usage = member.daily_quota
    # Reset the daily quota on the first visit of a new day.
    # NOTE(review): assumes ``member.username`` is a User object exposing
    # ``last_login`` -- confirm against the Member model definition.
    if d > member.username.last_login.date():
        member.daily_quota = 4
        member.save()
        daily_usage = member.daily_quota
    # Remaining system-wide allowance; presumably 3 images per photographer
    # total -- confirm the intended cap.
    system_usage = str(3 - Image.objects.filter(photographer=member).count())
    if request.method == 'POST':
        img_name = request.FILES.get('img').name
        tag_list = request.POST.get('tag').split(',')
        cat = Category.objects.get(cat_name=request.POST.get('category'))
        if len(tag_list) > 10:
            state = 'F'
            message = 'You cannot add more than 10 tags for an image. Please try again.'
        elif img_name.endswith(('.jpg', '.jpeg')):
            new_img = Image(
                img=request.FILES.get('img'),
                title=request.POST.get('title'),
                description=request.POST.get('description'),
                category=cat,
                photographer=member
            )
            new_img.save()
            # parse tags
            parse_tags(request.POST.get('tag'), new_img)
            # handle quota: one upload consumes one daily-quota unit.
            member.daily_quota -= 1
            member.save()
            daily_usage = member.daily_quota
            system_usage = str(3 - Image.objects.filter(photographer=member).count())
            # send success signal
            state = 'T'
            message = 'Image uploaded'
        else:
            # send error signal: only jpg/jpeg uploads are accepted.
            state = 'F'
            message = 'Image is not jpg. Please upload only jpg files.'
    return render(request, 'upload/upload.html', {'daily_usage': daily_usage, 'system_usage': system_usage, 'all_cat': all_cat, 'state': state, 'message': message})
|
[
"search.models.Image.objects.filter",
"datetime.date.today",
"accounts.models.Member.objects.get",
"search.models.Tag.objects.get",
"django.shortcuts.render",
"search.models.Tag.objects.filter"
] |
[((513, 563), 'accounts.models.Member.objects.get', 'Member.objects.get', ([], {'username': 'request.user.username'}), '(username=request.user.username)\n', (531, 563), False, 'from accounts.models import Member\n'), ((676, 697), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (695, 697), False, 'import datetime\n'), ((2249, 2410), 'django.shortcuts.render', 'render', (['request', '"""upload/upload.html"""', "{'daily_usage': daily_usage, 'system_usage': system_usage, 'all_cat':\n all_cat, 'state': state, 'message': message}"], {}), "(request, 'upload/upload.html', {'daily_usage': daily_usage,\n 'system_usage': system_usage, 'all_cat': all_cat, 'state': state,\n 'message': message})\n", (2255, 2410), False, 'from django.shortcuts import render\n'), ((395, 430), 'search.models.Tag.objects.get', 'Tag.objects.get', ([], {'tag_name__iexact': 'd'}), '(tag_name__iexact=d)\n', (410, 430), False, 'from search.models import Image, Tag, Category\n'), ((238, 276), 'search.models.Tag.objects.filter', 'Tag.objects.filter', ([], {'tag_name__iexact': 'd'}), '(tag_name__iexact=d)\n', (256, 276), False, 'from search.models import Image, Tag, Category\n'), ((908, 949), 'search.models.Image.objects.filter', 'Image.objects.filter', ([], {'photographer': 'member'}), '(photographer=member)\n', (928, 949), False, 'from search.models import Image, Tag, Category\n'), ((1938, 1979), 'search.models.Image.objects.filter', 'Image.objects.filter', ([], {'photographer': 'member'}), '(photographer=member)\n', (1958, 1979), False, 'from search.models import Image, Tag, Category\n')]
|
import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import time
from collections import namedtuple
import caffe
from lib import run_net
from lib import score_util
from datasets.pascal_voc import Pascal
# Dataset wrapper for PASCAL VOC 2012; the path is machine-specific.
PV = Pascal('C:\\ALISURE\\Data\\voc\\VOCdevkit\\VOC2012')
# Image identifiers returned by the dataset wrapper (presumably the
# seg11valid split -- confirm against Pascal.get_data_set).
val_set = PV.get_data_set()
def show_demo():
    """Visualize translated frames of the first validation image.

    Row 1: the shifted image frames; row 2: their palette-colored labels;
    row 3: the original label and its boundary mask.
    """
    first_name = val_set[0]
    image = PV.load_image(first_name)
    gt = PV.load_label(first_name)
    frames, gt_frames = PV.make_translated_frames(image, gt, shift=32, num_frames=6)
    # Global display defaults for all subplots.
    plt.rcParams['image.cmap'] = 'gray'
    plt.rcParams['image.interpolation'] = 'nearest'
    plt.rcParams['figure.figsize'] = (12, 12)
    plt.figure()
    n = len(frames)
    for col, (frame, gt_frame) in enumerate(zip(frames, gt_frames)):
        # Top row: translated image frame.
        plt.subplot(3, n, col + 1)
        plt.imshow(frame)
        plt.axis('off')
        # Middle row: corresponding label, colorized with the VOC palette.
        plt.subplot(3, len(gt_frames), n + col + 1)
        plt.imshow(PV.palette(gt_frame))
        plt.axis('off')
    # Bottom row: untranslated label and its boundary mask.
    plt.subplot(3, len(gt_frames), 2 * n + 2)
    plt.imshow(PV.palette(gt))
    plt.axis('off')
    plt.subplot(3, len(gt_frames), 2 * n + 5)
    plt.imshow(PV.make_boundaries(gt, thickness=2))
    plt.axis('off')
    plt.show()
# Number of PASCAL VOC classes (as defined by the dataset wrapper), used
# to size the score histograms.
class_number = len(PV.classes)
# Frames per synthetic translated "video" clip.
num_frames = 6
# Boundary thickness (px) used when scoring boundary accuracy.
thickness = 5
# Pixel shifts used to synthesize the translated frames.
shifts = (16, 32)
# A scoring configuration: label, net prototxt/weights, inference function,
# and how many leading frames to drop so outputs align across methods.
Method = namedtuple('Method', 'method arch weights infer_func, input_offset')
fcn = Method('fcn', '../nets/voc-fcn8s.prototxt',
             '../nets/voc-fcn8s-heavy.caffemodel', run_net.segrun, 2)
baseline_3stage = Method('baseline_3stage', '../nets/voc-fcn-pool3.prototxt',
                         '../nets/voc-fcn-pool3.caffemodel', run_net.segrun, 2)
baseline_2stage = Method('baseline_2stage', '../nets/voc-fcn-pool4.prototxt',
                         '../nets/voc-fcn-pool4.caffemodel', run_net.segrun, 2)
pipeline_3stage = Method('pipeline_3stage', '../nets/stage-voc-fcn8s.prototxt',
                         '../nets/voc-fcn8s-heavy.caffemodel', run_net.pipeline_3stage_forward, 0)
pipeline_2stage = Method('pipeline_2stage', '../nets/stage-voc-fcn8s.prototxt',
                         '../nets/voc-fcn8s-heavy.caffemodel', run_net.pipeline_2stage_forward, 1)
def score_translations(method, shift, arch, weights, infer, offset):
    """
    Score the translated "video" of PASCAL VOC seg11valid images
    taking care of the net architecture and weights, the particular inference method,
    and the input offset needed to align every frame and pipeline methods.

    Accumulates segmentation and boundary confusion histograms over the
    first 10 validation images and prints acc / class acc / mIU / fwIU
    for both.
    """
    net = caffe.Net(arch, weights, caffe.TEST)
    # Confusion histograms: full segmentation and boundary-only.
    hist, hist_b = np.zeros((class_number, class_number)), np.zeros((class_number, class_number))
    for index, image_name in enumerate(val_set[0: 10]):
        print("{} begin {}".format(time.strftime("%H:%M:%S", time.localtime()), index))
        im, label = PV.load_image(image_name), PV.load_label(image_name)
        im_frames, label_frames = PV.make_translated_frames(im, label, shift=shift, num_frames=num_frames)
        # Drop leading frames so every method scores the same aligned frames.
        im_frames, label_frames = im_frames[offset:], label_frames[offset:]
        # prepare pipelines: feed initial inputs then skip accordingly
        if method == 'pipeline_3stage':
            run_net.pipeline_fill_3stage(net, PV.pre_process(im_frames[0]), PV.pre_process(im_frames[1]))
            im_frames, label_frames = im_frames[2:], label_frames[2:]
        elif method == 'pipeline_2stage':
            run_net.pipeline_fill_2stage(net, PV.pre_process(im_frames[0]))
            im_frames, label_frames = im_frames[1:], label_frames[1:]
        for im_t, label_t in zip(im_frames, label_frames):
            print("{} begin {} .....".format(time.strftime("%H:%M:%S", time.localtime()), index))
            out = infer(net, PV.pre_process(im_t))
            # Debug visualization: opens an external viewer for every frame.
            Image.fromarray(out * 12).convert("L").show()
            hist += score_util.score_out_gt(out, label_t, n_cl=class_number)
            bdry = PV.make_boundaries(label_t, thickness=thickness)
            hist_b += score_util.score_out_gt_bdry(out, label_t, bdry, n_cl=class_number)
            pass
    # Report metrics for the segmentation and boundary histograms.
    for name, h in zip(('seg', 'bdry'), (hist, hist_b)):
        accP, cl_accP, mean_iuP, fw_iuP = score_util.get_scores(h)
        print('{}: {}, shift {}'.format(method, name, shift))
        print('acc\t\t cl acc\t\t mIU\t\t fwIU')
        print('{:f}\t {:f}\t {:f}\t {:f}\t'.format(100*accP, 100*cl_accP, 100*mean_iuP, 100*fw_iuP))
# Sweep every method over every shift; results are recorded in the string
# literal below.
for shift in shifts:
    for m in (fcn, baseline_3stage, pipeline_3stage, baseline_2stage, pipeline_2stage):
        score_translations(m.method, shift, m.arch, m.weights, m.infer_func, m.input_offset)
"""
fcn: seg, shift 16
acc cl acc mIU fwIU
91.974863 82.881608 70.022842 85.902034
fcn: bdry, shift 16
acc cl acc mIU fwIU
63.948030 65.065930 49.555515 53.667815
baseline_3stage: seg, shift 16
acc cl acc mIU fwIU
60.286632 13.705269 4.690409 43.286493
baseline_3stage: bdry, shift 16
acc cl acc mIU fwIU
11.166320 11.496480 1.818804 3.798124
pipeline_3stage: seg, shift 16
acc cl acc mIU fwIU
88.349069 74.970788 55.175040 79.954080
pipeline_3stage: bdry, shift 16
acc cl acc mIU fwIU
56.349989 56.221222 41.709843 46.512476
baseline_2stage: seg, shift 16
acc cl acc mIU fwIU
69.464357 36.060632 13.589065 53.310369
baseline_2stage: bdry, shift 16
acc cl acc mIU fwIU
29.001448 26.982958 8.294242 19.001877
pipeline_2stage: seg, shift 16
acc cl acc mIU fwIU
90.942986 80.925445 67.604536 84.123599
pipeline_2stage: bdry, shift 16
acc cl acc mIU fwIU
61.421430 61.866688 46.878984 51.400744
baseline_3stage: seg, shift 32
acc cl acc mIU fwIU
59.179855 13.635292 4.642013 41.671485
baseline_3stage: bdry, shift 32
acc cl acc mIU fwIU
11.052108 11.406026 1.778673 3.790397
pipeline_3stage: seg, shift 32
acc cl acc mIU fwIU
82.182183 64.902390 49.651163 70.833176
pipeline_3stage: bdry, shift 32
acc cl acc mIU fwIU
50.599466 48.977500 35.428999 41.064723
"""
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"matplotlib.pyplot.axis",
"datasets.pascal_voc.Pascal",
"matplotlib.pyplot.figure",
"lib.score_util.score_out_gt",
"collections.namedtuple",
"lib.score_util.score_out_gt_bdry",
"PIL.Image.fromarray",
"caffe.Net",
"lib.score_util.get_scores",
"time.localtime"
] |
[((243, 295), 'datasets.pascal_voc.Pascal', 'Pascal', (['"""C:\\\\ALISURE\\\\Data\\\\voc\\\\VOCdevkit\\\\VOC2012"""'], {}), "('C:\\\\ALISURE\\\\Data\\\\voc\\\\VOCdevkit\\\\VOC2012')\n", (249, 295), False, 'from datasets.pascal_voc import Pascal\n'), ((1270, 1338), 'collections.namedtuple', 'namedtuple', (['"""Method"""', '"""method arch weights infer_func, input_offset"""'], {}), "('Method', 'method arch weights infer_func, input_offset')\n", (1280, 1338), False, 'from collections import namedtuple\n'), ((671, 683), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (681, 683), True, 'import matplotlib.pyplot as plt\n'), ((1020, 1035), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1028, 1035), True, 'import matplotlib.pyplot as plt\n'), ((1147, 1162), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1155, 1162), True, 'import matplotlib.pyplot as plt\n'), ((1168, 1178), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1176, 1178), True, 'import matplotlib.pyplot as plt\n'), ((2456, 2492), 'caffe.Net', 'caffe.Net', (['arch', 'weights', 'caffe.TEST'], {}), '(arch, weights, caffe.TEST)\n', (2465, 2492), False, 'import caffe\n'), ((767, 781), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (777, 781), True, 'import matplotlib.pyplot as plt\n'), ((790, 805), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (798, 805), True, 'import matplotlib.pyplot as plt\n'), ((913, 928), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (921, 928), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2550), 'numpy.zeros', 'np.zeros', (['(class_number, class_number)'], {}), '((class_number, class_number))\n', (2520, 2550), True, 'import numpy as np\n'), ((2552, 2590), 'numpy.zeros', 'np.zeros', (['(class_number, class_number)'], {}), '((class_number, class_number))\n', (2560, 2590), True, 'import numpy as np\n'), ((4084, 4108), 'lib.score_util.get_scores', 
'score_util.get_scores', (['h'], {}), '(h)\n', (4105, 4108), False, 'from lib import score_util\n'), ((3756, 3812), 'lib.score_util.score_out_gt', 'score_util.score_out_gt', (['out', 'label_t'], {'n_cl': 'class_number'}), '(out, label_t, n_cl=class_number)\n', (3779, 3812), False, 'from lib import score_util\n'), ((3903, 3970), 'lib.score_util.score_out_gt_bdry', 'score_util.score_out_gt_bdry', (['out', 'label_t', 'bdry'], {'n_cl': 'class_number'}), '(out, label_t, bdry, n_cl=class_number)\n', (3931, 3970), False, 'from lib import score_util\n'), ((2708, 2724), 'time.localtime', 'time.localtime', ([], {}), '()\n', (2722, 2724), False, 'import time\n'), ((3599, 3615), 'time.localtime', 'time.localtime', ([], {}), '()\n', (3613, 3615), False, 'import time\n'), ((3689, 3714), 'PIL.Image.fromarray', 'Image.fromarray', (['(out * 12)'], {}), '(out * 12)\n', (3704, 3714), False, 'from PIL import Image\n')]
|
import os
import pytest
from itertools import combinations
from compas.datastructures import Network
from coop_assembly.help_functions import find_point_id
from coop_assembly.help_functions import find_point_id, tet_surface_area, \
tet_volume, distance_point_triangle
from coop_assembly.geometry_generation.tet_sequencing import \
compute_distance_from_grounded_node
from coop_assembly.geometry_generation.tet_sequencing import \
get_pt2tri_search_heuristic_fn, \
point2point_shortest_distance_tet_sequencing, \
point2triangle_tet_sequencing
from coop_assembly.geometry_generation.execute import execute_from_points
from coop_assembly.assembly_info_generation import calculate_gripping_plane, calculate_offset
from coop_assembly.help_functions.parsing import export_structure_data, parse_saved_structure_data
@pytest.fixture
def save_dir():
    """Absolute path of the ``test_data`` directory next to this file."""
    return os.path.join(os.path.abspath(os.path.dirname(__file__)), 'test_data')
@pytest.mark.gen_from_pts
# @pytest.mark.parametrize('test_set_name', [('single_cube'), ('YJ_12_bars')])
@pytest.mark.parametrize('test_set_name', [('YJ_12_bars')])
@pytest.mark.parametrize('radius', [(3.17), ])
# @pytest.mark.parametrize('pt_search_method', [('point2point'), ])
@pytest.mark.parametrize('pt_search_method', [('point2triangle'), ])
# @pytest.mark.parametrize('pt_search_method', [('point2point'), ('point2triangle')])
def test_generate_from_points(points_library, test_set_name, radius, pt_search_method, save_dir, write):
    """Build a bar structure from a point set using the selected tet
    sequencing strategy and optionally export it as JSON.

    ``points_library`` and ``write`` are fixtures defined elsewhere
    (presumably in conftest.py -- confirm).
    """
    points, base_tri_pts = points_library[test_set_name]
    print('\n' + '#'*10)
    print('Testing generate from point for set: {}, total # of pts: {}'.format(test_set_name, len(points)))
    # Map the base triangle's coordinates back to indices in the point list.
    start_tri_ids = [find_point_id(base_pt, points) for base_pt in base_tri_pts]
    assert len(start_tri_ids) == 3, 'start triangle should only have three points!'
    print('base triangle ids: {}'.format(start_tri_ids))
    if pt_search_method == 'point2point':
        # Order points by shortest edge-distance from the grounded triangle.
        cost_from_node = {}
        all_pt_ids = list(range(len(points)))
        elements = list(combinations(all_pt_ids, 2))
        cost_from_node = compute_distance_from_grounded_node(elements, points, start_tri_ids)
        tet_node_ids = point2point_shortest_distance_tet_sequencing(points, cost_from_node)
    elif pt_search_method == 'point2triangle':
        # Order points by a point-to-triangle heuristic with a penalty cost.
        ordering_heuristic = 'tet_surface_area'
        penalty_cost = 2.0
        print('pt search strategy: {} | heuristic: {} | penalty cost: {}'.format(pt_search_method, ordering_heuristic, penalty_cost))
        heuristic_fn = get_pt2tri_search_heuristic_fn(points, penalty_cost, ordering_heuristic)
        tet_node_ids = point2triangle_tet_sequencing(points, start_tri_ids)
    else:
        raise NotImplementedError('search method not implemented!')
    b_struct_data, o_struct_data = execute_from_points(points, tet_node_ids, radius, correct=True, check_collision=True)
    if write:
        export_structure_data(save_dir, b_struct_data, o_struct_data, file_name=test_set_name+'_'+pt_search_method+'.json')
@pytest.mark.gen_grasp_planes
@pytest.mark.parametrize('test_file_name', [('YJ_12_bars_point2triangle.json'),])
def test_gen_grasp_planes(points_library, test_file_name, save_dir):
    """Load a previously exported structure and compute gripping planes and
    offsets for every bar vertex."""
    b_struct_data, o_struct_data, _ = parse_saved_structure_data(os.path.join(save_dir, test_file_name))
    o_struct = Network.from_data(o_struct_data)
    b_struct = Network.from_data(b_struct_data)
    # Link the bar structure onto the overall structure.
    o_struct.struct_bar = b_struct
    offset_d1, offset_d2 = 5, 5
    # Number of candidate rotations/translations per gripping plane.
    nb_rot, nb_trans = 4, 4
    seq = [v for v in b_struct.vertex]
    for v in b_struct.vertex:
        calculate_gripping_plane(b_struct, v, b_struct.vertex[v]["mean_point"], nb_rot=nb_rot, nb_trans=nb_trans)
        calculate_offset(o_struct, b_struct, v, offset_d1, offset_d2, seq)
|
[
"coop_assembly.geometry_generation.execute.execute_from_points",
"compas.datastructures.Network.from_data",
"coop_assembly.geometry_generation.tet_sequencing.point2point_shortest_distance_tet_sequencing",
"os.path.dirname",
"coop_assembly.geometry_generation.tet_sequencing.point2triangle_tet_sequencing",
"itertools.combinations",
"coop_assembly.geometry_generation.tet_sequencing.get_pt2tri_search_heuristic_fn",
"coop_assembly.assembly_info_generation.calculate_gripping_plane",
"coop_assembly.assembly_info_generation.calculate_offset",
"coop_assembly.geometry_generation.tet_sequencing.compute_distance_from_grounded_node",
"pytest.mark.parametrize",
"coop_assembly.help_functions.parsing.export_structure_data",
"os.path.join",
"coop_assembly.help_functions.find_point_id"
] |
[((1068, 1124), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_set_name"""', "['YJ_12_bars']"], {}), "('test_set_name', ['YJ_12_bars'])\n", (1091, 1124), False, 'import pytest\n'), ((1128, 1169), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""radius"""', '[3.17]'], {}), "('radius', [3.17])\n", (1151, 1169), False, 'import pytest\n'), ((1243, 1306), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pt_search_method"""', "['point2triangle']"], {}), "('pt_search_method', ['point2triangle'])\n", (1266, 1306), False, 'import pytest\n'), ((3069, 3146), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_file_name"""', "['YJ_12_bars_point2triangle.json']"], {}), "('test_file_name', ['YJ_12_bars_point2triangle.json'])\n", (3092, 3146), False, 'import pytest\n'), ((929, 960), 'os.path.join', 'os.path.join', (['here', '"""test_data"""'], {}), "(here, 'test_data')\n", (941, 960), False, 'import os\n'), ((2813, 2902), 'coop_assembly.geometry_generation.execute.execute_from_points', 'execute_from_points', (['points', 'tet_node_ids', 'radius'], {'correct': '(True)', 'check_collision': '(True)'}), '(points, tet_node_ids, radius, correct=True,\n check_collision=True)\n', (2832, 2902), False, 'from coop_assembly.geometry_generation.execute import execute_from_points\n'), ((3340, 3372), 'compas.datastructures.Network.from_data', 'Network.from_data', (['o_struct_data'], {}), '(o_struct_data)\n', (3357, 3372), False, 'from compas.datastructures import Network\n'), ((3388, 3420), 'compas.datastructures.Network.from_data', 'Network.from_data', (['b_struct_data'], {}), '(b_struct_data)\n', (3405, 3420), False, 'from compas.datastructures import Network\n'), ((891, 916), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (906, 916), False, 'import os\n'), ((1714, 1744), 'coop_assembly.help_functions.find_point_id', 'find_point_id', (['base_pt', 'points'], {}), '(base_pt, points)\n', (1727, 1744), False, 'from 
coop_assembly.help_functions import find_point_id, tet_surface_area, tet_volume, distance_point_triangle\n'), ((2110, 2178), 'coop_assembly.geometry_generation.tet_sequencing.compute_distance_from_grounded_node', 'compute_distance_from_grounded_node', (['elements', 'points', 'start_tri_ids'], {}), '(elements, points, start_tri_ids)\n', (2145, 2178), False, 'from coop_assembly.geometry_generation.tet_sequencing import compute_distance_from_grounded_node\n'), ((2202, 2270), 'coop_assembly.geometry_generation.tet_sequencing.point2point_shortest_distance_tet_sequencing', 'point2point_shortest_distance_tet_sequencing', (['points', 'cost_from_node'], {}), '(points, cost_from_node)\n', (2246, 2270), False, 'from coop_assembly.geometry_generation.tet_sequencing import get_pt2tri_search_heuristic_fn, point2point_shortest_distance_tet_sequencing, point2triangle_tet_sequencing\n'), ((2921, 3047), 'coop_assembly.help_functions.parsing.export_structure_data', 'export_structure_data', (['save_dir', 'b_struct_data', 'o_struct_data'], {'file_name': "(test_set_name + '_' + pt_search_method + '.json')"}), "(save_dir, b_struct_data, o_struct_data, file_name=\n test_set_name + '_' + pt_search_method + '.json')\n", (2942, 3047), False, 'from coop_assembly.help_functions.parsing import export_structure_data, parse_saved_structure_data\n'), ((3284, 3322), 'os.path.join', 'os.path.join', (['save_dir', 'test_file_name'], {}), '(save_dir, test_file_name)\n', (3296, 3322), False, 'import os\n'), ((3595, 3704), 'coop_assembly.assembly_info_generation.calculate_gripping_plane', 'calculate_gripping_plane', (['b_struct', 'v', "b_struct.vertex[v]['mean_point']"], {'nb_rot': 'nb_rot', 'nb_trans': 'nb_trans'}), "(b_struct, v, b_struct.vertex[v]['mean_point'],\n nb_rot=nb_rot, nb_trans=nb_trans)\n", (3619, 3704), False, 'from coop_assembly.assembly_info_generation import calculate_gripping_plane, calculate_offset\n'), ((3709, 3775), 'coop_assembly.assembly_info_generation.calculate_offset', 
'calculate_offset', (['o_struct', 'b_struct', 'v', 'offset_d1', 'offset_d2', 'seq'], {}), '(o_struct, b_struct, v, offset_d1, offset_d2, seq)\n', (3725, 3775), False, 'from coop_assembly.assembly_info_generation import calculate_gripping_plane, calculate_offset\n'), ((2056, 2083), 'itertools.combinations', 'combinations', (['all_pt_ids', '(2)'], {}), '(all_pt_ids, 2)\n', (2068, 2083), False, 'from itertools import combinations\n'), ((2550, 2622), 'coop_assembly.geometry_generation.tet_sequencing.get_pt2tri_search_heuristic_fn', 'get_pt2tri_search_heuristic_fn', (['points', 'penalty_cost', 'ordering_heuristic'], {}), '(points, penalty_cost, ordering_heuristic)\n', (2580, 2622), False, 'from coop_assembly.geometry_generation.tet_sequencing import get_pt2tri_search_heuristic_fn, point2point_shortest_distance_tet_sequencing, point2triangle_tet_sequencing\n'), ((2646, 2698), 'coop_assembly.geometry_generation.tet_sequencing.point2triangle_tet_sequencing', 'point2triangle_tet_sequencing', (['points', 'start_tri_ids'], {}), '(points, start_tri_ids)\n', (2675, 2698), False, 'from coop_assembly.geometry_generation.tet_sequencing import get_pt2tri_search_heuristic_fn, point2point_shortest_distance_tet_sequencing, point2triangle_tet_sequencing\n')]
|
""" This function saves figures in PNG format.
from etutils.viz.savefig import savefig
A=savefig(data, <optional>)
INPUT:
data: fig object
OPTIONAL
OUTPUT
BOOLEAN
   [0]: If not successful
   [1]: If successful
DESCRIPTION
This function saves figures in PNG format.
EXAMPLE
from etutils.viz.donutchart import donutchart
from etutils.viz.savefig import savefig
A = donutchart([15, 30, 45, 10],['aap','boom','mies','banaan'])
B = savefig(A,"c://temp//magweg//fig.png")
SEE ALSO
"""
#print(__doc__)
#--------------------------------------------------------------------------
# Name : savefig.py
# Version : 0.1.0
# Author : E.Taskesen
# Date : Sep. 2017
#--------------------------------------------------------------------------
# Libraries
from os import makedirs
from os import mkdir
from os import path
#%%
def savefig(fig, filepath, dpi=100, transp=False):
    """Save a figure to *filepath* as an image file.

    Parameters
    ----------
    fig : figure object exposing a ``savefig`` method (e.g. matplotlib)
    filepath : str
        Destination path; nothing is written when empty.
    dpi : int, optional
        Output resolution (default 100).
    transp : bool, optional
        Transparent background (default False).

    Returns
    -------
    bool
        True when the figure was written, False otherwise.
    """
    out = False  # Returns True if successful
    if filepath != "":
        # Create the target directory (including parents) when needed.
        # Guard on the dirname: a bare filename has no directory part and
        # the original mkdir("") would raise; makedirs also handles
        # nested paths, which single-level mkdir could not.
        dirpath, _ = path.split(filepath)
        if dirpath and not path.exists(dirpath):
            makedirs(dirpath)
        fig.savefig(filepath, dpi=dpi, transparent=transp, bbox_inches='tight')
        out = True
    return out
|
[
"os.mkdir",
"os.path.split",
"os.path.exists"
] |
[((1188, 1217), 'os.path.split', 'path.split', (["Param['filepath']"], {}), "(Param['filepath'])\n", (1198, 1217), False, 'from os import path\n'), ((1229, 1249), 'os.path.exists', 'path.exists', (['getpath'], {}), '(getpath)\n', (1240, 1249), False, 'from os import path\n'), ((1270, 1284), 'os.mkdir', 'mkdir', (['getpath'], {}), '(getpath)\n', (1275, 1284), False, 'from os import mkdir\n')]
|
from app.models import db
from app.models.projects import Project
class Team(db.Model):
    """A team with a logo, token balance and an optionally assigned project."""
    __tablename__ = 'teams'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    logo = db.Column(db.String)
    tokens = db.Column(db.Integer)
    description = db.Column(db.String)
    id_assigned_project = db.Column(db.Integer, db.ForeignKey(Project.id, name="fk_assigned_project_id"))
    # Eagerly join the assigned project whenever a team is loaded.
    project = db.relationship('Project', lazy='joined')

    def __repr__(self):
        # Fixed: the original format string was missing the closing quote
        # after the first placeholder ("id='{},").
        return "<Team (id='{}', name='{}', logo='{}', tokens='{}', description='{}', id_assigned_project='{}')>".format(
            self.id, self.name, self.logo, self.tokens, self.description, self.id_assigned_project)
|
[
"app.models.db.Column",
"app.models.db.ForeignKey",
"app.models.db.relationship"
] |
[((128, 167), 'app.models.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (137, 167), False, 'from app.models import db\n'), ((179, 199), 'app.models.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (188, 199), False, 'from app.models import db\n'), ((211, 231), 'app.models.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (220, 231), False, 'from app.models import db\n'), ((245, 266), 'app.models.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (254, 266), False, 'from app.models import db\n'), ((285, 305), 'app.models.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (294, 305), False, 'from app.models import db\n'), ((427, 468), 'app.models.db.relationship', 'db.relationship', (['"""Project"""'], {'lazy': '"""joined"""'}), "('Project', lazy='joined')\n", (442, 468), False, 'from app.models import db\n'), ((355, 411), 'app.models.db.ForeignKey', 'db.ForeignKey', (['Project.id'], {'name': '"""fk_assigned_project_id"""'}), "(Project.id, name='fk_assigned_project_id')\n", (368, 411), False, 'from app.models import db\n')]
|
#!/usr/bin/env python3
import os
from fetch import fetch
def main():
    """Print an OpenVPN route directive for every fetched PKU free IP range."""
    routes = fetch()
    print('# PKU free ip')
    # Each entry is a 3-tuple (ip, <unused middle field>, netmask).
    for address, _, mask in routes:
        print('route', address, mask, 'net_gateway')


if __name__ == '__main__':
    main()
|
[
"fetch.fetch"
] |
[((87, 94), 'fetch.fetch', 'fetch', ([], {}), '()\n', (92, 94), False, 'from fetch import fetch\n')]
|
from django.urls import path
from core import views
# URL namespace for reversing, e.g. reverse('core:index').
app_name = 'core'
urlpatterns = [
    path('', views.index, name='index'),
    # Class-based list view for all news items.
    path('noticias/', views.noticiaListView.as_view(), name='noticiaListView'),
    path('noticias/new/', views.noticiaCadastro, name='noticiaCadastro'),
    path('noticias/<int:id>/', views.noticiaDetalhe, name='noticiaDetalhe'),
    path('noticias/<int:id>/edit/', views.noticiaEdit, name='noticiaEdit'),
    # Two-step delete: button view first, then explicit confirmation view.
    path('noticias/<int:id>/delete/', views.noticiaExcluirBotao, name='noticiaExcluirBotao'),
    path('noticias/<int:id>/delete/confirm/', views.noticiaExcluir, name='noticiaExcluir'),
    path('comentarios/<int:id>/new/', views.comentarioCadastro, name='comentarioCadastro'),
]
|
[
"core.views.noticiaListView.as_view",
"django.urls.path"
] |
[((88, 123), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (92, 123), False, 'from django.urls import path\n'), ((204, 272), 'django.urls.path', 'path', (['"""noticias/new/"""', 'views.noticiaCadastro'], {'name': '"""noticiaCadastro"""'}), "('noticias/new/', views.noticiaCadastro, name='noticiaCadastro')\n", (208, 272), False, 'from django.urls import path\n'), ((275, 346), 'django.urls.path', 'path', (['"""noticias/<int:id>/"""', 'views.noticiaDetalhe'], {'name': '"""noticiaDetalhe"""'}), "('noticias/<int:id>/', views.noticiaDetalhe, name='noticiaDetalhe')\n", (279, 346), False, 'from django.urls import path\n'), ((349, 419), 'django.urls.path', 'path', (['"""noticias/<int:id>/edit/"""', 'views.noticiaEdit'], {'name': '"""noticiaEdit"""'}), "('noticias/<int:id>/edit/', views.noticiaEdit, name='noticiaEdit')\n", (353, 419), False, 'from django.urls import path\n'), ((422, 515), 'django.urls.path', 'path', (['"""noticias/<int:id>/delete/"""', 'views.noticiaExcluirBotao'], {'name': '"""noticiaExcluirBotao"""'}), "('noticias/<int:id>/delete/', views.noticiaExcluirBotao, name=\n 'noticiaExcluirBotao')\n", (426, 515), False, 'from django.urls import path\n'), ((513, 604), 'django.urls.path', 'path', (['"""noticias/<int:id>/delete/confirm/"""', 'views.noticiaExcluir'], {'name': '"""noticiaExcluir"""'}), "('noticias/<int:id>/delete/confirm/', views.noticiaExcluir, name=\n 'noticiaExcluir')\n", (517, 604), False, 'from django.urls import path\n'), ((603, 694), 'django.urls.path', 'path', (['"""comentarios/<int:id>/new/"""', 'views.comentarioCadastro'], {'name': '"""comentarioCadastro"""'}), "('comentarios/<int:id>/new/', views.comentarioCadastro, name=\n 'comentarioCadastro')\n", (607, 694), False, 'from django.urls import path\n'), ((145, 176), 'core.views.noticiaListView.as_view', 'views.noticiaListView.as_view', ([], {}), '()\n', (174, 176), False, 'from core import views\n')]
|
import os.path as path
import sys
import numpy as np
thisdir = path.dirname(path.realpath(__file__))
sys.path.append(path.join(thisdir, '..'))
import dataset
import model
from graph_server import GraphServer
name = sys.argv[1]
build_dir = path.join(thisdir, '..', 'outputs', 'builds')
clusters = model.load(path.join(build_dir, '%s.cluster.npz' % name))
distance = model.load(path.join(build_dir, '%s.distance.hd5' % name))
connectivity = model.load(path.join(build_dir, 'connectivity.hd5'))
nodes = dataset.news.fetch(100000)
server = GraphServer(clusters, distance, connectivity, nodes, verbose=True)
server.listen()
# test
# print(server._groups_from_title('Denmark'))
# print(server._fetch_single_group(300))
|
[
"graph_server.GraphServer",
"os.path.realpath",
"os.path.join",
"dataset.news.fetch"
] |
[((244, 289), 'os.path.join', 'path.join', (['thisdir', '""".."""', '"""outputs"""', '"""builds"""'], {}), "(thisdir, '..', 'outputs', 'builds')\n", (253, 289), True, 'import os.path as path\n'), ((506, 532), 'dataset.news.fetch', 'dataset.news.fetch', (['(100000)'], {}), '(100000)\n', (524, 532), False, 'import dataset\n'), ((543, 609), 'graph_server.GraphServer', 'GraphServer', (['clusters', 'distance', 'connectivity', 'nodes'], {'verbose': '(True)'}), '(clusters, distance, connectivity, nodes, verbose=True)\n', (554, 609), False, 'from graph_server import GraphServer\n'), ((78, 101), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (91, 101), True, 'import os.path as path\n'), ((119, 143), 'os.path.join', 'path.join', (['thisdir', '""".."""'], {}), "(thisdir, '..')\n", (128, 143), True, 'import os.path as path\n'), ((313, 358), 'os.path.join', 'path.join', (['build_dir', "('%s.cluster.npz' % name)"], {}), "(build_dir, '%s.cluster.npz' % name)\n", (322, 358), True, 'import os.path as path\n'), ((382, 428), 'os.path.join', 'path.join', (['build_dir', "('%s.distance.hd5' % name)"], {}), "(build_dir, '%s.distance.hd5' % name)\n", (391, 428), True, 'import os.path as path\n'), ((456, 496), 'os.path.join', 'path.join', (['build_dir', '"""connectivity.hd5"""'], {}), "(build_dir, 'connectivity.hd5')\n", (465, 496), True, 'import os.path as path\n')]
|
#!/usr/bin/env python3
from pygmy.core.initialize import initialize
initialize()
from pygmy.rest.manage import app
if __name__ == '__main__':
app.run()
|
[
"pygmy.rest.manage.app.run",
"pygmy.core.initialize.initialize"
] |
[((68, 80), 'pygmy.core.initialize.initialize', 'initialize', ([], {}), '()\n', (78, 80), False, 'from pygmy.core.initialize import initialize\n'), ((147, 156), 'pygmy.rest.manage.app.run', 'app.run', ([], {}), '()\n', (154, 156), False, 'from pygmy.rest.manage import app\n')]
|
from django.conf import settings
from django.http import HttpResponse
from rest_framework import generics, status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from datachimp.models.machinelearning_model import MachineLearningModel
from datachimp.models.membership import Membership
from datachimp.serializers.machinelearning_model import MachineLearningModelSerializer
from datachimp.utils.data_utils import execute_query
from datachimp.api_permissions import HasProjectMembership
from rest_framework.permissions import IsAuthenticated
class MLModelAPI(generics.ListAPIView):
serializer_class = MachineLearningModelSerializer
queryset = MachineLearningModel.objects.select_related('user__profile').all()
permission_classes = (IsAuthenticated, HasProjectMembership)
def list(self, request,model_id=None, project_id=None, st=None):
if model_id:
queryset = self.get_queryset().filter(id=model_id, project=project_id).order_by("-date_created")
else:
queryset = self.get_queryset().filter(project=project_id).order_by("-date_created")
serializer = MachineLearningModelSerializer(queryset, many=True)
if st is None:
st = status.HTTP_200_OK
return Response(serializer.data, status=st)
def delete(self, request, project_id):
mid = request.data.get('model_id')
user = request.user
ml_model_obj = MachineLearningModel.objects.get(pk=mid)
# Set an owner flag based on project_owner or model owner
owner_flag = True if user == ml_model_obj.user or user == ml_model_obj.project.user else False
if not owner_flag:
return Response(status=status.HTTP_401_UNAUTHORIZED)
ml_model_obj.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class CreateExperimentAPI(generics.CreateAPIView):
serializer_class = MachineLearningModelSerializer
queryset = MachineLearningModel.objects.all()
permission_classes = (IsAuthenticated, HasProjectMembership)
def create(self, request, *args, **kwargs):
data = request.data.copy()
data['user'] = self.request.user.id
experiment_id = self.request.data.get('experiment_id')
# If the experiment already exists then don't create the experiment
try:
exp_obj = MachineLearningModel.objects.get(experiment_id = experiment_id)
return Response({ 'model_id': exp_obj.id }, status=status.HTTP_200_OK)
except MachineLearningModel.DoesNotExist:
pass
serializer = self.get_serializer(data=data)
serializer.is_valid()
exp_obj = serializer.save()
headers = self.get_success_headers(serializer.data)
return Response({ 'model_id': exp_obj.id }, status=status.HTTP_201_CREATED)
@api_view(['GET'])
@permission_classes((HasProjectMembership, IsAuthenticated))
def get_param_fields(request, project_id):
'''
List of model parameters as columnas to be shown on customize menu
'''
try:
project_id = int(project_id)
except Exception as e:
return Response("Error: %s" % e, status=status.HTTP_400_BAD_REQUEST)
# Check the user has permission for the project
try:
Membership.objects.get(user=request.user, project=project_id)
except Membership.DoesNotExist:
return Response(status=status.HTTP_403_FORBIDDEN)
query = '''
select distinct json_object_keys(model_parameters::json) as parameter
from datachimp_machinelearningmodel ml
where json_typeof(model_parameters::json) = 'object'
and project_id = %s
'''
query = query % (
project_id,
)
result_raw = execute_query(query)
return Response(result_raw, status=status.HTTP_200_OK)
@api_view(['POST'])
@permission_classes((HasProjectMembership, IsAuthenticated))
def send_selected_param_data(request, project_id):
'''
Send the data of the model parameters to be displayed in the table
'''
try:
param_fields = request.data.getlist('param_fields[]')
except Exception as e:
return Response("Error: %s" % e, status=status.HTTP_400_BAD_REQUEST)
query = '''
select distinct id,key,value
from datachimp_machinelearningmodel ml, json_each_text(model_parameters::json)
where json_typeof(model_parameters::json) = 'object'
and project_id = %s
and key in (%s)
'''
query = query % (
project_id,
",".join([ "'" + param + "'" for param in param_fields])
)
result_raw = execute_query(query)
return Response(result_raw, status=status.HTTP_200_OK)
|
[
"datachimp.models.machinelearning_model.MachineLearningModel.objects.get",
"datachimp.models.machinelearning_model.MachineLearningModel.objects.all",
"datachimp.models.machinelearning_model.MachineLearningModel.objects.select_related",
"datachimp.utils.data_utils.execute_query",
"datachimp.serializers.machinelearning_model.MachineLearningModelSerializer",
"datachimp.models.membership.Membership.objects.get",
"rest_framework.response.Response",
"rest_framework.decorators.permission_classes",
"rest_framework.decorators.api_view"
] |
[((2651, 2668), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (2659, 2668), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((2670, 2729), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(HasProjectMembership, IsAuthenticated)'], {}), '((HasProjectMembership, IsAuthenticated))\n', (2688, 2729), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((3541, 3559), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (3549, 3559), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((3561, 3620), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(HasProjectMembership, IsAuthenticated)'], {}), '((HasProjectMembership, IsAuthenticated))\n', (3579, 3620), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((1864, 1898), 'datachimp.models.machinelearning_model.MachineLearningModel.objects.all', 'MachineLearningModel.objects.all', ([], {}), '()\n', (1896, 1898), False, 'from datachimp.models.machinelearning_model import MachineLearningModel\n'), ((3460, 3480), 'datachimp.utils.data_utils.execute_query', 'execute_query', (['query'], {}), '(query)\n', (3473, 3480), False, 'from datachimp.utils.data_utils import execute_query\n'), ((3490, 3537), 'rest_framework.response.Response', 'Response', (['result_raw'], {'status': 'status.HTTP_200_OK'}), '(result_raw, status=status.HTTP_200_OK)\n', (3498, 3537), False, 'from rest_framework.response import Response\n'), ((4246, 4266), 'datachimp.utils.data_utils.execute_query', 'execute_query', (['query'], {}), '(query)\n', (4259, 4266), False, 'from datachimp.utils.data_utils import execute_query\n'), ((4276, 4323), 'rest_framework.response.Response', 'Response', (['result_raw'], {'status': 'status.HTTP_200_OK'}), '(result_raw, status=status.HTTP_200_OK)\n', (4284, 4323), False, 'from 
rest_framework.response import Response\n'), ((1133, 1184), 'datachimp.serializers.machinelearning_model.MachineLearningModelSerializer', 'MachineLearningModelSerializer', (['queryset'], {'many': '(True)'}), '(queryset, many=True)\n', (1163, 1184), False, 'from datachimp.serializers.machinelearning_model import MachineLearningModelSerializer\n'), ((1239, 1275), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'st'}), '(serializer.data, status=st)\n', (1247, 1275), False, 'from rest_framework.response import Response\n'), ((1393, 1433), 'datachimp.models.machinelearning_model.MachineLearningModel.objects.get', 'MachineLearningModel.objects.get', ([], {'pk': 'mid'}), '(pk=mid)\n', (1425, 1433), False, 'from datachimp.models.machinelearning_model import MachineLearningModel\n'), ((1704, 1747), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (1712, 1747), False, 'from rest_framework.response import Response\n'), ((2579, 2645), 'rest_framework.response.Response', 'Response', (["{'model_id': exp_obj.id}"], {'status': 'status.HTTP_201_CREATED'}), "({'model_id': exp_obj.id}, status=status.HTTP_201_CREATED)\n", (2587, 2645), False, 'from rest_framework.response import Response\n'), ((3051, 3112), 'datachimp.models.membership.Membership.objects.get', 'Membership.objects.get', ([], {'user': 'request.user', 'project': 'project_id'}), '(user=request.user, project=project_id)\n', (3073, 3112), False, 'from datachimp.models.membership import Membership\n'), ((711, 771), 'datachimp.models.machinelearning_model.MachineLearningModel.objects.select_related', 'MachineLearningModel.objects.select_related', (['"""user__profile"""'], {}), "('user__profile')\n", (754, 771), False, 'from datachimp.models.machinelearning_model import MachineLearningModel\n'), ((1623, 1668), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_401_UNAUTHORIZED'}), 
'(status=status.HTTP_401_UNAUTHORIZED)\n', (1631, 1668), False, 'from rest_framework.response import Response\n'), ((2223, 2284), 'datachimp.models.machinelearning_model.MachineLearningModel.objects.get', 'MachineLearningModel.objects.get', ([], {'experiment_id': 'experiment_id'}), '(experiment_id=experiment_id)\n', (2255, 2284), False, 'from datachimp.models.machinelearning_model import MachineLearningModel\n'), ((2298, 2359), 'rest_framework.response.Response', 'Response', (["{'model_id': exp_obj.id}"], {'status': 'status.HTTP_200_OK'}), "({'model_id': exp_obj.id}, status=status.HTTP_200_OK)\n", (2306, 2359), False, 'from rest_framework.response import Response\n'), ((2928, 2989), 'rest_framework.response.Response', 'Response', (["('Error: %s' % e)"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Error: %s' % e, status=status.HTTP_400_BAD_REQUEST)\n", (2936, 2989), False, 'from rest_framework.response import Response\n'), ((3158, 3200), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_403_FORBIDDEN'}), '(status=status.HTTP_403_FORBIDDEN)\n', (3166, 3200), False, 'from rest_framework.response import Response\n'), ((3851, 3912), 'rest_framework.response.Response', 'Response', (["('Error: %s' % e)"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Error: %s' % e, status=status.HTTP_400_BAD_REQUEST)\n", (3859, 3912), False, 'from rest_framework.response import Response\n')]
|
"""camera_fusion CameraCorrected class tests."""
import cv2
import os
import sys
import filecmp
import pytest
import numpy as np
import shutil
import time
import unittest.mock as mock
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import camera_fusion # noqa
class Vc(object):
"""VideoCapture mockup."""
def __init__(self, parent, real_captured_frame=None):
"""Initialize VideoCapture mockup.
Args:
parent(Camera object): parent's Camera.
"""
self.parent = parent
self.real_captured_frame = real_captured_frame
def get(self, setting):
"""Mock VideoCapture's get function only to get width and height."""
if setting == 3:
return 1280
if setting == 4:
return 720
return setting
def isOpened(self):
"""Mock VideoCapture's isOpened function."""
return True
def read(self):
"""Mock VideoCapture's read function."""
time.sleep(0.33)
self.parent.stop = True
print('1 frame')
return (True, self.real_captured_frame)
def set(self, setting0, setting1):
print(setting0, setting1)
# Import tests
def test_import_CameraCorrected():
"""Test CameraCorrected class importation."""
assert camera_fusion.CameraCorrected.__module__ == 'camera_fusion.CameraCorrected' # noqa
# PostureBuffer tests
def test_PostureBuffer():
"""Test PostureBuffer class definition."""
c = camera_fusion.CameraCorrected(0, 11)
assert c.board_post.window_length == 4
def test_PostureBuffer_pop():
"""Test PostureBuffer buffer abilities."""
rvec = np.array([[1], [0], [0]])
tvec = np.array([[1], [0], [0]])
c = camera_fusion.CameraCorrected(0, 11)
frvec, ftvec = c.board_post.update(rvec, tvec)
b_rvecs_shape = c.board_post.buff_rvecs.shape
b_tvecs_shape = c.board_post.buff_tvecs.shape
assert (frvec.shape, ftvec.shape, b_rvecs_shape, b_tvecs_shape) == (
(3,), (3,), (3, 1), (3, 1))
def test_PostureBuffer_filter():
"""Test PostureBuffer filtering."""
rvec = np.array([[0.1], [0.2], [0]])
tvec = np.array([[0.2], [0.1], [0]])
c = camera_fusion.CameraCorrected(0, 0)
frvec, ftvec = c.board_post.update(rvec, tvec)
frvec, ftvec = c.board_post.update(rvec * 0.1, tvec * 0.1)
frvec, ftvec = c.board_post.update(rvec, tvec)
# This should trigger the filter default avg_max_std=0.1 maximal limit
frvec, ftvec = c.board_post.update(rvec * 2, tvec * 2)
frvec, ftvec = c.board_post.update(rvec * 3, tvec * 3)
np.testing.assert_allclose([[0.3], [0.6], [0.0]], frvec)
np.testing.assert_allclose([[0.6], [0.3], [0.]], ftvec)
# Camera tests
def test_calibrate_camera_correction():
"""Test calibrate_camera_correction function."""
c = camera_fusion.CameraCorrected(0, 11)
assert os.path.isdir('./data')
shutil.rmtree('data')
shutil.copytree('./tests/test_CameraCorrected', 'data')
c.calibrate_camera_correction()
assert c.aruco_dict_num == 11
assert c.charuco_square_length == 3.7999999999999999e-02
assert c.charuco_marker_size == 2.9000000000000001e-02
assert c.width == 1280
assert c.height == 720
np.testing.assert_allclose(
[[1.0824122780443031e+03, 0., 6.4165850036653376e+02],
[0., 1.0824122780443031e+03, 3.5960861017399100e+02],
[0., 0., 1.]],
c.camera_matrix)
np.testing.assert_allclose(
[[7.6732549196567842e-02, -4.1976860824194072e-02, 0., 0.,
-1.8028155099783838e-01]], c.dist_coeffs)
shutil.rmtree('data')
def test_detect_markers():
"""Test the detect_markers function."""
c = camera_fusion.CameraCorrected(0, 11)
shutil.rmtree('data')
shutil.copytree('./tests/test_CameraCorrected', 'data')
c.calibrate_camera_correction()
real_captured_frame = np.load('./data/real_captured_frame.npy')
with mock.patch('camera_fusion.CameraCorrected.read',
return_value=real_captured_frame):
frame, corners, ids = c.detect_markers()
np.testing.assert_array_equal(frame, real_captured_frame)
correct_corners = np.array([
[[[1112., 506.], [1111., 374.], [1245., 368.], [1245., 500.]]],
[[[22., 194.], [11., 57.], [144., 51.], [158., 189.]]],
[[[744., 164.], [739., 23.], [878., 17.], [879., 157.]]],
[[[243., 715.], [236., 585.], [366., 580.], [373., 708.]]],
[[[591., 699.], [584., 570.], [714., 565.], [720., 694.]]],
[[[940., 688.], [934., 558.], [1067., 552.], [1072., 684.]]],
[[[57., 549.], [45., 419.], [178., 413.], [189., 543.]]],
[[[407., 534.], [399., 405.], [529., 399.], [538., 528.]]],
[[[757., 519.], [752., 390.], [884., 384.], [888., 514.]]],
[[[220., 367.], [207., 234.], [341., 228.], [351., 362.]]],
[[[573., 353.], [565., 219.], [699., 213.], [705., 347.]]],
[[[930., 337.], [927., 201.], [1062., 195.], [1065., 330.]]],
[[[383., 180.], [372., 42.], [508., 34.], [517., 175.]]]])
np.testing.assert_array_equal(corners, correct_corners)
correct_ids = np.array(
[[15], [1], [11], [2], [7], [12], [0], [5], [10], [3], [8], [13], [6]])
np.testing.assert_array_equal(ids, correct_ids)
shutil.rmtree('data')
def test_draw_fps():
"""Test draw_fps function."""
with mock.patch('time.time', return_value=0):
c = camera_fusion.CameraCorrected(0, 11)
c.width = 1280
c.height = 720
frame = np.load('./tests/test_CameraCorrected/real_captured_frame.npy')
with mock.patch('time.time', return_value=0.03):
frame = c.draw_fps(frame) # 33 fps
np.testing.assert_array_equal(np.load(
'./tests/test_CameraCorrected/real_captured_frame_with_30fps.npy'),
frame)
def test_draw_text():
"""Test draw_text function."""
c = camera_fusion.CameraCorrected(0, 11)
c.width = 1280
c.height = 720
frame = np.load('./tests/test_CameraCorrected/real_captured_frame.npy')
frame = c.draw_text(frame, 'test') # 33 fps
np.save('./tests/test_CameraCorrected/real_captured_frame_withText.npy',
frame)
np.testing.assert_array_equal(
np.load(
'./tests/test_CameraCorrected/real_captured_frame_withText.npy'),
frame)
def test_estimate_markers_posture():
"""Test the estimate_markers_posture function."""
c = camera_fusion.CameraCorrected(0, 11)
shutil.rmtree('data')
shutil.copytree('./tests/test_CameraCorrected', 'data')
c.calibrate_camera_correction()
real_captured_frame = np.load('./data/real_captured_frame.npy')
with mock.patch('camera_fusion.CameraCorrected.read',
return_value=real_captured_frame):
frame = c.estimate_markers_posture()
correct_markers_posture_frame = np.load(
'./data/correct_markers_posture_frame.npy')
np.testing.assert_array_equal(frame, correct_markers_posture_frame)
shutil.rmtree('data')
def test_estimate_board_posture():
"""Test the estimate_board_posture function."""
c = camera_fusion.CameraCorrected(0, 11)
shutil.rmtree('data')
shutil.copytree('./tests/test_CameraCorrected', 'data')
c.calibrate_camera_correction()
real_captured_frame = np.load('./data/real_captured_frame.npy')
with mock.patch('camera_fusion.CameraCorrected.read',
return_value=real_captured_frame):
frame = c.estimate_board_posture()
correct_board_posture_frame = np.load(
'./data/correct_board_posture_frame.npy')
np.testing.assert_array_equal(frame, correct_board_posture_frame)
shutil.rmtree('data')
def test_estimate_board_and_markers_posture():
"""Test the estimate_estimate_board_and_markers_posture function."""
c = camera_fusion.CameraCorrected(0, 11)
shutil.rmtree('data')
shutil.copytree('./tests/test_CameraCorrected', 'data')
c.calibrate_camera_correction()
real_captured_frame = np.load('./data/real_captured_frame.npy')
with mock.patch('camera_fusion.CameraCorrected.read',
return_value=real_captured_frame):
frame = c.estimate_board_and_markers_posture()
np.save(
'./tests/test_CameraCorrected/correct_board_and_markers_posture_frame.npy', # noqa
frame)
np.save('./data/correct_board_and_markers_posture_frame.npy',
frame)
correct_board_and_markers_posture_frame = np.load(
'./data/correct_board_and_markers_posture_frame.npy')
np.testing.assert_array_equal(
frame, correct_board_and_markers_posture_frame)
shutil.rmtree('data')
def test_initialize():
"""Test CameraCorrected's initialize function."""
c = camera_fusion.CameraCorrected(0, 11)
c.settings = [(0, 0), (1, 1), (3, 1280), (4, 720)]
frame = np.load('./tests/test_CameraCorrected/real_captured_frame.npy')
c.current_frame = frame
with mock.patch('cv2.VideoCapture', return_value=Vc(c)):
with mock.patch(
'camera_fusion.CameraCorrected.calibrate_camera_correction'):
with mock.patch('camera_fusion.CameraCorrected.read',
return_value=frame):
c.initialize()
def test_read_undistort():
"""Test the read_undistort function."""
c = camera_fusion.CameraCorrected(0, 11)
shutil.rmtree('data')
shutil.copytree('./tests/test_CameraCorrected', 'data')
c.calibrate_camera_correction()
with mock.patch('camera_fusion.CameraCorrected.read',
return_value=np.load('./data/real_captured_frame.npy')):
frame_undistored = c.read_undistort()
valid_frame_undistored = np.load('./data/real_undistored_frame.npy')
np.testing.assert_array_equal(valid_frame_undistored, frame_undistored)
shutil.rmtree('data')
def test_test_camera():
"""Test the basic camera test."""
shutil.copytree('./tests/test_CameraCorrected', 'data')
c = camera_fusion.CameraCorrected(0, 11)
c.calibrate_camera_correction()
# Testing camera setup
with mock.patch('camera_fusion.CameraCorrected.read',
return_value=np.load(
'./data/real_captured_frame.npy')):
c.test_camera()
shutil.rmtree('data')
def test__update_frame():
"""Test the _update_frame function."""
c = camera_fusion.CameraCorrected(0, 11)
c.stop = False
shutil.rmtree('data')
shutil.copytree('./tests/test_CameraCorrected', 'data')
real_captured_frame = np.load('./data/real_captured_frame.npy')
c.cap = Vc(c, real_captured_frame)
c.calibrate_camera_correction()
# Testing camera frame read and update
c.cap = Vc(c, real_captured_frame)
c._update_frame()
np.testing.assert_array_equal(c.current_frame, real_captured_frame)
shutil.rmtree('data')
# def test_write_defaultConfig():
# """Test write_defaultConfig function."""
# shutil.rmtree('data')
# c = camera_fusion.CameraCorrected(0, 11)
# c.width = 1280
# c.height = 720
# with mock.patch('builtins.input', return_value=0.03):
# c.write_defaultConfig()
# assert os.path.isfile('./data/defaultConfig.xml')
# assert filecmp.cmp(
# './data/defaultConfig.xml',
# './tests/test_CameraCorrected/defaultConfig_assert.xml')
# shutil.rmtree('data')
|
[
"numpy.load",
"numpy.save",
"shutil.copytree",
"shutil.rmtree",
"os.path.isdir",
"numpy.testing.assert_array_equal",
"os.path.dirname",
"time.sleep",
"unittest.mock.patch",
"camera_fusion.CameraCorrected",
"numpy.array",
"numpy.testing.assert_allclose"
] |
[((1520, 1556), 'camera_fusion.CameraCorrected', 'camera_fusion.CameraCorrected', (['(0)', '(11)'], {}), '(0, 11)\n', (1549, 1556), False, 'import camera_fusion\n'), ((1690, 1715), 'numpy.array', 'np.array', (['[[1], [0], [0]]'], {}), '([[1], [0], [0]])\n', (1698, 1715), True, 'import numpy as np\n'), ((1727, 1752), 'numpy.array', 'np.array', (['[[1], [0], [0]]'], {}), '([[1], [0], [0]])\n', (1735, 1752), True, 'import numpy as np\n'), ((1761, 1797), 'camera_fusion.CameraCorrected', 'camera_fusion.CameraCorrected', (['(0)', '(11)'], {}), '(0, 11)\n', (1790, 1797), False, 'import camera_fusion\n'), ((2146, 2175), 'numpy.array', 'np.array', (['[[0.1], [0.2], [0]]'], {}), '([[0.1], [0.2], [0]])\n', (2154, 2175), True, 'import numpy as np\n'), ((2187, 2216), 'numpy.array', 'np.array', (['[[0.2], [0.1], [0]]'], {}), '([[0.2], [0.1], [0]])\n', (2195, 2216), True, 'import numpy as np\n'), ((2225, 2260), 'camera_fusion.CameraCorrected', 'camera_fusion.CameraCorrected', (['(0)', '(0)'], {}), '(0, 0)\n', (2254, 2260), False, 'import camera_fusion\n'), ((2624, 2680), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[[0.3], [0.6], [0.0]]', 'frvec'], {}), '([[0.3], [0.6], [0.0]], frvec)\n', (2650, 2680), True, 'import numpy as np\n'), ((2685, 2741), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[[0.6], [0.3], [0.0]]', 'ftvec'], {}), '([[0.6], [0.3], [0.0]], ftvec)\n', (2711, 2741), True, 'import numpy as np\n'), ((2859, 2895), 'camera_fusion.CameraCorrected', 'camera_fusion.CameraCorrected', (['(0)', '(11)'], {}), '(0, 11)\n', (2888, 2895), False, 'import camera_fusion\n'), ((2907, 2930), 'os.path.isdir', 'os.path.isdir', (['"""./data"""'], {}), "('./data')\n", (2920, 2930), False, 'import os\n'), ((2935, 2956), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (2948, 2956), False, 'import shutil\n'), ((2961, 3016), 'shutil.copytree', 'shutil.copytree', (['"""./tests/test_CameraCorrected"""', '"""data"""'], {}), 
"('./tests/test_CameraCorrected', 'data')\n", (2976, 3016), False, 'import shutil\n'), ((3265, 3426), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[[1082.412278044303, 0.0, 641.6585003665338], [0.0, 1082.412278044303, \n 359.608610173991], [0.0, 0.0, 1.0]]', 'c.camera_matrix'], {}), '([[1082.412278044303, 0.0, 641.6585003665338], [\n 0.0, 1082.412278044303, 359.608610173991], [0.0, 0.0, 1.0]], c.\n camera_matrix)\n', (3291, 3426), True, 'import numpy as np\n'), ((3472, 3596), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[[0.07673254919656784, -0.04197686082419407, 0.0, 0.0, -0.18028155099783838]]', 'c.dist_coeffs'], {}), '([[0.07673254919656784, -0.04197686082419407, 0.0,\n 0.0, -0.18028155099783838]], c.dist_coeffs)\n', (3498, 3596), True, 'import numpy as np\n'), ((3622, 3643), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (3635, 3643), False, 'import shutil\n'), ((3725, 3761), 'camera_fusion.CameraCorrected', 'camera_fusion.CameraCorrected', (['(0)', '(11)'], {}), '(0, 11)\n', (3754, 3761), False, 'import camera_fusion\n'), ((3766, 3787), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (3779, 3787), False, 'import shutil\n'), ((3792, 3847), 'shutil.copytree', 'shutil.copytree', (['"""./tests/test_CameraCorrected"""', '"""data"""'], {}), "('./tests/test_CameraCorrected', 'data')\n", (3807, 3847), False, 'import shutil\n'), ((3910, 3951), 'numpy.load', 'np.load', (['"""./data/real_captured_frame.npy"""'], {}), "('./data/real_captured_frame.npy')\n", (3917, 3951), True, 'import numpy as np\n'), ((4118, 4175), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['frame', 'real_captured_frame'], {}), '(frame, real_captured_frame)\n', (4147, 4175), True, 'import numpy as np\n'), ((4199, 5150), 'numpy.array', 'np.array', (['[[[[1112.0, 506.0], [1111.0, 374.0], [1245.0, 368.0], [1245.0, 500.0]]], [[\n [22.0, 194.0], [11.0, 57.0], [144.0, 51.0], [158.0, 189.0]]], 
[[[744.0,\n 164.0], [739.0, 23.0], [878.0, 17.0], [879.0, 157.0]]], [[[243.0, 715.0\n ], [236.0, 585.0], [366.0, 580.0], [373.0, 708.0]]], [[[591.0, 699.0],\n [584.0, 570.0], [714.0, 565.0], [720.0, 694.0]]], [[[940.0, 688.0], [\n 934.0, 558.0], [1067.0, 552.0], [1072.0, 684.0]]], [[[57.0, 549.0], [\n 45.0, 419.0], [178.0, 413.0], [189.0, 543.0]]], [[[407.0, 534.0], [\n 399.0, 405.0], [529.0, 399.0], [538.0, 528.0]]], [[[757.0, 519.0], [\n 752.0, 390.0], [884.0, 384.0], [888.0, 514.0]]], [[[220.0, 367.0], [\n 207.0, 234.0], [341.0, 228.0], [351.0, 362.0]]], [[[573.0, 353.0], [\n 565.0, 219.0], [699.0, 213.0], [705.0, 347.0]]], [[[930.0, 337.0], [\n 927.0, 201.0], [1062.0, 195.0], [1065.0, 330.0]]], [[[383.0, 180.0], [\n 372.0, 42.0], [508.0, 34.0], [517.0, 175.0]]]]'], {}), '([[[[1112.0, 506.0], [1111.0, 374.0], [1245.0, 368.0], [1245.0, \n 500.0]]], [[[22.0, 194.0], [11.0, 57.0], [144.0, 51.0], [158.0, 189.0]]\n ], [[[744.0, 164.0], [739.0, 23.0], [878.0, 17.0], [879.0, 157.0]]], [[\n [243.0, 715.0], [236.0, 585.0], [366.0, 580.0], [373.0, 708.0]]], [[[\n 591.0, 699.0], [584.0, 570.0], [714.0, 565.0], [720.0, 694.0]]], [[[\n 940.0, 688.0], [934.0, 558.0], [1067.0, 552.0], [1072.0, 684.0]]], [[[\n 57.0, 549.0], [45.0, 419.0], [178.0, 413.0], [189.0, 543.0]]], [[[407.0,\n 534.0], [399.0, 405.0], [529.0, 399.0], [538.0, 528.0]]], [[[757.0, \n 519.0], [752.0, 390.0], [884.0, 384.0], [888.0, 514.0]]], [[[220.0, \n 367.0], [207.0, 234.0], [341.0, 228.0], [351.0, 362.0]]], [[[573.0, \n 353.0], [565.0, 219.0], [699.0, 213.0], [705.0, 347.0]]], [[[930.0, \n 337.0], [927.0, 201.0], [1062.0, 195.0], [1065.0, 330.0]]], [[[383.0, \n 180.0], [372.0, 42.0], [508.0, 34.0], [517.0, 175.0]]]])\n', (4207, 5150), True, 'import numpy as np\n'), ((5100, 5155), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['corners', 'correct_corners'], {}), '(corners, correct_corners)\n', (5129, 5155), True, 'import numpy as np\n'), ((5175, 5260), 'numpy.array', 'np.array', 
(['[[15], [1], [11], [2], [7], [12], [0], [5], [10], [3], [8], [13], [6]]'], {}), '([[15], [1], [11], [2], [7], [12], [0], [5], [10], [3], [8], [13], [6]]\n )\n', (5183, 5260), True, 'import numpy as np\n'), ((5269, 5316), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ids', 'correct_ids'], {}), '(ids, correct_ids)\n', (5298, 5316), True, 'import numpy as np\n'), ((5321, 5342), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (5334, 5342), False, 'import shutil\n'), ((5549, 5612), 'numpy.load', 'np.load', (['"""./tests/test_CameraCorrected/real_captured_frame.npy"""'], {}), "('./tests/test_CameraCorrected/real_captured_frame.npy')\n", (5556, 5612), True, 'import numpy as np\n'), ((5911, 5947), 'camera_fusion.CameraCorrected', 'camera_fusion.CameraCorrected', (['(0)', '(11)'], {}), '(0, 11)\n', (5940, 5947), False, 'import camera_fusion\n'), ((5998, 6061), 'numpy.load', 'np.load', (['"""./tests/test_CameraCorrected/real_captured_frame.npy"""'], {}), "('./tests/test_CameraCorrected/real_captured_frame.npy')\n", (6005, 6061), True, 'import numpy as np\n'), ((6115, 6194), 'numpy.save', 'np.save', (['"""./tests/test_CameraCorrected/real_captured_frame_withText.npy"""', 'frame'], {}), "('./tests/test_CameraCorrected/real_captured_frame_withText.npy', frame)\n", (6122, 6194), True, 'import numpy as np\n'), ((6453, 6489), 'camera_fusion.CameraCorrected', 'camera_fusion.CameraCorrected', (['(0)', '(11)'], {}), '(0, 11)\n', (6482, 6489), False, 'import camera_fusion\n'), ((6494, 6515), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (6507, 6515), False, 'import shutil\n'), ((6520, 6575), 'shutil.copytree', 'shutil.copytree', (['"""./tests/test_CameraCorrected"""', '"""data"""'], {}), "('./tests/test_CameraCorrected', 'data')\n", (6535, 6575), False, 'import shutil\n'), ((6638, 6679), 'numpy.load', 'np.load', (['"""./data/real_captured_frame.npy"""'], {}), "('./data/real_captured_frame.npy')\n", (6645, 6679), 
True, 'import numpy as np\n'), ((6874, 6925), 'numpy.load', 'np.load', (['"""./data/correct_markers_posture_frame.npy"""'], {}), "('./data/correct_markers_posture_frame.npy')\n", (6881, 6925), True, 'import numpy as np\n'), ((6939, 7006), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['frame', 'correct_markers_posture_frame'], {}), '(frame, correct_markers_posture_frame)\n', (6968, 7006), True, 'import numpy as np\n'), ((7011, 7032), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (7024, 7032), False, 'import shutil\n'), ((7130, 7166), 'camera_fusion.CameraCorrected', 'camera_fusion.CameraCorrected', (['(0)', '(11)'], {}), '(0, 11)\n', (7159, 7166), False, 'import camera_fusion\n'), ((7171, 7192), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (7184, 7192), False, 'import shutil\n'), ((7197, 7252), 'shutil.copytree', 'shutil.copytree', (['"""./tests/test_CameraCorrected"""', '"""data"""'], {}), "('./tests/test_CameraCorrected', 'data')\n", (7212, 7252), False, 'import shutil\n'), ((7315, 7356), 'numpy.load', 'np.load', (['"""./data/real_captured_frame.npy"""'], {}), "('./data/real_captured_frame.npy')\n", (7322, 7356), True, 'import numpy as np\n'), ((7547, 7596), 'numpy.load', 'np.load', (['"""./data/correct_board_posture_frame.npy"""'], {}), "('./data/correct_board_posture_frame.npy')\n", (7554, 7596), True, 'import numpy as np\n'), ((7610, 7675), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['frame', 'correct_board_posture_frame'], {}), '(frame, correct_board_posture_frame)\n', (7639, 7675), True, 'import numpy as np\n'), ((7680, 7701), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (7693, 7701), False, 'import shutil\n'), ((7832, 7868), 'camera_fusion.CameraCorrected', 'camera_fusion.CameraCorrected', (['(0)', '(11)'], {}), '(0, 11)\n', (7861, 7868), False, 'import camera_fusion\n'), ((7873, 7894), 'shutil.rmtree', 'shutil.rmtree', 
(['"""data"""'], {}), "('data')\n", (7886, 7894), False, 'import shutil\n'), ((7899, 7954), 'shutil.copytree', 'shutil.copytree', (['"""./tests/test_CameraCorrected"""', '"""data"""'], {}), "('./tests/test_CameraCorrected', 'data')\n", (7914, 7954), False, 'import shutil\n'), ((8017, 8058), 'numpy.load', 'np.load', (['"""./data/real_captured_frame.npy"""'], {}), "('./data/real_captured_frame.npy')\n", (8024, 8058), True, 'import numpy as np\n'), ((8231, 8330), 'numpy.save', 'np.save', (['"""./tests/test_CameraCorrected/correct_board_and_markers_posture_frame.npy"""', 'frame'], {}), "(\n './tests/test_CameraCorrected/correct_board_and_markers_posture_frame.npy',\n frame)\n", (8238, 8330), True, 'import numpy as np\n'), ((8351, 8419), 'numpy.save', 'np.save', (['"""./data/correct_board_and_markers_posture_frame.npy"""', 'frame'], {}), "('./data/correct_board_and_markers_posture_frame.npy', frame)\n", (8358, 8419), True, 'import numpy as np\n'), ((8478, 8539), 'numpy.load', 'np.load', (['"""./data/correct_board_and_markers_posture_frame.npy"""'], {}), "('./data/correct_board_and_markers_posture_frame.npy')\n", (8485, 8539), True, 'import numpy as np\n'), ((8553, 8630), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['frame', 'correct_board_and_markers_posture_frame'], {}), '(frame, correct_board_and_markers_posture_frame)\n', (8582, 8630), True, 'import numpy as np\n'), ((8644, 8665), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (8657, 8665), False, 'import shutil\n'), ((8753, 8789), 'camera_fusion.CameraCorrected', 'camera_fusion.CameraCorrected', (['(0)', '(11)'], {}), '(0, 11)\n', (8782, 8789), False, 'import camera_fusion\n'), ((8857, 8920), 'numpy.load', 'np.load', (['"""./tests/test_CameraCorrected/real_captured_frame.npy"""'], {}), "('./tests/test_CameraCorrected/real_captured_frame.npy')\n", (8864, 8920), True, 'import numpy as np\n'), ((9340, 9376), 'camera_fusion.CameraCorrected', 
'camera_fusion.CameraCorrected', (['(0)', '(11)'], {}), '(0, 11)\n', (9369, 9376), False, 'import camera_fusion\n'), ((9381, 9402), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (9394, 9402), False, 'import shutil\n'), ((9407, 9462), 'shutil.copytree', 'shutil.copytree', (['"""./tests/test_CameraCorrected"""', '"""data"""'], {}), "('./tests/test_CameraCorrected', 'data')\n", (9422, 9462), False, 'import shutil\n'), ((9710, 9753), 'numpy.load', 'np.load', (['"""./data/real_undistored_frame.npy"""'], {}), "('./data/real_undistored_frame.npy')\n", (9717, 9753), True, 'import numpy as np\n'), ((9758, 9829), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['valid_frame_undistored', 'frame_undistored'], {}), '(valid_frame_undistored, frame_undistored)\n', (9787, 9829), True, 'import numpy as np\n'), ((9834, 9855), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (9847, 9855), False, 'import shutil\n'), ((9924, 9979), 'shutil.copytree', 'shutil.copytree', (['"""./tests/test_CameraCorrected"""', '"""data"""'], {}), "('./tests/test_CameraCorrected', 'data')\n", (9939, 9979), False, 'import shutil\n'), ((9988, 10024), 'camera_fusion.CameraCorrected', 'camera_fusion.CameraCorrected', (['(0)', '(11)'], {}), '(0, 11)\n', (10017, 10024), False, 'import camera_fusion\n'), ((10276, 10297), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (10289, 10297), False, 'import shutil\n'), ((10377, 10413), 'camera_fusion.CameraCorrected', 'camera_fusion.CameraCorrected', (['(0)', '(11)'], {}), '(0, 11)\n', (10406, 10413), False, 'import camera_fusion\n'), ((10437, 10458), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (10450, 10458), False, 'import shutil\n'), ((10463, 10518), 'shutil.copytree', 'shutil.copytree', (['"""./tests/test_CameraCorrected"""', '"""data"""'], {}), "('./tests/test_CameraCorrected', 'data')\n", (10478, 10518), False, 'import shutil\n'), ((10545, 10586), 
'numpy.load', 'np.load', (['"""./data/real_captured_frame.npy"""'], {}), "('./data/real_captured_frame.npy')\n", (10552, 10586), True, 'import numpy as np\n'), ((10770, 10837), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['c.current_frame', 'real_captured_frame'], {}), '(c.current_frame, real_captured_frame)\n', (10799, 10837), True, 'import numpy as np\n'), ((10842, 10863), 'shutil.rmtree', 'shutil.rmtree', (['"""data"""'], {}), "('data')\n", (10855, 10863), False, 'import shutil\n'), ((1022, 1038), 'time.sleep', 'time.sleep', (['(0.33)'], {}), '(0.33)\n', (1032, 1038), False, 'import time\n'), ((3961, 4048), 'unittest.mock.patch', 'mock.patch', (['"""camera_fusion.CameraCorrected.read"""'], {'return_value': 'real_captured_frame'}), "('camera_fusion.CameraCorrected.read', return_value=\n real_captured_frame)\n", (3971, 4048), True, 'import unittest.mock as mock\n'), ((5409, 5448), 'unittest.mock.patch', 'mock.patch', (['"""time.time"""'], {'return_value': '(0)'}), "('time.time', return_value=0)\n", (5419, 5448), True, 'import unittest.mock as mock\n'), ((5462, 5498), 'camera_fusion.CameraCorrected', 'camera_fusion.CameraCorrected', (['(0)', '(11)'], {}), '(0, 11)\n', (5491, 5498), False, 'import camera_fusion\n'), ((5622, 5664), 'unittest.mock.patch', 'mock.patch', (['"""time.time"""'], {'return_value': '(0.03)'}), "('time.time', return_value=0.03)\n", (5632, 5664), True, 'import unittest.mock as mock\n'), ((5744, 5818), 'numpy.load', 'np.load', (['"""./tests/test_CameraCorrected/real_captured_frame_with_30fps.npy"""'], {}), "('./tests/test_CameraCorrected/real_captured_frame_with_30fps.npy')\n", (5751, 5818), True, 'import numpy as np\n'), ((6250, 6322), 'numpy.load', 'np.load', (['"""./tests/test_CameraCorrected/real_captured_frame_withText.npy"""'], {}), "('./tests/test_CameraCorrected/real_captured_frame_withText.npy')\n", (6257, 6322), True, 'import numpy as np\n'), ((6689, 6776), 'unittest.mock.patch', 'mock.patch', 
(['"""camera_fusion.CameraCorrected.read"""'], {'return_value': 'real_captured_frame'}), "('camera_fusion.CameraCorrected.read', return_value=\n real_captured_frame)\n", (6699, 6776), True, 'import unittest.mock as mock\n'), ((7366, 7453), 'unittest.mock.patch', 'mock.patch', (['"""camera_fusion.CameraCorrected.read"""'], {'return_value': 'real_captured_frame'}), "('camera_fusion.CameraCorrected.read', return_value=\n real_captured_frame)\n", (7376, 7453), True, 'import unittest.mock as mock\n'), ((8068, 8155), 'unittest.mock.patch', 'mock.patch', (['"""camera_fusion.CameraCorrected.read"""'], {'return_value': 'real_captured_frame'}), "('camera_fusion.CameraCorrected.read', return_value=\n real_captured_frame)\n", (8078, 8155), True, 'import unittest.mock as mock\n'), ((238, 263), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (253, 263), False, 'import os\n'), ((9023, 9094), 'unittest.mock.patch', 'mock.patch', (['"""camera_fusion.CameraCorrected.calibrate_camera_correction"""'], {}), "('camera_fusion.CameraCorrected.calibrate_camera_correction')\n", (9033, 9094), True, 'import unittest.mock as mock\n'), ((9130, 9198), 'unittest.mock.patch', 'mock.patch', (['"""camera_fusion.CameraCorrected.read"""'], {'return_value': 'frame'}), "('camera_fusion.CameraCorrected.read', return_value=frame)\n", (9140, 9198), True, 'import unittest.mock as mock\n'), ((9591, 9632), 'numpy.load', 'np.load', (['"""./data/real_captured_frame.npy"""'], {}), "('./data/real_captured_frame.npy')\n", (9598, 9632), True, 'import numpy as np\n'), ((10179, 10220), 'numpy.load', 'np.load', (['"""./data/real_captured_frame.npy"""'], {}), "('./data/real_captured_frame.npy')\n", (10186, 10220), True, 'import numpy as np\n')]
|
from sqlalchemy.engine import create_engine
from sqlalchemy.orm.session import Session
def setup_module():
global transaction, connection, engine
# Connect to the database and create the schema within a transaction
engine = create_engine('postgresql:///yourdb')
connection = engine.connect()
transaction = connection.begin()
# If you want to insert fixtures to the DB, do it here
def teardown_module():
# Roll back the top level transaction and disconnect from the database
transaction.rollback()
connection.close()
engine.dispose()
class DatabaseTest:
def setup(self):
self.__transaction = connection.begin_nested()
self.session = Session(connection)
def teardown(self):
self.session.close()
self.__transaction.rollback()
|
[
"sqlalchemy.orm.session.Session",
"sqlalchemy.engine.create_engine"
] |
[((240, 277), 'sqlalchemy.engine.create_engine', 'create_engine', (['"""postgresql:///yourdb"""'], {}), "('postgresql:///yourdb')\n", (253, 277), False, 'from sqlalchemy.engine import create_engine\n'), ((701, 720), 'sqlalchemy.orm.session.Session', 'Session', (['connection'], {}), '(connection)\n', (708, 720), False, 'from sqlalchemy.orm.session import Session\n')]
|
import base64
import json
import logging
import mimetypes
import email.encoders as encoder
import socket
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from django.conf import settings
from google.oauth2 import service_account
from googleapiclient.discovery import build
from django.core.mail.backends.smtp import EmailBackend
logger = logging.getLogger(__name__)
class GmailApiBackend(EmailBackend):
def __init__(
self,
fail_silently=False,
**kwargs
):
super().__init__(fail_silently=fail_silently)
self.connection = build('gmail', 'v1', cache_discovery=False, credentials=get_credentials())
def send_messages(self, email_messages):
new_conn_created = self.open()
if not self.connection or new_conn_created is None:
return 0
num_sent = 0
for email_message in email_messages:
message = create_message(email_message)
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent
def _send(self, email_message):
try:
self.connection.users().messages().send(userId=settings.GMAIL_USER, body=email_message).execute()
except Exception as error:
logger.error('Error sending email', error)
if settings.EMAIL_BACKEND and settings.EMAIL_BACKEND == "mailer.backend.DbBackend":
# If using "django-mailer" https://github.com/pinax/django-mailer, tt marks the related message as
# deferred only for some exceptions, so we raise one of them to save the error on the db
raise socket.error(error)
else:
raise
return True
def get_credentials():
credentials = service_account.Credentials.from_service_account_info(
json.loads(settings.GOOGLE_SERVICE_ACCOUNT), scopes=settings.GMAIL_SCOPES, subject=settings.GMAIL_USER)
return credentials
def create_message(email_message):
if email_message.attachments:
message = MIMEMultipart()
msg = MIMEText(email_message.body, email_message.content_subtype)
message.attach(msg)
else:
message = MIMEText(email_message.body, email_message.content_subtype)
message['to'] = ','.join(map(str, email_message.to))
message['from'] = email_message.from_email
if email_message.reply_to:
message['reply-to'] = ','.join(map(str, email_message.reply_to))
if email_message.cc:
message['cc'] = ','.join(map(str, email_message.cc))
if email_message.bcc:
message['bcc'] = ','.join(map(str, email_message.bcc))
message['subject'] = str(email_message.subject)
if email_message.attachments:
for attachment in email_message.attachments:
content_type, encoding = mimetypes.guess_type(attachment[0])
if content_type is None or encoding is not None:
content_type = 'application/octet-stream'
main_type, sub_type = content_type.split('/', 1)
if main_type == 'text':
fp = open(attachment[1], 'rb')
msg = MIMEText(fp.read(), _subtype=sub_type)
fp.close()
elif main_type == 'image':
fp = open(attachment[1], 'rb')
msg = MIMEImage(fp.read(), _subtype=sub_type)
fp.close()
elif main_type == 'audio':
fp = open(attachment[1], 'rb')
msg = MIMEAudio(fp.read(), _subtype=sub_type)
fp.close()
elif type(attachment[1]) is bytes:
msg = MIMEBase(main_type, sub_type)
msg.set_payload(attachment[1])
else:
fp = open(attachment[1], 'rb')
msg = MIMEBase(main_type, sub_type)
msg.set_payload(fp.read())
fp.close()
filename = attachment[0]
msg.add_header('Content-Disposition', 'attachment', filename=filename)
encoder.encode_base64(msg)
message.attach(msg)
b64_bytes = base64.urlsafe_b64encode(message.as_bytes())
b64_string = b64_bytes.decode()
return {'raw': b64_string}
|
[
"json.loads",
"email.mime.text.MIMEText",
"email.mime.base.MIMEBase",
"email.encoders.encode_base64",
"email.mime.multipart.MIMEMultipart",
"socket.error",
"logging.getLogger",
"mimetypes.guess_type"
] |
[((490, 517), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (507, 517), False, 'import logging\n'), ((2037, 2080), 'json.loads', 'json.loads', (['settings.GOOGLE_SERVICE_ACCOUNT'], {}), '(settings.GOOGLE_SERVICE_ACCOUNT)\n', (2047, 2080), False, 'import json\n'), ((2253, 2268), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', ([], {}), '()\n', (2266, 2268), False, 'from email.mime.multipart import MIMEMultipart\n'), ((2283, 2342), 'email.mime.text.MIMEText', 'MIMEText', (['email_message.body', 'email_message.content_subtype'], {}), '(email_message.body, email_message.content_subtype)\n', (2291, 2342), False, 'from email.mime.text import MIMEText\n'), ((2399, 2458), 'email.mime.text.MIMEText', 'MIMEText', (['email_message.body', 'email_message.content_subtype'], {}), '(email_message.body, email_message.content_subtype)\n', (2407, 2458), False, 'from email.mime.text import MIMEText\n'), ((3019, 3054), 'mimetypes.guess_type', 'mimetypes.guess_type', (['attachment[0]'], {}), '(attachment[0])\n', (3039, 3054), False, 'import mimetypes\n'), ((4223, 4249), 'email.encoders.encode_base64', 'encoder.encode_base64', (['msg'], {}), '(msg)\n', (4244, 4249), True, 'import email.encoders as encoder\n'), ((1851, 1870), 'socket.error', 'socket.error', (['error'], {}), '(error)\n', (1863, 1870), False, 'import socket\n'), ((3825, 3854), 'email.mime.base.MIMEBase', 'MIMEBase', (['main_type', 'sub_type'], {}), '(main_type, sub_type)\n', (3833, 3854), False, 'from email.mime.base import MIMEBase\n'), ((3989, 4018), 'email.mime.base.MIMEBase', 'MIMEBase', (['main_type', 'sub_type'], {}), '(main_type, sub_type)\n', (3997, 4018), False, 'from email.mime.base import MIMEBase\n')]
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mayan.apps.smart_settings.classes import Namespace
from .literals import DEFAULT_MAXIMUM_TITLE_LENGTH
namespace = Namespace(label=_('Appearance'), name='appearance')
setting_max_title_length = namespace.add_setting(
default=DEFAULT_MAXIMUM_TITLE_LENGTH,
global_name='APPEARANCE_MAXIMUM_TITLE_LENGTH', help_text=_(
'Maximum number of characters that will be displayed as the view '
'title.'
)
)
|
[
"django.utils.translation.ugettext_lazy"
] |
[((235, 250), 'django.utils.translation.ugettext_lazy', '_', (['"""Appearance"""'], {}), "('Appearance')\n", (236, 250), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((425, 500), 'django.utils.translation.ugettext_lazy', '_', (['"""Maximum number of characters that will be displayed as the view title."""'], {}), "('Maximum number of characters that will be displayed as the view title.')\n", (426, 500), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
"""
Functions to plot data using the `cartopy` library.
These require the `shapely` and `cartopy` libraries to be installed.
CartoPy is sometimes difficult to install.
"""
import numpy as N
from cartopy import crs, feature
from shapely.geometry import Polygon
from ..error.axes import hyperbolic_axes
from ..stereonet import plane_errors, normal_errors
def fix_stereonet_coords(coords):
coords[:,1] *= -1
return coords
def cartopy_girdle(fit, **kw):
d = hyperbolic_axes(fit,**kw)
cm = N.diag(d)
sheets = {i: N.degrees(plane_errors(fit.axes, cm, sheet=i))
for i in ('upper','lower')}
geom = Polygon(sheets['upper'], [sheets['lower'][::-1]])
geometries = [geom]
return feature.ShapelyFeature(geometries, crs.PlateCarree())
def cartopy_normal(fit, **kw):
d = hyperbolic_axes(fit,**kw)
cm = N.diag(d)
upper = N.degrees(normal_errors(fit.axes, cm))
geom = Polygon(upper)
geometries = [geom]
return feature.ShapelyFeature(geometries, crs.PlateCarree())
|
[
"cartopy.crs.PlateCarree",
"numpy.diag",
"shapely.geometry.Polygon"
] |
[((504, 513), 'numpy.diag', 'N.diag', (['d'], {}), '(d)\n', (510, 513), True, 'import numpy as N\n'), ((625, 674), 'shapely.geometry.Polygon', 'Polygon', (["sheets['upper']", "[sheets['lower'][::-1]]"], {}), "(sheets['upper'], [sheets['lower'][::-1]])\n", (632, 674), False, 'from shapely.geometry import Polygon\n'), ((839, 848), 'numpy.diag', 'N.diag', (['d'], {}), '(d)\n', (845, 848), True, 'import numpy as N\n'), ((911, 925), 'shapely.geometry.Polygon', 'Polygon', (['upper'], {}), '(upper)\n', (918, 925), False, 'from shapely.geometry import Polygon\n'), ((745, 762), 'cartopy.crs.PlateCarree', 'crs.PlateCarree', ([], {}), '()\n', (760, 762), False, 'from cartopy import crs, feature\n'), ((996, 1013), 'cartopy.crs.PlateCarree', 'crs.PlateCarree', ([], {}), '()\n', (1011, 1013), False, 'from cartopy import crs, feature\n')]
|
# Copyright 2021 Dynatrace LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from typing import Union
import boto3
class SelfMonitoringContext:
def __init__(self, function_name):
self._function_name = function_name
self._kinesis_records_age = []
self._record_data_compressed_size = []
self._record_data_decompressed_size = []
self._log_entries_by_log_group = defaultdict(lambda: 0)
self._log_content_len_by_log_group = defaultdict(lambda: 0)
self._batches_prepared = 0
self._log_entries_prepared = 0
self._data_volume_prepared = 0
self._batches_delivered = 0
self._log_entries_delivered = 0
self._data_volume_delivered = 0
self._issue_count_by_type = defaultdict(lambda: 0)
self._log_content_trimmed = 0
self._log_attr_trimmed = 0
self._logs_age_min_sec = None
self._logs_age_avg_sec = None
self._logs_age_max_sec = None
self._requests_sent = 0
self._requests_durations_ms = []
self._requests_count_by_status_code = defaultdict(lambda: 0)
def kinesis_record_age(self, age_sec):
self._kinesis_records_age.append(age_sec)
def kinesis_record_decoded(self, record_data_compressed_size, record_data_decompressed_size):
self._record_data_compressed_size.append(record_data_compressed_size)
self._record_data_decompressed_size.append(record_data_decompressed_size)
def single_record_transformed(self, log_group, log_entries_count, log_content_len):
self._log_entries_by_log_group[log_group] += log_entries_count
self._log_content_len_by_log_group[log_group] += log_content_len
def batch_prepared(self, log_entries_count, data_volume):
self._batches_prepared += 1
self._log_entries_prepared += log_entries_count
self._data_volume_prepared += data_volume
def batch_delivered(self, log_entries_count, data_volume):
self._batches_delivered += 1
self._log_entries_delivered += log_entries_count
self._data_volume_delivered += data_volume
def issue(self, what_issue):
self._issue_count_by_type[what_issue] += 1
print("SFM: issue registered, type " + what_issue)
def log_content_trimmed(self):
self._log_content_trimmed += 1
def log_attr_trimmed(self):
self._log_attr_trimmed += 1
def logs_age(self, logs_age_min_sec, logs_age_avg_sec, logs_age_max_sec):
self._logs_age_min_sec = logs_age_min_sec
self._logs_age_avg_sec = logs_age_avg_sec
self._logs_age_max_sec = logs_age_max_sec
def request_sent(self):
self._requests_sent += 1
def request_finished_with_status_code(self, status_code, duration_ms):
self._requests_count_by_status_code[status_code] += 1
self._requests_durations_ms.append(duration_ms)
def _generate_metrics(self):
metrics = []
common_dimensions = [{
"Name": "function_name",
"Value": self._function_name,
}]
metrics.append(_prepare_cloudwatch_metric(
"Kinesis record age", self._kinesis_records_age, "Seconds", common_dimensions))
metrics.append(_prepare_cloudwatch_metric(
"Kinesis record.data compressed size", self._record_data_compressed_size, "Bytes", common_dimensions))
metrics.append(_prepare_cloudwatch_metric(
"Kinesis record.data decompressed size", self._record_data_decompressed_size, "Bytes", common_dimensions))
# TO BE RESTORED IN DIFFERENT WAY IN APM-306046
# please remove this then
# for log_group, log_entries_count in self._log_entries_by_log_group.items():
# metrics.append(_prepare_cloudwatch_metric(
# "Log entries by LogGroup", log_entries_count, "None",
# common_dimensions + [{"Name": "log_group", "Value": log_group}]
# ))
#
# for log_group, log_content_len in self._log_content_len_by_log_group.items():
# metrics.append(_prepare_cloudwatch_metric(
# "Log content length by LogGroup", log_content_len, "None",
# common_dimensions + [{"Name": "log_group", "Value": log_group}]
# ))
metrics.append(_prepare_cloudwatch_metric(
"Batches prepared", self._batches_prepared, "None", common_dimensions))
metrics.append(_prepare_cloudwatch_metric(
"Log entries prepared", self._log_entries_prepared, "None", common_dimensions))
metrics.append(_prepare_cloudwatch_metric(
"Data volume prepared", self._data_volume_prepared, "Bytes", common_dimensions))
metrics.append(_prepare_cloudwatch_metric(
"Batches delivered", self._batches_delivered, "None", common_dimensions))
metrics.append(_prepare_cloudwatch_metric(
"Log entries delivered", self._log_entries_delivered, "None", common_dimensions))
metrics.append(_prepare_cloudwatch_metric(
"Data volume delivered", self._data_volume_delivered, "Bytes", common_dimensions))
for issue, count in self._issue_count_by_type.items():
metrics.append(_prepare_cloudwatch_metric(
"Issues", count, "None",
common_dimensions + [{"Name": "type", "Value": issue}]
))
metrics.append(_prepare_cloudwatch_metric(
"Log content trimmed", self._log_content_trimmed, "None", common_dimensions))
metrics.append(_prepare_cloudwatch_metric(
"Log attr trimmed", self._log_attr_trimmed, "None", common_dimensions))
if self._logs_age_min_sec:
metrics.append(_prepare_cloudwatch_metric(
"Log age min", self._logs_age_min_sec, "Seconds", common_dimensions))
metrics.append(_prepare_cloudwatch_metric(
"Log age avg", self._logs_age_avg_sec, "Seconds", common_dimensions))
metrics.append(_prepare_cloudwatch_metric(
"Log age max", self._logs_age_max_sec, "Seconds", common_dimensions))
metrics.append(_prepare_cloudwatch_metric(
"Requests sent", self._requests_sent, "None", common_dimensions))
if self._requests_durations_ms:
metrics.append(_prepare_cloudwatch_metric(
"Requests duration", self._requests_durations_ms, "Milliseconds", common_dimensions))
for status_code, count in self._requests_count_by_status_code.items():
metrics.append(_prepare_cloudwatch_metric(
"Requests status code count", count, "None",
common_dimensions + [{"Name": "status_code", "Value": str(status_code)}]
))
return metrics
def push_sfm_to_cloudwatch(self):
metrics = self._generate_metrics()
cloudwatch = boto3.client('cloudwatch')
try:
for i in range(0, len(metrics), 20):
metrics_batch = metrics[i:(i + 20)]
cloudwatch.put_metric_data(MetricData=metrics_batch, Namespace='DT/LogsStreaming')
except Exception as e:
print("Print metrics on SFM push failure: " + str(metrics))
raise e
def _prepare_cloudwatch_metric(metric_name, value: Union[int, float, list], unit, dimensions) -> dict:
cw_metric = {
'MetricName': metric_name,
'Dimensions': dimensions,
'Unit': unit,
}
if isinstance(value, list):
cw_metric["Values"] = value
else:
cw_metric["Value"] = value
return cw_metric
|
[
"collections.defaultdict",
"boto3.client"
] |
[((966, 989), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (977, 989), False, 'from collections import defaultdict\n'), ((1034, 1057), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (1045, 1057), False, 'from collections import defaultdict\n'), ((1325, 1348), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (1336, 1348), False, 'from collections import defaultdict\n'), ((1657, 1680), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (1668, 1680), False, 'from collections import defaultdict\n'), ((7434, 7460), 'boto3.client', 'boto3.client', (['"""cloudwatch"""'], {}), "('cloudwatch')\n", (7446, 7460), False, 'import boto3\n')]
|
import argparse
import importlib
import os
from fairseq.models import MODEL_REGISTRY, ARCH_MODEL_INV_REGISTRY
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if not file.startswith('_') and not file.startswith('.') and (file.endswith('.py') or os.path.isdir(path)):
model_name = file[:file.find('.py')] if file.endswith('.py') else file
module = importlib.import_module('infoxlm.models.' + model_name)
# extra `model_parser` for sphinx
if model_name in MODEL_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_archs = parser.add_argument_group('Named architectures')
group_archs.add_argument('--arch', choices=ARCH_MODEL_INV_REGISTRY[model_name])
group_args = parser.add_argument_group('Additional command-line arguments')
MODEL_REGISTRY[model_name].add_args(group_args)
globals()[model_name + '_parser'] = parser
|
[
"argparse.ArgumentParser",
"importlib.import_module",
"os.path.isdir",
"os.path.dirname",
"os.path.join",
"os.listdir"
] |
[((191, 216), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (206, 216), False, 'import os\n'), ((229, 251), 'os.listdir', 'os.listdir', (['models_dir'], {}), '(models_dir)\n', (239, 251), False, 'import os\n'), ((262, 292), 'os.path.join', 'os.path.join', (['models_dir', 'file'], {}), '(models_dir, file)\n', (274, 292), False, 'import os\n'), ((491, 546), 'importlib.import_module', 'importlib.import_module', (["('infoxlm.models.' + model_name)"], {}), "('infoxlm.models.' + model_name)\n", (514, 546), False, 'import importlib\n'), ((381, 400), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (394, 400), False, 'import os\n'), ((638, 677), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (661, 677), False, 'import argparse\n')]
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script to convert HierText to TFExamples.
This script is only intended to run locally.
python3 data_preprocess/convert.py \
--gt_file=/path/to/gt.jsonl \
--img_dir=/path/to/image \
--out_file=/path/to/tfrecords/file-prefix
"""
import json
import os
import random
from absl import app
from absl import flags
import tensorflow as tf
import tqdm
import utils
_GT_FILE = flags.DEFINE_string('gt_file', None, 'Path to the GT file')
_IMG_DIR = flags.DEFINE_string('img_dir', None, 'Path to the image folder.')
_OUT_FILE = flags.DEFINE_string('out_file', None, 'Path for the tfrecords.')
_NUM_SHARD = flags.DEFINE_integer(
'num_shard', 100, 'The number of shards of tfrecords.')
def main(unused_argv) -> None:
annotations = json.load(open(_GT_FILE.value))['annotations']
random.shuffle(annotations)
n_sample = len(annotations)
n_shards = _NUM_SHARD.value
n_sample_per_shard = (n_sample - 1) // n_shards + 1
for shard in tqdm.tqdm(range(n_shards)):
output_path = f'{_OUT_FILE.value}-{shard:05}-{n_shards:05}.tfrecords'
annotation_subset = annotations[
shard * n_sample_per_shard : (shard + 1) * n_sample_per_shard]
with tf.io.TFRecordWriter(output_path) as file_writer:
for annotation in annotation_subset:
img_file_path = os.path.join(_IMG_DIR.value,
f"{annotation['image_id']}.jpg")
tfexample = utils.convert_to_tfe(img_file_path, annotation)
file_writer.write(tfexample)
if __name__ == '__main__':
flags.mark_flags_as_required(['gt_file', 'img_dir', 'out_file'])
app.run(main)
|
[
"absl.flags.mark_flags_as_required",
"utils.convert_to_tfe",
"random.shuffle",
"absl.flags.DEFINE_string",
"absl.app.run",
"absl.flags.DEFINE_integer",
"tensorflow.io.TFRecordWriter",
"os.path.join"
] |
[((987, 1046), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""gt_file"""', 'None', '"""Path to the GT file"""'], {}), "('gt_file', None, 'Path to the GT file')\n", (1006, 1046), False, 'from absl import flags\n'), ((1058, 1123), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""img_dir"""', 'None', '"""Path to the image folder."""'], {}), "('img_dir', None, 'Path to the image folder.')\n", (1077, 1123), False, 'from absl import flags\n'), ((1136, 1200), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""out_file"""', 'None', '"""Path for the tfrecords."""'], {}), "('out_file', None, 'Path for the tfrecords.')\n", (1155, 1200), False, 'from absl import flags\n'), ((1214, 1290), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_shard"""', '(100)', '"""The number of shards of tfrecords."""'], {}), "('num_shard', 100, 'The number of shards of tfrecords.')\n", (1234, 1290), False, 'from absl import flags\n'), ((1394, 1421), 'random.shuffle', 'random.shuffle', (['annotations'], {}), '(annotations)\n', (1408, 1421), False, 'import random\n'), ((2124, 2188), 'absl.flags.mark_flags_as_required', 'flags.mark_flags_as_required', (["['gt_file', 'img_dir', 'out_file']"], {}), "(['gt_file', 'img_dir', 'out_file'])\n", (2152, 2188), False, 'from absl import flags\n'), ((2191, 2204), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (2198, 2204), False, 'from absl import app\n'), ((1772, 1805), 'tensorflow.io.TFRecordWriter', 'tf.io.TFRecordWriter', (['output_path'], {}), '(output_path)\n', (1792, 1805), True, 'import tensorflow as tf\n'), ((1889, 1950), 'os.path.join', 'os.path.join', (['_IMG_DIR.value', 'f"""{annotation[\'image_id\']}.jpg"""'], {}), '(_IMG_DIR.value, f"{annotation[\'image_id\']}.jpg")\n', (1901, 1950), False, 'import os\n'), ((2008, 2055), 'utils.convert_to_tfe', 'utils.convert_to_tfe', (['img_file_path', 'annotation'], {}), '(img_file_path, annotation)\n', (2028, 2055), False, 'import utils\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-28 18:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0017_auto_20161123_2339'),
]
operations = [
migrations.CreateModel(
name='Dmis_reponse_tools',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ReportID', models.IntegerField()),
('RIT', models.CharField(max_length=255)),
('RDRT', models.CharField(max_length=255)),
('FACT', models.CharField(max_length=255)),
('ERU', models.CharField(max_length=255)),
('RFL', models.CharField(max_length=255)),
],
),
]
|
[
"django.db.models.CharField",
"django.db.models.IntegerField",
"django.db.models.AutoField"
] |
[((404, 497), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (420, 497), False, 'from django.db import migrations, models\n'), ((525, 546), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (544, 546), False, 'from django.db import migrations, models\n'), ((573, 605), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (589, 605), False, 'from django.db import migrations, models\n'), ((633, 665), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (649, 665), False, 'from django.db import migrations, models\n'), ((693, 725), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (709, 725), False, 'from django.db import migrations, models\n'), ((752, 784), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (768, 784), False, 'from django.db import migrations, models\n'), ((811, 843), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (827, 843), False, 'from django.db import migrations, models\n')]
|
import matplotlib.pyplot as plot
import numpy as np
#Function to get x and y coordinates from the first 10 clicks, then close the image.
def onclick(event):
    """Matplotlib button-press callback: record up to 10 crater-rim points.

    Appends the click's data coordinates to the module-level ``x`` and ``y``
    lists (prepared by ``circlefit``).  After the 10th point the handler
    disconnects itself via the module-level ``cid`` and closes the figure so
    that ``plot.show()`` returns.
    """
    # BUGFIX: clicks outside the axes deliver xdata/ydata == None, which
    # previously crashed on int(None) and appended None to the coordinate
    # lists, poisoning the circle fit.  Ignore such clicks entirely.
    if event.xdata is None or event.ydata is None:
        return
    x.append(event.xdata)
    y.append(event.ydata)
    print(len(x))
    # Echo the clicked position, truncated to integer pixel coordinates.
    xval = int(event.xdata)
    yval = int(event.ydata)
    print(str([xval, yval]))
    if len(x) == 10:
        event.canvas.mpl_disconnect(cid)
        print('DISCONNECT')
        plot.close()
'''
This script takes a DEM image as input, and allows the user to click 10 points along the rim. A circle is fit to the
points and the center coordinates are returned
'''
def circlefit(dem):
    """Interactively fit a circle to a crater rim in a DEM image.

    Displays *dem*, lets the user click 10 points along the rim (collected
    by the module-level ``onclick`` callback through the globals ``x``,
    ``y`` and ``cid``), then least-squares fits a circle to those points.

    Returns (xc, yc): the fitted circle centre coordinates (each a
    length-1 ndarray, as in the original implementation).
    """
    # x and y must be module-level so the onclick callback can append to them.
    global x, y
    x = []
    y = []
    # Show the DEM and collect exactly 10 rim clicks.
    plot.imshow(dem)
    fig = plot.gcf()
    fig.suptitle('Click 10 points on the crater rim to fit with a circle:')
    # Set up to run the function onclick every time the user clicks the image.
    global cid
    cid = fig.canvas.mpl_connect('button_press_event', onclick)
    plot.show()  # blocks until onclick closes the figure
    x = np.array(x)
    y = np.array(y)
    return _fit_circle_center(x, y)


def _fit_circle_center(x, y):
    """Least-squares (Kasa) circle fit; returns the centre (xc, yc).

    Solves  a*x + b*y + c = -(x**2 + y**2)  in the least-squares sense;
    the circle centre is then (-a/2, -b/2).
    """
    a1 = np.array([x, y, np.ones(np.shape(x))])
    a2 = np.array([-(x ** 2 + y ** 2)])
    a = np.linalg.lstsq(a1.T, a2.T, rcond=None)[0]
    xc = -0.5 * a[0]
    yc = -0.5 * a[1]
    return xc, yc
|
[
"matplotlib.pyplot.show",
"numpy.linalg.lstsq",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.close",
"numpy.shape",
"numpy.array",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf"
] |
[((723, 739), 'matplotlib.pyplot.imshow', 'plot.imshow', (['dem'], {}), '(dem)\n', (734, 739), True, 'import matplotlib.pyplot as plot\n'), ((749, 759), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (757, 759), True, 'import matplotlib.pyplot as plot\n'), ((770, 780), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (778, 780), True, 'import matplotlib.pyplot as plot\n'), ((1017, 1028), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (1026, 1028), True, 'import matplotlib.pyplot as plot\n'), ((1073, 1084), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1081, 1084), True, 'import numpy as np\n'), ((1093, 1104), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1101, 1104), True, 'import numpy as np\n'), ((1209, 1239), 'numpy.array', 'np.array', (['[-(x ** 2 + y ** 2)]'], {}), '([-(x ** 2 + y ** 2)])\n', (1217, 1239), True, 'import numpy as np\n'), ((411, 423), 'matplotlib.pyplot.close', 'plot.close', ([], {}), '()\n', (421, 423), True, 'import matplotlib.pyplot as plot\n'), ((1307, 1346), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['a1.T', 'a2.T'], {'rcond': 'None'}), '(a1.T, a2.T, rcond=None)\n', (1322, 1346), True, 'import numpy as np\n'), ((1185, 1196), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1193, 1196), True, 'import numpy as np\n')]
|
import logging
import coloredlogs
# Shared log-line layout: "[logger-name] LEVEL:message".
FORMAT = '[%(name)s] %(levelname)s:%(message)s'
# A single formatter instance is enough; handlers can share it.
FORMATTER = logging.Formatter(fmt=FORMAT)
def get_logger(name='default', level=logging.DEBUG, colored=False):
    """Return the named logger, configured at most once.

    Propagation is disabled and *level* is (re)applied on every call, but
    the stream handler (using the module-level FORMATTER) and the optional
    coloredlogs layer are attached only while the logger has no handlers
    yet, so repeated calls do not produce duplicate output.
    """
    log = logging.getLogger(name)
    log.propagate = False
    log.setLevel(level)
    if not log.handlers:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(fmt=FORMATTER)
        log.addHandler(stream_handler)
        if colored:
            coloredlogs.install(level=level, logger=log)
    return log
|
[
"logging.Formatter",
"logging.StreamHandler",
"coloredlogs.install",
"logging.getLogger"
] |
[((96, 125), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': 'FORMAT'}), '(fmt=FORMAT)\n', (113, 125), False, 'import logging\n'), ((209, 232), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (226, 232), False, 'import logging\n'), ((336, 359), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (357, 359), False, 'import logging\n'), ((464, 511), 'coloredlogs.install', 'coloredlogs.install', ([], {'level': 'level', 'logger': 'logger'}), '(level=level, logger=logger)\n', (483, 511), False, 'import coloredlogs\n')]
|
## https://notes.desy.de/s/ljPrespZd#Infrastructure--Cluster-login
import torch
## for the code to work, install torchvision
## $ python -m pip install --user -I --no-deps torchvision
import torchvision
from torchvision import datasets, transforms
## NB: in case torchvision cannot be found inside a jupyter notebook, fix the PYTHONPATH through
## import sys
## sys.path.append("/home/haicore-project-ws-hip-2021/mk7540/.local/lib/python3.8/site-packages/")
def load_data(
    somepath,
    norm_loc=(0.1307,),  ## mu of normal dist to normalize by
    norm_scale=(0.3081,),  ## sigma of normal dist to normalize by
    train_kwargs=None,
    test_kwargs=None,
    use_cuda=None,
):
    """load MNIST data and return train/test loader object

    Parameters
    ----------
    somepath : str or Path
        Directory where MNIST is stored (downloaded on first use).
    norm_loc, norm_scale : tuple of float
        Mean and std for transforms.Normalize (the defaults are the
        conventional MNIST statistics).
    train_kwargs, test_kwargs : dict, optional
        DataLoader options; default to batch_size 64 (shuffled) and
        batch_size 1000 respectively.
    use_cuda : bool, optional
        Defaults to "a CUDA device is visible" (evaluated per call).
        When true, the loaders pin memory and use a worker process.
    """
    # BUGFIX: the previous version used mutable dict default arguments and
    # mutated them via .update() below, so the CUDA-specific options leaked
    # into the defaults of every subsequent call.  Build fresh defaults
    # inside the function instead.
    if train_kwargs is None:
        train_kwargs = {"batch_size": 64, "shuffle": True}
    if test_kwargs is None:
        test_kwargs = {"batch_size": 1_000}
    if use_cuda is None:
        use_cuda = torch.cuda.device_count() > 0
    transform_ = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(norm_loc, norm_scale)]
    )
    train_dataset = datasets.MNIST(
        somepath, download=True, transform=transform_, train=True
    )
    test_dataset = datasets.MNIST(
        somepath, download=True, transform=transform_, train=False
    )
    if use_cuda:
        train_kwargs.update({"num_workers": 1, "pin_memory": True, "shuffle": True})
        test_kwargs.update({"num_workers": 1, "pin_memory": True, "shuffle": True})
    train_loader = torch.utils.data.DataLoader(train_dataset, **train_kwargs)
    test_loader = torch.utils.data.DataLoader(test_dataset, **test_kwargs)
    return train_loader, test_loader
import torch.nn as nn
import torch.nn.functional as F
class MyNetwork(nn.Module):
    """a very basic relu neural network involving conv, dense, max_pool and dropout layers"""

    def __init__(self):
        super(MyNetwork, self).__init__()
        # Module creation order is kept stable: it determines how the
        # default random initialisation consumes the RNG stream.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Return per-class log-probabilities, shape (batch, 10).

        The fc1 input size of 9216 implies a 28x28 single-channel input
        (presumably MNIST -- confirm against the data pipeline).
        """
        # Convolutional feature extractor: conv -> relu, twice, then pool.
        features = F.relu(self.conv1(x))
        features = F.relu(self.conv2(features))
        features = F.max_pool2d(features, 2)
        features = self.dropout1(features)
        # Dense classifier head on the flattened feature map.
        flat = torch.flatten(features, 1)
        hidden = F.relu(self.fc1(flat))
        hidden = self.dropout2(hidden)
        logits = self.fc2(hidden)
        return F.log_softmax(logits, dim=1)
from pathlib import Path
import torch.optim as optim
# from torch.utils.tensorboard import SummaryWriter
from tensorboardX import SummaryWriter
def main(somepath="./pytorch-data"):
    """load the data set and run a random init CNN on it

    Trains MyNetwork on MNIST for one epoch, logging the loss to a
    TensorBoard event file and checkpointing model + optimizer state
    after each epoch.
    """
    # is a GPU available?
    cuda_present = torch.cuda.is_available()
    ndevices = torch.cuda.device_count()
    use_cuda = cuda_present and ndevices > 0
    device = torch.device("cuda" if use_cuda else "cpu")  # "cuda:0" ... default device
    # "cuda:1" would be GPU index 1, "cuda:2" etc
    train_loader, test_loader = load_data(somepath, use_cuda=use_cuda)
    model = MyNetwork().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=1.0)
    max_nepochs = 1
    log_interval = 5
    # Snapshot of the first parameter tensor; used below to assert that
    # training actually changed the weights.
    init_params = list(model.parameters())[0].clone().detach()
    writer = SummaryWriter(log_dir="logs", comment="this is the test of SummaryWriter")
    model.train(True)
    chpfolder = Path("chkpts")
    if not chpfolder.is_dir():
        chpfolder.mkdir()
    try:
        for epoch in range(1, max_nepochs + 1):
            for batch_idx, (X, y) in enumerate(train_loader):
                X, y = X.to(device), y.to(device)
                # download from GPU to CPU: X_cpu = X.cpu()
                # download from GPU to CPU: X_cpu = X.to(torch.device("cpu"))
                # download from GPU to CPU: X_cpu = X.detach().numpy()
                optimizer.zero_grad()
                prediction = model(X)
                loss = F.nll_loss(prediction, y)
                loss.backward()
                optimizer.step()
                if batch_idx % log_interval == 0:
                    print(
                        "Train Epoch:",
                        epoch,
                        batch_idx * len(X),
                        len(train_loader.dataset),
                        loss.item(),
                    )
                if batch_idx % 10 == 0:
                    writer.add_scalar("Loss/train/batch10", loss.item(), batch_idx)
            # epoch finished: persist a checkpoint sufficient to resume
            # training (model + optimizer state).
            cpath = chpfolder / f"epoch-{epoch:03.0f}.pth"
            torch.save(
                {
                    "final_epoch": epoch,
                    "model_state_dict": model.state_dict(),
                    "optimizer_state_dict": optimizer.state_dict(),
                },
                cpath,
            )
            assert cpath.is_file() and cpath.stat().st_size > 0
    finally:
        # BUGFIX: the SummaryWriter was never closed before, leaking the
        # file handle and risking unflushed TensorBoard events.
        writer.close()
    final_params = list(model.parameters())[0].clone().detach()
    assert not torch.allclose(init_params, final_params)
    # when to reload chkp
    # payload = torch.load(cpath)
    # model = MyNetwork()
    # model.load_state_dict(payload['model_state_dict'])
    # continue learning/training after this


if __name__ == "__main__":
    main()
    print("Ok. Checkpoint on loading data reached.")
|
[
"torch.nn.Dropout",
"torch.flatten",
"tensorboardX.SummaryWriter",
"torch.utils.data.DataLoader",
"torch.allclose",
"torch.nn.Conv2d",
"torch.cuda.device_count",
"torch.nn.Linear",
"pathlib.Path",
"torch.cuda.is_available",
"torch.nn.functional.max_pool2d",
"torch.nn.functional.log_softmax",
"torch.device",
"torch.nn.functional.nll_loss",
"torch.nn.functional.relu",
"torchvision.transforms.Normalize",
"torchvision.datasets.MNIST",
"torchvision.transforms.ToTensor"
] |
[((1023, 1096), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['somepath'], {'download': '(True)', 'transform': 'transform_', 'train': '(True)'}), '(somepath, download=True, transform=transform_, train=True)\n', (1037, 1096), False, 'from torchvision import datasets, transforms\n'), ((1130, 1204), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['somepath'], {'download': '(True)', 'transform': 'transform_', 'train': '(False)'}), '(somepath, download=True, transform=transform_, train=False)\n', (1144, 1204), False, 'from torchvision import datasets, transforms\n'), ((1426, 1484), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {}), '(train_dataset, **train_kwargs)\n', (1453, 1484), False, 'import torch\n'), ((1503, 1559), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {}), '(test_dataset, **test_kwargs)\n', (1530, 1559), False, 'import torch\n'), ((2802, 2827), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2825, 2827), False, 'import torch\n'), ((2843, 2868), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2866, 2868), False, 'import torch\n'), ((2927, 2970), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (2939, 2970), False, 'import torch\n'), ((3337, 3411), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': '"""logs"""', 'comment': '"""this is the test of SummaryWriter"""'}), "(log_dir='logs', comment='this is the test of SummaryWriter')\n", (3350, 3411), False, 'from tensorboardX import SummaryWriter\n'), ((3451, 3465), 'pathlib.Path', 'Path', (['"""chkpts"""'], {}), "('chkpts')\n", (3455, 3465), False, 'from pathlib import Path\n'), ((733, 758), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (756, 758), False, 'import torch\n'), ((1866, 1888), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(32)', '(3)', '(1)'], {}), '(1, 32, 3, 1)\n', (1875, 
1888), True, 'import torch.nn as nn\n'), ((1910, 1930), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', '(3)'], {}), '(32, 64, 3)\n', (1919, 1930), True, 'import torch.nn as nn\n'), ((1956, 1972), 'torch.nn.Dropout', 'nn.Dropout', (['(0.25)'], {}), '(0.25)\n', (1966, 1972), True, 'import torch.nn as nn\n'), ((1997, 2012), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2007, 2012), True, 'import torch.nn as nn\n'), ((2033, 2053), 'torch.nn.Linear', 'nn.Linear', (['(9216)', '(128)'], {}), '(9216, 128)\n', (2042, 2053), True, 'import torch.nn as nn\n'), ((2073, 2091), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(10)'], {}), '(128, 10)\n', (2082, 2091), True, 'import torch.nn as nn\n'), ((2157, 2166), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2163, 2166), True, 'import torch.nn.functional as F\n'), ((2206, 2215), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2212, 2215), True, 'import torch.nn.functional as F\n'), ((2229, 2247), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (2241, 2247), True, 'import torch.nn.functional as F\n'), ((2289, 2308), 'torch.flatten', 'torch.flatten', (['x', '(1)'], {}), '(x, 1)\n', (2302, 2308), False, 'import torch\n'), ((2346, 2355), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2352, 2355), True, 'import torch.nn.functional as F\n'), ((2463, 2486), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (2476, 2486), True, 'import torch.nn.functional as F\n'), ((4947, 4988), 'torch.allclose', 'torch.allclose', (['init_params', 'final_params'], {}), '(init_params, final_params)\n', (4961, 4988), False, 'import torch\n'), ((929, 950), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (948, 950), False, 'from torchvision import datasets, transforms\n'), ((952, 994), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['norm_loc', 'norm_scale'], {}), '(norm_loc, 
norm_scale)\n', (972, 994), False, 'from torchvision import datasets, transforms\n'), ((4017, 4042), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['prediction', 'y'], {}), '(prediction, y)\n', (4027, 4042), True, 'import torch.nn.functional as F\n')]
|
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, Range1d, FuncTickFormatter, FixedTicker
from math import pi, floor
#ColourOptions = ["red","blue","green","black","yellow","purple"]
class Collision_BarChart(object):
    """Bokeh bar chart built from individual patch glyphs.

    Each bar is a rectangle patch backed by its own ColumnDataSource so
    individual heights can be updated live.  x labels may contain '\\n' to
    spread one label over several tick positions; the tick->label mapping
    is stored in a ColumnDataSource that a client-side FuncTickFormatter
    reads to render the labels.
    """

    def __init__(self, xVals, yVals, colours=None, width=None):
        self.xVals = xVals
        self.yVals = yVals
        self.colours = colours
        self.width = width
        self.create_BarChart()

    def create_BarChart(self):
        """Build the figure: one patch per bar, axis ranges and tick labels."""
        xVals = self.xVals
        yVals = self.yVals
        colours = self.colours
        width = self.width
        Max = 0
        Min = 0
        N = len(xVals)
        # create list of colours (default: all red; a single non-list
        # colour is broadcast to every bar)
        if colours is None:
            colours = list(xVals)
            for i in range(0, N):
                colours[i] = "red"
        else:
            if not isinstance(colours, list):
                colours = [colours]
            for i in range(1, N):
                colours.append(colours[0])
        # create list of widths (default: all bars one unit wide)
        if width is None:
            width = []
            for i in range(0, N):
                width.append(1)
        # initialise values for loop
        self.fig = figure(tools="")
        self.barSources = []
        x = 0
        places = []
        label_places = []
        index = {}
        for i in range(0, N):
            # add ColumnDataSource describing each bar (closed rectangle:
            # bottom-left, top-left, top-right, bottom-right)
            self.barSources.append(ColumnDataSource(data=dict(x=[x, x, x+width[i], x+width[i]],
                                                        y=[0, yVals[i], yVals[i], 0])))
            # update Max and Min for y_range
            if yVals[i]+1 > Max:
                Max = yVals[i]+1
            elif yVals[i] < 0 and yVals[i]-1 < Min:
                Min = yVals[i]-1
            # create bar
            self.fig.patch(x='x', y='y', fill_color=colours[i], source=self.barSources[i], line_color=None)
            br = xVals[i].find('\n')
            places.append(x+width[i]/2.0)
            if br == -1:
                # remember bar position
                label_places.append(x+width[i]/2.0)
                # remember label that should be written at that position;
                # keys are 100*tick so they survive string conversion
                index[str(int(100*(x+width[i]/2.0)))] = [xVals[i]]
            else:
                # multi-line label: place each line at its own tick position
                label = []
                while br != -1:
                    label.append(xVals[i][0:br])
                    xVals[i] = xVals[i][br+1:]
                    br = xVals[i].find('\n')
                label.append(xVals[i])
                # NOTE: N is reused here for the line count; harmless since
                # the outer range() was already evaluated, but it shadows
                # the bar count.
                N = len(label)
                for j in range(0, N):
                    index[str(int(100*(x+width[i]*(j+1)/(N+1.0))))] = [label[j]]
                    label_places.append((floor(100*(x+width[i]*(j+1)/(N+1.0)))/100.0))
            # increase x
            x += width[i]+1
        # set figure properties
        self.fig.x_range = Range1d(-1, x)
        self.fig.y_range = Range1d(Min, Max)
        self.fig.grid.visible = False
        self.fig.xaxis.major_label_text_font_size = "14pt"
        self.fig.xaxis.major_tick_line_color = None
        self.fig.xaxis.major_label_orientation = pi/2
        self.fig.yaxis.major_label_orientation = pi/2
        self.fig.yaxis.axis_label = "Kinetic Energy ( Joule )"
        self.fig.toolbar.logo = None
        # only give x ticks at bars
        self.fig.xaxis[0].ticker = FixedTicker(ticks=label_places)
        # save vals in ColumnDataSource so the JS formatter can use it
        index_obj = ColumnDataSource(data=index)
        # client-side formatter: look up the label stored for this tick
        ticker_func_JS = """
        var idx = tick*100;
        return labels.data[idx.toString()]
        """
        self.index_obj = index_obj
        self.fig.xaxis[0].formatter = FuncTickFormatter(code=ticker_func_JS, args=dict(labels=self.index_obj))

    def change_label(self):
        """Overwrite the tick labels (hard-coded for the 3-bar layout)."""
        self.index_obj.data = {'50': [self.xVals[0]], '250': [self.xVals[1]], '450': [self.xVals[2]]}

    def setTitle(self, title):
        self.fig.title = title

    def getFig(self):
        return self.fig

    # operator[]: expose the per-bar data dicts.
    # BUGFIX: the original defined __getItem__/__setItem__ (wrong
    # capitalisation), so `chart[i]` raised TypeError -- Python special
    # methods must be lower-case.  The old camelCase names are kept as
    # aliases for any caller that invoked them explicitly.
    def __getitem__(self, key):
        return self.barSources[key].data

    def __setitem__(self, key, value):
        self.barSources[key].data = value

    __getItem__ = __getitem__

    def __setItem__(self, key):
        # legacy (broken) setter signature: preserved verbatim -- it
        # behaves like the old version and returns the data instead of
        # setting anything.
        return self.barSources[key].data

    def setHeight(self, key, height):
        """Change one bar's height in place, keeping its x extent."""
        self.barSources[key].data = dict(x=list(self.barSources[key].data['x']), y=[0, height, height, 0])

    def Height(self, height):
        self.fig.height = height

    def Width(self, width):
        self.fig.width = width
|
[
"bokeh.models.ColumnDataSource",
"bokeh.plotting.figure",
"math.floor",
"bokeh.models.Range1d",
"bokeh.models.FixedTicker"
] |
[((1208, 1224), 'bokeh.plotting.figure', 'figure', ([], {'tools': '""""""'}), "(tools='')\n", (1214, 1224), False, 'from bokeh.plotting import figure\n'), ((2807, 2821), 'bokeh.models.Range1d', 'Range1d', (['(-1)', 'x'], {}), '(-1, x)\n', (2814, 2821), False, 'from bokeh.models import ColumnDataSource, Range1d, FuncTickFormatter, FixedTicker\n'), ((2846, 2863), 'bokeh.models.Range1d', 'Range1d', (['Min', 'Max'], {}), '(Min, Max)\n', (2853, 2863), False, 'from bokeh.models import ColumnDataSource, Range1d, FuncTickFormatter, FixedTicker\n'), ((3277, 3308), 'bokeh.models.FixedTicker', 'FixedTicker', ([], {'ticks': 'label_places'}), '(ticks=label_places)\n', (3288, 3308), False, 'from bokeh.models import ColumnDataSource, Range1d, FuncTickFormatter, FixedTicker\n'), ((3410, 3438), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'index'}), '(data=index)\n', (3426, 3438), False, 'from bokeh.models import ColumnDataSource, Range1d, FuncTickFormatter, FixedTicker\n'), ((2644, 2693), 'math.floor', 'floor', (['(100 * (x + width[i] * (j + 1) / (N + 1.0)))'], {}), '(100 * (x + width[i] * (j + 1) / (N + 1.0)))\n', (2649, 2693), False, 'from math import pi, floor\n')]
|
import sqlite3

# NOTE(review): plaintext placeholder master password -- replace before
# real use, ideally with a salted hash comparison instead of a constant.
MASTER_PASSWORD = "<PASSWORD>"
# Gate the whole program behind the master password prompt.
senha = input("Insira sua senha master: ")
if senha != MASTER_PASSWORD:
    print("Senha inválida! Encerrando ...")
    exit()
# Open (or create) the local password database and ensure the table exists.
conn = sqlite3.connect('password.db')
cursor = conn.cursor()
cursor.execute('''
    CREATE TABLE IF NOT EXISTS users(
        service TEXT NOT NULL,
        username TEXT NOT NULL,
        password TEXT NOT NULL
    );
''')
def menu():
    """Print the interactive menu of available actions."""
    menu_lines = (
        "***************************************",
        "*   i  :  inserir nova senha          *",
        "*   l  :  listar serviços salvos      *",
        "*   r  :  recuperar uma senha         *",
        "*   s  :  sair                        *",
        "***************************************",
    )
    for line in menu_lines:
        print(line)
def get_password(service):
    """Print every (username, password) pair stored for *service*.

    BUGFIXES vs the previous version:
    - the query interpolated *service* straight into the SQL string,
      allowing SQL injection; it is now a parameterised query.
    - the "not found" branch tested ``cursor.rowcount == 0``, but sqlite3
      leaves rowcount at -1 for SELECT statements, so that message could
      never be shown; we now check the fetched rows instead.
    """
    cursor.execute(
        'SELECT username, password FROM users WHERE service = ?',
        (service,),
    )
    rows = cursor.fetchall()
    if not rows:
        print("Serviço não cadastrado(use 'l' para verificar os serviços).")
    else:
        for user in rows:
            print(user)
def insert_password(service, username, password):
    """Store one credential row for *service* and commit immediately.

    BUGFIX: the previous version built the INSERT with an f-string,
    allowing SQL injection through any of the three fields; the values
    are now bound as query parameters.
    """
    cursor.execute(
        'INSERT INTO users (service, username, password) VALUES (?, ?, ?)',
        (service, username, password),
    )
    conn.commit()
def show_password():
    """Print the name of every service that has a stored credential."""
    cursor.execute('''
        SELECT service FROM users;
    ''')
    rows = cursor.fetchall()
    for row in rows:
        print(row)
# Simple REPL: show the menu, dispatch on the chosen option until 's'.
while True:
    menu()
    op = input("O que deseja fazer ?")
    if op not in ['l', 'i', 'r', 's']:
        print("Opção inválida")
        continue
    if op == 's':
        break
    if op == 'i':
        # gather the three fields for a new credential
        service = input('Qual o nome do serviço ? ')
        username = input('Qual o nome do usuário ? ')
        password = input('Qual a senha? ')
        insert_password(service, username, password)
    if op == 'l':
        show_password()
    if op == 'r':
        service = input('Qual o serviço para o qual quer a senha ?')
        get_password(service)
# Release the database connection on exit.
conn.close()
|
[
"sqlite3.connect"
] |
[((192, 222), 'sqlite3.connect', 'sqlite3.connect', (['"""password.db"""'], {}), "('password.db')\n", (207, 222), False, 'import sqlite3\n')]
|
"""
Created on Nov 21, 2013
@author: <NAME>
In this module you can find the :class:`MyPlotGrid` which is just
a :class:`PyQt5.QtGui.QScrollArea` with some additions.
More important is the :class:`MyPlotContent`.
It shows an overview of many :class:`src.myplotwidget.MyPlotWidget`
and manages them.
"""
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
from swan.widgets.plot_widget import MyPlotWidget
from swan.widgets.indicator_cell import IndicatorWidget
from numpy.random import choice
class MyPlotGrid(QtWidgets.QWidget):
    """Scroll-area wrapper around a :class:`MyPlotContent` grid.

    The actual plot management lives in ``self.child``; this widget only
    provides the scrollable viewport, layout and size policy.
    """

    def __init__(self, *args, **kwargs):
        QtWidgets.QWidget.__init__(self, *args, **kwargs)
        self.main_grid_layout = QtWidgets.QGridLayout()
        self.scroll_area = QtWidgets.QScrollArea(self)
        self.scroll_area.setWidgetResizable(True)
        # Always show both scroll bars so the layout does not jump when
        # the grid grows past the viewport.
        self.scroll_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        self.scroll_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        self.child = MyPlotContent(self)
        self.scroll_area.setWidget(self.child)
        self.main_grid_layout.addWidget(self.scroll_area)
        self.setLayout(self.main_grid_layout)
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.MinimumExpanding)

    def minimumSizeHint(self) -> QtCore.QSize:
        # Sensible default size so the grid is usable before any resize.
        return QtCore.QSize(600, 400)
class MyPlotContent(QtWidgets.QWidget):
    """
    A class that manages :class:`src.myplotwidget.MyPlotWidget`
    objects in a grid.
    The *args* and *kwargs* are passed to :class:`PyQt5.QtWidgets.QWidget`.
    """
    # Signals: a plot was (de)selected; any row/column indicator was
    # toggled; one plot's visibility changed (session_id, unit_id, visible).
    plot_selected = QtCore.pyqtSignal(object, bool)
    indicator_toggle = QtCore.pyqtSignal()
    visibility_toggle = QtCore.pyqtSignal(int, int, bool)
    def __init__(self, *args, **kwargs):
        """
        **Properties**
        *_shape* (tuple of integer):
            The shape of the plot grid.
            Format: (rows, cols)
        *_plots* (list of :class:`src.myplotwidget.MyPlotWidget`):
            The plots in a list for iterating over them.
        *_selected* (list of :class:`MyPlotWidget`):
            A list containing the selected plots.
        *_rows* (dictionary):
            A dictionary containing the row as key and a list
            of plots as value for the plots in that row.
        *_cols* (dictionary):
            A dictionary containing the column as key and a list
            of plots as value for the plots in that column.
        *_yrange* (tuple of float):
            The y range all plots should have.
        """
        QtWidgets.QWidget.__init__(self, *args, **kwargs)
        self.grid_layout = QtWidgets.QGridLayout(self)
        self._shape = None
        self._plots = []
        self._indicators = []
        self._selected = []
        self._rows = {}
        self._cols = {}
        self._yrange = (-0.001, 0.0006)
        self._xrange = (0, 0)
        # Second selected plot (used for pairwise selection in a column).
        self._second_select = None
        # Base cell size in pixels; zoom_in/zoom_out adjust it.
        self._width = 60
        self._height = 45
        self._constant_dimension = 75
        # Semi-transparent gray used for background waveform overlays.
        self._plot_gray = QtGui.QColor(180, 180, 180, 85)
        self.sample_waveform_number = 500
        # Large stretch on a far row/column keeps the grid packed top-left.
        self.grid_layout.setColumnStretch(1000, 1000)
        self.grid_layout.setRowStretch(1000, 1000)
        self.grid_layout.setHorizontalSpacing(1)
        self.grid_layout.setVerticalSpacing(1)
    def make_plots(self, rows, cols, dates=None):
        """
        Creates a plot grid of the given shape.
        **Arguments**
        *rows* (integer):
            The number of rows of the grid.
        *cols* (integer):
            The number of columns of the grid.
        """
        self.delete_plots()
        self._shape = (rows, cols)
        self._plots = []
        self._indicators = []
        self._rows = {}
        self._cols = {}
        # Top-left corner cell explaining the grid axes (sessions across,
        # units down); it is not clickable.
        pivot_indicator = IndicatorWidget("Sessions (dd.mm.yy)\n\u2192\n\n\u2193 Units",
                                          indicator_type='pivot', position=None,
                                          width=self._width, height=self._height,
                                          const_dim=self._constant_dimension)
        pivot_indicator.responsive = False
        self.grid_layout.addWidget(pivot_indicator, 0, 0, QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
        # Row headers: one indicator per unit.
        for global_unit_id in range(rows):
            iw = IndicatorWidget(
                str(global_unit_id + 1), indicator_type='unit', position=global_unit_id,
                width=self._width, height=self._height, const_dim=self._constant_dimension
            )
            self._indicators.append(iw)
            iw.select_indicator.connect(self.indicator_toggled)
            self.grid_layout.addWidget(iw, global_unit_id + 1, 0, QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
        # Column headers: one indicator per session, labelled with the
        # session date when available.
        for session_id in range(cols):
            if dates is not None:
                iw = IndicatorWidget(
                    str(session_id + 1) + " (" + str(dates[session_id].strftime("%d.%m.%y")) + ")",
                    indicator_type='session', position=session_id,
                    width=self._width, height=self._height, const_dim=self._constant_dimension
                )
            else:
                iw = IndicatorWidget(
                    str(session_id), indicator_type='session', position=session_id,
                    width=self._width, height=self._height, const_dim=self._constant_dimension
                )
            self._indicators.append(iw)
            iw.select_indicator.connect(self.indicator_toggled)
            self.grid_layout.addWidget(iw, 0, session_id + 1, QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
        # Body: one MyPlotWidget per (unit, session) cell; note pos is
        # stored as (session_id, unit_id), i.e. (col, row).
        for unit_id in range(rows):
            self._rows[unit_id] = []
            for session_id in range(cols):
                if session_id not in self._cols:
                    self._cols[session_id] = []
                plot_widget = MyPlotWidget(width=self._width, height=self._height)
                self._plots.append(plot_widget)
                plot_widget.pos = (session_id, unit_id)
                self._rows[unit_id].append(plot_widget)
                self._cols[session_id].append(plot_widget)
                plot_widget.select_plot.connect(self.select_plot)
                plot_widget.colour_strip_toggle.connect(self.toggle_indicator_colour)
                plot_widget.visibility_toggle.connect(self.toggle_plot_visibility)
                self.grid_layout.addWidget(plot_widget, unit_id + 1, session_id + 1,
                                           QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
        return self._plots
    @QtCore.pyqtSlot(object, int)
    def toggle_indicator_colour(self, colour, i):
        """Show *colour* on row indicator *i* iff any plot in that row has data."""
        iw = next((x for x in self._indicators if x.row == i))
        # NOTE(review): this reads `plot.has_plot` but do_plot below sets
        # `hasPlot` -- one of the two spellings is likely a typo; confirm
        # against MyPlotWidget's attribute name.
        if any(plot.has_plot for plot in self._rows[i]):
            iw.toggle_colour_strip(colour)
        else:
            iw.toggle_colour_strip(None)
    @QtCore.pyqtSlot(object)
    def indicator_toggled(self, indicator):
        """Enable/disable a whole row or column when its header is toggled."""
        row = indicator.row
        col = indicator.col
        indicator_type = indicator.indicator_type
        # Collect the plots governed by this indicator (pos is (col, row)).
        if indicator_type == 'unit':
            plot_widgets = [pw for pw in self._plots if pw.pos[1] == row]
        elif indicator_type == 'session':
            plot_widgets = [pw for pw in self._plots if pw.pos[0] == col]
        else:
            plot_widgets = []
        for pw in plot_widgets:
            if not indicator.selected:
                if indicator_type == 'session':
                    pw.enable("col")
                elif indicator_type == 'unit':
                    pw.enable("row")
            else:
                pw.disable()
                # Remember which axis disabled the plot so re-enabling one
                # axis does not override the other.
                if indicator_type == 'session':
                    pw.inhibited_by_col = True
                elif indicator_type == 'unit':
                    pw.inhibited_by_row = True
        self.indicator_toggle.emit()
    def toggle_plot_visibility(self, session_id, unit_id, visible):
        # Forward per-plot visibility changes to listeners of this widget.
        self.visibility_toggle.emit(session_id, unit_id, visible)
    def delete_plots(self):
        """
        Deletes all plots.
        """
        for p in self._plots:
            p.close()
        for i in self._indicators:
            i.close()
    def clear_plots(self):
        """
        Clears all plots.
        """
        for p in self._plots:
            p.clear_()
            p.enable("all")
        for i in self._indicators:
            i.colourStrip.hide()
            # Reset every indicator back to the selected state.
            if not i.selected:
                i._bg = i.bgs["selected"]
                i.set_background(i._bg)
                i.selected = True
    def do_plot(self, vum, data):
        """
        Plots data on all plots.
        **Arguments**
        *vum* (:class:`src.virtualunitmap.VirtualUnitMap`):
            Is needed to get the mapping.
        *data* (:class:`src.neodata.NeoData`):
            Is needed to get the data.
        """
        active = vum.get_active()
        for session in range(len(active)):
            for global_unit_id in range(len(active[session])):
                plot_widget = self.find_plot(global_unit_id, session)
                # Only redraw plots explicitly flagged for update.
                if plot_widget.to_be_updated:
                    plot_widget.clear_()
                    pen_colour = vum.get_colour(global_unit_id)
                    plot_widget.default_pen_colour = pen_colour
                    if active[session][global_unit_id]:
                        unit = vum.get_realunit(session, global_unit_id, data)
                        mean_waveform = data.get_data("average", unit)
                        # all_waveforms = data.get_data("all", unit)
                        # try:
                        #     plot_widget.plot_many(all_waveforms[choice(all_waveforms.shape[0],
                        #                                                size=self.sample_waveform_number,
                        #                                                replace=False)],
                        #                           self._plot_gray)
                        # except ValueError:
                        #     plot_widget.plot_many(all_waveforms, self._plot_gray)
                        plot_widget.plot(mean_waveform.magnitude, pen_colour)
                        # NOTE(review): sets `hasPlot`, while
                        # toggle_indicator_colour reads `has_plot` -- verify
                        # which spelling MyPlotWidget actually defines.
                        plot_widget.hasPlot = True
                        plot_widget.toggle_colour_strip(pen_colour)
                        plot_widget.plot_widget.setXRange(0., data.get_wave_length(), padding=None, update=True)
                    else:
                        plot_widget.toggle_colour_strip(pen_colour)
                    plot_widget.to_be_updated = False
    def set_all_for_update(self):
        # Flag every plot so the next do_plot() pass redraws it.
        for plot in self._plots:
            plot.to_be_updated = True
    def find_plot(self, global_unit_id, session_id):
        """
        Finds a plot at a given position.
        **Arguments**
        *global_unit_id* (integer):
            The row index.
        *session_id* (integer):
            The column index.
        **Returns**: :class:`src.myplotwidget.MyPlotWidget`
            The plot at position (global_unit_id, session_id).
        """
        return self._rows[global_unit_id][session_id]
    @QtCore.pyqtSlot(object)
    def highlight_plot(self, item):
        """Toggle selection of the plot that owns the clicked curve *item*."""
        if item.opts['clickable']:
            unit_id = item.opts['unit_id']
            session = item.opts['session']
            p = self.find_plot(unit_id, session)
            self.select_plot(p, not p.selected)
    def select_plot(self, plot, select):
        """
        Selects or deselects a plot on the grid.
        If nothing is selected, the plot will be selected.
        Second selection is only allowed if the plot is in the same column
        as the other one and if not two are already selected.
        **Arguments**
        *plot* (:class:`src.myplotwidget.MyPlotWidget`):
            The plot to (de)select.
        *select* (boolean):
            Whether or not the plot should be selected.
        """
        if select:
            # Second selection: must share the column with the first.
            if len(self._selected) == 1 and self._selected[0].pos[0] == plot.pos[0]:
                self._selected.append(plot)
                plot.change_background(select)
                plot.selected = select
                self._second_select = plot
                self.plot_selected.emit(plot, select)
            # First selection: anything goes.
            elif not self._selected:
                self._selected.append(plot)
                plot.change_background(select)
                plot.selected = select
                self._second_select = None
                self.plot_selected.emit(plot, select)
            # Two already selected: replace the second with the new plot
            # (still restricted to the first plot's column).
            elif self._second_select is not None and self._selected[0].pos[0] == plot.pos[0]:
                self._selected.remove(self._second_select)
                self._second_select.change_background(not select)
                self._second_select.selected = not select
                self.plot_selected.emit(self._second_select, not select)
                self._second_select = plot
                self._selected.append(plot)
                plot.change_background(select)
                plot.selected = select
                self.plot_selected.emit(plot, select)
        elif plot in self._selected:
            # Deselect.
            self._selected.remove(plot)
            plot.change_background(select)
            plot.selected = select
            self.plot_selected.emit(plot, select)
    def reset_selection(self):
        """
        Resets the selection.
        """
        for p in self._selected:
            p.selected = False
            p.change_background(False)
        self._selected = []
    def get_selection(self):
        """
        **Returns**: list of :class:`src.myplotwidget.MyPlotWidget`
            The selected plots.
        """
        return self._selected
    def zoom_in(self, step=25.0):
        """
        Zooms in the plots.
        **Arguments**
        *step* (float):
            The zoom step percentage.
            Default: 25.0 percent.
        """
        for plot in self._plots:
            plot.change_size(width=step, height=step)
        for indicator in self._indicators:
            indicator.change_size(width=step, height=step)
    def zoom_out(self, step=25.0):
        """
        Zooms out the plots.
        **Arguments**
        *step* (float):
            The zoom step percentage.
            Default: 25.0 percent.
        """
        for plot in self._plots:
            plot.change_size(width=-step, height=-step)
        for indicator in self._indicators:
            indicator.change_size(width=-step, height=-step)
    def expand(self, step=150):
        """
        Increases the y range of the plots.
        **Arguments**
        *step* (integer):
            The expand step.
            Default: 150 pixels.
        """
        self.set_yranges(self._yrange[0] - step, self._yrange[1] + step)
    def collapse(self, step=150):
        """
        Decreases the y range of the plots.
        **Arguments**
        *step* (integer):
            The collapse step.
            Default: 150 pixels.
        """
        self.set_yranges(self._yrange[0] + step, self._yrange[1] - step)
    def set_yranges(self, min0, max0):
        """
        Sets the y ranges of all plots.
        **Arguments**
        *min0* (float):
            The minimal y.
        *max0* (float):
            The maximal y.
        """
        self._yrange = (min0, max0)
        for plot in self._plots:
            plot.plot_widget.setYRange(min0, max0, padding=None, update=True)
    def set_xranges(self, min0, max0):
        """
        Sets the y ranges of all plots.
        **Arguments**
        *min0* (float):
            The minimal y.
        *max0* (float):
            The maximal y.
        """
        self._xrange = (min0, max0)
        for plot in self._plots:
            plot.plot_widget.setXRange(min0, max0, padding=None, update=True)
    def set_tooltips(self, tooltips):
        """
        Sets tool tips for all plots.
        **Arguments**
        *tooltips* (dictionary):
            A dictionary containing for each column of the grid
            a list of string containing the tool tips for that column.
        """
        for col in self._cols.keys():
            tips = tooltips[col]
            plots = self._cols[col]
            for t, plot in zip(tips, plots):
                plot.set_tooltip(t)
    def swap_tooltips(self, p1, p2):
        """
        Swaps the tooltips for two plots that have been swapped.
        """
        tip1 = p1.toolTip()
        tip2 = p2.toolTip()
        p1.set_tooltip(tip2)
        p2.set_tooltip(tip1)
|
[
"pyqtgraph.Qt.QtWidgets.QWidget.__init__",
"pyqtgraph.Qt.QtCore.pyqtSignal",
"pyqtgraph.Qt.QtWidgets.QGridLayout",
"pyqtgraph.Qt.QtCore.QSize",
"pyqtgraph.Qt.QtGui.QColor",
"swan.widgets.plot_widget.MyPlotWidget",
"swan.widgets.indicator_cell.IndicatorWidget",
"pyqtgraph.Qt.QtWidgets.QScrollArea",
"pyqtgraph.Qt.QtCore.pyqtSlot"
] |
[((1593, 1624), 'pyqtgraph.Qt.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['object', 'bool'], {}), '(object, bool)\n', (1610, 1624), False, 'from pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((1648, 1667), 'pyqtgraph.Qt.QtCore.pyqtSignal', 'QtCore.pyqtSignal', ([], {}), '()\n', (1665, 1667), False, 'from pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((1692, 1725), 'pyqtgraph.Qt.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['int', 'int', 'bool'], {}), '(int, int, bool)\n', (1709, 1725), False, 'from pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((6656, 6684), 'pyqtgraph.Qt.QtCore.pyqtSlot', 'QtCore.pyqtSlot', (['object', 'int'], {}), '(object, int)\n', (6671, 6684), False, 'from pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((6959, 6982), 'pyqtgraph.Qt.QtCore.pyqtSlot', 'QtCore.pyqtSlot', (['object'], {}), '(object)\n', (6974, 6982), False, 'from pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((11276, 11299), 'pyqtgraph.Qt.QtCore.pyqtSlot', 'QtCore.pyqtSlot', (['object'], {}), '(object)\n', (11291, 11299), False, 'from pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((582, 631), 'pyqtgraph.Qt.QtWidgets.QWidget.__init__', 'QtWidgets.QWidget.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (608, 631), False, 'from pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((665, 688), 'pyqtgraph.Qt.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', ([], {}), '()\n', (686, 688), False, 'from pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((717, 744), 'pyqtgraph.Qt.QtWidgets.QScrollArea', 'QtWidgets.QScrollArea', (['self'], {}), '(self)\n', (738, 744), False, 'from pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((1317, 1339), 'pyqtgraph.Qt.QtCore.QSize', 'QtCore.QSize', (['(600)', '(400)'], {}), '(600, 400)\n', (1329, 1339), False, 'from pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((2626, 2675), 'pyqtgraph.Qt.QtWidgets.QWidget.__init__', 'QtWidgets.QWidget.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (2652, 2675), 
False, 'from pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((2703, 2730), 'pyqtgraph.Qt.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self'], {}), '(self)\n', (2724, 2730), False, 'from pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((3111, 3142), 'pyqtgraph.Qt.QtGui.QColor', 'QtGui.QColor', (['(180)', '(180)', '(180)', '(85)'], {}), '(180, 180, 180, 85)\n', (3123, 3142), False, 'from pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((3913, 4090), 'swan.widgets.indicator_cell.IndicatorWidget', 'IndicatorWidget', (['"""Sessions (dd.mm.yy)\n→\n\n↓ Units"""'], {'indicator_type': '"""pivot"""', 'position': 'None', 'width': 'self._width', 'height': 'self._height', 'const_dim': 'self._constant_dimension'}), '("""Sessions (dd.mm.yy)\n→\n\n↓ Units""", indicator_type=\n \'pivot\', position=None, width=self._width, height=self._height,\n const_dim=self._constant_dimension)\n', (3928, 4090), False, 'from swan.widgets.indicator_cell import IndicatorWidget\n'), ((5941, 5993), 'swan.widgets.plot_widget.MyPlotWidget', 'MyPlotWidget', ([], {'width': 'self._width', 'height': 'self._height'}), '(width=self._width, height=self._height)\n', (5953, 5993), False, 'from swan.widgets.plot_widget import MyPlotWidget\n')]
|
# coding: utf-8
"""
Criteo API Transition Swagger
This is used to help Criteo clients transition from MAPI to Criteo API # noqa: E501
The version of the OpenAPI document: 1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class PatchAdSet(object):
    """Model describing a partial (PATCH) update of an ad set.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    ``openapi_types`` maps attribute names to their declared types and
    ``attribute_map`` maps attribute names to the JSON keys used in the
    API definition.
    """

    openapi_types = {
        'name': 'str',
        'scheduling': 'PatchAdSetScheduling',
        'bidding': 'PatchAdSetBidding',
        'targeting': 'AdSetTargeting',
        'budget': 'PatchAdSetBudget'
    }

    attribute_map = {
        'name': 'name',
        'scheduling': 'scheduling',
        'bidding': 'bidding',
        'targeting': 'targeting',
        'budget': 'budget'
    }

    def __init__(self, name=None, scheduling=None, bidding=None, targeting=None, budget=None):  # noqa: E501
        """PatchAdSet - a model defined in OpenAPI"""  # noqa: E501
        self._name = None
        self._scheduling = None
        self._bidding = None
        self._targeting = None
        self._budget = None
        self.discriminator = None
        # Route every explicitly provided (non-None) argument through its
        # property setter, preserving declaration order.
        provided = {
            'name': name,
            'scheduling': scheduling,
            'bidding': bidding,
            'targeting': targeting,
            'budget': budget,
        }
        for attr_name, attr_value in provided.items():
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def name(self):
        """Gets the name of this PatchAdSet.  # noqa: E501

        Name of the ad set.

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this PatchAdSet.

        :type: str
        """
        self._name = name

    @property
    def scheduling(self):
        """Gets the scheduling of this PatchAdSet.  # noqa: E501

        :rtype: PatchAdSetScheduling
        """
        return self._scheduling

    @scheduling.setter
    def scheduling(self, scheduling):
        """Sets the scheduling of this PatchAdSet.

        :type: PatchAdSetScheduling
        """
        self._scheduling = scheduling

    @property
    def bidding(self):
        """Gets the bidding of this PatchAdSet.  # noqa: E501

        :rtype: PatchAdSetBidding
        """
        return self._bidding

    @bidding.setter
    def bidding(self, bidding):
        """Sets the bidding of this PatchAdSet.

        :type: PatchAdSetBidding
        """
        self._bidding = bidding

    @property
    def targeting(self):
        """Gets the targeting of this PatchAdSet.  # noqa: E501

        :rtype: AdSetTargeting
        """
        return self._targeting

    @targeting.setter
    def targeting(self, targeting):
        """Sets the targeting of this PatchAdSet.

        :type: AdSetTargeting
        """
        self._targeting = targeting

    @property
    def budget(self):
        """Gets the budget of this PatchAdSet.  # noqa: E501

        :rtype: PatchAdSetBudget
        """
        return self._budget

    @budget.setter
    def budget(self, budget):
        """Sets the budget of this PatchAdSet.

        :type: PatchAdSetBudget
        """
        self._budget = budget

    def to_dict(self):
        """Return the model properties as a dict."""
        def _convert(item):
            # Recurse into nested models; pass plain values through.
            if hasattr(item, "to_dict"):
                return item.to_dict()
            return item

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_convert(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _convert(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two PatchAdSet instances are equal when all their fields match."""
        if not isinstance(other, PatchAdSet):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
|
[
"six.iteritems"
] |
[((4475, 4508), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (4488, 4508), False, 'import six\n')]
|
import pika
import config
class RabbitMq:
    """Thin one-shot publisher around a blocking RabbitMQ connection.

    On construction it connects to the host configured in
    ``config.RABBITMQ`` and declares the configured queue.
    """

    def __init__(self):
        self.conn = None
        self.channel = None
        self.__config = config.RABBITMQ
        self.__connect()

    def __connect(self):
        """Open the blocking connection and declare the target queue."""
        params = pika.ConnectionParameters(host=self.__config['HOST'])
        self.conn = pika.BlockingConnection(params)
        self.channel = self.conn.channel()
        self.channel.queue_declare(queue=self.__config['QUEUE'])

    def send_message(self, message: str):
        """Publish *message* to the configured queue, then close the
        connection (the instance is single-use)."""
        # delivery_mode=2 marks the message persistent on the broker
        persistent = pika.BasicProperties(delivery_mode=2)
        self.channel.basic_publish(
            exchange='',
            routing_key=self.__config['QUEUE'],
            body=message,
            properties=persistent,
        )
        self.conn.close()
|
[
"pika.BasicProperties",
"pika.ConnectionParameters"
] |
[((269, 322), 'pika.ConnectionParameters', 'pika.ConnectionParameters', ([], {'host': "self.__config['HOST']"}), "(host=self.__config['HOST'])\n", (294, 322), False, 'import pika\n'), ((672, 709), 'pika.BasicProperties', 'pika.BasicProperties', ([], {'delivery_mode': '(2)'}), '(delivery_mode=2)\n', (692, 709), False, 'import pika\n')]
|
from django import forms
from django_grapesjs.settings import BASE, GRAPESJS_DEFAULT_HTML, REDACTOR_CONFIG
from django_grapesjs.utils import apply_string_handling
from .widgets import GrapesJsWidget
__all__ = (
'GrapesJsField',
)
class GrapesJsField(forms.CharField):
    """Form field rendered through the GrapesJS page-builder widget."""

    widget = GrapesJsWidget

    def __init__(self, default_html=GRAPESJS_DEFAULT_HTML, html_name_init_conf=REDACTOR_CONFIG[BASE],
                 apply_django_tag=False, validate_tags=False, template_choices=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Forward widget-level configuration onto the instantiated widget.
        widget_conf = {
            'default_html': default_html,
            'html_name_init_conf': html_name_init_conf,
            'apply_django_tag': apply_django_tag,
            'template_choices': template_choices,
        }
        for option, value in widget_conf.items():
            setattr(self.widget, option, value)
        self.validate_tags = validate_tags

    def validate(self, value):
        """Standard CharField validation; tag validation is not implemented yet."""
        super().validate(value)
        # TODO: check the field
        # if self.validate_tags:

    def clean(self, value):
        """Normalise stored template tags before the usual cleaning."""
        normalised = apply_string_handling(value, 'apply_tag_save')
        return super().clean(normalised)
|
[
"django_grapesjs.utils.apply_string_handling"
] |
[((1047, 1093), 'django_grapesjs.utils.apply_string_handling', 'apply_string_handling', (['value', '"""apply_tag_save"""'], {}), "(value, 'apply_tag_save')\n", (1068, 1093), False, 'from django_grapesjs.utils import apply_string_handling\n')]
|
import base64
def base64_string(val):
    """Return *val* encoded as a Base64 ASCII string.

    Empty or missing input (``None``, ``b''``, ``''``) yields ``None``
    (the original only handled ``None`` and ``b''``; an empty ``str``
    would have crashed ``b64encode``).  ``str`` input is UTF-8 encoded
    first so both text and bytes are accepted.
    """
    if not val:
        return None
    if isinstance(val, str):
        val = val.encode('utf-8')
    return base64.b64encode(val).decode('utf-8')
def to_int(val):
    """Coerce *val* to an int where possible.

    ``None`` passes through, empty bytes map to 0, non-empty bytes are
    read as a big-endian unsigned integer, numeric strings are parsed,
    and anything else is returned untouched.
    """
    if val is None:
        return None
    if val == b'':
        return 0
    if isinstance(val, bytes):
        return int.from_bytes(val, 'big')
    if isinstance(val, str):
        return int(val)
    return val
def to_none_if_empty(val):
    """Map the empty string to ``None``; pass everything else through."""
    return None if val == '' else val
|
[
"base64.b64encode"
] |
[((105, 126), 'base64.b64encode', 'base64.b64encode', (['val'], {}), '(val)\n', (121, 126), False, 'import base64\n')]
|
#!/usr/bin/env python3
"""
Script that logs into `google.com`
"""
from logging import debug
from typing import Generator
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.keys import Keys
from ..script import Script as BaseClass
URL = 'https://www.google.com'
BAD_REQUEST = 400
UNAUTHORIZED = 401
SUCCESS = 200
class Script (BaseClass):
    """
    Script that is imported by `Scraper` object.
    See `Scraper.scrape()` function.
    """
    def goto_login_page(self):
        """
        Navigates to login page.

        Returns:
            bool: False if a user is already logged in, True once the
            login form has been opened.
        """
        if self.exists('//*[contains(@href, "google.com/Logout")]'):
            debug('User already logged in')
            return False
        debug('going to login page')
        # click login button
        self.click('//a[contains(@href, "google.com/ServiceLogin")]')
        # check for input field
        if not self.exists('//input[@type="email"]'):
            # click on use another account button
            self.click('(//*[@tabindex="0"])[last()-2]')
        return True

    def execute(self, **kwargs) -> Generator[dict, None, None]:
        """
        Attempts to log into website.

        Args:
            **max_page (int): last page to scrape. Default to 99
            **page (int): Starting page to start execution. Default to 1
            **retries (int): number of times to retry execution. Default to 2
            **user_name (str, optional): email address of google user.
            **user_pass (str, optional): password of google user.
            **kwargs (dict[str, any]): Used to pass arguments to script

        Raises:
            TimeoutException: if required elements are not found on page
                and no retries remain

        Yields:
            Generator[dict, None, None]: status codes
        """
        options = {**self.options, **kwargs}
        self.max_page = options.pop('max_page', 99)
        # page = options.pop('page', 1)
        # NOTE: 'retries' was previously popped twice; once is enough.
        retries = options.pop('retries', 2)
        user_name = options.pop('user_name', '')
        user_pass = options.pop('user_pass', '')
        # exit early if no user name / pass
        if len(user_name) < 2 or len(user_pass) < 2:
            yield BAD_REQUEST
            return
        try:
            self.driver.get(URL)
            if self.goto_login_page():
                debug('Entering email address')
                self.send_keys('//input[@type="email"]', user_name + Keys.ENTER, True)
                self.sleep(5)
                debug('Entering password')
                self.send_keys('//input[@type="password"]', user_pass + Keys.ENTER, True)
                self.sleep(10)
            yield SUCCESS
        except NoSuchElementException:
            # failure
            yield UNAUTHORIZED
        except TimeoutException as err:
            # no more attempts?
            if retries < 1:
                raise err
            # Try again.  Build the kwargs explicitly so a caller-supplied
            # 'retries' does not collide with ours (TypeError before), and
            # forward whatever the retry yields to our own caller (the old
            # bare `self.execute(...)` call discarded the retry generator).
            retry_kwargs = {**kwargs, 'retries': retries - 1}
            yield from self.execute(**retry_kwargs)
|
[
"logging.debug"
] |
[((739, 767), 'logging.debug', 'debug', (['"""going to login page"""'], {}), "('going to login page')\n", (744, 767), False, 'from logging import debug\n'), ((673, 704), 'logging.debug', 'debug', (['"""User already logged in"""'], {}), "('User already logged in')\n", (678, 704), False, 'from logging import debug\n'), ((2438, 2469), 'logging.debug', 'debug', (['"""Entering email address"""'], {}), "('Entering email address')\n", (2443, 2469), False, 'from logging import debug\n'), ((2604, 2630), 'logging.debug', 'debug', (['"""Entering password"""'], {}), "('Entering password')\n", (2609, 2630), False, 'from logging import debug\n')]
|
from dataclasses import dataclass
from typing import cast
from fastapi import Request
__all__ = ['RequestAnalyzer']
@dataclass
class RequestAnalyzer:
    """Convenience accessors for client details of an incoming request."""

    request: Request

    @property
    def client_ip_address(self) -> str:
        # ``client.host`` is loosely typed upstream; narrow it for callers.
        host = self.request.client.host
        return cast(str, host)

    @property
    def client_user_agent(self) -> str:
        """The raw ``User-Agent`` header of the request."""
        return self.request.headers['user-agent']
|
[
"typing.cast"
] |
[((246, 281), 'typing.cast', 'cast', (['str', 'self.request.client.host'], {}), '(str, self.request.client.host)\n', (250, 281), False, 'from typing import cast\n')]
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Zyxel.MSAN.get_version
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
"""
"""
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
class Script(BaseScript):
    """Retrieve vendor/platform/version information from Zyxel MSAN gear."""
    name = "Zyxel.MSAN.get_version"
    interface = IGetVersion
    cache = True

    # "sys version" layout: model, uptime, firmware and bootbase versions.
    rx_ver1 = re.compile(
        r"^\s*product model\s*:\s+(?P<platform>\S+)\s*\n"
        r"^\s*system up time\s*:\s+(?P<uptime>\S+)\s*\n"
        r"^\s*f/w version\s*:\s+(?P<version>\S+) \| \S+\s*\n"
        r"^\s*bootbase version\s*:\s+(?P<bootprom>\S+) \| \S+\s*\n",
        re.MULTILINE,
    )
    # "sys info show" layout with optional hardware version and serial.
    rx_ver2 = re.compile(
        r"^\s*Model: (?:\S+ \/ )?(?P<platform>\S+)\s*\n"
        r"^\s*ZyNOS version: (?P<version>\S+) \| \S+\s*\n"
        r".+?\n"
        r"^\s*Bootbase version: (?P<bootprom>\S+) \| \S+\s*\n"
        r".+?\n"
        r"(^\s*Hardware version: (?P<hardware>\S+)\s*\n)?"
        r"^\s*Serial number: (?P<serial>\S+)\s*\n",
        re.MULTILINE | re.DOTALL,
    )
    # Variant where the platform name is embedded in the bootbase line (MSC*).
    rx_ver3 = re.compile(
        r"^\s*ZyNOS version\s*: (?P<version>\S+) \| \S+\s*\n"
        r".+?\n"
        r".+?\n"
        r"^\s*bootbase version\s*: (?P<bootprom>\S+)"
        r"\((?P<platform>MSC\S+)\) \| \S+\s*\n",
        re.MULTILINE,
    )
    # Last-resort layout; platform must then be read from "chips info".
    rx_ver4 = re.compile(
        r"^\s*Bootcode Version: (?P<bootprom>.+)\s*\n"
        r"^\s*Hardware Version: (?P<hardware>.+)\s*\n"
        r"^\s*Serial Number: (?P<serial>.+)\s*\n"
        r"^\s*F/W Version: (?P<version>\S+)\s*\n",
        re.MULTILINE,
    )
    # First token of the "chips info" output, optionally trimming a "/..." suffix.
    rx_chips = re.compile(r"^\s*(?P<platform>\S+?)(/\S+)?\s+")

    def execute(self):
        """Try each known firmware output format in turn and build the
        IGetVersion result dict."""
        slots = self.profile.get_slots_n(self)
        try:
            c = self.cli("sys version")
            match = self.rx_ver1.search(c)
        except self.CLISyntaxError:
            # Firmware without "sys version": fall back to "sys info show".
            c = self.cli("sys info show", cached=True)
            match = self.rx_ver2.search(c)
            if not match:
                match = self.rx_ver3.search(c)
        if match:
            platform = self.profile.get_platform(self, slots, match.group("platform"))
        else:
            # Final fallback: rx_ver4 layout plus "chips info" for the platform.
            match = self.rx_ver4.search(self.cli("sys info show", cached=True))
            if match:
                match1 = self.rx_chips.search(self.cli("chips info"))
                r = {
                    "vendor": "ZyXEL",
                    "platform": match1.group("platform"),
                    "version": match.group("version"),
                }
                # "not defined" marks an absent value in this firmware's output.
                if match.group("bootprom") != "not defined":
                    if "attributes" not in r:
                        r["attributes"] = {}
                    r["attributes"]["Boot PROM"] = match.group("bootprom")
                if match.group("hardware") != "not defined":
                    if "attributes" not in r:
                        r["attributes"] = {}
                    r["attributes"]["HW version"] = match.group("hardware")
                if match.group("serial") != "not defined":
                    if "attributes" not in r:
                        r["attributes"] = {}
                    r["attributes"]["Serial Number"] = match.group("serial")
                return r
            else:
                raise self.NotSupportedError()
        r = {
            "vendor": "ZyXEL",
            "platform": platform,
            "version": match.group("version"),
            "attributes": {"Boot PROM": match.group("bootprom")},
        }
        # hardware/serial groups only exist in some of the regex variants.
        if ("hardware" in match.groupdict()) and (match.group("hardware")):
            r["attributes"]["HW version"] = match.group("hardware")
        if ("serial" in match.groupdict()) and (match.group("serial")):
            r["attributes"]["Serial Number"] = match.group("serial")
        return r
|
[
"re.compile"
] |
[((605, 868), 're.compile', 're.compile', (['"""^\\\\s*product model\\\\s*:\\\\s+(?P<platform>\\\\S+)\\\\s*\\\\n^\\\\s*system up time\\\\s*:\\\\s+(?P<uptime>\\\\S+)\\\\s*\\\\n^\\\\s*f/w version\\\\s*:\\\\s+(?P<version>\\\\S+) \\\\| \\\\S+\\\\s*\\\\n^\\\\s*bootbase version\\\\s*:\\\\s+(?P<bootprom>\\\\S+) \\\\| \\\\S+\\\\s*\\\\n"""', 're.MULTILINE'], {}), "(\n '^\\\\s*product model\\\\s*:\\\\s+(?P<platform>\\\\S+)\\\\s*\\\\n^\\\\s*system up time\\\\s*:\\\\s+(?P<uptime>\\\\S+)\\\\s*\\\\n^\\\\s*f/w version\\\\s*:\\\\s+(?P<version>\\\\S+) \\\\| \\\\S+\\\\s*\\\\n^\\\\s*bootbase version\\\\s*:\\\\s+(?P<bootprom>\\\\S+) \\\\| \\\\S+\\\\s*\\\\n'\n , re.MULTILINE)\n", (615, 868), False, 'import re\n'), ((905, 1222), 're.compile', 're.compile', (['"""^\\\\s*Model: (?:\\\\S+ \\\\/ )?(?P<platform>\\\\S+)\\\\s*\\\\n^\\\\s*ZyNOS version: (?P<version>\\\\S+) \\\\| \\\\S+\\\\s*\\\\n.+?\\\\n^\\\\s*Bootbase version: (?P<bootprom>\\\\S+) \\\\| \\\\S+\\\\s*\\\\n.+?\\\\n(^\\\\s*Hardware version: (?P<hardware>\\\\S+)\\\\s*\\\\n)?^\\\\s*Serial number: (?P<serial>\\\\S+)\\\\s*\\\\n"""', '(re.MULTILINE | re.DOTALL)'], {}), "(\n '^\\\\s*Model: (?:\\\\S+ \\\\/ )?(?P<platform>\\\\S+)\\\\s*\\\\n^\\\\s*ZyNOS version: (?P<version>\\\\S+) \\\\| \\\\S+\\\\s*\\\\n.+?\\\\n^\\\\s*Bootbase version: (?P<bootprom>\\\\S+) \\\\| \\\\S+\\\\s*\\\\n.+?\\\\n(^\\\\s*Hardware version: (?P<hardware>\\\\S+)\\\\s*\\\\n)?^\\\\s*Serial number: (?P<serial>\\\\S+)\\\\s*\\\\n'\n , re.MULTILINE | re.DOTALL)\n", (915, 1222), False, 'import re\n'), ((1295, 1490), 're.compile', 're.compile', (['"""^\\\\s*ZyNOS version\\\\s*: (?P<version>\\\\S+) \\\\| \\\\S+\\\\s*\\\\n.+?\\\\n.+?\\\\n^\\\\s*bootbase version\\\\s*: (?P<bootprom>\\\\S+)\\\\((?P<platform>MSC\\\\S+)\\\\) \\\\| \\\\S+\\\\s*\\\\n"""', 're.MULTILINE'], {}), "(\n '^\\\\s*ZyNOS version\\\\s*: (?P<version>\\\\S+) \\\\| \\\\S+\\\\s*\\\\n.+?\\\\n.+?\\\\n^\\\\s*bootbase version\\\\s*: (?P<bootprom>\\\\S+)\\\\((?P<platform>MSC\\\\S+)\\\\) \\\\| \\\\S+\\\\s*\\\\n'\n , 
re.MULTILINE)\n", (1305, 1490), False, 'import re\n'), ((1548, 1761), 're.compile', 're.compile', (['"""^\\\\s*Bootcode Version: (?P<bootprom>.+)\\\\s*\\\\n^\\\\s*Hardware Version: (?P<hardware>.+)\\\\s*\\\\n^\\\\s*Serial Number: (?P<serial>.+)\\\\s*\\\\n^\\\\s*F/W Version: (?P<version>\\\\S+)\\\\s*\\\\n"""', 're.MULTILINE'], {}), "(\n '^\\\\s*Bootcode Version: (?P<bootprom>.+)\\\\s*\\\\n^\\\\s*Hardware Version: (?P<hardware>.+)\\\\s*\\\\n^\\\\s*Serial Number: (?P<serial>.+)\\\\s*\\\\n^\\\\s*F/W Version: (?P<version>\\\\S+)\\\\s*\\\\n'\n , re.MULTILINE)\n", (1558, 1761), False, 'import re\n'), ((1814, 1864), 're.compile', 're.compile', (['"""^\\\\s*(?P<platform>\\\\S+?)(/\\\\S+)?\\\\s+"""'], {}), "('^\\\\s*(?P<platform>\\\\S+?)(/\\\\S+)?\\\\s+')\n", (1824, 1864), False, 'import re\n')]
|
import open3d as o3d
import copy
import numpy as np
# Helper visualization function
def draw_registration_result(source, target, transformation):
    """Visualize *source* aligned onto *target* by *transformation*.

    Works on deep copies so the caller's clouds are left untouched;
    source is drawn yellow, target cyan.
    """
    source_temp = copy.deepcopy(source)
    target_temp = copy.deepcopy(target)
    source_temp.paint_uniform_color([1, 0.706, 0])
    target_temp.paint_uniform_color([0, 0.651, 0.929])
    source_temp.transform(transformation)
    o3d.visualization.draw_geometries([source_temp, target_temp])
# Load the two demo point clouds and a rough initial alignment.
source = o3d.io.read_point_cloud("../test_data/icp/cloud_bin_0.pcd")
target = o3d.io.read_point_cloud("../test_data/icp/cloud_bin_1.pcd")
trans_init = np.asarray([[0.862, 0.011, -0.507, 0.5],
                         [-0.139, 0.967, -0.215, 0.7],
                         [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]])
draw_registration_result(source, target, trans_init)
# Score the initial guess (fitness / inlier RMSE) before refining.
print("Initial alignment")
threshold = 0.02
evaluation = o3d.pipelines.registration.evaluate_registration(
    source, target, threshold, trans_init)
print(evaluation)
# point-to-point ICP (default 30 iterations)
print("Apply point-to-point ICP")
reg_p2p = o3d.pipelines.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.pipelines.registration.TransformationEstimationPointToPoint())
print(reg_p2p)
print("Transformation is:")
print(reg_p2p.transformation)
draw_registration_result(source, target, reg_p2p.transformation)
# point-to-point ICP again, with a much larger iteration budget
reg_p2p = o3d.pipelines.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.pipelines.registration.TransformationEstimationPointToPoint(),
    o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=2000))
print(reg_p2p)
print("Transformation is:")
print(reg_p2p.transformation)
draw_registration_result(source, target, reg_p2p.transformation)
# point-to-plane ICP (uses target normals; typically converges faster)
print("Apply point-to-plane ICP")
reg_p2l = o3d.pipelines.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.pipelines.registration.TransformationEstimationPointToPlane())
print(reg_p2l)
print("Transformation is:")
print(reg_p2l.transformation)
draw_registration_result(source, target, reg_p2l.transformation)
|
[
"copy.deepcopy",
"numpy.asarray",
"open3d.io.read_point_cloud",
"open3d.visualization.draw_geometries",
"open3d.pipelines.registration.evaluate_registration",
"open3d.pipelines.registration.ICPConvergenceCriteria",
"open3d.pipelines.registration.TransformationEstimationPointToPoint",
"open3d.pipelines.registration.TransformationEstimationPointToPlane"
] |
[((459, 518), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['"""../test_data/icp/cloud_bin_0.pcd"""'], {}), "('../test_data/icp/cloud_bin_0.pcd')\n", (482, 518), True, 'import open3d as o3d\n'), ((528, 587), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['"""../test_data/icp/cloud_bin_1.pcd"""'], {}), "('../test_data/icp/cloud_bin_1.pcd')\n", (551, 587), True, 'import open3d as o3d\n'), ((602, 729), 'numpy.asarray', 'np.asarray', (['[[0.862, 0.011, -0.507, 0.5], [-0.139, 0.967, -0.215, 0.7], [0.487, 0.255, \n 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]]'], {}), '([[0.862, 0.011, -0.507, 0.5], [-0.139, 0.967, -0.215, 0.7], [\n 0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]])\n', (612, 729), True, 'import numpy as np\n'), ((894, 985), 'open3d.pipelines.registration.evaluate_registration', 'o3d.pipelines.registration.evaluate_registration', (['source', 'target', 'threshold', 'trans_init'], {}), '(source, target, threshold,\n trans_init)\n', (942, 985), True, 'import open3d as o3d\n'), ((165, 186), 'copy.deepcopy', 'copy.deepcopy', (['source'], {}), '(source)\n', (178, 186), False, 'import copy\n'), ((205, 226), 'copy.deepcopy', 'copy.deepcopy', (['target'], {}), '(target)\n', (218, 226), False, 'import copy\n'), ((379, 440), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[source_temp, target_temp]'], {}), '([source_temp, target_temp])\n', (412, 440), True, 'import open3d as o3d\n'), ((1163, 1228), 'open3d.pipelines.registration.TransformationEstimationPointToPoint', 'o3d.pipelines.registration.TransformationEstimationPointToPoint', ([], {}), '()\n', (1226, 1228), True, 'import open3d as o3d\n'), ((1507, 1572), 'open3d.pipelines.registration.TransformationEstimationPointToPoint', 'o3d.pipelines.registration.TransformationEstimationPointToPoint', ([], {}), '()\n', (1570, 1572), True, 'import open3d as o3d\n'), ((1578, 1647), 'open3d.pipelines.registration.ICPConvergenceCriteria', 
'o3d.pipelines.registration.ICPConvergenceCriteria', ([], {'max_iteration': '(2000)'}), '(max_iteration=2000)\n', (1627, 1647), True, 'import open3d as o3d\n'), ((1945, 2010), 'open3d.pipelines.registration.TransformationEstimationPointToPlane', 'o3d.pipelines.registration.TransformationEstimationPointToPlane', ([], {}), '()\n', (2008, 2010), True, 'import open3d as o3d\n')]
|
"""
Developer : <NAME>
VER : 1.0
Data Source : https://api.covid19india.org/v4/data.json
"""
import urllib.request
import urllib.error
import json
import matplotlib.pyplot as plt
import numpy as np
import time
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
#Graph Plotting Function
def ploting(chart_type, chart_name, lim):
    """Draw a horizontal bar chart of per-district values, save it as
    '<chart_name>.png' and show it on screen."""
    plt.subplots()
    positions = np.arange(len(districts))
    plt.barh(positions, chart_type, align='center', alpha=0.6,)
    # annotate every bar with its numeric value
    for idx, val in enumerate(chart_type):
        plt.text(val, idx - .2, str(val), color='red')
    plt.yticks(positions, districts)
    plt.xlabel('Cases')
    plt.xlim([0, lim])
    chart_title = 'Districtwise ' + chart_name + ' on ' + date
    plt.title(chart_title)
    plt.savefig(chart_name + '.png', bbox_inches='tight', pad_inches=0.2)
    plt.show()
#District Data Extraction Function
def data_district(data_type):
    """Collect one statistic ('confirmed', 'recovered' or 'deceased') for
    every district, ordered exactly like the ``districts`` name tuple."""
    district_objs = (Alappuzha, Ernakulam, Idukki, Kannur, Kasargod, Kollam,
                     Kottayam, Kozhikode, Malappuram, Palakkad, Pathanamthitta,
                     Thiruvananthapuram, Thrissur, Wayanad)
    return tuple(d["total"][data_type] for d in district_objs)
#Graph Image Pasting Function
def image_paste(image_name, x_cord, y_cord):
    """Paste chart *image_name* (scaled 2.3x) onto a copy of the global
    report image ``img`` at (x_cord, y_cord); return the new copy."""
    chart = Image.open(image_name)
    scale = 2.3
    new_size = (round(chart.size[0] * scale), round(chart.size[1] * scale))
    chart = chart.resize(new_size)
    canvas = img.copy()
    canvas.paste(chart, (x_cord, y_cord))
    return canvas
#Loading Constants and Data
date=time.strftime("%d-%m-%Y")
url = 'https://api.covid19india.org/v4/data.json'
output = urllib.request.urlopen(url).read()
tree = json.loads(output)
# District records for Kerala ("KL"); each holds a "total" dict with
# confirmed / recovered / deceased counters.
Alappuzha=tree["KL"]["districts"]["Alappuzha"]
Ernakulam=tree["KL"]["districts"]["Ernakulam"]
Idukki=tree["KL"]["districts"]["Idukki"]
Kannur=tree["KL"]["districts"]["Kannur"]
Kasargod=tree["KL"]["districts"]["Kasaragod"]
Kollam=tree["KL"]["districts"]["Kollam"]
Kottayam=tree["KL"]["districts"]["Kottayam"]
Kozhikode=tree["KL"]["districts"]["Kozhikode"]
Malappuram=tree["KL"]["districts"]["Malappuram"]
Palakkad=tree["KL"]["districts"]["Palakkad"]
Pathanamthitta=tree["KL"]["districts"]["Pathanamthitta"]
Thiruvananthapuram=tree["KL"]["districts"]["Thiruvananthapuram"]
Thrissur=tree["KL"]["districts"]["Thrissur"]
Wayanad=tree["KL"]["districts"]["Wayanad"]
totalKerala=tree["KL"]["total"]
# District labels for the chart y-axis, in data_district() order.
districts=('Allapuzha','Ernakulam','Idukki','Kannur','Kasargod','Kollam','Kottayam','Kozhikode','Malappuram',
          'Palakkad','Pathnamthitta','Thiruvananthapuram','Thrissur','Wayanad')
# Render one bar chart per statistic; each call also saves '<name>.png'.
cases=data_district('confirmed')#Confirmed Cases
ploting(cases,'total cases',13500)
recovered=data_district('recovered')#Recovered Cases
ploting(recovered,'recoveries',8000)
death=data_district('deceased')#Deaths
ploting(death, 'total deaths',80)
# active = confirmed - recovered - deceased, per district
# NOTE(review): the 'res' alias is never used again — candidate for removal.
active_cases=res = tuple(map(lambda i, j,k: i - j - k, cases, recovered,death)) #Active Cases
ploting(active_cases,'active cases',6000)
# Compose the report image: headline, date and state-wide totals.
report_name='COVID-19 Kerala Statistics'
font='lt.otf' #Replace with prefered font name
img = Image.open("template.png")
img = img.convert('RGB')
draw = ImageDraw.Draw(img)
selectFont = ImageFont.truetype(font, size = 130)
draw.text( (200,100), report_name.upper(),(90,115,95), font=selectFont)
selectFont = ImageFont.truetype(font, size = 60)
draw.text( (920,260), 'DATE : '+date,(100,100,100), font=selectFont)
draw.text( (380,660), 'Total Cases : '+str(totalKerala['confirmed']),(200,0,0), font=selectFont)
draw.text( (1350,760), 'Total Recoveries : '+str(totalKerala['recovered']),(0,200,0), font=selectFont)
draw.text( (1350,660), 'Total Deaths : '+str(totalKerala['deceased']),(50,50,50), font=selectFont)
draw.text( (380,760), 'Active Cases : '+str(totalKerala['confirmed']-totalKerala['recovered']-totalKerala['deceased']),(0,0,200), font=selectFont)
# Paste the four saved charts into the report; each image_paste() call
# returns a new copy, which becomes the base ('img') for the next paste.
tot=image_paste('total cases.png', 80, 1020)#Confirmed cases
img=tot
rec=image_paste('recoveries.png', 1210, 1020)#Recoveries
img=rec
det=image_paste('total deaths.png', 80, 1820)#Deaths
img=det
act=image_paste('active cases.png', 1210, 1820)#Active
act.save('report_'+str(date)+'.png')
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"json.loads",
"matplotlib.pyplot.yticks",
"time.strftime",
"PIL.Image.open",
"PIL.ImageFont.truetype",
"matplotlib.pyplot.barh",
"PIL.ImageDraw.Draw",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((1789, 1814), 'time.strftime', 'time.strftime', (['"""%d-%m-%Y"""'], {}), "('%d-%m-%Y')\n", (1802, 1814), False, 'import time\n'), ((1916, 1934), 'json.loads', 'json.loads', (['output'], {}), '(output)\n', (1926, 1934), False, 'import json\n'), ((3404, 3430), 'PIL.Image.open', 'Image.open', (['"""template.png"""'], {}), "('template.png')\n", (3414, 3430), False, 'from PIL import Image\n'), ((3463, 3482), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (3477, 3482), False, 'from PIL import ImageDraw\n'), ((3496, 3530), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font'], {'size': '(130)'}), '(font, size=130)\n', (3514, 3530), False, 'from PIL import ImageFont\n'), ((3618, 3651), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font'], {'size': '(60)'}), '(font, size=60)\n', (3636, 3651), False, 'from PIL import ImageFont\n'), ((357, 371), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (369, 371), True, 'import matplotlib.pyplot as plt\n'), ((412, 466), 'matplotlib.pyplot.barh', 'plt.barh', (['y_pos', 'chart_type'], {'align': '"""center"""', 'alpha': '(0.6)'}), "(y_pos, chart_type, align='center', alpha=0.6)\n", (420, 466), True, 'import matplotlib.pyplot as plt\n'), ((557, 585), 'matplotlib.pyplot.yticks', 'plt.yticks', (['y_pos', 'districts'], {}), '(y_pos, districts)\n', (567, 585), True, 'import matplotlib.pyplot as plt\n'), ((589, 608), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cases"""'], {}), "('Cases')\n", (599, 608), True, 'import matplotlib.pyplot as plt\n'), ((617, 635), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, lim]'], {}), '([0, lim])\n', (625, 635), True, 'import matplotlib.pyplot as plt\n'), ((690, 705), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (699, 705), True, 'import matplotlib.pyplot as plt\n'), ((710, 779), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(chart_name + '.png')"], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.2)'}), "(chart_name + '.png', 
bbox_inches='tight', pad_inches=0.2)\n", (721, 779), True, 'import matplotlib.pyplot as plt\n'), ((780, 790), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (788, 790), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1561), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (1549, 1561), False, 'from PIL import Image\n')]
|
#!/usr/bin/env python3
"""
Author: <NAME>.
Included in TOGA by <NAME>.
"""
import argparse
import sys
from datetime import datetime as dt
from collections import defaultdict
try:
from modules.common import make_cds_track
from modules.common import flatten
except ImportError:
from common import make_cds_track
from common import flatten
# artificial 0-scored points
SOURCE = "SOURCE"
SINK = "SINK"
SCORE_THRESHOLD = 0.5
EXON_COV_THRESHOLD = 1.33
MAX_OVERLAP = 60
class Vertex:
    """A graph node carrying a genomic interval, a score and its out-edges."""

    def __init__(self, name, start, end, score):
        self.name = name
        self.start = start
        self.end = end
        self.score = score
        self.children = []

    def add_child(self, v):
        """Register *v* as a child, ignoring duplicates."""
        if v in self.children:
            return
        self.children.append(v)
class Graph:
    """Directed graph stored as an adjacency list of ``Vertex`` objects."""

    def __init__(self):
        self.vertices = {}

    def add_vertex(self, vertex):
        """Add *vertex* if it is a Vertex not yet present; report success."""
        if not isinstance(vertex, Vertex) or vertex.name in self.vertices:
            return False
        self.vertices[vertex.name] = vertex
        return True

    def add_edge(self, parent, child):
        """Connect two already-registered vertices; report success."""
        if parent not in self.vertices or child not in self.vertices:
            return False
        self.vertices[parent].add_child(child)
        return True

    def topological_sort_util(self, v, visited, stack):
        """DFS helper: visit *v*, recurse into its unvisited children, then
        prepend *v* to the partially built ordering."""
        visited[v] = True
        for child in self.vertices[v].children:
            if not visited[child]:
                self.topological_sort_util(child, visited, stack)
        stack.insert(0, v)

    def topological_sort(self):
        """Return all vertex names in a topological order (recursive DFS)."""
        visited = dict.fromkeys(self.vertices, False)
        order = []
        for name in self.vertices:
            if not visited[name]:
                self.topological_sort_util(name, visited, order)
        return order

    def __repr__(self):
        lines = [f"{name}\t{vertex.children}\n" for name, vertex in self.vertices.items()]
        return "".join(lines)
def parse_args():
    """Parse CMD args."""
    parser = argparse.ArgumentParser()
    parser.add_argument("chain_file", help="Chain file")
    parser.add_argument(
        "chain_scores_file", help="XGBoost output: chain orthology probabilities"
    )
    parser.add_argument("bed_file", help="Bed file containing gene loci.")
    parser.add_argument(
        "--only_fragmented",
        "--of",
        action="store_true",
        dest="only_fragmented",
        help="Output fragmented genes only.",
    )
    # Too few positional arguments: show usage and quit gracefully.
    if len(sys.argv) < 3:
        parser.print_help()
        sys.exit(0)
    return parser.parse_args()
def read_gene_scores(score_file):
    """Read orthology_score.tsv file into a dict.

    Dict structure is:
    {GENEid : [(chain, score), (chain2, score2), ..] }.
    Chains scoring below SCORE_THRESHOLD are skipped.
    """
    ret = defaultdict(list)
    # "with" guarantees the handle is closed even if a line fails to parse
    # (the original left the file open on any exception).
    with open(score_file, "r") as f:
        next(f)  # skip header
        for line in f:
            line_data = line.rstrip().split()
            gene = line_data[0]
            chain_id = int(line_data[1])
            chain_score = float(line_data[2])
            if chain_score < SCORE_THRESHOLD:
                continue
            ret[gene].append((chain_id, chain_score))
    return ret
def read_chain_file(chain_file):
    """Read chain file.

    Returns a dict: chain_id -> (tStart, tEnd), taken from the "chain"
    header lines; all alignment-block lines are ignored.
    """
    ret = {}
    # "with" guarantees the file is closed even on a parse error
    # (the original leaked the handle on exceptions).
    with open(chain_file, "r") as f:
        for line in f:
            if not line.startswith("chain"):
                continue
            fields = line.rstrip().split()
            # header layout: chain score tName tSize tStrand tStart tEnd ... id
            start = int(fields[5])
            end = int(fields[6])
            chain_id = int(fields[12])
            ret[chain_id] = (start, end)
    return ret
def read_gene_loci(bed_file):
    """For each bed entry get coding locus.

    Builds the CDS track of every bed12 line and converts its
    block (exon) coordinates to absolute positions.

    :returns: dict {gene_name: [(exon_num, abs_start, abs_end), ...]}
    """
    # TODO: not the most optimal solution, fix it
    ret = {}
    # context manager guarantees the file handle is released on any error
    with open(bed_file, "r") as f:
        for line in f:
            cds_line = make_cds_track(line).split("\t")
            # extract absolute exon coordinates
            chrom_start = int(cds_line[1])
            name = cds_line[3]
            # make_cds_track appends a "_CDS" suffix -- strip it back off
            if name.endswith("_CDS"):
                name = name[:-4]
            block_count = int(cds_line[9])
            block_sizes = [int(x) for x in cds_line[10].split(",") if x != ""]
            block_starts = [int(x) for x in cds_line[11].split(",") if x != ""]
            block_ends = [block_starts[i] + block_sizes[i] for i in range(block_count)]
            block_abs_starts = [block_starts[i] + chrom_start for i in range(block_count)]
            block_abs_ends = [block_ends[i] + chrom_start for i in range(block_count)]
            exon_nums = list(range(block_count))
            ret[name] = list(zip(exon_nums, block_abs_starts, block_abs_ends))
    return ret
def build_chain_graph(chain_id_to_loc, intersecting_chains_wscores):
    """Build a DAG of chains intersecting one gene.

    Each chain becomes a vertex carrying its reference span and its
    score negated (so a shortest-path search finds the highest-scoring
    route).  An edge left -> right is added whenever the right chain
    starts after the left chain ends, minus the tolerated MAX_OVERLAP.
    """
    graph = Graph()
    # vertices: one per intersecting chain
    for chain_id, score in intersecting_chains_wscores:
        span_start, span_end = chain_id_to_loc.get(chain_id, (None, None))
        if span_start is None:
            raise ValueError(f"Cannot find chain {chain_id}")
        graph.add_vertex(Vertex(chain_id, span_start, span_end, -1 * score))
    # edges: every ordered pair of sufficiently non-overlapping chains
    for left in graph.vertices:
        for right in graph.vertices:
            if left == right:
                # no need to connect a vertex to itself
                continue
            left_vertex = graph.vertices[left]
            right_vertex = graph.vertices[right]
            # allow some overlap between chains (MAX_OVERLAP constant)
            if left_vertex.end - MAX_OVERLAP <= right_vertex.start:
                graph.add_edge(left_vertex.name, right_vertex.name)
    return graph
def add_source_sink_graph(graph_name):
    """Attach artificial SOURCE and SINK vertices to the chain graph.

    SOURCE is a zero-length, zero-score vertex at the leftmost start;
    SINK likewise at the rightmost end.  SOURCE gets an edge to every
    other vertex and every other vertex gets an edge to SINK, so any
    path through the graph can be anchored at both ends.
    """
    # compute both anchors before mutating the vertex set
    leftmost = min(
        [graph_name.vertices[vertex].start for vertex in graph_name.vertices]
    )
    rightmost = max(
        [graph_name.vertices[vertex].end for vertex in graph_name.vertices]
    )
    graph_name.add_vertex(Vertex(SOURCE, leftmost, leftmost, 0))
    graph_name.add_vertex(Vertex(SINK, rightmost, rightmost, 0))
    # connect SOURCE to everything, and everything to SINK
    for vertex in graph_name.vertices:
        if vertex != SOURCE:
            graph_name.add_edge(SOURCE, vertex)
    for vertex in graph_name.vertices:
        if vertex != SINK:
            graph_name.add_edge(vertex, SINK)
    return
def find_shortest_path(graph_name, source, sink, sorted_vertices):
    """Find the shortest path through a DAG.

    Maintains {vertex: (path_cost, path)} seeded at (0, [source]) and
    relaxes each vertex's outgoing edges once, in topological order.

    :returns: the (cost, path) pair recorded for *sink*
    """
    best = {v: (0, [source]) for v in sorted_vertices}
    # relax every edge exactly once, in topological order
    for vertex in sorted_vertices:
        for child in graph_name.vertices[vertex].children:
            candidate = best[vertex][0] + graph_name.vertices[child].score
            if candidate < best[child][0]:
                # found a cheaper route to `child` going through `vertex`
                path = list(best[vertex][1])
                if vertex not in path:
                    path.append(vertex)
                best[child] = (candidate, path)
    return best[sink]
# def intersect(range_1, range_2):
# """Return intersection size."""
# return min(range_1[1], range_2[1]) - max(range_1[0], range_2[0])
def check_exon_coverage(chains, chain_id_to_loc, exons_loci):
    """For each chain, flag which gene exons it intersects.

    :param chains: iterable of chain ids
    :param chain_id_to_loc: {chain_id: (start, end)}
    :param exons_loci: [(exon_num, abs_start, abs_end), ...]
    :returns: {chain_id: [bool per exon]} -- True where the chain span
        overlaps that exon
    """
    exon_total = len(exons_loci)
    coverage = {}
    for chain_id in chains:
        chain_start, chain_end = chain_id_to_loc[chain_id]
        flags = [False] * exon_total
        for num, ex_start, ex_end in exons_loci:
            # overlap unless the exon ends before the chain starts
            # or begins after the chain ends
            if ex_end > chain_start and ex_start < chain_end:
                flags[num] = True
        coverage[chain_id] = flags
    return coverage
def get_average_exon_cov(chain_to_exon_cov, exon_num):
    """Compute the mean number of chains covering each exon."""
    per_exon_hits = [0] * exon_num
    for flags in chain_to_exon_cov.values():
        # flags holds booleans; count True entries per exon position
        for idx in range(exon_num):
            per_exon_hits[idx] += 1 if flags[idx] else 0
    return sum(per_exon_hits) / exon_num
def stitch_scaffolds(chain_file, chain_scores_file, bed_file, fragments_only=False):
    """Stitch chains of fragmented orthologs.

    :param chain_file: genome alignment chain file
    :param chain_scores_file: XGBoost output with per-(gene, chain)
        orthology probabilities
    :param bed_file: bed12 file containing gene loci
    :param fragments_only: if True, keep only genes whose best path
        spans more than one chain
    :returns: dict {gene_id: [chain_id, ...]} -- the highest-scoring
        ordered sequence of chains covering each gene
    """
    gene_score_dict = read_gene_scores(chain_scores_file)
    # func read_chain_file returns data about all chains in the file
    # however, we need only orthologous ones
    # to avoid contamination with paralogous chains we further filter the
    # chain_id_to_loc dictionary
    # gene score dict: gene_id: [(chain, score), (chain, score), ...]
    # Iterate over dict values (lists of tuples), get the 1st elem of each tuple (chain_id)
    orth_chains = set(
        flatten([v[0] for v in vals] for vals in gene_score_dict.values())
    )
    chain_id_to_loc__no_filt = read_chain_file(chain_file)
    chain_id_to_loc = {
        k: v for k, v in chain_id_to_loc__no_filt.items() if k in orth_chains
    }
    genes_to_exon_coords = read_gene_loci(bed_file)
    gene_to_path = {}
    task_size = len(gene_score_dict.keys())
    count = 1
    for gene, intersecting_chains_wscores in gene_score_dict.items():
        # periodic progress report
        if count % 500 == 0:
            print(f"Processing gene: {count} / {task_size}", flush=True)
        count += 1
        if len(intersecting_chains_wscores) <= 1:
            # a single chain cannot be stitched to anything
            continue
        # intersecting chains: list of tuples
        # [(chain, score), (chain, score), ...]
        # chains that intersect this gene
        exon_coords = genes_to_exon_coords.get(gene)
        if exon_coords is None:
            # must never happen
            raise ValueError(f"Cannot find a bed track for {gene}")
        # extract some extra information about exon coverage
        intersecting_chains = [x[0] for x in intersecting_chains_wscores]
        chain_id_to_exon_cov = check_exon_coverage(
            intersecting_chains, chain_id_to_loc, exon_coords
        )
        # for k, v in chain_id_to_exon_cov.items():
        #     print(k, v)
        chain_id_covers_all = {
            k: all(v for v in val) for k, val in chain_id_to_exon_cov.items()
        }
        if any(chain_id_covers_all.values()):
            # if there is a chain that covers the gene entirely: skip this
            continue
        average_exon_coverage = get_average_exon_cov(
            chain_id_to_exon_cov, len(exon_coords)
        )
        if average_exon_coverage > EXON_COV_THRESHOLD:
            # skip if each exon is covered > EXON_COV_THRESHOLD times in average
            continue
        # Initiate chain graph
        chain_graph = build_chain_graph(chain_id_to_loc, intersecting_chains_wscores)
        add_source_sink_graph(chain_graph)
        # Topologically sort chain graph
        sorted_vertices = chain_graph.topological_sort()
        # Find 'longest' (=highest scoring) path in the graph =
        # find shortest path in the graph with negative scoring vertices.
        longest_path_chain_graph = find_shortest_path(
            chain_graph, SOURCE, SINK, sorted_vertices
        )
        _, _path = longest_path_chain_graph
        path = _path[1:]  # starts with [SOURCE, ... ]
        if fragments_only and len(path) < 2:
            # this gene is covered entirely by a single chain
            continue
        gene_to_path[gene] = path
        # free graph memory before the next gene
        del chain_graph
    return gene_to_path
if __name__ == "__main__":
    # time the whole run for the trailing report line
    start_time = dt.now()
    cli_args = parse_args()
    gene_to_path = stitch_scaffolds(
        cli_args.chain_file,
        cli_args.chain_scores_file,
        cli_args.bed_file,
        fragments_only=cli_args.only_fragmented,
    )
    # save output: one gene per line, chains comma-separated
    for gene_id, chain_path in gene_to_path.items():
        chain_path_str = ",".join(map(str, chain_path))
        print(f"{gene_id}\t{chain_path_str}")
    elapsed = dt.now() - start_time
    print(f"# Elapsed: {elapsed}")
|
[
"argparse.ArgumentParser",
"collections.defaultdict",
"common.make_cds_track",
"datetime.datetime.now",
"sys.exit"
] |
[((2706, 2731), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2729, 2731), False, 'import argparse\n'), ((3446, 3463), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3457, 3463), False, 'from collections import defaultdict\n'), ((13333, 13341), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (13339, 13341), True, 'from datetime import datetime as dt\n'), ((3207, 3218), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3215, 3218), False, 'import sys\n'), ((13673, 13681), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (13679, 13681), True, 'from datetime import datetime as dt\n'), ((4503, 4523), 'common.make_cds_track', 'make_cds_track', (['line'], {}), '(line)\n', (4517, 4523), False, 'from common import make_cds_track\n')]
|
"""
pyjs9.py: connects Python and JS9 via the JS9 (back-end) helper
"""
from __future__ import print_function
import time
import json
import base64
import logging
from traceback import format_exc
from threading import Condition
from io import BytesIO
import requests
__all__ = ['JS9', 'js9Globals']
"""
pyjs9.py connects Python and JS9 via the JS9 (back-end) helper
- The JS9 class constructor connects to a single JS9 instance in a web page.
- The JS9 object supports the JS9 Public API and a shorter command-line syntax.
- See: http://js9.si.edu/js9/help/publicapi.html
- Send/retrieve numpy arrays and astropy (or pyfits) hdulists to/from js9.
- Use python-socketio for fast, persistent connections to the JS9 back-end
"""
# pyjs9 version
__version__ = '3.6'
# try to be a little bit neat with global parameters
js9Globals = {}
js9Globals['version'] = __version__
# what sort of fits verification gets done on SetFITS() output?
# see astropy documentation on write method
js9Globals['output_verify'] = 'ignore'
# retrieve image data from JS9 as an array or as base64 encoded string
# in the early days, base64 seemed to be faster
# js9Globals['retrieveAs'] = 'base64'
# array allows us to deal with larger images
js9Globals['retrieveAs'] = 'array'
# load fits, if available
try:
from astropy.io import fits
js9Globals['fits'] = 1
except ImportError:
try:
import pyfits as fits
if fits.__version__ >= '2.2':
js9Globals['fits'] = 2
else:
js9Globals['fits'] = 0
except ImportError:
js9Globals['fits'] = 0
# load numpy, if available
try:
import numpy
js9Globals['numpy'] = 1
except ImportError:
js9Globals['numpy'] = 0
# load socket.io, if available
try:
import socketio
logging.info('set socketio transport')
js9Globals['transport'] = 'socketio'
js9Globals['wait'] = 10
except ImportError:
logging.info('no python-socketio, use html transport')
js9Globals['transport'] = 'html'
js9Globals['wait'] = 0
# utilities
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
# numpy-dependent routines
# FITS <-> numpy conversion helpers, defined only when numpy is importable.
if js9Globals['numpy']:
    def _bp2np(bitpix): # pylint: disable=too-many-return-statements
        """
        Convert FITS bitpix to numpy datatype.

        Raises ValueError for bitpix values FITS/JS9 do not support.
        """
        if bitpix == 8:
            return numpy.uint8
        if bitpix == 16:
            return numpy.int16
        if bitpix == 32:
            return numpy.int32
        if bitpix == 64:
            return numpy.int64
        if bitpix == -32:
            return numpy.float32
        if bitpix == -64:
            return numpy.float64
        # -16 is the JS9/FITS convention for unsigned 16-bit data
        if bitpix == -16:
            return numpy.uint16
        raise ValueError('unsupported bitpix: %d' % bitpix)
    # (source dtype, dtype to send) pairs: dtypes JS9/FITS cannot represent
    # (int8, uint32, float16) are widened to the nearest supported type
    _NP_TYPE_MAP = (
        # pylint: disable=bad-whitespace
        (numpy.uint8 , numpy.uint8, ),
        (numpy.int8 , numpy.int16, ),
        (numpy.uint16 , numpy.uint16, ),
        (numpy.int16 , numpy.int16, ),
        (numpy.int32 , numpy.int32, ),
        (numpy.uint32 , numpy.int64, ),
        (numpy.int64 , numpy.int64, ),
        (numpy.float16, numpy.float32,),
        (numpy.float32, numpy.float32,),
        (numpy.float64, numpy.float64,),
    )
    def _cvt2np(ndarr: numpy.ndarray) -> numpy.ndarray:
        """
        Cast an array to the nearest JS9/FITS-representable dtype
        (see _NP_TYPE_MAP); arrays with unmapped dtypes pass through.
        """
        # NOTE cvt2np may be merged into np2bp
        dtype = ndarr.dtype
        for t in _NP_TYPE_MAP:
            if numpy.issubdtype(dtype, t[0]):
                return ndarr.astype(t[1])
        return ndarr
    def _np2bp(dtype): # pylint: disable=too-many-return-statements
        """
        Convert numpy datatype to FITS bitpix
        """
        if dtype == numpy.uint8:
            return 8
        if dtype == numpy.int16:
            return 16
        if dtype == numpy.int32:
            return 32
        if dtype == numpy.int64:
            return 64
        if dtype == numpy.float32:
            return -32
        if dtype == numpy.float64:
            return -64
        # -16 is the JS9/FITS convention for unsigned 16-bit data
        if dtype == numpy.uint16:
            return -16
        raise ValueError('unsupported dtype: %s' % dtype)
    def _bp2py(bitpix): # pylint: disable=too-many-return-statements
        """
        Convert FITS bitpix to python (struct/array module) type code
        """
        if bitpix == 8:
            return 'B'
        if bitpix == 16:
            return 'h'
        if bitpix == 32:
            return 'l'
        if bitpix == 64:
            return 'q'
        if bitpix == -32:
            return 'f'
        if bitpix == -64:
            return 'd'
        if bitpix == -16:
            return 'H'
        raise ValueError('unsupported bitpix: %d' % bitpix)
    def _im2np(im):
        """
        Convert GetImageData object to a 2-D numpy array.

        The transfer encoding is chosen by js9Globals['retrieveAs']:
        'array' (a plain list of pixel values) or 'base64' (an encoded
        raw byte buffer).
        """
        w = int(im['width'])
        h = int(im['height'])
        # d: depth; only single-plane images are produced here
        d = 1
        bp = int(im['bitpix'])
        dtype = _bp2np(bp)
        # expected byte length of the raw pixel buffer
        dlen = h * w * abs(bp) // 8
        if js9Globals['retrieveAs'] == 'array':
            s = im['data'][0:h*w]
            if d > 1:
                arr = numpy.array(s, dtype=dtype).reshape((d, h, w))
            else:
                arr = numpy.array(s, dtype=dtype).reshape((h, w))
        elif js9Globals['retrieveAs'] == 'base64':
            s = base64.decodebytes(im['data'].encode())[0:dlen]
            if d > 1:
                arr = numpy.frombuffer(s, dtype=dtype).reshape((d, h, w))
            else:
                arr = numpy.frombuffer(s, dtype=dtype).reshape((h, w))
        else:
            raise ValueError('unknown retrieveAs type for GetImageData()')
        return arr
class JS9:
"""
The JS9 class supports communication with an instance of JS9 in a web
page, utilizing the JS9 Public API calls as class methods.
JS9's public access library is documented here:
- http://js9.si.edu/js9/help/publicapi.html
In addition, a number of special methods are implemented to facilitate data
access to/from well-known Python objects:
- GetNumpy: retrieve a FITS image or an array into a numpy array
- SetNumpy: send a numpy array to JS9 for display
- GetFITS: retrieve a FITS image into an astropy (or pyfits) HDU list
- SetFITS: send a astropy (or pyfits) HDU list to JS9 for display
"""
def __init__(self, host='http://localhost:2718', id='JS9', maxtries=5, delay=1, debug=False): # pylint: disable=redefined-builtin, too-many-arguments
"""
:param host: host[:port] (def: 'http://localhost:2718')
:param id: the JS9 display id (def: 'JS9')
:rtype: JS9 object connected to a single instance of js9
The JS9() contructor takes its first argument to be the host (and
optional port) on which the back-end js9Helper is running. The default
is 'http://localhost:2718', which generally will be the correct value
for running locally. The default port (2718) will be added if no port
value is found. The string 'http://' will be prefixed to the host if a
URL protocol is not supplied. Thus, to connect to the main JS9 web
site, you can use host='js9.si.edu'.
The second argument is a JS9 display id on the web page. The
default is 'JS9' which is the default JS9 display id. Thus:
>>> JS9 = pyjs9.JS9()
is appropriate for local web pages having only one JS9 display.
"""
self.__dict__['id'] = id
# add default port, if necessary
c = host.rfind(':')
s = host.find('/')
if c <= s:
host += ':2718'
if s < 0:
host = 'http://' + host
self.__dict__['host'] = host
# open socket.io connection, if necessary
if js9Globals['transport'] == 'socketio':
try:
if debug:
self.sockio = socketio.Client(logger=True,
engineio_logger=True)
else:
self.sockio = socketio.Client()
self.sockio.connect(host)
except Exception as e: # pylint: disable=broad-except
logging.warning('socketio connect failed: %s, using html', e)
js9Globals['transport'] = 'html'
self._block_cb = None
# wait for connect be ready, but success doesn't really matter here
tries = 0
while tries < maxtries:
try:
self._alive()
except Exception: # pylint: disable=broad-except
time.sleep(delay)
tries = tries - 1
else:
break
def __setitem__(self, itemname, value):
"""
An internal routine to process some assignments specially
"""
self.__dict__[itemname] = value
if itemname in ('host', 'id',):
self._alive()
def _alive(self):
"""
An internal routine to send a test message to the helper
"""
self.send(None, msg='alive')
def sockioCB(self, *args):
"""
Internal routine
"""
logging.debug('socketio callback, args: %s', args)
self.__dict__['sockioResult'] = args[0]
self._block_cb.acquire()
self._block_cb.notify()
self._block_cb.release()
def send(self, obj, msg='msg'):
"""
:obj: dictionary containing command and args keys
:rtype: returned data or info (in format specified by public api)
examples:
>>> js9 = pyjs9.JS9()
>>> js9.send({'cmd': 'GetColormap'})
{u'bias': 0.5, u'colormap': u'cool', u'contrast': 1}
>>> js9.send({'cmd': 'SetColormap', 'args': ['red']})
'OK'
"""
if obj is None:
obj = {}
obj['id'] = self.__dict__['id']
if js9Globals['transport'] == 'html': # pylint: disable=no-else-return
host = self.__dict__['host']
try:
url = requests.post(host + '/' + msg, json=obj)
except IOError as e:
raise IOError('Cannot connect to {0}: {1}'.format(host, e))
urtn = url.text
if 'ERROR:' in urtn:
raise ValueError(urtn)
try:
# TODO: url.json() decode the json for us:
# http://www.python-requests.org/en/latest/user/quickstart/#json-response-content
# res = url.json()
res = json.loads(urtn, object_hook=_decode_dict)
except ValueError: # not json
res = urtn
if type(res) == str:
res = res.strip()
return res
else:
self.__dict__['sockioResult'] = ''
self._block_cb = Condition()
self._block_cb.acquire()
self.sockio.emit('msg', obj, callback=self.sockioCB)
self._block_cb.wait(timeout=js9Globals['wait'])
self._block_cb.release()
# self.sockio.wait_for_callbacks(seconds=js9Globals['wait'])
if self.__dict__['sockioResult'] and \
isinstance(self.__dict__['sockioResult'], str) and \
'ERROR:' in self.__dict__['sockioResult']:
raise ValueError(self.__dict__['sockioResult'])
return self.__dict__['sockioResult']
def close(self):
"""
Close the socketio connection and disconnect from the server
"""
if js9Globals['transport'] == 'socketio':
try:
self.sockio.disconnect()
except Exception as e: # pylint: disable=broad-except
logging.error('socketio close failed: %s', e)
    # real implementations when a fits package was importable; otherwise
    # stubs that raise immediately
    if js9Globals['fits']:
        def GetFITS(self):
            """
            :rtype: fits hdulist

            To read FITS data or a raw array from js9 into fits, use the
            'GetFITS' method. It takes no args and returns an hdu list::

              >>> hdul = j.GetFITS()
              >>> hdul.info()
              Filename: StringIO.StringIO
              No.    Name         Type      Cards   Dimensions   Format
              0    PRIMARY     PrimaryHDU      24  (1024, 1024)  float32
              >>> data = hdul[0].data
              >>> data.shape
              (1024, 1024)
            """
            # get image data from JS9
            im = self.GetImageData(js9Globals['retrieveAs'])
            # if the image is too large, we can get back an empty string
            if im == '':
                raise ValueError('GetImageData failed: image too large for Python transport?')
            # convert to numpy
            arr = _im2np(im)
            # add fits cards
            # create FITS primary hdu from numpy array
            hdu = fits.PrimaryHDU(arr)
            hdulist = fits.HDUList([hdu])
            return hdulist
        def SetFITS(self, hdul, name=None):
            """
            :param hdul: fits hdulist
            :param name: fits file or object name (used as id)

            After manipulating or otherwise modifying a fits hdulist (or
            making a new one), you can display it in js9 using the 'SetFITS'
            method, which takes the hdulist as its sole argument::

              >>> j.SetFITS(nhdul)

            Note that this routine creates a new image in the JS9 display. If
            you want to update the current image, use RefreshImage. In that
            case, the hdul's numpy array must be converted to a list:

              >>>> j.RefreshImage(hdul[0].data.tolist())
            """
            if not js9Globals['fits']:
                raise ValueError('SetFITS not defined (fits not found)')
            if not isinstance(hdul, fits.HDUList):
                # js9Globals['fits'] == 1 means astropy; 2 means pyfits
                if js9Globals['fits'] == 1:
                    raise ValueError('requires astropy.HDUList as input')
                raise ValueError('requires pyfits.HDUList as input')
            # in-memory string
            memstr = BytesIO()
            # write fits to memory string
            hdul.writeto(memstr, output_verify=js9Globals['output_verify'])
            # get memory string as an encoded string
            encstr = base64.b64encode(memstr.getvalue()).decode()
            # set up JS9 options
            opts = {}
            if name:
                opts['filename'] = name
            # send encoded file to JS9 for display
            got = self.Load(encstr, opts)
            # finished with memory string
            memstr.close()
            return got
    else:
        @staticmethod
        def GetFITS():
            """
            This method is not defined because fits is not installed.
            """
            raise ValueError('GetFITS not defined (astropy.io.fits not found)')
        @staticmethod
        def SetFITS():
            """
            This method is not defined because fits is not installed.
            """
            raise ValueError('SetFITS not defined (astropy.io.fits not found)')
if js9Globals['numpy']:
def GetNumpy(self):
"""
:rtype: numpy array
To read a FITS file or an array from js9 into a numpy array, use
the 'GetNumpy' method. It takes no arguments and returns the
np array::
>>> j.get('file')
'/home/eric/data/casa.fits[EVENTS]'
>>> arr = j.GetNumpy()
>>> arr.shape
(1024, 1024)
>>> arr.dtype
dtype('float32')
>>> arr.max()
51.0
"""
# get image data from JS9
im = self.GetImageData(js9Globals['retrieveAs'])
# if the image is too large, we can get back an empty string
if im == '':
raise ValueError('GetImageData failed: image too large for Python transport?')
# convert to numpy
arr = _im2np(im)
return arr
def SetNumpy(self, arr, filename=None, dtype=None):
"""
:param arr: numpy array
:param name: file or object name (used as id)
:param dtype: data type into which to convert array before sending
After manipulating or otherwise modifying a numpy array (or making
a new one), you can display it in js9 using the 'SetNumpy' method,
which takes the array as its first argument::
>>> j.SetNumpy(arr)
An optional second argument specifies a datatype into which the
array will be converted before being sent to js9. This is
important in the case where the array has datatype np.uint64,
which is not recognized by js9::
>>> j.SetNumpy(arru64)
...
ValueError: uint64 is unsupported by JS9 (or FITS)
>>> j.SetNumpy(arru64,dtype=np.float64)
Also note that np.int8 is sent to js9 as int16 data, np.uint32 is
sent as int64 data, and np.float16 is sent as float32 data.
Note that this routine creates a new image in the JS9 display. If
you want to update the current image, use RefreshImage. In that
case, the numpy array must be converted to a list:
>>>> j.RefreshImage(arr.tolist())
"""
if not isinstance(arr, numpy.ndarray):
raise ValueError('requires numpy.ndarray as input')
if dtype and dtype != arr.dtype:
narr = arr.astype(dtype)
else:
narr = _cvt2np(arr)
if not narr.flags['C_CONTIGUOUS']:
narr = numpy.ascontiguousarray(narr)
# parameters to pass back to JS9
bp = _np2bp(narr.dtype)
(h, w) = narr.shape
dmin = narr.min().tolist()
dmax = narr.max().tolist()
# base64-encode numpy array in native format
encarr = base64.b64encode(narr.tostring()).decode()
# create object to send to JS9 containing encoded array
hdu = {'naxis': 2, 'naxis1': w, 'naxis2': h, 'bitpix': bp,
'dmin': dmin, 'dmax': dmax, 'encoding': 'base64',
'image': encarr}
if filename:
hdu['filename'] = filename
# send encoded file to JS9 for display
return self.Load(hdu)
else:
@staticmethod
def GetNumpy():
"""
This method is not defined because numpy in not installed.
"""
raise ValueError('GetNumpy not defined (numpy not found)')
@staticmethod
def SetNumpy():
"""
This method is not defined because numpy in not installed.
"""
raise ValueError('SetNumpy not defined (numpy not found)')
def Load(self, *args):
"""
Load an image into JS9
call:
JS9.Load(url, opts)
where:
- url: url, fits object, or in-memory FITS
- opts: object containing image parameters
NB: In Python, you probably want to call JS9.SetFITS() or
JS9.SetNumpy() to load a local file into JS9.
Load a FITS file or a PNG representation file into JS9. Note that
a relative URL is relative to the JS9 install directory.
You also can pass an in-memory buffer containing a FITS file, or a
string containing a base64-encoded FITS file.
Finally, you can pass a fits object containing the following
properties:
- naxis: number of axes in the image
- axis: array of image dimensions for each axis or ...
- naxis[n] image dimensions of each axis (naxis1, naxis2, ...)
- bitpix: FITS bitpix value
- head: object containing header keywords as properties
- image: list containing image pixels
- dmin: data min (optional)
- dmax: data max (optional)
To override default image parameters, pass the image opts argument:
>>> j.Load('png/m13.png', {'scale':'linear', 'colormap':'sls'})
"""
return self.send({'cmd': 'Load', 'args': args})
def LoadWindow(self, *args):
"""
Load an image into a light window or a new (separate) window
call:
JS9.LoadWindow(url, opts, type, html, winopts)
where:
- url: remote URL image to load
- opts: object containing image parameters
- type: "light" or "new"
- html: html for the new page (def: menubar, image, colorbar)
- winopts: for "light", optional dhtml window options
returns:
- id: the id of the JS9 display div
This routine will load an image into a light-weight window or an
entirely new window. The url and opts arguments are identical to
the standard JS9.Load() call, except that opts can contain:
- id: string specifying the id of the JS9 display being created:
if no id is specified, a unique id is generated
- clone: the id of a display to clone when creating a light window:
the menubar and colorbar will be created if and only if they are
present in the cloned display
The type argument determines whether to create a light-weight
window ("light", the default) or a new separate window ("new").
You can use the html argument to supply different web page elements
for the window. Furthermore, if you create a light window, a default
set of DynamicDrive dhtmlwindow parameters will be used to make the
window the correct size for the default html:
"width=512px,height=542px,center=1,resize=1,scrolling=1"
You can supply your own parameters for the new dhtmlwindow using the
winOpts argument. See the Dynamic Drive web site:
http://www.dynamicdrive.com/dynamicindex8/dhtmlwindow/index.htm
for more information about their light-weight window.
To create a new light window without loading an image, use:
>>>> JS9.LoadWindow("", "", "light");
"""
return self.send({'cmd': 'LoadWindow', 'args': args})
def LoadProxy(self, *args):
"""
Load an FITS image link into JS9 using a proxy server
call:
JS9.LoadProxy(url, opts)
where:
- url: remote URL link to load
- opts: object containing image parameters
Load a FITS file specified by an arbitrary URL into JS9 using
the JS9 back-end helper as a proxy server. Not all back-end
servers support the proxy functionality. The main JS9 web
site does support proxy service, and can be used to view
images from arbitrary URLs.
The JS9.LoadProxy() call takes a URL as its first argument.
This URL will be retrieved using curl or wget and stored on the
back-end server in a directory specifically tied to the web page.
(The directory and its contents will be deleted when the page is
unloaded.) JS9 then will load the file from this directory.
Note that since the file resides on the back-end server, all
back-end analysis defined on that server is available.
To override default image parameters, pass the image opts argument:
>>> j.LoadProxy('http://hea-www.cfa.harvard.edu/~eric/coma.fits',
{'scale':'linear', 'colormap':'sls'})
If an onload callback function is specified in opts, it will be called
after the image is loaded:
>>> j.LoadProxy('http://hea-www.cfa.harvard.edu/~eric/coma.fits',
{'scale': 'linear', 'onload': func})
"""
return self.send({'cmd': 'LoadProxy', 'args': args})
def GetStatus(self, *args):
"""
Get Processing Status
call:
status = JS9.GetStatus(type, id)
where:
- type: the type of status
- id: the id of the file that was loaded into JS9
returns:
- status: status of the processing
This routine returns the status of one of the following specified
asynchronous processing types: "Load", "CreateMosaic",
"DisplaySection", "LoadCatalog", "LoadRegions", "ReprojectData",
"RotateData", "RunAnalysis".
A status of "complete" means that the image is fully processed. Other
statuses include:
- processing: the image is being processed
- loading: the image is in process of loading ("Load" only)
- error: image did not load due to an error
- other: another image is loaded into this display
- none: no image is loaded into this display
"""
return self.send({'cmd': 'GetStatus', 'args': args})
def GetLoadStatus(self, *args):
"""
Get Load Status
call:
status = JS9.GetLoadStatus(id)
where:
- id: the id of the file that was loaded into JS9
returns:
- status: status of the load
This routine returns the status of the load of this image.
Provided for backward compatibility, it simply calls the more general
GetStatus() routine with "Load" as the first argument.
A status of 'complete' means that the image is fully loaded. Other
statuses include:
- loading: the image is in process of loading
- error: image did not load due to an error
- other: another image is loaded into this display
- none: no image is loaded into this display
"""
return self.send({'cmd': 'GetLoadStatus', 'args': args})
    def DisplayImage(self, *args):
        """
        Display an image

        call:

        JS9.DisplayImage(step)

        where:

        - step: starting step to take when displaying the image

        The display steps are: "colors" (remake colors when cmap has changed),
        "scaled" (rescale data values), "primary" (convert scaled data values
        to color values), and "display" (write color values to the web page).

        The default step is "primary", which displays the image without
        recalculating color data, scaled data, etc. This generally is what you
        want, unless you have changed parameter(s) used in a prior step.
        """
        return self.send({'cmd': 'DisplayImage', 'args': args})
def RefreshImage(self, *args):
"""
Re-read the image data and re-display
call:
JS9.RefreshImage(input)
where:
- input: python list
This routine can be used, for example, in laboratory settings where
data is being gathered in real-time and the JS9 display needs to be
refreshed periodically. The first input argument can be one of the
following:
- a list containing image pixels (for numpy, use tolist() to convert)
- a two-dimensional list containing image pixels
- a dictionary containing a required image property and any of the
following optional properties:
- naxis: number of axes in the image
- axis: array of image dimensions for each axis or ...
- naxis[n] image dimensions of each axis (naxis1, naxis2, ...)
- bitpix: FITS bitpix value
- head: object containing header keywords as properties
- dmin: data min (optional)
- dmax: data max (optional)
When passing an object as input, the required image property that
contains the image data can be a list or a list of lists containing
data. It also can contain a base64-encoded string containing a list.
This latter can be useful when calling JS9.RefreshImage() via HTTP.
Ordinarily, when refreshing an image, there is no need to specify the
optional axis, bitpix, or header properties. But note that you actually
can change these values on the fly, and JS9 will process the new data
correctly. Also, if you do not pass dmin or dmax, they will be
calculated by JS9.
Note that you can pass a complete FITS file to this routine. It will be
passed to the underlying FITS-handler before being displayed. Thus,
processing time is slightly greater than if you pass the image data
directly.
The main difference between JS9.RefreshImage() and JS9.Load() is
that the former updates the data into an existing image, while the
latter adds a completely new image to the display.
"""
return self.send({'cmd': 'RefreshImage', 'args': args})
def CloseImage(self, *args):
"""
Clear the image from the display and mark resources for release
call:
JS9.CloseImage()
Each loaded image claims a non-trivial amount of memory from a finite
amount of browser heap space. For example, the default 32-bit version
of Google Chrome has a memory limit of approximately 500Mb. If you are
finished viewing an image, closing it tells the browser that the
image's memory can be freed. In principle, this is can help reduce
overall memory usage as successive images are loaded and discarded.
Note, however, that closing an image only provides a hint to the
browser, since this sort of garbage collection is not directly
accessible to JavaScript programming.
Some day, all browsers will support full 64-bit addressing and this
problem will go away ...
"""
return self.send({'cmd': 'CloseImage', 'args': args})
def GetImageData(self, *args):
"""Get image data and auxiliary info for the specified image
call:
imdata = JS9.GetImageData(dflag)
where:
- dflag: specifies whether the data should also be returned
returns:
- imdata: image data object
NB: In Python, you probably want to call JS9.GetFITS() or
JS9.GetNumpy() to retrieve an image.
The image data object contains the following information:
- id: the id of the file that was loaded into JS9
- file: the file or URL that was loaded into JS9
- fits: the FITS file associated with this image
- source: 'fits' if a FITS file was downloaded, 'fits2png' if a
representation file was retrieved
- imtab: 'image' for FITS images and png files, 'table' for FITS
binary tables
- width: x dimension of image
- height: y dimension of image
- bitpix: FITS bits/pixel of each image element (8 for unsigned
char, 16, 32 for signed integer, -32 or -64 for float)
- header: object containing FITS header values
- data: buffer containing raw data values
This call can return raw data for subsequent use in local analysis
tasks. The format of the returned data depends on the exact value of
dflag. If dflag is the boolean value true, an HTML5 typed array
is returned, which translates into a dictionary of pixels values in
Python. While typed arrays are more efficient than ordinary JavaScript
arrays, this is almost certainly not what you want in Python.
If dflag is the string 'array', a Python list of pixel values is
returned. Intuitively, this would seem to what is wanted, but ... it
appears that base64-encoded strings are transferred more quickly
through the JS9 helper than are binary data.
If dflag is the string 'base64', a base64-encoded string is returned.
Oddly, this seems to be the fastest method of transferring
data via socket.io to an external process such as Python, and, in
fact, is the method used by the pyjs9 numpy and fits routines.
The file value can be a FITS file or a representation PNG file. The
fits value will be the path of the FITS file associated with this
image. For a presentation PNG file, the path generally will be relative
to the JS9 install directory. For a normal FITS file, the path usually
is an absolute path to the FITS file.
"""
return self.send({'cmd': 'GetImageData', 'args': args})
def GetDisplayData(self, *args):
"""
Get image data for all images loaded into the specified display
call:
imarr = JS9.GetDisplayData()
returns:
- imarr: array of image data objects
The JS9.GetDisplayData() routine returns an array of image data
objects, one for each images loaded into the specified display.
That is, it returns the same type of information as JS9.GetImageData(),
but does so for each image associated with the display, not just the
current image.
"""
return self.send({'cmd': 'GetDisplayData', 'args': args})
def DisplayPlugin(self, *args):
"""
Display plugin in a light window
call:
JS9.DisplayPlugin(plugin)
where:
- plugin: name of the plugin
Toggle the light-window display of the named plugin, as is done
by the View and Analysis menus. That is, if the plugin is not
visible, make it visible. If the plugin is visible, hide it.
You can supply the full class and plugin name or just the name, using
exact case or lower case, e.g.:
- JS9Panner or panner
- JS9Magnifier or magnifier
- JS9Info or info
- JS9Console or console
- DataSourcesArchivesCatalogs or archivescatalogs
- FitsBinning or binning
- ImExamEncEnergy or encenergy
- ImExamPxTabl or pxtabl
- ImExamRadialProj or radialproj
- ImExamHistogram or histogram
- ImExamRegionStats or regionstats
- ImExamXProj or xproj
- ImExamYProj or yproj
- ImExam3dPlot or 3dplot
- ImExamContours or contours
As with plugins in the View and Analysis menus, this routine does
nothing if the plugin is explicitly defined on the web page.
"""
return self.send({'cmd': 'DisplayPlugin', 'args': args})
def DisplayExtension(self, *args):
"""
Display an extension from a multi-extension FITS file
call:
JS9.DisplayExtension(extid, opts)
where:
- extid: HDU extension number or the HDU's EXTNAME value
- opts: object containing options
This routine allows you to display images and even binary
tables from a multi-extension FITS file. (See, for example,
http://fits.gsfc.nasa.gov/fits_primer.htmlthe FITS Primer
for information about HDUs and multi-extension FITS).
"""
return self.send({'cmd': 'DisplayExtension', 'args': args})
def DisplaySection(self, *args):
"""
Extract and display a section of a FITS file
call:
JS9.DisplaySection(opts)
where:
- opts: object containing options
This routine allows you to extract and display a section of FITS file.
The opts object contains properties specifying how to generate and
display the section:
- xcen: x center of the section in file (physical) coords (required)
- ycen: y center of the section in file (physical) coords (required)
- xdim: x dimension of section to extract before binning
- ydim: y dimension of section to extract before binning
- bin: bin factor to apply after extracting the section
- filter: for tables, row/event filter to apply when extracting a
section
- separate: if true, display as a separate image (def: to update
the current image)
All properties are optional: by default, the routine will extract a bin
1 image from the center of the file.
For example, if an image has dimensions 4096 x 4096, then specifying:
- center: 1024, 1024
- dimensions: 1024, 1024
- bin: 2
will bin the upper left 1024 x 1024 section of the image by 2 to
produce a 512 x 512 image. Note that 0,0 can be used to specify the
file center.
Table filtering allows you to select rows from an FITS binary table
(e.g., an X-ray event list) by checking each row against an expression
involving the columns in the table. When a table is filtered, only
valid rows satisfying these expressions are used to make the image.
A filter expression consists of an arithmetic or logical operation
involving one or more column values from a table. Columns can be
compared to other columns or to numeric constants. Standard JavaScript
math functions can be applied to columns. JavaScript (or C) semantics
are used when constructing expressions, with the usual precedence and
associativity rules holding sway:
Operator Associativity
-------- -------------
() left to right
! (bitwise not) - (unary minus) right to left
* / left to right
+ - left to right
< <= > >= left to right
== != left to right
& (bitwise and) left to right
^ (bitwise exclusive or) left to right
| (bitwise inclusive or) left to right
&& (logical and) left to right
|| (logical or) left to right
= right to left
For example, if energy and pha are columns in a table, then the
following are valid expressions:
pha > 1
energy == pha
pha > 1 && energy <= 2
max(pha,energy) >= 2.5
NB: JS9 uses cfitsio by default (you can, but should not, use the
deprecated fitsy.js), and therefore follows cfitsio filtering
conventions, which are documented in:
https://heasarc.gsfc.nasa.gov/docs/software/fitsio/c/c_user/node97.html
"""
return self.send({'cmd': 'DisplaySection', 'args': args})
def DisplaySlice(self, *args):
"""
Display a slice of a FITS data cube
call:
JS9.DisplaySlice(slice, opts)
where:
- slice: slice description or slice number
- opts: object containing options
This routine allows you to display a 2D slice of a 3D or 4D
FITS data cube, i.e. a FITS image containing 3 or 4 axes.
The slice parameter can either be the numeric value of the
slice in the third (or fourth) image dimension (starting
with 1) or it can be a slice description string: a combination
of asterisks and a numeric value defines the slice axis. Thus, for
example, in a 1024 x 1024 x 16 cube, you can display the sixth slice
along the third axis in one of two ways:
>>> JS9.DisplaySlice(6)
or:
>>> JS9.DisplaySlice("*,*,6")
If the image was organized as 16 x 1024 x 1024, you would use the
string description:
>>> JS9.DisplaySlice("6,*,*")
By default, the new slice replaces the data in the currently displayed
image. You can display the slice as a separate image by supplying
an opts object with its separate property set to true.
For example:
>>> JS9.DisplaySlice("6,*,*", {separate: true})
will display the sixth slice of the first image dimension separately
from the original file, allowing blinking, image blending, etc. between
the two "files". Note that the new id and filename are adjusted to be
the original file's values with the cfitsio image section [6:6,*,*]
appended.
"""
return self.send({'cmd': 'DisplaySlice', 'args': args})
def MoveToDisplay(self, *args):
"""
Move an image to a new JS9 display
call:
JS9.MoveToDisplay(dname)
where:
- dname: name of JS9 display to which the current image will be moved
The JS9.MoveToDisplay() routine moves the current image to the
specified display:
>>> JS9.MoveToDisplay("myJS9")
will move the current image displayed in the "JS9" display window to
the "myJS9" window.
Note that the new JS9 display must already exist. New displays can be
created with the JS9.LoadWindow() public access routine or
the File:new JS9 light window menu option.
"""
return self.send({'cmd': 'MoveToDisplay', 'args': args})
def BlendImage(self, *args):
"""
Blend the image in an image stack using W3C composite/blend modes
call:
JS9.BlendImage(blendMode, opacity)
calling sequences:
JS9.BlendImage() # return current blend params
JS9.BlendImage(true||false) # turn on/off blending
JS9.BlendImage(mode, opacity) # set blend mode and/or opacity
where:
- mode: one of the W3C bend modes
- opacity: the opacity of the blended image (percent from 0 to 1)
Image processing programs such as Adobe Photoshop and Gimp allow you
to blend a stack of images together by mixing the RGB colors. The W3C
has defined a number of composite and blending modes which have been
implemented by Firefox, Chrome, and Safari (what about IE?):
- normal
- multiply
- screen
- overlay
- darken
- lighten
- color-dodge
- color-burn
- hard-light
- soft-light
- difference
- exclusion
- hue
- saturation
- color
- luminosity
In addition, the following Porter-Duff compositing modes are available
(though its unclear how useful they are in JS9 image processing):
- clear
- copy
- source-over
- destination-over
- source-in
- destination-in
- source-out
- destination-out
- source-atop
- destination-atop
- xor
- lighter
Blending and compositing modes are described in detail in:
https://www.w3.org/TR/compositing-1
https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API/Tutorial/Compositing
JS9 allows you to use these modes to blend images together. If
you load two images of the same object into JS9, you can use
the JS9.ReprojectData() routine to align them by WCS. You then
can blend one image into the other by specifying a blend mode
and an optional opacity. For example, if chandra.fits and
spitzer.fits are two aligned images of the same object, and
chandra.fits is currently being displayed, you can blend
spitzer into chandra using the "screen" blend and opacity 0.9
mode this way:
>>> JS9.BlendImage("screen", 0.9)
After the spitzer image is blended, both images will be
displayed as part of the chandra.fits display. However,
changing the colormap, scale, contrast, or bias will only
affect the current chandra image, not the blended spitzer
part. In this way, you can continue to manipulate the current
image and the image blending will update automatically.
Also note that the spitzer image is still available separately
for display and manipulation. You can switch to displaying
spitzer and change colormap, scale, bias, contrast, etc. But
since the images are now blended, changes to spitzer will be
reflected in the spitzer part of the blended chandra
display. Thus, if you change the colormap on the display of
spitzer, and change back to chandra, the blended chandra image
will utilize the new colormap.
This linkage is maintained during zoom and pan operations. If
you display the blended chandra image and then zoom or pan it,
both images will be updated correctly to maintain
alignment. But note that this means when you go back to the
spitzer display, its zoom and/or pan values will have been
updated. In this way, the spitzer image always is correctly
linked to the blended version.
The JS9.BlendImage() call accepts a variable number of
arguments to perform a variety of functions:
JS9.BlendImage() returns an object containing the following properties:
- active: boolean specifying whether this image is to be blended
- mode: string specifying the blend mode
- opacity: opacity value (0 to 1)
>>> JS9.BlendImage() # returns a blend object for the current image
>>> JS9.BlendImage(true||false) # turns on/off blending of
>>> JS9.BlendImage(blend, opacity) # set/modify blend mode or opacity
"""
return self.send({'cmd': 'BlendImage', 'args': args})
def SyncImages(self, *args):
"""
Synchronize operations between two or more images
call:
JS9.SyncImages([ops], [images], [opts]) # set up synchronization
JS9.SyncImages(true||false) # turn on/off synchronization
where:
- ops: array of operations on which to sync
- images: array of images to sync with this image
- opts: options for sync'ing
Synchronize two or more images, so that when an operation is performed
on one image, it also is performed on the other(s). For example, when
the colormap or scale is changed on an image, it also is changed on
the sync'ed images. Or, when a region is created, moved, resized, or
removed on an image, the same happens on the sync'ed images.
When the SyncImages() call is invoked, the current image is
configured to synchronize the specified images. In addition, if
the reciprocate property is set in the opts object (see below),
the other images are also configured to synchronize one another (as
well as the current image). Once configuration is complete, a sync
command is executed immediately. If the current image already
displays one or more regions, these will be created in the target
images.
The operations that can be specified for sync'ing are:
"colormap", "pan", "regions", "scale", "wcs", "zoom", "contrastbias".
If no array is specified, the default array in JS9.globalOpts.syncOps
is used.
Images to synchronize can be specified as an array of image handles or
image ids. If no array is specified, all currently displayed images
are sync'ed.
The optional opts object can contain:
- reciprocate: boolean determining whether images sync one another
- reverse: boolean to reverse this image and target images (def: false)
If the opts object is not specified, the default value of
reciprocate is the value of the JS9.globalOpts.syncReciprocate
property.
Examples:
>>> # the current image will sync all operations for all images
>>> # sync reciprocally, so that changing any image syncs the others
>>> SyncImages()
>>> # current image will sync specified ops for foo1.fits,foo2.fits:
>>> SyncImages(["scale", "colormap"], ["foo1.fits", "foo2.fits"])
>>> # the current image will sync two images with default ops,
>>> # but the two images themselves will not sync images reciprocally
>>> SyncImages(null, ["foo1.fits", "foo2.fits"], {reciprocate: false});
Note that if the pan operation syncs two images having differently
sized fields of view, the smaller image will stop panning when it
reaches its edge, rather than displaying a blank field.
You can turn on/off syncing for a given image by specifying a single
boolean argument:
>>> # turn off sync'ing temporarily
>>> SyncImages(false);
This is different from unsync'ing in that you can turn sync'ing back
on without having to re-sync the images.
"""
return self.send({'cmd': 'SyncImages', 'args': args})
def UnsyncImages(self, *args):
"""
Unsynchronize two or more previously synchronized images
call:
JS9.UnsyncImages([ops], [images], [opts]) # clear synchronization
where:
- ops: array of operations to unsync
- images: array of images to unsync with this image
- opts: options for unsync'ing
Unsynchronize previously sync'ed images.
The operations that can be specified for unsync'ing are:
"colormap", "pan", "regions", "scale", "wcs", "zoom", "contrastbias".
If no array is specified, the default array in JS9.globalOpts.syncOps is
used. Thus, you can turn off sync'ing for specified operations, while
leaving others to be sync'ed.
Images to be unsync'ed can be specified as an array of image handles or
image ids. If no array is specified, all currently displayed images
are unsync'ed.
The optional opts object can contain:
- reciprocate: boolean determining whether images sync one another
- reverse: boolean to reverse this image and target images (def: false)
If the opts object is not specified, the default is to reciprocate based
on the value of the JS9.globalOpts.syncReciprocate property.
Examples:
>>> # this image won't sync on scale for foo1.fits and foo2.fits,
>>> # and they also will stop sync'ing
UnsyncImages(["scale"], ["foo1.fits", "foo2.fits"])
>>> # this image will still sync foo1.fits and foo2.fits, but
>>> # foo1.fits and foo2.fits will no longer sync this image:
UnsyncImages(null, ["foo1.fits", "foo2.fits"],
{reverse: true, reciprocal: false})
"""
return self.send({'cmd': 'UnsyncImages', 'args': args})
def MaskImage(self, *args):
"""
Mask an image using values in another image
call:
JS9.MaskImage(image, opts)
calling sequences:
JS9.MaskImage() # return current mask params
JS9.MaskImage(true||false) # turn on/off masking
JS9.MaskImage(image, opts) # set mask and optionally, its params
JS9.MaskImage(opts) # set mask params
where:
- image: image handle or image id to use as a mask
- opts: optional mask properties
and where the mask properties are:
- mode: "mask", "opacity", or "overlay"
- value: mask value that triggers masking (def: 0) for "mask" mode
- invert: whether to invert the mask (def: false) for "mask" mode
- def: object containing default RGBA values for "overlay" mode
- opacity: opacity when masking (def: 0, range 0 to 1) for both mode
The pixel values in one image can be used to mask the pixels in
another image if the two images have the same image dimensions.
The type of masking depends on the mode: "overlay" (default) or "mask".
For "mask" mode, if the value of a pixel in the mask is less than or
equal to the value property, the opacity of the displayed pixel
is set to the opacity property. You can also invert the mask
using the invert property. In effect, this mode displays only
the image pixels "covered" by a mask.
For "opacity" mode, each image pixel is assigned an opacity equal
to the value of the mask pixel (whose values are assumed to range
from 0 to 1.)
For "overlay" mode, if the mask pixel has a non-zero alpha, its color
is blended with the image pixel using source-atop composition.
Otherwise, the image pixel color alone is used in the display.
This is one way you can display a mask overlay on top of an image.
A static colormap is usually used in conjunction with an overlay
mask, since pixel values not explicitly assigned a color are
transparent. Note that, when blending a mask and image pixel, the
global mask opacity and the individual pixel opacity are multiplied to
get the final pixel opacity.
To set up a mask initially, call the routine with an already-loaded
mask image as the first parameter, and an optional opts object as the
second parameter:
>>> # default is "overlay"
>>> JS9.ImageMask("casa_mask.fits");
>>> JS9.ImageMask("casa_mask.fits", {mode: "overlay"});
>>> # "mask" mode: set lower threshold for masking and masked opacity
>>> JS9.ImageMask("mask.fits",{"mode":"mask","value":5,"opacity":0.2});
You can change the mask parameters at any time:
>>> JS9.ImageMask({value: 2, opacity: 0});
or temporarily turn off and on the mask:
>>> JS9.ImageMask(false);
>>> ...
>>> JS9.ImageMask(true);
"""
return self.send({'cmd': 'MaskImage', 'args': args})
def BlendDisplay(self, *args):
"""
Set global blend mode for specified display
call:
mode = JS9.BlendDisplay(True|False)
returns:
- mode: current image blend mode
This routine will turn on/off the global image blend mode for the
specified display. If no argument is specified, it returns the current
blend mode.
"""
return self.send({'cmd': 'BlendDisplay', 'args': args})
def GetColormap(self, *args):
"""
Get the image colormap
call:
cmap = JS9.GetColormap()
returns:
- cmap: an object containing colormap information.
The returned cmap object will contain the following properties:
- colormap: colormap name
- contrast: contrast value (range: 0 to 10)
- bias: bias value (range 0 to 1)
"""
return self.send({'cmd': 'GetColormap', 'args': args})
def SetColormap(self, *args):
"""
Set the image colormap
call:
JS9.SetColormap(cmap, [contrast, bias])
calling sequences:
JS9.SetColormap(colormap)
JS9.SetColormap(colormap, contrast, bias)
JS9.SetColormap(colormap, staticOpts)
JS9.SetColormap(contrast, bias)
JS9.SetColormap(staticOpts)
where:
- cmap: colormap name
- contrast: contrast value (range: 0 to 10)
- bias: bias value (range 0 to 1)
- staticOpts: static colormap opts
Set the current colormap, contrast/bias, or both. This call takes one
(colormap), two (contrast, bias) or three (colormap, contrast, bias)
arguments. It also takes the following single arguments:
- rgb: toggle RGB mode
- invert: toggle inversion of the colormap
- reset: reset contrast, bias, and invert values
- staticOpts: opts for a static colormap
The staticOpts argument is an array of parameters to change
in a static colormap. Each parameter can take one of two forms:
- [color, min, max]
- [color, opacity|alpha]
- [color, true|false]
The color parameter must match one of the colors specified when
the static colormap was created. The min and max properties replace
the originally specified min and max values. Specifying a number
between 0 and 1 (inclusive) will change the opacity, while specifying
a number greater than 1 will change the alpha (i.e., opacity * 255).
Specifying true or false will set or unset the active flag for that
color, i.e. it will turn on or off use of that color. When turned off,
the pixels in that range will be transparent. For example:
>>> SetColormap '[["red", 0.5], ["green", true], ["blue", false]]'
sets the opacity of red pixels to 0.5, turns on the green pixels,
and turns off the blue pixels in the currently active static colormap.
"""
return self.send({'cmd': 'SetColormap', 'args': args})
def SaveColormap(self, *args):
"""
Save colormap(s)
calling sequences:
JS9.SaveColormap() # save current colormap to "js9.cmap"
JS9.SaveColormap(fname) # save current colormap to fname
JS9.SaveColormap(cmapArray) # save array of ccmaps to "js9.cmap"
JS9.SaveColormap(fname, cmapArray) # save array of cmaps to fname
where:
- fname: output file name
- cmapArray: optional array of colormap names to save
As shown by the calling sequences above, you can use this routine to
save either the current colormap or a list of colormaps taken from the
specified array. You also can choose to save to a particular filename
or the default "js9.cmap":
>>> # save the current colormap in js9.cmap
>>> JS9.SaveColormap()
>>> # save the current colormap in foo.cmap
>>> JS9.SaveColormap("foo.cmap")
>>> # save the foo1 and foo2 colormaps in js9.cmap
>>> JS9.SaveColormap(["foo1", "foo2"])
>>> # save the user-defined foo1 and foo2 colormaps in foo.cmap
>>> JS9.SaveColormap("foo.cmap", ["foo1", "foo2"])
The colormaps are saved in JSON format. Multiple saved colormaps will
be stored in a JSON array, while a single saved colormap will be saved
at the top level.
Don't forget that the file is saved by the browser, in whatever
location you have set up for downloads.
"""
return self.send({'cmd': 'SaveColormap', 'args': args})
def AddColormap(self, *args):
"""
Add a colormap to JS9
call:
JS9.AddColormap(name, aa|rr,gg,bb|obj|json)
where:
- name: colormap name
- aa: an array containing RGB color triplets
- rr,gg,bb: 3 arrays of vertices specifying color changes
- obj: object containing one of the two colormap definition formats
- json: json string containing one of the colormap definition formats
You can add new colormaps to JS9 using one of two formats. The
first is an array of RGB triplets (i.e. an array of 3-D
arrays), where each triplet defines a color. The elements of
the colormap are divided evenly between these 3-D triplets.
For example, the i8 colormap is defined as:
>>> JS9.AddColormap("i8",
[[0,0,0], [0,1,0], [0,0,1], [0,1,1], [1,0,0],
[1,1,0], [1,0,1], [1,1,1]]))
Here, the colormap is divided into 8 sections having the
following colors: black, green, blue, cyan (green + blue),
red, yellow (red + green), purple (red + blue), and white. A
colormap such as sls also utilizes an array of RGB triplets,
but it has 200 entries, leading to much more gradual
transitions between colors.
The second colormap format consists three arrays of vertices
defining the change in intensity of red, green, and blue,
respectively. For each of these three color triplets, the
first coordinate of each vertex is the x-distance along the
colormap axis (scaled from 0 to 1) and the second coordinate
is the y-intensity of the color. Colors are interpolated
between the vertices. For example, consider the following:
>>> JS9.AddColormap("red",
[[0,0],[1,1]], [[0,0], [0,0]], [[0,0],[0,0]])
>>> JS9.AddColormap("blue",
[[0,0],[0,0]], [[0,0], [0,0]], [[0,0],[1,1]])
>>> JS9.AddColormap("purple",
[[0,0],[1,1]], [[0,0], [0,0]],[[0,0],[1,1]])
In the red (blue) colormap, the red (blue) array contains two
vertices, whose color ranges from no intensity (0) to full
intensity (1) over the whole range of the colormap (0 to
1). The same holds true for the purple colormap, except that
both red and blue change from zero to full intensity.
For a more complicated example, consider the a colormap, which is
defined as:
>>> JS9.AddColormap("a",
[[0,0], [0.25,0], [0.5,1], [1,1]],
[[0,0], [0.25,1], [0.5,0], [0.77,0], [1,1]],
[[0,0], [0.125,0], [0.5, 1], [0.64,0.5],
[0.77, 0], [1,0]])
Here we see that red is absent for the first quarter of the
colormap, then gradually increases to full intensity by the
half mark, after which it stays at full intensity to the
end. Green ramps up to full intensity in the first quarter,
then drops to zero by the half and stays that way until a bit
more than three-quarters along, after which it gradually
increases again. Blue starts off at no intensity for an
eighth, then gradually increases to full intensity by the
half-way mark, decreasing gradually to zero by the
three-quarter mark. The result is that you see, for example,
green at the beginning and yellow (red + green) at the end,
with some purple (red + blue) in the middle of the colormap.
As a convenience, you also can pass an object or json string
containing the colormap definition:
# RGB color triplets for the I8 colormap in a "colors" property
{"name":"i8",
"colors":[[0,0,0],[0,1,0],[0,0,1],[0,1,1],
[1,0,0],[1,1,0],[1,0,1],[1,1,1]]}
# all 3 vertex arrays for purple colormap in one "vertices" property
{"name":"purple",
"vertices":[[[0,0],[1,1]],[[0,0],[0,0]],[[0,0],[1,1]]]}
Finally, note that JS9.AddColormap() adds its new colormap to
all JS9 displays on the given page.
"""
return self.send({'cmd': 'AddColormap', 'args': args})
def LoadColormap(self, *args):
"""
Load a colormap file into JS9
LoadColormap(filename)
where:
- filename: input file name or URL
Load the specified colormap file into the web page. The filename,
which must be specified, can be a local file (with absolute path or a
path relative to the displayed web page) or a URL. It should contain a
JSON representation of a colormap, either in RGB color format or in
vertex format (see AddColormap() above):
>>> # RGB color format
>>> {
>>> "name": "purplish",
>>> "colors": [
>>> [0.196, 0.196, 0.196],
>>> [0.475, 0, 0.608],
>>> [0, 0, 0.785],
>>> [0.373, 0.655, 0.925],
>>> [0, 0.596, 0],
>>> [0, 0.965, 0],
>>> [1, 1, 0],
>>> [1, 0.694, 0],
>>> [1, 0, 0]
>>> ]
>>> }
>>> # vertex format
>>> {
>>> "name": "aips0",
>>> "vertices": [
>>> [
>>> [0.203, 0],
>>> [0.236, 0.245],
>>> [0.282, 0.5],
>>> [0.342, 0.706],
>>> [0.411, 0.882],
>>> [0.497, 1]
>>> ],
>>> [
>>> [0.394, 0],
>>> [0.411, 0.196],
>>> [0.464, 0.48],
>>> [0.526, 0.696],
>>> [0.593, 0.882],
>>> [0.673, 1],
>>> [0.94, 1],
>>> [0.94, 0]
>>> ],
>>> [
>>> [0.091, 0],
>>> [0.091, 0.373],
>>> [0.262, 1],
>>> [0.94, 1],
>>> [0.94, 0]
>>> ] ]
>>> }
As with AddColormap(), the new colormap will be available
in all displays.
"""
return self.send({'cmd': 'LoadColormap', 'args': args})
def GetRGBMode(self, *args):
"""
Get RGB mode information
call:
rgbobj = JS9.GetRGBMode()
returns:
- rgbobj: RGB mode information
This routine returns an object containing the following RGB mode
information:
- active: boolean specifying whether RGB mode is active
- rid: image id of "red" image
- gid: image id of "green" image
- bid: image id of "blue" image
"""
return self.send({'cmd': 'GetRGBMode', 'args': args})
def SetRGBMode(self, *args):
"""
call:
JS9.SetRGBMode(mode, [imobj])
where:
- mode: boolean true to activate RGB mode, false to disable
- imobj: optional object specifying three images to set to the
"red", "green", and "blue" colormaps
In RGB mode, three images assigned the "red", "green", and "blue"
colormaps are displayed as a single image. The RGB color of each
displayed pixel is a combination of the "red", "green", and "blue"
pixel value taken from the appropriate image. Note that all three
images are not required: you can display an RGB image using two of
the three colors simply by not assigning the third colormap.
The SetRGBMode() call turns on or off RGB mode. The
boolean mode argument specifies whether to activate or
de-activate RGB mode. The optional imobj object specifies
(already-loaded) images to assign to the three colormaps:
- rid: image id (or handle) to set to the "red" colormap
- gid: image id (or handle) to set to the "green" colormap
- bid: image id (or handle) to set to the "blue" colormap
If imobj is not specified, it is assumed that images have been
assigned the "red", "green", and "blue" colormaps by another means.
(Once again, it is not necessary to assign all three colormaps.)
"""
return self.send({'cmd': 'SetRGBMode', 'args': args})
def GetOpacity(self, *args):
"""
Get the image opacity
call:
opacity = JS9.GetOpacity()
returns:
- opacity: opacity object
The returned opacity object will contain the following properties:
- opacity: opacity value assigned to image pixels
- flooropacity: opacity assigned when the image pixel value is
less than or equal to the floor value (if defined)
- floorvalue: floor value to test image pixel values against
(if defined)
"""
return self.send({'cmd': 'GetOpacity', 'args': args})
def SetOpacity(self, *args):
"""
Set the image opacity
calling sequences:
JS9.SetOpacity(opacity) # set default opacity for all image pixels
JS9.SetOpacity(fvalue, fopacity) # pixels <= fvalue use fopacity
JS9.SetOpacity(opacity, fvalue, fopacity) # set def and floor opacity
JS9.SetOpacity("reset") # reset default opacity to 1
JS9.SetOpacity("resetfloor") # remove opacity floor
JS9.SetOpacity("resetall") # reset def opacity to 1, remove floor
where:
- opacity: opacity value for image pixels
- floorvalue: floor value to test image pixel values against
- flooropacity: floor opacity value to set
Set the current opacity, floor opacity, or both. This call takes one
(opacity), two (floorvalue, flooropacity) or three (opacity,
floorvalue, flooropacity) arguments.
The floor value & opacity option allows you to set the opacity
for pixels whose image value is less then or equal to a specified
floor value. It takes two arguments: the floor pixel value to check,
and the floor opacity to apply. For example, when both arguments are 0,
pixels whose image values are less than or equal to 0
will be transparent. Specifying 5 and 0.5, respectively, means that
pixels whose image values less than or equal to 5 will have an opacity
of 0.5. A useful case is to make the pixels transparent at a
given value, allowing features of one image to be blended into
another, without blending extraneous pixels.
The various reset options allow you to reset the default value,
floor values, or both.
"""
return self.send({'cmd': 'SetOpacity', 'args': args})
def GetZoom(self, *args):
"""
Get the image zoom factor
call:
zoom = JS9.GetZoom()
returns:
- zoom: floating point zoom factor
"""
return self.send({'cmd': 'GetZoom', 'args': args})
def SetZoom(self, *args):
"""
Set the image zoom factor
call:
JS9.SetZoom(zoom)
where:
- zoom: floating or integer zoom factor or zoom directive string
The zoom directives are:
- x[n]|X[n]: multiply the zoom by n (e.g. 'x2')
- /[n]: divide the zoom by n (e.g. '/2')
- in|In: zoom in by a factor of two
- out|Out: zoom out by a factor of two
- toFit|ToFit: zoom to fit image in display
"""
return self.send({'cmd': 'SetZoom', 'args': args})
def GetPan(self, *args):
"""
Get the image pan position
call:
ipos = JS9.GetPan()
returns:
- ipos: object containing image information for pan
The returned ipos object will contain the following properties:
- x: x image coordinate of center
- y: y image coordinate of center
"""
return self.send({'cmd': 'GetPan', 'args': args})
def SetPan(self, *args):
"""
Set the image pan position
call:
JS9.SetPan(x, y)
where:
- x: x image coordinate
- y: y image coordinate
Set the current pan position using image coordinates. Note that you can
use JS9.WCSToPix() and JS9.PixToWCS() to convert between image
and WCS coordinates.
"""
return self.send({'cmd': 'SetPan', 'args': args})
def AlignPanZoom(self, *args):
"""
Align pan and zoom of the current image to a target image
call:
JS9.AlignPanZoom(im)
where:
- im: image containing the WCS used to perform the alignment
This routine changes the pan and zoom of the current image to match a
target image, assuming both have WCS info available. The image is
panned to the RA, Dec at the center of the target image's display. The
zoom is also matched. The pixel size (as specified by the FITS CDELT1
parameter) will be taken into account when zooming, but not the image
rotation or flip. This routine is faster than ReprojectData() for
aligning reasonably similar images.
No attempt is make to keep the images aligned after the call. This
allows you to make adjustments to the current and/or target images and
then re-align as needed.
"""
return self.send({'cmd': 'AlignPanZoom', 'args': args})
def GetScale(self, *args):
"""
Get the image scale
call:
scale = JS9.GetScale()
returns:
- scale: object containing scale information
The returned scale object will contain the following properties:
- scale: scale name
- scalemin: min value for scaling
- scalemax: max value for scaling
"""
return self.send({'cmd': 'GetScale', 'args': args})
def SetScale(self, *args):
"""
Set the image scale
call:
JS9.SetScale(scale, smin, smax)
where:
- scale: scale name
- smin: scale min value
- smax: scale max value
Set the current scale, min/max, or both. This call takes one (scale),
two (smin, max) or three (scale, smin, smax) arguments.
"""
return self.send({'cmd': 'SetScale', 'args': args})
def GetFlip(self, *args):
"""
Get flip state of an image
call:
flip = JS9.GetFlip()
returns:
- flip: current flip state
Possible returned flip states are: "x", "y", "xy", or "none".
"""
return self.send({'cmd': 'GetFlip', 'args': args})
def SetFlip(self, *args):
"""
Flip an image around the x or y axis
call:
JS9.SetFlip(flip)
where:
- flip: "x", "y"
Flip an image around the specified axis. Flipping is relative to the
current state of the display, so flipping by x twice will return you
to the original orientation.
Since this operation is applied to the entire display canvas instead
of the image, image parameters such as the WCS are not affected.
"""
return self.send({'cmd': 'SetFlip', 'args': args})
def GetRotate(self, *args):
"""
Get the rotate state of an image
call:
flip = JS9.GetRotate()
returns:
- rot: current rotation value for this image
Return the current rotation.
"""
return self.send({'cmd': 'GetRotate', 'args': args})
def SetRotate(self, *args):
"""
Rotate an image by a specified number of degrees
call:
JS9.SetRotate(rot)
where:
- rot: rotation in degrees
Set the rotation of an image to the specified number of degrees. The
rotation is performed in terms of an absolute angle: if you rotate by
20 degrees and then do it again, there is no change. Also, setting the
rotation to 0 sets the angle to 0.
Since this operation is applied to the entire display canvas instead
of the image, image parameters such as the WCS are not affected.
"""
return self.send({'cmd': 'SetRotate', 'args': args})
def GetRot90(self, *args):
"""
Get the rotate state of an image
call:
flip = JS9.GetRot90()
returns:
- rot: current rotation value for this image
The returned rotation value will be a multiple of 90, depending on
how many rotations have been executed and in which direction.
"""
return self.send({'cmd': 'GetRot90', 'args': args})
def SetRot90(self, *args):
"""
Rotate an image by +/- 90 degrees
call:
JS9.SetRot90(rot)
where:
- rot: +/- 90
Rotate an image by a multiple of 90 degrees. Rot90 rotations are
relative to the current state of the display, so four rotations will
return you to the original orientation.
Since this operation is applied to the entire display canvas instead
of the image, image parameters such as the WCS are not affected.
"""
return self.send({'cmd': 'SetRot90', 'args': args})
def GetParam(self, *args):
"""
Get an image parameter value
val = GetParam(param)
where:
- param: name of the parameter
returns:
- val: value of the parameter
Return the value of an image parameter. The available parameters are
listed below in the SetParam() section.
"""
return self.send({'cmd': 'GetParam', 'args': args})
def SetParam(self, *args):
"""
Set an image parameter value
ovalue = SetParam(param, value)
where:
- param: name of the parameter
- val: new value of the parameter
returns:
- ovalue: the previous value of the parameter
A number of miscellaneous image parameters are copied from the
JS9.imageOpts object to each image when it is first loaded. You can
use the SetParam() routine to modify these values subsequently.
The available parameters and their current default values are listed
below:
- exp: 1000, default exp value for scaling
- listonchange: false, list regions after a region change?
- opacity: 1.0, image display opacity, between 0 and 1
- nancolor: "#000000", 6-digit #hex color for NaN values
- valpos: true, display value/position?
- wcsalign: true, align image using wcs after reproj?
- xeqonchange: true, xeq an onchange callback after a region change?
- zscalecontrast: 0.25, default zscale value from ds9
- zscalesamples: 600, default zscale value from ds9
- zscaleline: 120, default zscale value from ds9
The routine returns the previous value of the parameter, which can
be useful when temporarily turning off a function. For example:
>>> oval = SetParam("xeqonchange", false);
>>> .... processing ...
>>> SetParam("xeqonchange", oval);
will temporarily disable execution of the previously defined regions
onload callback, resetting it to the old value after processing
is complete.
"""
return self.send({'cmd': 'SetParam', 'args': args})
def GetValPos(self, *args):
"""
Get value/position information
call:
valpos = JS9.GetValPos(ipos)
where:
- ipos: image position object containing x and y image coord values
returns:
- valpos: value/position object
This routine determines the data value at a given image position and
returns an object containing the following information:
- ix: image x coordinate
- iy: image y coordinate
- isys: image system (i.e. 'image')
- px: physical x coordinate
- py: physical y coordinate
- psys: currently selected pixel-based system (i.e. 'image' or
'physical') for the above px, py values
- ra: ra in degrees (if WCS is available)
- dec: dec in degrees (if WCS is available)
- wcssys: wcs system (if WCS is available)
- val: floating point pixel value
- val3: pixel value as a string truncated to 3 decimal digits
- vstr: string containing value and position info
- id: id of the image
- file: filename of the image
- object: object name of the image from the FITS header
"""
return self.send({'cmd': 'GetValPos', 'args': args})
def PixToWCS(self, *args):
"""
Convert image pixel position to WCS position
call:
wcsobj = JS9.PixToWCS(x, y)
where:
- x: x image coordinate
- y: y image coordinate
returns:
- wcsobj: world coordinate system object
The wcs object contains the following properties:
- ra: right ascension in floating point degrees
- dec: declination in floating point degrees
- sys: current world coordinate system being used
- str: string of wcs in current system ('[ra] [dec] [sys]')
"""
return self.send({'cmd': 'PixToWCS', 'args': args})
def WCSToPix(self, *args):
"""
Convert WCS position to image pixel position
call:
pixobj = JS9.WCSToPix(ra, dec)
where:
- ra: right ascension in floating point degrees
- dec: declination in floating point degrees
returns:
- pixobj: pixel object
The pixel object contains the following properties:
- x: x image coordinate
- y: y image coordinate
- str: string of pixel values ('[x]' '[y]')
"""
return self.send({'cmd': 'WCSToPix', 'args': args})
def ImageToDisplayPos(self, *args):
"""
Get the display coordinates from the image coordinates
call:
dpos = JS9.ImageToDisplayPos(ipos)
where:
- ipos: image position object containing x and y image coordinate
values
returns:
- dpos: display position object containing x and y display
coordinate values
Get display (screen) coordinates from image coordinates. Note that
image coordinates are one-indexed, as per FITS conventions, while
display coordinate are 0-indexed.
"""
return self.send({'cmd': 'ImageToDisplayPos', 'args': args})
def DisplayToImagePos(self, *args):
"""
Get the image coordinates from the display coordinates
call:
ipos = JS9.DisplayToImagePos(dpos)
where:
- dpos: display position object containing x and y display
coordinate values
returns:
- ipos: image position object containing x and y image coordinate
values
Note that image coordinates are one-indexed, as per FITS conventions,
while display coordinate are 0-indexed.
"""
return self.send({'cmd': 'DisplayToImagePos', 'args': args})
def ImageToLogicalPos(self, *args):
"""
Get the logical coordinates from the image coordinates
call:
lpos = JS9.ImageToLogicalPos(ipos, lcs)
where:
- ipos: image position object containing x and y image coordinate
values
returns:
- lpos: logical position object containing x and y logical
coordinate values
Logical coordinate systems include: 'physical' (defined by LTM/LTV
keywords in a FITS header), 'detector' (DTM/DTV keywords), and
'amplifier' (ATM/ATV keywords). Physical coordinates are the most
common. In the world of X-ray astronomy, they refer to the 'zoom 1'
coordinates of the data file.
This routine will convert from image to logical coordinates. By
default, the current logical coordinate system is used. You can specify
a different logical coordinate system (assuming the appropriate
keywords have been defined).
"""
return self.send({'cmd': 'ImageToLogicalPos', 'args': args})
def LogicalToImagePos(self, *args):
"""
Get the image coordinates from the logical coordinates
call:
ipos = JS9.LogicalToImagePos(lpos, lcs)
where:
- lpos: logical position object containing x and y logical
coordinate values
returns:
- ipos: image position object containing x and y image coordinate
values
Logical coordinate systems include: 'physical' (defined by LTM/LTV
keywords in a FITS header), 'detector' (DTM/DTV keywords), and
'amplifier' (ATM/ATV keywords). Physical coordinates are the most
common. In the world of X-ray astronomy, they refer to the 'zoom 1'
coordinates of the data file.
This routine will convert from logical to image coordinates. By
default, the current logical coordinate system is used. You can specify
a different logical coordinate system (assuming the appropriate
keywords have been defined).
"""
return self.send({'cmd': 'LogicalToImagePos', 'args': args})
def GetWCSUnits(self, *args):
"""
Get the current WCS units
call:
unitsstr = JS9.GetWCSUnits()
returns:
- unitstr: 'pixels', 'degrees' or 'sexagesimal'
"""
return self.send({'cmd': 'GetWCSUnits', 'args': args})
def SetWCSUnits(self, *args):
"""
Set the current WCS units
call:
JS9.SetWCSUnits(unitsstr)
where:
- unitstr: 'pixels', 'degrees' or 'sexagesimal'
Set the current WCS units.
"""
return self.send({'cmd': 'SetWCSUnits', 'args': args})
def GetWCSSys(self, *args):
"""
Get the current World Coordinate System
call:
sysstr = JS9.GetWCSSys()
returns:
- sysstr: current World Coordinate System ('FK4', 'FK5', 'ICRS',
'galactic', 'ecliptic', 'image', or 'physical')
"""
return self.send({'cmd': 'GetWCSSys', 'args': args})
def SetWCSSys(self, *args):
"""
Set the current World Coordinate System
call:
JS9.SetWCSSys(sysstr)
where:
- sysstr: World Coordinate System ('FK4', 'FK5', 'ICRS',
'galactic', 'ecliptic', 'image', or 'physical')
Set current WCS system. The WCS systems are available only if WCS
information is contained in the FITS header. Also note that 'physical'
coordinates are the coordinates tied to the original file. They are
mainly used in X-ray astronomy where individually detected photon
events are binned into an image, possibly using a blocking factor. For
optical images, image and physical coordinate usually are identical.
"""
return self.send({'cmd': 'SetWCSSys', 'args': args})
def DisplayMessage(self, *args):
"""
Display a text message
call:
JS9.DisplayMessage(which, text)
where:
- which: "info" or "regions"
- text: text to display
The text string is displayed in the "info" area (usually occupied by the
valpos display) or the "region" area (where regions are displayed). The
empty string will clear the previous message.
"""
return self.send({'cmd': 'DisplayMessage', 'args': args})
def DisplayCoordGrid(self, *args):
"""
Display a WCS-based coordinate grid
call:
JS9.DisplayCoordGrid(mode, opts)
where:
- mode: true (display) or false (hide)
- opts: optional object or json string containing grid parameters
A coordinate grid displays lines of constant RA and constant Dec, with
the points of intersection labeled by their RA and Dec values. The
labels are in sexagesimal notation if the WCS units are sexagesimal,
otherwise they are in degrees. When using sexagesimal notation, labels
will be shortened if possible, e.g., if the RA hours are the same in
two successive labels but the minutes are different, only the minutes
are shown in the second label.
If no arguments are supplied, the routine returns true if the
coordinate grid is currently being displayed, false otherwise. A
boolean first argument specifies whether to display the coordinate
grid or not.
The optional second argument is an opts object (or a json-formatted
string) containing properties to override the default JS9.Grid.opts
properties. These properties include:
- raLines: approx. number of RA grid lines
- decLines: approx. number of Dec grid lines
- stride: fineness of grid lines
- margin: edge margin for displaying a line
- lineColor: color of grid lines
- strokeWidth: grid stroke width
- raAngle: rotation for RA label
- decAngle: rotation for Dec label
- labelColor: color of text labels
- labelFontFamily: label font
- labelFontSize: label font size
- labelRAOffx: x offset of RA labels
- labelRAOffy: y offset of RA labels
- labelDecOffx: x offset of Dec labels
- labelDecOffy: y offset of Dec labels
- degPrec: precision for degree labels
- sexaPrec: precision for sexagesimal labels
- reduceDims: reduce lines of smaller image dim?
- cover: grid lines cover: display or image
The strokeWidth property determines the width of the grid
lines. It also serves as a reminder that you can pass other
standard shape properties in the opts object.
JS9's label placement algorithm puts labels close to the
intersection of RA and Dec lines. A number of properties can be
useful in cases where this simple algorithm is not sufficient:
the raAngle and decAngle properties allow you to rotate the
labels with respect to the grid lines. The four
label[RA,Dec]Off[x,y] properties allow you to move the label with
respect to the grid lines. The raSkip and decSkip properties
allow you to skip labelling the first available lines within the
display. It can be useful, for example, on a rotated image, when
the labels are placed in a corner.
The degPrec and sexaPrec properties specify the precision for
degree values and segagesimal values, respectively. Higher
precision will use more digits and take more space along each line.
A number of properties are (more or less) internal but might be
of use: the reduceDims property will reduce the raLines and
decLines properties by the ratio of image dimensions if one
dimension is smaller than the other. This can prevent crowding in
the smaller dimension. The stride property specifies the length
of each line segment that together make up a grid line. A smaller
stride might make the grid lines smoother in some cases, at the
price of more processing time. The cover property determines
whether the grid is drawn over the entire image or just the
displayed part of the image. At the moment, drawing lines over
the displayed part of the image seems to be sufficient.
Note that you can specify global site-wide values for all these
parameters (overriding the JS9.Grid.opts defaults) by supplying them
in a grid object within the globalOpts object in the js9prefs.js file.
Example: display a coordinate grid, specifying the line color:
>>> JS9.DisplayCoordGrid(true, {lineColor: "pink"});
"""
return self.send({'cmd': 'DisplayCoordGrid', 'args': args})
def CountsInRegions(self, *args):
"""
Get background-subtracted counts in regions
call:
JS9.CountsInRegions(sregion, bregion, opts)
where:
- sregion: source region ("$sregions" for displayed source regions)
- bregion: background region ("$bregions" for displayed bkgd regions)
- opts: optional object or json string containing region parameters
The regcnts program (and its predecessor, funcnts) counts photons in
specified source regions and optionally, in specified background
regions. Displayed results include the bkgd-subtracted counts in each
region, as well as the error on the counts, the area in each region,
and the surface brightness (cnts/area**2) calculated for each region.
Regcnts for desktop use is available on GitHub at:
https://github.com/ericmandel/regions.
The regcnts program has been compiled into JS9 using Emscripten.
Using this routine, regcnts can be run on the FITS memory-based file
for the currently displayed image. The first two arguments specify
the source region(s) and background region(s), respectively.
You can pass a standard region specifier as the source
or background region. If the string "$sregions" ("$bregions") is
specified, the source (background) regions are taken from the
currently displayed image.
In keeping with how desktop regcnts works, if no argument or null or a
null string is specified as the source region, the entire field is
used as the source region. If no argument or null or a null string is
explicitly specified as a background region, no regions are used for
the background. In particular, if you pass only the source region
argument, or pass only the source region and opts arguments, no
background region is used. To recap:
>>> # use entire field, no background
>>> JS9.CountsInRegions([opts])
>>> JS9.CountsInRegions("field"||null||""[, opts])
>>> # use displayed source and displayed background
>>> JS9.CountsInRegions("$sregions", "$bregions"[, opts])
>>> # use displayed source, no background
>>> JS9.CountsInRegions("$sregions"[, opts])
>>> # use displayed source and specified background
>>> JS9.CountsInRegions("$sregions", bregions[, opts])
>>> # use specified source, no background
>>> JS9.CountsInRegions(sregions[, opts])
>>> # use specified source and specified background
>>> JS9.CountsInRegions(sregions, bregions[, opts])
>>> # use specified source and displayed background
>>> JS9.CountsInRegions(sregions, "$bregions"[, opts])
>>> # use entire field and specified background
>>> JS9.CountsInRegions("field"||null||"", bregions[, opts])
>>> # use entire field and displayed background
>>> JS9.CountsInRegions("field"||null||"", "$bregions"[, opts])
The third argument allows you to specify options to regcnts:
- cmdswitches: command line switches passed to regcnts
- dim: size of reduced image (def: max of JS9.globalOpts.image.[xy]dim)
- reduce: reduce image size? (def: true)
- lightwin: if true, results displayed in light window
The command line switches that can be specified in cmdswitches are
detailed in https://js9.si.edu/regions/regcnts.html, the regcnts help
page. Aside from switches which control important aspects of the
analysis, the "-j" switch (which returns the output in JSON format)
might be useful in the browser environment. Some examples:
>>> # display results in a light window
>>> JS9.CountsInRegions({lightwin: true})
>>> # return json using maximum precision in output
>>> JS9.CountsInRegions({cmdswitches: "-j -G"})
Results are also returned as a text string.
The regcnts code is memory (and cpu) intensive. In the desktop
environment, this is not typically a problem, but the
memory-constrained browser environment can present a challenge for
large images and binary tables. To avoid running out of memory (and
for large images, to speed up processing considerably), the
CountsInRegions() routine will bin the image to reduce its size,
unless the reduce option is explicitly set to false. The binned
image size can be specified by the dim option, defaulting to
the global value of the image dimension options. When a file is binned
in this manner, the returned resolution value (e.g., arcsec/pixel)
will reflect the applied binning. Note that the number of photons
found inside a binned and unbinned region differ slightly, due to the
difference in the pixel boundaries in the two cases.
The Counts in Regions option of the Analysis -> Client-side
Analysis menu runs regcnts on the source and background regions of
the currently displayed image. The results are displayed in a light
window.
Finally, note that the main JS9 web site at https://js9.si.edu
also offers regcnts as a server-based analysis program in the
Analysis menu. The displayed source and background regions are passed
to the server for processing. Because this version runs the desktop
program, it runs on the original file and does no binning to reduce
the image size (which, by the way, could lengthen the processing
time). But the server-side task also can be useful for
JS9 large file support, which involves displaying a small
representation file associated with a much larger parent
file stored on the server. In this case, you often want to run
the analysis on the larger (original) file.
"""
return self.send({'cmd': 'CountsInRegions', 'args': args})
def GaussBlurData(self, *args):
"""
Gaussian blur of raw data
call:
JS9.GaussBlurData(sigma, opts)
where:
- sigma: sigma of Gaussian function
- opts: options object
This routine creates a new raw data layer called "gaussBlur"
in which the image pixel values are blurred using a Gaussian
function with the specified sigma. The routine uses the fast
Gaussian blur algorithm (approximating a full Gaussian blur
with three passes of a box blur) described in:
http://blog.ivank.net/fastest-gaussian-blur.html.
"""
return self.send({'cmd': 'GaussBlurData', 'args': args})
def ImarithData(self, *args):
"""
Perform image arithmetic on raw data
call:
JS9.ImarithData(op, arg1, opts)
where:
- op: image operation: "add", "sub", "mul", "div",
"min", "max", "reset"
- arg1: image handle, image id or numeric value
- opts: options object
The JS9.ImarithData() routine performs basic arithmetic
(addition, subtraction, multiplication, division, minimum,
maximum, average) between the currently displayed image and
either another image or a constant value. The first op
argument is a string, as detailed above. The second arg1
argument can be a numeric value or an image id. In the former
case, the constant value is applied to each pixel in the
image. In the latter case, the operation is performed between
the corresponding pixels in the two images. For example:
>>> JS9.ImarithData("max", "foo.fits")
will make a new data layer of the currently displayed image, where
each pixel is the larger value from that image and the foo.fits image
(which can be in any display).
This routine creates a new raw data layer called "imarith"
containing the results of the operation. Successive calls to
this routine are cumulative, so that you can build up a more
complex operation from simple ones. For example:
>>> # foo.fits is displayed in the "myJS9" display
>>> myim = JS9.GetImage()
>>> JS9.ImarithData("max", myim)
>>> JS9.ImarithData("add", 2.718)
will make a new data layer where each pixel is the larger value from
the two images, after which an approximation of the irrational number
e is added to each pixel.
The special reset operation deletes the "imarith" raw data
layer, allowing you to start afresh.
The bitpix value of the new "imarith" layer is chosen as follows:
- for operations between two images, bitpix the "larger" of
the two images (where float is "larger" than int).
- for operations between an image and a constant, bitpix of -32
(single float) is chosen unless the image itself has bitpix of -64, in
which case the double float bitpix is chosen.
You can override the choice of bitpix by passing a bitpix property
in the optional opts object.
Finally, note that the two images must have the same dimensions. We
might be able to remove this restriction in the future, although
it is unclear how one lines up images of different dimensions.
"""
return self.send({'cmd': 'ImarithData', 'args': args})
def ShiftData(self, *args):
"""
Shift raw data
call:
JS9.ShiftData(x, y, opts)
where:
- x: number of pixels to shift in the x (width) direction
- y: number of pixels to shift in the y (height) direction
- opts: options object
This routine creates a new raw data layer called "shift" in which
the pixels are shifted from the original image array by the specified
amount in x and/or y. The results of successive shifts are
cumulative. The routine is used by the Harvard-Smithsonian Center for
Astrophysics MicroObservatory project interactively to align images
that are only slightly offset from one another.
"""
return self.send({'cmd': 'ImarithData', 'args': args})
def FilterRGBImage(self, *args):
"""
Apply a filter to the RGB image
call:
JS9.FilterRGBImage(filter, args)
where:
- filter: name of image filter to apply to the RGB data
- args: filter-specific arguments, where applicable
In JS9, you can change the raw data (and hence the displayed
image) using routines such as JS9.GaussBlurData() or the more
general JS9.RawDataLayer(). You also can apply image
processing techniques directly to the displayed RGB image
without changing the underlying raw data, using this
routine. The web has an overwhelming amount of information
about image processing. A good technical article concerning
the use of image filters with Javascript and the HTML5 canvas
is available at:
http://www.html5rocks.com/en/tutorials/canvas/imagefilters/
The JS9.FilterRGBImage() routine supports a number of image
processing routines, which are listed below. To call one of
them using JS9.FilterRGBImage(), supply the filter name,
followed by any filter-specific arguments, e.g.:
>>> JS9.FilterRGBImage("luminance")
>>> JS9.FilterRGBImage("duotone", "g")
>>> JS9.FilterRGBImage("convolve", [-1,-1,-1,-1,8,-1,-1,-1,-1])
You can, of course, use the default arguments where applicable.
Note that the standard JS9 colormaps, scale, contrast and bias
selections are applied to the raw data to regenerate the RGB
image. Thus, if you use any of the image processing techniques
listed below and then change colormap, contrast, bias, or
scale, you will undo the applied image processing. This is a
good way to reset the displayed image. The same thing can be
accomplished programmatically by specifying "reset" as the
filter name:
>>> JS9.FilterRGBImage("reset")
The following simple image processing filters are available:
- luminance():convert to greyscale using the CIE luminance:
0.2126*r + 0.7152*g + 0.0722*b
- greyscale():convert to greyscale using the standard greyscale:
0.3*r + 0.59*g + 0.11*b
- greyscaleAvg():convert to greyscale using averaging:
(r+g+b) / 3
- brighten(val): add const val to each pixel to change the brightness:
[r + val, g + val, b + val]
- noise(v1, v2): add random noise:
pixel += Math.floor((Math.random()*(v2-v1)) - v2),
defaults are v1=-30, v2=30
- duotone("r"|"g"|"b"): remove a color by setting it to
the avg of the two others: r=(g+b)/2, default color is "r"
- invert(): the RGB channels of the image are inverted:
[255-r, 255-g, 255-b, a]
- pixelate(size):make image look coarser by creating a square tiling
effect of the specified size, default size is 2
- sepia(): image takes on shades of brown, like an antique photograph
- contrast(val): change the difference in brightness between the min
and max intensity of a pixel, default val is 2
- threshold(thresh, low, high):create a two-color image in which pixels
less bright than thresh are assigned the low value (default 0 for
black), otherwise the high value (default: 255 for white)
- gamma(gcorr): apply the nonlinear gamma operation, used to code and
decode luminance values in video or still image systems:
out = pow(in, gcorr), default gcorr is 0.2
- posterize(): convert a smooth gradation of tone to regions
of fewer tones, with abrupt changes between them
- scatter(): scatters the colors of a pixel in its neighborhood, akin
to viewing through brittle cracked glass
- solarize(): which image is wholly or partially reversed in
tone. Dark areas appear light or light areas appear dark.
The following image convolutions are available:
- convolve(weights, [opaque]) convolve the image using the
weights array as a square convolution matrix. If opaque is true
(default), the image will have an opaque alpha channel, otherwise the
alpha is convolved as well.
- sobel(): use the Sobel operator to create an image that
emphasizes the edges
- medianFilter(): noise reduction technique that replaces each
pixel with the median of neighboring pixels
- gaussBlur5(): image pixel values are blurred using a 5x5 Gaussian
- edgeDetect(): detect edges using the kernel
[ -1, -1, -1, -1, 8, -1, -1, -1, -1 ]
- sharpen(val): sharpen the image using the kernel
[ 0, -3, 0, -3, val, -3, 0, -3, 0 ]
- blur(): blur the image using the kernel
[ 1, 2, 1, 2, 1, 2, 1, 2, 1 ]
- emboss(val): produce embossing effect using the kernel
[-18, -9, 9, -9, 100 - val, 9, 0, 9, 18 ]
- lighten(val): apply the kernel
[ 0, 0, 0, 0, val, 0, 0, 0, 0 ],
default val of 12/9 lightens the image
- darken(val): apply the kernel
[ 0, 0, 0, 0, val, 0, 0, 0, 0],
default val of 6/9 darkens the image
With no arguments, the routine returns an array of available filters:
>>> JS9.FilterRGBImage()
["convolve", "luminance", ..., "blur", "emboss", "lighten", "darken"]
"""
return self.send({'cmd': 'FilterRGBImage', 'args': args})
def ReprojectData(self, *args):
"""
Reproject an image using a specified WCS
call:
JS9.ReprojectData(wcsim, opts)
where:
- wcsim: image containing the WCS used to perform the reprojection
- opts: options object
JS9.ReprojectData() creates a new raw data layer (with default id of
"reproject") in which the pixels are reprojected using the WCS from
another image. The mProjectPP program from the Montage software suite
is used to perform the reprojection. Please read the documentation on
mProjectPP from the Montage web site, which includes this explanation:
mProjectPP performs a plane-to-plane transform on the input
image, and is an adaptation of the Mopex algorithm and
developed in collaboration with the Spitzer Space
Telescope. It provides a speed increase of approximately a
factor of 30 over the general-purpose mProject. However,
mProjectPP is only suitable for projections which can be
approximated by tangent-plane projections (TAN, SIN, ZEA, STG,
ARC), and is therefore not suited for images covering large
portions of the sky. Also note that it does not directly
support changes in coordinate system (i.e. equatorial to
galactic coordinates), though these changes can be facilitated
by the use of an alternate header.
The wcsim argument is an image id, image filename, or image
object pointing to the WCS image.
The opts object can contain the following reproject-specific props:
- rawid: the id of the raw data layer to create (default: "reproject")
- cmdswitches: a string containing mProjectPP command line switches
The cmdswitches will be prepended to the mProjectPP command line:
{cmdswitches: "-d 1 -z .75"}
will set the mProjectPP debugging and the drizzle factor,
resulting in a command line that looks like this:
mProjectPP -d 1 -z .75 -s statusfile in.fits out.fits template.hdr
See the mProjectPP documentation for more information about
command switches.
Reprojection is an intensive process which can take a
considerable amount of memory and processing time. To avoid
crashes, we currently restrict the WCS image size used for
reprojection to a value defined by JS9.REPROJDIM, currently
2200 x 2200. Even this might be too large for iOS devices
under certain circumstances, although issues regarding memory
are evolving rapidly.
"""
return self.send({'cmd': 'ReprojectData', 'args': args})
def RotateData(self, *args):
    """
    Rotate an image around the WCS CRPIX point.

    call:
        JS9.RotateData(angle, opts)
    where:
        - angle: rotation angle in degrees; the strings "northup" or
          "northisup" set the rotation angle to 0
        - opts: options object, passed directly to JS9.ReprojectData()

    The rotation is performed about the WCS CRPIX1, CRPIX2 point by
    delegating to JS9.ReprojectData(); see that routine for details.
    """
    # Forward the request verbatim to the JS9 back end.
    msg = {'cmd': 'RotateData', 'args': args}
    return self.send(msg)
def SaveSession(self, *args):
    """
    Save an image session to a file.

    call:
        JS9.SaveSession(session)
    where:
        - session: name of the file to create when saving this session

    Saves all essential session information about the currently displayed
    image (filename, scaling, colormap, contrast/bias, zoom, regions,
    catalogs, etc.) in a json-formatted text file, which can later be
    loaded back into JS9 to restore the session. The file may be edited
    (subject to json formatting rules), e.g. to change colormap or scaling.
    The browser saves the file to its configured download location.

    Note: the session file's file property records the image location.
    Images opened via drag-and-drop carry no pathname (per web security
    protocols), so the path must be edited in before re-loading.
    """
    # Hand the command straight to the JS9 helper.
    msg = {'cmd': 'SaveSession', 'args': args}
    return self.send(msg)
def LoadSession(self, *args):
    """
    Load a previously saved image session from a file.

    call:
        JS9.LoadSession(session)
    where:
        - session: name of the session file to load

    Restores an image session from a json-formatted session file: the
    image is retrieved and loaded, and the saved parameters and graphics
    (scale, colormap, regions, catalogs, etc.) are applied to the display.

    The session file's file property records the image location. Images
    originally opened via drag-and-drop carry no pathname (per web
    security protocols), so the path must be edited into the session file
    before re-loading. Only the pathname is saved, not the raw data, so
    the data file must be accessible — its path can be adjusted by
    editing the file property as needed.
    """
    # Forward unchanged to the JS9 back end.
    msg = {'cmd': 'LoadSession', 'args': args}
    return self.send(msg)
def NewShapeLayer(self, *args):
    """
    Create a new shape layer.

    call:
        lid = JS9.NewShapeLayer(layer, opts)
    where:
        - layer: name of the layer to create
        - opts: default options for this layer
    returns:
        - lid: layer id

    Creates a new named shape layer whose shapes can then be added,
    changed, and removed with the shape routines. Catalogs shown by the
    Catalog plugin are examples of separate shape layers. The optional
    opts parameter supplies per-layer defaults for any shape property
    (see JS9.Regions.opts in js9.js for the regions-layer defaults).
    """
    # Relay the command to JS9.
    msg = {'cmd': 'NewShapeLayer', 'args': args}
    return self.send(msg)
def ShowShapeLayer(self, *args):
    """
    Show or hide the specified shape layer.

    call:
        JS9.ShowShapeLayer(layer, mode)
    where:
        - layer: name of layer
        - mode: true (show layer) or false (hide layer)

    Shape layers can be hidden from display — useful, for example, to
    view one of several loaded catalogs at a time. A true mode displays
    a previously hidden layer; a false mode hides a displayed layer.
    With no mode argument, the current mode is returned.
    """
    # Relay the command to JS9.
    msg = {'cmd': 'ShowShapeLayer', 'args': args}
    return self.send(msg)
def ToggleShapeLayers(self, *args):
    """
    Toggle display of the active shape layers.

    call:
        JS9.ToggleShapeLayers()

    Whereas ShowShapeLayer() shows or hides a single layer, this routine
    toggles display of all active layers in the current image (an active
    layer is one not turned off via the Shape Layers plugin or
    ShowShapeLayer()). The set of active layers at hide time is
    remembered and only those layers are restored on the next toggle:
    e.g. with layers "regions" and "catalog1" where "catalog1" was
    previously turned off, repeated calls toggle only "regions".
    """
    # Relay the command to JS9.
    msg = {'cmd': 'ToggleShapeLayers', 'args': args}
    return self.send(msg)
def ActiveShapeLayer(self, *args):
    """
    Make the specified shape layer the active layer.

    call:
        JS9.ActiveShapeLayer(layer)
    where:
        - layer: name of layer
    returns:
        - active: the active shape layer (if no args are specified)

    One shape layer at a time is active per image, responding to mouse
    and touch events. A layer ordinarily becomes active when it is first
    created and shapes are added (e.g. creating the first region
    activates the regions layer; loading a catalog activates its layer).
    With no arguments, the currently active layer is returned; passing a
    layer name makes that layer active (it must be visible).
    """
    # Relay the command to JS9.
    msg = {'cmd': 'ActiveShapeLayer', 'args': args}
    return self.send(msg)
def AddShapes(self, *args):
    """
    Add one or more shapes to the specified layer.

    call:
        id = JS9.AddShapes(layer, sarr, opts)
    where:
        - layer: name of layer
        - sarr: a shape string, shape object, or an array of shape objects
        - opts: global values to apply to each created shape
    returns:
        - id: id of last shape created

    sarr can be a shape name ('annulus', 'box', 'circle', 'ellipse',
    'point', 'polygon', 'text'), a single shape object, or an array of
    shape objects. The most important shape-object properties:
        - shape: one of the shape names above [REQUIRED]
        - x, y: image position
        - dx, dy: increments from current image position
        - tags: comma separated list of tag strings
        - radii: array of radii for annulus shape
        - width, height: dimensions for box shape
        - radius: radius value for circle shape
        - r1, r2: x and y radii for ellipse shape (misnomers noted)
        - pts: array of x/y position objects, for polygons
        - points: array of x/y offsets from the specified center,
          for polygons
        - angle: angle in degrees for box and ellipse shapes
        - color: shape color (string name or #rrggbb syntax)
        - text: text associated with text shape
    Other available properties:
        - fixinplace: if true, shape cannot be moved or resized
        - lockMovementX / lockMovementY: no movement along that axis
        - lockRotation: shape cannot be rotated
        - lockScalingX / lockScalingY: no resizing along that axis
        - fontFamily, fontSize, fontStyle, fontWeight: text-shape fonts
    """
    # Relay the command to JS9.
    msg = {'cmd': 'AddShapes', 'args': args}
    return self.send(msg)
def RemoveShapes(self, *args):
    """
    Remove one or more shapes from the specified shape layer.

    call:
        JS9.RemoveShapes(layer, shapes)
    where:
        - layer: name of layer
        - shapes: which shapes to remove

    The shapes argument defaults to "all" when unspecified. Selector
    values:
        - all: all shapes not including child text shapes
        - All: all shapes including child text shapes
        - selected: the selected shape (or shapes in a selected group)
        - [color]: shapes of the specified color
        - [shape]: shapes of the specified shape
        - [wcs]: shapes whose initial wcs matches the specified wcs
        - [tag]: shapes having the specified tag
        - /[regexp]/: shapes with a tag matching the specified regexp
        - child: a child shape (i.e. text child of another shape)
        - parent: a shape that has a child (i.e. has a text child)
    """
    # Relay the command to JS9.
    msg = {'cmd': 'RemoveShapes', 'args': args}
    return self.send(msg)
def GetShapes(self, *args):
    """
    Get information about one or more shapes in the specified shape layer.

    call:
        sarr = JS9.GetShapes(layer, shapes)
    where:
        - layer: name of layer
        - shapes: which shapes to retrieve
    returns:
        - sarr: array of shape objects

    Each returned shape object contains:
        - id: numeric region id (assigned by JS9 automatically)
        - mode: 'add', 'remove', or 'change'
        - shape: region shape ('annulus', 'box', 'circle', 'ellipse',
          'point', 'polygon', 'text')
        - tags: comma delimited list of region tags (e.g., 'source',
          'include')
        - color: region color
        - x,y: image coordinates of region
        - size: object containing width and height for box region
        - radius: radius value for circle region
        - radii: array of radii for annulus region
        - eradius: object containing x and y radii for ellipse regions
        - pts: array of x/y position objects, for polygons
        - angle: angle in degrees for box and ellipse regions
    """
    # Relay the command to JS9.
    msg = {'cmd': 'GetShapes', 'args': args}
    return self.send(msg)
def ChangeShapes(self, *args):
    """
    Change one or more shapes in the specified layer.

    call:
        JS9.ChangeShapes(layer, shapes, opts)
    where:
        - layer: name of layer
        - shapes: which shapes to change
        - opts: object containing options to change in each shape

    The opts object accepts the parameters described for JS9.AddShapes().
    The shape itself cannot (yet) be changed (e.g. from 'box' to
    'circle'). The shapes argument defaults to "all" when unspecified.
    Selector values:
        - all: all shapes not including child text shapes
        - All: all shapes including child text shapes
        - selected: the selected shape (or shapes in a selected group)
        - [color]: shapes of the specified color
        - [shape]: shapes of the specified shape
        - [wcs]: shapes whose initial wcs matches the specified wcs
        - [tag]: shapes having the specified tag
        - /[regexp]/: shapes with a tag matching the specified regexp
        - child: a child shape (i.e. text child of another shape)
        - parent: a shape that has a child (i.e. has a text child)
    """
    # Relay the command to JS9.
    msg = {'cmd': 'ChangeShapes', 'args': args}
    return self.send(msg)
def CopyShapes(self, *args):
    """
    Copy a shape layer to another image.

    call:
        JS9.CopyShapes(to, layer)
    where:
        - to: image id to which to copy shapes
        - layer: shape layer to copy

    All shapes in the shape layer are copied to the new image. If to is
    "all", the regions are copied to all images.
    """
    # Relay the command to JS9.
    msg = {'cmd': 'CopyShapes', 'args': args}
    return self.send(msg)
def SelectShapes(self, *args):
    """
    Gather shapes into a selection.

    call:
        JS9.SelectShapes(layer, shapes)
    where:
        - layer: shape layer
        - shapes: which shapes to select

    JS9's mouse interface selects a single shape by clicking it, gathers
    shapes into a group selection by dragging with the left mouse button,
    and adds to an existing selection with shift-click. This routine
    creates such a selection programmatically: the first argument is the
    shape layer, the second the regions selection (default "all"). The
    resulting selection of shapes can be moved as one unit.

    For example:
    >>> j.SelectShapes("myreg", "circle") # select all circles
    >>> j.SelectShapes("myreg", "circle&&!foo2") # circles w/o 'foo2' tag

    Regions in a selection are processed individually, i.e. a regions
    selection will match the regions inside a group — e.g. changing the
    color using "circle" also affects circles within a selection of
    circles. Use the selected specification to process only regions
    inside a selection.
    """
    # Relay the command to JS9.
    msg = {'cmd': 'SelectShapes', 'args': args}
    return self.send(msg)
def UnselectShapes(self, *args):
    """
    Remove shapes from a selection.

    call:
        JS9.UnselectShapes(layer, shapes)
    where:
        - layer: shape layer
        - shapes: which shapes to select

    Complements the mouse-based selection interface (click to select,
    drag to group-select, shift-click to add) by removing one or more
    shapes from a shape selection programmatically. The first argument
    is the shape layer; the second is the shape selection. If the
    selection is unspecified, or is "all" or "selected", the selection
    is undone; otherwise a new selection is made, not containing the
    unselected shapes, which can be moved as one unit.
    """
    # Relay the command to JS9.
    msg = {'cmd': 'UnselectShapes', 'args': args}
    return self.send(msg)
def GroupShapes(self, *args):
    """
    Gather shapes into a long-lived group.

    call:
        groupid = JS9.GroupShapes(layer, shapes, opts)
    where:
        - layer: shape layer
        - shapes: which shapes to group
        - opts: optional object containing grouping options
    returns:
        - groupid: the group id associated with the newly created group

    A shape group can be moved and resized as a single unit — to first
    order, a long-lived form of a region selection. A selection dissolves
    when you click outside it; a shape group dissolves only via
    j.UngroupShapes(). The shapes argument defaults to either 'selected'
    or 'all', depending on whether a shape selection currently exits.

    The optional opts argument contains the following properties:
        - groupid: the group id to use, if possible (default: 'group_[n]')
        - select: if false, the group is not selected upon creation
    By default the groupid is 'group_' plus an integer chosen to make it
    unique; a supplied groupid that clashes with an existing group gets
    an integer appended. The new group is 'selected' unless select is
    false (useful when creating many groups).

    The returned groupid string selects and processes all shapes in the
    group, e.g. to recolor them:
    >>> gid = j.GroupShapes('myreg', 'circle && foo1');
    >>> j.ChangeShapes('myreg', gid, {'color':'red'});

    Unlike temporary shape selections, shapes in a group are not
    available individually — a regions selection using a non-groupid
    does not match shapes inside a group:
    >>> gid = j.GroupShapes('myreg', 'circle && foo1');
    >>> j.ChangeShapes('myreg', 'circle', {'color':'cyan'}) # no
    >>> j.ChangeShapes('myreg', gid, {'color':'red'}); # yes

    A shape belongs to at most one group at a time; for a shape already
    in a group, globalOpts.regGroupConflict decides its fate: skip (the
    default, silently skip it) or error (throw an error).
    """
    # Relay the command to JS9.
    msg = {'cmd': 'GroupShapes', 'args': args}
    return self.send(msg)
def UngroupShapes(self, *args):
    """
    Dissolve a group of shapes.

    call:
        JS9.UngroupShapes(layer, groupid, opts)
    where:
        - layer: shape layer
        - groupid: group id of the group to dissolve
        - opts: optional object containing ungrouping options

    Dissolves an existing group so the shapes it contained once again
    become separate. The groupid is the id previously returned by the
    JS9.GroupShapes() call.

    The optional opts argument contains the following properties:
        - select: newly separate shapes in the group are 'selected'?
    By default the ungrouped shapes unobtrusively rejoin the other shapes
    on the display; passing select: true makes them selected (so, for
    example, they can be removed easily with the Delete key).

    For example:
    >>> gid = j.GroupShapes('myreg', 'circle || ellipse')
    >>> j.UngroupShapes('myreg', gid)
    """
    # Relay the command to JS9.
    msg = {'cmd': 'UngroupShapes', 'args': args}
    return self.send(msg)
def AddRegions(self, *args):
    """
    Add one or more regions to the regions layer.

    call:
        id = JS9.AddRegions(rarr, opts)
    where:
        - rarr: a shape string, region object or an array of region objects
        - opts: global values to apply to each created region
    returns:
        - id: id of last region created

    rarr can be a region shape ('annulus', 'box', 'circle', 'ellipse',
    'point', 'polygon', 'text'), a single region object, or an array of
    region objects. The most important region-object properties:
        - shape: one of the shape names above [REQUIRED]
        - x, y: image position
        - lcs: object containing logical x, y and sys (e.g. 'physical')
        - dx, dy: increments from current image position
        - tags: comma separated list of tag strings
        - radii: array of radii for annulus region
        - width, height: dimensions for box region
        - radius: radius value for circle region
        - r1, r2: x and y radii for ellipse region (misnomers noted)
        - pts: array of x/y position objects for polygons
        - points: array of x/y offsets from the center for polygons
        - angle: angle in degrees for box and ellipse regions
        - color: region color (string name or #rrggbb syntax)
        - text: text associated with text region
    Other available properties:
        - fixinplace: if true, region cannot be moved or resized
        - lockMovementX / lockMovementY: no movement along that axis
        - lockRotation: region cannot be rotated
        - lockScalingX / lockScalingY: no resizing along that axis
        - fontFamily, fontSize, fontStyle, fontWeight: text-region fonts
    """
    # Relay the command to JS9.
    msg = {'cmd': 'AddRegions', 'args': args}
    return self.send(msg)
def GetRegions(self, *args):
    """
    Get information about one or more regions.

    call:
        rarr = JS9.GetRegions(regions)
    where:
        - regions: which regions to retrieve
    returns:
        - rarr: array of region objects

    The regions argument defaults to "selected" if there are selected
    regions, otherwise "all". Each returned region object contains:
        - id: numeric region id (assigned by JS9 automatically)
        - mode: 'add', 'remove' or 'change'
        - shape: region shape ('annulus', 'box', 'circle', 'ellipse',
          'point', 'polygon', 'text')
        - tags: comma delimited list of region tags (e.g., 'source',
          'include')
        - color: region color
        - x,y: image coordinates of region
        - radii: array of radii for annulus region
        - width, height: dimensions for box region
        - radius: radius value for circle region
        - r1, r2: x and y radii for ellipse region (misnomers noted)
        - pts: array of x/y position objects, for polygons
        - points: array of x/y offsets from the specified center,
          for polygons
        - angle: angle in degrees for box and ellipse regions
        - wcsstr: region string in wcs coordinates
        - wcssys: wcs system (e.g. 'FK5')
        - imstr: region string in image or physical coordinates
        - imsys: image system ('image' or 'physical')
    """
    # Relay the command to JS9.
    msg = {'cmd': 'GetRegions', 'args': args}
    return self.send(msg)
def ListRegions(self, *args):
    """
    List one or more regions.

    call:
        JS9.ListRegions(regions, opts)
    where:
        - regions: which regions to list
        - opts: object containing options

    Lists (and returns) the specified regions. By default a light window
    lists all regions (as if the list option of the Regions menu had been
    selected). "selected" regions or any standard regions specification
    may be listed instead.

    The opts object supports the following properties:
        - mode: display/return mode (1,2,3)
        - wcssys: wcs system to use (ICRS, FK5, galactic, physical, etc.)
        - wcsunits: units for wcs output (sexagesimal, degrees, pixels)
        - includejson: include JSON object
        - includecomments: include comments
        - layer: which layer to display (def: regions layer)
    The mode property accepts the following values:
        - 1: no display, return full region string including json, comments
        - 2: display and return shortened region string (no json, comments)
        - 3: display and return full region string (including json, comments)
    """
    # Relay the command to JS9.
    msg = {'cmd': 'ListRegions', 'args': args}
    return self.send(msg)
def ListGroups(self, *args):
    """
    List one or more region/shape groups.

    call:
        JS9.ListGroups(group, opts)
    where:
        - group: which group(s) to list
        - opts: object containing options

    Lists the specified region/shape group(s) in the specified layer
    (default "regions"). The first argument is the groupid of the group
    to list, or "all" to list all groups.

    The optional opts object can contain the following properties:
        - includeregions: display regions as well as the group name (def: true)
        - layer: layer to list (def: "regions")
    By default the display includes the group name and its regions; set
    includeregions to False to skip the region display. For example:
    >>> j.ListGroups("all", {"includeregions": false})
    grp1
    grp2
    grp3
    >>> j.ListGroups("grp1")
    grp1:
    circle(3980.00,4120.00,20.00) # source,include,foo1
    ellipse(4090.00,4120.00,25.00,15.00,0.0000) # source,include,foo1
    """
    # Relay the command to JS9.
    msg = {'cmd': 'ListGroups', 'args': args}
    return self.send(msg)
def EditRegions(self, *args):
    """
    Edit one or more regions.

    call:
        JS9.EditRegions()

    Edits one or more selected regions using an Edit dialog box. When a
    single region has been selected by clicking it, all of its properties
    can be edited via the displayed dialog box. When a group of regions
    has been selected via Meta-mousemove highlighting, properties such as
    color, stroke width, dash pattern, and tags can be edited for all of
    them; shift-click adds further regions to the edit group.
    """
    # Relay the command to JS9.
    msg = {'cmd': 'EditRegions', 'args': args}
    return self.send(msg)
def ChangeRegions(self, *args):
    """
    Change one or more regions.

    call:
        JS9.ChangeRegions(regions, opts)
    where:
        - regions: which regions to change
        - opts: object containing options to change in each region

    The opts object accepts the parameters described for
    JS9.AddRegions(); the shape itself cannot (yet) be changed (e.g.
    from 'box' to 'circle'). See js9onchange.html for usage examples.
    The regions argument defaults to "selected" if there are selected
    regions, otherwise "all". Region selector values:
        - all: all regions not including child text regions
        - All: all regions including child text regions
        - selected: the selected region (or regions in a selected group)
        - [color]: regions of the specified color
        - [shape]: regions of the specified shape
        - [wcs]: regions whose initial wcs matches the specified wcs
        - [tag]: regions having the specified tag
        - /[regexp]/: regions with a tag matching the specified regexp
        - child: a child region (i.e. text child of another region)
        - parent: a region that has a child (i.e. has a text child)
    """
    # Relay the command to JS9.
    msg = {'cmd': 'ChangeRegions', 'args': args}
    return self.send(msg)
def CopyRegions(self, *args):
    """
    Copy one or more regions to another image.

    call:
        JS9.CopyRegions(to, regions)
    where:
        - to: image id to which to copy regions
        - regions: which regions to copy

    Copies regions to a different image; if to is "all", the regions are
    copied to all images. The regions argument defaults to "selected" if
    there are selected regions, otherwise "all". Region selector values:
        - all: all regions not including child text regions
        - All: all regions including child text regions
        - selected: the selected region (or regions in a selected group)
        - [color]: regions of the specified color
        - [shape]: regions of the specified shape
        - [wcs]: regions whose initial wcs matches the specified wcs
        - [tag]: regions having the specified tag
        - /[regexp]/: regions with a tag matching the specified regexp
        - child: a child region (i.e. text child of another region)
        - parent: a region that has a child (i.e. has a text child)
    """
    # Relay the command to JS9.
    msg = {'cmd': 'CopyRegions', 'args': args}
    return self.send(msg)
def RemoveRegions(self, *args):
    """
    Remove one or more regions from the region layer.

    call:
        JS9.RemoveRegions(regions)
    where:
        - regions: which regions to remove

    The regions argument defaults to "selected" if there are selected
    regions, otherwise "all". Region selector values:
        - all: all regions not including child text regions
        - All: all regions including child text regions
        - selected: the selected region (or regions in a selected group)
        - [color]: regions of the specified color
        - [shape]: regions of the specified shape
        - [wcs]: regions whose initial wcs matches the specified wcs
        - [tag]: regions having the specified tag
        - /[regexp]/: regions with a tag matching the specified regexp
        - child: a child region (i.e. text child of another region)
        - parent: a region that has a child (i.e. has a text child)
    """
    # Relay the command to JS9.
    msg = {'cmd': 'RemoveRegions', 'args': args}
    return self.send(msg)
def UnremoveRegions(self, *args):
    """
    Unremove one or more previously removed regions.

    call:
        JS9.RemoveRegions()

    Restores regions that were accidentally removed. JS9 maintains a
    stack of removed regions (of size JS9.globalOpts.unremoveReg, current
    default is 100); each removal of one or more regions pushes a single
    entry onto that stack. This call pops the last entry off the stack
    and calls AddRegions.
    """
    # Relay the command to JS9.
    msg = {'cmd': 'UnremoveRegions', 'args': args}
    return self.send(msg)
def SaveRegions(self, *args):
    """
    Save regions from the current image to a file.

    call:
        JS9.SaveRegions(filename, which, layer)
    where:
        - filename: output file name
        - which: which regions to save (def: "all")
        - layer: which layer save (def: "regions")

    Saves the current regions for the displayed image as a JS9 regions
    file; with no filename, the file is saved as "js9.reg". The browser
    saves the file to its configured download location.

    The which argument defaults to "all"; "selected" saves the selected
    regions, and a tag value saves regions having that tag. The layer
    argument defaults to "regions" (the usual regions layer); a
    different layer may be specified, e.g. to save a catalog layer as a
    region file (SaveCatalog() would save the data in table format
    instead of as regions).
    """
    # Relay the command to JS9.
    msg = {'cmd': 'SaveRegions', 'args': args}
    return self.send(msg)
def SelectRegions(self, *args):
    """
    Group regions into a selection.

    call:
        JS9.SelectRegions(regions)
    where:
        - regions: which regions to select

    JS9's mouse interface selects a single region by clicking it, gathers
    regions into a group selection by dragging with the left mouse
    button, and adds to an existing selection with shift-click. This
    routine creates a selection programmatically: the argument is the
    regions selection (default "all"). The resulting selection of
    regions can be moved as one unit.

    For example:
    >>> j.SelectRegions("circle") # select all circles
    >>> j.SelectRegions("circle && !foo2") # all circles without tag 'foo2'

    Regions in a selection are processed individually, i.e. a regions
    selection will match the regions inside a group — e.g. changing the
    color using "circle" also affects circles within a selection of
    circles. Use the selected specification to process only regions
    inside a selection.
    """
    # Relay the command to JS9.
    msg = {'cmd': 'SelectRegions', 'args': args}
    return self.send(msg)
def UnselectRegions(self, *args):
    """
    Remove regions from a selection.

    call:
        JS9.UnselectRegions(regions)
    where:
        - regions: which regions to select

    Complements the mouse-based selection interface (click to select,
    drag to group-select, shift-click to add) by removing one or more
    regions from a region selection programmatically. If the regions
    selection is unspecified, or is "all" or "selected", the selection
    is undone; otherwise a new selection is made, not containing the
    unselected regions, which can be moved as one unit.

    For example:
    >>> j.UnselectRegions("circle&&!foo2") # unselect circles w/o tag 'foo2'
    """
    # Relay the command to JS9.
    msg = {'cmd': 'UnselectRegions', 'args': args}
    return self.send(msg)
def GroupRegions(self, *args):
    """
    Gather regions into a long-lived group.

    call:
        groupid = JS9.GroupRegions(shapes, opts)
    where:
        - regions: which regions to group
        - opts: optional object containing grouping options
    returns:
        - groupid: the group id associated with the newly created group

    A region group can be moved and resized as a single unit — to first
    order, a long-lived form of a region selection. A selection dissolves
    when you click outside it; a region group dissolves only via
    JS9.UngroupRegions(). The regions argument defaults to either
    'selected' or 'all', depending on whether a region selection
    currently exits.

    The optional opts argument contains the following properties:
        - groupid: the group id to use, if possible (default: 'group_[n]')
        - select: if false, the group is not selected upon creation
    By default the groupid is 'group_' plus an integer chosen to make it
    unique; a supplied groupid that clashes with an existing group gets
    an integer appended. The new group is 'selected' unless select is
    false (useful when creating many groups).

    The returned groupid string selects and processes all regions in the
    group, e.g. to recolor them:
    >>> gid = j.GroupRegions('circle && foo1');
    >>> j.ChangeRegions(gid, {'color':'red'});

    When a regions file is created via JS9.SaveRegions(), the groupid is
    stored in each grouped region's JSON object and is used to
    reconstitute the group when the file is reloaded.

    Unlike temporary region selections, regions in a group are not
    available individually — a regions selection using a non-groupid
    does not match regions inside a group:
    >>> gid = j.GroupRegions('circle && foo1');
    >>> j.ChangeRegions('circle', {'color':'cyan'}) # won't change group
    >>> j.ChangeRegions(gid, {'color':'red'}); # change regions in group

    A region belongs to at most one group at a time; for a region
    already in a group, globalOpts.regGroupConflict decides its fate:
    skip (the default, silently skip it) or error (throw an error).
    """
    # Relay the command to JS9.
    msg = {'cmd': 'GroupRegions', 'args': args}
    return self.send(msg)
def UngroupRegions(self, *args):
"""
Dissolve a Group of Regions
call:
JS9.UngroupRegions(groupid, opts)
where:
- groupid: group id of the group to dissolve
- opts: optional object containing ungrouping options
This routine allows you to dissolve an existing group, so that the
regions contained therein once again become separate. The first
argument is the groupid, previously returned by the JS9.GroupRegions()
call.
The optional opts argument contains the following properties:
- select: newly separate regions in the group are 'selected'?
By default, the ungrouped regions unobtrusively take their place among
other regions on the display. You can make them be selected by
passing the select: true property in opts. Doing this, for
example, would allow you to remove them easily with the Delete key.
For example:
>>> gid = j.GroupRegions('circle || ellipse')
>>> j.UngroupRegions(gid)
"""
return self.send({'cmd': 'UngroupRegions', 'args': args})
def ChangeRegionTags(self, *args):
"""
Change region tags for the specified image(s)
call:
JS9.ChangeRegionTags(which, addreg, removereg)
where:
- which: which regions to process (def: 'all')
- addreg: array or comma-delimited string of regions to add
- removereg: array or comma-delimited string of regions to remove
While region tags can be changed wholesale using JS9.ChangeRegions(),
this routine allows you to add and/or remove specific tags. The first
argument specifies which regions to change. The second argument is a
list of tags to add, while the third argument is a list of tags to
remove. In each case, the tags argument can be an array of tag strings
or a single string containing a comma-separated list of tags:
>>> JS9.ChangeRegionTags('selected', ['foo1', 'foo2'], ['goo1']);
>>> JS9.ChangeRegionTags('selected', 'foo1,foo2', 'goo1');
"""
return self.send({'cmd': 'ChangeRegionTags', 'args': args})
def ToggleRegionTags(self, *args):
"""
Toggle two region tags for the specified image(s)
call:
JS9.toggleRegionTags(which, tag1, tag2)
where:
- which: which regions to process (def: 'all')
- tag1: tag #1 to toggle
- tag2: tag #2 to toggle
While region tags can be changed wholesale using JS9.ChangeRegions(),
this routine allows you to toggle between two tags, e.g., a source
region and background region, or include and exclude. For example:
>>> JS9.ToggleRegionTags('selected', 'source', 'background');
will change a background region into a source region
or vice-versa, depending on the state of the region, while:
>>> JS9.ToggleRegionTags('selected', 'include', 'exclude');
will toggle between include and exclude.
"""
return self.send({'cmd': 'ToggleRegionTags', 'args': args})
def LoadRegions(self, *args):
"""
Load regions from a file into the current image
call:
JS9.LoadRegions(filename)
where:
- filename: input file name or URL
Load the specified regions file into the displayed image. The filename,
which must be specified, can be a local file (with absolute path or a
path relative to the displayed web page) or a URL.
"""
return self.send({'cmd': 'LoadRegions', 'args': args})
def LoadCatalog(self, *args):
"""
Load an astronomical catalog
call:
JS9.LoadCatalog(layer, table, opts)
where:
- name of shape layer into which to load the catalog
- table: string or blob containing the catalog table
- opts: catalog options
Astronomical catalogs are a special type of shape layer, in which
the shapes have been generated from a tab-delimited text file of
columns, including two columns that contain RA and Dec values. An
astronomical catalog can have a pre-amble of comments, which, by
default, have a '#' character in the first column.
The JS9.LoadCatalog() routine will read a file in this format,
processing the data rows by converting the RA and Dec values into
image position values that will be displayed as shapes in a new
catalog layer.
The first argument to the JS9.LoadCatalog() routine is the name
of the shape layer that will contain the objects in the catalog.
Specifying the name of an existing layer is valid: previous shapes
in that layer will be removed.
The second argument should be a string containing the table
data described above (the result of reading a file, performing
a URL get, etc.)
The third argument is an optional object used to specify
parameters, including:
- xcol: name of the RA column in the table
- ycol: name of the Dec column in the table
- wcssys: wcs system (FK4, FK5, ICRS, galactic, ecliptic)
- shape: shape of catalog object
- color: color of catalog shapes
- width: width of box catalog shapes
- height: height of box catalog shapes
- radius: radius of circle catalog shapes
- r1: r1 of ellipse catalog shapes
- r2: r2 of ellipse catalog shapes
- tooltip: format of tooltip string to display for each object
- skip: comment character in table file
Most of these properties have default values that are stored
in the JS9.globalOpts.catalogs object. The values listed above
also can be changed by users via the Catalog tab in the
Preferences plugin.
"""
return self.send({'cmd': 'LoadCatalog', 'args': args})
def SaveCatalog(self, *args):
"""
Save an astronomical catalog to a file
call:
JS9.SaveCatalog(filename, which)
where:
- filename: output file name
- which: layer containing catalog objects to save
Save the specified catalog layer as a text file. If filename is not
specified, the file will be saved as [layer].cat.
Don't forget that the file is saved by the browser, in whatever
location you have set up for downloads.
If the which argument is not specified, the catalog associated
with the current active layer will be saved. In either case, the
layer to save must be a catalog created from a tab-delimited
file (or URL) of catalog objects (e.g., not the regions layer).
"""
return self.send({'cmd': 'SaveCatalog', 'args': args})
def GetAnalysis(self, *args):
"""
Get server-side analysis task definitions
call:
JS9.GetAnalysis()
The JS9.GetAnalysis() routine returns an array of analysis task
definitions, each containing the following information:
- name: a short identifier string (typically one word)
- title: a longer string that will be displayed in the Analysis menu
- files: a rule that will be matched against to determine whether this
- task is available for the current image
- purl: a URL pointing to a web page containing a user parameter form
- action: the command to execute on the server side
- rtype: return type: text, plot, fits, png, regions, catalog, alert,
none
- hidden: if true, the analysis task is not shown in the Analysis menu
Not every property will be present in every task definition
(e.g., purl is only present when there is a parameter form).
Also note that hidden tasks are not returned by this call.
"""
return self.send({'cmd': 'GetAnalysis', 'args': args})
def RunAnalysis(self, *args):
"""
Run a simple server-side analysis task
call:
JS9.RunAnalysis(name, parr)
where:
- name: name of analysis tool
- parr: optional array of macro-expansion options for command line
The JS9.RunAnalysis() routine is used to execute a server-side analysis
task and return the results for further processing within Python.
NB: Prior to JS9 v1.10, this routine displayed the results on the JS9
web page instead of returning them to Python. If you want to display
the results in JS9, use the "analysis" short-cut routine instead.
The optional parr array of parameters is passed to the JS9 analysis
macro expander so that values can be added to the command line. The
array is in jQuery name/value serialized object format, which is
described here:
http://api.jquery.com/serializeArray/
"""
return self.send({'cmd': 'RunAnalysis', 'args': args})
def SavePNG(self, *args):
"""
Save image as a PNG file
call:
JS9.SavePNG(filename, opts)
where:
- filename: output file name
- opts: optional save parameters
Save the currently displayed image as a PNG file. If filename is not
specified, the file will be saved as "js9.png".
The opts object can specify the following properties:
- layers: save graphical layers (e.g. regions) (def: true)
- source: "image" or "display" (def: "display")
By default, SavePNG() will save all of the 2D graphics in the
shape layers (regions, catalogs, etc.) as well as the image. Set
the layers property to false to save only the image.
Also by default, SavePNG() will save the RGB pixels from the
display. This means, for example, that a blended set of images will
save the blended pixels. If you want to save the RGB pixels from one
of the images in a blended image, you can specify the source
property to the image. For example, in the js9blend.html demo,
you can save the RGB pixels of the Chandra image by specifying use of
the "image" source and specifying the image's id in the display
parameter:
>>> SavePNG("foo.png", {"source":"image"}, {"display":"chandra.fits"});
Don't forget that the file is saved by the browser, in whatever
location you have set up for downloads.
"""
return self.send({'cmd': 'SavePNG', 'args': args})
def SaveJPEG(self, *args):
"""
Save image as a JPEG file
call:
JS9.SaveJPEG(filename, opts)
where:
- filename: output file name
- opts: optional save parameters or a number between 0 and 1
indicating image quality
Save the currently displayed image as a JPEG file. If filename is not
specified, the file will be saved as "js9.png".
The opts object can specify the following properties:
- layers: save graphical layers (e.g. regions) (def: true)
- source: "image" or "display" (def: "display")
- quality: JPEG encoder quality
By default, SaveJPEG() will save all of the 2D graphics in the
shape layers (regions, catalogs, etc.) as well as the image. Set
the layers property to false to save only the image.
Also by default, SaveJPEG() will save the RGB pixels from the
display. This means, for example, that a blended set of images will
save the blended pixels. If you want to save the RGB pixels from one
of the images in a blended image, you can specify the source
property to the image. For example, in the js9blend.html demo,
you can save the RGB pixels of the Chandra image by specifying use of
the "image" source and specifying the image's id in the display
parameter:
>>> SaveJPEG("foo.png", {"source":"image"}, {"display":"chandra.fits"});
If encoder quality parameter is not specified, a suitable default is
used. On FireFox (at least), this default values is 0.95 (I think).
Don't forget that the file is saved by the browser, in whatever
location you have set up for downloads.
"""
return self.send({'cmd': 'SaveJPEG', 'args': args})
def GetToolbar(self, *args):
"""
Get toolbar values from the Toolbar plugin
val = GetToolbar(type)
where:
- type: type of information to retrieve
returns:
- val: array of tool objects (or an argument-dependent return)
The GetToolbar() routine returns global information about the
Toolbar plugin. If the first argument is "showTooltips", the returned
value specifies whether tooltips are currently displayed. Otherwise
an array of tool objects is returned, one for each of the defined
tools in the toolbar.
"""
return self.send({'cmd': 'GetToolbar', 'args': args})
def SetToolbar(self, *args):
"""
Set toolbar values for the Toolbar plugin
SetToolbar(arg1, arg2)
where:
- arg1: a type-dependent id or value to set
- arg2: a type-dependent value to set
The SetToolbar() routine sets global information about the Toolbar
plugin. The following values can be specified as the first argument:
- init: the text "init" triggers a re-initialization of all
display Toolbar plugins, which is useful if you have changed
the JS9.globalOpts.toolBar array to specify a new set of
top-level tools.
- showTooltips: the text "showTooltips" uses the value of the
boolean arg2 to specify whether tooltips are displayed as the mouse
hovers over a tool.
- [text]: other text is assumed to be a JSON-formatted text
containing either a new tool to add to the toolbar, or an array of
tools.
- [object]: an object is assumed to be new tool to add to the toolbar
- [array]: an array is assumed to be an array of new tools to add to
the toolbar
New tools can be added to the toolbar at any time using this routine.
The text properties associated with a tool object are:
- name: name of the tool
- tip: a tooltip to display when the mouse hovers over the tool
- image: url (relative to the install directory) containing a PNG
image file to display as the tool icon
- cmd: name of the JS9 public routine to execute when the tool is
clicked
- args: array of arguments to pass to the JS9 public routine
Only the name and cmd properties are required. If no image is
specified, a button labeled by the name value will be used.
Examples of tool objects:
>>> {
>>> "name": "linear",
>>> "tip": "linear scale",
>>> "image": "images/toolbar/dax_images/lin.png",
>>> "cmd": "SetScale",
>>> "args": ["linear"]
>>> },
>>> {
>>> "name": "histeq",
>>> "tip": "histogram equalization",
>>> "cmd": "SetScale",
>>> "args": ["histeq"]
>>> },
>>> {
>>> "name": "annulus",
>>> "tip": "annulus region",
>>> "image": "images/toolbar/dax_images/annulus.png",
>>> "cmd": "AddRegions",
>>> "args": ["annulus"]
>>> },
>>> {
>>> "name": "remove",
>>> "tip": "remove selected region",
>>> "image": "images/toolbar/dax_images/erase.png",
>>> "cmd": "RemoveRegions",
>>> "args": ["selected"]
>>> },
>>> {
>>> "name": "zoom1",
>>> "tip": "zoom 1",
>>> "image": "images/toolbar/dax_images/mag_one.png",
>>> "cmd": "SetZoom",
>>> "args": [1]
>>> },
>>> {
>>> "name": "magnifier",
>>> "tip": "toggle magnifier display",
>>> "image": "images/toolbar/dax_images/mag.png",
>>> "cmd": "DisplayPlugin",
>>> "args": ["JS9Magnifier"]
>>> }
Each time a tool is added to the list of available tools, the active
Toolbar plugins will be re-initialized to display that tool. By
default, the new tool not be added to the top-level list: you must
also edit the JS9.globalOpts.toolBar array to add the name of the
tool. If this is done after you add the tool, remember to re-initialize
active toolbars by calling:
>>> SetToolbar("init");
"""
return self.send({'cmd': 'SetToolbar', 'args': args})
def UploadFITSFile(self, *args):
"""
Upload the currently displayed FITS file to a proxy server
call:
JS9.UploadFITSFile()
Upload the currently displayed FITS file to the proxy server, so
back-end analysis can be performed. This routine requires that a
Node.js-based JS9 helper is running and that the helper has enabled
the loadProxy property and set up a workDir directory in which to
store the FITS file.
"""
return self.send({'cmd': 'UploadFITSFile', 'args': args})
def GetFITSHeader(self, *args):
"""
Get FITS header as a string
call:
JS9.GetFITSHeader(nlflag)
where:
- nlflag: true if newlines should added to each card
Return the FITS header as a string. By default, the returned string
contains the 80-character FITS cards all concatenated together. If
nlflag is true, each card will have a new-line appended.
Note that the JS9.GetImageData() routine also returns the FITS
header, but as an object whose properties contain the header
values. For example, obj.SIMPLE will usually have a value of
true, obj.BITPIX will have contain the bits/pixel, etc. This
object is more useful for programming tasks, but does not
contain the FITS comments associated with each header card.
"""
return self.send({'cmd': 'GetFITSHeader', 'args': args})
def Print(self, *args):
"""
Print the current image
"""
return self.send({'cmd': 'Print', 'args': args})
def DisplayNextImage(self, *args):
"""
Display the Next (or Previous) Image
call:
JS9.DisplayNextImage(n)
where:
- n: number of images beyond (or prior to) the one currently displayed
The JS9.DisplayNextImage() routine displays the nth image in
the display's image list beyond the currently displayed image. The
default value for n is 1. You can supply a negative number to
display an image prior to the current one in the display's image list.
"""
return self.send({'cmd': 'DisplayNextImage', 'args': args})
def CreateMosaic(self, *args):
"""
Create a Mosaic Image
call:
JS9.CreateMosaic(which, opts)
where:
- which: which images to use in the mosaic
- opts: mosaic options
The JS9.CreateMosaic() creates a mosaic image from the specified
(previously-loaded) FITS images using the mProjectPP and mAdd programs
form the Montage software suite. These Montage programs have been
compiled into JS9 using Emscripten.
Because the browser environment is memory-limited, there are some
restrictions on generating mosaics in JS9. The FITS files must be
well-behaved, i.e. they must have WCS projections which can be
approximated by tangent-plane projections (TAN, SIN, ZEA, STG, ARC).
This precludes creating mosaics from images covering large portions of
the sky. For large sky areas, please use Montage itself on your desktop
to create a mosaic. A simplified js9mosaic script is included in
the JS9 distribution or, for more control, use the Montage programs
directly. Of course, in either case, you must install Montage.
The which parameter determine which images are used in the mosaic:
- "current" or null: the current image in this display
- "all": all images in this display
- im: the image id an image from any display
- [im1, im2, ...]: an array of image ids from any display
Use "current" (or null) if you have loaded a multi-extension
FITS mosaic into JS9. Use "all" if you have loaded several
FITS files into JS9 and want to create a mosaic.
In order to keep the size of the resulting mosaic within memory
limits, JS9 reduces the size of each image before adding them all
together The options parameter determines how the reduction is
performed:
- dim: size of mosaic (def: max of JS9.globalOpts.image.[xdim,ydim])
- reduce: image size reduction technique: "js9" (def) or "shrink"
- verbose: if true, processing output is sent to the javascript console
The "dim" parameter is a target size: the larger of the resulting
mosaic dimensions will be approximately this value, depending on how
Montage processes the images. The "reduce" technique either runs
internal JS9 image sectioning code (to produce smaller internal
images, each of which are reprojected and added together) or runs the
Montage mShrinkHdr code (which reprojects the full images into smaller
files). The former seems to be faster than the latter in most
cases. The "verbose" parameter will display output on the JavaScript
console to let you know that the CreateMosaic() call is running
properly.
The resulting mosaic will be loaded into the specified JS9 display as
a separate image. Because the mosaic is separate from the original
image(s), you can view each of the latter individually (or view each
image extension of a single image using the Extensions plugin).
Internal analysis can be performed on the mosaic but,
of course, no external analysis tasks will be available.
"""
return self.send({'cmd': 'CreateMosaic', 'args': args})
def ResizeDisplay(self, *args):
"""
Change the width and height of the JS9 display
call:
JS9.ResizeDisplay(width, height)
where:
- width: new width of the display in HTML pixels
- height: new height of the display in HTML pixels
- opts: optional object containing resize parameters
You can resize the JS9 display element by supplying new width and
height parameters. The div on the web page will be resized and the
image will be re-centered in the new display. If the display size has
been increased, more of the image will be displayed as needed (up to
the new size of the display). For example, if the original display was
512x512 and you increase it to 1024x1024, a 1024x1024 image will now
be displayed in its entirety.
The opts object can contain the following properties:
- resizeMenubar: change the width of the menubar as well
The default for resizeMenubar is True, so you only need
to pass this property if you do not want to perform the resize.
"""
return self.send({'cmd': 'ResizeDisplay', 'args': args})
def GatherDisplay(self, *args):
"""
Gather other images to this JS9 Display
call:
JS9.GatherDisplay(dname, opts)
where:
- dname: name of JS9 display to which the images will be gathered
- opts: optional object
You can supply an opts object containing the following properties:
- images: array of image handles (or indexes into JS9.images array)
to gather
This routine move all or selected images in other displays to this
display.
"""
return self.send({'cmd': 'GatherDisplay', 'args': args})
def SeparateDisplay(self, *args):
"""
Separate images in this JS9 Display into new displays
call:
JS9.SeparateDisplay(dname, opts)
where:
- dname: name of JS9 display from which the images will be separated
- opts: optional object for layout properties
This routine moves each image in this display to a new display.
You can supply an opts object containing the following properties:
- images: array of image handles (or indexes into JS9.images array)
to separate
- layout: can be "horizontal", "vertical", "auto" (default: "auto")
- leftMargin: margin in pixels between horizontally separated images
- topMargin: margin in pixels between vertically separated images
The "horizontal" layout will generate a single row of images. The
"vertical" layout will generate a single column of images. The "auto"
option will layout the images in one or more rows. Each row will
contain one or more images such that at least one-half of the
right-most image is visible in the browser without the need for
horizontal scrolling.
"""
return self.send({'cmd': 'SeparateDisplay', 'args': args})
def CenterDisplay(self, *args):
"""
Scroll the JS9 display to the center of the viewport
call:
JS9.CenterDisplay()
where:
- dname: name of JS9 display to center
This routine scrolls this display to the center of the viewport.
"""
return self.send({'cmd': 'CenterDisplay', 'args': args})
def CloseDisplay(self, *args):
"""
Close all images in a display
call:
JS9.CloseDisplay(dname)
where:
- dname: name of JS9 display whose images will be closed
This routine closes all images in the specified display.
"""
return self.send({'cmd': 'CloseDisplay', 'args': args})
def RenameDisplay(self, *args):
"""
Rename the id of a JS9 display
calling sequences:
JS9.RenameDisplay(nid) # change default id (usually "JS9") to nid
JS9.RenameDisplay(oid, nid) # change oid to nid
where:
- oid: old name of JS9 display
- nid: new name of JS9 display
This routine is used by the Desktop version of JS9 to implement the
--title (and --renameid) switch(es), which change the id of the
JS9 display(s) to the specified id(s). Once an id has been renamed,
external communication (via the js9 script or pyjs9) should target
the new id instead of the original id.
The original id is still available internally, so Javascript public
API calls on the web page itself can target either the original or
the new id using the {display: "id"} syntax.
"""
return self.send({'cmd': 'RenameDisplay', 'args': args})
def RemoveDisplay(self, *args):
"""
Close all images in a display and remove the display
call:
JS9.RemoveDisplay(dname)
where:
- dname: name of JS9 display to remove
This routine will close all images in the specified display and then
remove the display. It is available for displays contained in
light windows and for displays contained in JS9 Grid Containers. When
removing the display inside a light window, the light window is
immediately closed without a confirmation dialog box (unlike a light
window being closed via its close button.) For a display inside
a JS9 Grid Container, the display is removed from the DOM, so that it
no longer is part of the grid layout. Note, however, that you cannot
remove all displays from a grid container: at least one display must be
left in the container.
"""
return self.send({'cmd': 'RemoveDisplay', 'args': args})
def DisplayHelp(self, *args):
"""
Display help in a light window
call:
JS9.DisplayHelp(name)
where:
- name: name of a help file or url of a web site to display
The help file names are the property names in JS9.helpOpts (e.g.,
'user' for the user page, 'install' for the install page, etc.).
Alternatively, you can specify an arbitrary URL to display (just
because).
"""
return self.send({'cmd': 'DisplayHelp', 'args': args})
def LightWindow(self, *args):
"""
Display content in a light window
call:
JS9.LightWindow(id, type, content, title, opts)
where:
- id: unique id for light window div(default: "lightWindow" + uniqueID)
- type: content type: "inline", "div", "ajax", "iframe" (def: "inline")
- content: content of the light window (default: none)
- title: title (default: "JS9 light window")
- opts: configuration string
(default: "width=830px,height=400px,center=1,resize=1,scrolling=1")
Display arbitrary content inside a light window. There are any number
of light window routines available on the Net. JS9 uses light window
routines developed by Dynamic Drive (http://www.dynamicdrive.com).
Extensive documentation can be found on the Dynamic Drive web
site: http://www.dynamicdrive.com/dynamicindex8/dhtmlwindow.
The content shown inside the window depends on the content parameter:
- iframe: the URL of the page to display (ie: "http://www.google.com")
- inline: the HTML to display (back-slashing any special JavaScript
characters, such as apostrophes)
- ajax: the relative path to the external page to display, relative to
the current page (ie: "../external.htm")
- div: define a DIV element on the page with a unique ID attribute
(probably hidden using style="display:none") and the use the DIV's id
as the content value
JS9 typically uses the inline option. Note that web sites often
do not allow themselves to be embedded in an iframe, so this is an
unreliable option.
The opts parameter specifies options for the light window, such
as its size. This parameter consists of a string with comma-separated
keywords, e.g.:
>>> "width=830px,height=400px,center=1,resize=1,scrolling=1"
The opts keywords, defined in the Dynamic Drive documentation, are:
width, height, left, top, center, resize, and scrolling. The
JS9.lightOpts.dhtml object defines oft-used lightwin configurations,
and the JS9.lightOpts.dhtml.textWin property is used as the
default for this call. You can utilize these properties in your own
call to LightWindow() or make up your own configuration string.
As an extension to the Dynamic Drive light window support, JS9 adds
the ability to double-click the title bar in order to close the window.
"""
return self.send({'cmd': 'LightWindow', 'args': args})
def analysis(self, *args):
"""
run/list analysis for current image
This is a commmand-style routine, easier to type than the full routine
at the expense of some flexibility:
- with no arguments, the getter is called to retrieve current values.
- with arguments, the setter is called to set current values.
Returned results are of type string.
"""
return self.send({'cmd': 'analysis', 'args': args})
def colormap(self, *args):
"""
set/get colormap for current image
This is a commmand-style routine, easier to type than the full routine
at the expense of some flexibility:
- with no arguments, the getter is called to retrieve current values.
- with arguments, the setter is called to set current values.
Returned results are of type string: 'colormap contrast bias'
"""
return self.send({'cmd': 'colormap', 'args': args})
def cmap(self, *args):
"""
set/get colormap for current image (alias)
This is a commmand-style routine, easier to type than the full routine
at the expense of some flexibility:
- with no arguments, the getter is called to retrieve current values.
- with arguments, the setter is called to set current values.
Returned results are of type string: 'colormap contrast bias'
"""
return self.send({'cmd': 'cmap', 'args': args})
def colormaps(self, *args):
"""
get list of available colormaps
No setter routine is provided.
Returned results are of type string: 'grey, red, ...'
"""
return self.send({'cmd': 'colormaps', 'args': args})
def helper(self, *args):
"""
get helper info
"""
return self.send({'cmd': 'helper', 'args': args})
def image(self, *args):
"""
get name of currently loaded image or display specified image
This is a commmand-style routine, easier to type than the full routine
at the expense of some flexibility:
- with no arguments, the getter is called to retrieve current values.
- with arguments, the setter is called to set current values.
Returned results are of type string.
"""
return self.send({'cmd': 'image', 'args': args})
def images(self, *args):
"""
get list of currently loaded images
No setter routine is provided.
Returned results are of type string.
"""
return self.send({'cmd': 'images', 'args': args})
def load(self, *args):
"""
load image(s)
No getter routine is provided.
"""
return self.send({'cmd': 'load', 'args': args})
def pan(self, *args):
"""
set/get pan location for current image
This is a commmand-style routine, easier to type than the full routine
at the expense of some flexibility:
- with no arguments, the getter is called to retrieve current values.
- with arguments, the setter is called to set current values.
Returned results are of type string: 'x y'
"""
return self.send({'cmd': 'pan', 'args': args})
def regcnts(self, *args):
"""
get background-subtracted counts in regions
This is a commmand-style routine, easier to type than the full routine:
- with no arguments, acts as if the Analysis menu option was chosen
- with arguments, acts like the full routine
With arguments, returned results are of type string.
"""
return self.send({'cmd': 'regcnts', 'args': args})
def region(self, *args):
"""
add region to current image or list all regions (alias)
This is a commmand-style routine, easier to type than the full routine
at the expense of some flexibility:
- with no arguments, the getter is called to retrieve current values.
- with arguments, the setter is called to set current values.
Returned results are of type string.
"""
return self.send({'cmd': 'region', 'args': args})
def regions(self, *args):
"""
add region to current image or list all regions
This is a commmand-style routine, easier to type than the full routine
at the expense of some flexibility:
- with no arguments, the getter is called to retrieve current values.
- with arguments, the setter is called to set current values.
Returned results are of type string.
"""
return self.send({'cmd': 'regions', 'args': args})
def resize(self, *args):
"""
set/get size of the JS9 display
This is a commmand-style routine, easier to type than the full routine
at the expense of some flexibility:
- with no arguments, the getter is called to retrieve current values.
- with arguments, the setter is called to set current values.
Returned results are of type string: 'width height'
"""
return self.send({'cmd': 'resize', 'args': args})
def scale(self, *args):
"""
set/get scaling for current image
This is a commmand-style routine, easier to type than the full routine
at the expense of some flexibility:
- with no arguments, the getter is called to retrieve current values.
- with arguments, the setter is called to set current values.
Returned results are of type string: 'scale scalemin scalemax'
"""
return self.send({'cmd': 'scale', 'args': args})
def scales(self, *args):
"""
get list of available scales
No setter routine is provided.
Returned results are of type string: 'linear, log, ...'
"""
return self.send({'cmd': 'scales', 'args': args})
def wcssys(self, *args):
"""
set/get wcs system for current image
This is a commmand-style routine, easier to type than the full routine
at the expense of some flexibility:
- with no arguments, the getter is called to retrieve current values.
- with arguments, the setter is called to set current values.
Returned results are of type string.
"""
return self.send({'cmd': 'wcssys', 'args': args})
def wcsu(self, *args):
"""
set/get wcs units used for current image
This is a commmand-style routine, easier to type than the full routine
at the expense of some flexibility:
- with no arguments, the getter is called to retrieve current values.
- with arguments, the setter is called to set current values.
Returned results are of type string.
"""
return self.send({'cmd': 'wcsu', 'args': args})
def wcssystems(self, *args):
"""
get list of available wcs systems
No setter routine is provided.
Returned results are of type string: 'FK4, FK5, ...'
"""
return self.send({'cmd': 'wcssystems', 'args': args})
def wcsunits(self, *args):
"""
get list of available wcs units
No setter routine is provided.
Returned results are of type string: 'degrees, ...'
"""
return self.send({'cmd': 'wcsunits', 'args': args})
def zoom(self, *args):
"""
set/get zoom for current image
This is a commmand-style routine, easier to type than the full routine
at the expense of some flexibility:
- with no arguments, the getter is called to retrieve current values.
- with arguments, the setter is called to set current values.
Returned results are type integer or float.
"""
return self.send({'cmd': 'zoom', 'args': args})
|
[
"pyfits.HDUList",
"io.BytesIO",
"logging.error",
"logging.debug",
"json.loads",
"socketio.Client",
"logging.warning",
"numpy.frombuffer",
"threading.Condition",
"time.sleep",
"logging.info",
"pyfits.PrimaryHDU",
"numpy.array",
"requests.post",
"numpy.ascontiguousarray",
"numpy.issubdtype"
] |
[((1775, 1813), 'logging.info', 'logging.info', (['"""set socketio transport"""'], {}), "('set socketio transport')\n", (1787, 1813), False, 'import logging\n'), ((1907, 1961), 'logging.info', 'logging.info', (['"""no python-socketio, use html transport"""'], {}), "('no python-socketio, use html transport')\n", (1919, 1961), False, 'import logging\n'), ((9471, 9521), 'logging.debug', 'logging.debug', (['"""socketio callback, args: %s"""', 'args'], {}), "('socketio callback, args: %s', args)\n", (9484, 9521), False, 'import logging\n'), ((3841, 3870), 'numpy.issubdtype', 'numpy.issubdtype', (['dtype', 't[0]'], {}), '(dtype, t[0])\n', (3857, 3870), False, 'import numpy\n'), ((11121, 11132), 'threading.Condition', 'Condition', ([], {}), '()\n', (11130, 11132), False, 'from threading import Condition\n'), ((13117, 13137), 'pyfits.PrimaryHDU', 'fits.PrimaryHDU', (['arr'], {}), '(arr)\n', (13132, 13137), True, 'import pyfits as fits\n'), ((13160, 13179), 'pyfits.HDUList', 'fits.HDUList', (['[hdu]'], {}), '([hdu])\n', (13172, 13179), True, 'import pyfits as fits\n'), ((14324, 14333), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (14331, 14333), False, 'from io import BytesIO\n'), ((10337, 10378), 'requests.post', 'requests.post', (["(host + '/' + msg)"], {'json': 'obj'}), "(host + '/' + msg, json=obj)\n", (10350, 10378), False, 'import requests\n'), ((10819, 10861), 'json.loads', 'json.loads', (['urtn'], {'object_hook': '_decode_dict'}), '(urtn, object_hook=_decode_dict)\n', (10829, 10861), False, 'import json\n'), ((17996, 18025), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['narr'], {}), '(narr)\n', (18019, 18025), False, 'import numpy\n'), ((8209, 8259), 'socketio.Client', 'socketio.Client', ([], {'logger': '(True)', 'engineio_logger': '(True)'}), '(logger=True, engineio_logger=True)\n', (8224, 8259), False, 'import socketio\n'), ((8366, 8383), 'socketio.Client', 'socketio.Client', ([], {}), '()\n', (8381, 8383), False, 'import socketio\n'), ((8509, 8570), 
'logging.warning', 'logging.warning', (['"""socketio connect failed: %s, using html"""', 'e'], {}), "('socketio connect failed: %s, using html', e)\n", (8524, 8570), False, 'import logging\n'), ((8900, 8917), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (8910, 8917), False, 'import time\n'), ((12000, 12045), 'logging.error', 'logging.error', (['"""socketio close failed: %s"""', 'e'], {}), "('socketio close failed: %s', e)\n", (12013, 12045), False, 'import logging\n'), ((5448, 5475), 'numpy.array', 'numpy.array', (['s'], {'dtype': 'dtype'}), '(s, dtype=dtype)\n', (5459, 5475), False, 'import numpy\n'), ((5535, 5562), 'numpy.array', 'numpy.array', (['s'], {'dtype': 'dtype'}), '(s, dtype=dtype)\n', (5546, 5562), False, 'import numpy\n'), ((5738, 5770), 'numpy.frombuffer', 'numpy.frombuffer', (['s'], {'dtype': 'dtype'}), '(s, dtype=dtype)\n', (5754, 5770), False, 'import numpy\n'), ((5830, 5862), 'numpy.frombuffer', 'numpy.frombuffer', (['s'], {'dtype': 'dtype'}), '(s, dtype=dtype)\n', (5846, 5862), False, 'import numpy\n')]
|
#!/usr/bin/env python
from collections import defaultdict
from math import ceil
def solve(input):
    """Solve both parts of AoC 2019 day 14 ("Space Stoichiometry").

    `input` is an iterable of reaction lines such as "7 A, 1 B => 1 C".
    Part 1: ore needed for one unit of FUEL.  Part 2: the most FUEL that
    one trillion ore can produce.

    Both results are printed (as before) and, new in this revision, also
    returned as a tuple `(ore_1fuel, max_fuel)` so callers and tests can
    consume them directly.  NOTE: the parameter name `input` shadows the
    builtin; kept for caller compatibility.
    """
    def element(s):
        # "10 ORE" -> ('ORE', 10)
        n, el = s.split()
        return (el, int(n))
    def nxt(needs):
        # Return any still-needed element other than raw ORE, or None.
        for el, n in needs.items():
            if el != 'ORE' and n > 0:
                return (el, n)
    def ore_needed(f):
        # Work backwards from f units of FUEL to the total ORE required.
        needs = defaultdict(int)
        needs['FUEL'] = f
        while next_el := nxt(needs):
            el, need = next_el
            produces, ins = reactions[el]
            factor = ceil(need/produces)
            # May go negative: surplus from producing in whole batches.
            needs[el] -= factor*produces
            for (in_el, in_need) in ins:
                needs[in_el] += factor*in_need
        return needs['ORE']
    # Parse "A, B => C" reaction lines into {output: (qty, [(input, qty), ...])}.
    reactions = {}
    for line in input:
        ins, out = line.split(' => ')
        el, produces = element(out)
        reactions[el] = (produces, [element(i) for i in ins.split(', ')])
    ore_1fuel = ore_needed(1)
    print(ore_1fuel)
    # Part 2: binary search for the most fuel a trillion ore can buy.
    max_ore = 1000000000000
    low = max_ore // ore_1fuel  # lower bound: ignores batching surplus
    high = low * 2  # assumed upper bound (surplus reuse at most doubles yield)
    max_fuel = 0
    while low < high:
        fuel = (low+high+1)//2
        ore = ore_needed(fuel)
        if ore <= max_ore:
            max_fuel = max(fuel, max_fuel)
            low = fuel
        else:
            high = fuel-1
    print(max_fuel)
    return ore_1fuel, max_fuel
# Commented-out variant for running against the example input file:
# with open('test.txt', 'r') as f:
#     input = f.read().splitlines()
# solve(input)
# Read the puzzle input (one reaction per line) and solve both parts.
with open('input.txt', 'r') as f:
    input = f.read().splitlines()
solve(input)
|
[
"collections.defaultdict",
"math.ceil"
] |
[((341, 357), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (352, 357), False, 'from collections import defaultdict\n'), ((516, 537), 'math.ceil', 'ceil', (['(need / produces)'], {}), '(need / produces)\n', (520, 537), False, 'from math import ceil\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright (c) the purl authors
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Visit https://github.com/package-url/packageurl-python for support and
# download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from packageurl import PackageURL
from packageurl.contrib.route import Router
from packageurl.contrib.route import NoRouteAvailable
import requests
router = Router()  # module-level registry; the @router.route decorators below attach handlers
def purl2url(purl):
    """
    Return a URL inferred from the `purl` string.

    Returns None when `purl` is empty or when no registered route
    matches the package type.
    """
    if not purl:
        return None
    try:
        return router.process(purl)
    except NoRouteAvailable:
        return None


# Backward-compatible alias.
get_url = purl2url
@router.route("pkg:cargo/.*")
def build_cargo_download_url(purl):
"""
Return a cargo download URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
name = purl_data.name
version = purl_data.version
if not (name and version):
return
return "https://crates.io/api/v1/crates/{name}/{version}/download".format(
name=name, version=version
)
@router.route("pkg:bitbucket/.*")
def build_bitbucket_homepage_url(purl):
"""
Return a bitbucket homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
namespace = purl_data.namespace
name = purl_data.name
version = purl_data.version
subpath = purl_data.subpath
if not (name and namespace):
return
url = "https://bitbucket.org/{namespace}/{name}".format(
namespace=namespace, name=name
)
if version:
url = "{url}/src/{version}".format(url=url, version=version)
if subpath:
url = "{url}/{subpath}".format(url=url, subpath=subpath)
return url
@router.route("pkg:github/.*")
def build_github_homepage_url(purl):
"""
Return a github homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
namespace = purl_data.namespace
name = purl_data.name
version = purl_data.version
subpath = purl_data.subpath
if not (name and namespace):
return
url = "https://github.com/{namespace}/{name}".format(namespace=namespace, name=name)
if version:
url = "{url}/tree/{version}".format(url=url, version=version)
if subpath:
url = "{url}/{subpath}".format(url=url, subpath=subpath)
return url
@router.route("pkg:gitlab/.*")
def build_gitlab_homepage_url(purl):
"""
Return a gitlab homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
namespace = purl_data.namespace
name = purl_data.name
version = purl_data.version
subpath = purl_data.subpath
if not (name and namespace):
return
url = "https://gitlab.com/{namespace}/{name}".format(namespace=namespace, name=name)
if version:
url = "{url}/-/tree/{version}".format(url=url, version=version)
if subpath:
url = "{url}/{subpath}".format(url=url, subpath=subpath)
return url
@router.route("pkg:rubygems/.*")
def build_gem_download_url(purl):
"""
Return a rubygems homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
name = purl_data.name
version = purl_data.version
if not (name and version):
return
return "https://rubygems.org/downloads/{name}-{version}.gem".format(
name=name, version=version
)
@router.route("pkg:maven/.*")
def build_maven_download_url(purl):
"""
Return a maven homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
name = purl_data.name
version = purl_data.version
#distribution = '' # for binary jar
distribution = '-sources' # for source
if not (name and version):
return
return "https://repo.maven.apache.org/maven2/{name}/{name}/{version}/{name}-{version}{distribution}.jar".format(name=name, version=version, distribution=distribution)
@router.route("pkg:npm/.*")
def build_npm_download_url(purl):
"""
Return an npm homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
namespace = purl_data.namespace
name = purl_data.name
version = purl_data.version
# javascript appears to be source-only.
# Across all of the npmjs URLs we've observed, all have '/-/' before the name-version.tgz.
# They all end in tgz. (No zip files, etc.)
# If namespace '@something' is present, it is placed before 'name'.
# Otherwise the namespace level is collapsed.
if namespace:
return "https://registry.npmjs.org/{namespace}/{name}/-/{name}-{version}.tgz".format(namespace=namespace, name=name, version=version)
else:
return "https://registry.npmjs.org/{name}/-/{name}-{version}.tgz".format(name=name, version=version)
@router.route("pkg:pypi/.*")
def build_pypi_download_url(purl):
"""
Return an npm homepage URL `url` from a the `purl` string
"""
purl_data = PackageURL.from_string(purl)
namespace = purl_data.namespace
name = purl_data.name
version = purl_data.version
subpath = purl_data.subpath
#distribution = 'bdist' # for binary (wheel, etc.)
distribution = 'sdist' # for source
# TODO: Caching these results would allow multiple versions to be
# handled from the same query.
results = requests.get("https://pypi.org/pypi/{name}/json".format(name=name))
try:
if results.status_code == 200:
for item in results.json()['releases'][version]:
# packagetype can be 'sdist', 'bdist-whl', etc.
if distribution in item['packagetype']:
# if distribution is never found, fall through and return None.
return item['url']
except KeyError:
# assume that an IndexError is caused by a bad reply and return None.
pass
except KeyError:
# assume that a KeyError is caused by an incorrect version string, or
# a bad reply. Return None in either case.
pass
# return None unless complete success.
return None
|
[
"packageurl.contrib.route.Router",
"packageurl.PackageURL.from_string"
] |
[((1512, 1520), 'packageurl.contrib.route.Router', 'Router', ([], {}), '()\n', (1518, 1520), False, 'from packageurl.contrib.route import Router\n'), ((1910, 1938), 'packageurl.PackageURL.from_string', 'PackageURL.from_string', (['purl'], {}), '(purl)\n', (1932, 1938), False, 'from packageurl import PackageURL\n'), ((2341, 2369), 'packageurl.PackageURL.from_string', 'PackageURL.from_string', (['purl'], {}), '(purl)\n', (2363, 2369), False, 'from packageurl import PackageURL\n'), ((3002, 3030), 'packageurl.PackageURL.from_string', 'PackageURL.from_string', (['purl'], {}), '(purl)\n', (3024, 3030), False, 'from packageurl import PackageURL\n'), ((3648, 3676), 'packageurl.PackageURL.from_string', 'PackageURL.from_string', (['purl'], {}), '(purl)\n', (3670, 3676), False, 'from packageurl import PackageURL\n'), ((4297, 4325), 'packageurl.PackageURL.from_string', 'PackageURL.from_string', (['purl'], {}), '(purl)\n', (4319, 4325), False, 'from packageurl import PackageURL\n'), ((4709, 4737), 'packageurl.PackageURL.from_string', 'PackageURL.from_string', (['purl'], {}), '(purl)\n', (4731, 4737), False, 'from packageurl import PackageURL\n'), ((5257, 5285), 'packageurl.PackageURL.from_string', 'PackageURL.from_string', (['purl'], {}), '(purl)\n', (5279, 5285), False, 'from packageurl import PackageURL\n'), ((6131, 6159), 'packageurl.PackageURL.from_string', 'PackageURL.from_string', (['purl'], {}), '(purl)\n', (6153, 6159), False, 'from packageurl import PackageURL\n')]
|
import json
from influxdb import InfluxDBClient
import logging
import platform
from typing import Dict, List
# Configure root logging once at import time: errors only by default,
# with millisecond-precision timestamps.
logging.basicConfig(
    level=logging.ERROR,
    format='%(asctime)s.%(msecs)03d %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)
# Module-level logger used by the connector and payload builder below.
logger = logging.getLogger(__name__)
class InfluxDBConnector(object):
    """Thin wrapper around an InfluxDB client connection."""

    def __init__(self, database, password, username, host=None, port=None):
        """Open a client connection; host defaults to localhost, port to 8086."""
        self.influx_client = InfluxDBClient(
            host or 'localhost',
            port or 8086,
            username,
            password,
            database,
        )

    def send(self, data: Dict, hostname: str = None) -> None:
        """
        Send the data over to the Influx server.
        """
        json_payload = _build_payload(data, hostname=hostname)
        logger.info('Sending payload to InfluxDB server')
        logger.info(json.dumps(json_payload, indent=2))
        self.influx_client.write_points(json_payload)
        logger.info('Payload sent')
def _build_payload(data: Dict, hostname: str = None) -> List:
"""
Break out each reading into measurements that Influx will understand.
"""
logger.info('Building payload for Influxdb')
payload_values = []
# location isn't a measurement we want to log.
location = data.pop('location', 'unset location')
if not hostname:
hostname = platform.node()
for name, value in data.items():
payload = {
'measurement': name,
'tags': {'host': hostname, 'location': location},
'fields': {'value': float(value)},
}
payload_values.append(payload)
return payload_values
|
[
"platform.node",
"logging.basicConfig",
"influxdb.InfluxDBClient",
"json.dumps",
"logging.getLogger"
] |
[((111, 251), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.ERROR', 'format': '"""%(asctime)s.%(msecs)03d %(levelname)s: %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(level=logging.ERROR, format=\n '%(asctime)s.%(msecs)03d %(levelname)s: %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n", (130, 251), False, 'import logging\n'), ((267, 294), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (284, 294), False, 'import logging\n'), ((535, 591), 'influxdb.InfluxDBClient', 'InfluxDBClient', (['host', 'port', 'username', 'password', 'database'], {}), '(host, port, username, password, database)\n', (549, 591), False, 'from influxdb import InfluxDBClient\n'), ((1369, 1384), 'platform.node', 'platform.node', ([], {}), '()\n', (1382, 1384), False, 'import platform\n'), ((869, 903), 'json.dumps', 'json.dumps', (['json_payload'], {'indent': '(2)'}), '(json_payload, indent=2)\n', (879, 903), False, 'import json\n')]
|
import json
import sys
import os
import jsonlines
import traceback
import logging
from tqdm import tqdm
import pickle
import itertools
import linecache
import html
import re
ALL_TITLES = {}  # module-level title registry; empty at import time
class WikiElement(object):
    """Abstract interface for a parsed wiki element.

    Subclasses are expected to override these methods; every base
    implementation is a no-op that returns None.
    """
    def get_ids(self) -> list:
        """Return a list of all ids in this element (no-op base implementation)."""
        pass
    def get_id(self) -> str:
        """Return the specific id of this element (no-op base implementation)."""
    def id_repr(self) -> str:
        """Return a string representation of all ids in this element."""
        pass
    def __str__(self) -> str:
        """Return a string representation of the element's content."""
        pass
def process_text(text):
    """Normalize raw text by trimming leading and trailing whitespace."""
    cleaned = text.strip()
    return cleaned
def calculate_title_to_json_map(input_path):
    """Map each wiki page title to the (file, line) where it was found.

    Iterates every page under `input_path` with WikiDataProcessor and
    records the processor's current file and line for each page title.
    If the same title occurs more than once, the last occurrence wins.
    """
    title_to_json_map = {}
    from utils.wiki_processor import WikiDataProcessor
    # os.path.join() with a single argument is a no-op, so pass the path directly.
    wiki_processor = WikiDataProcessor(input_path)
    for page in wiki_processor:
        # current_file/current_line change as the processor advances, so
        # they must be read inside the loop, per page.
        title_to_json_map[page.title.content] = (wiki_processor.current_file, wiki_processor.current_line)
    return title_to_json_map
class Reader:
    """Base file reader.

    Opens a file with a configurable text encoding and delegates the
    actual parsing to `process`, which subclasses override.
    """

    def __init__(self, encoding="utf-8"):
        self.enc = encoding

    def read(self, file):
        """Open `file` with this reader's encoding and parse its contents."""
        with open(file, "r", encoding=self.enc) as handle:
            return self.process(handle)

    def process(self, f):
        """Parse an open file object; the base implementation returns None."""
        pass
class JSONReader(Reader):
    """Reader that parses the whole file as a single JSON document."""
    def process(self,fp):
        """Return the JSON value parsed from the open file object `fp`."""
        return json.load(fp)
class JSONLineReader(Reader):
    """Reader for JSON-lines files: one JSON document per line."""

    def process(self, fp):
        """Return a list with one parsed JSON value per line of `fp`.

        Iterates the file lazily instead of materializing every line with
        readlines(); json.loads tolerates surrounding whitespace, so the
        old explicit strip() was redundant.
        """
        return [json.loads(line) for line in fp]
|
[
"json.load",
"os.path.join"
] |
[((848, 872), 'os.path.join', 'os.path.join', (['input_path'], {}), '(input_path)\n', (860, 872), False, 'import os\n'), ((1500, 1513), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1509, 1513), False, 'import json\n')]
|
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
import time,random,names,os,requests,sys
from seleniumwire import webdriver
from random_username.generate import generate_username
from selenium.webdriver.firefox.options import Options
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.proxy import *
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
global DELAYS,CREDS,driver,proxyauth_plugin_path,proxy
import undetected_chromedriver as uc
from pyvirtualdisplay import Display
from selenium import webdriver
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent
# Start a virtual X display so the browser can run on a headless host.
display = Display(visible=0, size=(800, 600))
display.start()
# Stored credentials (populated at runtime).
CREDS={}
# Random-delay bounds used to mimic human typing/clicking.
DELAYS={
    "keys_min":50,# Delays in miliseconds (per-keystroke; divided by 1000.0 when used)
    "keys_max":200,
    "min":200,# general action delays; divided by 100.0 when used
    "max":1000,
}
def getProxy():
    """Fetch a random elite US proxy dict from the proxyflow API.

    Keeps requesting until the returned proxy's protocol is one of
    https/socks4/socks5.
    SECURITY NOTE(review): the API token is hard-coded in the URL —
    move it to configuration/secrets storage.
    """
    a=requests.get('https://api.proxyflow.io/v1/proxy/random?token=c9997120aae549e43c4b45a3&country=US&anonymity=elite&ssl=true').json()
    while a['protocol'] not in ['https','socks4','socks5']:
        a=requests.get('https://api.proxyflow.io/v1/proxy/random?token=c9997120aae549e43c4b45a3&country=US&anonymity=elite&ssl=true').json()
    return a
def agent():
    """Return a randomly chosen desktop-Linux browser user-agent string."""
    user_agents = (
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36',
        'Mozilla/5.0 (X11; Linux i686; rv:82.0) Gecko/20100101 Firefox/82.0',
        'Mozilla/5.0 (Linux x86_64; rv:82.0) Gecko/20100101 Firefox/82.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:82.0) Gecko/20100101 Firefox/82.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:82.0) Gecko/20100101 Firefox/82.0',
        'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:82.0) Gecko/20100101 Firefox/82.0',
        'Mozilla/5.0 (X11; Linux i686; rv:78.0) Gecko/20100101 Firefox/78.0',
        'Mozilla/5.0 (Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:78.0) Gecko/20100101 Firefox/78.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0',
        'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0',
    )
    return random.choice(user_agents)
def startDriver():
    """Create the module-global Chrome webdriver behind a virtual display.

    Fixes from review: the previous version referenced `self.proxy`,
    `self.agent()` and `self.driver` although this is a module-level
    function (apparently pasted from a class), which raised NameError at
    runtime.  It now uses the module-level `agent()` helper and assigns
    the module-global `driver`.  The broken `--proxy-server={self.proxy}`
    argument was removed; the explicit proxy-server argument below is kept.
    """
    global driver
    print('------ startDriver init')
    from pyvirtualdisplay import Display
    display = Display(visible=0, size=(800, 600))
    display.start()
    opts = webdriver.ChromeOptions()
    opts.binary_location = "/usr/bin/google-chrome"
    opts.add_extension(os.getcwd()+"/Proxy Auto Auth.crx")
    opts.add_argument("--proxy-server=http://nataly.ltespace.com:15584")
    opts.add_argument("user-agent=%s" % agent())
    opts.add_argument('--disable-gpu')
    # opts.add_argument('--headless')
    opts.add_argument('--no-sandbox')
    capabilities = DesiredCapabilities.CHROME
    driver = webdriver.Chrome(chrome_options=opts, desired_capabilities=capabilities,executable_path=f'{BASE_DIR}/chromedriver')
    print('------ driver started')
def openLogin():
    """Navigate the global driver to the Instagram login page.

    Returns True when the username input is present (page considered
    loaded), False on any failure so the caller can retry.
    """
    print('open login')
    try:
        driver.get('https://www.instagram.com/accounts/login/')
        print('login page')
        time.sleep(4)
        # Presence of the username field is the "page is ready" check.
        driver.find_element_by_name("username")
        return True
    except:
        return False
def login(un,pw):
    """Log the global driver into Instagram as username `un` / password `pw`.

    Retries opening the login page until it loads, then types the
    credentials with randomized human-like delays and clicks "Log In".
    """
    print('---------- def login init')
    x=openLogin()
    while not x:
        x=openLogin()
    # DELAYS min/max are divided by 100.0, i.e. interpreted as hundredths of a second.
    time.sleep(random.randint(DELAYS["min"], DELAYS["max"])/100.0)
    driver.find_element_by_name("username").send_keys(un)
    time.sleep(random.randint(DELAYS["min"], DELAYS["max"])/100.0)
    driver.find_element_by_name("password").send_keys(pw)
    time.sleep(random.randint(DELAYS["min"], DELAYS["max"])/100.0)
    driver.find_element_by_xpath('''//div[contains(text(),"Log In")]''').click()
    time.sleep(10)
def searchUser(name):
    """Search Instagram for `name` and return its follower link elements.

    Searches via the UI, opens the first matching profile, opens its
    followers dialog and scrolls until no new entries appear, then
    returns the follower anchor elements.
    NOTE(review): if no search result other than instagram.com is found,
    `profileLink` is never bound and the function raises NameError; the
    log file handles opened here are never explicitly closed.
    """
    print('---------- def searchUser init')
    open(os.getcwd()+"/dmlog.txt","a").write('Seraching followers of %s \n' % name)
    driver.get('https://www.instagram.com/accounts/onetap')
    time.sleep(4)
    driver.find_element_by_xpath('''//input[@placeholder='Search']''').send_keys(name)
    time.sleep(2)
    # First search-result link that is not the Instagram home page is the profile.
    for i in driver.find_elements_by_xpath('//div/a'):
        link=i.get_attribute('href')
        if link=='https://www.instagram.com/':
            pass
        else:
            profileLink=link
            print(profileLink)
            break
    if profileLink[-1]=="/":
        profileLink=profileLink[:-1]
    driver.get(profileLink)
    time.sleep(5)
    driver.find_element_by_xpath('''//a[@href='/%s/followers/']''' % (profileLink.split("/")[-1])).click()
    time.sleep(5)
    driver.find_elements_by_xpath('//div/ul/div/li')[0].click()
    # Scroll the followers dialog until the list stops growing.
    while 1:
        x=len(driver.find_elements_by_xpath('//div/ul/div/li'))
        for i in range(500):
            try:
                driver.find_elements_by_xpath('//div/ul/div/li')[-1].click()
            except:
                pass
            ActionChains(driver).send_keys(Keys.DOWN).perform()
        y=len(driver.find_elements_by_xpath('//div/ul/div/li'))
        if not y>x:
            break
    print("Found %d Followers"%(len(driver.find_elements_by_xpath('//div/ul/div/li'))))
    open(os.getcwd()+"/dmlog.txt","a").write("Found %d Followers"%(len(driver.find_elements_by_xpath('//div/ul/div/li'))))
    return driver.find_elements_by_xpath('//div/ul/div/li//span/a')
def dm(user,message):
    """Send the string `message` as a DM to the Instagram account `user`.

    Opens the user's profile, clicks "Message", dismisses the optional
    notification prompt, then types the message character by character
    with randomized delays and clicks "Send".  Raises Exception when no
    Message button is available and the account cannot be messaged.
    """
    print('---------- direct mesasge init')
    driver.get("https://www.instagram.com/"+user)
    time.sleep(5)
    try:
        driver.find_element_by_xpath('''//button[contains(text(),"Message")]''').click()
    except:
        try:
            # A "Follow" button without a "Message" button is tolerated;
            # anything else means the account cannot be messaged.
            driver.find_element_by_xpath('''//button[contains(text(),"Follow")]''')
        except:
            print("Cant Send Message")
            open(os.getcwd()+"/dmlog.txt","a").write('Cant send DM to %s \n' % user)
            raise Exception("Cant Send Message")
    time.sleep(5)
    try:
        # Dismiss the "Turn on Notifications" prompt if it appears.
        driver.find_element_by_xpath('''//button[contains(text(),"Not Now")]''').click()
    except:
        pass
    time.sleep(2)
    # Type one character at a time with human-like per-keystroke delays.
    for i in message:
        driver.find_element_by_xpath('''//textarea[@placeholder='Message...']''').send_keys(i)
        time.sleep(random.randint(DELAYS["keys_min"], DELAYS["keys_max"])/1000.0)
    time.sleep(2)
    driver.find_element_by_xpath('''//button[contains(text(),"Send")]''').click()
    open(os.getcwd()+"/dmlog.txt","a").write('DM send successfully to %s \n' % user)
def getFollowers(name):
    """Start a driver, log in with the built-in account, and return the
    follower link elements of the profile matching `name`.

    SECURITY NOTE(review): credentials are hard-coded below — move them
    to configuration/secrets storage.
    """
    print('---------- def getFollowers init')
    un='sajith8827'
    pw='36SJ7QBUphCCkY9'
    startDriver()
    login(un,pw)
    return searchUser(name)
def sendDM(user,message,un,pw):
    """Send `message` as an Instagram DM to every username in `user`.

    Starts a driver, logs in as `un`/`pw`, then DMs each target while
    appending progress to dmlog.txt.  Returns True when every message
    was sent, False if any send failed (best-effort contract preserved).
    Fixes from review: removed the no-op `un=un` / `pw=pw` assignments
    and dead commented-out credentials; the log file is now closed via a
    context manager; the bare `except:` was narrowed to `except
    Exception` so Ctrl-C is no longer swallowed.
    """
    print('---------- def sendDM init')
    startDriver()
    login(un,pw)
    try:
        for i in user:
            with open(os.getcwd()+"/dmlog.txt","a") as log:
                log.write('Sending DM to %s \n' % i)
            dm(i,message)
        return True
    except Exception:
        return False
# open(os.getcwd()+"/dmlog.txt","a").write('Starting' )
# us=sys.argv[1]
# msg=sys.argv[2]
# un=sys.argv[3]
# pw=sys.argv[4]
# x=getFollowers(us)
# x=list(map(lambda y:y.text, x))
# sendDM(x,msg,un,pw)
#x=getFollowers("shanaka")
|
[
"random.randint",
"os.getcwd",
"random.choice",
"time.sleep",
"pyvirtualdisplay.Display",
"pathlib.Path",
"selenium.webdriver.ChromeOptions",
"selenium.webdriver.Chrome",
"requests.get",
"selenium.webdriver.ActionChains"
] |
[((879, 914), 'pyvirtualdisplay.Display', 'Display', ([], {'visible': '(0)', 'size': '(800, 600)'}), '(visible=0, size=(800, 600))\n', (886, 914), False, 'from pyvirtualdisplay import Display\n'), ((2390, 2406), 'random.choice', 'random.choice', (['a'], {}), '(a)\n', (2403, 2406), False, 'import time, random, names, os, requests, sys\n'), ((2519, 2554), 'pyvirtualdisplay.Display', 'Display', ([], {'visible': '(0)', 'size': '(800, 600)'}), '(visible=0, size=(800, 600))\n', (2526, 2554), False, 'from pyvirtualdisplay import Display\n'), ((2586, 2611), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (2609, 2611), False, 'from selenium import webdriver\n'), ((3083, 3203), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'chrome_options': 'opts', 'desired_capabilities': 'capabilities', 'executable_path': 'f"""{BASE_DIR}/chromedriver"""'}), "(chrome_options=opts, desired_capabilities=capabilities,\n executable_path=f'{BASE_DIR}/chromedriver')\n", (3099, 3203), False, 'from selenium import webdriver\n'), ((4029, 4043), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (4039, 4043), False, 'import time, random, names, os, requests, sys\n'), ((4260, 4273), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (4270, 4273), False, 'import time, random, names, os, requests, sys\n'), ((4365, 4378), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4375, 4378), False, 'import time, random, names, os, requests, sys\n'), ((4725, 4738), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4735, 4738), False, 'import time, random, names, os, requests, sys\n'), ((4850, 4863), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4860, 4863), False, 'import time, random, names, os, requests, sys\n'), ((5744, 5757), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (5754, 5757), False, 'import time, random, names, os, requests, sys\n'), ((6158, 6171), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (6168, 6171), False, 'import time, 
random, names, os, requests, sys\n'), ((6299, 6312), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (6309, 6312), False, 'import time, random, names, os, requests, sys\n'), ((6516, 6529), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (6526, 6529), False, 'import time, random, names, os, requests, sys\n'), ((3396, 3409), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (3406, 3409), False, 'import time, random, names, os, requests, sys\n'), ((835, 849), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (839, 849), False, 'from pathlib import Path\n'), ((1094, 1227), 'requests.get', 'requests.get', (['"""https://api.proxyflow.io/v1/proxy/random?token=c9997120aae549e43c4b45a3&country=US&anonymity=elite&ssl=true"""'], {}), "(\n 'https://api.proxyflow.io/v1/proxy/random?token=c9997120aae549e43c4b45a3&country=US&anonymity=elite&ssl=true'\n )\n", (1106, 1227), False, 'import time, random, names, os, requests, sys\n'), ((2741, 2752), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2750, 2752), False, 'import time, random, names, os, requests, sys\n'), ((3642, 3686), 'random.randint', 'random.randint', (["DELAYS['min']", "DELAYS['max']"], {}), "(DELAYS['min'], DELAYS['max'])\n", (3656, 3686), False, 'import time, random, names, os, requests, sys\n'), ((3767, 3811), 'random.randint', 'random.randint', (["DELAYS['min']", "DELAYS['max']"], {}), "(DELAYS['min'], DELAYS['max'])\n", (3781, 3811), False, 'import time, random, names, os, requests, sys\n'), ((3892, 3936), 'random.randint', 'random.randint', (["DELAYS['min']", "DELAYS['max']"], {}), "(DELAYS['min'], DELAYS['max'])\n", (3906, 3936), False, 'import time, random, names, os, requests, sys\n'), ((1295, 1428), 'requests.get', 'requests.get', (['"""https://api.proxyflow.io/v1/proxy/random?token=c9997120aae549e43c4b45a3&country=US&anonymity=elite&ssl=true"""'], {}), "(\n 'https://api.proxyflow.io/v1/proxy/random?token=c9997120aae549e43c4b45a3&country=US&anonymity=elite&ssl=true'\n )\n", (1307, 1428), 
False, 'import time, random, names, os, requests, sys\n'), ((6449, 6503), 'random.randint', 'random.randint', (["DELAYS['keys_min']", "DELAYS['keys_max']"], {}), "(DELAYS['keys_min'], DELAYS['keys_max'])\n", (6463, 6503), False, 'import time, random, names, os, requests, sys\n'), ((4121, 4132), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4130, 4132), False, 'import time, random, names, os, requests, sys\n'), ((5441, 5452), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5450, 5452), False, 'import time, random, names, os, requests, sys\n'), ((6621, 6632), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6630, 6632), False, 'import time, random, names, os, requests, sys\n'), ((5190, 5210), 'selenium.webdriver.ActionChains', 'ActionChains', (['driver'], {}), '(driver)\n', (5202, 5210), False, 'from selenium.webdriver import ActionChains\n'), ((7108, 7119), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7117, 7119), False, 'import time, random, names, os, requests, sys\n'), ((6037, 6048), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6046, 6048), False, 'import time, random, names, os, requests, sys\n')]
|
"""Dock of card GUI."""
import random
from tkinter import Button # Import tkinter
from tkinter import Frame # Import tkinter
from tkinter import Label # Import tkinter
from tkinter import LEFT # Import tkinter
from tkinter import PhotoImage
from tkinter import Tk # Import tkinter
class DeckOfCardsGUI(object):
    """Tk window that shows four playing cards and a Shuffle button."""
    def __init__(self):
        """Build the window, load the 52 card images, show the first four
        cards, and enter the Tk event loop (blocks until the window closes)."""
        window = Tk()  # Create a window; must exist before PhotoImage objects
        window.title("Pick Four Cards Randomly")  # Set title
        self.image_list = []  # Store images for cards
        # Card images are named image/card/1.gif ... image/card/52.gif.
        for i in range(1, 53):
            self.image_list.append(PhotoImage(
                file="image/card/" + str(i) + ".gif"))
        frame = Frame(window)  # Hold four labels for cards
        frame.pack()
        self.label_list = []  # A list of four labels
        for i in range(4):
            self.label_list.append(Label(frame, image=self.image_list[i]))
            self.label_list[i].pack(side=LEFT)
        Button(window, text="Shuffle", command=self.shuffle).pack()
        window.mainloop()  # Create an event loop
    def shuffle(self):
        """Choose four random cards."""
        # Shuffling the whole list and redisplaying its first four entries
        # is equivalent to drawing four random cards.
        random.shuffle(self.image_list)
        for i in range(4):
            self.label_list[i]["image"] = self.image_list[i]
# Instantiate (and block in the Tk main loop) at import time.
DeckOfCardsGUI()  # Create GUI
|
[
"tkinter.Button",
"random.shuffle",
"tkinter.Frame",
"tkinter.Label",
"tkinter.Tk"
] |
[((441, 445), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (443, 445), False, 'from tkinter import Tk\n'), ((733, 746), 'tkinter.Frame', 'Frame', (['window'], {}), '(window)\n', (738, 746), False, 'from tkinter import Frame\n'), ((1194, 1225), 'random.shuffle', 'random.shuffle', (['self.image_list'], {}), '(self.image_list)\n', (1208, 1225), False, 'import random\n'), ((915, 953), 'tkinter.Label', 'Label', (['frame'], {'image': 'self.image_list[i]'}), '(frame, image=self.image_list[i])\n', (920, 953), False, 'from tkinter import Label\n'), ((1011, 1063), 'tkinter.Button', 'Button', (['window'], {'text': '"""Shuffle"""', 'command': 'self.shuffle'}), "(window, text='Shuffle', command=self.shuffle)\n", (1017, 1063), False, 'from tkinter import Button\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# __author__: Yxn
# date: 2016/10/14
from flask_assets import Bundle
bundles = {
'common_css': Bundle(
'css/body.css',
'css/lib/adminlte/adminlte.min.css',
'css/lib/bootstrap/bootstrap.min.css',
'css/lib/skins/skin-blue.min.css',
'css/lib/fontawesome/font-awesome.min.css',
output='css/common.css',
filters='cssmin'
),
'common_js': Bundle(
'js/lib/jquery/jquery.min.js',
'js/lib/app/app.min.js',
'js/lib/bootstrap/bootstrap.min.js',
'js/lib/validate/jquery.validate.js',
output='js/common.js',
filters='jsmin'
),
}
|
[
"flask_assets.Bundle"
] |
[((147, 383), 'flask_assets.Bundle', 'Bundle', (['"""css/body.css"""', '"""css/lib/adminlte/adminlte.min.css"""', '"""css/lib/bootstrap/bootstrap.min.css"""', '"""css/lib/skins/skin-blue.min.css"""', '"""css/lib/fontawesome/font-awesome.min.css"""'], {'output': '"""css/common.css"""', 'filters': '"""cssmin"""'}), "('css/body.css', 'css/lib/adminlte/adminlte.min.css',\n 'css/lib/bootstrap/bootstrap.min.css',\n 'css/lib/skins/skin-blue.min.css',\n 'css/lib/fontawesome/font-awesome.min.css', output='css/common.css',\n filters='cssmin')\n", (153, 383), False, 'from flask_assets import Bundle\n'), ((448, 638), 'flask_assets.Bundle', 'Bundle', (['"""js/lib/jquery/jquery.min.js"""', '"""js/lib/app/app.min.js"""', '"""js/lib/bootstrap/bootstrap.min.js"""', '"""js/lib/validate/jquery.validate.js"""'], {'output': '"""js/common.js"""', 'filters': '"""jsmin"""'}), "('js/lib/jquery/jquery.min.js', 'js/lib/app/app.min.js',\n 'js/lib/bootstrap/bootstrap.min.js',\n 'js/lib/validate/jquery.validate.js', output='js/common.js', filters=\n 'jsmin')\n", (454, 638), False, 'from flask_assets import Bundle\n')]
|
import govt
import service
from cryptography.hazmat.primitives.asymmetric import dh
import tools
def print_as_hex(data: bytes):
print(data.hex())
def main():
NAME = "Alice"
# TODO: This is just to get a large prime number
params = dh.generate_parameters(generator=2, key_size=512)
p = params.parameter_numbers().p
g = pow(tools.get_random_int(p), 2, p)
q = int((p + 1) / 2) - 1
print(f"{pow(g, q - 1, p)==g}")
print(f"g={g}, p={p}, q={q}")
# generate SSN
my_govt = govt.Govt()
ssn = my_govt.register(NAME, q)
my_service = service.Service()
uid = my_service.new_user(NAME)
r = tools.get_random_int(q)
Y = pow(g, ssn, p)
A = pow(g, r, p)
c = my_service.get_challenge(uid, Y, A, q)
z = r + (c * ssn) % p
if my_service.verify(uid, g, p, z):
print("Successfully verified!\n")
return 0
else:
print("Failed to verify!\n")
return 1
# show credentials anonymously
if __name__ == "__main__":
REPETITIONS = 100
for i in range(REPETITIONS):
if main() > 0:
break
|
[
"govt.Govt",
"cryptography.hazmat.primitives.asymmetric.dh.generate_parameters",
"service.Service",
"tools.get_random_int"
] |
[((253, 302), 'cryptography.hazmat.primitives.asymmetric.dh.generate_parameters', 'dh.generate_parameters', ([], {'generator': '(2)', 'key_size': '(512)'}), '(generator=2, key_size=512)\n', (275, 302), False, 'from cryptography.hazmat.primitives.asymmetric import dh\n'), ((519, 530), 'govt.Govt', 'govt.Govt', ([], {}), '()\n', (528, 530), False, 'import govt\n'), ((585, 602), 'service.Service', 'service.Service', ([], {}), '()\n', (600, 602), False, 'import service\n'), ((648, 671), 'tools.get_random_int', 'tools.get_random_int', (['q'], {}), '(q)\n', (668, 671), False, 'import tools\n'), ((353, 376), 'tools.get_random_int', 'tools.get_random_int', (['p'], {}), '(p)\n', (373, 376), False, 'import tools\n')]
|
import os
import h5py
import pandas as pd
import logging
import numpy as np
from progress.bar import Bar
from multiprocessing import Pool, cpu_count
from omegaconf import OmegaConf
from tools.utils import io
# from ANCSH_lib.utils import NetworkType
# from tools.visualization import Viewer, ANCSHVisualizer
import utils
from utils import JointType
log = logging.getLogger('proc_stage2')
class ProcStage2Impl:
def __init__(self, cfg):
self.output_path = cfg.output_path
self.input_h5_path = cfg.input_h5_path
self.stage1_tmp_dir = cfg.stage1_tmp_dir
self.tmp_output_dir = cfg.tmp_output_dir
self.rest_state_data_filename = cfg.rest_state_data_filename
self.object_infos_path = cfg.object_infos_path
self.heatmap_threshold = cfg.heatmap_threshold
self.epsilon = 10e-8
self.export = cfg.export
@staticmethod
def get_joint_params(vertices, joint, selected_vertices):
heatmap = -np.ones((vertices.shape[0]))
unitvec = np.zeros((vertices.shape[0], 3))
joint_pos = joint['abs_position']
joint_axis = joint['axis']
joint_axis = joint_axis / np.linalg.norm(joint_axis)
joint_axis = joint_axis.reshape((3, 1))
if joint['type'] == JointType.revolute.value:
vec1 = vertices - joint_pos
# project to joint axis
proj_len = np.dot(vec1, joint_axis)
# np.clip(proj_len, a_min=self.epsilon, a_max=None, out=proj_len)
proj_vec = proj_len * joint_axis.transpose()
orthogonal_vec = - vec1 + proj_vec
tmp_heatmap = np.linalg.norm(orthogonal_vec, axis=1).reshape(-1, 1)
tmp_unitvec = orthogonal_vec / tmp_heatmap
heatmap[selected_vertices] = tmp_heatmap[selected_vertices].reshape(-1)
unitvec[selected_vertices] = tmp_unitvec[selected_vertices]
elif joint['type'] == JointType.prismatic.value:
heatmap[selected_vertices] = 0
unitvec[selected_vertices] = joint_axis.transpose()
else:
log.error(f'Invalid joint type {joint["axis"]}')
heatmap = np.where(heatmap >= 0, heatmap, np.inf)
return heatmap, unitvec
def __call__(self, idx, input_data):
input_h5 = h5py.File(self.input_h5_path, 'r')
object_infos = io.read_json(self.object_infos_path)
output_filepath = os.path.splitext(self.output_path)[0] + f'_{idx}' + os.path.splitext(self.output_path)[-1]
h5file = h5py.File(output_filepath, 'w')
bar = Bar(f'Stage2 Processing chunk {idx}', max=len(input_data))
for index, row in input_data.iterrows():
instance_name = f'{row["objectCat"]}_{row["objectId"]}_{row["articulationId"]}_{row["frameId"]}'
in_h5frame = input_h5[instance_name]
mask = in_h5frame['mask'][:]
points_camera = in_h5frame['points_camera'][:]
points_rest_state = in_h5frame['points_rest_state'][:]
parts_camera2rest_state = in_h5frame['parts_transformation'][:]
camera2base = in_h5frame['base_transformation'][:]
stage1_tmp_data_dir = os.path.join(self.stage1_tmp_dir, row['objectCat'], row['objectId'])
rest_state_data_path = os.path.join(stage1_tmp_data_dir, self.rest_state_data_filename)
rest_state_data = io.read_json(rest_state_data_path)
part_info = object_infos[row['objectCat']][row['objectId']]['part']
num_parts = len(part_info)
# process points related ground truth
object_info = object_infos[row['objectCat']][row['objectId']]['object']
# diagonal axis aligned bounding box length to 1
# (0.5, 0.5, 0.5) centered
naocs_translation = - np.asarray(object_info['center']) + 0.5 * object_info['scale']
naocs_scale = 1.0 / object_info['scale']
naocs = points_rest_state + naocs_translation
naocs *= naocs_scale
naocs_transformation = np.reshape(camera2base, (4, 4), order='F')
naocs_transformation[:3, 3] += naocs_translation
naocs2cam_transformation = np.linalg.inv(naocs_transformation).flatten('F')
naocs2cam_scale = 1.0 / naocs_scale
points_class = np.empty_like(mask)
npcs = np.empty_like(points_rest_state)
parts_npcs2cam_transformation = np.empty_like(parts_camera2rest_state)
parts_npcs2cam_scale = np.empty(num_parts)
parts_min_bounds = np.empty((num_parts, 3))
parts_max_bounds = np.empty((num_parts, 3))
for link_index, link in enumerate(rest_state_data['links']):
if link['virtual']:
continue
link_index_key = str(link_index)
part_points = points_rest_state[mask == link_index]
center = np.asarray(part_info[link_index_key]['center'])
# diagonal axis aligned bounding box length to 1
# (0.5, 0.5, 0.5) centered
npcs_translation = - center + 0.5 * part_info[link_index_key]['scale']
npcs_scale = 1.0 / part_info[link_index_key]['scale']
part_points_norm = part_points + npcs_translation
part_points_norm *= npcs_scale
npcs[mask == link_index] = part_points_norm
part_class = part_info[link_index_key]['part_class']
points_class[mask == link_index] = part_class
npcs_transformation = np.reshape(parts_camera2rest_state[link['part_index']], (4, 4), order='F')
npcs_transformation[:3, 3] += npcs_translation
npcs2cam_transformation = np.linalg.inv(npcs_transformation)
parts_npcs2cam_transformation[part_class] = npcs2cam_transformation.flatten('F')
parts_npcs2cam_scale[part_class] = 1.0 / npcs_scale
parts_min_bounds[part_class] = np.asarray(part_info[link_index_key]['min_bound'])
parts_max_bounds[part_class] = np.asarray(part_info[link_index_key]['max_bound'])
# process joints related ground truth
link_names = [link['name'] for link in rest_state_data['links']]
# transform joints to naocs space
# viewer = Viewer()
naocs_joints = rest_state_data['joints']
for i, joint in enumerate(rest_state_data['joints']):
if not joint:
continue
joint_pose = np.asarray(joint['pose2link']).reshape((4, 4), order='F')
joint_parent = joint['parent']
parent_link = rest_state_data['links'][link_names.index(joint_parent)]
parent_link_abs_pose = np.asarray(parent_link['abs_pose']).reshape((4, 4), order='F')
joint_abs_pose = np.dot(parent_link_abs_pose, joint_pose)
joint_pos = joint_abs_pose[:3, 3]
naocs_joint_pos = joint_pos + naocs_translation
naocs_joint_pos *= naocs_scale
joint_axis = np.dot(joint_abs_pose[:3, :3], joint['axis'])
joint_axis = joint_axis / np.linalg.norm(joint_axis)
naocs_joints[i]['abs_position'] = naocs_joint_pos
naocs_joints[i]['axis'] = joint_axis
joint_child = joint['child']
child_link_class = part_info[str(link_names.index(joint_child))]['part_class']
joint_class = child_link_class
naocs_joints[i]['class'] = joint_class
joint_type = JointType[joint['type']].value
naocs_joints[i]['type'] = joint_type
# if self.export:
# viewer.add_trimesh_arrows([naocs_joint_pos], [joint_axis], color=Viewer.rgba_by_index(joint_class))
# if self.export:
# tmp_data_dir = os.path.join(self.tmp_output_dir, row['objectCat'], row['objectId'],
# row['articulationId'])
# io.ensure_dir_exists(tmp_data_dir)
# viewer.export(os.path.join(tmp_data_dir, instance_name + '_naocs_arrows.ply'))
valid_joints = [joint for joint in naocs_joints if joint if joint['type'] >= 0]
num_valid_joints = len(valid_joints)
tmp_heatmap = np.empty((num_valid_joints, naocs.shape[0]))
tmp_unitvec = np.empty((num_valid_joints, naocs.shape[0], 3))
for i, joint in enumerate(valid_joints):
joint_class = joint['class']
parent_links = [i for i, link in enumerate(rest_state_data['links'])
if link if not link['virtual'] if joint['parent'] == link['name']]
child_links = [i for i, link in enumerate(rest_state_data['links'])
if link if not link['virtual'] if joint['child'] == link['name']]
connected_links = parent_links + child_links
part_classes = [part_info[str(link_index)]['part_class'] for link_index in connected_links]
if joint['type'] == JointType.prismatic.value:
part_classes = [part_class for part_class in part_classes if part_class == joint_class]
selected_vertex_indices = np.isin(points_class, part_classes)
part_heatmap, part_unitvec = ProcStage2Impl.get_joint_params(naocs, joint, selected_vertex_indices)
tmp_heatmap[joint_class - 1] = part_heatmap
tmp_unitvec[joint_class - 1] = part_unitvec
joints_association = tmp_heatmap.argmin(axis=0)
points_heatmap = tmp_heatmap[joints_association, np.arange(naocs.shape[0])]
points_unitvec = tmp_unitvec[joints_association, np.arange(naocs.shape[0])]
points_unitvec[points_heatmap >= self.heatmap_threshold] = np.zeros(3)
joints_association[points_heatmap >= self.heatmap_threshold] = -1
points_heatmap_result = 1.0 - points_heatmap / self.heatmap_threshold
points_heatmap_result[points_heatmap >= self.heatmap_threshold] = -1
# points with no joint association has value 0
joints_association += 1
joints_axis = np.zeros((naocs.shape[0], 3))
joint_types = -np.ones(num_parts)
for joint in naocs_joints:
if joint:
joints_axis[joints_association == joint['class']] = joint['axis']
joint_types[joint['class']] = joint['type']
h5frame = h5file.require_group(instance_name)
h5frame.attrs['objectCat'] = row["objectCat"]
h5frame.attrs['objectId'] = row["objectId"]
h5frame.attrs['articulationId'] = row["articulationId"]
h5frame.attrs['frameId'] = row["frameId"]
h5frame.attrs['numParts'] = num_parts
h5frame.attrs['id'] = instance_name
h5frame.create_dataset("seg_per_point", shape=points_class.shape, data=points_class, compression="gzip")
h5frame.create_dataset("camcs_per_point", shape=points_camera.shape, data=points_camera, compression="gzip")
h5frame.create_dataset("npcs_per_point", shape=npcs.shape, data=npcs, compression="gzip")
h5frame.create_dataset("naocs_per_point", shape=naocs.shape, data=naocs, compression="gzip")
h5frame.create_dataset("heatmap_per_point", shape=points_heatmap_result.shape, data=points_heatmap_result,
compression="gzip")
h5frame.create_dataset("unitvec_per_point", shape=points_unitvec.shape, data=points_unitvec,
compression="gzip")
h5frame.create_dataset("axis_per_point", shape=joints_axis.shape, data=joints_axis,
compression="gzip")
h5frame.create_dataset("joint_cls_per_point", shape=joints_association.shape, data=joints_association,
compression="gzip")
h5frame.create_dataset("joint_type", shape=joint_types.shape, data=joint_types, compression="gzip")
# 6D transformation from npcs to camcs
h5frame.create_dataset("npcs2cam_rt", shape=parts_npcs2cam_transformation.shape,
data=parts_npcs2cam_transformation, compression="gzip")
# scale from npcs to camcs
h5frame.create_dataset("npcs2cam_scale", shape=parts_npcs2cam_scale.shape, data=parts_npcs2cam_scale,
compression="gzip")
h5frame.create_dataset("naocs2cam_rt", shape=naocs2cam_transformation.shape,
data=naocs2cam_transformation, compression="gzip")
h5frame.create_dataset("naocs2cam_scale", shape=(1,), data=naocs2cam_scale,
compression="gzip")
norm_factors = 1.0 / parts_npcs2cam_scale
h5frame.create_dataset("norm_factors", shape=norm_factors.shape, data=norm_factors,
compression="gzip")
# part bounds at rest state
norm_corners = np.stack((parts_min_bounds, parts_max_bounds), axis=1)
h5frame.create_dataset("norm_corners", shape=norm_corners.shape, data=norm_corners,
compression="gzip")
bar.next()
bar.finish()
h5file.close()
input_h5.close()
return output_filepath
class ProcStage2:
def __init__(self, cfg):
self.cfg = cfg
self.input_cfg = self.cfg.paths.preprocess.stage2.input
self.input_h5_path = os.path.join(self.cfg.paths.preprocess.output_dir, self.input_cfg.pcd_data)
self.output_dir = self.cfg.paths.preprocess.output_dir
self.stag1_tmp_output = self.cfg.paths.preprocess.stage1.tmp_output
self.tmp_output = self.cfg.paths.preprocess.stage2.tmp_output
self.split_info = None
self.debug = self.cfg.debug
self.show = self.cfg.show
self.export = self.cfg.export
stage1_input = self.cfg.paths.preprocess.stage1.input
self.part_orders = io.read_json(os.path.join(self.cfg.paths.preprocess.input_dir, stage1_input.part_order_file))
self.heatmap_threshold = self.cfg.params.joint_association_threshold
def split_data(self, train_percent=.6, split_on='objectId', seed=None):
instances = []
visit_groups = lambda name, node: instances.append(name) if isinstance(node, h5py.Group) else None
input_h5 = h5py.File(self.input_h5_path, 'r')
input_h5.visititems(visit_groups)
df_dataset = pd.DataFrame([name.split('_') for name in instances],
columns=['objectCat', 'objectId', 'articulationId', 'frameId'])
df_dataset = df_dataset.drop_duplicates(ignore_index=True)
# select data in config
selected_categories = df_dataset['objectCat'].isin(self.cfg.settings.categories) \
if len(self.cfg.settings.categories) > 0 else df_dataset['objectCat'].astype(bool)
selected_object_ids = df_dataset['objectId'].isin(self.cfg.settings.object_ids) \
if len(self.cfg.settings.object_ids) > 0 else df_dataset['objectId'].astype(bool)
selected_articulation_ids = df_dataset['articulationId'].isin(self.cfg.settings.articulation_ids) \
if len(self.cfg.settings.articulation_ids) > 0 else df_dataset['articulationId'].astype(bool)
df_dataset = df_dataset[selected_categories & selected_object_ids & selected_articulation_ids]
if io.file_exist(self.cfg.paths.preprocess.stage2.input.split_info, ext='.csv'):
input_split_info = pd.read_csv(self.cfg.paths.preprocess.stage2.input.split_info, dtype=object)
split_on_columns = ['objectCat', 'objectId', 'articulationId', 'frameId']
train_set = input_split_info[input_split_info["set"] == "train"]
val_set = input_split_info[input_split_info["set"] == "val"]
test_set = input_split_info[input_split_info["set"] == "test"]
train = train_set.merge(df_dataset, how='left', on=split_on_columns)
val = val_set.merge(df_dataset, how='left', on=split_on_columns)
test = test_set.merge(df_dataset, how='left', on=split_on_columns)
self.split_info = pd.concat([train, val, test], keys=["train", "val", "test"], names=['set', 'index'])
else:
# split to train, val, test
log.info(f'Split on key {split_on}')
if len(df_dataset):
if split_on == 'objectId':
split_on_columns = ['objectCat', 'objectId']
elif split_on == 'articulationId':
split_on_columns = ['objectCat', 'objectId', 'articulationId']
elif split_on == 'frameId':
split_on_columns = ['objectCat', 'objectId', 'articulationId', 'frameId']
else:
split_on_columns = ['objectCat', 'objectId']
log.warning(f'Cannot parse split_on {split_on}, split on objectId by default')
val_end = train_percent + (1.0 - train_percent) / 2.0
split_df = df_dataset[split_on_columns].drop_duplicates()
set_size = len(split_df)
train_set, val_set, test_set = np.split(
split_df.sample(frac=1.0, random_state=seed),
[int(train_percent * set_size), int(val_end * set_size)]
)
train = train_set.merge(df_dataset, how='left', on=split_on_columns)
val = val_set.merge(df_dataset, how='left', on=split_on_columns)
test = test_set.merge(df_dataset, how='left', on=split_on_columns)
self.split_info = pd.concat([train, val, test], keys=["train", "val", "test"], names=['set', 'index'])
else:
log.error('No data to split!')
return
self.split_info.to_csv(os.path.join(self.output_dir, self.cfg.paths.preprocess.stage2.output.split_info))
def process(self):
io.ensure_dir_exists(self.output_dir)
if self.split_info is None or self.split_info.empty:
log.error('No data to process!')
return
train = self.split_info.loc['train']
log.info(f'Stage2 Process Train Set {len(train)} instances')
self.process_set(train, self.output_dir, self.cfg.paths.preprocess.stage2.output.train_data)
val = self.split_info.loc['val']
log.info(f'Stage2 Process Val Set {len(val)} instances')
self.process_set(val, self.output_dir, self.cfg.paths.preprocess.stage2.output.val_data)
test = self.split_info.loc['test']
log.info(f'Stage2 Process Test Set {len(test)} instances')
self.process_set(test, self.output_dir, self.cfg.paths.preprocess.stage2.output.test_data)
def process_set(self, input_data, output_dir, output_filename):
# process object info
object_df = input_data[['objectCat', 'objectId']].drop_duplicates()
object_infos = {}
bar = Bar('Stage2 Parse Object Infos', max=len(object_df))
for index, row in object_df.iterrows():
stage1_tmp_data_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.stag1_tmp_output.folder_name,
row['objectCat'], row['objectId'])
rest_state_data_path = os.path.join(stage1_tmp_data_dir, self.stag1_tmp_output.rest_state_data)
rest_state_data = io.read_json(rest_state_data_path)
object_mesh_path = os.path.join(stage1_tmp_data_dir, self.stag1_tmp_output.rest_state_mesh)
object_dict = utils.get_mesh_info(object_mesh_path)
part_dict = {}
part_order = None
if self.part_orders:
part_order = self.part_orders[row['objectCat']][row['objectId']]
part_index = 0
for link_index, link in enumerate(rest_state_data['links']):
if link['virtual']:
continue
part_mesh_path = os.path.join(stage1_tmp_data_dir,
f'{link["name"]}_{self.stag1_tmp_output.rest_state_mesh}')
part_dict[link_index] = utils.get_mesh_info(part_mesh_path)
if part_order:
part_dict[link_index]['part_class'] = part_order.index(link['part_index'])
else:
part_dict[link_index]['part_class'] = part_index
part_index += 1
if row['objectCat'] in object_infos:
object_infos[row['objectCat']][row['objectId']] = {'object': object_dict, 'part': part_dict}
else:
object_infos[row['objectCat']] = {row['objectId']: {'object': object_dict, 'part': part_dict}}
bar.next()
bar.finish()
tmp_data_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name)
io.ensure_dir_exists(tmp_data_dir)
object_infos_path = os.path.join(tmp_data_dir, self.tmp_output.object_info)
io.write_json(object_infos, object_infos_path)
num_processes = min(cpu_count(), self.cfg.num_workers)
# calculate the chunk size
chunk_size = max(1, int(input_data.shape[0] / num_processes))
chunks = [input_data.iloc[input_data.index[i:i + chunk_size]] for i in
range(0, input_data.shape[0], chunk_size)]
log.info(f'Stage2 Processing Start with {num_processes} workers and {len(chunks)} chunks')
config = OmegaConf.create()
config.input_h5_path = self.input_h5_path
config.stage1_tmp_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.stag1_tmp_output.folder_name)
config.tmp_output_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name)
config.output_path = os.path.join(config.tmp_output_dir, output_filename)
config.rest_state_data_filename = self.stag1_tmp_output.rest_state_data
config.object_infos_path = object_infos_path
config.heatmap_threshold = self.heatmap_threshold
config.export = self.cfg.export
with Pool(processes=num_processes) as pool:
proc_impl = ProcStage2Impl(config)
output_filepath_list = pool.starmap(proc_impl, enumerate(chunks))
h5_output_path = os.path.join(output_dir, output_filename)
h5file = h5py.File(h5_output_path, 'w')
for filepath in output_filepath_list:
with h5py.File(filepath, 'r') as h5f:
for key in h5f.keys():
h5f.copy(key, h5file)
h5file.close()
# if self.debug:
# tmp_data_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name)
# io.ensure_dir_exists(tmp_data_dir)
# with h5py.File(h5_output_path, 'r') as h5file:
# visualizer = ANCSHVisualizer(h5file, NetworkType.ANCSH, gt=True, sampling=20)
# visualizer.point_size = 5
# visualizer.arrow_sampling = 10
# visualizer.prefix = ''
# visualizer.render(show=self.show, export=tmp_data_dir, export_mesh=self.export)
|
[
"numpy.isin",
"pandas.read_csv",
"numpy.empty",
"numpy.ones",
"tools.utils.io.write_json",
"numpy.linalg.norm",
"numpy.arange",
"os.path.join",
"multiprocessing.cpu_count",
"tools.utils.io.file_exist",
"numpy.empty_like",
"numpy.reshape",
"pandas.concat",
"numpy.stack",
"h5py.File",
"numpy.asarray",
"numpy.linalg.inv",
"utils.get_mesh_info",
"multiprocessing.Pool",
"numpy.dot",
"tools.utils.io.ensure_dir_exists",
"numpy.zeros",
"numpy.where",
"omegaconf.OmegaConf.create",
"os.path.splitext",
"tools.utils.io.read_json",
"logging.getLogger"
] |
[((358, 390), 'logging.getLogger', 'logging.getLogger', (['"""proc_stage2"""'], {}), "('proc_stage2')\n", (375, 390), False, 'import logging\n'), ((1020, 1052), 'numpy.zeros', 'np.zeros', (['(vertices.shape[0], 3)'], {}), '((vertices.shape[0], 3))\n', (1028, 1052), True, 'import numpy as np\n'), ((2148, 2187), 'numpy.where', 'np.where', (['(heatmap >= 0)', 'heatmap', 'np.inf'], {}), '(heatmap >= 0, heatmap, np.inf)\n', (2156, 2187), True, 'import numpy as np\n'), ((2281, 2315), 'h5py.File', 'h5py.File', (['self.input_h5_path', '"""r"""'], {}), "(self.input_h5_path, 'r')\n", (2290, 2315), False, 'import h5py\n'), ((2339, 2375), 'tools.utils.io.read_json', 'io.read_json', (['self.object_infos_path'], {}), '(self.object_infos_path)\n', (2351, 2375), False, 'from tools.utils import io\n'), ((2510, 2541), 'h5py.File', 'h5py.File', (['output_filepath', '"""w"""'], {}), "(output_filepath, 'w')\n", (2519, 2541), False, 'import h5py\n'), ((13713, 13788), 'os.path.join', 'os.path.join', (['self.cfg.paths.preprocess.output_dir', 'self.input_cfg.pcd_data'], {}), '(self.cfg.paths.preprocess.output_dir, self.input_cfg.pcd_data)\n', (13725, 13788), False, 'import os\n'), ((14623, 14657), 'h5py.File', 'h5py.File', (['self.input_h5_path', '"""r"""'], {}), "(self.input_h5_path, 'r')\n", (14632, 14657), False, 'import h5py\n'), ((15671, 15747), 'tools.utils.io.file_exist', 'io.file_exist', (['self.cfg.paths.preprocess.stage2.input.split_info'], {'ext': '""".csv"""'}), "(self.cfg.paths.preprocess.stage2.input.split_info, ext='.csv')\n", (15684, 15747), False, 'from tools.utils import io\n'), ((18228, 18265), 'tools.utils.io.ensure_dir_exists', 'io.ensure_dir_exists', (['self.output_dir'], {}), '(self.output_dir)\n', (18248, 18265), False, 'from tools.utils import io\n'), ((21065, 21141), 'os.path.join', 'os.path.join', (['self.cfg.paths.preprocess.tmp_dir', 'self.tmp_output.folder_name'], {}), '(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name)\n', (21077, 21141), False, 
'import os\n'), ((21150, 21184), 'tools.utils.io.ensure_dir_exists', 'io.ensure_dir_exists', (['tmp_data_dir'], {}), '(tmp_data_dir)\n', (21170, 21184), False, 'from tools.utils import io\n'), ((21213, 21268), 'os.path.join', 'os.path.join', (['tmp_data_dir', 'self.tmp_output.object_info'], {}), '(tmp_data_dir, self.tmp_output.object_info)\n', (21225, 21268), False, 'import os\n'), ((21277, 21323), 'tools.utils.io.write_json', 'io.write_json', (['object_infos', 'object_infos_path'], {}), '(object_infos, object_infos_path)\n', (21290, 21323), False, 'from tools.utils import io\n'), ((21750, 21768), 'omegaconf.OmegaConf.create', 'OmegaConf.create', ([], {}), '()\n', (21766, 21768), False, 'from omegaconf import OmegaConf\n'), ((21851, 21938), 'os.path.join', 'os.path.join', (['self.cfg.paths.preprocess.tmp_dir', 'self.stag1_tmp_output.folder_name'], {}), '(self.cfg.paths.preprocess.tmp_dir, self.stag1_tmp_output.\n folder_name)\n', (21863, 21938), False, 'import os\n'), ((21966, 22042), 'os.path.join', 'os.path.join', (['self.cfg.paths.preprocess.tmp_dir', 'self.tmp_output.folder_name'], {}), '(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name)\n', (21978, 22042), False, 'import os\n'), ((22072, 22124), 'os.path.join', 'os.path.join', (['config.tmp_output_dir', 'output_filename'], {}), '(config.tmp_output_dir, output_filename)\n', (22084, 22124), False, 'import os\n'), ((22560, 22601), 'os.path.join', 'os.path.join', (['output_dir', 'output_filename'], {}), '(output_dir, output_filename)\n', (22572, 22601), False, 'import os\n'), ((22619, 22649), 'h5py.File', 'h5py.File', (['h5_output_path', '"""w"""'], {}), "(h5_output_path, 'w')\n", (22628, 22649), False, 'import h5py\n'), ((973, 999), 'numpy.ones', 'np.ones', (['vertices.shape[0]'], {}), '(vertices.shape[0])\n', (980, 999), True, 'import numpy as np\n'), ((1164, 1190), 'numpy.linalg.norm', 'np.linalg.norm', (['joint_axis'], {}), '(joint_axis)\n', (1178, 1190), True, 'import numpy as np\n'), ((1392, 
1416), 'numpy.dot', 'np.dot', (['vec1', 'joint_axis'], {}), '(vec1, joint_axis)\n', (1398, 1416), True, 'import numpy as np\n'), ((3163, 3231), 'os.path.join', 'os.path.join', (['self.stage1_tmp_dir', "row['objectCat']", "row['objectId']"], {}), "(self.stage1_tmp_dir, row['objectCat'], row['objectId'])\n", (3175, 3231), False, 'import os\n'), ((3267, 3331), 'os.path.join', 'os.path.join', (['stage1_tmp_data_dir', 'self.rest_state_data_filename'], {}), '(stage1_tmp_data_dir, self.rest_state_data_filename)\n', (3279, 3331), False, 'import os\n'), ((3362, 3396), 'tools.utils.io.read_json', 'io.read_json', (['rest_state_data_path'], {}), '(rest_state_data_path)\n', (3374, 3396), False, 'from tools.utils import io\n'), ((4029, 4071), 'numpy.reshape', 'np.reshape', (['camera2base', '(4, 4)'], {'order': '"""F"""'}), "(camera2base, (4, 4), order='F')\n", (4039, 4071), True, 'import numpy as np\n'), ((4297, 4316), 'numpy.empty_like', 'np.empty_like', (['mask'], {}), '(mask)\n', (4310, 4316), True, 'import numpy as np\n'), ((4336, 4368), 'numpy.empty_like', 'np.empty_like', (['points_rest_state'], {}), '(points_rest_state)\n', (4349, 4368), True, 'import numpy as np\n'), ((4413, 4451), 'numpy.empty_like', 'np.empty_like', (['parts_camera2rest_state'], {}), '(parts_camera2rest_state)\n', (4426, 4451), True, 'import numpy as np\n'), ((4487, 4506), 'numpy.empty', 'np.empty', (['num_parts'], {}), '(num_parts)\n', (4495, 4506), True, 'import numpy as np\n'), ((4538, 4562), 'numpy.empty', 'np.empty', (['(num_parts, 3)'], {}), '((num_parts, 3))\n', (4546, 4562), True, 'import numpy as np\n'), ((4594, 4618), 'numpy.empty', 'np.empty', (['(num_parts, 3)'], {}), '((num_parts, 3))\n', (4602, 4618), True, 'import numpy as np\n'), ((8371, 8415), 'numpy.empty', 'np.empty', (['(num_valid_joints, naocs.shape[0])'], {}), '((num_valid_joints, naocs.shape[0]))\n', (8379, 8415), True, 'import numpy as np\n'), ((8442, 8489), 'numpy.empty', 'np.empty', (['(num_valid_joints, naocs.shape[0], 3)'], 
{}), '((num_valid_joints, naocs.shape[0], 3))\n', (8450, 8489), True, 'import numpy as np\n'), ((9916, 9927), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (9924, 9927), True, 'import numpy as np\n'), ((10290, 10319), 'numpy.zeros', 'np.zeros', (['(naocs.shape[0], 3)'], {}), '((naocs.shape[0], 3))\n', (10298, 10319), True, 'import numpy as np\n'), ((13219, 13273), 'numpy.stack', 'np.stack', (['(parts_min_bounds, parts_max_bounds)'], {'axis': '(1)'}), '((parts_min_bounds, parts_max_bounds), axis=1)\n', (13227, 13273), True, 'import numpy as np\n'), ((14239, 14318), 'os.path.join', 'os.path.join', (['self.cfg.paths.preprocess.input_dir', 'stage1_input.part_order_file'], {}), '(self.cfg.paths.preprocess.input_dir, stage1_input.part_order_file)\n', (14251, 14318), False, 'import os\n'), ((15780, 15856), 'pandas.read_csv', 'pd.read_csv', (['self.cfg.paths.preprocess.stage2.input.split_info'], {'dtype': 'object'}), '(self.cfg.paths.preprocess.stage2.input.split_info, dtype=object)\n', (15791, 15856), True, 'import pandas as pd\n'), ((16435, 16523), 'pandas.concat', 'pd.concat', (['[train, val, test]'], {'keys': "['train', 'val', 'test']", 'names': "['set', 'index']"}), "([train, val, test], keys=['train', 'val', 'test'], names=['set',\n 'index'])\n", (16444, 16523), True, 'import pandas as pd\n'), ((18113, 18199), 'os.path.join', 'os.path.join', (['self.output_dir', 'self.cfg.paths.preprocess.stage2.output.split_info'], {}), '(self.output_dir, self.cfg.paths.preprocess.stage2.output.\n split_info)\n', (18125, 18199), False, 'import os\n'), ((19368, 19490), 'os.path.join', 'os.path.join', (['self.cfg.paths.preprocess.tmp_dir', 'self.stag1_tmp_output.folder_name', "row['objectCat']", "row['objectId']"], {}), "(self.cfg.paths.preprocess.tmp_dir, self.stag1_tmp_output.\n folder_name, row['objectCat'], row['objectId'])\n", (19380, 19490), False, 'import os\n'), ((19568, 19640), 'os.path.join', 'os.path.join', (['stage1_tmp_data_dir', 
'self.stag1_tmp_output.rest_state_data'], {}), '(stage1_tmp_data_dir, self.stag1_tmp_output.rest_state_data)\n', (19580, 19640), False, 'import os\n'), ((19671, 19705), 'tools.utils.io.read_json', 'io.read_json', (['rest_state_data_path'], {}), '(rest_state_data_path)\n', (19683, 19705), False, 'from tools.utils import io\n'), ((19737, 19809), 'os.path.join', 'os.path.join', (['stage1_tmp_data_dir', 'self.stag1_tmp_output.rest_state_mesh'], {}), '(stage1_tmp_data_dir, self.stag1_tmp_output.rest_state_mesh)\n', (19749, 19809), False, 'import os\n'), ((19836, 19873), 'utils.get_mesh_info', 'utils.get_mesh_info', (['object_mesh_path'], {}), '(object_mesh_path)\n', (19855, 19873), False, 'import utils\n'), ((21353, 21364), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (21362, 21364), False, 'from multiprocessing import Pool, cpu_count\n'), ((22370, 22399), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'num_processes'}), '(processes=num_processes)\n', (22374, 22399), False, 'from multiprocessing import Pool, cpu_count\n'), ((2454, 2488), 'os.path.splitext', 'os.path.splitext', (['self.output_path'], {}), '(self.output_path)\n', (2470, 2488), False, 'import os\n'), ((4899, 4946), 'numpy.asarray', 'np.asarray', (["part_info[link_index_key]['center']"], {}), "(part_info[link_index_key]['center'])\n", (4909, 4946), True, 'import numpy as np\n'), ((5555, 5629), 'numpy.reshape', 'np.reshape', (["parts_camera2rest_state[link['part_index']]", '(4, 4)'], {'order': '"""F"""'}), "(parts_camera2rest_state[link['part_index']], (4, 4), order='F')\n", (5565, 5629), True, 'import numpy as np\n'), ((5735, 5769), 'numpy.linalg.inv', 'np.linalg.inv', (['npcs_transformation'], {}), '(npcs_transformation)\n', (5748, 5769), True, 'import numpy as np\n'), ((5982, 6032), 'numpy.asarray', 'np.asarray', (["part_info[link_index_key]['min_bound']"], {}), "(part_info[link_index_key]['min_bound'])\n", (5992, 6032), True, 'import numpy as np\n'), ((6080, 6130), 'numpy.asarray', 
'np.asarray', (["part_info[link_index_key]['max_bound']"], {}), "(part_info[link_index_key]['max_bound'])\n", (6090, 6130), True, 'import numpy as np\n'), ((6871, 6911), 'numpy.dot', 'np.dot', (['parent_link_abs_pose', 'joint_pose'], {}), '(parent_link_abs_pose, joint_pose)\n', (6877, 6911), True, 'import numpy as np\n'), ((7102, 7147), 'numpy.dot', 'np.dot', (['joint_abs_pose[:3, :3]', "joint['axis']"], {}), "(joint_abs_pose[:3, :3], joint['axis'])\n", (7108, 7147), True, 'import numpy as np\n'), ((9335, 9370), 'numpy.isin', 'np.isin', (['points_class', 'part_classes'], {}), '(points_class, part_classes)\n', (9342, 9370), True, 'import numpy as np\n'), ((10347, 10365), 'numpy.ones', 'np.ones', (['num_parts'], {}), '(num_parts)\n', (10354, 10365), True, 'import numpy as np\n'), ((17909, 17997), 'pandas.concat', 'pd.concat', (['[train, val, test]'], {'keys': "['train', 'val', 'test']", 'names': "['set', 'index']"}), "([train, val, test], keys=['train', 'val', 'test'], names=['set',\n 'index'])\n", (17918, 17997), True, 'import pandas as pd\n'), ((20243, 20339), 'os.path.join', 'os.path.join', (['stage1_tmp_data_dir', 'f"""{link[\'name\']}_{self.stag1_tmp_output.rest_state_mesh}"""'], {}), '(stage1_tmp_data_dir,\n f"{link[\'name\']}_{self.stag1_tmp_output.rest_state_mesh}")\n', (20255, 20339), False, 'import os\n'), ((20422, 20457), 'utils.get_mesh_info', 'utils.get_mesh_info', (['part_mesh_path'], {}), '(part_mesh_path)\n', (20441, 20457), False, 'import utils\n'), ((22713, 22737), 'h5py.File', 'h5py.File', (['filepath', '"""r"""'], {}), "(filepath, 'r')\n", (22722, 22737), False, 'import h5py\n'), ((1625, 1663), 'numpy.linalg.norm', 'np.linalg.norm', (['orthogonal_vec'], {'axis': '(1)'}), '(orthogonal_vec, axis=1)\n', (1639, 1663), True, 'import numpy as np\n'), ((2402, 2436), 'os.path.splitext', 'os.path.splitext', (['self.output_path'], {}), '(self.output_path)\n', (2418, 2436), False, 'import os\n'), ((3786, 3819), 'numpy.asarray', 'np.asarray', 
(["object_info['center']"], {}), "(object_info['center'])\n", (3796, 3819), True, 'import numpy as np\n'), ((4172, 4207), 'numpy.linalg.inv', 'np.linalg.inv', (['naocs_transformation'], {}), '(naocs_transformation)\n', (4185, 4207), True, 'import numpy as np\n'), ((7190, 7216), 'numpy.linalg.norm', 'np.linalg.norm', (['joint_axis'], {}), '(joint_axis)\n', (7204, 7216), True, 'import numpy as np\n'), ((9730, 9755), 'numpy.arange', 'np.arange', (['naocs.shape[0]'], {}), '(naocs.shape[0])\n', (9739, 9755), True, 'import numpy as np\n'), ((9818, 9843), 'numpy.arange', 'np.arange', (['naocs.shape[0]'], {}), '(naocs.shape[0])\n', (9827, 9843), True, 'import numpy as np\n'), ((6544, 6574), 'numpy.asarray', 'np.asarray', (["joint['pose2link']"], {}), "(joint['pose2link'])\n", (6554, 6574), True, 'import numpy as np\n'), ((6775, 6810), 'numpy.asarray', 'np.asarray', (["parent_link['abs_pose']"], {}), "(parent_link['abs_pose'])\n", (6785, 6810), True, 'import numpy as np\n')]
|
# flowbysector.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Produces a FlowBySector data frame based on a method file for the given class
To run code, specify the "Run/Debug Configurations" Parameters to the
"flowsa/data/flowbysectormethods" yaml file name
you want to use.
Example: "Parameters: --m Water_national_2015_m1"
Files necessary to run FBS:
a. a method yaml in "flowsa/data/flowbysectormethods"
b. crosswalk(s) for the main dataset you are allocating and any datasets
used to allocate to sectors
c. a .py file in "flowsa/" for the main dataset you are allocating if
you need functions to clean up the FBA
before allocating to FBS
"""
import argparse
import yaml
import pandas as pd
from esupy.processed_data_mgmt import write_df_to_file
import flowsa
from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, \
fips_number_key, flow_by_activity_fields, load_source_catalog, \
flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, \
paths, fba_activity_fields, rename_log_file, \
fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, \
fbs_grouping_fields_w_activities, logoutputpath
from flowsa.metadata import set_fb_meta, write_metadata
from flowsa.fbs_allocation import direct_allocation_method, function_allocation_method, \
dataset_allocation_method
from flowsa.sectormapping import add_sectors_to_flowbyactivity, map_fbs_flows, \
get_sector_list
from flowsa.flowbyfunctions import agg_by_geoscale, sector_aggregation, \
aggregator, subset_df_by_geoscale, sector_disaggregation, dynamically_import_fxn
from flowsa.dataclean import clean_df, harmonize_FBS_columns, reset_fbs_dq_scores
from flowsa.validation import allocate_dropped_sector_data,\
compare_activity_to_sector_flowamounts, \
compare_fba_geo_subset_and_fbs_output_totals, compare_geographic_totals,\
replace_naics_w_naics_from_another_year, calculate_flowamount_diff_between_dfs
def parse_args():
    """
    Parse the command-line arguments for FBS creation.

    :return: dictionary of parsed arguments; the only key is 'method',
        the FBS method yaml name (required)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--method", required=True,
        help="Method for flow by sector file. "
             "A valid method config file must exist with this name.")
    # vars() converts the argparse Namespace into a plain dict
    return vars(parser.parse_args())
def load_method(method_name):
    """
    Load a flowbysector method from a YAML file.

    :param method_name: str, FBS method name (ex. 'Water_national_m1_2015')
    :return: dictionary of the items in the FBS method yaml, or None if
        the method file could not be opened
    """
    sfile = flowbysectormethodpath + method_name + '.yaml'
    # initialize so a missing file logs an error and returns None, instead of
    # raising UnboundLocalError on the return statement below (which masked
    # the logged message in the original implementation)
    method = None
    try:
        with open(sfile, 'r') as f:
            method = yaml.safe_load(f)
    except IOError:
        log.error("FlowBySector method file not found.")
    return method
def load_source_dataframe(k, v):
    """
    Load the source dataframe. Data can be a FlowbyActivity or
    FlowBySector parquet stored in flowsa, or a FlowBySector
    formatted dataframe from another package.

    :param k: str, The datasource name
    :param v: dictionary, The datasource parameters
    :return: df of identified parquet, or None when v['data_format'] is not
        one of 'FBA', 'FBS', or 'FBS_outside_flowsa'
    """
    # initialize so an unrecognized data format logs an error and returns
    # None, instead of raising UnboundLocalError on the return statement
    # (the original left flows_df unbound in the else branch)
    flows_df = None
    if v['data_format'] == 'FBA':
        # if yaml specifies a geoscale to load, use parameter to filter dataframe
        if 'source_fba_load_scale' in v:
            geo_level = v['source_fba_load_scale']
        else:
            geo_level = None
        vLog.info("Retrieving flowbyactivity for datasource %s in year %s", k, str(v['year']))
        flows_df = flowsa.getFlowByActivity(datasource=k, year=v['year'], flowclass=v['class'],
                                             geographic_level=geo_level)
    elif v['data_format'] == 'FBS':
        vLog.info("Retrieving flowbysector for datasource %s", k)
        flows_df = flowsa.getFlowBySector(k)
    elif v['data_format'] == 'FBS_outside_flowsa':
        vLog.info("Retrieving flowbysector for datasource %s", k)
        # the datasource supplies its own pull function to build an
        # FBS-formatted dataframe outside of flowsa
        flows_df = dynamically_import_fxn(k, v["FBS_datapull_fxn"])(v)
    else:
        vLog.error("Data format not specified in method file for datasource %s", k)
    return flows_df
def main(**kwargs):
    """
    Create a FlowBySector dataset and save it as a parquet to the local folder.

    For each source named in the method yaml: pull the FBA (or FBS) data,
    map flows, subset by activity set, allocate flows to sectors, aggregate
    geographically and across sector levels, validate, and finally concat
    all activity sets into a single harmonized FBS.

    :param kwargs: dictionary of arguments; the only argument is 'method',
        the name of the flowbysector method yaml. If empty, the method name
        is read from the command line via parse_args().
    :return: None; FBS parquet, metadata, and log files are saved locally
    """
    if len(kwargs) == 0:
        kwargs = parse_args()

    method_name = kwargs['method']
    # assign arguments
    vLog.info("Initiating flowbysector creation for %s", method_name)
    # call on method
    method = load_method(method_name)
    # create dictionary of data and allocation datasets
    fb = method['source_names']
    # Create empty list for storing fbs files
    fbs_list = []
    for k, v in fb.items():
        # pull fba data for allocation
        flows = load_source_dataframe(k, v)

        if v['data_format'] == 'FBA':
            # ensure correct datatypes and that all fields exist
            flows = clean_df(flows, flow_by_activity_fields,
                             fba_fill_na_dict, drop_description=False)

            # map flows to federal flow list or material flow list
            flows_mapped, mapping_files = map_fbs_flows(flows, k, v, keep_fba_columns=True)

            # clean up fba, if specified in yaml (fxn name is resolved
            # dynamically from the string in the method yaml)
            if "clean_fba_df_fxn" in v:
                vLog.info("Cleaning up %s FlowByActivity", k)
                flows_mapped = dynamically_import_fxn(k, v["clean_fba_df_fxn"])(flows_mapped)

            # if activity_sets are specified in a file, call them here
            if 'activity_set_file' in v:
                aset_names = pd.read_csv(flowbysectoractivitysetspath +
                                         v['activity_set_file'], dtype=str)
            else:
                aset_names = None

            # create dictionary of allocation datasets for different activities
            activities = v['activity_sets']
            # subset activity data and allocate to sector
            for aset, attr in activities.items():
                # subset by named activities, either from the activity set
                # file or from the names listed in the method yaml
                if 'activity_set_file' in v:
                    names = aset_names[aset_names['activity_set'] == aset]['name']
                else:
                    names = attr['names']

                vLog.info("Preparing to handle %s in %s", aset, k)
                # subset fba data by activity (match either produced-by or
                # consumed-by activity column)
                flows_subset =\
                    flows_mapped[(flows_mapped[fba_activity_fields[0]].isin(names)) |
                                 (flows_mapped[fba_activity_fields[1]].isin(names)
                                  )].reset_index(drop=True)

                # if activities are sector-like, check sectors are valid
                if load_source_catalog()[k]['sector-like_activities']:
                    flows_subset2 =\
                        replace_naics_w_naics_from_another_year(flows_subset,
                                                                method['target_sector_source'])
                    # check impact on df FlowAmounts
                    vLog.info('Calculate FlowAmount difference caused by '
                              'replacing NAICS Codes with %s, saving difference in Validation log',
                              method['target_sector_source'],)
                    calculate_flowamount_diff_between_dfs(flows_subset, flows_subset2)
                else:
                    flows_subset2 = flows_subset.copy()

                # extract relevant geoscale data or aggregate existing data
                flows_subset_geo = subset_df_by_geoscale(flows_subset2, v['geoscale_to_use'],
                                                        attr['allocation_from_scale'])
                # if loading data subnational geoscale, check for data loss
                if attr['allocation_from_scale'] != 'national':
                    compare_geographic_totals(flows_subset_geo, flows_mapped, k,
                                              attr, aset, names)

                # Add sectors to df activity, depending on level of specified sector aggregation
                log.info("Adding sectors to %s", k)
                flows_subset_wsec =\
                    add_sectors_to_flowbyactivity(flows_subset_geo,
                                                  sectorsourcename=method['target_sector_source'],
                                                  allocationmethod=attr['allocation_method'])
                # clean up fba with sectors, if specified in yaml
                if "clean_fba_w_sec_df_fxn" in v:
                    vLog.info("Cleaning up %s FlowByActivity with sectors", k)
                    flows_subset_wsec = \
                        dynamically_import_fxn(k, v["clean_fba_w_sec_df_fxn"])(flows_subset_wsec,
                                                                               attr=attr,
                                                                               method=method)

                # rename SourceName to MetaSources and drop columns
                flows_mapped_wsec = flows_subset_wsec.\
                    rename(columns={'SourceName': 'MetaSources'}).\
                    drop(columns=['FlowName', 'Compartment'])

                # if allocation method is "direct", then no need to create alloc ratios,
                # else need to use allocation
                # dataframe to create sector allocation ratios
                if attr['allocation_method'] == 'direct':
                    fbs = direct_allocation_method(flows_mapped_wsec, k, names, method)
                # if allocation method for an activity set requires a specific
                # function due to the complicated nature
                # of the allocation, call on function here
                # (the FBS dataframes built so far are passed along)
                elif attr['allocation_method'] == 'allocation_function':
                    fbs = function_allocation_method(flows_mapped_wsec, k, names, attr, fbs_list)
                else:
                    fbs =\
                        dataset_allocation_method(flows_mapped_wsec, attr,
                                                  names, method, k, v, aset,
                                                  method_name, aset_names)

                # drop rows where flowamount = 0 (although this includes dropping suppressed data)
                fbs = fbs[fbs['FlowAmount'] != 0].reset_index(drop=True)

                # define grouping columns dependent on sectors being activity-like or not
                if load_source_catalog()[k]['sector-like_activities'] is False:
                    groupingcols = fbs_grouping_fields_w_activities
                    groupingdict = flow_by_sector_fields_w_activity
                else:
                    groupingcols = fbs_default_grouping_fields
                    groupingdict = flow_by_sector_fields

                # clean df
                fbs = clean_df(fbs, groupingdict, fbs_fill_na_dict)

                # aggregate df geographically, if necessary
                log.info("Aggregating flowbysector to %s level", method['target_geoscale'])
                # determine from scale (the less-resolved of the yaml
                # geoscale and the activity set's allocation scale)
                if fips_number_key[v['geoscale_to_use']] <\
                        fips_number_key[attr['allocation_from_scale']]:
                    from_scale = v['geoscale_to_use']
                else:
                    from_scale = attr['allocation_from_scale']
                fbs_geo_agg = agg_by_geoscale(fbs, from_scale,
                                              method['target_geoscale'], groupingcols)

                # aggregate data to every sector level
                log.info("Aggregating flowbysector to all sector levels")
                fbs_sec_agg = sector_aggregation(fbs_geo_agg, groupingcols)
                # add missing naics5/6 when only one naics5/6 associated with a naics4
                fbs_agg = sector_disaggregation(fbs_sec_agg)

                # check if any sector information is lost before reaching
                # the target sector length, if so,
                # allocate values equally to disaggregated sectors
                vLog.info('Searching for and allocating FlowAmounts for any parent '
                          'NAICS that were dropped in the subset to '
                          '%s child NAICS', method['target_sector_level'])
                fbs_agg_2 = allocate_dropped_sector_data(fbs_agg, method['target_sector_level'])

                # compare flowbysector with flowbyactivity
                compare_activity_to_sector_flowamounts(
                    flows_mapped_wsec, fbs_agg_2, aset, k, method)

                # return sector level specified in method yaml
                # load the crosswalk linking sector lengths
                sector_list = get_sector_list(method['target_sector_level'])

                # subset df, necessary because not all of the sectors are
                # NAICS and can get duplicate rows
                # fbs_1: both sector columns in list; fbs_2/fbs_3: one
                # sector column in list, the other null
                fbs_1 = fbs_agg_2.loc[(fbs_agg_2[fbs_activity_fields[0]].isin(sector_list)) &
                                      (fbs_agg_2[fbs_activity_fields[1]].isin(sector_list))].\
                    reset_index(drop=True)
                fbs_2 = fbs_agg_2.loc[(fbs_agg_2[fbs_activity_fields[0]].isin(sector_list)) &
                                      (fbs_agg_2[fbs_activity_fields[1]].isnull())].\
                    reset_index(drop=True)
                fbs_3 = fbs_agg_2.loc[(fbs_agg_2[fbs_activity_fields[0]].isnull()) &
                                      (fbs_agg_2[fbs_activity_fields[1]].isin(sector_list))].\
                    reset_index(drop=True)
                fbs_sector_subset = pd.concat([fbs_1, fbs_2, fbs_3])

                # drop activity columns
                fbs_sector_subset = fbs_sector_subset.drop(['ActivityProducedBy',
                                                            'ActivityConsumedBy'],
                                                           axis=1, errors='ignore')

                # save comparison of FBA total to FBS total for an activity set
                compare_fba_geo_subset_and_fbs_output_totals(flows_subset_geo, fbs_sector_subset,
                                                             aset, k, v, attr, method)

                log.info("Completed flowbysector for %s", aset)
                fbs_list.append(fbs_sector_subset)
        else:
            # if the loaded flow dt is already in FBS format, append directly to list of FBS
            log.info("Append %s to FBS list", k)
            # ensure correct field datatypes and add any missing fields
            flows = clean_df(flows, flow_by_sector_fields, fbs_fill_na_dict)
            fbs_list.append(flows)

    # create single df of all activities
    log.info("Concat data for all activities")
    fbss = pd.concat(fbs_list, ignore_index=True, sort=False)
    log.info("Clean final dataframe")
    # add missing fields, ensure correct data type, add missing columns, reorder columns
    fbss = clean_df(fbss, flow_by_sector_fields, fbs_fill_na_dict)
    # prior to aggregating, replace MetaSources string with all sources
    # that share context/flowable/sector values
    fbss = harmonize_FBS_columns(fbss)
    # aggregate df as activities might have data for the same specified sector length
    fbss = aggregator(fbss, fbs_default_grouping_fields)
    # sort df
    log.info("Sort and store dataframe")
    # ensure correct data types/order of columns
    fbss = clean_df(fbss, flow_by_sector_fields, fbs_fill_na_dict)
    fbss = fbss.sort_values(
        ['SectorProducedBy', 'SectorConsumedBy', 'Flowable', 'Context']).reset_index(drop=True)
    # tmp reset data quality scores
    fbss = reset_fbs_dq_scores(fbss)
    # save parquet file
    meta = set_fb_meta(method_name, "FlowBySector")
    write_df_to_file(fbss, paths, meta)
    write_metadata(method_name, method, meta, "FlowBySector")
    # rename the log file saved to local directory
    rename_log_file(method_name, meta)
    log.info('See the Validation log for detailed assessment of model results in %s', logoutputpath)
# allow the module to be run directly as a script
if __name__ == '__main__':
    main()
|
[
"flowsa.fbs_allocation.direct_allocation_method",
"flowsa.metadata.write_metadata",
"argparse.ArgumentParser",
"flowsa.sectormapping.map_fbs_flows",
"pandas.read_csv",
"flowsa.fbs_allocation.dataset_allocation_method",
"flowsa.common.load_source_catalog",
"flowsa.flowbyfunctions.aggregator",
"yaml.safe_load",
"flowsa.flowbyfunctions.agg_by_geoscale",
"flowsa.dataclean.reset_fbs_dq_scores",
"flowsa.dataclean.harmonize_FBS_columns",
"flowsa.common.vLog.info",
"flowsa.getFlowBySector",
"flowsa.metadata.set_fb_meta",
"flowsa.validation.allocate_dropped_sector_data",
"flowsa.flowbyfunctions.subset_df_by_geoscale",
"flowsa.validation.compare_activity_to_sector_flowamounts",
"pandas.concat",
"flowsa.validation.compare_geographic_totals",
"flowsa.fbs_allocation.function_allocation_method",
"flowsa.flowbyfunctions.sector_aggregation",
"flowsa.sectormapping.get_sector_list",
"flowsa.dataclean.clean_df",
"flowsa.getFlowByActivity",
"flowsa.validation.calculate_flowamount_diff_between_dfs",
"flowsa.common.vLog.error",
"flowsa.common.log.error",
"flowsa.common.rename_log_file",
"flowsa.flowbyfunctions.sector_disaggregation",
"flowsa.validation.compare_fba_geo_subset_and_fbs_output_totals",
"esupy.processed_data_mgmt.write_df_to_file",
"flowsa.flowbyfunctions.dynamically_import_fxn",
"flowsa.validation.replace_naics_w_naics_from_another_year",
"flowsa.sectormapping.add_sectors_to_flowbyactivity",
"flowsa.common.log.info"
] |
[((2108, 2133), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2131, 2133), False, 'import argparse\n'), ((4566, 4631), 'flowsa.common.vLog.info', 'vLog.info', (['"""Initiating flowbysector creation for %s"""', 'method_name'], {}), "('Initiating flowbysector creation for %s', method_name)\n", (4575, 4631), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((14834, 14876), 'flowsa.common.log.info', 'log.info', (['"""Concat data for all activities"""'], {}), "('Concat data for all activities')\n", (14842, 14876), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((14888, 14938), 'pandas.concat', 'pd.concat', (['fbs_list'], {'ignore_index': '(True)', 'sort': '(False)'}), '(fbs_list, ignore_index=True, sort=False)\n', (14897, 14938), True, 'import pandas as pd\n'), ((14943, 14976), 'flowsa.common.log.info', 'log.info', (['"""Clean final dataframe"""'], {}), "('Clean final dataframe')\n", (14951, 14976), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, 
fbs_grouping_fields_w_activities, logoutputpath\n'), ((15077, 15132), 'flowsa.dataclean.clean_df', 'clean_df', (['fbss', 'flow_by_sector_fields', 'fbs_fill_na_dict'], {}), '(fbss, flow_by_sector_fields, fbs_fill_na_dict)\n', (15085, 15132), False, 'from flowsa.dataclean import clean_df, harmonize_FBS_columns, reset_fbs_dq_scores\n'), ((15264, 15291), 'flowsa.dataclean.harmonize_FBS_columns', 'harmonize_FBS_columns', (['fbss'], {}), '(fbss)\n', (15285, 15291), False, 'from flowsa.dataclean import clean_df, harmonize_FBS_columns, reset_fbs_dq_scores\n'), ((15389, 15434), 'flowsa.flowbyfunctions.aggregator', 'aggregator', (['fbss', 'fbs_default_grouping_fields'], {}), '(fbss, fbs_default_grouping_fields)\n', (15399, 15434), False, 'from flowsa.flowbyfunctions import agg_by_geoscale, sector_aggregation, aggregator, subset_df_by_geoscale, sector_disaggregation, dynamically_import_fxn\n'), ((15453, 15489), 'flowsa.common.log.info', 'log.info', (['"""Sort and store dataframe"""'], {}), "('Sort and store dataframe')\n", (15461, 15489), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((15550, 15605), 'flowsa.dataclean.clean_df', 'clean_df', (['fbss', 'flow_by_sector_fields', 'fbs_fill_na_dict'], {}), '(fbss, flow_by_sector_fields, fbs_fill_na_dict)\n', (15558, 15605), False, 'from flowsa.dataclean import clean_df, harmonize_FBS_columns, reset_fbs_dq_scores\n'), ((15778, 15803), 'flowsa.dataclean.reset_fbs_dq_scores', 'reset_fbs_dq_scores', (['fbss'], {}), '(fbss)\n', (15797, 15803), False, 'from flowsa.dataclean import clean_df, harmonize_FBS_columns, reset_fbs_dq_scores\n'), ((15839, 15879), 'flowsa.metadata.set_fb_meta', 
'set_fb_meta', (['method_name', '"""FlowBySector"""'], {}), "(method_name, 'FlowBySector')\n", (15850, 15879), False, 'from flowsa.metadata import set_fb_meta, write_metadata\n'), ((15884, 15919), 'esupy.processed_data_mgmt.write_df_to_file', 'write_df_to_file', (['fbss', 'paths', 'meta'], {}), '(fbss, paths, meta)\n', (15900, 15919), False, 'from esupy.processed_data_mgmt import write_df_to_file\n'), ((15924, 15981), 'flowsa.metadata.write_metadata', 'write_metadata', (['method_name', 'method', 'meta', '"""FlowBySector"""'], {}), "(method_name, method, meta, 'FlowBySector')\n", (15938, 15981), False, 'from flowsa.metadata import set_fb_meta, write_metadata\n'), ((16037, 16071), 'flowsa.common.rename_log_file', 'rename_log_file', (['method_name', 'meta'], {}), '(method_name, meta)\n', (16052, 16071), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((16076, 16181), 'flowsa.common.log.info', 'log.info', (['"""See the Validation log for detailed assessment of model results in %s"""', 'logoutputpath'], {}), "(\n 'See the Validation log for detailed assessment of model results in %s',\n logoutputpath)\n", (16084, 16181), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((3568, 3676), 'flowsa.getFlowByActivity', 'flowsa.getFlowByActivity', ([], {'datasource': 'k', 'year': 
"v['year']", 'flowclass': "v['class']", 'geographic_level': 'geo_level'}), "(datasource=k, year=v['year'], flowclass=v['class'],\n geographic_level=geo_level)\n", (3592, 3676), False, 'import flowsa\n'), ((2740, 2757), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2754, 2757), False, 'import yaml\n'), ((2786, 2834), 'flowsa.common.log.error', 'log.error', (['"""FlowBySector method file not found."""'], {}), "('FlowBySector method file not found.')\n", (2795, 2834), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((3761, 3818), 'flowsa.common.vLog.info', 'vLog.info', (['"""Retrieving flowbysector for datasource %s"""', 'k'], {}), "('Retrieving flowbysector for datasource %s', k)\n", (3770, 3818), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((3838, 3863), 'flowsa.getFlowBySector', 'flowsa.getFlowBySector', (['k'], {}), '(k)\n', (3860, 3863), False, 'import flowsa\n'), ((5078, 5165), 'flowsa.dataclean.clean_df', 'clean_df', (['flows', 'flow_by_activity_fields', 'fba_fill_na_dict'], {'drop_description': '(False)'}), '(flows, flow_by_activity_fields, fba_fill_na_dict, drop_description\n =False)\n', (5086, 5165), False, 'from flowsa.dataclean import clean_df, harmonize_FBS_columns, reset_fbs_dq_scores\n'), ((5300, 5349), 'flowsa.sectormapping.map_fbs_flows', 
'map_fbs_flows', (['flows', 'k', 'v'], {'keep_fba_columns': '(True)'}), '(flows, k, v, keep_fba_columns=True)\n', (5313, 5349), False, 'from flowsa.sectormapping import add_sectors_to_flowbyactivity, map_fbs_flows, get_sector_list\n'), ((14568, 14604), 'flowsa.common.log.info', 'log.info', (['"""Append %s to FBS list"""', 'k'], {}), "('Append %s to FBS list', k)\n", (14576, 14604), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((14697, 14753), 'flowsa.dataclean.clean_df', 'clean_df', (['flows', 'flow_by_sector_fields', 'fbs_fill_na_dict'], {}), '(flows, flow_by_sector_fields, fbs_fill_na_dict)\n', (14705, 14753), False, 'from flowsa.dataclean import clean_df, harmonize_FBS_columns, reset_fbs_dq_scores\n'), ((3923, 3980), 'flowsa.common.vLog.info', 'vLog.info', (['"""Retrieving flowbysector for datasource %s"""', 'k'], {}), "('Retrieving flowbysector for datasource %s', k)\n", (3932, 3980), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((4070, 4145), 'flowsa.common.vLog.error', 'vLog.error', (['"""Data format not specified in method file for datasource %s"""', 'k'], {}), "('Data format not specified in method file for datasource %s', k)\n", (4080, 4145), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, 
flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((5456, 5501), 'flowsa.common.vLog.info', 'vLog.info', (['"""Cleaning up %s FlowByActivity"""', 'k'], {}), "('Cleaning up %s FlowByActivity', k)\n", (5465, 5501), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((5738, 5815), 'pandas.read_csv', 'pd.read_csv', (["(flowbysectoractivitysetspath + v['activity_set_file'])"], {'dtype': 'str'}), "(flowbysectoractivitysetspath + v['activity_set_file'], dtype=str)\n", (5749, 5815), True, 'import pandas as pd\n'), ((6396, 6446), 'flowsa.common.vLog.info', 'vLog.info', (['"""Preparing to handle %s in %s"""', 'aset', 'k'], {}), "('Preparing to handle %s in %s', aset, k)\n", (6405, 6446), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((7672, 7766), 'flowsa.flowbyfunctions.subset_df_by_geoscale', 'subset_df_by_geoscale', (['flows_subset2', "v['geoscale_to_use']", "attr['allocation_from_scale']"], {}), "(flows_subset2, v['geoscale_to_use'], attr[\n 'allocation_from_scale'])\n", (7693, 7766), False, 'from flowsa.flowbyfunctions import 
agg_by_geoscale, sector_aggregation, aggregator, subset_df_by_geoscale, sector_disaggregation, dynamically_import_fxn\n'), ((8219, 8254), 'flowsa.common.log.info', 'log.info', (['"""Adding sectors to %s"""', 'k'], {}), "('Adding sectors to %s', k)\n", (8227, 8254), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((8312, 8457), 'flowsa.sectormapping.add_sectors_to_flowbyactivity', 'add_sectors_to_flowbyactivity', (['flows_subset_geo'], {'sectorsourcename': "method['target_sector_source']", 'allocationmethod': "attr['allocation_method']"}), "(flows_subset_geo, sectorsourcename=method[\n 'target_sector_source'], allocationmethod=attr['allocation_method'])\n", (8341, 8457), False, 'from flowsa.sectormapping import add_sectors_to_flowbyactivity, map_fbs_flows, get_sector_list\n'), ((10986, 11031), 'flowsa.dataclean.clean_df', 'clean_df', (['fbs', 'groupingdict', 'fbs_fill_na_dict'], {}), '(fbs, groupingdict, fbs_fill_na_dict)\n', (10994, 11031), False, 'from flowsa.dataclean import clean_df, harmonize_FBS_columns, reset_fbs_dq_scores\n'), ((11109, 11184), 'flowsa.common.log.info', 'log.info', (['"""Aggregating flowbysector to %s level"""', "method['target_geoscale']"], {}), "('Aggregating flowbysector to %s level', method['target_geoscale'])\n", (11117, 11184), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, 
fbs_grouping_fields_w_activities, logoutputpath\n'), ((11526, 11599), 'flowsa.flowbyfunctions.agg_by_geoscale', 'agg_by_geoscale', (['fbs', 'from_scale', "method['target_geoscale']", 'groupingcols'], {}), "(fbs, from_scale, method['target_geoscale'], groupingcols)\n", (11541, 11599), False, 'from flowsa.flowbyfunctions import agg_by_geoscale, sector_aggregation, aggregator, subset_df_by_geoscale, sector_disaggregation, dynamically_import_fxn\n'), ((11718, 11775), 'flowsa.common.log.info', 'log.info', (['"""Aggregating flowbysector to all sector levels"""'], {}), "('Aggregating flowbysector to all sector levels')\n", (11726, 11775), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((11806, 11851), 'flowsa.flowbyfunctions.sector_aggregation', 'sector_aggregation', (['fbs_geo_agg', 'groupingcols'], {}), '(fbs_geo_agg, groupingcols)\n', (11824, 11851), False, 'from flowsa.flowbyfunctions import agg_by_geoscale, sector_aggregation, aggregator, subset_df_by_geoscale, sector_disaggregation, dynamically_import_fxn\n'), ((11965, 11999), 'flowsa.flowbyfunctions.sector_disaggregation', 'sector_disaggregation', (['fbs_sec_agg'], {}), '(fbs_sec_agg)\n', (11986, 11999), False, 'from flowsa.flowbyfunctions import agg_by_geoscale, sector_aggregation, aggregator, subset_df_by_geoscale, sector_disaggregation, dynamically_import_fxn\n'), ((12209, 12374), 'flowsa.common.vLog.info', 'vLog.info', (['"""Searching for and allocating FlowAmounts for any parent NAICS that were dropped in the subset to %s child NAICS"""', "method['target_sector_level']"], {}), "(\n 'Searching for and allocating FlowAmounts for any parent NAICS that were dropped 
in the subset to %s child NAICS'\n , method['target_sector_level'])\n", (12218, 12374), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((12451, 12519), 'flowsa.validation.allocate_dropped_sector_data', 'allocate_dropped_sector_data', (['fbs_agg', "method['target_sector_level']"], {}), "(fbs_agg, method['target_sector_level'])\n", (12479, 12519), False, 'from flowsa.validation import allocate_dropped_sector_data, compare_activity_to_sector_flowamounts, compare_fba_geo_subset_and_fbs_output_totals, compare_geographic_totals, replace_naics_w_naics_from_another_year, calculate_flowamount_diff_between_dfs\n'), ((12596, 12685), 'flowsa.validation.compare_activity_to_sector_flowamounts', 'compare_activity_to_sector_flowamounts', (['flows_mapped_wsec', 'fbs_agg_2', 'aset', 'k', 'method'], {}), '(flows_mapped_wsec, fbs_agg_2, aset,\n k, method)\n', (12634, 12685), False, 'from flowsa.validation import allocate_dropped_sector_data, compare_activity_to_sector_flowamounts, compare_fba_geo_subset_and_fbs_output_totals, compare_geographic_totals, replace_naics_w_naics_from_another_year, calculate_flowamount_diff_between_dfs\n'), ((12857, 12903), 'flowsa.sectormapping.get_sector_list', 'get_sector_list', (["method['target_sector_level']"], {}), "(method['target_sector_level'])\n", (12872, 12903), False, 'from flowsa.sectormapping import add_sectors_to_flowbyactivity, map_fbs_flows, get_sector_list\n'), ((13744, 13776), 'pandas.concat', 'pd.concat', (['[fbs_1, fbs_2, fbs_3]'], {}), '([fbs_1, fbs_2, fbs_3])\n', (13753, 13776), True, 'import pandas as pd\n'), ((14164, 14275), 
'flowsa.validation.compare_fba_geo_subset_and_fbs_output_totals', 'compare_fba_geo_subset_and_fbs_output_totals', (['flows_subset_geo', 'fbs_sector_subset', 'aset', 'k', 'v', 'attr', 'method'], {}), '(flows_subset_geo,\n fbs_sector_subset, aset, k, v, attr, method)\n', (14208, 14275), False, 'from flowsa.validation import allocate_dropped_sector_data, compare_activity_to_sector_flowamounts, compare_fba_geo_subset_and_fbs_output_totals, compare_geographic_totals, replace_naics_w_naics_from_another_year, calculate_flowamount_diff_between_dfs\n'), ((14350, 14397), 'flowsa.common.log.info', 'log.info', (['"""Completed flowbysector for %s"""', 'aset'], {}), "('Completed flowbysector for %s', aset)\n", (14358, 14397), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((4000, 4048), 'flowsa.flowbyfunctions.dynamically_import_fxn', 'dynamically_import_fxn', (['k', "v['FBS_datapull_fxn']"], {}), "(k, v['FBS_datapull_fxn'])\n", (4022, 4048), False, 'from flowsa.flowbyfunctions import agg_by_geoscale, sector_aggregation, aggregator, subset_df_by_geoscale, sector_disaggregation, dynamically_import_fxn\n'), ((5533, 5581), 'flowsa.flowbyfunctions.dynamically_import_fxn', 'dynamically_import_fxn', (['k', "v['clean_fba_df_fxn']"], {}), "(k, v['clean_fba_df_fxn'])\n", (5555, 5581), False, 'from flowsa.flowbyfunctions import agg_by_geoscale, sector_aggregation, aggregator, subset_df_by_geoscale, sector_disaggregation, dynamically_import_fxn\n'), ((6954, 7044), 'flowsa.validation.replace_naics_w_naics_from_another_year', 'replace_naics_w_naics_from_another_year', (['flows_subset', "method['target_sector_source']"], {}), "(flows_subset, 
method[\n 'target_sector_source'])\n", (6993, 7044), False, 'from flowsa.validation import allocate_dropped_sector_data, compare_activity_to_sector_flowamounts, compare_fba_geo_subset_and_fbs_output_totals, compare_geographic_totals, replace_naics_w_naics_from_another_year, calculate_flowamount_diff_between_dfs\n'), ((7177, 7340), 'flowsa.common.vLog.info', 'vLog.info', (['"""Calculate FlowAmount difference caused by replacing NAICS Codes with %s, saving difference in Validation log"""', "method['target_sector_source']"], {}), "(\n 'Calculate FlowAmount difference caused by replacing NAICS Codes with %s, saving difference in Validation log'\n , method['target_sector_source'])\n", (7186, 7340), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((7415, 7481), 'flowsa.validation.calculate_flowamount_diff_between_dfs', 'calculate_flowamount_diff_between_dfs', (['flows_subset', 'flows_subset2'], {}), '(flows_subset, flows_subset2)\n', (7452, 7481), False, 'from flowsa.validation import allocate_dropped_sector_data, compare_activity_to_sector_flowamounts, compare_fba_geo_subset_and_fbs_output_totals, compare_geographic_totals, replace_naics_w_naics_from_another_year, calculate_flowamount_diff_between_dfs\n'), ((7979, 8058), 'flowsa.validation.compare_geographic_totals', 'compare_geographic_totals', (['flows_subset_geo', 'flows_mapped', 'k', 'attr', 'aset', 'names'], {}), '(flows_subset_geo, flows_mapped, k, attr, aset, names)\n', (8004, 8058), False, 'from flowsa.validation import allocate_dropped_sector_data, compare_activity_to_sector_flowamounts, compare_fba_geo_subset_and_fbs_output_totals, compare_geographic_totals, 
replace_naics_w_naics_from_another_year, calculate_flowamount_diff_between_dfs\n'), ((8689, 8747), 'flowsa.common.vLog.info', 'vLog.info', (['"""Cleaning up %s FlowByActivity with sectors"""', 'k'], {}), "('Cleaning up %s FlowByActivity with sectors', k)\n", (8698, 8747), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((9610, 9671), 'flowsa.fbs_allocation.direct_allocation_method', 'direct_allocation_method', (['flows_mapped_wsec', 'k', 'names', 'method'], {}), '(flows_mapped_wsec, k, names, method)\n', (9634, 9671), False, 'from flowsa.fbs_allocation import direct_allocation_method, function_allocation_method, dataset_allocation_method\n'), ((6841, 6862), 'flowsa.common.load_source_catalog', 'load_source_catalog', ([], {}), '()\n', (6860, 6862), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n'), ((8814, 8868), 'flowsa.flowbyfunctions.dynamically_import_fxn', 'dynamically_import_fxn', (['k', "v['clean_fba_w_sec_df_fxn']"], {}), "(k, v['clean_fba_w_sec_df_fxn'])\n", (8836, 8868), False, 'from flowsa.flowbyfunctions import agg_by_geoscale, sector_aggregation, aggregator, subset_df_by_geoscale, sector_disaggregation, dynamically_import_fxn\n'), ((9966, 10037), 'flowsa.fbs_allocation.function_allocation_method', 'function_allocation_method', (['flows_mapped_wsec', 'k', 
'names', 'attr', 'fbs_list'], {}), '(flows_mapped_wsec, k, names, attr, fbs_list)\n', (9992, 10037), False, 'from flowsa.fbs_allocation import direct_allocation_method, function_allocation_method, dataset_allocation_method\n'), ((10111, 10217), 'flowsa.fbs_allocation.dataset_allocation_method', 'dataset_allocation_method', (['flows_mapped_wsec', 'attr', 'names', 'method', 'k', 'v', 'aset', 'method_name', 'aset_names'], {}), '(flows_mapped_wsec, attr, names, method, k, v,\n aset, method_name, aset_names)\n', (10136, 10217), False, 'from flowsa.fbs_allocation import direct_allocation_method, function_allocation_method, dataset_allocation_method\n'), ((10597, 10618), 'flowsa.common.load_source_catalog', 'load_source_catalog', ([], {}), '()\n', (10616, 10618), False, 'from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, fips_number_key, flow_by_activity_fields, load_source_catalog, flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, paths, fba_activity_fields, rename_log_file, fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, fbs_grouping_fields_w_activities, logoutputpath\n')]
|
#----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
import json
import dateutil.parser
import babel
from flask import Flask, render_template, request, Response, flash, redirect, url_for
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
from flask_wtf import Form, FlaskForm
from forms import *
from flask_migrate import Migrate
import psycopg2
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
# Module-level SQLAlchemy handle; it is bound to the app later via
# db.init_app(app) inside the create_app() factory.
db = SQLAlchemy()
def create_app():
    """Application factory: build the Flask app, load its config module,
    and bind the shared SQLAlchemy handle to it."""
    flask_app = Flask(__name__)
    flask_app.config.from_object('config')  # DB URI etc. come from config.py
    db.init_app(flask_app)
    return flask_app
# Build the application via the factory, then attach the Flask-Moment
# (datetime rendering) and Flask-Migrate (schema migration) extensions.
app = create_app()
moment = Moment(app)
migrate = Migrate(app, db)
#----------------------------------------------------------------------------#
# Models.
#----------------------------------------------------------------------------#
# Seperated into models.py
from models import Venue, Artist, Shows
#----------------------------------------------------------------------------#
# Filters.
#----------------------------------------------------------------------------#
def format_datetime(value, format='medium'):
    """Jinja filter: parse an ISO date string and render it with Babel.

    `format` is either a named size ('full' / 'medium') or a literal
    Babel datetime pattern, which is passed through unchanged.
    """
    parsed = dateutil.parser.parse(value)
    named_patterns = {
        'full': "EEEE MMMM, d, y 'at' h:mma",
        'medium': "EE MM, dd, y h:mma",
    }
    format = named_patterns.get(format, format)
    return babel.dates.format_datetime(parsed, format)
app.jinja_env.filters['datetime'] = format_datetime
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route('/')
def index():
    """Render the landing page."""
    homepage = 'pages/home.html'
    return render_template(homepage)
# ----------------------------------------------------------------
# Routes
# ----------------------------------------------------------------
from routes import *
@app.errorhandler(404)
def not_found_error(error):
    """Render the custom 404 page, preserving the 404 status code."""
    body = render_template('errors/404.html')
    return body, 404
@app.errorhandler(500)
def server_error(error):
    """Render the custom 500 page, preserving the 500 status code."""
    body = render_template('errors/500.html')
    return body, 500
# Production logging: when debug mode is off, mirror INFO-and-above
# records to a local file with timestamp/level/location context.
if not app.debug:
    file_handler = FileHandler('error.log')
    file_handler.setFormatter(
        Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
    )
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('errors')
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
# if __name__ == '__main__':
# app.run()
# Or specify port manually:
if __name__ == '__main__':
    # Bug fix: `os` was never imported anywhere in this module, so the
    # os.environ lookup below raised NameError at startup. Import it
    # locally, right where it is used.
    import os

    # Honor the PORT environment variable (e.g. set by a PaaS); default 5000.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
|
[
"logging.FileHandler",
"flask.Flask",
"logging.Formatter",
"flask_moment.Moment",
"flask_sqlalchemy.SQLAlchemy",
"flask_migrate.Migrate",
"flask.render_template",
"babel.dates.format_datetime"
] |
[((721, 733), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (731, 733), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((932, 943), 'flask_moment.Moment', 'Moment', (['app'], {}), '(app)\n', (938, 943), False, 'from flask_moment import Moment\n'), ((954, 970), 'flask_migrate.Migrate', 'Migrate', (['app', 'db'], {}), '(app, db)\n', (961, 970), False, 'from flask_migrate import Migrate\n'), ((762, 777), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (767, 777), False, 'from flask import Flask, render_template, request, Response, flash, redirect, url_for\n'), ((1597, 1638), 'babel.dates.format_datetime', 'babel.dates.format_datetime', (['date', 'format'], {}), '(date, format)\n', (1624, 1638), False, 'import babel\n'), ((1905, 1939), 'flask.render_template', 'render_template', (['"""pages/home.html"""'], {}), "('pages/home.html')\n", (1920, 1939), False, 'from flask import Flask, render_template, request, Response, flash, redirect, url_for\n'), ((2352, 2376), 'logging.FileHandler', 'FileHandler', (['"""error.log"""'], {}), "('error.log')\n", (2363, 2376), False, 'from logging import Formatter, FileHandler\n'), ((2173, 2207), 'flask.render_template', 'render_template', (['"""errors/404.html"""'], {}), "('errors/404.html')\n", (2188, 2207), False, 'from flask import Flask, render_template, request, Response, flash, redirect, url_for\n'), ((2273, 2307), 'flask.render_template', 'render_template', (['"""errors/500.html"""'], {}), "('errors/500.html')\n", (2288, 2307), False, 'from flask import Flask, render_template, request, Response, flash, redirect, url_for\n'), ((2416, 2501), 'logging.Formatter', 'Formatter', (['"""%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"""'], {}), "('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'\n )\n", (2425, 2501), False, 'from logging import Formatter, FileHandler\n')]
|
"""
Created on 13 Dec 2016
@author: <NAME> (<EMAIL>)
the I2C addresses of the internal (in A4 pot) and external (exposed to air) SHTs
example JSON:
{"int": "0x44", "ext": "0x45"}
"""
from collections import OrderedDict
from scs_core.data.json import PersistentJSONable
from scs_dfe.climate.sht31 import SHT31
# --------------------------------------------------------------------------------------------------------------------
class SHTConf(PersistentJSONable):
    """
    Holds the I2C addresses of the two SHT31 sensors: the internal one
    (inside the A4 pot) and the external one (exposed to air).
    """

    __FILENAME = "sht_conf.json"

    @classmethod
    def persistence_location(cls, host):
        return host.conf_dir(), cls.__FILENAME


    # ----------------------------------------------------------------------------------------------------------------

    @classmethod
    def __addr_str(cls, addr):
        """Render an I2C address as '0x%02x', or None if unset."""
        return None if addr is None else "0x%02x" % addr


    # ----------------------------------------------------------------------------------------------------------------

    @classmethod
    def construct_from_jdict(cls, jdict):
        """Build an SHTConf from a JSON dict like {"int": "0x44", "ext": "0x45"}."""
        if not jdict:
            return None

        addresses = []
        for key in ('int', 'ext'):
            field = jdict.get(key)
            # base 0 lets the string carry its own radix prefix (e.g. "0x44")
            addresses.append(None if field is None else int(field, 0))

        return SHTConf(addresses[0], addresses[1])


    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, int_addr, ext_addr):
        """
        Constructor
        """
        super().__init__()

        self.__int_addr = int_addr              # int I2C address of the SHT inside the A4 package
        self.__ext_addr = ext_addr              # int I2C address of the SHT exposed to air


    # ----------------------------------------------------------------------------------------------------------------

    def int_sht(self):
        """Instantiate the internal SHT31, or None if no address is configured."""
        return None if self.__int_addr is None else SHT31(self.__int_addr)


    def ext_sht(self):
        """Instantiate the external SHT31, or None if no address is configured."""
        return None if self.__ext_addr is None else SHT31(self.__ext_addr)


    # ----------------------------------------------------------------------------------------------------------------

    @property
    def int_addr(self):
        return self.__int_addr


    @property
    def ext_addr(self):
        return self.__ext_addr


    # ----------------------------------------------------------------------------------------------------------------

    def as_json(self):
        jdict = OrderedDict([
            ('int', self.__addr_str(self.__int_addr)),
            ('ext', self.__addr_str(self.__ext_addr)),
        ])
        return jdict


    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        rendered = (self.__addr_str(self.int_addr), self.__addr_str(self.ext_addr))
        return "SHTConf:{int_addr:%s, ext_addr:%s}" % rendered
|
[
"collections.OrderedDict",
"scs_dfe.climate.sht31.SHT31"
] |
[((2002, 2024), 'scs_dfe.climate.sht31.SHT31', 'SHT31', (['self.__int_addr'], {}), '(self.__int_addr)\n', (2007, 2024), False, 'from scs_dfe.climate.sht31 import SHT31\n'), ((2126, 2148), 'scs_dfe.climate.sht31.SHT31', 'SHT31', (['self.__ext_addr'], {}), '(self.__ext_addr)\n', (2131, 2148), False, 'from scs_dfe.climate.sht31 import SHT31\n'), ((2572, 2585), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2583, 2585), False, 'from collections import OrderedDict\n')]
|
import scxx.preprocessing as pp
import scxx.plotting as pl
import scanorama
import os
import numpy as np
import scanpy as sc
from anndata import AnnData
# Fixed seed so the Scanorama integration run is reproducible.
np.random.seed(0)
NAMESPACE = 'mouse_brain'
BATCH_SIZE = 1000
# Output directory for the final integrated AnnData object.
result_dir="./results/1M_mouse_brain/scanorama/"
# The 10 mouse-brain batches to integrate (paths relative to ./data/scanorama_data/).
data_names = [
    'data/mouse_brain/nuclei',
    'data/mouse_brain/dropviz/Cerebellum_ALT',
    'data/mouse_brain/dropviz/Cortex_noRep5_FRONTALonly',
    'data/mouse_brain/dropviz/Cortex_noRep5_POSTERIORonly',
    'data/mouse_brain/dropviz/EntoPeduncular',
    'data/mouse_brain/dropviz/GlobusPallidus',
    'data/mouse_brain/dropviz/Hippocampus',
    'data/mouse_brain/dropviz/Striatum',
    'data/mouse_brain/dropviz/SubstantiaNigra',
    'data/mouse_brain/dropviz/Thalamus',
]
import pandas as pd
# Shared gene list: every batch is subset to these genes so the matrices
# are column-aligned before integration.
genelist = pd.read_csv("./data/scanorama_data/data/mouse_brain/genelist_vipcca.txt",header=None,index_col=0).index
datasets=[]
for i in range(len(data_names)):
    name=data_names[i]
    # batch_name presumably tags each cell with its dataset index — confirm in scxx.preprocessing
    ann = pp.read_sc_data("./data/scanorama_data/"+name+"/data.h5ad",batch_name=str(i))
    ann=ann[:,genelist]
    # persist the gene-subset copy next to the original
    ann.write("./data/scanorama_data/"+name+"/data_subset.h5ad")
    datasets.append(ann)
# Scanorama batch correction: `integrated` holds per-dataset 16-dim
# embeddings, `corrected` the batch-corrected expression matrices.
integrated, corrected = scanorama.correct_scanpy(datasets, return_dimred=True, dimred=16)
scanorama_X=integrated[0]
adata_corrected=corrected[0]
adata_corrected.obs=datasets[0].obs
# Stitch the per-dataset results back into one AnnData object, carrying
# the original per-cell metadata (.obs) along.
for i in np.arange(1,len(integrated)):
    scanorama_X=np.concatenate([scanorama_X,integrated[i]])
    adata_i=corrected[i]
    adata_i.obs=datasets[i].obs
    adata_corrected=adata_corrected.concatenate(adata_i,index_unique=None)
# keep an untouched copy in .raw, densify X, and expose the embedding
adata_corrected.raw=adata_corrected.copy()
adata_corrected.X=adata_corrected.X.todense()
adata_corrected.obsm["X_scanorama"]=scanorama_X
adata_corrected.obs_names_make_unique()
# 1,094,150 — presumably the total cell count of the merged object; confirm
adata_corrected.write(result_dir+"output.h5ad")
|
[
"scanorama.correct_scanpy",
"numpy.random.seed",
"pandas.read_csv",
"numpy.concatenate"
] |
[((154, 171), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (168, 171), True, 'import numpy as np\n'), ((1165, 1230), 'scanorama.correct_scanpy', 'scanorama.correct_scanpy', (['datasets'], {'return_dimred': '(True)', 'dimred': '(16)'}), '(datasets, return_dimred=True, dimred=16)\n', (1189, 1230), False, 'import scanorama\n'), ((779, 882), 'pandas.read_csv', 'pd.read_csv', (['"""./data/scanorama_data/data/mouse_brain/genelist_vipcca.txt"""'], {'header': 'None', 'index_col': '(0)'}), "('./data/scanorama_data/data/mouse_brain/genelist_vipcca.txt',\n header=None, index_col=0)\n", (790, 882), True, 'import pandas as pd\n'), ((1375, 1419), 'numpy.concatenate', 'np.concatenate', (['[scanorama_X, integrated[i]]'], {}), '([scanorama_X, integrated[i]])\n', (1389, 1419), True, 'import numpy as np\n')]
|
#-*- coding:utf-8 -*-
from generate_face import *
from gan_model import ganModel
import tensorflow as tf
if __name__ == '__main__':
    # Hyper-parameters for the DCGAN face generator (TF1 contrib HParams).
    hparams = tf.contrib.training.HParams(
        data_root = './../../datas/gan_face/img_align_celeba',
        crop_h = 108, # crop height applied to the raw image
        crop_w = 108, # crop width applied to the raw image
        resize_h = 64, # height the cropped image is resized to
        resize_w = 64, # width the cropped image is resized to
        is_crop = True, # whether to crop at all
        z_dim = 100, # dimension of the random noise z the generator consumes
        batch_size = 64, # training batch size
        sample_size = 64,# number of fixed sample images for monitoring
        output_h = 64, # height of generator output images
        output_w = 64, # width of generator output images
        gf_dim = 64, # generator feature-map depth
        df_dim = 64) # discriminator feature-map depth
    face = generateFace(hparams)
    # Fixed sample batch reused for periodic qualitative checks below.
    sample_images,sample_z = face.get_sample(hparams.sample_size)
    is_training = tf.placeholder(tf.bool,name='is_training')
    images = tf.placeholder(tf.float32, [None,hparams.resize_h,hparams.output_w,3],name='real_images')
    z = tf.placeholder(tf.float32, [None,hparams.z_dim], name='z')
    model = ganModel(hparams)
    g_loss,d_loss,g_vars,d_vars,g_sum,d_sum,G = model.build_model(is_training,images,z)
    d_optim,g_optim = model.optimizer(g_loss,d_loss,g_vars,d_vars)
    saver = tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        # Resume from the latest checkpoint if one exists; otherwise start fresh.
        ckpt = tf.train.get_checkpoint_state('./../../datas/model/share/gan_face/')
        if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
            print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print("Created model with fresh parameters.")
            sess.run(tf.global_variables_initializer())
        summary_writer = tf.summary.FileWriter("train_gan", sess.graph)
        step = 0
        # Endless alternating GAN training loop: one D step, one G step.
        while True:
            step = model.global_step.eval()
            batch_images,batch_z = face.next_batch(hparams.batch_size)
            # Update D network
            _, summary_str = sess.run([d_optim,d_sum],
                           feed_dict={images:batch_images, z:batch_z, is_training:True})
            summary_writer.add_summary(summary_str,step)
            # Update G network
            _, summary_str = sess.run([g_optim,g_sum],
                           feed_dict={z:batch_z, is_training:True})
            summary_writer.add_summary(summary_str,step)
            # Re-evaluate both losses in inference mode for logging.
            d_err = d_loss.eval({images:batch_images, z:batch_z, is_training:False})
            g_err = g_loss.eval({z:batch_z,is_training:False})
            print("step:%d,d_loss:%f,g_loss:%f" % (step,d_err,g_err))
            # Every 1000 steps: dump a sample image grid and save a checkpoint.
            if step%1000 == 0:
                samples, d_err, g_err = sess.run([G,d_loss,g_loss],
                    feed_dict={images:sample_images, z:sample_z, is_training:False})
                print("sample step:%d,d_err:%f,g_err:%f" % (step,d_err,g_err))
                save_images(samples,image_manifold_size(samples.shape[0]), './../../datas/train/share/gan_face/samples/train_{:d}.png'.format(step))
                saver.save(sess,"./../../datas/model/share/gan_face/gans.ckpt",global_step = step)
|
[
"tensorflow.contrib.training.HParams",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"gan_model.ganModel",
"tensorflow.placeholder",
"tensorflow.global_variables",
"tensorflow.summary.FileWriter",
"tensorflow.train.checkpoint_exists",
"tensorflow.train.get_checkpoint_state"
] |
[((147, 394), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'data_root': '"""./../../datas/gan_face/img_align_celeba"""', 'crop_h': '(108)', 'crop_w': '(108)', 'resize_h': '(64)', 'resize_w': '(64)', 'is_crop': '(True)', 'z_dim': '(100)', 'batch_size': '(64)', 'sample_size': '(64)', 'output_h': '(64)', 'output_w': '(64)', 'gf_dim': '(64)', 'df_dim': '(64)'}), "(data_root=\n './../../datas/gan_face/img_align_celeba', crop_h=108, crop_w=108,\n resize_h=64, resize_w=64, is_crop=True, z_dim=100, batch_size=64,\n sample_size=64, output_h=64, output_w=64, gf_dim=64, df_dim=64)\n", (174, 394), True, 'import tensorflow as tf\n'), ((841, 884), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""is_training"""'}), "(tf.bool, name='is_training')\n", (855, 884), True, 'import tensorflow as tf\n'), ((897, 994), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, hparams.resize_h, hparams.output_w, 3]'], {'name': '"""real_images"""'}), "(tf.float32, [None, hparams.resize_h, hparams.output_w, 3],\n name='real_images')\n", (911, 994), True, 'import tensorflow as tf\n'), ((995, 1054), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, hparams.z_dim]'], {'name': '"""z"""'}), "(tf.float32, [None, hparams.z_dim], name='z')\n", (1009, 1054), True, 'import tensorflow as tf\n'), ((1066, 1083), 'gan_model.ganModel', 'ganModel', (['hparams'], {}), '(hparams)\n', (1074, 1083), False, 'from gan_model import ganModel\n'), ((1267, 1288), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (1286, 1288), True, 'import tensorflow as tf\n'), ((1299, 1311), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1309, 1311), True, 'import tensorflow as tf\n'), ((1336, 1404), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['"""./../../datas/model/share/gan_face/"""'], {}), "('./../../datas/model/share/gan_face/')\n", (1365, 1404), True, 'import tensorflow as tf\n'), 
((1777, 1823), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""train_gan"""', 'sess.graph'], {}), "('train_gan', sess.graph)\n", (1798, 1823), True, 'import tensorflow as tf\n'), ((1425, 1479), 'tensorflow.train.checkpoint_exists', 'tf.train.checkpoint_exists', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (1451, 1479), True, 'import tensorflow as tf\n'), ((1717, 1750), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1748, 1750), True, 'import tensorflow as tf\n')]
|
# -*- coding: utf-8 -*-
"""
"""
import copy
import time
import itertools
class CalcRecord():
    """
    One reverse-Polish candidate: the four digits, the three operators,
    and the evaluated result (-1 until a pattern has been computed).
    """
    def __init__(self, a, b, c, d, op1, op2, op3):
        self.a, self.b, self.c, self.d = a, b, c, d
        self.op1, self.op2, self.op3 = op1, op2, op3
        self.result = -1

    def print(self):
        """Dump every field on a single line (debugging aid)."""
        fields = (self.a, self.b, self.c, self.d,
                  self.op1, self.op2, self.op3, self.result)
        print("{0} {1} {2} {3} {4} {5} {6} {7}".format(*fields))
def main():
    """Enumerate every non-decreasing combination of four digits 0-9,
    count how many admit a make-10 formula, and print the elapsed time."""
    t1 = time.time()
    exist_count = 0
    # src_list, count = calc_puzle(1, 3, 3, 7, detail_print = True)
    # src_list, count = calc_puzle(4, 6, 7, 9, detail_print = True)
    # if count > 0:
    #     exist_count += 1
    # print("{0} count = {1}".format(src_list, count))
    #
    # Skip duplicate digit multisets:
    # (1,2,3,1) is the same puzzle as (1,1,2,3), so only non-decreasing
    # tuples (a <= b <= c <= d) are enumerated.
    for a in range(0, 10):
        for b in range(a, 10):
            for c in range(b, 10):
                for d in range(c, 10):
                    src_list, count = calc_puzle(a, b, c, d, detail_print = False)
                    print("{0} count = {1}".format(src_list, count))
                    if count > 0:
                        exist_count += 1
    # "成立個数" = number of solvable puzzles; "実行時間" = elapsed seconds
    print("成立個数:{0}".format(exist_count))
    t2 = time.time()
    print("実行時間:{0}".format(t2-t1))
def calc_puzle(a,b,c,d, detail_print = False):
    """Count the formulas that combine the digits a, b, c, d into exactly 10.

    Every permutation of the four digits is paired with every choice of three
    operators (+, -, *, /) and evaluated against the four possible
    reverse-Polish parenthesizations of a 4-operand expression.

    Returns [src_nums, ans_count] where src_nums is the digit list (as
    strings) and ans_count is the number of matching formulas.  When
    detail_print is True, each match is also printed via print_formula().

    (Refactor: the four near-identical evaluation stanzas of the original
    are collapsed into a template table driven by _eval_rpn().)
    """
    ans_count = 0
    # the puzzle digits, as strings (calc work goes through float() later)
    src_nums = [str(a), str(b), str(c), str(d)]
    # the candidate operators
    oprs = ["+", "-", "*", "/"]
    # all digit orderings and all operator triples
    permu_list = list(itertools.permutations(src_nums))
    op_list = list(itertools.product(oprs, repeat=3))

    # every (digit-order, operator-triple) candidate record
    calc_list = []
    for op_set in op_list:
        for one_line in permu_list:
            calc_list.append(CalcRecord(
                one_line[0], one_line[1], one_line[2], one_line[3],
                op_set[0], op_set[1], op_set[2]))

    # The four RPN shapes a 4-operand / 3-operator expression can take.
    # String tokens push an operand; integer tokens apply op1/op2/op3.
    templates = (
        ("F1", ("a", "b", 1, "c", "d", 2, 3)),   # (A op1 B) op3 (C op2 D)
        ("F2", ("a", "b", 1, "c", 2, "d", 3)),   # ((A op1 B) op2 C) op3 D
        ("F3", ("a", "b", "c", 1, 2, "d", 3)),   # (A op2 (B op1 C)) op3 D
        ("F4", ("a", "b", "c", "d", 1, 2, 3)),   # A op3 (B op2 (C op1 D))
    )

    for cur_calc in calc_list:
        operands = {"a": cur_calc.a, "b": cur_calc.b,
                    "c": cur_calc.c, "d": cur_calc.d}
        operators = {1: cur_calc.op1, 2: cur_calc.op2, 3: cur_calc.op3}
        for label, tokens in templates:
            cur_calc.result = _eval_rpn(tokens, operands, operators)
            if 10 == cur_calc.result:
                ans_count += 1
                if detail_print:
                    print_formula(cur_calc, label)

    return [src_nums, ans_count]


def _eval_rpn(tokens, operands, operators):
    """Evaluate one RPN token sequence and return the final stack value."""
    work = []
    for tok in tokens:
        if tok in operators:          # 1/2/3 -> apply the matching operator
            exec_revpol(operators[tok], work)
        else:                         # "a".."d" -> push the operand
            work.append(operands[tok])
    return work.pop()
def print_formula(cur_calc, msg):
    """Print one matching formula: the RPN token order on the first line,
    then the equivalent infix expression with its result.

    msg selects which of the four RPN shapes cur_calc was evaluated as
    ("F1", "F2", "F3"; anything else is treated as F4).

    Bug fixes vs. the original: F1 rendered ((A op1 B) op2 C) op3 D —
    which is F2's shape — instead of (A op1 B) op3 (C op2 D); F2 and F4
    were also missing a space before '=' / '('.
    """
    if msg == "F1":
        # A B op1 C D op2 op3  ==  (A op1 B) op3 (C op2 D)
        print("{8}# {0} {1} {2} {3} {4} {5} {6}\n({0} {2} {1}) {6} ({3} {5} {4}) = {7}".format(
            cur_calc.a,
            cur_calc.b,
            cur_calc.op1,
            cur_calc.c,
            cur_calc.d,
            cur_calc.op2,
            cur_calc.op3,
            cur_calc.result,
            msg
            ))
    elif msg == "F2":
        # A B op1 C op2 D op3  ==  ((A op1 B) op2 C) op3 D
        print("{8}# {0} {1} {2} {3} {4} {5} {6}\n(({0} {2} {1}) {4} {3}) {6} {5} = {7}".format(
            cur_calc.a,
            cur_calc.b,
            cur_calc.op1,
            cur_calc.c,
            cur_calc.op2,
            cur_calc.d,
            cur_calc.op3,
            cur_calc.result,
            msg
            ))
    elif msg == "F3":
        # A B C op1 op2 D op3  ==  (A op2 (B op1 C)) op3 D
        print("{8}# {0} {1} {2} {3} {4} {5} {6}\n({0} {4} ({1} {3} {2})) {6} {5} = {7}".format(
            cur_calc.a,
            cur_calc.b,
            cur_calc.c,
            cur_calc.op1,
            cur_calc.op2,
            cur_calc.d,
            cur_calc.op3,
            cur_calc.result,
            msg
            ))
    else:
        # A B C D op1 op2 op3  ==  A op3 (B op2 (C op1 D))
        print("{8}# {0} {1} {2} {3} {4} {5} {6}\n{0} {6} ({1} {5} ({2} {4} {3})) = {7}".format(
            cur_calc.a,
            cur_calc.b,
            cur_calc.c,
            cur_calc.d,
            cur_calc.op1,
            cur_calc.op2,
            cur_calc.op3,
            cur_calc.result,
            msg
            ))
def exec_revpol(op, work_list):
    """Apply one reverse-Polish operator to the top two stack entries.

    op        -- one of "+", "-", "*", "/"
    work_list -- the evaluation stack; the top two entries are popped
                 (top = right operand) and the float result is pushed.

    Division by zero pushes the sentinel string "∞", and "∞" in either
    operand propagates through any subsequent operation.  Operands may be
    numbers or numeric strings (they go through float()).
    (Cleanup: the large blocks of commented-out debug prints were removed.)
    """
    s2 = work_list.pop()   # right operand (top of stack)
    s1 = work_list.pop()   # left operand
    if (s1 == "∞" or s2 == "∞"):
        # a poisoned value short-circuits the whole expression
        work_list.append("∞")
        return
    r1 = float(s1)
    r2 = float(s2)
    if op == "+":
        work_list.append(r1 + r2)
    elif op == "-":
        work_list.append(r1 - r2)
    elif op == "*":
        work_list.append(r1 * r2)
    elif op == "/":
        # mark division by zero instead of raising
        work_list.append("∞" if r2 == 0 else r1 / r2)
# Script entry point: run the full enumeration when executed directly.
if __name__ == '__main__':
    main()
|
[
"itertools.product",
"itertools.permutations",
"time.time"
] |
[((628, 639), 'time.time', 'time.time', ([], {}), '()\n', (637, 639), False, 'import time\n'), ((1359, 1370), 'time.time', 'time.time', ([], {}), '()\n', (1368, 1370), False, 'import time\n'), ((1617, 1649), 'itertools.permutations', 'itertools.permutations', (['src_nums'], {}), '(src_nums)\n', (1639, 1649), False, 'import itertools\n'), ((1690, 1723), 'itertools.product', 'itertools.product', (['oprs'], {'repeat': '(3)'}), '(oprs, repeat=3)\n', (1707, 1723), False, 'import itertools\n')]
|
#!/usr/bin/env python3
"""
Polyglot v2 node server for WeatherFlow Weather Station data.
Copyright (c) 2018,2019 <NAME>
"""
import polyinterface
import sys
import time
import datetime
import urllib3
import json
import socket
import math
import threading
LOGGER = polyinterface.LOGGER
class PrecipitationNode(polyinterface.Node):
    """Polyglot node exposing WeatherFlow precipitation data.

    Publishes the current rain rate (ST) plus hourly, daily, weekly,
    monthly, yearly and yesterday accumulation totals (GV0-GV5).  Each
    accumulation bucket is cleared automatically when its calendar
    period rolls over.
    """
    id = 'precipitation'
    hint = [1,11,5,0]
    units = 'metric'
    # NOTE(review): drivers/hint are mutable class-level attributes, shared
    # by every instance; SetUnits mutates them in place.  Fine if only one
    # precipitation node exists per controller — verify otherwise.
    drivers = [
            {'driver': 'ST', 'value': 0, 'uom': 46},   # rate
            {'driver': 'GV0', 'value': 0, 'uom': 82},  # hourly
            {'driver': 'GV1', 'value': 0, 'uom': 82},  # daily
            {'driver': 'GV2', 'value': 0, 'uom': 82},  # weekly
            {'driver': 'GV3', 'value': 0, 'uom': 82},  # monthly
            {'driver': 'GV4', 'value': 0, 'uom': 82},  # yearly
            {'driver': 'GV5', 'value': 0, 'uom': 82}   # yesterday
            ]
    # Running accumulation totals (raw sensor units, presumably mm — the
    # mm->inch conversion happens only in setDriver).
    hourly_rain = 0
    daily_rain = 0
    weekly_rain = 0
    monthly_rain = 0
    yearly_rain = 0
    yesterday_rain = 0
    # Calendar markers used to detect period roll-over.
    prev_hour = 0
    prev_day = 0
    prev_week = 0
    prev_month = 0
    prev_year = 0

    def InitializeRain(self, acc):
        """Restore accumulation state from a saved dict and drop stale buckets.

        acc keys: 'hourly', 'daily', 'weekly', 'monthly', 'yearly',
        'yesterday' hold the saved totals; 'hour', 'day', 'week', 'month',
        'year' hold the calendar markers current when they were saved.
        Any bucket whose calendar period has since rolled over is zeroed.
        """
        self.daily_rain = acc['daily']
        self.hourly_rain = acc['hourly']
        self.weekly_rain = acc['weekly']
        self.monthly_rain = acc['monthly']
        self.yearly_rain = acc['yearly']
        self.yesterday_rain = acc['yesterday']

        self.prev_hour = acc['hour']
        self.prev_day = acc['day']
        self.prev_week = acc['week']
        self.prev_month = acc['month']
        self.prev_year = acc['year']

        now = datetime.datetime.now()

        # Compare saved markers with the current date/time and clear out
        # any accumulations that belong to an already-finished period.
        if self.prev_hour != now.hour:
            LOGGER.info('Clearing old hourly data')
            self.prev_hour = now.hour
            self.hourly_rain = 0

        if self.prev_day != now.day:
            LOGGER.info('Clearing old daily, hourly data')
            # The finished day's total becomes "yesterday" before clearing.
            self.yesterday_rain = self.daily_rain
            self.prev_day = now.day
            self.hourly_rain = 0
            self.daily_rain = 0

        # isocalendar()[1] is the ISO week number.
        if self.prev_week != now.isocalendar()[1]:
            LOGGER.info('Clearing old weekly, daily, hourly data')
            self.prev_week = now.isocalendar()[1]
            self.hourly_rain = 0
            self.daily_rain = 0
            self.weekly_rain = 0

        if self.prev_month != now.month:
            LOGGER.info('Clearing old monthly, daily, hourly data')
            self.prev_month = now.month
            self.hourly_rain = 0
            self.daily_rain = 0
            self.weekly_rain = 0
            self.monthly_rain = 0

        if self.prev_year != now.year:
            LOGGER.info('Clearing old yearly, monthly, daily, hourly data')
            self.prev_year = now.year
            self.hourly_rain = 0
            self.daily_rain = 0
            self.weekly_rain = 0
            self.monthly_rain = 0
            self.yearly_rain = 0

    def SetUnits(self, u):
        """Switch the node's units ('mm' or 'in') and swap driver UOM codes
        and node id so the ISY shows the matching editor profile."""
        self.units = u
        if (u == 'mm'):
            self.drivers[0]['uom'] = 46   # mm/hr rate
            self.drivers[1]['uom'] = 82   # mm accumulations
            self.drivers[2]['uom'] = 82
            self.drivers[3]['uom'] = 82
            self.drivers[4]['uom'] = 82
            self.drivers[5]['uom'] = 82
            self.drivers[6]['uom'] = 82
            self.id = 'precipitation'
        elif (u == 'in'):
            self.drivers[0]['uom'] = 24   # in/hr rate
            self.drivers[1]['uom'] = 105  # inch accumulations
            self.drivers[2]['uom'] = 105
            self.drivers[3]['uom'] = 105
            self.drivers[4]['uom'] = 105
            self.drivers[5]['uom'] = 105
            self.drivers[6]['uom'] = 105
            self.id = 'precipitationUS'

    def hourly_accumulation(self, r):
        """Add rainfall *r* to the hourly bucket (reset on hour change);
        return the updated total."""
        current_hour = datetime.datetime.now().hour
        if (current_hour != self.prev_hour):
            self.prev_hour = current_hour
            self.hourly_rain = 0
        self.hourly_rain += r
        return self.hourly_rain

    def daily_accumulation(self, r):
        """Add rainfall *r* to the daily bucket (reset on day change, rolling
        the finished day into yesterday_rain); return the updated total."""
        current_day = datetime.datetime.now().day
        if (current_day != self.prev_day):
            self.yesterday_rain = self.daily_rain
            self.prev_day = current_day
            self.daily_rain = 0
        self.daily_rain += r
        return self.daily_rain

    def yesterday_accumulation(self):
        """Return yesterday's total rainfall."""
        return self.yesterday_rain

    def weekly_accumulation(self, r):
        """Add rainfall *r* to the weekly bucket (reset on ISO-week change);
        return the updated total."""
        (y, w, d) = datetime.datetime.now().isocalendar()
        if w != self.prev_week:
            self.prev_week = w
            self.weekly_rain = 0
        self.weekly_rain += r
        return self.weekly_rain

    def monthly_accumulation(self, r):
        """Add rainfall *r* to the monthly bucket (reset on month change);
        return the updated total."""
        current_month = datetime.datetime.now().month
        if (current_month != self.prev_month):
            self.prev_month = current_month
            self.monthly_rain = 0
        self.monthly_rain += r
        return self.monthly_rain

    def yearly_accumulation(self, r):
        """Add rainfall *r* to the yearly bucket (reset on year change);
        return the updated total."""
        current_year = datetime.datetime.now().year
        if (current_year != self.prev_year):
            self.prev_year = current_year
            self.yearly_rain = 0
        self.yearly_rain += r
        return self.yearly_rain

    def setDriver(self, driver, value):
        """Convert *value* (mm) to inches when imperial units are selected,
        round appropriately, and push the driver update (forced so the ISY
        always refreshes)."""
        if (self.units == 'in'):
            value = round(value * 0.03937, 2)
        else:
            value = round(value, 3)
        super(PrecipitationNode, self).setDriver(driver, value, report=True, force=True)
|
[
"datetime.datetime.now"
] |
[((1574, 1597), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1595, 1597), False, 'import datetime\n'), ((3830, 3853), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3851, 3853), False, 'import datetime\n'), ((4102, 4125), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4123, 4125), False, 'import datetime\n'), ((4750, 4773), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4771, 4773), False, 'import datetime\n'), ((5032, 5055), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5053, 5055), False, 'import datetime\n'), ((4489, 4512), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4510, 4512), False, 'import datetime\n')]
|
import numpy as np
from aura import aura_loader
import os
import time
import random
def break_aura(path, pieces):
    """Split an aura file into equal chunks along its third axis.

    Each chunk is written as its own ``.aura`` file (raw float16 dump via
    ``ndarray.tofile``) inside a freshly created local folder.

    :param path: A string type of the path to the aura file that is being chunked.
    :param pieces: An integer type of how many pieces should result
    """
    data = aura_loader.read_file(path)
    # Folder name carries a pseudo-unique suffix taken from the timestamp.
    out_dir = "../ChunkedAura" + str(time.time())[5:10]
    print("Saving to " + out_dir)
    os.mkdir(out_dir)
    length, width, total = data.shape
    print(data.shape)
    piece_len = int(total / pieces)
    print("Chunking into " + str(piece_len) + " sized pieces.")
    # One reusable buffer; every pass overwrites it with the next slab.
    buf = np.zeros((length, width, piece_len), dtype=np.float16)
    for idx in range(pieces):
        print("Chunking piece " + str(idx))
        start = piece_len * idx
        print("Extracting " + str(start) + " to " + str(start + piece_len))
        buf[:, :, :] = data[:, :, start:start + piece_len]
        target = (out_dir + "/{" + str(length) + "x" + str(width) + "x"
                  + str(buf.shape[2]) + "}Chunk" + str(idx) + ".aura")
        print("Saving chunk " + str(idx) + " to " + target + "\n")
        buf.tofile(target)
    print("----------------- CHUNKING COMPLETE -----------------")
def percentise_aura(path, percent):
    """
    Breaks an aura file into two pieces of percent sizes.

    :param path: A string type of the path to the aura file that is being chunked.
    :param percent: A float or double type of the percentage that should be in the first chunk.
        Example: percent=0.9 would be 90% of data in first chunk, 10% in the second chunk

    Note: due to int() truncation, size1 + size2 can be one less than the
    sample count, so a trailing sample may be dropped.
    """
    array = aura_loader.read_file(path).T
    # Fixed: use np.random.shuffle, which shuffles along the first axis
    # correctly in place.  The stdlib random.shuffle must NOT be used on a
    # multi-dimensional NumPy array: its swap idiom (x[i], x[j] = x[j], x[i])
    # operates on views and silently duplicates some rows while losing others.
    np.random.shuffle(array)
    filepath = "../ChunkedAura" + str(time.time())[5:10]
    print("Saving to " + filepath)
    os.mkdir(filepath)
    n, l, w = array.shape
    print(array.shape)
    print("Chunking into " + str(percent * 100) + "% and " + str((1 - percent) * 100) + "%")
    size1 = int(n * percent)
    size2 = int(n * (1 - percent))
    print("Chunk1 size = " + str(size1))
    print("Chunk2 size = " + str(size2))
    chunk1 = np.zeros((l, w, size1), dtype=np.float16)
    chunk2 = np.zeros((l, w, size2), dtype=np.float16)
    print("Chunking piece 1")
    for i in range(size1):
        chunk1[:, :, i] = array[i]
    f1 = filepath + "/{" + str(chunk1.shape[0]) + "x" + str(chunk1.shape[1]) + "x" + str(
        chunk1.shape[2]) + "}Chunk1.aura"
    print("Saving chunk1 to " + f1 + "\n")
    chunk1.tofile(f1)
    for i in range(size2):
        chunk2[:, :, i] = array[i + size1]
    f2 = filepath + "/{" + str(chunk2.shape[0]) + "x" + str(chunk2.shape[1]) + "x" + str(
        chunk2.shape[2]) + "}Chunk2.aura"
    # Fixed: this message previously (and incorrectly) said "chunk1".
    print("Saving chunk2 to " + f2 + "\n")
    chunk2.tofile(f2)
    print("----------------- CHUNKING COMPLETE -----------------")
|
[
"os.mkdir",
"random.shuffle",
"numpy.zeros",
"time.time",
"aura.aura_loader.read_file"
] |
[((373, 400), 'aura.aura_loader.read_file', 'aura_loader.read_file', (['path'], {}), '(path)\n', (394, 400), False, 'from aura import aura_loader\n'), ((497, 515), 'os.mkdir', 'os.mkdir', (['filepath'], {}), '(filepath)\n', (505, 515), False, 'import os\n'), ((673, 718), 'numpy.zeros', 'np.zeros', (['(l, w, chunkSize)'], {'dtype': 'np.float16'}), '((l, w, chunkSize), dtype=np.float16)\n', (681, 718), True, 'import numpy as np\n'), ((1692, 1713), 'random.shuffle', 'random.shuffle', (['array'], {}), '(array)\n', (1706, 1713), False, 'import random\n'), ((1810, 1828), 'os.mkdir', 'os.mkdir', (['filepath'], {}), '(filepath)\n', (1818, 1828), False, 'import os\n'), ((2132, 2173), 'numpy.zeros', 'np.zeros', (['(l, w, size1)'], {'dtype': 'np.float16'}), '((l, w, size1), dtype=np.float16)\n', (2140, 2173), True, 'import numpy as np\n'), ((2187, 2228), 'numpy.zeros', 'np.zeros', (['(l, w, size2)'], {'dtype': 'np.float16'}), '((l, w, size2), dtype=np.float16)\n', (2195, 2228), True, 'import numpy as np\n'), ((1658, 1685), 'aura.aura_loader.read_file', 'aura_loader.read_file', (['path'], {}), '(path)\n', (1679, 1685), False, 'from aura import aura_loader\n'), ((439, 450), 'time.time', 'time.time', ([], {}), '()\n', (448, 450), False, 'import time\n'), ((1752, 1763), 'time.time', 'time.time', ([], {}), '()\n', (1761, 1763), False, 'import time\n')]
|