input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>ibm_watson/text_to_speech_v1.py
# coding: utf-8
# (C) Copyright IBM Corp. 2015, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The IBM® Text to Speech service provides APIs that use IBM's speech-synthesis
capabilities to synthesize text into natural-sounding speech in a variety of languages,
dialects, and voices. The service supports at least one male or female voice, sometimes
both, for each language. The audio is streamed back to the client with minimal delay.
For speech synthesis, the service supports a synchronous HTTP Representational State
Transfer (REST) interface and a WebSocket interface. Both interfaces support plain text
and SSML input. SSML is an XML-based markup language that provides text annotation for
speech-synthesis applications. The WebSocket interface also supports the SSML
<code><mark></code> element and word timings.
The service offers a customization interface that you can use to define sounds-like or
phonetic translations for words. A sounds-like translation consists of one or more words
that, when combined, sound like the word. A phonetic translation is based on the SSML
phoneme format for representing a word. You can specify a phonetic translation in standard
International Phonetic Alphabet (IPA) representation or in the proprietary IBM Symbolic
Phonetic Representation (SPR). The Arabic, Chinese, Dutch, and Korean languages support
only IPA.
"""
import json
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from .common import get_sdk_headers
from enum import Enum
from ibm_cloud_sdk_core import BaseService
from ibm_cloud_sdk_core import DetailedResponse
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from typing import Dict
from typing import List
##############################################################################
# Service
##############################################################################
class TextToSpeechV1(BaseService):
"""The Text to Speech V1 service."""
DEFAULT_SERVICE_URL = 'https://stream.watsonplatform.net/text-to-speech/api'
DEFAULT_SERVICE_NAME = 'text_to_speech'
def __init__(
        self,
        authenticator: Authenticator = None,
        service_name: str = DEFAULT_SERVICE_NAME,
) -> None:
    """
    Construct a new client for the Text to Speech service.

    :param Authenticator authenticator: The authenticator specifies the
        authentication mechanism. Get up to date information from
        https://github.com/IBM/python-sdk-core/blob/master/README.md
        about initializing the authenticator of your choice.
    :param str service_name: Service name used to look up external
        configuration (defaults to ``DEFAULT_SERVICE_NAME``).
    """
    # No authenticator supplied: build one from external configuration
    # (environment variables / credential files).
    if not authenticator:
        authenticator = get_authenticator_from_environment(service_name)
    BaseService.__init__(
        self,
        service_url=self.DEFAULT_SERVICE_URL,
        authenticator=authenticator,
        disable_ssl_verification=False)
    self.configure_service(service_name)
#########################
# Voices
#########################
def list_voices(self, **kwargs) -> 'DetailedResponse':
    """
    List voices.

    Lists all voices available for use with the service. The information
    includes the name, language, gender, and other details about the voice.
    To see information about a specific voice, use the **Get a voice** method.

    **See also:** [Listing all available
    voices](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices#listVoices).

    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    # Caller-supplied headers first, then the SDK analytics headers on top.
    request_headers = {}
    if 'headers' in kwargs:
        request_headers.update(kwargs['headers'])
    request_headers.update(
        get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                        service_version='V1',
                        operation_id='list_voices'))
    request = self.prepare_request(method='GET',
                                   url='/v1/voices',
                                   headers=request_headers)
    return self.send(request)
def get_voice(self,
              voice: str,
              *,
              customization_id: str = None,
              **kwargs) -> 'DetailedResponse':
    """
    Get a voice.

    Gets information about the specified voice. The information includes the
    name, language, gender, and other details about the voice. Specify a
    customization ID to obtain information for a custom voice model that is
    defined for the language of the specified voice. To list information about
    all available voices, use the **List voices** method.

    **See also:** [Listing a specific
    voice](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices#listVoice).

    :param str voice: The voice for which information is to be returned.
    :param str customization_id: (optional) The customization ID (GUID) of a
        custom voice model for which information is to be returned. You must
        make the request with credentials for the instance of the service
        that owns the custom model. Omit the parameter to see information
        about the specified voice with no customization.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    :raises ValueError: If `voice` is not provided.
    """
    if voice is None:
        raise ValueError('voice must be provided')
    # Caller-supplied headers first, then the SDK analytics headers on top.
    request_headers = {}
    if 'headers' in kwargs:
        request_headers.update(kwargs['headers'])
    request_headers.update(
        get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                        service_version='V1',
                        operation_id='get_voice'))
    # A None customization_id is passed through; the request builder drops
    # empty query parameters.
    path = '/v1/voices/{0}'.format(*self._encode_path_vars(voice))
    request = self.prepare_request(method='GET',
                                   url=path,
                                   headers=request_headers,
                                   params={'customization_id': customization_id})
    return self.send(request)
#########################
# Synthesis
#########################
def synthesize(self,
text: str,
*,
accept: str = None,
voice: str = None,
customization_id: str = None,
**kwargs) -> 'DetailedResponse':
"""
Synthesize audio.
Synthesizes text to audio that is spoken in the specified voice. The service bases
its understanding of the language for the input text on the specified voice. Use a
voice that matches the language of the input text.
The method accepts a maximum of 5 KB of input text in the body of the request, and
8 KB for the URL and headers. The 5 KB limit includes any SSML tags that you
specify. The service returns the synthesized audio stream as an array of bytes.
**See also:** [The HTTP
interface](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-usingHTTP#usingHTTP).
### Audio formats (accept types)
The service can return audio in the following formats (MIME types).
* Where indicated, you can optionally specify the sampling rate (`rate`) of the
audio. You must specify a sampling rate for the `audio/l16` and `audio/mulaw`
formats. A specified sampling rate must lie in the range of 8 kHz to 192 kHz. Some
formats restrict the sampling rate to certain values, as noted.
* For the `audio/l16` format, you can optionally specify the endianness
(`endianness`) of the audio: `endianness=big-endian` or
`endianness=little-endian`.
Use the `Accept` header or the `accept` parameter to specify the requested format
of the response audio. If you omit an audio format altogether, the service returns
the audio in Ogg format with the Opus codec (`audio/ogg;codecs=opus`). The service
always returns single-channel audio.
* `audio/basic` - The service returns audio with a sampling rate of 8000 Hz.
* `audio/flac` - You can optionally specify the `rate` of the audio. The default
sampling rate is 22,050 Hz.
* `audio/l16` - You must specify the `rate` of the audio. You can optionally
specify the `endianness` of the audio. The default endianness is `little-endian`.
* `audio/mp3` - You can optionally specify the `rate` of the audio. The default
sampling rate is 22,050 Hz.
* `audio/mpeg` - You can optionally specify the `rate` of the audio. The default
sampling rate is 22,050 Hz.
* `audio/mulaw` - You must specify the `rate` of the audio.
* `audio/ogg` - The service returns the audio in the `vorbis` codec. You can
optionally specify the `rate` of the audio. The default sampling rate is 22,050
Hz.
* `audio/ogg;codecs=opus` - You can optionally specify the `rate` of the audio.
Only the following values are valid sampling rates: `48000`, `24000`, `16000`,
`12000`, or `8000`. If you specify a value other than one of these, the service
returns an error. The default sampling rate is 48,000 Hz.
* `audio/ogg;codecs=vorbis` - You can optionally specify the `rate` of the audio.
The default sampling rate is 22,050 Hz.
* `audio/wav` - You can optionally specify the `rate` of the audio. The default
sampling rate is 22,050 Hz.
* `audio/webm` - The service returns the audio in the `opus` codec. The service
returns audio with a sampling rate of 48,000 Hz.
* `audio/webm;codecs=opus` - The service returns audio with a sampling rate of
48,000 Hz.
* `audio/webm;codecs=vorbis` - You can optionally specify the `rate` of the audio.
The default sampling rate is 22,050 Hz.
For more information about specifying an audio format, including additional
details about some of the formats, see [Audio
formats](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-audioFormats#audioFormats).
### Warning messages
If a request includes invalid query parameters, the service returns a `Warnings`
response header that provides messages about the invalid parameters. The warning
includes a descriptive message and a list of invalid argument strings. For
example, a message such as `"Unknown arguments:"` or `"Unknown url query
arguments:"` followed by a list of the form `"{invalid_arg_1}, {invalid_arg_2}."`
The request succeeds despite the warnings.
:param str text: The text to synthesize.
:param str accept: (optional) The requested | |
style="color:#00C000"><b>default</b></span>: false).</li>
<li><b>tf_log_level</b> (<i>int >= 0</i>) – TensorFlow log level, additional C++
logging messages can be enabled by setting os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"/"2"
before importing Tensorforce/TensorFlow
(<span style="color:#00C000"><b>default</b></span>: 40, only error and critical).</li>
</ul>
saver (path | specification): TensorFlow checkpoints directory, or checkpoint manager
configuration with the following attributes, for periodic implicit saving as alternative
to explicit saving via agent.save()
(<span style="color:#00C000"><b>default</b></span>: no saver):
<ul>
<li><b>directory</b> (<i>path</i>) – checkpoint directory
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>filename</b> (<i>string</i>) – checkpoint filename
(<span style="color:#00C000"><b>default</b></span>: agent name).</li>
<li><b>frequency</b> (<i>int > 0</i>) – how frequently to save a checkpoint
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>unit</b> (<i>"timesteps" | "episodes" | "updates"</i>) – frequency unit
(<span style="color:#00C000"><b>default</b></span>: updates).</li>
<li><b>max_checkpoints</b> (<i>int > 0</i>) – maximum number of checkpoints to
keep (<span style="color:#00C000"><b>default</b></span>: 10).</li>
<li><b>max_hour_frequency</b> (<i>int > 0</i>) – ignoring max-checkpoints,
definitely keep a checkpoint in given hour frequency
(<span style="color:#00C000"><b>default</b></span>: none).</li>
</ul>
summarizer (path | specification): TensorBoard summaries directory, or summarizer
configuration with the following attributes
(<span style="color:#00C000"><b>default</b></span>: no summarizer):
<ul>
<li><b>directory</b> (<i>path</i>) – summarizer directory
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>filename</b> (<i>path</i>) – summarizer filename, max_summaries does not
apply if name specified
(<span style="color:#00C000"><b>default</b></span>: "summary-%Y%m%d-%H%M%S").</li>
<li><b>max_summaries</b> (<i>int > 0</i>) – maximum number of (generically-named)
summaries to keep
(<span style="color:#00C000"><b>default</b></span>: 7, number of different colors in
Tensorboard).</li>
<li><b>flush</b> (<i>int > 0</i>) – how frequently in seconds to flush the
summary writer (<span style="color:#00C000"><b>default</b></span>: 10).</li>
<li><b>summaries</b> (<i>"all" | iter[string]</i>) – which summaries to record,
"all" implies all numerical summaries, so all summaries except "graph"
(<span style="color:#00C000"><b>default</b></span>: "all"):</li>
<li>"action-value": value of each action (timestep-based)</li>
<li>"distribution": distribution parameters like probabilities or mean and stddev
(timestep-based)</li>
<li>"entropy": entropy of (per-action) policy distribution(s) (timestep-based)</li>
<li>"graph": computation graph</li>
<li>"kl-divergence": KL-divergence of previous and updated (per-action) policy
distribution(s) (update-based)</li>
<li>"loss": policy and baseline loss plus loss components (update-based)</li>
<li>"parameters": parameter values (according to parameter unit)</li>
<li>"reward": reward per timestep, episode length and reward, plus intermediate
reward/return/advantage estimates and processed values
(timestep/episode/update-based)</li>
<li>"update-norm": global norm of update (update-based)</li>
<li>"updates": mean and variance of update tensors per variable (update-based)</li>
<li>"variables": mean of trainable variables tensors (update-based)</li>
</ul>
tracking ("all" | iter[string]): Which tensors to track, available values are a subset of
the values of summarizer[summaries] above
(<span style="color:#00C000"><b>default</b></span>: no tracking).
The current value of tracked tensors can be retrieved via tracked_tensors() at any time,
however, note that tensor values change at different timescales (timesteps, episodes,
updates).
recorder (path | specification): Traces recordings directory, or recorder configuration with
the following attributes (see
[record-and-pretrain script](https://github.com/tensorforce/tensorforce/blob/master/examples/record_and_pretrain.py)
for example application)
(<span style="color:#00C000"><b>default</b></span>: no recorder):
<ul>
<li><b>directory</b> (<i>path</i>) – recorder directory
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>frequency</b> (<i>int > 0</i>) – how frequently in episodes to record
traces (<span style="color:#00C000"><b>default</b></span>: every episode).</li>
<li><b>start</b> (<i>int >= 0</i>) – how many episodes to skip before starting to
record traces (<span style="color:#00C000"><b>default</b></span>: 0).</li>
<li><b>max-traces</b> (<i>int > 0</i>) – maximum number of traces to keep
(<span style="color:#00C000"><b>default</b></span>: all).</li>
"""
def __init__(
    # Required
    self, states, actions, update, optimizer, objective, reward_estimation,
    # Environment
    max_episode_timesteps=None,
    # Agent
    policy='auto', memory=None,
    # Baseline
    baseline=None, baseline_optimizer=None, baseline_objective=None,
    # Regularization
    l2_regularization=0.0, entropy_regularization=0.0,
    # Preprocessing
    state_preprocessing='linear_normalization',
    # Exploration
    exploration=0.0, variable_noise=0.0,
    # Parallel interactions
    parallel_interactions=1,
    # Config, saver, summarizer, tracking, recorder
    config=None, saver=None, summarizer=None, tracking=None, recorder=None,
    # Deprecated
    **kwargs
):
    """
    Initialize the agent: reject deprecated arguments, record the construction
    spec, fill in defaults for memory/update/config, and build the underlying
    TensorforceModel. See the class docstring for the meaning of each argument.
    """
    # --- Deprecated-argument checks: fail fast with a pointer to the
    # replacement argument instead of silently ignoring old names. ---
    if 'estimate_actions' in reward_estimation:
        raise TensorforceError.deprecated(
            name='Agent', argument='reward_estimation[estimate_actions]',
            replacement='reward_estimation[predict_action_values]'
        )
    if 'estimate_terminal' in reward_estimation:
        raise TensorforceError.deprecated(
            name='Agent', argument='reward_estimation[estimate_terminal]',
            replacement='reward_estimation[predict_terminal_values]'
        )
    if summarizer is not None and 'labels' in summarizer:
        raise TensorforceError.deprecated(
            name='Agent', argument='summarizer[labels]', replacement='summarizer[summaries]'
        )
    if 'baseline_policy' in kwargs:
        raise TensorforceError.deprecated(
            name='Agent', argument='baseline_policy', replacement='baseline'
        )
    if 'reward_preprocessing' in kwargs:
        raise TensorforceError.deprecated(
            name='Agent', argument='reward_preprocessing',
            replacement='reward_estimation[reward_processing]'
        )
    if 'name' in kwargs:
        raise TensorforceError.deprecated(
            name='Agent', argument='name', replacement='config[name]'
        )
    if 'buffer_observe' in kwargs:
        raise TensorforceError.deprecated(
            name='Agent', argument='buffer_observe', replacement='config[buffer_observe]'
        )
    if 'device' in kwargs:
        raise TensorforceError.deprecated(
            name='Agent', argument='device', replacement='config[device]'
        )
    if 'seed' in kwargs:
        raise TensorforceError.deprecated(
            name='Agent', argument='seed', replacement='config[seed]'
        )
    # Any keyword argument left over at this point is simply unknown.
    if len(kwargs) > 0:
        raise TensorforceError.invalid(name='Agent', argument=', '.join(kwargs))
    # Record the full construction spec (unless a subclass already set one),
    # so the agent configuration can be reproduced/serialized later.
    if not hasattr(self, 'spec'):
        self.spec = OrderedDict(
            agent='tensorforce',
            # Environment
            states=states, actions=actions, max_episode_timesteps=max_episode_timesteps,
            # Agent
            policy=policy, memory=memory, update=update, optimizer=optimizer,
            objective=objective, reward_estimation=reward_estimation,
            # Baseline
            baseline=baseline, baseline_optimizer=baseline_optimizer,
            baseline_objective=baseline_objective,
            # Regularization
            l2_regularization=l2_regularization, entropy_regularization=entropy_regularization,
            # Preprocessing
            state_preprocessing=state_preprocessing,
            # Exploration
            exploration=exploration, variable_noise=variable_noise,
            # Parallel interactions
            parallel_interactions=parallel_interactions,
            # Config, saver, summarizer, recorder
            config=config, saver=saver, summarizer=summarizer, tracking=tracking,
            recorder=recorder
        )
    # Default memory: a simple 'recent' buffer.
    if memory is None:
        memory = dict(type='recent')
    # An int `update` is shorthand for a timestep-based batch size.
    if isinstance(update, int):
        update = dict(unit='timesteps', batch_size=update)
    # Copy the config dict so the caller's dict is never mutated below.
    if config is None:
        config = dict()
    else:
        config = dict(config)
    # --- Derive config[buffer_observe], i.e. how many observations may be
    # buffered client-side before being flushed to the model. ---
    # TODO: should this change if summarizer is specified?
    if parallel_interactions > 1:
        if 'buffer_observe' not in config:
            # With parallel environments the buffer must hold whole episodes,
            # which requires a known episode length bound.
            if max_episode_timesteps is None:
                raise TensorforceError.required(
                    name='Agent', argument='max_episode_timesteps',
                    condition='parallel_interactions > 1'
                )
            config['buffer_observe'] = 'episode'
        # elif config['buffer_observe'] < max_episode_timesteps:
        #     raise TensorforceError.value(
        #         name='Agent', argument='config[buffer_observe]',
        #         hint='< max_episode_timesteps', condition='parallel_interactions > 1'
        #     )
    elif update['unit'] == 'timesteps':
        # Buffering must not delay observations past the update frequency,
        # otherwise updates would be starved of data.
        update_frequency = update.get('frequency', update['batch_size'])
        if 'buffer_observe' not in config:
            if isinstance(update_frequency, int):
                config['buffer_observe'] = update_frequency
            else:
                config['buffer_observe'] = 1
        elif isinstance(update_frequency, int) and (
            config['buffer_observe'] == 'episode' or config['buffer_observe'] > update_frequency
        ):
            raise TensorforceError.value(
                name='Agent', argument='config[buffer_observe]', value=config['buffer_observe'],
                hint='> update[frequency]', condition='update[unit] = "timesteps"'
            )
    elif update['unit'] == 'episodes':
        # Episode-based updates can always buffer whole episodes.
        if 'buffer_observe' not in config:
            config['buffer_observe'] = 'episode'
    # reward_estimation = dict(reward_estimation)
    # if reward_estimation['horizon'] == 'episode':
    #     if max_episode_timesteps is None:
    #         raise TensorforceError.required(
    #             name='Agent', argument='max_episode_timesteps',
    #             condition='reward_estimation[horizon] = "episode"'
    #         )
    #     reward_estimation['horizon'] = max_episode_timesteps
    # Base class validates states/actions specs and sets up the
    # parallel-interaction bookkeeping used below.
    super().__init__(
        states=states, actions=actions, max_episode_timesteps=max_episode_timesteps,
        parallel_interactions=parallel_interactions, config=config, recorder=recorder
    )
    # Build the actual computation model from the (normalized) arguments.
    self.model = TensorforceModel(
        states=self.states_spec, actions=self.actions_spec,
        max_episode_timesteps=self.max_episode_timesteps,
        policy=policy, memory=memory, update=update, optimizer=optimizer, objective=objective,
        reward_estimation=reward_estimation,
        baseline=baseline, baseline_optimizer=baseline_optimizer,
        baseline_objective=baseline_objective,
        l2_regularization=l2_regularization, entropy_regularization=entropy_regularization,
        state_preprocessing=state_preprocessing,
        exploration=exploration, variable_noise=variable_noise,
        parallel_interactions=self.parallel_interactions,
        config=self.config, saver=saver, summarizer=summarizer, tracking=tracking
    )
def experience(self, states, actions, terminal, reward, internals=None):
"""
Feed experience traces.
See the [act-experience-update script](https://github.com/tensorforce/tensorforce/blob/master/examples/act_experience_update_interface.py)
for an example application as part of the act-experience-update interface, which is an
alternative to the act-observe interaction pattern.
Args:
states (dict[array[state]]): Dictionary containing arrays of states
(<span style="color:#C00000"><b>required</b></span>).
actions (dict[array[action]]): Dictionary containing arrays of actions
(<span style="color:#C00000"><b>required</b></span>).
terminal (array[bool]): Array of terminals
(<span style="color:#C00000"><b>required</b></span>).
reward (array[float]): Array of rewards
(<span style="color:#C00000"><b>required</b></span>).
internals (dict[state]): Dictionary containing arrays of internal agent states
(<span style="color:#C00000"><b>required</b></span> if agent has internal states).
"""
if not all(len(buffer) == 0 for buffer in self.terminal_buffer):
raise TensorforceError(message="Calling agent.experience is not possible mid-episode.")
# Process states input and infer batching structure
states, batched, num_instances, is_iter_of_dicts = self._process_states_input(
states=states, function_name='Agent.experience'
)
if is_iter_of_dicts:
# Input structure iter[dict[input]]
# Internals
if internals is None:
internals = ArrayDict(self.initial_internals())
internals = internals.fmap(function=(lambda x: np.repeat(np.expand_dims(x, axis=0), repeats=num_instances, axis=0)))
elif not isinstance(internals, (tuple, list)):
raise TensorforceError.type(
name='Agent.experience', argument='internals', dtype=type(internals),
hint='is not tuple/list'
)
else:
internals = [ArrayDict(internal) for internal in internals]
internals = internals[0].fmap(
function=(lambda *xs: np.stack(xs, axis=0)), zip_values=internals[1:]
)
# Actions
if isinstance(actions, np.ndarray):
actions = ArrayDict(singleton=actions)
elif not isinstance(actions, (tuple, list)):
raise TensorforceError.type(
name='Agent.experience', argument='actions', dtype=type(actions),
hint='is not tuple/list'
)
elif not isinstance(actions[0], dict):
actions = ArrayDict(singleton=np.asarray(actions))
else:
actions = [ArrayDict(action) for action in actions]
actions = actions[0].fmap(
function=(lambda *xs: np.stack(xs, axis=0)), zip_values=actions[1:]
)
else:
# Input structure dict[iter[input]]
# Internals
if internals is None:
internals = ArrayDict(self.initial_internals())
internals = internals.fmap(function=(lambda x: np.tile(np.expand_dims(x, axis=0), reps=(num_instances,))))
elif not isinstance(internals, dict):
raise TensorforceError.type(
name='Agent.experience', argument='internals', dtype=type(internals),
hint='is not dict'
)
else:
internals = ArrayDict(internals)
# Actions
if not isinstance(actions, np.ndarray):
actions = ArrayDict(singleton=actions)
elif not isinstance(actions, dict):
raise TensorforceError.type(
name='Agent.experience', argument='actions', dtype=type(actions),
hint='is not dict'
)
else:
actions = ArrayDict(actions)
# Expand inputs if not batched
if not batched:
internals = internals.fmap(function=(lambda x: np.expand_dims(x, axis=0)))
actions = actions.fmap(function=(lambda x: np.expand_dims(x, axis=0)))
terminal = np.asarray([terminal])
reward = np.asarray([reward])
else:
terminal = np.asarray(terminal)
reward = np.asarray(reward)
# Check number of inputs
for name, internal in internals.items():
if internal.shape[0] != num_instances:
raise TensorforceError.value(
name='Agent.experience', argument='len(internals[{}])'.format(name),
value=internal.shape[0], hint='!= len(states)'
)
for name, action in actions.items():
if action.shape[0] != num_instances:
raise TensorforceError.value(
name='Agent.experience', argument='len(actions[{}])'.format(name),
value=action.shape[0], hint='!= len(states)'
)
if terminal.shape[0] != num_instances:
raise TensorforceError.value(
name='Agent.experience', argument='len(terminal)'.format(name),
value=terminal.shape[0], hint='!= len(states)'
)
if reward.shape[0] != num_instances:
raise TensorforceError.value(
name='Agent.experience', argument='len(reward)'.format(name),
value=reward.shape[0], hint='!= len(states)'
)
def function(name, spec):
auxiliary = ArrayDict()
if self.config.enable_int_action_masking and spec.type == 'int' and \
spec.num_values is not None:
if name is None:
name = 'action'
# Mask, either part of states or default | |
# -*- coding: utf-8 -*-
"""
Calculate pair distribution functions.
"""
MINDISTANCE = 2.0 # shorter distances are ignored
PDFCUTOFF = 1.0 # g(r) = 0 if r < PDFCUTOFF
__all__ = ["PDF"]
import numpy as np
import math
from util.logger import Logger
import sys
from core.calculation.discretization import DiscretizationCache
import itertools
import os.path
from config.configuration import config
logger = Logger("statistics.pdf")
logger.setstream("default", sys.stdout, Logger.DEBUG)
class PDF(object):
    """
    Calculate pair distribution functions for atoms and cavities.

    Cavities are treated as a pseudo-element ``"cav"``; distance samples are
    pre-computed for every element pair at construction time.
    """

    def __init__(self, *args):
        """
        Create a sample from atom and cavity positions and smooth them to
        get the PDFs

        The constructor can be called in two ways:

        - ``PDF(results)`` :
          retrieve the data from :class:`core.data.Results`

        - ``PDF(positions, elements, cavitycenters, volume)`` :
          use the given arrays and the volume object

        :raises TypeError: if called with any other number of arguments.
        """
        if len(args) == 1:
            # Called as PDF(results): pull everything from the Results object.
            results = args[0]
            positions = results.atoms.positions
            elements = results.atoms.elements
            volume = results.atoms.volume
            if results.domains is not None:
                # Cavity centers are stored in discrete grid coordinates;
                # convert them back to continuous coordinates via the cached
                # discretization for this volume/resolution.
                centers = results.domains.centers
                cachedir = os.path.expanduser(config.Path.cache_dir)
                cachepath = os.path.join(cachedir, 'discretization_cache.hdf5')
                dcache = DiscretizationCache(cachepath)
                disc = dcache.get_discretization(volume, results.resolution)
                # NOTE(review): relies on Python 2 map() returning a list;
                # under Python 3 np.array(...) below would receive an
                # iterator — confirm the target interpreter version.
                centers = map(disc.discrete_to_continuous, centers)
            else:
                centers = []
        elif len(args) == 4:
            # Called as PDF(positions, elements, cavitycenters, volume).
            positions, elements, centers, volume = args
        else:
            raise TypeError("PDF expects 1 or 4 parameters")
        # copy=False: wrap the caller's arrays without copying where possible.
        self.positions = np.array(positions, copy=False)
        self.elements = np.array(elements, dtype="|S4", copy=False)
        self.centers = np.array(centers, copy=False)
        self.volume = volume
        # Cavity pseudo-atoms ("cav") are excluded from the atom count used
        # for the number density.
        self.num_atoms = np.where(self.elements != "cav")[0].size
        self.numberdensity = float(self.num_atoms) / self.volume.volume
        # Pre-compute sorted distance samples for every element pair.
        self.stats = self._genstats(self.positions,
                                    self.elements,
                                    self.centers,
                                    self.volume)

    def pdf(self, elem1, elem2, cutoff=None, h=None, kernel=None):
        """
        Calculate a smoothed pair distribution function between the elements
        `elem1` and `elem2`.

        **Parameters:**
            `elem1`, `elem2` :
                Chemical element, e.g. 'Ge', 'Te' or 'cav' for cavities
            `cutoff` :
                Maximum distance to include (default: largest sampled distance)
            `h` :
                Smoothing parameter. The greater `h` is,
                the more the function is smoothed. `h == 0` returns the raw
                (filtered) distance sample instead of a function.
            `kernel` :
                Smoothing kernel (default: ``Kernels.epanechnikov``)

        **Returns:**
            Python function that represents the pair distribution function.
            It also accepts Numpy arrays as input.
            Returns `None` if the given elements do not exist or if there is
            not enough data to create the function.
        """
        if kernel is None:
            kernel = Kernels.epanechnikov
        # Find the pre-computed distance sample for this (unordered,
        # case-insensitive) element pair.
        data = None
        for s in self.stats:
            if set((elem1.lower(), elem2.lower())) \
                    == set((s[0].lower(), s[1].lower())):
                data = s[2]
                break;
        if data is None:
            logger.debug("No statistical data for '{}-{}' found.".format(
                elem1, elem2))
            return None  # TODO: raise Exception
        if cutoff is None:
            cutoff = data.max()
        # Keep distances in (MINDISTANCE, cutoff]; shorter distances are
        # considered artifacts (see module constant).
        sel = np.where(np.logical_and(data > MINDISTANCE, data <= cutoff))[0]
        sel = data[sel]
        if h == 0:
            # No smoothing requested: return the raw sample array.
            return sel
        if len(sel) < 2:
            logger.debug("Not enough data for '{}-{}' in cutoff={} range.".format(elem1, elem2, cutoff))
            return None  # TODO: raise Exception
        if h is None:
            ## magic constant
            ## minimizes the first peak
            #h = min(0.5, 0.5772778 * sel.min())
            h = 0.4
        #if h > 0.9 * sel.min():
        #    logger.debug("Bandwidth {} above threshold. Setting to {}.".format(h, 0.9 * sel.min()))
        #    h = 0.9 * sel.min()
        kde = Functions.KDE(sel, h=h, kernel=kernel)

        def wfunc(r):
            # Radial-shell normalization: weight the smoothed distance
            # density by V / (N * 4*pi*r^2); zero below PDFCUTOFF to avoid
            # the 1/r^2 singularity at r = 0.
            y = np.zeros_like(r)
            i = np.where(np.abs(r) > PDFCUTOFF)[0]
            y[i] = self.volume.volume / (data.size * 4 * math.pi * r[i]**2)
            return y

        return Functions.Product(kde, wfunc)

    @staticmethod
    def _correlatedistance(pos1, pos2=None, volume=None):
        """
        Measure the distances between coordinates.

        - ``_correlatedistance(pos1)`` :
          Correlate the coordinates in `pos1` with each other
        - ``_correlatedistance(pos1, pos2)`` :
          Correlate each coordinate in pos1 with each coordinate in pos2

        If `volume` is given, its ``get_distance`` is used (e.g. for periodic
        boundary conditions); otherwise the plain Euclidean difference.

        **Returns:**
            A sorted sample of distances.
        """
        if volume is None:
            distance = lambda x, y: np.abs(y-x)
        else:
            distance = volume.get_distance
        if pos2 is None:
            # All unordered pairs within pos1: row blocks pos1[:-i] vs
            # pos1[i:] for every offset i enumerate each pair exactly once.
            n = pos1.shape[0]
            a1 = np.vstack([pos1[0:-i, :] for i in range(1, n)])
            a2 = np.vstack([pos1[i:, :] for i in range(1, n)])
        else:
            # Full cross product pos1 x pos2.
            n1 = pos1.shape[0]
            n2 = pos2.shape[0]
            a1 = np.hstack([pos1] * n2).reshape(n1 * n2, 3)
            a2 = np.vstack([pos2] * n1)
        samples = np.linalg.norm(distance(a1, a2), 2, axis=-1)
        samples.sort()
        return samples

    @classmethod
    def _genstats(cls, positions, elements, centers, volume=None):
        """
        Iterate through the elements and cavities and correlate the
        distances between atoms of that element/cavity and atoms of
        the other elements.

        Returns a list of ``(elem1, elem2, sorted_distances)`` tuples, one per
        element pair with at least two distance samples.
        """
        elemlist = np.unique(elements).tolist()
        # Cavity centers participate as the pseudo-element "cav".
        if len(centers) > 0 and not "cav" in elemlist:
            elemlist.append("cav")
        # Group positions by element.
        pos = [None] * len(elemlist)
        for i, e in enumerate(elemlist):
            pos[i] = positions[np.where(elements == e)[0], :]
        if len(centers) > 0:
            i = elemlist.index("cav")
            if pos[i].size > 0:
                pos[i] = np.vstack((pos[i], centers))
            else:
                pos[i] = centers
        # Correlate every unordered element pair (including self-pairs).
        stats = []
        for i, e1 in enumerate(elemlist):
            for j in range(i, len(elemlist)):
                e2 = elemlist[j]
                if i == j:
                    s = cls._correlatedistance(pos[i], volume=volume)
                else:
                    s = cls._correlatedistance(pos[i], pos[j], volume=volume)
                if len(s) > 1:
                    stats.append((e1, e2, s))
        return stats
class Functions(object):
    """
    Utility class with Callables.
    """

    class KDE(object):
        """
        Kernel density estimation. Calculate a convolution
        of delta pulses with a smoothing kernel.

        `x` are the sample points, `y` their weights (default: 1 each),
        `h` the bandwidth and `kernel` the smoothing kernel
        (default: ``Kernels.gauss``).
        """

        def __init__(self, x, y=None, h=1.0, kernel=None):
            if y is None:
                y = np.ones_like(x)
            if kernel is None:
                kernel = Kernels.gauss
            self.x = x
            self.y = y
            self.h = h
            self.kernel = kernel

        def __call__(self, p):
            """Evaluate the density estimate at the points `p` (array-like)."""
            result = np.zeros_like(p)
            # Loop over whichever axis is shorter; both branches compute
            # sum_i y_i * kernel((p - x_i) / h) / h.
            if len(self.x) <= len(p):
                p = np.asarray(p)
                # zip replaces the Python-2-only itertools.izip; behavior
                # is identical here (pairwise iteration over x and y).
                for xi, yi in zip(self.x, self.y):
                    result += yi * self.kernel((p - xi) / self.h) / self.h
            else:
                x = np.asarray(self.x)
                y = np.asarray(self.y)
                for i, pi in enumerate(p):
                    result[i] = np.sum(y * self.kernel((pi - x) / self.h) / self.h)
            return result

    class Product(object):
        """
        Product of two functions: ``Product(f, g)(x) == f(x) * g(x)``.
        """

        def __init__(self, f, g):
            self.f = f
            self.g = g

        def __call__(self, x):
            return self.f(x) * self.g(x)
class Kernels(object):
    """
    Utility class with smoothing kernels.

    Every kernel accepts either a scalar or a 1-d numpy array and returns
    the same kind of value; outside the kernel's support the result is 0.
    """

    @staticmethod
    def gauss(x):
        """Standard Gaussian kernel (infinite support)."""
        c = 1.0 / math.sqrt(2.0 * math.pi)
        return c * np.exp(-0.5 * x**2)

    @staticmethod
    def compact(x):
        """Smooth bump kernel with compact support on (-1, 1)."""
        c = 2.25228362104  # normalization constant
        if not isinstance(x, np.ndarray):
            if abs(x) < 1.0:
                return c * math.exp(1.0 / (x**2 - 1.0))
            else:
                return 0.0
        else:
            i = np.where(np.abs(x) < 1.0)[0]
            y = np.zeros(x.size)
            y[i] = c * np.exp(1.0 / (x[i]**2 - 1.0))
            return y

    @staticmethod
    def triang(x):
        """Triangular kernel on (-1, 1)."""
        c = 1.0
        if not isinstance(x, np.ndarray):
            if abs(x) < 1.0:
                return c * (1.0 - abs(x))
            else:
                return 0.0
        else:
            i = np.where(np.abs(x) < 1.0)[0]
            y = np.zeros(x.size)
            y[i] = c * (1.0 - np.abs(x[i]))
            return y

    @staticmethod
    def quad(x):
        """Rectangular (uniform) kernel on (-1, 1)."""
        c = 0.5
        if not isinstance(x, np.ndarray):
            if abs(x) < 1.0:
                return c
            else:
                return 0.0
        else:
            i = np.where(np.abs(x) < 1.0)[0]
            y = np.zeros(x.size)
            y[i] = c
            return y

    @staticmethod
    def posquad(x):
        """One-sided rectangular kernel on [0, 1)."""
        c = 1.0
        if not isinstance(x, np.ndarray):
            if 0 <= x < 1.0:
                return c
            else:
                return 0.0
        else:
            i = np.where(np.logical_and(x >= 0.0, x < 1.0))[0]
            y = np.zeros(x.size)
            y[i] = c
            return y

    @staticmethod
    def negquad(x):
        """One-sided rectangular kernel on (-1, 0]."""
        c = 1.0
        if not isinstance(x, np.ndarray):
            if -1.0 < x <= 0.0:
                return c
            else:
                return 0.0
        else:
            i = np.where(np.logical_and(x > -1.0, x <= 0.0))[0]
            y = np.zeros(x.size)
            y[i] = c
            return y

    @staticmethod
    def epanechnikov(x):
        """Epanechnikov kernel 3/4 * (1 - x^2) on (-1, 1)."""
        if not isinstance(x, np.ndarray):
            if abs(x) < 1.0:
                # BUGFIX: was `3 / 4 * (1.0 - x**2)`, which is 0 under
                # Python 2 integer division; use the float constant as in
                # the array branch below.
                return 0.75 * (1.0 - x**2)
            else:
                return 0.0
        else:
            i = np.where(np.abs(x) < 1.0)[0]
            y = np.zeros_like(x)
            y[i] = 3.0 / 4.0 * (1.0 - x[i]**2)
            return y

    @staticmethod
    def bandwidth(n, d=1):
        """
        Scott's factor for bandwidth estimation
        (`n` samples in `d` dimensions).
        """
        return (4.0 / 3.0 * n) ** (-1.0 / (d + 4.0))
class _TestPDF(object):
    """
    Ad-hoc manual test driver for the PDF statistics (not a unit test).
    NOTE(review): Python 2 only — uses print statements and relies on
    map() returning a list; depends on project modules (config,
    DiscretizationCache, PDF, core.calculation) and matplotlib.
    """
    @staticmethod
    def continuous_coordinates(coords, volume, resolution):
        # Map discrete grid coordinates back to continuous positions via the
        # cached discretization for this volume/resolution combination.
        cachedir = os.path.expanduser(config.Path.cache_dir)
        cachepath = os.path.join(cachedir, 'discretization_cache.hdf5')
        dcache = DiscretizationCache(cachepath)
        disc = dcache.get_discretization(volume, resolution)
        return np.array(map(disc.discrete_to_continuous, coords))
    @staticmethod
    def plotfunc(pdf, e1, e2, px, h, *args):
        # Plot the distribution for the element pair (e1, e2) at bandwidth h
        # and print where it peaks.
        gr = pdf.pdf(e1, e2, h=h)
        plt.plot(px, gr(px), *args, label=str(h))
        py = gr(px)
        m = np.argmax(py)
        print "h={}, xi={}, g({}) = {}".format(gr.f.h, gr.f.x.min(), px[m], py[m])
    @classmethod
    def plotpdf(cls, pdf, e1, e2):
        # Compare two smoothing bandwidths for the same element pair.
        px = np.linspace(0, 2, 1000)
        plt.figure()
        cls.plotfunc(pdf, e1, e2, px, 0.25, "g--")
        cls.plotfunc(pdf, e1, e2, px, 0.5, "r-")
        #cls.plotfunc(pdf, e1, e2, px, 1.0, "b--")
        plt.legend(loc=0)
        plt.title("{}-{}".format(e1, e2))
    @classmethod
    def run(cls):
        # End-to-end smoke test: run a calculation on a sample structure and
        # plot cavity-cavity statistics.
        import core.calculation as calculation
        calc = calculation.Calculation("../results")
        filename = "../xyz/structure_c.xyz"
        resolution = 64
        frame = 9
        #filename = "../xyz/GST_111_128_bulk.xyz"
        #resolution = 256
        #frame = 0
        settings = calculation.CalculationSettings(
            {filename : [frame]},
            resolution, True, False, False)
        print "calculating..."
        res = calc.calculate(settings)[0][0]
        print "generating statistics..."
        pdf = PDF(res)
        #centers = cls.continuous_coordinates(res.domains.centers,
        #                                     res.atoms.volume,
        #                                     res.resolution)
        #pdf = PDF(res.atoms.positions, res.atoms.elements,
        #          centers, res.atoms.volume)
        print "plotting..."
        #cls.plotpdf(pdf, "Ge", "Ge")
        #cls.plotpdf(pdf, "Ge", "Te")
        cls.plotpdf(pdf, "cav", "cav")
        plt.show()
# Manual smoke test: build the PDF statistics for a sample calculation and
# plot them (requires matplotlib and the project's calculation pipeline).
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    _TestPDF.run()
#x = np.linspace(-2, 2, 200)
#plt.plot(x, | |
<filename>server_class.py
"""reads Requests from the server and handles method calls
"""
import os
from server_logic import Adminservices
from server_logic import Userservices
class Server:
"""
File management for server
Attributes:
--------------
username : string
stores username
password : string
stores password
root_directory : string
stores root directory location
current_directory : string
stores current working directory
message : string
stores the message from the client
privilege : string
stores whether the privileges are for admin or a user
Methods:
--------------
__init__(self):
Initialises all the attributes
getpassword(self, user_name):
Passwords for both admin and user
check(self, given_username, given_password, user, password):
checks whether username and password are matched with the previous database
initilise(self):
Initialises user according to the privileges.If client is
admin it initialises admin services.If client is user
it initialises user services.
        login(self, split_message):
            Handles login for the client.
            if successful: returns 'successful'
            else: returns 'failed'
register(self, user_name, password, privilage):
Initialises registeration for clients
create_folder(self, user_name, privilage):
Creating a folder
create_user_log(self, directory, user_name):
Creating a user log
create_admin_log(self, directory)
login for admin
modify_file(self, directory, file_name, input1)
modifying the file
find(self, user_name, privilage):
checks whether the user is already registered or not
if it exists then returns exist or else it returns ok
start_register(self):
Checks if registeration request is valid
split(self, message):
splits the message from the client into several parts and stores into a list and
initialises the analysis
analize(self, split_message):
Analyses all the requests from the client and calls the file handling methods
"""
def __init__(self):
"""Initialises all the attributes
"""
self.username = ''
self.password = ''
self.root_directory = os.getcwd()
self.current_directory = ''
self.message = ''
self.privilege = ''
def getpassword(self, user_name):
"""passwords for both admin and user
parameters:
user_name : string
stores the username
"""
admin_log = 'adminlog.txt'
admin_file = open(admin_log, 'r')
admin_file_lines = admin_file.readlines()
admin_line_count = sum(1 for line in open('adminlog.txt'))
admin_numbers = []
admin_names = []
admin_pass = []
for i in range(admin_line_count):
file = admin_file_lines[i].strip()
find = file.find(",")
admin_numbers.append(find)
admin_names.append(file[:admin_numbers[i]])
admin_pass.append(file[admin_numbers[i]+1:])
for j in range(len(admin_names)):
if user_name == admin_names[j]:
out = str(f'{admin_names[j]} {admin_pass[j]} admin')
return out
user_log = 'userlog.txt'
user_file = open(user_log, 'r')
user_file_lines = user_file.readlines()
user_line_count = sum(1 for line in open('userlog.txt'))
user_numbers = []
user_names = []
user_pass = []
for i in range(user_line_count):
file = user_file_lines[i].strip()
find = file.find(",")
user_numbers.append(find)
user_names.append(file[:user_numbers[i]])
user_pass.append(file[user_numbers[i]+1:])
for j in range(0, len(user_names)):
if user_name == user_names[j]:
uout = str(f'{user_names[j]} {user_pass[j]} user')
return uout
uout = 'failed'
return uout
def check(self, given_username, given_password, user, password):
"""checks whether username and password are matched with the previous database
parameters :
given_username : string
username which is given by the client
given_password : <PASSWORD>
password given by the client
user : string
username which is already stored
password : string
password with respect to their username which is stored previously
"""
if given_username == user:
if given_password == password:
return 'successful'
return 'failed'
def initilise(self):
"""Initialises user according to the privileges.If client is
admin it initialises admin services.If client is user
it initialises user services.
"""
if self.privilege == 'admin':
self.client = Adminservices(
self.root_directory,
self.current_directory,
self.username,
self.password
)
elif self.privilege == 'user':
self.client = Userservices(
self.root_directory,
self.current_directory,
self.username,
self.password
)
def checklog(self, username):
"""
checks if the username is already logged in from
another client
Parameters:
username : string
stores the users name
"""
log_file = 'loginlog.txt'
with open(log_file) as f_r:
if username in f_r.read():
return True
return False
    def login(self, split_message):
        """Authenticate a client and set up its session.

        Parameters:
            split_message : list[str]
                ['login', username, password] as parsed from the client
                message.

        Returns 'loggedin' when the account already has an active
        session, 'successful' on success (after binding the service
        object via initilise() and recording the name in loginlog.txt),
        or 'failed' on unknown user / bad credentials.
        """
        username = split_message[1]
        # Refuse a second simultaneous session for the same account.
        if self.checklog(username):
            return 'loggedin'
        password = split_message[2]
        # reply is '<name> <password> <privilege>' or 'failed'.
        reply = self.getpassword(username)
        split_message_reply = reply.split(' ', 2) #list
        given_username = split_message_reply[0]
        if given_username == 'failed':
            return 'failed'
        given_password = split_message_reply[1]
        privilege = split_message_reply[2]
        check_reply = self.check(given_username, given_password, username, password)
        if check_reply == 'successful':
            #cwd = str(f'{self.root_directory}\\{username}')
            cwd = os.path.join(self.root_directory, username)
            self.current_directory = cwd
            self.username = username
            self.password = password
            self.privilege = privilege
            # Bind the admin/user service layer for later commands.
            self.initilise()
            # Record the active session so checklog() sees it.
            self.modify_file(self.root_directory, 'loginlog.txt', self.username)
            return 'successful'
        elif check_reply == 'failed':
            return 'failed'
def register(self, user_name, password, privilage):
"""Initialises registeration for clients
parameters:
user_name : string
stores the user name
password : string
stores the password
privilage : string
stores whether the privileges are for admin or a user
"""
if privilage == 'admin':
file_name = str(f'{self.root_directory}\\adminlog.txt')
elif privilage == 'user':
file_name = str(f'{self.root_directory}\\userlog.txt')
file = open(file_name, "a+")
user_data = str(f'\n{user_name},{password}')
file.writelines(user_data)
file.close()
self.create_folder(user_name, privilage)
def create_folder(self, user_name, privilage):
"""
Creating a folder
Parameters:
user_name : string
stores the username
privilage : string
stores whether the privileges are for admin or a user
"""
path = os.path.join(self.root_directory, user_name)
os.mkdir(path)
if privilage == 'admin':
self.create_admin_log(path)
else:
self.create_user_log(path, user_name)
def create_user_log(self, directory, user_name):
"""Creating a user log
Parameters:
directory : string
stores the directory
user_name : string
returns the username
"""
file_name = str(f'{directory}\\log.txt')
file = open(file_name, "w")
data = user_name
user_data = [data, "\n"]
file.writelines(user_data)
file.close()
self.create_admin_log(directory)
def create_admin_log(self, directory):
"""
login for admin
Parameters:
directory : string
stores the directory
"""
path = self.root_directory #admin directory
file_name = str(f'{path}\\adminlog.txt')
open_file = open(file_name, 'r')
file_lines = open_file.readlines()
num_lines = sum(1 for line in open('adminlog.txt'))
i = 0
numbers = []
names = []
for i in range(num_lines):
file = file_lines[i].strip()
find = file.find(",")
numbers.append(find)
names.append(file[:numbers[i]])
for i in names:
self.modify_file(directory, 'log.txt', i)
def modify_file(self, directory, file_name, input1):
"""modifying the file
parameters:
directory : string
stores the directory
file_name : string
stores the filename
input1 : string
stores the userdata
"""
file_name = str(f'{directory}\\{file_name}')
input1 = input1
file = open(file_name, 'a+')
user_data = [input1, "\n"]
file.writelines(user_data)
file.close()
def find(self, user_name, privilage):
"""checks whether the user is already registered or not
if it exists then returns exist or else it returns ok
parameters:
user_name : string
stores the username
privilage : string
stores whether the privileges are for admin or a user
"""
try:
if privilage == 'admin':
log_name = 'adminlog.txt'
else:
log_name = 'userlog.txt'
file_name = str(f'{self.root_directory}\\{log_name}')
open_file = open(file_name, 'r')
file_lines = open_file.readlines()
num_lines = sum(1 for line in open(file_name, 'r'))
i = 0
numbers = []
names = []
for i in range(num_lines):
file = file_lines[i].strip()
find = file.find(",")
numbers.append(find)
names.append(file[:numbers[i]])
if user_name in names:
return 'exist'
return 'ok'
except:
return 'error occured'
    def start_register(self):
        """Validate and execute a registration request.

        Parses self.message as 'register <username> <password> <privilege>'.
        Returns 'exist' when the name is already taken; otherwise registers
        the account, immediately logs it in, and returns login()'s reply.
        """
        split_message = self.message.split(' ', 3)
        username = split_message[1]
        password = split_message[2]
        privilage = split_message[3]
        reply = self.find(username, privilage)
        if reply == 'exist':
            return reply
        self.register(username, password, privilage)
        # Auto-login the freshly registered account.
        split_message = ['login', username, password]
        reply = self.login(split_message)
        return reply
def analize(self, split_message):
"""Analyses all the requests from the client and calls the file handling methods
Parameters :
split_message : list(str, str, str)
stores arguments in a form of list
"""
command = split_message[0]
if self.username == '':
if command == 'login':
try:
reply = self.login(split_message)
assert reply is not None
except AssertionError:
reply = 'Something went wrong'
except:
reply = 'error occurred'
return reply
elif command == 'register':
try:
reply = self.start_register()
assert reply is not None
except AssertionError:
reply = 'Something went wrong'
except:
reply = 'error occurred'
return reply
return 'failed'
else:
if command == 'list':
try:
reply = self.client.list_files()
assert reply is not None
except AssertionError:
reply = 'Something went wrong'
except:
reply = 'error occured'
return reply
elif command == 'change_folder':
try:
argument_1 = split_message[1]
reply = self.client.change_directory(argument_1, self.privilege)
assert reply is not None
except AssertionError:
reply = 'Something went wrong'
except:
reply = 'Failed'
return reply
elif command == 'read_file':
try:
argument_1 = split_message[1]
reply = self.client.start_read(argument_1)
assert reply is not None
except AssertionError:
reply = 'Something went wrong'
except IndexError:
reply = self.client.start_read(None)
except:
reply = 'error occured'
return reply
elif command == 'write_file':
try:
argument_1 = split_message[1]
except IndexError:
reply = 'invalid Argument'
return reply
try:
argument_2 = split_message[2]
reply = self.client.write_file(argument_1, argument_2)
assert reply is not None
except IndexError:
reply = self.client.write_file(argument_1)
assert reply is not None
except AssertionError:
reply | |
`false`.
"""
return pulumi.get(self, "include_control_details")
@property
@pulumi.getter(name="includeNullAndEmpty")
def include_null_and_empty(self) -> Optional[bool]:
"""
Include NULL and empty columns for records migrated to the endpoint. The default is `false`.
"""
return pulumi.get(self, "include_null_and_empty")
@property
@pulumi.getter(name="includePartitionValue")
def include_partition_value(self) -> Optional[bool]:
"""
Shows the partition value within the Kafka message output unless the partition type is `schema-table-type`. The default is `false`.
"""
return pulumi.get(self, "include_partition_value")
@property
@pulumi.getter(name="includeTableAlterOperations")
def include_table_alter_operations(self) -> Optional[bool]:
"""
Includes any data definition language (DDL) operations that change the table in the control data, such as `rename-table`, `drop-table`, `add-column`, `drop-column`, and `rename-column`. The default is `false`.
"""
return pulumi.get(self, "include_table_alter_operations")
@property
@pulumi.getter(name="includeTransactionDetails")
def include_transaction_details(self) -> Optional[bool]:
"""
Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for `transaction_id`, previous `transaction_id`, and `transaction_record_id` (the record offset within a transaction). The default is `false`.
"""
return pulumi.get(self, "include_transaction_details")
@property
@pulumi.getter(name="messageFormat")
def message_format(self) -> Optional[str]:
"""
The output format for the records created on the endpoint. The message format is `JSON` (default) or `JSON_UNFORMATTED` (a single line with no tab).
"""
return pulumi.get(self, "message_format")
@property
@pulumi.getter(name="messageMaxBytes")
def message_max_bytes(self) -> Optional[int]:
"""
The maximum size in bytes for records created on the endpoint The default is `1,000,000`.
"""
return pulumi.get(self, "message_max_bytes")
@property
@pulumi.getter(name="noHexPrefix")
def no_hex_prefix(self) -> Optional[bool]:
"""
Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the `no_hex_prefix` endpoint setting to enable migration of RAW data type columns without adding the `'0x'` prefix.
"""
return pulumi.get(self, "no_hex_prefix")
@property
@pulumi.getter(name="partitionIncludeSchemaTable")
def partition_include_schema_table(self) -> Optional[bool]:
"""
Prefixes schema and table names to partition values, when the partition type is `primary-key-type`. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. The default is `false`.
"""
return pulumi.get(self, "partition_include_schema_table")
@property
@pulumi.getter(name="saslPassword")
def sasl_password(self) -> Optional[str]:
"""
The secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
"""
return pulumi.get(self, "sasl_password")
@property
@pulumi.getter(name="saslUsername")
def sasl_username(self) -> Optional[str]:
"""
The secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
"""
return pulumi.get(self, "sasl_username")
@property
@pulumi.getter(name="securityProtocol")
def security_protocol(self) -> Optional[str]:
"""
Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include `ssl-encryption`, `ssl-authentication`, and `sasl-ssl`. `sasl-ssl` requires `sasl_username` and `sasl_password`.
"""
return pulumi.get(self, "security_protocol")
@property
@pulumi.getter(name="sslCaCertificateArn")
def ssl_ca_certificate_arn(self) -> Optional[str]:
"""
The Amazon Resource Name (ARN) for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
"""
return pulumi.get(self, "ssl_ca_certificate_arn")
@property
@pulumi.getter(name="sslClientCertificateArn")
def ssl_client_certificate_arn(self) -> Optional[str]:
"""
The Amazon Resource Name (ARN) of the client certificate used to securely connect to a Kafka target endpoint.
"""
return pulumi.get(self, "ssl_client_certificate_arn")
@property
@pulumi.getter(name="sslClientKeyArn")
def ssl_client_key_arn(self) -> Optional[str]:
"""
The Amazon Resource Name (ARN) for the client private key used to securely connect to a Kafka target endpoint.
"""
return pulumi.get(self, "ssl_client_key_arn")
@property
@pulumi.getter(name="sslClientKeyPassword")
def ssl_client_key_password(self) -> Optional[str]:
"""
The password for the client private key used to securely connect to a Kafka target endpoint.
"""
return pulumi.get(self, "ssl_client_key_password")
@property
@pulumi.getter
def topic(self) -> Optional[str]:
"""
Kafka topic for migration. Defaults to `kafka-default-topic`.
"""
return pulumi.get(self, "topic")
@pulumi.output_type
class EndpointKinesisSettings(dict):
    """
    Kinesis target settings for a DMS endpoint (auto-generated Pulumi
    output type).  Acts as a dict keyed by the camelCase wire names while
    exposing a snake_case property getter for each setting.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map a camelCase dict key to its snake_case property name and warn
        # the caller to use the property getter instead of raw dict access.
        suggest = None
        if key == "includeControlDetails":
            suggest = "include_control_details"
        elif key == "includeNullAndEmpty":
            suggest = "include_null_and_empty"
        elif key == "includePartitionValue":
            suggest = "include_partition_value"
        elif key == "includeTableAlterOperations":
            suggest = "include_table_alter_operations"
        elif key == "includeTransactionDetails":
            suggest = "include_transaction_details"
        elif key == "messageFormat":
            suggest = "message_format"
        elif key == "partitionIncludeSchemaTable":
            suggest = "partition_include_schema_table"
        elif key == "serviceAccessRoleArn":
            suggest = "service_access_role_arn"
        elif key == "streamArn":
            suggest = "stream_arn"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in EndpointKinesisSettings. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Dict-style access with a deprecation-style warning for known keys.
        EndpointKinesisSettings.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        # dict.get with the same camelCase-key warning as __getitem__.
        EndpointKinesisSettings.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 include_control_details: Optional[bool] = None,
                 include_null_and_empty: Optional[bool] = None,
                 include_partition_value: Optional[bool] = None,
                 include_table_alter_operations: Optional[bool] = None,
                 include_transaction_details: Optional[bool] = None,
                 message_format: Optional[str] = None,
                 partition_include_schema_table: Optional[bool] = None,
                 service_access_role_arn: Optional[str] = None,
                 stream_arn: Optional[str] = None):
        """
        :param bool include_control_details: Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. The default is `false`.
        :param bool include_null_and_empty: Include NULL and empty columns in the target. The default is `false`.
        :param bool include_partition_value: Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. The default is `false`.
        :param bool include_table_alter_operations: Includes any data definition language (DDL) operations that change the table in the control data. The default is `false`.
        :param bool include_transaction_details: Provides detailed transaction information from the source database. The default is `false`.
        :param str message_format: Output format for the records created. Defaults to `json`. Valid values are `json` and `json_unformatted` (a single line with no tab).
        :param bool partition_include_schema_table: Prefixes schema and table names to partition values, when the partition type is primary-key-type. The default is `false`.
        :param str service_access_role_arn: Amazon Resource Name (ARN) of the IAM Role with permissions to write to the Kinesis data stream.
        :param str stream_arn: Amazon Resource Name (ARN) of the Kinesis data stream.
        """
        # Only explicitly supplied settings are stored in the underlying dict.
        if include_control_details is not None:
            pulumi.set(__self__, "include_control_details", include_control_details)
        if include_null_and_empty is not None:
            pulumi.set(__self__, "include_null_and_empty", include_null_and_empty)
        if include_partition_value is not None:
            pulumi.set(__self__, "include_partition_value", include_partition_value)
        if include_table_alter_operations is not None:
            pulumi.set(__self__, "include_table_alter_operations", include_table_alter_operations)
        if include_transaction_details is not None:
            pulumi.set(__self__, "include_transaction_details", include_transaction_details)
        if message_format is not None:
            pulumi.set(__self__, "message_format", message_format)
        if partition_include_schema_table is not None:
            pulumi.set(__self__, "partition_include_schema_table", partition_include_schema_table)
        if service_access_role_arn is not None:
            pulumi.set(__self__, "service_access_role_arn", service_access_role_arn)
        if stream_arn is not None:
            pulumi.set(__self__, "stream_arn", stream_arn)
    @property
    @pulumi.getter(name="includeControlDetails")
    def include_control_details(self) -> Optional[bool]:
        """
        Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. The default is `false`.
        """
        return pulumi.get(self, "include_control_details")
    @property
    @pulumi.getter(name="includeNullAndEmpty")
    def include_null_and_empty(self) -> Optional[bool]:
        """
        Include NULL and empty columns in the target. The default is `false`.
        """
        return pulumi.get(self, "include_null_and_empty")
    @property
    @pulumi.getter(name="includePartitionValue")
    def include_partition_value(self) -> Optional[bool]:
        """
        Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. The default is `false`.
        """
        return pulumi.get(self, "include_partition_value")
    @property
    @pulumi.getter(name="includeTableAlterOperations")
    def include_table_alter_operations(self) -> Optional[bool]:
        """
        Includes any data definition language (DDL) operations that change the table in the control data. The default is `false`.
        """
        return pulumi.get(self, "include_table_alter_operations")
    @property
    @pulumi.getter(name="includeTransactionDetails")
    def include_transaction_details(self) -> Optional[bool]:
        """
        Provides detailed transaction information from the source database. The default is `false`.
        """
        return pulumi.get(self, "include_transaction_details")
    @property
    @pulumi.getter(name="messageFormat")
    def message_format(self) -> Optional[str]:
        """
        Output format for the records created. Defaults to `json`. Valid values are `json` and `json_unformatted` (a single line with no tab).
        """
        return pulumi.get(self, "message_format")
    @property
    @pulumi.getter(name="partitionIncludeSchemaTable")
    def partition_include_schema_table(self) -> Optional[bool]:
        """
        Prefixes schema and table names to partition values, when the partition type is primary-key-type. The default is `false`.
        """
        return pulumi.get(self, "partition_include_schema_table")
    @property
    @pulumi.getter(name="serviceAccessRoleArn")
    def service_access_role_arn(self) -> Optional[str]:
        """
        Amazon Resource Name (ARN) of the IAM Role with permissions to write to the Kinesis data stream.
        """
        return pulumi.get(self, "service_access_role_arn")
    @property
    @pulumi.getter(name="streamArn")
    def stream_arn(self) -> Optional[str]:
        """
        Amazon Resource Name (ARN) of the Kinesis data stream.
        """
        return pulumi.get(self, "stream_arn")
@pulumi.output_type
class EndpointMongodbSettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authMechanism":
suggest = "auth_mechanism"
| |
= x0+w//2
i1 = int(x1+0.5)
i2 = int(x2+0.5)
if i2 >= n :
i2 = n-1
i1 = i2-w+1
if i1 < 0 :
i1 = 0
i2 = i1+w-1
xx = x[i1:i2+1]
yy = y[i1:i2+1]
coef,cov = optimize.curve_fit (quadratic_func,xx,yy)
c,b,a = coef # BACKWARDS!
# CHECK THAT THE SOLUTION YIELDS A POSITIVE PEAK
if (positive and b <= 0.0) or (a == 0.0) :
return np.nan,coef
# CHECK THAT SOLUTION REMAINS WITHIN THE WINDOW
xp = -0.5*b/a
if xp < x1 or xp > x2 :
return np.nan,coef
return xp,coef
def show_hdu (hdu, vmin=None, vmax=None, aspect=None, colourbar=False, flip=False, kappa=3.0, fits_coords=True) :
    """
    Display an image from a FITS HDU using pixel-centered coordinates.

    vmin/vmax explicitly set the displayed intensity range; otherwise, if
    "kappa" is given, only the region median +/- kappa*stddev is displayed.
    If "flip" is True, the image is displayed with the numpy y-origin on
    top (computer-science convention); the lower left corner of the bottom
    left pixel is then (x,y) = (-0.5,NY-0.5) and the upper right corner of
    the upper right pixel is (NX-0.5,-0.5).
    If "fits_coords" is True (and flip is False), pixel coordinates follow
    the FITS standard: the lower left corner of the lower left pixel is
    (-0.5,-0.5) and the upper right corner of the upper right pixel is
    (NX+0.5,NY+0.5).
    Returns the matplotlib image object.
    """
    hdr = hdu.header
    xmin,xmax,ymin,ymax,zmin,zmax = get_image_limits (hdu)
    xmin = -0.5
    xmax += 0.5
    # GET COORDINATES OF EXTREME IMAGE LIMITS INCLUDING PIXEL SIZES
    if flip :
        ymin = ymax+0.5
        ymax = -0.5
    elif fits_coords :
        flip = False
        xmin,ymin = -0.5,-0.5
        ymax += 0.5
    # kappa-clipped display range unless explicit vmin/vmax are given
    zmed,zsig = np.median(hdu.data),np.std(hdu.data)
    if vmax is not None :
        zmax = vmax
    elif kappa is not None :
        zmax = zmed+kappa*zsig
    if vmin is not None :
        zmin = vmin
    elif kappa is not None :
        zmin = zmed-kappa*zsig
    plt.clf()
    if flip :
        origin = 'upper'
    else :
        origin = 'lower'
    # force a float copy so integer data displays correctly
    data = np.array(hdu.data,dtype=float)+0.
    im = plt.imshow (data, interpolation='none', aspect=aspect, origin=origin,
                extent=(xmin,xmax,ymin,ymax), vmax=zmax, vmin=zmin)
    if colourbar :
        plt.colorbar(im)
    return im
def vector2Table (hdu, xlabel='wavelength',ylabel='flux') :
    """
    Convert a 1-D spectrum stored in a FITS HDU into an astropy Table.

    The wavelength scale, if present, is taken from a simple linear WCS
    (CRVAL1/CDELT1/CRPIX1).  When CUNIT1 is absent but a wavelength scale
    exists, the values are assumed to be Angstroms and converted to nm.

    Returns the Table (columns xlabel, ylabel, meta = the FITS header),
    or None when the HDU is not 1-D.
    """
    hdr = hdu.header
    if hdr['NAXIS'] != 1 :
        logging.error ('vector2Table can only construct 1-D tables!')
        return None
    nw = hdr['NAXIS1']
    pixl = np.arange(nw)
    wave = None
    # GET FLUX (BSCALE/BZERO rescaling deliberately disabled: astropy
    # normally applies it automatically when reading the HDU)
    bscale = 1.0
    bzero = 0.0
    flux = hdu.data*bscale+bzero
    # GET WAVELENGTH from a simple linear WCS
    if 'CRVAL1' in hdr and 'CDELT1' in hdr :
        crpix1 = hdr.get('CRPIX1', 1)
        w0 = hdr['CRVAL1']
        dwdx = hdr['CDELT1']
        # FITS pixels are 1-based: world = CRVAL1 + CDELT1*(p - CRPIX1),
        # so for the 0-based index pixl, p = pixl+1.
        # bugfix: the original used (crpix1-1), shifting the whole
        # wavelength scale by one pixel.
        wave = w0+dwdx*(pixl+1-crpix1)
    # GET UNITS
    if 'CUNIT1' in hdr :
        cunit1 = hdr['CUNIT1']
    elif wave is not None : # ASSUME ASTRONOMERS USE ANGSTROMS
        cunit1 = 'nm'
        wave /= 10.
    else :
        cunit1 = 'pix'
    # CONSTRUCT Table
    t = Table()
    if wave is not None :
        t[xlabel] = Column(wave,unit=cunit1, description=xlabel)
    else :
        t[xlabel] = Column(pixl,unit=cunit1, description=xlabel)
    t[ylabel] = Column(flux,unit='unknown', description=ylabel)
    t.meta = hdr
    return t
def read_tables (pathname=None, fmt=None) :
    """
    Read spectra as a list of Tables from one or more files.

    If no format string is given, the pathname is the single file to read.
    If "fmt" is given (e.g. 'spectrum_*.txt'), multiple files matching the
    pattern are read and "pathname" is the directory that contains them.

    Returns (tables, header) where header is the last relevant FITS header
    seen (None for ascii input), or None for an unsupported extension.
    """
    tables = []
    header = None
    # Resolve the extension and the full path/pattern to read.
    if fmt is not None :
        name,ftype = os.path.splitext(fmt)
        fullname = pathname+'/'+fmt
    else :
        name,ftype = os.path.splitext(pathname)
        fullname = pathname
    if ftype not in PYFU_EXTENSIONS :
        logging.error ('unknown file format for {0} or missing prefix format string: {1}'.format(pathname,ftype))
        return None
    files = get_list_of_paths_and_filenames (fullname)
    for f,name in files :
        if ftype == '.fit' or ftype == '.fits' or ftype == '.fits.gz' :
            hdus = fits.open (f)
            header = hdus[0].header
            for i in range(1,len(hdus)) :
                hdu = hdus[i]
                hdr = hdu.header
                # BINARY TABLE
                if 'XTENSION' in hdr and hdr['XTENSION'] == 'BINTABLE' :
                    header = hdr
                    t = Table.read (hdus,hdu=i)
                    t.meta['FILENAME'] = name+'#{0}'.format(i)
                    tables.append(t)
                # 1-D "IMAGE"
                elif 'NAXIS1' in hdr :
                    t = vector2Table (hdu)
                    if t is not None :
                        t.meta['FILENAME'] = name
                        tables.append(t)
        # READ CONGLOMERATED TABLE (single ascii file, columns 'key__<idx>')
        elif fmt is None :
            logging.info ('reading conglomerated ascii spectra {0} ...'.format(f))
            try :
                t = Table.read (f, format=PYFU_EXTENSIONS[ftype])
                # SEPARATE INTO INDIVIDUAL TABLES
                tabs = {}
                for key in t.colnames :    # bugfix: colnames is a list, not a method
                    if '__' in key :
                        try :
                            i = key.rindex('_')
                            idx = int(key[i+1:])
                            oldkey = key[:i-1]
                            if not idx in tabs :
                                tabs[idx] = Table()
                                # bugfix: metadata was set on the dict
                                # ("tabs.meta") with a stray unary "+name"
                                tabs[idx].meta['FILENAME'] = name+'#{0}'.format(idx)
                                tables.append(tabs[idx])
                            tabs[idx][oldkey] = t[key]
                        except ValueError :
                            pass
            except Exception as e :
                logging.info (str(e))
        # MULTIPLE TEXT FILES
        else :
            logging.info ('reading ascii spectrum {0} ...'.format(f))
            # bugfix: "extensions" was an undefined name; f is already a path
            t = Table.read (f,format=PYFU_EXTENSIONS[ftype])
            if 'FILENAME' not in t.meta :
                t.meta['FILENAME'] = f
            tables.append(t)
    # RETURN RESULT
    return tables,header
def get_image_limits (hdu,mode='number') :
    """
    Return (xmin, xmax, ymin, ymax, zmin, zmax) for an image stored in a
    FITS HDU, in 0-based python pixel coordinates.  With mode='outside'
    the x/y limits are padded by half a pixel on each side (e.g. for use
    as pyplot.imshow extents); NaNs in the data are ignored for z.
    """
    header = hdu.header
    pixels = hdu.data
    x_lim = [0, header['NAXIS1'] - 1]
    y_lim = [0, header['NAXIS2'] - 1]
    if mode == 'outside' :
        # include the physical size of the pixels themselves
        x_lim = [x_lim[0] - 0.5, x_lim[1] + 0.5]
        y_lim = [y_lim[0] - 0.5, y_lim[1] + 0.5]
    return x_lim[0], x_lim[1], y_lim[0], y_lim[1], np.nanmin(pixels), np.nanmax(pixels)
def centroid1D (yarr,pos,width, get_sigma=False, get_fwhm=False, subt_bkg=True) :
    """
    Get the centroid of a 1-D array within a window of the given width
    placed (roughly) around index "pos".

    yarr : array-like intensities
    pos : approximate peak position (index)
    width : window width in pixels
    get_sigma : also return a 2nd-moment width estimate (3*sigma)
    get_fwhm : also return a FWHM estimate from linear interpolation at
        half maximum
    subt_bkg : subtract the window minimum as a background first

    Returns cntrd[,width][,fwhm] depending on the flags.
    """
    # clip the window [i1,i2] to the array bounds, keeping its length
    w = int(width-1)//2
    i1 = int(pos-w)
    if i1 < 0 : i1=0
    i2 = int(i1+width-1)
    if i2 >= len(yarr) : i2=len(yarr)-1
    i1 = int(i2-width+1)
    n = len(yarr)
    xarr = np.arange(n)
    x = xarr[i1:i2+1]
    y = yarr[i1:i2+1]
    if subt_bkg :
        bkg = np.min(y)
    else :
        bkg = 0.
    # intensity-weighted mean position and 2nd-moment width (3*sigma)
    cntrd = np.sum(x*(y-bkg))/np.sum(y-bkg)
    width = 3.*np.sqrt(np.abs(np.sum((y-bkg)*(x-cntrd)**2)/np.sum(y-bkg)))
    # walk outwards from the centroid pixel to the half-maximum crossings
    i = int(cntrd+0.5)
    mx = yarr[i]
    i1,i2 = i-1,i+1
    while i1 > 0 and yarr[i1] > 0.5*mx : i1 -= 1
    while i2 < n-1 and yarr[i2] > 0.5*mx : i2 += 1
    # linear interpolation of the two half-maximum crossing positions
    x1 = (0.5*mx-yarr[i1]*(i1+1)+yarr[i1+1]*i1)/(yarr[i1+1]-yarr[i1])
    x2 = (0.5*mx-yarr[i2-1]*i2+yarr[i2]*(i2-1))/(yarr[i2]-yarr[i2-1])
    fwhm = x2-x1
    # fall back to the integer crossing distance if interpolation misbehaves
    if np.abs(fwhm-(i2-i1)) > 1 :
        fwhm = i2-i1
    if not get_sigma and not get_fwhm :
        return cntrd
    elif get_sigma and not get_fwhm :
        return cntrd,width
    elif get_fwhm and not get_sigma :
        return cntrd,fwhm
    else :
        return cntrd,width,fwhm
def peak_local_max_1D (arr, min_distance=5, threshold_abs=None, threshold_rel=None) :
    """
    Simple 1-D replacement for skimage.feature.peak_local_max().

    arr : 1-D array of intensities
    min_distance : minimum separation (in pixels) between reported peaks;
        also the comparison distance used to detect a local maximum
    threshold_abs : minimum intensity a peak must reach
    threshold_rel : minimum intensity relative to np.max(arr)
        (used only when threshold_abs is None)

    Returns a list of peak indices; of two candidates closer than
    min_distance, only the stronger one is kept.
    """
    if threshold_abs is not None :
        threshold = threshold_abs
    elif threshold_rel is not None :
        threshold = np.max(arr)*threshold_rel
    else :
        threshold = np.min(arr)    # no-op threshold
    n = len(arr)
    peaks = []    # accepted [index, value] pairs
    for i in range(min_distance,n-min_distance) :
        arri = arr[i]
        # bugfix: the computed threshold was never applied
        if arri < threshold :
            continue
        if arri > arr[i-min_distance] and arri > arr[i+min_distance] :
            if peaks and i-peaks[-1][0] <= min_distance :
                # too close to the previous peak: keep only the stronger
                if peaks[-1][1] < arri :
                    peaks[-1] = [i,arri]
            else :
                peaks.append([i,arri])
    # bugfix: the original returned peaks[:][0] — just the first
    # [index,value] pair (and an IndexError when no peak was found);
    # return the list of peak positions instead.
    return [p[0] for p in peaks]
def centroid (x,y,m, subtract_median=False, subtract_min=False) :
    """
    Returns the centroid and a measure of the width of an array y(x)
    for m values around the peak.
    If "subtract_median" is True, then the median value is first subtracted;
    otherwise, if "subtract_min" is True, the minimum is subtracted.
    Returns (nan, nan) when the background-subtracted sum vanishes or is nan.

    NOTE(review): the centroid loop runs over range(peak-m//2, peak+m//2) while
    the width loop below runs over range(peak-m//2, peak+m//2+1) — the width
    uses one extra point. Confirm whether this asymmetry is intended.
    """
    n = len(x)
    sumxy = 0.0
    sumy = 0.0
    ysub = 0.0
    if subtract_median :
        ysub = np.median(y)
    elif subtract_min :
        ysub = np.min(y)
    peak = np.argmax(y) # INDEX OF HIGHEST PEAK
    # Intensity-weighted sum over a window of ~m points around the peak,
    # clipped to the array bounds.
    for i in range(peak-m//2,peak+m//2) :
        if i >= 0 and i < n :
            sumxy += (y[i]-ysub)*x[i]
            sumy += y[i]-ysub
    if np.isnan(sumy) or sumy == 0.0 :
        return np.nan,np.nan
    x0 = sumxy/sumy
    # Second central moment about the centroid, over the same window (+1 point).
    sumydx2 = 0.0
    for i in range(peak-m//2,peak+m//2+1) :
        if i >= 0 and i < n :
            dx = x[i]-x0
            sumydx2 += (y[i]-ysub)*dx*dx
    # Width estimate: 3*sqrt of the |2nd moment|.
    w = 3.*np.sqrt(np.abs(sumydx2/sumy))
    return x0,w
def read_spectrum (filename, hdu=1) :
    """
    Extracts a spectrum table from a FITS or ascii table, chosen by suffix:
    .csv -> ascii.csv, .txt/.dat -> ascii.tab, .fits -> FITS (HDU `hdu`,
    or the primary HDU converted via vector2Table when it is the only one).
    Exits with status 1 for any other suffix.
    """
    if filename.endswith ('.csv') :
        return Table.read (filename, format='ascii.csv')
    if filename.endswith ('.txt') or filename.endswith ('.dat') :
        return Table.read (filename, format='ascii.tab')
    if filename.endswith ('.fits') :
        hdus = fits.open (filename)
        if len(hdus) == 1 :
            # Single-HDU file: spectrum lives in the primary HDU as vectors.
            return vector2Table (hdus[0])
        return Table.read (hdus, hdu=hdu)
    logging.error ('Unable to read {0}'.format(filename))
    sys.exit(1)
def write_spectra (filename, spectra, pheader, overwrite=True) :
    """
    Writes a list of spectra Tables to a FITS table file, one binary-table
    HDU per spectrum.
    "pheader" is the header of the original file (becomes the primary HDU).
    "overwrite" is passed to astropy's writeto (BUGFIX: default was the
    string 'True' rather than the boolean True).
    """
    phdu = fits.PrimaryHDU(header=pheader)
    hdus = [phdu]
    for spectrum in spectra :
        hdus.append (BinTableHDU(spectrum))
    # BUGFIX: the original called a bare, undefined `writeto` and never
    # referenced the assembled HDU list, so nothing was ever written.
    fits.HDUList(hdus).writeto (filename, overwrite=overwrite)
def write_spectrum (tab,filename) :
    """
    Writes a spectrum table to a FITS or ascii table, chosen by the filename
    suffix (.csv, .txt/.dat, .fits).
    Raises Exception for any other suffix.
    """
    # BUGFIX: the body referenced an undefined name `table` instead of the
    # `tab` parameter, so every call raised NameError.
    if filename.endswith ('.csv') :
        tab.write (filename, format='ascii.csv')
    elif filename.endswith ('.txt') or filename.endswith ('.dat') :
        tab.write (filename,format='ascii.tab')
    elif filename.endswith ('.fits') :
        tab.write (filename,format='fits')
    else :
        raise Exception ('Unable to write {0}'.format(filename))
def cubic (x,a,b,c,d=0.) :
    """
    Cubic polynomial a + b*x + c*x**2 + d*x**3.
    BUGFIX: `d` was used but was not a parameter, so every call raised
    NameError. It is now an optional 4th coefficient defaulting to 0 so that
    existing three-coefficient calls keep working.
    """
    return a+b*x+c*x**2+d*x**3
def poly (x,a,b,c,d) :
    """Evaluate the cubic polynomial a + b*x + c*x**2 + d*x**3 at x."""
    terms = (a, b*x, c*x**2, d*x**3)
    return terms[0]+terms[1]+terms[2]+terms[3]
def line (x,a,b) :
    """Straight line with intercept a and slope b, evaluated at x."""
    slope_term = b*x
    return a+slope_term
def cubic_equation (a,b,c,d) :
    """
    Solves the cubic equation
        a*x^3+b*x^2+c*x+d = 0
    by reducing to the depressed cubic
        t^3+p*t+q=0
        x = t-b/(3*a)
        p = (3*a*c-b^2)/(3*a^2)
        q = (2*b^3-9*a*b*c+27*a^2*d)/(27*a^3)
    which, using Vieta's substitution
        t = w-p/(3*w)
    becomes
        w^3+q-p**3/(27*w^3) = 0
    or the quadratic equation
        (w^3)^2+q*(w^3)-p^3/27. = 0
    which has the roots
        w1

    Not yet implemented; always raises NotImplementedError.
    """
    # BUGFIX: `NotImplementedException` is not a Python name, so calls raised
    # NameError instead of the intended signal; use the builtin.
    raise NotImplementedError ('cubic_equation')
def parse_arguments (arguments, readme=None, config=None, parser=None, verbose=False) :
"""
Extends argparse command line parsing with the possibility of using a default YAML dictionary.
The input dictionary "arguments" contains
keyword: {'path':dict_path,'default':actual_default,
'flg':one_char_flag,'type':type,'help':help_text}
dict_path is a string encoding where in the dictionary the value should be placed, e.g.
'path':'scatter:model:polynomial:order'
means that the argument should be placed as {'scatter':{'model':{'polynomial':{'order':HERE}}}}.
If no path is given, then the value is placed at the highest level of the configuration dictionary.
If the path does not end in ":", then the parameter name given is the last path entry, otherwise
it's the parsed name.
argparse is not given any real defaults: 'default' is the default displayed in the help text and
the default used after the argparse arguments have been combined with a YAML dictionary so that
the YAML can supercede the argparse defaults | |
<filename>pymc4/random_variables/continuous.py
"""
PyMC4 continuous random variables.
Wraps selected tfp.distributions (listed in __all__) as pm.RandomVariables.
Implements random variables not supported by tfp as distributions.
"""
# pylint: disable=undefined-all-variable
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
from .random_variable import PositiveContinuousRV, RandomVariable, UnitContinuousRV
from .random_variable import TensorLike, IntTensorLike
class Beta(UnitContinuousRV):
    r"""
    Beta random variable.

    The pdf of this distribution is

    .. math::

       f(x \mid \alpha, \beta) =
           \frac{x^{\alpha - 1} (1 - x)^{\beta - 1}}{B(\alpha, \beta)}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 1, 200)
        alphas = [.5, 5., 1., 2., 2.]
        betas = [.5, 1., 3., 2., 5.]
        for a, b in zip(alphas, betas):
            pdf = st.beta.pdf(x, a, b)
            plt.plot(x, pdf, label=r'$\alpha$ = {}, $\beta$ = {}'.format(a, b))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.ylim(0, 4.5)
        plt.legend(loc=9)
        plt.show()

    ======== ==============================================================
    Support  :math:`x \in (0, 1)`
    Mean     :math:`\dfrac{\alpha}{\alpha + \beta}`
    Variance :math:`\dfrac{\alpha \beta}{(\alpha+\beta)^2(\alpha+\beta+1)}`
    ======== ==============================================================

    Parameters
    ----------
    alpha : float
        alpha > 0.
    beta : float
        beta > 0.

    Notes
    -----
    Beta distribution is a conjugate prior for the parameter :math:`p` of
    the binomial distribution.

    Developer Notes
    ---------------
    Parameter mappings to TensorFlow Probability are as follows:

    - alpha: concentration1
    - beta: concentration0
    """

    def _base_dist(self, alpha: TensorLike, beta: TensorLike, *args, **kwargs):
        # BUGFIX: tfd.Beta's pdf is x**(concentration1-1) * (1-x)**(concentration0-1),
        # so alpha (the exponent on x) must map to concentration1 and beta to
        # concentration0. The mapping was previously swapped.
        return tfd.Beta(concentration1=alpha, concentration0=beta, *args, **kwargs)
class Cauchy(RandomVariable):
    r"""
    Cauchy random variable.

    Also known as the Lorentz or the Breit-Wigner distribution.

    The pdf of this distribution is

    .. math::

       f(x \mid \alpha, \beta) =
           \frac{1}{\pi \beta [1 + (\frac{x-\alpha}{\beta})^2]}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(-5, 5, 500)
        alphas = [0., 0., 0., -2.]
        betas = [.5, 1., 2., 1.]
        for a, b in zip(alphas, betas):
            pdf = st.cauchy.pdf(x, loc=a, scale=b)
            plt.plot(x, pdf, label=r'$\alpha$ = {}, $\beta$ = {}'.format(a, b))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ========================
    Support  :math:`x \in \mathbb{R}`
    Mode     :math:`\alpha`
    Mean     undefined
    Variance undefined
    ======== ========================

    Parameters
    ----------
    alpha : float
        Location parameter
    beta : float
        Scale parameter > 0

    Developer Notes
    ----------------
    Parameter mappings to TensorFlow Probability are as follows:

    - alpha: loc
    - beta: scale
    """

    def _base_dist(self, alpha: TensorLike, beta: TensorLike, *args, **kwargs):
        # CONSISTENCY FIX: forward *args to the base distribution like the
        # sibling classes do (they were previously silently dropped here).
        return tfd.Cauchy(loc=alpha, scale=beta, *args, **kwargs)
class ChiSquared(PositiveContinuousRV):
    r"""
    :math:`\chi^2` random variable.

    The pdf of this distribution is

    .. math::

       f(x \mid \nu) = \frac{x^{(\nu-2)/2}e^{-x/2}}{2^{\nu/2}\Gamma(\nu/2)}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 15, 200)
        for df in [1, 2, 3, 6, 9]:
            pdf = st.chi2.pdf(x, df)
            plt.plot(x, pdf, label=r'$\nu$ = {}'.format(df))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.ylim(0, 0.6)
        plt.legend(loc=1)
        plt.show()

    ======== ===============================
    Support  :math:`x \in [0, \infty)`
    Mean     :math:`\nu`
    Variance :math:`2 \nu`
    ======== ===============================

    Parameters
    ----------
    nu : int
        Degrees of freedom (nu > 0).

    Developer Notes
    ----------------
    The ``nu`` parameter is forwarded to TensorFlow Probability as ``df``.
    The name ChiSquared is retained from PyMC3 for continuity; under the hood
    it is backed by TFP's Chi2 distribution.
    """

    def _base_dist(self, nu: IntTensorLike, *args, **kwargs):
        # Delegate to TFP's Chi2; "nu" is TFP's "df".
        chi2 = tfd.Chi2(*args, df=nu, **kwargs)
        return chi2
class Exponential(PositiveContinuousRV):
    r"""
    Exponential random variable.

    The pdf of this distribution is

    .. math::

       f(x \mid \lambda) = \lambda \exp\left\{ -\lambda x \right\}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 3, 100)
        for lam in [0.5, 1., 2.]:
            pdf = st.expon.pdf(x, scale=1.0/lam)
            plt.plot(x, pdf, label=r'$\lambda$ = {}'.format(lam))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ============================
    Support  :math:`x \in [0, \infty)`
    Mean     :math:`\dfrac{1}{\lambda}`
    Variance :math:`\dfrac{1}{\lambda^2}`
    ======== ============================

    Parameters
    ----------
    lam : float
        Rate or inverse scale (lam > 0)

    Developer Notes
    ----------------
    Parameter mappings to TensorFlow Probability are as follows:

    - lam: rate
    """

    def _base_dist(self, lam: TensorLike, *args, **kwargs):
        # BUGFIX: *args/**kwargs (e.g. name=, validate_args=) were silently
        # dropped here, unlike in the sibling classes; forward them.
        return tfd.Exponential(rate=lam, *args, **kwargs)
class Gamma(PositiveContinuousRV):
    r"""
    Gamma random variable.

    Represents the sum of alpha exponentially distributed random variables,
    each of which has mean beta.

    The pdf of this distribution is

    .. math::

       f(x \mid \alpha, \beta) =
           \frac{\beta^{\alpha}x^{\alpha-1}e^{-\beta x}}{\Gamma(\alpha)}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 20, 200)
        alphas = [1., 2., 3., 7.5]
        betas = [.5, .5, 1., 1.]
        for a, b in zip(alphas, betas):
            pdf = st.gamma.pdf(x, a, scale=1.0/b)
            plt.plot(x, pdf, label=r'$\alpha$ = {}, $\beta$ = {}'.format(a, b))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ===============================
    Support  :math:`x \in (0, \infty)`
    Mean     :math:`\dfrac{\alpha}{\beta}`
    Variance :math:`\dfrac{\alpha}{\beta^2}`
    ======== ===============================

    Parameters
    ----------
    alpha : float
        Shape parameter (alpha > 0).
    beta : float
        Rate parameter (beta > 0).

    Developer Notes
    ---------------
    ``alpha`` is forwarded to TensorFlow Probability as ``concentration`` and
    ``beta`` as ``rate``.
    """

    def _base_dist(self, alpha: TensorLike, beta: TensorLike, *args, **kwargs):
        # Delegate to TFP's Gamma with the PyMC (alpha, beta) naming mapped
        # onto TFP's (concentration, rate).
        gamma = tfd.Gamma(*args, concentration=alpha, rate=beta, **kwargs)
        return gamma
class Gumbel(RandomVariable):
    r"""
    Univariate Gumbel random variable.

    The pdf of this distribution is

    .. math::

       f(x \mid \mu, \beta) = \frac{1}{\beta}e^{-(z + e^{-z})}

    where

    .. math::

        z = \frac{x - \mu}{\beta}.

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(-10, 20, 200)
        mus = [0., 4., -1.]
        betas = [2., 2., 4.]
        for mu, beta in zip(mus, betas):
            pdf = st.gumbel_r.pdf(x, loc=mu, scale=beta)
            plt.plot(x, pdf, label=r'$\mu$ = {}, $\beta$ = {}'.format(mu, beta))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ==========================================
    Support  :math:`x \in \mathbb{R}`
    Mean     :math:`\mu + \beta\gamma`, where \gamma is the Euler-Mascheroni constant
    Variance :math:`\frac{\pi^2}{6} \beta^2`
    ======== ==========================================

    Parameters
    ----------
    mu : float
        Location parameter.
    beta : float
        Scale parameter (beta > 0).

    Developer Notes
    ---------------
    ``mu`` is forwarded to TensorFlow Probability as ``loc`` and ``beta`` as
    ``scale``.
    """

    def _base_dist(self, mu: TensorLike, beta: TensorLike, *args, **kwargs):
        # Delegate to TFP's Gumbel with (mu, beta) mapped onto (loc, scale).
        gumbel = tfd.Gumbel(*args, loc=mu, scale=beta, **kwargs)
        return gumbel
class HalfCauchy(PositiveContinuousRV):
    r"""
    Half-Cauchy random variable.

    The pdf of this distribution is

    .. math::

       f(x \mid \beta) = \frac{2}{\pi \beta [1 + (\frac{x}{\beta})^2]}

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 5, 200)
        for b in [0.5, 1.0, 2.0]:
            pdf = st.cauchy.pdf(x, scale=b)
            plt.plot(x, pdf, label=r'$\beta$ = {}'.format(b))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ========================
    Support  :math:`x \in [0, \infty)`
    Mode     0
    Mean     undefined
    Variance undefined
    ======== ========================

    Parameters
    ----------
    beta : float
        Scale parameter (beta > 0).

    Developer Notes
    ----------------
    Parameter mappings to TensorFlow Probability are as follows:

    - beta: scale

    In PyMC3, HalfCauchy's location was always zero. However, in a future PR, this can be changed.
    """

    def _base_dist(self, beta: TensorLike, *args, **kwargs):
        # BUGFIX: *args/**kwargs (e.g. name=, validate_args=) were silently
        # dropped here, unlike in the sibling classes; forward them.
        # The location is fixed at zero (PyMC3 convention, see docstring).
        return tfd.HalfCauchy(loc=0, scale=beta, *args, **kwargs)
class HalfNormal(PositiveContinuousRV):
    r"""
    Half-normal random variable.

    The pdf of this distribution is

    .. math::

       f(x \mid \tau) =
           \sqrt{\frac{2\tau}{\pi}}
           \exp\left(\frac{-x^2 \tau}{2}\right)

       f(x \mid \sigma) =\sigma
           \sqrt{\frac{2}{\pi}}
           \exp\left(\frac{-x^2}{2\sigma^2}\right)

    .. note::

       The parameters ``sigma``/``tau`` (:math:`\sigma`/:math:`\tau`) refer to
       the standard deviation/precision of the unfolded normal distribution, for
       the standard deviation of the half-normal distribution, see below. For
       the half-normal, they are just two parameterisation :math:`\sigma^2
       \equiv \frac{1}{\tau}` of a scale parameter

    .. plot::

        import matplotlib.pyplot as plt
        import numpy as np
        import scipy.stats as st
        plt.style.use('seaborn-darkgrid')
        x = np.linspace(0, 5, 200)
        for sigma in [0.4, 1., 2.]:
            pdf = st.halfnorm.pdf(x, scale=sigma)
            plt.plot(x, pdf, label=r'$\sigma$ = {}'.format(sigma))
        plt.xlabel('x', fontsize=12)
        plt.ylabel('f(x)', fontsize=12)
        plt.legend(loc=1)
        plt.show()

    ======== ==========================================
    Support  :math:`x \in [0, \infty)`
    Mean     :math:`\sqrt{\dfrac{2}{\tau \pi}}` or :math:`\dfrac{\sigma \sqrt{2}}{\sqrt{\pi}}`
    Variance :math:`\dfrac{1}{\tau}\left(1 - \dfrac{2}{\pi}\right)` or :math:`\sigma^2\left(1 - \dfrac{2}{\pi}\right)`
    ======== ==========================================

    Parameters
    ----------
    sigma : float
        Scale parameter :math:`sigma` (``sigma`` > 0) (only required if ``tau`` is not specified).

    Examples
    --------
    .. code-block:: python

        @pm.model
        def model():
            x = pm.HalfNormal('x', sigma=10)

    Developer Notes
    ---------------
    Parameter mappings to TensorFlow Probability are as follows:

    - sigma: scale
    """

    def _base_dist(self, sigma: TensorLike, *args, **kwargs):
        # CONSISTENCY FIX: forward *args to the base distribution like the
        # sibling classes do (they were previously silently dropped here).
        return tfd.HalfNormal(scale=sigma, *args, **kwargs)
class HalfStudentT(PositiveContinuousRV):
r"""
Half Student's T random variable.
The pdf of this distribution is
.. math::
f(x \mid \sigma,\nu) =
\frac{2\;\Gamma\left(\frac{\nu+1}{2}\right)}
{\Gamma\left(\frac{\nu}{2}\right)\sqrt{\nu\pi\sigma^2}}
\left(1+\frac{1}{\nu}\frac{x^2}{\sigma^2}\right)^{-\frac{\nu+1}{2}}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
plt.style.use('seaborn-darkgrid')
x = np.linspace(0, 5, 200)
sigmas = [1., 1., 2., 1.]
nus = [.5, 1., 1., 30.]
for sigma, nu in zip(sigmas, nus):
pdf = | |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_data.ipynb (unless otherwise specified).
# Public API of this module (what `from <module> import *` exports);
# kept in sync with the source notebook by nbdev.
__all__ = ['VIIRS750_download', 'BandsFilter', 'BandsRename', 'MergeTiles', 'BandsAssertShape', 'ActiveFires',
           'MirCalc', 'BaseDataset', 'Viirs750Dataset', 'MCD64Dataset', 'FireCCI51Dataset', 'AusCoverDataset',
           'MTBSDataset', 'ICNFDataset', 'Region2Tiles']
# Cell
import numpy as np
import pandas as pd
import re
import sys
from rasterio.coords import disjoint_bounds
from tqdm import tqdm
import scipy.io as sio
from functools import partial
from netCDF4 import Dataset
from pyhdf.SD import SD, SDC
from geopandas import GeoDataFrame
from shapely.geometry import Point
import matplotlib.pyplot as plt
from concurrent.futures import ThreadPoolExecutor
from warnings import warn
from nbdev.imports import test_eq
from geoget.download import *
from .core import *
from .geo import *
# Cell
class VIIRS750_download(Ladsweb):
    "Utility for downloading VIIRS 750m data to create the dataset."
    def __init__(self, region, tstart, tend):
        # Bands needed to build the dataset: reflectances, radiances and
        # viewing/illumination angles.
        bands = ['Reflectance_M5', 'Reflectance_M7', 'Reflectance_M10',
                 'Radiance_M12', 'Radiance_M15', 'SolarZenithAngle',
                 'SatelliteZenithAngle']
        # Product 'NPP_VMAES_L1', collection '5000', daytime scenes only.
        super().__init__('NPP_VMAES_L1', '5000', tstart, tend, list(region.bbox),
                         bands, daynight='D', repPixSize=region.pixel_size)
# Cell
class BandsFilter():
    """Drop every band of the data dictionary whose name is not in `to_keep`.
    Mutates and returns the same dictionary."""
    def __init__(self, to_keep: list):
        # A single band name is accepted and wrapped in a list.
        self.to_keep = [to_keep] if not isinstance(to_keep, list) else to_keep
    def __call__(self, data:dict, *args, **kwargs) -> dict:
        # Snapshot the keys first: the dict is modified while filtering.
        for band in list(data):
            if band not in self.to_keep:
                del data[band]
        return data
# Cell
class BandsRename():
    """Rename dictionary bands pairwise from `input_names` to `output_names`.
    Mutates and returns the same dictionary."""
    def __init__(self, input_names:list, output_names:list):
        # Single names are accepted and wrapped in lists.
        if not isinstance(input_names, list): input_names = [input_names]
        if not isinstance(output_names, list): output_names = [output_names]
        self.input_names = input_names
        self.output_names = output_names
    def __call__(self, data:dict, *args, **kwargs) -> dict:
        for old_name, new_name in zip(self.input_names, self.output_names):
            data[new_name] = data.pop(old_name)
        return data
# Cell
class MergeTiles():
    """Merge a list of overlapping tiles per band into a single array.
    Tiles are visited in order of increasing nan-mean of the reference `band`,
    and each pixel keeps the first non-nan value whose reference mask is valid."""
    def __init__(self, band:str):
        self.band = band
    def __call__(self, data:dict, *args, **kwargs) -> dict:
        # Order tiles by the mean of the reference band (nan-aware).
        tile_means = np.nanmean(np.array(data[self.band]), axis=(1,2))
        order = np.argsort(tile_means)
        masks = np.array(data[self.band])[order]
        for key in data:
            # Start from an all-nan canvas of the tile shape.
            merged = np.zeros_like(data[key][0])*np.nan
            for tile, mask in zip(np.array(data[key])[order], masks):
                fill = (np.isnan(merged)) & (~np.isnan(mask))
                merged[fill] = tile[fill]
            data[key] = merged
        return data
# Cell
class BandsAssertShape():
    """Check that every band in the data dictionary matches the region shape.
    Bands may be single arrays or lists of arrays; 3-D arrays are assumed to
    have time as the leading axis, which is ignored for the comparison.
    Raises Exception on the first mismatch; returns the data unchanged."""
    def __call__(self, data:dict, *args, **kwargs) -> dict:
        rshape = kwargs['cls'].region.shape
        for k in kwargs['cls'].bands:
            # Normalize to a list so single arrays and tile lists share one path
            # (the original duplicated this check in two branches).
            items = data[k] if isinstance(data[k], list) else [data[k]]
            for d in items:
                shape = d.shape
                if len(shape) == 3: # first is time
                    shape = shape[1:]
                if shape != rshape:
                    error = f'{k} shape {shape} does not match region shape {rshape}'
                    raise Exception(error)
        return data
# Cell
class ActiveFires():
    """Get active fires and interpolate to grid.

    Loads an active-fire CSV (with at least acq_date, longitude, latitude and
    frp columns — presumably FIRMS-style data; verify against the source file)
    and, when called, rasterizes the detections for the given time onto the
    region grid as an 'FRP' band.
    """
    def __init__(self, file):
        # Path to the CSV file; the table is loaded eagerly.
        self.file = file
        # Region coordinates, lazily fetched on first __call__.
        self.lon = None
        self.lat = None
        self.df = self.load_csv()
    def load_csv(self):
        """Read the CSV with acq_date parsed as dates and used as the index."""
        return pd.read_csv(self.file, parse_dates=['acq_date']).set_index('acq_date')
    def __call__(self, data, time, *args, **kwargs):
        """Add an 'FRP' band to `data` for the detections matching `time`.
        Pixels with no fire are NaN (sums of 0 are also mapped to NaN)."""
        if self.lon is None or self.lat is None:
            # kwargs['cls'] is the dataset object carrying the region.
            self.lon, self.lat = kwargs['cls'].region.coords()
        frp = self.df[self.df.index == time]
        if len(frp) > 0:
            geometry = [Point(xy) for xy in zip(frp['longitude'], frp['latitude'])]
            frp = GeoDataFrame(frp, geometry=geometry)
            # merge_alg='add' sums FRP where several detections hit one pixel.
            out = rasterize(frp, 'frp', kwargs['cls'].region, merge_alg='add')
            out[out==0] = np.nan
        else: out = np.zeros(kwargs['cls'].region.shape)*np.nan
        data['FRP'] = out
        return data
# Cell
class MirCalc():
    """Compute a MIR (middle-infrared) reflectance band from the MIR and TIR
    radiance bands plus the solar zenith angle, and store it in the data dict
    under `output_name`."""
    def __init__(self, solar_zenith_angle:str, mir_radiance:str, tir_radiance:str,
                 output_name:str='MIR'):
        # Band names (keys into the data dictionary) for inputs and output.
        self.sza = solar_zenith_angle
        self.r_mir = mir_radiance
        self.r_tir = tir_radiance
        self.output_name = output_name
    def __call__(self, data:dict, *args, **kwargs):
        sza = data[self.sza]
        mir = data[self.r_mir]
        tir = data[self.r_tir]
        # kwargs['cls'] is the dataset object; its name selects the sensor constants.
        data[self.output_name] = self.refl_mir_calc(mir, tir, sza, sensor=kwargs['cls'].name)
        return data
    def refl_mir_calc(self, mir, tir, sza, sensor):
        """
        Computes the MIR reflectance from MIR radiance and Longwave IR radiance.
        sensor can be "VIIRS375" or "VIIRS750"
        sza is the solar zenith angle
        for VIIRS375, mir is band I4 and tir band I5
        for VIIRS750, mir is band M12 and tir band M15
        returns a matrix of MIR reflectances with the same shape as mir and tir inputs.
        Missing values are represented by 0.

        NOTE(review): non-positive mir/tir values are set to NaN *in place*,
        so the caller's input arrays are modified — confirm this is intended.
        """
        # Central wavelengths [micrometers] of the relevant VIIRS bands.
        lambda_M12= 3.6966
        lambda_M15=10.7343
        lambda_I4 = 3.7486
        lambda_I5 = 11.4979
        # Planck radiation constants.
        c1 = 1.1911e8 # [ W m-2 sr-1 (micrometer -1)-4 ]
        c2 = 1.439e4 # [ K micrometer ]
        # Band-integrated solar irradiance constants.
        E_0_mir_M12 = 11.7881 # M12 newkur_semcab
        E_0_mir_I4= 11.2640 # I4 newkur_semcab
        if sensor=='VIIRS375':
            lambda_mir = lambda_I4
            lambda_tir = lambda_I5
            E_0_mir = E_0_mir_I4
        elif sensor=='VIIRS750':
            lambda_mir = lambda_M12
            lambda_tir = lambda_M15
            E_0_mir = E_0_mir_M12
        else: raise NotImplementedError(
            f'refl_mir_calc not implemented for {sensor}. Available options are VIIRS750 and VIIRS375.')
        # Cosine of the solar zenith angle (degrees -> radians).
        miu_0=np.cos((sza*np.pi)/180)
        mir[mir <= 0] = np.nan
        tir[tir <= 0] = np.nan
        # Brighness temperature from the TIR radiance (inverse Planck function).
        a1 = (lambda_tir**5)
        a = c1/(a1*tir)
        logaritmo = np.log(a+1)
        divisor = lambda_tir*logaritmo
        T = (c2/divisor)
        del a, logaritmo, divisor
        # Plank function evaluated at the MIR wavelength for that temperature.
        divisor2 = (lambda_mir*T)
        exponencial = np.exp(c2/divisor2)
        b = c1*(lambda_mir**-5)
        BT_mir = b/(exponencial-1)
        del divisor2, exponencial, b, T
        # MIR reflectance: remove the thermal contribution and normalize by
        # the reflected-solar term.
        c = (E_0_mir*miu_0)/np.pi
        termo1 = (mir-BT_mir)
        termo2 = (c-BT_mir)
        Refl_mir = termo1/termo2
        # Clip negatives to 0 (0 also doubles as the missing-data value).
        Refl_mir[Refl_mir <= 0] = 0
        return Refl_mir
# Cell
class BaseDataset():
    """Base class for a dataset preprocessing pipeline.

    For each timestamp it lists the input files, opens them, applies a chain
    of processing functions and saves the result as a .mat file. Subclasses
    must implement `list_files`, `find_dates` and `open`.
    """
    def __init__(self, name:str, paths:InOutPath, region:Region,
                 times:pd.DatetimeIndex=None, bands:list=None):
        self.paths = paths
        self.region = region
        self.name = name
        self.times = times
        self.bands = bands
        if self.times is None:
            # No explicit times given: scan the input path for available dates.
            self.times = self.find_dates()
    def list_files(self, time:pd.Timestamp) -> list:
        "This method should return a list of filenames corresponding to the given Timestamp."
        pass
    def find_dates(self):
        """This method should return a pd.DatetimeIndex
        with list of dates present in the data available in the input path."""
        pass
    def match_times(self, other, on='month'):
        "Set the times attribute to match the times of other dataset."
        if on != 'month':
            raise NotImplementedError('match_times is only implemented on month.')
        # Keep only the times whose (year, month) pair also occurs in `other`.
        ym_other = sorted(set([(t.year, t.month) for t in other.times]))
        out = []
        for t in self.times:
            if (t.year, t.month) in ym_other:
                out.append(t)
        self.times = pd.DatetimeIndex(out)
    def filter_times(self, year):
        """To select only a specific year. This can be usefull for testing and
        for adding more new years and avoid reprocessing all the dataset."""
        if year is not None:
            self.times = self.times[self.times.year == year]
    def open(self, files:list) -> dict:
        """This method is used to open a file or list of files for a given
        time period and returns a dictionary with the data ready to be passed
        to the processing functions."""
        pass
    def save(self, time:pd.Timestamp, data:dict, do_compression=True):
        "Saves data in a single file for a specific timestamp in .mat format."
        tstr = time.strftime('%Y%m%d')
        # Output name pattern: <dst>/<dataset name><region name>_<YYYYMMDD>.mat
        filename = f'{self.paths.dst}/{self.name}{self.region.name}_{tstr}.mat'
        sio.savemat(filename, data, do_compression=do_compression)
    def process_one(self, time:pd.Timestamp, proc_funcs:list=[], save=True, **proc_funcs_kwargs):
        """This method defines a processing pipeline consisting of opening the file
        using the `open` method, applying each of the `proc_funcs` to the output of the previous
        and `save` the processed data using save method.
        When save is False the processed data dict is returned instead."""
        tstr = time.strftime('%Y%m%d')
        files = self.list_files(time)
        #try:
        if len(files) > 0:
            data = self.open(files)
            # Shapes are always validated before any user-supplied function runs.
            proc_funcs = [BandsAssertShape()] + proc_funcs
            # Each proc_func receives the dataset object itself as kwargs['cls'].
            kwargs = {'cls': self, **proc_funcs_kwargs}
            for f in proc_funcs:
                data = f(data, time, **kwargs)
            if save:
                self.save(time, data)
            else: return data
        else:
            warn(f'No files for {time}. Skipping to the next time.')
        #except:
        #    msg = f'Unable to process files for {time}. Check if files are corrupted. Skipping to the next time. { sys.exc_info()[0]}'
        #    warn(msg, UserWarning)
    def process_all(self, proc_funcs=[], max_workers=1, **proc_funcs_kwargs):
        """`process_all` runs `process_one` in parallel using the number of workers defined
        by `max_workers` and passes the `proc_funcs` list to `process_one` method"""
        process_one = partial(self.process_one, proc_funcs=proc_funcs, **proc_funcs_kwargs)
        with ThreadPoolExecutor(max_workers) as e:
            # tqdm wraps the map for a progress bar; list() forces evaluation.
            list(tqdm(e.map(process_one, self.times), total=len(self.times)))
    def __repr__(self):
        # One "attribute: value" line per instance attribute.
        return '\n'.join([f'{i}: {o}' for i, o in self.__dict__.items()]) + '\n'
# Cell
class Viirs750Dataset(BaseDataset):
"Subclass of `BaseDataset` to process VIIRS 750-meter bands."
_use_netcdf4 = True
    def __init__(self, paths:InOutPath, region:Region,
                 times:pd.DatetimeIndex=None, bands:list=None):
        """Create the VIIRS750 dataset and drop timestamps with missing band files."""
        super().__init__('VIIRS750', paths, region, times, bands)
        # Keep only the times for which every requested band file exists.
        self.times = self.check_files()
    def list_files(self, time:pd.Timestamp) -> list:
        """List source files whose name contains 'NPP' and the .A<year><doy>. tag.

        NOTE(review): when `time` is not in self.times this falls through and
        returns None (not an empty list); a caller using len() on the result
        would fail — confirm this cannot happen in practice.
        """
        if time in self.times:
            # VIIRS filenames encode the acquisition date as .A<year><day-of-year>.
            dayOfYear = str(time.dayofyear).zfill(3)
            files = self.paths.src.ls(include=['NPP', f'.A{time.year}{dayOfYear}.'])
            return files
    def check_files(self):
        """Return the subset of self.times for which every band has a file."""
        not_missing = []
        for i, t in tqdm(enumerate(self.times), total=len(self.times)):
            files = self.list_files(t)
            # Join the file stems so each band name can be substring-matched.
            files = ';'.join([f.stem for f in files])
            # A time is kept only when every band name appears among the stems.
            if sum([s in files for s in self.bands]) != len(self.bands):
                print(f'Missing files for {t}')
            else: not_missing.append(i)
        return self.times[not_missing]
    def find_dates(self, first:pd.Timestamp=None, last:pd.Timestamp=None):
        """Scan the source folder for filenames matching '.A<year><doy>.' and
        build the sorted, de-duplicated DatetimeIndex of acquisition dates,
        optionally clipped to the closed interval [first, last]."""
        pattern = r'^\w+.A(20[0-9][0-9])([0-3][0-9][0-9])..*$'
        times = []
        for f in self.paths.src.ls():
            x = re.search(pattern, f.stem)
            if x is not None:
                year, doy = map(x.group, [1,2])
                # Convert day-of-year to a calendar date.
                times.append(pd.Timestamp(f'{year}-01-01') + pd.Timedelta(days=int(doy)-1))
        self.times = pd.DatetimeIndex(sorted(set(times)))
        if first is not None:
            self.times = self.times[self.times>=first]
        if last is not None:
            self.times = self.times[self.times<=last]
        return self.times
def open_hdf4(self, files:list) -> dict:
data_dict = {b: [] for b in self.bands}
for s in self.bands:
f = sorted([f for f in files if s in f.name])
if len(f) == 0:
warn(f'No file for {s} found on {files}')
for f0 in f:
hdf_data = SD(str(f0), SDC.READ)
hdf_file = hdf_data.select(s)
hdf_attr = hdf_file.attributes()
| |
if filetable2 != None:
self.reduced_variables[varid+'_2'] = reduced_variable(
variableid = vbase, filetable=filetable2, reduced_var_id = varid+'_2',
reduction_function=(lambda x, vid: reduce2latlon_seasonal_level(x, self.season, num, vid)))
self.composite_plotspecs = {}
self.single_plotspecs={}
self.single_plotspecs[self.plot1_id] = plotspec(
vid=varid+'_1',
zvars = [varid+'_1'], zfunc = (lambda z:z),
plottype = self.plottype)
self.composite_plotspecs[self.plotall_id] = [self.plot1_id]
if filetable2 != None:
self.single_plotspecs[self.plot2_id] = plotspec(
vid=varid+'_2',
zvars = [varid+'_1'], zfunc = (lambda z:z),
plottype = self.plottype)
self.single_plotspecs[self.plot3_id] = plotspec(
vid=varid+'_3',
zvars = [varid+'_1', varid+'_2'], zfunc = (lambda z:z),
plottype = self.plottype)
self.composite_plotspecs[self.plotall_id].append(self.plot2_id)
self.composite_plotspecs[self.plotall_id].append(self.plot3_id)
if(filetable2 != None):
if varid not in lmwg_plot_set2._derived_varnames and varid not in lmwg_plot_set2._level_varnames:
self.reduced_variables[varid+'_2'] = reduced_variable(variableid = varid,
filetable=filetable2,
reduced_var_id=varid+'_2',
reduction_function=(lambda x, vid: reduce2latlon_seasonal(x, self.season, vid)))
# else:
# self.reduced_variables['RAIN_2'] = reduced_variable(
# variableid = 'RAIN', filetable = filetable2, reduced_var_id = varid+'_2',
# reduction_function=(lambda x, vid: reduce2latlon_seasonal(x, self.season, vid)))
# self.reduced_variables['SNOW_2'] = reduced_variable(
# variableid = 'SNOW', filetable = filetable2, reduced_var_id = varid+'_2',
# reduction_function=(lambda x, vid: reduce2latlon_seasonal(x, self.season, vid)))
# self.derived_variables['PREC_2'] = derived_var(
# vid='PREC_2', inputs=['RAIN_2', 'SNOW_2'], func=aplusb)
# level_varnames already did their plots
print 'checking for single == None'
if self.single_plotspecs == None:
print 'it was none'
self.single_plotspecs = {}
self.composite_plotspecs = {}
self.single_plotspecs[self.plot1_id] = plotspec(
vid = varid+'_1',
zvars = [varid+'_1'], zfunc = (lambda z: z),
plottype = self.plottype)
self.composite_plotspecs[self.plotall_id] = [self.plot1_id]
if(filetable2 != None):
self.single_plotspecs[self.plot2_id] = plotspec(
vid = varid+'_2',
zvars = [varid+'_2'], zfunc = (lambda z: z),
plottype = self.plottype)
self.single_plotspecs[self.plot3_id] = plotspec(
vid = varid+'_diff',
zvars = [varid+'_1', varid+'_2'], zfunc = aminusb_2ax,
plottype = self.plottype)
self.composite_plotspecs[self.plotall_id].append(self.plot2_id)
self.composite_plotspecs[self.plotall_id].append(self.plot3_id)
self.computation_planned = True
    def _results(self,newgrid=0):
        # Let the base class compute all plotspec values, then return only the
        # composite ("all plots") entry for this plot set.
        results = plot_spec._results(self,newgrid)
        if results is None: return None
        return self.plotspec_values[self.plotall_id]
###############################################################################
###############################################################################
### Set 3 - Grouped Line plots of monthly climatology: regional air ###
### temperature, precipitation, runoff, snow depth, radiative fluxes, and ###
### turbulent fluxes ###
###############################################################################
###############################################################################
### This should be combined with set6. They share lots of common code.
class lmwg_plot_set3(lmwg_plot_spec):
name = '3 - Grouped Line plots of monthly climatology: regional air temperature, precipitation, runoff, snow depth, radiative fluxes, and turbulent fluxes'
number = '3'
    def __init__(self, filetable1, filetable2, varid, seasonid=None, region=None, aux=None):
        """Set up the plot ids for a one- or two-dataset comparison of `varid`
        and plan the reduction/plot computation."""
        plot_spec.__init__(self, seasonid)
        self.plottype = 'Yxvsx'
        self.seasons = defines.all_months
        self._var_baseid = '_'.join([varid, 'set3'])
        ft1id,ft2id = filetable_ids(filetable1,filetable2)
        self.plot1_id = ft1id+'_'+varid
        if filetable2 is not None:
            # With a second filetable we also plot dataset 2 and the difference.
            self.plot2_id = ft2id+'_'+varid
            self.plot3_id = ft1id+' - '+ft2id+'_'+varid
            self.plotall_id = ft1id+'_'+ft2id+'_'+varid
        else:
            self.plot2_id = None
            self.plot3_id = None
            self.plotall_id = None
        if not self.computation_planned:
            self.plan_computation(filetable1, filetable2, varid, seasonid, region, aux)
    @staticmethod
    def _list_variables(filetable1=None, filetable2 = None):
        # These are composite plot-group names for set 3, not raw file variables.
        # Conceivably these could be the same names as the composite plot IDs but
        # this is not a problem now; see _results() for what is being gotten at.
        varlist = ['Total_Precip_Runoff_SnowDepth', 'Radiative_Fluxes', 'Turbulent_Fluxes', 'Carbon_Nitrogen_Fluxes',
                   'Fire_Fluxes', 'Energy_Moist_Control_of_Evap', 'Snow_vs_Obs', 'Albedo_vs_Obs', 'Hydrology']
        return varlist
    @staticmethod
    # Maps every name from _list_variables to basic_plot_variable, mirroring the
    # amwg set-2 convention (the rationale for listing these is unclear — kept
    # for consistency with amwg2).
    def _all_variables(filetable1=None,filetable2=None):
        vlist = {vn:basic_plot_variable for vn in lmwg_plot_set3._list_variables(filetable1, filetable2) }
        return vlist
    def plan_computation(self, filetable1, filetable2, varid, seasonid, region, aux):
        """Build reduced variables, derived variables and plot specs for the
        composite plot group selected by substring-matching on varid
        ('Albedo', 'Moist', 'Radiative' or 'Turbulent').

        filetable2 is optional; when present its data is attached to each plot
        as a second curve (z2vars/z2func).  All results are stored as side
        effects on self.reduced_variables, self.derived_variables,
        self.single_plotspecs and self.composite_plotspecs.
        """
        # This is not scalable, but apparently is the way to do things. Fortunately, we only have 9 variables to deal with
        if 'Albedo' in varid:
            self.composite_plotspecs['Albedo_vs_Obs'] = []
            # One reduced variable per albedo ratio listed in self.albedos
            # (presumably defined on the base class -- confirm).
            for v in self.albedos.keys():
                self.reduced_variables[v+'_1'] = albedos_redvar(filetable1, 'TREND', self.albedos[v], region=region, flag='MONTHLY')
            vlist = ['ASA', 'VBSA', 'NBSA', 'VWSA', 'NWSA']
            # vlist = ['ASA', 'VBSA', 'NBSA', 'VWSA', 'NWSA']
            # This assumes FT2 is obs. Needs some way to determine if that is true
            # TODO: ASA is much more complicated than the others.
            if filetable2 != None:
                for v in vlist:
                    if v == 'ASA':
                        # NOTE(review): warning only -- the ASA reduced variable
                        # below is still registered despite this message.
                        print 'Comparison to ASA in obs set not implemented yet\n'
                        pass
                    self.reduced_variables[v+'_2'] = reduced_variable(
                        variableid = v, filetable=filetable2, reduced_var_id=v+'_2',
                        reduction_function=(lambda x, vid: reduceRegion(x, region, vid=vid)))
            # One line plot per albedo variable; obs curve added when present.
            for v in vlist:
                self.single_plotspecs[v+'_1'] = plotspec(vid=v+'_1', zvars=[v+'_1'], zfunc=(lambda z:z),
                    plottype = self.plottype, title=varinfo[v]['desc'])
                if filetable2 != None:
                    if v == 'ASA':
                        # No-op: ASA obs comparison unimplemented (see TODO below),
                        # yet z2vars is still assigned on the next line.
                        pass
                    self.single_plotspecs[v+'_1'].z2vars=[v+'_2']
                    self.single_plotspecs[v+'_1'].z2func=(lambda z:z)
                self.composite_plotspecs['Albedo_vs_Obs'].append(v+'_1')
            ### TODO Figure out how to generate Obs ASA
            # if filetable2 == None:
            #    self.single_plotspecs['ASA_1'] = plotspec(vid='ASA_1', zvars=['ASA_1'], zfunc=(lambda z:z),
            #       plottype = self.plottype)
            #    self.composite_plotspecs['Albedo_vs_Obs'].append('ASA_1')
            if filetable2 != None:
                print "NOTE: TODO - NEED TO CALCULATE ASA FROM OBS DATA"
        # No obs, so second DS is models
        # Plots are RNET and PREC and ET? on same graph
        if 'Moist' in varid:
            red_varlist = ['QVEGE', 'QVEGT', 'QSOIL', 'RAIN', 'SNOW']
            for v in red_varlist:
                self.reduced_variables[v+'_1'] = reduced_variable(
                    variableid = v, filetable=filetable1, reduced_var_id=v+'_1',
                    reduction_function=(lambda x, vid: reduceMonthlyTrendRegion(x, region, vid)))
                if filetable2 != None:
                    self.reduced_variables[v+'_2'] = reduced_variable(
                        variableid = v, filetable=filetable2, reduced_var_id=v+'_2',
                        reduction_function=(lambda x, vid: reduceMonthlyTrendRegion(x, region, vid)))
            self.reduced_variables['RNET_1'] = rnet_redvar(filetable1, 'TREND', region=region, flag='MONTHLY')
            # ET = QVEGE + QVEGT + QSOIL (sum3); PREC = RAIN + SNOW (aplusb).
            self.derived_variables['ET_1'] = derived_var(
                vid='ET_1', inputs=['QVEGE_1', 'QVEGT_1', 'QSOIL_1'], func=sum3)
            self.derived_variables['PREC_1'] = derived_var(
                vid='PREC_1', inputs=['RAIN_1', 'SNOW_1'], func=aplusb)
            if filetable2 != None:
                self.reduced_variables['RNET_2'] = rnet_redvar(filetable2, 'TREND', region=region, flag='MONTHLY')
                self.derived_variables['ET_2'] = derived_var(
                    vid='ET_2', inputs=['QVEGE_2', 'QVEGT_2', 'QSOIL_2'], func=sum3)
                self.derived_variables['PREC_2'] = derived_var(
                    vid='PREC_2', inputs=['RAIN_2', 'SNOW_2'], func=aplusb)
            # The NCAR plots do something like this; we don't support z3vars yet though, so making it separate plots
            # self.single_plotspecs['DS_1'] = plotspec(vid='DS_1',
            #    zvars=['ET_1'], zfunc=(lambda z:z),
            #    z2vars=['PREC_1'], z2func=(lambda z:z),
            #    z3vars=['RNET_1'], z3func=(lambda z:z),
            #    plottype = self.plottype)
            self.single_plotspecs['ET_1'] = plotspec(vid='ET_1',
                zvars=['ET_1'], zfunc=(lambda z:z),
                plottype = self.plottype, title=varinfo['ET']['desc'])
            self.single_plotspecs['PREC_1'] = plotspec(vid='PREC_1',
                zvars=['PREC_1'], zfunc=(lambda z:z),
                plottype = self.plottype, title=varinfo['PREC']['desc'])
            self.single_plotspecs['RNET_1'] = plotspec(vid='RNET_1',
                zvars=['RNET_1'], zfunc=(lambda z:z),
                plottype = self.plottype, title=varinfo['RNET']['desc'])
            if filetable2 != None:
                self.single_plotspecs['ET_2'] = plotspec(vid='ET_2',
                    zvars=['ET_2'], zfunc=(lambda z:z),
                    plottype = self.plottype)
                self.single_plotspecs['PREC_2'] = plotspec(vid='PREC_2',
                    zvars=['PREC_2'], zfunc=(lambda z:z),
                    plottype = self.plottype)
                self.single_plotspecs['RNET_2'] = plotspec(vid='RNET_2',
                    zvars=['RNET_2'], zfunc=(lambda z:z),
                    plottype = self.plottype)
            # NOTE(review): unlike the other branches, this REPLACES
            # self.composite_plotspecs wholesale rather than adding a key --
            # confirm 'Moist' is never combined with another group.
            self.composite_plotspecs = {
                # 'Energy_Moisture' : ['DS_1']
                'Energy_Moisture' : ['ET_1', 'RNET_1', 'PREC_1']
            }
            if filetable2 != None:
                self.composite_plotspecs['Energy_Moisture'].append('ET_2')
                self.composite_plotspecs['Energy_Moisture'].append('PREC_2')
                self.composite_plotspecs['Energy_Moisture'].append('RNET_2')
        # No obs for this, so FT2 should be a 2nd model
        if 'Radiative' in varid:
            self.composite_plotspecs['Radiative_Fluxes'] = []
            red_varlist = ['FSDS', 'FSA', 'FLDS', 'FIRE', 'FIRA']
            for v in red_varlist:
                self.reduced_variables[v+'_1'] = reduced_variable(
                    variableid = v, filetable=filetable1, reduced_var_id=v+'_1',
                    reduction_function=(lambda x, vid: reduceMonthlyTrendRegion(x, region, vid)))
                self.single_plotspecs[v] = plotspec(vid=v+'_1',
                    zvars = [v+'_1'], zfunc=(lambda z:z),
                    plottype = self.plottype, title=varinfo[v]['desc'])
                if filetable2 != None:
                    self.reduced_variables[v+'_2'] = reduced_variable(
                        variableid = v, filetable=filetable2, reduced_var_id=v+'_2',
                        reduction_function=(lambda x, vid: reduceMonthlyTrendRegion(x, region, vid)))
                    self.single_plotspecs[v].z2vars = [v+'_2']
                    self.single_plotspecs[v].z2func = (lambda z:z)
                self.composite_plotspecs['Radiative_Fluxes'].append(v)
            # Albedo (built from FSR and FSDS) and net radiation composites.
            self.reduced_variables['ASA_1'] = albedos_redvar(filetable1, 'TREND', ['FSR', 'FSDS'], region=region, flag='MONTHLY')
            self.reduced_variables['RNET_1' ] = rnet_redvar(filetable1, 'TREND', region=region, flag='MONTHLY')
            if filetable2 != None:
                self.reduced_variables['ASA_2'] = albedos_redvar(filetable2, 'TREND', ['FSR', 'FSDS'], region=region, flag='MONTHLY')
                self.reduced_variables['RNET_2' ] = rnet_redvar(filetable2, 'TREND', region=region, flag='MONTHLY')
            self.single_plotspecs['Albedo'] = plotspec(vid='ASA_1',
                zvars = ['ASA_1'], zfunc=(lambda z:z),
                plottype = self.plottype, title=varinfo['ASA']['desc'])
            self.single_plotspecs['NetRadiation'] = plotspec(vid='RNET_1',
                zvars = ['RNET_1'], zfunc=(lambda z:z),
                plottype = self.plottype, title=varinfo['RNET']['desc'])
            if filetable2 != None:
                self.single_plotspecs['Albedo'].z2vars = ['ASA_2']
                self.single_plotspecs['Albedo'].z2func = (lambda z:z)
                self.single_plotspecs['NetRadiation'].z2vars = ['RNET_2']
                self.single_plotspecs['NetRadiation'].z2func = (lambda z:z)
            self.composite_plotspecs['Radiative_Fluxes'].append('Albedo')
            self.composite_plotspecs['Radiative_Fluxes'].append('NetRadiation')
        # No obs for this, so FT2 should be a 2nd model
        if 'Turbulent' in varid:
            self.composite_plotspecs['Turbulent_Fluxes'] = []
            red_varlist = ['FSH', 'FCTR', 'FCEV', 'FGEV', 'FGR', 'BTRAN', 'TLAI']
            for v in red_varlist:
                self.reduced_variables[v+'_1'] = reduced_variable(
                    variableid = v, filetable=filetable1, reduced_var_id=v+'_1',
                    reduction_function=(lambda x, vid: reduceMonthlyTrendRegion(x, region, vid)))
                if filetable2 != None:
                    self.reduced_variables[v+'_2'] = reduced_variable(
                        variableid = v, filetable=filetable2, reduced_var_id=v+'_2',
                        reduction_function=(lambda x, vid: reduceMonthlyTrendRegion(x, region, vid)))
                self.single_plotspecs[v] = plotspec(vid=v+'_1',
                    zvars = [v+'_1'], zfunc=(lambda z:z),
                    plottype = self.plottype, title=varinfo[v]['desc'])
                if filetable2 != None:
                    self.single_plotspecs[v].z2vars = [v+'_2']
                    self.single_plotspecs[v].z2func = (lambda z:z)
                self.composite_plotspecs['Turbulent_Fluxes'].append(v)
            # NOTE(review): FCTR/FGEV/FCEV were already reduced in the loop
            # above (they appear in red_varlist); this re-registers the same
            # keys with identical definitions.
            sub_varlist = ['FCTR', 'FGEV', 'FCEV']
            for v in sub_varlist:
                self.reduced_variables[v+'_1'] = reduced_variable(
                    variableid = v, filetable=filetable1, reduced_var_id=v+'_1',
                    reduction_function=(lambda x, vid: reduceMonthlyTrendRegion(x, region, vid)))
                if filetable2 != None:
                    self.reduced_variables[v+'_2'] = reduced_variable(
                        variableid = v, filetable=filetable2, reduced_var_id=v+'_2',
                        reduction_function=(lambda x, vid: reduceMonthlyTrendRegion(x, region, vid)))
            ### Can we do these with reduceMonthlyTrendRegion? Needs investigation
            # LHEAT = FCTR + FGEV + FCEV (sum3).
            self.derived_variables['LHEAT_1'] = derived_var(
                vid='LHEAT_1', inputs=['FCTR_1', 'FGEV_1', 'FCEV_1'], func=sum3)
            self.reduced_variables['EVAPFRAC_1'] = evapfrac_redvar(filetable1, 'TREND', region=region, flag='MONTHLY')
            self.reduced_variables['RNET_1'] = rnet_redvar(filetable1, 'TREND', region=region, flag='MONTHLY')
            if filetable2 != None:
                self.derived_variables['LHEAT_2'] = derived_var(
                    vid='LHEAT_2', inputs=['FCTR_2', 'FGEV_2', 'FCEV_2'], func=sum3)
                self.reduced_variables['EVAPFRAC_2'] = evapfrac_redvar(filetable2, 'TREND', region=region, flag='MONTHLY')
                self.reduced_variables['RNET_2'] = rnet_redvar(filetable2, 'TREND', region=region, flag='MONTHLY')
            self.single_plotspecs['LatentHeat'] = plotspec(vid='LHEAT_1',
                zvars = ['LHEAT_1'], zfunc=(lambda z:z),
                plottype = self.plottype, title=varinfo['LHEAT']['desc'])
            self.single_plotspecs['EvaporativeFraction'] = plotspec(vid='EVAPFRAC_1',
                zvars=['EVAPFRAC_1'], zfunc=(lambda z:z),
                plottype = self.plottype, title=varinfo['EVAPFRAC']['desc'])
            self.single_plotspecs['NetRadiation'] = plotspec(vid='RNET_1',
                zvars=['RNET_1'], zfunc=(lambda z:z),
                plottype = self.plottype, title=varinfo['RNET']['desc'])
            if filetable2 != None:
                self.single_plotspecs['NetRadiation'].z2vars = ['RNET_2']
                self.single_plotspecs['NetRadiation'].z2func = (lambda z:z)
                self.single_plotspecs['LatentHeat'].z2vars = ['LHEAT_2']
                self.single_plotspecs['LatentHeat'].z2func = (lambda z:z)
                self.single_plotspecs['EvaporativeFraction'].z2vars = ['EVAPFRAC_2']
                self.single_plotspecs['EvaporativeFraction'].z2func = (lambda z:z)
            self.composite_plotspecs['Turbulent_Fluxes'].append('EvaporativeFraction')
            self.composite_plotspecs['Turbulent_Fluxes'].append('LatentHeat')
| |
"""相対時間の抽出・正規化処理を定義するモジュール."""
from copy import deepcopy
from typing import List, Tuple
from pynormalizenumexp.expression.base import INF, NNumber, NTime, NumberModifier
from pynormalizenumexp.expression.reltime import ReltimeExpression, ReltimePattern
from pynormalizenumexp.utility.dict_loader import DictLoader
from .base import BaseNormalizer
from .number_normalizer import NumberNormalizer
class ReltimeExpressionNormalizer(BaseNormalizer):
"""相対時間の抽出・正規化を行うクラス."""
limited_expressions: List[ReltimePattern]
prefix_counters: List[ReltimePattern]
def __init__(self, dict_loader: DictLoader) -> None:
"""コンストラクタ.
Parameters
----------
dict_loader : DictLoader
辞書ファイルのローダー
"""
super().__init__(dict_loader)
self.number_normalizer = NumberNormalizer(dict_loader)
self.load_dictionaries("reltime_expression.json", "reltime_prefix_counter.json",
"reltime_prefix.json", "reltime_suffix.json")
def load_dictionaries(self, limited_expr_dict_file: str, prefix_counter_dict_file: str,
prefix_number_modifier_dict_file: str, suffix_number_modifier_dict_file: str) -> None:
"""辞書ファイルの読み込み.
Parameters
----------
limited_expr_dict_file : str
相対時間のパターンを定義した辞書ファイル名
prefix_counter_dict_file : str
接頭表現(単位や年代など)を定義した辞書ファイル名
prefix_number_modifier_dict_file : str
接尾表現(範囲表現)を定義した辞書ファイル名
suffix_number_modifier_dict_file : str
接尾表現を定義した辞書ファイル名
"""
self.limited_expressions = self.dict_loader.load_limited_reltime_expr_dict(limited_expr_dict_file)
self.prefix_counters = self.dict_loader.load_limited_reltime_expr_dict(prefix_counter_dict_file)
self.prefix_number_modifier = self.dict_loader.load_number_modifier_dict(prefix_number_modifier_dict_file)
self.suffix_number_modifier = self.dict_loader.load_number_modifier_dict(suffix_number_modifier_dict_file)
self.limited_expression_patterns = self.build_patterns(self.limited_expressions)
self.prefix_counter_patterns = self.build_patterns(self.prefix_counters)
self.prefix_number_modifier_patterns = self.build_patterns(self.prefix_number_modifier)
self.suffix_number_modifier_patterns = self.build_patterns(self.suffix_number_modifier)
for expr in self.limited_expressions:
expr.set_total_number_of_place_holder()
expr.set_len_of_after_final_place_holder()
    def normalize_number(self, text: str) -> List[NNumber]:
        """Extract numeric expressions from the text.

        Parameters
        ----------
        text : str
            Text to extract from.

        Returns
        -------
        List[NNumber]
            The extracted numeric expressions.
        """
        # Delegates entirely to the shared NumberNormalizer instance.
        return self.number_normalizer.process(text)
def numbers2expressions(self, numbers: List[NNumber]) -> List[ReltimeExpression]: # type: ignore[override]
"""抽出した数値表現を相対時間表現のオブジェクトに変換する.
Parameters
----------
numbers : List[NNumber]
抽出した数値表現
Returns
-------
List[ReltimeExpression]
相対時間表現のオブジェクト
"""
return [ReltimeExpression(number) for number in numbers]
def revise_reltime_expr_by_process_type(self, reltime_expr: ReltimeExpression,
process_type: str,
matching_reltime_expr: ReltimePattern) -> ReltimeExpression:
"""修飾語でないパターンに含まれるprocess_typeによる正規化表現の補正を行う.
Parameters
----------
reltime_expr : ReltimeExpression
補正対象の相対時間表現
process_type : str
処理タイプ
matching_reltime_expr : ReltimePattern
マッチした相対時間表現パターン
Returns
-------
ReltimeExpression
補正後の相対時間表現
"""
new_reltime_expr = deepcopy(reltime_expr)
if process_type == "han":
if len(matching_reltime_expr.corresponding_time_position) == 0:
return new_reltime_expr
new_reltime_expr.value_lower_bound_rel, new_reltime_expr.value_upper_bound_rel \
= self.do_option_han(new_reltime_expr, matching_reltime_expr.corresponding_time_position[-1])
elif process_type == "or_over":
reltime_expr.value_upper_bound_abs = NTime(-INF)
elif process_type == "or_less":
reltime_expr.value_lower_bound_abs = NTime(INF)
elif process_type == "over":
reltime_expr.value_upper_bound_abs = NTime(-INF)
reltime_expr.include_lower_bound = False
elif process_type == "less":
reltime_expr.value_lower_bound_abs = NTime(INF)
reltime_expr.include_upper_bound = False
elif process_type == "inai":
reltime_expr.value_lower_bound_rel = NTime(0)
elif process_type == "none":
pass
return new_reltime_expr
    def revise_expr_by_matching_limited_expression(self, exprs: List[ReltimeExpression],  # type: ignore[override]
                                                   expr_id: int,
                                                   matching_expr: ReltimePattern) -> List[ReltimeExpression]:
        """Correct the expressions covered by a matched relative-time pattern.

        Parameters
        ----------
        exprs : List[ReltimeExpression]
            Extracted relative-time expressions.
        expr_id : int
            Index of the expression the pattern matched at.
        matching_expr : ReltimePattern
            The matched dictionary pattern.

        Returns
        -------
        List[ReltimeExpression]
            Corrected expressions; the place-holder expressions the pattern
            consumed (indices expr_id+1 .. expr_id + total place holders) are
            removed from the returned list.
        """
        new_exprs = deepcopy(exprs)
        # The match spans this expression plus the pattern's place holders,
        # so it ends where the last consumed expression ends, plus any literal
        # text following the final place holder.
        final_expr_id = expr_id + matching_expr.total_number_of_place_holder
        new_exprs[expr_id].position_end = new_exprs[final_expr_id].position_end \
            + matching_expr.len_of_after_final_place_holder
        # Fold each consumed expression's value into the head expression at
        # the time position the pattern assigns to it.
        for i, time_position in enumerate(matching_expr.corresponding_time_position):
            new_exprs[expr_id] = self.set_time(new_exprs[expr_id],
                                               time_position,
                                               new_exprs[expr_id+i])
        # Apply every process_type correction declared by the pattern.
        for i, process_type in enumerate(matching_expr.process_type):
            new_exprs[expr_id] = self.revise_reltime_expr_by_process_type(
                new_exprs[expr_id], process_type, matching_expr
            )
        new_exprs[expr_id].ordinary = matching_expr.ordinary
        # Drop the expressions that were merged into new_exprs[expr_id]
        # (indices in [min_id, max_id] are filtered out).
        min_id = expr_id + 1
        max_id = expr_id + matching_expr.total_number_of_place_holder
        return [x[1] for x in filter(lambda x: min_id > x[0] or x[0] > max_id, enumerate(new_exprs))]
def revise_expr_by_matching_prefix_counter(self, expr: ReltimeExpression, # type: ignore[override]
matching_expr: ReltimePattern) -> ReltimeExpression:
"""マッチした単位表現から相対時間表現の補正を行う.
Parameters
----------
expr : ReltimeExpression
抽出された相対時間表現
matching_expr : ReltimePattern
マッチした表現辞書パターン
Returns
-------
ReltimeExpression
補正済みの相対時間表現
"""
new_expr = deepcopy(expr)
if matching_expr.option == "add_relation":
# 「去年3月」などの、「相対時間表現」+「絶対時間表現」からなる処理
if self.normalizer_utility.is_null_time(new_expr.value_lower_bound_abs) \
and self.normalizer_utility.is_null_time(new_expr.value_upper_bound_abs):
# 絶対時間表現が抽出されていなければ、処理を行わない
return new_expr
relation_val = int(matching_expr.process_type[0])
if matching_expr.corresponding_time_position[0] == "y":
new_expr.value_lower_bound_rel.year = new_expr.value_upper_bound_rel.year = relation_val
elif matching_expr.corresponding_time_position[0] == "m":
new_expr.value_lower_bound_rel.month = new_expr.value_upper_bound_rel.month = relation_val
elif matching_expr.corresponding_time_position[0] == "d":
new_expr.value_lower_bound_rel.day = new_expr.value_upper_bound_rel.day = relation_val
elif matching_expr.corresponding_time_position[0] == "h":
new_expr.value_lower_bound_rel.hour = new_expr.value_upper_bound_rel.hour = relation_val
elif matching_expr.corresponding_time_position[0] == "mn":
new_expr.value_lower_bound_rel.minute = new_expr.value_upper_bound_rel.minute = relation_val
elif matching_expr.corresponding_time_position[0] == "s":
new_expr.value_lower_bound_rel.second = new_expr.value_upper_bound_rel.second = relation_val
new_expr.position_start -= len(matching_expr.pattern)
return new_expr
def revise_expr_by_number_modifier(self, expr: ReltimeExpression, # type: ignore[override] # noqa: C901
number_modifier: NumberModifier) -> ReltimeExpression:
"""マッチした修飾表現から相対時間表現の補正を行う.
Parameters
----------
expr : ReltimeExpression
抽出された相対時間表現
number_modifier : NumberModifier
マッチした修飾表現
Returns
-------
ReltimeExpression
補正後の相対時間表現
"""
new_expr = deepcopy(expr)
if number_modifier.process_type == "about":
val_lb_rel, val_ub_rel = self.do_time_about(new_expr)
new_expr.value_lower_bound_rel = val_lb_rel
new_expr.value_upper_bound_rel = val_ub_rel
elif number_modifier.process_type == "zenhan":
val_lb_abs, val_ub_abs = self.do_time_zenhan(new_expr)
new_expr.value_lower_bound_abs = val_lb_abs
new_expr.value_upper_bound_abs = val_ub_abs
elif number_modifier.process_type == "nakaba":
val_lb_abs, val_ub_abs = self.do_time_nakaba(new_expr)
new_expr.value_lower_bound_abs = val_lb_abs
new_expr.value_upper_bound_abs = val_ub_abs
elif number_modifier.process_type == "kouhan":
val_lb_abs, val_ub_abs = self.do_time_kouhan(new_expr)
new_expr.value_lower_bound_abs = val_lb_abs
new_expr.value_upper_bound_abs = val_ub_abs
elif number_modifier.process_type == "joujun":
val_lb_abs, val_ub_abs = self.do_time_joujun(new_expr)
new_expr.value_lower_bound_abs = val_lb_abs
new_expr.value_upper_bound_abs = val_ub_abs
elif number_modifier.process_type == "tyujun":
val_lb_abs, val_ub_abs = self.do_time_tyujun(new_expr)
new_expr.value_lower_bound_abs = val_lb_abs
new_expr.value_upper_bound_abs = val_ub_abs
elif number_modifier.process_type == "gejun":
val_lb_abs, val_ub_abs = self.do_time_gejun(new_expr)
new_expr.value_lower_bound_abs = val_lb_abs
new_expr.value_upper_bound_abs = val_ub_abs
else:
new_expr.options.append(number_modifier.process_type)
return new_expr
def delete_not_expression(self, # type: ignore[override]
exprs: List[ReltimeExpression]) -> List[ReltimeExpression]:
"""時間オブジェクトがNullの相対時間表現を削除する.
Parameters
----------
exprs : List[ReltimeExpression]
抽出された相対時間表現
Returns
-------
List[ReltimeExpression]
削除後の相対時間表現
"""
for i in range(len(exprs)):
if self.normalizer_utility.is_null_time(exprs[i].value_lower_bound_rel) \
and self.normalizer_utility.is_null_time(exprs[i].value_upper_bound_rel):
exprs[i] = None # type: ignore
return [expr for expr in exprs if expr]
    def fix_by_range_expression(self,  # type: ignore[override] # noqa: C901
                                text: str, exprs: List[ReltimeExpression]) -> List[ReltimeExpression]:
        """Fix range ("A kara B") expressions and add bare prefix-counter matches.

        Parameters
        ----------
        text : str
            The original text.
        exprs : List[ReltimeExpression]
            Extracted relative-time expressions.

        Returns
        -------
        List[ReltimeExpression]
            The fixed relative-time expressions.
        """
        def is_registered(number: NNumber, reltime_exprs: List[ReltimeExpression]) -> bool:
            # True when the candidate span is already covered by an
            # extracted expression.
            for expr in reltime_exprs:
                if expr.position_start <= number.position_start and number.position_end <= expr.position_end:
                    return True
            return False
        for i in range(len(exprs) - 1):
            # Merge adjacent "X kara Y" pairs: X must carry a kara-suffix,
            # Y a kara-prefix, and they must be at most 2 characters apart.
            if exprs[i] is None \
                    or not self.have_kara_suffix(exprs[i].options) \
                    or not self.have_kara_prefix(exprs[i+1].options) \
                    or exprs[i].position_end + 2 < exprs[i+1].position_start:
                continue
            # Combine the pair into a single range expression on element i.
            exprs[i].value_upper_bound_rel = exprs[i+1].value_upper_bound_rel
            exprs[i].value_upper_bound_abs = exprs[i+1].value_upper_bound_abs
            exprs[i].position_end = exprs[i+1].position_end
            exprs[i].set_original_expr_from_position(text)
            exprs[i].options = self.merge_options(exprs[i].options, exprs[i+1].options)
            # Element i+1 was merged into i; mark it None and filter below.
            exprs[i+1] = None  # type: ignore
        exprs = [expr for expr in exprs if expr]
        # Pick up numeral-free expressions such as 「今日」「明日」「来年」
        # (today / tomorrow / next year) that were not extracted above.
        add_reltime_exprs: List[ReltimeExpression] = []
        for prefix_counter in self.prefix_counters:
            try:
                idx = text.index(prefix_counter.pattern)
                prefix_counter.set_len_of_after_final_place_holder()
                number = NNumber(prefix_counter.pattern, idx, idx+prefix_counter.len_of_after_final_place_holder)
                if is_registered(number, exprs):
                    continue
                reltime_expr = ReltimeExpression(number)
                relation_val = int(prefix_counter.process_type[0])
                if prefix_counter.corresponding_time_position[0] == "y":
                    reltime_expr.value_lower_bound_rel.year = reltime_expr.value_upper_bound_rel.year = relation_val
                elif prefix_counter.corresponding_time_position[0] == "m":
                    reltime_expr.value_lower_bound_rel.month = reltime_expr.value_upper_bound_rel.month = relation_val
                elif prefix_counter.corresponding_time_position[0] == "d":
                    reltime_expr.value_lower_bound_rel.day = reltime_expr.value_upper_bound_rel.day = relation_val
                add_reltime_exprs.append(reltime_expr)
            except ValueError:
                # text.index raises ValueError when the pattern is absent.
                # NOTE(review): int(...) above can also raise ValueError and
                # is silently swallowed here -- confirm that is intended.
                pass
        exprs += add_reltime_exprs
        return exprs
def do_option_han(self, reltime_expr: ReltimeExpression, # noqa: C901
corresponding_time_position: str) -> Tuple[NTime, NTime]:
"""「半」表現の場合の日付計算を行う.
Parameters
----------
reltime_expr : ReltimeExpression
計算対象の相対時間表現
corresponding_time_position : str
時間表現の単位種別
Returns
-------
Tuple[NTime, NTime]
計算後の日付情報
"""
# TODO 「週」、「世紀」に対応していない部分がある
val_lb_rel = reltime_expr.value_lower_bound_rel
val_ub_rel = reltime_expr.value_upper_bound_rel
if corresponding_time_position == "+y":
val_lb_rel.year += 0.5
val_ub_rel.year += 0.5
elif corresponding_time_position == "+m":
val_lb_rel.month += 0.5
val_ub_rel.month += 0.5
elif corresponding_time_position == "+d":
val_lb_rel.day += 0.5
val_ub_rel.day += 0.5
elif corresponding_time_position == "+h":
val_lb_rel.hour += 0.5
val_ub_rel.hour += 0.5
elif corresponding_time_position == "+mn":
val_lb_rel.minute += 0.5
val_ub_rel.minute += 0.5
elif corresponding_time_position == "+s":
val_lb_rel.minute += 0.5
val_ub_rel.minute += 0.5
elif corresponding_time_position == "+seiki":
val_lb_rel.year += 50
val_ub_rel.year += 50
elif corresponding_time_position == "-y":
val_lb_rel.year -= 0.5
val_ub_rel.year -= 0.5
elif corresponding_time_position == "-m":
val_lb_rel.month -= 0.5
val_ub_rel.month -= 0.5
elif corresponding_time_position == "-d":
val_lb_rel.day -= 0.5
val_ub_rel.day -= 0.5
elif corresponding_time_position == "-h":
val_lb_rel.hour -= 0.5
val_ub_rel.hour -= 0.5
elif corresponding_time_position == "-mn":
val_lb_rel.minute -= 0.5
val_ub_rel.minute -= 0.5
elif corresponding_time_position == "-s":
val_lb_rel.minute -= 0.5
val_ub_rel.minute -= 0.5
elif corresponding_time_position == "-seiki":
val_lb_rel.year -= 50
val_ub_rel.year -= 50
return val_lb_rel, val_ub_rel
def do_time_about(self, reltime_expr: ReltimeExpression) -> Tuple[NTime, NTime]:
"""about表現の場合の日付計算を行う.
Parameters
----------
reltime_expr : ReltimeExpression
計算対象の相対時間表現
Returns
-------
Tuple[NTime, NTime]
計算後の日付情報
"""
# 「およそ1000年前」「2か月前頃」など
val_lb_rel = reltime_expr.value_lower_bound_rel
val_ub_rel = reltime_expr.value_upper_bound_rel
target_time_position = self.normalizer_utility.identify_time_detail(val_lb_rel)
if target_time_position == "y":
val_lb_rel.year -= 5
val_ub_rel.year += 5
elif target_time_position == "m":
val_lb_rel.month -= 1
val_ub_rel.month += 1
elif target_time_position == "d":
val_lb_rel.day -= 1
val_ub_rel.day += 1
elif target_time_position == "h":
val_lb_rel.hour -= 1
val_ub_rel.hour += 1
elif target_time_position == "mn":
val_lb_rel.minute -= 5
val_ub_rel.minute += 5
elif target_time_position == "s":
val_lb_rel.second -= 5
val_ub_rel.second += 5
return val_lb_rel, val_ub_rel
def do_time_zenhan(self, reltime_expr: ReltimeExpression) -> Tuple[NTime, NTime]:
"""前半表現の場合の日付計算を行う.
Parameters
----------
reltime_expr : ReltimeExpression
計算対象の相対時間表現
Returns
-------
Tuple[NTime, NTime]
計算後の日付情報
"""
val_lb_abs = reltime_expr.value_lower_bound_abs
val_ub_abs = reltime_expr.value_upper_bound_abs
target_time_position = self.normalizer_utility.identify_time_detail(val_lb_abs)
if target_time_position == "y":
if val_lb_abs.year != val_ub_abs.year:
# 「18世紀前半」のような場合
val_ub_abs.year = (val_lb_abs.year + val_ub_abs.year) / 2 - 0.5
else:
# 「1989年前半」のような場合
val_lb_abs.month = 1
val_ub_abs.month = 6
elif target_time_position == "m":
# 「7月前半」のような場合
val_lb_abs.day = 1
val_ub_abs.day = 15
elif target_time_position == "d":
# 「3日朝」のような場合
val_lb_abs.hour = 5
val_ub_abs.hour = 12
else:
pass
return val_lb_abs, val_ub_abs
def do_time_kouhan(self, reltime_expr: ReltimeExpression) -> Tuple[NTime, NTime]:
"""後半表現の場合の日付計算を行う.
Parameters
----------
reltime_expr : ReltimeExpression
計算対象の相対時間表現
Returns
-------
Tuple[NTime, NTime]
計算後の日付情報
"""
val_lb_abs = reltime_expr.value_lower_bound_abs
val_ub_abs = reltime_expr.value_upper_bound_abs
target_time_position = self.normalizer_utility.identify_time_detail(val_lb_abs)
if target_time_position == "y":
if val_lb_abs.year != val_ub_abs.year:
# 「18世紀後半」のような場合
val_lb_abs.year = (val_lb_abs.year + val_ub_abs.year) / 2 + 0.5
else:
# 「1989年後半」のような場合
val_lb_abs.month = 7
val_ub_abs.month = 12
elif target_time_position == "m":
# 「7月後半」のような場合
val_lb_abs.day = 16
val_ub_abs.day = 31
elif target_time_position == "d":
# 「3日夜」のような場合
val_lb_abs.hour = 18
val_ub_abs.hour = 24
else:
pass
return val_lb_abs, val_ub_abs
def do_time_nakaba(self, reltime_expr: ReltimeExpression) -> Tuple[NTime, NTime]:
"""半ば表現の場合の日付計算を行う.
Parameters
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import math
import os
import sys
import tensorflow as tf
from tensorflow_transform.saved import input_fn_maker
from tensorflow_transform.tf_metadata import metadata_io
from tensorflow.contrib.learn.python.learn import learn_runner, metric_spec
from tensorflow.python.ops import metrics
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.python.platform import tf_logging as logging
tf.logging.set_verbosity(tf.logging.INFO)
from trainer.features import LABEL_COLUMN, DISPLAY_ID_COLUMN, DISPLAY_ID_AND_IS_LEAK_ENCODED_COLUMN, CATEGORICAL_COLUMNS, DOC_CATEGORICAL_MULTIVALUED_COLUMNS, BOOL_COLUMNS, INT_COLUMNS, FLOAT_COLUMNS_LOG_BIN_TRANSFORM, FLOAT_COLUMNS_SIMPLE_BIN_TRANSFORM
from itertools import combinations
# Constant hash seed -- presumably passed to crossed feature columns; confirm at use sites.
HASH_KEY = 42
# Supported model architectures (value of --model_type).
MODEL_TYPES = ['wide', 'deep', 'wide_n_deep']
WIDE, DEEP, WIDE_N_DEEP = MODEL_TYPES
KEY_FEATURE_COLUMN = 'example_id'
TARGET_FEATURE_COLUMN = 'label'
#Default dataset sizes for evaluation
TRAIN_DATASET_SIZE = 55000000
EVAL_DATASET_SIZE = 27380257
#The MAP metric is computed by assessing the ranking quality of the ads for each display_id
#The validation dataset is already sorted by display_id, so all respective ads will be in the batch,
#except for the initial and final part of the batch, whose display_ids might have been truncated.
#For this reason, the eval batch should not be too small, to avoid noise in the measurement
EVAL_BATCH_SIZE=3000
def create_parser():
  """Initialize command line parser using argparse.

  Returns:
    An argparse.ArgumentParser.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--model_type',
      help='Model type to train on',
      choices=MODEL_TYPES,
      default=WIDE)
  # Input path flags may be repeated to pass several file patterns.
  parser.add_argument(
      '--train_data_paths', type=str, action='append', required=True)
  parser.add_argument(
      '--eval_data_paths', type=str, action='append', required=True)
  parser.add_argument('--output_path', type=str, required=True)
  # The following three parameters are required for tf.Transform.
  parser.add_argument('--raw_metadata_path', type=str, required=True)
  parser.add_argument('--transformed_metadata_path', type=str, required=True)
  parser.add_argument('--transform_savedmodel', type=str, required=True)
  parser.add_argument(
      '--deep_hidden_units',
      help='String with hidden units per layer, separated by space. All layers are fully connected. Ex.'
      '`64 32` means first layer has 64 nodes and second one has 32.',
      default="100 50",
      type=str)
  parser.add_argument(
      '--train_batch_size',
      help='Number of input records used per batch',
      default=512,
      type=int)
  parser.add_argument(
      '--eval_batch_size',
      help='Number of eval records used per batch',
      default=EVAL_BATCH_SIZE,
      type=int)
  parser.add_argument(
      '--train_steps', help='Number of training steps to perform.', type=int)
  parser.add_argument(
      '--eval_steps',
      help='Number of evaluation steps to perform.',
      type=int)
  parser.add_argument(
      '--train_set_size',
      help='Number of samples on the train dataset.',
      type=int,
      default=TRAIN_DATASET_SIZE)
  parser.add_argument(
      '--eval_set_size',
      # FIX: help text previously said "train dataset" (copy-paste error).
      help='Number of samples on the eval dataset.',
      type=int,
      default=EVAL_DATASET_SIZE)
  parser.add_argument('--linear_l1_regularization', help='L1 Regularization for Linear Model', type=float, default=0.1)
  parser.add_argument('--linear_l2_regularization', help='L2 Regularization for Linear Model', type=float, default=0.0)
  parser.add_argument('--linear_learning_rate', help='Learning Rate for Linear Model', type=float, default=0.05)
  parser.add_argument('--deep_l1_regularization', help='L1 Regularization for Deep Model', type=float, default=0.0)
  parser.add_argument('--deep_l2_regularization', help='L2 Regularization for Deep Model', type=float, default=0.00)
  parser.add_argument('--deep_learning_rate', help='Learning Rate for Deep Model', type=float, default=0.05)
  parser.add_argument('--deep_dropout', help='Dropout regularization for Deep Model', type=float, default=0.1)
  parser.add_argument('--deep_embedding_size_factor', help='Constant factor to apply on get_embedding_size() = embedding_size_factor * unique_val_count**0.25', type=int, default=6)
  parser.add_argument(
      '--num_epochs', help='Number of epochs', default=5, type=int)
  parser.add_argument(
      '--ignore_crosses',
      action='store_true',
      default=False,
      help='Whether to ignore crosses (linear model only).')
  parser.add_argument(
      '--full_evaluation_after_training',
      action='store_true',
      default=False,
      # FIX: "Weather" -> "Whether" (typo in user-facing help text).
      help='Whether to evaluate the full validation set only after training steps are completed (for a very accurate MAP evaluation).')
  return parser
def get_embedding_size(const_mult, unique_val_count):
  """Heuristic embedding width: floor(const_mult * unique_val_count ** 0.25)."""
  fourth_root = unique_val_count ** 0.25
  return int(math.floor(const_mult * fourth_root))
def get_feature_columns(model_type, linear_use_crosses, embedding_size_factor):
    """Build the (wide_columns, deep_columns) feature-column lists.

    Args:
        model_type: string; when it contains 'wide' the linear (wide) columns
            are built, when it contains 'deep' the embedding (deep) columns
            are built.
        linear_use_crosses: when True, crossed columns between the wide
            categorical features are added (linear model only).
        embedding_size_factor: constant multiplier forwarded to
            get_embedding_size() to size each embedding dimension.

    Returns:
        Tuple (wide_columns, deep_columns); a list stays empty when the
        corresponding model half is not requested in model_type.
    """
    wide_columns = []
    deep_columns = []

    # Low-cardinality categorical features shared by both halves of the model.
    # Trailing comments give the observed value ranges.
    event_weekend = tf.contrib.layers.sparse_column_with_integerized_feature('event_weekend', bucket_size=2, dtype=tf.int16, combiner="sum")  # 0-1
    user_has_already_viewed_doc = tf.contrib.layers.sparse_column_with_integerized_feature("user_has_already_viewed_doc", bucket_size=2, dtype=tf.int16, combiner="sum")  # 0-1
    event_hour = tf.contrib.layers.sparse_column_with_integerized_feature("event_hour", bucket_size=7, dtype=tf.int64, combiner="sum")  # 1-6
    event_platform = tf.contrib.layers.sparse_column_with_integerized_feature("event_platform", bucket_size=4, dtype=tf.int64, combiner="sum")  # 1-3
    traffic_source = tf.contrib.layers.sparse_column_with_integerized_feature("traffic_source", bucket_size=4, dtype=tf.int64, combiner="sum")  # 1-3

    # TODO: Campaign (10803 unique values) is usually an important feature.
    # Add this feature in the next feature engineering round.

    if 'wide' in model_type:
        # Single-valued categories (trailing comments give observed unique value counts).
        ad_id = tf.contrib.layers.sparse_column_with_hash_bucket('ad_id', hash_bucket_size=250000, dtype=tf.int64, combiner="sum")  # 418295
        doc_id = tf.contrib.layers.sparse_column_with_hash_bucket('doc_id', hash_bucket_size=100000, dtype=tf.int64, combiner="sum")  # 143856
        doc_event_id = tf.contrib.layers.sparse_column_with_hash_bucket('doc_event_id', hash_bucket_size=300000, dtype=tf.int64, combiner="sum")  # 636482
        ad_advertiser = tf.contrib.layers.sparse_column_with_hash_bucket('ad_advertiser', hash_bucket_size=2500, dtype=tf.int32, combiner="sum")  # 2052
        doc_ad_publisher_id = tf.contrib.layers.sparse_column_with_hash_bucket('doc_ad_publisher_id', hash_bucket_size=1000, dtype=tf.int32, combiner="sum")  # 830
        doc_ad_source_id = tf.contrib.layers.sparse_column_with_hash_bucket('doc_ad_source_id', hash_bucket_size=4000, dtype=tf.int32, combiner="sum")  # 6339
        doc_event_publisher_id = tf.contrib.layers.sparse_column_with_hash_bucket('doc_event_publisher_id', hash_bucket_size=1000, dtype=tf.int32, combiner="sum")  # 830
        doc_event_source_id = tf.contrib.layers.sparse_column_with_hash_bucket('doc_event_source_id', hash_bucket_size=4000, dtype=tf.int32, combiner="sum")  # 6339
        event_country = tf.contrib.layers.sparse_column_with_hash_bucket('event_country', hash_bucket_size=300, dtype=tf.int32, combiner="sum")  # 222
        event_country_state = tf.contrib.layers.sparse_column_with_hash_bucket('event_country_state', hash_bucket_size=2000, dtype=tf.int32, combiner="sum")  # 1892
        event_geo_location = tf.contrib.layers.sparse_column_with_hash_bucket('event_geo_location', hash_bucket_size=2500, dtype=tf.int32, combiner="sum")  # 2273
        # Multi-valued categories
        doc_ad_category_id = tf.contrib.layers.sparse_column_with_hash_bucket('doc_ad_category_id', hash_bucket_size=100, dtype=tf.int32, combiner="sum")  # 90
        doc_ad_topic_id = tf.contrib.layers.sparse_column_with_hash_bucket('doc_ad_topic_id', hash_bucket_size=350, dtype=tf.int32, combiner="sum")  # 301
        doc_ad_entity_id = tf.contrib.layers.sparse_column_with_hash_bucket('doc_ad_entity_id', hash_bucket_size=10000, dtype=tf.int64, combiner="sum")  # 52439
        doc_event_category_id = tf.contrib.layers.sparse_column_with_hash_bucket('doc_event_category_id', hash_bucket_size=100, dtype=tf.int32, combiner="sum")  # 90
        doc_event_topic_id = tf.contrib.layers.sparse_column_with_hash_bucket('doc_event_topic_id', hash_bucket_size=350, dtype=tf.int32, combiner="sum")  # 301
        doc_event_entity_id = tf.contrib.layers.sparse_column_with_hash_bucket('doc_event_entity_id', hash_bucket_size=10000, dtype=tf.int64, combiner="sum")  # 52439

        # Binned numeric features, one integerized sparse column per source field.
        float_simple_binned_dict = {
            name: tf.contrib.layers.sparse_column_with_integerized_feature(name + '_binned', bucket_size=15, dtype=tf.int16, combiner="sum")
            for name in FLOAT_COLUMNS_SIMPLE_BIN_TRANSFORM}
        float_log_binned_dict = {
            name: tf.contrib.layers.sparse_column_with_integerized_feature(name + '_log_binned', bucket_size=15, dtype=tf.int16, combiner="sum")
            for name in FLOAT_COLUMNS_LOG_BIN_TRANSFORM}
        int_log_binned_dict = {
            name: tf.contrib.layers.sparse_column_with_integerized_feature(name + '_log_int', bucket_size=15, dtype=tf.int16, combiner="sum")
            for name in INT_COLUMNS}

        # Wide columns.
        # NOTE: dict.values() must be wrapped in list() — on Python 3 it is a
        # view and cannot be concatenated to a list (the original code raised
        # TypeError here under Python 3).
        wide_columns = [event_weekend, user_has_already_viewed_doc, event_hour, event_platform, traffic_source,
                        ad_id, doc_id, doc_event_id, ad_advertiser, doc_ad_source_id, doc_event_publisher_id,
                        doc_event_source_id, event_country, event_country_state, event_geo_location,
                        doc_ad_category_id, doc_ad_topic_id, doc_ad_entity_id,
                        doc_event_category_id, doc_event_topic_id, doc_event_entity_id
                        ] + list(float_simple_binned_dict.values()) \
                          + list(float_log_binned_dict.values()) \
                          + list(int_log_binned_dict.values())

        if linear_use_crosses:
            wide_interaction_features = [
                ad_id, doc_id, doc_event_id, ad_advertiser, doc_ad_source_id,
                doc_ad_publisher_id, doc_event_publisher_id, doc_event_source_id,
                event_country, event_country_state, event_geo_location,
                doc_ad_category_id, doc_ad_topic_id, doc_ad_entity_id,
                doc_event_category_id, doc_event_topic_id, doc_event_entity_id,
                event_weekend, user_has_already_viewed_doc, event_hour, event_platform, traffic_source]
            full_interactions = combinations(wide_interaction_features, 2)
            # Combinations meaningless for prediction, to be ignored: features
            # describing the same entity (same ad, same event doc, same geo)
            # carry no extra signal when crossed with each other.
            # FIX: the doc_event trio previously listed doc_event_entity_id
            # twice instead of doc_event_topic_id, so two intended-to-be-ignored
            # crosses slipped through.
            interactions_to_ignore = list(combinations([ad_id, ad_advertiser, doc_id, doc_ad_source_id, doc_ad_publisher_id], 2)) + \
                list(combinations([doc_event_id, doc_event_publisher_id, doc_event_source_id], 2)) + \
                list(combinations([event_country, event_country_state, event_geo_location], 2)) + \
                list(combinations([doc_event_category_id, doc_event_topic_id, doc_event_entity_id], 2)) + \
                [(ad_id, doc_ad_category_id),
                 (ad_id, doc_ad_topic_id),
                 (ad_id, doc_ad_entity_id)] + \
                [(doc_id, doc_ad_category_id),
                 (doc_id, doc_ad_topic_id),
                 (doc_id, doc_ad_entity_id)]
            meaningful_interactions = set(full_interactions) - set(interactions_to_ignore) - \
                set(map(lambda x: (x[1], x[0]), interactions_to_ignore))
            for interaction in meaningful_interactions:
                bucket_size = interaction[0].bucket_size * interaction[1].bucket_size
                # If both categorical features are sparse (hash-bucketed),
                # reduce their crossed space to something manageable.
                if not (interaction[0].is_integerized and interaction[1].is_integerized):
                    bucket_size = int(math.pow(bucket_size, 0.78))
                wide_columns.append(tf.contrib.layers.crossed_column(interaction, hash_key=HASH_KEY, combiner="sum",
                                                                     hash_bucket_size=bucket_size))

    if 'deep' in model_type:
        event_weekend_ohe = tf.contrib.layers.one_hot_column(event_weekend)  # 0-1
        user_has_already_viewed_doc_ohe = tf.contrib.layers.one_hot_column(user_has_already_viewed_doc)  # 0-1
        event_hour_ohe = tf.contrib.layers.one_hot_column(event_hour)  # 1-6
        event_platform_ohe = tf.contrib.layers.one_hot_column(event_platform)  # 1-3
        traffic_source_ohe = tf.contrib.layers.one_hot_column(traffic_source)  # 1-3

        float_columns_simple = [tf.contrib.layers.real_valued_column(name)
                                for name in FLOAT_COLUMNS_SIMPLE_BIN_TRANSFORM]
        float_columns_log_01scaled = [tf.contrib.layers.real_valued_column(name + '_log_01scaled')
                                      for name in FLOAT_COLUMNS_LOG_BIN_TRANSFORM]
        int_columns_log_01scaled = [tf.contrib.layers.real_valued_column(name + '_log_01scaled')
                                    for name in INT_COLUMNS]

        # Each scattered embedding sizes its hash space and its dimension from
        # the same unique-value estimate.
        # FIX: doc_ad_publisher_id previously computed dimension from 4000
        # (copy-paste from the source_id line) while its size used 1000.
        deep_columns = [event_weekend_ohe,
                        user_has_already_viewed_doc_ohe,
                        event_hour_ohe,
                        event_platform_ohe,
                        traffic_source_ohe,
                        # Single-valued categories
                        tf.contrib.layers.scattered_embedding_column('ad_id', size=250000*get_embedding_size(embedding_size_factor, 250000), dimension=get_embedding_size(embedding_size_factor, 250000), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('doc_id', size=100000*get_embedding_size(embedding_size_factor, 100000), dimension=get_embedding_size(embedding_size_factor, 100000), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('doc_event_id', size=300000*get_embedding_size(embedding_size_factor, 300000), dimension=get_embedding_size(embedding_size_factor, 300000), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('ad_advertiser', size=2500*get_embedding_size(embedding_size_factor, 2500), dimension=get_embedding_size(embedding_size_factor, 2500), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('doc_ad_publisher_id', size=1000*get_embedding_size(embedding_size_factor, 1000), dimension=get_embedding_size(embedding_size_factor, 1000), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('doc_ad_source_id', size=4000*get_embedding_size(embedding_size_factor, 4000), dimension=get_embedding_size(embedding_size_factor, 4000), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('doc_event_publisher_id', size=1000*get_embedding_size(embedding_size_factor, 1000), dimension=get_embedding_size(embedding_size_factor, 1000), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('doc_event_source_id', size=4000*get_embedding_size(embedding_size_factor, 4000), dimension=get_embedding_size(embedding_size_factor, 4000), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('event_country', size=300*get_embedding_size(embedding_size_factor, 300), dimension=get_embedding_size(embedding_size_factor, 300), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('event_country_state', size=2000*get_embedding_size(embedding_size_factor, 2000), dimension=get_embedding_size(embedding_size_factor, 2000), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('event_geo_location', size=2500*get_embedding_size(embedding_size_factor, 2500), dimension=get_embedding_size(embedding_size_factor, 2500), hash_key=HASH_KEY, combiner="sum"),
                        # Multi-valued categories
                        tf.contrib.layers.scattered_embedding_column('doc_ad_category_id', size=100*get_embedding_size(embedding_size_factor, 100), dimension=get_embedding_size(embedding_size_factor, 100), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('doc_ad_topic_id', size=350*get_embedding_size(embedding_size_factor, 350), dimension=get_embedding_size(embedding_size_factor, 350), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('doc_ad_entity_id', size=10000*get_embedding_size(embedding_size_factor, 10000), dimension=get_embedding_size(embedding_size_factor, 10000), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('doc_event_category_id', size=100*get_embedding_size(embedding_size_factor, 100), dimension=get_embedding_size(embedding_size_factor, 100), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('doc_event_topic_id', size=350*get_embedding_size(embedding_size_factor, 350), dimension=get_embedding_size(embedding_size_factor, 350), hash_key=HASH_KEY, combiner="sum"),
                        tf.contrib.layers.scattered_embedding_column('doc_event_entity_id', size=10000*get_embedding_size(embedding_size_factor, 10000), dimension=get_embedding_size(embedding_size_factor, 10000), hash_key=HASH_KEY, combiner="sum"),
                        ] + float_columns_simple + float_columns_log_01scaled + int_columns_log_01scaled

    return wide_columns, deep_columns
def gzip_reader_fn():
    """Return a TFRecordReader configured for GZIP-compressed record files."""
    gzip_options = tf.python_io.TFRecordOptions(
        compression_type=tf.python_io.TFRecordCompressionType.GZIP)
    return tf.TFRecordReader(options=gzip_options)
def get_transformed_reader_input_fn(transformed_metadata,
                                    transformed_data_paths,
                                    batch_size,
                                    mode):
    """Wrap the get input features function to provide the runtime arguments."""
    is_eval = (mode == tf.contrib.learn.ModeKeys.EVAL)
    # A single path is passed bare; multiple paths are passed as a list.
    if len(transformed_data_paths) == 1:
        file_pattern = transformed_data_paths[0]
    else:
        file_pattern = transformed_data_paths
    return input_fn_maker.build_training_input_fn(
        metadata=transformed_metadata,
        file_pattern=file_pattern,
        training_batch_size=batch_size,
        label_keys=[LABEL_COLUMN],
        reader=gzip_reader_fn,
        key_feature_name=KEY_FEATURE_COLUMN,
        reader_num_threads=4,
        queue_capacity=batch_size * 2,
        # Evaluation reads the data in order, exactly once.
        randomize_input=not is_eval,
        num_epochs=1 if is_eval else None)
def get_vocab_sizes():
    """Return the (approximate) vocabulary size for each categorical feature.

    # TODO(b/35300113) This method will change as we move to tf-transform and
    # use the new schema and statistics protos. For now return a large-ish
    # constant (exact vocabulary size not needed, since we are doing "mod" in
    # tf.Learn). Note that the current workaround might come with a quality
    # sacrifice that should hopefully be lifted soon.
    """
    vocab_sizes = {
        'event_weekend': 2,
        'user_has_already_viewed_doc': 2,
        'event_hour': 6,
        'event_platform': 3,
        'traffic_source': 3,
        'ad_id': 418295,
        'doc_id': 143856,
        'doc_event_id': 636482,
        'ad_advertiser': 2052,
        'doc_ad_source_id': 6339,
        'doc_event_publisher_id': 830,
        'doc_event_source_id': 6339,
        'event_country': 222,
        'event_country_state': 1892,
        'event_geo_location': 2273,
        'doc_ad_category_id': 90,
        'doc_ad_topic_id': 301,
        'doc_ad_entity_id': 52439,
        'doc_event_category_id': 90,
        'doc_event_topic_id': 301,
        'doc_event_entity_id': 52439,
    }
    return vocab_sizes
#'MAP' metric, compute the ranking quality of the ads for each display_id
def map_custom_metric(predictions, labels, weights=None,
metrics_collections=None, updates_collections=None,
name=None):
display_ids_tf | |
автоматическом режиме маршрутизатор будет динамически регулировать распределение полосы пропускания в соответствии с текущим использованием сети, чтобы обеспечить бесперебойную работу сети'
},
'优先级模式下路由器会动态调整带宽分配,保证优先级较高的设备网络体验流畅': {
'en': 'In priority mode, the router will dynamically adjust the bandwidth allocation to ensure a smooth network experience for devices with higher priority',
'ru': 'В режиме приоритета маршрутизатор динамически регулирует распределение пропускной способности, чтобы обеспечить бесперебойную работу сети для устройств с более высоким приоритетом'
},
'手工模式下路由器会根据您设置的速度调整带宽分配': {
'en': 'In manual mode, the router will adjust the bandwidth allocation according to the speed you set',
'ru': 'В ручном режиме маршрутизатор регулирует распределение полосы пропускания в соответствии с установленной Вами скоростью'
},
'系统根据设备需要自动调配网速': {
'en': 'The system automatically adjusts the network speed according to the needs of the device',
'ru': 'Система автоматически регулирует скорость сети в соответствии с потребностями устройства'
},
'优先保证打游戏的网速,不卡顿不掉线': {
'en': 'Priority to ensure the network speed for playing games, without lags and disconnection',
'ru': 'Приоритет для обеспечения скорости сети для игр, без лагов и отключений'
},
'优先保证浏览网页的网速,大图秒打开': {
'en': 'Priority to ensure the speed for Web browsing, big pages are opened in seconds',
'ru': 'Приоритет для обеспечения скорости просмотра веб-страниц, большие страницы открываются за секунды'
},
'优先保证看视频的网速,高清也流畅': {
'en': 'Priority to ensure the speed of watching videos, also in high-definition',
'ru': 'Приоритет для обеспечения скорости просмотра видео, в том числе и в высоком разрешении'
},
'家庭WiFi限速': {'en': 'Home Wi-Fi speed limit', 'ru': 'Ограничение домашней Wi-Fi сети'},
'当前网速': {'en': 'Current speed', 'ru': 'Текущая скорость'},
'限速模式': {'en': 'Speed limit mode', 'ru': 'Режим ограничения скорости'},
'最大速度': {'en': 'Maximum speed', 'ru': 'Максимальная скорость'},
'优先级': {'en': 'Priority', 'ru': 'Приоритет'},
'编辑': {'en': 'Edit', 'ru': 'Изменить'},
'访客WiFi限速': {'en': 'Guest Wi-Fi speed limit', 'ru': 'Ограничение скорости гостевой сети Wi-Fi'},
'上传限速': {'en': 'Upload speed limit', 'ru': 'Ограничение исходящей скорости'},
'下载限速': {'en': 'Download speed limit', 'ru': 'Ограничение входящей скорости'},
'路由器自身上传下载限速': {
'en': 'The router\\\'s own upload and download speed limit',
'ru': 'Ограничение исходящей и входящей скорости роутера'
},
'上传': {'en': 'Upload', 'ru': 'Исходящее'},
'下载': {'en': 'Download', 'ru': 'Входящее'},
'系统自动': {'en': 'System automatically', 'ru': 'Система автоматически'},
'无限制': {'en': 'Unlimited', 'ru': 'Нет ограничений'},
'上传速度': {'en': 'Upload speed', 'ru': 'Исходящая скорость'},
'下载速度': {'en': 'Download speed', 'ru': 'Входящая скорость'},
'IP和MAC': {'en': 'IP and MAC ', 'ru': 'IP и MAC '},
'系统错误,请重试': {'en': 'System error, please try again', 'ru': 'Ошибка системы, пожалуйста, попробуйте снова'},
'未设置': {'en': 'not set', 'ru': 'Не установлено'},
'低': {'en': 'Low', 'ru': 'Низкий'},
'中': {'en': 'Medium', 'ru': 'Средний'},
'高': {'en': 'High', 'ru': 'Высокий'},
'你确定要清除这个设备的QoS配置?': {
'en': 'Are you sure you want to clear the QoS configuration for this device?',
'ru': 'Вы уверены, что хотите очистить конфигурацию QoS для этого устройства?'
},
'设置QoS': {'en': 'Set QoS', 'ru': 'Установить QoS'},
'已绑定的设备列表': {'en': 'List of bound devices', 'ru': 'Список связанных устройств'},
'查询中': {'en': 'Querying', 'ru': 'Запрос'},
'解绑选中': {'en': 'Unbind selected', 'ru': 'Отвзять выбранное'},
'解除绑定': {'en': 'Unbind', 'ru': 'Отвязать'},
'没有设置信息': {'en': 'No setup info', 'ru': 'Нет информации о настройке'},
'移除': {'en': 'Remove', 'ru': 'Удалить'},
'增加一项': {'en': 'Add', 'ru': 'Добавить'},
'一键绑定': {'en': 'Bind', 'ru': 'Привязать'},
'绑定设备': {'en': 'Bind the device', 'ru': 'Привязать устройство'},
'你确定要解除此项绑定?': {
'en': 'Are you sure you want to unbind this device?',
'ru': 'Вы уверены, что хотите отменить привязку этого устройства?'
},
'存在IP冲突,请检查输入项': {
'en': 'There is an IP conflict, please check the entry',
'ru': 'Конфликт IP адресов, проверьте адрес'
},
'出现异常,请刷新页面': {
'en': 'An error occurred, please refresh the page',
'ru': 'Произошла ошибка, пожалуйста, перегрузите страницу'
},
'确定要删除这项数据吗': {
'en': 'Are you sure you want to delete this data?',
'ru': 'Вы уверены, что хотите удалить эти данные?'
},
'你还未选择任何设备': {'en': 'You have not selected any devices', 'ru': 'Вы не выбрали ни одного устройства'},
'确认要解除选中项目的绑定关系?': {
'en': 'Are you sure you want to unbind the selected item?',
'ru': 'Вы уверены, что хотите отменить привязку выбранного элемента?'
},
'什么是DDNS?': {'en': 'What is DDNS?'},
'是动态域名服务的缩写': {'en': 'Abbreviation for Dynamic Domain Name Service'},
'目前路由器拨号上网获得的多半都是动态IP,DDNS可以将路由器变化的外网IP和固定的域名绑定,从而用户可以在外网通过该固定域名来访问路由器': {'en': 'At present, most of the ISP provides access with dynamic IP. DDNS can bind the router\\\'s changed external network IP to a fixed domain name, so users can access the router on the external network through the fixed domain name'},
'如何设置DDNS?': {'en': 'How to set up DDNS?'},
'通过DDNS服务商获得域名及账号密码信息;': {'en': 'Obtain domain name and account password information through DDNS service provider;'},
'开启DDNS服务,添加服务,输入前一步获取的信息,保存并启用该服务': {'en': 'Enable the DDNS service, add the service, enter the information obtained in the previous step, save and enable the service'},
'注意事项': {'en': 'Instructions'},
'可以通过内置的DDNS运营商去该运营商官网去注册账号及申请域名;': {'en': 'You can go to the service\\\'s official website to register an account and apply for a domain name through the built-in DDNS service;'},
'用户名和密码为注册该运营商的用户名与密码;': {'en': 'The user name and password are the user name and password for the DDNS service;'},
'主机名称为从运营商申请的域名;': {'en': 'The host name is the domain name applied for from the DDNS service;'},
'强制检查为WAN口IP的检查时间,如无特殊需求,建议使用默认配置;': {'en': 'The mandatory check is the check time of the WAN port IP. If there is no special requirement, it is recommended to use the default configuration;'},
'强制更新为域名与IP强制更新的时间,如无特殊需求,建议使用默认配置;': {'en': 'The forced update is the time when the domain name and IP are forced to update. If there is no special requirement, it is recommended to use the default configuration;'},
'只能同时开启一个DDNS服务': {'en': 'Only one DDNS service can be enabled at the same time'},
'服务列表': {'en': 'List of services', 'ru': 'Список сервисов'},
'服务提供商及主机名称': {'en': 'Service provider and host name', 'ru': 'Провайдер и имя хоста'},
'最近更新': {'en': 'Latest update', 'ru': 'Последнее обновление'},
'状态': {'en': ' Status', 'ru': ' Состояние'},
'手动更新': {'en': 'Manual update', 'ru': 'Обновить'},
'口': {'en': ' external ', 'ru': ' внешнее '},
'已启用': {'en': 'Activated', 'ru': 'Активировано'},
'停用': {'en': 'Disable', 'ru': 'Выключить'},
'未启用': {'en': 'Disabled', 'ru': 'Выключено'},
'启用': {'en': 'Enable', 'ru': 'Включено'},
'连接正常': {'en': 'Connection is normal', 'ru': 'Соединение нормальное'},
'连接中': {'en': 'Connecting', 'ru': 'Подключение'},
'连接错误': {'en': 'Connection error', 'ru': 'Ошибка подключения'},
'服务提供商': {'en': 'Service provider', 'ru': 'Провайдер сервиса'},
'用户名': {'en': 'Username', 'ru': 'Имя пользователя'},
'主机名称': {'en': 'Hostname', 'ru': 'Имя хоста'},
'状态检查': {'en': 'Status check', 'ru': 'Проверка состояния'},
'状态检查的分钟间隔': {'en': 'Status check period, mins', 'ru': 'Период проверки состояния, мин'},
'强制更新': {'en': 'Force update', 'ru': 'Принудительное обновление'},
'强制更新的小时间隔': {'en': 'Force update period, hours', 'ru': 'Период принудительного обновления, час'},
'更新成功': {'en': 'Update completed', 'ru': 'Обновление завершено'},
'未启动无法强制更新': {'en': 'Unable to force update', 'ru': 'Не удалось принудительно обновить'},
'还没有服务添加进来': {'en': 'No services have been added yet', 'ru': 'Сервисы еще не были добавлены'},
'暂无更新': {'en': 'Not updated yet', 'ru': 'Еще не обновлено'},
'端口转发规则列表': {'en': 'List of port forwarding rules', 'ru': 'Список правил переадресации портов'},
'范围转发规则列表': {
'en': 'List of port ranges forwarding rules',
'ru': 'Список правил переадресации диапазонов портов'
},
'协议': {'en': 'Protocol', 'ru': 'Протокол'},
'外部端口': {'en': 'External port', 'ru': 'Внешний порт'},
'内部IP地址': {'en': 'Internal IP address', 'ru': 'Внутренний IP-адрес'},
'内部端口': {'en': 'Internal port', 'ru': 'Внутренний порт'},
'起始端口': {'en': 'Start port', 'ru': 'Начальный порт'},
'结束端口': {'en': 'End port', 'ru': 'Конечный порт'},
'目标IP': {'en': 'Target IP', 'ru': 'Целевой IP'},
'开启DMZ功能可以将内网某一个设备的IP映射到外网,方便从外网访问到该设备': {'en': 'Turning on the DMZ function can map the IP of a device on the internal network to the external network, which is convenient for accessing the device from the external network'},
'DMZ状态': {'en': 'DMZ Status:', 'ru': 'Состояние DMZ:'},
'未生效': {'en': 'Not active', 'ru': 'Не активно'},
'TCP和UDP': {'en': 'TCP and UDP ', 'ru': 'TCP и UDP'},
'IP地址最后一位': {'en': 'Last digits of IP address', 'ru': 'Последние цифры IP-адреса'},
'新建端口转发规则': {'en': 'Create a new port forwarding rule', 'ru': 'Создайте новое правило переадресации портов'},
'确定要删除这条规则吗?': {
'en': 'Are you sure you want to delete this rule?',
'ru': 'Вы уверены, что хотите удалить это правило?'
},
'新建范围转发规则': {'en': 'Create a new range forwarding rule', 'ru': 'Создать новое правило переадресации диапазона'},
'规则正在生效中,请等待': {'en': 'The rule is applying, please wait', 'ru': 'Правило применяется, пожалуйста, подождите'},
'编辑端口转发规则': {'en': 'Edit port forwarding rules', 'ru': 'Изменить правила переадресации портов'},
'更新': {'en': 'Replace', 'ru': 'Заменить'},
'编辑范围转发规则': {
'en': 'Edit port ranges forwarding rules',
'ru': 'Изменить правила переадресации диапазонов портов'
},
'添加规则': {'en': 'Add rule', 'ru': 'Добавить правило'},
'已生效': {'en': 'In force', 'ru': 'В силе'},
'端口转发开启了,不可以设置DMZ': {
'en': 'Port forwarding is | |
<reponame>pulumi/pulumi-rancher2
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['AuthConfigOpenLdapArgs', 'AuthConfigOpenLdap']
@pulumi.input_type
class AuthConfigOpenLdapArgs:
def __init__(__self__, *,
servers: pulumi.Input[Sequence[pulumi.Input[str]]],
service_account_distinguished_name: pulumi.Input[str],
service_account_password: pulumi.Input[str],
test_password: pulumi.Input[str],
test_username: pulumi.Input[str],
user_search_base: pulumi.Input[str],
access_mode: Optional[pulumi.Input[str]] = None,
allowed_principal_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
certificate: Optional[pulumi.Input[str]] = None,
connection_timeout: Optional[pulumi.Input[int]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
group_dn_attribute: Optional[pulumi.Input[str]] = None,
group_member_mapping_attribute: Optional[pulumi.Input[str]] = None,
group_member_user_attribute: Optional[pulumi.Input[str]] = None,
group_name_attribute: Optional[pulumi.Input[str]] = None,
group_object_class: Optional[pulumi.Input[str]] = None,
group_search_attribute: Optional[pulumi.Input[str]] = None,
group_search_base: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
nested_group_membership_enabled: Optional[pulumi.Input[bool]] = None,
port: Optional[pulumi.Input[int]] = None,
tls: Optional[pulumi.Input[bool]] = None,
user_disabled_bit_mask: Optional[pulumi.Input[int]] = None,
user_enabled_attribute: Optional[pulumi.Input[str]] = None,
user_login_attribute: Optional[pulumi.Input[str]] = None,
user_member_attribute: Optional[pulumi.Input[str]] = None,
user_name_attribute: Optional[pulumi.Input[str]] = None,
user_object_class: Optional[pulumi.Input[str]] = None,
user_search_attribute: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a AuthConfigOpenLdap resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] servers: OpenLdap servers list (list)
:param pulumi.Input[str] service_account_distinguished_name: Service account DN for access OpenLdap service (string)
:param pulumi.Input[str] service_account_password: Service account password for access OpenLdap service (string)
:param pulumi.Input[str] test_password: Password for test access to OpenLdap service (string)
:param pulumi.Input[str] test_username: Username for test access to OpenLdap service (string)
:param pulumi.Input[str] user_search_base: User search base DN (string)
:param pulumi.Input[str] access_mode: Access mode for auth. `required`, `restricted`, `unrestricted` are supported. Default `unrestricted` (string)
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_principal_ids: Allowed principal ids for auth. Required if `access_mode` is `required` or `restricted`. Ex: `openldap_user://<DN>` `openldap_group://<DN>` (list)
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations of the resource (map)
:param pulumi.Input[str] certificate: Base64 encoded CA certificate for TLS if self-signed. Use filebase64(<FILE>) for encoding file (string)
:param pulumi.Input[int] connection_timeout: OpenLdap connection timeout. Default `5000` (int)
:param pulumi.Input[bool] enabled: Enable auth config provider. Default `true` (bool)
:param pulumi.Input[str] group_dn_attribute: Group DN attribute. Default `entryDN` (string)
:param pulumi.Input[str] group_member_mapping_attribute: Group member mapping attribute. Default `member` (string)
:param pulumi.Input[str] group_member_user_attribute: Group member user attribute. Default `entryDN` (string)
:param pulumi.Input[str] group_name_attribute: Group name attribute. Default `cn` (string)
:param pulumi.Input[str] group_object_class: Group object class. Default `groupOfNames` (string)
:param pulumi.Input[str] group_search_attribute: Group search attribute. Default `cn` (string)
:param pulumi.Input[str] group_search_base: Group search base (string)
:param pulumi.Input[Mapping[str, Any]] labels: Labels of the resource (map)
:param pulumi.Input[bool] nested_group_membership_enabled: Nested group membership enable. Default `false` (bool)
:param pulumi.Input[int] port: OpenLdap port. Default `389` (int)
:param pulumi.Input[bool] tls: Enable TLS connection (bool)
:param pulumi.Input[int] user_disabled_bit_mask: User disabled bit mask (int)
:param pulumi.Input[str] user_enabled_attribute: User enable attribute (string)
:param pulumi.Input[str] user_login_attribute: User login attribute. Default `uid` (string)
:param pulumi.Input[str] user_member_attribute: User member attribute. Default `memberOf` (string)
:param pulumi.Input[str] user_name_attribute: User name attribute. Default `givenName` (string)
:param pulumi.Input[str] user_object_class: User object class. Default `inetorgperson` (string)
:param pulumi.Input[str] user_search_attribute: User search attribute. Default `uid|sn|givenName` (string)
"""
pulumi.set(__self__, "servers", servers)
pulumi.set(__self__, "service_account_distinguished_name", service_account_distinguished_name)
pulumi.set(__self__, "service_account_password", service_account_password)
pulumi.set(__self__, "test_password", test_password)
pulumi.set(__self__, "test_username", test_username)
pulumi.set(__self__, "user_search_base", user_search_base)
if access_mode is not None:
pulumi.set(__self__, "access_mode", access_mode)
if allowed_principal_ids is not None:
pulumi.set(__self__, "allowed_principal_ids", allowed_principal_ids)
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if certificate is not None:
pulumi.set(__self__, "certificate", certificate)
if connection_timeout is not None:
pulumi.set(__self__, "connection_timeout", connection_timeout)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if group_dn_attribute is not None:
pulumi.set(__self__, "group_dn_attribute", group_dn_attribute)
if group_member_mapping_attribute is not None:
pulumi.set(__self__, "group_member_mapping_attribute", group_member_mapping_attribute)
if group_member_user_attribute is not None:
pulumi.set(__self__, "group_member_user_attribute", group_member_user_attribute)
if group_name_attribute is not None:
pulumi.set(__self__, "group_name_attribute", group_name_attribute)
if group_object_class is not None:
pulumi.set(__self__, "group_object_class", group_object_class)
if group_search_attribute is not None:
pulumi.set(__self__, "group_search_attribute", group_search_attribute)
if group_search_base is not None:
pulumi.set(__self__, "group_search_base", group_search_base)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if nested_group_membership_enabled is not None:
pulumi.set(__self__, "nested_group_membership_enabled", nested_group_membership_enabled)
if port is not None:
pulumi.set(__self__, "port", port)
if tls is not None:
pulumi.set(__self__, "tls", tls)
if user_disabled_bit_mask is not None:
pulumi.set(__self__, "user_disabled_bit_mask", user_disabled_bit_mask)
if user_enabled_attribute is not None:
pulumi.set(__self__, "user_enabled_attribute", user_enabled_attribute)
if user_login_attribute is not None:
pulumi.set(__self__, "user_login_attribute", user_login_attribute)
if user_member_attribute is not None:
pulumi.set(__self__, "user_member_attribute", user_member_attribute)
if user_name_attribute is not None:
pulumi.set(__self__, "user_name_attribute", user_name_attribute)
if user_object_class is not None:
pulumi.set(__self__, "user_object_class", user_object_class)
if user_search_attribute is not None:
pulumi.set(__self__, "user_search_attribute", user_search_attribute)
    @property
    @pulumi.getter
    def servers(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        OpenLdap servers list (list)
        """
        return pulumi.get(self, "servers")
    @servers.setter
    def servers(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """Replace the stored ``servers`` value."""
        pulumi.set(self, "servers", value)
    @property
    @pulumi.getter(name="serviceAccountDistinguishedName")
    def service_account_distinguished_name(self) -> pulumi.Input[str]:
        """
        Service account DN for access OpenLdap service (string)
        """
        return pulumi.get(self, "service_account_distinguished_name")
    @service_account_distinguished_name.setter
    def service_account_distinguished_name(self, value: pulumi.Input[str]):
        """Replace the stored ``service_account_distinguished_name`` value."""
        pulumi.set(self, "service_account_distinguished_name", value)
    @property
    @pulumi.getter(name="serviceAccountPassword")
    def service_account_password(self) -> pulumi.Input[str]:
        """
        Service account password for access OpenLdap service (string)
        """
        return pulumi.get(self, "service_account_password")
    @service_account_password.setter
    def service_account_password(self, value: pulumi.Input[str]):
        """Replace the stored ``service_account_password`` value."""
        pulumi.set(self, "service_account_password", value)
    @property
    @pulumi.getter(name="testPassword")
    def test_password(self) -> pulumi.Input[str]:
        """
        Password for test access to OpenLdap service (string)
        """
        return pulumi.get(self, "test_password")
    @test_password.setter
    def test_password(self, value: pulumi.Input[str]):
        """Replace the stored ``test_password`` value."""
        pulumi.set(self, "test_password", value)
    @property
    @pulumi.getter(name="testUsername")
    def test_username(self) -> pulumi.Input[str]:
        """
        Username for test access to OpenLdap service (string)
        """
        return pulumi.get(self, "test_username")
    @test_username.setter
    def test_username(self, value: pulumi.Input[str]):
        """Replace the stored ``test_username`` value."""
        pulumi.set(self, "test_username", value)
    @property
    @pulumi.getter(name="userSearchBase")
    def user_search_base(self) -> pulumi.Input[str]:
        """
        User search base DN (string)
        """
        return pulumi.get(self, "user_search_base")
    @user_search_base.setter
    def user_search_base(self, value: pulumi.Input[str]):
        """Replace the stored ``user_search_base`` value."""
        pulumi.set(self, "user_search_base", value)
    @property
    @pulumi.getter(name="accessMode")
    def access_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Access mode for auth. One of `required`, `restricted`, `unrestricted`. Default `unrestricted` (string)
        """
        return pulumi.get(self, "access_mode")

    @access_mode.setter
    def access_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "access_mode", value)
    @property
    @pulumi.getter(name="allowedPrincipalIds")
    def allowed_principal_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Allowed principal IDs for auth. Required if `access_mode` is `required` or `restricted`. Ex: `openldap_user://<DN>` `openldap_group://<DN>` (list)
        """
        return pulumi.get(self, "allowed_principal_ids")

    @allowed_principal_ids.setter
    def allowed_principal_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "allowed_principal_ids", value)
    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Annotations to attach to the resource (map)
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "annotations", value)
    @property
    @pulumi.getter
    def certificate(self) -> Optional[pulumi.Input[str]]:
        """
        Base64-encoded CA certificate for TLS when the server certificate is self-signed. Use filebase64(<FILE>) to encode a file (string)
        """
        return pulumi.get(self, "certificate")

    @certificate.setter
    def certificate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "certificate", value)
    @property
    @pulumi.getter(name="connectionTimeout")
    def connection_timeout(self) -> Optional[pulumi.Input[int]]:
        """
        OpenLDAP connection timeout, in milliseconds. Default `5000` (int)
        """
        return pulumi.get(self, "connection_timeout")

    @connection_timeout.setter
    def connection_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "connection_timeout", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether this auth config provider is enabled. Default `true` (bool)
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="groupDnAttribute")
    def group_dn_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        LDAP attribute holding a group's DN. Default `entryDN` (string)
        """
        return pulumi.get(self, "group_dn_attribute")

    @group_dn_attribute.setter
    def group_dn_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_dn_attribute", value)
    @property
    @pulumi.getter(name="groupMemberMappingAttribute")
    def group_member_mapping_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        LDAP attribute mapping a group to its members. Default `member` (string)
        """
        return pulumi.get(self, "group_member_mapping_attribute")

    @group_member_mapping_attribute.setter
    def group_member_mapping_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_member_mapping_attribute", value)
    @property
    @pulumi.getter(name="groupMemberUserAttribute")
    def group_member_user_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        User attribute referenced by group membership entries. Default `entryDN` (string)
        """
        return pulumi.get(self, "group_member_user_attribute")

    @group_member_user_attribute.setter
    def group_member_user_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_member_user_attribute", value)
    @property
    @pulumi.getter(name="groupNameAttribute")
    def group_name_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        LDAP attribute holding a group's display name. Default `cn` (string)
        """
        return pulumi.get(self, "group_name_attribute")

    @group_name_attribute.setter
    def group_name_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_name_attribute", value)
    @property
    @pulumi.getter(name="groupObjectClass")
    def group_object_class(self) -> Optional[pulumi.Input[str]]:
        """
        LDAP object class identifying group entries. Default `groupOfNames` (string)
        """
        return pulumi.get(self, "group_object_class")

    @group_object_class.setter
    def group_object_class(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_object_class", value)
    @property
    @pulumi.getter(name="groupSearchAttribute")
    def group_search_attribute(self) -> Optional[pulumi.Input[str]]:
        """
        LDAP attribute used when searching for groups. Default `cn` (string)
        """
        return pulumi.get(self, "group_search_attribute")

    @group_search_attribute.setter
    def group_search_attribute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_search_attribute", value)
    @property
    @pulumi.getter(name="groupSearchBase")
    def group_search_base(self) -> Optional[pulumi.Input[str]]:
        """
        Base DN under which group searches are performed (string)
        """
        return pulumi.get(self, "group_search_base")

    @group_search_base.setter
    def group_search_base(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_search_base", value)
    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Labels to attach to the resource (map)
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "labels", value)
    @property
    @pulumi.getter(name="nestedGroupMembershipEnabled")
    def nested_group_membership_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether nested group membership resolution is enabled. Default `false` (bool)
        """
        return pulumi.get(self, "nested_group_membership_enabled")

    @nested_group_membership_enabled.setter
    def nested_group_membership_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "nested_group_membership_enabled", value)
    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        OpenLDAP server port. Default `389` (int)
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter
    def tls(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to connect to the OpenLDAP server over TLS (bool)
        """
        return pulumi.get(self, "tls")
@tls.setter
def tls(self, value: | |
= self.trajevents[self._FScompatibleNames(evnames)]
except AttributeError:
# empty pointset
return None
else:
result.indepvararray += t_offset
return result
else:
# assume a sequence of strings
assert all([ev in compat_evnames for ev in evnames]), \
"Invalid event name(s) provided: %s"%str(evnames)
result = {}
for (evname, evptset) in self.trajevents.items():
compat_evname = self._FScompatibleNamesInv(evname)
if compat_evname not in evnames:
continue
result[compat_evname] = copy(evptset)
try:
result[compat_evname].indepvararray += t_offset
except AttributeError:
# empty pointset
pass
return result
def getEventTimes(self, evnames=None, asGlobalTime=True):
"""Produce dictionary of lists of all flagged events' independent
variable values, for each event (whether terminal or not).
Times will be globalized if optional asGlobalTime argument is True
(default behavior). If a single event name is passed, only the pointset
is returned (not a dictionary).
evnames may be a singleton string or list of strings, or left blank to
return data for all events.
The events are guaranteed to be ordered by the value of the
independent variable.
"""
result = {}
if asGlobalTime:
t_offset = self.globalt0
else:
t_offset = 0
compat_evnames = self._FScompatibleNamesInv(list(self.trajevents.keys()))
if evnames is None:
evnames = compat_evnames
if isinstance(evnames, six.string_types):
# singleton
assert evnames in compat_evnames, "Invalid event name provided: %s"%evnames
try:
return self.trajevents[self._FScompatibleNames(evnames)].indepvararray \
+ t_offset
except AttributeError:
# empty pointset
return []
else:
# assume a sequence of strings
assert all([ev in compat_evnames for ev in evnames]), \
"Invalid event name(s) provided: %s"%str(evnames)
for (evname, evptset) in self.trajevents.items():
compat_evname = self._FScompatibleNamesInv(evname)
if compat_evname not in compat_evnames:
continue
try:
result[compat_evname] = evptset.indepvararray + t_offset
except AttributeError:
# empty pointset
result[compat_evname] = []
return result
def query(self, querykey=''):
"""Return info about Generator set-up.
Valid query key: 'pars', 'parameters', 'pardomains', 'events',
'ics', 'initialconditions', 'vars', 'variables',
'auxvars', 'auxvariables', 'vardomains'
"""
assert isinstance(querykey, six.string_types), \
("Query argument must be a single string")
if querykey not in self._querykeys:
print('Valid query keys are: %r' % (self._querykeys, ))
print("('events' key only queries model-level events, not those")
print(" inside sub-models)")
raise ValueError('Query key '+querykey+' is not valid')
if querykey in ['pars', 'parameters']:
result = self._FScompatibleNamesInv(self.pars)
elif querykey in ['ics', 'initialconditions']:
try:
result = self._FScompatibleNamesInv(self.initialconditions)
except AttributeError:
result = None
elif querykey == 'events':
result = self.eventstruct.events
elif querykey in ['vars', 'variables']:
result = self._FScompatibleNamesInv(self.funcspec.vars)
elif querykey in ['auxvars', 'auxvariables']:
result = self._FScompatibleNamesInv(self.funcspec.auxvars)
elif querykey == 'vardomains':
result = {}
for varname, var in self.variables.items():
result[self._FScompatibleNamesInv(varname)] = \
var.depdomain
elif querykey == 'pardomains':
result = {}
for parname, pardom in self.parameterDomains.items():
result[self._FScompatibleNamesInv(parname)] = \
pardom
elif querykey == 'abseps':
result = self._abseps
return result
def get(self, key):
"""For API compatibility with ModelInterface: get will make a copy of
the key and pass it through the inverse FuncSpec-compatible name map.
"""
return self._FScompatibleNamesInv(getattr(self, key))
def haveJacobian(self):
"""Default method. Can be overridden by subclasses."""
return False
def haveJacobian_pars(self):
"""Default method. Can be overridden by subclasses."""
return False
    def info(self, verbose=1):
        """Print a description of this Generator at the given verbosity level."""
        print(self._infostr(verbose))
def _kw_process_dispatch(self, keys, kw):
# compile funcspec arguments by processing init keys
# make ignorespecial initially empty so that it can safely be
# extended by its own dispatch method and by process_system
fs_args = {'name': self.name,
'ignorespecial': []}
# make sure to do varspecs first in case of FOR macros
self._kw_process_varspecs(kw, fs_args)
for key in keys:
if key == 'varspecs':
# already did it
continue
f = getattr(self, '_kw_process_'+key)
# f can update fs_args in place
f(kw, fs_args)
return fs_args
    def _kw_process_varspecs(self, kw, fs_args):
        """Validate the mandatory 'varspecs' keyword, store the
        FuncSpec-compatible spec dict in fs_args, and expand any `for` macro
        definitions into their individual variable names.
        """
        if 'varspecs' in kw:
            # names and definitions may be strings or symbolic objects
            for varname, varspec in kw['varspecs'].items():
                if not isinstance(varname, (six.string_types, QuantSpec,
                                            Quantity)):
                    print("Expected string, QuantSpec, or Quantity to name variable, got type %s" %(type(varname)))
                    raise PyDSTool_TypeError("Invalid type for Variable name: %s"%str(varname))
                if not isinstance(varspec, (six.string_types, QuantSpec,
                                            Quantity)):
                    print("Expected string, QuantSpec, or Quantity definition for %s, got type %s" %(varname, type(varspec)))
                    raise PyDSTool_TypeError("Invalid type for Variable %s's specification."%varname)
            self.foundKeys += 1
            fs_args['varspecs'] = \
                self._FScompatibleNames(ensureStrArgDict(kw['varspecs']))
        else:
            raise PyDSTool_KeyError("Keyword 'varspecs' missing from "
                                    "argument")
        all_vars = []
        # record number of variables defined by macros for FuncSpec checks
        fs_args['_for_macro_info'] = args(totforvars=0, numfors=0, varsbyforspec={})
        for specname, specstr in fs_args['varspecs'].items():
            if not '[' in specname:
                # record non-FOR variables as identity mapping
                all_vars.append(specname)
                fs_args['_for_macro_info'].varsbyforspec[specname] = [specname]
                continue
            # assume this will be a FOR macro (FuncSpec will check properly later)
            assert specstr[:4] == 'for(', ('Expected `for` macro when '
                                   'square brackets used in name definition')
            # read contents of braces
            ok, arglist, nargs = readArgs(specstr[3:])
            if not ok:
                raise ValueError('Error finding '
                                 'arguments applicable to `for` '
                                 'macro')
            # root variable name, e.g. 'x' from 'x[i]'
            rootstr = specname[:specname.find('[')]
            assert len(arglist) == 4, ('Wrong number of arguments passed '
                                       'to `for` macro. Expected 4')
            # arglist[1], arglist[2] are the inclusive index bounds
            ilo = int(arglist[1])
            ihi = int(arglist[2])
            new_vars = [rootstr+str(i) for i in range(ilo,ihi+1)]
            all_vars.extend(new_vars)
            fs_args['_for_macro_info'].numfors += 1
            fs_args['_for_macro_info'].totforvars += (ihi-ilo+1)
            fs_args['_for_macro_info'].varsbyforspec[specname] = new_vars
        # Temporary record of all var names, will be deleted before finalizing
        # class initialization
        self.__all_vars = all_vars
def _kw_process_tdomain(self, kw, fs_args):
if 'tdomain' in kw:
self.tdomain = kw['tdomain']
if not self._is_domain_ordered(self.tdomain[0], self.tdomain[1]):
print("Time domain specified: [%s, %s]"%(self.tdomain[0],
self.tdomain[1]))
raise PyDSTool_ValueError("tdomain values must be in order of "
"increasing size")
self.foundKeys += 1
else:
self.tdomain = [-Inf, Inf]
def _kw_process_ttype(self, kw, fs_args):
# e.g. for map system
if 'ttype' in kw:
try:
self.indepvartype = _num_equivtype[kw['ttype']]
except KeyError:
raise TypeError('Invalid ttype: %s'%str(kw['ttype']))
self.foundKeys += 1
else:
self.indepvartype = float
def _kw_process_tdata(self, kw, fs_args):
# set tdomain first
if 'tdata' in kw:
self.tdata = kw['tdata']
if self.tdata[0] >= self.tdata[1]:
raise PyDSTool_ValueError("tdata values must be in order of "
"increasing size")
# _tdata is made into list to be consistent with
# other uses of it in other Generators...
if self.tdomain[0] > self.tdata[0]:
raise ValueError('tdata cannot be specified below smallest '\
'value in tdomain\n (possibly due to uncertain'\
'bounding)')
if self.tdomain[1] < self.tdata[1]:
raise ValueError('tdata cannot be specified above largest '\
'value in tdomain\n (possibly due to uncertain '\
'bounding)')
self.foundKeys += 1
else:
self.tdata = self.tdomain # default needed
def _kw_process_tstep(self, kw, fs_args):
# requires self.indepvartype (e.g. for map system)
if 'tstep' in kw:
self.tstep = kw['tstep']
if self.tstep > self.tdata[1]-self.tdata[0]:
raise PyDSTool_ValueError('tstep too large')
if compareNumTypes(self.indepvartype, _all_int) and round(self.tstep) != self.tstep:
raise PyDSTool_ValueError('tstep must be an integer for integer ttype')
self.foundKeys += 1
else:
if compareNumTypes(self.indepvartype, _all_int):
# default to 1 for integer types
self.tstep = 1
else:
# no reasonable default - so raise error
raise PyDSTool_KeyError('tstep key needed for float ttype')
def _kw_process_inputs(self, kw, fs_args):
if 'inputs' in kw:
inputs = copy(kw['inputs'])
if isinstance(inputs, Trajectory):
# extract the variables
self.inputs.update(self._FScompatibleNames(inputs.variables))
elif isinstance(inputs, Variable):
self.inputs.update({self._FScompatibleNames(inputs.name): \
inputs})
elif isinstance(inputs, Pointset):
# turn into Variables with linear interpoolation between
# independent variable values
for n in inputs.coordnames:
x_array = inputs[n]
nFS = self._FScompatibleNames(n)
self.inputs[nFS] = \
Variable(interp1d(inputs.indepvararray,
x_array), 't',
Interval(nFS, float, extent(x_array),
abseps=self._abseps),
name=n) # keep original name here
elif isinstance(inputs, dict):
self.inputs.update(self._FScompatibleNames(inputs))
# ensure values are Variables or Pointsets
for k, v in self.inputs.items():
if not isinstance(v, Variable):
try:
self.inputs[k]=Variable(v)
except:
raise TypeError("Invalid specification of inputs")
else:
raise TypeError("Invalid specification of inputs")
self._register(self.inputs)
self.foundKeys += 1
# only signal that _extInputsChanged if there are actually some
# defined, e.g. inputs may be formally present in the keys but in
# fact unused
self._extInputsChanged = (self.inputs != {})
fs_args['inputs'] = list(self.inputs.keys())
else:
self._extInputsChanged = False
    def _kw_process_ics(self, kw, fs_args):
        """Process the optional 'ics' keyword: store FuncSpec-compatible
        initial conditions in self._xdatadict / self.initialconditions and
        default every undeclared variable's IC to NaN.
        """
        if 'ics' in kw:
            self._xdatadict = {}
            for k, v in dict(kw['ics']).items():
                self._xdatadict[self._FScompatibleNames(str(k))] = ensurefloat(v)
            self.initialconditions = self._xdatadict.copy()
            # ICs that name variables absent from varspecs are an error
            unspecd = remain(self._xdatadict.keys(), self.__all_vars)
            if unspecd != []:
                # ics were declared for variables not in varspecs
                raise ValueError("Missing varspec entries for declared ICs: " + str(unspecd))
            # variables with no declared IC default to NaN
            for name in remain(self.__all_vars,
                               self._xdatadict.keys()):
                self.initialconditions[name] = NaN
            self.foundKeys += 1
        else:
            self._xdatadict = {}
            # NOTE(review): this branch assumes self.initialconditions
            # already exists (it is only created in the 'ics' branch above) -
            # confirm it is initialized elsewhere before this is reached
            for name in self.__all_vars:
                self.initialconditions[name] = NaN
def _kw_process_allvars(self, kw, fs_args):
if 'auxvars' in kw:
assert 'vars' not in kw, ("Cannot use both 'auxvars' and 'vars' "
"keywords")
if isinstance(kw['auxvars'], list):
auxvars = self._FScompatibleNames([str(v) for v in kw['auxvars']])
else:
auxvars = self._FScompatibleNames([str(kw['auxvars'])])
vars = remain(self.__all_vars, auxvars)
self.foundKeys += 1
elif 'vars' in kw:
assert 'auxvars' not in kw, \
"Cannot use both | |
# -*- coding: utf-8 -*-
import scrapy
import re
from .. import items
from bs4 import BeautifulSoup
from selenium import webdriver
from requests import get
from time import sleep
# change this path to wherever the chrome webdriver is located in your system
WEBDRIVER_PATH = ("D:\\chromedriver_win32\\" + "chromedriver.exe")
class AlibabaCrawlerSpider(scrapy.Spider):
name = "AlibabaCrawler"
def __init__(self):
# open the webdriver when the spider starts crawling
self.driver = webdriver.Chrome(WEBDRIVER_PATH)
self.product_driver = webdriver.Chrome(WEBDRIVER_PATH)
self.seller_driver = webdriver.Chrome(WEBDRIVER_PATH)
self.origins = dict({})
self.ships_to_NA = dict({})
def start_requests(self):
urls = [
"https://www.alibaba.com/products/helix_snail.html?IndexArea=product_en&page=1",
"https://www.alibaba.com/products/live_snail.html?IndexArea=product_en&page=1",
"https://www.alibaba.com/products/edible_snail.html?IndexArea=product_en&page=1",
"https://www.alibaba.com/products/giant_african_snail.html?IndexArea=product_en&page=1",
"https://www.alibaba.com/products/achatina.html?IndexArea=product_en&page=1",
"https://www.alibaba.com/products/brown_garden_snail.html?IndexArea=product_en&page=1"
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse_catalogue)
def close_spider(self, spider):
# quit the webdriver when the spider closes
self.driver.quit()
self.product_driver.quit()
self.seller_driver.quit()
    def parse_catalogue(self, response):
        """Render a catalogue page with Selenium, yield a Product item for
        each ad on it, then follow the next catalogue page (up to page 50
        or until a page contains no products).
        """
        # open up the webpage using selenium
        self.driver.get(response.url)
        # scroll to bottom of the page to load the second half of the catalogue
        self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        # pause to give the lazily-loaded content time to render
        sleep(3)
        page_html = self.driver.page_source
        # first parse html into beautiful soup
        soup = BeautifulSoup(page_html, "lxml")
        # obtain all product elements from the current catalogue page
        products = soup.find_all("div", class_="item-content")
        # for each one of the products parse the information on the ad
        for product in products:
            # pass the product to the parse product helper
            prod_info = self._parse_product(response, product)
            # yield the product item after parsing
            if (prod_info is not None):
                yield prod_info
        # recursively parse up until page 50 or until there are no products
        # store starting url
        starting_URL = response.request.url
        str_len = len("&page=")
        # extract the current page number from the URL's trailing "&page=N"
        page_number = int(starting_URL[starting_URL.find("&page=") + str_len:])
        if (page_number < 50 and len(products) > 0):
            # get a substring of the original url removing the page number
            base_URL = starting_URL[0:starting_URL.find("&page=") + str_len]
            # generate new URL
            new_URL = base_URL + str(page_number + 1)
            # parse the next page
            yield response.follow(new_URL, callback=self.parse_catalogue)
def _parse_product(self, response, prod):
# obtain the title
# start by getting the title element
title_element = prod.find("div", class_="title-wrap")
# Ensure the title_element isn't None
if (title_element is not None):
# obtain the actual title from the element
title = title_element.h2.text.strip()
# next obtain the the URL
URL = title_element.h2.a["href"]
# request the product page
page_response = self.product_driver.get("http:" + URL)
product_page = BeautifulSoup(self.product_driver.page_source, "lxml")
# parse the product categories
(category1, category2, category3, category4) = self._parse_product_category(product_page)
# parse the product description
description = self._parse_product_description(product_page)
# parse the product price
(price_mag, currency, unit) = self._parse_product_price(prod)
# parse the seller information
(seller_name, origin, doesShip) = self._parse_seller_info(prod)
# parse the url for the search term
term = self._parse_search_term(response.url)
# generate and return product item
return items.Product(Title=title, URL=URL, Category1=category1,
Category2=category2, Category3=category3,
Category4=category4, Description=description,
Price=price_mag, Currency=currency, Unit=unit,
Seller=seller_name, Origin=origin, Ships_To_NA=doesShip,
Search_Term=term)
else:
return None
def _parse_product_category(self, product_page):
# obtain the product category spans
category1_span = product_page.select("#page-container > div.content-header > div > div > div > ol > li:nth-child(3) > a > span")
category2_span = product_page.select("#page-container > div.content-header > div > div > div > ol > li:nth-child(4) > a > span")
category3_span = product_page.select("#page-container > div.content-header > div > div > div > ol > li:nth-child(5) > a > span")
category4_span = product_page.select("#page-container > div.content-header > div > div > div > ol > li:nth-child(6) > a > span")
# initialize product categories
category1 = None
category2 = None
category3 = None
category4 = None
# obtain the text of product categories
if (category1_span):
category1 = " ".join(category1_span[0].getText(separator=" ").replace("\r", " ").replace("\n", " ").replace("\xa0", " ").replace("\t", " ").split()).strip()
if (category2_span):
category2 = " ".join(category2_span[0].getText(separator=" ").replace("\r", " ").replace("\n", " ").replace("\xa0", " ").replace("\t", " ").split()).strip()
if (category3_span):
category3 = " ".join(category3_span[0].getText(separator=" ").replace("\r", " ").replace("\n", " ").replace("\xa0", " ").replace("\t", " ").split()).strip()
if (category4_span):
category4 = " ".join(category4_span[0].getText(separator=" ").replace("\r", " ").replace("\n", " ").replace("\xa0", " ").replace("\t", " ").split()).strip()
return (category1, category2, category3, category4)
def _parse_product_description(self, product_page):
# obtain the product description tab
description_tab = product_page.find("div", class_="richtext richtext-detail rich-text-description")
# initialize info var
description = None
if (description_tab is not None):
# obtain the text of product description
description = description_tab.getText(separator=" ").replace("\r", " ").replace("\n", " ").replace("\xa0", " ").replace("\t", " ").strip()
description = re.sub(" +", " ", description)
return description
def _parse_product_price(self, prod):
# now obtain the cost information
price_element = prod.find("div", class_="price")
# intitlize the magnitude of the price and currency
price_mag = None
currency = None
unit = None
# check if the price_element is none
if (price_element is not None):
# obtain the complete price text
complete_price = price_element.text.strip()
# check if a unit is given
unit_index = complete_price.find("/")
if (unit_index != -1):
unit_text = complete_price[unit_index:].strip()
price = complete_price[0:unit_index].strip()
else:
unit_text = None
price = complete_price
# check if there is a "-" in the price
range_index = price.find("-")
if (range_index != -1):
# if there is a range take lower end of the cost
price = price[0:range_index]
# get the magnitude of the price
price_mag = isolate_numeric(price, exclusions=["."])
# obtain the currency from the price
currency = isolate_alpha(price)
# obtain unit
if (unit_text is not None):
unit = unit_text[1:].strip()
else:
unit = None
return (price_mag, currency, unit)
    def _parse_seller_info(self, prod):
        """Extract (seller_name, origin, ships_to_NA) from a catalogue tile,
        using the per-seller caches (self.origins, self.ships_to_NA) to
        avoid re-fetching a seller's page with Selenium more than once.
        """
        # obtain the div containing the seller info
        seller_info = prod.find("div", class_="extra-wrap")
        # initialize info vars
        seller_name = None
        origin = None
        doesShip = None
        # ensure the seller info has been found
        if (seller_info is not None):
            # extract the seller name
            seller_name = seller_info.a.text.strip()
            if (seller_name in self.origins):
                # cache hit: origin already resolved for this seller
                origin = self.origins[seller_name]
            else:
                # now obtain the seller origin from the tile itself
                origin_elements = seller_info.find("div", class_="list-item__seller-info").find_all("div", class_="list-item__slash-wrapper")
                # seller origin exists in the second element
                if (len(origin_elements) > 1):
                    origin_element = origin_elements[1].find("div")
                    # check if the seller origin is available
                    if (origin_element is not None):
                        # extract the name from the element
                        if (origin_element.has_attr("title")):
                            origin = origin_element.get("title")
                            self.origins[seller_name] = origin
            # check if the current seller name is in the ships-to-NA dict
            if (seller_name in self.ships_to_NA):
                # cache hit: shipping info already resolved
                doesShip = self.ships_to_NA[seller_name]
            else:
                # cache miss: render the seller page with Selenium and parse
                # both the shipping flag and a fallback origin from it
                page_response = self.seller_driver.get("http:" + seller_info.a["href"])
                seller_page = BeautifulSoup(self.seller_driver.page_source, "lxml")
                (doesShip, origin2) = self._parse_seller_page(seller_page)
                self.ships_to_NA[seller_name] = doesShip
                # use the origin obtained from seller page only when the
                # tile did not provide one
                if (origin is None):
                    origin = origin2
                    self.origins[seller_name] = origin
        # now return the extracted seller info
        return (seller_name, origin, doesShip)
def _parse_seller_page(self, seller_page):
# obtain the trade capabilities pane
trade_caps = seller_page.find("div", class_="icbu-pc-cpTradeCapability")
# obtain the div containing company location
location_info = seller_page.find("div", class_="com-location")
# initialize info vars
doesShip = None
origin = None
# check if the div was found
if (location_info is not None):
# obtain the seller origin
origin = location_info.text.strip().split(",")[-1].strip()
# check if the pane was found
if (trade_caps is not None):
# if the pane was found find all of its information lists
trade_info = trade_caps.find_all("div", class_="infoList-mod-field")
# loop through each element looking for list on main markets
market_index = -1
curr_index = 0
while (curr_index < len(trade_info) and market_index == -1):
# obtain the title element
curr_list = trade_info[curr_index]
list_title = curr_list.find("div", class_="title").text
# check if title contains "Main Markets"
if (list_title.find("Main Markets") != -1):
# if it is found then set the market_index to curr_index
market_index = curr_index
# iterate curr_index
curr_index += 1
# check if Main Markets was found
if (market_index != -1):
# if the market was found then check the list of markets for NA
main_markets = trade_info[market_index]
market_info = main_markets.find("div", class_="content").text
# check if the market info contains North America
contains_NA = market_info.find("North America")
if (contains_NA != -1):
doesShip = True
else:
doesShip = False
else:
doesShip = None
else:
# try parsing the trade capabilities from an alternate format
trade_caps = seller_page.find("div", class_="widget-supplier-trade-market")
if (trade_caps is not None):
# if the trade caps were found in an alternate forma
# check if the North American is listed
contains_NA = trade_caps.text.upper().find("NORTH AMERICA")
if (contains_NA | |
not specified then an attempt will be made to find a model
corresponding to the current dataset name,
`'model_' + self.dataset.name + '.pkl'`. If there is no current
dataset then the most recent model will be loaded.
This method is only intended to be used to deserialise models created
by this interactive Jupyter widget modeller, and will not successfully
load complicated ReflectModel created outside of the interactive
modeller.
Parameters
----------
f: file like or str, optional
pickle file to load model from.
"""
if f is None and self.dataset is not None:
# try and load the model corresponding to the current dataset
f = "model_" + self.dataset.name + ".pkl"
elif f is None:
# load the most recent model file
files = list(filter(os.path.isfile, glob.glob("model_*.pkl")))
files.sort(key=lambda x: os.path.getmtime(x))
files.reverse()
if len(files):
f = files[0]
if f is None:
self._print("No model file is specified/available.")
return
try:
with possibly_open_file(f, "rb") as g:
reflect_model = pickle.load(g)
self.set_model(reflect_model)
except (RuntimeError, FileNotFoundError) as exc:
# RuntimeError if the file isn't a ReflectModel
# FileNotFoundError if the specified file name wasn't found
self._print(repr(exc), repr(f))
def set_model(self, model):
"""
Change the `refnx.reflect.ReflectModel` associated with the `Motofit`
instance.
Parameters
----------
model: refnx.reflect.ReflectModel
"""
if not isinstance(model, ReflectModel):
raise RuntimeError("`model` was not an instance of ReflectModel")
if self.model_view is not None:
self.model_view.unobserve_all()
# figure out if the reflect_model is a different instance. If it is
# then the objective has to be updated.
if model is not self.model:
self.model = model
self._update_analysis_objects()
self.model = model
self.model_view = ReflectModelView(self.model)
self.model_view.observe(self.update_model, names=["view_changed"])
self.model_view.observe(self.redraw, names=["view_redraw"])
# observe when the number of varying parameters changed. This
# invalidates a curvefitter, and a new one has to be produced.
self.model_view.observe(
self._on_num_varying_changed, names=["num_varying"]
)
self.model_view.do_fit_button.on_click(self.do_fit)
self.model_view.to_code_button.on_click(self._to_code)
self.model_view.save_model_button.on_click(self.save_model)
self.model_view.load_model_button.on_click(self.load_model)
self.redraw(None)
    def update_model(self, change):
        """
        Recompute the theoretical reflectivity and SLD curves and refresh
        the plots when the model parameters change. No-op until the figure
        has been created by calling the Motofit instance.

        Parameters
        ----------
        change
            Traitlets change notification (unused; present for the
            observer signature).
        """
        if not self.fig:
            return

        # evaluate the model over the displayed Q range
        q = np.linspace(self.qmin, self.qmax, self.qpnt)
        theoretical = self.model.model(q)
        yt, _ = self.transform(q, theoretical)

        sld_profile = self.model.structure.sld_profile()
        z, sld = sld_profile
        if self.theoretical_plot is not None:
            self.theoretical_plot.set_data(q, yt)

            self.theoretical_plot_sld.set_data(z, sld)
            # rescale the SLD axis to the new profile extent
            self.ax_sld.relim()
            self.ax_sld.autoscale_view()

        if self.dataset is not None:
            # if there's a dataset loaded then residuals_plot
            # should exist
            residuals = self.objective.residuals()
            self.chisqr.value = np.sum(residuals ** 2)
            self.residuals_plot.set_data(self.dataset.x, residuals)
            self.ax_residual.relim()
            self.ax_residual.autoscale_view()

        self.fig.canvas.draw()
def _on_num_varying_changed(self, change):
# observe when the number of varying parameters changed. This
# invalidates a curvefitter, and a new one has to be produced.
if change["new"] != change["old"]:
self._curvefitter = None
def _update_analysis_objects(self):
use_weights = self.use_weights.value == "Yes"
self.objective = Objective(
self.model,
self.dataset,
transform=self.transform,
use_weights=use_weights,
)
self._curvefitter = None
    def __call__(self, data=None, model=None):
        """
        Display the `Motofit` GUI in a Jupyter notebook cell, (re)building
        the matplotlib figure and optionally loading a dataset and model.

        Parameters
        ----------
        data: refnx.dataset.Data1D
            The dataset to associate with the `Motofit` instance.

        model: refnx.reflect.ReflectModel or str or file-like
            A model to associate with the data.
            If `model` is a `str` or `file`-like then the `load_model` method
            will be used to try and load the model from file. This assumes that
            the file is a pickle of a `ReflectModel`

        Returns
        -------
        The ipywidgets display box for the GUI.
        """
        # the theoretical model
        # display the main graph
        import matplotlib.pyplot as plt

        self.fig = plt.figure(figsize=(9, 4))

        # grid specs depending on whether the residuals are displayed
        if self.display_residuals.value:
            d_gs = self._gridspec1[0, 0]
            sld_gs = self._gridspec1[:, 1]
        else:
            d_gs = self._gridspec2[0, 0]
            sld_gs = self._gridspec2[0, 1]

        self.ax_data = self.fig.add_subplot(d_gs)
        self.ax_data.set_xlabel(r"$Q/\AA^{-1}$")
        self.ax_data.set_ylabel("Reflectivity")
        self.ax_data.grid(True, color="b", linestyle="--", linewidth=0.1)

        self.ax_sld = self.fig.add_subplot(sld_gs)
        self.ax_sld.set_ylabel(r"$\rho/10^{-6}\AA^{-2}$")
        self.ax_sld.set_xlabel(r"$z/\AA$")

        # residuals share the Q axis with the data panel
        self.ax_residual = self.fig.add_subplot(
            self._gridspec1[1, 0], sharex=self.ax_data
        )
        self.ax_residual.set_xlabel(r"$Q/\AA^{-1}$")
        self.ax_residual.grid(True, color="b", linestyle="--", linewidth=0.1)
        self.ax_residual.set_visible(self.display_residuals.value)

        with warnings.catch_warnings():
            # tight_layout can warn about incompatible axes; harmless here
            warnings.simplefilter("ignore")
            self.fig.tight_layout()

        # draw the theoretical curve for the current model
        q = np.linspace(self.qmin, self.qmax, self.qpnt)
        theoretical = self.model.model(q)
        yt, _ = self.transform(q, theoretical)

        self.theoretical_plot = self.ax_data.plot(q, yt, zorder=2)[0]
        self.ax_data.set_yscale("log")

        z, sld = self.model.structure.sld_profile()
        self.theoretical_plot_sld = self.ax_sld.plot(z, sld)[0]

        # the figure has been reset, so remove ref to the data_plot,
        # residual_plot
        self.data_plot = None
        self.residuals_plot = None

        self.dataset = None
        if data is not None:
            self.load_data(data)

        # set_model/load_model trigger their own redraw, so return early
        if isinstance(model, ReflectModel):
            self.set_model(model)
            return self.display_box
        elif model is not None:
            self.load_model(model)
            return self.display_box

        self.redraw(None)
        return self.display_box
    def load_data(self, data):
        """
        Load a dataset into the `Motofit` instance.

        Parameters
        ----------
        data: refnx.dataset.Data1D, or str, or file-like
            Anything `ReflectDataset` can be constructed from is accepted.
        """
        if isinstance(data, ReflectDataset):
            self.dataset = data
        else:
            self.dataset = ReflectDataset(data)
        self.dataset_name.value = self.dataset.name
        # loading a dataset changes the objective and curvefitter
        self._update_analysis_objects()
        # clamp the displayed Q range to the data's extent
        self.qmin = np.min(self.dataset.x)
        self.qmax = np.max(self.dataset.x)
        if self.fig is not None:
            yt, et = self.transform(self.dataset.x, self.dataset.y)
            if self.data_plot is None:
                # first dataset: create the scatter artist
                (self.data_plot,) = self.ax_data.plot(
                    self.dataset.x,
                    yt,
                    label=self.dataset.name,
                    ms=2,
                    marker="o",
                    ls="",
                    zorder=1,
                )
                self.data_plot.set_label(self.dataset.name)
                self.ax_data.legend()
                # no need to calculate residuals here, that'll be updated in
                # the redraw method
                (self.residuals_plot,) = self.ax_residual.plot(self.dataset.x)
            else:
                # subsequent datasets re-use the existing artist
                self.data_plot.set_xdata(self.dataset.x)
                self.data_plot.set_ydata(yt)
            # calculate theoretical model over same range as data
            # use redraw over update_model because it ensures chi2 widget gets
            # displayed
            self.redraw(None)
            self.ax_data.relim()
            self.ax_data.autoscale_view()
            self.ax_residual.relim()
            self.ax_residual.autoscale_view()
            self.fig.canvas.draw()
def redraw(self, change):
"""
Redraw the Jupyter GUI associated with the `Motofit` instance.
"""
self._update_display_box(self.display_box)
self.update_model(None)
@property
def curvefitter(self):
"""
class:`CurveFitter` : Object for fitting the data based on the
objective.
"""
if self.objective is not None and self._curvefitter is None:
self._curvefitter = CurveFitter(self.objective)
return self._curvefitter
def _print(self, string):
"""
Print to the output widget
"""
from IPython.display import clear_output
with self.output:
clear_output()
print(string)
    def do_fit(self, *args):
        """
        Ask the Motofit object to perform a fit (differential evolution).

        Parameters
        ----------
        args
            ignored; present so the method can be used as a button callback.

        Notes
        -----
        After performing the fit the Jupyter display is updated.
        """
        # can't fit without data or without anything to vary
        if self.dataset is None:
            return
        if not self.model.parameters.varying_parameters():
            self._print("No parameters are being varied")
            return
        try:
            logp = self.objective.logp()
            if not np.isfinite(logp):
                self._print(
                    "One of your parameter values lies outside its"
                    " bounds. Please adjust the value, or the bounds."
                )
                return
        except ZeroDivisionError:
            # equal lower/upper bounds make the prior undefined
            self._print(
                "One parameter has equal lower and upper bounds."
                " Either alter the bounds, or don't let that"
                " parameter vary."
            )
            return
        # report chi-squared in the widget as the optimiser progresses
        def callback(xk, convergence):
            self.chisqr.value = self.objective.chisqr(xk)
        self.curvefitter.fit("differential_evolution", callback=callback)
        # need to update the widgets as the model will be updated.
        # this also redraws GUI.
        # self.model_view.refresh()
        self.set_model(self.model)
        self._print(str(self.objective))
def _to_code(self, change=None):
self._print(self.code)
@property
def code(self):
"""
str : A Python code fragment capable of fitting the data.
Executable Python code fragment for the GUI model.
"""
if self.objective is None:
self._update_analysis_objects()
return to_code(self.objective)
def _on_tab_changed(self, change):
pass
    def _on_plot_type_changed(self, change):
        """
        User would like to plot and fit as logR/linR/RQ4/RQ2, etc
        """
        self.transform = Transform(change["new"])
        if self.objective is not None:
            # keep the fit objective in sync with the display transform
            self.objective.transform = self.transform
        if self.dataset is not None:
            yt, _ = self.transform(self.dataset.x, self.dataset.y)
            self.data_plot.set_xdata(self.dataset.x)
            self.data_plot.set_ydata(yt)
        self.update_model(None)
        # probably have to change LHS axis of the data plot when
        # going between different plot types.
        if change["new"] == "logY":
            # the logY transform already log10's the data, so the axis
            # itself must be linear to avoid double-logging
            self.ax_data.set_yscale("linear")
        else:
            self.ax_data.set_yscale("log")
        self.ax_data.relim()
        self.ax_data.autoscale_view()
        self.fig.canvas.draw()
def _on_use_weights_changed(self, change):
self._update_analysis_objects()
self.update_model(None)
    def _on_display_residuals_changed(self, change):
        """Show/hide the residual axis and re-position the other axes."""
        import matplotlib.pyplot as plt
        if change["new"]:
            # residuals visible: use the 2-row gridspec layout
            self.ax_residual.set_visible(True)
            self.ax_data.set_position(
                self._gridspec1[0, 0].get_position(self.fig)
            )
            self.ax_sld.set_position(
                self._gridspec1[:, 1].get_position(self.fig)
            )
            # the residual axis shares x with ax_data, so hide the duplicates
            plt.setp(self.ax_data.get_xticklabels(), visible=False)
        else:
            # residuals hidden: data and SLD plots take the full height
            self.ax_residual.set_visible(False)
            self.ax_data.set_position(
                self._gridspec2[:, 0].get_position(self.fig)
            )
            self.ax_sld.set_position(
                self._gridspec2[:, 1].get_position(self.fig)
            )
            plt.setp(self.ax_data.get_xticklabels(), visible=True)
@property
def _options_box(self):
return widgets.VBox(
[self.plot_type, self.use_weights, self.display_residuals]
)
def _update_display_box(self, box):
"""
Redraw the Jupyter GUI associated with the `Motofit` instance
"""
vbox_widgets = []
if self.dataset is not None:
vbox_widgets.append(widgets.HBox([self.dataset_name, self.chisqr]))
self.tab.children = [
self.model_view.model_box,
self.model_view.limits_box,
self._options_box,
]
vbox_widgets.append(self.tab)
vbox_widgets.append(self.output)
box.children = tuple(vbox_widgets)
def rename_params(structure):
    """Give the parameters of each slab in *structure* human-readable names.

    Interior slabs are numbered from 1; the first/last slabs are labelled
    as fronting/backing media.
    """
    for i in range(1, len(structure) - 1):
        slab = structure[i]
        slab.thick.name = "%d - thick" % i
        slab.sld.real.name = "%d - sld" % i
        slab.sld.imag.name = "%d - isld" % i
        slab.rough.name = "%d - rough" % i

    fronting, backing = structure[0], structure[-1]
    fronting.sld.real.name = "sld - fronting"
    fronting.sld.imag.name = "isld - fronting"
    backing.sld.real.name = "sld - backing"
    backing.sld.imag.name = "isld - backing"
    backing.rough.name = "rough - backing"
def to_code(objective):
"""
Create executable Python code fragment that corresponds to the model in the
GUI.
Parameters
----------
objective: refnx.analysis.Objective
Returns
-------
code: str
Python code that can construct a reflectometry fitting system
"""
header = """import numpy as np
import refnx
from refnx.analysis import Objective, CurveFitter, Transform
from refnx.reflect import SLD, Slab, ReflectModel, Structure
from refnx.dataset import ReflectDataset
print(refnx.version.version)
"""
code = [header]
# the dataset
code.append('data = ReflectDataset("{0}")'.format(objective.data.filename))
# make some SLD's and slabs
slds = ["\n# set up the SLD objects for each layer"]
slabs | |
#:
greenygrey: str = "#7ea07a" #:
greenyyellow: str = "#c6f808" #:
grey: str = "#929591" #:
greyblue: str = "#647d8e" #:
greybrown: str = "#7f7053" #:
greygreen: str = "#86a17d" #:
greyish: str = "#a8a495" #:
greyishblue: str = "#5e819d" #:
greyishbrown: str = "#7a6a4f" #:
greyishgreen: str = "#82a67d" #:
greyishpink: str = "#c88d94" #:
greyishpurple: str = "#887191" #:
greyishteal: str = "#719f91" #:
greypink: str = "#c3909b" #:
greypurple: str = "#826d8c" #:
greyteal: str = "#5e9b8a" #:
grossgreen: str = "#a0bf16" #:
gunmetal: str = "#536267" #:
hazel: str = "#8e7618" #:
heather: str = "#a484ac" #:
heliotrope: str = "#d94ff5" #:
highlightergreen: str = "#1bfc06" #:
hospitalgreen: str = "#9be5aa" #:
hotgreen: str = "#25ff29" #:
hotmagenta: str = "#f504c9" #:
hotpink: str = "#ff028d" #:
hotpurple: str = "#cb00f5" #:
huntergreen: str = "#0b4008" #:
ice: str = "#d6fffa" #:
iceblue: str = "#d7fffe" #:
ickygreen: str = "#8fae22" #:
indianred: str = "#850e04" #:
indigo: str = "#380282" #:
indigoblue: str = "#3a18b1" #:
iris: str = "#6258c4" #:
irishgreen: str = "#019529" #:
ivory: str = "#ffffcb" #:
jade: str = "#1fa774" #:
jadegreen: str = "#2baf6a" #:
junglegreen: str = "#048243" #:
kelleygreen: str = "#009337" #:
kellygreen: str = "#02ab2e" #:
kermitgreen: str = "#5cb200" #:
keylime: str = "#aeff6e" #:
khaki: str = "#aaa662" #:
khakigreen: str = "#728639" #:
kiwi: str = "#9cef43" #:
kiwigreen: str = "#8ee53f" #:
lavender: str = "#c79fef" #:
lavenderblue: str = "#8b88f8" #:
lavenderpink: str = "#dd85d7" #:
lawngreen: str = "#4da409" #:
leaf: str = "#71aa34" #:
leafgreen: str = "#5ca904" #:
leafygreen: str = "#51b73b" #:
leather: str = "#ac7434" #:
lemon: str = "#fdff52" #:
lemongreen: str = "#adf802" #:
lemonlime: str = "#bffe28" #:
lemonyellow: str = "#fdff38" #:
lichen: str = "#8fb67b" #:
lightaqua: str = "#8cffdb" #:
lightaquamarine: str = "#7bfdc7" #:
lightbeige: str = "#fffeb6" #:
lightblue: str = "#7bc8f6" #:
lightbluegreen: str = "#7efbb3" #:
lightbluegrey: str = "#b7c9e2" #:
lightbluishgreen: str = "#76fda8" #:
lightbrightgreen: str = "#53fe5c" #:
lightbrown: str = "#ad8150" #:
lightburgundy: str = "#a8415b" #:
lightcyan: str = "#acfffc" #:
lighteggplant: str = "#894585" #:
lightergreen: str = "#75fd63" #:
lighterpurple: str = "#a55af4" #:
lightforestgreen: str = "#4f9153" #:
lightgold: str = "#fddc5c" #:
lightgrassgreen: str = "#9af764" #:
lightgreen: str = "#76ff7b" #:
lightgreenblue: str = "#56fca2" #:
lightgreenishblue: str = "#63f7b4" #:
lightgrey: str = "#d8dcd6" #:
lightgreyblue: str = "#9dbcd4" #:
lightgreygreen: str = "#b7e1a1" #:
lightindigo: str = "#6d5acf" #:
lightishblue: str = "#3d7afd" #:
lightishgreen: str = "#61e160" #:
lightishpurple: str = "#a552e6" #:
lightishred: str = "#fe2f4a" #:
lightkhaki: str = "#e6f2a2" #:
lightlavendar: str = "#efc0fe" #:
lightlavender: str = "#dfc5fe" #:
lightlightblue: str = "#cafffb" #:
lightlightgreen: str = "#c8ffb0" #:
lightlilac: str = "#edc8ff" #:
lightlime: str = "#aefd6c" #:
lightlimegreen: str = "#b9ff66" #:
lightmagenta: str = "#fa5ff7" #:
lightmaroon: str = "#a24857" #:
lightmauve: str = "#c292a1" #:
lightmint: str = "#b6ffbb" #:
lightmintgreen: str = "#a6fbb2" #:
lightmossgreen: str = "#a6c875" #:
lightmustard: str = "#f7d560" #:
lightnavy: str = "#155084" #:
lightnavyblue: str = "#2e5a88" #:
lightneongreen: str = "#4efd54" #:
lightolive: str = "#acbf69" #:
lightolivegreen: str = "#a4be5c" #:
lightorange: str = "#fdaa48" #:
lightpastelgreen: str = "#b2fba5" #:
lightpeach: str = "#ffd8b1" #:
lightpeagreen: str = "#c4fe82" #:
lightperiwinkle: str = "#c1c6fc" #:
lightpink: str = "#ffd1df" #:
lightplum: str = "#9d5783" #:
lightpurple: str = "#bf77f6" #:
lightred: str = "#ff474c" #:
lightrose: str = "#ffc5cb" #:
lightroyalblue: str = "#3a2efe" #:
lightsage: str = "#bcecac" #:
lightsalmon: str = "#fea993" #:
lightseafoam: str = "#a0febf" #:
lightseafoamgreen: str = "#a7ffb5" #:
lightseagreen: str = "#98f6b0" #:
lightskyblue: str = "#c6fcff" #:
lighttan: str = "#fbeeac" #:
lightteal: str = "#90e4c1" #:
lightturquoise: str = "#7ef4cc" #:
lighturple: str = "#b36ff6" #:
lightviolet: str = "#d6b4fc" #:
lightyellow: str = "#fffe7a" #:
lightyellowgreen: str = "#ccfd7f" #:
lightyellowishgreen: str = "#c2ff89" #:
lilac: str = "#cea2fd" #:
liliac: str = "#c48efd" #:
lime: str = "#aaff32" #:
limegreen: str = "#89fe05" #:
limeyellow: str = "#d0fe1d" #:
lipstick: str = "#d5174e" #:
lipstickred: str = "#c0022f" #:
macaroniandcheese: str = "#efb435" #:
magenta: str = "#c20078" #:
mahogany: str = "#4a0100" #:
maize: str = "#f4d054" #:
mango: str = "#ffa62b" #:
manilla: str = "#fffa86" #:
marigold: str = "#fcc006" #:
marine: str = "#042e60" #:
marineblue: str = "#01386a" #:
maroon: str = "#650021" #:
mauve: str = "#ae7181" #:
mediumblue: str = "#2c6fbb" #:
mediumbrown: str = "#7f5112" #:
mediumgreen: str = "#39ad48" #:
mediumgrey: str = "#7d7f7c" #:
mediumpink: str = "#f36196" #:
mediumpurple: str = "#9e43a2" #:
melon: str = "#ff7855" #:
merlot: str = "#730039" #:
metallicblue: str = "#4f738e" #:
midblue: str = "#276ab3" #:
midgreen: str = "#50a747" #:
midnight: str = "#03012d" #:
midnightblue: str = "#020035" #:
midnightpurple: str = "#280137" #:
militarygreen: str = "#667c3e" #:
milkchocolate: str = "#7f4e1e" #:
mint: str = "#9ffeb0" #:
mintgreen: str = "#8fff9f" #:
mintygreen: str = "#0bf77d" #:
mocha: str = "#9d7651" #:
moss: str = "#769958" #:
mossgreen: str = "#658b38" #:
mossygreen: str = "#638b27" #:
mud: str = "#735c12" #:
mudbrown: str = "#60460f" #:
muddybrown: str = "#886806" #:
muddygreen: str = "#657432" #:
muddyyellow: str = "#bfac05" #:
mudgreen: str = "#606602" #:
mulberry: str = "#920a4e" #:
murkygreen: str = "#6c7a0e" #:
mushroom: str = "#ba9e88" #:
mustard: str = "#ceb301" #:
mustardbrown: str = "#ac7e04" #:
mustardgreen: str = "#a8b504" #:
mustardyellow: str = "#d2bd0a" #:
mutedblue: str = "#3b719f" #:
mutedgreen: str = "#5fa052" #:
mutedpink: str = "#d1768f" #:
mutedpurple: str = "#805b87" #:
nastygreen: str = "#70b23f" #:
navy: str = "#01153e" #:
navyblue: str = "#001146" #:
navygreen: str = "#35530a" #:
neonblue: str = "#04d9ff" #:
neongreen: str = "#0cff0c" #:
neonpink: str = "#fe019a" #:
neonpurple: str = "#bc13fe" #:
neonred: str = "#ff073a" #:
neonyellow: str = "#cfff04" #:
niceblue: str = "#107ab0" #:
nightblue: str = "#040348" #:
ocean: str = "#017b92" #:
oceanblue: str = "#03719c" #:
oceangreen: str = "#3d9973" #:
ocher: str = "#bf9b0c" #:
ochre: str = "#bf9005" #:
ocre: str = "#c69c04" #:
offblue: str = "#5684ae" #:
offgreen: str = "#6ba353" #:
offwhite: str = "#ffffe4" #:
offyellow: str = "#f1f33f" #:
oldpink: str = "#c77986" #:
oldrose: str = "#c87f89" #:
olive: str = "#6e750e" #:
olivebrown: str = "#645403" #:
olivedrab: str = "#6f7632" #:
olivegreen: str = "#677a04" #:
oliveyellow: str = "#c2b709" #:
orange: str = "#f97306" #:
orangebrown: str = "#be6400" #:
orangeish: str = "#fd8d49" #:
orangepink: str = "#ff6f52" #:
orangered: str = "#fe420f" #:
orangeybrown: str = "#b16002" #:
orangeyellow: str = "#ffad01" #:
orangeyred: str = "#fa4224" #:
orangeyyellow: str = "#fdb915" #:
orangish: str = "#fc824a" #:
orangishbrown: str = "#b25f03" #:
orangishred: str = "#f43605" #:
orchid: str = "#c875c4" #:
pale: str = "#fff9d0" #:
paleaqua: str = "#b8ffeb" #:
paleblue: str = "#d0fefe" #:
palebrown: str = "#b1916e" #:
palecyan: str = "#b7fffa" #:
palegold: str = "#fdde6c" #:
palegreen: str = "#c7fdb5" #:
palegrey: str = "#fdfdfe" #:
palelavender: str = "#eecffe" #:
palelightgreen: str = "#b1fc99" #:
palelilac: str = "#e4cbff" #:
palelime: str = "#befd73" #:
palelimegreen: str = "#b1ff65" #:
palemagenta: str = "#d767ad" #:
palemauve: str = "#fed0fc" #:
paleolive: str = "#b9cc81" #:
paleolivegreen: str = "#b1d27b" #:
paleorange: str = "#ffa756" #:
palepeach: str = "#ffe5ad" #:
palepink: str = "#ffcfdc" #:
palepurple: str = "#b790d4" #:
palered: str = "#d9544d" | |
from fastai.torch_core import *
from fastai.basic_train import *
from fastai.callbacks import *
from fastai.data_block import CategoryList
from fastai.basic_data import *
from fastai.datasets import *
from fastai.metrics import accuracy
from fastai.train import GradientClipping
from fastai.layers import *
from fastai.text.models import *
from fastai.text.transform import *
from fastai.text.data import *
from fastai.text import *
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from sklearn.metrics import confusion_matrix
import warnings
warnings.filterwarnings("ignore")
class Attention(nn.Module):
    """Multi-head attention with a selectable scoring function.

    :param embed_dim: dimensionality of the key/query embeddings
    :param hidden_dim: per-head projection size (default: embed_dim // n_head)
    :param out_dim: output dimensionality (default: embed_dim)
    :param n_head: number of heads (Multi-Head Attention)
    :param score_function: scaled_dot_product / mlp (concat) / bi_linear (general dot)
    :return (?, q_len, out_dim,)
    """

    def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1, score_function='dot_product', dropout=0):
        super(Attention, self).__init__()
        if hidden_dim is None:
            hidden_dim = embed_dim // n_head
        if out_dim is None:
            out_dim = embed_dim
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim
        self.n_head = n_head
        self.score_function = score_function
        self.w_k = nn.Linear(embed_dim, n_head * hidden_dim)
        self.w_q = nn.Linear(embed_dim, n_head * hidden_dim)
        self.proj = nn.Linear(n_head * hidden_dim, out_dim)
        self.dropout = nn.Dropout(dropout)
        # consistently test self.score_function; the original mixed the local
        # variable and the attribute in this chain, which was fragile
        if self.score_function == 'mlp':
            self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2))
        elif self.score_function == 'bi_linear':
            self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
        else:  # dot_product / scaled_dot_product need no extra weights
            self.register_parameter('weight', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniformly initialise the (optional) scoring weight."""
        stdv = 1. / math.sqrt(self.hidden_dim)
        if self.weight is not None:
            self.weight.data.uniform_(-stdv, stdv)

    def forward(self, k, q):
        """Attend over keys *k* with queries *q*.

        k: (?, k_len, embed_dim); q: (?, q_len, embed_dim). A missing length
        dimension is added automatically.
        Returns ``(output, score)`` where output is (?, q_len, out_dim) and
        score is (n_head*?, q_len, k_len).
        """
        if len(q.shape) == 2:  # q_len missing
            q = torch.unsqueeze(q, dim=1)
        if len(k.shape) == 2:  # k_len missing
            k = torch.unsqueeze(k, dim=1)
        mb_size = k.shape[0]
        k_len = k.shape[1]
        q_len = q.shape[1]
        # project, then fold the heads into the batch dimension:
        # (n_head*?, len, hidden_dim)
        kx = self.w_k(k).view(mb_size, k_len, self.n_head, self.hidden_dim)
        kx = kx.permute(2, 0, 1, 3).contiguous().view(-1, k_len, self.hidden_dim)
        qx = self.w_q(q).view(mb_size, q_len, self.n_head, self.hidden_dim)
        qx = qx.permute(2, 0, 1, 3).contiguous().view(-1, q_len, self.hidden_dim)
        if self.score_function == 'dot_product':
            kt = kx.permute(0, 2, 1)
            score = torch.bmm(qx, kt)
        elif self.score_function == 'scaled_dot_product':
            kt = kx.permute(0, 2, 1)
            qkt = torch.bmm(qx, kt)
            score = torch.div(qkt, math.sqrt(self.hidden_dim))
        elif self.score_function == 'mlp':
            kxx = torch.unsqueeze(kx, dim=1).expand(-1, q_len, -1, -1)
            qxx = torch.unsqueeze(qx, dim=2).expand(-1, -1, k_len, -1)
            kq = torch.cat((kxx, qxx), dim=-1)  # (n_head*?, q_len, k_len, hidden_dim*2)
            # kq = torch.unsqueeze(kx, dim=1) + torch.unsqueeze(qx, dim=2)
            # torch.tanh: F.tanh is deprecated
            score = torch.tanh(torch.matmul(kq, self.weight))
        elif self.score_function == 'bi_linear':
            qw = torch.matmul(qx, self.weight)
            kt = kx.permute(0, 2, 1)
            score = torch.bmm(qw, kt)
        else:
            raise RuntimeError('invalid score_function')
        score = F.softmax(score, dim=-1)
        output = torch.bmm(score, kx)  # (n_head*?, q_len, hidden_dim)
        output = torch.cat(torch.split(output, mb_size, dim=0), dim=-1)  # (?, q_len, n_head*hidden_dim)
        output = self.proj(output)  # (?, q_len, out_dim)
        output = self.dropout(output)
        return output, score
class DynamicLSTM(nn.Module):
    """
    Recurrent wrapper for variable-length sequences, used like TensorFlow's
    RNN(input, length, ...).

    :param input_size: number of expected features in the input x
    :param hidden_size: number of features in the hidden state h
    :param num_layers: number of recurrent layers
    :param bias: if False, the layer does not use bias weights b_ih and b_hh
    :param batch_first: if True, input/output tensors are (batch, seq, feature)
    :param dropout: dropout on the outputs of each RNN layer except the last
    :param bidirectional: if True, becomes a bidirectional RNN
    :param only_use_last_hidden_state: if True, forward returns only ht
    :param rnn_type: one of {LSTM, GRU, RNN}
    """

    def __init__(self, input_size, hidden_size, num_layers=1, bias=True, batch_first=True, dropout=0,
                 bidirectional=False, only_use_last_hidden_state=False, rnn_type='LSTM'):
        super(DynamicLSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.only_use_last_hidden_state = only_use_last_hidden_state
        self.rnn_type = rnn_type
        rnn_cls = {'LSTM': nn.LSTM, 'GRU': nn.GRU, 'RNN': nn.RNN}.get(rnn_type)
        if rnn_cls is not None:
            self.RNN = rnn_cls(
                input_size=input_size, hidden_size=hidden_size, num_layers=num_layers,
                bias=bias, batch_first=batch_first, dropout=dropout, bidirectional=bidirectional)

    def forward(self, x, x_len):
        """
        Sort the batch by decreasing length, pack, run the RNN, unpack, then
        restore the caller's batch order.

        :param x: padded sequence embedding vectors
        :param x_len: per-sequence lengths (numpy/tensor list)
        :return: ht if only_use_last_hidden_state else (out, (ht, ct))
        """
        # indices that sort lengths descending, and their inverse
        desc_idx = torch.sort(-x_len)[1].long()
        restore_idx = torch.sort(desc_idx)[1].long()
        sorted_len = x_len[desc_idx]
        sorted_x = x[desc_idx]
        # pack away the padding
        packed = torch.nn.utils.rnn.pack_padded_sequence(sorted_x, sorted_len, batch_first=self.batch_first)
        if self.rnn_type == 'LSTM':
            out_pack, (ht, ct) = self.RNN(packed, None)
        else:
            out_pack, ht = self.RNN(packed, None)
            ct = None
        # unsort the hidden state: (layers*dirs, batch, hidden) -> batch axis
        ht = torch.transpose(torch.transpose(ht, 0, 1)[restore_idx], 0, 1)
        if self.only_use_last_hidden_state:
            return ht
        out = torch.nn.utils.rnn.pad_packed_sequence(out_pack, batch_first=self.batch_first)[0]
        # NOTE: this indexing assumes batch_first layout, as in the original
        out = out[restore_idx]
        if self.rnn_type == 'LSTM':
            ct = torch.transpose(torch.transpose(ct, 0, 1)[restore_idx], 0, 1)
        return out, (ht, ct)
class DynamicLSTM(nn.Module):
    '''
    LSTM which can hold variable length sequence, use like TensorFlow's
    RNN(input, length...).

    NOTE(review): this re-definition shadows the DynamicLSTM class defined
    earlier in this module; only this later version is bound at runtime.
    '''

    def __init__(self, input_size, hidden_size, num_layers=1, bias=True, batch_first=True, dropout=0,
                 bidirectional=False, only_use_last_hidden_state=False, rnn_type='LSTM'):
        super(DynamicLSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.only_use_last_hidden_state = only_use_last_hidden_state
        self.rnn_type = rnn_type
        rnn_cls = {'LSTM': nn.LSTM, 'GRU': nn.GRU, 'RNN': nn.RNN}.get(rnn_type)
        if rnn_cls is not None:
            self.RNN = rnn_cls(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers,
                               bias=bias, batch_first=batch_first, dropout=dropout, bidirectional=bidirectional)

    def forward(self, x, x_len):
        '''
        sequence -> sort -> pad and pack -> process using RNN -> unpack -> unsort
        '''
        # sort descending by length; `restore` inverts the permutation
        order = torch.sort(x_len, descending=True)[1].long()
        restore = torch.sort(order)[1].long()
        packed = torch.nn.utils.rnn.pack_padded_sequence(
            x[order], x_len[order], batch_first=self.batch_first)
        if self.rnn_type == 'LSTM':
            out_pack, (ht, ct) = self.RNN(packed, None)
        else:
            out_pack, ht = self.RNN(packed, None)
            ct = None
        # hidden states have the batch on dim 1
        ht = ht[:, restore]
        if self.only_use_last_hidden_state:
            return ht
        out, _ = torch.nn.utils.rnn.pad_packed_sequence(out_pack, batch_first=self.batch_first)
        out = out[restore] if self.batch_first else out[:, restore]
        if self.rnn_type == 'LSTM':
            ct = ct[:, restore]
        return out, (ht, ct)
class SqueezeEmbedding(nn.Module):
    '''
    Squeeze padded sequence embeddings down to the longest length in the
    batch (pack trims the padding; unpack pads back to the batch maximum).
    '''

    def __init__(self, batch_first=True):
        super(SqueezeEmbedding, self).__init__()
        self.batch_first = batch_first

    def forward(self, x, x_len):
        '''
        sequence -> sort -> pack -> unpack -> unsort
        '''
        order = torch.sort(x_len, descending=True)[1].long()
        restore = torch.sort(order)[1].long()
        packed = torch.nn.utils.rnn.pack_padded_sequence(
            x[order], x_len[order], batch_first=self.batch_first)
        out, _ = torch.nn.utils.rnn.pad_packed_sequence(packed, batch_first=self.batch_first)
        if self.batch_first:
            return out[restore]
        return out[:, restore]
class SoftAttention(nn.Module):
    '''
    Attention Mechanism for ATAE-LSTM.

    Fixes vs the original:
    * removed the hard-coded ``.cuda()`` calls -- the module now follows
      whatever device its parent is moved to. Crucially,
      ``nn.Parameter(...).cuda()`` returns a plain Tensor, so ``weight`` was
      never registered as a module parameter (not trained, not moved, not
      saved);
    * ``weight`` is initialised uniformly instead of holding uninitialised
      memory from ``torch.Tensor``;
    * ``torch.tanh`` replaces the deprecated ``F.tanh``.
    '''

    def __init__(self, hidden_dim, embed_dim):
        super(SoftAttention, self).__init__()
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim
        self.w_h = nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.w_v = nn.Linear(embed_dim, embed_dim, bias=False)
        self.w_p = nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.w_x = nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.weight = nn.Parameter(torch.Tensor(hidden_dim + embed_dim))
        stdv = 1. / math.sqrt(hidden_dim + embed_dim)
        self.weight.data.uniform_(-stdv, stdv)
        self.fc_dp = nn.Dropout(0.2)  # fc dropout (currently unused in forward)

    def forward(self, h, aspect):
        '''
        :param h: LSTM hidden states, (batch, seq_len, hidden_dim)
        :param aspect: aspect embeddings, (batch, seq_len, embed_dim)
        :return: (r, h*) -- the attention-weighted representation and the
                 final sentence representation, both (batch, hidden_dim)
        '''
        hx = self.w_h(h)
        vx = self.w_v(aspect)
        hv = torch.tanh(torch.cat((hx, vx), dim=-1))
        # attention weights over the sequence dimension
        ax = torch.unsqueeze(F.softmax(torch.matmul(hv, self.weight), dim=-1), dim=1)
        rx = torch.squeeze(torch.bmm(ax, h), dim=1)
        hn = h[:, -1, :]  # last hidden state
        hs = torch.tanh(self.w_p(rx) + self.w_x(hn))
        # hs = self.fc_dp(hs)
        return rx, hs
class ATAE_LSTM(nn.Module):
    ''' Attention-based LSTM with Aspect Embedding '''

    def __init__(self, embedding_weights):
        """
        :param embedding_weights: pre-trained embedding matrix; assumed to
            have 400-dimensional vectors (see the hard-wired 400s below) --
            TODO confirm against the embedding file.
        """
        super(ATAE_LSTM, self).__init__()
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_weights, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()
        # the LSTM input is the word embedding concatenated with the aspect embedding
        self.lstm = DynamicLSTM(400+400, 400, num_layers=1, batch_first=True)
        self.attention = SoftAttention(400, 400)
        self.dense = nn.Linear(400, 3)

    def forward(self, inputs):
        # inputs: (batch, 2, seq_len) -- text token ids and aspect token ids;
        # the padding id is taken to be 1 (see the `!= 1` length computation)
        text, aspect_text = inputs[:,0,:], inputs[:,1,:]
        x_len = torch.sum(text != 1, dim=-1)
        x_len_max = torch.max(x_len)
        aspect_len = torch.sum(aspect_text != 1, dim=-1).float()
        x = self.embed(text)
        x = self.squeeze_embedding(x, x_len)
        aspect = self.embed(aspect_text)
        # mean-pool the aspect embedding over its (unpadded) token count
        aspect_pool = torch.div(torch.sum(aspect, dim=1), aspect_len.view(aspect_len.size(0), 1))
        aspect = torch.unsqueeze(aspect_pool, dim=1).expand(-1, x_len_max, -1)
        x = torch.cat((aspect, x), dim=-1)
        h, _ = self.lstm(x, x_len)
        # BUG FIX: SoftAttention.forward returns the tuple (r, h*); the
        # original bound the whole tuple to `hs` and then passed it to
        # self.dense, which raises a TypeError at runtime. Unpack it.
        _, hs = self.attention(h, aspect)
        out = self.dense(hs)
        return out
class AlignmentMatrix(nn.Module):
    """Pairwise context/aspect alignment scores: (batch, ctx_len, asp_len)."""

    def __init__(self):
        super(AlignmentMatrix, self).__init__()
        # NOTE(review): w_u is left uninitialised here (torch.Tensor allocates
        # raw memory); presumably the training harness initialises it --
        # TODO confirm.
        self.w_u = nn.Parameter(torch.Tensor(3*400, 1))

    def forward(self, batch_size, ctx, asp):
        """
        :param batch_size: number of rows in ctx/asp
        :param ctx: (batch, ctx_len, 400) context hidden states
        :param asp: (batch, asp_len, 400) aspect hidden states
        """
        ctx_len = ctx.size(1)
        asp_len = asp.size(1)
        # allocate on the inputs' device/dtype rather than a hard-coded
        # .cuda(), so the module also works on CPU and under .to(device)
        alignment_mat = torch.zeros(batch_size, ctx_len, asp_len,
                                    device=ctx.device, dtype=ctx.dtype)
        ctx_chunks = ctx.chunk(ctx_len, dim=1)
        asp_chunks = asp.chunk(asp_len, dim=1)
        for i, ctx_chunk in enumerate(ctx_chunks):
            for j, asp_chunk in enumerate(asp_chunks):
                # feature: [c; a; c*a] -> (batch, 1, 3*hidden_dim)
                feat = torch.cat([ctx_chunk, asp_chunk, ctx_chunk*asp_chunk], dim=2)
                alignment_mat[:, i, j] = feat.matmul(self.w_u.expand(batch_size, -1, -1)).squeeze(-1).squeeze(-1)
        return alignment_mat
#%% load text file
# Read the FX sentiment news dataset into a DataFrame. The commented-out
# alternative below loads a smaller single-currency test file instead.
# df = pd.read_csv('test_fx_sentment_v3.csv') # for single currency
df = pd.read_csv('fx_sentment_news_all.csv') # for currency pairs
# df = df[(df['entityid'] == 'USD/JPY') |(df['entityid'] == 'EUR/USD')
% period - tse0 % period) / 0.01#**2
def flatbottom(x, y, sep, swidth):
    """Classify the gradient behaviour of *y* in a window around *sep*.

    Fits a line to the (index-spaced) gradient of *y* inside
    (sep - swidth/3, sep + swidth/3) and returns a canned value depending on
    the magnitude of that line's slope.
    """
    window = (x > sep - swidth / 3.) * (x < sep + swidth / 3.)
    slope = np.polyfit(x[window], np.gradient(y)[window], 1)[0]
    mag = abs(slope)
    if mag < 0.1:
        return 0.01
    if mag > 10.:
        return 0.4
    return 0.1
def guess_rrat(sdep, pdep):
    """Guess a radius ratio from the secondary (*sdep*) and primary (*pdep*)
    eclipse depths, clamping to sensible limits."""
    if pdep <= 0.2:
        # shallow primary: use the raw depth ratio, clipped
        return np.clip(sdep / pdep, 0.1, 0.95)
    val = sdep / pdep * 1.4
    if val > 1.:
        return 0.95
    if val < 0.5:
        return 0.7
    return val
def check_lcresiduals(x, y, ey):
    """Fit polynomials of increasing degree to (x, y) and flag cases where
    the BIC keeps dropping strongly with degree (i.e. the residuals still
    contain structure a low-order polynomial cannot capture).

    Returns (bic, bic_slope, flagged).
    """
    degrees = [2, 5, 9, 13]
    bic = np.zeros(len(degrees))
    for i, deg in enumerate(degrees):
        poly = np.poly1d(np.polyfit(x, y, deg))
        # chi-squared plus the BIC complexity penalty
        bic[i] = np.sum(((poly(x) - y) / ey) ** 2) + deg * np.log(len(y))
    bic_slope = np.median(np.diff(bic)) / bic[0]
    return bic, bic_slope, bool(bic_slope < -0.1)
def ilnprior(isopars, keblat):
    """Log-prior for the isochrone parameters.

    Flat within the bounds stored on *keblat*, plus a Jacobian term that
    makes the prior uniform in log10(age); -inf outside the bounds.
    """
    names = ['msum', 'mrat', 'z0', 'age', 'dist', 'ebv', 'h0', 'isoerr']
    bounds = np.array([keblat.parbounds[name] for name in names])
    pars = np.array(isopars)
    within = np.all((pars >= bounds[:, 0]) & (pars <= bounds[:, 1]))
    if within:
        # isopars[3] is log10(age)
        return 0.0 + isopars[3] * np.log(10.) + np.log(np.log(10.))
    return -np.inf
def ilnprob(isopars, keblat, lc_constraints=None):
    """Log-posterior for the isochrone-only fit: ilnprior + keblat.ilnlike.

    Returns (logprob, blobs); blobs collapses to the stringified placeholder
    (0, 0, 0) whenever the point is rejected.
    """
    lp = ilnprior(isopars, keblat)
    if np.isinf(lp):
        return -np.inf, str((0, 0, 0))
    ll, blobs = keblat.ilnlike(isopars, lc_constraints=lc_constraints)
    if np.isnan(ll) or np.isinf(ll):
        return -np.inf, str((0, 0, 0))
    return lp + ll, blobs
def lnprior(allpars, keblat, crowd_fit=False):
    """Log-prior for the joint SED + light-curve parameter set.

    allpars[:18] = (m1, m2, z0, age, dist, ebv, h0, period, tpe, esinw,
    ecosw, b, q1..q4, lcerr, isoerr). Any remaining entries are per-quarter
    crowding values, bounded to [0, 1] when *crowd_fit* is True. Returns a
    Jacobian term that makes the prior uniform in log10(age), or -inf when
    any parameter falls outside its bounds.
    """
    (m1, m2, z0, age, dist, ebv, h0, period, tpe, esinw, ecosw,
     b, q1, q2, q3, q4, lcerr, isoerr) = allpars[:18]
    # error terms are bounded in log-space; eccentricity from esinw/ecosw
    lcerr = np.log(lcerr)
    isoerr = np.log(isoerr)
    e = np.sqrt(esinw ** 2 + ecosw ** 2)
    pars2check = np.array([m1, m2, z0, age, dist, ebv, h0,
                           period, tpe, e, b, q1, q2, q3, q4, lcerr, isoerr])
    bounds = [keblat.parbounds[n] for n in
              ('m1', 'm2', 'z0', 'age', 'dist', 'ebv', 'h0', 'period', 'tpe')]
    bounds.append((0., 0.99))  # eccentricity
    bounds += [keblat.parbounds[n] for n in ('b', 'q1', 'q2', 'q3', 'q4')]
    bounds += [(-16, -4), (-16, -1)]  # log(lcerr), log(isoerr)
    bounds = np.array(bounds)
    if crowd_fit:
        bounds = np.vstack((bounds, [[0, 1]] * len(allpars[18:])))
        pars2check = np.append(pars2check, allpars[18:])
    if np.all((pars2check >= bounds[:, 0]) & (pars2check <= bounds[:, 1])):
        return 0.0 + age * np.log(10.) + np.log(np.log(10.))
    return -np.inf
def lnlike(allpars, keblat, polyorder=2, crowd_fit=None):
    """Joint log-likelihood of the isochrone (SED) fit and the light-curve fit.

    allpars : array-like
        First 18 entries are (m1, m2, z0, age, dist, ebv, h0, period, tpe,
        esinw, ecosw, b, q1, q2, q3, q4, lcerr, isoerr).
    keblat : object
        Model container supplying the observed magnitudes/fluxes, the clip
        mask, and the isofit/lcfit forward models; keblat.r1, r2 and frat are
        set as a side effect of isofit.
    polyorder : int
        Order of the detrending polynomial passed to keblat.lcfit.
    crowd_fit : unused here; kept for signature symmetry with lnprior/lnprob.

    Returns (loglike, crowding) on success, where crowding is a per-quarter
    list of estimated crowding values; returns a bare -np.inf when a forward
    model produces non-finite output.
    NOTE(review): the failure branches return a scalar while the success
    branch returns a 2-tuple; callers that unpack two values (e.g. lnprob)
    will raise TypeError on the scalar branch -- confirm this is intended.
    """
    m1, m2, z0, age, dist, ebv, h0, period, tpe, esinw, \
    ecosw, b, q1, q2, q3, q4, lcerr, isoerr = allpars[:18]
    ldcoeffs = np.array([q1, q2, q3, q4])
    keblat.updatepars(m1=m1, m2=m2, z0=z0, age=age, dist=dist, ebv=ebv,
                      h0=h0, period=period, tpe=tpe, esinw=esinw,
                      ecosw=ecosw, b=b, q1=q1, q2=q2, q3=q3, q4=q4,
                      lcerr=lcerr, isoerr=isoerr, msum=m1+m2, mrat=m2/m1)
    isopars = [m1, m2, z0, age, dist, ebv, h0, isoerr]
    magsmod = keblat.isofit(isopars)
    # a non-finite model magnitude means the isochrone evaluation failed
    if np.any(np.isinf(magsmod)):
        return -np.inf
    lc_inputs = np.array([(keblat.r1+keblat.r2)/(m1+m2)**(1./3.), keblat.r2/keblat.r1, keblat.frat])
    if np.any(np.isinf(lc_inputs)):
        return -np.inf
    # SED residuals, with the extra isoerr jitter term added in quadrature
    isores = (magsmod - keblat.magsobs) / np.sqrt(keblat.emagsobs**2 + isoerr**2)
    # chi-square plus the log-variance normalization term, then Gaussian
    # priors on E(B-V) and metallicity folded in; *= -0.5 converts the
    # accumulated chi-square-like sum into a log-likelihood
    sed_like = np.sum(isores**2) + np.sum(np.log((keblat.emagsobs**2 + isoerr**2)))
    sed_like += ((isopars[5] - keblat.ebv)/(keblat.debv))**2
    sed_like += ((isopars[2] - keblat.z0)/(0.2 * np.log(10) * keblat.z0))**2
    sed_like *= -0.5
    #now the light curve fitting part
    lcpars = np.concatenate((np.array([m1+m2, keblat.r1+keblat.r2,
                                       keblat.r2/keblat.r1, period,
                                       tpe, esinw, ecosw, b, keblat.frat]),
                             ldcoeffs))
    clip = keblat.clip
    lcmod, lcpol = keblat.lcfit(lcpars, keblat.jd[clip],
                                keblat.quarter[clip], keblat.flux[clip],
                                keblat.fluxerr[clip], np.ones(clip.sum()),
                                polyorder=polyorder)
    if np.any(np.isinf(lcmod)):
        return -np.inf
    # NOTE: 'b' is reassigned below, shadowing the impact parameter unpacked
    # above (the impact parameter is no longer needed past this point).
    # a = observed flux deviation, b = model flux deviation from unity
    a = keblat.flux[clip] - 1.
    b = lcmod*lcpol - 1.
    # cadences where the model deviation is exactly zero are excluded to
    # avoid division by zero in the crowding ratio below
    bad = (b == 0)
    c2 = 2.*(keblat.fluxerr[clip]**2 + lcerr**2)
    A = np.sum((b**2/c2)[~bad])
    lc_like = -0.5*np.sum(np.log(c2[~bad])) - 0.5 * np.log(A) + \
              np.sum((a*b/c2)[~bad])**2 / A - np.sum((a**2/c2)[~bad])
    erf_sum = scipy.special.erf(np.sum(((b**2 - a*b)/c2)[~bad]) / np.sqrt(A)) + \
              scipy.special.erf(np.sum((a*b/c2)[~bad]) / (np.sqrt(A)))
    # guard against log(0) when the two erf terms cancel exactly
    if erf_sum == 0: erf_sum = 1e-16
    lc_like += np.log(erf_sum)
    lili = lc_like + sed_like
    # per-cadence ratio of observed to model flux deviation, used as a
    # crowding estimate; averaged per quarter below
    crowd_star = ((keblat.flux[clip]-1)/b)[~bad]
    # chunk boundaries: start, end, and every index where the quarter changes
    chunk = np.unique(np.append([0, len(b[~bad])], np.where(np.diff(keblat.quarter[clip][~bad])>0)[0]+1))
    crowding = [np.sum(crowd_star[chunk[ii]:chunk[ii+1]])/(chunk[ii+1]-chunk[ii]) for ii in range(len(chunk)-1)]
    return lili, crowding
def lnprob(allpars, keblat, lc_constraints=None, crowd_fit=False):
    """Posterior log-probability: lnprior + lnlike.

    Returns a (logprob, blob) pair where the blob is a stringified list of
    [r1, r2, frat] plus per-quarter crowding values (emcee bookkeeping);
    rejected/invalid states get a matching-length list of -inf instead.
    """
    prior_val = lnprior(allpars, keblat, crowd_fit=crowd_fit)
    if np.isinf(prior_val):
        # Outside prior support: short-circuit with dummy blob values.
        n_quarters = len(np.unique(keblat.quarter[keblat.clip]))
        return -np.inf, str([-np.inf] * (3 + n_quarters))
    like_val, crowding = lnlike(allpars[:18], keblat)
    if np.isnan(like_val) or np.isinf(like_val):
        return -np.inf, str([-np.inf] * (3 + len(crowding)))
    return prior_val + like_val, str([keblat.r1, keblat.r2, keblat.frat] + crowding)
def mix_lnprior(allpars, keblat):
    """Log-prior for the mixture-model fit: flat box priors on every
    parameter (eccentricity is derived from esinw/ecosw and bounded too).

    Returns age*ln(10) + ln(ln(10)) inside the support -- this converts a
    prior flat in log10(age) into the equivalent density over age -- and
    -np.inf outside it.
    """
    (m1, m2, z0, age, dist, ebv, h0, period, tpe, esinw, ecosw,
     b, q1, q2, q3, q4, lnlcerr) = allpars[:17]
    Pb, Yb, lnisoerr = allpars[17:]
    ecc = np.sqrt(esinw ** 2 + ecosw ** 2)
    values = np.array([m1, m2, z0, age, dist, ebv, h0, period, tpe, ecc,
                       b, q1, q2, q3, q4, lnlcerr, Pb, Yb, lnisoerr])
    # Lower/upper box edges, in the same order as `values`; the Yb bounds
    # bracket the observed magnitudes by +/- 2 mag.
    lower = np.array([0.1, 0.1, 0.001, 6., 10., 0.0, 99., 5., 0., 0.,
                      0., 0., 0., 0., 0., -14., 0.,
                      np.min(keblat.magsobs - 2.), -8.])
    upper = np.array([12., 12., 0.06, 10.1, 15000., 1.0, 139., 3000., 1e8,
                      0.99, 10., 1., 1., 1., 1., -4.5, 1.,
                      np.max(keblat.magsobs) + 2., 0.])
    if np.all((values >= lower) & (values <= upper)):
        return age * np.log(10.) + np.log(np.log(10.))
    return -np.inf
def mix_lnlike2(allpars, keblat, polyorder=2, ooe=True):
    """Mixture-model log-likelihood (SED + light curve), linear-space variant.

    Each observed magnitude is modelled as drawn from the isochrone model
    with probability 1-Pb, or from a background Gaussian centred on Yb with
    probability Pb. mix_lnlike computes the same SED term with logaddexp and
    is the numerically safer variant.

    allpars : first 17 entries are (m1, m2, z0, age, dist, ebv, h0, period,
        tpe, esinw, ecosw, b, q1, q2, q3, q4, ln(lcerr)); then Pb, Yb and
        ln(isoerr).
    Returns the summed log-likelihood, or -np.inf when a forward model
    produces non-finite output.
    """
    m1, m2, z0, age, dist, ebv, h0, period, tpe, esinw, \
    ecosw, b, q1, q2, q3, q4, lcerr = allpars[:17]
    Pb, Yb = allpars[17:-1]
    # the error terms are sampled in log-space; convert back to linear
    isoerr = np.exp(allpars[-1])
    lcerr = np.exp(lcerr)
    ldcoeffs = np.array([q1, q2, q3, q4])
    keblat.updatepars(m1=m1, m2=m2, z0=z0, age=age, dist=dist, ebv=ebv,
                      h0=h0, period=period, tpe=tpe, esinw=esinw,
                      ecosw=ecosw, b=b, q1=q1, q2=q2, q3=q3, q4=q4,
                      lcerr=lcerr, isoerr=isoerr)
    isopars = [m1, m2, z0, age, dist, ebv, h0, isoerr]
    magsmod = keblat.isofit(isopars)
    # guard against a failed isochrone evaluation (mirrors mix_lnlike;
    # the original variant lacked this check)
    if np.any(np.isinf(magsmod)):
        return -np.inf
    isores = (magsmod - keblat.magsobs) / keblat.emagsobs
    # per-band mixture: foreground (isochrone) vs background Gaussian
    Lin = 1./(np.sqrt(TWOPI)*keblat.emagsobs) * np.exp(-0.5 * isores**2)
    Lout = 1./np.sqrt(TWOPI * (isoerr**2 + keblat.emagsobs**2)) * \
           np.exp(-0.5 * (keblat.magsobs-Yb)**2 / (isoerr**2+keblat.emagsobs**2))
    lnll = np.sum(np.log((1.-Pb) * Lin + Pb * Lout))
    # debug trace of the SED term; parenthesized so it is valid on both
    # Python 2 and Python 3 (was a Py2-only print statement)
    print(lnll)
    # now the light curve fitting part
    lcpars = np.concatenate((np.array([m1+m2, keblat.r1+keblat.r2,
                                       keblat.r2/keblat.r1, period,
                                       tpe, esinw, ecosw, b, keblat.frat]),
                             ldcoeffs))
    clip = keblat.clip
    lcmod, lcpol = keblat.lcfit(lcpars, keblat.jd[clip],
                                keblat.quarter[clip], keblat.flux[clip],
                                keblat.dflux[clip], keblat.crowd[clip],
                                polyorder=polyorder, ooe=ooe)
    # check model validity before using it (the original computed the
    # residuals first and only then tested for infinities)
    if np.any(np.isinf(lcmod)):
        return -np.inf
    lcres = (lcmod*lcpol - keblat.flux[clip]) / np.sqrt(keblat.dflux[clip]**2 + lcerr**2)
    lnll += -0.5 * (np.sum(lcres**2) + np.sum(np.log((keblat.dflux[clip]**2 + lcerr**2))))
    # Gaussian prior on E(B-V) folded into the likelihood
    lnll += -0.5 * ((isopars[5] - keblat.ebv)/(keblat.debv))**2
    #lnll += -0.5 * ((isopars[2] - keblat.z0)/(0.2 * np.log(10) * keblat.z0))**2
    return lnll
def mix_lnlike(allpars, keblat, polyorder=2, split=False, ooe=True):
    """Mixture-model log-likelihood (SED + light curve), log-space variant.

    Each observed magnitude is modelled as drawn from the isochrone model
    with probability 1-Pb or from a background Gaussian centred on Yb with
    probability Pb; the two components are combined with logaddexp to avoid
    underflow (cf. mix_lnlike2, which works in linear space).

    allpars : first 17 entries are (m1, m2, z0, age, dist, ebv, h0, period,
        tpe, esinw, ecosw, b, q1, q2, q3, q4, ln(lcerr)); then Pb, Yb and
        ln(isoerr).
    split : if True, also return the light-curve + E(B-V) portion of the
        log-likelihood separately as a second value.
    Returns -np.inf when a forward model produces non-finite output.
    """
    m1, m2, z0, age, dist, ebv, h0, period, tpe, esinw, \
    ecosw, b, q1, q2, q3, q4, lcerr = allpars[:17]
    Pb, Yb = allpars[17:-1]
    # the error terms are sampled in log-space; convert back to linear
    isoerr = np.exp(allpars[-1])
    lcerr = np.exp(lcerr)
    ldcoeffs = np.array([q1, q2, q3, q4])
    keblat.updatepars(m1=m1, m2=m2, z0=z0, age=age, dist=dist, ebv=ebv,
                      h0=h0, period=period, tpe=tpe, esinw=esinw,
                      ecosw=ecosw, b=b, q1=q1, q2=q2, q3=q3, q4=q4,
                      lcerr=lcerr, isoerr=isoerr)
    isopars = [m1, m2, z0, age, dist, ebv, h0, isoerr]
    magsmod = keblat.isofit(isopars)
    if np.any(np.isinf(magsmod)) or np.isinf(keblat.r1) or np.isinf(keblat.r2):
        return -np.inf
    isores = (magsmod - keblat.magsobs) / keblat.emagsobs
    # log-space mixture components, combined with logaddexp in case of
    # numerical instabilities with small log sums
    Lin = -0.5 * isores**2 + np.log((1.-Pb)/(np.sqrt(TWOPI)*keblat.emagsobs))
    Lout = -0.5 * (keblat.magsobs-Yb)**2 / (isoerr**2 + keblat.emagsobs**2) + \
           np.log(Pb/np.sqrt(TWOPI * (isoerr**2 + keblat.emagsobs**2)))
    lnll = np.sum(np.logaddexp(Lin, Lout))
    # now the light curve fitting part
    lcpars = np.concatenate((np.array([m1+m2, keblat.r1+keblat.r2,
                                       keblat.r2/keblat.r1, period,
                                       tpe, esinw, ecosw, b, keblat.frat]),
                             ldcoeffs))
    clip = keblat.clip
    lcmod, lcpol = keblat.lcfit(lcpars, keblat.jd[clip],
                                keblat.quarter[clip], keblat.flux[clip],
                                keblat.dflux[clip], keblat.crowd[clip],
                                polyorder=polyorder, ooe=ooe)
    # check model validity before using it (the original computed the
    # residuals first and only then tested for infinities)
    if np.any(np.isinf(lcmod)):
        return -np.inf
    lcres = (lcmod*lcpol - keblat.flux[clip]) / np.sqrt(keblat.dflux[clip]**2 + lcerr**2)
    # compute the light-curve and E(B-V)-prior terms once; the original
    # duplicated these expressions verbatim in the `split` branch
    lc_term = -0.5 * (np.sum(lcres**2) + np.sum(np.log((keblat.dflux[clip]**2 + lcerr**2))))
    ebv_term = -0.5 * ((isopars[5] - keblat.ebv)/(keblat.debv))**2
    #lnll += -0.5 * ((isopars[2] - keblat.z0)/(0.2 * np.log(10) * keblat.z0))**2
    lnll += lc_term + ebv_term
    if split:
        return lnll, lc_term + ebv_term
    return lnll
def mix_lnprob(allpars, keblat, polyorder=2):
lp = mix_lnprior(allpars, keblat)
if | |
#!/usr/bin/env python2
#
# xpyBuild - eXtensible Python-based Build System
#
# Copyright (c) 2013 - 2018 Software AG, Darmstadt, Germany and/or its licensors
# Copyright (c) 2013 - 2018 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Id: xpybuild.py 305815 2017-04-13 17:20:20Z bsp $
# Requires: Python 2.7
#
# Goals:
# - correct build
# - fast, parallelisable, scalable build
# - simple build files, all complexities abstracted away in reusable helper
# classes
# - fail-early on build configuration bugs (e.g. setting an unknown property)
#
# Key concepts:
# - properties - immutable values specified by build files or overridden on
# command line. May be path, a string, True/False or list.
# Can be evaluated using "${propertyName}". All properties must be defined
# in a build file before they can be used.
# - target - something that generates an output file (or directory)
# if the output file doesn't exist or is out of date with respect to
# other targets it depends on; has the ability to clean/delete any
# generated output.
# Targets are named based on their output file, but may also have
# tags to make referring to them easier.
# - tag - an alias for a target or set of targets, grouped together to make
# running them easier from the command line
#
import sys, os, getopt, time, traceback, logging, multiprocessing, threading, re
if float(sys.version[:3]) < 2.7: raise Exception('xpybuild.py requires at least Python 2.7 - unsupported python version %s'%sys.version[:3])
# a general-purpose mechanism for adding extra python modules when invoking
# xpybuild, useful for third party plugins that may only be present on some
# user's systems, and/or for importing user-defined plugins such as output
# formatters
if os.getenv('XPYBUILD_PYTHONPATH'):
sys.path.extend(os.getenv('XPYBUILD_PYTHONPATH').split(os.pathsep))
if os.getenv('XPYBUILD_IMPORTS'):
import importlib
for i in os.getenv('XPYBUILD_IMPORTS').split(','):
importlib.import_module(i)
from buildcommon import *
from buildcommon import _XPYBUILD_VERSION
from buildcontext import *
from utils.fileutils import mkdir, deleteDir
from propertysupport import defineOption, parsePropertiesFile
from internal.stacktrace import listen_for_stack_signal
from buildexceptions import BuildException
from utils.consoleformatter import _registeredConsoleFormatters, publishArtifact
from utils.timeutils import formatTimePeriod
import utils.teamcity # to get handler registered
import utils.visualstudio # needed to create the entry in _handlers
import utils.make # needed to create the entry in _handlers
import utils.progress # needed to create the entry in _handlers
import utils.platformutils
from internal.outputbuffering import OutputBufferingStreamWrapper, outputBufferingManager
log = logging.getLogger('xpybuild')
_TASK_BUILD = 'build'
_TASK_CLEAN = 'clean'
_TASK_REBUILD = 'rebuild'
_TASK_LIST_TARGETS = 'listTargets'
_TASK_LIST_FIND_TARGETS = 'findTargets'
_TASK_LIST_PROPERTIES = 'listProperties'
_TASK_LIST_OPTIONS = 'listOptions'
_TASK_LIST_TARGET_INFO = 'listTargetInfo'
def main(args):
""" Command line argument parser.
"""
try:
usage = [
'',
'eXtensible Python-based Build System %s on Python %s.%s.%s'% (_XPYBUILD_VERSION, sys.version_info[0], sys.version_info[1], sys.version_info[2]),
'',
'xpybuild.py [operation]? [options]* [property=value]* [-x] [target|tag|regex]* ',
'',
'A regex containing * can be used instead of a target, but only if it uniquely ',
'identifies a single target. ',
'',
'Special pseudo-tags:',
' all Include all targets (default if none are provided)',
'',
'Special properties:',
' OUTPUT_DIR=output The main directory output will be written to',
' BUILD_MODE=release Specifies release, debug (or user-defined) mode',
' BUILD_NUMBER=n Build number string, for reporting and use by build',
'',
'Operations: ',
' (if none is specified, the default operation is a normal build)',
' --clean Clean specified targets incl all deps (default=all)',
' --rebuild Clean specified targets incl all deps then build',
'',
' --ft --find-targets <str> List targets containing the specified substring',
' --ti --target-info <str> Print details including build file location for ',
' targets containing the specified substring',
' --targets List available targets and tags (filtered by any ',
' target or tag names specified on the command line)',
' --properties List properties that can be set and their ',
' defaults in this build file',
' --options List the target options available to build rules ',
' and their default values in this build file',
'',
'Options:',
' -x --exclude <target> Specifies a target or tag to exclude (unless ',
' needed as a dependency of an included target) ',
'',
' -J --parallel Build in parallel, automatically determining the ',
' number of threads using number of CPUs and/or the ',
' XPYBUILD_MAX_WORKERS or XPYBUILD_WORKERS_PER_CPU ',
' environment variables set on this machine',
' -j --workers <number> Build in parallel, using <number> worker threads',
' (ignores any environment variables)',
'',
' -k --keep-going Continue rather than aborting on errors',
'',
' -n --dry-run Don\'t actually build anything, just print',
' what would be done (finds missing dependencies)',
'',
' --id --ignore-deps Skip all dependency/up-to-date checking: only ',
' clean/build targets that do not exist at all ',
' (faster builds, but no guarantee of correctness)',
'',
' -f --buildfile <file> Specify the root build file to import ',
' (default is ./root.xpybuild.py)',
'',
' -l --log-level LEVEL Set the log level to debug/info/critical',
' -L --logfile <file> Set the log file location',
' --timefile <file> Dump the time for each target in <file> at the',
' end of the run',
' --depgraph <file> Just resolve dependencies and dump them to <file>',
' --cpu-stats Log CPU utilisation stats',
' --random-priority Randomizes build order',
' --profile Profiles all the worker threads',
' -F --format Message output format.',
' Options:',
] + [
' - '+ h for h in _registeredConsoleFormatters
] + [
]
if reduce(max, map(len, usage)) > 80:
raise Exception('Invalid usage string - all lines must be less than 80 characters')
# set up defaults
properties = {}
buildOptions = { "keep-going":False, "workers":1, "dry-run":False, "ignore-deps":False, "logCPUUtilisation":False, "profile":False }
includedTargets = []
excludedTargets = []
task = _TASK_BUILD
buildFile = os.path.abspath('root.xpybuild.py')
logLevel = None
logFile = None
findTargetsPattern = None
format = "default"
opts,targets = getopt.gnu_getopt(args, "knJh?x:j:l:L:f:F:",
["help","exclude=","parallel","workers=","keep-going",
"log-level=","logfile=","buildfile=", "dry-run",
"targets", 'target-info=', 'ti=', "properties", "options", "clean", "rebuild", "ignore-deps", "id",
"format=", "timefile=", "ft=", "find-targets=", "depgraph=", 'cpu-stats', 'random-priority', 'profile'])
for o, a in opts: # option arguments
o = o.strip('-')
if o in ["?", "h", "help"]:
print '\n'.join(usage)
return 0
elif o in ["x", "exclude"]:
excludedTargets.append(a)
elif o in ["f", "buildfile"]:
buildFile = os.path.abspath(a)
elif o in ['targets']:
task = _TASK_LIST_TARGETS
elif o in ['find-targets', 'ft']:
task = _TASK_LIST_FIND_TARGETS
findTargetsPattern = a
elif o in ['target-info', 'ti']:
task = _TASK_LIST_TARGET_INFO
findTargetsPattern = a
elif o in ['properties']:
task = _TASK_LIST_PROPERTIES
elif o in ['options']:
task = _TASK_LIST_OPTIONS
elif o in ['J', 'parallel']:
# env var for limiting numbers on threads on a given machine,
# e.g. due to disk contention on wide machines
buildOptions['workers'] = multiprocessing.cpu_count()
if os.getenv('XPYBUILD_WORKERS_PER_CPU'):
buildOptions['workers'] = max(1, int(round(buildOptions['workers'] * float(os.getenv('XPYBUILD_WORKERS_PER_CPU')))))
if os.getenv('XPYBUILD_MAX_WORKERS'):
buildOptions['workers'] = min(buildOptions['workers'], int(os.getenv('XPYBUILD_MAX_WORKERS')))
elif o in ['j', 'workers']:
buildOptions['workers'] = int(a)
elif o in ['l', 'log-level']:
logLevel = getattr(logging, a.upper(), None)
elif o in ['cpu-stats']:
buildOptions["logCPUUtilisation"] = True
elif o in ['random-priority']:
buildOptions["randomizePriorities"] = True
elif o in ['L', 'logfile']:
logFile = a
elif o in ['F', 'format']:
format = None
if a =='xpybuild': a = 'default' # for compatibility
for h in _registeredConsoleFormatters:
if h.upper() == a.upper():
format = h
if not format:
print 'invalid format "%s"; valid formatters are: %s'%(a, ', '.join(_registeredConsoleFormatters.keys()))
print '\n'.join(usage)
return 1
elif o in ['clean']:
task = _TASK_CLEAN
buildOptions['keep-going'] = True
elif o in ['rebuild']:
task = _TASK_REBUILD
elif o in ['id', 'ignore-deps']:
buildOptions['ignore-deps'] = True
elif o in ['k', 'keep-going']:
buildOptions['keep-going'] = True
elif o in ['n', 'dry-run']:
buildOptions['dry-run'] = True
elif o in ['timefile']:
buildOptions['timeFile'] = a
elif o in ['profile']:
buildOptions['profile'] = True
elif o in ['depgraph']:
buildOptions['depGraphFile'] = a
else:
assert False, "unhandled option: '%s'" % o
if buildOptions['workers'] < 1: raise Exception('Number of workers is invalid: %s'%buildOptions['workers'])
for o in targets: # non-option arguments (i.e. no -- prefix)
arg = o.strip()
if arg:
if '=' in arg:
properties[arg.split('=')[0].upper()] = arg.split('=')[1]
else:
includedTargets.append(arg)
# default is all
if (not includedTargets) or includedTargets==['']:
includedTargets = ['all']
except getopt.error, msg:
print msg
print "For help use --help"
return 2
threading.currentThread().setName('main')
logging.getLogger().setLevel(logLevel or logging.INFO)
outputBufferingDisabled = buildOptions['workers']==1
# initialize logging to stdout - minimal output to avoid clutter, but indicate progress
hdlr = _registeredConsoleFormatters.get(format, None)
assert hdlr # shouldn't happen
wrapper = OutputBufferingStreamWrapper(sys.stdout, bufferingDisabled=outputBufferingDisabled)
# actually instantiate it
hdlr = hdlr(
wrapper,
buildOptions)
if hdlr.bufferingDisabled: wrapper.bufferingDisabled = True
hdlr.setLevel(logLevel or logging.WARNING)
logging.getLogger().addHandler(hdlr)
log.info('Build options: %s'%buildOptions)
stdout = sys.stdout
# redirect to None, to prevent any target code from doing 'print' statements - should always use the logger
sys.stdout = None
listen_for_stack_signal() # make USR1 print a python stack trace
allTargets = ('all' in includedTargets) and not excludedTargets
try:
def loadBuildFile():
init = BuildInitializationContext(properties)
isRealBuild = (task in [_TASK_BUILD, _TASK_CLEAN, _TASK_REBUILD])
init._defineOption("process.timeout", 600)
init._defineOption("build.keepGoing", buildOptions["keep-going"])
init._defineOption("build.workers", buildOptions["workers"])
init.initializeFromBuildFile(buildFile, isRealBuild=isRealBuild)
return init
if buildOptions['profile']:
import cProfile, pstats
profiler = cProfile.Profile()
profiler.enable()
init = loadBuildFile()
# nb: don't import any modules that might define options (including outputhandler)
# until build file is loaded
# or we may not have a build context in place yet#
from | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 14:11:31 2016
@author: adam
"""
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from scipy.signal import argrelextrema
from sklearn import neighbors as nb
from sklearn.gaussian_process import GaussianProcess
from scipy.interpolate import SmoothBivariateSpline
from scipy.interpolate import LSQBivariateSpline
def build_tree(pointcloud, leafsize):
    """Build a scikit-learn KD-tree over the given points.

    pointcloud: (N, D) array of point coordinates (XYZ in this module).
    leafsize: forwarded as the KDTree `leaf_size` tuning parameter.
    Returns the nb.KDTree instance, used for radius queries elsewhere.
    """
    return nb.KDTree(pointcloud, leaf_size=leafsize)
def find_nearest(array, value):
    """Return the index of the element of *array* closest to *value*."""
    return np.abs(array - value).argmin()
def compute_zs(t_f, params, t_f_uncert):
    """
    Empirical snow depth from total freeboard.

    t_f : float array of total freeboard values
    params : (slope, intercept) of the linear snow-depth model
    t_f_uncert : float array of freeboard uncertainties

    Returns (z_s, z_s_uncert): modelled snow depth -- zero wherever the
    freeboard is non-positive, and clamped so the linear model never
    yields a negative depth -- plus the propagated uncertainty.

        zs, uncert = compute_zs(tf, (slope, intercept), tf_uncert)
    """
    slope, intercept = params[0], params[1]
    z_s = np.zeros(len(t_f))
    # apply the linear model only where there is positive freeboard;
    # everywhere else the depth stays at its initialised value of zero
    positive = t_f > 0
    z_s[positive] = slope * t_f[positive] + intercept
    # clamp: the intercept can drive small freeboards below zero depth
    z_s[z_s < 0] = 0
    return z_s, slope * t_f_uncert
def compute_zi(tf, zs, d_ice, d_water, d_snow, sd_tf, sd_zs, sd_dsnow, \
        sd_dice, sd_dwater):
    '''
    Sea ice thickness from total freeboard and snow depth, with error
    propagation, after Kwok (2010) and Kwok & Cunningham (2008).

    Equation 4 gives thickness from elevation; equation 6 is the
    Taylor-series variance propagation using the partial derivatives of
    equations 8, 9 and 10.

    Returns (zi, zi_uncert): the thickness and its propagated variance.
    '''
    denom = d_water - d_ice
    # eq. 4: hydrostatic-equilibrium thickness
    zi = (d_water / denom) * tf - ((d_water - d_snow) / denom) * zs
    # eq. 6: sum of squared partial derivatives times input variances
    zi_uncert = (
        sd_tf ** 2 * (d_water / denom) ** 2
        + sd_zs ** 2 * ((d_snow - d_water) / denom) ** 2
        + sd_dsnow ** 2 * (zs / denom) ** 2
        + sd_dice ** 2 * (tf / denom) ** 2
        + sd_dwater ** 2 * ((-d_ice * tf + (d_ice - d_snow) * zs) / denom ** 2) ** 2
    )
    return zi, zi_uncert
def zero_mean(data):
    """
    Shift *data* (any numeric array) so that its mean is zero.
    Returns a new array; the input is not modified.
    """
    return data - np.mean(data)
def find_lowpoints(xyz, leafsize, nhood):
    """Find, per neighbourhood, the point whose elevation is closest to the
    neighbourhood median elevation.

    xyz : (N, 3) array of point coordinates (elevation in column 2).
    leafsize : KD-tree leaf size passed through to build_tree.
    nhood : neighbourhood query radius, in the same units as xyz.

    Returns a list of indices into xyz, one per point whose neighbourhood
    contains more than 5*nhood points (sparser neighbourhoods are skipped).
    NOTE(review): the original docstring said "more than 40 points", but the
    code thresholds at 5*nhood -- confirm which is intended.
    """
    low_tree = build_tree(xyz, leafsize)
    nhoods = low_tree.query_radius(xyz, r=nhood)
    point_filt = []
    for nh in nhoods:
        # density gate: only keep neighbourhoods with enough points for the
        # median to be meaningful
        if len(nh) > 5*nhood:
            # index (within this neighbourhood) of the elevation closest to
            # the neighbourhood median, mapped back to the full cloud via nh
            point_filt.append(nh[find_nearest(xyz[nh[:],2], np.median(xyz[nh[:],2]))])
    return point_filt
def find_low_intens(intens, pntile):
    """
    Return the indices of intensity values at or below the given percentile.

    intens : array of return-intensity values
    pntile : percentile threshold (0-100)
    """
    threshold = np.percentile(intens, pntile)
    return list(np.where(intens <= threshold)[0])
def find_water_keys(xyzi, e_p, i_p):
    """
    Select 'local sea level' keypoints from a LiDAR swath.

    xyzi : (N, 4) array of X, Y, Z, intensity points
    e_p : neighbourhood radius used when picking low-elevation points
    i_p : intensity percentile for the dark (water-like) return pre-filter

    Returns the XYZI rows that pass both the low-intensity filter and the
    low-elevation neighbourhood filter.
    """
    # keep only the darkest returns first
    dark = find_low_intens(xyzi[:, 3], i_p)
    dark_pts = xyzi[dark, :]
    # among those, keep points near their neighbourhood median elevation
    # (leaf size 60 for the KD-tree, as elsewhere in this module)
    low = find_lowpoints(dark_pts[:, 0:3], 60, e_p)
    return dark_pts[low, :]
def n_filter(pointcloud, tree, radius):
    '''
    Compute per-point neighbourhood elevation statistics.

    pointcloud : (N, >=3) array with XYZ in the first three columns
    tree : KD-tree built over the same points (see build_tree)
    radius : neighbourhood radius, in the same units as the coordinates

    Returns a list of [mean, median, std] of the elevations (column 2) of
    the points within *radius* of each input point.
    '''
    neighbourhoods = tree.query_radius(pointcloud[:, 0:3], r=radius)
    stats = []
    for indices in neighbourhoods:
        elevations = pointcloud[indices[:], 2]
        stats.append([np.mean(elevations),
                      np.median(elevations),
                      np.std(elevations)])
    return stats
def fit_points(in_, params, knnf):
    '''
    Level a LiDAR swath against a modelled local-sea-level surface.

    takes in:
     - in_ : input xyzi points, shape (N, 4)
     - params : water-point search parameters [nhood radius, intensity percentile]
     - knnf : an (unfitted) neighbourhood regressor exposing fit/predict
    gives back:
     - xyzi points with the predicted sea-level surface removed from Z
     - the (leveled) water keypoints used to build the regression model
    '''
    # find open water points using find_water_keys
    xyz_fp = np.squeeze(find_water_keys(in_, params[0], params[1]))
    # BUGFIX: the original built feature matrices with
    # np.array([x, y]).reshape(n, 2), which reshapes a (2, n) array in
    # row-major order and therefore scrambles the (x, y) pairs fed to
    # fit/predict. column_stack keeps each row as one point's coordinates.
    fit_xy = np.column_stack((xyz_fp[:, 0], xyz_fp[:, 1]))
    # fit the neighbourhood regression model to the water-point elevations
    this = knnf.fit(fit_xy, xyz_fp[:, 2])
    # generate a set of corrections for the fit points themselves
    fitpoint_adjust = this.predict(fit_xy)
    # apply them
    fitpoints_mod = np.column_stack([xyz_fp[:, 0], xyz_fp[:, 1],
                                     xyz_fp[:, 2] - fitpoint_adjust])
    print('mean fitpoint adjustment (Z): {}'.format(np.mean(fitpoint_adjust)))
    # using the fitted model, predict sea-level Z values for all LiDAR points
    z_fit = knnf.predict(np.column_stack((in_[:, 0], in_[:, 1])))
    # remove predicted values from elevations
    xyzi_mod = np.column_stack([in_[:, 0], in_[:, 1],
                                in_[:, 2] - z_fit, in_[:, 3]])
    return xyzi_mod, fitpoints_mod
def f_points(in_, params, dims, smth):
    '''
    Level a LiDAR swath against a smooth spline surface fitted to open-water
    keypoints.

    Finds open water points with find_water_keys, fits a bivariate spline to
    their elevations, and subtracts the evaluated surface from every point's
    Z. Coordinates are normalised to the unit square first so interpolation
    happens in an almost-gridded domain the data largely fill.
    Best applied with a lot of smoothing.

    in_ : (N, 4) array of X, Y, Z, intensity points
    params : water-point search parameters [nhood radius, intensity percentile]
    dims : (kx, ky) spline degrees in x and y
    smth : smoothing factor, scaled by the number of fit points

    Returns (xyzi_mod, fitpoints_mod, resid, coeffs): the leveled swath, the
    leveled keypoints (3-sigma outliers removed), and the spline's residual
    and coefficients.
    '''
    # normalise input data to a unit block
    # http://stackoverflow.com/questions/3864899/resampling-irregularly-spaced-data-to-a-regular-grid-in-python
    # first get min/max of points
    xmin = np.min(in_[:,0])
    ymin = np.min(in_[:,1])
    # translate to starting at [0,0]
    t_x = in_[:,0]-xmin
    t_y = in_[:,1]-ymin
    # normalise coordinates in each direction
    xmax = np.max(t_x)
    ymax = np.max(t_y)
    norm_x = t_x/xmax
    norm_y = t_y/ymax
    # set up a translated world-space array to send to water point finding
    to_waterkeys = np.column_stack([t_x, t_y, in_[:,2], in_[:,3]])
    # find water points
    xyz_fp = np.squeeze(find_water_keys(to_waterkeys, params[0], params[1]))
    # translate the results to the same normalised coordinate system
    norm_fit_x = xyz_fp[:,0]/xmax
    norm_fit_y = xyz_fp[:,1]/ymax
    # check extents
    print('min. norm fit Y: {}, max. norm fit Y: {}'.format(min(norm_fit_y), max(norm_fit_y)))
    # fit a spline using normalised XY coordinates; the smoothing factor is
    # scaled by the number of keypoints (an LSQBivariateSpline alternative
    # was tried here previously and behaved much the same)
    this_f = SmoothBivariateSpline(norm_fit_x, norm_fit_y,
                                   xyz_fp[:, 2], kx=dims[0], ky=dims[1],
                                   bbox=[0,1,0,1], s = smth*len(norm_fit_x))
    # evaluate the surface at the keypoints' normalised coordinates
    fpoints_mod = this_f.ev(norm_fit_x, norm_fit_y)
    # one more filter - kill extreme values: keep keypoints whose adjusted
    # elevation lies within 3 sigma of zero
    adjusted_points = xyz_fp[:, 2] - fpoints_mod
    e_f = np.where((adjusted_points >= 0-3*np.std(adjusted_points)) & (adjusted_points<= 0+3*np.std(adjusted_points)))
    print('mean fitpoint adjustment (Z): {}'.format(np.mean(fpoints_mod[e_f[0]])))
    # translate fit points back to world coordinates
    fitpoints_mod = np.column_stack([xyz_fp[e_f[0], 0]+xmin, xyz_fp[e_f[0], 1]+ymin,
                                     adjusted_points[e_f[0]]])
    # evaluate the surface at every swath point (normalised coordinates)
    z_mod = this_f.ev(norm_x, norm_y)
    coeffs = this_f.get_coeffs()
    resid = this_f.get_residual()
    # remove predicted values from elevations
    xyzi_mod = np.column_stack([in_[:, 0], in_[:, 1],
                                in_[:, 2] - z_mod, in_[:, 3]])
    return xyzi_mod, fitpoints_mod, resid, coeffs
def gp_f_points(in_, params):
'''
This is a replicate of the function above, using an ordinary Kriging
approach.
not currently used, it's unhappy with input data being irregular
points AFAICS, and I don't really want to grid it.
The input cloud could be gridded just for the interpolation, but that's
pretty hungry code when a smooth spline is fast and seems to do OK.
'''
#normalise input data to a unit block
#http://stackoverflow.com/questions/3864899/resampling-irregularly-spaced-data-to-a-regular-grid-in-python
# first get min/max of points
xmin = np.min(in_[:,0])
ymin = np.min(in_[:,1])
#translate to starting at [0,0]
t_x = in_[:,0]-xmin
t_y = in_[:,1]-ymin
#normalise coordinates in each direction
#xmax = np.max(t_x)
#ymax = np.max(t_y)
#norm_x = t_x/xmax
#norm_y = t_y/ymax
to_waterkeys = np.column_stack([t_x, t_y, in_[:,2], in_[:,3]])
print(to_waterkeys)
#find water points
xyz_fp = np.squeeze(find_water_keys(to_waterkeys, params[0], params[1]))
#translate these to the same normalised coordinate system
#norm_fit_x = xyz_fp[:,0]/xmax
#norm_fit_y = xyz_fp[:,1]/ymax
#fit using a gaussian process (kriging):
# http://stackoverflow.com/questions/24978052/interpolation-over-regular-grid-in-python
# http://scikit-learn.org/stable/modules/gaussian_process.html
gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1., nugget=0.5)
gp.fit(np.column_stack((xyz_fp[:,0], xyz_fp[:,1])), xyz_fp[:,2])
fpoints_mod = gp.predict(xyz_fp[:,0], nxyz_fp[:,1])
#fit a spline using normalised XY coordinates
#this_f = SmoothBivariateSpline(norm_fit_x, norm_fit_y,
# xyz_fp[:, 2], kx=5, ky=5)
#evaluate the function at real-space coordinates
#fpoints_mod = this_f.ev(xyz_fp[:, 0], xyz_fp[:, 1])
#or | |
for this Project': 'No Organisations for this Project',
'No Packs for Item': 'No Packs for Item',
'No Patients currently registered': 'No Patients currently registered',
'No People currently committed': 'No People currently committed',
'No People currently registered in this camp': 'No People currently registered in this camp',
'No People currently registered in this shelter': 'No People currently registered in this shelter',
'No Persons currently registered': 'No Persons currently registered',
'No Persons currently reported missing': 'No Persons currently reported missing',
'No Persons found': 'No Persons found',
'No Photos found': 'No Photos found',
'No Picture': 'No Picture',
'No Population Statistics currently registered': 'No Population Statistics currently registered',
'No Presence Log Entries currently registered': 'No Presence Log Entries currently registered',
'No Problems currently defined': 'No Problems currently defined',
'No Projections currently defined': 'No Projections currently defined',
'No Projects currently registered': 'No Projects currently registered',
'No Question Meta-Data currently registered': 'No Question Meta-Data currently registered',
'No Rapid Assessments currently registered': 'No Rapid Assessments currently registered',
'No Ratings for Skill Type': 'No Ratings for Skill Type',
'No Received Items currently registered': 'No Received Items currently registered',
'No Received Shipments': 'No Received Shipments',
'No Records currently available': 'No Records currently available',
'No Relatives currently registered': 'No Relatives currently registered',
'No Request Items currently registered': 'No Request Items currently registered',
'No Requests': 'No Requests',
'No Requests for Donations': 'No Requests for Donations',
'No Requests for Volunteers': 'No Requests for Volunteers',
'No Rivers currently registered': 'No Rivers currently registered',
'No Roles currently defined': 'No Roles currently defined',
'No Rooms currently registered': 'No Rooms currently registered',
'No Scenarios currently registered': 'No Scenarios currently registered',
'No Search saved': 'No Search saved',
'No Sections currently registered': 'No Sections currently registered',
'No Sectors currently registered': 'No Sectors currently registered',
'No Sent Items currently registered': 'No Sent Items currently registered',
'No Sent Shipments': 'No Sent Shipments',
'No Settings currently defined': 'No Settings currently defined',
'No Shelter Services currently registered': 'No Shelter Services currently registered',
'No Shelter Types currently registered': 'No Shelter Types currently registered',
'No Shelters currently registered': 'No Shelters currently registered',
'No Skills currently requested': 'No Skills currently requested',
'No Solutions currently defined': 'No Solutions currently defined',
'No Staff Types currently registered': 'No Staff Types currently registered',
'No Subscription available': 'No Subscription available',
'No Subsectors currently registered': 'No Subsectors currently registered',
'No Support Requests currently registered': 'No Support Requests currently registered',
'No Tasks currently registered in this event': 'No Tasks currently registered in this event',
'No Tasks currently registered in this scenario': 'No Tasks currently registered in this scenario',
'No Teams currently registered': 'No Teams currently registered',
'No Template Section currently registered': 'No Template Section currently registered',
'No Themes currently defined': 'No Themes currently defined',
'No Tickets currently registered': 'No Tickets currently registered',
'No Users currently registered': 'No Users currently registered',
'No Vehicle Details currently defined': 'No Vehicle Details currently defined',
'No Vehicles currently registered': 'No Vehicles currently registered',
'No Warehouses currently registered': 'No Warehouses currently registered',
'No access at all': 'No access at all',
'No access to this record!': 'No access to this record!',
'No action recommended': 'No action recommended',
'No contact information available': 'No contact information available',
'No contact method found': 'No contact method found',
'No contacts currently registered': 'No contacts currently registered',
'No data in this table - cannot create PDF!': 'No data in this table - cannot create PDF!',
'No databases in this application': 'No databases in this application',
'No dead body reports available': 'No dead body reports available',
'No entries found': 'No entries found',
'No entry available': 'No entry available',
'No forms to the corresponding resource have been downloaded yet.': 'No forms to the corresponding resource have been downloaded yet.',
'No jobs configured': 'No jobs configured',
'No jobs configured yet': 'No jobs configured yet',
'No match': 'No match',
'No matching records found': 'No matching records found',
'No messages in the system': 'No messages in the system',
'No person record found for current user.': 'No person record found for current user.',
'No problem group defined yet': 'No problem group defined yet',
'No reports available.': 'No reports available.',
'No reports currently available': 'No reports currently available',
'No repositories configured': 'No repositories configured',
'No requests found': 'No requests found',
'No resources configured yet': 'No resources configured yet',
'No resources currently reported': 'No resources currently reported',
'No service profile available': 'No service profile available',
'No skills currently set': 'No skills currently set',
'No staff or volunteers currently registered': 'No staff or volunteers currently registered',
'No status information available': 'No status information available',
'No tasks currently assigned': 'No tasks currently assigned',
'No tasks currently registered': 'No tasks currently registered',
'No units currently registered': 'No units currently registered',
'No volunteer availability registered': 'No volunteer availability registered',
'Non-structural Hazards': 'Non-structural Hazards',
'None': 'None',
'None (no such record)': 'None (no such record)',
'Noodles': 'Noodles',
'Normal': 'Normal',
'Not Applicable': 'Not Applicable',
'Not Authorised!': 'Not Authorised!',
'Not Possible': 'Not Possible',
'Not authorised!': 'Not authorised!',
'Not installed or incorrectly configured.': 'Not installed or incorrectly configured.',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead',
'Notes': 'Notes',
'Notice to Airmen': 'Notice to Airmen',
'Number of Patients': 'Number of Patients',
'Number of People Required': 'Number of People Required',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.',
'Number of alternative places for studying': 'Number of alternative places for studying',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Number of available/vacant beds of that type in this unit at the time of reporting.',
'Number of bodies found': 'Number of bodies found',
'Number of deaths during the past 24 hours.': 'Number of deaths during the past 24 hours.',
'Number of discharged patients during the past 24 hours.': 'Number of discharged patients during the past 24 hours.',
'Number of doctors': 'Number of doctors',
'Number of in-patients at the time of reporting.': 'Number of in-patients at the time of reporting.',
'Number of newly admitted patients during the past 24 hours.': 'Number of newly admitted patients during the past 24 hours.',
'Number of non-medical staff': 'Number of non-medical staff',
'Number of nurses': 'Number of nurses',
'Number of private schools': 'Number of private schools',
'Number of public schools': 'Number of public schools',
'Number of religious schools': 'Number of religious schools',
'Number of residential units': 'Number of residential units',
'Number of residential units not habitable': 'Number of residential units not habitable',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Number of vacant/available beds in this hospital. Automatically updated from daily reports.',
'Number of vacant/available units to which victims can be transported immediately.': 'Number of vacant/available units to which victims can be transported immediately.',
'Number or Label on the identification tag this person is wearing (if any).': 'Number or Label on the identification tag this person is wearing (if any).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Number/Percentage of affected population that is Female & Aged 0-5',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Number/Percentage of affected population that is Female & Aged 13-17',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Number/Percentage of affected population that is Female & Aged 18-25',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Number/Percentage of affected population that is Female & Aged 26-60',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Number/Percentage of affected population that is Female & Aged 6-12',
'Number/Percentage of affected population that is Female & Aged 61+': 'Number/Percentage of affected population that is Female & Aged 61+',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Number/Percentage of affected population that is Male & Aged 0-5',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Number/Percentage of affected population that is Male & Aged 13-17',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Number/Percentage of affected population that is Male & Aged 18-25',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Number/Percentage of affected population that | |
'chrUn_JTFH01000337v1_decoy',
'chrUn_JTFH01000338v1_decoy',
'chrUn_JTFH01000339v1_decoy',
'chrUn_JTFH01000340v1_decoy',
'chrUn_JTFH01000341v1_decoy',
'chrUn_JTFH01000342v1_decoy',
'chrUn_JTFH01000343v1_decoy',
'chrUn_JTFH01000344v1_decoy',
'chrUn_JTFH01000345v1_decoy',
'chrUn_JTFH01000346v1_decoy',
'chrUn_JTFH01000347v1_decoy',
'chrUn_JTFH01000348v1_decoy',
'chrUn_JTFH01000349v1_decoy',
'chrUn_JTFH01000350v1_decoy',
'chrUn_JTFH01000351v1_decoy',
'chrUn_JTFH01000352v1_decoy',
'chrUn_JTFH01000353v1_decoy',
'chrUn_JTFH01000354v1_decoy',
'chrUn_JTFH01000355v1_decoy',
'chrUn_JTFH01000356v1_decoy',
'chrUn_JTFH01000357v1_decoy',
'chrUn_JTFH01000358v1_decoy',
'chrUn_JTFH01000359v1_decoy',
'chrUn_JTFH01000360v1_decoy',
'chrUn_JTFH01000361v1_decoy',
'chrUn_JTFH01000362v1_decoy',
'chrUn_JTFH01000363v1_decoy',
'chrUn_JTFH01000364v1_decoy',
'chrUn_JTFH01000365v1_decoy',
'chrUn_JTFH01000366v1_decoy',
'chrUn_JTFH01000367v1_decoy',
'chrUn_JTFH01000368v1_decoy',
'chrUn_JTFH01000369v1_decoy',
'chrUn_JTFH01000370v1_decoy',
'chrUn_JTFH01000371v1_decoy',
'chrUn_JTFH01000372v1_decoy',
'chrUn_JTFH01000373v1_decoy',
'chrUn_JTFH01000374v1_decoy',
'chrUn_JTFH01000375v1_decoy',
'chrUn_JTFH01000376v1_decoy',
'chrUn_JTFH01000377v1_decoy',
'chrUn_JTFH01000378v1_decoy',
'chrUn_JTFH01000379v1_decoy',
'chrUn_JTFH01000380v1_decoy',
'chrUn_JTFH01000381v1_decoy',
'chrUn_JTFH01000382v1_decoy',
'chrUn_JTFH01000383v1_decoy',
'chrUn_JTFH01000384v1_decoy',
'chrUn_JTFH01000385v1_decoy',
'chrUn_JTFH01000386v1_decoy',
'chrUn_JTFH01000387v1_decoy',
'chrUn_JTFH01000388v1_decoy',
'chrUn_JTFH01000389v1_decoy',
'chrUn_JTFH01000390v1_decoy',
'chrUn_JTFH01000391v1_decoy',
'chrUn_JTFH01000392v1_decoy',
'chrUn_JTFH01000393v1_decoy',
'chrUn_JTFH01000394v1_decoy',
'chrUn_JTFH01000395v1_decoy',
'chrUn_JTFH01000396v1_decoy',
'chrUn_JTFH01000397v1_decoy',
'chrUn_JTFH01000398v1_decoy',
'chrUn_JTFH01000399v1_decoy',
'chrUn_JTFH01000400v1_decoy',
'chrUn_JTFH01000401v1_decoy',
'chrUn_JTFH01000402v1_decoy',
'chrUn_JTFH01000403v1_decoy',
'chrUn_JTFH01000404v1_decoy',
'chrUn_JTFH01000405v1_decoy',
'chrUn_JTFH01000406v1_decoy',
'chrUn_JTFH01000407v1_decoy',
'chrUn_JTFH01000408v1_decoy',
'chrUn_JTFH01000409v1_decoy',
'chrUn_JTFH01000410v1_decoy',
'chrUn_JTFH01000411v1_decoy',
'chrUn_JTFH01000412v1_decoy',
'chrUn_JTFH01000413v1_decoy',
'chrUn_JTFH01000414v1_decoy',
'chrUn_JTFH01000415v1_decoy',
'chrUn_JTFH01000416v1_decoy',
'chrUn_JTFH01000417v1_decoy',
'chrUn_JTFH01000418v1_decoy',
'chrUn_JTFH01000419v1_decoy',
'chrUn_JTFH01000420v1_decoy',
'chrUn_JTFH01000421v1_decoy',
'chrUn_JTFH01000422v1_decoy',
'chrUn_JTFH01000423v1_decoy',
'chrUn_JTFH01000424v1_decoy',
'chrUn_JTFH01000425v1_decoy',
'chrUn_JTFH01000426v1_decoy',
'chrUn_JTFH01000427v1_decoy',
'chrUn_JTFH01000428v1_decoy',
'chrUn_JTFH01000429v1_decoy',
'chrUn_JTFH01000430v1_decoy',
'chrUn_JTFH01000431v1_decoy',
'chrUn_JTFH01000432v1_decoy',
'chrUn_JTFH01000433v1_decoy',
'chrUn_JTFH01000434v1_decoy',
'chrUn_JTFH01000435v1_decoy',
'chrUn_JTFH01000436v1_decoy',
'chrUn_JTFH01000437v1_decoy',
'chrUn_JTFH01000438v1_decoy',
'chrUn_JTFH01000439v1_decoy',
'chrUn_JTFH01000440v1_decoy',
'chrUn_JTFH01000441v1_decoy',
'chrUn_JTFH01000442v1_decoy',
'chrUn_JTFH01000443v1_decoy',
'chrUn_JTFH01000444v1_decoy',
'chrUn_JTFH01000445v1_decoy',
'chrUn_JTFH01000446v1_decoy',
'chrUn_JTFH01000447v1_decoy',
'chrUn_JTFH01000448v1_decoy',
'chrUn_JTFH01000449v1_decoy',
'chrUn_JTFH01000450v1_decoy',
'chrUn_JTFH01000451v1_decoy',
'chrUn_JTFH01000452v1_decoy',
'chrUn_JTFH01000453v1_decoy',
'chrUn_JTFH01000454v1_decoy',
'chrUn_JTFH01000455v1_decoy',
'chrUn_JTFH01000456v1_decoy',
'chrUn_JTFH01000457v1_decoy',
'chrUn_JTFH01000458v1_decoy',
'chrUn_JTFH01000459v1_decoy',
'chrUn_JTFH01000460v1_decoy',
'chrUn_JTFH01000461v1_decoy',
'chrUn_JTFH01000462v1_decoy',
'chrUn_JTFH01000463v1_decoy',
'chrUn_JTFH01000464v1_decoy',
'chrUn_JTFH01000465v1_decoy',
'chrUn_JTFH01000466v1_decoy',
'chrUn_JTFH01000467v1_decoy',
'chrUn_JTFH01000468v1_decoy',
'chrUn_JTFH01000469v1_decoy',
'chrUn_JTFH01000470v1_decoy',
'chrUn_JTFH01000471v1_decoy',
'chrUn_JTFH01000472v1_decoy',
'chrUn_JTFH01000473v1_decoy',
'chrUn_JTFH01000474v1_decoy',
'chrUn_JTFH01000475v1_decoy',
'chrUn_JTFH01000476v1_decoy',
'chrUn_JTFH01000477v1_decoy',
'chrUn_JTFH01000478v1_decoy',
'chrUn_JTFH01000479v1_decoy',
'chrUn_JTFH01000480v1_decoy',
'chrUn_JTFH01000481v1_decoy',
'chrUn_JTFH01000482v1_decoy',
'chrUn_JTFH01000483v1_decoy',
'chrUn_JTFH01000484v1_decoy',
'chrUn_JTFH01000485v1_decoy',
'chrUn_JTFH01000486v1_decoy',
'chrUn_JTFH01000487v1_decoy',
'chrUn_JTFH01000488v1_decoy',
'chrUn_JTFH01000489v1_decoy',
'chrUn_JTFH01000490v1_decoy',
'chrUn_JTFH01000491v1_decoy',
'chrUn_JTFH01000492v1_decoy',
'chrUn_JTFH01000493v1_decoy',
'chrUn_JTFH01000494v1_decoy',
'chrUn_JTFH01000495v1_decoy',
'chrUn_JTFH01000496v1_decoy',
'chrUn_JTFH01000497v1_decoy',
'chrUn_JTFH01000498v1_decoy',
'chrUn_JTFH01000499v1_decoy',
'chrUn_JTFH01000500v1_decoy',
'chrUn_JTFH01000501v1_decoy',
'chrUn_JTFH01000502v1_decoy',
'chrUn_JTFH01000503v1_decoy',
'chrUn_JTFH01000504v1_decoy',
'chrUn_JTFH01000505v1_decoy',
'chrUn_JTFH01000506v1_decoy',
'chrUn_JTFH01000507v1_decoy',
'chrUn_JTFH01000508v1_decoy',
'chrUn_JTFH01000509v1_decoy',
'chrUn_JTFH01000510v1_decoy',
'chrUn_JTFH01000511v1_decoy',
'chrUn_JTFH01000512v1_decoy',
'chrUn_JTFH01000513v1_decoy',
'chrUn_JTFH01000514v1_decoy',
'chrUn_JTFH01000515v1_decoy',
'chrUn_JTFH01000516v1_decoy',
'chrUn_JTFH01000517v1_decoy',
'chrUn_JTFH01000518v1_decoy',
'chrUn_JTFH01000519v1_decoy',
'chrUn_JTFH01000520v1_decoy',
'chrUn_JTFH01000521v1_decoy',
'chrUn_JTFH01000522v1_decoy',
'chrUn_JTFH01000523v1_decoy',
'chrUn_JTFH01000524v1_decoy',
'chrUn_JTFH01000525v1_decoy',
'chrUn_JTFH01000526v1_decoy',
'chrUn_JTFH01000527v1_decoy',
'chrUn_JTFH01000528v1_decoy',
'chrUn_JTFH01000529v1_decoy',
'chrUn_JTFH01000530v1_decoy',
'chrUn_JTFH01000531v1_decoy',
'chrUn_JTFH01000532v1_decoy',
'chrUn_JTFH01000533v1_decoy',
'chrUn_JTFH01000534v1_decoy',
'chrUn_JTFH01000535v1_decoy',
'chrUn_JTFH01000536v1_decoy',
'chrUn_JTFH01000537v1_decoy',
'chrUn_JTFH01000538v1_decoy',
'chrUn_JTFH01000539v1_decoy',
'chrUn_JTFH01000540v1_decoy',
'chrUn_JTFH01000541v1_decoy',
'chrUn_JTFH01000542v1_decoy',
'chrUn_JTFH01000543v1_decoy',
'chrUn_JTFH01000544v1_decoy',
'chrUn_JTFH01000545v1_decoy',
'chrUn_JTFH01000546v1_decoy',
'chrUn_JTFH01000547v1_decoy',
'chrUn_JTFH01000548v1_decoy',
'chrUn_JTFH01000549v1_decoy',
'chrUn_JTFH01000550v1_decoy',
'chrUn_JTFH01000551v1_decoy',
'chrUn_JTFH01000552v1_decoy',
'chrUn_JTFH01000553v1_decoy',
'chrUn_JTFH01000554v1_decoy',
'chrUn_JTFH01000555v1_decoy',
'chrUn_JTFH01000556v1_decoy',
'chrUn_JTFH01000557v1_decoy',
'chrUn_JTFH01000558v1_decoy',
'chrUn_JTFH01000559v1_decoy',
'chrUn_JTFH01000560v1_decoy',
'chrUn_JTFH01000561v1_decoy',
'chrUn_JTFH01000562v1_decoy',
'chrUn_JTFH01000563v1_decoy',
'chrUn_JTFH01000564v1_decoy',
'chrUn_JTFH01000565v1_decoy',
'chrUn_JTFH01000566v1_decoy',
'chrUn_JTFH01000567v1_decoy',
'chrUn_JTFH01000568v1_decoy',
'chrUn_JTFH01000569v1_decoy',
'chrUn_JTFH01000570v1_decoy',
'chrUn_JTFH01000571v1_decoy',
'chrUn_JTFH01000572v1_decoy',
'chrUn_JTFH01000573v1_decoy',
'chrUn_JTFH01000574v1_decoy',
'chrUn_JTFH01000575v1_decoy',
'chrUn_JTFH01000576v1_decoy',
'chrUn_JTFH01000577v1_decoy',
'chrUn_JTFH01000578v1_decoy',
'chrUn_JTFH01000579v1_decoy',
'chrUn_JTFH01000580v1_decoy',
'chrUn_JTFH01000581v1_decoy',
'chrUn_JTFH01000582v1_decoy',
'chrUn_JTFH01000583v1_decoy',
'chrUn_JTFH01000584v1_decoy',
'chrUn_JTFH01000585v1_decoy',
'chrUn_JTFH01000586v1_decoy',
'chrUn_JTFH01000587v1_decoy',
'chrUn_JTFH01000588v1_decoy',
'chrUn_JTFH01000589v1_decoy',
'chrUn_JTFH01000590v1_decoy',
'chrUn_JTFH01000591v1_decoy',
'chrUn_JTFH01000592v1_decoy',
'chrUn_JTFH01000593v1_decoy',
'chrUn_JTFH01000594v1_decoy',
'chrUn_JTFH01000595v1_decoy',
'chrUn_JTFH01000596v1_decoy',
'chrUn_JTFH01000597v1_decoy',
'chrUn_JTFH01000598v1_decoy',
'chrUn_JTFH01000599v1_decoy',
'chrUn_JTFH01000600v1_decoy',
'chrUn_JTFH01000601v1_decoy',
'chrUn_JTFH01000602v1_decoy',
'chrUn_JTFH01000603v1_decoy',
'chrUn_JTFH01000604v1_decoy',
'chrUn_JTFH01000605v1_decoy',
'chrUn_JTFH01000606v1_decoy',
'chrUn_JTFH01000607v1_decoy',
'chrUn_JTFH01000608v1_decoy',
'chrUn_JTFH01000609v1_decoy',
'chrUn_JTFH01000610v1_decoy',
'chrUn_JTFH01000611v1_decoy',
'chrUn_JTFH01000612v1_decoy',
'chrUn_JTFH01000613v1_decoy',
'chrUn_JTFH01000614v1_decoy',
'chrUn_JTFH01000615v1_decoy',
'chrUn_JTFH01000616v1_decoy',
'chrUn_JTFH01000617v1_decoy',
'chrUn_JTFH01000618v1_decoy',
'chrUn_JTFH01000619v1_decoy',
'chrUn_JTFH01000620v1_decoy',
'chrUn_JTFH01000621v1_decoy',
'chrUn_JTFH01000622v1_decoy',
'chrUn_JTFH01000623v1_decoy',
'chrUn_JTFH01000624v1_decoy',
'chrUn_JTFH01000625v1_decoy',
'chrUn_JTFH01000626v1_decoy',
'chrUn_JTFH01000627v1_decoy',
'chrUn_JTFH01000628v1_decoy',
'chrUn_JTFH01000629v1_decoy',
'chrUn_JTFH01000630v1_decoy',
'chrUn_JTFH01000631v1_decoy',
'chrUn_JTFH01000632v1_decoy',
'chrUn_JTFH01000633v1_decoy',
'chrUn_JTFH01000634v1_decoy',
'chrUn_JTFH01000635v1_decoy',
'chrUn_JTFH01000636v1_decoy',
'chrUn_JTFH01000637v1_decoy',
'chrUn_JTFH01000638v1_decoy',
'chrUn_JTFH01000639v1_decoy',
'chrUn_JTFH01000640v1_decoy',
'chrUn_JTFH01000641v1_decoy',
'chrUn_JTFH01000642v1_decoy',
'chrUn_JTFH01000643v1_decoy',
'chrUn_JTFH01000644v1_decoy',
'chrUn_JTFH01000645v1_decoy',
'chrUn_JTFH01000646v1_decoy',
'chrUn_JTFH01000647v1_decoy',
'chrUn_JTFH01000648v1_decoy',
'chrUn_JTFH01000649v1_decoy',
'chrUn_JTFH01000650v1_decoy',
'chrUn_JTFH01000651v1_decoy',
'chrUn_JTFH01000652v1_decoy',
'chrUn_JTFH01000653v1_decoy',
'chrUn_JTFH01000654v1_decoy',
'chrUn_JTFH01000655v1_decoy',
'chrUn_JTFH01000656v1_decoy',
'chrUn_JTFH01000657v1_decoy',
'chrUn_JTFH01000658v1_decoy',
'chrUn_JTFH01000659v1_decoy',
'chrUn_JTFH01000660v1_decoy',
'chrUn_JTFH01000661v1_decoy',
'chrUn_JTFH01000662v1_decoy',
'chrUn_JTFH01000663v1_decoy',
'chrUn_JTFH01000664v1_decoy',
'chrUn_JTFH01000665v1_decoy',
'chrUn_JTFH01000666v1_decoy',
'chrUn_JTFH01000667v1_decoy',
'chrUn_JTFH01000668v1_decoy',
'chrUn_JTFH01000669v1_decoy',
'chrUn_JTFH01000670v1_decoy',
'chrUn_JTFH01000671v1_decoy',
'chrUn_JTFH01000672v1_decoy',
'chrUn_JTFH01000673v1_decoy',
'chrUn_JTFH01000674v1_decoy',
'chrUn_JTFH01000675v1_decoy',
'chrUn_JTFH01000676v1_decoy',
'chrUn_JTFH01000677v1_decoy',
'chrUn_JTFH01000678v1_decoy',
'chrUn_JTFH01000679v1_decoy',
'chrUn_JTFH01000680v1_decoy',
'chrUn_JTFH01000681v1_decoy',
'chrUn_JTFH01000682v1_decoy',
'chrUn_JTFH01000683v1_decoy',
'chrUn_JTFH01000684v1_decoy',
'chrUn_JTFH01000685v1_decoy',
'chrUn_JTFH01000686v1_decoy',
'chrUn_JTFH01000687v1_decoy',
'chrUn_JTFH01000688v1_decoy',
'chrUn_JTFH01000689v1_decoy',
'chrUn_JTFH01000690v1_decoy',
'chrUn_JTFH01000691v1_decoy',
'chrUn_JTFH01000692v1_decoy',
'chrUn_JTFH01000693v1_decoy',
'chrUn_JTFH01000694v1_decoy',
'chrUn_JTFH01000695v1_decoy',
'chrUn_JTFH01000696v1_decoy',
'chrUn_JTFH01000697v1_decoy',
'chrUn_JTFH01000698v1_decoy',
'chrUn_JTFH01000699v1_decoy',
'chrUn_JTFH01000700v1_decoy',
'chrUn_JTFH01000701v1_decoy',
'chrUn_JTFH01000702v1_decoy',
'chrUn_JTFH01000703v1_decoy',
'chrUn_JTFH01000704v1_decoy',
'chrUn_JTFH01000705v1_decoy',
'chrUn_JTFH01000706v1_decoy',
'chrUn_JTFH01000707v1_decoy',
'chrUn_JTFH01000708v1_decoy',
'chrUn_JTFH01000709v1_decoy',
'chrUn_JTFH01000710v1_decoy',
'chrUn_JTFH01000711v1_decoy',
'chrUn_JTFH01000712v1_decoy',
'chrUn_JTFH01000713v1_decoy',
'chrUn_JTFH01000714v1_decoy',
'chrUn_JTFH01000715v1_decoy',
'chrUn_JTFH01000716v1_decoy',
'chrUn_JTFH01000717v1_decoy',
'chrUn_JTFH01000718v1_decoy',
'chrUn_JTFH01000719v1_decoy',
'chrUn_JTFH01000720v1_decoy',
'chrUn_JTFH01000721v1_decoy',
'chrUn_JTFH01000722v1_decoy',
'chrUn_JTFH01000723v1_decoy',
'chrUn_JTFH01000724v1_decoy',
'chrUn_JTFH01000725v1_decoy',
'chrUn_JTFH01000726v1_decoy',
'chrUn_JTFH01000727v1_decoy',
'chrUn_JTFH01000728v1_decoy',
'chrUn_JTFH01000729v1_decoy',
'chrUn_JTFH01000730v1_decoy',
'chrUn_JTFH01000731v1_decoy',
'chrUn_JTFH01000732v1_decoy',
'chrUn_JTFH01000733v1_decoy',
'chrUn_JTFH01000734v1_decoy',
'chrUn_JTFH01000735v1_decoy',
'chrUn_JTFH01000736v1_decoy',
'chrUn_JTFH01000737v1_decoy',
'chrUn_JTFH01000738v1_decoy',
'chrUn_JTFH01000739v1_decoy',
'chrUn_JTFH01000740v1_decoy',
'chrUn_JTFH01000741v1_decoy',
'chrUn_JTFH01000742v1_decoy',
'chrUn_JTFH01000743v1_decoy',
'chrUn_JTFH01000744v1_decoy',
'chrUn_JTFH01000745v1_decoy',
'chrUn_JTFH01000746v1_decoy',
'chrUn_JTFH01000747v1_decoy',
'chrUn_JTFH01000748v1_decoy',
'chrUn_JTFH01000749v1_decoy',
'chrUn_JTFH01000750v1_decoy',
'chrUn_JTFH01000751v1_decoy',
'chrUn_JTFH01000752v1_decoy',
'chrUn_JTFH01000753v1_decoy',
'chrUn_JTFH01000754v1_decoy',
'chrUn_JTFH01000755v1_decoy',
'chrUn_JTFH01000756v1_decoy',
'chrUn_JTFH01000757v1_decoy',
'chrUn_JTFH01000758v1_decoy',
'chrUn_JTFH01000759v1_decoy',
'chrUn_JTFH01000760v1_decoy',
'chrUn_JTFH01000761v1_decoy',
'chrUn_JTFH01000762v1_decoy',
'chrUn_JTFH01000763v1_decoy',
'chrUn_JTFH01000764v1_decoy',
'chrUn_JTFH01000765v1_decoy',
'chrUn_JTFH01000766v1_decoy',
'chrUn_JTFH01000767v1_decoy',
'chrUn_JTFH01000768v1_decoy',
'chrUn_JTFH01000769v1_decoy',
'chrUn_JTFH01000770v1_decoy',
'chrUn_JTFH01000771v1_decoy',
'chrUn_JTFH01000772v1_decoy',
'chrUn_JTFH01000773v1_decoy',
'chrUn_JTFH01000774v1_decoy',
'chrUn_JTFH01000775v1_decoy',
'chrUn_JTFH01000776v1_decoy',
'chrUn_JTFH01000777v1_decoy',
'chrUn_JTFH01000778v1_decoy',
'chrUn_JTFH01000779v1_decoy',
'chrUn_JTFH01000780v1_decoy',
'chrUn_JTFH01000781v1_decoy',
'chrUn_JTFH01000782v1_decoy',
'chrUn_JTFH01000783v1_decoy',
'chrUn_JTFH01000784v1_decoy',
'chrUn_JTFH01000785v1_decoy',
'chrUn_JTFH01000786v1_decoy',
'chrUn_JTFH01000787v1_decoy',
'chrUn_JTFH01000788v1_decoy',
'chrUn_JTFH01000789v1_decoy',
'chrUn_JTFH01000790v1_decoy',
'chrUn_JTFH01000791v1_decoy',
'chrUn_JTFH01000792v1_decoy',
'chrUn_JTFH01000793v1_decoy',
'chrUn_JTFH01000794v1_decoy',
'chrUn_JTFH01000795v1_decoy',
'chrUn_JTFH01000796v1_decoy',
'chrUn_JTFH01000797v1_decoy',
'chrUn_JTFH01000798v1_decoy',
'chrUn_JTFH01000799v1_decoy',
'chrUn_JTFH01000800v1_decoy',
'chrUn_JTFH01000801v1_decoy',
'chrUn_JTFH01000802v1_decoy',
'chrUn_JTFH01000803v1_decoy',
'chrUn_JTFH01000804v1_decoy',
'chrUn_JTFH01000805v1_decoy',
'chrUn_JTFH01000806v1_decoy',
'chrUn_JTFH01000807v1_decoy',
'chrUn_JTFH01000808v1_decoy',
'chrUn_JTFH01000809v1_decoy',
'chrUn_JTFH01000810v1_decoy',
'chrUn_JTFH01000811v1_decoy',
'chrUn_JTFH01000812v1_decoy',
'chrUn_JTFH01000813v1_decoy',
'chrUn_JTFH01000814v1_decoy',
'chrUn_JTFH01000815v1_decoy',
'chrUn_JTFH01000816v1_decoy',
'chrUn_JTFH01000817v1_decoy',
'chrUn_JTFH01000818v1_decoy',
'chrUn_JTFH01000819v1_decoy',
'chrUn_JTFH01000820v1_decoy',
'chrUn_JTFH01000821v1_decoy',
'chrUn_JTFH01000822v1_decoy',
'chrUn_JTFH01000823v1_decoy',
'chrUn_JTFH01000824v1_decoy',
'chrUn_JTFH01000825v1_decoy',
'chrUn_JTFH01000826v1_decoy',
'chrUn_JTFH01000827v1_decoy',
'chrUn_JTFH01000828v1_decoy',
'chrUn_JTFH01000829v1_decoy',
'chrUn_JTFH01000830v1_decoy',
'chrUn_JTFH01000831v1_decoy',
'chrUn_JTFH01000832v1_decoy',
'chrUn_JTFH01000833v1_decoy',
'chrUn_JTFH01000834v1_decoy',
'chrUn_JTFH01000835v1_decoy',
'chrUn_JTFH01000836v1_decoy',
'chrUn_JTFH01000837v1_decoy',
'chrUn_JTFH01000838v1_decoy',
'chrUn_JTFH01000839v1_decoy',
'chrUn_JTFH01000840v1_decoy',
'chrUn_JTFH01000841v1_decoy',
'chrUn_JTFH01000842v1_decoy',
'chrUn_JTFH01000843v1_decoy',
'chrUn_JTFH01000844v1_decoy',
'chrUn_JTFH01000845v1_decoy',
'chrUn_JTFH01000846v1_decoy',
'chrUn_JTFH01000847v1_decoy',
'chrUn_JTFH01000848v1_decoy',
'chrUn_JTFH01000849v1_decoy',
'chrUn_JTFH01000850v1_decoy',
'chrUn_JTFH01000851v1_decoy',
'chrUn_JTFH01000852v1_decoy',
'chrUn_JTFH01000853v1_decoy',
'chrUn_JTFH01000854v1_decoy',
'chrUn_JTFH01000855v1_decoy',
'chrUn_JTFH01000856v1_decoy',
'chrUn_JTFH01000857v1_decoy',
'chrUn_JTFH01000858v1_decoy',
'chrUn_JTFH01000859v1_decoy',
'chrUn_JTFH01000860v1_decoy',
'chrUn_JTFH01000861v1_decoy',
'chrUn_JTFH01000862v1_decoy',
'chrUn_JTFH01000863v1_decoy',
'chrUn_JTFH01000864v1_decoy',
'chrUn_JTFH01000865v1_decoy',
'chrUn_JTFH01000866v1_decoy',
'chrUn_JTFH01000867v1_decoy',
'chrUn_JTFH01000868v1_decoy',
'chrUn_JTFH01000869v1_decoy',
'chrUn_JTFH01000870v1_decoy',
'chrUn_JTFH01000871v1_decoy',
'chrUn_JTFH01000872v1_decoy',
'chrUn_JTFH01000873v1_decoy',
'chrUn_JTFH01000874v1_decoy',
'chrUn_JTFH01000875v1_decoy',
'chrUn_JTFH01000876v1_decoy',
'chrUn_JTFH01000877v1_decoy',
'chrUn_JTFH01000878v1_decoy',
'chrUn_JTFH01000879v1_decoy',
'chrUn_JTFH01000880v1_decoy',
'chrUn_JTFH01000881v1_decoy',
'chrUn_JTFH01000882v1_decoy',
'chrUn_JTFH01000883v1_decoy',
'chrUn_JTFH01000884v1_decoy',
'chrUn_JTFH01000885v1_decoy',
'chrUn_JTFH01000886v1_decoy',
'chrUn_JTFH01000887v1_decoy',
'chrUn_JTFH01000888v1_decoy',
'chrUn_JTFH01000889v1_decoy',
'chrUn_JTFH01000890v1_decoy',
'chrUn_JTFH01000891v1_decoy',
'chrUn_JTFH01000892v1_decoy',
'chrUn_JTFH01000893v1_decoy',
'chrUn_JTFH01000894v1_decoy',
'chrUn_JTFH01000895v1_decoy',
'chrUn_JTFH01000896v1_decoy',
'chrUn_JTFH01000897v1_decoy',
'chrUn_JTFH01000898v1_decoy',
'chrUn_JTFH01000899v1_decoy',
'chrUn_JTFH01000900v1_decoy',
'chrUn_JTFH01000901v1_decoy',
'chrUn_JTFH01000902v1_decoy',
'chrUn_JTFH01000903v1_decoy',
'chrUn_JTFH01000904v1_decoy',
'chrUn_JTFH01000905v1_decoy',
'chrUn_JTFH01000906v1_decoy',
'chrUn_JTFH01000907v1_decoy',
'chrUn_JTFH01000908v1_decoy',
'chrUn_JTFH01000909v1_decoy',
'chrUn_JTFH01000910v1_decoy',
'chrUn_JTFH01000911v1_decoy',
'chrUn_JTFH01000912v1_decoy',
'chrUn_JTFH01000913v1_decoy',
'chrUn_JTFH01000914v1_decoy',
'chrUn_JTFH01000915v1_decoy',
'chrUn_JTFH01000916v1_decoy',
'chrUn_JTFH01000917v1_decoy',
'chrUn_JTFH01000918v1_decoy',
'chrUn_JTFH01000919v1_decoy',
'chrUn_JTFH01000920v1_decoy',
'chrUn_JTFH01000921v1_decoy',
'chrUn_JTFH01000922v1_decoy',
'chrUn_JTFH01000923v1_decoy',
'chrUn_JTFH01000924v1_decoy',
'chrUn_JTFH01000925v1_decoy',
'chrUn_JTFH01000926v1_decoy',
'chrUn_JTFH01000927v1_decoy',
'chrUn_JTFH01000928v1_decoy',
'chrUn_JTFH01000929v1_decoy',
'chrUn_JTFH01000930v1_decoy',
'chrUn_JTFH01000931v1_decoy',
'chrUn_JTFH01000932v1_decoy',
'chrUn_JTFH01000933v1_decoy',
'chrUn_JTFH01000934v1_decoy',
'chrUn_JTFH01000935v1_decoy',
'chrUn_JTFH01000936v1_decoy',
'chrUn_JTFH01000937v1_decoy',
'chrUn_JTFH01000938v1_decoy',
'chrUn_JTFH01000939v1_decoy',
'chrUn_JTFH01000940v1_decoy',
'chrUn_JTFH01000941v1_decoy',
'chrUn_JTFH01000942v1_decoy',
'chrUn_JTFH01000943v1_decoy',
'chrUn_JTFH01000944v1_decoy',
'chrUn_JTFH01000945v1_decoy',
'chrUn_JTFH01000946v1_decoy',
'chrUn_JTFH01000947v1_decoy',
'chrUn_JTFH01000948v1_decoy',
'chrUn_JTFH01000949v1_decoy',
'chrUn_JTFH01000950v1_decoy',
'chrUn_JTFH01000951v1_decoy',
'chrUn_JTFH01000952v1_decoy',
'chrUn_JTFH01000953v1_decoy',
'chrUn_JTFH01000954v1_decoy',
'chrUn_JTFH01000955v1_decoy',
'chrUn_JTFH01000956v1_decoy',
'chrUn_JTFH01000957v1_decoy',
'chrUn_JTFH01000958v1_decoy',
'chrUn_JTFH01000959v1_decoy',
'chrUn_JTFH01000960v1_decoy',
'chrUn_JTFH01000961v1_decoy',
'chrUn_JTFH01000962v1_decoy',
'chrUn_JTFH01000963v1_decoy',
'chrUn_JTFH01000964v1_decoy',
'chrUn_JTFH01000965v1_decoy',
'chrUn_JTFH01000966v1_decoy',
'chrUn_JTFH01000967v1_decoy',
'chrUn_JTFH01000968v1_decoy',
'chrUn_JTFH01000969v1_decoy',
'chrUn_JTFH01000970v1_decoy',
'chrUn_JTFH01000971v1_decoy',
'chrUn_JTFH01000972v1_decoy',
'chrUn_JTFH01000973v1_decoy',
'chrUn_JTFH01000974v1_decoy',
'chrUn_JTFH01000975v1_decoy',
'chrUn_JTFH01000976v1_decoy',
'chrUn_JTFH01000977v1_decoy',
'chrUn_JTFH01000978v1_decoy',
'chrUn_JTFH01000979v1_decoy',
'chrUn_JTFH01000980v1_decoy',
'chrUn_JTFH01000981v1_decoy',
'chrUn_JTFH01000982v1_decoy',
'chrUn_JTFH01000983v1_decoy',
'chrUn_JTFH01000984v1_decoy',
'chrUn_JTFH01000985v1_decoy',
'chrUn_JTFH01000986v1_decoy',
'chrUn_JTFH01000987v1_decoy',
'chrUn_JTFH01000988v1_decoy',
'chrUn_JTFH01000989v1_decoy',
'chrUn_JTFH01000990v1_decoy',
'chrUn_JTFH01000991v1_decoy',
'chrUn_JTFH01000992v1_decoy',
'chrUn_JTFH01000993v1_decoy',
'chrUn_JTFH01000994v1_decoy',
'chrUn_JTFH01000995v1_decoy',
'chrUn_JTFH01000996v1_decoy',
'chrUn_JTFH01000997v1_decoy',
'chrUn_JTFH01000998v1_decoy',
'chrUn_JTFH01000999v1_decoy',
'chrUn_JTFH01001000v1_decoy',
'chrUn_JTFH01001001v1_decoy',
'chrUn_JTFH01001002v1_decoy',
'chrUn_JTFH01001003v1_decoy',
'chrUn_JTFH01001004v1_decoy',
'chrUn_JTFH01001005v1_decoy',
'chrUn_JTFH01001006v1_decoy',
'chrUn_JTFH01001007v1_decoy',
'chrUn_JTFH01001008v1_decoy',
'chrUn_JTFH01001009v1_decoy',
'chrUn_JTFH01001010v1_decoy',
'chrUn_JTFH01001011v1_decoy',
'chrUn_JTFH01001012v1_decoy',
'chrUn_JTFH01001013v1_decoy',
'chrUn_JTFH01001014v1_decoy',
'chrUn_JTFH01001015v1_decoy',
'chrUn_JTFH01001016v1_decoy',
'chrUn_JTFH01001017v1_decoy',
'chrUn_JTFH01001018v1_decoy',
'chrUn_JTFH01001019v1_decoy',
'chrUn_JTFH01001020v1_decoy',
'chrUn_JTFH01001021v1_decoy',
'chrUn_JTFH01001022v1_decoy',
'chrUn_JTFH01001023v1_decoy',
'chrUn_JTFH01001024v1_decoy',
'chrUn_JTFH01001025v1_decoy',
'chrUn_JTFH01001026v1_decoy',
'chrUn_JTFH01001027v1_decoy',
'chrUn_JTFH01001028v1_decoy',
'chrUn_JTFH01001029v1_decoy',
'chrUn_JTFH01001030v1_decoy',
'chrUn_JTFH01001031v1_decoy',
'chrUn_JTFH01001032v1_decoy',
'chrUn_JTFH01001033v1_decoy',
'chrUn_JTFH01001034v1_decoy',
'chrUn_JTFH01001035v1_decoy',
'chrUn_JTFH01001036v1_decoy',
'chrUn_JTFH01001037v1_decoy',
'chrUn_JTFH01001038v1_decoy',
'chrUn_JTFH01001039v1_decoy',
'chrUn_JTFH01001040v1_decoy',
'chrUn_JTFH01001041v1_decoy',
'chrUn_JTFH01001042v1_decoy',
'chrUn_JTFH01001043v1_decoy',
'chrUn_JTFH01001044v1_decoy',
'chrUn_JTFH01001045v1_decoy',
'chrUn_JTFH01001046v1_decoy',
'chrUn_JTFH01001047v1_decoy',
'chrUn_JTFH01001048v1_decoy',
'chrUn_JTFH01001049v1_decoy',
'chrUn_JTFH01001050v1_decoy',
'chrUn_JTFH01001051v1_decoy',
'chrUn_JTFH01001052v1_decoy',
'chrUn_JTFH01001053v1_decoy',
'chrUn_JTFH01001054v1_decoy',
'chrUn_JTFH01001055v1_decoy',
'chrUn_JTFH01001056v1_decoy',
'chrUn_JTFH01001057v1_decoy',
'chrUn_JTFH01001058v1_decoy',
'chrUn_JTFH01001059v1_decoy',
'chrUn_JTFH01001060v1_decoy',
'chrUn_JTFH01001061v1_decoy',
'chrUn_JTFH01001062v1_decoy',
'chrUn_JTFH01001063v1_decoy',
'chrUn_JTFH01001064v1_decoy',
'chrUn_JTFH01001065v1_decoy',
'chrUn_JTFH01001066v1_decoy',
'chrUn_JTFH01001067v1_decoy',
'chrUn_JTFH01001068v1_decoy',
'chrUn_JTFH01001069v1_decoy',
'chrUn_JTFH01001070v1_decoy',
'chrUn_JTFH01001071v1_decoy',
'chrUn_JTFH01001072v1_decoy',
'chrUn_JTFH01001073v1_decoy',
'chrUn_JTFH01001074v1_decoy',
'chrUn_JTFH01001075v1_decoy',
'chrUn_JTFH01001076v1_decoy',
'chrUn_JTFH01001077v1_decoy',
'chrUn_JTFH01001078v1_decoy',
'chrUn_JTFH01001079v1_decoy',
'chrUn_JTFH01001080v1_decoy',
'chrUn_JTFH01001081v1_decoy',
'chrUn_JTFH01001082v1_decoy',
'chrUn_JTFH01001083v1_decoy',
'chrUn_JTFH01001084v1_decoy',
'chrUn_JTFH01001085v1_decoy',
'chrUn_JTFH01001086v1_decoy',
'chrUn_JTFH01001087v1_decoy',
'chrUn_JTFH01001088v1_decoy',
'chrUn_JTFH01001089v1_decoy',
'chrUn_JTFH01001090v1_decoy',
'chrUn_JTFH01001091v1_decoy',
'chrUn_JTFH01001092v1_decoy',
'chrUn_JTFH01001093v1_decoy',
'chrUn_JTFH01001094v1_decoy',
'chrUn_JTFH01001095v1_decoy',
'chrUn_JTFH01001096v1_decoy',
'chrUn_JTFH01001097v1_decoy',
'chrUn_JTFH01001098v1_decoy',
'chrUn_JTFH01001099v1_decoy',
'chrUn_JTFH01001100v1_decoy',
'chrUn_JTFH01001101v1_decoy',
'chrUn_JTFH01001102v1_decoy',
'chrUn_JTFH01001103v1_decoy',
'chrUn_JTFH01001104v1_decoy',
'chrUn_JTFH01001105v1_decoy',
'chrUn_JTFH01001106v1_decoy',
'chrUn_JTFH01001107v1_decoy',
'chrUn_JTFH01001108v1_decoy',
'chrUn_JTFH01001109v1_decoy',
'chrUn_JTFH01001110v1_decoy',
'chrUn_JTFH01001111v1_decoy',
'chrUn_JTFH01001112v1_decoy',
'chrUn_JTFH01001113v1_decoy',
'chrUn_JTFH01001114v1_decoy',
'chrUn_JTFH01001115v1_decoy',
'chrUn_JTFH01001116v1_decoy',
'chrUn_JTFH01001117v1_decoy',
'chrUn_JTFH01001118v1_decoy',
'chrUn_JTFH01001119v1_decoy',
'chrUn_JTFH01001120v1_decoy',
'chrUn_JTFH01001121v1_decoy',
'chrUn_JTFH01001122v1_decoy',
'chrUn_JTFH01001123v1_decoy',
'chrUn_JTFH01001124v1_decoy',
'chrUn_JTFH01001125v1_decoy',
'chrUn_JTFH01001126v1_decoy',
'chrUn_JTFH01001127v1_decoy',
'chrUn_JTFH01001128v1_decoy',
'chrUn_JTFH01001129v1_decoy',
'chrUn_JTFH01001130v1_decoy',
'chrUn_JTFH01001131v1_decoy',
'chrUn_JTFH01001132v1_decoy',
'chrUn_JTFH01001133v1_decoy',
'chrUn_JTFH01001134v1_decoy',
'chrUn_JTFH01001135v1_decoy',
'chrUn_JTFH01001136v1_decoy',
'chrUn_JTFH01001137v1_decoy',
'chrUn_JTFH01001138v1_decoy',
'chrUn_JTFH01001139v1_decoy',
'chrUn_JTFH01001140v1_decoy',
'chrUn_JTFH01001141v1_decoy',
'chrUn_JTFH01001142v1_decoy',
'chrUn_JTFH01001143v1_decoy',
'chrUn_JTFH01001144v1_decoy',
'chrUn_JTFH01001145v1_decoy',
'chrUn_JTFH01001146v1_decoy',
'chrUn_JTFH01001147v1_decoy',
'chrUn_JTFH01001148v1_decoy',
'chrUn_JTFH01001149v1_decoy',
'chrUn_JTFH01001150v1_decoy',
'chrUn_JTFH01001151v1_decoy',
'chrUn_JTFH01001152v1_decoy',
'chrUn_JTFH01001153v1_decoy',
'chrUn_JTFH01001154v1_decoy',
'chrUn_JTFH01001155v1_decoy',
'chrUn_JTFH01001156v1_decoy',
'chrUn_JTFH01001157v1_decoy',
'chrUn_JTFH01001158v1_decoy',
'chrUn_JTFH01001159v1_decoy',
'chrUn_JTFH01001160v1_decoy',
'chrUn_JTFH01001161v1_decoy',
'chrUn_JTFH01001162v1_decoy',
'chrUn_JTFH01001163v1_decoy',
'chrUn_JTFH01001164v1_decoy',
'chrUn_JTFH01001165v1_decoy',
'chrUn_JTFH01001166v1_decoy',
'chrUn_JTFH01001167v1_decoy',
'chrUn_JTFH01001168v1_decoy',
'chrUn_JTFH01001169v1_decoy',
'chrUn_JTFH01001170v1_decoy',
'chrUn_JTFH01001171v1_decoy',
'chrUn_JTFH01001172v1_decoy',
'chrUn_JTFH01001173v1_decoy',
'chrUn_JTFH01001174v1_decoy',
'chrUn_JTFH01001175v1_decoy',
'chrUn_JTFH01001176v1_decoy',
'chrUn_JTFH01001177v1_decoy',
'chrUn_JTFH01001178v1_decoy',
'chrUn_JTFH01001179v1_decoy',
'chrUn_JTFH01001180v1_decoy',
'chrUn_JTFH01001181v1_decoy',
'chrUn_JTFH01001182v1_decoy',
'chrUn_JTFH01001183v1_decoy',
'chrUn_JTFH01001184v1_decoy',
'chrUn_JTFH01001185v1_decoy',
'chrUn_JTFH01001186v1_decoy',
'chrUn_JTFH01001187v1_decoy',
'chrUn_JTFH01001188v1_decoy',
'chrUn_JTFH01001189v1_decoy',
'chrUn_JTFH01001190v1_decoy',
'chrUn_JTFH01001191v1_decoy',
'chrUn_JTFH01001192v1_decoy',
'chrUn_JTFH01001193v1_decoy',
'chrUn_JTFH01001194v1_decoy',
'chrUn_JTFH01001195v1_decoy',
'chrUn_JTFH01001196v1_decoy',
'chrUn_JTFH01001197v1_decoy',
'chrUn_JTFH01001198v1_decoy',
'chrUn_JTFH01001199v1_decoy',
'chrUn_JTFH01001200v1_decoy',
'chrUn_JTFH01001201v1_decoy',
'chrUn_JTFH01001202v1_decoy',
'chrUn_JTFH01001203v1_decoy',
'chrUn_JTFH01001204v1_decoy',
'chrUn_JTFH01001205v1_decoy',
'chrUn_JTFH01001206v1_decoy',
'chrUn_JTFH01001207v1_decoy',
'chrUn_JTFH01001208v1_decoy',
'chrUn_JTFH01001209v1_decoy',
'chrUn_JTFH01001210v1_decoy',
'chrUn_JTFH01001211v1_decoy',
'chrUn_JTFH01001212v1_decoy',
'chrUn_JTFH01001213v1_decoy',
'chrUn_JTFH01001214v1_decoy',
'chrUn_JTFH01001215v1_decoy',
'chrUn_JTFH01001216v1_decoy',
'chrUn_JTFH01001217v1_decoy',
'chrUn_JTFH01001218v1_decoy',
'chrUn_JTFH01001219v1_decoy',
'chrUn_JTFH01001220v1_decoy',
'chrUn_JTFH01001221v1_decoy',
'chrUn_JTFH01001222v1_decoy',
'chrUn_JTFH01001223v1_decoy',
'chrUn_JTFH01001224v1_decoy',
'chrUn_JTFH01001225v1_decoy',
'chrUn_JTFH01001226v1_decoy',
'chrUn_JTFH01001227v1_decoy',
'chrUn_JTFH01001228v1_decoy',
'chrUn_JTFH01001229v1_decoy',
'chrUn_JTFH01001230v1_decoy',
'chrUn_JTFH01001231v1_decoy',
'chrUn_JTFH01001232v1_decoy',
'chrUn_JTFH01001233v1_decoy',
'chrUn_JTFH01001234v1_decoy',
'chrUn_JTFH01001235v1_decoy',
'chrUn_JTFH01001236v1_decoy',
'chrUn_JTFH01001237v1_decoy',
'chrUn_JTFH01001238v1_decoy',
'chrUn_JTFH01001239v1_decoy',
'chrUn_JTFH01001240v1_decoy',
'chrUn_JTFH01001241v1_decoy',
'chrUn_JTFH01001242v1_decoy',
'chrUn_JTFH01001243v1_decoy',
'chrUn_JTFH01001244v1_decoy',
'chrUn_JTFH01001245v1_decoy',
'chrUn_JTFH01001246v1_decoy',
'chrUn_JTFH01001247v1_decoy',
'chrUn_JTFH01001248v1_decoy',
'chrUn_JTFH01001249v1_decoy',
'chrUn_JTFH01001250v1_decoy',
'chrUn_JTFH01001251v1_decoy',
'chrUn_JTFH01001252v1_decoy',
'chrUn_JTFH01001253v1_decoy',
'chrUn_JTFH01001254v1_decoy',
'chrUn_JTFH01001255v1_decoy',
'chrUn_JTFH01001256v1_decoy',
'chrUn_JTFH01001257v1_decoy',
'chrUn_JTFH01001258v1_decoy',
'chrUn_JTFH01001259v1_decoy',
'chrUn_JTFH01001260v1_decoy',
'chrUn_JTFH01001261v1_decoy',
'chrUn_JTFH01001262v1_decoy',
'chrUn_JTFH01001263v1_decoy',
'chrUn_JTFH01001264v1_decoy',
'chrUn_JTFH01001265v1_decoy',
'chrUn_JTFH01001266v1_decoy',
'chrUn_JTFH01001267v1_decoy',
'chrUn_JTFH01001268v1_decoy',
'chrUn_JTFH01001269v1_decoy',
'chrUn_JTFH01001270v1_decoy',
'chrUn_JTFH01001271v1_decoy',
'chrUn_JTFH01001272v1_decoy',
'chrUn_JTFH01001273v1_decoy',
'chrUn_JTFH01001274v1_decoy',
'chrUn_JTFH01001275v1_decoy',
'chrUn_JTFH01001276v1_decoy',
'chrUn_JTFH01001277v1_decoy',
'chrUn_JTFH01001278v1_decoy',
'chrUn_JTFH01001279v1_decoy',
'chrUn_JTFH01001280v1_decoy',
'chrUn_JTFH01001281v1_decoy',
'chrUn_JTFH01001282v1_decoy',
'chrUn_JTFH01001283v1_decoy',
'chrUn_JTFH01001284v1_decoy',
'chrUn_JTFH01001285v1_decoy',
'chrUn_JTFH01001286v1_decoy',
'chrUn_JTFH01001287v1_decoy',
'chrUn_JTFH01001288v1_decoy',
'chrUn_JTFH01001289v1_decoy',
'chrUn_JTFH01001290v1_decoy',
'chrUn_JTFH01001291v1_decoy',
'chrUn_JTFH01001292v1_decoy',
'chrUn_JTFH01001293v1_decoy',
'chrUn_JTFH01001294v1_decoy',
'chrUn_JTFH01001295v1_decoy',
'chrUn_JTFH01001296v1_decoy',
'chrUn_JTFH01001297v1_decoy',
'chrUn_JTFH01001298v1_decoy',
'chrUn_JTFH01001299v1_decoy',
'chrUn_JTFH01001300v1_decoy',
'chrUn_JTFH01001301v1_decoy',
'chrUn_JTFH01001302v1_decoy',
'chrUn_JTFH01001303v1_decoy',
'chrUn_JTFH01001304v1_decoy',
'chrUn_JTFH01001305v1_decoy',
'chrUn_JTFH01001306v1_decoy',
'chrUn_JTFH01001307v1_decoy',
'chrUn_JTFH01001308v1_decoy',
'chrUn_JTFH01001309v1_decoy',
'chrUn_JTFH01001310v1_decoy',
'chrUn_JTFH01001311v1_decoy',
'chrUn_JTFH01001312v1_decoy',
'chrUn_JTFH01001313v1_decoy',
'chrUn_JTFH01001314v1_decoy',
'chrUn_JTFH01001315v1_decoy',
'chrUn_JTFH01001316v1_decoy',
'chrUn_JTFH01001317v1_decoy',
'chrUn_JTFH01001318v1_decoy',
'chrUn_JTFH01001319v1_decoy',
'chrUn_JTFH01001320v1_decoy',
'chrUn_JTFH01001321v1_decoy',
'chrUn_JTFH01001322v1_decoy',
'chrUn_JTFH01001323v1_decoy',
'chrUn_JTFH01001324v1_decoy',
'chrUn_JTFH01001325v1_decoy',
'chrUn_JTFH01001326v1_decoy',
'chrUn_JTFH01001327v1_decoy',
'chrUn_JTFH01001328v1_decoy',
'chrUn_JTFH01001329v1_decoy',
'chrUn_JTFH01001330v1_decoy',
'chrUn_JTFH01001331v1_decoy',
'chrUn_JTFH01001332v1_decoy',
'chrUn_JTFH01001333v1_decoy',
'chrUn_JTFH01001334v1_decoy',
'chrUn_JTFH01001335v1_decoy',
'chrUn_JTFH01001336v1_decoy',
'chrUn_JTFH01001337v1_decoy',
'chrUn_JTFH01001338v1_decoy',
'chrUn_JTFH01001339v1_decoy',
'chrUn_JTFH01001340v1_decoy',
'chrUn_JTFH01001341v1_decoy',
'chrUn_JTFH01001342v1_decoy',
'chrUn_JTFH01001343v1_decoy',
'chrUn_JTFH01001344v1_decoy',
'chrUn_JTFH01001345v1_decoy',
'chrUn_JTFH01001346v1_decoy',
'chrUn_JTFH01001347v1_decoy',
'chrUn_JTFH01001348v1_decoy',
'chrUn_JTFH01001349v1_decoy',
'chrUn_JTFH01001350v1_decoy',
'chrUn_JTFH01001351v1_decoy',
'chrUn_JTFH01001352v1_decoy',
'chrUn_JTFH01001353v1_decoy',
'chrUn_JTFH01001354v1_decoy',
'chrUn_JTFH01001355v1_decoy',
'chrUn_JTFH01001356v1_decoy',
'chrUn_JTFH01001357v1_decoy',
'chrUn_JTFH01001358v1_decoy',
'chrUn_JTFH01001359v1_decoy',
'chrUn_JTFH01001360v1_decoy',
'chrUn_JTFH01001361v1_decoy',
'chrUn_JTFH01001362v1_decoy',
'chrUn_JTFH01001363v1_decoy',
'chrUn_JTFH01001364v1_decoy',
'chrUn_JTFH01001365v1_decoy',
'chrUn_JTFH01001366v1_decoy',
'chrUn_JTFH01001367v1_decoy',
'chrUn_JTFH01001368v1_decoy',
'chrUn_JTFH01001369v1_decoy',
'chrUn_JTFH01001370v1_decoy',
'chrUn_JTFH01001371v1_decoy',
'chrUn_JTFH01001372v1_decoy',
'chrUn_JTFH01001373v1_decoy',
'chrUn_JTFH01001374v1_decoy',
'chrUn_JTFH01001375v1_decoy',
'chrUn_JTFH01001376v1_decoy',
'chrUn_JTFH01001377v1_decoy',
'chrUn_JTFH01001378v1_decoy',
'chrUn_JTFH01001379v1_decoy',
'chrUn_JTFH01001380v1_decoy',
'chrUn_JTFH01001381v1_decoy',
'chrUn_JTFH01001382v1_decoy',
'chrUn_JTFH01001383v1_decoy',
'chrUn_JTFH01001384v1_decoy',
'chrUn_JTFH01001385v1_decoy',
'chrUn_JTFH01001386v1_decoy',
'chrUn_JTFH01001387v1_decoy',
'chrUn_JTFH01001388v1_decoy',
'chrUn_JTFH01001389v1_decoy',
'chrUn_JTFH01001390v1_decoy',
'chrUn_JTFH01001391v1_decoy',
'chrUn_JTFH01001392v1_decoy',
'chrUn_JTFH01001393v1_decoy',
'chrUn_JTFH01001394v1_decoy',
'chrUn_JTFH01001395v1_decoy',
'chrUn_JTFH01001396v1_decoy',
'chrUn_JTFH01001397v1_decoy',
'chrUn_JTFH01001398v1_decoy',
'chrUn_JTFH01001399v1_decoy',
'chrUn_JTFH01001400v1_decoy',
'chrUn_JTFH01001401v1_decoy',
'chrUn_JTFH01001402v1_decoy',
'chrUn_JTFH01001403v1_decoy',
'chrUn_JTFH01001404v1_decoy',
'chrUn_JTFH01001405v1_decoy',
'chrUn_JTFH01001406v1_decoy',
'chrUn_JTFH01001407v1_decoy',
'chrUn_JTFH01001408v1_decoy',
'chrUn_JTFH01001409v1_decoy',
'chrUn_JTFH01001410v1_decoy',
'chrUn_JTFH01001411v1_decoy',
'chrUn_JTFH01001412v1_decoy',
'chrUn_JTFH01001413v1_decoy',
'chrUn_JTFH01001414v1_decoy',
'chrUn_JTFH01001415v1_decoy',
'chrUn_JTFH01001416v1_decoy',
'chrUn_JTFH01001417v1_decoy',
'chrUn_JTFH01001418v1_decoy',
'chrUn_JTFH01001419v1_decoy',
'chrUn_JTFH01001420v1_decoy',
'chrUn_JTFH01001421v1_decoy',
'chrUn_JTFH01001422v1_decoy',
'chrUn_JTFH01001423v1_decoy',
'chrUn_JTFH01001424v1_decoy',
'chrUn_JTFH01001425v1_decoy',
'chrUn_JTFH01001426v1_decoy',
'chrUn_JTFH01001427v1_decoy',
'chrUn_JTFH01001428v1_decoy',
'chrUn_JTFH01001429v1_decoy',
'chrUn_JTFH01001430v1_decoy',
'chrUn_JTFH01001431v1_decoy',
'chrUn_JTFH01001432v1_decoy',
'chrUn_JTFH01001433v1_decoy',
'chrUn_JTFH01001434v1_decoy',
'chrUn_JTFH01001435v1_decoy',
'chrUn_JTFH01001436v1_decoy',
'chrUn_JTFH01001437v1_decoy',
'chrUn_JTFH01001438v1_decoy',
'chrUn_JTFH01001439v1_decoy',
'chrUn_JTFH01001440v1_decoy',
'chrUn_JTFH01001441v1_decoy',
'chrUn_JTFH01001442v1_decoy',
'chrUn_JTFH01001443v1_decoy',
'chrUn_JTFH01001444v1_decoy',
'chrUn_JTFH01001445v1_decoy',
'chrUn_JTFH01001446v1_decoy',
'chrUn_JTFH01001447v1_decoy',
'chrUn_JTFH01001448v1_decoy',
'chrUn_JTFH01001449v1_decoy',
'chrUn_JTFH01001450v1_decoy',
'chrUn_JTFH01001451v1_decoy',
'chrUn_JTFH01001452v1_decoy',
'chrUn_JTFH01001453v1_decoy',
'chrUn_JTFH01001454v1_decoy',
'chrUn_JTFH01001455v1_decoy',
'chrUn_JTFH01001456v1_decoy',
'chrUn_JTFH01001457v1_decoy',
'chrUn_JTFH01001458v1_decoy',
'chrUn_JTFH01001459v1_decoy',
'chrUn_JTFH01001460v1_decoy',
'chrUn_JTFH01001461v1_decoy',
'chrUn_JTFH01001462v1_decoy',
'chrUn_JTFH01001463v1_decoy',
'chrUn_JTFH01001464v1_decoy',
'chrUn_JTFH01001465v1_decoy',
'chrUn_JTFH01001466v1_decoy',
'chrUn_JTFH01001467v1_decoy',
'chrUn_JTFH01001468v1_decoy',
'chrUn_JTFH01001469v1_decoy',
'chrUn_JTFH01001470v1_decoy',
'chrUn_JTFH01001471v1_decoy',
'chrUn_JTFH01001472v1_decoy',
'chrUn_JTFH01001473v1_decoy',
'chrUn_JTFH01001474v1_decoy',
'chrUn_JTFH01001475v1_decoy',
'chrUn_JTFH01001476v1_decoy',
'chrUn_JTFH01001477v1_decoy',
'chrUn_JTFH01001478v1_decoy',
'chrUn_JTFH01001479v1_decoy',
'chrUn_JTFH01001480v1_decoy',
'chrUn_JTFH01001481v1_decoy',
'chrUn_JTFH01001482v1_decoy',
'chrUn_JTFH01001483v1_decoy',
'chrUn_JTFH01001484v1_decoy',
'chrUn_JTFH01001485v1_decoy',
'chrUn_JTFH01001486v1_decoy',
'chrUn_JTFH01001487v1_decoy',
'chrUn_JTFH01001488v1_decoy',
'chrUn_JTFH01001489v1_decoy',
'chrUn_JTFH01001490v1_decoy',
'chrUn_JTFH01001491v1_decoy',
'chrUn_JTFH01001492v1_decoy',
'chrUn_JTFH01001493v1_decoy',
'chrUn_JTFH01001494v1_decoy',
'chrUn_JTFH01001495v1_decoy',
'chrUn_JTFH01001496v1_decoy',
'chrUn_JTFH01001497v1_decoy',
'chrUn_JTFH01001498v1_decoy',
'chrUn_JTFH01001499v1_decoy',
'chrUn_JTFH01001500v1_decoy',
'chrUn_JTFH01001501v1_decoy',
'chrUn_JTFH01001502v1_decoy',
'chrUn_JTFH01001503v1_decoy',
'chrUn_JTFH01001504v1_decoy',
'chrUn_JTFH01001505v1_decoy',
'chrUn_JTFH01001506v1_decoy',
'chrUn_JTFH01001507v1_decoy',
'chrUn_JTFH01001508v1_decoy',
'chrUn_JTFH01001509v1_decoy',
'chrUn_JTFH01001510v1_decoy',
'chrUn_JTFH01001511v1_decoy',
'chrUn_JTFH01001512v1_decoy',
'chrUn_JTFH01001513v1_decoy',
'chrUn_JTFH01001514v1_decoy',
'chrUn_JTFH01001515v1_decoy',
'chrUn_JTFH01001516v1_decoy',
'chrUn_JTFH01001517v1_decoy',
'chrUn_JTFH01001518v1_decoy',
'chrUn_JTFH01001519v1_decoy',
'chrUn_JTFH01001520v1_decoy',
'chrUn_JTFH01001521v1_decoy',
'chrUn_JTFH01001522v1_decoy',
'chrUn_JTFH01001523v1_decoy',
'chrUn_JTFH01001524v1_decoy',
'chrUn_JTFH01001525v1_decoy',
'chrUn_JTFH01001526v1_decoy',
'chrUn_JTFH01001527v1_decoy',
'chrUn_JTFH01001528v1_decoy',
'chrUn_JTFH01001529v1_decoy',
'chrUn_JTFH01001530v1_decoy',
'chrUn_JTFH01001531v1_decoy',
'chrUn_JTFH01001532v1_decoy',
'chrUn_JTFH01001533v1_decoy',
'chrUn_JTFH01001534v1_decoy',
'chrUn_JTFH01001535v1_decoy',
'chrUn_JTFH01001536v1_decoy',
'chrUn_JTFH01001537v1_decoy',
'chrUn_JTFH01001538v1_decoy',
'chrUn_JTFH01001539v1_decoy',
'chrUn_JTFH01001540v1_decoy',
'chrUn_JTFH01001541v1_decoy',
'chrUn_JTFH01001542v1_decoy',
'chrUn_JTFH01001543v1_decoy',
'chrUn_JTFH01001544v1_decoy',
'chrUn_JTFH01001545v1_decoy',
'chrUn_JTFH01001546v1_decoy',
'chrUn_JTFH01001547v1_decoy',
'chrUn_JTFH01001548v1_decoy',
'chrUn_JTFH01001549v1_decoy',
'chrUn_JTFH01001550v1_decoy',
'chrUn_JTFH01001551v1_decoy',
'chrUn_JTFH01001552v1_decoy',
'chrUn_JTFH01001553v1_decoy',
'chrUn_JTFH01001554v1_decoy',
'chrUn_JTFH01001555v1_decoy',
'chrUn_JTFH01001556v1_decoy',
'chrUn_JTFH01001557v1_decoy',
'chrUn_JTFH01001558v1_decoy',
'chrUn_JTFH01001559v1_decoy',
'chrUn_JTFH01001560v1_decoy',
'chrUn_JTFH01001561v1_decoy',
'chrUn_JTFH01001562v1_decoy',
'chrUn_JTFH01001563v1_decoy',
'chrUn_JTFH01001564v1_decoy',
'chrUn_JTFH01001565v1_decoy',
'chrUn_JTFH01001566v1_decoy',
'chrUn_JTFH01001567v1_decoy',
'chrUn_JTFH01001568v1_decoy',
'chrUn_JTFH01001569v1_decoy',
'chrUn_JTFH01001570v1_decoy',
'chrUn_JTFH01001571v1_decoy',
'chrUn_JTFH01001572v1_decoy',
'chrUn_JTFH01001573v1_decoy',
'chrUn_JTFH01001574v1_decoy',
'chrUn_JTFH01001575v1_decoy',
'chrUn_JTFH01001576v1_decoy',
'chrUn_JTFH01001577v1_decoy',
'chrUn_JTFH01001578v1_decoy',
'chrUn_JTFH01001579v1_decoy',
'chrUn_JTFH01001580v1_decoy',
'chrUn_JTFH01001581v1_decoy',
'chrUn_JTFH01001582v1_decoy',
'chrUn_JTFH01001583v1_decoy',
'chrUn_JTFH01001584v1_decoy',
'chrUn_JTFH01001585v1_decoy',
'chrUn_JTFH01001586v1_decoy',
'chrUn_JTFH01001587v1_decoy',
'chrUn_JTFH01001588v1_decoy',
'chrUn_JTFH01001589v1_decoy',
'chrUn_JTFH01001590v1_decoy',
'chrUn_JTFH01001591v1_decoy',
'chrUn_JTFH01001592v1_decoy',
'chrUn_JTFH01001593v1_decoy',
'chrUn_JTFH01001594v1_decoy',
'chrUn_JTFH01001595v1_decoy',
'chrUn_JTFH01001596v1_decoy',
'chrUn_JTFH01001597v1_decoy',
'chrUn_JTFH01001598v1_decoy',
'chrUn_JTFH01001599v1_decoy',
'chrUn_JTFH01001600v1_decoy',
'chrUn_JTFH01001601v1_decoy',
'chrUn_JTFH01001602v1_decoy',
'chrUn_JTFH01001603v1_decoy',
'chrUn_JTFH01001604v1_decoy',
'chrUn_JTFH01001605v1_decoy',
'chrUn_JTFH01001606v1_decoy',
'chrUn_JTFH01001607v1_decoy',
'chrUn_JTFH01001608v1_decoy',
'chrUn_JTFH01001609v1_decoy',
'chrUn_JTFH01001610v1_decoy',
'chrUn_JTFH01001611v1_decoy',
'chrUn_JTFH01001612v1_decoy',
'chrUn_JTFH01001613v1_decoy',
'chrUn_JTFH01001614v1_decoy',
'chrUn_JTFH01001615v1_decoy',
'chrUn_JTFH01001616v1_decoy',
| |
x, y, triangles, _, dataIndex = createTriangles(mesh)
if len(data) == mesh.cellCount():
z = data[dataIndex]
else:
z = data
gci = None
if levels is None:
levels = autolevel(data, nLevs,
zMin=cMin, zMax=cMax, logScale=logScale)
if len(z) == len(triangles):
shading = kwargs.pop('shading', 'flat')
# bounds = np.linspace(levels[0], levels[-1], nLevs)
# norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)
if shading == 'gouraud':
z = pg.meshtools.cellDataToNodeData(mesh, data)
gci = ax.tripcolor(x, y, triangles, z,
shading=shading, **kwargs)
else:
gci = ax.tripcolor(x, y, triangles, facecolors=z,
shading=shading, **kwargs)
elif len(z) == mesh.nodeCount():
shading = kwargs.pop('shading', None)
if shading is not None:
gci = ax.tripcolor(x, y, triangles, z, shading=shading, **kwargs)
else:
fillContour = kwargs.pop('fillContour', True)
contourLines = kwargs.pop('contourLines', True)
if fillContour:
# add outer climits to fill lower and upper too
levs = np.array(levels)
# if min(z) < min(levels):
# levs = np.hstack([min(z), levs])
# if max(z) > max(levels):
# levs = np.hstack([levs, max(z)])
if nCols is not None:
if logScale:
levs = np.geomspace(min(levels), max(levels), nCols+1)
else:
levs = np.linspace(min(levels), max(levels), nCols+1)
gci = ax.tricontourf(x, y, triangles, z,
# antialiased=True, # not allways nice
levels=levs, **kwargs
)
if contourLines:
ax.tricontour(x, y, triangles, z, levels=levels,
colors=kwargs.pop('colors', ['0.5']), **kwargs)
else:
gci = None
raise Exception("Data size does not fit mesh size: ", len(z),
mesh.cellCount(), mesh.nodeCount())
# we should not adapt cols here at all -- test remove
# if gci and cMin and cMax:
# gci.set_clim(cMin, cMax)
if fitView is True:
ax.set_xlim(mesh.xmin(), mesh.xmax())
ax.set_ylim(mesh.ymin(), mesh.ymax())
ax.set_aspect('equal')
updateAxes_(ax)
return gci
def drawStreamLines(ax, mesh, u, nx=25, ny=25, **kwargs):
    """Draw streamlines for the gradients of field values u on a mesh.

    The matplotlib routine streamplot needs equidistant spacings, so the
    field is first sampled on a regular grid with nx by ny nodes.
    Remaining keyword arguments are forwarded to streamplot.

    This works only for rectangular regions.

    You should use pg.viewer.mpl.drawStreams, which is more comfortable and
    more flexible.

    Parameters
    ----------
    ax : mpl axe

    mesh : :gimliapi:`GIMLI::Mesh`
        2D mesh

    u : iterable float
        Scalar data field.
    """
    X, Y = np.meshgrid(np.linspace(mesh.xmin(), mesh.xmax(), nx),
                       np.linspace(mesh.ymin(), mesh.ymax(), ny))

    U = np.zeros_like(X)
    V = np.zeros_like(X)

    nRows, nCols = X.shape
    for i in range(nRows):
        for j in range(nCols):
            pos = [X[i, j], Y[i, j]]
            cell = mesh.findCell(pos)
            # points outside the mesh keep a zero gradient
            grad = cell.grad(pos, u) if cell else [0.0, 0.0]
            # note the sign flip of the gradient components
            U[i, j] = -grad[0]
            V[i, j] = -grad[1]

    gci = ax.streamplot(X, Y, U, V, **kwargs)
    updateAxes_(ax)
    return gci
def drawStreamLine_(ax, mesh, c, data, dataMesh=None, linewidth=1.0,
                    dropTol=0.0, **kwargs):
    """Draw a single streamline.

    Draw a single streamline into a given mesh for given data starting at
    the center of cell c. The streamline is extended until it reaches a
    cell that already contains a streamline.

    TODO
        linewidth and color depends on absolute velocity
        or background color saturation

    Parameters
    ----------
    ax : matplotlib.ax
        ax to draw into
    mesh : :gimliapi:`GIMLI::Mesh`
        2d mesh
    c : :gimliapi:`GIMLI::Cell`
        Start point is c.center()
    data : iterable float | [float, float]
        If data is an array (per cell or node) gradients are calculated
        otherwise the data will be interpreted as vector field per nodes or
        cell centers.
    dataMesh : :gimliapi:`GIMLI::Mesh` [None]
        Optional mesh for the data. If you want high resolution
        data to plot on coarse draw mesh.
    linewidth : float [1.0]
        Streamline linewidth
    dropTol : float [0.0]
        Don't draw stream lines with velocity lower than drop tolerance.

    Keyword Arguments
    -----------------
    **kwargs
        arrowSize: int
            Size of the arrow's head.
        arrowColor: str
            Color of the arrow's head.
        Additional kwargs are being forwarded to mpl.LineCollection, mpl.Polygon

    Returns
    -------
    lines : mpl.collections.LineCollection or None
        The drawn line collection; None if the traced path is too short.
    """
    # Trace the streamline path (x, y) and per-point velocity magnitudes v.
    x, y, v = streamline(mesh, data, startCoord=c.center(), dLengthSteps=5,
                         dataMesh=dataMesh, maxSteps=10000, verbose=False,
                         coords=[0, 1])
    if 'color' not in kwargs:
        kwargs['color'] = 'black'
    arrowSize = kwargs.pop('arrowSize', 12)
    arrowColor = kwargs.pop('arrowColor', 'black')
    lines = None
    if len(x) > 2:
        # Build a (N-1, 2, 2) array of consecutive point pairs so each
        # segment can get an individual linewidth in the LineCollection.
        points = np.array([x, y]).T.reshape(-1, 1, 2)
        segments = np.concatenate([points[:-1], points[1:]], axis=1)
        # Hide (width 0) segments whose velocity falls below dropTol.
        lwidths = pg.Vector(len(v), linewidth)
        lwidths[pg.find(pg.Vector(v) < dropTol)] = 0.0
        lines = mpl.collections.LineCollection(
            segments, linewidths=lwidths, **kwargs)
        ax.add_collection(lines)
        # probably the limits are wrong without plot call
        # lines = ax.plot(x, y, **kwargs)
        # updateAxes_(ax, lines)
        # ax.plot(x, y, '.-', color='black', **kwargs)
    if len(x) > 3:
        # Place a direction arrow at the middle of the streamline.
        xmid = int(len(x) / 2)
        ymid = int(len(y) / 2)
        dx = x[xmid + 1] - x[xmid]
        dy = y[ymid + 1] - y[ymid]
        # NOTE(review): this shadows the parameter c and the result is
        # never used afterwards -- looks like leftover code; confirm.
        c = mesh.findCell([x[xmid], y[ymid]])
        if v[xmid] > dropTol:
            # NOTE(review): absArrowSize is always True here, so the
            # ax.arrow() branch below is currently dead code.
            absArrowSize = True
            if absArrowSize:
                ax.annotate('',
                            xytext=(x[xmid]-dx, y[ymid]-dy),
                            xy=(x[xmid], y[ymid]),
                            arrowprops=dict(arrowstyle="-|>", color=arrowColor),
                            size=arrowSize, **kwargs,
                            )
            else:
                ax.arrow(x[xmid], y[ymid], dx, dy,
                         shape='full', lw=0,
                         length_includes_head=True,
                         fc=arrowColor,
                         head_width=.35, **kwargs)
            # dx90 = -dy
            # dy90 = dx
            # aLen = 3
            # aWid = 1
            # xy = list(zip([x[xmid] + dx90*aWid, x[xmid] + dx*aLen,
            #                x[xmid] - dx90*aWid],
            #               [y[ymid] + dy90*aWid, y[ymid] + dy*aLen,
            #                y[ymid] - dy90*aWid]))
            # arrow = mpl.patches.Polygon(xy, ls=None, lw=0, closed=True,
            #                             **kwargs)
            # ax.add_patch(arrow)
    return lines
def drawStreams(ax, mesh, data, startStream=3, coarseMesh=None, quiver=False,
                **kwargs):
    """Draw streamlines based on an unstructured mesh.

    Every cell contains only one streamline and every new stream line
    starts in the center of a cell. You can alternatively provide a second mesh
    with coarser mesh to draw streams for.

    Parameters
    ----------
    ax : matplotlib.ax
        ax to draw into
    mesh : :gimliapi:`GIMLI::Mesh`
        2d mesh
    data : iterable float | [float, float] | pg.core.R3Vector
        If data is an array (per cell or node) gradients are calculated
        otherwise the data will be interpreted as vector field per nodes or
        cell centers.
    startStream : int
        variate the start stream drawing, try values from 1 to 3 what every
        you like more.
    coarseMesh : :gimliapi:`GIMLI::Mesh`
        Instead of draw a stream for every cell in mesh, draw a streamline
        segment for each cell in coarseMesh.
    quiver : bool [False]
        Draw arrows instead of streamlines.

    Keyword Arguments
    -----------------
    **kwargs
        Additional kwargs forwarded to axe.quiver, drawStreamLine_

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> import pygimli as pg
    >>> from pygimli.viewer.mpl import drawStreams
    >>> n = np.linspace(0, 1, 10)
    >>> mesh = pg.createGrid(x=n, y=n)
    >>> nx = pg.x(mesh.positions())
    >>> ny = pg.y(mesh.positions())
    >>> data = np.cos(1.5 * nx) * np.sin(1.5 * ny)
    >>> fig, ax = plt.subplots()
    >>> drawStreams(ax, mesh, data, color='red')
    >>> drawStreams(ax, mesh, data, dropTol=0.9)
    >>> drawStreams(ax, mesh, pg.solver.grad(mesh, data),
    ...             color='green', quiver=True)
    >>> ax.set_aspect('equal')
    >>> pg.wait()
    """
    if quiver:
        x = None
        y = None
        u = None
        v = None
        # Choose arrow positions matching the data length:
        # nodes, cell centers or boundary centers.
        if len(data) == mesh.nodeCount():
            x = pg.x(mesh.positions())
            y = pg.y(mesh.positions())
        elif len(data) == mesh.cellCount():
            x = pg.x(mesh.cellCenters())
            y = pg.y(mesh.cellCenters())
        elif len(data) == mesh.boundaryCount():
            x = pg.x(mesh.boundaryCenters())
            y = pg.y(mesh.boundaryCenters())

        if isinstance(data, pg.core.R3Vector):
            u = pg.x(data)
            v = pg.y(data)
        else:
            u = data[:, 0]
            v = data[:, 1]

        ax.quiver(x, y, u, v, **kwargs)
        updateAxes_(ax)
        return

    if coarseMesh is not None:
        viewMesh = coarseMesh
        dataMesh = mesh
        dataMesh.createNeighborInfos()
    else:
        viewMesh = mesh
        dataMesh = None
        viewMesh.createNeighborInfos()

    # Mark every cell as a candidate for a new streamline;
    # drawStreamLine_ invalidates the cells a stream passes through.
    for c in viewMesh.cells():
        c.setValid(True)

    if startStream == 1:
        # seed streams along a vertical line through the domain center
        for y in np.linspace(viewMesh.ymin(), viewMesh.ymax(), 100):
            # BUG fix: was (xmax() - xmax()) / 2.0 == 0.0, so streams were
            # only ever seeded for meshes that contain x=0
            c = viewMesh.findCell(
                [(viewMesh.xmin() + viewMesh.xmax()) / 2.0, y])
            if c is not None:
                if c.valid():
                    drawStreamLine_(ax, viewMesh, c, data, dataMesh, **kwargs)
    elif startStream == 2:
        # seed streams along a horizontal line through the domain center
        for x in np.linspace(viewMesh.xmin(), viewMesh.xmax(), 100):
            # BUG fix: was (ymax() - ymax()) / 2.0 == 0.0 (see above)
            c = viewMesh.findCell(
                [x, (viewMesh.ymin() + viewMesh.ymax()) / 2.0])
            if c is not None:
                if c.valid():
                    drawStreamLine_(ax, viewMesh, c, data, dataMesh, **kwargs)
    elif startStream == 3:
        # start a stream from each boundary cell with marker in [1, 99]
        for b in viewMesh.findBoundaryByMarker(1, 99):
            c = b.leftCell()
            if c is None:
                c = b.rightCell()
            # guard against boundaries without any attached cell
            if c is not None and c.valid():
                drawStreamLine_(ax, viewMesh, c, data, dataMesh, **kwargs)

    # start a stream from each remaining (still valid) cell
    for c in viewMesh.cells():
        if c.valid():
            drawStreamLine_(ax, viewMesh, c, data, dataMesh, **kwargs)

    # restore the valid flags for subsequent draw calls
    for c in viewMesh.cells():
        c.setValid(True)

    updateAxes_(ax)
def drawSensors(ax, sensors, diam=None, coords=None, **kwargs):
"""Draw sensor positions as black dots with a given diameter.
Parameters
----------
ax : mpl axe instance
sensors : vector or list of RVector3
list of positions to plot
diam : float [None]
diameter of circles (None leads to point distance by 8)
coords: (int, int) [0, | |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""orm links tests for the export and import routines"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import with_statement
import io
import os
import tarfile
from six.moves import range
from aiida import orm
from aiida.backends.testbase import AiidaTestCase
from aiida.backends.tests.tools.importexport.utils import get_all_node_links
from aiida.backends.tests.utils.configuration import with_temp_dir
from aiida.common import json
from aiida.common.folders import SandboxFolder
from aiida.common.links import LinkType
from aiida.common.utils import get_new_uuid
from aiida.tools.importexport import import_data, export
class TestLinks(AiidaTestCase):
"""Test ex-/import cases related to Links"""
def setUp(self):
    """Reset the database before each test so it starts from a clean state."""
    self.reset_database()
def tearDown(self):
    """Reset the database after each test to avoid leaking state between tests."""
    self.reset_database()
@with_temp_dir
def test_links_to_unknown_nodes(self, temp_dir):
    """Verify import behavior for an archive with a link to an unknown node.

    A plain import must reject the dangling link; with
    ``ignore_unknown_nodes=True`` the import must succeed.
    """
    node_label = "Test structure data"

    # Create and export a single structure node.
    structure = orm.StructureData()
    structure.label = str(node_label)
    structure.store()
    structure_uuid = structure.uuid

    archive = os.path.join(temp_dir, "export.tar.gz")
    export([structure], outfile=archive, silent=True)

    # Unpack the archive so its metadata can be tampered with.
    sandbox = SandboxFolder()
    with tarfile.open(archive, "r:gz", format=tarfile.PAX_FORMAT) as tar:
        tar.extractall(sandbox.abspath)

    data_json = sandbox.get_abs_path('data.json')
    with io.open(data_json, 'r', encoding='utf8') as handle:
        metadata = json.load(handle)

    # Inject a link whose input side points to a UUID absent from the DB.
    metadata['links_uuid'].append({
        'output': structure.uuid,
        # note: this uuid is supposed to not be in the DB
        'input': get_new_uuid(),
        'label': 'parent'
    })

    with io.open(data_json, 'wb') as handle:
        json.dump(metadata, handle)

    # Repack the tampered folder into the archive.
    with tarfile.open(archive, "w:gz", format=tarfile.PAX_FORMAT) as tar:
        tar.add(sandbox.abspath, arcname="")

    self.clean_db()
    self.create_user()

    # A dangling link must be rejected unless explicitly ignored.
    with self.assertRaises(ValueError):
        import_data(archive, silent=True)

    import_data(archive, ignore_unknown_nodes=True, silent=True)
    self.assertEqual(orm.load_node(structure_uuid).label, node_label)
@with_temp_dir
def test_input_and_create_links(self, temp_dir):
    """Check that INPUT and CREATE links survive an export/import round trip."""
    node_work = orm.CalculationNode()
    node_input = orm.Int(1).store()
    node_output = orm.Int(2).store()

    node_work.add_incoming(node_input, LinkType.INPUT_CALC, 'input')
    node_work.store()
    node_output.add_incoming(node_work, LinkType.CREATE, 'output')

    # Snapshot the links before wiping the database.
    export_links = get_all_node_links()

    export_file = os.path.join(temp_dir, 'export.tar.gz')
    export([node_output], outfile=export_file, silent=True)

    self.reset_database()
    import_data(export_file, silent=True)
    import_links = get_all_node_links()

    # Compare as sets of tuples since link ordering is not guaranteed.
    self.assertSetEqual(
        set(tuple(link) for link in export_links),
        set(tuple(link) for link in import_links))
def construct_complex_graph(self, export_combination=0, work_nodes=None, calc_nodes=None):
    """
    This method creates a "complex" graph with all available link types:
    INPUT_WORK, INPUT_CALC, CALL_WORK, CALL_CALC, CREATE, and RETURN
    and returns the nodes of the graph. It also returns various combinations
    of nodes that need to be extracted but also the final expected set of nodes
    (after adding the expected predecessors and successors).

    :param export_combination: index (0-9) into the precomputed export
        selections; out-of-range values make the method return None.
    :param work_nodes: pair of class names for the two workflow-like nodes
        (defaults to two plain WorkflowNodes).
    :param calc_nodes: pair of class names for the two calculation-like nodes
        (defaults to two plain CalculationNodes).
    :return: tuple ``(graph_nodes, (export_node, expected_nodes))`` or None.
    """
    if export_combination < 0 or export_combination > 9:
        return None
    if work_nodes is None:
        work_nodes = ["WorkflowNode", "WorkflowNode"]
    if calc_nodes is None:
        calc_nodes = ["orm.CalculationNode", "orm.CalculationNode"]
    # Class mapping
    # "CalcJobNode" is left out, since it is special.
    string_to_class = {
        "WorkflowNode": orm.WorkflowNode,
        "WorkChainNode": orm.WorkChainNode,
        "WorkFunctionNode": orm.WorkFunctionNode,
        "orm.CalculationNode": orm.CalculationNode,
        "CalcFunctionNode": orm.CalcFunctionNode
    }
    # Node creation
    data1 = orm.Int(1).store()
    data2 = orm.Int(1).store()
    work1 = string_to_class[work_nodes[0]]()
    work2 = string_to_class[work_nodes[1]]()
    # CalcJobNodes need explicit resources and are constructed directly.
    if calc_nodes[0] == "CalcJobNode":
        calc1 = orm.CalcJobNode()
        calc1.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    else:
        calc1 = string_to_class[calc_nodes[0]]()
    calc1.computer = self.computer
    # Waiting to store Data nodes until they have been "created" with the links below,
    # because @calcfunctions cannot return data, i.e. return stored Data nodes
    data3 = orm.Int(1)
    data4 = orm.Int(1)
    if calc_nodes[1] == "CalcJobNode":
        calc2 = orm.CalcJobNode()
        calc2.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
    else:
        calc2 = string_to_class[calc_nodes[1]]()
    calc2.computer = self.computer
    # Waiting to store Data nodes until they have been "created" with the links below,
    # because @calcfunctions cannot return data, i.e. return stored Data nodes
    data5 = orm.Int(1)
    data6 = orm.Int(1)
    # Link creation
    work1.add_incoming(data1, LinkType.INPUT_WORK, 'input1')
    work1.add_incoming(data2, LinkType.INPUT_WORK, 'input2')
    work2.add_incoming(data1, LinkType.INPUT_WORK, 'input1')
    work2.add_incoming(work1, LinkType.CALL_WORK, 'call2')
    work1.store()
    work2.store()
    calc1.add_incoming(data1, LinkType.INPUT_CALC, 'input1')
    calc1.add_incoming(work2, LinkType.CALL_CALC, 'call1')
    calc1.store()
    data3.add_incoming(calc1, LinkType.CREATE, 'create3')
    # data3 is stored now, because a @workfunction cannot return unstored Data,
    # i.e. create data.
    data3.store()
    data3.add_incoming(work2, LinkType.RETURN, 'return3')
    data4.add_incoming(calc1, LinkType.CREATE, 'create4')
    # data4 is stored now, because a @workfunction cannot return unstored Data,
    # i.e. create data.
    data4.store()
    data4.add_incoming(work2, LinkType.RETURN, 'return4')
    calc2.add_incoming(data4, LinkType.INPUT_CALC, 'input4')
    calc2.store()
    data5.add_incoming(calc2, LinkType.CREATE, 'create5')
    data6.add_incoming(calc2, LinkType.CREATE, 'create6')
    data5.store()
    data6.store()
    graph_nodes = [data1, data2, data3, data4, data5, data6, calc1, calc2, work1, work2]
    # Create various combinations of nodes that should be exported
    # and the final set of nodes that are exported in each case, following
    # predecessor(INPUT, CREATE)/successor(CALL, RETURN, CREATE) links.
    export_list = [
        (work1, [data1, data2, data3, data4, calc1, work1, work2]),
        (work2, [data1, data3, data4, calc1, work2]),
        (data3, [data1, data3, data4, calc1]),
        (data4, [data1, data3, data4, calc1]),
        (data5, [data1, data3, data4, data5, data6, calc1, calc2]),
        (data6, [data1, data3, data4, data5, data6, calc1, calc2]),
        (calc1, [data1, data3, data4, calc1]),
        (calc2, [data1, data3, data4, data5, data6, calc1, calc2]),
        (data1, [data1]),
        (data2, [data2])
    ]
    return graph_nodes, export_list[export_combination]
    @with_temp_dir
    def test_data_create_reversed_false(self, temp_dir):
        """Verify that create_reversed = False is respected when only exporting Data nodes."""
        # Graph under test: data_input --INPUT_CALC--> calc --CREATE--> data_output
        data_input = orm.Int(1).store()
        data_output = orm.Int(2).store()
        calc = orm.CalcJobNode()
        calc.computer = self.computer
        calc.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
        calc.add_incoming(data_input, LinkType.INPUT_CALC, 'input')
        calc.store()
        data_output.add_incoming(calc, LinkType.CREATE, 'create')
        # Remember the UUID before the database is wiped below.
        data_output_uuid = data_output.uuid
        # Export only the group containing data_output; with create_reversed=False
        # the exporter must NOT walk the CREATE link backwards to pull in calc.
        group = orm.Group(label='test_group').store()
        group.add_nodes(data_output)
        export_file = os.path.join(temp_dir, 'export.tar.gz')
        export([group], outfile=export_file, silent=True, create_reversed=False)
        # Wipe the database and re-import the archive.
        self.reset_database()
        import_data(export_file, silent=True)
        # Exactly one Data node (data_output) and no calculation nodes must be present.
        builder = orm.QueryBuilder()
        builder.append(orm.Data)
        self.assertEqual(builder.count(), 1, 'Expected a single Data node but got {}'.format(builder.count()))
        self.assertEqual(builder.all()[0][0].uuid, data_output_uuid)
        builder = orm.QueryBuilder()
        builder.append(orm.CalcJobNode)
        self.assertEqual(builder.count(), 0, 'Expected no Calculation nodes')
    @with_temp_dir
    def test_complex_workflow_graph_links(self, temp_dir):
        """
        This test checks that all the needed links are correctly exported and
        imported. More precisely, it checks that INPUT, CREATE, RETURN and CALL
        links connecting Data nodes, CalcJobNodes and WorkCalculations are
        exported and imported correctly.
        """
        graph_nodes, _ = self.construct_complex_graph()
        # Getting the input, create, return and call links:
        # project (source uuid, target uuid, edge label, edge type) for every
        # provenance link type of interest.
        builder = orm.QueryBuilder()
        builder.append(orm.Node, project='uuid')
        builder.append(
            orm.Node,
            project='uuid',
            edge_project=['label', 'type'],
            edge_filters={
                'type': {
                    'in': (LinkType.INPUT_CALC.value, LinkType.INPUT_WORK.value, LinkType.CREATE.value,
                           LinkType.RETURN.value, LinkType.CALL_CALC.value, LinkType.CALL_WORK.value)
                }
            })
        export_links = builder.all()
        export_file = os.path.join(temp_dir, 'export.tar.gz')
        export(graph_nodes, outfile=export_file, silent=True)
        # Wipe the database, re-import, and fetch the links again.
        self.reset_database()
        import_data(export_file, silent=True)
        import_links = get_all_node_links()
        # Compare as sets of tuples — query result ordering is not guaranteed.
        export_set = [tuple(_) for _ in export_links]
        import_set = [tuple(_) for _ in import_links]
        self.assertSetEqual(set(export_set), set(import_set))
    @with_temp_dir
    def test_complex_workflow_graph_export_sets(self, temp_dir):
        """Test ex-/import of individual nodes in complex graph"""
        # construct_complex_graph(i) returns, for configuration index i, the
        # single node to export plus the full set of nodes expected to land in
        # the archive after the export rules are applied.
        # NOTE(review): construct_complex_graph defines 10 configurations (0-9)
        # but range(0, 9) only exercises indices 0-8 — confirm this is intentional.
        for export_conf in range(0, 9):
            _, (export_node, export_target) = self.construct_complex_graph(export_conf)
            export_target_uuids = set(str(_.uuid) for _ in export_target)
            export_file = os.path.join(temp_dir, 'export.tar.gz')
            export([export_node], outfile=export_file, silent=True, overwrite=True)
            # Keep a printable handle for the failure message; the node object
            # itself is unusable after reset_database().
            export_node_str = str(export_node)
            self.reset_database()
            import_data(export_file, silent=True)
            # Get all the nodes of the database
            builder = orm.QueryBuilder()
            builder.append(orm.Node, project='uuid')
            imported_node_uuids = set(str(_[0]) for _ in builder.all())
            self.assertSetEqual(
                export_target_uuids, imported_node_uuids,
                "Problem in comparison of export node: " + str(export_node_str) + "\n" + "Expected set: " +
                str(export_target_uuids) + "\n" + "Imported set: " + str(imported_node_uuids) + "\n" + "Difference: " +
                str([_ for _ in export_target_uuids.symmetric_difference(imported_node_uuids)]))
    @with_temp_dir
    def test_high_level_workflow_links(self, temp_dir):
        """
        This test checks that all the needed links are correctly exported and imported.
        INPUT_CALC, INPUT_WORK, CALL_CALC, CALL_WORK, CREATE, and RETURN
        links connecting Data nodes and high-level Calculation and Workflow nodes:
        CalcJobNode, CalcFunctionNode, WorkChainNode, WorkFunctionNode
        """
        # All 2x2 combinations of calculation-node classes and workflow-node
        # classes used to instantiate the same graph topology.
        high_level_calc_nodes = [["CalcJobNode", "CalcJobNode"], ["CalcJobNode", "CalcFunctionNode"],
                                 ["CalcFunctionNode", "CalcJobNode"], ["CalcFunctionNode", "CalcFunctionNode"]]
        high_level_work_nodes = [["WorkChainNode", "WorkChainNode"], ["WorkChainNode", "WorkFunctionNode"],
                                 ["WorkFunctionNode", "WorkChainNode"], ["WorkFunctionNode", "WorkFunctionNode"]]
        for calcs in high_level_calc_nodes:
            for works in high_level_work_nodes:
                self.reset_database()
                graph_nodes, _ = self.construct_complex_graph(calc_nodes=calcs, work_nodes=works)
                # Getting the input, create, return and call links
                builder = orm.QueryBuilder()
                builder.append(orm.Node, project='uuid')
                builder.append(
                    orm.Node,
                    project='uuid',
                    edge_project=['label', 'type'],
                    edge_filters={
                        'type': {
                            'in': (LinkType.INPUT_CALC.value, LinkType.INPUT_WORK.value, LinkType.CREATE.value,
                                   LinkType.RETURN.value, LinkType.CALL_CALC.value, LinkType.CALL_WORK.value)
                        }
                    })
                # The constructed graph is asserted to contain exactly 13 such
                # links regardless of which node classes were substituted.
                self.assertEqual(
                    builder.count(),
                    13,
                    msg="Failed with c1={}, c2={}, w1={}, w2={}".format(calcs[0], calcs[1], works[0], works[1]))
                export_links = builder.all()
                export_file = os.path.join(temp_dir, 'export.tar.gz')
                export(graph_nodes, outfile=export_file, silent=True, overwrite=True)
                # Round-trip through an archive and compare the link sets.
                self.reset_database()
                import_data(export_file, silent=True)
                import_links = get_all_node_links()
                export_set = [tuple(_) for _ in export_links]
                import_set = [tuple(_) for _ in import_links]
                self.assertSetEqual(
                    set(export_set),
                    set(import_set),
                    msg="Failed with c1={}, c2={}, w1={}, w2={}".format(calcs[0], calcs[1], works[0], works[1]))
@with_temp_dir
def test_links_for_workflows(self, temp_dir):
"""
Check export flag `return_reversed=True`.
Check that CALL links are not followed in the export procedure,
and the only creation is followed for data::
____ ____ ____
| | INP | | CALL | |
| i1 | --> | w1 | <--- | w2 |
|____| |____| |____|
|
v RETURN
____
| |
| o1 |
|____|
"""
work1 = orm.WorkflowNode()
work2 = orm.WorkflowNode().store()
data_in = orm.Int(1).store()
data_out = orm.Int(2).store()
work1.add_incoming(data_in, LinkType.INPUT_WORK, 'input_i1')
work1.add_incoming(work2, LinkType.CALL_WORK, 'call')
work1.store()
data_out.add_incoming(work1, LinkType.RETURN, 'returned')
links_count_wanted = 2 # All 3 links, except CALL links (the CALL_WORK)
links_wanted = [
l for l in get_all_node_links() if l[3] not in (LinkType.CALL_WORK.value, LinkType.CALL_CALC.value)
]
# Check all links except CALL links | |
"09", # noqa: E501
}
read_only_vars = {}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""JobSearchResponseAggregierungenPlzebene2 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_10 (int): [optional] # noqa: E501
_12 (int): [optional] # noqa: E501
_13 (int): [optional] # noqa: E501
_14 (int): [optional] # noqa: E501
_15 (int): [optional] # noqa: E501
_16 (int): [optional] # noqa: E501
_17 (int): [optional] # noqa: E501
_18 (int): [optional] # noqa: E501
_19 (int): [optional] # noqa: E501
_20 (int): [optional] # noqa: E501
_21 (int): [optional] # noqa: E501
_22 (int): [optional] # noqa: E501
_23 (int): [optional] # noqa: E501
_24 (int): [optional] # noqa: E501
_25 (int): [optional] # noqa: E501
_26 (int): [optional] # noqa: E501
_27 (int): [optional] # noqa: E501
_28 (int): [optional] # noqa: E501
_29 (int): [optional] # noqa: E501
_30 (int): [optional] # noqa: E501
_31 (int): [optional] # noqa: E501
_32 (int): [optional] # noqa: E501
_33 (int): [optional] # noqa: E501
_34 (int): [optional] # noqa: E501
_35 (int): [optional] # noqa: E501
_36 (int): [optional] # noqa: E501
_37 (int): [optional] # noqa: E501
_38 (int): [optional] # noqa: E501
_39 (int): [optional] # noqa: E501
_40 (int): [optional] # noqa: E501
_41 (int): [optional] # noqa: E501
_42 (int): [optional] # noqa: E501
_44 (int): [optional] # noqa: E501
_45 (int): [optional] # noqa: E501
_46 (int): [optional] # noqa: E501
_47 (int): [optional] # noqa: E501
_48 (int): [optional] # noqa: E501
_49 (int): [optional] # noqa: E501
_50 (int): [optional] # noqa: E501
_51 (int): [optional] # noqa: E501
_52 (int): [optional] # noqa: E501
_53 (int): [optional] # noqa: E501
_54 (int): [optional] # noqa: E501
_55 (int): [optional] # noqa: E501
_56 (int): [optional] # noqa: E501
_57 (int): [optional] # noqa: E501
_58 (int): [optional] # noqa: E501
_59 (int): [optional] # noqa: E501
_60 (int): [optional] # noqa: E501
_61 (int): [optional] # noqa: E501
_63 (int): [optional] # noqa: E501
_64 (int): [optional] # noqa: E501
_65 (int): [optional] # noqa: E501
_66 (int): [optional] # noqa: E501
_67 (int): [optional] # noqa: E501
_68 (int): [optional] # noqa: E501
_69 (int): [optional] # noqa: E501
_70 (int): [optional] # noqa: E501
_71 (int): [optional] # noqa: E501
_72 (int): [optional] # noqa: E501
_73 (int): [optional] # noqa: E501
_74 (int): [optional] # noqa: E501
_75 (int): [optional] # noqa: E501
_76 (int): [optional] # noqa: E501
_77 (int): [optional] # noqa: E501
_78 (int): [optional] # noqa: E501
_79 (int): [optional] # noqa: E501
_80 (int): [optional] # noqa: E501
_81 (int): [optional] # noqa: E501
_82 (int): [optional] # noqa: E501
_83 (int): [optional] # noqa: E501
_84 (int): [optional] # noqa: E501
_85 (int): [optional] # noqa: E501
_86 (int): [optional] # noqa: E501
_87 (int): [optional] # noqa: E501
_88 (int): [optional] # noqa: E501
_89 (int): [optional] # noqa: E501
_90 (int): [optional] # noqa: E501
_91 (int): [optional] # noqa: E501
_92 (int): [optional] # noqa: E501
_93 (int): [optional] # noqa: E501
_94 (int): [optional] # noqa: E501
_95 (int): [optional] # noqa: E501
_96 (int): [optional] # noqa: E501
_97 (int): [optional] # noqa: E501
_98 (int): [optional] # noqa: E501
_99 (int): [optional] # noqa: E501
_01 (int): [optional] # noqa: E501
_02 (int): [optional] # noqa: E501
_03 (int): [optional] # noqa: E501
_04 (int): [optional] # noqa: E501
_06 (int): [optional] # noqa: E501
_07 (int): [optional] # noqa: E501
_08 (int): [optional] # noqa: E501
_09 (int): [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""JobSearchResponseAggregierungenPlzebene2 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_10 (int): [optional] # noqa: E501
_12 (int): [optional] # noqa: E501
_13 (int): [optional] # noqa: E501
_14 (int): [optional] # noqa: E501
_15 (int): [optional] # noqa: E501
_16 (int): [optional] # noqa: E501
_17 (int): [optional] # noqa: E501
_18 (int): [optional] # noqa: E501
_19 (int): [optional] # noqa: E501
_20 (int): [optional] # noqa: E501
_21 (int): [optional] # noqa: E501
_22 (int): [optional] # noqa: E501
_23 (int): [optional] # noqa: E501
_24 (int): [optional] # noqa: E501
_25 (int): [optional] # noqa: E501
_26 (int): [optional] # noqa: E501
_27 (int): [optional] # noqa: E501
_28 (int): [optional] # noqa: E501
_29 (int): [optional] # noqa: E501
_30 (int): [optional] # noqa: E501
_31 (int): | |
<reponame>RoryKurek/thermo<filename>thermo/phases/iapws_phase.py
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2019, 2020 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
__all__ = ['IAPWS95', 'IAPWS95Gas', 'IAPWS95Liquid', 'IAPWS97']
from chemicals import iapws
from chemicals.viscosity import mu_IAPWS
from chemicals.thermal_conductivity import k_IAPWS
from .helmholtz_eos import HelmholtzEOS
from chemicals.utils import rho_to_Vm, Vm_to_rho
from .phase import Phase
class IAPWS95(HelmholtzEOS):
    """IAPWS-95 scientific formulation for water as a Helmholtz-energy phase.

    State is specified by (T, P) (or, via :meth:`to`, any two of T, P, V);
    mass density is obtained from the ``chemicals.iapws`` solvers and cached,
    along with the reduced variables tau = Tc/T and delta = rho/rhoc used by
    the Helmholtz-energy derivative machinery of :class:`HelmholtzEOS`.

    Fix over the previous revision: the memoization in :meth:`mu`/:meth:`k`
    used a bare ``except:``, which also swallowed KeyboardInterrupt and
    SystemExit; it now catches only AttributeError.
    """
    model_name = 'iapws95'
    _MW = iapws.iapws95_MW
    Tc = iapws.iapws95_Tc
    Pc = iapws.iapws95_Pc
    rhoc_mass = iapws.iapws95_rhoc
    rhoc_mass_inv = 1.0/rhoc_mass
    rhoc_inv = rho_to_Vm(rhoc_mass, _MW)
    rhoc = 1.0/rhoc_inv
    rho_red = rhoc
    rho_red_inv = rhoc_inv
    T_red = Tc
    # Reference temperature at which the IAPWS transport correlations evaluate
    # the extra drho/dP term (see __mu_k).
    T_fixed_transport = 1.5*T_red
    _MW_kg = _MW*1e-3
    R = _MW_kg*iapws.iapws95_R  # This is just the gas constant 8.314... but matching iapws to their decimals
    R_inv = 1.0/R
    R2 = R*R
    #R = property_mass_to_molar(iapws95_R, iapws95_MW)
    zs = [1.0]
    cmps = [0]
    # HeatCapacityGases = iapws_correlations.HeatCapacityGases
    T_MAX_FIXED = 5000.0
    T_MIN_FIXED = 243.0  # PU has flash failures at < 242 ish K

    # Residual Helmholtz energy and its reduced-variable derivatives, delegated
    # to the C-accelerated implementations in chemicals.iapws.
    _d4Ar_ddelta2dtau2_func = staticmethod(iapws.iapws95_d4Ar_ddelta2dtau2)
    _d3Ar_ddeltadtau2_func = staticmethod(iapws.iapws95_d3Ar_ddeltadtau2)
    _d3Ar_ddelta2dtau_func = staticmethod(iapws.iapws95_d3Ar_ddelta2dtau)
    _d2Ar_ddeltadtau_func = staticmethod(iapws.iapws95_d2Ar_ddeltadtau)
    _d2Ar_dtau2_func = staticmethod(iapws.iapws95_d2Ar_dtau2)
    _dAr_dtau_func = staticmethod(iapws.iapws95_dAr_dtau)
    _d3Ar_ddelta3_func = staticmethod(iapws.iapws95_d3Ar_ddelta3)
    _d2Ar_ddelta2_func = staticmethod(iapws.iapws95_d2Ar_ddelta2)
    _dAr_ddelta_func = staticmethod(iapws.iapws95_dAr_ddelta)
    _Ar_func = staticmethod(iapws.iapws95_Ar)

    def __init__(self, T=None, P=None, zs=None):
        """Initialize the phase at temperature T [K] and pressure P [Pa].

        ``zs`` is accepted for interface compatibility; water is pure, so the
        class-level ``zs = [1.0]`` applies.
        """
        self.T = T
        self.P = P
        self._rho_mass = rho_mass = iapws.iapws95_rho(T, P)
        self._V = rho_to_Vm(rho=rho_mass, MW=self._MW)
        self.tau = tau = self.Tc/T
        self.delta = delta = rho_mass*self.rhoc_mass_inv
        # Ideal-gas Helmholtz energy and its first three tau derivatives.
        self.A0, self.dA0_dtau, self.d2A0_dtau2, self.d3A0_dtau3 = iapws.iapws95_A0_tau_derivatives(tau, delta)

    def to_TP_zs(self, T, P, zs):
        """Return a new phase object at (T, P); ``self`` is not modified."""
        new = self.__class__.__new__(self.__class__)
        new.zs = zs
        new.T = T
        new.P = P
        new._rho_mass = rho_mass = iapws.iapws95_rho(T, P)
        new._V = rho_to_Vm(rho=rho_mass, MW=self._MW)
        new.tau = tau = new.Tc/T
        new.delta = delta = rho_mass*new.rhoc_mass_inv
        new.A0, new.dA0_dtau, new.d2A0_dtau2, new.d3A0_dtau3 = iapws.iapws95_A0_tau_derivatives(tau, delta)
        return new

    def to(self, zs, T=None, P=None, V=None):
        """Return a new phase object from any two of T [K], P [Pa], V [m^3/mol].

        Raises
        ------
        ValueError
            If fewer than two of T, P, V are specified.
        """
        new = self.__class__.__new__(self.__class__)
        new.zs = zs
        if T is not None and P is not None:
            new.T = T
            new._rho_mass = rho_mass = iapws.iapws95_rho(T, P)
            new._V = rho_to_Vm(rho=rho_mass, MW=self._MW)
            new.P = P
        elif T is not None and V is not None:
            new.T = T
            new._rho_mass = rho_mass = 1e-3*self._MW/V
            P = iapws.iapws95_P(T, rho_mass)
            new._V = V
            new.P = P
        elif P is not None and V is not None:
            new._rho_mass = rho_mass = Vm_to_rho(V, MW=self._MW)
            T = new.T = iapws.iapws95_T(P, rho_mass)
            new._V = V
            new.P = P
        else:
            raise ValueError("Two of T, P, or V are needed")
        new.tau = tau = new.Tc/T
        new.delta = delta = rho_mass*new.rhoc_mass_inv
        new.A0, new.dA0_dtau, new.d2A0_dtau2, new.d3A0_dtau3 = iapws.iapws95_A0_tau_derivatives(tau, delta)
        return new

    def mu(self):
        r'''Calculate and return the viscosity of water according to the IAPWS.
        For details, see :obj:`chemicals.viscosity.mu_IAPWS`.

        Returns
        -------
        mu : float
            Viscosity of water, [Pa*s]
        '''
        try:
            return self._mu
        except AttributeError:
            pass
        self.__mu_k()
        return self._mu

    def k(self):
        r'''Calculate and return the thermal conductivity of water according to the IAPWS.
        For details, see :obj:`chemicals.thermal_conductivity.k_IAPWS`.

        Returns
        -------
        k : float
            Thermal conductivity of water, [W/m/K]
        '''
        try:
            return self._k
        except AttributeError:
            pass
        self.__mu_k()
        return self._k

    def __mu_k(self):
        # Compute and cache both transport properties at once; they share the
        # drho/dP terms, including one evaluated at the fixed reference
        # temperature T_fixed_transport required by the IAPWS correlations.
        drho_mass_dP = self.drho_mass_dP()
        # TODO: curve fit drho_dP_Tr better than IAPWS did (mpmath)
        drho_dP_Tr = self.to(T=self.T_fixed_transport, V=self._V, zs=self.zs).drho_mass_dP()
        self._mu = mu_IAPWS(T=self.T, rho=self._rho_mass, drho_dP=drho_mass_dP,
                            drho_dP_Tr=drho_dP_Tr)
        self._k = k_IAPWS(T=self.T, rho=self._rho_mass, Cp=self.Cp_mass(), Cv=self.Cv_mass(),
                          mu=self._mu, drho_dP=drho_mass_dP, drho_dP_Tr=drho_dP_Tr)
class IAPWS95Gas(IAPWS95):
    """IAPWS-95 water phase pinned to the gas root (``force_phase = 'g'``)."""
    is_gas = True
    is_liquid = False
    force_phase = 'g'
class IAPWS95Liquid(IAPWS95):
    """IAPWS-95 water phase pinned to the liquid root (``force_phase = 'l'``)."""
    force_phase = 'l'
    is_gas = False
    is_liquid = True
class IAPWS97(Phase):
    """IAPWS-97 industrial formulation for water.

    The formulation is region-based: regions 1, 2 and 5 use dimensionless
    Gibbs-energy functions of (tau, pi), while region 3 uses a Helmholtz
    function of (tau, delta); see the methods below.
    """
    model_name = 'iapws97'
    model_attributes = ('model_name',)
    _MW = 18.015268  # molecular weight [g/mol]
    R = 461.526  # IF-97 specific gas constant [J/(kg*K)]
    Tc = 647.096  # critical temperature [K]
    Pc = 22.064E6  # critical pressure [Pa]
    rhoc = 322.  # critical mass density [kg/m^3]
    zs = [1.0]
    cmps = [0]
    def mu(self):
        """Viscosity of water [Pa*s] from the IAPWS correlation at (T, rho)."""
        return mu_IAPWS(T=self.T, rho=self._rho_mass)
    def k(self):
        """Thermal conductivity of water [W/m/K] from the IAPWS correlation."""
        # TODO add properties; even industrial formulation recommends them
        return k_IAPWS(T=self.T, rho=self._rho_mass)
### Region 1,2,5 Gibbs
def G(self):
try:
return self._G
except:
pass
tau, pi, region = self.tau, self.pi, self.region
if region == 1:
G = iapws.iapws97_G_region1(tau, pi)
elif region == 2:
G = iapws.iapws97_Gr_region2(tau, pi) + iapws.iapws97_G0_region2(tau, pi)
elif region == 5:
G = iapws.iapws97_Gr_region5(tau, pi) + iapws.iapws97_G0_region5(tau, pi)
elif region == 4:
G = self.H() - self.T*self.S()
self._G = G
return G
def dG_dpi(self):
try:
return self._dG_dpi
except:
pass
tau, pi, region = self.tau, self.pi, self.region
if region == 1:
dG_dpi = iapws.iapws97_dG_dpi_region1(tau, pi)
elif region == 2:
dG_dpi = 1.0/pi + iapws.iapws97_dGr_dpi_region2(tau, pi)
elif region == 5:
dG_dpi = 1.0/pi + iapws.iapws97_dGr_dpi_region5(tau, pi)
self._dG_dpi = dG_dpi
return dG_dpi
def d2G_d2pi(self):
try:
return self._d2G_d2pi
except:
pass
tau, pi, region = self.tau, self.pi, self.region
if region == 1:
d2G_d2pi = iapws.iapws97_d2G_dpi2_region1(tau, pi)
elif region == 2:
d2G_d2pi = -1.0/(pi*pi) + iapws.iapws97_d2Gr_dpi2_region2(tau, pi)
elif region == 5:
d2G_d2pi = -1.0/(pi*pi) + iapws.iapws97_d2Gr_dpi2_region5(tau, pi)
self._d2G_d2pi = d2G_d2pi
return d2G_d2pi
def dG_dtau(self):
try:
return self._dG_dtau
except:
pass
tau, pi, region = self.tau, self.pi, self.region
if region == 1:
dG_dtau = iapws.iapws97_dG_dtau_region1(tau, pi)
elif region == 2:
dG_dtau = iapws.iapws97_dG0_dtau_region2(tau, pi) + iapws.iapws97_dGr_dtau_region2(tau, pi)
elif region == 5:
dG_dtau = iapws.iapws97_dG0_dtau_region5(tau, pi) + iapws.iapws97_dGr_dtau_region5(tau, pi)
self._dG_dtau = dG_dtau
return dG_dtau
def d2G_d2tau(self):
try:
return self._d2G_d2tau
except:
pass
tau, pi, region = self.tau, self.pi, self.region
if region == 1:
d2G_d2tau = iapws.iapws97_d2G_dtau2_region1(tau, pi)
elif region == 2:
d2G_d2tau = (iapws.iapws97_d2Gr_dtau2_region2(tau, pi)
+ iapws.iapws97_d2G0_dtau2_region2(tau, pi))
elif region == 5:
d2G_d2tau = (iapws.iapws97_d2Gr_dtau2_region5(tau, pi)
+ iapws.iapws97_d2G0_dtau2_region5(tau, pi))
self._d2G_d2tau = d2G_d2tau
return d2G_d2tau
def d2G_dpidtau(self):
try:
return self._d2G_dpidtau
except:
pass
tau, pi, region = self.tau, self.pi, self.region
if region == 1:
d2G_dpidtau = iapws.iapws97_d2G_dpidtau_region1(tau, pi)
elif region == 2:
d2G_dpidtau = iapws.iapws97_d2Gr_dpidtau_region2(tau, pi)
elif region == 5:
d2G_dpidtau = iapws.iapws97_d2Gr_dpidtau_region5(tau, pi)
self._d2G_dpidtau = d2G_dpidtau
return d2G_dpidtau
### Region 3 Helmholtz
def A_region3(self):
try:
return self._A_region3
except:
pass
self._A_region3 = A_region3 = iapws.iapws97_A_region3_region3(self.tau, self.delta)
return A_region3
def dA_ddelta(self):
try:
return self._dA_ddelta
except:
pass
self._dA_ddelta = dA_ddelta = iapws.iapws97_dA_ddelta_region3(self.tau, self.delta)
return dA_ddelta
def d2A_d2delta(self):
try:
return self._d2A_d2delta
except:
pass
self._d2A_d2delta = d2A_d2delta = iapws.iapws97_d2A_d2delta_region3(self.tau, self.delta)
return d2A_d2delta
def dA_dtau(self):
try:
return self._dA_dtau
except:
pass
self._dA_dtau = dA_dtau = iapws.iapws97_dA_dtau_region3(self.tau, self.delta)
return dA_dtau
def d2A_d2tau(self):
try:
return self._d2A_d2tau
except:
pass
self._d2A_d2tau = d2A_d2tau = iapws.iapws97_d2A_d2tau_region3(self.tau, self.delta)
return d2A_d2tau
def d2A_ddeltadtau(self):
try:
return self._d2A_ddeltadtau
except:
pass
self._d2A_ddeltadtau = d2A_ddeltadtau = iapws.iapws97_d2A_ddeltadtau_region3(self.tau, self.delta)
return d2A_ddeltadtau
def __init__(self, T=None, P=None, zs=None):
self.T = T
self.P = P
self._rho_mass = iapws.iapws97_rho(T, P)
self._V = rho_to_Vm(rho=self._rho_mass, MW=self._MW)
self.region = region = iapws.iapws97_identify_region_TP(T, P)
if region == 1:
self.pi = P*6.049606775559589e-08 #1/16.53E6
self.tau = 1386.0/T
self.Pref = 16.53E6
self.Tref = 1386.0
elif region == 2:
self.pi = P*1e-6
self.tau = 540.0/T
self.Pref = 1e6
self.Tref = 540.0
elif region == 3:
self.tau = self.Tc/T
self.Tref = self.Tc
self.delta = self._rho_mass*0.003105590062111801 # 1/322.0
self.rhoref = 322.0
elif region == 5:
self.pi = P*1e-6
self.tau = 1000.0/T
self.Tref = 1000.0
self.Pref = 1e6
def to_TP_zs(self, T, P, zs, other_eos=None):
new = self.__class__.__new__(self.__class__)
new.T = T
new.P = P
new.zs = zs
self._rho_mass = iapws.iapws97_rho(T, P)
self._V = rho_to_Vm(rho=self._rho_mass, MW=self._MW)
self.region = region = iapws.iapws97_identify_region_TP(T, P)
if region == 1:
self.pi = P*6.049606775559589e-08 #1/16.53E6
self.tau = 1386.0/T
elif region == 2:
self.pi = P*1e-6
self.tau = 540.0/T
elif region == 3:
self.tau = self.Tc/T
self.delta = self._rho_mass*0.003105590062111801 # 1/322.0
elif region == 5:
self.pi = P*1e-6
self.tau = 1000.0/T
def to(self, zs, T=None, P=None, V=None):
new = self.__class__.__new__(self.__class__)
new.zs = zs
if T is not None:
new.T = T
if P is | |
<filename>main/main.py
# src: https://github.com/facebookresearch/DrQA/blob/master/scripts/reader/train.py
import sys
sys.path.append(".")
sys.path.append("..")
import os
import json
import torch
import logging
import subprocess
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import clie.config as config
import clie.inputters.utils as util
from collections import OrderedDict
from tqdm import tqdm
from clie.utils.timer import AverageMeter, Timer
import clie.inputters.vector as vector
import clie.inputters.dataset as data
from clie.inputters import constant
from clie.utils import scorer
from main.model import EventRelationExtractor
logger = logging.getLogger()
def str2bool(v):
    """Interpret common truthy strings ('yes', 'true', 't', '1', 'y'; any case) as True."""
    truthy = {'yes', 'true', 't', '1', 'y'}
    return v.lower() in truthy
def human_format(num):
    """Format a number compactly with K/M/B/T suffixes, e.g. 1234 -> '1.23K'.

    The value is first rounded to 3 significant digits, then scaled by powers
    of 1000. Fix: the magnitude is now capped at the largest suffix, so inputs
    >= 1000**5 no longer raise IndexError (they render with the 'T' suffix).
    """
    suffixes = ['', 'K', 'M', 'B', 'T']
    num = float('{:.3g}'.format(num))
    magnitude = 0
    while abs(num) >= 1000 and magnitude < len(suffixes) - 1:
        magnitude += 1
        num /= 1000.0
    return '{}{}'.format('{:f}'.format(num).rstrip('0').rstrip('.'),
                         suffixes[magnitude])
def add_train_args(parser):
    """Adds commandline arguments pertaining to training a model. These
    are different from the arguments dictating the model architecture.

    Registers a 'bool' type (parsed by str2bool) and four argument groups:
    Environment, Filesystem, Saving/Loading, Preprocessing and General.
    """
    parser.register('type', 'bool', str2bool)

    # Runtime environment
    runtime = parser.add_argument_group('Environment')
    runtime.add_argument('--data_workers', type=int, default=5,
                         help='Number of subprocesses for data loading')
    runtime.add_argument('--random_seed', type=int, default=1013,
                         help=('Random seed for all numpy/torch/cuda '
                               'operations (for reproducibility)'))
    runtime.add_argument('--num_epochs', type=int, default=40,
                         help='Train data iterations')
    runtime.add_argument('--batch_size', type=int, default=32,
                         help='Batch size for training')
    runtime.add_argument('--test_batch_size', type=int, default=128,
                         help='Batch size during validation/testing')
    runtime.add_argument('--fp16', type='bool', default=False,
                         help="Whether to use 16-bit float precision instead of 32-bit")
    runtime.add_argument('--fp16_opt_level', type=str, default='O1',
                         help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                              "See details at https://nvidia.github.io/apex/amp.html")

    # Files
    files = parser.add_argument_group('Filesystem')
    files.add_argument('--model_dir', type=str, default='/tmp/',
                       help='Directory for saved models/checkpoints/logs')
    files.add_argument('--model_name', type=str, required=True,
                       help='Unique model identifier (.mdl, .txt, .checkpoint)')
    files.add_argument('--data_dir', type=str, required=True,
                       help='Directory of training/validation data')
    files.add_argument('--bert_feats', type=str, default='',
                       help='Directory of BERT features')
    files.add_argument('--bert_dir', type=str, default='',
                       help='Directory of bert files')
    files.add_argument('--vocab_file', type=str, default='vocab.txt',
                       help='Preprocessed vocab file')
    # BUGFIX: the two help strings below were copy-pasted from --vocab_file
    # ('Preprocessed vocab file'); they describe the data-file base names.
    files.add_argument('--train_filename', type=str, default='train',
                       help='Base name of the preprocessed training data file')
    files.add_argument('--valid_filename', type=str, default='dev',
                       help='Base name of the preprocessed validation data file')
    files.add_argument('--embed_dir', type=str, default='',
                       help='Directory of pre-trained embedding files')
    files.add_argument('--embedding_file', type=str, default='',
                       help='Space-separated pretrained embeddings file')

    # Saving + loading
    save_load = parser.add_argument_group('Saving/Loading')
    save_load.add_argument('--checkpoint', type='bool', default=False,
                           help='Save model + optimizer state after each epoch')
    save_load.add_argument('--pretrained', type=str, default=None,
                           help='Path to a pretrained model to warm-start with')

    # Data preprocessing
    preprocess = parser.add_argument_group('Preprocessing')
    preprocess.add_argument('--max_examples', type=int, default=-1,
                            help='Maximum number of examples for training')

    # General
    general = parser.add_argument_group('General')
    general.add_argument('--valid_metric', type=str, default='f1',
                         help='The evaluation metric used for model selection')
    general.add_argument('--sort_by_len', type='bool', default=True,
                         help='Sort batches by length for speed')
    general.add_argument('--only_test', type='bool', default=False,
                         help='Only do testing')
def set_defaults(args):
    """Make sure the commandline arguments are initialized properly.

    Resolves data/BERT-feature/vocab paths from the directory arguments and
    validates that each file exists, creates the model directory, derives the
    log/prediction/model file names, and (if an embedding file is given) reads
    its first line to infer the word-embedding dimension. Returns the mutated
    ``args`` namespace.
    """
    # Check critical files exist
    args.train_file = []
    args.valid_file = []
    args.bert_train_file = []
    args.bert_valid_file = []
    # One (data .json, BERT-feature .npz) pair per language, laid out as
    # <dir>/<LANG_MAP[lang]>/<filename>.{json,npz}. Training files are only
    # required when not in test-only mode.
    for lang in args.language:
        if not args.only_test:
            train_file = os.path.join(args.data_dir, '{}/{}.json'.format(
                constant.LANG_MAP[lang], args.train_filename))
            if not os.path.isfile(train_file):
                raise IOError('No such file: %s' % train_file)
            args.train_file.append(train_file)
            bert_train_file = os.path.join(args.bert_feats, '{}/{}.npz'.format(
                constant.LANG_MAP[lang], args.train_filename))
            if not os.path.isfile(bert_train_file):
                raise IOError('No such file: %s' % bert_train_file)
            args.bert_train_file.append(bert_train_file)
        valid_file = os.path.join(args.data_dir, '{}/{}.json'.format(
            constant.LANG_MAP[lang], args.valid_filename))
        if not os.path.isfile(valid_file):
            raise IOError('No such file: %s' % valid_file)
        args.valid_file.append(valid_file)
        bert_valid_file = os.path.join(args.bert_feats, '{}/{}.npz'.format(
            constant.LANG_MAP[lang], args.valid_filename))
        if not os.path.isfile(bert_valid_file):
            raise IOError('No such file: %s' % bert_valid_file)
        args.bert_valid_file.append(bert_valid_file)
    if not args.only_test:
        args.vocab_file = os.path.join(args.data_dir, args.vocab_file)
        if not os.path.isfile(args.vocab_file):
            raise IOError('No such file: %s' % args.vocab_file)
    # Set model directory
    subprocess.call(['mkdir', '-p', args.model_dir])
    # Set model name (timestamp + short random uuid when none was given)
    if not args.model_name:
        import uuid
        import time
        args.model_name = time.strftime("%Y%m%d-") + str(uuid.uuid4())[:8]
    # Set log + model file names; test-only runs get a language-tagged suffix.
    suffix = ('_%s_test' % ('_'.join(args.language))) if args.only_test else ''
    args.log_file = os.path.join(args.model_dir, args.model_name + '%s.txt' % suffix)
    args.pred_file = os.path.join(args.model_dir, args.model_name + '%s.json' % suffix)
    args.model_file = os.path.join(args.model_dir, args.model_name + '.mdl')
    if args.pretrained:
        args.pretrained = os.path.join(args.model_dir, args.pretrained + '.mdl')
    if args.embedding_file:
        args.embedding_file = os.path.join(args.embed_dir, args.embedding_file)
        if not os.path.isfile(args.embedding_file):
            raise IOError('No such file: %s' % args.embedding_file)
        with open(args.embedding_file, encoding='utf-8') as f:
            # if first line is of form count/dim.
            line = f.readline().rstrip().split(' ')
            dim = int(line[1]) if len(line) == 2 \
                else len(line) - 1
        args.word_dim = dim if args.use_word else 0
    return args
# ------------------------------------------------------------------------------
# Train loop.
# ------------------------------------------------------------------------------
def train(args, data_loader, model, global_stats):
    """Train the model for a single epoch over ``data_loader``.

    While the current epoch is still within ``args.warmup_epochs`` (for the
    'sgd'/'adam' optimizers), the learning rate is ramped linearly per
    update using ``global_stats['warmup_factor']``.  The running loss is
    logged and an end-of-epoch checkpoint is written when enabled.
    """
    loss_meter = AverageMeter()
    timer = Timer()
    epoch = global_stats['epoch']
    # Warmup applies for the whole epoch, so the check can be hoisted.
    use_warmup = (args.optimizer in ['sgd', 'adam']
                  and epoch <= args.warmup_epochs)
    progress = tqdm(data_loader)
    progress.set_description("%s" % 'Epoch = %d [loss = x.xx]' % epoch)

    for batch in progress:
        if use_warmup:
            # Ramp the learning rate linearly with the number of updates.
            warm_lr = global_stats['warmup_factor'] * (model.updates + 1)
            for group in model.optimizer.param_groups:
                group['lr'] = warm_lr
        batch_loss = model.update(batch)
        loss_meter.update(batch_loss, batch['batch_size'])
        progress.set_description(
            "%s" % ('Epoch = %d [loss = %.2f]' % (epoch, loss_meter.avg)))

    logger.info('train: Epoch %d | loss = %.2f | Time for epoch = %.2f (s)' %
                (epoch, loss_meter.avg, timer.time()))

    # Persist an end-of-epoch checkpoint so training can be resumed.
    if args.checkpoint:
        model.checkpoint(args.model_file + '.checkpoint', epoch + 1)
# ------------------------------------------------------------------------------
# Validation loops.
# ------------------------------------------------------------------------------
def draw_confusion_matrix(cm, labels, filename):
    """Render a confusion matrix as an annotated heatmap and save it.

    :param cm: square array of counts; rows are actual labels, columns are
        predicted labels.
    :param labels: class names used for both axes.
    :param filename: path the figure is written to (format inferred from
        the file extension).
    """
    plt.rcParams['axes.labelsize'] = 18
    plt.rcParams['axes.titlesize'] = 18
    # Annotate each cell with its raw count; off-diagonal zeros are left
    # blank to keep the plot readable.  (The previous percentage annotation
    # was dead code and divided by row sums that could be zero.)
    nrows, ncols = cm.shape
    annot = np.empty_like(cm).astype(str)
    for i in range(nrows):
        for j in range(ncols):
            c = cm[i, j]
            annot[i, j] = '' if (c == 0 and i != j) else '%d' % c
    cm = pd.DataFrame(cm, index=labels, columns=labels)
    cm.index.name = 'Actual'
    cm.columns.name = 'Predicted'
    fig, ax = plt.subplots(figsize=(16, 16))
    # Center the colormap halfway between the extreme counts.
    midpoint = (cm.values.max() - cm.values.min()) / 2
    sns.set(font_scale=1.5)
    sns.heatmap(cm, annot=annot, fmt='', ax=ax, center=midpoint,
                linewidths=0.01, cmap="Blues", cbar=False)
    plt.tick_params(labelsize=18)
    plt.tight_layout()
    plt.savefig(filename)
def validate(args, data_loader, model, global_stats, mode='valid'):
    """Run one full official validation pass.

    Collects per-example predictions, scores them with the official scorer,
    writes the predictions to ``args.pred_file`` (one JSON object per line)
    and, in test mode, renders a confusion-matrix plot.

    :returns: dict with ``precision``, ``recall`` and ``f1``.
    """
    timer = Timer()
    predictions = []
    n_examples = 0
    with torch.no_grad():
        progress = tqdm(data_loader)
        for batch in progress:
            output = model.predict(batch)
            gold = batch['labels'].tolist()
            # One record per example in the batch.
            for i, gold_label in enumerate(gold):
                predictions.append(OrderedDict([
                    ('id', batch['ids'][i]),
                    ('subject', batch['subject'][i]),
                    ('object', batch['object'][i]),
                    ('pred', model.label_dict[output['predictions'][i]]),
                    ('gold', model.label_dict[gold_label])
                ]))
            progress.set_description("%s" % 'Epoch = %d [validating ... ]' %
                                     global_stats['epoch'])
            n_examples += batch['batch_size']

    scorer_out = scorer.score(predictions, verbose=True)
    logger.info('Validation: precision = %.2f | recall = %.2f | f1 = %.2f |'
                ' examples = %d | %s time = %.2f (s) ' %
                (scorer_out['precision'] * 100, scorer_out['recall'] * 100,
                 scorer_out['f1'] * 100, n_examples, mode, timer.time()))
    logger.info('\n' + scorer_out['verbose_out'])

    # One prediction record per line (JSON-lines format).
    with open(args.pred_file, 'w') as fw:
        for record in predictions:
            fw.write(json.dumps(record) + '\n')

    if mode == 'test':
        cm_filename = os.path.join(
            args.model_dir, args.model_name + '_%s.png' % ('_'.join(args.language)))
        draw_confusion_matrix(cm=scorer_out['confusion_matrix'],
                              labels=scorer_out['labels'],
                              filename=cm_filename)

    return {key: scorer_out[key] for key in ('precision', 'recall', 'f1')}
# ------------------------------------------------------------------------------
# Main.
# ------------------------------------------------------------------------------
def supervise_training(args, model, start_epoch, train_loader, dev_loader):
    """Full supervised training loop with warmup, early stopping and LR decay.

    Trains for up to ``args.num_epochs`` epochs, validating after each one.
    The model is saved whenever the validation metric improves; training
    stops early after ``args.early_stop`` epochs without improvement, or
    when the decayed learning rate drops below ``args.min_lr``.
    """
    stats = {
        'timer': Timer(),
        'epoch': start_epoch,
        'best_valid': 0,
        'no_improvement': 0,
    }
    if args.optimizer in ['sgd', 'adam'] and args.warmup_epochs >= start_epoch:
        logger.info("Using warmup learning rate for the %d epoch, from 0 up to %s." %
                    (args.warmup_epochs, args.learning_rate))
        # Per-update increment that reaches the target LR at the end of warmup.
        stats['warmup_factor'] = \
            (args.learning_rate + 0.) / (args.num_train_batch * args.warmup_epochs)

    for epoch in range(start_epoch, args.num_epochs + 1):
        stats['epoch'] = epoch
        train(args, train_loader, model, stats)
        result = validate(args, dev_loader, model, stats)
        current = float(result['{}'.format(args.valid_metric)])

        if current > stats['best_valid']:
            # New best validation score: persist the model.
            logger.info('Best valid: %s = %.4f (epoch %d, %d updates)' %
                        (args.valid_metric, current,
                         stats['epoch'], model.updates))
            model.save(args.model_file)
            stats['best_valid'] = current
            stats['no_improvement'] = 0
        else:
            stats['no_improvement'] += 1
            if stats['no_improvement'] >= args.early_stop:
                break

        # If validation performance stalls, decay the learning rate once
        # past the decay epoch.
        if args.optimizer in ['sgd', 'adam'] and epoch > args.decay_epoch:
            old_lr = model.optimizer.param_groups[0]['lr']
            new_lr = old_lr * args.lr_decay
            model.optimizer.param_groups[0]['lr'] = new_lr
            logger.info("Decaying the learning rate from {:.6} to {:.6} [rate:{}].".
                        format(old_lr, new_lr, args.lr_decay))
            if new_lr < args.min_lr:
                logger.info("Training stopped as the learning rate: {:.6} drops "
                            "below the threshold {}.".format(new_lr, args.min_lr))
                break
def main(args):
# --------------------------------------------------------------------------
# MODEL
logger.info('-' * 100)
start_epoch = 1
if args.only_test:
if args.pretrained:
model = EventRelationExtractor.load(args.pretrained)
else:
if not os.path.isfile(args.model_file):
raise IOError('No such file: %s' % args.model_file)
model = EventRelationExtractor.load(args.model_file)
else:
if args.checkpoint and os.path.isfile(args.model_file + '.checkpoint'):
# Just resume training, no modifications.
logger.info('Found a checkpoint...')
checkpoint_file = args.model_file + '.checkpoint'
model, start_epoch = EventRelationExtractor.load_checkpoint(checkpoint_file, args.cuda)
else:
# Training starts fresh. But the model state is either pretrained or
| |
<reponame>mklewitz-kisura/dotfiles<gh_stars>0
"""
The :mod:`jedi.api.classes` module contains the return classes of the API.
These classes are the much bigger part of the whole API, because they contain
the interesting information about completion and goto operations.
"""
import warnings
import re
from jedi._compatibility import u
from jedi import settings
from jedi import common
from jedi.parser.cache import parser_cache
from jedi.cache import memoize_method
from jedi.evaluate import representation as er
from jedi.evaluate import instance
from jedi.evaluate import imports
from jedi.evaluate import compiled
from jedi.evaluate.filters import ParamName
from jedi.evaluate.imports import ImportName
from jedi.api.keywords import KeywordName
def _sort_names_by_start_pos(names):
return sorted(names, key=lambda s: s.start_pos or (0, 0))
def defined_names(evaluator, context):
    """
    List sub-definitions (e.g., methods in class).

    :type evaluator: Evaluator
    :type context: Context
    :rtype: list of Definition
    """
    # Only the first (global-search) filter is relevant here.  The local is
    # named ``name_filter`` so the builtin ``filter`` is not shadowed.
    name_filter = next(context.get_filters(search_global=True))
    names = list(name_filter.values())
    return [Definition(evaluator, n) for n in _sort_names_by_start_pos(names)]
class BaseDefinition(object):
    """Base class for the objects returned by the public jedi API.

    Wraps an internal jedi ``name`` object together with the evaluator and
    exposes user-facing accessors (name, type, module, position,
    docstring, ...).
    """

    # Maps private / platform-specific module names to the canonical public
    # module, so e.g. ``posixpath.join`` is reported as ``os.path.join``
    # (see ``full_name``).
    _mapping = {
        'posixpath': 'os.path',
        'riscospath': 'os.path',
        'ntpath': 'os.path',
        'os2emxpath': 'os.path',
        'macpath': 'os.path',
        'genericpath': 'os.path',
        'posix': 'os',
        '_io': 'io',
        '_functools': 'functools',
        '_sqlite3': 'sqlite3',
        '__builtin__': '',
        'builtins': '',
    }

    # Same idea as ``_mapping``, but keyed by full dotted paths stored as
    # tuples so prefixes can be matched segment by segment.
    _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
        'argparse._ActionsContainer': 'argparse.ArgumentParser',
    }.items())

    def __init__(self, evaluator, name):
        self._evaluator = evaluator
        self._name = name
        """
        An instance of :class:`jedi.parser.reprsentation.Name` subclass.
        """
        self.is_keyword = isinstance(self._name, KeywordName)

        # generate a path to the definition
        self._module = name.get_root_context()
        if self.in_builtin_module():
            # Builtin (compiled) modules have no file on disk.
            self.module_path = None
        else:
            self.module_path = self._module.py__file__()
            """Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``"""

    @property
    def name(self):
        """
        Name of variable/function/class/module.

        For example, for ``x = None`` it returns ``'x'``.

        :rtype: str or None
        """
        return self._name.string_name

    @property
    def type(self):
        """
        The type of the definition.

        Here is an example of the value of this attribute.  Let's consider
        the following source.  As what is in ``variable`` is unambiguous
        to Jedi, :meth:`jedi.Script.goto_definitions` should return a list of
        definition for ``sys``, ``f``, ``C`` and ``x``.

        >>> from jedi import Script
        >>> source = '''
        ... import keyword
        ...
        ... class C:
        ...     pass
        ...
        ... class D:
        ...     pass
        ...
        ... x = D()
        ...
        ... def f():
        ...     pass
        ...
        ... for variable in [keyword, f, C, x]:
        ...     variable'''

        >>> script = Script(source)
        >>> defs = script.goto_definitions()

        Before showing what is in ``defs``, let's sort it by :attr:`line`
        so that it is easy to relate the result to the source code.

        >>> defs = sorted(defs, key=lambda d: d.line)
        >>> defs  # doctest: +NORMALIZE_WHITESPACE
        [<Definition module keyword>, <Definition class C>,
         <Definition instance D>, <Definition def f>]

        Finally, here is what you can get from :attr:`type`:

        >>> defs[0].type
        'module'
        >>> defs[1].type
        'class'
        >>> defs[2].type
        'instance'
        >>> defs[3].type
        'function'
        """
        tree_name = self._name.tree_name
        resolve = False
        if tree_name is not None:
            # TODO move this to their respective names.
            definition = tree_name.get_definition()
            # Names bound by ``from x import y`` are resolved to the type of
            # the imported object rather than reported as the import itself.
            if definition.type == 'import_from' and \
                    tree_name in definition.get_defined_names():
                resolve = True

        if isinstance(self._name, imports.SubModuleName) or resolve:
            for context in self._name.infer():
                return context.api_type
        return self._name.api_type

    def _path(self):
        """The path to a module/class/function definition."""
        def to_reverse():
            # Yields the dotted path segments innermost-first; the caller
            # reverses the result.
            name = self._name
            if name.api_type == 'module':
                try:
                    name = list(name.infer())[0].name
                except IndexError:
                    pass

            if name.api_type == 'module':
                module_context, = name.infer()
                for n in reversed(module_context.py__name__().split('.')):
                    yield n
            else:
                yield name.string_name

            # Walk up the parent contexts, yielding each enclosing name.
            parent_context = name.parent_context
            while parent_context is not None:
                try:
                    method = parent_context.py__name__
                except AttributeError:
                    try:
                        yield parent_context.name.string_name
                    except AttributeError:
                        pass
                else:
                    for name in reversed(method().split('.')):
                        yield name
                parent_context = parent_context.parent_context
        return reversed(list(to_reverse()))

    @property
    def module_name(self):
        """
        The module name.

        >>> from jedi import Script
        >>> source = 'import json'
        >>> script = Script(source, path='example.py')
        >>> d = script.goto_definitions()[0]
        >>> print(d.module_name)  # doctest: +ELLIPSIS
        json
        """
        return self._module.name.string_name

    def in_builtin_module(self):
        """Whether this is a builtin module."""
        return isinstance(self._module, compiled.CompiledObject)

    @property
    def line(self):
        """The line where the definition occurs (starting with 1)."""
        start_pos = self._name.start_pos
        if start_pos is None:
            return None
        return start_pos[0]

    @property
    def column(self):
        """The column where the definition occurs (starting with 0)."""
        start_pos = self._name.start_pos
        if start_pos is None:
            return None
        return start_pos[1]

    def docstring(self, raw=False, fast=True):
        r"""
        Return a document string for this completion object.

        Example:

        >>> from jedi import Script
        >>> source = '''\
        ... def f(a, b=1):
        ...     "Document for function f."
        ... '''
        >>> script = Script(source, 1, len('def f'), 'example.py')
        >>> doc = script.goto_definitions()[0].docstring()
        >>> print(doc)
        f(a, b=1)
        <BLANKLINE>
        Document for function f.

        Notice that useful extra information is added to the actual
        docstring.  For function, it is call signature.  If you need
        actual docstring, use ``raw=True`` instead.

        >>> print(script.goto_definitions()[0].docstring(raw=True))
        Document for function f.

        :param fast: Don't follow imports that are only one level deep like
            ``import foo``, but follow ``from foo import bar``. This makes
            sense for speed reasons. Completing `import a` is slow if you use
            the ``foo.docstring(fast=False)`` on every object, because it
            parses all libraries starting with ``a``.
        """
        return _Help(self._name).docstring(fast=fast, raw=raw)

    @property
    def doc(self):
        """
        .. deprecated:: 0.8.0
           Use :meth:`.docstring` instead.
        .. todo:: Remove!
        """
        warnings.warn("Deprecated since Jedi 0.8. Use docstring() instead.", DeprecationWarning, stacklevel=2)
        return self.docstring(raw=False)

    @property
    def raw_doc(self):
        """
        .. deprecated:: 0.8.0
           Use :meth:`.docstring` instead.
        .. todo:: Remove!
        """
        warnings.warn("Deprecated since Jedi 0.8. Use docstring() instead.", DeprecationWarning, stacklevel=2)
        return self.docstring(raw=True)

    @property
    def description(self):
        """A textual description of the object."""
        return u(self._name.string_name)

    @property
    def full_name(self):
        """
        Dot-separated path of this object.

        It is in the form of ``<module>[.<submodule>[...]][.<object>]``.
        It is useful when you want to look up Python manual of the
        object at hand.

        Example:

        >>> from jedi import Script
        >>> source = '''
        ... import os
        ... os.path.join'''
        >>> script = Script(source, 3, len('os.path.join'), 'example.py')
        >>> print(script.goto_definitions()[0].full_name)
        os.path.join

        Notice that it returns ``'os.path.join'`` instead of (for example)
        ``'posixpath.join'``. This is not correct, since the modules name would
        be ``<module 'posixpath' ...>```. However most users find the latter
        more practical.
        """
        path = list(self._path())
        # TODO add further checks, the mapping should only occur on stdlib.
        if not path:
            return None  # for keywords the path is empty

        # Normalize platform/private module names to their public aliases.
        with common.ignored(KeyError):
            path[0] = self._mapping[path[0]]
        for key, repl in self._tuple_mapping.items():
            if tuple(path[:len(key)]) == key:
                path = [repl] + path[len(key):]

        # An empty first segment (builtins) is dropped from the dotted path.
        return '.'.join(path if path[0] else path[1:])

    def goto_assignments(self):
        # Without a tree name (e.g. compiled objects) there is nothing to
        # follow; fall back to this definition itself.
        if self._name.tree_name is None:
            return self

        names = self._evaluator.goto(self._name.parent_context, self._name.tree_name)
        return [Definition(self._evaluator, n) for n in names]

    def _goto_definitions(self):
        # TODO make this function public.
        return [Definition(self._evaluator, d.name) for d in self._name.infer()]

    @property
    @memoize_method
    def params(self):
        """
        Raises an ``AttributeError`` if the definition is not callable.
        Otherwise returns a list of `Definition` that represents the params.
        """
        def get_param_names(context):
            param_names = []
            if context.api_type == 'function':
                param_names = list(context.get_param_names())
                if isinstance(context, instance.BoundMethod):
                    # Hide the implicit first (``self``) parameter of bound
                    # methods.
                    param_names = param_names[1:]
            elif isinstance(context, (instance.AbstractInstanceContext, er.ClassContext)):
                # Classes are "called" via __init__, instances via __call__.
                if isinstance(context, er.ClassContext):
                    search = '__init__'
                else:
                    search = '__call__'
                names = context.get_function_slot_names(search)
                if not names:
                    return []

                # Just take the first one here, not optimal, but currently
                # there's no better solution.
                inferred = names[0].infer()
                param_names = get_param_names(next(iter(inferred)))
                if isinstance(context, er.ClassContext):
                    param_names = param_names[1:]
                return param_names
            elif isinstance(context, compiled.CompiledObject):
                return list(context.get_param_names())
            return param_names

        followed = list(self._name.infer())
        if not followed or not hasattr(followed[0], 'py__call__'):
            raise AttributeError()
        context = followed[0]  # only check the first one.

        return [_Param(self._evaluator, n) for n in get_param_names(context)]

    def parent(self):
        context = self._name.parent_context
        if context is None:
            return None

        if isinstance(context, er.FunctionExecutionContext):
            # TODO the function context should be a part of the function
            # execution context.
            context = er.FunctionContext(
                self._evaluator, context.parent_context, context.tree_node)
        return Definition(self._evaluator, context.name)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, self.description)

    def get_line_code(self, before=0, after=0):
        """
        Returns the line of code where this object was defined.

        :param before: Add n lines before the current line to the output.
        :param after: Add n lines after the current line to the output.

        :return str: Returns the line(s) of code or an empty string if it's a
            builtin.
        """
        if self.in_builtin_module():
            return ''

        path = self._name.get_root_context().py__file__()
        lines = parser_cache[path].lines

        line_nr = self._name.start_pos[0]
        start_line_nr = line_nr - before
        return ''.join(lines[start_line_nr:line_nr + after + 1])
class Completion(BaseDefinition):
"""
`Completion` objects are returned from :meth:`api.Script.completions`. They
provide additional information about a completion.
"""
def __init__(self, evaluator, name, stack, like_name_length):
super(Completion, self).__init__(evaluator, name)
self._like_name_length = like_name_length
self._stack = stack
# Completion objects with the same Completion name (which means
# duplicate items in the | |
late, or haven't shipped
on_time = orders_due[np.where((orders_due[:,env.ob_indices['shipped']]==1) &
(orders_due[:,env.ob_indices['on_time']]==1))].shape[0]
late = orders_due[np.where((orders_due[:,env.ob_indices['shipped']]==1) &
(orders_due[:,env.ob_indices['on_time']]==-1))].shape[0]
not_shipped = orders_due[np.where((orders_due[:,env.ob_indices['shipped']]==0))].shape[0]
if orders_due.shape[0] == 0:
cs_level = np.array([0, 0, 0])
else:
cs_level = np.array([on_time, late, not_shipped]) / orders_due.shape[0]
return cs_level
# Calculate cost of holding inventory
def calculate_inventory_cost(env):
    """Return the (negative) working-capital cost of current inventory.

    Estimates a per-material average variable standard margin (``beta_i``)
    from the order book and charges ``env.working_capital_per`` against the
    inventory value; the result is negated because holding inventory is
    treated as a cost.
    """
    env.order_book = env.order_book.astype(float)
    # Aggregate orders based on material code
    # unique_gmid is an array of the unique materials
    # gmid_locs is an array giving the locations of the unique identifiers
    unique_gmid, gmid_locs, gmid_counts = np.unique(
        env.order_book[:, env.ob_indices['gmid']],
        return_inverse=True,
        return_counts=True)
    # Weighted bincount sums each gmid's margins; dividing by the counts
    # yields the mean margin per material.
    beta_i = np.bincount(gmid_locs, env.order_book[:,env.ob_indices['var_std_margin']]) / gmid_counts
    # Add 0 as placeholder for beta_i if lengths are unequal
    # NOTE(review): assumes env.gmids ordering is consistent with
    # np.unique's sorted output so the inserts land at the right slots —
    # confirm against the environment's gmid_index_map construction.
    if len(beta_i) < len(env.gmid_index_map):
        for i in env.gmids:
            if i not in unique_gmid:
                beta_i = np.insert(beta_i, env.gmid_index_map[i], 0)
    # Check gmid maps to determine if OG is to be included in the calculation.
    # Where len(gmid_map) < len(inventory): OG is to be excluded, if equal: include
    _og_flag = len(env.inventory) - len(env.gmid_index_map)
    assert _og_flag >= 0, "Discrepancy between GMID's and inventory mapping: {}".format(_og_flag)
    assert _og_flag <= 1, "Discrepancy between GMID's and inventory mapping: {}".format(_og_flag)
    # Inventory level times mean margin, scaled by the working-capital
    # percentage; negated so the value reads as a cost.
    return sum([env.inventory[env.gmid_index_map[i] + _og_flag] * beta_i[env.gmid_index_map[i]]
                for i in unique_gmid]) * env.working_capital_per * -1
def plot_gantt(env, save_location=None):
    """Plot the production schedule as a Gantt chart.

    Rows index products, columns index schedule slots; each scheduled run is
    drawn as a horizontal bar of the product's minimum run time.  The figure
    is always shown and additionally saved when ``save_location`` is given.

    :param env: environment exposing ``product_data``, ``containers.actions``
        and ``n_products``.
    :param save_location: optional path passed to ``plt.savefig``.
    """
    # Get available products from the environment.  (The unused ``labels``
    # and ``cmap`` locals from the original were removed; ``cmap`` also
    # called the deprecated ``mpl.cm.get_cmap``.)
    unique_products = env.product_data[:,
                          env.prod_data_indices['gmid']].astype(int)
    # Find products that have not been scheduled, if any, to ensure proper
    # labeling.
    unscheduled_products = [p for p in unique_products if
                            p not in env.containers.actions]
    # Combine actual schedule with unscheduled products
    extended_schedule = np.hstack([env.containers.actions,
                                   unscheduled_products])
    # Organize products in a matrix where the row indexes the product and
    # the columns index the day
    gantt_matrix = np.zeros((env.n_products, extended_schedule.shape[0]))
    for i, j in enumerate(extended_schedule):
        for k in range(env.n_products):
            if j == k + 1:
                gantt_matrix[k, i] = j
    # Default color cycle keeps each product's color stable across rows.
    c = plt.rcParams['axes.prop_cycle'].by_key()['color']
    fig = plt.figure(figsize=(12, 8))
    ax = fig.add_subplot(111)
    # Cycle through products and plot accordingly
    for i in range(gantt_matrix.shape[0]):
        # Cycle through time slots for each product
        for j, k in enumerate(gantt_matrix[i]):
            if k != 0:
                start = j
                # Later update to match on GMID rather than simply the index
                # because the GMID is unlikely to have a one-to-one
                # correspondence to the location.
                prod_duration = env.product_data[int(k - 1),
                                    env.prod_data_indices['min_run_time']].astype(int)
                ax.barh(i, prod_duration, left=start, color=c[i])
    # Format plot
    ax.invert_yaxis()
    ax.grid(color='k', linestyle=':')
    pos = np.arange(gantt_matrix.shape[0]) + 0.5
    locsy, labelsy = plt.yticks(pos, unique_products)
    plt.title('Gantt Chart')
    plt.xlabel('Day')
    plt.xlim([0, len(env.containers.actions)])
    plt.ylabel('Product')
    if save_location is not None:
        plt.savefig(save_location)
    plt.show()
def get_state_labels(env, predicted=False):
    """Build column labels for the environment's state vector.

    The label set depends on the observation-space size and, for the larger
    settings, on ``env.state_setting``: plain per-product inventory labels,
    an optional off-grade entry, production one-hot labels and (for
    ``CONCAT_FORECAST``) monthly net-forecast columns.

    :param env: environment exposing ``observation_space``, ``product_data``
        and ``n_products`` (plus ``state_setting``/``gmids`` for the
        setting-specific branches).
    :param predicted: prefix every label with ``predicted_`` to distinguish
        model predictions from actual values.
    :returns: list of label strings.
    :raises ValueError: when no labeling rule matches.
    """
    # TODO: Update function to make selections based on state setting not length of obs_space
    obs_space = env.observation_space.shape[0]
    prod_names = env.product_data[:, env.product_data_cols.index('product_name')].astype(str)
    # (The dead ``state_labels = []`` initialisation was removed; it was
    # immediately overwritten by the comprehension below.)
    state_labels = ['state_' + i.lower() for i in prod_names]
    if obs_space == env.n_products:
        # Inventory-only state: labels are already complete.
        pass
    elif obs_space == env.n_products + 1:
        # Extra leading slot is the off-grade inventory.
        state_labels.insert(0, 'state_og')
    elif obs_space == 2 * env.n_products + 1:
        # Production one-hot (incl. shut-down) prepended to inventory.
        prod_state_labels = ['state_production_' + i.lower() for i in prod_names]
        prod_state_labels.insert(0, 'state_production_shut_down')
        state_labels = prod_state_labels + state_labels
    elif env.state_setting == 'IO_PRODUCT' or env.state_setting == 'INV_BALANCE_PRODUCTION':
        prod_state_labels = ['state_production_' + i.lower() for i in prod_names]
        state_labels = prod_state_labels + state_labels
    elif env.state_setting == 'CONCAT_FORECAST':
        prod_state_labels = ['state_production_' + i.lower() for i in prod_names]
        # One net-forecast column per (month, gmid) pair.
        forecast_labels = ['net_forecast_' + str(j) + '_' + i
                           for i in calendar.month_abbr[1:] for j in env.gmids]
        state_labels = prod_state_labels + state_labels + forecast_labels
    else:
        raise ValueError("No labeling rule for {} state data of length {} and {} products.".format(
            env.settings['STATE_SETTING'], obs_space, env.n_products))
    # Label predictions to differentiate from actual values
    if predicted:
        state_labels = ['predicted_' + i for i in state_labels]
    return state_labels
# Get planning_data headers
def get_planning_data_headers(env):
    """Build the column names for the planning-data log.

    Columns are: planning day, heuristic flag, one action-probability
    column per action, then one or more columns per non-empty container in
    ``env.containers``.  State containers use :func:`get_state_labels`;
    other multi-column containers get letter suffixes (``_a``, ``_b``, ...)
    and, when wider than the number of products, a leading off-grade
    column (``_og``).

    :returns: ``(col_names, planning_data_indices)`` where the second item
        maps each column name to its index.
    """
    names = env.containers.get_names()
    col_names = ['planning_day', 'heuristic_flag']
    # Append action probabilities and predicted state labels
    for a in env.action_list:
        col_names.append('action_prob_' + str(a))
    for i in names:
        # Skip containers that have not accumulated any data yet.
        if np.array(getattr(env.containers, i)).size > 0:
            x = np.vstack(getattr(env.containers, i))
            # Generate column names
            dims = x.shape[1]
            if i == 'state':
                state_labels = get_state_labels(env, predicted=False)
                col_names = col_names + state_labels
            elif i == 'predicted_state':
                state_labels = get_state_labels(env, predicted=True)
                col_names = col_names + state_labels
            elif dims > 1 and dims <= env.n_products:
                # One lettered column per product.
                for j in range(dims):
                    col_names.append(str(i) + '_' + string.ascii_lowercase[j])
            elif dims > env.n_products:
                # Off-grade needs to be added to the first value
                # Add flag to differentiate between state settings
                for j in range(dims):
                    if j == 0:
                        col_names.append(str(i) + '_og')
                    else:
                        col_names.append(str(i) + '_' + string.ascii_lowercase[j - 1])
            else:
                # Single-column container: use its name directly.
                col_names.append(str(i))
    # Also return a dict of names and indices for easy reference
    planning_data_indices = {k: i for i, k in enumerate(col_names)}
    return col_names, planning_data_indices
def get_mpc_data_headers(env):
    """Build column names for the MPC log from the environment containers.

    Multi-column containers get letter suffixes (``_a``, ``_b``, ...); when
    a container is wider than the number of products, its first column is
    treated as off-grade (``_og``).

    :returns: ``(col_names, planning_data_indices)`` where the second item
        maps each column name to its index.
    """
    col_names = []
    for container_name in env.containers.get_names():
        container = getattr(env.containers, container_name)
        if len(container) == 0:
            # Empty containers contribute no columns.
            continue
        width = np.vstack(container).shape[1]
        if width > env.n_products:
            # First column is off-grade, the rest are lettered products.
            col_names.append(str(container_name) + '_og')
            col_names.extend(str(container_name) + '_' + string.ascii_lowercase[j - 1]
                             for j in range(1, width))
        elif width > 1:
            col_names.extend(str(container_name) + '_' + string.ascii_lowercase[j]
                             for j in range(width))
        else:
            col_names.append(str(container_name))
    planning_data_indices = {name: pos for pos, name in enumerate(col_names)}
    return col_names, planning_data_indices
def load_current_state_data(settings, path=None):
    """Locate and load the current inventory, order-book and forecast files.

    Scans ``path`` (default: the Bahia Blanca model directory) for data
    files identified by filename keyword, then loads each one through its
    dedicated loader.

    :returns: ``(inventory, order_book, forecast)``
    """
    if path is None:
        path = 'production_models/bahia_blanca/'
    inv_path = prod_path = f_path = order_path = None
    # Identify each data file by a keyword in its name.
    for fname in os.listdir(path):
        if 'inventory' in fname:
            inv_path = os.path.join(path, fname)
        elif 'product' in fname:
            prod_path = os.path.join(path, fname)
        elif 'forecast' in fname:
            f_path = os.path.join(path, fname)
        elif 'orders' in fname:
            order_path = os.path.join(path, fname)
    # Both pd and tm should be the same as what the agent was trained on
    # production_data, transition_matrix = load_current_production_data(
    #     settings, prod_path)
    order_book = load_current_order_data(order_path)
    forecast = load_current_forecast_data(f_path)
    inventory = load_current_inventory_data(inv_path)
    return inventory, order_book, forecast
def load_current_production_data(settings, path):
    """Load test product data, verifying it matches the training data.

    Loads the scenario the agent was trained on (when available via
    ``settings['PRODUCT_DATA_PATH']``) and asserts that the product table
    and transition matrix at ``path`` are identical, so test and train
    environments match.  When the training path is missing, the user is
    asked whether to continue without the guarantee.

    :returns: ``(product_data, transition_matrix)`` for the test scenario.
    """
    # Ensure testing and training values are identical
    # Initialise the flag so the except-path does not leave it unbound
    # (the original raised NameError at the `if training_loaded:` check).
    training_loaded = False
    try:
        train_prods, train_tm = load_scenario_data(settings['PRODUCT_DATA_PATH'])
        training_loaded = True
    except KeyError:
        warnings.warn('No training environment found, cannot guarantee environments match.')
        answer = input('Continue? y/n')
        # input() returns a string; the original compared it to False,
        # which could never trigger the exit.
        if answer.strip().lower() != 'y':
            sys.exit('Program exited.')
    test_prods, test_tm = load_scenario_data(path)
    if training_loaded:
        assert np.array_equal(train_prods, test_prods), 'Product data for test and train environments do not match.'
        assert np.array_equal(train_tm, test_tm), 'Transition matrices for test and train environments do not match.'
    return test_prods, test_tm
def load_current_schedule(env):
    """Load the current production schedule from the planner's DBF export.

    Reads ``EXPORT.DBF`` from the directory of the product data file,
    converts each row into the internal schedule row format and truncates
    the result to ``env.fixed_planning_horizon`` days.

    :returns: numpy array with one row per scheduled batch.
    """
    sched_dbf_path = os.path.split(env.settings['PRODUCT_DATA_PATH'])[0]
    sched_dbf_path = os.path.join(sched_dbf_path, "EXPORT.DBF")
    print("Loading Current Schedule from {0}".format(sched_dbf_path))
    dbf = Dbf5(sched_dbf_path)
    df = dbf.to_dataframe()
    # Build Schedule
    sched = []
    b_id = 0  # batch id, incremented for every schedule row
    # Placeholders filled in later by the environment, not by this loader.
    booked_inv = 0.0
    off_grade = 0.0
    actual_prod = 0.0
    # The first row anchors the schedule start timestamp.
    sched_start_row = df.iloc[0,:]
    start_split = sched_start_row["START_DATE"].split("-")
    # START_TIME is "HMM" or "HHMM": the last two chars are minutes, the
    # leading one or two chars are the hour.
    if len(sched_start_row["START_TIME"]) > 3:
        start_hour = int(sched_start_row["START_TIME"][:2])
    else:
        start_hour = int(sched_start_row["START_TIME"][0])
    start_min = int(sched_start_row["START_TIME"][-2:])
    sched_start = datetime(int(start_split[0]),int(start_split[1]),int(start_split[2]),start_hour, start_min)
    sched_end_dt = sched_start
    idx = 0
    ## Cut current schedule to only include fixed planning horizon elements
    while sched_end_dt < sched_start + timedelta(hours = 24.0*env.fixed_planning_horizon):
        row = df.iloc[idx,:]
        gmid = int(row["GMID"])
        prod_rate = row["PROD_RATE"]
        prod_qty = row["QUANTITY"]
        prod_time = prod_qty / prod_rate
        start_split = row["START_DATE"].split("-")
        # Same "HMM"/"HHMM" time parsing as for the schedule start.
        if len(row["START_TIME"]) > 3:
            start_hour = int(row["START_TIME"][:2])
        else:
            start_hour = int(row["START_TIME"][0])
        start_min = int(row["START_TIME"][-2:])
        datetime_start = datetime(int(start_split[0]),int(start_split[1]),int(start_split[2]),start_hour, start_min)
        # Offset of this batch from the schedule start, converted to hours.
        prod_start = datetime_start - sched_start
        prod_start = prod_start.total_seconds() / (60*60)
        prod_end = int(prod_start + prod_time)
        cure_time = 24  # NOTE(review): cure time appears fixed at 24h — confirm
        cure_end = prod_end + cure_time
        # Inventory slot for this material (offset by 1 for the OG slot).
        inv_index = env.gmids.index(gmid) + 1
        sched_row = [b_id,gmid,prod_rate,prod_qty, prod_time, prod_start, prod_end,
                     cure_time, cure_end, booked_inv, inv_index, off_grade, actual_prod]
        b_id += 1
        sched.append(sched_row)
        idx += 1
        sched_end_dt = datetime_start
    schedule = np.stack(sched)
    return schedule
def load_current_order_data(path=None):
    """Load the current order book.

    Tries the live system connection first; until that is implemented the
    loader falls back to the most recent manual order file at ``path``.
    """
    try:
        return _load_current_order_data()
    except NotImplementedError:
        # Live connection not implemented yet: use the manual file instead.
        return _load_state_data_by_file(path, 'Orders', True)
def load_current_forecast_data(path=None):
    """Load the current demand forecast.

    Tries the live system connection first; until that is implemented the
    loader falls back to the most recent manual forecast file at ``path``.
    """
    try:
        return _load_current_forecast_data()
    except NotImplementedError:
        # Live connection not implemented yet: use the manual file instead.
        return _load_state_data_by_file(path, 'Forecast', True)
def load_current_inventory_data(path=None):
    """Load the current inventory levels.

    Tries the live system connection first; until that is implemented the
    loader falls back to the most recent manual inventory file at ``path``.
    """
    try:
        # Get data from system
        return _load_current_inventory_data()
    except NotImplementedError:
        # Live connection not implemented yet: use the manual file instead.
        return _load_state_data_by_file(path, 'Inventory', True)
# TODO: Complete the following function
def _load_current_inventory_data():
# Loads current inventory from SAP HANA or relevant | |
<gh_stars>10-100
# AUTORIGHTS
# ---------------------------------------------------------
# Copyright (c) 2017, <NAME>
#
# This file is part of the VCOCO dataset hooks and is available
# under the terms of the Simplified BSD License provided in
# LICENSE. Please retain this notice and LICENSE if you use
# this file (or any portion of it) in your project.
# ---------------------------------------------------------
# vsrl_data is a dictionary for each action class:
# image_id - Nx1
# ann_id - Nx1
# label - Nx1
# action_name - string
# role_name - ['agent', 'obj', 'instr']
# role_object_id - N x K matrix, obviously [:,0] is same as ann_id
import numpy as np
from pycocotools.coco import COCO
import os, json
import copy
import pickle
import ipdb
from ult.config import cfg
class VCOCOeval(object):
    def __init__(self, vsrl_annot_file, coco_annot_file,
                 split_file):
        """Build the evaluation database for one V-COCO split.

        Args:
            vsrl_annot_file: path to the V-COCO (verb semantic role) annotations.
            coco_annot_file: path to the underlying COCO annotations.
            split_file: text file of image ids belonging to this split.
        """
        # Hard-coded (verb, object-category) pairs enumerating the HOI label
        # space used by this evaluator (222 entries; see num_actions below).
        self.set_list = [(0, 38), (1, 31), (1, 32), (2, 43), (2, 44), (2, 77), (3, 1), (3, 19), (3, 28), (3, 46), (3, 47),
                         (3, 48), (3, 49), (3, 51), (3, 52), (3, 54), (3, 55), (3, 56), (4, 2), (4, 3), (4, 4), (4, 6), (4, 7),
                         (4, 8), (4, 9), (4, 18), (4, 21), (5, 68), (6, 33), (7, 64), (8, 47), (8, 48), (8, 49), (8, 50),
                         (8, 51), (8, 52), (8, 53), (8, 54), (8, 55), (8, 56), (9, 2), (9, 4), (9, 14), (9, 18), (9, 21),
                         (9, 25), (9, 27), (9, 29), (9, 57), (9, 58), (9, 60), (9, 61), (9, 62), (9, 64), (10, 31), (10, 32),
                         (10, 37), (10, 38), (11, 14), (11, 57), (11, 58), (11, 60), (11, 61), (12, 40), (12, 41), (12, 42),
                         (12, 46), (13, 1), (13, 25), (13, 26), (13, 27), (13, 29), (13, 30), (13, 31), (13, 32), (13, 33),
                         (13, 34), (13, 35), (13, 37), (13, 38), (13, 39), (13, 40), (13, 41), (13, 42), (13, 47), (13, 50),
                         (13, 68), (13, 74), (13, 75), (13, 78), (14, 30), (14, 33), (15, 43), (15, 44), (15, 45), (16, 1),
                         (16, 2), (16, 3), (16, 4), (16, 5), (16, 6), (16, 7), (16, 8), (16, 11), (16, 14), (16, 15), (16, 16),
                         (16, 17), (16, 18), (16, 19), (16, 20), (16, 21), (16, 24), (16, 25), (16, 26), (16, 27), (16, 28),
                         (16, 29), (16, 30), (16, 31), (16, 32), (16, 33), (16, 34), (16, 35), (16, 36), (16, 37), (16, 38),
                         (16, 39), (16, 40), (16, 41), (16, 42), (16, 43), (16, 44), (16, 45), (16, 46), (16, 47), (16, 48),
                         (16, 49), (16, 51), (16, 53), (16, 54), (16, 55), (16, 56), (16, 57), (16, 61), (16, 62), (16, 63),
                         (16, 64), (16, 65), (16, 66), (16, 67), (16, 68), (16, 73), (16, 74), (16, 75), (16, 77), (17, 35),
                         (17, 39), (18, 33), (19, 31), (19, 32), (20, 74), (21, 1), (21, 2), (21, 4), (21, 8), (21, 9), (21, 14),
                         (21, 15), (21, 16), (21, 17), (21, 18), (21, 19), (21, 21), (21, 25), (21, 26), (21, 27), (21, 28),
                         (21, 29), (21, 30), (21, 31), (21, 32), (21, 33), (21, 34), (21, 35), (21, 36), (21, 37), (21, 38),
                         (21, 39), (21, 40), (21, 41), (21, 42), (21, 43), (21, 44), (21, 45), (21, 46), (21, 47), (21, 48),
                         (21, 49), (21, 50), (21, 51), (21, 52), (21, 53), (21, 54), (21, 55), (21, 56), (21, 57), (21, 64),
                         (21, 65), (21, 66), (21, 67), (21, 68), (21, 73), (21, 74), (21, 77), (21, 78), (21, 79), (21, 80),
                         (22, 32), (22, 37), (23, 30), (23, 33)]
        # Per-pair instance counts aligned with set_list; presumably used for
        # the rare/non-rare split below -- TODO confirm source of these counts.
        self.label_nums = np.asarray([485, 434, 3, 3, 207, 37, 6, 6, 3, 1, 3, 4, 7, 1, 7, 32, 2, 160, 67, 9, 126, 1, 24, 6, 31, 108, 73, 292, 134, 398, 21, 3, 60, 4, 7, 1, 61, 110, 80, 56, 56, 119, 107, 96, 59, 2, 1, 4, 430, 136, 55, 1, 5, 1, 20, 165, 278, 26, 24, 1, 29, 228, 1, 15, 55, 54, 1, 2, 57, 52, 93, 72, 3, 7, 12, 6, 6, 1, 11, 105, 4, 2, 1, 1, 7, 1, 17, 1, 1, 2, 170, 91, 86, 28, 39, 445, 6, 1, 2, 5, 1, 12, 4, 1, 1, 1, 14, 18, 7, 7, 5, 8, 4, 7, 4, 1, 3, 9, 390, 45, 156, 521, 15, 4, 5, 338, 254, 3, 5, 11, 15, 12, 43, 12, 12, 2, 2, 14, 1, 11, 37, 18, 134, 1, 7, 1, 29, 291, 1, 3, 4, 62, 4, 75, 1, 22, 109, 233, 228, 1, 366, 86, 50, 46, 68, 1, 1, 1, 1, 8, 14, 45, 2, 5, 45, 70, 89, 9, 99, 186, 50, 56, 54, 9, 120, 66, 56, 160, 269, 32, 65, 83, 67, 197, 43, 13, 26, 5, 46, 3, 6, 1, 60, 67, 56, 20, 2, 78, 11, 58, 1, 350, 1, 83, 41, 18, 2, 9, 1, 466, 224, 32])
        # NOTE(review): nonrare/rare are computed but never stored or used in
        # this constructor.
        nonrare = np.argwhere(self.label_nums > 10)  # non rare
        rare = np.argwhere(self.label_nums <= 10)
        # Pairs of verb ids treated as interchangeable (mapped onto each other).
        self.verb_trans = {
            2: 3,
            3: 2,
            17: 18,
            18: 17,
            8: 15,
            15: 8,
        }
        # Maps a sparse verb-id space onto a dense 0..23 index (ids 3, 17, 22,
        # 23, 27 are absent from the keys).
        self.l_map = {0: 0,
                      1: 1,
                      2: 2,
                      4: 3,
                      5: 4,
                      6: 5,
                      7: 6,
                      8: 7,
                      9: 8,
                      10: 9,
                      11: 10,
                      12: 11,
                      13: 12,
                      14: 13,
                      15: 14,
                      16: 15,
                      18: 16,
                      19: 17,
                      20: 18,
                      21: 19,
                      24: 20,
                      25: 21,
                      26: 22,
                      28: 23}
        # NOTE(review): map_24_to_2 is an unused local.
        map_24_to_2 = {}
        # 26
        self.COCO = COCO(coco_annot_file)
        self.VCOCO = _load_vcoco(vsrl_annot_file)
        # NOTE(review): the file handle passed to loadtxt is never closed;
        # np.loadtxt(split_file) would accept the path directly.
        self.image_ids = np.loadtxt(open(split_file, 'r'))
        # simple check: every annotated image id must belong to this split
        assert np.all(np.equal(np.sort(np.unique(self.VCOCO[0]['image_id'])), np.sort(self.image_ids)))
        self._init_coco()
        self._init_vcoco()
        self.vcocodb = self._get_vcocodb()
def _init_vcoco(self):
actions = [x['action_name'] for x in self.VCOCO]
roles = [x['role_name'] for x in self.VCOCO]
self.actions = actions
self.actions_to_id_map = {v: i for i, v in enumerate(self.actions)}
self.num_actions = 222
self.roles = roles
def _init_coco(self):
category_ids = self.COCO.getCatIds()
categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
self.category_to_id_map = dict(zip(categories, category_ids))
self.classes = ['__background__'] + categories
self.num_classes = len(self.classes)
self.json_category_id_to_contiguous_id = {
v: i + 1 for i, v in enumerate(self.COCO.getCatIds())}
self.contiguous_category_id_to_json_id = {
v: k for k, v in self.json_category_id_to_contiguous_id.items()}
def _get_vcocodb(self):
vcocodb = copy.deepcopy(self.COCO.loadImgs(self.image_ids.tolist()))
res_labels = []
counts = 0
s_labels = []
for entry in vcocodb:
self._prep_vcocodb_entry(entry)
labels, seen_labels, count = self._add_gt_annotations(entry)
res_labels.extend(labels)
s_labels.extend(seen_labels)
counts += count
print(counts, len(res_labels), len(set(res_labels)), sorted(list(set(res_labels))))
print(len(set(s_labels)), sorted(list(set(s_labels))))
# print
if 0:
nums = np.zeros((self.num_actions), dtype=np.int32)
for entry in vcocodb:
for aid in range(self.num_actions):
nums[aid] += np.sum(np.logical_and(entry['gt_actions'][:, aid]==1, entry['gt_classes']==1))
for aid in range(self.num_actions):
print('Action %s = %d'%(self.actions[aid], nums[aid]))
return vcocodb
def _prep_vcocodb_entry(self, entry):
entry['boxes'] = np.empty((0, 4), dtype=np.float32)
entry['is_crowd'] = np.empty((0), dtype=np.bool)
entry['gt_classes'] = np.empty((0), dtype=np.int32)
entry['gt_actions'] = np.empty((0, self.num_actions), dtype=np.int32)
entry['gt_role_id'] = np.empty((0, self.num_actions), dtype=np.int32)
def _add_gt_annotations(self, entry):
ann_ids = self.COCO.getAnnIds(imgIds=entry['id'], iscrowd=None)
objs = self.COCO.loadAnns(ann_ids)
# Sanitize bboxes -- some are invalid
valid_objs = []
valid_ann_ids = []
width = entry['width']
height = entry['height']
for i, obj in enumerate(objs):
if 'ignore' in obj and obj['ignore'] == 1:
continue
# Convert form x1, y1, w, h to x1, y1, x2, y2
x1 = obj['bbox'][0]
y1 = obj['bbox'][1]
x2 = x1 + np.maximum(0., obj['bbox'][2] - 1.)
y2 = y1 + np.maximum(0., obj['bbox'][3] - 1.)
x1, y1, x2, y2 = clip_xyxy_to_image(
x1, y1, x2, y2, height, width)
# Require non-zero seg area and more than 1x1 box size
if obj['area'] > 0 and x2 > x1 and y2 > y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
valid_ann_ids.append(ann_ids[i])
num_valid_objs = len(valid_objs)
assert num_valid_objs == len(valid_ann_ids)
boxes = np.zeros((num_valid_objs, 4), dtype=entry['boxes'].dtype)
is_crowd = np.zeros((num_valid_objs), dtype=entry['is_crowd'].dtype)
gt_classes = np.zeros((num_valid_objs), dtype=entry['gt_classes'].dtype)
gt_actions = -np.ones((num_valid_objs, self.num_actions), dtype=entry['gt_actions'].dtype)
gt_role_id = -np.ones((num_valid_objs, self.num_actions), dtype=entry['gt_role_id'].dtype)
unseen_labels = []
seen_labels = []
counts = 0
for ix, obj in enumerate(valid_objs):
cls = self.json_category_id_to_contiguous_id[obj['category_id']]
boxes[ix, :] = obj['clean_bbox']
gt_classes[ix] = cls
is_crowd[ix] = obj['iscrowd']
tmp_action, tmp_role_id = \
self._get_vsrl_data(valid_ann_ids[ix],
valid_ann_ids, valid_objs, 26)
# reconstruct 222 from 26
label_map = json.load(open(cfg.LOCAL_DATA + "/Data/action_index.json"))
role_id = -np.ones((self.num_actions), | |
+ kDrawPointTag + "BCPMarks = 0")
exec("params." + kDrawPointTag + "PointLabels = 0")
exec("params." + kShowMetaTag + "Outline = 0") # shows filled outline in lower right of meta data area under glyph outline.
exec("params." + kShowMetaTag + "Name = 1")
exec("params." + kShowMetaTag + "BBox = 0")
exec("params." + kShowMetaTag + "SideBearings = 1")
exec("params." + kShowMetaTag + "V_SideBearings = 0")
exec("params." + kShowMetaTag + "Parts = 0")
exec("params." + kShowMetaTag + "Paths = 0")
exec("params." + kShowMetaTag + "Hints = 0")
params.DrawFilledOutline = 1
params.metaDataXoffset = 0
params.metaDataTextSize = 80
params.metaDataNameSize = 80
params.glyphsPerPage = 156
params.metaDataAboveGlyph = 0
elif arg == "-fontsetplot":
exec("params." + kDrawTag + "HHints = 0")
exec("params." + kDrawTag + "VHints = 0")
exec("params." + kShowMetaTag + "Hints = 0")
exec("params." + kDrawTag + "BlueZones = 0")
exec("params." + kDrawTag + "Baseline = 0")
exec("params." + kDrawTag + "EMBox = 0")
exec("params." + kDrawTag + "XAdvance = 0")
exec("params." + kDrawTag + "ContourLabels = 0")
exec("params." + kDrawTag + "Outline = 1")
exec("params." + kDrawPointTag + "PointMarks = 0")
exec("params." + kDrawPointTag + "BCPMarks = 0")
exec("params." + kDrawPointTag + "PointLabels = 0")
exec("params." + kShowMetaTag + "Outline = 0") # shows filled outline in lower right of meta data area under glyph outline.
exec("params." + kShowMetaTag + "Name = 0")
exec("params." + kShowMetaTag + "BBox = 0")
exec("params." + kShowMetaTag + "SideBearings = 0")
exec("params." + kShowMetaTag + "V_SideBearings = 0")
exec("params." + kShowMetaTag + "Parts = 0")
exec("params." + kShowMetaTag + "Paths = 0")
exec("params." + kShowMetaTag + "Hints = 0")
params.DrawFilledOutline = 1
params.rt_doFontSet = 1
if params.userPtSize == None: # set default point size.
params.userPtSize = 12
elif arg == "-alpha":
params.rt_alphaSort = 1
elif arg == "-waterfallplot":
if not params.waterfallRange:
params.waterfallRange = (36,24,20,18,16,14,12,10,9,8,7,6)
elif arg == "-wfr":
i = i +1
sizeString = sys.argv[i]
if sizeString[0] == "-":
raise OptionParseError("Option Error: it looks like the first item in the waterfall size list following '-wfr' is another option.")
waterfallRange = parseGlyphListArg(sizeString)
params.waterfallRange = []
for ptSize in waterfallRange:
params.waterfallRange.append(eval(ptSize))
print params.waterfallRange
elif arg == "-gpp":
i = i +1
try:
params.glyphsPerPage = int( sys.argv[i])
except ValueError:
raise OptionParseError("Option Error: -glyphsPerPage must be followed by an integer number")
elif arg == "-pt":
i = i +1
try:
params.userPtSize = int( sys.argv[i])
except ValueError:
raise OptionParseError("Option Error: -pt must be followed by an integer number")
elif arg == "-g":
i = i +1
glyphString = sys.argv[i]
if glyphString[0] == "-":
raise OptionParseError("Option Error: it looks like the first item in the glyph list following '-g' is another option.")
params.rt_optionGlyphList += parseGlyphListArg(glyphString)
elif arg == "-gf":
i = i +1
rt_filePath = sys.argv[i]
if rt_filePath[0] == "-":
raise OptionParseError("Option Error: it looks like the the glyph list file following '-gf' is another option.")
try:
gf = file(rt_filePath, "rt")
glyphString = gf.read()
gf.close()
except (IOError,OSError):
raise OptionParseError("Option Error: could not open glyph list file <%s>." % rt_filePath)
params.rt_optionGlyphList += parseGlyphListArg(glyphString)
elif arg == "-o":
i = i +1
params.rt_pdfFileName = sys.argv[i]
elif arg == "-dno":
params.openPDFWhenDone = 0
elif arg == "-do":
params.openPDFWhenDone = 1
elif arg == "-black":
params.emboxColorRGB = params.baselineColorRGB = params.xAdvanceColorRGB = params.contourLabelColorRGB = params.pointLabelColorRGB = params.MetaDataGlyphColorRGB = (0,0,0)
params.hintColorRGB = params.hintColorOverlapRGB = params.alignmentZoneColorRGB = (0.5,0.5,0.5)
elif arg == "-lf":
i = i +1
rt_filePath = sys.argv[i]
if rt_filePath[0] == "-":
raise OptionParseError("Option Error: it looks like the the layout file following '-gf' is another option.")
if not os.path.exists(rt_filePath):
raise OptionParseError("Option Error: The layout file %s does not exist." % (rt_filePath))
params.rt_optionLayoutDict = parseLayoutFile(rt_filePath)
exec("params." + kShowMetaTag + "WidthOnly = 1")
exec("params." + kShowMetaTag + "HintDir = 1")
exec("params." + kShowMetaTag + "RowFont = 1")
exec("params." + kShowMetaTag + "Hints = 0")
exec("params." + kShowMetaTag + "SideBearings = 0")
exec("params." + kShowMetaTag + "V_SideBearings = 0")
elif arg == "-select_hints":
i = i +1
indexList = sys.argv[i]
indexList = re.findall(r"([^,]+)", indexList)
try:
indexList = map(int, indexList)
params.rt_hintTableList = indexList
except ValueError:
raise OptionParseError("Option Error: in \" -select_hints %s\, one of the indices in the argument list is not an integer." % sys.argv[i])
elif arg == "-baseline":
i = i +1
try:
params.userBaseLine= int( sys.argv[i])
except ValueError:
raise OptionParseError("Option Error: -pt must be followed by an integer number")
elif arg == "-v":
exec("params." + kShowMetaTag + "SideBearings = 0")
exec("params." + kShowMetaTag + "V_SideBearings = 1")
elif arg == "-vh":
exec("params." + kShowMetaTag + "SideBearings = 1")
exec("params." + kShowMetaTag + "V_SideBearings = 1")
elif arg[0] == "-":
raise OptionParseError("Option Error: Unknown option <%s>." % arg)
else:
params.rt_fileList += [arg]
i += 1
if not params.rt_fileList:
raise OptionParseError("Option Error: You must provide at least one font file path.")
return params
def getGlyphID(glyphTag, fontGlyphList):
    """Resolve a glyph tag (decimal glyph index or glyph name) to an index.

    Args:
        glyphTag: either a decimal glyph index string ("42") or a glyph name.
        fontGlyphList: list of glyph names in font order.
    Returns:
        The glyph index as an int, or None when the tag is neither a valid
        in-range index nor a known glyph name.
    """
    glyphID = None
    try:
        glyphID = int(glyphTag)
        glyphName = fontGlyphList[glyphID]
    except IndexError:
        # The tag parsed as a number but is out of range for this font.
        # Previously the out-of-range index leaked through to callers
        # (getGlyphNames then crashed indexing fontGlyphList with it);
        # report "not found" instead.
        glyphID = None
    except ValueError:
        # Not a number: try it as a glyph name.
        try:
            glyphID = fontGlyphList.index(glyphTag)
        except ValueError:
            glyphID = None
    return glyphID
def getGlyphNames(glyphTag, fontGlyphList, fontFileName):
    """Expand one glyph tag (name, GID, or "a-b" range) into glyph names.

    Returns the list of glyph names, or None (after logging a warning)
    when any part of the tag does not resolve in this font.
    """
    rangeList = glyphTag.split("-")
    prevGID = getGlyphID(rangeList[0], fontGlyphList)
    if prevGID is None:
        if len(rangeList) > 1:
            logMsg( "\tWarning: glyph ID <%s> in range %s from glyph selection list option is not in font. <%s>." % (rangeList[0], glyphTag, fontFileName))
        else:
            logMsg( "\tWarning: glyph ID <%s> from glyph selection list option is not in font. <%s>." % (rangeList[0], fontFileName))
        return None
    glyphNameList = [fontGlyphList[prevGID]]
    for rangeTag in rangeList[1:]:
        gid = getGlyphID(rangeTag, fontGlyphList)
        if gid is None:
            logMsg( "\tWarning: glyph ID <%s> in range %s from glyph selection list option is not in font. <%s>." % (rangeTag, glyphTag, fontFileName))
            return None
        # Append every glyph between the previous bound and this one.
        glyphNameList.extend(fontGlyphList[i] for i in range(prevGID + 1, gid + 1))
        prevGID = gid
    return glyphNameList
def filterGlyphList(params, fontGlyphList, fontFileName):
    """Intersect the user-requested glyph list with the font's glyph list.

    Warnings about glyphs missing from the font are emitted by
    getGlyphNames; with no user selection the full font list is returned.
    """
    if not params.rt_optionGlyphList:
        return fontGlyphList
    # Expand ranges and drop tags that do not resolve in this font.
    selected = []
    for tag in params.rt_optionGlyphList:
        names = getGlyphNames(tag, fontGlyphList, fontFileName)
        if names is not None:
            selected.extend(names)
    return selected
def openFile(path, txPath):
    """Open *path* as a fontTools TTFont, converting PS/CFF input if needed.

    Args:
        path: font file path (OTF/TTF, bare CFF, or Type1/PS).
        txPath: path to the AFDKO 'tx' tool, used to convert PS to CFF.
    Returns:
        (ttFont, tempPathCFF): the parsed font, and the path of a temporary
        CFF file when a PS conversion was made (caller must remove it),
        otherwise None.
    Raises:
        FontError: on unreadable, empty, or unrecognized font data.
    """
    # If input font is CFF or PS, build a dummy ttFont.
    tempPathCFF = None
    try:
        # NOTE: Python 2 built-in file(); data is a byte string.
        ff = file(path, "rb")
        data = ff.read(10)
        ff.close()
    except (IOError, OSError):
        import traceback
        traceback.print_exc()
        raise FontError("Failed to open and read font file %s. Check file/directory permissions." % path)
    if len(data) < 10:
        raise FontError("Error: font file was zero size: may be a resource fork font, which this program does not process. <%s>." % path)
    # sfnt magic numbers: CFF-flavoured OTF, Apple TrueType, Windows TrueType.
    if (data[:4] == "OTTO") or (data[:4] == "true") or (data[:4] == "\0\1\0\0"): # it is an OTF/TTF font, can process file directly
        try:
            ttFont = ttLib.TTFont(path)
        except (IOError, OSError):
            raise FontError("Error opening or reading from font file <%s>." % path)
        except TTLibError:
            raise FontError("Error parsing font file 333 <%s>." % path)
        if not (ttFont.has_key('CFF ') or ttFont.has_key('glyf')):
            raise FontError("Error: font is not a CFF or TrueType font <%s>." % path)
        return ttFont, tempPathCFF
    # It is not an OTF file.
    if (data[0] == '\1') and (data[1] == '\0'): # CFF file
        cffPath = path
    elif not "%" in data:
        # not a PS file either (PS starts with "%!..." within the head bytes)
        logMsg("Font file must be a PS, CFF or OTF fontfile: %s." % path)
        raise FontError("Font file must be PS, CFF or OTF file: %s." % path)
    else: # It is a PS file. Convert to CFF via the external 'tx' tool.
        tfd,tempPathCFF = tempfile.mkstemp()
        os.close(tfd)
        cffPath = tempPathCFF
        command="%s -cff +b \"%s\" \"%s\" 2>&1" % (txPath, path, tempPathCFF)
        report = FDKUtils.runShellCmd(command)
        if "fatal" in report:
            logMsg("Attempted to convert font %s from PS to a temporary CFF data file." % path)
            logMsg(report)
            raise FontError("Failed to convert PS font %s to a temp CFF font." % path)
    # now package the CFF font as an OTF font for use by autohint.
    ff = file(cffPath, "rb")
    data = ff.read()
    ff.close()
    try:
        # Build an in-memory TTFont wrapping the bare CFF table.
        ttFont = ttLib.TTFont()
        cffModule = ttLib.getTableModule('CFF ')
        cffTable = cffModule.table_C_F_F_('CFF ')
        ttFont['CFF '] = cffTable
        cffTable.decompile(data, ttFont)
    except:
        import traceback
        traceback.print_exc()
        logMsg("Attempted to read font %s as CFF." % path)
        raise FontError("Error parsing font file <%s>." % path)
    return ttFont, tempPathCFF
def proofMakePDF(pathList, params, txPath):
# use fontTools library to open font and extract CFF table.
# If error, skip font and report error.
if params.rt_doFontSet:
pdfFontList = []
logMsg("")
logMsg( "Collecting font data from:")
fontCount = 0
for path in pathList:
fontFileName = os.path.basename(path)
params.rt_filePath = os.path.abspath(path)
logMsg( "\t%s." % (path))
try:
ttFont, tempPathCFF = openFile(path, txPath)
except FontError:
print traceback.format_exception_only(sys.exc_type, sys.exc_value)[-1]
return
try:
fontGlyphList = ttFont.getGlyphOrder()
except FontError:
print traceback.format_exception_only(sys.exc_type, sys.exc_value)[-1]
return
# filter specified list, if any, with font list.
glyphList = filterGlyphList(params, fontGlyphList, fontFileName)
if not glyphList:
raise FontError("Error: selected glyph list is empty for font <%s>." % fontFileName)
params.rt_reporter = logMsg
if ttFont.has_key("CFF "):
pdfFont = otfPDF.txPDFFont(ttFont, params)
elif ttFont.has_key("glyf"):
pdfFont = ttfPDF.txPDFFont(ttFont, params)
else:
logMsg( "Quitting. Font type is not recognized. %s.." % (path))
break
pdfFontList.append([glyphList, pdfFont, tempPathCFF])
pdfFilePath = makeFontSetPDF(pdfFontList, params)
for entry in pdfFontList:
tempPathCFF = entry[2]
pdfFont = entry[1]
pdfFont.clientFont.close()
if tempPathCFF:
os.remove(tempPathCFF)
logMsg( "Wrote proof file %s. End time: %s." % (pdfFilePath, time.asctime()))
if pdfFilePath and params.openPDFWhenDone:
if curSystem == "Windows":
curdir = os.getcwdu()
basedir, pdfName = os.path.split(pdfFilePath)
os.chdir(basedir)
command = "start %s" % (pdfName)
print command
FDKUtils.runShellCmdLogging(command)
os.chdir(curdir)
elif os.name == "Linux":
command = "xdg-open \"" + pdfFilePath + "\"" + " &"
FDKUtils.runShellCmdLogging(command)
else:
command = "open \"" + pdfFilePath + "\"" + " &"
FDKUtils.runShellCmdLogging(command)
else:
for path in pathList:
fontFileName = os.path.basename(path)
params.rt_filePath = os.path.abspath(path)
logMsg("")
logMsg( "Proofing font %s. Start time: %s." % (path, time.asctime()))
try:
ttFont, tempPathCFF = openFile(path, txPath)
fontGlyphList = ttFont.getGlyphOrder()
except FontError:
print traceback.format_exception_only(sys.exc_type, sys.exc_value)[-1]
return
# filter specified list, if any, with font list.
params.rt_glyphList = filterGlyphList(params, fontGlyphList, fontFileName)
if not params.rt_glyphList:
raise FontError("Error: selected glyph | |
0x111][0][0x300a, 0x11a][1][0x300a, 0x11c].value
# ssd = data[0x300a, 0xb0][2][0x300a, 0x111][0][0x300a, 0x130].value/10
# energy = data[0x300a, 0xb0][2][0x300a, 0x111][0][0x300a, 0x114].value
# leaf_bou = data[0x300a, 0xb0][2][0x300a, 0xb6][2][0x300a, 0xbe].value
# depth = 100 - ssd
# leaf_pos = np.reshape(leaf_pos,(2,int(len(leaf_pos)/2)))
# leaf_pos = np.transpose(leaf_pos)
# #print(leaf_pos)
# field = []
# for i in range(len(leaf_pos)):
# if leaf_bou[i] >= jawy_pos[0] and leaf_bou[i+1] <= jawy_pos[1]:
# leaf_pair = {'xl': leaf_pos[i][0]/10, 'xr': leaf_pos[i][1]/10,'dy': (leaf_bou[i+1]-leaf_bou[i])/10,'y':leaf_bou[i]/10}
# else:
# leaf_pair = {'xl': 0, 'xr': 0,'dy': (leaf_bou[i+1]-leaf_bou[i])/10,'y':leaf_bou[i]/10}
# field.append(leaf_pair)
# return (field, depth, energy)
def read_mlc(file_path):
    """Read per-beam MLC/jaw geometry from a DICOM RT-Plan file.

    Returns a dict mapping beam name -> (field, depth, energy), where
    field is a list of per-leaf-pair dicts with keys 'xl', 'xr', 'dy',
    'y' (cm); leaf pairs outside the Y-jaw opening are closed (xl=xr=0).
    Raw DICOM positions are in mm and divided by 10 here.
    """
    plan = dcmread(file_path)
    fields = {}
    for beam in plan[0x300a, 0xb0]:  # BeamSequence
        print(beam[0x300a, 0xc2].value)
        beam_name = beam[0x300a, 0xc2].value  # BeamName
        # First control point carries positions, SSD and nominal energy.
        # NOTE(review): device indices [1]=Y jaws, [2]=MLC assumed fixed
        # by the planning system's ordering -- confirm for other TPSs.
        cp0 = beam[0x300a, 0x111][0]
        leaf_pos = cp0[0x300a, 0x11a][2][0x300a, 0x11c].value
        jawy_pos = cp0[0x300a, 0x11a][1][0x300a, 0x11c].value
        ssd = cp0[0x300a, 0x130].value / 10
        energy = cp0[0x300a, 0x114].value
        leaf_bou = beam[0x300a, 0xb6][2][0x300a, 0xbe].value  # leaf boundaries
        depth = 100 - ssd  # depth of the 100 cm reference point
        # Flat bank-A+bank-B list -> one (xl, xr) row per leaf pair.
        pairs = np.transpose(np.reshape(leaf_pos, (2, int(len(leaf_pos) / 2))))
        field = []
        for i in range(len(pairs)):
            if leaf_bou[i] >= jawy_pos[0] and leaf_bou[i + 1] <= jawy_pos[1]:
                field.append({'xl': pairs[i][0] / 10, 'xr': pairs[i][1] / 10,
                              'dy': (leaf_bou[i + 1] - leaf_bou[i]) / 10,
                              'y': leaf_bou[i] / 10})
            else:
                # Pair is shielded by the jaws: treat as closed.
                field.append({'xl': 0, 'xr': 0,
                              'dy': (leaf_bou[i + 1] - leaf_bou[i]) / 10,
                              'y': leaf_bou[i] / 10})
        fields[beam_name] = (field, depth, energy)
    return fields
def sterling_irr(field):
    """Sterling equivalent-square side length (4*A/U) of an irregular field.

    Walks the leaf pairs once, accumulating the open area A and the
    perimeter U traced by the leaf ends; closed pairs (xl >= xr)
    contribute only the lateral steps, not their height.
    """
    perimeter = 0
    area = 0
    prev_ends = (0, 0)
    for leaf in field:
        step_left = np.abs(prev_ends[0] - leaf['xl'])
        step_right = np.abs(prev_ends[1] - leaf['xr'])
        prev_ends = (leaf['xl'], leaf['xr'])
        if leaf['xl'] < leaf['xr']:
            perimeter += step_left + step_right + 2 * leaf['dy']
        else:
            perimeter += step_left + step_right
        area += np.abs(leaf['xr'] - leaf['xl']) * leaf['dy']
    # Close the outline across the last leaf pair.
    perimeter += abs(prev_ends[1] - prev_ends[0])
    return 4 * area / perimeter
def center_mass_irr(field):
    """Area-weighted centroid (X, Y) of an irregular MLC field.

    Each leaf pair contributes a rectangle of width |xr - xl| and height
    dy; the centroid is the area-weighted mean of the rectangle centers.

    Args:
        field: list of dicts with keys 'xl', 'xr', 'dy', 'y' (cm).
    Returns:
        (X, Y) centroid coordinates in cm.
    """
    A = 0
    X = 0
    Y = 0
    for leaf in field:
        # Open width of this pair (the original computed this twice).
        dx = np.abs(leaf['xr'] - leaf['xl'])
        xp = leaf['xl'] + dx / 2          # rectangle center, x
        yp = leaf['y'] + leaf['dy'] / 2   # rectangle center, y
        ap = dx * leaf['dy']              # rectangle area
        A = A + ap
        X = X + xp * ap
        Y = Y + yp * ap
    X = X / A
    Y = Y / A
    return (X, Y)
def field_cm_system(field):
    """Return a copy of *field* shifted into its center-of-mass frame.

    Open leaf pairs (xl < xr) are translated so the area centroid sits at
    the origin; closed pairs are copied unchanged.
    """
    x_cm, y_cm = center_mass_irr(field)
    shifted = []
    for leaf in field:
        if leaf['xl'] < leaf['xr']:
            shifted.append({'xl': leaf['xl'] - x_cm,
                            'xr': leaf['xr'] - x_cm,
                            'dy': leaf['dy'],
                            'y': leaf['y'] - y_cm})
        else:
            shifted.append({'xl': leaf['xl'],
                            'xr': leaf['xr'],
                            'dy': leaf['dy'],
                            'y': leaf['y']})
    return shifted
#Equivalent Square WFF/WFF
class EquivalentSquareIrr:
    """Equivalent-square side length of an irregular MLC field (WFF beam,
    WFF square definition), solved by Newton iteration on the dose kernel.

    After update(): geometric_mean(_raw), sterling(_raw), kernel_re,
    equi_sq(_raw), kernel_sq(_er), kernel_dif, geo_dif(_rel),
    sterling_dif(_rel) are available as attributes.
    """
    def __init__(self,field,z=10,tpr2010=0.671,epsilon=0.000001,max_iter=100,no_kernel=True,beam='Beam_1',center_mass=True,start_value='geometric_mean'):
        """Args:
            field: leaf-pair list (dicts with xl/xr/dy/y) or an RT-Plan path.
            z: depth in cm (replaced by the plan value when field is a path).
            tpr2010: TPR20,10 beam-quality index.
            epsilon, max_iter: Newton solver tolerance and iteration cap.
            no_kernel: skip the kernel cross-check of the solution.
            beam: beam name selected when field is a plan path.
            center_mass: shift the field into its centroid frame first.
            start_value: Newton start, 'geometric_mean' or 'sterling'.
        """
        if isinstance(field, str):
            # A file path was given: read geometry, depth and energy from it.
            field, z, energy_dcm = read_mlc(field)[beam]
            self.energy_dcm = energy_dcm
        self.field = field
        self.z = z
        self.tpr2010 = tpr2010
        self.epsilon = epsilon
        self.max_iter = max_iter
        self.no_kernel = no_kernel
        self.center_mass = center_mass
        self.start_value = start_value
        self.update()
    def update(self):
        """Recompute all derived quantities from the current settings."""
        field = self.field
        z = self.z
        t = self.tpr2010
        epsilon = self.epsilon
        max_iter = self.max_iter
        no_kernel = self.no_kernel
        center_mass = self.center_mass
        start_value = self.start_value
        if center_mass:
            field = field_cm_system(field)
            self.field = field
        # Reference kernel dose integral over the irregular field.
        re_res, re_err = D_i_field(field,z,t)
        round_digits = 2
        # Total open area -> geometric-mean side length sqrt(A).
        area_ges = 0
        for leaf in field:
            area = leaf['dy']*(leaf['xr']-leaf['xl'])
            area_ges = area_ges + area
        geo_mean_raw = (area_ges)**0.5
        geo_mean = round(geo_mean_raw,round_digits)
        sterling_raw = sterling_irr(field)
        sterling = round(sterling_raw,round_digits)
        self.geometric_mean = geo_mean
        self.geometric_mean_raw = geo_mean_raw
        self.sterling = sterling
        self.sterling_raw = sterling_raw
        self.kernel_re = re_res
        if start_value == 'sterling':
            newton_start = sterling_raw
        else:
            newton_start = geo_mean_raw
        # Solve D_square(s) = D_field for the equivalent side length s.
        equi_newton = newton(f=newton_f,Df=newton_df,x0=newton_start,epsilon=epsilon,max_iter=max_iter,args=(z,t,re_res))
        self.equi_sq_raw = equi_newton
        self.equi_sq = round(equi_newton,round_digits)
        if no_kernel:
            self.kernel_sq = 0
            self.kernel_sq_er = 0
            self.kernel_dif = 0
        else:
            # Cross-check: kernel dose of the solved square vs the field.
            sq_res, sq_err = Dz_square_tpr(equi_newton,z,t)
            self.kernel_sq = sq_res
            self.kernel_sq_er = sq_err
            self.kernel_dif = re_res - sq_res
        # Deviations of the closed-form estimates from the Newton solution.
        self.geo_dif = round(self.geometric_mean-self.equi_sq,round_digits)
        self.geo_dif_rel = round(self.geo_dif/self.equi_sq,4)
        self.sterling_dif = round(self.sterling-self.equi_sq,round_digits)
        self.sterling_dif_rel = round(self.sterling_dif/self.equi_sq,4)
def W_pre_int_irr(x, y, energy='6mv'):
    """Radial FFF fluence weight at off-axis point (x, y).

    Evaluates a fitted double-sigmoid-Gaussian profile as a function of
    the off-axis radius; separate fit coefficients per nominal energy
    ('6mv', otherwise the 10 MV fit is used).
    """
    radius = (x ** 2 + y ** 2) ** (1 / 2)
    if energy == '6mv':
        return sig_gaussian2(radius, 5.15229707e-01, 4.70403758e+00, 2.00309769e+01, 1.56120538e-02, 5.73646811e-01, 9.94490425e+00, 1.26526257e-01, -3.44333164e+00)
    return sig_gaussian2(radius, 3.71770516e-01, 4.22423908e+00, 2.00093005e+01, 1.25045275e-02, 5.27197010e-01, 9.24571498e+00, 1.76659943e-01, -3.12352475e+00)
def p_xy_w(y, x, z, t, energy):
    """Point dose kernel weighted by the FFF profile, normalized on-axis.

    Argument order (y, x) matches what scipy.integrate.dblquad expects.
    """
    axis_norm = 1 / W_pre_int_irr(0, 0, energy)
    return p_xy(y, x, z, t) * W_pre_int_irr(x, y, energy) * axis_norm
def D_xy_w(x0, x1, y0, y1, z, t, energy):
    """Integrate the weighted kernel over one [x0,x1] x [y0,y1] rectangle."""
    result, _abserr = integrate.dblquad(p_xy_w, x0, x1, y0, y1, args=[z, t, energy])
    return result
def D_i_field_w(field, z=10, t=0.671, energy='6mv'):
    """Sum the weighted kernel dose over all open leaf pairs of a field.

    Returns (dose, 0); the second element mirrors the (value, error)
    shape of the unweighted D_i_field.
    """
    total = 0
    for leaf in field:
        if leaf['xl'] < leaf['xr']:  # closed pairs contribute nothing
            total += D_xy_w(leaf['xl'], leaf['xr'],
                            leaf['y'], leaf['y'] + leaf['dy'],
                            z, t, energy)
    return (total, 0)
#Equivalent Square FFF/FFF
class EquivalentSquareFFFIrr:
    """Equivalent-square side length of an irregular field for an FFF beam,
    using the FFF-weighted kernel for both the field and the square
    (FFF/FFF definition). Same attribute contract as EquivalentSquareIrr.
    """
    def __init__(self,field,z=10,tpr2010=0.671,epsilon=0.000001,max_iter=100,no_kernel=True,energy='6mv',beam='Beam_1',center_mass=True,start_value='geometric_mean'):
        """Args as EquivalentSquareIrr, plus:
            energy: nominal FFF energy key for the fluence profile ('6mv'
                selects the 6 MV fit, anything else the 10 MV fit).
        """
        if isinstance(field, str):
            # A file path was given: read geometry, depth and energy from it.
            field, z, energy_dcm = read_mlc(field)[beam]
            self.energy_dcm = energy_dcm
        self.field = field
        self.z = z
        self.tpr2010 = tpr2010
        self.epsilon = epsilon
        self.max_iter = max_iter
        self.no_kernel = no_kernel
        self.energy = energy
        self.center_mass = center_mass
        self.start_value = start_value
        self.update()
    def update(self):
        """Recompute all derived quantities from the current settings."""
        field = self.field
        z = self.z
        t = self.tpr2010
        epsilon = self.epsilon
        max_iter = self.max_iter
        no_kernel = self.no_kernel
        energy = self.energy
        center_mass = self.center_mass
        start_value = self.start_value
        if center_mass:
            field = field_cm_system(field)
            self.field = field
        # Reference: FFF-weighted kernel dose over the irregular field.
        re_res, re_err = D_i_field_w(field,z,t,energy)
        round_digits = 2
        # Total open area -> geometric-mean side length sqrt(A).
        area_ges = 0
        for leaf in field:
            area = leaf['dy']*(leaf['xr']-leaf['xl'])
            area_ges = area_ges + area
        geo_mean_raw = (area_ges)**0.5
        geo_mean = round(geo_mean_raw,round_digits)
        sterling_raw = sterling_irr(field)
        sterling = round(sterling_raw,round_digits)
        self.geometric_mean = geo_mean
        self.geometric_mean_raw = geo_mean_raw
        self.sterling = sterling
        self.sterling_raw = sterling_raw
        self.kernel_re = re_res
        if start_value == 'sterling':
            newton_start = sterling_raw
        else:
            newton_start = geo_mean_raw
        # Solve with the FFF-specific Newton target and derivative.
        equi_newton = newton(f=newton_f_fff,Df=newton_df_fff,x0=newton_start,epsilon=epsilon,max_iter=max_iter,args=(z,t,re_res))
        self.equi_sq_raw = equi_newton
        self.equi_sq = round(equi_newton,round_digits)
        if no_kernel:
            self.kernel_sq = 0
            self.kernel_sq_er = 0
            self.kernel_dif = 0
        else:
            # Cross-check: weighted kernel dose of the solved square.
            sq_res, sq_err = Dz_w_square_tpr(equi_newton,z,t,energy)
            self.kernel_sq = sq_res
            self.kernel_sq_er = sq_err
            self.kernel_dif = re_res - sq_res
        # Deviations of the closed-form estimates from the Newton solution.
        self.geo_dif = round(self.geometric_mean-self.equi_sq,round_digits)
        self.geo_dif_rel = round(self.geo_dif/self.equi_sq,4)
        self.sterling_dif = round(self.sterling-self.equi_sq,round_digits)
        self.sterling_dif_rel = round(self.sterling_dif/self.equi_sq,4)
#Equivalent Square FFF/WFF
class EquivalentSquareFFFWFFIrr:
    """Equivalent square for an FFF irregular field expressed as a WFF
    square: the field dose uses the FFF-weighted kernel, the equivalent
    square is solved with the unweighted (WFF) kernel (FFF/WFF
    definition). Same attribute contract as EquivalentSquareIrr.
    """
    def __init__(self,field,z=10,tpr2010=0.671,epsilon=0.000001,max_iter=1000,no_kernel=True,energy='6mv',beam='Beam_1',center_mass=True,start_value='geometric_mean'):
        """Args as EquivalentSquareFFFIrr; note the larger default
        max_iter (1000) for this mixed-definition solve.
        """
        if isinstance(field, str):
            # A file path was given: read geometry, depth and energy from it.
            field, z, energy_dcm = read_mlc(field)[beam]
            self.energy_dcm = energy_dcm
        self.field = field
        self.z = z
        self.tpr2010 = tpr2010
        self.epsilon = epsilon
        self.max_iter = max_iter
        self.no_kernel = no_kernel
        self.energy = energy
        self.center_mass = center_mass
        self.start_value = start_value
        self.update()
    def update(self):
        """Recompute all derived quantities from the current settings."""
        field = self.field
        z = self.z
        t = self.tpr2010
        epsilon = self.epsilon
        max_iter = self.max_iter
        no_kernel = self.no_kernel
        energy = self.energy
        center_mass = self.center_mass
        start_value = self.start_value
        if center_mass:
            field = field_cm_system(field)
            self.field = field
        # Reference: FFF-weighted kernel dose over the irregular field.
        re_res, re_err = D_i_field_w(field,z,t,energy)
        round_digits = 2
        # Total open area -> geometric-mean side length sqrt(A).
        area_ges = 0
        for leaf in field:
            area = leaf['dy']*(leaf['xr']-leaf['xl'])
            area_ges = area_ges + area
        geo_mean_raw = (area_ges)**0.5
        geo_mean = round(geo_mean_raw,round_digits)
        sterling_raw = sterling_irr(field)
        sterling = round(sterling_raw,round_digits)
        self.geometric_mean = geo_mean
        self.geometric_mean_raw = geo_mean_raw
        self.sterling = sterling
        self.sterling_raw = sterling_raw
        self.kernel_re = re_res
        if start_value == 'sterling':
            newton_start = sterling_raw
        else:
            newton_start = geo_mean_raw
        # Solve with the unweighted (WFF) Newton target and derivative.
        equi_newton = newton(f=newton_f,Df=newton_df,x0=newton_start,epsilon=epsilon,max_iter=max_iter,args=(z,t,re_res))
        self.equi_sq_raw = equi_newton
        self.equi_sq = round(equi_newton,round_digits)
        if no_kernel:
            self.kernel_sq = 0
            self.kernel_sq_er = 0
            self.kernel_dif = 0
        else:
            # Cross-check: unweighted kernel dose of the solved square.
            sq_res, sq_err = Dz_square_tpr(equi_newton,z,t)
            self.kernel_sq = sq_res
            self.kernel_sq_er = sq_err
            self.kernel_dif = re_res - sq_res
        # Deviations of the closed-form estimates from the Newton solution.
        self.geo_dif = round(self.geometric_mean-self.equi_sq,round_digits)
        self.geo_dif_rel = round(self.geo_dif/self.equi_sq,4)
        self.sterling_dif = round(self.sterling-self.equi_sq,round_digits)
        self.sterling_dif_rel = round(self.sterling_dif/self.equi_sq,4)
#Equivalent Square WFF/WFF: Using same TPR2010 as Equivalent Square definition
class EquivalentSquareTPRIrr:
    """Equivalent square of an irregular WFF field defined via equal
    TPR20,10 rather than equal kernel dose at a fixed depth. Attributes
    after update(): geometric_mean(_raw), sterling(_raw), tpr, equi_sq(_raw),
    tpr_sq, tpr_dif, geo_dif(_rel), sterling_dif(_rel).
    """
    def __init__(self,field,z=10,tpr2010=0.671,epsilon=0.000001,max_iter=100,no_kernel=False,beam='Beam_1',center_mass=True,start_value='geometric_mean'):
        """Args as EquivalentSquareIrr.

        NOTE(review): the z parameter is accepted (and unpacked from the
        plan) but never stored or used -- the TPR-based definition
        presumably works over fixed depths internally; confirm.
        """
        if isinstance(field, str):
            # A file path was given: read geometry, depth and energy from it.
            field, z, energy_dcm = read_mlc(field)[beam]
            self.energy_dcm = energy_dcm
        self.field = field
        self.tpr2010 = tpr2010
        self.epsilon = epsilon
        self.max_iter = max_iter
        self.no_kernel = no_kernel
        self.center_mass = center_mass
        self.start_value = start_value
        self.update()
    def update(self):
        """Recompute all derived quantities from the current settings."""
        field = self.field
        t = self.tpr2010
        epsilon = self.epsilon
        max_iter = self.max_iter
        no_kernel = self.no_kernel
        center_mass = self.center_mass
        start_value = self.start_value
        if center_mass:
            field = field_cm_system(field)
            self.field = field
        # Reference TPR of the irregular field from the unweighted kernel.
        re_res = calc_tpr(D_i_field,field,tpr2010=t)
        round_digits = 2
        # Total open area -> geometric-mean side length sqrt(A).
        area_ges = 0
        for leaf in field:
            area = leaf['dy']*(leaf['xr']-leaf['xl'])
            area_ges = area_ges + area
        geo_mean_raw = (area_ges)**0.5
        geo_mean = round(geo_mean_raw,round_digits)
        sterling_raw = sterling_irr(field)
        sterling = round(sterling_raw,round_digits)
        self.geometric_mean = geo_mean
        self.geometric_mean_raw = geo_mean_raw
        self.sterling = sterling
        self.sterling_raw = sterling_raw
        self.tpr = re_res
        if start_value == 'sterling':
            newton_start = sterling_raw
        else:
            newton_start = geo_mean_raw
        # Solve TPR_square(s) = TPR_field for the equivalent side length s.
        equi_newton = newton(f=newton_tpr_f,Df=newton_tpr_df,x0=newton_start,epsilon=epsilon,max_iter=max_iter,args=(Dz_square_tpr,re_res,t))
        self.equi_sq_raw = equi_newton
        self.equi_sq = round(equi_newton,round_digits)
        if no_kernel:
            self.tpr_sq = 0
            self.tpr_dif = 0
        else:
            # Cross-check: TPR of the solved square vs the field TPR.
            sq_res = calc_tpr(Dz_square_tpr,equi_newton,tpr2010=t)
            self.tpr_sq = sq_res
            self.tpr_dif = re_res - sq_res
        # Deviations of the closed-form estimates from the Newton solution.
        self.geo_dif = round(self.geometric_mean-self.equi_sq,round_digits)
        self.geo_dif_rel = round(self.geo_dif/self.equi_sq,4)
        self.sterling_dif = round(self.sterling-self.equi_sq,round_digits)
        self.sterling_dif_rel = round(self.sterling_dif/self.equi_sq,4)
#Equivalent Square FFF/FFF: Using same TPR2010 as Equivalent Square definition
class EquivalentSquareFFFTPRIrr:
    """Equivalent square of an irregular FFF field defined via equal
    TPR20,10 (FFF-weighted kernel on the field side). Attributes after
    update(): geometric_mean(_raw), sterling(_raw), tpr, equi_sq(_raw),
    tpr_sq, tpr_dif, geo_dif(_rel), sterling_dif(_rel).
    """
    def __init__(self, field, z=10, tpr2010=0.671, epsilon=0.000001, max_iter=100, no_kernel=False, energy='6mv', beam='Beam_1', center_mass=True, start_value='geometric_mean'):
        """Args as EquivalentSquareTPRIrr, plus:
            energy: nominal FFF energy key for the fluence profile.

        NOTE(review): as in EquivalentSquareTPRIrr, z is accepted but
        never stored or used by the TPR-based pipeline -- confirm.
        """
        if isinstance(field, str):
            # A file path was given: read geometry, depth and energy from it.
            field, z, energy_dcm = read_mlc(field)[beam]
            self.energy_dcm = energy_dcm
        self.field = field
        self.tpr2010 = tpr2010
        self.epsilon = epsilon
        self.max_iter = max_iter
        self.no_kernel = no_kernel
        self.energy = energy
        self.center_mass = center_mass
        self.start_value = start_value
        self.update()
    def update(self):
        """Recompute all derived quantities from the current settings."""
        field = self.field
        t = self.tpr2010
        epsilon = self.epsilon
        max_iter = self.max_iter
        no_kernel = self.no_kernel
        energy = self.energy
        center_mass = self.center_mass
        start_value = self.start_value
        if center_mass:
            field = field_cm_system(field)
            self.field = field
        # Reference TPR of the irregular field from the FFF-weighted kernel.
        # (A leftover debug print of this value was removed here.)
        re_res = calc_tpr_e(D_i_field_w, field, tpr2010=t, energy=energy)
        round_digits = 2
        # Total open area -> geometric-mean side length sqrt(A).
        area_ges = 0
        for leaf in field:
            area_ges = area_ges + leaf['dy'] * (leaf['xr'] - leaf['xl'])
        geo_mean_raw = area_ges ** 0.5
        geo_mean = round(geo_mean_raw, round_digits)
        sterling_raw = sterling_irr(field)
        sterling = round(sterling_raw, round_digits)
        self.geometric_mean = geo_mean
        self.geometric_mean_raw = geo_mean_raw
        self.sterling = sterling
        self.sterling_raw = sterling_raw
        self.tpr = re_res
        if start_value == 'sterling':
            newton_start = sterling_raw
        else:
            newton_start = geo_mean_raw
        # Solve TPR_square(s) = TPR_field with the FFF-specific functions.
        equi_newton = newton(f=newton_tpr_f_fff, Df=newton_tpr_df_fff, x0=newton_start, epsilon=epsilon, max_iter=max_iter, args=(Dz_w_square_tpr, re_res, t, energy))
        self.equi_sq_raw = equi_newton
        self.equi_sq = round(equi_newton, round_digits)
        if no_kernel:
            self.tpr_sq = 0
            self.tpr_dif = 0
        else:
            # NOTE(review): the cross-check uses the unweighted kernel
            # (calc_tpr/Dz_square_tpr), unlike the FFF-weighted solve above;
            # confirm whether calc_tpr_e(Dz_w_square_tpr, ...) was intended.
            sq_res = calc_tpr(Dz_square_tpr, equi_newton, tpr2010=t)
            self.tpr_sq = sq_res
            self.tpr_dif = re_res - sq_res
        # Deviations of the closed-form estimates from the Newton solution.
        self.geo_dif = round(self.geometric_mean - self.equi_sq, round_digits)
        self.geo_dif_rel = round(self.geo_dif / self.equi_sq, 4)
        self.sterling_dif = round(self.sterling - self.equi_sq, round_digits)
        self.sterling_dif_rel = round(self.sterling_dif / self.equi_sq, 4)
#Equivalent Square FFF/WFF: Using same TPR2010 as Equivalent Square definition
class EquivalentSquareFFFWFFTPRIrr:
    """Equivalent square (TPR-matching definition) of an irregular field,
    combining an FFF irregular-field TPR with a WFF square kernel in the
    Newton solve."""
    def __init__(self,field,z=10,tpr2010=0.671,epsilon=0.000001,max_iter=100,no_kernel=False,energy='6mv',beam='Beam_1',center_mass=True,start_value='geometric_mean'):
        # A string `field` is treated as a DICOM-RT path; the MLC shape of
        # `beam` then replaces field/z.
        if isinstance(field, str):
            field, z, energy_dcm = read_mlc(field)[beam]
            self.energy_dcm = energy_dcm
        self.field = field
        self.tpr2010 = tpr2010
        self.epsilon = epsilon
        self.max_iter = max_iter
        self.no_kernel = no_kernel
        self.energy = energy
        self.center_mass = center_mass
        self.start_value = start_value
        self.update()
    #SET FUNCTIONS
    def update(self):
        """Recompute every derived quantity from the current attributes."""
        t = self.tpr2010
        if self.center_mass:
            # Work in the field's centre-of-mass coordinate system.
            self.field = field_cm_system(self.field)
        field = self.field
        irr_tpr = calc_tpr_e(D_i_field_w, field, tpr2010=t, energy=self.energy)
        digits = 2
        # Total open area of the MLC-shaped field.
        total_area = sum(leaf['dy'] * (leaf['xr'] - leaf['xl']) for leaf in field)
        geo_raw = total_area ** 0.5
        sterling_raw = sterling_irr(field)
        self.geometric_mean_raw = geo_raw
        self.geometric_mean = round(geo_raw, digits)
        self.sterling_raw = sterling_raw
        self.sterling = round(sterling_raw, digits)
        self.tpr = irr_tpr
        # Newton start value: Sterling estimate or geometric mean.
        x0 = sterling_raw if self.start_value == 'sterling' else geo_raw
        side = newton(f=newton_tpr_f, Df=newton_tpr_df, x0=x0,
                      epsilon=self.epsilon, max_iter=self.max_iter,
                      args=(Dz_square_tpr, irr_tpr, t))
        self.equi_sq_raw = side
        self.equi_sq = round(side, digits)
        if self.no_kernel:
            self.tpr_sq = 0
            self.tpr_dif = 0
        else:
            square_tpr = calc_tpr(Dz_square_tpr, side, tpr2010=t)
            self.tpr_sq = square_tpr
            self.tpr_dif = irr_tpr - square_tpr
        self.geo_dif = round(self.geometric_mean - self.equi_sq, digits)
        self.geo_dif_rel = round(self.geo_dif / self.equi_sq, 4)
        self.sterling_dif = round(self.sterling - self.equi_sq, digits)
        self.sterling_dif_rel = round(self.sterling_dif / self.equi_sq, 4)
##CALCULATIONS FOR ROUND KIND OF FIELDS##
#ROUND TO IRREGULAR FIELDS
def round_to_irr(r,theta=2*np.pi,dy=0.5,y_max=20):
    """Rasterise a circular sector into MLC-like leaf rows.

    r      : circle radius.
    theta  : opening angle of the sector (default: full circle).
    dy     : leaf (row) height.
    y_max  : half-extent of the rasterised region in y.

    Returns a list of dicts {'xl','xr','dy','y'} — one per leaf row — with
    xl/xr rounded to 0.1; rows outside the circle are (0, 0).
    """
    # Work with the complementary (removed) angle internally.
    theta = 2*np.pi - theta
    field_range = np.arange(-y_max,y_max,dy)
    field = []
    for y in field_range:
        if np.abs(y + dy/2) <= r:
            # Half-chord of the circle at the row centre.
            xl = (r**2 - (y+dy/2)**2)**(1/2)
            if np.arcsin(np.abs((y + dy/2)/r))<= theta/2:
                # Row intersects the removed wedge: clip the right edge.
                # NOTE(review): if y + dy/2 == 0 here, tan(theta/2) may be 0
                # and the division degenerates — confirm this case cannot occur
                # for the dy grids used.
                if theta <= np.pi:
                    xr = abs((y + dy/2)/np.tan(theta/2))
                else:
                    xr = -abs((y + dy/2)/np.tan(theta/2))
                if xr < -xl:
                    xr=-xl
            else:
                xr = xl
            xl = -xl
        else:
            xl = 0
            xr = 0
        field.append({'xl': round(xl,1),'xr': round(xr,1), 'dy': dy, 'y':y})
    return field
def EquivalentSquareIrrRound(r,theta=2*np.pi,dy=0.5,y_max=20,z=10,tpr2010=0.671,epsilon=0.000001,max_iter=100,no_kernel=False,energy='6mv',center_mass=True,start_value='geometric_mean',definition='axis_dose',mode='WFF-WFF'):
    """Equivalent square of a (partial) circular field via the irregular-field path.

    The circle/sector (radius r, opening angle theta) is rasterised into leaf
    rows of height dy (round_to_irr) and handed to the EquivalentSquare*Irr
    class selected by `definition` ('axis_dose' vs TPR-based) and `mode`
    ('WFF-WFF', 'FFF-FFF', anything else -> FFF-WFF).  The constructed object
    is returned, annotated with the original circle parameters.
    """
    field = round_to_irr(r,theta,dy,y_max)
    if definition == 'axis_dose':
        if mode == 'WFF-WFF':
            equi_object = EquivalentSquareIrr(field=field,z=z,tpr2010=tpr2010,epsilon=epsilon,max_iter=max_iter,no_kernel=no_kernel,center_mass=center_mass,start_value=start_value)
        elif mode == 'FFF-FFF':
            equi_object = EquivalentSquareFFFIrr(field=field,z=z,tpr2010=tpr2010,epsilon=epsilon,max_iter=max_iter,no_kernel=no_kernel,energy=energy,center_mass=center_mass,start_value=start_value)
        else:
            equi_object = EquivalentSquareFFFWFFIrr(field=field,z=z,tpr2010=tpr2010,epsilon=epsilon,max_iter=max_iter,no_kernel=no_kernel,energy=energy,center_mass=center_mass,start_value=start_value)
    else:
        if mode == 'WFF-WFF':
            # NOTE(review): `energy` is forwarded here although the WFF TPR
            # class may not use it — confirm its constructor accepts the kwarg.
            equi_object = EquivalentSquareTPRIrr(field=field,z=z,tpr2010=tpr2010,epsilon=epsilon,max_iter=max_iter,no_kernel=no_kernel,energy=energy,center_mass=center_mass,start_value=start_value)
        elif mode == 'FFF-FFF':
            equi_object = EquivalentSquareFFFTPRIrr(field=field,z=z,tpr2010=tpr2010,epsilon=epsilon,max_iter=max_iter,no_kernel=no_kernel,energy=energy,center_mass=center_mass,start_value=start_value)
        else:
            equi_object = EquivalentSquareFFFWFFTPRIrr(field=field,z=z,tpr2010=tpr2010,epsilon=epsilon,max_iter=max_iter,no_kernel=no_kernel,energy=energy,center_mass=center_mass,start_value=start_value)
    # Attach the originating circle parameters for later inspection.
    equi_object.r = r
    equi_object.theta = theta
    equi_object.dy = dy
    equi_object.y_max = y_max
    equi_object.definition = definition
    equi_object.mode = mode
    return equi_object
#PURE ROUND CALCULATIONS
def D_round(r,theta,z,t):
    """Analytic axis dose of a full sector (two-exponential kernel).

    Returns a (value, error) pair; the error term is identically 0 here.
    """
    term_a = (Az(z,t)/az(z,t))*(1-np.exp(-az(z,t)*r))
    term_b = (Bz(z,t)/bz(z,t))*(1-np.exp(-bz(z,t)*r))
    return (theta*(term_a + term_b), 0)
def geo_mean_round(r,theta):
    """Side length of the square whose area equals that of a sector
    of radius r and opening angle theta."""
    sector_area = r**2*theta/2
    return sector_area**(1/2)
def sterling_round(r,theta):
    """Sterling equivalent square (4*Area/Perimeter) of a circular sector."""
    area = r**2*theta/2
    if theta == 2*np.pi:
        perimeter = theta*r          # full circle: arc only
    else:
        perimeter = theta*r + 2*r    # sector: arc plus the two radii
    return 4*area/perimeter
#Equivalent Square WFF/WFF
class EquivalentSquareRound:
    """Equivalent square of a circular sector (WFF beam), matched on axis dose.

    The square side is solved with Newton's method so that the square-field
    kernel dose matches the analytic sector dose D_round.
    """
    def __init__(self,r,theta=2*np.pi,z=10,tpr2010=0.671,epsilon=0.000001,max_iter=100,no_kernel=True):
        self.r = r
        self.theta = theta
        self.z = z
        self.tpr2010 = tpr2010
        self.epsilon = epsilon
        self.max_iter = max_iter
        self.no_kernel = no_kernel
        self.update()
    #SET FUNCTIONS
    def update(self):
        """Recompute all derived quantities from the current attributes."""
        r = self.r
        theta = self.theta
        z = self.z
        t = self.tpr2010
        epsilon = self.epsilon
        max_iter = self.max_iter
        no_kernel = self.no_kernel
        re_res, re_err = D_round(r,theta,z,t)
        round_digits = 2
        geo_mean = round(geo_mean_round(r,theta),round_digits)
        sterling = round(sterling_round(r,theta),round_digits)
        self.geometric_mean = geo_mean
        self.sterling = sterling
        self.kernel_re = re_res
        # Keep the historical misspelled attribute for backward compatibility
        # and expose the correctly spelled alias as well.
        self.kernal_re_er = re_err
        self.kernel_re_er = re_err
        # NOTE(review): Newton starts from the ROUNDED geometric mean here,
        # unlike the *Irr classes which use the raw value — confirm intent.
        equi_newton = newton(f=newton_f,Df=newton_df,x0=geo_mean,epsilon=epsilon,max_iter=max_iter,args=(z,t,re_res))
        self.equi_sq_raw = equi_newton
        self.equi_sq = round(equi_newton,round_digits)
        if no_kernel:
            self.kernel_sq = 0
            self.kernel_sq_er = 0
            self.kernel_dif = 0
        else:
            sq_res, sq_err = Dz_square_tpr(equi_newton,z,t)
            self.kernel_sq = sq_res
            self.kernel_sq_er = sq_err
            self.kernel_dif = re_res - sq_res
        self.geo_dif = round(self.geometric_mean-self.equi_sq,round_digits)
        self.geo_dif_rel = round(self.geo_dif/self.equi_sq,4)
        self.sterling_dif = round(self.sterling-self.equi_sq,round_digits)
        self.sterling_dif_rel = round(self.sterling_dif/self.equi_sq,4)
def p_round_w(r,theta,z,t,energy):
    """Radial integrand for the FFF sector dose: two-exponential kernel
    weighted by the off-axis profile W_pre_int, normalised to W(0) == 1."""
    n_fac = 1/W_pre_int(0,energy)
    kernel = Az(z,t)*np.exp(-az(z,t)*r)+Bz(z,t)*np.exp(-bz(z,t)*r)
    return theta*kernel*W_pre_int(r,energy)*n_fac
def D_round_w(r,theta,z,t,energy='6mv'):
    """Axis dose of a circular sector for an FFF beam.

    Numerically integrates p_round_w from 0 to r; returns the
    (value, abserr) pair produced by scipy.integrate.quad.
    """
    return integrate.quad(p_round_w,0,r,args=(theta,z,t,energy))
#Equivalent Square FFF/FFF
class EquivalentSquareFFFRound:
def __init__(self,r,theta,z=10,tpr2010=0.671,epsilon=0.000001,max_iter=100,no_kernel=True,energy='6mv'):
self.r = r
self.theta = theta
self.z = z
self.tpr2010 = tpr2010
self.epsilon = epsilon
self.max_iter = max_iter
self.no_kernel = no_kernel
self.energy = energy
self.update()
#SET FUNCTIONS
def update(self):
r = self.r
theta = self.theta
z = self.z
t = | |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import copy
import numpy as np
import pandas as pd
from datamanage.lite.tag import tagaction
from datamanage.pro.datamap import dmaction
from datamanage.pro.datamap.serializers import BasicListSerializer, DataValueSerializer
from datamanage.pro.datastocktake.dataset_process import dataset_filter
from datamanage.pro.datastocktake.metrics.score_level import level_distribution
from datamanage.pro.datastocktake.metrics.storage import storage_capacity_trend
from datamanage.pro.datastocktake.metrics.trend import score_trend_pandas_groupby
from datamanage.pro.datastocktake.sankey_diagram import (
fetch_value_between_source_target,
format_sankey_diagram,
minimal_value,
)
from datamanage.pro.datastocktake.settings import SCORE_DICT
from datamanage.pro.lifecycle.metrics.cost import hum_storage_unit
from datamanage.pro.lifecycle.metrics.ranking import score_aggregate
from datamanage.pro.utils.time import get_date
from datamanage.utils.api.dataquery import DataqueryApi
from django.conf import settings
from django.core.cache import cache
from django.db import connections
from django.utils.translation import ugettext as _
from pandas import DataFrame
from rest_framework.response import Response
from common.bklanguage import bktranslates
from common.decorators import list_route, params_valid
from common.views import APIViewSet
# Deployment run mode; falls back to DEVELOP when not configured.
RUN_MODE = getattr(settings, "RUN_MODE", "DEVELOP")
# Maps a filter-metric name to the dimension field it is applied against.
METRIC_DICT = {
    "active": "project_id",
    "app_important_level_name": "bk_biz_id",
    "is_bip": "bk_biz_id",
}
# BIP grade names treated as abnormal and remapped (values are runtime data
# consumed elsewhere — do not translate them).
ABN_BIP_GRADE_NAME_LIST = ["确认自研信用", "退市", "已下架"]
# Grade name the abnormal grades are corrected to ("other").
CORR_BIP_GRADE_NAME = _("其他")
# Display order of business operation states (lifecycle stages).
OPER_STATE_NAME_ORDER = [
    "前期接触",
    "接入评估",
    "接入准备",
    "接入中",
    "技术封测",
    "测试阶段",
    "封测",
    "内测",
    "不删档",
    "公测",
    "正式运行",
    "停运",
    "取消",
    "退市",
    "其他",
]
# Display order of BIP grades, ascending.
BIP_GRADE_NAME_ORDER = [
    "其他",
    "暂无评级",
    "长尾",
    "三星",
    "预备四星",
    "四星",
    "限制性预备五星",
    "预备五星",
    "五星",
    "预备六星",
    "六星",
]
class QueryView(APIViewSet):
    """Stocktake endpoints about query activity."""

    @list_route(methods=["get"], url_path="popular_query")
    def popular_query(self, request):
        """
        @api {get} /datamanage/datastocktake/query/popular_query/ Most popular recently queried tables
        @apiVersion 0.1.0
        @apiGroup Query
        @apiName popular_query

        @apiParam {top} int number of most popular queried tables to return

        @apiSuccessExample Success-Response:
            {
                "errors":null,
                "message":"ok",
                "code":"1500200",
                "data":[
                    {
                        "count":20000,
                        "result_table_name_alias":"xx",
                        "app_code":"xx",
                        "result_table_id":"xx"
                    }
                ],
                "result":true
            }
        """
        # Use the date from get_date() (format '20200115', the day before
        # yesterday): with yesterday's date the offline job may not have
        # finished before 1 a.m. and the popular-query data would be empty.
        yesterday = get_date()
        top = int(request.query_params.get("top", 10))
        prefer_storage = "tspider"
        sql_latest = """SELECT count, result_table_id, result_table_name_alias, app_code
            FROM 591_dataquery_processing_type_rt_alias_one_day
            WHERE thedate={} and result_table_id is not null and app_code is not null
            order by count desc limit {}""".format(
            yesterday, top
        )
        ret_latest = DataqueryApi.query({"sql": sql_latest, "prefer_storage": prefer_storage}).data
        if ret_latest:
            ret_latest = ret_latest.get("list", [])
            # The cache may be cold: fall back to an empty mapping instead of
            # calling .get on None (AttributeError).
            app_code_dict = cache.get("app_code_dict") or {}
            for each in ret_latest:
                if "app_code" in each:
                    each["app_code_alias"] = (
                        app_code_dict[each["app_code"]] if app_code_dict.get(each["app_code"]) else each["app_code"]
                    )
        else:
            return Response([])
        return Response(ret_latest)
class DataSourceDistributionView(APIViewSet):
    """Distribution of dataset counts over data-source tags (dgraph-backed)."""

    @list_route(methods=["post"], url_path="distribution")
    @params_valid(serializer=BasicListSerializer)
    def data_source_distribution(self, request, params):
        """
        @api {post} /datamanage/datastocktake/data_source/distribution/ Get the data-source distribution
        @apiVersion 0.1.0
        @apiGroup DataSourceDistribution
        @apiName data_source_distribution

        @apiParamExample {json} request sample:
            {
                "bk_biz_id":null,
                "project_id":null,
                "tag_ids":[],
                "keyword":"",
                "tag_code":"online",
                "me_type":"tag",
                "cal_type":["standard","only_standard"],
                "page":1,
                "page_size":10,
                "data_set_type":"all",//result_table、raw_data
                "created_by":"xiaoming"
            }

        @apiSuccessExample Success-Response:
            HTTP/1.1 200 OK
            {
                "errors": null,
                "message": "ok",
                "code": "1500200",
                "data": {
                    "tag_count_list": [
                        6,
                        278
                    ],
                    "tag_code_list": [
                        "xx",
                        "other"
                    ],
                    "tag_alias_list": [
                        "XX",
                        "其他"
                    ]
                },
                "result": true
            }
        """
        params.pop("has_standard", None)
        result_dict = dmaction.floating_window_query_dgraph(
            params,
            connections["bkdata_basic_slave"],
            dmaction.NEED_DATA_SET_ID_DETAIL,
            data_source_distribute=True,
        ).get("dgraph_result")
        # Counts for the fixed first-level data-source tags.
        tag_code_list = ["components", "sys_host", "system"]
        tag_alias_list = [_("组件"), _("设备"), _("系统")]
        tag_count_list = [
            result_dict.get("c_%s" % each_tag_code)[0].get("count", 0)
            for each_tag_code in tag_code_list
            if result_dict.get("c_%s" % each_tag_code)
        ]
        # Total number of datasets matching the query conditions.
        rt_count = result_dict.get("rt_count")[0].get("count", 0) if result_dict.get("rt_count") else 0
        rd_count = result_dict.get("rd_count")[0].get("count", 0) if result_dict.get("rd_count") else 0
        tdw_count = result_dict.get("tdw_count")[0].get("count", 0) if result_dict.get("tdw_count") else 0
        total_count = rt_count + rd_count + tdw_count
        # Whatever is not covered by the named tags goes into the "other" bucket.
        other_count = total_count
        for each_tag_count in tag_count_list:
            other_count -= each_tag_count
        tag_count_list.append(other_count)
        tag_code_list.append("other")
        tag_alias_list.append(_("其他"))
        return Response(
            {
                "tag_count_list": tag_count_list,
                "tag_alias_list": tag_alias_list,
                "tag_code_list": tag_code_list,
            }
        )

    @list_route(methods=["post"], url_path="detail_distribution")
    @params_valid(serializer=BasicListSerializer)
    def data_source_detail_distribution(self, request, params):
        """
        @api {post} /datamanage/datastocktake/data_source/detail_distribution/ Get the detailed data-source distribution
        @apiVersion 0.1.0
        @apiGroup DataSourceDistribution
        @apiName data_source_detail_distribution

        @apiParamExample {json} request sample:
            {
                "bk_biz_id":null,
                "project_id":null,
                "tag_ids":[],
                "keyword":"",
                "tag_code":"online",
                "me_type":"tag",
                "cal_type":["standard","only_standard"],
                "page":1,
                "page_size":10,
                "data_set_type":"all",//result_table、raw_data
                "created_by":"xiaoming",
                "top":5,
                "parent_tag_code":"components"
            }

        @apiSuccessExample Success-Response:
            HTTP/1.1 200 OK
            {
                "errors": null,
                "message": "ok",
                "code": "1500200",
                "data": {
                    "tag_count_list": [
                        6,
                        278
                    ],
                    "tag_code_list": [
                        "xx",
                        "other"
                    ],
                    "tag_alias_list": [
                        "XX",
                        "其他"
                    ]
                },
                "result": true
            }
        """
        params.pop("has_standard", None)
        top = params.get("top", 5)
        parent_code = params.get("parent_tag_code", "all")
        # 1. Query dgraph once for all data-source tags.
        result_dict = dmaction.floating_window_query_dgraph(
            params,
            connections["bkdata_basic_slave"],
            dmaction.NEED_DATA_SET_ID_DETAIL,
            data_source_detail_distribute=True,
        )
        dgraph_result = result_dict.get("dgraph_result")
        data_source_tag_list = result_dict.get("data_source_tag_list")
        for each_tag in data_source_tag_list:
            each_tag["count"] = (
                dgraph_result.get("c_%s" % each_tag.get("tag_code"))[0].get("count", 0)
                if dgraph_result.get("c_%s" % each_tag.get("tag_code"))
                else 0
            )
        # Sort all second-level data-source tags by count, descending.
        data_source_tag_list.sort(key=lambda k: (k.get("count", 0)), reverse=True)
        if parent_code == "all":
            top_tag_list = data_source_tag_list[:top]
            # Total number of datasets matching the query conditions.
            rt_count = dgraph_result.get("rt_count")[0].get("count", 0) if dgraph_result.get("rt_count") else 0
            rd_count = dgraph_result.get("rd_count")[0].get("count", 0) if dgraph_result.get("rd_count") else 0
            tdw_count = dgraph_result.get("tdw_count")[0].get("count", 0) if dgraph_result.get("tdw_count") else 0
            total_count = rt_count + rd_count + tdw_count
        else:
            # NOTE(review): assumes the parent tag itself sorts first (largest
            # count) so index 0 is the parent and [1:top+1] are its children —
            # confirm against the dgraph query shape.
            total_count = data_source_tag_list[0].get("count")
            top_tag_list = data_source_tag_list[1 : (top + 1)]
        other_count = total_count
        for each_tag_count in top_tag_list:
            other_count -= each_tag_count.get("count")
        # Lists driving the chart.
        tag_code_list = [each_tag.get("tag_code") for each_tag in top_tag_list]
        tag_alias_list = [each_tag.get("tag_alias") for each_tag in top_tag_list]
        tag_count_list = [each_tag.get("count") for each_tag in top_tag_list]
        if other_count > 0:
            tag_count_list.append(other_count)
            tag_code_list.append("other")
            tag_alias_list.append(_("其他"))
        return Response(
            {
                "tag_count_list": tag_count_list,
                "tag_alias_list": tag_alias_list,
                "tag_code_list": tag_code_list,
            }
        )
class DataTypeDistributionView(APIViewSet):
    """Distribution of dataset counts over data-type tags (dgraph-backed)."""

    @list_route(methods=["post"], url_path="distribution")
    @params_valid(serializer=BasicListSerializer)
    def data_type_distribution(self, request, params):
        """
        @api {post} /datamanage/datastocktake/data_type/distribution/ Get the data-type distribution
        @apiVersion 0.1.0
        @apiGroup DataTypeDistribution
        @apiName data_type_distribution

        @apiParamExample {json} request sample:
            {
                "bk_biz_id":null,
                "project_id":null,
                "tag_ids":[],
                "keyword":"",
                "tag_code":"online",
                "me_type":"tag",
                "cal_type":["standard","only_standard"],
                "page":1,
                "page_size":10,
                "data_set_type":"all",//result_table、raw_data
                "created_by":"xiaoming"
            }

        @apiSuccessExample Success-Response:
            HTTP/1.1 200 OK
            {
                "errors": null,
                "message": "ok",
                "code": "1500200",
                "data": {
                    "tag_count_list": [
                        6,
                        278
                    ],
                    "tag_code_list": [
                        "xx",
                        "other"
                    ],
                    "tag_alias_list": [
                        "XX",
                        "其他"
                    ]
                },
                "result": true
            }
        """
        params.pop("has_standard", None)
        # 1. Query dgraph once for all data-type tags.
        result_dict = dmaction.floating_window_query_dgraph(
            params,
            connections["bkdata_basic_slave"],
            dmaction.NEED_DATA_SET_ID_DETAIL,
            data_type_distribute=True,
        )
        dgraph_result = result_dict.get("dgraph_result")
        data_type_tag_list = result_dict.get("data_source_tag_list")
        for each_tag in data_type_tag_list:
            each_tag["count"] = (
                dgraph_result.get("c_%s" % each_tag.get("tag_code"))[0].get("count", 0)
                if dgraph_result.get("c_%s" % each_tag.get("tag_code"))
                else 0
            )
        # Total number of datasets matching the query conditions.
        rt_count = dgraph_result.get("rt_count")[0].get("count", 0) if dgraph_result.get("rt_count") else 0
        rd_count = dgraph_result.get("rd_count")[0].get("count", 0) if dgraph_result.get("rd_count") else 0
        tdw_count = dgraph_result.get("tdw_count")[0].get("count", 0) if dgraph_result.get("tdw_count") else 0
        total_count = rt_count + rd_count + tdw_count
        # Remainder not covered by the named tags forms the "other" bucket.
        other_count = total_count
        for each_tag_count in data_type_tag_list:
            other_count -= each_tag_count.get("count")
        # Lists driving the chart.
        tag_code_list = [each_tag.get("tag_code") for each_tag in data_type_tag_list]
        tag_alias_list = [each_tag.get("tag_alias") for each_tag in data_type_tag_list]
        tag_count_list = [each_tag.get("count") for each_tag in data_type_tag_list]
        if other_count > 0:
            tag_count_list.append(other_count)
            tag_code_list.append("other")
            tag_alias_list.append(_("其他"))
        return Response(
            {
                "tag_count_list": tag_count_list,
                "tag_alias_list": tag_alias_list,
                "tag_code_list": tag_code_list,
            }
        )
class SankeyDiagramView(APIViewSet):
@list_route(methods=["post"], url_path="distribution")
@params_valid(serializer=BasicListSerializer)
def sankey_diagram_distribution(self, request, params):
"""
@api {post} /datamanage/datastocktake/sankey_diagram/distribution/ 获取桑基图
@apiGroup SankeyDiagramView
@apiName sankey_diagram_distribution
"""
params.pop("has_standard", None)
level = params.get("level", 4)
platform = params.get("platform", "all")
if platform == "tdw":
return Response(
{
"label": [],
"source": [],
"target": [],
"value": [],
"alias": [],
"other_app_code_list": [],
"level": 0,
}
)
# 1 对所有数据来源标签的查询结果
result_dict = dmaction.floating_window_query_dgraph(
params,
connections["bkdata_basic_slave"],
dmaction.NEED_DATA_SET_ID_DETAIL,
sankey_diagram_distribute=True,
)
dgraph_result = result_dict.get("dgraph_result")
rt_count = dgraph_result.get("rt_count")[0].get("count") if dgraph_result.get("rt_count") else 0
if rt_count == 0:
return Response(
{
"label": [],
"source": [],
"target": [],
"value": [],
"alias": [],
"other_app_code_list": [],
"level": 0,
}
)
first_level_tag_list = result_dict.get("first_level_tag_list")
second_level_tag_list = result_dict.get("second_level_tag_list")
# 第一层label
label = []
alias_list = []
for each_tag in first_level_tag_list:
if each_tag.get("tag_code") and each_tag.get("tag_alias"):
label.append(each_tag.get("tag_code"))
alias_list.append(each_tag.get("tag_alias"))
# 其他标签
label.append("other")
alias_list.append(_("其他"))
source = []
target = []
value = []
second_level_have_data_list = []
for each_tag in second_level_tag_list:
# 第二层label
if each_tag.get("tag_code") and each_tag.get("tag_code") not in label:
label.append(each_tag.get("tag_code"))
alias_list.append(each_tag.get("tag_alias"))
# 第一层和第二层之间的value
if dgraph_result.get("c_%s" % each_tag.get("tag_code")) and dgraph_result.get(
"c_%s" % each_tag.get("tag_code")
)[0].get("count"):
# 记录第二层有哪些节点有数据
if each_tag.get("tag_code") not in second_level_have_data_list:
second_level_have_data_list.append(each_tag.get("tag_code"))
source.append(label.index(each_tag.get("parent_code")))
target.append(label.index(each_tag.get("tag_code")))
value.append(dgraph_result.get("c_%s" % each_tag.get("tag_code"))[0].get("count"))
processing_type_dict = copy.deepcopy(dmaction.processing_type_dict)
processing_type_dict["batch_model"] = _("ModelFlow模型(batch)")
processing_type_dict["stream_model"] = _("ModelFlow模型(stream)")
fetch_value_between_source_target(
second_level_tag_list,
dgraph_result,
processing_type_dict,
label,
alias_list,
source,
target,
value,
)
other_app_code_list = []
if level == 4:
other_app_code_list, real_level = format_sankey_diagram(
params,
request,
source,
target,
value,
level,
label,
alias_list,
processing_type_dict,
)
# 第二层存在的节点到第三层之间没有link,导致第二层的节点放置在第三层
for each_tag in second_level_have_data_list:
if each_tag in label and label.index(each_tag) not in source:
for each_processing_type in dmaction.processing_type_list:
if each_processing_type in label:
source.append(label.index(each_tag))
target.append(label.index(each_processing_type))
value.append(minimal_value)
break
# 将分类中"其他"放在第二层,即将"其他"前加输入
if "other" in label:
for each_tag in first_level_tag_list:
if each_tag.get("tag_code") in label and label.index(each_tag.get("tag_code")) in source:
source.append(label.index(each_tag.get("tag_code")))
target.append(label.index("other"))
value.append(minimal_value)
| |
data features
#
def Overall_Correlations(self, save_folder_path, Font_Size, colours):
# create the correlation dataset (subset from the original data)
self.corr_df = DataUpload.recall_dataframe(1)[['dx', 'dx_type', 'age', 'sex','localization']]
self.header_list = self.corr_df.columns.values.tolist()
self.header_list.remove('age')
# convert categorical variables into numeric
le = LabelEncoder()
for i in self.header_list:
le.fit(self.corr_df[i])
LabelEncoder()
self.corr_df[i + '_label'] = le.transform(self.corr_df[i])
print(i, 'encoded successfully')
# create the correlation matrix
self.corr_df = self.corr_df.drop(self.header_list, axis = 1)
self.new_heads = self.corr_df.columns.values.tolist()
self.new_heads = self.corr_df.columns[1:].str.split('_label')
self.new_headers = ['age']
for i in range(len(self.new_heads)):
self.new_headers.append(self.new_heads[i][0])
self.corr_df.columns = self.new_headers # adjsut the table column names
# Drow the graph
self.corr_mat = sns.heatmap(self.corr_df.corr(), annot = True
,cmap = colours)
self.corr_mat.set_yticklabels(self.corr_mat.get_yticklabels(), rotation=45)
self.corr_mat.set_xticklabels(self.corr_mat.get_xmajorticklabels(), fontsize = Font_Size) # adjust x axis font
self.corr_mat.set_yticklabels(self.corr_mat.get_yticklabels(), rotation=45, fontsize = Font_Size) # adjust y axis format
self.corr_mat.set_title('All features Correlation Matrix')
if save_folder_path != 0:
plt.savefig(save_folder_path + '\\Overall Corr. Matrix.png', dpi=300)
plt.clf()
##
# Correlation between each lesion type and the localization
#
    def dx_localization_Correlations (self, save_folder_path, Colours, Font_Size, Values_Fontsize):
        """Plot correlations between lesion types (dx) and body localizations.

        Builds dummy variables for both columns, saves the full correlation
        heatmap, then a shrunk dx-by-localization matrix, and prints the
        max/min correlation per lesion type.  save_folder_path may be 0 to
        skip saving.
        """
        # Create the dummy variables for localization and dx
        self.localization_dx = pd.get_dummies(DataUpload.recall_dataframe(1)[['dx','localization']])
        # create the adjusted dx headers (strip the 'dx_' prefix)
        self.names = self.localization_dx.columns.str.split('dx_')
        self.names = self.names.tolist()
        self.new_names = []
        for i in range(len(self.names)):
            if len(self.names[i]) == 2:
                self.new_names.append(self.names[i][1])
                print(self.names[i],'adjusted and added successfully')
        # Create the adjusted localization headers (strip the prefix)
        self.names = self.localization_dx.columns.str.split('localization_')
        self.names =self. names.tolist()
        for i in range(len(self.names)):
            if len(self.names[i]) == 2:
                self.new_names.append(self.names[i][1])
                print(self.names[i],'adjusted successfully')
        # Replace the localization_dx column names
        self.localization_dx.columns = self.new_names
        # create the confusion matrix
        # Full matrix over all dummy columns
        self.corrMatrix = self.localization_dx.corr()
        sns.heatmap(self.corrMatrix, annot=True, annot_kws={"size": 3})
        if save_folder_path != 0:
            plt.savefig(save_folder_path + '\\Loc dx Corr matrix.png', dpi=300)
        plt.clf()
        # Shrunk matrix: rows = localizations, columns = lesion types
        self.new_corrMatrix = self.corrMatrix[DataUpload.recall_dataframe(1).dx.unique()]
        self.new_corrMatrix = self.new_corrMatrix.loc[DataUpload.recall_dataframe(1).localization.unique()]
        self.loc_dx_corr = sns.heatmap(self.new_corrMatrix, annot=True,
        annot_kws={"size": Values_Fontsize}, cmap=Colours, cbar =False) # annot_kws set the font size inside the table
        self.loc_dx_corr.set_xticklabels(self.loc_dx_corr.get_xmajorticklabels(), fontsize = Font_Size) # adjust x axis font
        self.loc_dx_corr.set_yticklabels(self.loc_dx_corr.get_yticklabels(), rotation=45, fontsize = Font_Size) # adjust y axis format
        self.loc_dx_corr.set_title('Lesion Types to Localizations Correlations')
        if save_folder_path != 0:
            plt.savefig(save_folder_path + '\\Lesion Types to Localization Correlations.png', dpi=300)
        plt.clf()
        # Print the max and min correlations for each lesion type
        for i in self.new_corrMatrix:
            print('max % in', i, 'is', max(self.new_corrMatrix[i]),
                  'and min % is', min(self.new_corrMatrix[i]))
DesAnalysis = Descriptive_Analysis()
# Example invocations (enable as needed):
#DesAnalysis.Data_Exploration('age','dx')
#DesAnalysis.Des_Graphs_Num('age','dx' ,0, Figures_path, 15, 0, 10) # Descriptive analysis graph for numerical features
#DesAnalysis.Des_Graphs_Cat('dx',0 ,0, Figures_path, 0, 11, 10, 8) # Descriptive analysis graph for categorical features
#DesAnalysis.Overall_Correlations(Figures_path, 10, "coolwarm") # Overall correlation matrix
#DesAnalysis.dx_localization_Correlations(Figures_path, "coolwarm", 7, 7) # Correlation matrix focused on dx and localization only
#YlGnBu
##
# Main statistics for age after removing the 0s and null values
# (interactive-only: the result is displayed, not stored)
#
DataUpload.recall_dataframe(0)[(DataUpload.recall_dataframe(0).age != 0) &
                               (DataUpload.recall_dataframe(0).age.notnull())]['age'].describe()
##
# The relationship between gender and lesion type (dx):
# per-lesion male/female counts and their proportions.
#
_lesion_types = ['bkl', 'nv', 'df', 'mel', 'vasc', 'bcc', 'akiec']
Age_Lesion = pd.DataFrame(_lesion_types, columns=['dx'])
# NOTE(review): recall_dataframe is assumed to be a pure getter, so it is
# fetched once instead of per lesion type — confirm it has no side effects.
_df1 = DataUpload.recall_dataframe(1)
for s in ['male', 'female']:
    Age_Lesion[s] = [
        _df1[(_df1.dx == d) & (_df1.sex == s)].shape[0] for d in _lesion_types
    ]
# Proportions computed column-wise (same values as the former per-row tuples).
_total = Age_Lesion['male'] + Age_Lesion['female']
Age_Lesion['male%'] = Age_Lesion['male'] / _total
Age_Lesion['female%'] = Age_Lesion['female'] / _total
Age_Lesion[['dx', 'male%', 'female%']].plot(
    x='dx',
    kind='barh',
    stacked=False,
    title='Gender Proportion per Lesion Type',
    mark_right=True)
plt.savefig(Figures_path + '\\Age_Lesion', dpi=300)
# =========================================================================
# Data Preprocessing
# =========================================================================
##
# Encode target (transform to categorical data)
#
class Data_Preprocessing:
    """Preprocessing pipeline: label-encode the target, split, and balance.

    The instance accumulates state across calls: ``Encoding`` loads the data
    and creates ``processed_data``; ``Train_Test_Split`` derives
    ``train_val_data`` and ``test_data``; ``Balancing_Data`` derives
    ``BalancedData``.  Use ``ProcessedData`` to retrieve any of them.
    """

    # The seven lesion diagnosis codes handled by this pipeline, in the same
    # order the original implementation concatenated them.
    LESION_TYPES = ('akiec', 'bcc', 'mel', 'bkl', 'df', 'nv', 'vasc')

    # Encoding a single categorical feature
    def Encoding(self, column):
        """Label-encode `column` and add a new '<column>Label' integer column."""
        self.le = LabelEncoder()
        # Fetch the dataframe once and reuse it (the original called
        # DataUpload.recall_dataframe(1) twice for the same data).
        self.processed_data = DataUpload.recall_dataframe(1)
        self.le.fit(self.processed_data[column])
        print(list(self.le.classes_)) # print the unique values per encoding
        self.processed_data['{C}Label'.format(C = column)] = self.le.transform(self.processed_data[column]) # Encoding the required column
        print()
        print(column, 'encoded successfully, and below is a sample of it')
        print(self.processed_data[[column, '{C}Label'.format(C = column)]].sample(20))

    # Splitting the data into training-validation and testing
    def Train_Test_Split(self, test_ratio):
        """Split processed_data into train-validation and test sets.

        Sampling is done per lesion type (stratified) so each class keeps
        roughly the same train/test proportion.
        """
        self.train_val_ratio = 1 - test_ratio
        sampled_frames = []
        for lesion in self.LESION_TYPES:
            subset = self.processed_data[self.processed_data.dx == lesion]
            size = round(subset.shape[0] * self.train_val_ratio)
            sampled = subset.sample(n = size)
            # Preserve the per-class attributes the original exposed
            # (e.g. self.akiec_size, self.train_akiec) for backward compatibility.
            setattr(self, '{0}_size'.format(lesion), size)
            setattr(self, 'train_{0}'.format(lesion), sampled)
            sampled_frames.append(sampled)
        # Concatenate the training-validation dataset
        self.train_val_data = pd.concat(sampled_frames)
        # Everything not drawn into the training-validation sample is test data.
        self.test_data = self.processed_data[~self.processed_data.image_id.isin(self.train_val_data.image_id.values.tolist())]
        print('Data split successfully to train-validation and test data with proportions', self.train_val_ratio, 'and', test_ratio)
        print('Training-validation data size is', self.train_val_data.shape[0])
        print('Testing data size is', self.test_data.shape[0])

    # Balancing the data
    def Balancing_Data(self, Sample_Volume):
        """Oversample every lesion class to `Sample_Volume` rows (with replacement)."""
        from sklearn.utils import resample
        # Use this instance's own training data instead of the module-level
        # `DataPreprocessing` global the original implementation depended on.
        train_data = self.ProcessedData('train')
        balanced_frames = []
        for label in range(7):
            class_rows = train_data[train_data.dxLabel == label]
            balanced = resample(class_rows, replace=True, n_samples = Sample_Volume, random_state = 42)
            # Preserve the dx<label> / dx<label>Balanced attributes of the original.
            setattr(self, 'dx{0}'.format(label), class_rows)
            setattr(self, 'dx{0}Balanced'.format(label), balanced)
            balanced_frames.append(balanced)
        # Concatenate the balanced dx types in a new dataset
        self.BalancedData = pd.concat(balanced_frames)
        print('Training-validation data balanced where each lesion count is', Sample_Volume)
        print()
        print('Lesion frequency before balancing the data:')
        print(train_data.dx.value_counts())
        print()
        print('Count per lesion:')
        print(self.BalancedData.dx.value_counts())

    # Recall_the_data (options are 'train','test','all','balanced data')
    def ProcessedData(self, RequiredData):
        """Return one of the stored datasets by name.

        Valid options: 'train', 'test', 'all', 'balanced data'.
        """
        if RequiredData == 'train':
            return self.train_val_data
        elif RequiredData == 'test':
            return self.test_data
        elif RequiredData == 'all':
            return self.processed_data
        elif RequiredData == 'balanced data':
            return self.BalancedData
        else:
            print('valid arguments are "train", "test", "all", or "balanced data"')
# Run the preprocessing pipeline at import time: encode, split, then balance.
DataPreprocessing = Data_Preprocessing()
DataPreprocessing.Encoding('dx') # encoding the target (dx)
DataPreprocessing.Train_Test_Split(0.25) # splitting the data into training-validation (75%) and testing (25%)
# pp = DataPreprocessing.ProcessedData('all') # recalling processed data (dx encoded)
# dd = DataPreprocessing.ProcessedData('train') # recalling training-validation data
# tt = DataPreprocessing.ProcessedData('test') # recalling testing data
DataPreprocessing.Balancing_Data(500) # oversample each lesion class to 500 rows
# =========================================================================
# DEVELOPING THE MODEL
# =========================================================================
##
# Build the model
#
class CNN_Model:
# Set the training and validation dataset
def train_val_split(self, selected_data, validation_ratio):
self.number_of_classes = len(selected_data.dxLabel.unique())
self.selected_data = selected_data
self.X = np.asarray(selected_data['image'].tolist())
self.X = self.X/255. # Scale values | |
property constructs an averaging operator that maps scalar
quantities from cell centers to edges. This averaging operator is
used when a discrete scalar quantity defined cell centers must be
projected to edges. Once constructed, the operator is stored
permanently as a property of the mesh. *See notes*.
Returns
-------
(n_edges, n_cells) scipy.sparse.csr_matrix
The scalar averaging operator from cell centers to edges
Notes
-----
Let :math:`\\boldsymbol{\\phi_c}` be a discrete scalar quantity that
lives at cell centers. **average_cell_to_edge** constructs a discrete
linear operator :math:`\\mathbf{A_{ce}}` that projects
:math:`\\boldsymbol{\\phi_c}` to edges, i.e.:
.. math::
\\boldsymbol{\\phi_e} = \\mathbf{A_{ce}} \\, \\boldsymbol{\\phi_c}
where :math:`\\boldsymbol{\\phi_e}` approximates the value of the scalar
quantity at the edges. For each edge, we are performing a weighted average
between the values at adjacent cell centers. In 1D, where adjacent cells
:math:`i` and :math:`i+1` have widths :math:`h_i` and :math:`h_{i+1}`,
:math:`\\phi` on edge (node location in 1D) is approximated by:
.. math::
\\phi_{i \\! + \\! 1/2} \\approx \\frac{h_{i+1} \\phi_i + h_i \\phi_{i+1}}{h_i + h_{i+1}}
On boundary edges, nearest neighbour is used to extrapolate the value
from the nearest cell center. Once the operator is constructed, the averaging
is implemented as a matrix vector product, i.e.::
phi_e = Ace @ phi_c
Examples
--------
Here we compute the values of a scalar function at cell centers. We then create
an averaging operator to approximate the function on the edges. We choose
to define a scalar function that is strongly discontinuous in some places to
demonstrate how the averaging operator will smooth out discontinuities.
We start by importing the necessary packages and defining a mesh.
>>> from discretize import TensorMesh
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> h = np.ones(40)
>>> mesh = TensorMesh([h, h], x0="CC")
Then we create a scalar variable at cell centers
>>> phi_c = np.zeros(mesh.nC)
>>> xy = mesh.cell_centers
>>> phi_c[(xy[:, 1] > 0)] = 25.0
>>> phi_c[(xy[:, 1] < -10.0) & (xy[:, 0] > -10.0) & (xy[:, 0] < 10.0)] = 50.0
Next, we construct the averaging operator and apply it to
the discrete scalar quantity to approximate the value at the edges.
>>> Ace = mesh.average_cell_to_edge
>>> phi_e = Ace @ phi_c
And plot the results:
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(11, 5))
>>> ax1 = fig.add_subplot(121)
>>> mesh.plot_image(phi_c, ax=ax1, v_type="CC")
>>> ax1.set_title("Variable at cell centers", fontsize=16)
>>> ax2 = fig.add_subplot(122)
>>> mesh.plot_image(phi_e, ax=ax2, v_type="E")
>>> ax2.set_title("Averaged to edges", fontsize=16)
>>> plt.show()
Below, we show a spy plot illustrating the sparsity and mapping
of the operator.
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(9, 9))
>>> ax1 = fig.add_subplot(111)
>>> ax1.spy(Ace, ms=1)
>>> ax1.set_title("Cell Index", fontsize=12, pad=5)
>>> ax1.set_ylabel("Edge Index", fontsize=12)
>>> plt.show()
"""
if getattr(self, "_average_cell_to_edge", None) is None:
n = self.shape_cells
if self.dim == 1:
avg = sp.eye(n[0])
elif self.dim == 2:
avg = sp.vstack(
(
sp.kron(av_extrap(n[1]), speye(n[0])),
sp.kron(speye(n[1]), av_extrap(n[0])),
),
format="csr",
)
elif self.dim == 3:
avg = sp.vstack(
(
kron3(av_extrap(n[2]), av_extrap(n[1]), speye(n[0])),
kron3(av_extrap(n[2]), speye(n[1]), av_extrap(n[0])),
kron3(speye(n[2]), av_extrap(n[1]), av_extrap(n[0])),
),
format="csr",
)
self._average_cell_to_edge = avg
return self._average_cell_to_edge
@property
def average_edge_to_cell(self):
"""Averaging operator from edges to cell centers (scalar quantities).
This property constructs a 2nd order averaging operator that maps scalar
quantities from edges to cell centers. This averaging operator is
used when a discrete scalar quantity defined on mesh edges must be
projected to cell centers. Once constructed, the operator is stored
permanently as a property of the mesh. *See notes*.
Returns
-------
(n_cells, n_edges) scipy.sparse.csr_matrix
The scalar averaging operator from edges to cell centers
Notes
-----
Let :math:`\\boldsymbol{\\phi_e}` be a discrete scalar quantity that
lives on mesh edges. **average_edge_to_cell** constructs a discrete
linear operator :math:`\\mathbf{A_{ec}}` that projects
:math:`\\boldsymbol{\\phi_e}` to cell centers, i.e.:
.. math::
\\boldsymbol{\\phi_c} = \\mathbf{A_{ec}} \\, \\boldsymbol{\\phi_e}
where :math:`\\boldsymbol{\\phi_c}` approximates the value of the scalar
quantity at cell centers. For each cell, we are simply averaging
the values defined on its edges. The operation is implemented as a
matrix vector product, i.e.::
phi_c = Aec @ phi_e
Examples
--------
Here we compute the values of a scalar function on the edges. We then create
an averaging operator to approximate the function at cell centers. We choose
to define a scalar function that is strongly discontinuous in some places to
demonstrate how the averaging operator will smooth out discontinuities.
We start by importing the necessary packages and defining a mesh.
>>> from discretize import TensorMesh
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> h = np.ones(40)
>>> mesh = TensorMesh([h, h], x0="CC")
Then we create a scalar variable on edges,
>>> phi_e = np.zeros(mesh.nE)
>>> xy = mesh.edges
>>> phi_e[(xy[:, 1] > 0)] = 25.0
>>> phi_e[(xy[:, 1] < -10.0) & (xy[:, 0] > -10.0) & (xy[:, 0] < 10.0)] = 50.0
Next, we construct the averaging operator and apply it to
the discrete scalar quantity to approximate the value at cell centers.
>>> Aec = mesh.average_edge_to_cell
>>> phi_c = Aec @ phi_e
And plot the results:
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(11, 5))
>>> ax1 = fig.add_subplot(121)
>>> mesh.plot_image(phi_e, ax=ax1, v_type="E")
>>> ax1.set_title("Variable at edges", fontsize=16)
>>> ax2 = fig.add_subplot(122)
>>> mesh.plot_image(phi_c, ax=ax2, v_type="CC")
>>> ax2.set_title("Averaged to cell centers", fontsize=16)
>>> plt.show()
Below, we show a spy plot illustrating the sparsity and mapping
of the operator
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(9, 9))
>>> ax1 = fig.add_subplot(111)
>>> ax1.spy(Aec, ms=1)
>>> ax1.set_title("Edge Index", fontsize=12, pad=5)
>>> ax1.set_ylabel("Cell Index", fontsize=12)
>>> plt.show()
"""
if getattr(self, "_average_edge_to_cell", None) is None:
if self.dim == 1:
self._avE2CC = self.aveEx2CC
elif self.dim == 2:
self._avE2CC = 0.5 * sp.hstack(
(self.aveEx2CC, self.aveEy2CC), format="csr"
)
elif self.dim == 3:
self._avE2CC = (1.0 / 3) * sp.hstack(
(self.aveEx2CC, self.aveEy2CC, self.aveEz2CC), format="csr"
)
return self._avE2CC
@property
def average_edge_to_cell_vector(self):
"""Averaging operator from edges to cell centers (vector quantities).
This property constructs the averaging operator that independently maps the
Cartesian components of vector quantities from edges to cell centers.
This averaging operators is used when a discrete vector quantity defined on mesh edges
must be approximated at cell centers. Once constructed, the operator is
stored permanently as a property of the mesh.
Be aware that the Cartesian components of the original vector
are defined on their respective edges; e.g. the x-component lives
on x-edges. However, the x, y and z components are being averaged
separately to cell centers. The operation is implemented as a
matrix vector product, i.e.::
u_c = Aec @ u_e
Returns
-------
(dim * n_cells, n_edges) scipy.sparse.csr_matrix
The vector averaging operator from edges to cell centers. Since we
are averaging a vector quantity to cell centers, the first dimension
of the operator is the mesh dimension times the number of cells.
Notes
-----
Let :math:`\\mathbf{u_e}` be the discrete representation of a vector
quantity whose Cartesian components are defined on their respective edges;
e.g. :math:`u_x` is defined on x-edges.
**average_edge_to_cell_vector** constructs a discrete linear operator
:math:`\\mathbf{A_{ec}}` that projects each Cartesian component of
:math:`\\mathbf{u_e}` independently to cell centers, i.e.:
.. math::
\\mathbf{u_c} = \\mathbf{A_{ec}} \\, \\mathbf{u_e}
where :math:`\\mathbf{u_c}` is a discrete vector quantity whose Cartesian
components defined at the cell centers and organized into a 1D array of
the form np.r_[ux, uy, uz]. For each cell, and for each Cartesian component,
we are simply taking the average of the values
defined on the cell's corresponding edges and placing the result at
the cell's center.
Examples
--------
Here we compute the values of a vector function discretized to the edges.
We then create an averaging operator to approximate the function at cell centers.
We start by importing the necessary packages and defining a mesh.
>>> from discretize import TensorMesh
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> h = 0.5 * np.ones(40)
>>> | |
<gh_stars>0
import xml.etree.cElementTree as ET
import time
import datetime
import random
from pathlib import Path
from zipfile import ZipFile
import os
import copy
from shutil import rmtree
import string
import json
from components.dataconnection.index import get_resource_description, get_resource_metadata
from x5gonwp3tools.tools.difficulty.difficulty import __wpm, wikification2con_per_sec
from SETTINGS import EXP_IDS
# general settings
# Ensure the scratch directory for generated course backups exists at import time.
Path(f"tmp").mkdir(parents=True, exist_ok=True)
generated_folder = "tmp"  # all build artifacts below are written under this folder
# Build the mbz from the playlist
def build_mbz(playlist_infos):
    """Build a Moodle backup (.mbz) course archive from a playlist description.

    Parameters
    ----------
    playlist_infos : dict
        Must contain 'playlist_general_infos' and 'playlist_items'.

    Returns
    -------
    dict
        On success: mbz_build='success' plus the archive location and the
        enriched playlist; on failure: mbz_build='failed' plus the error text.
    """
    # Generated course/context ids
    course_id = str(random.randint(10000, 100000))
    course_name = f"gdcrsx5pst_{course_id}"
    context_id = str(random.randint(10000, 100000))
    backup_timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M")
    course_backupfile_name = f"backup-moodle2-course-{course_id}-{course_name}-{backup_timestamp}-nu"
    # Prepare folders structure
    bkp_root = Path(generated_folder) / course_backupfile_name
    for sub in ("activities", "course", "sections"):
        (bkp_root / sub).mkdir(parents=True, exist_ok=True)
    # Pre-create the manifest and the file needed by the X5gon moodle plugin.
    # (The original used bare open() calls here, which leaked file handles.)
    (bkp_root / "moodle_backup.xml").touch()
    (bkp_root / "plst.json").touch()
    # Prepare the playlist items
    playlist_general_infos = playlist_infos['playlist_general_infos']
    # Get resource list from playlist (from service input)
    playlist_enriched = enrich_playlist_items(playlist_infos)
    with open(f"{generated_folder}/{course_backupfile_name}/plst.json", 'w') as fp:
        json.dump(playlist_enriched, fp)
    playlist_items = playlist_enriched['playlist_items']
    # General course infos
    crs_str_from_pst = generate_needed_ids(playlist_items, playlist_general_infos)
    course_infos = {"crs_full_name": f"{playlist_general_infos['pst_name']}",
                    "crs_short_name": f"{course_name}",
                    "crs_id": f"{course_id}",
                    "crs_context_id": f"{context_id}",
                    "crs_bkp_name": f"{course_backupfile_name}",
                    "crs_bkp_timestamp": f"{backup_timestamp}",
                    "crs_playlist_url": f"{playlist_general_infos['pst_url']}",
                    "crs_sections": crs_str_from_pst
                    }
    try:
        # Generate needed files
        generate_mdl_bkpfile(ET, course_infos)
        # Generate the compressed mbz file
        compress_folder(course_backupfile_name)
        return {"mbz_build": "success",
                "directory": "tmp",
                "filename": f"{course_backupfile_name}.mbz",
                "mbz_folder": f"{course_backupfile_name}",
                "plst_obj": playlist_enriched,
                "error": ""
                }
    except Exception as e:
        # Best-effort API: report the failure in the payload instead of raising.
        print(e)
        return {"mbz_build": "failed",
                "error": f"error occured: {e}"}
def enrich_playlist_items(playlist_infos):
    """Fill missing metadata of each playlist item from the X5GON store.

    Mutates ``playlist_infos['playlist_items']`` in place (title, description,
    duration, url, author, date, mediatype, license, difficulty, keywords,
    concepts) and returns the same ``playlist_infos`` object.
    """
    # Remember each item's original position so lookups can be written back.
    pst_items_ix = [{'x5gon_id': item['x5gon_id'],
                     'xlearn_id': item['material_id'],
                     'item_ix': i}
                    for i, item in enumerate(playlist_infos['playlist_items'])]
    # Only items with a real x5gon id can be enriched from the store.
    x5gon_items_ix = [item
                      for item in pst_items_ix
                      if item['x5gon_id'] not in [None, 'null', '']
                      ]
    resources_ids = [item['x5gon_id'] for item in x5gon_items_ix]
    # Fetch descriptions (concepts/keywords) and raw metadata in bulk.
    resources_needed_infos = get_resource_description(resources_ids,
                                                     {"concepts": EXP_IDS['wikifier']['SIMPLE']['experiment_id'],
                                                      "keywords": EXP_IDS['text2tfidf']['SIMPLE']['experiment_id']},
                                                     max_concepts=5,
                                                     max_keywords=5)
    res_metadata = get_resource_metadata(resources_ids)
    resources_final_infos = []
    for i, pstx5item in enumerate(x5gon_items_ix):
        rid = pstx5item['x5gon_id']
        rix = pstx5item['item_ix']
        # NOTE(review): lookups key on int(rid) — assumes ids are numeric strings.
        res_infos = resources_needed_infos.get(int(rid), dict())
        res_metainfos = res_metadata.get(int(rid), dict())
        # The following metadata will be fetched directly from the playlist infos;
        # store values only fill in fields the playlist left empty.
        if 'title' not in playlist_infos['playlist_items'][rix] or playlist_infos['playlist_items'][rix]['title'] is None:
            playlist_infos['playlist_items'][rix]['title'] = res_metainfos['title'] if res_metainfos['title'] is not None else ''
        if 'description' not in playlist_infos['playlist_items'][rix] or playlist_infos['playlist_items'][rix]['description'] is None:
            playlist_infos['playlist_items'][rix]['description'] = ' '.join(res_metainfos['description'].split()[:150]) if res_metainfos['description'] is not None else ''
        # Duration estimate from word count / words-per-minute (assumes the
        # 'Slide' reading rate applies — TODO confirm for video resources).
        if 'duration' not in playlist_infos['playlist_items'][rix] or playlist_infos['playlist_items'][rix]['duration'] in [None, '']:
            playlist_infos['playlist_items'][rix]['duration'] = f"~ {res_metainfos['len_word'] / __wpm()['Slide']} mins" if 'len_word' in res_metainfos else "~ unknown"
        playlist_infos['playlist_items'][rix]['url'] = res_metainfos['url'] if 'url' in res_metainfos else playlist_infos['playlist_items'][rix]['url']
        playlist_infos['playlist_items'][rix]['author'] = ", ".join(res_metainfos['authors']) if ('authors' in res_metainfos and res_metainfos['authors'] is not None) else ''
        playlist_infos['playlist_items'][rix]['date'] = res_metainfos['date'] if ('date' in res_metainfos and res_metainfos['date'] not in ['', None]) else (playlist_infos['playlist_items'][rix]['date'] if 'date' in playlist_infos['playlist_items'][rix] else '')
        playlist_infos['playlist_items'][rix]['mediatype'] = res_metainfos['type'] if ('type' in res_metainfos and res_metainfos['type'] !='') else playlist_infos['playlist_items'][rix]['mediatype']
        playlist_infos['playlist_items'][rix]['license'] = res_metainfos['license'] if ('license' in res_metainfos and res_metainfos['license'] is not None) else ''
        # This is to make sure that there are no quote chars that can break the xmls
        playlist_infos['playlist_items'][rix]['title'] = ''.join(filter(lambda x: x in string.printable, playlist_infos['playlist_items'][rix]['title'].replace('"',"")))
        playlist_infos['playlist_items'][rix]['description'] = ''.join(filter(lambda x: x in string.printable, playlist_infos['playlist_items'][rix]['description'].replace('"',"")))
        if res_infos:
            # Difficulty is derived from text length and concept density.
            res_infos['difficulty'] = wikification2con_per_sec(res_infos['len_char'], res_infos['len_concepts'])
            del res_infos['keywords_full']
            del res_infos['wikifier_full']
            # update only the metadata of the found oers in db
            playlist_infos['playlist_items'][rix]['difficulty'] = res_infos['difficulty']
            playlist_infos['playlist_items'][rix]['keywords'] = ", ".join([keyword['label'] for i, keyword in enumerate(res_infos['keywords'])])
            playlist_infos['playlist_items'][rix]['concepts'] = res_infos['wikifier']
        else:
            playlist_infos['playlist_items'][rix]['difficulty'] = ''
            playlist_infos['playlist_items'][rix]['keywords'] = ''
            playlist_infos['playlist_items'][rix]['concepts'] = []
        resources_final_infos.append(res_infos)
    return playlist_infos
def clean_mbz(mbz_path):
    """Remove both the compressed .mbz archive and its working folder."""
    build_root = f"{generated_folder}/{mbz_path}"
    Path(build_root + ".mbz").unlink()
    rmtree(build_root)
def generate_mdl_bkpfile(ET, course_infos):
    """Write moodle_backup.xml and the auxiliary XML files of the backup.

    Parameters
    ----------
    ET : module
        ElementTree module used to build the XML trees.
    course_infos : dict
        Course metadata produced by build_mbz (ids, names, sections, ...).
    """
    bkp_dir = f"{generated_folder}/{course_infos['crs_bkp_name']}"
    # Main manifest: moodle_backup.xml
    moodle_backup = ET.Element("moodle_backup")
    information = ET.SubElement(moodle_backup, "information")
    ET = mdl_bkfile_geninfosec(ET, information, course_infos)
    details = ET.SubElement(information, "details")
    ET = mdl_bklfile_detailssec(ET, details)
    contents = ET.SubElement(information, "contents")
    ET = mdl_bklfile_contentssec(ET, contents, course_infos)
    settings = ET.SubElement(information, "settings")
    ET = mdl_bklfile_settingssec(ET, settings, course_infos)
    writetoxml_update_needed_mdl_header(f"{bkp_dir}/moodle_backup.xml", moodle_backup)
    # roles.xml: a single 'student' role definition.
    # NOTE: the redundant open() calls of the original leaked file handles;
    # writetoxml_update_needed_mdl_header creates each file itself.
    roles = ET.Element("roles_definition")
    role = ET.SubElement(roles, "role", id='5')
    ET.SubElement(role, "name").text = ''
    ET.SubElement(role, "shortname").text = 'student'
    ET.SubElement(role, "nameincourse").text = '$@NULL@$'
    ET.SubElement(role, "description").text = ''
    ET.SubElement(role, "sortorder").text = '5'
    ET.SubElement(role, "archtype").text = 'student'
    writetoxml_update_needed_mdl_header(f"{bkp_dir}/roles.xml", roles)
    # Mostly-empty boilerplate files: (file name, root tag, child tags).
    boilerplate = (
        ("scales.xml", "scales_definition", ()),
        ("completion.xml", "course_completion", ()),
        ("questions.xml", "question_categories", ()),
        ("outcomes.xml", "outcomes_definition", ()),
        ("groups.xml", "groups", ("groupings",)),
        ("files.xml", "files", ()),
        ("grade_history.xml", "grade_history", ("grade_grades",)),
    )
    for file_name, root_tag, child_tags in boilerplate:
        root = ET.Element(root_tag)
        for child_tag in child_tags:
            ET.SubElement(root, child_tag)
        writetoxml_update_needed_mdl_header(f"{bkp_dir}/{file_name}", root)
    # gradebook.xml: empty categories/items plus the 'minmaxtouse' setting.
    gradebook = ET.Element("gradebook")
    ET.SubElement(gradebook, "attributes")
    ET.SubElement(gradebook, "grade_categories")
    ET.SubElement(gradebook, "grade_items")
    ET.SubElement(gradebook, "grade_letters")
    grade_settings = ET.SubElement(gradebook, "grade_settings")
    grade_setting = ET.SubElement(grade_settings, "grade_setting", id='')
    ET.SubElement(grade_setting, "name").text = 'minmaxtouse'
    ET.SubElement(grade_setting, "value").text = '1'
    writetoxml_update_needed_mdl_header(f"{bkp_dir}/gradebook.xml", gradebook)
def writetoxml_update_needed_mdl_header(xml_file_name, xml_content):
    """Serialize an Element to file and normalize the XML declaration.

    ElementTree emits the declaration with single quotes; Moodle's importer
    expects double quotes, so the header is rewritten in place afterwards.

    Parameters
    ----------
    xml_file_name : str
        Destination path of the XML file.
    xml_content : xml.etree.ElementTree.Element
        Root element to serialize.
    """
    # Write to xml file
    xml_tree = ET.ElementTree(xml_content)
    xml_tree.write(xml_file_name,
                   xml_declaration=True,
                   encoding="UTF-8",
                   method="xml")
    # Fix header. BUG FIX: the original opened the file twice without closing
    # either handle (a resource leak); use context managers instead.
    with open(xml_file_name, 'r') as xml_file:
        content = xml_file.read()
    content = content.replace("<?xml version='1.0' encoding='UTF-8'?>", '<?xml version="1.0" encoding="UTF-8"?>')
    with open(xml_file_name, 'w') as xml_file:
        xml_file.write(content)
def compress_folder(dirName):
    """Pack the generated backup folder into a .mbz (zip) archive."""
    src_root = f"{generated_folder}/{dirName}/"
    # Walk the build folder and store every file with a path relative to it.
    with ZipFile(f"{generated_folder}/{dirName}.mbz", 'w') as archive:
        for current_dir, _subdirs, file_names in os.walk(src_root):
            for file_name in file_names:
                full_path = os.path.join(current_dir, file_name)
                archive.write(full_path, os.path.relpath(full_path, src_root))
def generate_needed_ids(playlist_items, playlist_general_infos):
    """Build the course section structure and assign random ids.

    Returns a list of section dicts ('abstract' then 'main'); each activity in
    a section receives 'generated_activity_id', 'generated_section_id' and
    'activity_descstyle'. The caller's `playlist_items` list is not mutated
    (activities are deep-copied).

    Parameters
    ----------
    playlist_items : list of dict
        Enriched playlist items to embed in the 'main' section.
    playlist_general_infos : dict
        Playlist-level metadata (name, description, author, ...).
    """
    # Deep-copy so the id assignments below never touch the caller's items.
    # (The original also built a second, completely unused deepcopy of the
    # whole playlist on every call; that dead work is removed.)
    sec_items = copy.deepcopy(playlist_items)
    crs_structure = [{"sec_label": 'abstract',
                      "sec_title": 'Abstract',
                      "sec_summary": f"This is an auto-generated course from Xlearn playlist: {playlist_general_infos['pst_name']}",
                      "sec_order": "0",
                      "sec_activities": [],
                      "sec_activity_descstyle": "bullets",
                      "sec_extra_infos": {"Title": f"{playlist_general_infos['pst_name']}",
                                          "Description": f"{playlist_general_infos['pst_description']}",
                                          "Author": f"{playlist_general_infos['pst_author']}",
                                          "Creation date": f"{playlist_general_infos['pst_creation_date']}",
                                          "License": f"{playlist_general_infos['pst_license']}",
                                          "Url": f"{playlist_general_infos['pst_url']}"
                                          }
                      },
                     {
                      "sec_label": 'main',
                      "sec_title": 'Playlist presentation',
                      "sec_summary": 'Here are the playlist items:',
                      "sec_order": "1",
                      "sec_activities": sec_items,
                      "sec_activity_descstyle": "table",
                      "sec_extra_infos": {}
                      }
                     ]
    # Random, collision-free ids (sample() never repeats a value); note this
    # caps sections/activities at 1000 per sample.
    sections_rand_id = random.sample(range(1000), len(crs_structure))
    for k, sec_infos in enumerate(crs_structure):
        sec_infos['sec_id'] = str(sections_rand_id[k])
        activities_rand_id = random.sample(range(1000), len(sec_infos['sec_activities']))
        for i, res in enumerate(sec_infos['sec_activities']):
            res['generated_activity_id'] = str(activities_rand_id[i])
            res['generated_section_id'] = str(sec_infos['sec_id'])
            res['activity_descstyle'] = sec_infos['sec_activity_descstyle']
    return crs_structure
def mdl_bkfile_geninfosec(ET, information, course_infos):
    """Append the general-information fields to the <information> element.

    Fixed Moodle version/backup constants are combined with the per-course
    values from `course_infos`; returns the ET module (pass-through).
    """
    info_fields = (
        ("name", f"{course_infos['crs_bkp_name']}.mbz"),
        ("moodle_version", "2018120303.12"),
        ("moodle_release", "3.6.3+ (Build: 20190423)"),
        ("backup_version", "2018120300"),
        ("backup_release", "3.6"),
        ("backup_date", str(int(time.time()))),
        ("mnet_remoteusers", "0"),
        ("include_files", "1"),
        ("include_file_references_to_external_content", "0"),
        ("original_wwwroot", "<a href='http://x5gon.org' target='_blank'>x5gon.org</a>"),
        ("original_site_identifier_hash", "a48d6267a5fc08d341e51ba07b19ba7d48f3e66e"),
        ("original_course_id", course_infos['crs_id']),
        ("original_course_format", "topics"),
        ("original_course_fullname", f"{course_infos['crs_full_name']}"),
        ("original_course_shortname", f"{course_infos['crs_short_name']}"),
        ("original_course_startdate", str(int(time.time()))),
        ("original_course_enddate", "0"),
        ("original_course_contextid", f"{course_infos['crs_context_id']}"),
        ("original_system_contextid", "1"),
    )
    for tag, value in info_fields:
        ET.SubElement(information, tag).text = value
    return ET
def mdl_bklfile_detailssec(ET, details):
    """Append the fixed backup <detail> entry to the <details> element."""
    detail = ET.SubElement(details, "detail", name="4ab9f7c4a0efca1acfd034ba23c58440")
    detail_fields = {
        "type": "course",
        "format": "moodle2",
        "interactive": "1",
        "mode": "10",
        "execution": "1",
        "executiontime": "0",
    }
    for tag, value in detail_fields.items():
        ET.SubElement(detail, tag).text = value
    return ET
def mdl_bklfile_contentssec(ET, contents, course_infos):
    """Populate <contents>: all activities, then sections, then the course."""
    sections_infos = course_infos['crs_sections']
    # Activities sub-section: every activity of every section, flattened.
    activities = ET.SubElement(contents, "activities")
    for section in sections_infos:
        for activity in section['sec_activities']:
            ET = mdl_bklfile_activitysec(ET, activities, activity,
                                         section['sec_id'],
                                         activity['generated_activity_id'],
                                         course_infos)
    # Sections sub-section
    sections = ET.SubElement(contents, "sections")
    for section in sections_infos:
        ET = mdl_bklfile_sectionsec(ET, sections, section, course_infos)
    # Course sub-section
    course = ET.SubElement(contents, "course")
    ET = mdl_bklfile_coursesec(ET, course, course_infos)
    return ET
def mdl_bklfile_settingssec(ET, settings, course_infos):
# For our case: all activities are in the same section
sections = course_infos['crs_sections']
# general settings
gen_settings = [
{"level":"root", "name":"filename", "value":"0"},
{"level":"root", "name":"imscc11", "value":"0"},
{"level":"root", "name":"users", "value":"0"},
{"level":"root", "name":"anonymize", "value":"0"},
{"level":"root", "name":"role_assignments", "value":"0"},
{"level":"root", "name":"activities", "value":"1"},
{"level":"root", "name":"blocks", "value":"0"},
{"level":"root", "name":"filters", "value":"0"},
{"level":"root", "name":"comments", "value":"0"},
{"level":"root", "name":"badges", "value":"0"},
{"level":"root", "name":"calendarevents", "value":"0"},
{"level":"root", "name":"userscompletion", "value":"0"},
{"level":"root", "name":"logs", "value":"0"},
{"level":"root", "name":"grade_histories", "value":"0"},
{"level":"root", "name":"questionbank", "value":"0"},
{"level":"root", "name":"groups", "value":"0"},
{"level":"root", "name":"competencies", "value":"0"}
]
# general settings
for i, sett in enumerate(gen_settings):
setting = ET.SubElement(settings, "setting")
for key in sett.keys():
ET.SubElement(setting, key).text = sett[key]
# sections settings
for k, sec_infos in enumerate(sections):
setting = ET.SubElement(settings, "setting")
sec_id = sec_infos['sec_id']
ET.SubElement(setting, "level").text = "section"
ET.SubElement(setting, "section").text = f"section_{sec_id}"
ET.SubElement(setting, "name").text = f"section_{sec_id}_included"
ET.SubElement(setting, "value").text = "1"
setting = ET.SubElement(settings, "setting")
ET.SubElement(setting, "level").text = "section"
ET.SubElement(setting, "section").text = f"section_{sec_id}"
ET.SubElement(setting, "name").text = f"section_{sec_id}_userinfo"
ET.SubElement(setting, "value").text = "0"
# activities settings
for k, sec_infos in enumerate(sections):
resources = sec_infos['sec_activities']
for i, res in enumerate(resources):
setting = ET.SubElement(settings, "setting")
activity_id = res['generated_activity_id']
| |
"""Test bpack field descriptors."""
import sys
import enum
from typing import List
import pytest
import bpack
from bpack.descriptors import get_field_descriptor
class TestFieldFactory:
    """Tests for the ``bpack.field`` factory and its argument validation."""

    @staticmethod
    def test_base():
        # Smoke test: every supported keyword argument accepted together.
        bpack.field(size=1, offset=0, signed=False, default=0, repeat=1)

    @staticmethod
    def test_field_vs_field_class():
        fld = bpack.field(size=1)
        assert bpack.descriptors.is_field(fld)
        assert isinstance(fld, bpack.descriptors.Field)

    @staticmethod
    @pytest.mark.parametrize('size', [1.3, 'x'])
    def test_invalid_size_type(size):
        # Non-integer sizes are rejected with TypeError.
        with pytest.raises(TypeError):
            bpack.field(size=size, default=1/3)

    @staticmethod
    @pytest.mark.parametrize('size', [0, -8])
    def test_invalid_size(size):
        # Sizes must be strictly positive.
        with pytest.raises(ValueError):
            bpack.field(size=size, default=1/3)

    @staticmethod
    @pytest.mark.parametrize('offset', [1.3, 'x'])
    def test_invalid_offset_type(offset):
        # Non-integer offsets are rejected with TypeError.
        with pytest.raises(TypeError):
            bpack.field(size=8, default=1/3, offset=offset)

    @staticmethod
    def test_invalid_offset():
        # Negative offsets are rejected.
        with pytest.raises(ValueError):
            bpack.field(size=8, default=1/3, offset=-8)

    @staticmethod
    @pytest.mark.parametrize('value', [-8, 'a'])
    def test_invalid_signed_type(value):
        # ``signed`` must be a bool.
        with pytest.raises(TypeError):
            bpack.field(size=8, default=1, signed=value)  # noqa

    @staticmethod
    @pytest.mark.parametrize('repeat', [1.3, 'x'])
    def test_invalid_repeat_type(repeat):
        # Non-integer repeat counts are rejected with TypeError.
        with pytest.raises(TypeError):
            bpack.field(size=8, default=1/3, repeat=repeat)

    @staticmethod
    def test_invalid_repeat():
        # ``repeat`` must be at least 1.
        with pytest.raises(ValueError):
            bpack.field(size=8, default=1/3, repeat=0)

    @staticmethod
    def test_metadata_key():
        fld = bpack.field(size=1)
        assert bpack.descriptors.METADATA_KEY in fld.metadata
class TestRecordFields:
    """Field metadata checks for records built with ``@bpack.descriptor``.

    Each test declares a small record and verifies both the dataclass-level
    field attributes and the binary descriptor attached by bpack
    (type, size, computed offset, signedness, repeat count).
    """

    @staticmethod
    def test_field_properties_01():
        # Offsets are computed automatically from the cumulative field sizes.
        @bpack.descriptor
        class Record:
            field_1: int = bpack.field(size=4, default=0, signed=True)
            field_2: float = bpack.field(size=8, default=1/3)
            field_3: List[int] = bpack.field(size=1, default=1, repeat=1)

        # Expected per field: (name, type, size, offset, signed, repeat).
        field_data = [
            ('field_1', int, 4, 0, True, None),
            ('field_2', float, 8, 4, None, None),
            ('field_3', List[int], 1, 12, None, 1),
        ]
        for field_, data in zip(bpack.fields(Record), field_data):
            name, type_, size, offset, signed, repeat = data
            assert field_.name == name
            assert field_.type == type_
            field_descr = get_field_descriptor(field_)
            assert field_descr.type == type_
            assert field_descr.size == size
            assert field_descr.offset == offset
            assert field_descr.signed == signed
            assert field_descr.repeat == repeat

    @staticmethod
    def test_field_properties_02():
        # An explicit offset on the first field shifts all following fields.
        @bpack.descriptor
        class Record:
            field_1: int = bpack.field(size=4, offset=1, default=0,
                                       signed=False)
            field_2: float = bpack.field(size=8, default=1/3)
            field_3: List[int] = bpack.field(size=1, default=1, repeat=1)

        # Expected per field: (name, type, size, offset, signed, repeat).
        field_data = [
            ('field_1', int, 4, 1, False, None),
            ('field_2', float, 8, 5, None, None),
            ('field_3', List[int], 1, 13, None, 1),
        ]
        for field_, data in zip(bpack.fields(Record), field_data):
            name, type_, size, offset, signed, repeat = data
            assert field_.name == name
            assert field_.type == type_
            field_descr = get_field_descriptor(field_)
            assert field_descr.type == type_
            assert field_descr.size == size
            assert field_descr.offset == offset
            assert field_descr.signed == signed
            assert field_descr.repeat == repeat

    @staticmethod
    def test_field_properties_03():
        # Fully explicit offsets; gaps between fields are allowed.
        @bpack.descriptor
        class Record:
            field_1: int = bpack.field(size=4, offset=1, default=0)
            field_2: float = bpack.field(size=8, offset=6, default=1/3)

        # Expected per field: (name, type, size, offset, signed, repeat).
        field_data = [
            ('field_1', int, 4, 1, None, None),
            ('field_2', float, 8, 6, None, None),
        ]
        for field_, data in zip(bpack.fields(Record), field_data):
            name, type_, size, offset, signed, repeat = data
            assert field_.name == name
            assert field_.type == type_
            field_descr = get_field_descriptor(field_)
            assert field_descr.type == type_
            assert field_descr.size == size
            assert field_descr.offset == offset
            assert field_descr.signed == signed
            assert field_descr.repeat == repeat

    @staticmethod
    def test_finvalid_field_type():
        # NOTE(review): "finvalid" looks like a typo for "invalid"; renaming
        # would change the collected test id, so it is only flagged here.
        # A string annotation that does not resolve to a type is rejected.
        with pytest.raises(TypeError):
            @bpack.descriptor
            class Record:  # noqa
                field_1: 'invalid' = bpack.field(size=4)  # noqa: F821
class TestEnumFields:
    """Support for enum-typed fields in bpack records."""

    @staticmethod
    def test_enum():
        # Plain (string-valued) enums are accepted as field types.
        class EEnumType(enum.Enum):
            A = 'a'
            B = 'b'
            C = 'c'

        @bpack.descriptor
        class Record:
            field_1: int = bpack.field(size=4, default=0)
            field_2: EEnumType = bpack.field(size=1, default=EEnumType.A)

        field_2 = bpack.fields(Record)[1]
        assert field_2.name == 'field_2'
        assert field_2.type is EEnumType
        assert field_2.default is EEnumType.A
        assert isinstance(Record().field_2, EEnumType)

    @staticmethod
    def test_int_enum():
        # IntEnum fields additionally accept the ``signed`` qualifier.
        class EEnumType(enum.IntEnum):
            A = 1
            B = 2
            C = 4

        @bpack.descriptor
        class Record:
            field_1: int = bpack.field(size=4, default=0)
            field_2: EEnumType = bpack.field(size=1, signed=True,
                                             default=EEnumType.A)

        field_2 = bpack.fields(Record)[1]
        assert field_2.name == 'field_2'
        assert field_2.type is EEnumType
        assert field_2.default is EEnumType.A
        assert isinstance(Record().field_2, EEnumType)

    @staticmethod
    def test_intflag_enum():
        # IntFlag behaves like IntEnum for field purposes.
        class EEnumType(enum.IntFlag):
            A = 1
            B = 2
            C = 4

        @bpack.descriptor
        class Record:
            field_1: int = bpack.field(size=4, default=0)
            field_2: EEnumType = bpack.field(size=1, default=EEnumType.A)

        field_2 = bpack.fields(Record)[1]
        assert field_2.name == 'field_2'
        assert field_2.type is EEnumType
        assert field_2.default is EEnumType.A
        assert isinstance(Record().field_2, EEnumType)

    @staticmethod
    def test_invalid_enum():
        # Mixed value types within one enum cannot map to a single binary
        # representation -> TypeError at descriptor creation time.
        class EEnumType(enum.Enum):
            A = 1
            B = 'b'
            C = 4

        with pytest.raises(TypeError):
            @bpack.descriptor
            class Record:  # noqa
                field_1: int = bpack.field(size=4, default=0)
                field_2: EEnumType = bpack.field(size=1, default=EEnumType.A)

    @staticmethod
    def test_invalid_signed_qualifier():
        # ``signed`` is meaningless for a non-integer enum: only a warning.
        class EEnumType(enum.Enum):
            A = 'a'
            B = 'b'
            C = 'c'

        with pytest.warns(UserWarning):
            @bpack.descriptor
            class Record:  # noqa
                field_1: int = bpack.field(size=4, default=0)
                field_2: EEnumType = bpack.field(size=1, signed=True,
                                                 default=EEnumType.A)
class TestFieldDescriptor:
    """Direct tests of :class:`bpack.descriptors.BinFieldDescriptor`.

    Covers construction (positional and keyword), argument validation,
    ``validate()`` semantics, post-construction mutation checks and the
    ``is_*_type`` helper predicates.
    """

    @staticmethod
    def test_empty_init():
        # A default-constructed descriptor has every attribute unset.
        descr = bpack.descriptors.BinFieldDescriptor()
        assert descr.type is None
        assert descr.size is None
        assert descr.offset is None
        assert descr.signed is None
        assert descr.repeat is None
        # Exactly the five public attributes above, nothing hidden.
        assert len(vars(descr)) == 5

    @staticmethod
    def test_init():
        # Positional order is: type, size, offset, signed, repeat.
        descr = bpack.descriptors.BinFieldDescriptor(int, 1, 2, True, 1)
        assert descr.type is int
        assert descr.size == 1
        assert descr.offset == 2
        assert descr.signed is True
        assert descr.repeat == 1
        assert len(vars(descr)) == 5

    @staticmethod
    def test_init_kw():
        # Same as test_init but with keyword arguments.
        descr = bpack.descriptors.BinFieldDescriptor(type=int, size=1,
                                                     offset=2, signed=True,
                                                     repeat=1)
        assert descr.type is int
        assert descr.size == 1
        assert descr.offset == 2
        assert descr.signed is True
        assert descr.repeat == 1
        assert len(vars(descr)) == 5

    @staticmethod
    def test_init_invalid_type():
        # Non-integer size/offset/repeat and non-bool signed -> TypeError.
        with pytest.raises(TypeError):
            bpack.descriptors.BinFieldDescriptor(size=1.1)  # noqa
        with pytest.raises(TypeError):
            bpack.descriptors.BinFieldDescriptor(offset=2.1)  # noqa
        with pytest.raises(TypeError):
            bpack.descriptors.BinFieldDescriptor(
                signed=complex(3.1, 0))  # noqa
        with pytest.raises(TypeError):
            bpack.descriptors.BinFieldDescriptor(repeat=1.1)  # noqa

    @staticmethod
    def test_init_invalid_value():
        # Out-of-range values -> ValueError.
        with pytest.raises(ValueError):
            bpack.descriptors.BinFieldDescriptor(size=-1)
        with pytest.raises(ValueError):
            bpack.descriptors.BinFieldDescriptor(size=0)
        with pytest.raises(ValueError):
            bpack.descriptors.BinFieldDescriptor(offset=-1)
        with pytest.raises(ValueError):
            bpack.descriptors.BinFieldDescriptor(repeat=0)

    @staticmethod
    def test_validate():
        # Any combination with at least type and size validates ...
        descr = bpack.descriptors.BinFieldDescriptor(int, 1)
        descr.validate()
        descr = bpack.descriptors.BinFieldDescriptor(int, 1, 2)
        descr.validate()
        descr = bpack.descriptors.BinFieldDescriptor(int, 1, 2, True)
        descr.validate()
        # ... and ``repeat`` additionally requires a sequence type.
        descr = bpack.descriptors.BinFieldDescriptor(List[int], 1, 2, True, 1)
        descr.validate()

    @staticmethod
    def test_validation_warning():
        # ``signed`` on a non-integer type is ignored with a warning.
        descr = bpack.descriptors.BinFieldDescriptor(type=float, size=4,
                                                     signed=True)
        with pytest.warns(UserWarning, match='ignore'):
            descr.validate()

    @staticmethod
    def test_validation_error():
        # Missing type and/or size makes the descriptor unusable.
        descr = bpack.descriptors.BinFieldDescriptor()
        with pytest.raises(TypeError):
            descr.validate()
        descr = bpack.descriptors.BinFieldDescriptor(type=int)
        with pytest.raises(TypeError):
            descr.validate()
        descr = bpack.descriptors.BinFieldDescriptor(size=1)
        with pytest.raises(TypeError):
            descr.validate()
        # repeat > 1 on a scalar (non-sequence) type is also rejected.
        descr = bpack.descriptors.BinFieldDescriptor(type=int, size=1,
                                                     repeat=2)
        with pytest.raises(TypeError):
            descr.validate()

    @staticmethod
    def test_post_validation_error_on_type():
        # Mutating a previously valid descriptor re-triggers errors.
        descr = bpack.descriptors.BinFieldDescriptor(int, 1, 2)
        descr.validate()
        descr.type = None
        with pytest.raises(TypeError):
            descr.validate()

    @staticmethod
    @pytest.mark.parametrize('size, error_type',
                             [(None, TypeError),
                              (0, ValueError),
                              (-1, ValueError),
                              (1.1, TypeError)],
                             ids=['None', 'zero', 'negative', 'float'])
    def test_post_validation_error_on_size(size, error_type):
        # ``size`` mutated to an invalid value after a successful validate().
        descr = bpack.descriptors.BinFieldDescriptor(int, 1, 2)
        descr.validate()
        descr.size = size
        with pytest.raises(error_type):
            descr.validate()

    @staticmethod
    @pytest.mark.parametrize('offset, error_type',
                             [(-1, ValueError),
                              (1.1, TypeError)],
                             ids=['negative', 'float'])
    def test_post_validation_error_on_offset(offset, error_type):
        # ``offset`` mutated to an invalid value after a successful validate().
        descr = bpack.descriptors.BinFieldDescriptor(int, 1, 2)
        descr.validate()
        descr.offset = offset
        with pytest.raises(error_type):
            descr.validate()

    @staticmethod
    def test_post_validation_warning_on_signed():
        descr = bpack.descriptors.BinFieldDescriptor(int, 1, 2, signed=True)
        descr.validate()
        # Switching to a float type leaves a stale ``signed`` -> warning.
        descr.type = float
        with pytest.warns(UserWarning, match='ignore'):
            descr.validate()

    @staticmethod
    def test_post_validation_error_on_repeat():
        descr = bpack.descriptors.BinFieldDescriptor(int, 1, 2, signed=True)
        descr.validate()
        # repeat on a scalar type -> TypeError.
        descr.repeat = 2
        with pytest.raises(TypeError):
            descr.validate()
        descr = bpack.descriptors.BinFieldDescriptor(List[int], 1, 2,
                                                     signed=True, repeat=2)
        descr.validate()
        # repeat < 1 -> ValueError.
        descr.repeat = 0
        with pytest.raises(ValueError):
            descr.validate()

    def test_methods(self):
        # Scalar int: int predicate only.
        descr = bpack.descriptors.BinFieldDescriptor(int, 1)
        descr.validate()
        assert descr.is_int_type()
        assert not descr.is_sequence_type()
        assert not descr.is_enum_type()
        # Scalar float: none of the predicates.
        descr = bpack.descriptors.BinFieldDescriptor(float, 1)
        descr.validate()
        assert not descr.is_int_type()
        assert not descr.is_sequence_type()
        assert not descr.is_enum_type()
        # Sequence of ints: both int and sequence predicates hold.
        descr = bpack.descriptors.BinFieldDescriptor(List[int], 1, repeat=10)
        descr.validate()
        assert descr.is_int_type()
        assert descr.is_sequence_type()
        assert not descr.is_enum_type()
        # Sequence of floats: sequence predicate only.
        descr = bpack.descriptors.BinFieldDescriptor(List[float], 1, repeat=10)
        descr.validate()
        assert not descr.is_int_type()
        assert descr.is_sequence_type()
        assert not descr.is_enum_type()

        # Plain Enum: enum predicate but not int.
        class EEnumType(enum.Enum):
            A = 'a'
        descr = bpack.descriptors.BinFieldDescriptor(EEnumType, 1)
        descr.validate()
        assert not descr.is_int_type()
        assert not descr.is_sequence_type()
        assert descr.is_enum_type()

        # IntEnum: both enum and int predicates.
        class EEnumType(enum.IntEnum):
            A = 1
        descr = bpack.descriptors.BinFieldDescriptor(EEnumType, 1)
        descr.validate()
        assert descr.is_int_type()
        assert not descr.is_sequence_type()
        assert descr.is_enum_type()

        # IntFlag: same as IntEnum.
        class EEnumType(enum.IntFlag):
            A = 1
        descr = bpack.descriptors.BinFieldDescriptor(EEnumType, 1)
        descr.validate()
        assert descr.is_int_type()
        assert not descr.is_sequence_type()
        assert descr.is_enum_type()
class TestAnnotatedType:
@staticmethod
@pytest.mark.parametrize('byteorder', ['>', '<', '|', ''],
                         ids=['>', '<', '|', 'None'])
def test_annotated_type(byteorder):
    """Typestr annotations (``bpack.T['<i4']`` etc.) map to descriptor data."""
    # '|' (numpy-style "not applicable") is passed to the decorator as ''.
    @bpack.descriptor(byteorder=byteorder if byteorder != '|' else '')
    class Record:
        field_1: bpack.T[f'{byteorder}i4']  # noqa: F821
        field_2: bpack.T[f'{byteorder}u4']  # noqa: F821
        field_3: bpack.T[f'{byteorder}f4']  # noqa: F821
        field_4: bpack.T[f'{byteorder}c4']  # noqa: F821
        field_5: bpack.T[f'{byteorder}S4']  # noqa: F821

    fields = dict(
        (field.name, get_field_descriptor(field))
        for field in bpack.fields(Record)
    )
    # 'i' -> signed int.
    assert fields['field_1'].type == int
    assert fields['field_1'].size == 4
    assert fields['field_1'].signed is True
    assert fields['field_1'].repeat is None
    # 'u' -> unsigned int.
    assert fields['field_2'].type == int
    assert fields['field_2'].size == 4
    assert fields['field_2'].signed is False
    assert fields['field_2'].repeat is None
    # 'f' -> float; signedness does not apply.
    assert fields['field_3'].type == float
    assert fields['field_3'].size == 4
    assert fields['field_3'].signed is None
    assert fields['field_3'].repeat is None
    # 'c' -> complex.
    assert fields['field_4'].type == complex
    assert fields['field_4'].size == 4
    assert fields['field_4'].signed is None
    assert fields['field_4'].repeat is None
    # 'S' -> bytes.
    assert fields['field_5'].type == bytes
    assert fields['field_5'].size == 4
    assert fields['field_5'].signed is None
    assert fields['field_5'].repeat is None
@staticmethod
def test_list_with_annotated_type():
    """A typestr inside ``List[...]`` combines with the ``repeat`` count."""
    typestr = 'i4'

    @bpack.descriptor
    class Record:
        field_1: List[bpack.T[typestr]] = bpack.field(repeat=2)

    field = bpack.fields(Record)[0]
    # The dataclass keeps the annotated alias as-is ...
    assert field.type == List[bpack.T[typestr]]
    # ... while the binary descriptor resolves it to List[int].
    field_descr = get_field_descriptor(field)
    assert field_descr.type == List[int]
    assert field_descr.size == 4
    assert field_descr.signed is True
    assert field_descr.repeat == 2
@staticmethod
def test_byteorder_consistency():
typestr = '>i8'
with pytest.raises(bpack.descriptors.DescriptorConsistencyError):
@bpack.descriptor(byteorder=bpack.EByteOrder.LE)
class Record: # noqa
field: bpack.T[typestr]
typestr = '<i8'
with pytest.raises(bpack.descriptors.DescriptorConsistencyError):
@bpack.descriptor(byteorder=bpack.EByteOrder.BE) # noqa: F811
class Record: # noqa
field: bpack.T[typestr]
typestr = '>i8' if sys.byteorder == 'little' else '<i8'
with pytest.raises(bpack.descriptors.DescriptorConsistencyError):
@bpack.descriptor # noqa: F811
class Record: # noqa
field: bpack.T[typestr]
typestr = '<i8' if sys.byteorder == 'little' else '>i8'
| |
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
question_data = {
"AssessmentA": [{
"name": "<NAME>", # noqa: E501
"question": "You have effective organisational security management led at board level and articulated clearly "
"in corresponding policies.", # noqa: E501
"answers": [{
"answer": "The security of network and information systems related to the operation of essential functions is not discussed or reported on regularly at board-level.", # noqa: E501
"score": 0
}, {
"answer": "Board-level discussions on the security of networks and information systems are based on partial or out-of-date information, without the benefit of expert guidance.", # noqa: E501
"score": 0
}, {
"answer": "The security of networks and information systems supporting your essential functions are not driven effectively by the direction set at board level.", # noqa: E501
"score": 0
}, {
"answer": "Senior management or other pockets of the organisation consider themselves exempt from some policies or expect special accommodations to be made.", # noqa: E501
"score": 0
}, {
"answer": "Your organisation's approach and policy relating to the security of networks and information systems supporting the operation of essential functions are owned and managed at board level. These are communicated, in a meaningful way, to risk management decision-makers across the organisation.", # noqa: E501
"score": 2
}, {
"answer": "Regular board discussions on the security of network and information systems supporting the operation of your essential function take place, based on timely and accurate information and informed by expert guidance.", # noqa: E501
"score": 2
}, {
"answer": "There is a board-level individual who has overall accountability for the security of networks and information systems and drives regular discussion at board-level.", # noqa: E501
"score": 2
}, {
"answer": "Direction set at board level is translated into effective organisational practices that direct and control the security of the networks and information systems supporting your essential function.", # noqa: E501
"score": 2
}],
"total": 8
}, {
"name": "A1b Roles and Responsibilities", # noqa: E501
"question": "Your organisation has established roles and responsibilities for the security of networks and information systems at all levels, with clear and well-understood channels for communicating and escalating risks.", # noqa: E501
"answers": [{
"answer": "Key roles are missing, left vacant, or fulfilled on an ad-hoc or informal basis.", # noqa: E501
"score": 0
}, {
"answer": "Staff are assigned security responsibilities but without adequate authority or resources to fulfil them.", # noqa: E501
"score": 0
}, {
"answer": "Staff are unsure what their responsibilities are for the security of the essential function.", # noqa: E501
"score": 0
}, {
"answer": "Necessary roles and responsibilities for the security of networks and information systems supporting your essential function have been identified. These are reviewed periodically to ensure they remain fit for purpose.", # noqa: E501
"score": 2
}, {
"answer": "Appropriately capable and knowledgeable staff fill those roles and are given the time, authority, and resources to carry out their duties.", # noqa: E501
"score": 2
}, {
"answer": "There is clarity on who in your organisation has overall accountability for the security of the networks and information systems supporting your essential function.", # noqa: E501
"score": 2
}]
}, {
"name": "A1c Decision-making", # noqa: E501
"question": "You have senior-level accountability for the security of networks and information systems, and delegate decision-making authority appropriately and effectively. Risks to network and information systems related to the operation of essential functions are considered in the context of other organisational risks.", # noqa: E501
"answers": [{
"answer": "What should be relatively straightforward risk decisions are constantly referred up the chain, or not made.", # noqa: E501
"score": 0
}, {
"answer": "Risks are resolved informally (or ignored) at a local level without a formal reporting mechanism when it is not appropriate.", # noqa: E501
"score": 0
}, {
"answer": "Decision-makers are unsure of what senior management's risk appetite is, or only understand it in vague terms such as \"averse\" or \"cautious\".", # noqa: E501
"score": 0
}, {
"answer": "Organisational structure causes risk decisions to be made in isolation. (e.g. engineering and IT don't talk to each other about risk).", # noqa: E501
"score": 0
}, {
"answer": "Risk priorities are too vague to make meaningful distinctions between them. (e.g. almost all risks are rated 'medium' or 'amber').", # noqa: E501
"score": 0
}, {
"answer": "Senior management have visibility of key risk decisions made throughout the organisation.", # noqa: E501
"score": 2
}, {
"answer": "Risk management decision-makers understand their responsibilities for making effective and timely decisions in the context of the risk appetite regarding the essential function, as set by senior management.", # noqa: E501
"score": 2
}, {
"answer": "Risk management decision-making is delegated and escalated where necessary, across the organisation, to people who have the skills, knowledge, tools, and authority they need.", # noqa: E501
"score": 2
}, {
"answer": "Risk management decisions are periodically reviewed to ensure their continued relevance and validity.", # noqa: E501
"score": 2
}]
}, {
"name": "A2a Risk Management Process", # noqa: E501
"question": "Your organisation has effective internal processes for managing risks to the security of network and information systems related to the operation of essential functions and communicating associated activities.", # noqa: E501
"answers": [{
"answer": "Risk assessments are not based on a clearly defined set of threat assumptions.", # noqa: E501
"score": 0
}, {
"answer": "Risk assessment outputs are too complex or unwieldy to be consumed by decision-makers and are not effectively communicated in a clear and timely manner.", # noqa: E501
"score": 0
}, {
"answer": "Risk assessments for critical systems are a \"one-off\" activity (or not done at all).", # noqa: E501
"score": 0
}, {
"answer": "The security elements of projects or programmes are solely dependent on the completion of a risk management assessment without any regard to the outcomes.", # noqa: E501
"score": 0
}, {
"answer": "There is no systematic process in place to ensure that identified security risks are managed effectively.", # noqa: E501
"score": 0
}, {
"answer": "Systems are assessed in isolation, without consideration of dependencies and interactions with other systems. (e.g. interactions between IT and OT environments).", # noqa: E501
"score": 0
}, {
"answer": "Security requirements and mitigation's are arbitrary or are applied from a control catalogue without consideration of how they contribute to the security of the essential function.", # noqa: E501
"score": 0
}, {
"answer": "Risks remain unresolved on a register for prolonged periods of time awaiting senior decision-making or resource allocation to resolve.", # noqa: E501
"score": 0
}, {
"answer": "Your organisational process ensures that security risks to networks and information systems relevant to essential functions are identified, analysed, prioritised, and managed.", # noqa: E501
"score": 1
}, {
"answer": "Your risk assessments are informed by an understanding of the vulnerabilities in the networks and information systems supporting your essential function.", # noqa: E501
"score": 1
}, {
"answer": "The output from your risk management process is a clear set of security requirements that will address the risks in line with your organisational approach to security.", # noqa: E501
"score": 1
}, {
"answer": "Significant conclusions reached in the course of your risk management process are communicated to key security decision-makers and accountable individuals.", # noqa: E501
"score": 1
}, {
"answer": "You conduct risk assessments when significant events potentially affect the essential function, such as replacing a system or a change in the cyber security threat.", # noqa: E501
"score": 1
}, {
"answer": "You perform threat analysis and understand how generic threats apply to your organisation.", # noqa: E501
"score": 1
}, {
"answer": "Your organisational process ensures that security risks | |
v)][i + 1] - 1] > 0:
decomp.add_edge(v, u)
edge_add = True
if not edge_add:
if level_i != []:
level_u = 1e9 #g.number_of_nodes()
v_u = -1
for v in decomp.nodes(data=True):
if v[0] == u:
continue
if out[s[min(u, v[0])][max(u, v[0])][v[1]['level']] - 1] > 0:
if level_u > v[1]['level']:
level_u = v[1]['level']
v_u = v[0]
decomp.add_edge(v_u, u)
level_i.append(level)
# print level
# show_graph(decomp,1)
# verify_decomp(g=g, s=decomp, width=width, root=root)
if return_decomp: return decomp
def verify_decomp(g, s, width, roots):
    """Validate that *s* is a treedepth decomposition of *g*.

    Checks two properties:
    1. every edge of *g* connects an ancestor/descendant pair in *s*
       (a directed path exists in one of the two directions);
    2. every leaf of *s* is reachable from at least one root in *roots*,
       and no root-to-leaf path is longer than *width* edges.

    Raises ``Exception`` for an uncovered edge or an unrooted leaf and
    ``ValueError`` when the depth bound is exceeded; on success prints a
    confirmation on stderr.

    BUG FIX: the depth check used to raise ``ValueError`` *inside* a
    ``try`` whose bare ``except:`` swallowed it (counting it as "root
    unreachable"), so the bound was never actually enforced.  The check
    now runs outside the handler, and only the networkx "no path"
    exceptions are caught.
    """
    if not isinstance(roots, (list, tuple, set)):
        roots = [roots]
    # Property 1: each edge of g must lie on a directed path in s.
    for u, v in g.edges():
        try:
            nx.shortest_path(s, u, v)
        except (nx.NetworkXNoPath, nx.NodeNotFound):
            try:
                nx.shortest_path(s, v, u)
            except (nx.NetworkXNoPath, nx.NodeNotFound):
                raise Exception("Edge %i %i not covered\n" % (u, v))
    # Property 2: every leaf reachable from some root within the bound.
    for v, d in s.degree():
        if d != 1:
            continue
        unreached = 0
        for root in roots:
            try:
                depth = len(nx.shortest_path(s, root, v)) - 1
            except (nx.NetworkXNoPath, nx.NodeNotFound):
                unreached += 1
                continue
            if depth > width:
                raise ValueError("depth of tree more than width\n")
        if unreached == len(roots):
            raise Exception("No root found for %i\n" % v)
    sys.stderr.write("Valid treedepth decomp\n")
    sys.stderr.flush()
def show_graph(graph, layout=1, nolabel=0):
    """Draw *graph* with matplotlib using the selected layout.

    layout: 1 graphviz, 2 circular, 3 spring, 4 spectral, 5 random,
    6 shell; any other value falls back to the spring layout.
    When *nolabel* is falsy, edge and node labels are drawn as well.
    """
    m = graph.copy()
    # Dispatch table instead of an if/elif chain; unknown keys fall back
    # to the spring layout, matching the original ``else`` branch.
    layout_funcs = {
        1: graphviz_layout,
        2: nx.circular_layout,
        3: nx.spring_layout,
        4: nx.spectral_layout,
        5: nx.random_layout,
        6: nx.shell_layout,
    }
    pos = layout_funcs.get(layout, nx.spring_layout)(m)
    if not nolabel:
        nx.draw_networkx_edge_labels(m, pos)
    nx.draw_networkx_labels(m, pos)
    nx.draw_networkx_nodes(m, pos)
    nx.draw(m, pos)
    plt.show()
class Timer(object):
    """Context manager measuring the wall-clock duration of a block.

    On exit, ``end`` and ``duration`` are set; if *time_list* was given,
    the duration is appended to it.  Exceptions are never suppressed: on
    abnormal exit the failure is reported on stderr and recorded in
    ``self.err`` as an (exc_type, exc_val, exc_tb) triple, otherwise
    ``self.err`` is ``None``.
    """

    def __init__(self, time_list=None):
        self.time_list = time_list

    def __enter__(self):
        self.start = time.time()
        self.end = self.duration = None
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.end = time.time()
        self.duration = self.end - self.start
        if self.time_list is not None:
            self.time_list.append(self.duration)
        self.err = None if exc_val is None else (exc_type, exc_val, exc_tb)
        if self.err is not None:
            print("\ntimed block terminated abruptly after", self.duration,
                  "seconds", file=sys.stderr)
            print(self.err, file=sys.stderr)
def solve_component(g, cli_args, debug=False, wandb=None):
    """Compute treedepth bounds for one connected component via iterated SAT.

    For each candidate depth bound ``i`` (descending), a CNF encoding is
    written to disk and handed to an external SAT solver.  The solver's exit
    code drives the search: rc 10 looks like SAT (bound feasible, lower the
    upper bound), rc 20 like UNSAT (bound infeasible, stop), rc 0 a CPU-limit
    timeout -- presumably glucose-style conventions, TODO confirm for other
    solvers.

    Returns ``(i, lb, ub, to, encoding_times, solving_times)``: last bound
    tried, lower/upper bound, timeout flag and per-call duration lists.
    """
    lb = 0
    ub = 0
    to = False                      # set when the solver hit its CPU limit
    encoding_times = list()
    solving_times = list()
    n = g.number_of_nodes()
    # Trivial component: treedepth equals the node count (0 or 1).
    if n <= 1:
        return n, n, n, to, encoding_times, solving_times
    temp = os.path.abspath(cli_args.temp)
    instance = cli_args.instance
    # looprange = range(g.number_of_nodes() + 2, 1, -1) # original
    # Largest node weight shifts both the loop start and its stop value.
    maxweight = max(map(itemgetter(1), g.nodes.data("weight", default=0)))
    if cli_args.depth >= 0:
        # Caller-supplied starting depth (skip the trivially large bounds).
        loopstart = cli_args.depth + 1
        if debug and loopstart < n + maxweight + 1:
            print(f"saved {n+maxweight+1 - loopstart} on loopstart")
    else:
        loopstart = n + maxweight + 1
    looprange = range(loopstart, maxweight, -1)
    if debug: print("looprange", looprange, n)
    for i in looprange:
        with Timer(time_list=encoding_times):
            encoding = generate_encoding(g, i)
        # One .cnf/.sol file pair per candidate bound.
        cnf = os.path.join(temp, instance + '_' + str(i) + ".cnf")
        with open(cnf, 'w') as ofile:
            ofile.write(encoding.get_cnf())
        sol = os.path.join(temp, instance + '_' + str(i) + ".sol")
        if cli_args.solver == "default":
            solver = "glucose"
        else:
            solver = cli_args.solver
        cmd = [solver, '-cpu-lim={}'.format(cli_args.timeout), cnf, sol]
        with Timer(time_list=solving_times):
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            output, err = p.communicate()
            rc = p.returncode
        sys.stderr.write('*' * 10 + '\n')
        sys.stderr.write("%i %i\n" % (i - 1, rc))
        if rc == 0:
            # Timeout: optionally give up immediately with lb = range stop.
            to = True
            if cli_args.early_exit:
                lb = looprange.stop
                return i, lb, ub, to, encoding_times, solving_times
        if rc == 10:
            # Feasible bound found: record it as the new upper bound.
            ub = i - 1
            if cli_args.logging and wandb is not None: wandb.log({"best_depth": ub})
        if rc == 20:
            # Infeasible: current bound is a lower bound, search is done.
            lb = i
            return i, lb, ub, to, encoding_times, solving_times
    raise ValueError("should not reach here")
def solve_max_component(g, cli_args, debug=False, reuse_encoding=False):
    """Solve one connected component with a single MaxSAT call.

    Unlike :func:`solve_component`, the depth bound is encoded once (soft
    clauses let the MaxSAT solver minimise the depth itself).  When
    *reuse_encoding* is true, an existing CNF file from a previous run is
    reused instead of being regenerated.

    Returns ``(res, loopstart, encoding_times, solving_times)`` where *res*
    is the solver callable's result and *loopstart* the depth bound used
    for the encoding.
    """
    encoding_times = list()
    solving_times = list()
    n = g.number_of_nodes()
    # Trivial component: nothing to solve.
    if n <= 1:
        return True, n, encoding_times, solving_times
    temp = os.path.abspath(cli_args.temp)
    instance = cli_args.instance
    # Largest node weight contributes to the initial depth bound.
    maxweight = max(map(itemgetter(1), g.nodes.data("weight", default=0)))
    if cli_args.depth >= 0:
        loopstart = cli_args.depth + 1
    else:
        loopstart = n + maxweight + 1
    with Timer(time_list=encoding_times):
        cnf = os.path.join(temp, instance + '_' + "max" + ".cnf")
        if not reuse_encoding:
            encoding = generate_maxsat_encoding(g, loopstart)
            with open(cnf, 'w') as ofile:
                ofile.write(encoding.get_cnf())
    sol = os.path.join(temp, instance + '_' + "max" + ".sol")
    # Solver is a callable looked up by name, not an external binary here.
    solver = MAXSAT_SOLVERS[cli_args.solver]
    with Timer(time_list=solving_times):
        res = solver(cnf, sol, cli_args, debug=debug)
    return res, loopstart, encoding_times, solving_times
def signal_handler(signum, frame):
    """Handle a termination signal: announce it and exit with status 0."""
    # *frame* is unused; the two-argument signature is fixed by the
    # signal-handler calling convention.
    print("aborting due to signal", signum)
    print("* final treedepth ?")
    raise SystemExit(0)
def main(args, debug=False):
    """Driver: read or build a graph, preprocess it, then solve each
    connected component with iterated SAT calls (:func:`solve_component`).

    Returns the list of per-component decomposition trees (DiGraphs), or
    ``None`` when ``args.width != -1`` short-circuits after preprocessing.
    Prints the final treedepth (exact value or [lb-ub] range) on stderr.
    """
    cpu_time = time.time()
    instance = args.instance
    if instance is not None:
        # Load edge list from file; instance name = file basename sans extension.
        edge = read_edge(instance)
        g = nx.MultiGraph()
        g.add_edges_from(edge)
        instance = os.path.basename(instance)
        instance = instance.split('.')
        instance = instance[0]
    else:
        # No input file: use the caller-provided graph or a small default tree.
        g = args.graph if "graph" in args else nx.balanced_tree(2, 2)
        # g = nx.complete_bipartite_graph(2,2)
        # g = nx.complete_graph(7)
        # g = nx.balanced_tree(2, 2)
        # g = nx.cycle_graph(15)
        # g = nx.path_graph(70)
        instance = 'random'
    # show_graph(g,6)
    args.instance = instance
    n = g.number_of_nodes()
    m = g.number_of_edges()
    buff = 0    # depth contributed by preprocessing (apex vertices)
    with Timer() as prep_timer:
        if args.preprocess:
            print("preprocessing...", file=sys.stderr)
            g = degree_one_reduction(g=g)
            g, buff = apex_vertices(g=g)
            # print("* buffer verts:", buff)
    if debug: print('treedepthp2sat', instance, n, m, g.number_of_nodes(), buff)
    if args.width != -1:
        # Caller only wanted preprocessing/statistics, not a full solve.
        return
    ncomps = nx.number_connected_components(g)
    icomp = 0
    # Bounds aggregated over all components (1e9 acts as +infinity).
    global_lb, global_ub = 1e9, -1
    if ncomps == 0:  # only empty graph remains after preprocessing, for loop won't be triggered
        global_lb = global_ub = 0
    decomptrees = []
    for comp_nodes in nx.connected_components(g):
        icomp += 1
        subgraph = g.subgraph(comp_nodes)
        if debug: print("\ncomponent", icomp, "of", ncomps, file=sys.stderr)
        if len(subgraph) == 1:
            # Single-node component: treedepth is its weight + 1, no SAT call.
            singlenode = next(iter(subgraph.nodes))
            nodeweight = subgraph.nodes[singlenode].get("weight", 0)
            if debug: print("single node component")
            global_lb = min(global_lb, nodeweight+1)
            global_ub = max(global_ub, nodeweight+1)
            decomptree = nx.DiGraph()
            decomptree.add_node(singlenode, weight=nodeweight)
            decomptrees.append(decomptree)
            continue  # proceed to next component
        # Relabel nodes to 0..n-1 for the encoder; keep the inverse mapping
        # so results can be translated back afterwards.
        component = nx.convert_node_labels_to_integers(subgraph, first_label=0,
                                                       label_attribute="original_label")
        label_mapping = dict(component.nodes.data("original_label"))
        inverse_mapping = {v: u for u, v in label_mapping.items()}
        # Forced ancestor/descendant pairs must be remapped to the new labels
        # and restricted to pairs that survived inside this component.
        found_ancestries = component.graph.get("forced_ancestries", [])
        if debug: print("found ancestries:", found_ancestries)
        remapped_ancestries = []
        for v, u in found_ancestries:
            if v in subgraph and u in subgraph:
                remapped_ancestries.append((inverse_mapping[v], inverse_mapping[u]))
        if debug: print("remapped ancestries:", remapped_ancestries)
        component.graph["forced_ancestries"] = remapped_ancestries
        if debug: print("weights:", component.nodes.data("weight", default=0))
        i, lb, ub, to, encoding_time, solving_time = solve_component(component, args)
        if ub == 0:  # not a single YES-instance, so no tree decomp
            print("#### no yes-instances, nothing appended")
            continue
        # Decode the solver output of the best (smallest) feasible bound.
        sol_file = os.path.join(args.temp, instance + '_' + str(ub + 1) + ".sol")
        decomptree = decode_output(sol=sol_file, g=component, reqwidth=ub + 1,
                                   return_decomp=True)
        # reapply weights
        for u, weight in component.nodes.data("weight"):
            if weight is not None: decomptree.nodes[u]["weight"] = weight
        # Translate the decomposition back to the original node labels.
        decomptree = nx.relabel_nodes(decomptree, label_mapping)
        if debug: print(i - 2, lb, ub, to, time.time() - cpu_time, prep_timer.duration,
                        sum(encoding_time), sum(solving_time), end="")
        for j in solving_time:
            if debug: print(j, end="")
        if debug: print()
        if debug: print("* component treedepth range: [{}-{}]".format(lb, ub), file=sys.stderr)
        global_lb = min(global_lb, lb)
        global_ub = max(global_ub, ub)
        decomptrees.append(decomptree)
    # Final report: exact value if the bounds met, otherwise the range;
    # ``buff`` re-adds the depth removed by apex-vertex preprocessing.
    print("* final treedepth:", end="", file=sys.stderr)
    if global_ub == global_lb:
        print(buff + global_ub, end="", file=sys.stderr)
    else:
        print("[{}-{}]".format(buff + global_lb, buff + global_ub), end="", file=sys.stderr)
    print("\ttotal-time: {:.2f}s".format(time.time() - cpu_time), file=sys.stderr)
    return decomptrees
def main_max(args, debug=False):
cpu_time = time.time()
instance = args.instance
if instance is not None:
edge = read_edge(instance)
g = nx.MultiGraph()
g.add_edges_from(edge)
instance = os.path.basename(instance)
instance = instance.split('.')
instance = instance[0]
else:
g = args.graph if "graph" in args else nx.balanced_tree(2, 2)
instance = 'random'
args.instance = instance
n = g.number_of_nodes()
m = g.number_of_edges()
buff = 0
with Timer() as prep_timer:
if args.preprocess:
print("preprocessing...", file=sys.stderr)
g = degree_one_reduction(g=g)
g, buff = apex_vertices(g=g)
# print("* buffer verts:", buff)
if debug: print('treedepthp2sat', instance, n, m, g.number_of_nodes(), buff)
if args.width != -1:
return
ncomps = nx.number_connected_components(g)
icomp = 0
global_lb, global_ub = 1e9, -1
if ncomps == 0: # only empty graph remains after preprocessing, for loop won't be triggered
global_lb = global_ub = 0
decomptrees = []
for comp_nodes in nx.connected_components(g):
icomp += 1
subgraph = g.subgraph(comp_nodes)
if debug: print("\ncomponent", icomp, "of", ncomps, file=sys.stderr)
if len(subgraph) == 1:
singlenode = next(iter(subgraph.nodes))
nodeweight = subgraph.nodes[singlenode].get("weight", 0)
if debug: print("single node component")
global_lb = min(global_lb, nodeweight+1)
global_ub = max(global_ub, nodeweight+1)
decomptree = nx.DiGraph()
decomptree.add_node(singlenode, weight=nodeweight)
decomptrees.append(decomptree)
continue # proceed to next component
component = nx.convert_node_labels_to_integers(subgraph, first_label=0,
label_attribute="original_label")
label_mapping = dict(component.nodes.data("original_label"))
inverse_mapping = {v: u for u, v in label_mapping.items()}
found_ancestries = component.graph.get("forced_ancestries", [])
if | |
<reponame>dllehr-amd/pytorch
# Owner(s): ["module: sparse"]
import torch
import warnings
import unittest
import random
import itertools
from torch.testing import get_all_complex_dtypes, get_all_fp_dtypes, make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(IS_MACOS, IS_WINDOWS, TEST_WITH_ROCM, TestCase, run_tests, load_tests, coalescedonoff)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoCusparseGeneric,
precisionOverride, skipMeta, skipCUDAIf)
from torch.testing._internal.common_cuda import _get_torch_cuda_version
from torch.testing._internal.common_dtype import floating_types, get_all_dtypes
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
def _check_cusparse_triangular_solve_available():
    """Return True when the CUDA toolkit bundled with torch ships cusparseSpSM.

    cusparseSpSM appeared in CUDA 11.3.1, but only (major, minor) is exposed
    by torch, so (11, 4) is used as the conservative cutoff.
    """
    return _get_torch_cuda_version() >= (11, 4)
def _check_cusparse_spgemm_available():
    """Return True when the detected CUDA version provides cusparseSpGEMM (added in CUDA 11.0)."""
    return _get_torch_cuda_version() >= (11, 0)
# This should be just an import from test_linalg instead of code duplication
# but https://github.com/pytorch/pytorch/pull/63511#discussion_r733989701
def _test_addmm_addmv(test_case, f, t, m, v, *, alpha=None, beta=None, transpose_out=False, layout=torch.strided, all_sparse=False):
"""
Unified test for checking `f(t, m, v, alpha=alpha, beta=beta)` computation,
where f is `torch.addmv` or `torch.addmm`.
`transpose_out` controls whether the out argument is in column-major order.
`layout` controls whether `m` is converted to specified layout or not.
Custom behaviour is implemented only for torch.sparse_csr layout.
"""
dtype = t.dtype
numpy_dtype = dtype
if dtype in {torch.bfloat16}:
numpy_dtype = torch.float
if dtype.is_complex:
alpha = 0.9 + 0.3j if alpha is None else alpha
beta = 0.5 + 0.6j if beta is None else beta
else:
alpha = 1.2 if alpha is None else alpha
beta = 0.8 if beta is None else beta
def convert_layout(mat):
if layout == torch.sparse_csr:
return mat.to_sparse_csr()
else:
assert mat.layout == layout
return mat
if all_sparse:
res1 = f(*map(convert_layout, (t, m, v)), alpha=alpha, beta=beta)
res1 = res1.to_dense()
else:
res1 = f(t, convert_layout(m), v, alpha=alpha, beta=beta)
res2 = torch.full_like(res1, float('nan'))
if transpose_out:
res2 = res2.t().clone(memory_format=torch.contiguous_format).t()
f(t, convert_layout(m), v, alpha=alpha, beta=beta, out=res2)
res3 = alpha * (m.to(numpy_dtype).cpu().numpy() @ v.to(numpy_dtype).cpu().numpy())
if beta != 0:
res3 += (beta * t).to(numpy_dtype).cpu().numpy()
res3 = torch.from_numpy(res3).to(dtype)
test_case.assertEqual(res1, res2)
test_case.assertEqual(res1, res3)
class TestSparseCSRSampler(TestCase):
    def test_make_crow_indices(self):
        """Validate the crow_indices sampling helper.

        CPU + int32 coverage is sufficient: the algorithm under test is
        device- and dtype-independent.
        """
        device = torch.device('cpu')
        index_dtype = torch.int32
        for n_rows, n_cols in itertools.product(range(1, 10), range(1, 10)):
            for nnz in range(n_rows * n_cols + 1):
                crow = self._make_crow_indices(
                    n_rows, n_cols, nnz,
                    device=device, dtype=index_dtype)
                # One boundary per row plus the leading zero.
                self.assertEqual(len(crow), n_rows + 1)
                per_row = crow[1:] - crow[:-1]
                # Counts must be non-negative, fit in a row, and sum to nnz.
                self.assertEqual(per_row.sum(), nnz)
                self.assertGreaterEqual(per_row.min(), 0)
                self.assertLessEqual(per_row.max(), n_cols)
class TestSparseCSR(TestCase):
@onlyCPU
def test_csr_layout(self):
self.assertEqual(str(torch.sparse_csr), 'torch.sparse_csr')
self.assertEqual(type(torch.sparse_csr), torch.layout)
@dtypes(*get_all_dtypes())
def test_sparse_csr_constructor_shape_inference(self, device, dtype):
crow_indices = [0, 2, 4]
col_indices = [0, 1, 0, 1]
values = [1, 2, 3, 4]
sparse = torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
torch.tensor(col_indices, dtype=torch.int64),
torch.tensor(values), dtype=dtype, device=device)
self.assertEqual(torch.tensor(crow_indices, dtype=torch.int64), sparse.crow_indices())
self.assertEqual((len(crow_indices) - 1, max(col_indices) + 1), sparse.shape)
self.assertEqual(dtype, sparse.dtype)
self.assertEqual(torch.device(device), sparse.device)
@dtypes(*get_all_dtypes())
def test_sparse_csr_constructor(self, device, dtype):
crow_indices = [0, 2, 4]
col_indices = [0, 1, 0, 1]
values = [1, 2, 3, 4]
for index_dtype in [torch.int32, torch.int64]:
sparse = torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=index_dtype),
torch.tensor(col_indices, dtype=index_dtype),
torch.tensor(values),
size=(2, 10),
dtype=dtype,
device=device)
self.assertEqual((2, 10), sparse.shape)
self.assertEqual(torch.tensor(crow_indices, dtype=index_dtype), sparse.crow_indices())
self.assertEqual(torch.tensor(col_indices, dtype=index_dtype), sparse.col_indices())
self.assertEqual(torch.tensor(values, dtype=dtype), sparse.values())
@dtypes(*get_all_dtypes())
def test_sparse_csr_constructor_from_lists(self, device, dtype):
# without size
sparse = torch.sparse_csr_tensor([0, 2, 4],
[0, 1, 0, 1],
[1, 2, 3, 4],
dtype=dtype,
device=device)
self.assertEqual((2, 2), sparse.shape)
self.assertEqual(4, sparse.numel())
self.assertEqual(torch.tensor([0, 2, 4], dtype=torch.int64, device=device), sparse.crow_indices())
self.assertEqual(torch.tensor([0, 1, 0, 1], dtype=torch.int64, device=device), sparse.col_indices())
self.assertEqual(torch.tensor([1, 2, 3, 4], dtype=dtype, device=device), sparse.values())
# with size
for sparse_csr_tensor in [torch.sparse_csr_tensor, torch._sparse_csr_tensor_unsafe]:
sparse = sparse_csr_tensor([0, 2, 4],
[0, 1, 0, 1],
[1, 2, 3, 4],
size=(2, 10),
dtype=dtype,
device=device)
self.assertEqual((2, 10), sparse.shape)
self.assertEqual(torch.tensor([0, 2, 4], dtype=torch.int64, device=device), sparse.crow_indices())
self.assertEqual(torch.tensor([0, 1, 0, 1], dtype=torch.int64, device=device), sparse.col_indices())
self.assertEqual(torch.tensor([1, 2, 3, 4], dtype=dtype, device=device), sparse.values())
@skipMeta
@dtypes(*get_all_dtypes())
def test_empty(self, device, dtype):
ns = [5, 2, 0]
for shape in itertools.product(ns, ns):
result = torch.empty(shape, dtype=dtype, device=device, layout=torch.sparse_csr)
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, dtype)
self.assertEqual(result.device, torch.device(device))
self.assertEqual(result.layout, torch.sparse_csr)
self.assertEqual(result.crow_indices().shape, (shape[0] + 1,))
self.assertEqual(result.col_indices().shape, (0,))
self.assertEqual(result.values().shape, (0,))
self.assertEqual(result._nnz(), 0)
self.assertEqual(result.crow_indices().device, torch.device(device))
self.assertEqual(result.col_indices().device, torch.device(device))
self.assertEqual(result.values().device, torch.device(device))
self.assertEqual(result.crow_indices().dtype, torch.int64)
self.assertEqual(result.col_indices().dtype, torch.int64)
self.assertEqual(result.values().dtype, dtype)
@skipMeta
@dtypes(*get_all_dtypes())
def test_empty_errors(self, device, dtype):
with self.assertRaisesRegex(RuntimeError, "torch.empty: Only 2D sparse CSR tensors are supported."):
torch.empty((5,), dtype=dtype, device=device, layout=torch.sparse_csr)
with self.assertRaisesRegex(RuntimeError, "torch.empty: Only 2D sparse CSR tensors are supported."):
torch.empty((2, 3, 4), dtype=dtype, device=device, layout=torch.sparse_csr)
@skipMeta
@dtypes(*get_all_dtypes())
def test_copy(self, device, dtype):
def run_test(shape, nnz, index_type):
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
b = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a.copy_(b)
self.assertEqual(a.crow_indices(), b.crow_indices())
self.assertEqual(a.col_indices(), b.col_indices())
self.assertEqual(a.values(), b.values())
ns = [5, 2, 0]
for shape, index_dtype in zip(itertools.product(ns, ns), [torch.int32, torch.int64]):
run_test(shape, 0, index_dtype)
run_test(shape, shape[0] * shape[1], index_dtype)
@skipMeta
@dtypes(*get_all_dtypes())
def test_copy_errors(self, device, dtype):
for index_dtype in [torch.int32, torch.int64]:
shape1 = (2, 3)
shape2 = (3, 2)
a = self.genSparseCSRTensor(shape1, 0, dtype=dtype, device=device, index_dtype=index_dtype)
b = self.genSparseCSRTensor(shape2, 0, dtype=dtype, device=device, index_dtype=index_dtype)
with self.assertRaisesRegex(RuntimeError, "only same size tensors are supported."):
a.copy_(b)
with self.assertRaisesRegex(RuntimeError, "copy between different layouts is not supported."):
a.copy_(torch.empty(a.shape, dtype=dtype, device=device))
b = self.genSparseCSRTensor(shape1, 1, dtype=dtype, device=device, index_dtype=index_dtype)
with self.assertRaisesRegex(RuntimeError, "only tensors with the same number of specified elements are supported."):
a.copy_(b)
@skipMeta
@dtypes(*get_all_dtypes())
def test_resize(self, device, dtype):
for index_dtype in [torch.int32, torch.int64]:
shape = (2, 3)
nnz = 6
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
new_shape = (4, 5)
a.resize_(new_shape)
self.assertEqual(a.shape, new_shape)
# resize to larger shape doesn't add specified elements
self.assertEqual(a._nnz(), nnz)
new_shape = (1, 5)
a.resize_(new_shape)
self.assertEqual(a.shape, new_shape)
# resize to smaller shape trims specified elements
self.assertEqual(a._nnz(), 5)
@skipMeta
@dtypes(*get_all_dtypes())
def test_resize_errors(self, device, dtype):
for index_dtype in [torch.int32, torch.int64]:
shape = (2, 3)
nnz = 6
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
with self.assertRaisesRegex(RuntimeError, "torch.resize_: Only 2D sparse CSR tensors are supported."):
new_shape = (4,)
a.resize_(new_shape)
# resizing of columns to smaller size is not implemented
with self.assertRaisesRegex(
RuntimeError,
"torch.resize_: Resizing columns of sparse CSR tensors to a smaller value is not supported.",
):
new_shape = (2, 2)
a.resize_(new_shape)
    def test_factory_type_invariants_check(self, device):
        """Constructor must reject mismatched or unsupported index dtypes."""
        # crow/col index tensors of different integer dtypes are rejected.
        with self.assertRaisesRegex(RuntimeError, "both crow_indices and col_indices should have the same type."):
            torch.sparse_csr_tensor(torch.tensor([0, 2, 4], dtype=torch.int64),
                                    torch.tensor([0, 1, 0, 1], dtype=torch.int32),
                                    torch.tensor([1, 2, 3, 4]),
                                    device=device)
        # int16 (Short) is not a supported index dtype at all.
        with self.assertRaisesRegex(RuntimeError, r"\"csr_construct_check\" not implemented for 'Short'"):
            torch.sparse_csr_tensor(torch.tensor([0, 2, 4], dtype=torch.int16),
                                    torch.tensor([0, 1, 0, 1], dtype=torch.int16),
                                    torch.tensor([1, 2, 3, 4]),
                                    device=device)
    def test_factory_layout_invariants_check(self, device):
        """Component tensors must be strided AND contiguous."""
        # Expanded (stride-0) values tensor is rejected.
        with self.assertRaisesRegex(RuntimeError, "expected values to be a strided and contiguous tensor"):
            values = torch.tensor([1.], device=device).expand(4,)
            torch.sparse_csr_tensor(torch.tensor([0, 2, 4], device=device),
                                    torch.tensor([0, 1, 0, 1], device=device),
                                    values)
        # Expanded col_indices tensor is rejected.
        with self.assertRaisesRegex(RuntimeError, "expected col_indices to be a strided and contiguous tensor"):
            col_indices = torch.tensor([0], device=device).expand(4,)
            torch.sparse_csr_tensor(torch.tensor([0, 2, 4]),
                                    col_indices,
                                    torch.tensor([1, 2, 3, 4]))
        # Step-sliced (non-contiguous) crow_indices view is rejected.
        with self.assertRaisesRegex(RuntimeError, "expected crow_indices to be a strided and contiguous tensor"):
            crow_indices = torch.arange(6, device=device)
            torch.sparse_csr_tensor(crow_indices[::2],
                                    torch.tensor([0, 1, 0, 1], device=device),
                                    torch.tensor([1, 2, 3, 4]))
    def test_factory_shape_invariants_check(self, device):
        """Constructor must validate dimensionality and cross-tensor size relations."""
        crow_indices = [0, 2, 4]
        col_indices = [0, 1, 0, 1]
        values = [1, 2, 3, 4]
        size = (2, 10)
        # Baseline: this combination is well formed and must construct.
        torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor(col_indices), torch.tensor(values), size,
                                device=device)
        # Only 2D sizes are accepted.
        with self.assertRaisesRegex(RuntimeError, r"size of a CSR tensor must be of length 2, but got: 3"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor(col_indices), torch.tensor(values),
                                    size=(2, 10, 2),
                                    device=device)
        # Each component tensor must be one-dimensional.
        with self.assertRaisesRegex(RuntimeError, r"crow_indices must have dim\=1 but got crow_indices\.dim\(\)\=2"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices).repeat(2, 1),
                                    torch.tensor(col_indices),
                                    torch.tensor(values),
                                    size,
                                    device=device)
        with self.assertRaisesRegex(RuntimeError, r"col_indices must have dim\=1 but got col_indices\.dim\(\)\=2"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices),
                                    torch.tensor(col_indices).repeat(2, 1),
                                    torch.tensor(values),
                                    size,
                                    device=device)
        with self.assertRaisesRegex(RuntimeError, r"values must have dim\=1 but got values\.dim\(\)\=2"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices),
                                    torch.tensor(col_indices),
                                    torch.tensor(values).repeat(2, 1),
                                    size,
                                    device=device)
        # crow_indices must contain exactly size(0) + 1 entries.
        with self.assertRaisesRegex(RuntimeError,
                                    r"crow_indices\.numel\(\) must be size\(0\) \+ 1, but got: 3"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor(col_indices), torch.tensor(values), (1, 1),
                                    device=device)
        # col_indices and values must be the same length.
        with self.assertRaisesRegex(RuntimeError,
                                    r"col_indices and values must have equal sizes, " +
                                    r"but got col_indices\.numel\(\): 3, values\.numel\(\): 4"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor([0, 1, 0]), torch.tensor(values), size,
                                    device=device)
def test_factory_indices_invariants_check(self, device):
crow_indices = [0, 2, 4]
col_indices = [0, 1, 0, 1]
values = [1, 2, 3, 4]
size = (2, 10)
with self.assertRaisesRegex(RuntimeError, "0th value of crow_indices must be 0."):
torch.sparse_csr_tensor(torch.tensor([-1, 0, 4]), torch.tensor(col_indices), torch.tensor(values), size,
device=device)
with self.assertRaisesRegex(RuntimeError,
"last value of crow_indices should be equal to the length of col_indices."):
torch.sparse_csr_tensor(torch.tensor([0, 2, 5]), torch.tensor(col_indices), torch.tensor(values), size,
device=device)
with self.assertRaisesRegex(RuntimeError,
r"at position i \= 2," +
r" this condition crow_indices\[i - 1\] <\= crow_indices\[i\] fails"):
torch.sparse_csr_tensor(torch.tensor([0, 5, | |
#!/usr/bin/env python3
import json
from time import time, sleep # only use the function that gives the current time
import random
import sys
from cscore import CameraServer, VideoSource, UsbCamera, MjpegServer
from networktables import NetworkTablesInstance
import ntcore
import numpy as np
import cv2
from enum import Enum
import statistics
import math
class GripPipeline:
    """
    An OpenCV pipeline generated by GRIP.

    Stages: resize -> blur -> HSL threshold -> find contours -> filter
    contours.  Each stage publishes its result on a ``*_output`` attribute;
    call ``process`` to refresh them.

    NOTE(review): ``process`` and ``__filter_contours`` reference the
    module-level ``debug``, ``black`` and ``output2``..``output5`` globals
    defined at script scope, so this class is not self-contained.
    """
    def __init__(self):
        """initializes all values to presets or None if need to be set
        """
        # Resize stage parameters (fixed 320x240 target frame).
        self.__resize_image_width = 320.0
        self.__resize_image_height = 240.0
        self.__resize_image_interpolation = cv2.INTER_LINEAR
        self.resize_image_output = None
        # Blur stage: 2px box blur of the resized frame.
        self.__blur_input = self.resize_image_output
        self.__blur_type = BlurType.Box_Blur
        self.__blur_radius = 2
        self.blur_output = None
        # HSL threshold ranges ([min, max]) as tuned in GRIP.
        self.__hsl_threshold_input = self.blur_output
        self.__hsl_threshold_hue = [39.53237410071942, 113.20819112627987]
        self.__hsl_threshold_saturation = [32.10431654676259, 255.0]
        self.__hsl_threshold_luminance = [10, 255.0]
        self.hsl_threshold_output = None
        # Contour detection over the binary threshold image.
        self.__find_contours_input = self.hsl_threshold_output
        self.__find_contours_external_only = False
        self.find_contours_output = None
        # Contour filter thresholds (pixel units of the resized frame).
        self.__filter_contours_contours = self.find_contours_output
        self.__filter_contours_min_area = 60.0
        self.__filter_contours_min_perimeter = 0.0
        self.__filter_contours_min_width = 30.0
        self.__filter_contours_max_width = 500.0
        self.__filter_contours_min_height = 15.0
        self.__filter_contours_max_height = 1000.0
        self.__filter_contours_solidity = [20.68345323741007, 57.09897610921503]
        self.__filter_contours_max_vertices = 235.0
        self.__filter_contours_min_vertices = 30
        self.__filter_contours_min_ratio = 1.25
        self.__filter_contours_max_ratio = 2.9
        self.filter_contours_output = None
    def process(self, source0):
        """
        Runs the pipeline and sets all outputs to new values.
        """
        # Step Resize_Image0:
        self.__resize_image_input = source0
        (self.resize_image_output) = self.__resize_image(self.__resize_image_input, self.__resize_image_width, self.__resize_image_height, self.__resize_image_interpolation)
        # Step Blur0:
        self.__blur_input = self.resize_image_output
        (self.blur_output) = self.__blur(self.__blur_input, self.__blur_type, self.__blur_radius)
        # Step HSL_Threshold0:
        self.__hsl_threshold_input = self.blur_output
        (self.hsl_threshold_output) = self.__hsl_threshold(self.__hsl_threshold_input, self.__hsl_threshold_hue, self.__hsl_threshold_saturation, self.__hsl_threshold_luminance)
        # Step Find_Contours0:
        self.__find_contours_input = self.hsl_threshold_output
        (self.find_contours_output) = self.__find_contours(self.__find_contours_input, self.__find_contours_external_only)
        # Clear the shared debug canvas so __filter_contours can draw rejected
        # contours onto a fresh image (``black`` is a module-level global).
        if debug:black.fill(0)
        # Step Filter_Contours0:
        self.__filter_contours_contours = self.find_contours_output
        (self.filter_contours_output) = self.__filter_contours(self.__filter_contours_contours, self.__filter_contours_min_area, \
            self.__filter_contours_min_perimeter, self.__filter_contours_min_width, \
            self.__filter_contours_max_width, self.__filter_contours_min_height, \
            self.__filter_contours_max_height, self.__filter_contours_solidity, \
            self.__filter_contours_max_vertices, self.__filter_contours_min_vertices, \
            self.__filter_contours_min_ratio, self.__filter_contours_max_ratio)
        # Publish intermediate streams to the dashboard when debugging.
        if debug:
            output2.putFrame(self.blur_output)
            output3.putFrame(self.hsl_threshold_output)
            output4.putFrame(black)
            black.fill(0)
            cv2.drawContours(black, self.filter_contours_output, -1, (255,255,255), 1)
            output5.putFrame(black)
    @staticmethod
    def __resize_image(input, width, height, interpolation):
        """Scales an image to an exact size.
        Args:
            input: A numpy.ndarray.
            Width: The desired width in pixels.
            Height: The desired height in pixels.
            interpolation: Opencv enum for the type of interpolation.
        Returns:
            A numpy.ndarray of the new size.
        """
        return cv2.resize(input, ((int)(width), (int)(height)), 0, 0, interpolation)
    @staticmethod
    def __blur(src, type, radius):
        """Softens an image using one of several filters.
        Args:
            src: The source mat (numpy.ndarray).
            type: The blurType to perform represented as an int.
            radius: The radius for the blur as a float.
        Returns:
            A numpy.ndarray that has been blurred.
        """
        if(type is BlurType.Box_Blur):
            ksize = int(2 * round(radius) + 1)
            return cv2.blur(src, (ksize, ksize))
        elif(type is BlurType.Gaussian_Blur):
            ksize = int(6 * round(radius) + 1)
            return cv2.GaussianBlur(src, (ksize, ksize), round(radius))
        elif(type is BlurType.Median_Filter):
            ksize = int(2 * round(radius) + 1)
            return cv2.medianBlur(src, ksize)
        else:
            return cv2.bilateralFilter(src, -1, round(radius), round(radius))
    @staticmethod
    def __hsl_threshold(input, hue, sat, lum):
        """Segment an image based on hue, saturation, and luminance ranges.
        Args:
            input: A BGR numpy.ndarray.
            hue: A list of two numbers that are the min and max hue.
            sat: A list of two numbers that are the min and max saturation.
            lum: A list of two numbers that are the min and max luminance.
        Returns:
            A black and white numpy.ndarray.
        """
        out = cv2.cvtColor(input, cv2.COLOR_BGR2HLS)
        # OpenCV's HLS channel order is (hue, luminance, saturation),
        # hence the reordering of the bounds below.
        return cv2.inRange(out, (hue[0], lum[0], sat[0]), (hue[1], lum[1], sat[1]))
    @staticmethod
    def __find_contours(input, external_only):
        """Finds contours in a binary image.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        # NOTE(review): 3-value unpacking matches OpenCV 3.x; OpenCV 4.x
        # returns only (contours, hierarchy) — confirm the deployed version.
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
    @staticmethod
    def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,
                        min_height, max_height, solidity, max_vertex_count, min_vertex_count,
                        min_ratio, max_ratio):
        """Filters out contours that do not meet certain criteria.
        Args:
            input_contours: Contours as a list of numpy.ndarray.
            min_area: The minimum area of a contour that will be kept.
            min_perimeter: The minimum perimeter of a contour that will be kept.
            min_width: Minimum width of a contour.
            max_width: MaxWidth maximum width.
            min_height: Minimum height.
            max_height: Maximimum height.
            solidity: The minimum and maximum solidity of a contour.
            min_vertex_count: Minimum vertex Count of the contours.
            max_vertex_count: Maximum vertex Count.
            min_ratio: Minimum ratio of width to height.
            max_ratio: Maximum ratio of width to height.
        Returns:
            Contours as a list of numpy.ndarray.
        """
        # When debugging, each rejection reason draws the contour in a
        # distinct color onto the module-level ``black`` canvas.
        output = []
        for contour in input_contours:
            x,y,w,h = cv2.boundingRect(contour)
            # Reject by bounding-box width (blue).
            if (w < min_width or w > max_width):
                if debug:cv2.drawContours(black, [contour], -1, (255,0,0), 1)
                continue
            # Reject by bounding-box height (green).
            if (h < min_height or h > max_height):
                if debug:cv2.drawContours(black, [contour], -1, (0,255,0), 1)
                continue
            # Reject by aspect ratio (white).
            ratio = float(w) / h
            if (ratio < min_ratio or ratio > max_ratio):
                if debug:cv2.drawContours(black, [contour], -1, (255,255,255), 1)
                if debug:print(ratio)
                continue
            # Reject by area (red).
            area = cv2.contourArea(contour)
            if (area < min_area):
                if debug:cv2.drawContours(black, [contour], -1, (0,0,255), 1)
                continue
            # Reject by perimeter (cyan-ish).
            if (cv2.arcLength(contour, True) < min_perimeter):
                if debug:cv2.drawContours(black, [contour], -1, (255,255,0), 1)
                continue
            # Reject by solidity, i.e. area relative to its convex hull (yellow-ish).
            hull = cv2.convexHull(contour)
            solid = 100 * (area / cv2.contourArea(hull))
            if (solid < solidity[0] or solid > solidity[1]):
                if debug:cv2.drawContours(black, [contour], -1, (0,255,255), 1)
                continue
            # Reject by vertex count (magenta).
            if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):
                if debug:print(len(contour))
                if debug:cv2.drawContours(black, [contour], -1, (255,0,255), 1)
                continue
            output.append(contour)
        return output
# Module-level debug switch: gates the preview streams and verbose prints below.
debug=False
# Enumeration of the blur operations understood by GripPipeline.__blur.
BlurType = Enum('BlurType', 'Box_Blur Gaussian_Blur Median_Filter Bilateral_Filter')
def processFrame(frame):
    """Run the GRIP pipeline on one camera frame.

    Returns the bounding rect (x, y, w, h) of the first contour that
    survives filtering, or False when nothing passed the filters.
    """
    pipeline.process(frame)
    contours = pipeline.filter_contours_output
    if not contours:
        return False
    return cv2.boundingRect(contours[0])
def getRPMAndAngle(distance, origInchesDist):
    """Compute the shooter setpoint [motor_rpm, hood_angle_deg].

    distance:       target distance in meters (corrected below).
    origInchesDist: raw camera distance in inches, fed to the empirical
                    long-shot fit when the hood hits its travel limit.
    """
    print ("origDist: ",origInchesDist)
    distance+=0.74 # account for distance to back hole from target
    # Ideal ballistics first, then empirical fudge factors fitted on hardware.
    velocity, angle = getVelocityAndAngle(distance)
    #print("rpm3", rpm)
    rpm=convertVelocityToRPM(velocity)
    #print("orig ", rpm)
    #print ("origang ", angle)
    rpm=applyRPMFudge(rpm)
    angle=applyAngleFudge(angle, rpm)
    #print("rpmfudge: ", fudge)
    #print("anglefudge: ", angleFudge)
    if angle<25:# mechanism can only go to 25 degrees
        print("long")
        # Clamp to the hardware minimum and switch to a quadratic RPM fit in
        # raw inches, derived from long-range calibration data.
        angle=25
        rpm = 6.1939836823240230e+003 -4.8649057198059033e+001*origInchesDist + 1.5200366775589044e-001*origInchesDist*origInchesDist
        #rpm = 6.4079401547895068e+003 - 5.0691675348074611e+001*origInchesDist + 1.5661658315760379e-001*origInchesDist*origInchesDist
        """rpm = tuneRPM
        #rpm=
        if origInchesDist>178.47031876854825:
            print("reallylong")
            rpm=2.3973463633773163e+003 + -3.9206380581987790e+000 * origInchesDist + 1.5232575217969729e-002 * origInchesDist*origInchesDist
            rpm = tuneRPM
        #rpm=applyRPMFudge(rpm)"""
        #rpm = tuneRPM
        print(origInchesDist, rpm)
    if debug:print("vel", velocity)
    if debug:print("rpm", rpm)
    #print("newang", angle)
    return [rpm, angle]
def convertVelocityToRPM(velocity):
    """Convert a launch velocity (m/s) into a motor RPM setpoint."""
    flywheel_circumference_m = 0.319186
    gear_ratio = 0.47619  # motor revs per flywheel rev (20/42 reduction)
    revs_per_second = velocity / flywheel_circumference_m
    # Scale down to the motor shaft, then revs/second -> revs/minute.
    return revs_per_second * gear_ratio * 60
def getLongshotVelocity(distance, x, y):
    """Projectile speed needed to pass through (x, y) at a fixed 25-degree launch.

    ``distance`` is unused but retained so existing call sites keep working.
    """
    launch_angle_rad = 0.436332  # 25 degrees
    gravity = 9.81
    cos_a = math.cos(launch_angle_rad)
    sin_a = math.sin(launch_angle_rad)
    # Solve v from the ballistic trajectory constraint through (x, y).
    drop_term = y - (sin_a * x) / cos_a
    return math.sqrt(((gravity * x * x) / (2 * cos_a * cos_a)) * ((-1) / drop_term))
def applyRPMFudge(rpm):
    """Empirical linear correction mapping ideal RPM to commanded RPM."""
    scale = 3.0541393921880791e+000 - 7.2487484146391550e-004 * rpm
    return rpm * scale
def applyRPMFudgeForLongshot(rpm):
    """Apply the fixed long-shot correction factor.

    An RPM-dependent fit was tried previously (see history); a constant
    1.90 multiplier is what is currently in use.
    """
    correction = 1.90
    return rpm * correction
def applyRPMFudgeForVeryLongshot(rpm):
    """Apply the very-long-shot correction factor, printing diagnostics."""
    fudge = 1.8
    print("orig:"+str(rpm))
    print("fudge:"+str(fudge))
    print("new:"+str(rpm*fudge))
    return rpm * fudge
def applyAngleFudge(origAngle, rpm):
    """Shift the launch angle by an empirically fitted, RPM-linear offset."""
    offset = -1.1306745180644164e+001 + 2.1768502008778193e-003 * rpm
    return origAngle + offset
def getVelocityAndAngle(distance):
    """Return [speed_m_s, launch_angle_deg] to reach the target.

    Models a projectile whose apex coincides with the required vertical
    rise of 1.47955 m; raises ZeroDivisionError for distance == 0.
    """
    gravity = 9.81
    rise_m = 1.47955  # vertical distance the ball needs to travel
    vy = math.sqrt(2 * gravity * rise_m)
    vx = (gravity * distance) / vy
    speed = math.sqrt(vy ** 2 + vx ** 2)  # pythagorean theorem
    launch_angle = math.degrees(math.atan(vy / vx))
    return [speed, launch_angle]
def heightToDistance(h):
    """Quadratic calibration fit: target pixel height -> distance (inches)."""
    c0 = 5.7349476148636040e+002
    c1 = -1.5554292862671193e+001
    c2 = 1.2522754639035663e-001
    return c0 + (c1 * h) + (c2 * h * h)
def addToRollingAverage(h):
    """Insert a height sample into the circular buffer, advancing the cursor."""
    global averageIndex
    previousSeenHeights[averageIndex] = h
    # Wrap the write cursor when it reaches the end of the window.
    averageIndex = (averageIndex + 1) % windowSize
def clearAverage():
    """Reset the rolling-average state: cursor to 0, all samples to 0.

    Bug fix: ``previousSeenHeights`` was previously rebound as a function
    local (no ``global`` declaration), so the shared sample buffer was
    never actually cleared.
    """
    global averageIndex
    global previousSeenHeights  # was missing; the assignment created a dead local
    averageIndex = 0
    previousSeenHeights = [0 for i in range(windowSize)]
def getAverageOverRecent():
    """Return the mean over the entire rolling window of height samples."""
    return statistics.mean(previousSeenHeights)
def getAverageOverLastFew():
    """Return the mean of buffer slots 0-9.

    NOTE(review): this slices fixed positions of the ring buffer, which are
    the *oldest* samples once the write cursor wraps — confirm that the
    'last few' (most recent) samples were actually intended.
    """
    return statistics.mean(previousSeenHeights[0:10])
# Rolling-average state: circular write cursor plus a fixed-size sample window.
averageIndex=0
windowSize=40
previousSeenHeights=[0 for i in range(windowSize)]
# NetworkTables handles; dashTable is bound for real inside the __main__ block.
ntinst = NetworkTablesInstance.getDefault()
dashTable = None
# Fallback RPM used when tuning from the dashboard.
tuneRPM = 2000
if __name__ == "__main__":
print("Setting up NetworkTables client for team {}".format(3018))
ntinst.startClientTeam(3018)
netOut=ntinst.getTable("vision")
dashTable = ntinst.getTable("SmartDashboard")
dashTable.putNumber("TuneRPM", 1000)
dashTable.putBoolean("EnterRPM", False)
# get vision camera
inst = CameraServer.getInstance()
camera = UsbCamera("rPi Camera 0", "/dev/video0")
camServer = inst.startAutomaticCapture(camera=camera, return_server=True)
camera.setConnectionStrategy(VideoSource.ConnectionStrategy.kKeepOpen)
#camera2 = UsbCamera("rPi Camera 1", "/dev/video1")
#camServer2 = inst.startAutomaticCapture(camera=camera2, return_server=True)
#camera2.setConnectionStrategy(VideoSource.ConnectionStrategy.kKeepOpen)
vidSource=inst.getVideo()
width=320
height=240
camera.setResolution(width, height)
camera.setExposureManual(5)
camServer.setCompression(100)
if debug:
output=inst.putVideo("VisOut",width ,height)
output2=inst.putVideo("VisOut2",width ,height)
output3=inst.putVideo("VisOut3",width ,height)
output4=inst.putVideo("VisOut4",width ,height)
output5=inst.putVideo("VisOut5",width ,height)
black = np.zeros(shape=(height, width, 3), dtype=np.uint8)
frame = np.zeros(shape=(height, width, 3), dtype=np.uint8)
pipeline=GripPipeline()
start = time()
netOut.putNumber("frameID", random.randint(0,10000))
# loop forever on vision program
while True:
if dashTable.getBoolean("EnterRPM", False):
tuneRPM = dashTable.getNumber("TuneRPM", 1000)
t, frame = vidSource.grabFrame(frame)
result=processFrame(frame)
if debug:output.putFrame(frame)
if debug:print("FPS: {:.1f}".format(1 / (time() - start)))
#print(camera.isConnected())
if result:
x, y, w, h = result
#h=40
cx = x+w/2
cy = y+h/2
netOut.putNumber("width", w)
netOut.putNumber("height", h)
netOut.putNumber("x", cx)
netOut.putNumber("y", cy)
trueHeight=17# height of target
#correctedHeight=h*0.696 # since we know the angle we are looking at, we can just make this multiplier for perspective be a constant
#focalLength=289.09
if debug:print(h)
if debug:print(correctedHeight)
#dist = (trueHeight*focalLength) / correctedHeight
addToRollingAverage(h)
h=getAverageOverRecent()
dist = heightToDistance(h)
origDist=dist
dist+= 3.75 #shooter is behind cam
print ("dist:",dist)
netOut.putNumber("distance", dist)
dist= dist/39.37 # convert inches to | |
graph.plot(xm,yp,'r')
plt.setp(graph.get_xticklabels(), rotation=30, ha="right")
ln1 = len(xm)
if ln1<10:
graph.xaxis.set_major_locator(plt.LinearLocator(numticks=ln1))
graph.yaxis.set_major_locator(plt.LinearLocator(numticks=ln1))
else:
graph.xaxis.set_major_locator(plt.MaxNLocator(10))
graph.yaxis.set_major_locator(plt.MaxNLocator(10))
text = graph.annotate("Plotted by Sentinair device\n developed by\n Dr. <NAME> 2019",\
xy=(.3,.7),xycoords='figure fraction',rotation=-30,size=16,alpha=0.2)
graph.set_xlabel('Date_time')
graph.grid(True)
head = header[j].split("_")
except Exception as e:
logging.warning("Error in plotting hourly mean data:\r\n" ,exc_info=True)
return
try:
ylabel = head[-1]
except:
ylabel = header[j]
try:
graph.set_ylabel(ylabel)
header[j] = header[j].replace('%','')
header[j] = header[j].replace('/','')
imgdir = IMG_DIR.rstrip("/")
fig.savefig(imgdir + "/" + fn + header[j] + ".png",dpi=80,format='png',bbox_inches='tight')
plt.close('all')
except Exception as e:
logging.warning("Error in saving hourly means data image: ",exc_info=True)
return
j=j+1
#routine for plotting data of the measurements file related to the daily averages
def plot_file_d(filename,header):
    """Plot each daily-mean column against its date axis and save one PNG per column.

    filename: path of the daily-means text file (under DATA_DIR).
    header:   list of column names; column 0 holds dates (dd/mm/YYYY).
    Reads the module-level ``datacolsd`` columns; aborts early (clearing
    ``datacolsd``) when the global sampling ``rate`` has been reset to 0.
    """
    global datacolsd
    global rate
    # Derive the image base name from the data file path.
    # Fix: str.lstrip/rstrip strip *character sets*, not prefixes/suffixes,
    # so a name starting with any character of DATA_DIR (or ending in
    # 't'/'x') was silently mangled; strip the exact prefix/suffix instead.
    fn1 = filename[len(DATA_DIR):] if filename.startswith(DATA_DIR) else filename
    fn = fn1[:-len("txt")] if fn1.endswith("txt") else fn1
    j = 0
    for cols in header:
        if j == 0:
            j=j+1
            continue  # column 0 holds dates, not data
        try:
            xm2 = [datetime.strptime(d,"%d/%m/%Y") for d in datacolsd[0]]
            xm = mpl.dates.date2num(xm2)
            fig = plt.figure()
            graph = fig.add_subplot(111)
            red_patch = mpatches.Patch(color='red', label=header[j])
            graph.legend(handles=[red_patch])
            hfmt = mpl.dates.DateFormatter("%d/%m/%Y")
            graph.xaxis.set_major_formatter(hfmt)
            # Align series lengths: a sample may have been appended to one
            # column but not yet to the date column (or vice versa).
            if len(datacolsd[j]) > len(xm):
                df = len(datacolsd[j])-len(xm)
                yp = datacolsd[j][:-df]
            if len(xm) > len(datacolsd[j]):
                df = len(xm)- len(datacolsd[j])
                xm = xm[:-df]
            if len(datacolsd[j]) == len(xm):
                yp = datacolsd[j]
            if rate == 0:
                # Sampling has been stopped: drop cached data and bail out.
                datacolsd = []
                plt.close('all')
                return
            graph.plot(xm,yp,'r')
            plt.setp(graph.get_xticklabels(), rotation=30, ha="right")
            # Few points: one tick per point; otherwise cap at 10 ticks.
            ln1 = len(xm)
            if ln1<10:
                graph.xaxis.set_major_locator(plt.LinearLocator(numticks=ln1))
                graph.yaxis.set_major_locator(plt.LinearLocator(numticks=ln1))
            else:
                graph.xaxis.set_major_locator(plt.MaxNLocator(10))
                graph.yaxis.set_major_locator(plt.MaxNLocator(10))
            # Watermark.
            text = graph.annotate("Plotted by Sentinair device\n developed by\n Dr. <NAME> 2019",\
                xy=(.3,.7),xycoords='figure fraction',rotation=-30,size=16,alpha=0.2)
            graph.set_xlabel('Date')
            graph.grid(True)
            head = header[j].split("_")
        except Exception:
            logging.warning("Error in plotting daily mean data:\r\n",exc_info=True)
            return
        # Use the trailing "_"-separated token of the column name as y label.
        try:
            ylabel = head[-1]
        except Exception:
            ylabel = header[j]
        try:
            graph.set_ylabel(ylabel)
            # Strip characters that are unsafe in file names.
            header[j] = header[j].replace('%','')
            header[j] = header[j].replace('/','')
            imgdir = IMG_DIR.rstrip("/")
            fig.savefig(imgdir + "/" + fn + header[j] + ".png",dpi=80,format='png',bbox_inches='tight')
            plt.close('all')
        except Exception:
            logging.warning("Error in saving daily means data image: ",exc_info=True)
            return
        j=j+1
###############################
##### AVERAGES CALCULATIONS ROUTINES
def get_decimal(ff):
    """Return the number of digits after the decimal point in str(ff) (0 if none)."""
    text = str(ff)
    dot = text.find('.')
    return 0 if dot == -1 else len(text) - dot - 1
### hourly averages calculation
def mean_hour(hrprev,hrnow,stephrn,smh,rec,fh,el):
    """Accumulate one record into the running hourly sums; on hour change, emit the hourly means.

    hrprev  -- hour of the previously processed record
    hrnow   -- hour of the current record
    stephrn -- number of records accumulated so far in the current hour
    smh     -- ';'-separated running sums line (field 0 is the hour tag)
    rec     -- current ';'-separated measurement record (field 0 is the timestamp)
    fh      -- file handle the means line is written to (via measure_logging)
    el      -- unused here (error level passed through by the caller)
    Returns (hrprev, sommenuove, stephrn, medie, errorLevel) where ``sommenuove``
    is the updated sums line and ``medie`` the current means line.
    Appends the emitted means to the module-global ``datacolsh`` columns.
    """
    global datacolsh
    rec1 = rec.rstrip("\n")
    smh1 = smh.rstrip("\n")
    rec1p = rec1.split(';')
    smh1p = smh1.split(';')
    means = [None]*len(smh1p)
    smh1p[0] = str(hrprev)
    # Means line starts with "date_HH;" for the hour being reported.
    medie1 = time.strftime("%d/%m/%Y_") + "{:02d}".format(hrnow) + ';'
    sommenuove1 = smh1p[0] + ';'
    if hrprev == hrnow:
        # Still inside the same hour: add the record to the sums and refresh
        # the running means (rounded to the precision of the inputs).
        stephrn = stephrn + 1
        ss = 1
        while ss < len(smh1p):
            fltsum = round(float(smh1p[ss]) + float(rec1p[ss]),get_decimal(rec1p[ss]))
            means[ss] = round(fltsum/float(stephrn),get_decimal(fltsum))
            medie1 = medie1 + str(means[ss]) + ';'
            sommenuove1 = sommenuove1 + str(fltsum) + ';'
            ss = ss + 1
        medie = medie1.rstrip(';')
        sommenuove2 = sommenuove1.rstrip(';')
        sommenuove = sommenuove2 + "\n"
        errorLevel = 0
    else:
        # Hour rollover: finalize the means for the elapsed hour from the
        # accumulated sums (without the current record).
        ss = 1
        while ss < len(smh1p):
            fltsum = round(float(smh1p[ss]),get_decimal(smh1p[ss]))
            if stephrn == 0:
                # Guard against division by zero when no record accumulated.
                stephrn = stephrn + 1
            means[ss] = round(fltsum/float(stephrn),get_decimal(fltsum))
            medie1 = medie1 + str(means[ss]) + ';'
            ss = ss + 1
        medie = medie1.rstrip(';')
        ms1 = medie.rstrip("\n")
        ms = ms1.split(";")
        # Append each field of the means line to its column cache (used later
        # by the hourly plotting routine).
        colnum = 0
        for m in ms:
            datacolsh[colnum].append(ms[colnum])
            colnum += 1
        errorLevel = measure_logging(fh,medie)
        # Restart the accumulation with the current record as first sample.
        stephrn = 0
        sommenuove = rec
        medie = rec
        hrprev = hrnow
    return hrprev,sommenuove,stephrn,medie,errorLevel
### daily averages calculation
def mean_daily(dprev,dnow,stepdn,smd,rec,fh,el):
    """Accumulate one record into the running daily sums; on day change, emit the daily means.

    dprev  -- date string of the previously processed record
    dnow   -- date string of the current record
    stepdn -- number of records accumulated so far in the current day
    smd    -- ';'-separated running sums line (field 0 is the date tag)
    rec    -- current ';'-separated measurement record (field 0 is the timestamp)
    fh     -- file handle the means line is written to (via measure_logging)
    el     -- unused here (error level passed through by the caller)
    Returns (dprev, sommenuove, stepdn, medie, errorLevel); mirrors mean_hour
    but keyed on the date and caching into the module-global ``datacolsd``.
    """
    global datacolsd
    rec1 = rec.rstrip("\n")
    smd1 = smd.rstrip("\n")
    rec1p = rec1.split(';')
    smd1p = smd1.split(';')
    means = [None]*len(smd1p)
    smd1p[0] = str(dprev)
    # Means line starts with the date being reported.
    medie1 = dprev + ';'
    sommenuove1 = smd1p[0] + ';'
    if dprev == dnow:
        # Same day: add the record to the sums and refresh the running means.
        stepdn = stepdn + 1
        ss = 1
        while ss < len(smd1p):
            fltsum = round(float(smd1p[ss]) + float(rec1p[ss]),get_decimal(rec1p[ss]))
            means[ss] = round(fltsum/float(stepdn),get_decimal(fltsum))
            medie1 = medie1 + str(means[ss]) + ';'
            sommenuove1 = sommenuove1 + str(fltsum) + ';'
            ss = ss + 1
        medie = medie1.rstrip(';')
        sommenuove2 = sommenuove1.rstrip(';')
        sommenuove = sommenuove2 + "\n"
        errorLevel = 0
    else:
        # Day rollover: finalize the means for the elapsed day.
        ss = 1
        while ss < len(smd1p):
            fltsum = round(float(smd1p[ss]),get_decimal(smd1p[ss]))
            if stepdn == 0:
                # Guard against division by zero when no record accumulated.
                stepdn = stepdn + 1
            means[ss] = round(fltsum/float(stepdn),get_decimal(fltsum))
            medie1 = medie1 + str(means[ss]) + ';'
            ss = ss + 1
        medie = medie1.rstrip(';')
        ms1 = medie.rstrip("\n")
        ms = ms1.split(";")
        # Append each field of the means line to its column cache (used later
        # by the daily plotting routine).
        colnum = 0
        for m in ms:
            datacolsd[colnum].append(ms[colnum])
            colnum += 1
        errorLevel = measure_logging(fh,medie)
        # Restart the accumulation with the current record as first sample.
        stepdn = 0
        sommenuove = rec
        medie = rec
        dprev = dnow
    return dprev,sommenuove,stepdn,medie,errorLevel
###################################
## devices scanning: this routine search devices and the ports where they are plugged into.
## Then it creates the connections
def device_scanning(conn_dev,dev,sk1,ser1,flag):
    """Probe all known device drivers and rebuild the list of connected devices.

    Non-serial buses (e.g. I2C addresses) are scanned first, then every
    serial/USB port is tried against every serial-capable driver.

    conn_dev -- current list of connected device objects; each is terminated
                and the list is rebuilt from scratch
    dev      -- list of known device driver objects to probe
    sk1,ser1 -- socket / serial handles used by send_output for remote feedback
    flag     -- nonzero: report via send_output; zero: print to the console
    Returns (conn_dev, num_mag) where num_mag is the total number of
    magnitudes (sensor channels) offered by the connected devices.
    Side effect: clears the module-global ``fault`` alarm.
    """
    global fault
    #resetting the fault alarm
    fault = False
    # number of magnitudes to acquire
    num_mag = 0
    # Drop every previously connected device before re-scanning.
    for cn in conn_dev:
        cn.terminate()
        del cn
    conn_dev = []
    # --- pass 1: devices on addressable buses (not USB/serial) ---
    for dve in dev:
        conn_type = dve.getConnectionType()
        if (conn_type != USB_CONNECTION_TYPE) and (conn_type != SERIAL_CONNECTION_TYPE):
            conn_par = dve.getConnectionParams()
            for address in conn_par:
                # I2C addresses are reported in hex for readability.
                if conn_type == I2C_CONNECTION_TYPE:
                    address_to_check = hex(address)
                else:
                    address_to_check = address
                if flag != 0:
                    send_output("\nSearching for " + dve.getIdentity() + " on " + address_to_check + " ",sk1,ser1)
                else:
                    print ("\nSearching for " + dve.getIdentity() + " on " + address_to_check)
                logging.info("Searching for " + dve.getIdentity() + " on " + address_to_check)
                # Each address gets its own copy of the driver object.
                conn_dev.append(copy.deepcopy(dve))
                if conn_dev[-1].connect(address) == 1:
                    sens = conn_dev[-1].getSensors()
                    meas = conn_dev[-1].sample()
                    num_sens = sens.split(';')
                    num_meas = meas.split(';')
                    # Sanity check: sample must have one value per sensor.
                    if len(num_sens) != len(num_meas):
                        conn_dev[-1].terminate()
                        del conn_dev[-1]
                        continue
                    if flag != 0:
                        send_output("FOUND " + conn_dev[-1].getIdentity(),sk1,ser1)
                        send_output("measures: " + conn_dev[-1].getSensors(),sk1,ser1)
                    else:
                        print ("FOUND " + conn_dev[-1].getIdentity())
                        print ("measures: " + conn_dev[-1].getSensors())
                    logging.info("FOUND " + conn_dev[-1].getIdentity() + "; " + "measures: " + conn_dev[-1].getSensors())
                    #updating the number of magnitudes to acquire
                    num_mag = num_mag + len(num_sens)
                    #updating device identity for multi-copies purposes
                    original_identity = conn_dev[-1].getIdentity()
                    conn_dev[-1].setIdentity(original_identity + "-" + address_to_check)
                else:
                    if flag != 0:
                        send_output(dve.getIdentity() + " NOT FOUND",sk1,ser1)
                    else:
                        print (dve.getIdentity() + " NOT FOUND")
                    logging.info(dve.getIdentity() + " NOT FOUND")
                    del conn_dev[-1]
                    continue
    # --- pass 2: serial/USB devices, tried on every available port ---
    ports = list(serial.tools.list_ports.comports())
    for prt in ports:
        for dv in dev:
            conn_type = dv.getConnectionType()
            if (conn_type == USB_CONNECTION_TYPE) or (conn_type == SERIAL_CONNECTION_TYPE):
                if flag != 0:
                    send_output("\nSearching for " + dv.getIdentity() + " on " + prt[0] + " port",sk1,ser1)
                else:
                    print ("\nSearching for " + dv.getIdentity() + " on " + prt[0] + " port")
                logging.info("Searching for " + dv.getIdentity() + " on " + prt[0] + " port")
                conn_dev.append(copy.deepcopy(dv))
                if conn_dev[-1].connect(prt[0]) == 1:
                    sens = conn_dev[-1].getSensors()
                    meas = conn_dev[-1].sample()
                    num_sens = sens.split(';')
                    num_meas = meas.split(';')
                    # Sanity check: sample must have one value per sensor.
                    if len(num_sens) != len(num_meas):
                        conn_dev[-1].terminate()
                        del conn_dev[-1]
                        continue
                    if flag != 0:
                        send_output("FOUND " + conn_dev[-1].getIdentity(),sk1,ser1)
                        send_output("measures: " + conn_dev[-1].getSensors(),sk1,ser1)
                    else:
                        print ("FOUND " + conn_dev[-1].getIdentity())
                        print ("measures: " + conn_dev[-1].getSensors())
                    logging.info("FOUND " + conn_dev[-1].getIdentity() + "; " + "measures: " + conn_dev[-1].getSensors())
                    #updating the number of magnitudes to acquire
                    num_mag = num_mag + len(num_sens)
                    #updating device identity for multi-copies purposes
                    original_identity = conn_dev[-1].getIdentity()
                    conn_dev[-1].setIdentity(original_identity + "-" + prt[0])
                    # Port claimed: stop trying other drivers on this port.
                    break
                else:
                    if flag != 0:
                        send_output(dv.getIdentity() + " NOT FOUND",sk1,ser1)
                    else:
                        print (dv.getIdentity() + " NOT FOUND")
                    # BUG FIX: was ``dve`` — the stale loop variable left over
                    # from the bus-scan loop above, which logged the wrong
                    # device identity here.
                    logging.info(dv.getIdentity() + " NOT FOUND")
                    del conn_dev[-1]
            else:
                continue
    if len(conn_dev) == 0:
        if flag != 0:
            send_output("\nNo device connected to SentinAir",sk1,ser1)
        else:
            print("\nNo device connected to SentinAir")
        logging.info("No device connected to SentinAir")
    return conn_dev,num_mag
## getting the devices informations: identity, measurements units, current measurements
def check_devices(conn_dev,sk1,ser1):
    """Report identity, sensor list and a fresh sample for every connected device."""
    if not conn_dev:
        send_output("\nNo device connected to SentinAir",sk1,ser1)
        return
    send_output("\nDevices connected:\n",sk1,ser1)
    for device in conn_dev:
        send_output(device.getIdentity(),sk1,ser1)
        send_output(device.getSensors(),sk1,ser1)
        send_output(device.sample() + "\n",sk1,ser1)
def close_file(f):
    """Close file object *f*; return 0 on success, error code 10 on failure.

    The return-code convention matches the other file helpers in this module.
    """
    try:
        f.close()
        return 0
    # BUG FIX: was a bare ``except:`` which also swallowed SystemExit /
    # KeyboardInterrupt; Exception is sufficient for close() failures.
    except Exception:
        return 10
## formats the measures string to log and data logging
def measure_logging(f,towrite):
    """Write *towrite* (minus any trailing ';') plus a newline to *f* and flush.

    Returns 0 on success, 1 on any write failure (return-code convention used
    by the averaging routines).
    """
    try:
        towrite1 = towrite.rstrip(";")
        f.write(towrite1 + "\n")
        f.flush()
        return 0
    # BUG FIX: was a bare ``except:`` which also swallowed SystemExit /
    # KeyboardInterrupt; Exception covers write/flush errors.
    except Exception:
        return 1
## builds the record to store in the file data by gathering measure from all the devices connected
def make_record(conn_dvc):
    """Gather one sample from every connected device.

    Returns (rec, view, errs): ``rec`` is the ';'-joined data-file record
    starting with a timestamp, ``view`` a human-readable multi-line report,
    and ``errs`` 0 on success or 21 when a device returns a malformed string
    (in which case rec and view both carry the error message).
    """
    stamp = time.strftime("%d/%m/%Y_%H:%M:%S")
    view = stamp + "\n"
    rec = stamp + ';'
    errs = 0
    for device in conn_dvc:
        view = view + device.getIdentity() + "\n"
        head = device.getSensors()
        meas = device.sample()
        labels = head.split(';')
        values = meas.split(';')
        try:
            # One "label: value" line per magnitude; raises if the device
            # returned fewer values than labels.
            for pos in range(len(labels)):
                view = view + labels[pos] + ": " + values[pos] + "\n"
        except:
            rec = "ERROR 21: bad data string from " + device.getIdentity()
            logging.warning(rec)
            view = rec
            errs = 21
            return rec,view,errs
        rec = rec + meas + ';'
    return rec,view,errs
## measurement session initalization operations
def init_session(conn_dvc,rt):
sdir = DEFAULT_DIR.rstrip("/")
ddir = DATA_DIR.rstrip("/")
try:
ff = open(sdir + "/" + "status.sentinair","w")
ff.write(str(rt) + "\n")
ff.close
except:
pass
try:
fh = None
temp = time.strftime("%Y-%m-%d_%H-%M-%S")
try:
f = open("/etc/hostname","r")
machine_name = f.readline().rstrip("\r\n")
except:
machine_name = | |
<filename>implementation.py
from heapq import heappush, heappop
from itertools import count
from itertools import permutations
from collections import defaultdict
from itertools import chain, combinations
from functools import reduce
import copy
from collections import deque
import signal
class GracefulKiller:
    """Sets ``kill_now`` to True when SIGINT or SIGTERM arrives, so long-running
    loops can poll it and shut down cleanly."""
    kill_now = False

    def __init__(self):
        # Route both termination signals to the same handler.
        for sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, self.exit_gracefully)

    def exit_gracefully(self,signum, frame):
        # Instance attribute shadows the class-level default.
        self.kill_now = True
# --- problem input parsing from stdin ---
# The layout (header line, "Nodes n" / "Edges e", "E u v w" lines, then a
# terminals section) appears to be the STP/PACE Steiner-tree format — TODO
# confirm against the actual input files.
killer = GracefulKiller()  # lets SIGINT/SIGTERM stop the search gracefully
input()  # skip the section header line
n = int(input().split()[1])  # node count (not used directly below)
e = int(input().split()[1])  # edge count
graph = defaultdict(dict)  # undirected weighted adjacency: graph[u][v] = w
max_cost = 0  # sum of all edge weights; upper bound for any tree cost
is_terminal = dict()
for x in range(e):
    s,u,v,w = input().split()  # "E u v w"
    u,v,w = map(int,(u,v,w))
    graph[u][v] = w
    graph[v][u] = w
    is_terminal[u] = False
    is_terminal[v] = False
    max_cost += w
input() # end
check = input()
# Some inputs have a blank line before the terminals SECTION header.
if 'SECTION' not in check:input()
t = int(input().split()[1])  # number of terminals
terminals = []
for x in range(t):
    _, ter = input().split()  # "T id"
    terminals.append(int(ter))
    is_terminal[int(ter)] = True
terminals = sorted(terminals)
#terminals = sorted(terminals,key = lambda x: (len(graph[x]),x),reverse = True)
# Current solution tree and a working copy of edge costs in which tree edges
# are zeroed so reconnecting through the tree is free.
steinertree = defaultdict(dict)
steinercost = copy.deepcopy(graph)
def add_edges(path):
    """Insert every edge along *path* into the Steiner tree and zero its working cost."""
    global steinertree, steinercost
    if isinstance(path,int):
        # A bare node (no edges) — nothing to add.
        return
    if len(path) <= 1:
        return
    for u, v in zip(path, path[1:]):
        weight = graph[u][v]
        steinertree[u][v] = weight
        steinertree[v][u] = weight
        # Tree edges become free so later searches may reuse them.
        steinercost[u][v] = 0
        steinercost[v][u] = 0
def remove_edges(edges):
    """Remove the given (a, b) edges from the Steiner tree, dropping emptied nodes.

    Duplicate/reversed edges are collapsed by normalizing each pair to
    (min, max) first.
    """
    # BUG FIX: the original declared ``global steinertrees`` — a typo for
    # ``steinertree``.  It was inert only because the function mutates the
    # dict in place rather than rebinding the name.
    global steinertree
    reduced = set((min(a,b),max(a,b)) for a,b in edges)
    for a,b in reduced:
        steinertree[a].pop(b, None)
        steinertree[b].pop(a, None)
        # Drop nodes that no longer have any incident tree edge.
        if len(steinertree[a]) == 0:
            steinertree.pop(a, None)
        if len(steinertree[b]) == 0:
            steinertree.pop(b, None)
def get_all_subsets(source):
    """Yield every non-empty subset of *source* as a tuple, smallest subsets first."""
    return chain.from_iterable(combinations(source, size) for size in range(1, len(source) + 1))
def optimize(relevant_nodes, keep,dist,score_to_improve):
    """Search for a cheaper tree spanning *keep* inside *relevant_nodes*.

    Label-setting search in the style of the Dijkstra–Steiner algorithm
    (assumption from the (node, terminal-subset) labels — TODO confirm):
    l[v][repr(I)] is the best known cost of connecting node v to terminal
    subset I, and b[v][repr(I)] records the predecessor states used by
    ``reconstruct``.  *dist* gives per-terminal distances (from reduce_nodes)
    used as an admissible-looking lower bound when queueing states.
    Returns (best_cost, edge_set) where edge_set is None unless best_cost
    beats *score_to_improve*.  Aborts early (returning the unchanged score)
    when the global ``killer`` was signalled.
    NOTE: relies on module-level ``push``/``pop`` (bound to heappush/heappop
    in the main loop) and on ``steinercost`` having tree edges zeroed.
    """
    global graph
    keep = sorted(keep)
    # The smallest kept node acts as the root; the rest are the sources
    # whose subsets label the search states.
    root = keep[0]
    sources = keep[1:]
    # For every subset I, which sources are still missing (used for the
    # lower-bound term when queueing).
    missing_sources = {repr(list(I)): [x for x in sources if x not in I] for I in get_all_subsets(sources)}
    l = defaultdict(dict)
    b = defaultdict(dict)
    # All labels start at infinity with an empty backtrack entry.
    for v in relevant_nodes:
        for I in get_all_subsets(sources):
            l[v][repr(list(I))] = float('inf')
            b[v][repr(list(I))] = [] # empty set
    for v in relevant_nodes:
        if v in sources:
            l[v][repr([v])] = 0
        l[v][repr([])] = 0
    # N = [(s,[s]) for s in sources]
    # P is the set of permanently settled (node, subset) states; the search
    # stops once the root is settled together with all sources.
    P = [(v,[]) for v in relevant_nodes]
    N = []
    for s in sources:
        push(N, (l[s][repr([s])], s, [s]))
    while (root, sources) not in P:
        if killer.kill_now:
            return score_to_improve,None
        min_cost, v, I = pop(N)
        P.append((v,I))
        # Edge relaxation: extend the (v, I) label along each incident edge.
        for w in graph[v]:
            if killer.kill_now:
                return score_to_improve,None
            # only search relevant nodes
            if w not in relevant_nodes:continue
            if l[v][repr(I)] + steinercost[v][w] < l[w][repr(I)] and (w,I) not in P:
                l[w][repr(I)] = l[v][repr(I)] + steinercost[v][w]
                b[w][repr(I)] = [(v,I)]
                # l(v,I) + L(v,R\I) -> L could also be 0 to be valid
                extra_cost = 0
                missing = missing_sources[repr(I)]
                if len(missing) == 0:
                    extra_cost = 0
                elif len(missing) == 1:
                    extra_cost = dist[missing[0]][w]
                else:
                    # Half the cheapest pairwise detour over missing sources.
                    extra_cost = min(0.5*(dist[m1][w]+dist[m2][w]) for m1, m2 in combinations(missing,2))
                push(N,(l[w][repr(I)] + extra_cost,w,I))
        # Merge step: combine the settled label (v, I) with every other
        # settled label (v, J) at the same node into (v, I ∪ J).
        for J in get_all_subsets(sources):
            if killer.kill_now:
                return score_to_improve,None
            J = list(J)
            if J == I:continue
            if (v,J) not in P:continue
            combined = list(sorted(set(I+J)))
            if l[v][repr(I)] + l[v][repr(J)] <= l[v][repr(combined)] and (v,combined) not in P:
                l[v][repr(combined)] = l[v][repr(I)] + l[v][repr(J)]
                b[v][repr(combined)] = [(v,I),(v,J)]
                extra_cost = 0
                missing = missing_sources[repr(combined)]
                if len(missing) == 0:
                    extra_cost = 0
                elif len(missing) == 1:
                    extra_cost = dist[missing[0]][v]
                else:
                    extra_cost = min(0.5*(dist[m1][v]+dist[m2][v]) for m1, m2 in combinations(missing,2))
                push(N,(l[v][repr(combined)] + extra_cost,v,combined))
    # Only materialize the edge set when the search actually improved on the
    # caller's current score.
    if l[root][repr(sources)] < score_to_improve:
        return l[root][repr(sources)], reconstruct(root,sources,b)
    return l[root][repr(sources)], None
def add_edge_list(edges):
    """Add each (a, b) edge to the Steiner tree with its weight from the graph."""
    global steinertree
    for u, v in edges:
        weight = graph[u][v]
        steinertree[u][v] = weight
        steinertree[v][u] = weight
def tree_cost():
    """Return the total weight of the current Steiner tree, counting each undirected edge once."""
    global steinertree
    unique_edges = {
        (min(a, b), max(a, b))
        for a in steinertree
        for b in steinertree[a]
    }
    return sum(steinertree[a][b] for a, b in unique_edges)
def reduce_nodes(keep, current_cost):
    """Shrink the search space for ``optimize`` around the nodes in *keep*.

    Runs a cost-bounded Dijkstra (over ``steinercost``, where tree edges are
    free) from every node in *keep*, then returns
    (nodes_relevant, dist) where ``dist[node][v]`` is the distance from each
    kept node and ``nodes_relevant`` keeps only nodes reached from ALL kept
    nodes within a budget that depends on len(keep).
    """
    push = heappush
    pop = heappop
    c = count()  # tie-breaker so the heap never compares node payloads
    dist = defaultdict(dict) # dictionary of final distances
    occurences = defaultdict(set)  # node -> set of kept nodes that reached it
    #cumulated_dist = defaultdict(list)
    cumulated_dist = dict()  # node -> sum of distances from all kept nodes
    # Smallest over kept nodes of the farthest other kept node (a radius bound).
    min_max_keep_dist = current_cost
    #paths = defaultdict(dict)
    for node in keep:
        max_keep_dist_per_node = 0
        # Paths costlier than the current solution can never help.
        cutoff = current_cost
        #paths[node][node] = [node]
        seen = {}
        fringe = []
        for source in [node]:
            seen[source] = 0
            push(fringe, (0, next(c), source))
        while fringe:
            (d, _, v) = pop(fringe)
            if v in dist[node]:
                continue # already searched this node.
            dist[node][v] = d
            for u, cost in steinercost[v].items():
                vu_dist = dist[node][v] + cost
                # if cutoff is not None:
                if vu_dist > cutoff:
                    continue
                # Track how far the other kept nodes are from this source.
                if u in keep and u!=node:
                    max_keep_dist_per_node = max(vu_dist,max_keep_dist_per_node)
                #cutoff = min(cutoff, vu_dist)
                #cutoff = min(current_cost, cutoff-vu_dist)
                if u in dist[node]:
                    if vu_dist < dist[node][u]:
                        seen[u] = vu_dist
                        push(fringe, (vu_dist, next(c), u))
                        occurences[u].add(node)
                        #paths[node][u] = paths[node][v] + [u]
                elif u not in seen or vu_dist < seen[u]:
                    seen[u] = vu_dist
                    push(fringe, (vu_dist, next(c), u))
                    occurences[u].add(node)
                    #paths[node][u] = paths[node][v] + [u]
        min_max_keep_dist = min(min_max_keep_dist,max_keep_dist_per_node)
        # Accumulate distances so nodes far from everything can be pruned.
        for k,v in dist[node].items():
            #cumulated_dist[k].append(v)
            cumulated_dist[k] = cumulated_dist.get(k,0) + v
    # Budget heuristics tuned per kept-set size (looser for 4 than for 3);
    # the constants look empirical — TODO confirm their derivation.
    if len(keep) == 3:
        nodes_relevant = keep + [k for k,v in occurences.items() if len(v)==len(keep) and cumulated_dist[k]<=(min_max_keep_dist+current_cost)]
        #nodes_relevant = keep + [k for k,v in occurences.items() if len(v)==len(keep) and cumulated_dist[k]<=(current_cost/3+current_cost)]
    elif len(keep) == 4:
        nodes_relevant = keep + [k for k,v in occurences.items() if len(v)==len(keep) and cumulated_dist[k]<=(2*min_max_keep_dist+current_cost)]
        #nodes_relevant = keep + [k for k,v in occurences.items() if len(v)==len(keep)]
    else:
        nodes_relevant = keep + [k for k,v in occurences.items() if len(v)==len(keep)]
    return nodes_relevant, dist
edge_list = set()
def reconstruct(v,X,b):
    """Rebuild the edge set of the optimal tree for state (v, X) from backtrack table *b*.

    Resets the global ``edge_list``, flattens the traceback structure into it,
    and returns it.
    """
    global edge_list
    edge_list = set()
    all_edges(traceback(v, X, b))
    return edge_list
def traceback(v, X,b):
    """Walk backtrack table *b* from state (v, X), returning nested edge lists.

    Tree edges appear as (parent, child) tuples; merge points produce nested
    lists which ``all_edges`` flattens later.
    """
    res = b[v][repr(X)]
    if res == []:
        # Base state with no recorded predecessor: recurse into singletons.
        # BUG FIX: the original called ``traceback(v, [x])`` without the
        # backtrack table, which would raise TypeError whenever this branch
        # recursed (it only terminates silently for X == [v] or X == []).
        return [traceback(v, [x], b) for x in X if x!=v]
    if len(res)>1:
        # The state was formed by merging two disjoint label sets at v.
        return [traceback(v_i, X_i,b) for v_i,X_i in res]
    # Single predecessor: record the edge and keep walking the same subset.
    res_u, res_x = res[0]
    return [(v,res_u)] + traceback(res_u,X,b)
#if isinstance(res_u, int) and X == res_x:
# trace = traceback(root, sources)
# edge_list = set()
def all_edges(l):
    """Flatten a nested traceback structure into the global ``edge_list``.

    Every tuple found anywhere in the nesting is normalized to (min, max)
    before being added; empty lists are skipped.
    """
    global edge_list
    for item in l:
        if item == []:
            continue
        if isinstance(item, tuple):
            edge_list.add((min(item), max(item)))
        else:
            all_edges(item)
def get_shorter(path,path_length,max_size = 4):
    """Try to replace a tree region around path[-1] with a cheaper sub-tree.

    path        -- tree path whose endpoint (path[-1]) is the intersection to
                   re-optimize; path[0] is kept as an anchor
    path_length -- cost of *path*, used as the score to beat
    max_size    -- give up when more than this many branch endpoints are found
    Returns (new_edges, old_edges) when ``optimize`` found a cheaper
    connection of the branch endpoints, otherwise (None, None).
    Side effect: restores real weights in ``steinercost`` for the removed
    region's edges before optimizing (they are NOT re-zeroed on failure).
    """
    global steinertree, steinercost,graph
    start = path[0]
    intersection = path[-1]
    # Terminals must stay in the tree, so they cannot be re-optimized away.
    if is_terminal[intersection]:return None,None
    nodes_to_keep = [start]
    current_dist = path_length
    paths_to_intersections = []
    nodes_in_path = set()
    base_node = intersection
    # Follow every branch leaving the intersection until the next terminal or
    # branching node; those endpoints must be reconnected by the new tree.
    for v in steinertree[base_node].keys():
        if v in nodes_in_path:continue # don't go back to path already taken
        prev = base_node
        current_dist += graph[v][prev]
        paths_to_intersections.append((v,prev))
        nodes_in_path.add(v)
        nodes_in_path.add(prev)
        while not v in terminals and len(steinertree[v]) == 2:
            # Degree-2 non-terminal: keep walking along the chain.
            v,prev = [k for k in steinertree[v].keys() if k!= prev][0],v
            current_dist += graph[v][prev]
            paths_to_intersections.append((v,prev))
            nodes_in_path.add(v)
        nodes_to_keep.append(v)
        if len(nodes_to_keep)>max_size:return None,None
    if len(nodes_to_keep)<3:
        # Fewer than three endpoints: nothing to re-route.
        return None,None
    # Restore the real cost of the edges being considered for removal so the
    # optimizer does not treat them as free.
    for a,b in paths_to_intersections:
        w = graph[a][b]
        steinercost[a][b] = w
        steinercost[b][a] = w
    relevant, dijkstra_keep = reduce_nodes(nodes_to_keep,current_dist)
    try:
        opt_result,new_edges = optimize(relevant, nodes_to_keep,dijkstra_keep,current_dist) # all_nodes
    except:
        return None,None
    if opt_result<current_dist:
        # Hand the swap back to the caller, which applies it.
        return new_edges, paths_to_intersections
        # NOTE(review): everything below this return is unreachable dead
        # code — the in-place tree update appears to have been moved to the
        # caller and these lines were left behind.
        remove_edges(paths_to_intersections)
        add_edge_list(new_edges)
        for a,b in new_edges:
            steinercost[a][b] = 0
            steinercost[b][a] = 0
    else:
        return None,None
    # for a,b in paths_to_intersections:
    #     steinercost[a][b] = 0
    #     steinercost[b][a] = 0
    return None, None
best_solution = max_cost + 1
best_tree = None
best_terminal = 0
limit = min(t,200)
check_terminals = terminals[:limit]
terminals_checked = 0
while check_terminals:
if killer.kill_now:
break
ttt = check_terminals.pop(0)
#sources = terminals
push = heappush
pop = heappop
dist = {} # dictionary of final distances
seen = {}
# fringe is heapq with 3-tuples (distance,c,node)
# use the count c to avoid comparing nodes (may not be able to)
c = count()
fringe = []
paths = {source: [source] for source in terminals}
start = ttt
sources = [start]
searched = set()
for source in sources:
seen[source] = 0
push(fringe, (0, next(c), source))
paths[source] = [source]
#closest_terminal[source] = (0, [source], source)
while fringe:
(d, _, v) = pop(fringe)
if v in searched:
continue # already searched this node.
dist[v] = d
searched.add(v)
for u, cost in graph[v].items():
vu_dist = dist[v] + cost
# if cutoff is not None:
# if vu_dist > cutoff:
# continue
if u in dist:
if vu_dist < dist[u]:
seen[u] = vu_dist
push(fringe, (vu_dist, next(c), u))
paths[u] = paths[v] + [u]
closest_terminal[u] = (vu_dist, paths[u], source)
elif u not in seen or vu_dist < seen[u]:
seen[u] = vu_dist
push(fringe, (vu_dist, next(c), u))
paths[u] = paths[v] + [u]
steinertree = defaultdict(dict)
steinercost = copy.deepcopy(graph)
other_terminals = sorted([_ for _ in terminals if _!=ttt],key = lambda x: (dist[x],x),reverse=False)
closest = other_terminals[0]
closest_path = paths[closest]
add_edges(closest_path)
if killer.kill_now:
break
for ter in other_terminals[1:]:
if killer.kill_now:
break
dist = {} # dictionary of final distances
seen = {}
# fringe is heapq with 3-tuples (distance,c,node)
# use the count c to avoid comparing nodes (may not be able to)
c = count()
fringe = []
paths = {ter: [ter]}
closest_dist = 2**32
closest_path = None
sources = | |
from cloudshell.shell.core.driver_context import ResourceCommandContext, AutoLoadDetails, AutoLoadAttribute, \
AutoLoadResource
from collections import defaultdict
class LegacyUtils(object):
    """Converts first-generation autoload results into the generated datamodel
    classes defined in this module."""

    def __init__(self):
        # Map of datamodel class name -> class, discovered from this module.
        self._datamodel_clss_dict = self.__generate_datamodel_classes_dict()

    def migrate_autoload_details(self, autoload_details, context):
        """Build a resource tree (root, sub-resources, attributes) from *autoload_details*."""
        model_name = context.resource.model
        root_name = context.resource.name
        root = self.__create_resource_from_datamodel(model_name, root_name)
        attributes = self.__create_attributes_dict(autoload_details.attributes)
        self.__attach_attributes_to_resource(attributes, '', root)
        self.__build_sub_resoruces_hierarchy(root, autoload_details.resources, attributes)
        return root

    def __create_resource_from_datamodel(self, model_name, res_name):
        # Instantiate the generated class whose name matches the model.
        return self._datamodel_clss_dict[model_name](res_name)

    def __create_attributes_dict(self, attributes_lst):
        """Group autoload attributes by their relative address."""
        d = defaultdict(list)
        for attribute in attributes_lst:
            d[attribute.relative_address].append(attribute)
        return d

    # NOTE: method name keeps the historical "resoruces" typo to avoid
    # breaking any external caller of the mangled name.
    def __build_sub_resoruces_hierarchy(self, root, sub_resources, attributes):
        """Attach all sub-resources to *root*, level by level."""
        # Bucket resources by depth (rank = number of address segments).
        d = defaultdict(list)
        for resource in sub_resources:
            splitted = resource.relative_address.split('/')
            parent = '' if len(splitted) == 1 else resource.relative_address.rsplit('/', 1)[0]
            rank = len(splitted)
            d[rank].append((parent, resource))
        self.__set_models_hierarchy_recursively(d, 1, root, '', attributes)

    # BUG FIX: the rank map parameter was named ``dict`` (shadowing the
    # builtin) and a missing rank fell through with ``pass``, relying on the
    # defaultdict to yield an empty bucket (and polluting it with empty
    # entries).  Missing rank now terminates the recursion explicitly.
    def __set_models_hierarchy_recursively(self, rank_map, rank, manipulated_resource, resource_relative_addr, attributes):
        if rank not in rank_map:  # no resources at this depth: recursion ends
            return
        for (parent, resource) in rank_map[rank]:
            if parent == resource_relative_addr:
                sub_resource = self.__create_resource_from_datamodel(
                    resource.model.replace(' ', ''),
                    resource.name)
                self.__attach_attributes_to_resource(attributes, resource.relative_address, sub_resource)
                manipulated_resource.add_sub_resource(
                    self.__slice_parent_from_relative_path(parent, resource.relative_address), sub_resource)
                self.__set_models_hierarchy_recursively(
                    rank_map,
                    rank + 1,
                    sub_resource,
                    resource.relative_address,
                    attributes)

    def __attach_attributes_to_resource(self, attributes, curr_relative_addr, resource):
        # Set each attribute as a snake_case python attribute, then consume
        # the entry so leftovers can be detected.
        for attribute in attributes[curr_relative_addr]:
            setattr(resource, attribute.attribute_name.lower().replace(' ', '_'), attribute.attribute_value)
        del attributes[curr_relative_addr]

    def __slice_parent_from_relative_path(self, parent, relative_addr):
        """Return *relative_addr* with the parent prefix (and separator) removed."""
        # BUG FIX: was ``parent is ''`` — identity comparison with a string
        # literal, which only works by interning accident.
        if parent == '':
            return relative_addr
        return relative_addr[len(parent) + 1:]  # + 1 because we want to remove the seperator also

    def __generate_datamodel_classes_dict(self):
        return dict(self.__collect_generated_classes())

    def __collect_generated_classes(self):
        import sys, inspect
        # All classes defined in this module are candidate datamodel classes.
        return inspect.getmembers(sys.modules[__name__], inspect.isclass)
class ApV2(object):
    def __init__(self, name):
        """Create an ApV2 resource model named *name*.

        :param name: resource name
        :type name: str
        """
        self.attributes = {}  # attribute full-name ('ApV2.X') -> value
        self.resources = {}  # relative path -> sub-resource model
        self._cloudshell_model_name = 'ApV2'  # model name reported to CloudShell
        self._name = name
    def add_sub_resource(self, relative_path, sub_resource):
        """Register *sub_resource* under *relative_path* (overwrites any existing entry)."""
        self.resources[relative_path] = sub_resource
@classmethod
def create_from_context(cls, context):
"""
Creates an instance of NXOS by given context
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
:rtype ApV2
"""
result = ApV2(name=context.resource.name)
for attr in context.resource.attributes:
result.attributes[attr] = context.resource.attributes[attr]
return result
def create_autoload_details(self, relative_path=''):
"""
:param relative_path:
:type relative_path: str
:return
"""
resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
name=self.resources[r].name,
relative_address=self._get_relative_path(r, relative_path))
for r in self.resources]
attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a]) for a in self.attributes]
autoload_details = AutoLoadDetails(resources, attributes)
for r in self.resources:
curr_path = relative_path + '/' + r if relative_path else r
curr_auto_load_details = self.resources[r].create_autoload_details(curr_path)
autoload_details = self._merge_autoload_details(autoload_details, curr_auto_load_details)
return autoload_details
def _get_relative_path(self, child_path, parent_path):
"""
Combines relative path
:param child_path: Path of a model within it parent model, i.e 1
:type child_path: str
:param parent_path: Full path of parent model, i.e 1/1. Might be empty for root model
:type parent_path: str
:return: Combined path
:rtype str
"""
return parent_path + '/' + child_path if parent_path else child_path
@staticmethod
def _merge_autoload_details(autoload_details1, autoload_details2):
"""
Merges two instances of AutoLoadDetails into the first one
:param autoload_details1:
:type autoload_details1: AutoLoadDetails
:param autoload_details2:
:type autoload_details2: AutoLoadDetails
:return:
:rtype AutoLoadDetails
"""
for attribute in autoload_details2.attributes:
autoload_details1.attributes.append(attribute)
for resource in autoload_details2.resources:
autoload_details1.resources.append(resource)
return autoload_details1
    @property
    def cloudshell_model_name(self):
        """
        Returns the name of the Cloudshell model
        :return: always the literal model name 'ApV2'
        :rtype: str
        """
        return 'ApV2'
    @property
    def bands(self):
        """
        Radio bands supported by the access point (None when unset).
        :rtype: str
        """
        return self.attributes['ApV2.Bands'] if 'ApV2.Bands' in self.attributes else None

    @bands.setter
    def bands(self, value='dual-band'):
        """
        :type value: str
        """
        self.attributes['ApV2.Bands'] = value
@property
def radios(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Radios'] if 'ApV2.Radios' in self.attributes else None
@radios.setter
def radios(self, value='2.4Ghz (2x2)'):
"""
:type value: str
"""
self.attributes['ApV2.Radios'] = value
@property
def radio_2dot4ghz(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Radio 2dot4Ghz'] if 'ApV2.Radio 2dot4Ghz' in self.attributes else None
@radio_2dot4ghz.setter
def radio_2dot4ghz(self, value='(2x2)'):
"""
:type value: str
"""
self.attributes['ApV2.Radio 2dot4Ghz'] = value
@property
def radio_5ghz_1(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Radio 5Ghz 1'] if 'ApV2.Radio 5Ghz 1' in self.attributes else None
@radio_5ghz_1.setter
def radio_5ghz_1(self, value='(2x2)'):
"""
:type value: str
"""
self.attributes['ApV2.Radio 5Ghz 1'] = value
@property
def radio_5ghz_2(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Radio 5Ghz 2'] if 'ApV2.Radio 5Ghz 2' in self.attributes else None
@radio_5ghz_2.setter
def radio_5ghz_2(self, value='N/A'):
"""
:type value: str
"""
self.attributes['ApV2.Radio 5Ghz 2'] = value
@property
def model(self):
"""
:rtype: str
"""
return self.attributes['ApV2.model'] if 'ApV2.model' in self.attributes else None
@model.setter
def model(self, value):
"""
:type value: str
"""
self.attributes['ApV2.model'] = value
@property
def mode(self):
"""
:rtype: str
"""
return self.attributes['ApV2.mode'] if 'ApV2.mode' in self.attributes else None
@mode.setter
def mode(self, value='Wifi5'):
"""
:type value: str
"""
self.attributes['ApV2.mode'] = value
@property
def serial(self):
"""
:rtype: str
"""
return self.attributes['ApV2.serial'] if 'ApV2.serial' in self.attributes else None
@serial.setter
def serial(self, value):
"""
:type value: str
"""
self.attributes['ApV2.serial'] = value
@property
def jumphost(self):
"""
:rtype: bool
"""
return self.attributes['ApV2.jumphost'] if 'ApV2.jumphost' in self.attributes else None
@jumphost.setter
def jumphost(self, value):
"""
:type value: bool
"""
self.attributes['ApV2.jumphost'] = value
@property
def ip(self):
"""
:rtype: str
"""
return self.attributes['ApV2.ip'] if 'ApV2.ip' in self.attributes else None
@ip.setter
def ip(self, value):
"""
:type value: str
"""
self.attributes['ApV2.ip'] = value
@property
def jumphost_tty(self):
"""
:rtype: str
"""
return self.attributes['ApV2.jumphost_tty'] if 'ApV2.jumphost_tty' in self.attributes else None
@jumphost_tty.setter
def jumphost_tty(self, value='/dev/ttyAP1'):
"""
:type value: str
"""
self.attributes['ApV2.jumphost_tty'] = value
@property
def version(self):
"""
:rtype: str
"""
return self.attributes['ApV2.version'] if 'ApV2.version' in self.attributes else None
@version.setter
def version(self, value):
"""
:type value: str
"""
self.attributes['ApV2.version'] = value
@property
def port(self):
"""
:rtype: float
"""
return self.attributes['ApV2.port'] if 'ApV2.port' in self.attributes else None
@port.setter
def port(self, value='22'):
"""
:type value: float
"""
self.attributes['ApV2.port'] = value
@property
def uname(self):
"""
:rtype: str
"""
return self.attributes['ApV2.uname'] if 'ApV2.uname' in self.attributes else None
@uname.setter
def uname(self, value):
"""
:type value: str
"""
self.attributes['ApV2.uname'] = value
@property
def passkey(self):
"""
:rtype: string
"""
return self.attributes['ApV2.passkey'] if 'ApV2.passkey' in self.attributes else None
@passkey.setter
def passkey(self, value):
"""
:type value: string
"""
self.attributes['ApV2.passkey'] = value
@property
def pdu_host(self):
"""
:rtype: str
"""
return self.attributes['ApV2.PDU Host'] if 'ApV2.PDU Host' in self.attributes else None
@pdu_host.setter
def pdu_host(self, value):
"""
:type value: str
"""
self.attributes['ApV2.PDU Host'] = value
@property
def pdu_user(self):
"""
:rtype: str
"""
return self.attributes['ApV2.PDU User'] if 'ApV2.PDU User' in self.attributes else None
@pdu_user.setter
def pdu_user(self, value):
"""
:type value: str
"""
self.attributes['ApV2.PDU User'] = value
@property
def pdu_password(self):
"""
:rtype: string
"""
return self.attributes['ApV2.PDU Password'] if 'ApV2.PDU Password' in self.attributes else None
@pdu_password.setter
def pdu_password(self, value):
"""
:type value: string
"""
self.attributes['ApV2.PDU Password'] = value
@property
def pdu_port(self):
"""
:rtype: str
"""
return self.attributes['ApV2.PDU Port'] if 'ApV2.PDU Port' in self.attributes else None
@pdu_port.setter
def pdu_port(self, value):
"""
:type value: str
"""
self.attributes['ApV2.PDU Port'] = value
@property
def user(self):
"""
:rtype: str
"""
return self.attributes['ApV2.User'] if 'ApV2.User' in self.attributes else None
@user.setter
def user(self, value):
"""
User with administrative privileges
:type value: str
"""
self.attributes['ApV2.User'] = value
@property
def password(self):
"""
:rtype: string
"""
return self.attributes['ApV2.Password'] if 'ApV2.Password' in self.attributes else None
@password.setter
def password(self, value):
"""
:type value: string
"""
self.attributes['ApV2.Password'] = value
@property
def enable_password(self):
"""
:rtype: string
"""
return self.attributes['ApV2.Enable Password'] if 'ApV2.Enable Password' in self.attributes else None
@enable_password.setter
def enable_password(self, value):
"""
The enable password is required by some CLI protocols such as Telnet and is required according to the device configuration.
:type value: string
"""
self.attributes['ApV2.Enable Password'] = value
@property
def power_management(self):
"""
:rtype: bool
"""
return self.attributes['ApV2.Power Management'] if 'ApV2.Power Management' in self.attributes else None
@power_management.setter
def power_management(self, value=True):
"""
Used by the power management orchestration, if enabled, to determine whether to automatically manage the device power status. Enabled by default.
:type value: bool
"""
self.attributes['ApV2.Power Management'] = value
@property
def sessions_concurrency_limit(self):
"""
:rtype: float
"""
return self.attributes['ApV2.Sessions Concurrency Limit'] if 'ApV2.Sessions Concurrency Limit' in self.attributes else None
@sessions_concurrency_limit.setter
def sessions_concurrency_limit(self, value='1'):
"""
The maximum number of concurrent sessions that the driver will open to the device. Default is 1 (no concurrency).
:type value: float
"""
self.attributes['ApV2.Sessions Concurrency Limit'] = value
@property
def snmp_read_community(self):
"""
:rtype: string
"""
return self.attributes['ApV2.SNMP Read Community'] if 'ApV2.SNMP Read Community' in self.attributes else None
@snmp_read_community.setter
def snmp_read_community(self, value):
"""
The SNMP Read-Only Community String is like a password. It is sent along with each SNMP Get-Request and allows (or denies) access to device.
:type value: string
"""
self.attributes['ApV2.SNMP Read Community'] = value
@property
def snmp_write_community(self):
"""
:rtype: string
"""
return self.attributes['ApV2.SNMP Write Community'] if 'ApV2.SNMP Write Community' in self.attributes else None
@snmp_write_community.setter
def snmp_write_community(self, | |
)
# @TODO implement objectivum_formato as default
# commune.add_argument(
# '--codex-de',
# help='Generate documentation of dictionaries',
# # metavar='',
# dest='codex_de',
# # const=True,
# nargs='?'
# )
commune.add_argument(
'--punctum-separato-de-resultatum',
help='Character(s) used as separator for generate output. ' +
'Used only for tabular results. ' +
'Defaults to tab "\t"',
dest='resultatum_separato',
default="\t",
nargs='?'
)
commune.add_argument(
'--punctum-separato-de-fontem',
help='Character(s) used as separator from input file ' +
'Used only for tabular results. ' +
'Defaults to comma ","',
dest='fontem_separato',
default=",",
nargs='?'
)
# archivum = parser.add_argument_group(
parser.add_argument_group(
# "Archivum",
"HXLTM explānātiōnī",
'[ --methodus=\'hxltm-explanationi\' ] '
'Simple explanation about a single HXLTM file (tabular output). '
'See also status quo for more advanced information. '
'Accepts path and piped in data. '
)
# archivum.add_argument(
# '--de-archivum',
# help='(deprecated, can be removed) Parse single archive',
# # metavar='',
# dest='de_archivum',
# # const=True,
# action='store_true',
# # nargs='?'
# )
# data, n, pl, nominativus, https://en.wiktionary.org/wiki/datum#Latin
# apothēcae, f, s, dativus, https://en.wiktionary.org/wiki/apotheca#Latin
data_apothecae = parser.add_argument_group(
"Data apothēcae",
'[ --methodus=\'data-apothecae\' ] '
'data apothēcae. Data warehouse mode. '
'Compile (sense: export) selected dictionaries to a single place '
'(likely single database entry point)'
)
data_apothecae.add_argument(
'--data-apothecae-ad',
help='Path to file (or reference to database) to store result ',
dest='data_apothecae_ad',
nargs='?',
default='apothecae.datapackage.json'
)
data_apothecae.add_argument(
'--data-apothecae-ex',
help='Comma-separated list of dictionaries to initialize '
'the data warehouse. ',
dest='data_apothecae_ex',
type=lambda x: x.split(',')
)
data_apothecae.add_argument(
'--data-apothecae-ex-archivo',
help='Path to file with list (one item per line) of dictionaries '
'to initialize the data warehouse',
dest='data_apothecae_ex_archivo',
)
# fōrmātō, s, n, Dativus, https://en.wiktionary.org/wiki/formatus#Latin
data_apothecae.add_argument(
'--data-apothecae-formato',
help='Output format. Default will try make a guess from '
'--data-apothecae-ad pattern.',
dest='data_apothecae_formato',
nargs='?',
choices=['datapackage', 'sqlite'],
default=None
)
# dictionaria = parser.add_argument_group(
parser.add_argument_group(
"Dictionaria Numerordĭnātĭo (undocumented feature, deprecated)",
'[ --methodus=\'deprecatum-dictionaria-numerordinatio\' ] '
'Regex table (not in use). No options required'
)
# dictionaria.add_argument(
# '--dictionaria-numerordinatio',
# help='Dictionary of all possible values on stricter '
# ' Numerordĭnātĭo (HXLStantad container)',
# # metavar='',
# dest='dictionaria_numerordinatio',
# # const=True,
# action='store_true',
# # nargs='?'
# )
# https://en.wiktionary.org/wiki/codex#Latin
codex = parser.add_argument_group(
"Cōdex",
'[ --methodus=\'codex\' ] '
"Book/manual creation. Requires --codex-de=1603_NN_NN")
codex.add_argument(
'--codex-de',
help='Generate documentation of dictionaries',
# metavar='',
dest='codex_de',
# const=True,
nargs='?'
)
codex.add_argument(
'--objectivum-linguam',
help='Target natural language (use if not auto-detected). '
'Must be like {ISO 639-3}-{ISO 15924}. Example: arb-Arab. '
'Default: mul-Zyyy ',
# metavar='',
dest='objectivum_linguam',
default='mul-Zyyy',
nargs='?'
)
codex.add_argument(
'--auxilium-linguam',
help='Define auxiliary languages '
'Must be like {ISO 639-3}-{ISO 15924}. '
'Example: "ina-Latn,ile-Latn" '
'Accepts multiple values. ',
# metavar='',
dest='auxilium_linguam',
# default='mul-Zyyy',
# nargs='?'
type=lambda x: x.split(',')
)
codex.add_argument(
'--codex-copertae',
help='Pre-calculate the codex, but only generate '
'Codex cover (SVG)',
# metavar='',
dest='codex_copertae',
# const=True,
action='store_true',
# nargs='?'
)
codex.add_argument(
'--codex-in-tabulam-json',
help='Pre-calculate the codex, but only generate '
'Tabular Data (MediaWiki syntax 1) (JSON). '
'See https://www.mediawiki.org/wiki/Help:Tabular_Data',
# metavar='',
dest='codex_in_tabulam_json',
# const=True,
action='store_true',
# nargs='?'
)
codex.add_argument(
'--objectivum-formatum-asciidoctor',
help='(Default) Output Asciidoctor format',
# metavar='',
dest='ad_asciidoctor',
# const=True,
action='store_true',
# nargs='?'
)
# https://en.wikipedia.org/wiki/Status_quo
# https://en.wiktionary.org/wiki/status_quo#English
status_quo = parser.add_argument_group(
"Status quō",
'[ --methodus=\'status-quo\' ] '
"Calculate current situation. Used to take other actions. "
"Requires --codex-de 1603_NN_NN (focused Codex). "
"Works with --quaero-ix_n1603ia."
)
status_quo.add_argument(
'--status-quo',
help='Compute the status quo, using a codex as initial reference',
# metavar='',
dest='status_quo',
# const=True,
action='store_true',
# nargs='?'
)
status_quo.add_argument(
'--status-quo-in-yaml',
help='Return status in YAML',
# metavar='',
dest='status_in_yaml',
# const=True,
action='store_true',
# nargs='?'
)
status_quo.add_argument(
'--status-quo-in-json',
help='Return status in JSON',
# metavar='',
dest='status_in_json',
# const=True,
action='store_true',
# nargs='?'
)
status_quo.add_argument(
'--status-quo-in-markdown',
help='Return status in Markdown',
# metavar='',
dest='status_in_markdown',
# const=True,
action='store_true',
# nargs='?'
)
status_quo.add_argument(
'--status-quo-in-datapackage',
help='Return status in frictionless datapackage.json. '
'With --ex-librario returns profile data-package-catalog. '
' (low level of details)',
# metavar='',
dest='status_in_datapackage',
# const=True,
action='store_true',
# nargs='?'
)
status_quo.add_argument(
'--status-quo-in-rdf-skos-turtle',
help='RDF/SKOS Turtle format.'
'Simple Knowledge Organization System. '
'https://www.w3.org/TR/skos-reference/',
# metavar='',
dest='status_in_rdf_skos_ttl',
# const=True,
action='store_true',
# nargs='?'
)
# - ex (+ ablative), https://en.wiktionary.org/wiki/ex#Latin
# - librāriō, n, s, /Ablative/,
# https://en.wiktionary.org/wiki/librarium#Latin
status_quo.add_argument(
'--ex-librario',
help='Status novō. New state. Persist changes if necessary',
# metavar='',
dest='ex_librario',
const='',
# action='store_true',
nargs='?'
)
# https://en.wikipedia.org/wiki/Status_quo
# https://en.wiktionary.org/wiki/status_quo#English
opus_temporibus = parser.add_argument_group(
"Opus temporibus",
'[ --methodus=\'opus-temporibus\' ] '
"Crontab/cronjob information "
# "Requires --codex-de 1603_NN_NN"
)
# ex opere temporibus
opus_temporibus.add_argument(
'--ex-opere-temporibus',
help='ex opere temporibus. Out of work times (crontab)',
# metavar='',
dest='ex_opere_temporibus',
# const='',
# action='store_true',
nargs='?'
)
opus_temporibus.add_argument(
'--opus-temporibus-in-tsv',
help='Result in TSV (without headers); first column is Codex',
# metavar='',
dest='opus_temporibus_in_tsv',
# const='',
# action='store_true',
nargs='?'
)
opus_temporibus.add_argument(
'--quaero-ix_n1603ia',
help='Query ix_n1603ia. Rudimentar && (AND) and || (OR). '
'Use var<1 to test 0 or undefined. '
'Query ix_n1603ia. Filter. Ex. "{publicum}>10 && {internale}<1"',
# metavar='',
dest='quaero_ix_n1603ia',
# const='',
# action='store_true',
nargs='?'
)
opus_temporibus.add_argument(
'--quaero-numerordinatio',
help='Query Numerordĭnātĭo. Additional filter list for focused '
' base of dictionaries. Ideal to check if some groups meet '
'other filters. '
'Example: if result return empty and other queries are to check '
'if need to fetch again from Wikidata Q, then you assume no '
'new fetch is necessary',
# metavar='',
dest='quaero_numerordinatio',
# const='',
# action='store_true',
nargs='?'
)
# in (+ ablative), in (+ accusative);;
# (+ accusative) toward, towards, against, at
# https://en.wiktionary.org/wiki/in#Latin
# https://en.wiktionary.org/wiki/limes#Latin
opus_temporibus.add_argument(
'--in-limitem',
help='/Against the limit of/. Limit maximum number of cron jobs '
'to show. ',
dest='in_limitem',
nargs='?'
)
# https://en.wiktionary.org/wiki/ordo#Latin
# https://en.wiktionary.org/wiki/chaos#Latin
opus_temporibus.add_argument(
'--in-ordinem',
help='/Against arrangement (ordering) of/. Sort result list to '
'this rule. Options: '
'numerordinatio=natural order; chaos=random order',
dest='in_ordinem',
nargs='?',
choices=['numerordinatio', 'chaos'],
default='numerordinatio'
)
# # --agendum-linguam is a draft. Not 100% implemented
# parser.add_argument(
# '--agendum-linguam', '-AL',
# help='(Planned, but not fully implemented yet) ' +
# 'Restrict working languages to a list. Useful for ' +
# 'HXLTM to HXLTM or multilingual formats like TBX and TMX. ' +
# 'Requires: multilingual operation. ' +
# 'Accepts multiple values.',
# metavar='agendum_linguam',
# type=lambda x: x.split(',')
# # action='append',
# # nargs='?'
# )
# # --non-agendum-linguam is a draft. Not 100% implemented
# parser.add_argument(
# '--non-agendum-linguam', '-non-AL',
# help='(Planned, but not implemented yet) ' +
# 'Inverse of --agendum-linguam. Document one or more ' +
# 'languages that should be ignored if they exist. ' +
# 'Requires: multilingual operation. ' +
# 'Accept multiple values.',
# metavar='non_agendum_linguam',
# # action='append',
# type=lambda x: x.split(',')
# # nargs='?'
# )
# dictionaria.add_argument(
# '--objectivum-formatum-markdown',
# help='(default) Output Markdown format',
# # metavar='',
# dest='ad_markdown',
# # const=True,
# action='store_true',
# # nargs='?'
# )
return parser.parse_args()
# def execute_cli(self, args, stdin=STDIN, stdout=sys.stdout,
# stderr=sys.stderr):
def execute_cli(self, pyargs, stdin=STDIN, stdout=sys.stdout,
stderr=sys.stderr):
# print('TODO')
self.pyargs = pyargs
# if (self.pyargs.data_apothecae_ex and
# len(self.pyargs.data_apothecae_ex) > 0) or \
# (self.pyargs.data_apothecae_ex_archivo and
# len(self.pyargs.data_apothecae_ex_archivo)):
# TODO: raise error if target already exist, so user could
# avoid override something
if pyargs.methodus == 'data-apothecae':
if self.pyargs.data_apothecae_ex:
data_apothecae_ex = self.pyargs.data_apothecae_ex
else:
# f = open(self.pyargs.data_apothecae_ex_archivo, "r")
# data_apothecae_ex = list(f.readlines())
data_apothecae_ex = []
# print(f.readlines())
with open(
self.pyargs.data_apothecae_ex_archivo, "r") as archivum:
for _lineam in archivum:
if _lineam.startswith('#') or len(_lineam.strip()) == 0:
continue
lineam = _lineam.rstrip('\n')
if lineam.find("\t") > -1:
lineam = lineam.split("\t")[0]
if lineam.find(',') > -1:
lineam = lineam.split(',')[0]
data_apothecae_ex.append(lineam)
# print(data_apothecae_ex)
# libraria.imprimere_in_datapackage_sqlite()
data_apothecae = DataApothecae(
# self.pyargs.data_apothecae_ex,
data_apothecae_ex,
data_apothecae_ad=self.pyargs.data_apothecae_ad,
data_apothecae_formato=self.pyargs.data_apothecae_formato,
)
data_apothecae.praeparatio()
return self.output(data_apothecae.imprimere())
# return self.output(['TODO...'])
# Opus temporibus _____________________________________________________
# if self.pyargs.dictionaria_numerordinatio:
# if pyargs.methodus == 'opus-temporibus' or \
# self.pyargs.ex_opere_temporibus and \
# len(self.pyargs.ex_opere_temporibus) > 0:
if pyargs.methodus == 'opus-temporibus':
# # if self.pyargs.actionem_sparql:
# if self.pyargs.ex_opere_temporibus and \
# len(self.pyargs.ex_opere_temporibus) > 0:
# print(self.pyargs.quaero_numerordinatio)
opus_temporibus = OpusTemporibus(
self.pyargs.ex_opere_temporibus,
self.pyargs.quaero_ix_n1603ia,
in_limitem=self.pyargs.in_limitem,
in_ordinem=self.pyargs.in_ordinem,
quaero_numerordinatio=self.pyargs.quaero_numerordinatio,
)
return self.output(opus_temporibus.imprimere())
# status_quo ___________________________________________________________
if pyargs.methodus == 'status-quo':
if self.pyargs.codex_de:
codex_de = self.pyargs.codex_de
else:
if self.pyargs.ex_librario:
codex_de = '1603_1_1'
else:
raise ValueError('--ex-librario=[{locale,cdn}] ?')
# if self.pyargs.objectivum_formato:
# objectivum_formato = objectivum_formato
# else:
codex = Codex(
codex_de,
objectivum_linguam=self.pyargs.objectivum_linguam,
auxilium_linguam=self.pyargs.auxilium_linguam,
)
libraria = LibrariaStatusQuo(
codex,
self.pyargs.ex_librario)
if self.pyargs.status_in_yaml or \
self.pyargs.objectivum_formato == 'yaml':
return self.output(libraria.imprimere())
if self.pyargs.status_in_json | |
# Copyright (c) 2017-2021 <NAME> (<EMAIL>)
"""
@author: <NAME>
CG model selection via MAP-estimation using mean parameters.
"""
import numpy as np
from numpy import ix_
from cgmodsel.base_solver import BaseCGSolver
# pylint: disable=W0511 # todos
# pylint: disable=R0914 # too many locals
class MAP(BaseCGSolver):
"""
A class for estimating MAP models for CG distributions (mean parameters).
Attributes:
cat_format_required (str): specifier for format - constant.
name: name of the class.
"""
def __init__(self):
    """Create a MAP solver; data is attached later via the base class."""
    super().__init__()
    # 'flat' means discrete observations are single flat state indices
    self.cat_format_required = 'flat'
    self.name = 'MAP'
def _fit_gaussian(self,
                  mat_v_inv,
                  deg_freedom: int,
                  vec_mu0,
                  n_art_cg: int):
    """Fit Gaussian MAP-estimate.

    Wishart prior for the precision matrix, Gaussian prior for the mean.

    Args:
        mat_v_inv: inverse of V for the prior W(Lambda| V, nu).
        deg_freedom: degrees of freedom (=nu).
        vec_mu0: mean of Gaussian prior N(mu | mu0, (n_art_cg * Lambda)^{-1})
        n_art_cg: number of artificial observations for Gaussian prior.

    Note:
        Setting n_art_cg=0 (and nu = #Gaussians) produces ML estimate.

    Returns:
        tuple: MAP estimates (vec_mu, mat_sigma)
    """
    assert self.meta['n_cat'] == 0, 'use for continuous variables only'
    assert self.meta['n_cg'] > 0

    # posterior mean: prior pseudo-observations plus data, normalized
    vec_mu = np.sum(self.cont_data, axis=0)  # sum over rows
    vec_mu = n_art_cg * vec_mu0 + vec_mu
    vec_mu /= n_art_cg + self.meta['n_data']

    # BUGFIX: .copy() -- the original bound mat_sigma to mat_v_inv itself,
    # so the in-place += / /= below silently mutated the caller's array.
    mat_sigma = mat_v_inv.copy()  # this is V^{-1} from the doc
    for i in range(self.meta['n_data']):
        # add 'scatter matrix' of the evidence
        diff_yi_mumap = self.cont_data[i, :] - vec_mu
        mat_sigma += np.outer(diff_yi_mumap, diff_yi_mumap)
    # scatter contribution of the n_art_cg artificial observations at mu0
    mudiff = vec_mu - vec_mu0
    mat_sigma += n_art_cg * np.outer(mudiff, mudiff)
    mat_sigma /= self.meta['n_data'] + deg_freedom - self.meta['n_cg']

    return vec_mu, mat_sigma
def fit_fixed_covariance(self,
                         n_art_cat: int = 1,
                         n_art_cg: int = 1,
                         deg_freedom: int = None,
                         vec_mu0=None,
                         mat_sigma0=None):
    """fit MAP model with single covariance matrix + individual CG means.

    Warning:
        This method of model estimation uses
        sums over the whole discrete state space.

    Args:
        n_art_cat (int): Laplacian smoothing parameter
            = artificial observations per discrete variable
            Dirichlet prior parameters (prior for discrete distribution).
        n_art_cg (int): number of 'artificial' data points per CG
            (prior parameter for means of conditional Gaussians).
        vec_mu0: the value of artificial CG data points.
        deg_freedom (int): degrees of freedom
            (prior parameter for Wishart prior
            for the shared precision matrix of CG distributions).
        mat_sigma0: initial guess for the covariance matrix.

    Note:
        Complete prior is a Dirichlet-Normal-Wishart prior.

    Returns:
        tuple: MAP-estimate (p(x)_x, mu(x)_x, mat_sigma), where x
        are the discrete outcomes.
    """
    # TODO(franknu): publish doc
    # future ideas(franknu):
    # (1) iter only over observed discrete examples,
    #     all other outcomes default to 'prior'
    #     (often times much less than the whole discrete state space)
    #     use dictionary + counts?
    # (2) use flag to indicate if cov is fixed or variable
    #     (avoid code redundancy)
    n_cg = self.meta['n_cg']
    assert self.meta['n_data'] > 0, 'No data loaded.. use method dropdata'

    ## defaults for smoothing parameters
    if vec_mu0 is None:
        # reasonable choice when using standardized data Y
        vec_mu0 = np.zeros(n_cg)
    assert vec_mu0.shape == (n_cg,)
    if deg_freedom is None:
        deg_freedom = n_cg  # least informative non-degenerate prior
    assert deg_freedom >= n_cg, 'need deg >= n_cg for non-degenerate prior'
    if mat_sigma0 is None:
        mat_sigma0 = np.eye(n_cg)  # standardized data --> variances are 1
    assert mat_sigma0.shape == (n_cg, n_cg)

    # choose V = 1/deg_freedom * mat_sigma0 for the Wishart prior
    mat_v_inv = deg_freedom * np.linalg.inv(mat_sigma0)
    # note: formerly used n_cg instead of deg_freedom here

    ## MAP-estimate Gaussians only (with unknown mean and covariance)
    if self.meta['n_cat'] == 0:
        vec_mu, mat_sigma = self._fit_gaussian(mat_v_inv, deg_freedom,
                                               vec_mu0, n_art_cg)
        return np.array([]), vec_mu, mat_sigma

    ## MAP-estimation in the presence of discrete variables
    n_discrete_states = int(np.prod(self.meta['sizes']))
    probs_cat = np.zeros(n_discrete_states)
    mus = np.zeros((n_discrete_states, n_cg))

    ## empirical counts and per-state sums of the continuous data
    for i, state in enumerate(self.cat_data):
        probs_cat[state] += 1
        mus[state, :] += self.cont_data[i, :]

    ## MAP-estimates of mu(x)
    for state in range(n_discrete_states):
        # MAP estimator for mu(x): prior pseudo-observations + data mean
        mus[state, :] = (n_art_cg * vec_mu0 + mus[state, :])
        mus[state, :] /= n_art_cg + probs_cat[state]

    ## MAP-estimate of mat_sigma
    # BUGFIX: .copy() -- the original aliased mat_v_inv, so the in-place
    # updates below mutated it (the dead np.zeros initialization that the
    # alias assignment discarded was removed as well).
    mat_sigma = mat_v_inv.copy()  # this is V^{-1} from the doc
    for i, state in enumerate(self.cat_data):
        # add 'scatter matrix' of the evidence
        diff_yi_mumap = self.cont_data[i, :] - mus[state, :]
        mat_sigma += np.outer(diff_yi_mumap, diff_yi_mumap)
    for state in range(n_discrete_states):
        # add scatter part of artificial observations mu0
        mudiff = mus[state, :] - vec_mu0
        mat_sigma += n_art_cg * np.outer(mudiff, mudiff)
    mat_sigma /= deg_freedom + self.meta[
        'n_data'] - n_cg - 1 + n_discrete_states

    ## MAP-estimate of p
    probs_cat = probs_cat + n_art_cat
    probs_cat /= probs_cat.sum()
    # note: without smoothing would yield p = p/n

    ## reshape to the correct shapes
    probs_cat = probs_cat.reshape(self.meta['sizes'])
    mus = mus.reshape(self.meta['sizes'] + [n_cg])

    return probs_cat, mus, mat_sigma
def fit_variable_covariance(self,
                            n_art_cat: int = 1,
                            n_art_cg: int = 1,
                            deg_freedom: int = None,
                            vec_mu0=None,
                            mat_sigma0=None):
    """fit MAP model with individual covariance matrices and means.

    Warning:
        This method of model estimation uses
        sums over the whole discrete state space.

    Args:
        n_art_cat (int): Laplacian smoothing parameter
            = artificial observations per discrete variable
            Dirichlet prior parameters (prior for discrete distribution).
        n_art_cg (int): number of 'artificial' data points per CG
            (prior parameter for means of conditional Gaussians).
        vec_mu0: the value of artificial CG data points.
        deg_freedom (int): degrees of freedom
            (prior parameter for Wishart prior
            for the shared precision matrix of CG distributions).
        mat_sigma0: initial guess for the covariance matrices.

    Note:
        Complete prior is a Dirichlet-Normal-Wishart prior.

    Returns:
        tuple: MAP-estimate (p(x)_x, mu(x)_x, mat_sigma_x), where x
        are the discrete outcomes.
    """
    # TODO(franknu): publish doc
    assert self.meta['n_data'] > 0, 'No data loaded.. use method dropdata'
    n_cg = self.meta['n_cg']

    ## defaults for smoothing parameters
    if vec_mu0 is None:
        vec_mu0 = np.zeros(n_cg)  # reasonable when using standardized data
    assert vec_mu0.shape == (n_cg,)
    if deg_freedom is None:
        deg_freedom = n_cg + 1
        # yields mat_sigma(x)=mat_sigma_0 if x not observed
    # BUGFIX: added the missing separator space between the two halves of
    # this assertion message ('non-degenerateprior' -> 'non-degenerate prior').
    string = 'need deg >= dg+1 for non-degenerate '
    string += 'prior and deal with unobserved discrete outcomes'
    assert deg_freedom >= n_cg + 1, string
    if mat_sigma0 is None:
        mat_sigma0 = np.eye(n_cg)
    assert mat_sigma0.shape == (n_cg, n_cg)

    # choose V = 1/deg_freedom * mat_sigma0 for the Wishart prior
    # -> prior mean of W(Lambda(x)|V, deg_freedom) is
    # deg_freedom*V= mat_sigma0
    mat_v_inv = deg_freedom * np.linalg.inv(mat_sigma0)
    # note: formerly used n_cg instead of nu here

    ## MAP-estimate Gaussians only (with unknown mean and covariance)
    if self.meta['n_cat'] == 0:
        vec_mu, mat_sigma = self._fit_gaussian(mat_v_inv, deg_freedom,
                                               vec_mu0, n_art_cg)
        return np.array([]), vec_mu, mat_sigma

    ## initialization
    n_discrete_states = int(np.prod(self.meta['sizes']))
    probs_cat = np.zeros(n_discrete_states)
    mus = np.zeros((n_discrete_states, n_cg))
    sigmas = np.zeros((n_discrete_states, n_cg, n_cg))

    ## empirical counts and per-state sums of the continuous data
    for i, state in enumerate(self.cat_data):
        probs_cat[state] += 1
        mus[state, :] += self.cont_data[i, :]

    ## MAP-estimates of mu(state)
    for state in range(n_discrete_states):
        mus[state, :] = (n_art_cg * vec_mu0 + mus[state, :]) / \
            (n_art_cg + probs_cat[state])  # MAP estimator for mu(state)

    ## MAP-estimate of mat_sigma(state)
    for i, state in enumerate(self.cat_data):
        # scatter matrix of the evidence
        diff_yi_mumap = self.cont_data[i, :] - mus[state, :]
        sigmas[state, :, :] += np.outer(diff_yi_mumap, diff_yi_mumap)
    for state in range(n_discrete_states):
        # add prior scatter V^{-1} plus the mu0 pseudo-observations
        mudiff = mus[state, :] - vec_mu0
        sigmas[state, :, :] += mat_v_inv + \
            n_art_cg * np.outer(mudiff, mudiff)
        sigmas[state, :, :] /= probs_cat[state] - n_cg + deg_freedom
        # note: divisor is > 0 since deg_freedom > n_cg

    ## MAP-estimate of p
    probs_cat = probs_cat + n_art_cat
    probs_cat /= probs_cat.sum()

    ## reshape to the correct shapes
    probs_cat = probs_cat.reshape(self.meta['sizes'])
    mus = mus.reshape(self.meta['sizes'] + [n_cg])
    sigmas = sigmas.reshape(self.meta['sizes'] + [n_cg, n_cg])

    return probs_cat, mus, sigmas
def get_plhvalue(self, mean_params):
    """Wrapper for crossvalidate method.

    Args:
        mean_params: tuple (p, mus, sigmas).

    Returns:
        pseudo-likelihood value of current data set
    """
    # only the third element (the pseudo-likelihood value) is of interest
    _unused_a, _unused_b, plh_value = self.crossvalidate(mean_params)
    return plh_value
def crossvalidate(self, mean_params):
"""Perform crossvalidation by node-wise predictions.
Note:
This uses the current data that has been dropped using method
drop_data.
Args:
mean_params: tuple (p, mus, sigmas), where
p has shape sizes, mus has shape sizes + (n_cg),
sigmas has shape (n_cg, n_cg) if independent of discrete vars x,
else has shape sizes + (n_cg, n_cg) if dependent on x.
| |
# -*- coding:UTF-8 -*-
from copy import deepcopy
from getpass import getuser as GETPASSgetuser
from cdms2 import open as CDMS2open
from inspect import stack as INSPECTstack
import json
from numpy import array as NUMPYarray
from os import environ as OSenviron
from os.path import join as OSpath__join
from sys import exit as SYSexit
from sys import path as SYSpath
# ENSO_metrics package
from EnsoMetrics.EnsoCollectionsLib import ReferenceObservations
from EnsoPlots.EnsoPlotToolsLib import find_first_member, get_reference, remove_metrics, sort_members
# user (get your user name for the paths and to save the files)
user_name = GETPASSgetuser()
# path
xmldir = OSenviron['XMLDIR']
path_obs = "/data/" + user_name + "/Obs"
path_netcdf = "/data/" + user_name + "/ENSO_metrics/v20200311"
# My (YYP) package
# set new path where to find programs
# SYSpath.insert(0, "/home/yplanton/New_programs/lib_cmip_bash")
# from getfiles_sh_to_py import find_path_and_files
# from getfiles_sh_to_py import get_ensembles
# ---------------------------------------------------#
# colors for printing
class bcolors:
    """ANSI terminal escape sequences used to colorize printed messages."""
    HEADER = '\033[95m'  # bright magenta
    OKBLUE = '\033[94m'  # bright blue
    OKGREEN = '\033[92m'  # bright green
    WARNING = '\033[93m'  # bright yellow
    FAIL = '\033[91m'  # bright red
    ENDC = '\033[0m'  # reset all attributes (must terminate colored text)
    BOLD = '\033[1m'  # bold text
    UNDERLINE = '\033[4m'  # underlined text
# ---------------------------------------------------#
def find_members(experiment, frequency, model, project, realm, first_only=False):
    """
    Finds member names

    Inputs:
    ------
    :param experiment: string
        experiment name (e.g., "historical", "piControl")
    :param frequency: string
        data frequency: "day" for daily, "mon" for monthly
    :param model: string
        model name (e.g., "CNRM-CM5", "IPSL-CM5A-LR")
    :param project: string
        project name (e.g., "CMIP5", "CMIP6")
    :param realm: string
        data realm: "A" for atmosphere, "O" for ocean
    **Optional arguments:**
    :param first_only: boolean, optional
        True to return only the first member

    Output:
    ------
    :return members: list
        list of member(s) for the given information
    """
    available_members = get_ensembles(
        exp=experiment, fre=frequency, mod=model, pro=project, rea=realm)
    # keep the strict 'is True' comparison of the original implementation
    if first_only is True:
        return [find_first_member(available_members)]
    return available_members
def find_fx(model, experiment='', project='', realm='', ensemble=''):
    """
    Finds fixed variables, here areacell and sftlf (landmask)

    Inputs:
    ------
    :param model: string
        model name (e.g., "CNRM-CM5", "IPSL-CM5A-LR")
    **Optional arguments:**
    :param experiment: string, optional
        experiment name (e.g., "historical", "piControl")
    :param project: string, optional
        project name (e.g., "CMIP5", "CMIP6")
    :param realm: string, optional
        data realm: "A" for atmosphere, "O" for ocean
    :param ensemble: string, optional
        ensemble name (e.g., "r1i1p1", "r1i1p1f1")

    Outputs:
    -------
    :return file_area: string
        Path and areacell file name corresponding to the given information (e.g., /path/to/file/areacell.xml)
        Set to None if the file cannot be found
    :return file_land: string
        Path and landmask file name corresponding to the given information (e.g., /path/to/file/landmask.xml)
        Set to None if the file cannot be found
    """
    if project in ["CMIP5", "CMIP6"]:
        # CMIP5 publishes fixed fields under the dedicated r0i0p0 member
        if project in ['CMIP5']:
            my_ens = "r0i0p0"
        else:
            my_ens = deepcopy(ensemble)
        if realm == "A":
            farea1, farea2 = find_path_and_files(ens=my_ens, exp=experiment, fre="fx", mod=model, pro=project,
                                                 rea=realm, var="areacella")
            fland1, fland2 = find_path_and_files(ens=my_ens, exp=experiment, fre="fx", mod=model, pro=project,
                                                 rea=realm, var="sftlf")
            file_land = OSpath__join(fland1, fland2[0])
        else:
            # ocean realm: no land-fraction file is looked up
            farea1, farea2 = find_path_and_files(ens=my_ens, exp=experiment, fre="fx", mod=model, pro=project,
                                                 rea=realm, var="areacello")
            file_land = None
        file_area = OSpath__join(farea1, farea2[0])
    else:
        file_area, file_land = find_xml_fx(model, project=project, experiment=experiment, realm=realm)
    # verify the files can actually be opened; otherwise fall back to None
    # BUGFIX: narrowed the former bare 'except:' clauses, which also
    # swallowed SystemExit and KeyboardInterrupt.
    try:
        CDMS2open(file_area)
    except Exception:
        file_area = None
    try:
        CDMS2open(file_land)
    except Exception:
        file_land = None
    return file_area, file_land
def find_xml_cmip(experiment, frequency, model, project, realm, ensemble, variable):
    """
    Finds cmip variable file, as well as corresponding areacell and landmask

    Inputs:
    ------
    :param experiment: string
        experiment name (e.g., "historical", "piControl")
    :param frequency: string
        data frequency: "day" for daily, "mon" for monthly
    :param model: string
        model name (e.g., "CNRM-CM5", "IPSL-CM5A-LR")
    :param project: string
        project name (e.g., "CMIP5", "CMIP6")
    :param realm: string
        data realm: "A" for atmosphere, "O" for ocean
    :param ensemble: string
        ensemble name (e.g., "r1i1p1", "r1i1p1f1")
    :param variable: string
        variable name (e.g., "pr", "tos")

    Outputs:
    -------
    :return file_name: string
        Path and file name corresponding to the given information (e.g., /path/to/file/filename.xml)
    :return file_area: string
        Path and areacell file name corresponding to the given information (e.g., /path/to/file/areacell.xml)
        Set to None if the file cannot be found
    :return file_land: string
        Path and landmask file name corresponding to the given information (e.g., /path/to/file/landmask.xml)
        Set to None if the file cannot be found
    """
    # BUGFIX: the former bare 'except:' clauses also swallowed SystemExit
    # and KeyboardInterrupt; narrowed to 'except Exception:'.
    try:
        pathnc, filenc = find_path_and_files(ens=ensemble, exp=experiment, fre=frequency, mod=model, pro=project,
                                             rea=realm, var=variable)
    except Exception:
        if realm == "O":
            new_realm = "A"
        else:
            new_realm = "O"
        # if var is not in realm 'O' (for ocean), look for it in realm 'A' (for atmosphere), and conversely
        try:
            pathnc, filenc = find_path_and_files(ens=ensemble, exp=experiment, fre=frequency, mod=model,
                                                 pro=project, rea=new_realm, var=variable)
        except Exception:
            pathnc, filenc = None, [None]
            # given variable is neither in realm 'A' nor 'O'
            print(bcolors.FAIL + "%%%%% ----- %%%%%")
            print("ERROR: function: " + str(INSPECTstack()[0][3]) + ", line: " + str(INSPECTstack()[0][2]))
            print("given variable cannot be found in either realm A or O: " + str(variable))
            print("param: " + str(model) + ", " + str(project) + ", " + str(experiment) + ", " + str(ensemble) +
                  ", " + str(frequency) + ", " + str(realm))
            print("%%%%% ----- %%%%%" + bcolors.ENDC)
            SYSexit("")
        file_area, file_land =\
            find_fx(model, project=project, experiment=experiment, ensemble=ensemble, realm=new_realm)
    else:
        file_area, file_land = find_fx(model, project=project, experiment=experiment, ensemble=ensemble, realm=realm)
    file_name = OSpath__join(pathnc, str(filenc[0]))
    return file_name, file_area, file_land
def find_xml_obs(dataset, variable):
    """
    Finds observational variable file, as well as corresponding areacell and landmask

    Inputs:
    ------
    :param dataset: string
        model name (e.g., "CNRM-CM5", "IPSL-CM5A-LR")
    :param variable: string
        variable name (e.g., "pr", "tos")

    Outputs:
    -------
    :return file_name: string
        Path and file name corresponding to the given information (e.g., /path/to/file/filename.xml)
    :return file_area: string
        Path and areacell file name corresponding to the given information (e.g., /path/to/file/areacell.xml)
        Set to None if the file cannot be found
    :return file_land: string
        Path and landmask file name corresponding to the given information (e.g., /path/to/file/landmask.xml)
        Set to None if the file cannot be found
    """
    file_name = OSpath__join(xmldir, "obs_ENSO_metrics_" + str(dataset) + ".xml")
    xml_handle = CDMS2open(file_name)
    available_vars = sorted(xml_handle.listvariables())
    if variable not in available_vars:
        # abort with a colored, detailed error message
        print(bcolors.FAIL + "%%%%% ----- %%%%%")
        print(str().ljust(5) + "obs var " + str(variable) + " cannot be found")
        print(str().ljust(10) + "file_name = " + str(file_name))
        print(str().ljust(10) + "variables = " + str(available_vars))
        print("%%%%% ----- %%%%%" + bcolors.ENDC)
        SYSexit("")
    file_area, file_land = find_fx(dataset)
    return file_name, file_area, file_land
def find_xml_fx(model, experiment='', project='', realm=''):
    """
    Finds fixed variables, here areacell and sftlf (landmask), mostly used for observational dataset

    Inputs:
    ------
    :param model: string
        model name (e.g., "CNRM-CM5", "IPSL-CM5A-LR")
    **Optional arguments:**
    :param experiment: string, optional
        experiment name (e.g., "historical", "piControl")
    :param project: string, optional
        project name (e.g., "CMIP5", "CMIP6")
    :param realm: string, optional
        data realm: "A" for atmosphere, "O" for ocean
    Outputs:
    -------
    :return file_area: string
        Path and areacell file name corresponding to the given information (e.g., /path/to/file/areacell.xml)
        Set to None if the file cannot be found
    :return file_land: string
        Path and landmask file name corresponding to the given information (e.g., /path/to/file/landmask.xml)
        Set to None if the file cannot be found
    """
    list_obs = list(ReferenceObservations().keys())
    if model in list_obs:
        # observations never come with an areacell file
        file_area = None
        if user_name == "yplanton":
            # per-dataset landmask file names (relative to path_obs)
            landmask_by_dataset = {
                "20CRv2": model + "/land.sfc.gauss.nc",
                "NCEP2": model + "/land.sfc.gauss.nc",
                "ERSSTv5": model + "/lsmask_ERSSTv5.nc",
                "CMAP": model + "/lsmask_fx_cmap.nc",
                "GPCPv2.3": model + "/lsmask_fx_gpcpv2.3.nc",
                "OISSTv2": model + "/lsmask_fx_oisstv2.nc",
            }
            if model in landmask_by_dataset:
                file_land = OSpath__join(path_obs, landmask_by_dataset[model])
            else:
                file_land = None
        else:
            datasets_with_mask = ["20CRv2", "CMAP", "ERA-Interim", "ERSSTv5", "GPCPv2.3", "NCEP2"]
            if model in datasets_with_mask:
                file_land = OSpath__join(path_obs, "lsmask_" + model + ".nc")
            else:
                file_land = None
    else:
        # model data: both fixed fields live in xmldir under a common stem
        common_stem = str(model) + "_" + str(project) + "_" + str(experiment) + "_r0i0p0_glob_fx_" + str(realm)
        file_area = OSpath__join(xmldir, common_stem + "_areacell.xml")
        file_land = OSpath__join(xmldir, common_stem + "_landmask.xml")
    return file_area, file_land
def get_metric_values(project, metric_collection, dict_json, dict_mod_mem, reduced_set=True, portraitplot=False):
"""
Collects the metric values from the given json files for the given project and metric collection
Inputs:
------
:param project: strings
project name (e.g., "CMIP5", "CMIP6")
:param metric_collection: strings
metric collection (e.g., "ENSO_perf", "ENSO_proc", "ENSO_tel")
:param dict_json: dictionary
Dictionary with path and name of json files output of the CLIVAR PRP ENSO metrics package.
:param dict_mod_mem: dictionary
Dictionary with every models available and members in the given json files, for the given projects and metric
collections.
**Optional arguments:**
:param reduced_set: boolean, optional
True to remove extra metrics that are not in the final set chosen by CLIVAR PRP.
If set to False it removes metrics that are in more than one metric collection.
Default value is True.
:param portraitplot: boolean, optional
True | |
Co., Ltd.",
"0016DC": "ARCHOS",
"0016DD": "Gigabeam Corporation",
"0016DE": "FAST Inc",
"0016DF": "Lundinova AB",
"0016E0": "3Com Ltd",
"0016E1": "SiliconStor, Inc.",
"0016E2": "American Fibertek, Inc.",
"0016E3": "ASKEY COMPUTER CORP.",
"0016E4": "VANGUARD SECURITY ENGINEERING CORP.",
"0016E5": "FORDLEY DEVELOPMENT LIMITED",
"0016E6": "GIGA-BYTE TECHNOLOGY CO.,LTD.",
"0016E7": "Dynamix Promotions Limited",
"0016E8": "Sigma Designs, Inc.",
"0016E9": "Tiba Medical Inc",
"0016EA": "Intel Corporate",
"0016EB": "Intel Corporate",
"0016EC": "Elitegroup Computer Systems Co., Ltd.",
"0016ED": "Digital Safety Technologies, Inc",
"0016EE": "RoyalDigital Inc.",
"0016EF": "Koko Fitness, Inc.",
"0016F0": "Dell",
"0016F1": "OmniSense, LLC",
"0016F2": "Dmobile System Co., Ltd.",
"0016F3": "CAST Information Co., Ltd",
"0016F4": "Eidicom Co., Ltd.",
"0016F5": "<NAME> Digital Technology Co.,Ltd",
"0016F6": "Video Products Group",
"0016F7": "L-3 Communications, Aviation Recorders",
"0016F8": "AVIQTECH TECHNOLOGY CO., LTD.",
"0016F9": "CETRTA POT, d.o.o., Kranj",
"0016FA": "ECI Telecom Ltd.",
"0016FB": "SHENZHEN MTC CO.,LTD.",
"0016FC": "TOHKEN CO.,LTD.",
"0016FD": "Jaty Electronics",
"0016FE": "Alps Electric Co., Ltd",
"0016FF": "Wamin Optocomm Mfg Corp",
"001700": "ARRIS Group, Inc.",
"001701": "KDE, Inc.",
"001702": "Osung Midicom Co., Ltd",
"001703": "MOSDAN Internation Co.,Ltd",
"001704": "Shinco Electronics Group Co.,Ltd",
"001705": "Methode Electronics",
"001706": "Techfaith Wireless Communication Technology Limited.",
"001707": "InGrid, Inc",
"001708": "Hewlett-Packard Company",
"001709": "Exalt Communications",
"00170A": "INEW DIGITAL COMPANY",
"00170B": "Contela, Inc.",
"00170C": "Twig Com Ltd.",
"00170D": "Dust Networks Inc.",
"00170E": "CISCO SYSTEMS, INC.",
"00170F": "CISCO SYSTEMS, INC.",
"001710": "Casa Systems Inc.",
"001711": "GE Healthcare Bio-Sciences AB",
"001712": "ISCO International",
"001713": "Tiger NetCom",
"001714": "BR Controls Nederland bv",
"001715": "Qstik",
"001716": "Qno Technology Inc.",
"001717": "Leica Geosystems AG",
"001718": "Vansco Electronics Oy",
"001719": "AudioCodes USA, Inc",
"00171A": "Winegard Company",
"00171B": "Innovation Lab Corp.",
"00171C": "NT MicroSystems, Inc.",
"00171D": "DIGIT",
"00171E": "Theo Benning GmbH & Co. KG",
"00171F": "IMV Corporation",
"001720": "Image Sensing Systems, Inc.",
"001721": "FITRE S.p.A.",
"001722": "Hanazeder Electronic GmbH",
"001723": "Summit Data Communications",
"001724": "Studer Professional Audio GmbH",
"001725": "Liquid Computing",
"001726": "m2c Electronic Technology Ltd.",
"001727": "<NAME> Italia s.r.l.",
"001728": "Selex Communications",
"001729": "Ubicod Co.LTD",
"00172A": "Proware Technology Corp.(By Unifosa)",
"00172B": "Global Technologies Inc.",
"00172C": "TAEJIN INFOTECH",
"00172D": "Axcen Photonics Corporation",
"00172E": "FXC Inc.",
"00172F": "NeuLion Incorporated",
"001730": "Automation Electronics",
"001731": "ASUSTek COMPUTER INC.",
"001732": "Science-Technical Center \"RISSA\"",
"001733": "SFR",
"001734": "ADC Telecommunications",
"001735": "PRIVATE",
"001736": "iiTron Inc.",
"001737": "Industrie Dial Face S.p.A.",
"001738": "International Business Machines",
"001739": "Bright Headphone Electronics Company",
"00173A": "Reach Systems Inc.",
"00173B": "Cisco Systems, Inc.",
"00173C": "Extreme Engineering Solutions",
"00173D": "Neology",
"00173E": "LeucotronEquipamentos Ltda.",
"00173F": "Belkin Corporation",
"001740": "Bluberi Gaming Technologies Inc",
"001741": "DEFIDEV",
"001742": "FUJITSU LIMITED",
"001743": "Deck Srl",
"001744": "Araneo Ltd.",
"001745": "INNOTZ CO., Ltd",
"001746": "Freedom9 Inc.",
"001747": "Trimble",
"001748": "Neokoros Brasil Ltda",
"001749": "HYUNDAE YONG-O-SA CO.,LTD",
"00174A": "SOCOMEC",
"00174B": "Nokia Danmark A/S",
"00174C": "Millipore",
"00174D": "DYNAMIC NETWORK FACTORY, INC.",
"00174E": "Parama-tech Co.,Ltd.",
"00174F": "iCatch Inc.",
"001750": "GSI Group, MicroE Systems",
"001751": "Online Corporation",
"001752": "DAGS, Inc",
"001753": "nFore Technology Inc.",
"001754": "Arkino HiTOP Corporation Limited",
"001755": "GE Security",
"001756": "Vinci Labs Oy",
"001757": "RIX TECHNOLOGY LIMITED",
"001758": "ThruVision Ltd",
"001759": "CISCO SYSTEMS, INC.",
"00175A": "CISCO SYSTEMS, INC.",
"00175B": "ACS Solutions Switzerland Ltd.",
"00175C": "SHARP CORPORATION",
"00175D": "Dongseo system.",
"00175E": "Zed-3",
"00175F": "XENOLINK Communications Co., Ltd.",
"001760": "<NAME> MFG.CO.,LTD",
"001761": "PRIVATE",
"001762": "Solar Technology, Inc.",
"001763": "Essentia S.p.A.",
"001764": "ATMedia GmbH",
"001765": "Nortel",
"001766": "Accense Technology, Inc.",
"001767": "Earforce AS",
"001768": "Zinwave Ltd",
"001769": "Cymphonix Corp",
"00176A": "Avago Technologies",
"00176B": "Kiyon, Inc.",
"00176C": "Pivot3, Inc.",
"00176D": "CORE CORPORATION",
"00176E": "DUCATI SISTEMI",
"00176F": "PAX Computer Technology(Shenzhen) Ltd.",
"001770": "Arti Industrial Electronics Ltd.",
"001771": "APD Communications Ltd",
"001772": "ASTRO Strobel Kommunikationssysteme GmbH",
"001773": "Laketune Technologies Co. Ltd",
"001774": "Elesta GmbH",
"001775": "TTE Germany GmbH",
"001776": "Meso Scale Diagnostics, LLC",
"001777": "Obsidian Research Corporation",
"001778": "Central Music Co.",
"001779": "QuickTel",
"00177A": "ASSA ABLOY AB",
"00177B": "Azalea Networks inc",
"00177C": "Smartlink Network Systems Limited",
"00177D": "IDT International Limited",
"00177E": "Meshcom Technologies Inc.",
"00177F": "Worldsmart Retech",
"001780": "Applied Biosystems B.V.",
"001781": "Greystone Data System, Inc.",
"001782": "LoBenn Inc.",
"001783": "Texas Instruments",
"001784": "ARRIS Group, Inc.",
"001785": "Sparr Electronics Ltd",
"001786": "wisembed",
"001787": "Brother, Brother & Sons ApS",
"001788": "Philips Lighting BV",
"001789": "Zenitron Corporation",
"00178A": "DARTS TECHNOLOGIES CORP.",
"00178B": "Teledyne Technologies Incorporated",
"00178C": "Independent Witness, Inc",
"00178D": "Checkpoint Systems, Inc.",
"00178E": "Gunnebo Cash Automation AB",
"00178F": "NINGBO YIDONG ELECTRONIC CO.,LTD.",
"001790": "HYUNDAI DIGITECH Co, Ltd.",
"001791": "LinTech GmbH",
"001792": "Falcom Wireless Comunications Gmbh",
"001793": "Tigi Corporation",
"001794": "CISCO SYSTEMS, INC.",
"001795": "CISCO SYSTEMS, INC.",
"001796": "Rittmeyer AG",
"001797": "Telsy Elettronica S.p.A.",
"001798": "Azonic Technology Co., LTD",
"001799": "SmarTire Systems Inc.",
"00179A": "D-Link Corporation",
"00179B": "Chant Sincere CO., LTD.",
"00179C": "DEPRAG SCHULZ GMBH u. CO.",
"00179D": "Kelman Limited",
"00179E": "Sirit Inc",
"00179F": "Apricorn",
"0017A0": "RoboTech srl",
"0017A1": "3soft inc.",
"0017A2": "Camrivox Ltd.",
"0017A3": "MIX s.r.l.",
"0017A4": "Hewlett-Packard Company",
"0017A5": "Ralink Technology Corp",
"0017A6": "YOSIN ELECTRONICS CO., LTD.",
"0017A7": "Mobile Computing Promotion Consortium",
"0017A8": "EDM Corporation",
"0017A9": "Sentivision",
"0017AA": "elab-experience inc.",
"0017AB": "Nintendo Co., Ltd.",
"0017AC": "O'Neil Product Development Inc.",
"0017AD": "AceNet Corporation",
"0017AE": "GAI-Tronics",
"0017AF": "Enermet",
"0017B0": "Nokia Danmark A/S",
"0017B1": "ACIST Medical Systems, Inc.",
"0017B2": "SK Telesys",
"0017B3": "Aftek Infosys Limited",
"0017B4": "Remote Security Systems, LLC",
"0017B5": "Peerless Systems Corporation",
"0017B6": "Aquantia",
"0017B7": "Tonze Technology Co.",
"0017B8": "NOVATRON CO., LTD.",
"0017B9": "Gambro Lundia AB",
"0017BA": "SEDO CO., LTD.",
"0017BB": "Syrinx Industrial Electronics",
"0017BC": "Touchtunes Music Corporation",
"0017BD": "Tibetsystem",
"0017BE": "Tratec Telecom B.V.",
"0017BF": "Coherent Research Limited",
"0017C0": "PureTech Systems, Inc.",
"0017C1": "CM Precision Technology LTD.",
"0017C2": "ADB Broadband Italia",
"0017C3": "KTF Technologies Inc.",
"0017C4": "Quanta Microsystems, INC.",
"0017C5": "SonicWALL",
"0017C6": "Cross Match Technologies Inc",
"0017C7": "MARA Systems Consulting AB",
"0017C8": "KYOCERA Document Solutions Inc.",
"0017C9": "Samsung Electronics Co., Ltd.",
"0017CA": "Qisda Corporation",
"0017CB": "Juniper Networks",
"0017CC": "Alcatel-Lucent",
"0017CD": "CEC Wireless R&D Ltd.",
"0017CE": "Screen Service Spa",
"0017CF": "iMCA-GmbH",
"0017D0": "Opticom Communications, LLC",
"0017D1": "Nortel",
"0017D2": "THINLINX PTY LTD",
"0017D3": "Etymotic Research, Inc.",
"0017D4": "Monsoon Multimedia, Inc",
"0017D5": "Samsung Electronics Co., Ltd.",
"0017D6": "Bluechips Microhouse Co.,Ltd.",
"0017D7": "ION Geophysical Corporation Inc.",
"0017D8": "Magnum Semiconductor, Inc.",
"0017D9": "AAI Corporation",
"0017DA": "Spans Logic",
"0017DB": "CANKO TECHNOLOGIES INC.",
"0017DC": "DAEMYUNG ZERO1",
"0017DD": "Clipsal Australia",
"0017DE": "Advantage Six Ltd",
"0017DF": "CISCO SYSTEMS, INC.",
"0017E0": "CISCO SYSTEMS, INC.",
"0017E1": "DACOS Technologies Co., Ltd.",
"0017E2": "ARRIS Group, Inc.",
"0017E3": "Texas Instruments",
"0017E4": "Texas Instruments",
"0017E5": "Texas Instruments",
"0017E6": "Texas Instruments",
"0017E7": "Texas Instruments",
"0017E8": "Texas Instruments",
"0017E9": "Texas Instruments",
"0017EA": "Texas Instruments",
| |
# quad1m = ax.pcolormesh(kzg_interp, -1 * kxg_interp, PhDen_interp_vals[:-1, :-1], norm=colors.LogNorm(vmin=1e-3, vmax=vmax), cmap='inferno')
# else:
# quad1 = ax.pcolormesh(kzg_interp, kxg_interp, PhDen_interp_vals[:-1, :-1], vmin=vmin, vmax=vmax, cmap='inferno')
# quad1m = ax.pcolormesh(kzg_interp, -1 * kxg_interp, PhDen_interp_vals[:-1, :-1], vmin=vmin, vmax=vmax, cmap='inferno')
# curve1 = ax.plot(Pph_Vals[tninds[tind]], 0, marker='x', markersize=10, zorder=11, color="xkcd:steel grey")[0]
# curve1m = ax.plot(Pimp_Vals[tninds[tind]], 0, marker='o', markersize=10, zorder=11, color="xkcd:apple green")[0]
# curve2 = ax.plot(mc, 0, marker='*', markersize=10, zorder=11, color="cyan")[0]
# def rfunc(k): return (pfs.omegak(k, mB, n0, gBB) - 2 * np.pi / tsVals[tninds[tind]])
# kroot = fsolve(rfunc, 1e8); kroot = kroot[kroot >= 0]
# patch_Excitation = plt.Circle((0, 0), kroot[0], edgecolor='red', facecolor='None', linewidth=2)
# ax.add_patch(patch_Excitation)
# # patch_klin = plt.Circle((0, 0), klin, edgecolor='tab:cyan', facecolor='None')
# # ax.add_patch(patch_klin)
# if IRpatch is True:
# patch_IR = plt.Circle((0, 0), kIRcut, edgecolor='#8c564b', facecolor='#8c564b')
# ax.add_patch(patch_IR)
# IR_text = ax.text(0.61, 0.75, r'Weight (IR patch): ' + '{:.2f}%'.format(norm_IRpercent[tninds[tind]]), transform=ax.transAxes, fontsize='small', color='#8c564b')
# rem_text = ax.text(0.61, 0.675, r'Weight (Rem vis): ' + '{:.2f}%'.format(norm_axpercent[tninds[tind]]), transform=ax.transAxes, fontsize='small', color='yellow')
# if FGRBool is True:
# if Lx == 60:
# Omegak_interp_vals = Omegak_da.sel(t=t).values
# else:
# Omegak_interp_vals, kg_interp, thg_interp = pfc.xinterp2D(Omegak_da.sel(t=t), 'k', 'th', interpmul)
# FGRmask0 = np.abs(Omegak_interp_vals) < FGRlim
# Omegak_interp_vals[FGRmask0] = 1
# Omegak_interp_vals[np.logical_not(FGRmask0)] = 0
# p = []
# p.append(ax.contour(kzg_interp, kxg_interp, Omegak_interp_vals, zorder=10, colors='tab:gray'))
# p.append(ax.contour(kzg_interp, -1 * kxg_interp, Omegak_interp_vals, zorder=10, colors='tab:gray'))
# p.append(ax.contour(Pimp_Vals[tind] - kzg_interp, -1 * kxg_interp, Omegak_interp_vals, zorder=10, colors='xkcd:military green'))
# p.append(ax.contour(Pimp_Vals[tind] - kzg_interp, -1 * (-1) * kxg_interp, Omegak_interp_vals, zorder=10, colors='xkcd:military green'))
# ax.set_xlim([-1 * axislim, axislim])
# ax.set_ylim([-1 * axislim, axislim])
# ax.grid(True, linewidth=0.5)
# ax.set_title(r'$t$ [$\xi/c$]: ' + '{:1.2f}'.format(tsVals[tninds[tind]] / tscale))
# ax.set_xlabel(r'$k_{z}$')
# ax.set_ylabel(r'$k_{x}$')
# curve1_LE = Line2D([0], [0], color='none', lw=0, marker='x', markerfacecolor='xkcd:steel grey', markeredgecolor='xkcd:steel grey', markersize=10)
# curve1m_LE = Line2D([0], [0], color='none', lw=0, marker='o', markerfacecolor='xkcd:apple green', markeredgecolor='xkcd:apple green', markersize=10)
# curve2_LE = Line2D([0], [0], color='none', lw=0, marker='*', markerfacecolor='cyan', markeredgecolor='cyan', markersize=10)
# patch_Excitation_LE = Line2D([0], [0], marker='o', color='none', markerfacecolor='none', markeredgecolor='red', markersize=20, mew=2)
# # patch_klin_LE = Line2D([0], [0], marker='o', color='none', markerfacecolor='none', markeredgecolor='tab:cyan', markersize=20, mew=2)
# patch_FGR_ph_LE = Ellipse(xy=(0, 0), width=0.2, height=0.1, angle=0, edgecolor='tab:gray', facecolor='none', lw=3)
# patch_FGR_imp_LE = Ellipse(xy=(0, 0), width=0.2, height=0.1, angle=0, edgecolor='xkcd:military green', facecolor='none', lw=3)
# if IRpatch is True:
# handles = (curve1_LE, curve1m_LE, curve2_LE, patch_Excitation_LE, patch_IR, patch_FGR_ph_LE, patch_FGR_imp_LE)
# labels = (r'$\langle P_{ph} \rangle$', r'$\langle P_{I} \rangle$', r'$(m_{I}c)\vec{e}_{k_{z}}$', r'$\omega_{|k|}^{-1}(\frac{2\pi}{t})$', r'Singular Region', 'FGR Phase Space (ph)', 'FGR Phase Space (imp)')
# else:
# handles = (curve1_LE, curve1m_LE, curve2_LE, patch_Excitation_LE, patch_FGR_ph_LE, patch_FGR_imp_LE)
# labels = (r'$\langle \mathbf{P}_{\rm ph} \rangle$', r'$\langle \mathbf{P}_{\rm imp} \rangle$', r'$(m_{I}c)\mathbf{e}_{k_{z}}$', r'$\omega_{\mathbf{k}}^{-1}(\frac{2\pi}{t})$', 'FGR Phase Space (ph)', 'FGR Phase Space (imp)')
# cbar_ax = fig.add_axes([0.9, 0.2, 0.02, 0.7])
# fig.colorbar(quad1, cax=cbar_ax, extend='both')
# fig.legend(handles, labels, ncol=3, loc='lower center', handler_map={Ellipse: HandlerEllipse()})
# fig.text(0.05, 0.97, '(a)', fontsize=20)
# fig.text(0.05, 0.68, '(c)', fontsize=20)
# fig.text(0.05, 0.38, '(e)', fontsize=20)
# fig.text(0.47, 0.97, '(b)', fontsize=20)
# fig.text(0.47, 0.68, '(d)', fontsize=20)
# fig.text(0.47, 0.38, '(f)', fontsize=20)
# fig.set_size_inches(12, 12)
# fig.subplots_adjust(bottom=0.17, top=0.95, right=0.85, hspace=0.6, wspace=0.4)
# # fig.savefig(figdatapath + '/Fig7.pdf', dpi=20)
# fig.savefig(figdatapath + '/Fig7.jpg', quality=100)
# # # FIG 7 - INDIVIDUAL PHONON MOMENTUM DISTRIBUTION PLOT SLICES (OLD)
# matplotlib.rcParams.update({'font.size': 18})
# class HandlerEllipse(HandlerPatch):
# def create_artists(self, legend, orig_handle,
# xdescent, ydescent, width, height, fontsize, trans):
# center = 0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent
# p = Ellipse(xy=center, width=width + xdescent,
# height=height + ydescent)
# self.update_prop(p, orig_handle, legend)
# p.set_transform(trans)
# return [p]
# Pnorm_des = np.array([0.1, 0.5, 0.8, 1.3, 1.5, 1.8, 3.0, 3.5, 4.0, 5.0, 8.0])
# Pinds = np.zeros(Pnorm_des.size, dtype=int)
# for Pn_ind, Pn in enumerate(Pnorm_des):
# Pinds[Pn_ind] = np.abs(Pnorm - Pn).argmin().astype(int)
# print(PVals[Pinds])
# indP = Pinds[5]
# P = PVals[indP]
# print(aIBi, P)
# vmaxAuto = False
# FGRBool = True; FGRlim = 1e-2
# IRpatch = False
# shortTime = False; tau = 5
# # tau = 100
# # tsVals = tVals[tVals < tau]
# if Lx == 60:
# qds_PaIBi = xr.open_dataset(distdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi))
# tsVals = qds_PaIBi.coords['tc'].values
# else:
# # qds_PaIBi = qds_aIBi.sel(t=tsVals, P=P)
# qds_PaIBi = qds_aIBi.sel(P=P)
# tsVals = qds_PaIBi.coords['t'].values
# if shortTime is True:
# tsVals = tsVals[tsVals <= tau]
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', qds_PaIBi.coords['k'].values); kgrid.initArray_premade('th', qds_PaIBi.coords['th'].values)
# kVec = kgrid.getArray('k')
# thVec = kgrid.getArray('th')
# kg, thg = np.meshgrid(kVec, thVec, indexing='ij')
# dVk = kgrid.dV()
# axislim = 1.2
# if shortTime is True:
# axislim = 1.01 * P
# # kIRcut = 0.13
# # axislim = 3
# kIRcut = 0.1
# if Lx == 60:
# kIRcut = 0.01
# if vmaxAuto is True:
# kIRcut = -1
# kIRmask = kg < kIRcut
# dVk_IR = dVk.reshape((len(kVec), len(thVec)))[kIRmask]
# axmask = (kg >= kIRcut) * (kg <= axislim)
# dVk_ax = dVk.reshape((len(kVec), len(thVec)))[axmask]
# Omegak_da = xr.DataArray(np.full((tsVals.size, len(kVec), len(thVec)), np.nan, dtype=float), coords=[tsVals, kVec, thVec], dims=['t', 'k', 'th'])
# PhDen_da = xr.DataArray(np.full((tsVals.size, len(kVec), len(thVec)), np.nan, dtype=float), coords=[tsVals, kVec, thVec], dims=['t', 'k', 'th'])
# Nph_Vals = np.zeros(tsVals.size)
# Pph_Vals = np.zeros(tsVals.size)
# Pimp_Vals = np.zeros(tsVals.size)
# norm_IRpercent = np.zeros(tsVals.size)
# norm_axpercent = np.zeros(tsVals.size)
# vmax = 0
# for tind, t in enumerate(tsVals):
# if Lx == 60:
# CSAmp_ds = (qds_PaIBi['Real_CSAmp'] + 1j * qds_PaIBi['Imag_CSAmp']).sel(tc=t)
# else:
# CSAmp_ds = (qds_PaIBi['Real_CSAmp'] + 1j * qds_PaIBi['Imag_CSAmp']).sel(t=t)
# CSAmp_Vals = CSAmp_ds.values
# Nph_Vals[tind] = qds_PaIBi['Nph'].sel(t=t).values
# Pph_Vals[tind] = qds_PaIBi['Pph'].sel(t=t).values
# Pimp_Vals[tind] = P - Pph_Vals[tind]
# Bk_2D_vals = CSAmp_Vals.reshape((len(kVec), len(thVec)))
# PhDen_da.sel(t=t)[:] = ((1 / Nph_Vals[tind]) * np.abs(Bk_2D_vals)**2).real.astype(float)
# norm_tot = np.dot(PhDen_da.sel(t=t).values.flatten(), dVk)
# PhDen_IR = PhDen_da.sel(t=t).values[kIRmask]
# norm_IR = np.dot(PhDen_IR.flatten(), dVk_IR.flatten())
# norm_IRpercent[tind] = 100 * np.abs(norm_IR / norm_tot)
# # print(norm_IRpercent[tind])
# PhDen_ax = PhDen_da.sel(t=t).values[axmask]
# norm_ax = np.dot(PhDen_ax.flatten(), dVk_ax.flatten())
# norm_axpercent[tind] = 100 * np.abs(norm_ax / norm_tot)
# Omegak_da.sel(t=t)[:] = pfs.Omega(kgrid, Pimp_Vals[tind], mI, mB, n0, gBB).reshape((len(kVec), len(thVec))).real.astype(float)
# # print(Omegak_da.sel(t=t))
# maxval = np.max(PhDen_da.sel(t=t).values[np.logical_not(kIRmask)])
# if maxval > vmax:
# vmax = maxval
# # Plot slices
# tnorm = tsVals / tscale
# tnVals_des = np.array([0.5, 8.0, 15.0, 25.0, 40.0, 75.0])
# tninds = np.zeros(tnVals_des.size, dtype=int)
# for tn_ind, tn in enumerate(tnVals_des):
# tninds[tn_ind] = np.abs(tnorm - tn).argmin().astype(int)
# tslices = tsVals[tninds]
# print(vmax)
# vmin = 0
# if (vmaxAuto is False) and (Lx != 60):
# vmax = 800
# if shortTime is True:
# vmax = 200
# interpmul = 5
# if Lx == 60:
# PhDen0_interp_vals = PhDen_da.isel(t=0).values
# kxg_interp = kg * np.sin(thg)
# kzg_interp = kg * np.cos(thg)
# else:
# PhDen0_interp_vals, kg_interp, thg_interp = pfc.xinterp2D(PhDen_da.isel(t=0), 'k', 'th', interpmul)
# kxg_interp = kg_interp * np.sin(thg_interp)
# kzg_interp = kg_interp * np.cos(thg_interp)
# vmax = 3000
# fig, axes = plt.subplots(nrows=3, ncols=2)
# for tind, t in enumerate(tslices):
# if tind == 0:
# ax = axes[0, 0]
# elif tind == 1:
# ax = axes[0, 1]
# if tind == 2:
# ax = axes[1, 0]
# if tind == 3:
# ax = axes[1, 1]
# if tind == 4:
# ax = axes[2, 0]
# if tind == 5:
# ax = axes[2, 1]
# PhDen_interp_vals = PhDen_da.sel(t=t).values
# if vmaxAuto is True:
# quad1 = ax.pcolormesh(kzg_interp, kxg_interp, PhDen_interp_vals[:-1, :-1], norm=colors.LogNorm(vmin=1e-3, vmax=vmax), cmap='inferno')
# quad1m = ax.pcolormesh(kzg_interp, -1 * kxg_interp, PhDen_interp_vals[:-1, :-1], norm=colors.LogNorm(vmin=1e-3, vmax=vmax), cmap='inferno')
# else:
# quad1 = ax.pcolormesh(kzg_interp, kxg_interp, PhDen_interp_vals[:-1, :-1], vmin=vmin, vmax=vmax, cmap='inferno')
# quad1m = ax.pcolormesh(kzg_interp, -1 * kxg_interp, PhDen_interp_vals[:-1, :-1], vmin=vmin, vmax=vmax, cmap='inferno')
# curve1 = ax.plot(Pph_Vals[tninds[tind]], 0, marker='x', markersize=10, zorder=11, color="xkcd:steel grey")[0]
# curve1m = ax.plot(Pimp_Vals[tninds[tind]], 0, marker='o', markersize=10, zorder=11, color="xkcd:apple green")[0]
# curve2 = ax.plot(mc, 0, marker='*', markersize=10, zorder=11, color="cyan")[0]
# def rfunc(k): return (pfs.omegak(k, mB, n0, gBB) - 2 * np.pi / tsVals[tninds[tind]])
# kroot = fsolve(rfunc, 1e8); kroot = kroot[kroot >= 0]
# patch_Excitation = plt.Circle((0, 0), kroot[0], edgecolor='red', facecolor='None', linewidth=2)
# ax.add_patch(patch_Excitation)
# patch_klin = plt.Circle((0, 0), klin, edgecolor='tab:cyan', facecolor='None')
# ax.add_patch(patch_klin)
# if IRpatch is True:
# patch_IR = plt.Circle((0, | |
= self.RegionPlaceholders.query.all()
for region in region_placeholders:
regions_maps.update(region.countries)
# getting country groups from database and insert into the country_groups list
customer_loc = ''
location_list = json_template["conductor_solver"]["locations"]
for location_id, location_info in location_list.items():
customer_loc = location_info['country']
countries = self.CountryLatency.query.get_plan_by_col("country_name", customer_loc)
LOG.info("Customer Location for Latency Reduction " + customer_loc)
if len(countries) == 0:
LOG.info("country is not present is country latency table, looking for * wildcard entry")
countries = self.CountryLatency.query.get_plan_by_col("country_name", "*")
if len(countries) != 0:
LOG.info("Found '*' wild card entry in country latency table")
else:
msg = "No '*' wild card entry found in country latency table. No solution will be provided"
LOG.info(msg)
p.message = msg
for country in countries:
country_groups = country.groups
LOG.info("Done getting Latency Country DB Groups ")
except Exception as error_msg:
LOG.error("Exception thrown while reading region_placeholders and country groups information "
"from database. Exception message: {}".format(error_msg))
try:
request.parse_template(json_template, country_groups, regions_maps)
request.assgin_constraints_to_demands()
requests_to_solve[p.id] = request
opt = optimizer.Optimizer(self.conf, _requests=requests_to_solve)
solution_list = opt.get_solution(num_solution)
except Exception as err:
message = _LE("Plan {} status encountered a "
"parsing error: {}").format(p.id, err)
LOG.error(traceback.print_exc())
p.status = self.Plan.ERROR
p.message = message
while 'FAILURE' in _is_success:
_is_success = p.update(condition=self.solver_owner_condition)
LOG.info(_LI("Encountered a parsing error, changing the template status from solving to error, "
"atomic update response from MUSIC {}").format(_is_success))
continue
LOG.info("Preparing the recommendations ")
# checking if the order is 'initial' or 'speed changed' one
is_speed_change = False
if request and request.request_type == 'speed changed':
is_speed_change = True
recommendations = []
if not solution_list or len(solution_list) < 1:
# when order takes too much time to solve
if (int(round(time.time())) - self.millisec_to_sec(p.updated)) > self.conf.solver.solver_timeout:
message = _LI("Plan {} is timed out, exceed the expected "
"time {} seconds").format(p.id, self.conf.solver.timeout)
# when no solution found
else:
message = _LI("Plan {} search failed, no "
"recommendations found by machine {}").format(p.id, p.solver_owner)
LOG.info(message)
# Update the plan status
p.status = self.Plan.NOT_FOUND
p.message = message
# Metrics to Prometheus
m_svc_name = p.template.get('parameters', {}).get('service_name', 'N/A')
PC.VNF_FAILURE.labels('ONAP', m_svc_name).inc()
while 'FAILURE' in _is_success:
_is_success = p.update(condition=self.solver_owner_condition)
LOG.info(_LI("Plan serach failed, changing the template status from solving to not found, "
"atomic update response from MUSIC {}").format(_is_success))
else:
# Assemble recommendation result JSON
for solution in solution_list:
current_rec = dict()
for demand_name in solution:
resource = solution[demand_name]
if not is_speed_change:
is_rehome = "false"
else:
is_rehome = "false" if resource.get("existing_placement") == 'true' else "true"
location_id = "" if resource.get("cloud_region_version") == '2.5' \
else resource.get("location_id")
rec = {
# FIXME(shankar) A&AI must not be hardcoded here.
# Also, account for more than one Inventory Provider.
"inventory_provider": "aai",
"service_resource_id":
resource.get("service_resource_id"),
"candidate": {
"candidate_id": resource.get("candidate_id"),
"inventory_type": resource.get("inventory_type"),
"cloud_owner": resource.get("cloud_owner"),
"location_type": resource.get("location_type"),
"location_id": location_id,
"is_rehome": is_rehome},
"attributes": {
"physical-location-id":
resource.get("physical_location_id"),
"cloud_owner": resource.get("cloud_owner"),
'aic_version': resource.get("cloud_region_version")},
}
if rec["candidate"]["inventory_type"] in ["nssi", "nsi", "slice_profiles", "nst"]:
rec["candidate"] = resource
if resource.get('vim-id'):
rec["candidate"]['vim-id'] = resource.get('vim-id')
if rec["candidate"]["inventory_type"] == "service":
rec["attributes"]["host_id"] = resource.get("host_id")
rec["attributes"]["service_instance_id"] = resource.get("candidate_id")
rec["candidate"]["host_id"] = resource.get("host_id")
if resource.get('vlan_key'):
rec["attributes"]['vlan_key'] = resource.get('vlan_key')
if resource.get('port_key'):
rec["attributes"]['port_key'] = resource.get('port_key')
if rec["candidate"]["inventory_type"] == "vfmodule":
rec["attributes"]["host_id"] = resource.get("host_id")
rec["attributes"]["service_instance_id"] = resource.get("service_instance_id")
rec["candidate"]["host_id"] = resource.get("host_id")
if resource.get('vlan_key'):
rec["attributes"]['vlan_key'] = resource.get('vlan_key')
if resource.get('port_key'):
rec["attributes"]['port_key'] = resource.get('port_key')
vf_module_data = rec["attributes"]
vf_module_data['nf-name'] = resource.get("nf-name")
vf_module_data['nf-id'] = resource.get("nf-id")
vf_module_data['nf-type'] = resource.get("nf-type")
vf_module_data['vnf-type'] = resource.get("vnf-type")
vf_module_data['vf-module-id'] = resource.get("vf-module-id")
vf_module_data['vf-module-name'] = resource.get("vf-module-name")
vf_module_data['ipv4-oam-address'] = resource.get("ipv4-oam-address")
vf_module_data['ipv6-oam-address'] = resource.get("ipv6-oam-address")
vf_module_data['vservers'] = resource.get("vservers")
elif rec["candidate"]["inventory_type"] == "cloud":
if resource.get("all_directives") and resource.get("flavor_map"):
rec["attributes"]["directives"] = \
self.set_flavor_in_flavor_directives(
resource.get("flavor_map"), resource.get("all_directives"))
# Metrics to Prometheus
m_vim_id = resource.get("vim-id")
m_hpa_score = resource.get("hpa_score", 0)
m_svc_name = p.template['parameters'].get(
'service_name', 'N/A')
for vnfc, flavor in resource.get("flavor_map").items():
PC.VNF_COMPUTE_PROFILES.labels('ONAP',
m_svc_name,
demand_name,
vnfc,
flavor,
m_vim_id).inc()
PC.VNF_SCORE.labels('ONAP', m_svc_name,
demand_name,
m_hpa_score).inc()
if resource.get('conflict_id'):
rec["candidate"]["conflict_id"] = resource.get("conflict_id")
if resource.get('passthrough_attributes'):
for key, value in resource.get('passthrough_attributes').items():
if key in rec["attributes"]:
LOG.error('Passthrough attribute {} in demand {} already exist for candidate {}'.
format(key, demand_name, rec['candidate_id']))
else:
rec["attributes"][key] = value
# TODO(snarayanan): Add total value to recommendations?
# msg = "--- total value of decision = {}"
# LOG.debug(msg.format(_best_path.total_value))
# msg = "--- total cost of decision = {}"
# LOG.debug(msg.format(_best_path.total_cost))
current_rec[demand_name] = rec
recommendations.append(current_rec)
# Update the plan with the solution
p.solution = {
"recommendations": recommendations
}
# multiple spin-ups logic
'''
go through list of recommendations in the solution
for cloud candidates, check if (cloud-region-id + e2evnfkey) is in the order_locks table
if so, insert the row with status 'parked' in order_locks, changes plan status to 'pending' in plans
table (or other status value)
otherwise, insert the row with status 'locked' in order_locks, and change status to 'solved' in plans
table - continue reservation
'''
# clean up the data/record in order_locks table, deleting all records that failed from MSO
order_locks = self.OrderLock.query.all()
for order_lock_record in order_locks:
plans = getattr(order_lock_record, 'plans')
for plan_id, plan_attributes in plans.items():
plan_dict = json.loads(plan_attributes)
if plan_dict.get('status', None) == OrderLock.FAILED:
order_lock_record.delete()
LOG.info(_LI("The order lock record {} with status {} is deleted (due to failure"
" spinup from MSO) from order_locks table").
format(order_lock_record, plan_dict.get('status')))
break
inserted_order_records_dict = dict()
available_dependenies_set = set()
is_inserted_to_order_locks = True
is_conflict_id_missing = False
is_order_translated_before_spinup = False
for solution in solution_list:
for demand_name, candidate in solution.items():
if candidate.get('inventory_type') == 'cloud':
conflict_id = candidate.get('conflict_id')
service_resource_id = candidate.get('service_resource_id')
# TODO(larry): add more logic for missing conflict_id in template
if not conflict_id:
is_conflict_id_missing = True
break
available_dependenies_set.add(conflict_id)
# check if conflict_id exists in order_locks table
order_lock_record = self.OrderLock.query.get_plan_by_col("id", conflict_id)
if order_lock_record:
is_spinup_completed = getattr(order_lock_record[0], 'is_spinup_completed')
spinup_completed_timestamp = getattr(order_lock_record[0],
'spinup_completed_timestamp')
if is_spinup_completed and spinup_completed_timestamp > p.translation_begin_timestamp:
is_order_translated_before_spinup = True
break
elif not is_spinup_completed:
inserted_order_records_dict[conflict_id] = service_resource_id
if is_conflict_id_missing:
message = _LE("Missing conflict identifier field for cloud candidates in the template, "
"could not insert into order_locks table")
LOG.debug(message)
p.status = self.Plan.SOLVED
elif is_order_translated_before_spinup:
message = _LE("Retriggering Plan {} due to the new order arrives before the "
"spinup completion of the old order ").format(p.id)
LOG.debug(message)
p.rehome_plan()
elif len(inserted_order_records_dict) > 0:
new_dependenies_set = available_dependenies_set - set(inserted_order_records_dict.keys())
dependencies = ','.join(str(s) for s in new_dependenies_set)
for conflict_id, service_resource_id in inserted_order_records_dict.items():
plan = {
p.id: {
"status": OrderLock.UNDER_SPIN_UP,
"created": self.current_time_millis(),
"updated": self.current_time_millis(),
"service_resource_id": service_resource_id
}
}
if dependencies:
plan[p.id]['dependencies'] = dependencies
order_lock_row = self.OrderLock(id=conflict_id, plans=plan)
response = order_lock_row.insert()
# TODO(larry): add more logs for inserting order lock record (insert/update)
LOG.info(_LI("Inserting the order lock record to order_locks table in MUSIC, "
"conditional insert operation response from MUSIC {}").format(response))
if response and response.status_code == 200:
body = response.json()
LOG.info("Succcessfully inserted the record in order_locks table with "
"the following response message {}".format(body))
else:
is_inserted_to_order_locks = False
else:
for solution in solution_list:
for demand_name, candidate in solution.items():
if candidate.get('inventory_type') == 'cloud':
conflict_id = candidate.get('conflict_id')
service_resource_id = candidate.get('service_resource_id')
order_lock_record = self.OrderLock.query.get_plan_by_col("id", conflict_id)
if order_lock_record:
deleting_record = order_lock_record[0]
plans = getattr(deleting_record, 'plans')
is_spinup_completed = getattr(deleting_record, 'is_spinup_completed')
spinup_completed_timestamp = getattr(deleting_record, 'spinup_completed_timestamp')
if is_spinup_completed:
# persist the record in order_locks_history table
order_lock_history_record = \
self.OrderLockHistory(conflict_id=conflict_id, plans=plans,
is_spinup_completed=is_spinup_completed,
spinup_completed_timestamp=spinup_completed_timestamp
)
LOG.debug("Inserting the history record with conflict id {}"
" to order_locks_history table".format(conflict_id))
order_lock_history_record.insert()
# remove the older record
LOG.debug("Deleting the order lock record {} from order_locks table"
.format(deleting_record))
deleting_record.delete()
plan = {
p.id: {
"status": OrderLock.UNDER_SPIN_UP,
"created": self.current_time_millis(),
"updated": self.current_time_millis(),
"service_resource_id": service_resource_id
}
}
order_lock_row = self.OrderLock(id=conflict_id, plans=plan)
response = order_lock_row.insert()
# TODO(larry): add more logs for inserting order lock record (insert/update)
LOG.info(_LI("Inserting the order lock record to order_locks table in MUSIC, "
"conditional insert operation response from MUSIC {}").format(response))
if response and response.status_code == 200:
body = response.json()
LOG.info("Succcessfully inserted the record in order_locks table "
"with the following response message {}".format(body))
else:
is_inserted_to_order_locks = False
if not is_inserted_to_order_locks:
message = _LE("Plan {} status encountered an "
"error while inserting order lock message to MUSIC.").format(p.id)
LOG.error(message)
p.status = self.Plan.ERROR
p.message = message
elif p.status == self.Plan.SOLVING:
if len(inserted_order_records_dict) > 0:
LOG.info(_LI("The plan with id {} is parked in order_locks table,"
"waiting for MSO release calls").format(p.id))
p.status = self.Plan.WAITING_SPINUP
else:
LOG.info(_LI("The plan with id {} is inserted in order_locks table.").
format(p.id))
p.status = self.Plan.SOLVED
while 'FAILURE' in _is_success \
and (self.current_time_seconds() - self.millisec_to_sec(p.updated)) <= self.conf.solver.timeout:
_is_success = p.update(condition=self.solver_owner_condition)
LOG.info(_LI("Plan search complete, changing the template status from solving to {}, | |
from abc import abstractmethod
import pandas as pd
from aistac.components.abstract_component import AbstractComponent
from aistac.components.aistac_commons import DataAnalytics
from ds_discovery.components.commons import Commons
from ds_discovery.components.discovery import DataDiscovery, Visualisation
__author__ = '<NAME>'
class AbstractCommonComponent(AbstractComponent):
DEFAULT_MODULE = 'ds_discovery.handlers.pandas_handlers'
DEFAULT_SOURCE_HANDLER = 'PandasSourceHandler'
DEFAULT_PERSIST_HANDLER = 'PandasPersistHandler'
    @classmethod
    @abstractmethod
    def from_uri(cls, task_name: str, uri_pm_path: str, username: str, uri_pm_repo: str=None, pm_file_type: str=None,
                 pm_module: str=None, pm_handler: str=None, pm_kwargs: dict=None, default_save=None,
                 reset_templates: bool=None, template_path: str=None, template_module: str=None,
                 template_source_handler: str=None, template_persist_handler: str=None, align_connectors: bool=None,
                 default_save_intent: bool=None, default_intent_level: bool=None, order_next_available: bool=None,
                 default_replace_intent: bool=None, has_contract: bool=None):
        """ Abstract factory: concrete components build an instance from a property-manager URI.

        This abstract default simply returns the class object; subclasses are expected to
        override it and return a constructed component instance. Parameter semantics are
        defined by the concrete implementation / AbstractComponent.
        NOTE(review): `default_intent_level` is annotated `bool` but the name suggests a
        level identifier (str/int) — confirm against the concrete implementations.
        """
        return cls
@classmethod
def discovery_pad(cls) -> DataDiscovery:
""" A class method to use the Components discovery methods as a scratch pad"""
return DataDiscovery()
@classmethod
def visual_pad(cls) -> Visualisation:
""" A class method to use the Components visualisation methods as a scratch pad"""
return Visualisation()
@property
def discover(self) -> DataDiscovery:
"""The components instance"""
return DataDiscovery()
@property
def visual(self) -> Visualisation:
"""The visualisation instance"""
return Visualisation()
def load_source_canonical(self, **kwargs) -> pd.DataFrame:
"""returns the contracted source data as a DataFrame """
return self.load_canonical(self.CONNECTOR_SOURCE, **kwargs)
def load_canonical(self, connector_name: str, **kwargs) -> pd.DataFrame:
"""returns the canonical of the referenced connector
:param connector_name: the name or label to identify and reference the connector
"""
canonical = super().load_canonical(connector_name=connector_name, **kwargs)
if isinstance(canonical, dict):
canonical = pd.DataFrame.from_dict(data=canonical)
return canonical
def load_persist_canonical(self, **kwargs) -> pd.DataFrame:
"""loads the clean pandas.DataFrame from the clean folder for this contract"""
return self.load_canonical(self.CONNECTOR_PERSIST, **kwargs)
def save_persist_canonical(self, canonical, auto_connectors: bool=None, **kwargs):
"""Saves the canonical to the clean files folder, auto creating the connector from template if not set"""
if auto_connectors if isinstance(auto_connectors, bool) else True:
if not self.pm.has_connector(self.CONNECTOR_PERSIST):
self.set_persist()
self.persist_canonical(connector_name=self.CONNECTOR_PERSIST, canonical=canonical, **kwargs)
def add_column_description(self, column_name: str, description: str, save: bool=None):
""" adds a description note that is included in with the 'report_column_catalog'"""
if isinstance(description, str) and description:
self.pm.set_intent_description(level=column_name, text=description)
self.pm_persist(save)
return
def setup_bootstrap(self, domain: str=None, project_name: str=None, path: str=None, file_type: str=None,
description: str=None):
""" Creates a bootstrap Transition setup. Note this does not set the source
:param domain: (optional) The domain this simulators sits within e.g. 'Healthcare' or 'Financial Services'
:param project_name: (optional) a project name that will replace the hadron naming on file prefix
:param path: (optional) a path added to the template path default
:param file_type: (optional) a file_type for the persisted file, default is 'parquet'
:param description: (optional) a description of the component instance to overwrite the default
"""
domain = domain.title() if isinstance(domain, str) else 'Unspecified'
file_type = file_type if isinstance(file_type, str) else 'parquet'
project_name = project_name if isinstance(project_name, str) else 'hadron'
file_name = self.pm.file_pattern(name='dataset', project=project_name.lower(), path=path, file_type=file_type,
versioned=True)
self.set_persist(uri_file=file_name)
component = self.pm.manager_name()
if not isinstance(description, str):
description = f"{domain} domain {component} component for {project_name} {self.pm.task_name} contract"
self.set_description(description=description)
def save_report_canonical(self, reports: [str, list], report_canonical: [dict, pd.DataFrame],
replace_connectors: bool=None, auto_connectors: bool=None, save: bool=None, **kwargs):
"""saves one or a list of reports using the TEMPLATE_PERSIST connector contract. Though a report can be of any
name, for convention and consistency each component has a set of REPORT constants <Component>.REPORT_<NAME>
where <Component> is the component Class name and <name> is the name of the report_canonical.
The reports can be a simple string name or a list of names. The name list can be a string or a dictionary
providing more detailed parameters on how to represent the report. These parameters keys are
:key report: the name of the report
:key file_type: (optional) a file type other than the default .json
:key versioned: (optional) if the filename should be versioned
:key stamped: (optional) A string of the timestamp options ['days', 'hours', 'minutes', 'seconds', 'ns']
Some examples
self.REPORT_SCHEMA
[self.REPORT_NOTES, self.REPORT_SCHEMA]
[self.REPORT_NOTES, {'report': self.REPORT_SCHEMA, 'uri_file': '<file_name>'}]
[{'report': self.REPORT_NOTES, 'file_type': 'json'}]
[{'report': self.REPORT_SCHEMA, 'file_type': 'csv', 'versioned': True, 'stamped': days}]
:param reports: a report name or list of report names to save
:param report_canonical: a relating canonical to base the report on
:param auto_connectors: (optional) if a connector should be created automatically
:param replace_connectors: (optional) replace any existing report connectors with these reports
:param save: (optional) if True, save to file. Default is True
:param kwargs: additional kwargs to pass to a Connector Contract
"""
if not isinstance(reports, (str, list)):
raise TypeError(f"The reports type must be a str or list, {type(reports)} type passed")
auto_connectors = auto_connectors if isinstance(auto_connectors, bool) else True
replace_connectors = replace_connectors if isinstance(replace_connectors, bool) else False
_report_list = []
for _report in self.pm.list_formatter(reports):
if not isinstance(_report, (str, dict)):
raise TypeError(f"The report type {type(_report)} is an unsupported type. Must be string or dict")
if isinstance(_report, str):
_report = {'report': _report}
if not _report.get('report', None):
raise ValueError(f"if not a string the reports list dict elements must have a 'report' key")
_report_list.append(_report)
if replace_connectors:
self.set_report_persist(reports=_report_list, save=save)
for _report in _report_list:
connector_name = _report.get('report')
if not self.pm.has_connector(connector_name):
if auto_connectors:
self.set_report_persist(reports=[_report], save=save)
else:
continue
self.persist_canonical(connector_name=connector_name, canonical=report_canonical, **kwargs)
return
def save_canonical_schema(self, schema_name: str=None, canonical: pd.DataFrame=None, schema_tree: list=None,
exclude_associate: list=None, detail_numeric: bool=None, strict_typing: bool=None,
category_limit: int=None, save: bool=None):
""" Saves the canonical schema to the Property contract. The default loads the clean canonical but optionally
a canonical can be passed to base the schema on and optionally a name given other than the default
:param schema_name: (optional) the name of the schema to save
:param canonical: (optional) the canonical to base the schema on
:param schema_tree: (optional) an analytics dict (see Discovery.analyse_association(...)
:param exclude_associate: (optional) a list of dot notation tree of items to exclude from iteration
(e.g. ['age.gender.salary'] will cut 'salary' branch from gender and all sub branches)
:param detail_numeric: (optional) if numeric columns should have detail stats, slowing analysis. default False
:param strict_typing: (optional) stops objects and string types being seen as categories. default True
:param category_limit: (optional) a global cap on categories captured. default is 10
:param save: (optional) if True, save to file. Default is True
"""
schema_name = schema_name if isinstance(schema_name, str) else self.REPORT_SCHEMA
canonical = canonical if isinstance(canonical, pd.DataFrame) else self.load_persist_canonical()
schema_tree = schema_tree if isinstance(schema_tree, list) else canonical.columns.to_list()
detail_numeric = detail_numeric if isinstance(detail_numeric, bool) else False
strict_typing = strict_typing if isinstance(strict_typing, bool) else True
category_limit = category_limit if isinstance(category_limit, int) else 10
analytics = DataDiscovery.analyse_association(canonical, columns_list=schema_tree,
exclude_associate=exclude_associate,
detail_numeric=detail_numeric, strict_typing=strict_typing,
category_limit=category_limit)
self.pm.set_canonical_schema(name=schema_name, schema=analytics)
self.pm_persist(save=save)
return
@staticmethod
def canonical_report(canonical, stylise: bool=True, inc_next_dom: bool=False, report_header: str=None,
condition: str=None):
"""The Canonical Report is a data dictionary of the canonical providing a reference view of the dataset's
attribute properties
:param canonical: the DataFrame to view
:param stylise: if True present the report stylised.
:param inc_next_dom: (optional) if to include the next dominate element column
:param report_header: (optional) filter on a header where the condition is true. Condition must exist
:param condition: (optional) the condition to apply to the header. Header must exist. examples:
' > 0.95', ".str.contains('shed')"
:return:
"""
return DataDiscovery.data_dictionary(df=canonical, stylise=stylise, inc_next_dom=inc_next_dom,
report_header=report_header, condition=condition)
def report_canonical_schema(self, schema: [str, dict]=None, roots: [str, list]=None,
sections: [str, list]=None, elements: [str, list]=None, stylise: bool=True):
""" presents the current canonical schema
:param schema: (optional) the name of the schema
:param roots: (optional) one or more tree roots
:param sections: (optional) the section under the root
:param elements: (optional) the element in the section
:param stylise: if True present the report stylised.
:return: pd.DataFrame
"""
if not isinstance(schema, dict):
schema = schema if isinstance(schema, str) else self.REPORT_SCHEMA
if not self.pm.has_canonical_schema(name=schema):
raise ValueError(f"There is no Schema currently stored under the name '{schema}'")
schema = self.pm.get_canonical_schema(name=schema)
df = pd.DataFrame(columns=['root', 'section', 'element', 'value'])
root_list = DataAnalytics.get_tree_roots(analytics_blob=schema)
if isinstance(roots, (str, list)):
roots = Commons.list_formatter(roots)
for root in roots:
if root not in root_list:
raise ValueError(f"The root '{root}' can not be found in the analytics tree roots")
root_list = roots
for root_items in root_list:
data_analysis = DataAnalytics.from_root(analytics_blob=schema, root=root_items)
for section in data_analysis.section_names:
if isinstance(sections, (str, list)):
if section not in Commons.list_formatter(sections):
continue
for element, value in data_analysis.get(section).items():
if isinstance(elements, (str, list)):
if element not in Commons.list_formatter(elements):
continue
to_append = [root_items, section, element, value]
a_series = pd.Series(to_append, index=df.columns)
df = df.append(a_series, ignore_index=True)
if stylise:
return Commons.report(df, index_header=['root', 'section'], bold='element')
return df
def report_connectors(self, connector_filter: [str, list]=None, inc_pm: bool=None, inc_template: | |
*xs): (k, *tuple(df[k].dtype.type(x) for x in xs)))
.T.set_index([k for k, f in stats + prototypes], append=True).T
# Transpose for fixed width (stats) and variable height (input cols)
# - [Nope: transposing cols mixes dtypes such that mixed str/int/float undermines display.precision smarts]
# .T
)
def _df_quantile(df, q=.5, interpolation='linear'):
    """Column-wise quantile like pd.DataFrame.quantile, but ordered categoricals work too"""
    def per_column(col):
        return _series_quantile(col, q=q, interpolation=interpolation)
    return df.apply(per_column)
def _series_quantile(s, *args, **kwargs):
"""Like pd.Series.quantile but handles ordered categoricals"""
if s.dtype.name == 'category':
cat_code = s.cat.codes.quantile(*args, **kwargs)
return s.dtype.categories[cat_code] if cat_code != -1 else None
else:
try:
return s.quantile(*args, **kwargs)
except:
# e.g. a column of non-uniform np.array's will fail like:
# ValueError: operands could not be broadcast together with shapes (6599624,) (459648,)
return np.nan
def _sizeof_df_cols(df: pd.DataFrame) -> 'Column[int]':
return df.memory_usage(index=False, deep=True)
# XXX Looks like df.memory_usage(deep=True) is more accurate (previous attempts were missing deep=True)
# def _sizeof_df_cols(df: pd.DataFrame) -> 'Column[int]':
# """
# sizeof is hard, but make our best effort:
# - Use dask.sizeof.sizeof instead of sys.getsizeof, since the latter is unreliable for pandas/numpy objects
# - Use df.applymap, since dask.sizeof.sizeof appears to not do this right [why? seems wrong...]
# """
# try:
# import dask.sizeof
# except:
# return df.apply(lambda c: None)
# else:
# return df.applymap(dask.sizeof.sizeof).sum()
def df_value_counts(
    df: pd.DataFrame,
    exprs=None,  # Cols to surface, as expressions understood by df.eval(expr) (default: df.columns)
    limit=10,  # Limit rows
    exclude_max_n=1,  # Exclude cols where max n ≤ exclude_max_n
    fillna='',  # Fill na cells (for seeing); pass None to leave na cols as NaN (for processing)
    unique_names=False,  # Give all cols unique names (for processing) instead of reusing 'n' (for seeing)
    **kwargs,  # kwargs for .value_counts (e.g. dropna)
) -> pd.DataFrame:
    """Series.value_counts() extended over a whole DataFrame (with a few compromises in hygiene)

    Each expression contributes a pair of columns: its distinct values plus an adjacent
    count column ('n', or 'n_<expr>' when unique_names=True). Pairs are concatenated
    side by side, so rows across different pairs are unrelated and shorter pairs are
    padded via `fillna`.
    """
    exprs = exprs if exprs is not None else df.columns
    return (df
        # Normalise categoricals first: drop unused cats, then work on plain strs
        .pipe(df_remove_unused_categories)
        .pipe(df_cat_to_str)
        .pipe(lambda df: (pd.concat(axis=1, objs=[
            ns
            # exprs items may be bare expressions or (expr, opts) tuples; the single-item
            # list unpacking below is the comprehension idiom for a local binding
            for expr_opts in exprs
            for expr, opts in [expr_opts if isinstance(expr_opts, tuple) else (expr_opts, dict())]
            for ns in [(df
                .eval(expr)
                .value_counts(**kwargs)
            )]
            # .iloc[0] is the max count assuming value_counts' default descending sort
            # (NOTE(review): passing sort=False via kwargs would break this — confirm callers don't)
            if ns.iloc[0] > exclude_max_n
            for ns in [(ns
                .pipe(lambda s: (
                    # NOTE We "sort_index" when "sort_values=True" because the "values" are in the index, as opposed to
                    # the "counts", which are the default sort
                    s.sort_values(ascending=opts.get('ascending', False)) if not opts.get('sort_values') else
                    s.sort_index(ascending=opts.get('ascending', True))
                ))
                .iloc[:limit]
                .to_frame()
                .rename(columns=lambda x: f'n_{expr}' if unique_names else 'n')
                .reset_index()
                .rename(columns={'index': expr})
            )]
        ])))
        .fillna(fillna)
    )
def df_reorder_cols(df: pd.DataFrame, first: List[str] = [], last: List[str] = []) -> pd.DataFrame:
    """Reorder columns: `first` up front, `last` at the end, the rest in between (original order)"""
    pinned = set(first) | set(last)
    middle = [c for c in df.columns if c not in pinned]
    return df.reindex(columns=[*first, *middle, *last])
def df_transform_columns(df: pd.DataFrame, f: Callable[[List[str]], List[str]]) -> pd.DataFrame:
    """Return a copy of df whose columns are f(original columns)"""
    out = df.copy()
    out.columns = f(out.columns)
    return out
def df_transform_column_names(df: pd.DataFrame, f: Callable[[str], str]) -> pd.DataFrame:
    """Return a copy of df with f applied to each column name individually"""
    # Map over the columns the transformer actually receives (cs), not the closed-over
    # df.columns — the old lambda ignored its parameter, a latent bug if the callee
    # ever passes a different column collection.
    return df_transform_columns(df, lambda cs: [f(c) for c in cs])
def df_transform_index(df: pd.DataFrame, f: Callable[[List[str]], List[str]]) -> pd.DataFrame:
    """Return a copy of df whose index is f(original index)"""
    out = df.copy()
    out.index = f(out.index)
    return out
def df_set_index_name(df: pd.DataFrame, name: str) -> pd.DataFrame:
    """Return a copy of df with its index renamed to `name`"""
    return df_transform_index(df, lambda idx: idx.rename(name))
def df_remove_unused_categories(df: pd.DataFrame) -> pd.DataFrame:
    """
    Do col.remove_unused_categories() for all categorical columns
    """
    cat_cols = [c for c in df.columns if df[c].dtype.name == 'category']
    return df.assign(**{c: df[c].cat.remove_unused_categories() for c in cat_cols})
def df_ordered_cats_like(df: pd.DataFrame, **col_names_to_cats) -> pd.DataFrame:
    """
    More flexible than df.astype({'foo': cat_dtype, ...}) / df_ordered_cat(df, ...)
    - In addition to cat dtypes, allows cols with cat dtype, lists of cat values, and functions that return any of those
    - Like .astype(), preserves unused cat values (caller can use df_remove_unused_categories if desired)
    """
    converted = {
        name: as_ordered_cat_like(df[name], spec)
        for name, spec in col_names_to_cats.items()
    }
    return df.assign(**converted)
def as_ordered_cat_like(s: pd.Series, cats) -> pd.Series:
    """
    More flexible than s.astype(cat_dtype) / as_ordered_cat(s, cat_values)
    - `cats` may be: a function of s, a Series with cat dtype, a CategoricalDtype, or an iterable of cat values
    - Like .astype(), preserves unused cat values (caller can use df_remove_unused_categories if desired)
    """
    spec = cats
    # Functions are applied to the input col
    if callable(spec):
        spec = spec(s)
    # Cols with categorical dtype contribute their categories
    # - Fails on cols with non-categorical dtype
    if isinstance(spec, pd.Series):
        spec = spec.dtypes.categories
    # Categorical dtypes contribute their categories
    # - TODO Is there a robust way to isinstance(cats, [np.dtype, pd.dtype]) so we can fail on non-categorical dtypes?
    if isinstance(spec, pd.api.types.CategoricalDtype):
        spec = spec.categories
    # At this point spec should be an iter of cat values
    # - Dedupe them for the user, since CategoricalDtype rejects duplicate cat values
    return as_ordered_cat(s, ordered_cats=list(unique_everseen(spec)))
# XXX Try migrating callers to df_ordered_cats_like to see if we can kill this less-usable one
# FIXME Multiple *args appears broken: `.pipe(df_ordered_cat, 'x', 'y')`
# - Workaround: `.pipe(df_ordered_cat, 'x').pipe(df_ordered_cat, 'y')`
def df_ordered_cat(df: pd.DataFrame, *args, transform=lambda x: x, **kwargs) -> pd.DataFrame:
    """
    Map many str series to ordered category series

    :param args: column names whose observed unique values become the ordered categories
    :param transform: applied to each cat-value list before use (e.g. sorted/reversed)
    :param kwargs: column name -> cat values, or column name -> f(df) returning cat values
    """
    # Bind k per-lambda (k=k) — the old dict comprehension closed over the loop
    # variable, so with multiple *args every lambda resolved to the LAST arg's
    # column (the long-standing "Multiple *args appears broken" FIXME).
    cats = dict(
        **{k: (lambda df, k=k: df[k].unique()) for k in args},
        **kwargs,
    )
    return df.assign(**{
        k: as_ordered_cat(df[k], list(transform(
            x(df) if isinstance(x, types.FunctionType) else x
        )))
        for k, x in cats.items()
    })
def as_ordered_cat(s: pd.Series, ordered_cats: List[str] = None) -> pd.Series:
    """
    Map a str series to an ordered category series
    - Falls back to s.unique() (order of first appearance) when ordered_cats is falsy
    """
    cats = ordered_cats if ordered_cats else list(s.unique())
    return s.astype(CategoricalDtype(cats, ordered=True))
def df_cat_to_str(df: pd.DataFrame) -> pd.DataFrame:
    """
    Map any categorical columns to str columns (see cat_to_str for details)
    """
    return df.apply(lambda col: cat_to_str(col), axis=0)
def cat_to_str(s: pd.Series) -> pd.Series:
    """
    If s is a category dtype, map it to a str. This is useful when you want to avoid bottlenecks on large cats:
    - s.apply(f) will apply f to each value in s _and_ each value in the category, to make the new output category dtype
    - cat_to_str(s).apply(f) will apply f only to each value in s, since there's no output category dtype to compute
    """
    if s.dtype.name == 'category':
        return s.astype('str')
    return s
# XXX after migrating callers to new name
def df_reverse_cat(*args, **kwargs):
    """Deprecated alias: forwards to df_reverse_cats (kept until callers migrate)"""
    return df_reverse_cats(*args, **kwargs)
def df_reverse_cats(df: pd.DataFrame, *col_names) -> pd.DataFrame:
    """
    Reverse the cat.categories values of each (ordered) category column given in col_names
    - Useful e.g. for reversing plotnine axes: https://github.com/has2k1/plotnine/issues/116#issuecomment-365911195
    """
    return df_transform_cats(df, **dict.fromkeys(col_names, reversed))
def df_transform_cats(
    df: pd.DataFrame,
    **col_name_to_f,
) -> pd.DataFrame:
    """
    Transform the cat.categories values to f(cat.categories) for each category column given in col_names
    """
    transformed = {name: transform_cat(df[name], f=fn) for name, fn in col_name_to_f.items()}
    return df.assign(**transformed)
def transform_cat(
    s: pd.Series,
    f: Callable[[List[str]], Iterable[str]] = lambda xs: xs,
    ordered: bool = None,
) -> pd.Series:
    """
    Rebuild a categorical series with categories f(s.dtype.categories);
    `ordered` defaults to the input's orderedness
    """
    keep_ordered = s.dtype.ordered if ordered is None else ordered
    new_dtype = CategoricalDtype(categories=list(f(s.dtype.categories)), ordered=keep_ordered)
    return s.astype('str').astype(new_dtype)
def reverse_cat(s: pd.Series) -> pd.Series:
    """
    Reverse the category values of a categorical series
    - Useful e.g. for reversing plotnine axes: https://github.com/has2k1/plotnine/issues/116#issuecomment-365911195
    """
    return transform_cat(s, f=reversed)
def df_ensure(df, **kwargs):
    """
    df.assign only the columns that aren't already present

    :param kwargs: column name -> value/callable, applied only when the column is missing
    """
    # (Removed an unreachable duplicate `return df` that followed this return)
    missing = {k: v for k, v in kwargs.items() if k not in df}
    return df.assign(**missing)
def df_require_nonempty(df, e: Union[str, Exception]) -> pd.DataFrame:
    """
    Raise if df is empty, else return df. Useful in pipelines, e.g.
        (df
        ...
        .pipe(df_require_nonempty, f'No requested things found: x[{x}], y[{y}]')  # -> ValueError
        ...
        .pipe(df_require_nonempty, AssertionError(f'Oops, my fault'))
        ...
        )
    """
    if df.empty:
        err = ValueError(e) if isinstance(e, str) else e
        raise err
    return df
# XXX Obviated by df_ensure?
# def produces_cols(*cols):
# cols = [c for c in cols if c != ...]
# def decorator(f):
# @wraps(f)
# def g(*args, **kwargs) -> pd.DataFrame:
# df = _find_df_in_args(*args, **kwargs)
# _refresh = kwargs.pop('_refresh', False)
# if _refresh or not cols or any(c not in df for c in cols):
# df = f(*args, **kwargs)
# return df
# return g
# return decorator
def requires_cols(*required_cols):
    """Decorator: before calling f, check the first df/series argument carries all required_cols
    (a df's .columns, or a series' .index); `...` entries are ignored."""
    needed = [c for c in required_cols if c != ...]
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs) -> any:
            target = _find_first_df_or_series_in_args(*args, **kwargs)
            have = target.columns if isinstance(target, pd.DataFrame) else target.index
            if not set(needed) <= set(have):
                raise ValueError(f'requires_col: required_cols[{needed}] not all in input_cols[{have}]')
            return f(*args, **kwargs)
        return wrapper
    return decorator
def _find_first_df_or_series_in_args(*args, **kwargs):
for x in [*args, *kwargs.values()]:
if isinstance(x, (pd.DataFrame, pd.Series)):
return x
else:
raise ValueError('No df or series found in args')
def requires_nonempty_rows(f):
@wraps(f)
def g(*args, **kwargs) -> any:
input = _find_first_df_or_series_in_args(*args, **kwargs)
input_cols = input.columns if isinstance(input, pd.DataFrame) else input.index # df.columns or series.index
if input.empty:
raise ValueError(f'requires_nonempty_rows: rows | |
if not annot_obj:
self.logger.error('Cannot conver this string to annotation object: '+str(annotation))
return False
#### retreive the annotation object
brsynth_annot = None
obj_annot = sbase_obj.getAnnotation()
if not obj_annot:
sbase_obj.setAnnotation(libsbml.XMLNode.convertStringToXMLNode(self._defaultBRSynthAnnot(meta_id)))
obj_annot = sbase_obj.getAnnotation()
if not obj_annot:
self.logger.error('Cannot update BRSynth annotation')
return False
brsynth_annot = obj_annot.getChild('RDF').getChild('BRSynth').getChild('brsynth')
if not brsynth_annot:
self.logger.error('Cannot find the BRSynth annotation')
return False
# add the annotation and replace if it exists
isfound_target = False
# # self.logger.debug(brsynth_annot.toXMLString())
for i in range(brsynth_annot.getNumChildren()):
# self.logger.debug(annot_header+' -- '+str(brsynth_annot.getChild(i).getName()))
if annot_header == brsynth_annot.getChild(i).getName():
isfound_target = True
'''
self.checklibSBML(brsynth_annot.removeChild(brsynth_annot.getIndex(i)),
'Removing annotation '+str(annot_header))
'''
self.checklibSBML(brsynth_annot.removeChild(i), 'Removing annotation '+str(annot_header))
isfound_source = False
source_brsynth_annot = annot_obj.getChild('RDF').getChild('BRSynth').getChild('brsynth')
for y in range(source_brsynth_annot.getNumChildren()):
# self.logger.debug('\t'+annot_header+' -- '+str(source_brsynth_annot.getChild(y).getName()))
if str(annot_header)==str(source_brsynth_annot.getChild(y).getName()):
isfound_source = True
# self.logger.debug('Adding annotation to the brsynth annotation: '+str(source_brsynth_annot.getChild(y).toXMLString()))
towrite_annot = source_brsynth_annot.getChild(y)
# self.logger.debug(brsynth_annot.toXMLString())
self.checklibSBML(brsynth_annot.addChild(towrite_annot), ' 1 - Adding annotation to the brsynth annotation')
# self.logger.debug(brsynth_annot.toXMLString())
break
if not isfound_source:
self.logger.error('Cannot find '+str(annot_header)+' in source annotation')
if not isfound_target:
# self.logger.debug('Cannot find '+str(annot_header)+' in target annotation')
isfound_source = False
source_brsynth_annot = annot_obj.getChild('RDF').getChild('BRSynth').getChild('brsynth')
for y in range(source_brsynth_annot.getNumChildren()):
# self.logger.debug('\t'+annot_header+' -- '+str(source_brsynth_annot.getChild(y).getName()))
if str(annot_header)==str(source_brsynth_annot.getChild(y).getName()):
isfound_source = True
# self.logger.debug('Adding annotation to the brsynth annotation: '+str(source_brsynth_annot.getChild(y).toXMLString()))
towrite_annot = source_brsynth_annot.getChild(y)
# self.logger.debug(brsynth_annot.toXMLString())
self.checklibSBML(brsynth_annot.addChild(towrite_annot), '2 - Adding annotation to the brsynth annotation')
# self.logger.debug(brsynth_annot.toXMLString())
break
if not isfound_source:
self.logger.error('Cannot find '+str(annot_header)+' in source annotation')
# toWrite_annot = annot_obj.getChild('RDF').getChild('BRSynth').getChild('brsynth').getChild(annot_header)
# self.checklibSBML(brsynth_annot.addChild(toWrite_annot), 'Adding annotation to the brsynth annotation')
return False
'''
if brsynth_annot.getChild(annot_header).toXMLString()=='':
toWrite_annot = annot_obj.getChild('RDF').getChild('BRSynth').getChild('brsynth').getChild(annot_header)
self.checklibSBML(brsynth_annot.addChild(toWrite_annot), 'Adding annotation to the brsynth annotation')
else:
# try:
# self.logger.debug('==============================')
found_child = False
for i in range(brsynth_annot.getNumChildren()):
if annot_header == brsynth_annot.getChild(i).getName():
# self.logger.debug('Found the same name to remove: '+str(annot_header))
self.checklibSBML(brsynth_annot.removeChild(brsynth_annot.getIndex(i)),
'Removing annotation '+str(annot_header))
toWrite_annot = annot_obj.getChild('RDF').getChild('BRSynth').getChild('brsynth').getChild(annot_header)
self.checklibSBML(brsynth_annot.addChild(toWrite_annot), 'Adding annotation to the brsynth annotation')
found_child = True
break
# cause by a bbug with string lookup
if not found_child:
self.logger.warning('Bug with lookup adding it now: '+str(annot_header))
toWrite_annot = annot_obj.getChild('RDF').getChild('BRSynth').getChild('brsynth').getChild(annot_header)
self.checklibSBML(brsynth_annot.addChild(toWrite_annot), 'Adding annotation to the brsynth annotation')
# except OverflowError:
# self.logger.warning('TODO: Overflow error that must be dealt with')
# self.logger.warning(brsynth_annot.getChild(annot_header).toXMLString())
# return False
'''
return True
    def addUpdateMIRIAM(self, sbase_obj, type_param, xref, meta_id=None):
        """Append or update an entry to the MIRIAM annotation of the passed libsbml.SBase object.

        If the MIRIAM annotation isn't contained in the object it is created; cross
        references not already present are inserted into the 'is' Bag.

        :param sbase_obj: The libSBML object whose MIRIAM annotation is updated
        :param type_param: The type of parameter entered. Valid include ['compartment', 'reaction', 'species']
        :param xref: Dictionnary of the cross reference
        :param meta_id: The meta ID to be added to the annotation string

        :type sbase_obj: libsbml.SBase
        :type type_param: str
        :type xref: dict
        :type meta_id: str

        :rtype: bool
        :return: Success or failure of the function
        """
        if type_param not in ['compartment', 'reaction', 'species']:
            self.logger.error('type_param must be '+str(['compartment', 'reaction', 'species'])+' not '+str(type_param))
            return False
        # isReplace marks that the whole 'is' Bag must be (re)written back at the end
        miriam_annot = None
        isReplace = False
        try:
            # Navigate to the existing MIRIAM 'is' Bag; any missing level raises AttributeError
            miriam_annot = sbase_obj.getAnnotation().getChild('RDF').getChild('Description').getChild('is').getChild('Bag')
            miriam_elements = self.readMIRIAMAnnotation(sbase_obj.getAnnotation())
            if not miriam_elements:
                # Bag exists but is empty: rebuild from the default combined annotation template
                isReplace = True
                if not meta_id:
                    meta_id = self._genMetaID('tmp_addUpdateMIRIAM')
                miriam_annot_1 = libsbml.XMLNode.convertStringToXMLNode(self._defaultBothAnnot(meta_id))
                miriam_annot = miriam_annot_1.getChild('RDF').getChild('Description').getChild('is').getChild('Bag')
            else:
                miriam_elements = None
        except AttributeError:
            try:
                # Cannot find MIRIAM annotation, create it
                isReplace = True
                if not meta_id:
                    meta_id = self._genMetaID('tmp_addUpdateMIRIAM')
                miriam_annot = libsbml.XMLNode.convertStringToXMLNode(self._defaultMIRIAMAnnot(meta_id))
                miriam_annot = miriam_annot.getChild('RDF').getChild('Description').getChild('is').getChild('Bag')
            except AttributeError:
                self.logger.error('Fatal error fetching the annotation')
                return False
        # compile the list of current species
        # inside: {internal_db_key: [ids...]} parsed from the identifiers.org URIs already in the Bag
        inside = {}
        for i in range(miriam_annot.getNumChildren()):
            single_miriam = miriam_annot.getChild(i)
            if single_miriam.getAttributes().getLength()>1:
                self.logger.error('MIRIAM annotations should never have more than 1: '+str(single_miriam.toXMLString()))
                continue
            single_miriam_attr = single_miriam.getAttributes()
            if not single_miriam_attr.isEmpty():
                # URI shape assumed: .../<db>/<id> — last two path segments
                try:
                    db = single_miriam_attr.getValue(0).split('/')[-2]
                    v = single_miriam_attr.getValue(0).split('/')[-1]
                    inside[self.header_miriam[type_param][db]].append(v)
                except KeyError:
                    # Either the db key or the inside[] list is missing; retry creating the list
                    try:
                        db = single_miriam_attr.getValue(0).split('/')[-2]
                        v = single_miriam_attr.getValue(0).split('/')[-1]
                        inside[self.header_miriam[type_param][db]] = [v]
                    except KeyError:
                        self.logger.warning('Cannot find the self.header_miriram entry '+str(db))
                        continue
            else:
                self.logger.warning('Cannot return MIRIAM attribute')
                pass
        # add or ignore
        # toadd: cross references present in xref but not yet inside the annotation
        toadd = self._compareXref(inside, xref)
        for database_id in toadd:
            for species_id in toadd[database_id]:
                # not sure how to avoid having it that way
                if database_id in self.miriam_header[type_param]:
                    try:
                        # determine if the dictionnaries
                        # Build a one-entry annotation, then graft its single <rdf:li> into the Bag
                        annotation = '''<annotation>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:bqbiol="http://biomodels.net/biology-qualifiers/" xmlns:bqmodel="http://biomodels.net/model-qualifiers/">
<rdf:Description rdf:about="# tmp">
<bqbiol:is>
<rdf:Bag>'''
                        if type_param=='species':
                            # KEGG species ids are disambiguated by their first letter (C=compound, D=drug)
                            if database_id=='kegg' and species_id[0]=='C':
                                annotation += '''
<rdf:li rdf:resource="http://identifiers.org/'''+self.miriam_header[type_param]['kegg_c']+str(species_id)+'''"/>'''
                            elif database_id=='kegg' and species_id[0]=='D':
                                annotation += '''
<rdf:li rdf:resource="http://identifiers.org/'''+self.miriam_header[type_param]['kegg_d']+str(species_id)+'''"/>'''
                            else:
                                annotation += '''
<rdf:li rdf:resource="http://identifiers.org/'''+self.miriam_header[type_param][database_id]+str(species_id)+'''"/>'''
                        else:
                            annotation += '''
<rdf:li rdf:resource="http://identifiers.org/'''+self.miriam_header[type_param][database_id]+str(species_id)+'''"/>'''
                        annotation += '''
</rdf:Bag>
</bqbiol:is>
</rdf:Description>
</rdf:RDF>
</annotation>'''
                        toPass_annot = libsbml.XMLNode.convertStringToXMLNode(annotation)
                        toWrite_annot = toPass_annot.getChild('RDF').getChild('Description').getChild('is').getChild('Bag').getChild(0)
                        miriam_annot.insertChild(0, toWrite_annot)
                    except KeyError:
                        # WARNING need to check this
                        self.logger.warning('Cannot find '+str(database_id)+' in self.miriam_header for '+str(type_param))
                        continue
        if isReplace:
            # Write the rebuilt Bag back onto the object (replace the old 'is' child if any)
            ori_miriam_annot = sbase_obj.getAnnotation()
            if not ori_miriam_annot:
                sbase_obj.unsetAnnotation()
                sbase_obj.setAnnotation(miriam_annot)
            else:
                rpSBML.checklibSBML(ori_miriam_annot.getChild('RDF').getChild('Description').getChild('is').removeChild(0), 'Removing annotation "is"')
                rpSBML.checklibSBML(ori_miriam_annot.getChild('RDF').getChild('Description').getChild('is').addChild(miriam_annot), 'Adding annotation to the brsynth annotation')
        return True
#TODO: change the name of the function to: rpJSON
def genJSON(self, pathway_id='rp_pathway'):
"""Generate the dictionnary of all the annotations of a pathway species, reaction and pathway annotations
:param pathway_id: The pathway ID (Default: rp_pathway)
:type pathway_id: str
:rtype: dict
:return: Dictionnary of the pathway annotation
"""
groups = self.getModel().getPlugin('groups')
rp_pathway = groups.getGroup(pathway_id)
reactions = rp_pathway.getListOfMembers()
# pathway
rpsbml_json = {}
rpsbml_json['pathway'] = {}
rpsbml_json['pathway']['brsynth'] = self.readBRSYNTHAnnotation(rp_pathway.getAnnotation(), self.logger)
# reactions
rpsbml_json['reactions'] = {}
for member in reactions:
reaction = self.getModel().getReaction(member.getIdRef())
annot = reaction.getAnnotation()
rpsbml_json['reactions'][member.getIdRef()] = {}
rpsbml_json['reactions'][member.getIdRef()]['brsynth'] = self.readBRSYNTHAnnotation(annot, self.logger)
rpsbml_json['reactions'][member.getIdRef()]['miriam'] = self.readMIRIAMAnnotation(annot)
# loop though all the species
rpsbml_json['species'] = {}
for spe_id in self.readUniqueRPspecies(pathway_id):
species = self.getModel().getSpecies(spe_id)
annot = species.getAnnotation()
rpsbml_json['species'][spe_id] = {}
rpsbml_json['species'][spe_id]['brsynth'] = self.readBRSYNTHAnnotation(annot, self.logger)
rpsbml_json['species'][spe_id]['miriam'] = self.readMIRIAMAnnotation(annot)
return rpsbml_json
#####################################################################
########################## INPUT/OUTPUT #############################
#####################################################################
def readSBML(self, inFile):
"""Open an SBML file to the object
:param inFile: Path to the input SBML file
:type inFile: str
:raises FileNotFoundError: If the file cannot be found
:raises AttributeError: If the libSBML command encounters an error or the input value is None
:rtype: None
:return: Dictionnary of the pathway annotation
"""
if not os_path.isfile(inFile):
self.logger.error('Invalid input file')
raise FileNotFoundError
self.document = libsbml.readSBMLFromFile(inFile)
rpSBML.checklibSBML(self.getDocument(), 'reading input file')
errors = self.getDocument().getNumErrors()
# display the errors in the log accordning to the severity
for err in [self.getDocument().getError(i) for i in range(self.getDocument().getNumErrors())]:
# TODO if the error is related to packages not enabled (like groups or fbc) activate them
if err.isFatal:
self.logger.error('libSBML reading error: '+str(err.getShortMessage()))
raise FileNotFoundError
else:
self.logger.warning('libSBML reading warning: '+str(err.getShortMessage()))
if not self.getModel():
self.logger.error('Either the file was not read correctly or the SBML is empty')
raise FileNotFoundError
# enabling the extra packages if they do not exists when reading a model
if not self.getModel().isPackageEnabled('groups'):
rpSBML.checklibSBML(self.getModel().enablePackage(
'http://www.sbml.org/sbml/level3/version1/groups/version1',
'groups',
True),
'Enabling the GROUPS package')
rpSBML.checklibSBML(self.getDocument().setPackageRequired('groups', False), 'enabling groups package')
if not self.getModel().isPackageEnabled('fbc'):
rpSBML.checklibSBML(self.getModel().enablePackage(
'http://www.sbml.org/sbml/level3/version1/fbc/version2',
'fbc',
True),
'Enabling the FBC package')
rpSBML.checklibSBML(self.getDocument().setPackageRequired('fbc', False), 'enabling FBC package')
    ## Export a libSBML model to file
    #
    # Export the libSBML model of this object to an SBML file
    #
    # @param filename Optional path for the output SBML file; if omitted, the
    #        file name is derived from the model name (with an '_sbml' suffix
    #        and a '.xml' extension)
def writeSBML(self, filename=None):
"""Export the metabolic network to a SBML file
:param path: Path to the output SBML file
:type path: str
:raises FileNotFoundError: If the file cannot be found
:raises AttributeError: If the libSBML command encounters an error or the input value is None
:rtype: bool
:return: Success or failure of the command
"""
ext = ''
if not str(self.getName()).endswith('_sbml'):
ext = '_sbml'
if filename:
out_filename = filename
else:
out_filename = str(self.getName())+ext+'.xml'
libsbml.writeSBMLToFile(self.getDocument(), out_filename)
return True
#####################################################################
########################## FindCreate ###############################
#####################################################################
def findCreateObjective(self, reactions, coefficients, isMax=True, objective_id=None):
"""Find the objective (with only one reaction associated) based on the reaction ID and if not found create it
:param reactions: List of the reactions id's to set as objectives
:param coefficients: List of the coefficients about the objectives
:param isMax: Maximise or minimise the objective
:param objective_id: overwite the default id if created (from obj_[reactions])
:type reactions: list
:type coefficients: list
:type isMax: bool
:type objective_id: str
:raises FileNotFoundError: If the file cannot be found
:raises AttributeError: If the libSBML command encounters an error or the input value is None
:rtype: str
:return: Objective ID
"""
fbc_plugin = self.getModel().getPlugin('fbc')
rpSBML.checklibSBML(fbc_plugin, 'Getting | |
<reponame>Andrew-Brown1/Smooth_AP
# repo originally forked from https://github.com/Confusezius/Deep-Metric-Learning-Baselines
################# LIBRARIES ###############################
import warnings
warnings.filterwarnings("ignore")
import numpy as np, pandas as pd, copy, torch, random, os
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
"""============================================================================"""
################ FUNCTION TO RETURN ALL DATALOADERS NECESSARY ####################
def give_dataloaders(dataset, opt):
    """
    Args:
        dataset: string, name of dataset for which the dataloaders should be returned.
        opt:     argparse.Namespace, contains all training-specific parameters.
    Returns:
        dataloaders: dict of dataloaders for training, testing and evaluation on training.
    """
    # Pick the dataset collection matching the requested benchmark
    if opt.dataset == 'vehicle_id':
        datasets = give_VehicleID_datasets(opt)
    elif opt.dataset == 'Inaturalist':
        datasets = give_inaturalist_datasets(opt)
    else:
        raise Exception('No Dataset >{}< available!'.format(dataset))

    # Wrap each dataset in a DataLoader
    dataloaders = {}
    for name, dset in datasets.items():
        if isinstance(dset, TrainDatasetsmoothap) and name == 'training':
            # Batches are pre-formed by the dataset itself, so read it sequentially
            loader = torch.utils.data.DataLoader(
                dset, batch_size=opt.bs, num_workers=opt.kernels,
                sampler=torch.utils.data.SequentialSampler(dset),
                pin_memory=True, drop_last=True)
        else:
            validation = dset.is_validation
            workers = opt.kernels if name == 'training' else 6
            loader = torch.utils.data.DataLoader(
                dset, batch_size=opt.bs, num_workers=workers,
                shuffle=not validation, pin_memory=True, drop_last=not validation)
        dataloaders[name] = loader
    return dataloaders
def give_inaturalist_datasets(opt):
    """
    This function generates a training, testing and evaluation dataloader for Metric Learning on the Inaturalist 2018 dataset.
    For Metric Learning, training and test sets are provided by given split files, which define a train and test split.
    So no random shuffling of classes.

    Args:
        opt: argparse.Namespace, contains all training-specific parameters.
    Returns:
        dict of PyTorch datasets for training, testing and evaluation.
    """
    def _read_split(split_file):
        """Parse one split file into {'<super_label>/<class_label>': [image paths]}."""
        image_dict = {}
        with open(os.path.join(opt.source_path, split_file)) as f:
            for line in f:
                entry = line.strip()
                if not entry:
                    # skip blank lines (the original crashed on them)
                    continue
                info = entry.split('/')
                # class key is the last two directory levels of the image path
                key = '/'.join([info[-3], info[-2]])
                image_dict.setdefault(key, []).append(os.path.join(opt.source_path, entry))
        return image_dict

    train_image_dict = _read_split('Inat_dataset_splits/Inaturalist_train_set1.txt')
    val_image_dict = _read_split('Inat_dataset_splits/Inaturalist_test_set1.txt')

    # Re-key training classes to sequential ids of the form 'te/<idx>'
    # (dict insertion order matches the original enumeration order)
    train_image_dict = {'te/%d' % i: paths for i, paths in enumerate(train_image_dict.values())}

    train_dataset = TrainDatasetsmoothap(train_image_dict, opt)
    val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)
    eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)
    return {'training': train_dataset, 'testing': val_dataset, 'evaluation': eval_dataset}
def give_VehicleID_datasets(opt):
    """
    This function generates a training, testing and evaluation dataloader for Metric Learning on the PKU Vehicle dataset.
    For Metric Learning, training and (multiple) test sets are provided by separate text files, train_list and test_list_<n_classes_2_test>.txt.
    So no random shuffling of classes.

    Args:
        opt: argparse.Namespace, contains all training-specific parameters.
    Returns:
        dict of PyTorch datasets for training, testing and evaluation.
    """
    def _read_list(name):
        """Load one whitespace-delimited (image id, class label) list file."""
        return np.array(pd.read_table(opt.source_path+'/train_test_split/'+name, header=None, delim_whitespace=True))

    def _relabel(table, conversion):
        """Map the raw class labels (column 1) to consecutive indices, in place."""
        table[:, 1] = np.array([conversion[x] for x in table[:, 1]])

    def _to_image_dict(table):
        """Build {class_idx: [list of paths to images of this class]} from a list table."""
        image_dict = {}
        for img_path, key in table:
            image_dict.setdefault(key, []).append(opt.source_path+'/image/{:07d}.jpg'.format(img_path))
        return image_dict

    # Load the respective list files
    train = _read_list('train_list.txt')
    small_test = _read_list('test_list_800.txt')
    medium_test = _read_list('test_list_1600.txt')
    big_test = _read_list('test_list_2400.txt')

    # Convert raw class labels to consecutive indices; the train and test label
    # spaces are converted independently, as in the original protocol
    lab_conv_train = {x: i for i, x in enumerate(np.unique(train[:, 1]))}
    _relabel(train, lab_conv_train)
    lab_conv = {x: i for i, x in enumerate(np.unique(np.concatenate([small_test[:, 1], medium_test[:, 1], big_test[:, 1]])))}
    for table in (small_test, medium_test, big_test):
        _relabel(table, lab_conv)

    # Image dicts for training and the three test splits
    train_image_dict = _to_image_dict(train)
    small_test_dict = _to_image_dict(small_test)
    medium_test_dict = _to_image_dict(medium_test)
    big_test_dict = _to_image_dict(big_test)

    # Group training classes by the vehicle-model attribute.
    # NOTE(review): 'new_dict'/'not_found' are computed but never used
    # downstream; kept for parity with the original behaviour (including the
    # attribute-file read and its potential errors).
    attribute = np.array(pd.read_table(opt.source_path+'/attribute/model_attr.txt', header=None, delim_whitespace=True))
    new_dict = {}
    not_found = 0
    for thing in attribute:
        if lab_conv_train[thing[0]] not in train_image_dict:
            not_found += 1
        else:
            new_dict.setdefault(thing[1], []).append(lab_conv_train[thing[0]])

    train_dataset = TrainDatasetsmoothap(train_image_dict, opt)
    eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)
    val_small_dataset = BaseTripletDataset(small_test_dict, opt, is_validation=True)
    val_medium_dataset = BaseTripletDataset(medium_test_dict, opt, is_validation=True)
    val_big_dataset = BaseTripletDataset(big_test_dict, opt, is_validation=True)
    return {'training': train_dataset, 'testing_set1': val_small_dataset, 'testing_set2': val_medium_dataset,
            'testing_set3': val_big_dataset, 'evaluation': eval_dataset}
################## BASIC PYTORCH DATASET USED FOR ALL DATASETS ##################################
class BaseTripletDataset(Dataset):
    """
    Dataset class to provide (augmented) correctly prepared training samples corresponding to standard DML literature.
    This includes normalizing to ImageNet-standards, and Random & Resized cropping of shapes 224 for ResNet50 and 227 for
    GoogLeNet during Training. During validation, only resizing to 256 or center cropping to 224/227 is performed.

    NOTE(review): __getitem__ mutates sampling state stored on the instance
    (current_class, classes_visited, n_samples_drawn), so class-grouped batches
    only form correctly under sequential, single-process access -- confirm
    against the DataLoader configuration used with this dataset.
    """
    def __init__(self, image_dict, opt, samples_per_class=8, is_validation=False):
        """
        Dataset Init-Function.

        Args:
            image_dict:        dict, Dictionary of shape {class_idx:[list of paths to images belong to this class] ...} providing all the training paths and classes.
            opt:               argparse.Namespace, contains all training-specific parameters.
            samples_per_class: Number of samples to draw from one class before moving to the next when filling the batch.
            is_validation:     If is true, dataset properties for validation/testing are used instead of ones for training.
        Returns:
            Nothing!
        """
        #Define length of dataset (total number of images over all classes)
        self.n_files = np.sum([len(image_dict[key]) for key in image_dict.keys()])
        self.is_validation = is_validation
        self.pars = opt
        self.image_dict = image_dict
        self.avail_classes = sorted(list(self.image_dict.keys()))
        #Convert image dictionary from classname:content to class_idx:content, because the initial indices are not necessarily from 0 - <n_classes>.
        self.image_dict = {i:self.image_dict[key] for i,key in enumerate(self.avail_classes)}
        self.avail_classes = sorted(list(self.image_dict.keys()))
        #Init. properties that are used when filling up batches.
        if not self.is_validation:
            self.samples_per_class = samples_per_class
            #Select current class to sample images from up to <samples_per_class>
            self.current_class = np.random.randint(len(self.avail_classes))
            # Seed with the start class twice so the "avoid the last two
            # classes" rule in __getitem__ holds from the first switch on.
            self.classes_visited = [self.current_class, self.current_class]
            self.n_samples_drawn = 0
        #Data augmentation/processing methods.
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
        transf_list = []
        if not self.is_validation:
            # Training: random crop (arch-dependent size) plus horizontal flip
            transf_list.extend([transforms.RandomResizedCrop(size=224) if opt.arch=='resnet50' else transforms.RandomResizedCrop(size=227),
                                transforms.RandomHorizontalFlip(0.5)])
        else:
            # Validation/testing: deterministic resize + center crop
            transf_list.extend([transforms.Resize(256),
                                transforms.CenterCrop(224) if opt.arch=='resnet50' else transforms.CenterCrop(227)])
        transf_list.extend([transforms.ToTensor(), normalize])
        self.transform = transforms.Compose(transf_list)
        #Convert Image-Dict to list of (image_path, image_class). Allows for easier direct sampling.
        self.image_list = [[(x,key) for x in self.image_dict[key]] for key in self.image_dict.keys()]
        self.image_list = [x for y in self.image_list for x in y]
        #Flag that denotes if dataset is called for the first time.
        self.is_init = True
    def ensure_3dim(self, img):
        """
        Function that ensures that the input img is three-dimensional.

        Args:
            img: PIL.Image, image which is to be checked for three-dimensionality (i.e. if some images are black-and-white in an otherwise coloured dataset).
        Returns:
            Checked PIL.Image img.
        """
        if len(img.size)==2:
            img = img.convert('RGB')
        return img
    def __getitem__(self, idx):
        """
        Args:
            idx: Sample idx for training sample
        Returns:
            tuple of form (sample_class, torch.Tensor() of input image)
        """
        # SmoothAP branch: classes_visited grows without bound, so a class is
        # never revisited within the lifetime of this sampling state.
        if self.pars.loss == 'smoothap' or self.pars.loss == 'smoothap_element':
            if self.is_init:
                #self.current_class = self.avail_classes[idx%len(self.avail_classes)]
                self.is_init = False
            if not self.is_validation:
                if self.samples_per_class==1:
                    # One sample per class: plain indexed access, no state updates
                    return self.image_list[idx][-1], self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0])))
                if self.n_samples_drawn==self.samples_per_class:
                    #Once enough samples per class have been drawn, we choose another class to draw samples from.
                    #Note that we ensure with self.classes_visited that no class is chosen if it had been chosen
                    #previously or one before that.
                    counter = copy.deepcopy(self.avail_classes)
                    for prev_class in self.classes_visited:
                        if prev_class in counter: counter.remove(prev_class)
                    self.current_class = counter[idx%len(counter)]
                    #self.classes_visited = self.classes_visited[1:]+[self.current_class]
                    # EDIT -> there can be no class repeats
                    self.classes_visited = self.classes_visited+[self.current_class]
                    self.n_samples_drawn = 0
                class_sample_idx = idx%len(self.image_dict[self.current_class])
                self.n_samples_drawn += 1
                out_img = self.transform(self.ensure_3dim(Image.open(self.image_dict[self.current_class][class_sample_idx])))
                return self.current_class,out_img
            else:
                return self.image_list[idx][-1], self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0])))
        # Default branch: classes_visited is a sliding window of the last two
        # classes, so only immediate repeats are avoided.
        else:
            if self.is_init:
                self.current_class = self.avail_classes[idx%len(self.avail_classes)]
                self.is_init = False
            if not self.is_validation:
                if self.samples_per_class==1:
                    return self.image_list[idx][-1], self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0])))
                if self.n_samples_drawn==self.samples_per_class:
                    #Once enough samples per class have been drawn, we choose another class to draw samples from.
                    #Note that we ensure with self.classes_visited that no class is chosen if it had been chosen
                    #previously or one before that.
                    counter = copy.deepcopy(self.avail_classes)
                    for prev_class in self.classes_visited:
                        if prev_class in counter: counter.remove(prev_class)
                    self.current_class = counter[idx%len(counter)]
                    self.classes_visited = self.classes_visited[1:]+[self.current_class]
                    self.n_samples_drawn = 0
                class_sample_idx = idx%len(self.image_dict[self.current_class])
                self.n_samples_drawn += 1
                out_img = self.transform(self.ensure_3dim(Image.open(self.image_dict[self.current_class][class_sample_idx])))
                return self.current_class,out_img
            else:
                return self.image_list[idx][-1], self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0])))
    def __len__(self):
        # Total number of images across all classes (computed in __init__)
        return self.n_files
def flatten(l):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    Defined with ``def`` instead of a lambda assignment (PEP 8 E731); the
    public name and behaviour are unchanged.
    """
    return [item for sublist in l for item in sublist]
######################## dataset for SmoothAP regular training ##################################
class TrainDatasetsmoothap(Dataset):
"""
This dataset class allows mini-batch formation pre-epoch, for greater speed
"""
def __init__(self, image_dict, opt):
"""
Args:
image_dict: two-level dict, `super_dict[super_class_id][class_id]` gives the list of
image paths having the same super-label and class label
"""
self.image_dict = image_dict
self.dataset_name = opt.dataset
self.batch_size = opt.bs
self.samples_per_class = opt.samples_per_class
for sub in self.image_dict:
| |
nlst, ndays,
ntriads, nlags)
------------------------------------------------------------------------
"""
try:
bw_eff
except NameError:
raise NameError('Effective bandwidth must be specified')
else:
if not isinstance(bw_eff, (int, float, list, NP.ndarray)):
raise TypeError('Value of effective bandwidth must be a scalar, list or numpy array')
bw_eff = NP.asarray(bw_eff).reshape(-1)
if NP.any(bw_eff <= 0.0):
raise ValueError('All values in effective bandwidth must be strictly positive')
if freq_center is None:
freq_center = NP.asarray(self.f[self.f.size/2]).reshape(-1)
elif isinstance(freq_center, (int, float, list, NP.ndarray)):
freq_center = NP.asarray(freq_center).reshape(-1)
if NP.any((freq_center <= self.f.min()) | (freq_center >= self.f.max())):
raise ValueError('Value(s) of frequency center(s) must lie strictly inside the observing band')
else:
raise TypeError('Values(s) of frequency center must be scalar, list or numpy array')
if (bw_eff.size == 1) and (freq_center.size > 1):
bw_eff = NP.repeat(bw_eff, freq_center.size)
elif (bw_eff.size > 1) and (freq_center.size == 1):
freq_center = NP.repeat(freq_center, bw_eff.size)
elif bw_eff.size != freq_center.size:
raise ValueError('Effective bandwidth(s) and frequency center(s) must have same number of elements')
if shape is not None:
if not isinstance(shape, str):
raise TypeError('Window shape must be a string')
if shape not in ['rect', 'bhw', 'bnw', 'RECT', 'BHW', 'BNW']:
raise ValueError('Invalid value for window shape specified.')
else:
shape = 'rect'
if fftpow is None:
fftpow = 1.0
else:
if not isinstance(fftpow, (int, float)):
raise TypeError('Power to raise window FFT by must be a scalar value.')
if fftpow < 0.0:
raise ValueError('Power for raising FFT of window by must be positive.')
if pad is None:
pad = 1.0
else:
if not isinstance(pad, (int, float)):
raise TypeError('pad fraction must be a scalar value.')
if pad < 0.0:
pad = 0.0
if verbose:
print('\tPad fraction found to be negative. Resetting to 0.0 (no padding will be applied).')
if not isinstance(datapool, str):
raise TypeError('Input datapool must be a string')
if datapool.lower() not in ['prelim']:
raise ValueError('Specified datapool not supported')
if visscaleinfo is not None:
if not isinstance(visscaleinfo, dict):
raise TypeError('Input visscaleinfo must be a dictionary')
if 'vis' not in visscaleinfo:
raise KeyError('Input visscaleinfo does not contain key "vis"')
if not isinstance(visscaleinfo['vis'], RI.InterferometerArray):
if 'lst' not in visscaleinfo:
raise KeyError('Input visscaleinfo does not contain key "lst"')
lst_vis = visscaleinfo['lst'] * 15.0
if not isinstance(visscaleinfo['vis'], (NP.ndarray,MA.MaskedArray)):
raise TypeError('Input visibilities must be a numpy or a masked array')
if not isinstance(visscaleinfo['vis'], MA.MaskedArray):
visscaleinfo['vis'] = MA.array(visscaleinfo['vis'], mask=NP.isnan(visscaleinfo['vis']))
vistriad = MA.copy(visscaleinfo['vis'])
else:
if 'bltriplet' not in visscaleinfo:
raise KeyError('Input dictionary visscaleinfo does not contain key "bltriplet"')
blind, blrefind, dbl = LKP.find_1NN(visscaleinfo['vis'].baselines, visscaleinfo['bltriplet'], distance_ULIM=0.2, remove_oob=True)
if blrefind.size != 3:
blind_missing = NP.setdiff1d(NP.arange(3), blind, assume_unique=True)
blind_next, blrefind_next, dbl_next = LKP.find_1NN(visscaleinfo['vis'].baselines, -1*visscaleinfo['bltriplet'][blind_missing,:], distance_ULIM=0.2, remove_oob=True)
if blind_next.size + blind.size != 3:
raise ValueError('Exactly three baselines were not found in the reference baselines')
else:
blind = NP.append(blind, blind_missing[blind_next])
blrefind = NP.append(blrefind, blrefind_next)
else:
blind_missing = []
vistriad = NP.transpose(visscaleinfo['vis'].skyvis_freq[blrefind,:,:], (0,2,1))
if len(blind_missing) > 0:
vistriad[-blrefind_next.size:,:,:] = vistriad[-blrefind_next.size:,:,:].conj()
vistriad = MA.array(vistriad, mask=NP.isnan(vistriad))
lst_vis = visscaleinfo['vis'].lst
viswts = MA.array(NP.ones_like(vistriad.data), mask=vistriad.mask, dtype=NP.float)
lst_out = self.cPhase.cpinfo['processed']['prelim']['lstbins'] * 15.0
if lst_vis.size == 1: # Apply the visibility scaling from one reference LST to all LST
vis_ref = vistriad * NP.ones(lst_out.size).reshape(1,-1,1)
wts_ref = viswts * NP.ones(lst_out.size).reshape(1,-1,1)
else:
vis_ref, wts_ref = OPS.interpolate_masked_array_1D(vistriad, viswts, 1, visscaleinfo['smoothinfo'], inploc=lst_vis, outloc=lst_out)
if not isinstance(method, str):
raise TypeError('Input method must be a string')
if method.lower() not in ['fft', 'nufft']:
raise ValueError('Specified FFT method not supported')
if not isinstance(apply_flags, bool):
raise TypeError('Input apply_flags must be boolean')
flagwts = 1.0
visscale = 1.0
if datapool.lower() == 'prelim':
if method.lower() == 'fft':
freq_wts = NP.empty((bw_eff.size, self.f.size), dtype=NP.float_) # nspw x nchan
frac_width = DSP.window_N2width(n_window=None, shape=shape, fftpow=fftpow, area_normalize=False, power_normalize=True)
window_loss_factor = 1 / frac_width
n_window = NP.round(window_loss_factor * bw_eff / self.df).astype(NP.int)
ind_freq_center, ind_channels, dfrequency = LKP.find_1NN(self.f.reshape(-1,1), freq_center.reshape(-1,1), distance_ULIM=0.51*self.df, remove_oob=True)
sortind = NP.argsort(ind_channels)
ind_freq_center = ind_freq_center[sortind]
ind_channels = ind_channels[sortind]
dfrequency = dfrequency[sortind]
n_window = n_window[sortind]
for i,ind_chan in enumerate(ind_channels):
window = NP.sqrt(frac_width * n_window[i]) * DSP.window_fftpow(n_window[i], shape=shape, fftpow=fftpow, centering=True, peak=None, area_normalize=False, power_normalize=True)
window_chans = self.f[ind_chan] + self.df * (NP.arange(n_window[i]) - int(n_window[i]/2))
ind_window_chans, ind_chans, dfreq = LKP.find_1NN(self.f.reshape(-1,1), window_chans.reshape(-1,1), distance_ULIM=0.51*self.df, remove_oob=True)
sind = NP.argsort(ind_window_chans)
ind_window_chans = ind_window_chans[sind]
ind_chans = ind_chans[sind]
dfreq = dfreq[sind]
window = window[ind_window_chans]
window = NP.pad(window, ((ind_chans.min(), self.f.size-1-ind_chans.max())), mode='constant', constant_values=((0.0,0.0)))
freq_wts[i,:] = window
npad = int(self.f.size * pad)
lags = DSP.spectral_axis(self.f.size + npad, delx=self.df, use_real=False, shift=True)
result = {'freq_center': freq_center, 'shape': shape, 'freq_wts': freq_wts, 'bw_eff': bw_eff, 'fftpow': fftpow, 'npad': npad, 'lags': lags, 'lag_corr_length': self.f.size / NP.sum(freq_wts, axis=-1), 'whole': {'dspec': {'twts': self.cPhase.cpinfo['processed'][datapool]['wts']}}, 'residual': {'dspec': {'twts': self.cPhase.cpinfo['processed'][datapool]['wts']}}, 'errinfo': {'dspec0': {'twts': self.cPhase.cpinfo['errinfo']['wts']['0']}, 'dspec1': {'twts': self.cPhase.cpinfo['errinfo']['wts']['1']}}, 'submodel': {}}
if visscaleinfo is not None:
visscale = NP.nansum(NP.transpose(vis_ref[NP.newaxis,NP.newaxis,:,:,:], axes=(0,3,1,2,4)) * freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) / NP.nansum(freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) # nspw x nlst x (ndays=1) x (nbl=3) x (nchan=1)
visscale = NP.sqrt(1.0/NP.nansum(1/NP.abs(visscale)**2, axis=-2, keepdims=True)) # nspw x nlst x (ndays=1) x (ntriads=1) x (nchan=1)
for dpool in ['errinfo', 'prelim', 'submodel', 'residual']:
if dpool.lower() == 'errinfo':
for diffind in range(2):
if apply_flags:
flagwts = NP.copy(self.cPhase.cpinfo['errinfo']['wts']['{0}'.format(diffind)].data)
flagwts = flagwts[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
flagwts = 1.0 * flagwts / NP.mean(flagwts, axis=-1, keepdims=True) # (nspw=1) x nlst x ndays x ntriads x nchan
for stat in self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)]:
eicp = NP.copy(self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].data) # Minimum shape as stored
# eicp = NP.copy(self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].filled(0.0)) # Minimum shape as stored
eicp = NP.broadcast_to(eicp, self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].shape) # Broadcast to final shape
eicp = eicp[NP.newaxis,...] # nlst x ndayscomb x ntriads x nchan --> (nspw=1) x nlst x ndayscomb x ntriads x nchan
ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)]
result[dpool]['dspec{0}'.format(diffind)][stat] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
else:
if dpool in self.cPhase.cpinfo['processed']:
if apply_flags:
flagwts = NP.copy(self.cPhase.cpinfo['processed'][datapool]['wts'].data)
flagwts = flagwts[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
flagwts = 1.0 * flagwts / NP.mean(flagwts, axis=-1, keepdims=True) # (nspw=1) x nlst x ndays x ntriads x nchan
if dpool == 'submodel':
eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'].data) # Minimum shape as stored
# eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'].filled(1.0)) # Minimum shape as stored
eicp = NP.broadcast_to(eicp, self.cPhase.cpinfo['processed'][datapool]['eicp']['mean'].shape) # Broadcast to final shape
eicp = eicp[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)]
result[dpool]['dspec'] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
else:
for key in self.cPhase.cpinfo['processed'][dpool]['eicp']:
eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'][key].data)
# eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'][key].filled(1.0))
eicp = eicp[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)]
if dpool == 'prelim':
result['whole']['dspec'][key] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
else:
result[dpool]['dspec'][key] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
result['lag_kernel'] = DSP.FT1D(NP.pad(flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
self.cPhaseDS = result
if resample:
result_resampled = copy.deepcopy(result)
downsample_factor = NP.min((self.f.size + npad) * self.df / bw_eff)
result_resampled['lags'] = DSP.downsampler(result_resampled['lags'], downsample_factor, axis=-1, method='interp', kind='linear')
result_resampled['lag_kernel'] = DSP.downsampler(result_resampled['lag_kernel'], downsample_factor, axis=-1, method='interp', kind='linear')
for dpool in ['errinfo', 'prelim', 'submodel', 'residual']:
if dpool.lower() == 'errinfo':
for diffind in self.cPhase.cpinfo[dpool]['eicp_diff']:
for key in self.cPhase.cpinfo[dpool]['eicp_diff'][diffind]:
result_resampled[dpool]['dspec'+diffind][key] = DSP.downsampler(result_resampled[dpool]['dspec'+diffind][key], downsample_factor, axis=-1, method='FFT')
if dpool in self.cPhase.cpinfo['processed']:
if dpool == 'submodel':
result_resampled[dpool]['dspec'] = DSP.downsampler(result_resampled[dpool]['dspec'], downsample_factor, axis=-1, method='FFT')
else:
for key in self.cPhase.cpinfo['processed'][datapool]['eicp']:
if dpool == 'prelim':
result_resampled['whole']['dspec'][key] = DSP.downsampler(result_resampled['whole']['dspec'][key], downsample_factor, axis=-1, method='FFT')
else:
result_resampled[dpool]['dspec'][key] = DSP.downsampler(result_resampled[dpool]['dspec'][key], downsample_factor, axis=-1, method='FFT')
self.cPhaseDS_resampled = result_resampled
return result_resampled
else:
return result
############################################################################
def subset(self, selection=None):
"""
------------------------------------------------------------------------
Return triad and time indices to select a subset of processed data
Inputs:
selection [NoneType or dictionary] Selection parameters based on which
triad, LST, and day indices will be returned. If set to None
(default), all triad, LST, and day indices will be returned.
Otherwise it must be a dictionary with the following keys
and values:
'triads' [NoneType | |
nocc: , : , : ].transpose(2,0,3,1)))
if (fully_ic):
numpy.save(intfolder+"W:eaaa", numpy.asfortranarray(eris['ppaa'][nocc: , ncor:nocc, : , : ].transpose(0,2,1,3)))
numpy.save(intfolder+"W:caaa", numpy.asfortranarray(eris['ppaa'][nfro:ncor, ncor:nocc, : , : ].transpose(0,2,1,3)))
# 2 "E"
numpy.save(intfolder+"W:eeca", numpy.asfortranarray(eris['pepe'][nfro:ncor, : , ncor:nocc, : ].transpose(1,3,0,2)))
if (third_order):
numpy.save(intfolder+"W:aece", numpy.asfortranarray(eris['ppee'][ncor:nocc, nfro:ncor, : , : ].transpose(0,2,1,3)))
numpy.save(intfolder+"W:eeec", numpy.asfortranarray(eris['ppee'][nocc: , nfro:ncor, : , : ].transpose(2,0,3,1)))
numpy.save(intfolder+"W:eeea", numpy.asfortranarray(eris['ppee'][nocc: , ncor:nocc, : , : ].transpose(2,0,3,1)))
feri = h5py.File(intfolder+"int2eeee.hdf5", 'w')
ao2mo.full(mc.mol, mo[:,nocc:], feri, compact=False)
for o in range(nvir):
int2eee = feri['eri_mo'][o*(norb-nocc):(o+1)*(norb-nocc),:]
numpy.asfortranarray(int2eee).tofile(intfolder+"W:eeee%04d"%(o))
# OUTPUT EVERYTHING (for debug of PT3)
#int2=ao2mo.outcore.general_iofree(mc.mol, (mo, mo, mo, mo), compact=False)
#int2.shape=(norb,norb,norb,norb)
#inout={}
#inout['c']=[nfro,ncor]
#inout['a']=[ncor,nocc]
#inout['e']=[nocc,norb]
##dom=['c','a','e']
##for p in range(3):
## for q in range(3):
## for r in range(3):
## for s in range(3):
## name="W:"+dom[p]+dom[q]+dom[r]+dom[s]
## test=int2[inout[dom[p]][0]:inout[dom[p]][1],\
## inout[dom[r]][0]:inout[dom[r]][1],\
## inout[dom[q]][0]:inout[dom[q]][1],\
## inout[dom[s]][0]:inout[dom[s]][1]].transpose(0,2,1,3)
## print("Output: {:} Shape: {:}".format(name,test.shape))
## numpy.save(intfolder+name, numpy.asfortranarray(test))
#list=['aaaa','aeae','aeca','aece','aeea',\
# 'caaa','caac','caca','cace','ccaa',\
# 'ccae','ccca','cccc','ccce','cece',\
# 'ceec','eaaa','eaca','eeaa','eeca',\
# 'eecc','eeea','eeec','eeee']
#for i in range(len(list)):
# p=list[i][0]
# q=list[i][1]
# r=list[i][2]
# s=list[i][3]
# name="W:"+p+q+r+s
# test=int2[inout[p][0]:inout[p][1],\
# inout[r][0]:inout[r][1],\
# inout[q][0]:inout[q][1],\
# inout[s][0]:inout[s][1]].transpose(0,2,1,3)
# print("Output: {:} Shape: {:}".format(name,test.shape))
# numpy.save(intfolder+name, numpy.asfortranarray(test))
end = time.time()
print('......savings of INGREDIENTS took %10.2f sec' %(end-start))
print("")
# Write "FCIDUMP_aaav0" and "FCIDUMP_aaac"
if (not fully_ic):
writeFCIDUMPs_MRLCC(mc,eris,eris_sp,int1,energy_core,energyE0,nfro)
return energyE0, norb
def writeFCIDUMPs_NEVPT(mc,eris,eris_sp,aaavsplit,energy_core,energyE0,nfro):
    """Write the NEVPT2 perturber FCIDUMP files.

    Produces one file ``FCIDUMP_aaav<K>`` per chunk K of the virtual space
    (active orbitals plus that chunk of virtuals) and a single
    ``FCIDUMP_aaac`` file (non-frozen core plus active orbitals).  Two-body
    integrals come from ``eris['ppaa']``; the one-body part is the effective
    Hamiltonian ``eris_sp['h1eff']``.

    Args:
        mc: pyscf CASSCF/CASCI-like object (ncore, ncas, mo_coeff, mol,
            energy_nuc(), _scf are read from it).
        eris: dict of MO-basis two-electron integral blocks; only 'ppaa'
            is used here.
        eris_sp: dict holding the effective one-body matrix 'h1eff'.
        aaavsplit: number of chunks the virtual orbitals are split into
            (one FCIDUMP_aaav file is written per chunk).
        energy_core: energy contribution of the doubly-occupied core.
        energyE0: zeroth-order (reference) total energy; enters the files
            as a constant shift.
        nfro: number of frozen core orbitals (excluded from FCIDUMP_aaac).
    """
    # Initializations
    ncor = mc.ncore
    nact = mc.ncas
    nocc = ncor+nact
    mo = mc.mo_coeff
    # About symmetry...
    # Map pyscf irrep labels to the DMRG irrep convention for the FCIDUMP
    # headers; Dooh/Coov are folded onto D2h/C2v subgroups.
    from pyscf import symm
    mol = mc.mol
    orbsymout=[]
    orbsym = []
    if (mol.symmetry):
        orbsym = symm.label_orb_symm(mol, mol.irrep_id,
                                     mol.symm_orb, mo, s=mc._scf.get_ovlp())
    if mol.symmetry and orbsym.any():
        if mol.groupname.lower() == 'dooh':
            orbsymout = [dmrg_sym.IRREP_MAP['D2h'][i % 10] for i in orbsym]
        elif mol.groupname.lower() == 'coov':
            orbsymout = [dmrg_sym.IRREP_MAP['C2v'][i % 10] for i in orbsym]
        else:
            orbsymout = [dmrg_sym.IRREP_MAP[mol.groupname][i] for i in orbsym]
    else:
        orbsymout = []
    # Split the virtual orbitals [nocc, norb) into aaavsplit chunks.
    virtOrbs = list(range(nocc, eris_sp['h1eff'].shape[0]))
    chunks = len(virtOrbs) // aaavsplit
    virtRange = [virtOrbs[i:i+chunks] for i in range(0, len(virtOrbs), chunks)]
    for K in range(aaavsplit):
        # Orbitals of this perturber space: all active + K-th virtual chunk.
        currentOrbs = list(range(ncor, nocc)) + virtRange[K]
        fout = open('FCIDUMP_aaav%d'%(K),'w')
        #tools.fcidump.write_head(fout, eris_sp['h1eff'].shape[0]-ncor, mol.nelectron-2*ncor, orbsym= orbsymout[ncor:])
        tools.fcidump.write_head(fout, nact+len(virtRange[K]), mol.nelectron-2*ncor,\
                orbsym= (orbsymout[ncor:nocc]+orbsymout[virtRange[K][0]:virtRange[K][-1]+1]) )
        # Dump (Ij|kl) integrals above the 1e-8 screening threshold;
        # indices are shifted to the 1-based, core-excluded numbering.
        for i in range(len(currentOrbs)):
            for j in range(ncor, nocc):
                for k in range(nact):
                    for l in range(k+1):
                        I = currentOrbs[i]
                        if abs(eris['ppaa'][I,j,k,l]) > 1.e-8 :
                            fout.write(' %17.9e %4d %4d %4d %4d\n' \
                                       % (eris['ppaa'][I,j,k,l], i+1, j+1-ncor, k+1, l+1))
        # Assemble the one-body block for (active + this virtual chunk).
        h1eff = numpy.zeros(shape=(nact+len(virtRange[K]), nact+len(virtRange[K])))
        h1eff[:nact, :nact] = eris_sp['h1eff'][ncor:nocc,ncor:nocc]
        h1eff[nact:, nact:] = eris_sp['h1eff'][virtRange[K][0]:virtRange[K][-1]+1, virtRange[K][0]:virtRange[K][-1]+1]
        h1eff[:nact, nact:] = eris_sp['h1eff'][ncor:nocc, virtRange[K][0]:virtRange[K][-1]+1]
        h1eff[nact:, :nact] = eris_sp['h1eff'][virtRange[K][0]:virtRange[K][-1]+1, ncor:nocc]
        tools.fcidump.write_hcore(fout, h1eff, nact+len(virtRange[K]), tol=1e-8)
        #tools.fcidump.write_hcore(fout,\
        # eris_sp['h1eff'][virtRange[K][0]:virtRange[K][-1]+1, virtRange[K][0]:virtRange[K][-1]+1], len(virtRange[K]), tol=1e-8)
        # Constant (core) energy term written as the 0 0 0 0 entry.
        fout.write(' %17.9e 0 0 0 0\n' %( mc.energy_nuc()+energy_core-energyE0))
        fout.close()
        print("Wrote FCIDUMP_aaav%d file"%(K))
    # FCIDUMP_aaac: non-frozen core + active orbitals.
    fout = open('FCIDUMP_aaac','w')
    tools.fcidump.write_head(fout, nocc-nfro, mol.nelectron-2*nfro, orbsym= orbsymout[nfro:nocc])
    for i in range(nfro,nocc):
        for j in range(ncor, nocc):
            for k in range(ncor, nocc):
                for l in range(ncor,k+1):
                    if abs(eris['ppaa'][i,j,k-ncor,l-ncor]) > 1.e-8 :
                        fout.write(' %17.9e %4d %4d %4d %4d\n' \
                                   % (eris['ppaa'][i,j,k-ncor,l-ncor], i+1-nfro, j+1-nfro, k+1-nfro, l+1-nfro))
    # Constant shift: minus the DMRG correlation energy minus the
    # one-body energy of the non-frozen core orbitals.
    dmrge = energyE0-mc.energy_nuc()-energy_core
    ecore_aaac = 0.0;
    for i in range(nfro,ncor):
        ecore_aaac += 2.0*eris_sp['h1eff'][i,i]
    tools.fcidump.write_hcore(fout, eris_sp['h1eff'][nfro:nocc,nfro:nocc], nocc-nfro, tol=1e-8)
    fout.write(' %17.9e 0 0 0 0\n' %( -dmrge-ecore_aaac))
    fout.close()
    print("Wrote FCIDUMP_aaac file")
    print("")
def writeFCIDUMPs_MRLCC(mc,eris,eris_sp,int1,energy_core,energyE0,nfro):
    """Write the MRLCC FCIDUMP files: 'FCIDUMP_aaav0' and 'FCIDUMP_aaac'.

    'FCIDUMP_aaav0' holds the active+virtual integrals taken from
    eris['ppaa'] / eris['papa'] plus the effective one-body matrix
    eris_sp['h1eff'].  'FCIDUMP_aaac' holds the non-frozen core + active
    space, with the two-body part recomputed via ao2mo over those MOs.

    Args:
        mc: pyscf CASSCF/CASCI-like object (ncore, ncas, mo_coeff, mol,
            energy_nuc(), _scf are read from it).
        eris: dict of MO two-electron integral blocks ('ppaa', 'papa',
            'ppcc', 'pcpc' are used).
        eris_sp: dict with the effective one-body matrix 'h1eff'.
        int1: one-body integral matrix; its shape also fixes the total
            orbital count used for FCIDUMP_aaav0.
        energy_core: energy of the doubly-occupied core.
        energyE0: zeroth-order (reference) total energy.
        nfro: number of frozen core orbitals.
    """
    # Initializations
    ncor = mc.ncore
    nact = mc.ncas
    nocc = ncor+nact
    mo = mc.mo_coeff
    # About symmetry...
    # Map pyscf irreps to the DMRG convention for the FCIDUMP headers.
    from pyscf import symm
    mol = mc.mol
    orbsymout=[]
    orbsym = []
    if (mol.symmetry):
        orbsym = symm.label_orb_symm(mol, mol.irrep_id,
                                     mol.symm_orb, mo, s=mc._scf.get_ovlp())
    if mol.symmetry and orbsym.any():
        if mol.groupname.lower() == 'dooh':
            orbsymout = [dmrg_sym.IRREP_MAP['D2h'][i % 10] for i in orbsym]
        elif mol.groupname.lower() == 'coov':
            orbsymout = [dmrg_sym.IRREP_MAP['C2v'][i % 10] for i in orbsym]
        else:
            orbsymout = [dmrg_sym.IRREP_MAP[mol.groupname][i] for i in orbsym]
    else:
        orbsymout = []
    fout = open('FCIDUMP_aaav0','w')
    tools.fcidump.write_head(fout, int1.shape[0]-ncor, mol.nelectron-2*ncor, orbsym= orbsymout[ncor:])
    # Dump screened two-body integrals; for j in the virtual range the two
    # exchange-type 'papa' permutations are written explicitly as well.
    for i in range(ncor,int1.shape[0]):
        for j in range(ncor, i+1):
            for k in range(mc.ncas):
                for l in range(k+1):
                    if abs(eris['ppaa'][i,j, k,l]) > 1.e-8 :
                        fout.write(' %17.9e %4d %4d %4d %4d\n' \
                                   % (eris['ppaa'][i,j, k,l], i+1-ncor, j+1-ncor, k+1, l+1))
                    if (j >= nocc and abs(eris['papa'][i, k, j, l]) > 1.e-8):
                        fout.write(' %17.9e %4d %4d %4d %4d\n' \
                                   % (eris['papa'][i,k,j, l], i+1-ncor, k+1, l+1, j+1-ncor))
                    if (j >= nocc and abs(eris['papa'][i, l, j, k]) > 1.e-8):
                        fout.write(' %17.9e %4d %4d %4d %4d\n' \
                                   % (eris['papa'][i,l, j, k], i+1-ncor, l+1, k+1, j+1-ncor))
    tools.fcidump.write_hcore(fout, eris_sp['h1eff'][ncor:,ncor:], int1.shape[0]-ncor, tol=1e-8)
    # Constant (core) energy term written as the 0 0 0 0 entry.
    fout.write(' %17.9e 0 0 0 0\n' %( mc.energy_nuc()+energy_core-energyE0))
    fout.close()
    print("Wrote FCIDUMP_aaav0 file")
    # FCIDUMP_aaac: recompute the two-body integrals over the non-frozen
    # core + active MOs, and remove the frozen-core-only contributions
    # from energy_core so the constant shift is consistent.
    eri1cas = ao2mo.outcore.general_iofree(mol, (mo[:,nfro:nocc], mo[:,nfro:nocc], mo[:,nfro:nocc], mo[:,nfro:nocc]), compact=True)
    core_only_1e = numpy.einsum('ii', int1[nfro:ncor, nfro:ncor])*2.0
    core_only_2e = 2.0 * numpy.einsum('iijj', eris['ppcc'][nfro:ncor,
                                                           nfro:ncor,
                                                           :ncor-nfro,
                                                           :ncor-nfro]) \
                       - numpy.einsum('ijij', eris['pcpc'][nfro:ncor,
                                                           :ncor-nfro,
                                                           nfro:ncor,
                                                           :ncor-nfro])
    energy_fro = energy_core - core_only_1e - core_only_2e
    # print("Energy_fro2 = %13.8f"%(energy_fro))
    # print("E_nuc_aaac = %13.8f"%(mc.energy_nuc() + energy_fro - energyE0))
    tools.fcidump.from_integrals("FCIDUMP_aaac", int1[nfro:nocc,nfro:nocc],
                                 eri1cas, nocc-nfro, mol.nelectron-2*nfro,
                                 nuc=mc.energy_nuc() + energy_fro - energyE0,
                                 orbsym=orbsymout[nfro:nocc], tol=1e-8,
                                 float_format=' %17.9e')
    print("Wrote FCIDUMP_aaac file")
    print("")
def writeNEVPTIntegralsLEGACY(mc, E1, E2, E1eff, aaavsplit, nfro):
    """Legacy writer for NEVPT2 integral blocks (kept for reference).

    Builds the effective one-body Hamiltonian (core/virtual blocks
    corrected with the active-space density E1eff), dumps the labelled
    two-body integral blocks as .npy files under ./int, and computes the
    zeroth-order energy from E1/E2 plus the core contribution.

    Args:
        mc: pyscf CASSCF/CASCI-like object.
        E1, E2: active-space one- and two-particle reduced density matrices.
        E1eff: effective one-particle density used to dress 'h1eff'.
        aaavsplit: passed through to writeFCIDUMPs_NEVPT (currently the
            call below is disabled).
        nfro: number of frozen core orbitals.

    Returns:
        (norb, energyE0): total orbital count and the reference energy.
    """
    print("LEGACY code from INTEGRALS ::begin::")
    # Initializations
    ncor = mc.ncore
    nact = mc.ncas
    norb = mc.mo_coeff.shape[1]
    nvir = norb-ncor-nact
    nocc = ncor+nact
    mo = mc.mo_coeff
    eris = _ERIS(mc, mo)
    eris_sp={}
    # Dress the core-core and virtual-virtual blocks of h1eff with the
    # Coulomb/exchange contraction against E1eff.
    eris_sp['h1eff']= 1.*eris['h1eff'] #numpy.zeros(shape=(norb, norb))
    eris_sp['h1eff'][:ncor,:ncor] += numpy.einsum('abcd,cd', eris['ppaa'][:ncor, :ncor,: ,:], E1eff)
    eris_sp['h1eff'][:ncor,:ncor] -= numpy.einsum('abcd,bd', eris['papa'][:ncor, : ,:ncor,:], E1eff)*0.5
    eris_sp['h1eff'][nocc:,nocc:] += numpy.einsum('abcd,cd', eris['ppaa'][nocc:, nocc:,: ,:], E1eff)
    eris_sp['h1eff'][nocc:,nocc:] -= numpy.einsum('abcd,bd', eris['papa'][nocc:, : ,nocc:,:], E1eff)*0.5
    # Warn if h1eff is not diagonal in the core and virtual blocks, which
    # indicates the orbitals are not (pseudo-)canonical/natural.
    offdiagonal = 0.0
    for k in range(ncor):
        for l in range(ncor):
            if(k != l):
                offdiagonal = max(abs(offdiagonal), abs(eris_sp['h1eff'][k,l] ))
    for k in range(nocc, norb):
        for l in range(nocc,norb):
            if(k != l):
                offdiagonal = max(abs(offdiagonal), abs(eris_sp['h1eff'][k,l] ))
    if (abs(offdiagonal) > 1e-6):
        print("WARNING: Have to use natural orbitals from CAASCF")
        print(" offdiagonal elements: {:13.6f}".format(offdiagonal))
        print("")
    # 'cvcv' may live on disk (chkfile) rather than in memory.
    eriscvcv = eris['cvcv']
    if (not isinstance(eris_sp['h1eff'], type(eris['cvcv']))):
        eriscvcv = lib.chkfile.load(eris['cvcv'].name, "eri_mo")
    eris_sp['cvcv'] = eriscvcv.reshape(ncor, nvir, ncor, nvir)
    import os
    os.system("mkdir int")
    # Dump the labelled integral blocks (c=core, a=active, e=external)
    # as Fortran-ordered .npy files consumed by the external PT code.
    numpy.save("int/W:caac", numpy.asfortranarray(eris['papa'][nfro:ncor, :, nfro:ncor, :].transpose(0,3,1,2)))
    numpy.save("int/W:aeca", numpy.asfortranarray(eris['papa'][nfro:ncor, :, nocc:, :].transpose(1,2,0,3)))
    numpy.save("int/W:ccaa", numpy.asfortranarray(eris['papa'][nfro:ncor, :, nfro:ncor, :].transpose(0,2,1,3)))
    numpy.save("int/W:eeaa", numpy.asfortranarray(eris['papa'][nocc:, :, nocc:, :].transpose(0,2,1,3)))
    numpy.save("int/W:caca", numpy.asfortranarray(eris['ppaa'][nfro:ncor, nfro:ncor, :, :].transpose(0,2,1,3)))
    numpy.save("int/W:eaca", numpy.asfortranarray(eris['ppaa'][nocc:, nfro:ncor, :, :].transpose(0,2,1,3)))
    numpy.save("int/W:eecc", numpy.asfortranarray(eris_sp['cvcv'][nfro:,:,nfro:,:].transpose(1,3,0,2)))
    numpy.save("int/W:ccae", numpy.asfortranarray(eris['pacv'][nfro:ncor,:,nfro:,:].transpose(0,2,1,3)))
    numpy.save("int/W:aaaa", numpy.asfortranarray(eris['ppaa'][ncor:nocc, ncor:nocc, :, :].transpose(0,2,1,3)))
    numpy.save("int/W:eeca", numpy.asfortranarray(eris['pacv'][nocc:, :, nfro:, :].transpose(3,0,2,1)))
    numpy.save("int/int1eff", numpy.asfortranarray(eris_sp['h1eff'][nfro:,nfro:]))
    numpy.save("int/E1.npy", numpy.asfortranarray(E1))
    numpy.save("int/E2.npy", numpy.asfortranarray(E2))
    # Reference energy: active-space contraction with E1/E2 plus the
    # closed-shell core energy and the nuclear repulsion.
    energyE0 = 1.0*numpy.einsum('ij,ij', eris_sp['h1eff'][ncor:nocc, ncor:nocc], E1)
    energyE0 += 0.5*numpy.einsum('ijkl,ijkl', eris['ppaa'][ncor:nocc, ncor:nocc, :, :].transpose(0,2,1,3), E2)
    dmcore = numpy.dot(mo[:,:ncor], mo[:,:ncor].T)*2
    vj, vk = mc._scf.get_jk(mc.mol, dmcore)
    energyE0 += numpy.einsum('ij,ji', dmcore, mc.get_hcore()) \
              + numpy.einsum('ij,ji', dmcore, vj-0.5*vk) * .5
    energyE0 += mc.energy_nuc()
    print("Energy = ", energyE0)
    # NOTE(review): the core energy below repeats the get_jk computation
    # just done above; presumably kept for the disabled FCIDUMP call.
    dmcore = numpy.dot(mo[:,:ncor], mo[:,:ncor].T)*2
    vj, vk = mc._scf.get_jk(mc.mol, dmcore)
    energy_core = numpy.einsum('ij,ji', dmcore, mc.get_hcore()) \
                + numpy.einsum('ij,ji', dmcore, vj-0.5*vk) * .5
    print(energy_core)
    # Write "FCIDUMP_aaav0" and "FCIDUMP_aaac" (currently disabled).
    if (False):
        writeFCIDUMPs_NEVPT(mc,eris,eris_sp,aaavsplit,energy_core,energyE0,nfro)
    print("LEGACY code from INTEGRALS ::end::")
    return norb, energyE0
def writeMRLCCIntegralsLEGACY(mc, E1, E2, nfro):
    """Legacy writer for MRLCC integral blocks (kept for reference).

    Recomputes the MO two-electron integrals with ao2mo, dumps the
    labelled blocks (c=core, a=active, e=external) as Fortran-ordered
    .npy files under ./int, and evaluates the zeroth-order energy from
    E1/E2 plus core and nuclear contributions.

    Args:
        mc: pyscf CASSCF/CASCI-like object.
        E1, E2: active-space one- and two-particle reduced density matrices.
        nfro: number of frozen core orbitals.

    Returns:
        (energyE0, norb).  NOTE(review): the return order is the reverse
        of writeNEVPTIntegralsLEGACY -- confirm callers expect this.
    """
    print("LEGACY code from INTEGRALS ::begin::")
    # Initializations
    ncor = mc.ncore
    nact = mc.ncas
    norb = mc.mo_coeff.shape[1]
    nvir = norb-ncor-nact
    nocc = ncor+nact
    mo = mc.mo_coeff
    #this is chemistry notation
    int2popo = ao2mo.outcore.general_iofree(mc.mol, (mo, mo[:,:nocc], mo, mo[:,:nocc]), compact=False)
    int2ppoo = ao2mo.outcore.general_iofree(mc.mol, (mo, mo, mo[:,:nocc], mo[:,:nocc]), compact=False)
    int2popo.shape=(norb, nocc, norb, nocc)
    int2ppoo.shape=(norb, norb, nocc, nocc)
    # One-body integrals dressed with the frozen-core mean field (int1)
    # and with the full core mean field (int1_eff).
    dmcore = numpy.dot(mo[:,:nfro], mo[:,:nfro].T)*2
    vj, vk = mc._scf.get_jk(mc.mol, dmcore)
    vhfcore = reduce(numpy.dot, (mo.T, vj-vk*0.5, mo))
    int1 = reduce(numpy.dot, (mo.T, mc.get_hcore(), mo)) +vhfcore
    dmcore = numpy.dot(mo[:,:ncor], mo[:,:ncor].T)*2
    vj, vk = mc._scf.get_jk(mc.mol, dmcore)
    vhfcore = reduce(numpy.dot, (mo.T, vj-vk*0.5, mo))
    int1_eff = reduce(numpy.dot, (mo.T, mc.get_hcore(), mo)) +vhfcore
    # Dump density matrices, one-body matrices and the labelled two-body
    # blocks consumed by the external PT code (assumes ./int exists).
    numpy.save("int/E2",numpy.asfortranarray(E2))
    numpy.save("int/E1",numpy.asfortranarray(E1))
    numpy.save("int/int1",numpy.asfortranarray(int1[nfro:,nfro:]))
    numpy.save("int/int1eff",numpy.asfortranarray(int1_eff[nfro:, nfro:]))
    numpy.save("int/W:caca", numpy.asfortranarray(int2ppoo[nfro:ncor, nfro:ncor, ncor:nocc, ncor:nocc].transpose(0,2,1,3)))
    numpy.save("int/W:caac", numpy.asfortranarray(int2popo[nfro:ncor,ncor:nocc, ncor:nocc, nfro:ncor].transpose(0,2,1,3)))
    numpy.save("int/W:cece", numpy.asfortranarray(int2ppoo[nocc:, nocc:, nfro:ncor, nfro:ncor].transpose(2,0,3,1)))
    numpy.save("int/W:ceec", numpy.asfortranarray(int2popo[nocc:, nfro:ncor, nocc:, nfro:ncor].transpose(1,2,0,3)))
    numpy.save("int/W:aeae", numpy.asfortranarray(int2ppoo[nocc:, nocc:, ncor:nocc,ncor:nocc].transpose(2,0,3,1)))
    numpy.save("int/W:aeea", numpy.asfortranarray(int2popo[nocc:, ncor:nocc,nocc:, ncor:nocc].transpose(1,2,0,3)))
    numpy.save("int/W:cccc", numpy.asfortranarray(int2ppoo[nfro:ncor,nfro:ncor, nfro:ncor, nfro:ncor].transpose(0,2,1,3)))
    numpy.save("int/W:aaaa", numpy.asfortranarray(int2ppoo[ncor:nocc,ncor:nocc, ncor:nocc, ncor:nocc].transpose(0,2,1,3)))
    # External-external-external-external block streamed per virtual
    # orbital into raw binary files.
    feri = h5py.File("int/int2eeee.hdf5", 'w')
    ao2mo.full(mc.mol, mo[:,nocc:], feri, compact=False)
    for o in range(nvir):
        int2eee = feri['eri_mo'][o*(norb-nocc):(o+1)*(norb-nocc),:]
        numpy.asfortranarray(int2eee).tofile("int/W:eeee%04d"%(o))
    numpy.save("int/W:eecc", numpy.asfortranarray(int2popo[nocc:,nfro:ncor,nocc:,nfro:ncor].transpose(0,2,1,3)))
    numpy.save("int/W:eeca", numpy.asfortranarray(int2popo[nocc:,nfro:ncor, nocc:, ncor:nocc].transpose(0,2,1,3)))
    numpy.save("int/W:ccaa", numpy.asfortranarray(int2popo[nfro:ncor,ncor:nocc, nfro:ncor, ncor:nocc].transpose(0,2,1,3)))
    numpy.save("int/W:eeaa", numpy.asfortranarray(int2popo[nocc:,ncor:nocc, nocc:, ncor:nocc].transpose(0,2,1,3)))
    numpy.save("int/W:eaca", numpy.asfortranarray(int2popo[nocc:,nfro:ncor, ncor:nocc, ncor:nocc].transpose(0,2,1,3)))
    numpy.save("int/W:aeca", numpy.asfortranarray(int2popo[ncor:nocc,nfro:ncor, nocc:,ncor:nocc].transpose(0,2,1,3)))
    numpy.save("int/W:ccae", numpy.asfortranarray(int2popo[nfro:ncor,ncor:nocc, nocc:, nfro:ncor].transpose(0,3,1,2)))
    # Core energy and reference energy.
    dmcore = numpy.dot(mo[:,:ncor], mo[:,:ncor].T)*2
    vj, vk = mc._scf.get_jk(mc.mol, dmcore)
    vhfcore = reduce(numpy.dot, (mo.T, vj-vk*0.5, mo))
    h1eff = int1_eff
    energy_core = numpy.einsum('ij,ji', dmcore, mc.get_hcore()) \
                + numpy.einsum('ij,ji', dmcore, vj-0.5*vk) * .5
    #print energy_core2+mc.mol.energy_nuc(), energy_core+mc.mol.energy_nuc(), energy_nfro_core+mc.mol.energy_nuc()
    energyE0 = 1.0*numpy.einsum('ij,ij', h1eff[ncor:nocc, ncor:nocc], E1)
    #+ 0.5*numpy.einsum('ikjl,ijkl', E2, int2ppoo[ncor:,ncor:,ncor:,ncor:])
    for i in range(mc.ncas):
        for j in range(mc.ncas):
            for k in range(mc.ncas):
                for l in range(mc.ncas):
                    energyE0 += 0.5*E2[i,k,j,l] * int2ppoo[i+ncor, j+ncor, k+ncor, l+ncor]
    energyE0 += energy_core
    energyE0 += mc.energy_nuc()
    print("Energy = ", energyE0)
    # Write "FCIDUMP_aaav0" and "FCIDUMP_aaac"
    # NOTE(review): this branch is dead code; if it were ever enabled it
    # would raise NameError -- 'eris' and 'eris_sp' are not defined in
    # this function.
    if (False):
        writeFCIDUMPs_MRLCC(mc,eris,eris_sp,int1,energy_core,energyE0,nfro)
    print("LEGACY code from INTEGRALS ::end::")
    return energyE0, norb
def write_ic_inputs(nelec, ncor, ncas, | |
from sage.ext.fast_callable import fast_callable
from sage.functions.log import exp
from sage.functions.log import log
from sage.functions.other import sqrt
from sage.functions.trig import cos, sin, tan
from sage.misc.flatten import flatten
from sage.misc.functional import numerical_approx as N
from sage.rings.real_mpfr import RealField
from sage.rings.semirings.non_negative_integer_semiring import NN
'''
Along the process we create the following lists.
For expressions:
    list1 -> stores the expressions (not variables) needed for the Taylor AD
    list2 -> stores the way to build the expressions in list1; needed to determine the AD function
    list3 -> stores list2 with the proper AD function name and the corresponding link or variable
             in list1
    parsedList -> stores the lines of C code for AD
    constList1 -> stores the expressions that involve only parameters
    constList2 -> stores the way to build the expressions in constList1
    constList3 -> stores constList2 with the proper construction of the arguments
    parsedConstList -> stores the C code for generating the constants
We will use the unified notation:
    vars -> list with the variables of the right-hand side function of the ODE
    pars -> list with the parameters (if any) of the right-hand side function of the ODE
    varpar -> list with vars and pars
'''
def createLists (f, pars):
    '''
    Create list1 and list2 from the right-hand side function f of an ODE.

    The function is compiled with Sage's fast_callable into a stack-machine
    program, which is then replayed symbolically: every intermediate
    expression pushed on the stack is recorded in list1, and the recipe to
    rebuild it (operator plus operands) is recorded at the same position in
    list2.

    input:
        f    -> right-hand side function of the ODE system
        pars -> list with the parameters appearing in f
    output:
        list1, list2

    Example with the Lorenz equations:
        sage: var('t, x, y, z')    # variables for the Lorenz equations
        sage: var('s, r, b')       # parameters for the Lorenz equations
        sage: f(t,x,y,z) = [s*(y-x), x*(r-z) - y, x*y - b*z]  # right-hand side of the Lorenz equations
    '''
    vars = f[0].arguments () # gets the list of variables from function f
    varpar = list (vars) + list (pars) # gets the list of vars and pars together
    # NOTE(review): _f is assigned but not used below -- confirm it is
    # needed for its side effects, otherwise it can be dropped.
    _f = f (*vars).function (varpar) # _f is f but with vars and pars as arguments
    fastCallList = flatten ([fast_callable (i,vars=varpar).op_list () for i in f], max_level=1)
    # This list is the fast_callable version of f using a stack-mode call
    '''
    We create the lists list1 and list2, plus a working stack:
    stack is the symbolic expression stack used while replaying the
    stack-machine program in fastCallList.
    '''
    list1 = []; list2 = []; stack = [];
    '''
    Starts the parser on fastCallList.
    '''
    for s in fastCallList:
        if s[0] == 'load_arg': # Loads a variable in stack. no changes on list1, or list2
            stack.append (varpar[s[1]]) # appends the variable or parameter on symbolic stack
        elif s[0] == 'ipow': # Integer power.
            if s[1] in NN: # If natural, parse as repeated products
                basis = stack[-1]
                for j in range (s[1]-1):
                    a=stack.pop (-1)
                    stack.append (a*basis)
                    list1.append (stack[-1])
                    list2.append (('mul', a, basis))
            elif -s[1] in NN: # negative integer: repeated products, then one reciprocal
                basis = stack[-1]
                for j in range (-s[1]-1):
                    a=stack.pop (-1)
                    stack.append (a*basis)
                    list1.append (stack[-1])
                    list2.append (('mul', a, basis))
                a = stack.pop (-1);
                stack.append (1/a);
                list1.append (stack[-1])
                list2.append (('div', 1, a))
            else: # Attach as normal power
                a = stack.pop (-1) #basis
                stack.append (a ** s[1])
                list1.append (stack[-1])
                list2.append (('pow', a, s[1]))
        elif s[0] == 'load_const': # Loads a constant value on stack. Not in list1 or list2
            stack.append (s[1])
        elif s == 'neg': # multiplies by -1.0
            a = stack.pop (-1) # expression to be multiplied by -1
            stack.append (-a)
            list1.append (stack[-1])
            list2.append (('mul', -1, a))
        elif s == 'mul': # Product
            a=stack.pop (-1)
            b=stack.pop (-1)
            list2.append (('mul', a, b))
            stack.append (a*b)
            list1.append (stack[-1])
        elif s == 'div': # division, numerator first.
            b=stack.pop (-1) # denominator (after a in stack)
            a=stack.pop (-1) # numerator (before b in stack)
            # A constant denominator is inverted once and turned into a
            # product, so the AD code multiplies instead of dividing.
            if expresionIsConstant (b, pars):
                list1.append(1/b)
                list2.append(('div', 1, b))
                b = 1/b;
                stack.append (a*b)
                list1.append(stack[-1])
                list2.append (('mul', a, b))
            else:
                list2.append (('div', a, b))
                stack.append (a/b)
                list1.append (stack[-1])
        elif s == 'add': # addition
            b = stack.pop (-1) # second operand
            a = stack.pop (-1) # first operand
            stack.append (a+b)
            list1.append (stack[-1])
            list2.append (('add', a, b))
        elif s == 'pow': # any other pow
            b = stack.pop (-1) # exponent
            a = stack.pop (-1) # basis
            stack.append (a**b)
            list1.append (stack[-1])
            list2.append (('pow', a, b))
        elif s[0] == 'py_call' and 'sqrt' in str (s[1]): # square root. Compute as power
            a = stack.pop (-1) # argument of sqrt
            stack.append (sqrt (a))
            list1.append (stack[-1])
            list2.append (('pow', a, 0.5))
        elif s[0] == 'py_call' and str (s[1]) == 'log': # logarithm
            a = stack.pop (-1); # argument of log
            stack.append (log (a))
            list1.append (stack[-1])
            list2.append (('log', a))
        elif s[0] == 'py_call' and str (s[1]) == 'exp':
            a = stack.pop (-1); # argument of exp
            stack.append (exp (a))
            list1.append (stack[-1])
            list2.append (('exp', a))
        elif s[0] == 'py_call' and str (s[1]) == 'sin': # sine. For AD needs computation of cos
            a = stack.pop (-1)
            stack.append (sin (a))
            list1.append (sin (a))
            list1.append (cos (a))
            list2.append (('sin', a))
            list2.append (('cos', a))
        elif s[0] == 'py_call' and str (s[1]) == 'cos': # cosine. For AD needs computation of sin
            a = stack.pop (-1)
            stack.append (cos (a))
            list1.append (sin (a))
            list1.append (cos (a))
            list2.append (('sin', a))
            list2.append (('cos', a))
        elif s[0] == 'py_call' and str (s[1]) == 'tan':
            # tangent is generated as sin/cos, so all three are recorded
            a = stack.pop (-1)
            stack.append (tan (a))
            list1.append (sin (a))
            list1.append (cos (a))
            list1.append (tan (a))
            list2.append (('sin', a))
            list2.append (('cos', a))
            list2.append (('div', sin (a), cos (a)))
    return list1, list2
def removeRepeated (list1, list2):
    '''
    Drop duplicate expressions from list1 in place, keeping the first
    occurrence of each, and drop the corresponding entries of list2 so
    both lists stay aligned.  Comparison uses ``==`` (no hashing), since
    the entries may be symbolic expressions.
    '''
    anchor = 0
    while anchor < len (list1) - 1:
        probe = anchor + 1
        while probe < len (list1):
            if list1[probe] == list1[anchor]:
                del list1[probe]
                del list2[probe]
            else:
                probe += 1
        anchor += 1
def expresionIsConstant (expresion, pars):
    '''
    Tell whether *expresion* is constant with respect to the ODE
    variables, i.e. every symbol appearing in it belongs to the
    parameter collection *pars*.
    '''
    return set (pars) >= set (expresion.variables ())
def removeConstants (list1, list2, pars):
    '''
    Filter constant expressions out of list1/list2 in place.

    Entries that are plain real numbers (no parameters at all) are simply
    discarded; entries that depend only on parameters are moved, in order,
    into the returned lists.  Entries that depend on the ODE variables are
    left untouched.

    Returns the pair (constList1, constList2).
    '''
    reals = RealField ()                       # SAGE real field for the membership test
    onlyPars1, onlyPars2 = [], []
    idx = 0
    while idx < len (list1):
        entry = list1[idx]
        if entry in reals:                     # a bare real number: drop it from both lists
            del list1[idx]
            del list2[idx]
        elif expresionIsConstant (entry, pars):  # parameters only: move it aside
            onlyPars1.append (entry)
            del list1[idx]
            onlyPars2.append (list2.pop (idx))
        else:                                  # depends on the variables: keep it
            idx += 1
    return onlyPars1, onlyPars2
def createCodeList (list1, list2, constList1, f, pars):
    '''
    Creates list3. In list3 we identify each expression with the
    corresponding variable, parameter, link, constant expression or real
    constant, producing the C tokens used later for code generation:
    'T' for the independent variable, 'series[i]' for the i-th dependent
    variable, 'l[i]' for a link (entry of list1), 'c[i]' for a constant
    expression (entry of constList1), 'par[i]' for a parameter, or the
    numerical value itself.
    '''
    list3=[]
    vars = f[0].arguments() # variables of the function
    for s in list2: # s is a tuple with the information to build expressions of list1
        oper = s[0] # the external operator of the expression
        if oper in ['log', 'exp', 'sin', 'cos', 'tan']: # unary operator
            # argument cannot be a parameter, real number or constant expression;
            # those cases are stored in constList1 and constList2
            a = s[1] # identify operand
            if a in vars: # case of variable
                if a == vars[0]:
                    # first argument is the independent variable (time)
                    list3.append ((oper, 'T'))
                else:
                    list3.append ((oper, 'series[{}]'.format (vars.index (a) - 1)))
            else:
                list3.append ((oper, 'l[{}]'.format (list1.index (a))))
        else: # binary operator
            a = s[1] # first operand
            b = s[2] # second operand
            constA = False # flags to determine whether any of them are constant
            constB = False
            # not possible both of them
            if a in vars: # if a is a variable
                if a == vars[0]:
                    aa = 'T'
                else:
                    aa = 'series[{}]'.format (vars.index (a) - 1) # corresponding string
            elif a in list1: # if a is a link
                aa = 'l[{}]'.format (list1.index (a)) # corresponding string
            else: # ok, a is constant
                constA = True # constant flag on
                if a in constList1: # a constant expression
                    aa = 'c[{}]'.format (constList1.index (a))
                elif a in pars: # a parameter
                    aa = 'par[{}]'.format (pars.index (a))
                else: # a is a real constant.
                    aa = str (N (a)) # write as a string
            if b in vars: # same with b
                if b == vars[0]:
                    bb = 'T'
                else:
                    bb = 'series[{}]'.format (vars.index (b) - 1)
            elif b in list1:
                bb = 'l[{}]'.format (list1.index (b))
            else:
                constB = True
                if b in constList1:
                    bb = 'c[{}]'.format (constList1.index (b))
                elif b in pars:
                    bb = 'par[{}]'.format (pars.index (b))
                else:
                    bb = str (N (b))
            # If one operand is constant, the operator gets a '_c' suffix
            # and, when the constant was the first operand, the operands
            # are swapped.
            # NOTE(review): after the swap the constant operand ends up in
            # the SECOND position, not the first as the original comment
            # claimed -- confirm against the C AD library's '_c' convention.
            if constA:
                oper += '_c'
                bb, aa = aa,bb
            elif constB:
                oper += '_c'
            list3.append ((oper, aa, bb))
    return list3
def createConstCodeList (constList1, constList2, pars):
    '''
    Build constList3: a copy of constList2 in which every operand is
    replaced by the C token that refers to it -- ``par[i]`` for a
    parameter, ``c[j]`` for a previously generated constant expression,
    or the parenthesised numerical value for a plain real number.
    '''
    UNARY = ('log', 'exp', 'sin', 'cos', 'tan')

    def _token (operand):
        # Map one binary operand to its C representation.
        if operand in pars:
            return 'par[{}]'.format (pars.index (operand))
        if operand in constList1:
            return 'c[{}]'.format (constList1.index (operand))
        # neither a parameter nor a known constant: a bare real number
        return '(' + str (N (operand)) + ')'

    constList3 = []
    for recipe in constList2:
        oper = recipe[0]
        if oper in UNARY:
            # a unary operand is either a parameter or a constant
            # expression, never a bare real number
            a = recipe[1]
            if a in pars:
                constList3.append ((oper, 'par[{}]'.format (pars.index (a))))
            else:
                constList3.append ((oper, 'c[{}]'.format (constList1.index (a))))
        else:
            constList3.append ((oper, _token (recipe[1]), _token (recipe[2])))
    return constList3
def createParsedConstList (constList3):
'''
Creates the lines of C code to generate constant expresions in the code
'''
parsedConstList = []
for i in range (len (constList3)):
codeLine = constList3[i]
string = '\tc[{}] = '.format(i)
if codeLine[0] == 'add':
string += codeLine[1] + ' + ' + codeLine[2] + ';'
if codeLine[0] == 'mul':
string += codeLine[1] + ' * ' + codeLine[2] + ';'
if codeLine[0] == 'div':
string += codeLine[1] + ' / ' + codeLine[2] + ';'
if codeLine[0] == 'exp':
string += | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['CertificateOrder']
class CertificateOrder(pulumi.CustomResource):
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 auto_renew: Optional[pulumi.Input[bool]] = None,
                 csr: Optional[pulumi.Input[str]] = None,
                 distinguished_name: Optional[pulumi.Input[str]] = None,
                 key_size: Optional[pulumi.Input[int]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 product_type: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 validity_in_years: Optional[pulumi.Input[int]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Manages an App Service Certificate Order.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_certificate_order = azure.appservice.CertificateOrder("exampleCertificateOrder",
            resource_group_name=example_resource_group.name,
            location="global",
            distinguished_name="CN=example.com",
            product_type="Standard")
        ```
        ## Import
        App Service Certificate Orders can be imported using the `resource id`, e.g.
        ```sh
        $ pulumi import azure:appservice/certificateOrder:CertificateOrder example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.CertificateRegistration/certificateOrders/certificateorder1
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] auto_renew: true if the certificate should be automatically renewed when it expires; otherwise, false. Defaults to true.
        :param pulumi.Input[str] csr: Last CSR that was created for this order.
        :param pulumi.Input[str] distinguished_name: The Distinguished Name for the App Service Certificate Order.
        :param pulumi.Input[int] key_size: Certificate key size. Defaults to 2048.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. Currently the only valid value is `global`.
        :param pulumi.Input[str] name: Specifies the name of the certificate. Changing this forces a new resource to be created.
        :param pulumi.Input[str] product_type: Certificate product type, such as `Standard` or `WildCard`.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the certificate. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        :param pulumi.Input[int] validity_in_years: Duration in years (must be between `1` and `3`). Defaults to `1`.
        """
        # Back-compat shims: honor the deprecated __name__/__opts__ keyword
        # arguments but emit DeprecationWarnings.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # No opts.id means we are creating a new resource (as opposed to
        # adopting an existing one via get()), so assemble the input
        # property bag here; __props__ is only valid together with opts.id.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['auto_renew'] = auto_renew
            __props__['csr'] = csr
            __props__['distinguished_name'] = distinguished_name
            __props__['key_size'] = key_size
            __props__['location'] = location
            __props__['name'] = name
            __props__['product_type'] = product_type
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['tags'] = tags
            __props__['validity_in_years'] = validity_in_years
            # Provider-computed output properties start out as None and are
            # filled in by the Azure provider after creation.
            __props__['app_service_certificate_not_renewable_reasons'] = None
            __props__['certificates'] = None
            __props__['domain_verification_token'] = None
            __props__['expiration_time'] = None
            __props__['intermediate_thumbprint'] = None
            __props__['is_private_key_external'] = None
            __props__['root_thumbprint'] = None
            __props__['signed_certificate_thumbprint'] = None
            __props__['status'] = None
        super(CertificateOrder, __self__).__init__(
            'azure:appservice/certificateOrder:CertificateOrder',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        app_service_certificate_not_renewable_reasons: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        auto_renew: Optional[pulumi.Input[bool]] = None,
        certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CertificateOrderCertificateArgs']]]]] = None,
        csr: Optional[pulumi.Input[str]] = None,
        distinguished_name: Optional[pulumi.Input[str]] = None,
        domain_verification_token: Optional[pulumi.Input[str]] = None,
        expiration_time: Optional[pulumi.Input[str]] = None,
        intermediate_thumbprint: Optional[pulumi.Input[str]] = None,
        is_private_key_external: Optional[pulumi.Input[bool]] = None,
        key_size: Optional[pulumi.Input[int]] = None,
        location: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        product_type: Optional[pulumi.Input[str]] = None,
        resource_group_name: Optional[pulumi.Input[str]] = None,
        root_thumbprint: Optional[pulumi.Input[str]] = None,
        signed_certificate_thumbprint: Optional[pulumi.Input[str]] = None,
        status: Optional[pulumi.Input[str]] = None,
        tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        validity_in_years: Optional[pulumi.Input[int]] = None) -> 'CertificateOrder':
    """
    Look up an existing CertificateOrder by provider ``id`` and rehydrate it
    with the supplied state.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.

    Every remaining keyword argument seeds the matching resource property.
    Highlights: `auto_renew` (renew automatically on expiry, defaults to true),
    `csr` (last CSR created for the order), `distinguished_name` (order's
    Distinguished Name), `key_size` (defaults to 2048), `location` (only valid
    value is `global`; forces recreation), `product_type` (`Standard` or
    `WildCard`), `resource_group_name` (forces recreation), `tags`, and
    `validity_in_years` (between `1` and `3`, defaults to `1`). The read-only
    outputs (`certificates`, `domain_verification_token`, thumbprints,
    `status`, renewability reasons) may be seeded as well.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    # Assemble the state dict in one place; key order mirrors the provider schema.
    state_pairs = (
        ("app_service_certificate_not_renewable_reasons", app_service_certificate_not_renewable_reasons),
        ("auto_renew", auto_renew),
        ("certificates", certificates),
        ("csr", csr),
        ("distinguished_name", distinguished_name),
        ("domain_verification_token", domain_verification_token),
        ("expiration_time", expiration_time),
        ("intermediate_thumbprint", intermediate_thumbprint),
        ("is_private_key_external", is_private_key_external),
        ("key_size", key_size),
        ("location", location),
        ("name", name),
        ("product_type", product_type),
        ("resource_group_name", resource_group_name),
        ("root_thumbprint", root_thumbprint),
        ("signed_certificate_thumbprint", signed_certificate_thumbprint),
        ("status", status),
        ("tags", tags),
        ("validity_in_years", validity_in_years),
    )
    __props__ = dict(state_pairs)
    return CertificateOrder(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="appServiceCertificateNotRenewableReasons")
def app_service_certificate_not_renewable_reasons(self) -> pulumi.Output[Sequence[str]]:
    """Reasons the App Service Certificate cannot currently be renewed."""
    reasons = pulumi.get(self, "app_service_certificate_not_renewable_reasons")
    return reasons
@property
@pulumi.getter(name="autoRenew")
def auto_renew(self) -> pulumi.Output[Optional[bool]]:
    """Whether the certificate renews automatically when it expires. Defaults to true."""
    value = pulumi.get(self, "auto_renew")
    return value
@property
@pulumi.getter
def certificates(self) -> pulumi.Output[Sequence['outputs.CertificateOrderCertificate']]:
    """State of the Key Vault secret; a `certificates` block as defined in the docs."""
    blocks = pulumi.get(self, "certificates")
    return blocks
@property
@pulumi.getter
def csr(self) -> pulumi.Output[str]:
    """The last CSR created for this order."""
    value = pulumi.get(self, "csr")
    return value
@property
@pulumi.getter(name="distinguishedName")
def distinguished_name(self) -> pulumi.Output[str]:
    """Distinguished Name of the App Service Certificate Order."""
    value = pulumi.get(self, "distinguished_name")
    return value
@property
@pulumi.getter(name="domainVerificationToken")
def domain_verification_token(self) -> pulumi.Output[str]:
    """Token used to verify domain ownership."""
    token = pulumi.get(self, "domain_verification_token")
    return token
@property
@pulumi.getter(name="expirationTime")
def expiration_time(self) -> pulumi.Output[str]:
    """Time at which the certificate expires."""
    value = pulumi.get(self, "expiration_time")
    return value
@property
@pulumi.getter(name="intermediateThumbprint")
def intermediate_thumbprint(self) -> pulumi.Output[str]:
    """Thumbprint of the intermediate certificate."""
    value = pulumi.get(self, "intermediate_thumbprint")
    return value
@property
@pulumi.getter(name="isPrivateKeyExternal")
def is_private_key_external(self) -> pulumi.Output[bool]:
    """Whether the private key is held externally."""
    value = pulumi.get(self, "is_private_key_external")
    return value
@property
@pulumi.getter(name="keySize")
def key_size(self) -> pulumi.Output[Optional[int]]:
    """Certificate key size; defaults to 2048."""
    value = pulumi.get(self, "key_size")
    return value
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
    """Azure location of the resource; currently only `global` is valid. Changing it forces recreation."""
    value = pulumi.get(self, "location")
    return value
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """Name of the certificate. Changing it forces a new resource to be created."""
    value = pulumi.get(self, "name")
    return value
@property
@pulumi.getter(name="productType")
def product_type(self) -> pulumi.Output[Optional[str]]:
    """Certificate product type, e.g. `Standard` or `WildCard`."""
    value = pulumi.get(self, "product_type")
    return value
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
    """Resource group holding the certificate. Changing it forces recreation."""
    value = pulumi.get(self, "resource_group_name")
    return value
@property
@pulumi.getter(name="rootThumbprint")
def root_thumbprint(self) -> pulumi.Output[str]:
    """Thumbprint of the root certificate."""
    value = pulumi.get(self, "root_thumbprint")
    return value
@property
@pulumi.getter(name="signedCertificateThumbprint")
def signed_certificate_thumbprint(self) -> pulumi.Output[str]:
"""
Certificate thumbprint for signed certificate.
"""
| |
""" A module containing the models to be trained on gene expression data """
import copy
import itertools
import pickle
import sys
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Union, Iterable, Tuple, Any
import neptune.new as neptune
import numpy as np
import sklearn.linear_model
import sklearn.decomposition
import torch
import torch.nn as nn
import torch.nn.functional as F
from performer_pytorch import SelfAttention
from torch.utils.data import DataLoader
from tqdm import tqdm
import saged.utils as utils
from saged.datasets import LabeledDataset, UnlabeledDataset, MixedDataset, ExpressionDataset
def get_model_by_name(model_name: str) -> Any:
    """
    This function invokes old magic to get a model class from the current file dynamically.
    In python the answer to 'How do I get a class from the current file dynamically' is
    'Dump all the global variables for the file, it will be there somewhere'
    https://stackoverflow.com/questions/734970/python-reference-to-a-class-from-a-string

    Arguments
    ---------
    model_name: The name of the class object to return

    Returns
    -------
    model_class: The class of the model specified e.g. PCA or LogisticRegression

    Raises
    ------
    KeyError: If no name `model_name` is defined at module level, with a message
        explaining the failure instead of the bare `KeyError('Name')` the raw
        globals() lookup would produce
    """
    try:
        return globals()[model_name]
    except KeyError as err:
        raise KeyError(
            "No model class named '{}' is defined in this module".format(model_name)
        ) from err
def embed_data(unsupervised_config: dict,
               all_data: MixedDataset,
               train_data: LabeledDataset,
               unlabeled_data: UnlabeledDataset,
               val_data: LabeledDataset,
               ) -> Tuple[LabeledDataset, LabeledDataset, "UnsupervisedModel"]:
    """
    Fit an unsupervised embedding model and project the train/validation data
    into its latent space.

    Arguments
    ---------
    unsupervised_config: Options describing how to build the unsupervised model;
        must contain a 'name' key naming the model class (popped here), with the
        remaining keys passed to that class's constructor
    all_data: The object storing the data for the entire dataset
    train_data: The subset of the data to be used for training
    unlabeled_data: The subset of the data that doesn't have labels
    val_data: The validation subset. To avoid data leakage it is excluded from
        fitting the embedding but is itself embedded

    Returns
    -------
    train_data: The embedded training data
    val_data: The embedded validation data
    unsupervised_model: The fitted embedding model
    """
    # Build the embedding model from its registered class name
    model_name = unsupervised_config.pop('name')
    model_class = get_model_by_name(model_name)
    embedder = model_class(**unsupervised_config)

    # Fit on everything except the validation split (leakage prevention)
    fit_samples = train_data.get_samples() + unlabeled_data.get_samples()
    fit_data = all_data.subset_to_samples(fit_samples)
    embedder.fit(fit_data)

    embedded_train = embedder.transform(train_data)
    embedded_val = embedder.transform(val_data)

    # subset_to_samples changed all_data's filters; undo that side effect
    all_data.reset_filters()

    return embedded_train, embedded_val, embedder
class ExpressionModel(ABC):
    """
    A model API similar to the scikit-learn API that will specify the
    base acceptable functions for models in this module's benchmarking code
    """

    def __init__(self,
                 config: dict) -> None:
        """
        Standard model init function. We use pass instead of raising a NotImplementedError
        here in case inheriting classes decide to call `super()`

        Arguments
        ---------
        config: Options used to construct the model
        """
        pass

    # Declared as a classmethod so the abstract signature matches the concrete
    # implementations (e.g. LogisticRegression.load_model), which construct a
    # model without needing an existing instance.
    @classmethod
    @abstractmethod
    def load_model(classobject, model_path):
        """
        Read a pickled model from a file and return it

        Arguments
        ---------
        model_path: The location where the model is stored

        Returns
        -------
        model: The model saved at `model_path`
        """
        raise NotImplementedError

    def free_memory(self) -> None:
        """
        Some models need help freeing the memory allocated to them. Others do not.
        This function is a placeholder that can be overridden by inheriting classes
        if needed. PytorchSupervised is a good example of what a custom free_memory function
        does.
        """
        pass

    @abstractmethod
    def fit(self, dataset: LabeledDataset, run: neptune.Run) -> "ExpressionModel":
        """
        Train a model using the given labeled data

        Arguments
        ---------
        dataset: The labeled data for use in training
        run: An object for logging training data if applicable

        Returns
        -------
        self: The fitted model, per the return annotation (the previous docstring
            incorrectly said training metrics were returned)
        """
        raise NotImplementedError

    @abstractmethod
    def predict(self, dataset: UnlabeledDataset) -> np.ndarray:
        """
        Predict the labels for a dataset

        Arguments
        ---------
        dataset: The unlabeled data whose labels should be predicted

        Returns
        -------
        predictions: A numpy array of predictions
        """
        raise NotImplementedError

    @abstractmethod
    def evaluate(self, dataset: LabeledDataset) -> Tuple[np.ndarray, np.ndarray]:
        """
        Return the predicted and true labels for a dataset

        Arguments
        ---------
        dataset: The labeled dataset for use in evaluating the model

        Returns
        -------
        predictions: A numpy array of predictions
        labels: The true labels to compare the predictions against
        """
        raise NotImplementedError

    @abstractmethod
    def save_model(self, out_path: str) -> None:
        """
        Write the model to a file

        Arguments
        ---------
        out_path: The path to the file to write the classifier to

        Raises
        ------
        FileNotFoundError if out_path isn't openable
        """
        raise NotImplementedError
class LogisticRegression(ExpressionModel):
    """Scikit-learn logistic regression wrapped in this module's ExpressionModel API"""

    def __init__(self,
                 seed: int,
                 **kwargs,
                 ) -> None:
        """
        Build the underlying sklearn classifier

        Arguments
        ---------
        seed: The random seed to use in training
        **kwargs: Accepted (and ignored) so every model class shares a uniform
            constructor signature
        """
        self.model = sklearn.linear_model.LogisticRegression(random_state=seed)

    def fit(self, dataset: LabeledDataset, run: neptune.Run = None) -> "LogisticRegression":
        """
        Train the classifier on the given labeled data

        Arguments
        ---------
        dataset: The labeled data for use in training
        run: An object for logging training data if applicable

        Returns
        -------
        self: The fitted model
        """
        features, labels = dataset.get_all_data()
        self.model = self.model.fit(features, labels)
        return self

    def predict(self, dataset: UnlabeledDataset) -> np.ndarray:
        """
        Use the model to predict the labels for a given unlabeled dataset

        Arguments
        ---------
        dataset: The unlabeled data whose labels should be predicted

        Returns
        -------
        predictions: A numpy array of predictions
        """
        features = dataset.get_all_data()
        return self.model.predict(features)

    def evaluate(self, dataset: LabeledDataset) -> Tuple[np.ndarray, np.ndarray]:
        """
        Return the predicted and true labels for a dataset

        Arguments
        ---------
        dataset: The labeled dataset for use in evaluating the model

        Returns
        -------
        predictions: A numpy array of predictions
        labels: The true labels to compare the predictions against
        """
        features, labels = dataset.get_all_data()
        predictions = self.model.predict(features)
        return predictions, labels

    def save_model(self, out_path: str) -> None:
        """
        Write the classifier to a file

        Arguments
        ---------
        out_path: The path to the file to write the classifier to

        Raises
        ------
        FileNotFoundError if out_path isn't openable
        """
        with open(out_path, 'wb') as out_file:
            pickle.dump(self, out_file)

    @classmethod
    def load_model(classobject, model_path: str, **kwargs):
        """
        Read a pickled model from a file and return it

        Arguments
        ---------
        model_path: The location where the model is stored
        **kwargs: To be consistent with the API this function takes in config info even though
            it doesn't need it

        Returns
        -------
        model: The model saved at `model_path`
        """
        with open(model_path, 'rb') as model_file:
            return pickle.load(model_file)
class ThreeLayerWideBottleneck(nn.Module):
    """A three fully-connected-layer net whose hidden width stays at half the input size,
    for use in wrappers like PytorchSupervised"""

    def __init__(self,
                 input_size: int,
                 output_size: int,
                 **kwargs):
        """
        Build the network's layers

        Arguments
        ---------
        input_size: The number of features in the dataset
        output_size: The number of classes to predict
        """
        super().__init__()
        hidden_size = input_size // 2
        # Attribute names fc1/fc2/fc3/dropout are kept stable so saved
        # state_dicts remain loadable.
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, output_size)
        self.dropout = nn.Dropout()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = self.dropout(F.relu(self.fc1(x)))
        hidden = self.dropout(F.relu(self.fc2(hidden)))
        return self.fc3(hidden)
class ThreeLayerClassifier(nn.Module):
    """A three fully-connected-layer net that halves its width at each hidden layer,
    for use in wrappers like PytorchSupervised"""

    def __init__(self,
                 input_size: int,
                 output_size: int,
                 **kwargs):
        """
        Build the network's layers

        Arguments
        ---------
        input_size: The number of features in the dataset
        output_size: The number of classes to predict
        """
        super().__init__()
        half = input_size // 2
        quarter = input_size // 4
        # Attribute names fc1/fc2/fc3/dropout are kept stable so saved
        # state_dicts remain loadable.
        self.fc1 = nn.Linear(input_size, half)
        self.fc2 = nn.Linear(half, quarter)
        self.fc3 = nn.Linear(quarter, output_size)
        self.dropout = nn.Dropout()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = self.dropout(F.relu(self.fc1(x)))
        hidden = self.dropout(F.relu(self.fc2(hidden)))
        return self.fc3(hidden)
class PytorchLR(nn.Module):
    """Logistic regression expressed as a single linear layer in pytorch
    (returns raw logits; the loss function is expected to apply the softmax/sigmoid)"""

    def __init__(self,
                 input_size: int,
                 output_size: int,
                 **kwargs):
        """
        Build the single linear layer

        Arguments
        ---------
        input_size: The number of features in the dataset
        output_size: The number of classes to predict
        """
        super().__init__()
        self.fc1 = nn.Linear(input_size, output_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.fc1(x)
class FiveLayerImputation(nn.Module):
"""An imputation model based off the DeepClassifier (five-layer) model"""
def __init__(self,
input_size: int,
output_size: int,
**kwargs):
"""
Model initialization function
Arguments
---------
input_size: The number of features in the dataset
output_size: The number of classes to predict
"""
super(FiveLayerImputation, self).__init__()
DROPOUT_PROB = .5
self.fc1 = nn.Linear(input_size, input_size // 2)
self.bn1 = nn.BatchNorm1d(input_size // 2)
self.fc2 = nn.Linear(input_size // 2, input_size // 2)
self.bn2 = nn.BatchNorm1d(input_size // 2)
self.fc3 = nn.Linear(input_size // 2, input_size // 2)
self.bn3 = nn.BatchNorm1d(input_size // 2)
self.fc4 = nn.Linear(input_size // 2, input_size // 4)
| |
<reponame>Flav-STOR-WL/py-pure-client
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.10
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class PoliciesApi(object):
def __init__(self, api_client):
    # api_client performs the actual HTTP transport; this class only builds
    # the request parameters and delegates to api_client.call_api.
    self.api_client = api_client
def api210_policies_get_with_http_info(
    self,
    authorization=None,  # type: str
    x_request_id=None,  # type: str
    continuation_token=None,  # type: str
    filter=None,  # type: str
    ids=None,  # type: List[str]
    limit=None,  # type: int
    names=None,  # type: List[str]
    offset=None,  # type: int
    sort=None,  # type: List[str]
    total_item_count=None,  # type: bool
    async_req=False,  # type: bool
    _return_http_data_only=False,  # type: bool
    _preload_content=True,  # type: bool
    _request_timeout=None,  # type: Optional[int]
):
    # type: (...) -> models.PolicyGetResponse
    """List policies

    Displays a list of policies.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.api210_policies_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
    :param str x_request_id: Supplied by client during request or generated by server.
    :param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
    :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
    :param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
    :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
    :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
    :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
    :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
    :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
    :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
    :param bool _return_http_data_only: Returns only data field.
    :param bool _preload_content: Response is converted into objects.
    :param int _request_timeout: Total request timeout in seconds.
                             It can also be a tuple of (connection time, read time) timeouts.
    :return: PolicyGetResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accept a bare scalar for each list-typed parameter by wrapping it.
    if ids is not None:
        if not isinstance(ids, list):
            ids = [ids]
    if names is not None:
        if not isinstance(names, list):
            names = [names]
    if sort is not None:
        if not isinstance(sort, list):
            sort = [sort]
    # Snapshot every non-None local. At this point the locals are exactly the
    # method parameters (plus `self`), so this gathers all supplied arguments.
    # NOTE: no new local variables may be introduced above this line, or they
    # would leak into `params`.
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}

    # Convert the filter into a string
    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(_x) for _x in params['sort']]

    # Client-side validation of the pagination bounds before issuing the call.
    if 'limit' in params and params['limit'] < 1:
        raise ValueError("Invalid value for parameter `limit` when calling `api210_policies_get`, must be a value greater than or equal to `1`")
    if 'offset' in params and params['offset'] < 0:
        raise ValueError("Invalid value for parameter `offset` when calling `api210_policies_get`, must be a value greater than or equal to `0`")

    collection_formats = {}
    path_params = {}

    # Build the query string; list-valued parameters are serialized as CSV.
    query_params = []
    if 'continuation_token' in params:
        query_params.append(('continuation_token', params['continuation_token']))
    if 'filter' in params:
        query_params.append(('filter', params['filter']))
    if 'ids' in params:
        query_params.append(('ids', params['ids']))
        collection_formats['ids'] = 'csv'
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'
    if 'offset' in params:
        query_params.append(('offset', params['offset']))
    if 'sort' in params:
        query_params.append(('sort', params['sort']))
        collection_formats['sort'] = 'csv'
    if 'total_item_count' in params:
        query_params.append(('total_item_count', params['total_item_count']))

    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']
    if 'x_request_id' in params:
        header_params['X-Request-ID'] = params['x_request_id']

    form_params = []
    local_var_files = {}
    body_params = None  # GET request: no body is sent.

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    # Authentication setting
    auth_settings = []

    # Delegate the actual HTTP round trip (and optional async dispatch) to the
    # shared api_client.
    return self.api_client.call_api(
        '/api/2.10/policies', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PolicyGetResponse',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
def api210_policies_members_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
continuation_token=None, # type: str
destroyed=None, # type: bool
filter=None, # type: str
limit=None, # type: int
member_ids=None, # type: List[str]
member_names=None, # type: List[str]
member_types=None, # type: List[str]
policy_ids=None, # type: List[str]
policy_names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
total_item_count=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.PolicyMemberGetResponse
"""List policy members
Displays a list of policy members.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api210_policies_members_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
:param bool destroyed: If set to `true`, lists only destroyed objects that are in the eradication pending state. If set to `false`, lists only objects that are not destroyed. For destroyed objects, the time remaining is displayed in milliseconds.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param list[str] member_ids: Performs the operation on the unique member IDs specified. Enter multiple member IDs in comma-separated format. The `member_ids` and `member_names` parameters cannot be provided together.
:param list[str] member_names: Performs the operation on the unique member name specified. Examples of members include volumes, hosts, host groups, and directories. Enter multiple names in comma-separated format. For example, `vol01,vol02`.
:param list[str] member_types: Performs the operation on the member types specified. The type of member is the full name of the resource endpoint. Valid values include | |
}
The main payload of the return data can be found inside the 'updates'
key, containing a list of dictionaries. This list is always returned
in descending date order. Each item may contain different fields
depending on their update type. The primary_entity key represents the
main Shotgun entity that is associated with the update. By default,
this entity is returned with a set of standard fields. By using the
entity_fields parameter, you can extend the returned data to include
additional fields. If for example you wanted to return the asset type
for all assets and the linked sequence for all Shots, pass the
following entity_fields::
{"Shot": ["sg_sequence"], "Asset": ["sg_asset_type"]}
Deep queries can be used in this syntax if you want to
traverse into connected data.
:param str entity_type: Entity type to retrieve activity stream for
:param int entity_id: Entity id to retrieve activity stream for
:param list entity_fields: List of additional fields to include.
See above for details
:param int max_id: Do not retrieve ids greater than this id.
This is useful when implementing paging.
:param int min_id: Do not retrieve ids lesser than this id.
This is useful when implementing caching of
the event stream data and you want to
"top up" an existing cache.
:param int limit: Limit the number of returned records. If not specified,
the system default will be used.
:returns: A complex activity stream data structure. See above for details.
:rtype: dict
"""
if self.server_caps.version and self.server_caps.version < (6, 2, 0):
raise ShotgunError("activity_stream requires server version 6.2.0 or "
"higher, server is %s" % (self.server_caps.version,))
# set up parameters to send to server.
entity_fields = entity_fields or {}
if not isinstance(entity_fields, dict):
raise ValueError("entity_fields parameter must be a dictionary")
params = {"type": entity_type,
"id": entity_id,
"max_id": max_id,
"min_id": min_id,
"limit": limit,
"entity_fields": entity_fields}
record = self._call_rpc("activity_stream", params)
result = self._parse_records(record)[0]
return result
def nav_expand(self, path, seed_entity_field=None, entity_fields=None):
    """
    Expand the navigation hierarchy for the supplied path.

    .. warning::

        This is an experimental method that is not officially part of the
        python-api. Usage of this method is discouraged. This method's name,
        arguments, and argument types may change at any point.
    """
    payload = {
        "path": path,
        "seed_entity_field": seed_entity_field,
        "entity_fields": entity_fields,
    }
    return self._call_rpc("nav_expand", payload)
def nav_search_string(self, root_path, search_string, seed_entity_field=None):
    """
    Search function adapted to work with the navigation hierarchy.

    .. warning::

        This is an experimental method that is not officially part of the
        python-api. Usage of this method is discouraged. This method's name,
        arguments, and argument types may change at any point.
    """
    payload = {
        "root_path": root_path,
        "seed_entity_field": seed_entity_field,
        "search_criteria": {"search_string": search_string},
    }
    return self._call_rpc("nav_search", payload)
def nav_search_entity(self, root_path, entity, seed_entity_field=None):
    """
    Search function adapted to work with the navigation hierarchy.

    .. warning::

        This is an experimental method that is not officially part of the
        python-api. Usage of this method is discouraged. This method's name,
        arguments, and argument types may change at any point.
    """
    payload = {
        "root_path": root_path,
        "seed_entity_field": seed_entity_field,
        "search_criteria": {"entity": entity},
    }
    return self._call_rpc("nav_search", payload)
def get_session_token(self):
    """
    Get the session token associated with the current session.

    If a session token has already been established, this is returned,
    otherwise a new one is generated on the server and returned.

    >>> sg.get_session_token()
    'dd638be7d07c39fa73d935a775558a50'

    :returns: String containing a session token.
    :rtype: str
    """
    # Reuse the cached token when one has already been established.
    if self.config.session_token:
        return self.config.session_token

    rv = self._call_rpc("get_session_token", None)
    session_token = (rv or {}).get("session_id")
    if not session_token:
        # Bug fix: the message must be %-formatted here. The original code
        # passed ``rv`` as a second positional argument to RuntimeError,
        # which left the "%s" placeholder unexpanded in the message.
        raise RuntimeError("Could not extract session_id from %s" % (rv,))

    # Cache the freshly issued token for subsequent calls.
    self.config.session_token = session_token
    return session_token
def preferences_read(self, prefs=None):
    """
    Get a subset of the site preferences.

    >>> sg.preferences_read()
    {
        "pref_name": "pref value"
    }

    :param list prefs: An optional list of preference names to return.
    :returns: Dictionary of preferences and their values.
    :rtype: dict
    """
    # This endpoint only exists on sufficiently recent servers.
    server_version = self.server_caps.version
    if server_version and server_version < (7, 10, 0):
        raise ShotgunError("preferences_read requires server version 7.10.0 or "
                           "higher, server is %s" % (self.server_caps.version,))
    return self._call_rpc("preferences_read", {"prefs": prefs or []})
def _build_opener(self, handler):
    """
    Build urllib2 opener with appropriate proxy handler.
    """
    # Assemble the handler chain: CA-validated HTTPS (when enabled),
    # then any configured proxy, then the caller-supplied handler.
    extra_handlers = []
    if self.__ca_certs and not NO_SSL_VALIDATION:
        extra_handlers.append(CACertsHTTPSHandler(self.__ca_certs))
    if self.config.proxy_handler:
        extra_handlers.append(self.config.proxy_handler)
    if handler is not None:
        extra_handlers.append(handler)
    return urllib.request.build_opener(*extra_handlers)
def _turn_off_ssl_validation(self):
    """
    Turn off SSL certificate validation.
    """
    global NO_SSL_VALIDATION
    self.config.no_ssl_validation = True
    NO_SSL_VALIDATION = True
    # Rewrite any "ssl ..." user-agent entries so they advertise that
    # certificate validation has been disabled.
    updated_agents = []
    for ua in self._user_agents:
        if ua.startswith("ssl "):
            ua = "ssl %s (no-validate)" % self.client_caps.ssl_version
        updated_agents.append(ua)
    self._user_agents = updated_agents
# Deprecated methods from old wrapper
def schema(self, entity_type):
    """
    .. deprecated:: 3.0.0
       Use :meth:`~shotgun_api3.Shotgun.schema_field_read` instead.
    """
    # Always raises: this API was removed in wrapper v3.0.0.
    message = "Deprecated: use schema_field_read('type':'%s') instead" % entity_type
    raise ShotgunError(message)
def entity_types(self):
    """
    .. deprecated:: 3.0.0
       Use :meth:`~shotgun_api3.Shotgun.schema_entity_read` instead.
    """
    # Always raises: this API was removed in wrapper v3.0.0.
    message = "Deprecated: use schema_entity_read() instead"
    raise ShotgunError(message)
# ========================================================================
# RPC Functions
def _call_rpc(self, method, params, include_auth_params=True, first=False):
    """
    Call the specified method on the Shotgun Server sending the supplied payload.

    :param str method: name of the server-side RPC method.
    :param params: payload for the method; transformed for the wire via
        ``_transform_outbound`` before being embedded in the request.
    :param bool include_auth_params: when True, prepend authentication
        parameters to the payload (see ``_build_payload``).
    :param bool first: when True and the server returns a list of results,
        return only the first element.
    :returns: the decoded "results" value of the server response, or the
        whole response when it has no "results" key.
    """
    LOG.debug("Starting rpc call to %s with params %s" % (
        method, params))
    params = self._transform_outbound(params)
    payload = self._build_payload(method, params, include_auth_params=include_auth_params)
    encoded_payload = self._encode_payload(payload)
    req_headers = {
        "content-type": "application/json; charset=utf-8",
        "connection": "keep-alive"
    }
    # Ask the server to localize responses when the client requested it.
    if self.config.localized is True:
        req_headers["locale"] = "auto"
    http_status, resp_headers, body = self._make_call("POST", self.config.api_path,
                                                      encoded_payload, req_headers)
    LOG.debug("Completed rpc call to %s" % (method))
    try:
        self._parse_http_status(http_status)
    except ProtocolError as e:
        # Attach response headers so callers can inspect them.
        e.headers = resp_headers
        # 403 is returned with custom error page when api access is blocked
        if e.errcode == 403:
            e.errmsg += ": %s" % body
        raise
    response = self._decode_response(resp_headers, body)
    self._response_errors(response)
    response = self._transform_inbound(response)
    # Non-dict responses (or ones without "results") are passed through.
    if not isinstance(response, dict) or "results" not in response:
        return response
    results = response.get("results")
    if first and isinstance(results, list):
        return results[0]
    return results
def _auth_params(self):
    """
    Return a dictionary of the authentication parameters being used.

    Exactly one of the three supported credential styles is emitted, in
    priority order: HumanUser login/password, script name/key, or an
    existing session token.

    :raises ValueError: if no usable credentials are configured.
    :raises ShotgunError: if the server version is too old for the chosen
        authentication scheme or for the ``sudo_as_login`` option.
    """
    # Used to authenticate HumanUser credentials
    if self.config.user_login and self.config.user_password:
        auth_params = {
            "user_login": str(self.config.user_login),
            # Bug fix: the password value had been replaced by a bogus
            # placeholder token, which was not even valid Python; restore
            # the configured user password.
            "user_password": str(self.config.user_password),
        }
        if self.config.auth_token:
            # Two-factor authentication token, when one was supplied.
            auth_params["auth_token"] = str(self.config.auth_token)
    # Use script name instead
    elif self.config.script_name and self.config.api_key:
        auth_params = {
            "script_name": str(self.config.script_name),
            "script_key": str(self.config.api_key),
        }
    # Authenticate using session_id
    elif self.config.session_token:
        if self.server_caps.version and self.server_caps.version < (5, 3, 0):
            raise ShotgunError("Session token based authentication requires server version "
                               "5.3.0 or higher, server is %s" % (self.server_caps.version,))
        auth_params = {"session_token": str(self.config.session_token)}
        # Request server side to raise exception for expired sessions.
        # This was added in as part of Shotgun 5.4.4
        if self.server_caps.version and self.server_caps.version > (5, 4, 3):
            auth_params["reject_if_expired"] = True
    else:
        raise ValueError("invalid auth params")

    if self.config.session_uuid:
        auth_params["session_uuid"] = self.config.session_uuid

    # Make sure sudo_as_login is supported by server version
    if self.config.sudo_as_login:
        if self.server_caps.version and self.server_caps.version < (5, 3, 12):
            raise ShotgunError("Option 'sudo_as_login' requires server version 5.3.12 or "
                               "higher, server is %s" % (self.server_caps.version,))
        auth_params["sudo_as_login"] = self.config.sudo_as_login

    if self.config.extra_auth_params:
        auth_params.update(self.config.extra_auth_params)

    return auth_params
def _sanitize_auth_params(self, params):
"""
Given an authentication parameter dictionary, sanitize any sensitive
information and return the sanitized dict copy.
"""
sanitized_params = copy.copy(params)
for k in ["user_password", "script_key", "session_token"]:
if k in sanitized_params:
sanitized_params[k] = "********"
return sanitized_params
def _build_payload(self, method, params, include_auth_params=True):
"""
Build the payload to be send to the rpc endpoint.
"""
if not method:
raise ValueError("method is empty")
call_params = []
if include_auth_params:
auth_params = self._auth_params()
call_params.append(auth_params)
if params:
call_params.append(params)
return {
"method_name": method,
"params": call_params
}
def _encode_payload(self, payload):
    """
    Encode the payload to a string to be passed to the rpc endpoint.

    The payload is json encoded as a unicode string if the content
    requires it. The unicode string is then encoded as 'utf-8' as it must
    be in a single byte encoding to go over the wire.
    """
    # ensure_ascii=False keeps non-ASCII text as-is; six then guarantees
    # a utf-8 byte string regardless of Python 2/3.
    return six.ensure_binary(json.dumps(payload, ensure_ascii=False))
def _make_call(self, verb, path, body, headers):
"""
Make an HTTP call to the server.
Handles retry and failure.
"""
attempt = 0
req_headers = {}
req_headers["user-agent"] = "; ".join(self._user_agents)
if self.config.authorization:
req_headers["Authorization"] = self.config.authorization
req_headers.update(headers or {})
body = body or None
max_rpc_attempts = self.config.max_rpc_attempts
rpc_attempt_interval = self.config.rpc_attempt_interval / 1000.0
while (attempt < max_rpc_attempts):
attempt += 1
try:
return self._http_request(verb, path, body, req_headers)
except ssl_error_classes as e:
# Test whether the exception is due to the fact that this is | |
<filename>op3/envs/blocks/mujoco/block_occlusions.py
import os
import pdb
import numpy as np
import shutil
import pickle
import cv2
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from argparse import ArgumentParser
import op3.envs.blocks.mujoco.utils.data_generation_utils as dgu
from op3.util.plot import plot_multi_image
import mujoco_py
from mujoco_py import load_model_from_xml, MjSim, MjViewer
import op3.envs.blocks.mujoco.contacts as contacts
import copy
# Template for the MuJoCo scene XML. The three "{}" placeholders are filled
# by BlockPickAndPlaceEnv.initialize() with, in order: the <mesh> asset
# entries, the <material> asset entries, and the per-block <body> elements.
MODEL_XML_BASE = """
<mujoco>
<asset>
<material name='wall_visible' rgba='.9 .9 .9 1' specular="0" shininess="0" emission="0.25"/>
<material name='wall_invisible' rgba='.9 .9 .9 0' specular="0" shininess="0" emission="0.25"/>
{}
{}
</asset>
<worldbody>
<camera name='fixed' pos='0 -3 4.5' euler='-300 0 0' fovy='55'/>
<light diffuse='1.5 1.5 1.5' pos='0 -7 8' dir='0 1 1'/>
<light diffuse='1.5 1.5 1.5' pos='0 -7 6' dir='0 1 1'/>
<geom name='wall_floor' type='plane' pos='0 0 0' euler='0 0 0' size='20 10 0.1' material='wall_visible'
condim='3' friction='1 1 1'/>
<geom name='occluding_wall' type='box' pos='2 0 0' euler='0 0.3 0' size='2 0.1 5' material='wall_visible' />
{}
</worldbody>
</mujoco>
"""
#60 = -300
# <geom name='wall_front' type='box' pos='0 -5 0' euler='0 0 0' size='10 0.1 4' material='wall_visible'/>
# <geom name='wall_left' type='box' pos='-5 0 0' euler='0 0 0' size='0.1 10 4' material='wall_visible'/>
# <geom name='wall_right' type='box' pos='5 0 0' euler='0 0 0' size='0.1 10 4' material='wall_visible'/>
# <geom name='wall_back' type='box' pos='0 5 0' euler='0 0 0' size='10 0.1 4' material='wall_visible'/>
# <body name="floor" pos="0 0 0.025">
#     <geom size="3.0 3.0 0.02" rgba="0 1 0 1" type="box"/>
#     <camera name='fixed' pos='0 -8 8' euler='45 0 0'/>
# </body>

# Fixed palette used by pickRandomColor when a color count is supplied.
# Red, Lime, Blue, Yellow, Cyan, Magenta, Black, White.
# NOTE(review): the "Black" entry is [1, 1, 1] (near-black after /255), not
# [0, 0, 0] -- presumably intentional to stay distinguishable from the
# rendered background; confirm before changing.
COLOR_LIST = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0], [0, 255, 255], [255, 0, 255], [1, 1, 1], [255, 255, 255]]
def pickRandomColor(an_int):
if an_int is None:
return np.random.uniform(low=0.0, high=1.0, size=3)
tmp = np.random.randint(0, an_int)
return np.array(COLOR_LIST[tmp])/255
class BlockPickAndPlaceEnv():
def __init__(self, num_objects, num_colors, img_dim, include_z, random_initialize=False, view=False):
# self.asset_path = os.path.join(os.getcwd(), '../mujoco_data/stl/')
self.asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../mujoco_data/stl/')
# self.asset_path = os.path.join(os.path.realpath(__file__), 'mujoco_data/stl/')
# self.asset_path = '../mujoco_data/stl/'
self.img_dim = img_dim
self.include_z = include_z
self.polygons = ['cube', 'horizontal_rectangle', 'tetrahedron'][:1]
self.num_colors = num_colors
self.num_objects = num_objects
self.view = view
#Hyper-parameters
self.internal_steps_per_step = 2000
self.drop_height = 3
self.pick_height = 0.59
self.bounds = {'x_min': -2.5, 'x_max': 2.5, 'y_min': 1.0, 'y_max': 4.0, 'z_min': 0.05, 'z_max': 2.2}
self.TOLERANCE = 0.2
self.wall_pos = np.array([2, 0, 0])
self.names = []
self.blocks = []
self._blank_observation = None
if random_initialize:
self.reset()
####Env initialization functions
def get_unique_name(self, polygon):
i = 0
while '{}_{}'.format(polygon, i) in self.names:
i += 1
name = '{}_{}'.format(polygon, i)
self.names.append(name)
return name
def add_mesh(self, polygon, pos, quat, rgba):
name = self.get_unique_name(polygon)
self.blocks.append({'name': name, 'polygon': polygon, 'pos': np.array(pos), 'quat': np.array(quat), 'rgba': rgba,
'material': name})
def get_asset_material_str(self):
asset_base = '<material name="{}" rgba="{}" specular="0" shininess="0" emission="0.25"/>'
asset_list = [asset_base.format(a['name'], self.convert_to_str(a['rgba'])) for a in self.blocks]
asset_str = '\n'.join(asset_list)
return asset_str
def get_asset_mesh_str(self):
asset_base = '<mesh name="{}" scale="0.6 0.6 0.6" file="{}"/>'
asset_list = [asset_base.format(a['name'], os.path.join(self.asset_path, a['polygon'] + '.stl'))
for a in self.blocks]
asset_str = '\n'.join(asset_list)
return asset_str
def get_body_str(self):
    """Return the newline-joined <body> XML elements for all queued blocks.

    Each block becomes a free-jointed mesh geom; the seven placeholders
    are: body name, pos, quat, joint name, geom name, mesh name, material.
    """
    body_base = '''
<body name='{}' pos='{}' quat='{}'>
<joint type='free' name='{}'/>
<geom name='{}' type='mesh' mesh='{}' pos='0 0 0' quat='1 0 0 0' material='{}'
condim='3' friction='1 1 1' solimp="0.998 0.998 0.001" solref="0.02 1"/>
</body>
'''
    # Body, joint, geom, and mesh all share the block's unique name.
    body_list = [body_base.format(m['name'], self.convert_to_str(m['pos']),
                                  self.convert_to_str(m['quat']), m['name'],
                                  m['name'], m['name'], m['material']) for i, m in enumerate(self.blocks)]
    body_str = '\n'.join(body_list)
    return body_str
def convert_to_str(self, an_iterable):
    """Return the items of *an_iterable* joined into a space-separated string.

    Used to serialize vectors (positions, quaternions, colors) into the
    space-separated attribute format that MuJoCo XML expects.
    """
    # str.join replaces the original manual concatenation loop plus
    # trailing-character slice: clearer, and not quadratic.
    return " ".join(str(an_item) for an_item in an_iterable)
def get_random_pos(self, height=None):
x = np.random.uniform(self.bounds['x_min'], self.bounds['x_max'])
y = np.random.uniform(self.bounds['y_min'], self.bounds['y_max'])
if height is None:
z = np.random.uniform(self.bounds['z_min'], self.bounds['z_max'])
else:
z = height
return np.array([x, y, z])
def get_random_rbga(self, num_colors):
    """Return a random RGBA color as a 4-element list with alpha fixed to 1."""
    rgba = list(pickRandomColor(num_colors))
    rgba.append(1)
    return rgba
def initialize(self, use_cur_pos):
    """Rebuild the MuJoCo simulation from the queued block specs.

    Caches a blank observation (used by get_segmentation_masks) before
    the blocks are moved into place, then settles the scene.

    :param use_cur_pos: forwarded to _get_starting_step; when True, blocks
        are re-placed at their current positions instead of fresh random
        drop positions.
    """
    # Compose the full scene XML: mesh assets, materials, then bodies.
    tmp = MODEL_XML_BASE.format(self.get_asset_mesh_str(), self.get_asset_material_str(), self.get_body_str())
    model = load_model_from_xml(tmp)
    self.sim = MjSim(model)
    # NOTE(review): this snapshot is taken before _get_starting_step drops
    # the blocks in -- presumably they start out of frame (reset() spawns
    # them below the floor); confirm so the "blank" frame is truly empty.
    self._blank_observation = self.get_observation()
    if self.view:
        self.viewer = MjViewer(self.sim)
    else:
        # Offscreen context renders without opening a window.
        self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1)
    # self.sim_state = self.sim.get_state()
    self._get_starting_step(use_cur_pos)
def _get_starting_step(self, use_cur_pos):
    """Place all blocks and let the physics settle.

    Blocks are first parked at distinct out-of-the-way positions, then
    dropped one by one (at their remembered position when *use_cur_pos*
    is True, otherwise at a random drop position) and simulated until
    they come to rest.
    """
    prev_positions = {}
    for i, aname in enumerate(self.names):
        if use_cur_pos:
            # Remember where the block was before parking it.
            prev_positions[aname] = self.get_block_info(aname)["pos"]
        # Park each block at a distinct spot far outside the workspace so
        # blocks do not collide while being re-placed.
        self.add_block(aname, [-5+i, -5+i, -5])
    for aname in self.names:
        if use_cur_pos:
            tmp_pos = prev_positions[aname]
            # print(aname, tmp_pos)
        else:
            tmp_pos = self.get_random_pos(self.drop_height)
        self.add_block(aname, tmp_pos)
        # Run the simulation forward so the block falls and settles.
        for i in range(self.internal_steps_per_step):
            self.internal_step()
            if self.view:
                self.viewer.render()
    # self.sim_state = self.sim.get_state()
####Env internal step functions
def add_block(self, ablock, pos):
    """Teleport block *ablock* to *pos*; set_block_info also zeroes its
    velocities and resets its orientation (no quat given)."""
    #pos (x,y,z)
    self.set_block_info(ablock, {"pos": pos})
    # self.sim.set_state(self.sim_state)
def pick_block(self, pos):
    """Return the name of a block whose center lies within TOLERANCE of
    *pos* (the last match wins), or False when nothing is close enough."""
    picked = False
    for candidate in self.names:
        if self.intersect(candidate, pos):
            picked = candidate
    #PICK_LOC = np.array([0, 0, 5])
    #info = {"pos":PICK_LOC}
    #self.set_block_info(block_name, info)
    # self.sim.set_state(self.sim_state)
    return picked
def intersect(self, a_block, pos):
    """True when *pos* is within TOLERANCE of the block's center
    (Chebyshev / L-infinity distance)."""
    delta = np.abs(self.get_block_info(a_block)["pos"] - pos)
    return np.max(delta) < self.TOLERANCE
def get_block_info(self, a_block):
    """Return a snapshot of a block's state from the simulator.

    Keys: "poly" (name minus the "_<i>" suffix), "pos" (3,), "quat" (4,),
    "vel" (3, linear), "rot_vel" (3, angular). All arrays are copies, so
    mutating them does not touch the simulation.
    """
    info = {}
    # Strip the 2-char "_<i>" suffix appended by get_unique_name.
    # NOTE(review): this breaks for indices >= 10; confirm block counts
    # stay in single digits.
    info["poly"] = a_block[:-2]
    info["pos"] = np.copy(self.sim.data.get_body_xpos(a_block)) #np array
    info["quat"] = np.copy(self.sim.data.get_body_xquat(a_block))
    info["vel"] = np.copy(self.sim.data.get_body_xvelp(a_block))
    info["rot_vel"] = np.copy(self.sim.data.get_body_xvelr(a_block))
    return info
def set_block_info(self, a_block, info):
    """Write a block's state directly into the simulator state vector.

    A free joint occupies 7 qpos entries (3 position + 4 quaternion) and
    6 qvel entries (3 linear + 3 angular). Fields missing from *info*
    are reset to defaults: identity quaternion and zero velocities.
    """
    # print(a_block, info)
    # print("Setting state: {}, {}".format(a_block, info))
    sim_state = self.sim.get_state()
    # qpos slice for this block's free joint: [pos(3), quat(4)].
    start_ind = self.sim.model.get_joint_qpos_addr(a_block)[0]
    if "pos" in info:
        sim_state.qpos[start_ind:start_ind+3] = np.array(info["pos"])
    if "quat" in info:
        sim_state.qpos[start_ind+3:start_ind+7] = info["quat"]
    else:
        # Default to the identity orientation.
        sim_state.qpos[start_ind + 3:start_ind + 7] = np.array([1, 0, 0, 0])
    # qvel slice for this block's free joint: [linear(3), angular(3)].
    start_ind = self.sim.model.get_joint_qvel_addr(a_block)[0]
    if "vel" in info:
        sim_state.qvel[start_ind:start_ind + 3] = info["vel"]
    else:
        sim_state.qvel[start_ind:start_ind + 3] = np.zeros(3)
    if "rot_vel" in info:
        sim_state.qvel[start_ind + 3:start_ind + 6] = info["rot_vel"]
    else:
        sim_state.qvel[start_ind + 3:start_ind + 6] = np.zeros(3)
    self.sim.set_state(sim_state)
def internal_step(self, action=None):
    """Advance the simulation by one physics step, or apply a pick/place.

    :param action: None to just step the physics; otherwise a 6-vector
        [pick_xyz, drop_xyz] -- the block (if any) intersecting pick_xyz
        is teleported to drop_xyz.
    :returns: the picked block's name, or False when no action was given
        or nothing was within picking tolerance.
    """
    ablock = False
    if action is None:
        self.sim.forward()
        self.sim.step()
    else:
        pick_place = action[:3]
        drop_place = action[3:]
        ablock = self.pick_block(pick_place)
        if (ablock):
            # print("Dropping: {} {}".format(ablock, drop_place))
            self.add_block(ablock, drop_place)
    # self.sim_state = self.sim.get_state()
    return ablock
####Env external step functions
# Input: action (4) or (6)
# Output: resultant observation after taking the action
def step(self, action):
    """Execute one environment step and return the resulting observation.

    :param action: pick/place action of width 4 (x, y only) or 6
        (x, y, z); width-4 actions are expanded with the default pick and
        drop heights.
    :returns: rendered image, uint8 values 0-255, shape (D, D, 3).
    """
    action = self._pre_process_actions(action)
    ablock = self.internal_step(action)
    # print(self.get_env_info())
    #if ablock:
    # Let the physics settle after the (attempted) pick and place.
    for i in range(self.internal_steps_per_step):
        self.sim.forward()
        self.sim.step()
        # self.internal_step()
        if self.view:
            self.viewer.render()

    # self.give_down_vel()
    # for i in range(200):
    #     self.sim.forward()
    #     self.sim.step()
    # self.sim_state = self.sim.get_state()
    # for aname in self.names:  #This looks incorrect TODO: CHECK THIS
    #     self.add_block(aname, self.get_block_info(aname)["pos"])
    return self.get_observation()
# Input: action can either be (A) or (T, A) where we want to execute T actions in a row
# Output: Single obs
def try_step(self, actions):
    """Simulate action(s) and return the resulting observation WITHOUT
    permanently changing the environment.

    :param actions: a single action of shape (A,) or a sequence of shape
        (T, A) executed in order.
    :returns: observation after the last action; the environment state is
        then restored from the snapshot taken before stepping.
    """
    # Snapshot current state so it can be restored afterwards.
    # NOTE(review): get_env_info/set_env_info are defined outside this
    # chunk -- presumably a full (positions + velocities) snapshot; confirm.
    tmp = self.get_env_info()
    # cur_state = copy.deepcopy(self.sim.get_state())
    if len(actions.shape) == 1:
        self.step(actions)
    elif len(actions.shape) == 2:
        for action in actions:
            self.step(action)
    else:
        raise KeyError("Wrong shape for actions: {}".format(actions.shape))
    obs = self.get_observation()
    # self.sim.set_state(cur_state)
    self.set_env_info(tmp)
    return obs
def reset(self):
    """Build a fresh scene with self.num_objects random blocks.

    Blocks are spawned below the floor (z = -2, -4, ...) so the blank
    observation cached by initialize() shows an empty scene before the
    blocks are dropped in.

    :returns: observation of the settled scene, uint8 (D, D, 3).
    """
    self.names = []
    self.blocks = []
    quat = [1, 0, 0, 0]
    for i in range(self.num_objects):
        poly = np.random.choice(self.polygons)
        pos = self.get_random_pos()
        # Stagger the spawn depths so blocks do not start interpenetrating.
        pos[2] = -2 * (i + 1)
        self.add_mesh(poly, pos, quat, self.get_random_rbga(self.num_colors))
    self.initialize(False)
    return self.get_observation()
def get_observation(self):
    """Render and return the current camera image, uint8 0-255, (D, D, 3)."""
    img = self.sim.render(self.img_dim, self.img_dim, camera_name="fixed")  # img is upside down, values btwn 0-255 (D,D,3)
    img = img[::-1, :, :]  # flips image right side up (D,D,3)
    # ascontiguousarray removes the negative stride left by the flip.
    return np.ascontiguousarray(img)  # values btwn 0-255 (D,D,3)
def get_segmentation_masks(self):
    """Return [background_mask, block_mask], each (D, D) with values in {0, 1}.

    A pixel counts as "block" when any of its RGB channels differs from
    the cached blank (block-free) observation by more than 5.
    """
    cur_obs = self.get_observation()
    tmp = np.abs(cur_obs - self._blank_observation)  #(D,D,3)
    # Per-pixel count of channels that changed noticeably, then binarize.
    dif = np.where(tmp > 5, 1, 0).sum(2)  #(D,D,3)->(D,D)
    dif = np.where(dif != 0, 1.0, 0.0)
    block_seg = dif
    background_seg = 1 - dif
    return np.array([background_seg, block_seg])  #Note: output btwn 0-1, (2,D,D)
def get_obs_size(self):
return [self.img_dim, self.img_dim]
def get_actions_size(self):
if self.include_z:
return [6]
else:
return [4]
# Inputs: actions (*,6)
# Outputs: (*,6) if including z, (*,4) if not
def _post_process_actions(self, actions):
if self.include_z:
return actions
else:
return actions[..., [0, 1, 3, 4]]
# Inputs: actions (*,4), or (*,6)
# Outputs: actions (*,6)
def _pre_process_actions(self, actions):
if actions.shape[-1] == 6:
return actions
full_actions = np.zeros(list(actions.shape)[:-1] + [6]) # (*,6)
full_actions[..., [0, 1, 3, 4]] = actions
full_actions[..., 2] = self.pick_height
full_actions[..., 5] = self.drop_height
return full_actions
# Inputs: None
# Outputs: Returns name of picked block
# If self.include z: Pick any random block
# Else: Picks a random block which can be picked up with the z pick set to self.pick_height
def _get_rand_block_byz(self):
    """Return the name of a randomly chosen pickable block.

    When include_z is set any block qualifies; otherwise only blocks whose
    current z is within TOLERANCE of pick_height (i.e. reachable by the
    implicit pick depth) are candidates.

    :raises KeyError: if the scene contains no blocks.
    """
    if len(self.names) == 0:
        raise KeyError("No blocks in _get_rand_block_byz()!")

    if self.include_z:
        aname = np.random.choice(self.names)
    else:
        z_lim = self.pick_height
        # Candidates resting at pickable height.
        # NOTE(review): np.random.choice raises if this list is empty
        # (e.g. every block is stacked or has toppled out of reach).
        tmp = [aname for aname in self.names if abs(self.get_block_info(aname)["pos"][2] - z_lim) < self.TOLERANCE]
        # while (len(tmp) == 0):
        #     z_lim += 0.5
        #     tmp = [aname for aname in self.names if self.get_block_info(aname)["pos"][2] <= z_lim]
        aname = np.random.choice(tmp)
        # print(tmp, aname)
    return aname
# Input: action_type
# Output: Single action either (6) or (4)
| |
<filename>y/google-cloud-sdk/lib/googlecloudapis/compute/alpha/compute_alpha_client.py
"""Generated client library for compute version alpha."""
# NOTE: This file is autogenerated and should not be edited by hand.
from googlecloudapis.apitools.base.py import base_api
from googlecloudapis.compute.alpha import compute_alpha_messages as messages
class ComputeAlpha(base_api.BaseApiClient):
  """Generated client library for service compute version alpha.

  NOTE: this class (and file) is autogenerated; each nested *Service class
  wraps one REST resource and delegates to base_api via declarative
  ApiMethodInfo configurations.
  """

  # Module holding the request/response protorpc message classes.
  MESSAGES_MODULE = messages

  # Static service metadata baked in by the code generator.
  _PACKAGE = u'compute'
  _SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/compute', u'https://www.googleapis.com/auth/compute.readonly', u'https://www.googleapis.com/auth/devstorage.full_control', u'https://www.googleapis.com/auth/devstorage.read_only', u'https://www.googleapis.com/auth/devstorage.read_write']
  _VERSION = u'alpha'
  # NOTE(review): these look like the standard public "installed app"
  # credentials shipped with generated Google API clients rather than a
  # private secret -- confirm before treating as a credential leak.
  _CLIENT_ID = '1042881264118.apps.googleusercontent.com'
  _CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
  _USER_AGENT = ''
  _CLIENT_CLASS_NAME = u'ComputeAlpha'
  _URL_VERSION = u'alpha'
def __init__(self, url='', credentials=None,
             get_credentials=True, http=None, model=None,
             log_request=False, log_response=False,
             credentials_args=None, default_global_params=None,
             additional_http_headers=None):
    """Create a new compute handle.

    All arguments are forwarded to base_api.BaseApiClient; one service
    wrapper instance is then attached per API resource.
    """
    # Default endpoint for the alpha surface of the compute API.
    url = url or u'https://www.googleapis.com/compute/alpha/'
    super(ComputeAlpha, self).__init__(
        url, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params,
        additional_http_headers=additional_http_headers)
    # One service object per REST resource exposed by the API.
    self.addresses = self.AddressesService(self)
    self.backendServices = self.BackendServicesService(self)
    self.diskTypes = self.DiskTypesService(self)
    self.disks = self.DisksService(self)
    self.firewalls = self.FirewallsService(self)
    self.forwardingRules = self.ForwardingRulesService(self)
    self.globalAddresses = self.GlobalAddressesService(self)
    self.globalForwardingRules = self.GlobalForwardingRulesService(self)
    self.globalOperations = self.GlobalOperationsService(self)
    self.httpHealthChecks = self.HttpHealthChecksService(self)
    self.httpsHealthChecks = self.HttpsHealthChecksService(self)
    self.images = self.ImagesService(self)
    self.instanceTemplates = self.InstanceTemplatesService(self)
    self.instances = self.InstancesService(self)
    self.licenses = self.LicensesService(self)
    self.machineTypes = self.MachineTypesService(self)
    self.networks = self.NetworksService(self)
    self.projects = self.ProjectsService(self)
    self.regionOperations = self.RegionOperationsService(self)
    self.regions = self.RegionsService(self)
    self.routes = self.RoutesService(self)
    self.snapshots = self.SnapshotsService(self)
    self.sslCertificates = self.SslCertificatesService(self)
    self.targetHttpProxies = self.TargetHttpProxiesService(self)
    self.targetHttpsProxies = self.TargetHttpsProxiesService(self)
    self.targetInstances = self.TargetInstancesService(self)
    self.targetPools = self.TargetPoolsService(self)
    self.targetVpnGateways = self.TargetVpnGatewaysService(self)
    self.urlMaps = self.UrlMapsService(self)
    self.vpnTunnels = self.VpnTunnelsService(self)
    self.zoneOperations = self.ZoneOperationsService(self)
    self.zones = self.ZonesService(self)
class AddressesService(base_api.BaseApiService):
    """Service class for the addresses resource.

    Autogenerated wrapper: each public method looks up its ApiMethodInfo
    in _method_configs and delegates the HTTP call to _RunMethod.
    """

    _NAME = u'addresses'

    def __init__(self, client):
        super(ComputeAlpha.AddressesService, self).__init__(client)
        # Declarative HTTP configuration for each method, keyed by name.
        self._method_configs = {
            'AggregatedList': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.addresses.aggregatedList',
                ordered_params=[u'project'],
                path_params=[u'project'],
                query_params=[u'filter', u'maxResults', u'pageToken'],
                relative_path=u'projects/{project}/aggregated/addresses',
                request_field='',
                request_type_name=u'ComputeAddressesAggregatedListRequest',
                response_type_name=u'AddressAggregatedList',
                supports_download=False,
            ),
            'Delete': base_api.ApiMethodInfo(
                http_method=u'DELETE',
                method_id=u'compute.addresses.delete',
                ordered_params=[u'project', u'region', u'address'],
                path_params=[u'address', u'project', u'region'],
                query_params=[],
                relative_path=u'projects/{project}/regions/{region}/addresses/{address}',
                request_field='',
                request_type_name=u'ComputeAddressesDeleteRequest',
                response_type_name=u'Operation',
                supports_download=False,
            ),
            'Get': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.addresses.get',
                ordered_params=[u'project', u'region', u'address'],
                path_params=[u'address', u'project', u'region'],
                query_params=[],
                relative_path=u'projects/{project}/regions/{region}/addresses/{address}',
                request_field='',
                request_type_name=u'ComputeAddressesGetRequest',
                response_type_name=u'Address',
                supports_download=False,
            ),
            'Insert': base_api.ApiMethodInfo(
                http_method=u'POST',
                method_id=u'compute.addresses.insert',
                ordered_params=[u'project', u'region'],
                path_params=[u'project', u'region'],
                query_params=[],
                relative_path=u'projects/{project}/regions/{region}/addresses',
                request_field=u'address',
                request_type_name=u'ComputeAddressesInsertRequest',
                response_type_name=u'Operation',
                supports_download=False,
            ),
            'List': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.addresses.list',
                ordered_params=[u'project', u'region'],
                path_params=[u'project', u'region'],
                query_params=[u'filter', u'maxResults', u'pageToken'],
                relative_path=u'projects/{project}/regions/{region}/addresses',
                request_field='',
                request_type_name=u'ComputeAddressesListRequest',
                response_type_name=u'AddressList',
                supports_download=False,
            ),
        }

        # No media-upload methods on this resource.
        self._upload_configs = {
        }

    def AggregatedList(self, request, global_params=None):
        """Retrieves the list of addresses grouped by scope.

        Args:
          request: (ComputeAddressesAggregatedListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (AddressAggregatedList) The response message.
        """
        config = self.GetMethodConfig('AggregatedList')
        return self._RunMethod(
            config, request, global_params=global_params)

    def Delete(self, request, global_params=None):
        """Deletes the specified address resource.

        Args:
          request: (ComputeAddressesDeleteRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Operation) The response message.
        """
        config = self.GetMethodConfig('Delete')
        return self._RunMethod(
            config, request, global_params=global_params)

    def Get(self, request, global_params=None):
        """Returns the specified address resource.

        Args:
          request: (ComputeAddressesGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Address) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    def Insert(self, request, global_params=None):
        """Creates an address resource in the specified project using the data included in the request.

        Args:
          request: (ComputeAddressesInsertRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Operation) The response message.
        """
        config = self.GetMethodConfig('Insert')
        return self._RunMethod(
            config, request, global_params=global_params)

    def List(self, request, global_params=None):
        """Retrieves the list of address resources contained within the specified region.

        Args:
          request: (ComputeAddressesListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (AddressList) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)
class BackendServicesService(base_api.BaseApiService):
"""Service class for the backendServices resource."""
_NAME = u'backendServices'
def __init__(self, client):
super(ComputeAlpha.BackendServicesService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'compute.backendServices.delete',
ordered_params=[u'project', u'backendService'],
path_params=[u'backendService', u'project'],
query_params=[],
relative_path=u'projects/{project}/global/backendServices/{backendService}',
request_field='',
request_type_name=u'ComputeBackendServicesDeleteRequest',
response_type_name=u'Operation',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'compute.backendServices.get',
ordered_params=[u'project', u'backendService'],
path_params=[u'backendService', u'project'],
query_params=[],
relative_path=u'projects/{project}/global/backendServices/{backendService}',
request_field='',
request_type_name=u'ComputeBackendServicesGetRequest',
response_type_name=u'BackendService',
supports_download=False,
),
'GetHealth': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'compute.backendServices.getHealth',
ordered_params=[u'project', u'backendService'],
path_params=[u'backendService', u'project'],
query_params=[],
relative_path=u'projects/{project}/global/backendServices/{backendService}/getHealth',
request_field=u'resourceGroupReference',
request_type_name=u'ComputeBackendServicesGetHealthRequest',
response_type_name=u'BackendServiceGroupHealth',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'compute.backendServices.insert',
ordered_params=[u'project'],
path_params=[u'project'],
query_params=[],
relative_path=u'projects/{project}/global/backendServices',
request_field=u'backendService',
request_type_name=u'ComputeBackendServicesInsertRequest',
response_type_name=u'Operation',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'compute.backendServices.list',
ordered_params=[u'project'],
path_params=[u'project'],
query_params=[u'filter', u'maxResults', u'pageToken'],
relative_path=u'projects/{project}/global/backendServices',
request_field='',
request_type_name=u'ComputeBackendServicesListRequest',
response_type_name=u'BackendServiceList',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'compute.backendServices.patch',
ordered_params=[u'project', u'backendService'],
path_params=[u'backendService', u'project'],
query_params=[],
relative_path=u'projects/{project}/global/backendServices/{backendService}',
request_field=u'backendServiceResource',
request_type_name=u'ComputeBackendServicesPatchRequest',
response_type_name=u'Operation',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'compute.backendServices.update',
ordered_params=[u'project', u'backendService'],
path_params=[u'backendService', u'project'],
query_params=[],
relative_path=u'projects/{project}/global/backendServices/{backendService}',
request_field=u'backendServiceResource',
request_type_name=u'ComputeBackendServicesUpdateRequest',
response_type_name=u'Operation',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Deletes the specified BackendService resource.
Args:
request: (ComputeBackendServicesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Returns the specified BackendService resource.
Args:
request: (ComputeBackendServicesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BackendService) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def GetHealth(self, request, global_params=None):
"""Gets the most recent health check results for this BackendService.
Args:
request: (ComputeBackendServicesGetHealthRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BackendServiceGroupHealth) The response message.
"""
config = self.GetMethodConfig('GetHealth')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a BackendService resource in the specified project using the data included in the request.
Args:
request: (ComputeBackendServicesInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Retrieves the list of BackendService resources available to the specified project.
Args:
request: (ComputeBackendServicesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BackendServiceList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
    """Update the entire content of the BackendService resource. This method supports patch semantics.

    Args:
      request: (ComputeBackendServicesPatchRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Delegate to the base service using the config registered in __init__.
    method_config = self.GetMethodConfig('Patch')
    return self._RunMethod(method_config, request, global_params=global_params)
def Update(self, request, global_params=None):
    """Update the entire content of the BackendService resource.

    Args:
      request: (ComputeBackendServicesUpdateRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Delegate to the base service using the config registered in __init__.
    method_config = self.GetMethodConfig('Update')
    return self._RunMethod(method_config, request, global_params=global_params)
class DiskTypesService(base_api.BaseApiService):
    """Service class for the diskTypes resource.

    Generated apitools service for the read-only Compute Engine diskTypes
    collection. Each public method resolves its declarative ApiMethodInfo
    (registered in __init__) via GetMethodConfig and delegates the actual
    HTTP round trip to base_api.BaseApiService._RunMethod.
    """

    # Collection name used by the apitools client machinery.
    _NAME = u'diskTypes'

    def __init__(self, client):
        """Bind this service to *client* and register per-method configs."""
        super(ComputeAlpha.DiskTypesService, self).__init__(client)
        # Declarative request/response wiring consumed by GetMethodConfig()
        # / _RunMethod() in the base class.
        self._method_configs = {
            # GET projects/{project}/aggregated/diskTypes
            'AggregatedList': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.diskTypes.aggregatedList',
                ordered_params=[u'project'],
                path_params=[u'project'],
                query_params=[u'filter', u'maxResults', u'pageToken'],
                relative_path=u'projects/{project}/aggregated/diskTypes',
                request_field='',
                request_type_name=u'ComputeDiskTypesAggregatedListRequest',
                response_type_name=u'DiskTypeAggregatedList',
                supports_download=False,
            ),
            # GET projects/{project}/zones/{zone}/diskTypes/{diskType}
            'Get': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.diskTypes.get',
                ordered_params=[u'project', u'zone', u'diskType'],
                path_params=[u'diskType', u'project', u'zone'],
                query_params=[],
                relative_path=u'projects/{project}/zones/{zone}/diskTypes/{diskType}',
                request_field='',
                request_type_name=u'ComputeDiskTypesGetRequest',
                response_type_name=u'DiskType',
                supports_download=False,
            ),
            # GET projects/{project}/zones/{zone}/diskTypes
            'List': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.diskTypes.list',
                ordered_params=[u'project', u'zone'],
                path_params=[u'project', u'zone'],
                query_params=[u'filter', u'maxResults', u'pageToken'],
                relative_path=u'projects/{project}/zones/{zone}/diskTypes',
                request_field='',
                request_type_name=u'ComputeDiskTypesListRequest',
                response_type_name=u'DiskTypeList',
                supports_download=False,
            ),
        }
        # diskTypes exposes no media-upload methods.
        self._upload_configs = {
        }

    def AggregatedList(self, request, global_params=None):
        """Retrieves the list of disk type resources grouped by scope.

        Args:
          request: (ComputeDiskTypesAggregatedListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (DiskTypeAggregatedList) The response message.
        """
        config = self.GetMethodConfig('AggregatedList')
        return self._RunMethod(
            config, request, global_params=global_params)

    def Get(self, request, global_params=None):
        """Returns the specified disk type resource.

        Args:
          request: (ComputeDiskTypesGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (DiskType) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    def List(self, request, global_params=None):
        """Retrieves the list of disk type resources available to the specified project.

        Args:
          request: (ComputeDiskTypesListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (DiskTypeList) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)
class DisksService(base_api.BaseApiService):
"""Service class for the disks resource."""
_NAME = u'disks'
def __init__(self, client):
    """Bind the disks service to *client* and register per-method configs.

    Args:
      client: the generated API client instance this service belongs to.
    """
    super(ComputeAlpha.DisksService, self).__init__(client)
    # Declarative request/response wiring per API method; resolved at call
    # time by GetMethodConfig() / _RunMethod() in base_api.BaseApiService.
    self._method_configs = {
        # GET projects/{project}/aggregated/disks
        'AggregatedList': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'compute.disks.aggregatedList',
            ordered_params=[u'project'],
            path_params=[u'project'],
            query_params=[u'filter', u'maxResults', u'pageToken'],
            relative_path=u'projects/{project}/aggregated/disks',
            request_field='',
            request_type_name=u'ComputeDisksAggregatedListRequest',
            response_type_name=u'DiskAggregatedList',
            supports_download=False,
        ),
        # POST projects/{project}/zones/{zone}/disks/{disk}/createSnapshot
        # (request body carried in the 'snapshot' field)
        'CreateSnapshot': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'compute.disks.createSnapshot',
            ordered_params=[u'project', u'zone', u'disk'],
            path_params=[u'disk', u'project', u'zone'],
            query_params=[],
            relative_path=u'projects/{project}/zones/{zone}/disks/{disk}/createSnapshot',
            request_field=u'snapshot',
            request_type_name=u'ComputeDisksCreateSnapshotRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        # DELETE projects/{project}/zones/{zone}/disks/{disk}
        'Delete': base_api.ApiMethodInfo(
            http_method=u'DELETE',
            method_id=u'compute.disks.delete',
            ordered_params=[u'project', u'zone', u'disk'],
            path_params=[u'disk', u'project', u'zone'],
            query_params=[],
            relative_path=u'projects/{project}/zones/{zone}/disks/{disk}',
            request_field='',
            request_type_name=u'ComputeDisksDeleteRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        # GET projects/{project}/zones/{zone}/disks/{disk}
        'Get': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'compute.disks.get',
            ordered_params=[u'project', u'zone', u'disk'],
            path_params=[u'disk', u'project', u'zone'],
            query_params=[],
            relative_path=u'projects/{project}/zones/{zone}/disks/{disk}',
            request_field='',
            request_type_name=u'ComputeDisksGetRequest',
            response_type_name=u'Disk',
            supports_download=False,
        ),
        # POST projects/{project}/zones/{zone}/disks (body in 'disk' field)
        'Insert': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'compute.disks.insert',
            ordered_params=[u'project', u'zone'],
            path_params=[u'project', u'zone'],
            query_params=[u'sourceImage'],
            relative_path=u'projects/{project}/zones/{zone}/disks',
            request_field=u'disk',
            request_type_name=u'ComputeDisksInsertRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        # GET projects/{project}/zones/{zone}/disks
        'List': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'compute.disks.list',
            ordered_params=[u'project', u'zone'],
            path_params=[u'project', u'zone'],
            query_params=[u'filter', u'maxResults', u'pageToken'],
            relative_path=u'projects/{project}/zones/{zone}/disks',
            request_field='',
            request_type_name=u'ComputeDisksListRequest',
            response_type_name=u'DiskList',
            supports_download=False,
        ),
    }
    # disks exposes no media-upload methods.
    self._upload_configs = {
    }
def AggregatedList(self, request, global_params=None):
    """Retrieves the list of disks grouped by scope.

    Args:
      request: (ComputeDisksAggregatedListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (DiskAggregatedList) The response message.
    """
    # Delegate to the base service using the config registered in __init__.
    method_config = self.GetMethodConfig('AggregatedList')
    return self._RunMethod(method_config, request, global_params=global_params)
def CreateSnapshot(self, request, global_params=None):
    """CreateSnapshot method for the disks service.

    Args:
      request: (ComputeDisksCreateSnapshotRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Delegate to the base service using the config registered in __init__.
    method_config = self.GetMethodConfig('CreateSnapshot')
    return self._RunMethod(method_config, request, global_params=global_params)
def Delete(self, request, global_params=None):
"""Deletes the specified persistent disk.
| |
one value per subject as in self.subject_names_raw -- #
# -- NOTE: self.online_eval_XX are lists and conventionally the values are stored in lists. -- #
# -- Here we store the values as numpys and transform self.online_eval_XX to a numpy later on -- #
# -- for the calculation per subject, thus we need numpy since this makes it more sufficient. -- #
self.online_eval_tp.append(tp_hard.detach().cpu().numpy())
self.online_eval_fp.append(fp_hard.detach().cpu().numpy())
self.online_eval_fn.append(fn_hard.detach().cpu().numpy())
def finish_online_evaluation_extended(self, task):
    r"""Calculate the Dice Score and IoU (Intersection over Union) on the validation dataset during training.

    The metrics are calculated for every subject and for every label in the masks, except for background.
    Results are registered in ``self.validation_results`` under
    ``epoch_<self.epoch> -> task -> subject -> mask_<label>``, and all online
    evaluation accumulators are reset afterwards.

    NOTE: The function name is different from the original one, since it is used in another context
    than the original one, ie. it is only called in special cases which is why it has a different name.

    Bug fix: subjects whose tp contains NaN used to be dropped with
    ``del subject_names[idx]``, but ``subject_names`` is the ndarray returned by
    ``np.unique`` and deleting an ndarray element raises ``ValueError``
    ("cannot delete array elements"); deleting during enumeration would also
    shift indices out of sync with the metric lists. NaN subjects are now
    simply skipped while the metric lists are built, keeping names and
    metrics aligned.
    """
    # -- Flatten the per-batch subject names into one flat array whose rows
    # -- align with the reshaped tp/fp/fn arrays below -- #
    self.subject_names_raw = np.array(self.subject_names_raw).flatten()
    # -- Reshape tp, fp, fn so the batch/subject axes are flat while the
    # -- class-label axis (last dimension) stays intact -- #
    self.online_eval_tp = np.array(self.online_eval_tp).reshape(-1, np.array(self.online_eval_tp).shape[-1])
    self.online_eval_fp = np.array(self.online_eval_fp).reshape(-1, np.array(self.online_eval_fp).shape[-1])
    self.online_eval_fn = np.array(self.online_eval_fn).reshape(-1, np.array(self.online_eval_fn).shape[-1])
    # -- Aggregate tp/fp/fn per unique subject: sum the rows belonging to a
    # -- subject over axis 0 so the per-class values stay separate -- #
    subject_names = np.unique(self.subject_names_raw)
    tp, fp, fn = list(), list(), list()
    for subject in subject_names:
        # Rows of this subject across all batches
        rows = np.where(self.subject_names_raw == subject)
        tp.append(self.online_eval_tp[rows].sum(axis=0))
        fp.append(self.online_eval_fp[rows].sum(axis=0))
        fn.append(self.online_eval_fn[rows].sum(axis=0))
    self.online_eval_tp, self.online_eval_fp, self.online_eval_fn = tp, fp, fn
    # -- Per-subject, per-class IoU and Dice; subjects with NaN tp values are
    # -- skipped here (instead of the former broken ndarray deletion) -- #
    kept_subject_names = list()
    global_iou_per_class_and_subject = list()
    global_dc_per_class_and_subject = list()
    for subject, i, j, k in zip(subject_names, tp, fp, fn):
        if np.isnan(i).any():
            continue  # incomplete stats for this subject --> drop it
        kept_subject_names.append(subject)
        # -- IoU -- #
        global_iou_per_class_and_subject.append(i / (i + j + k))
        # -- Dice -- #
        global_dc_per_class_and_subject.append(2 * i / (2 * i + j + k))
    # -- Build {subject: {mask_X: {IoU, Dice}}}. Cast to np.float64 so the
    # -- values stay JSON serializable. Not stored in self.all_val_eval_metrics
    # -- on purpose: that list drives plot_progress and extra entries would
    # -- break the epoch-aligned plots -- #
    store_dict = dict()
    for idx, subject in enumerate(kept_subject_names):
        store_dict[subject] = dict()
        for class_label in range(len(global_iou_per_class_and_subject[idx])):
            store_dict[subject]['mask_'+str(class_label+1)] = {
                'IoU': np.float64(global_iou_per_class_and_subject[idx][class_label]),
                'Dice': np.float64(global_dc_per_class_and_subject[idx][class_label])
            }
    # -- Register the results based on task, epoch, subject and class -- #
    if self.validation_results.get('epoch_'+str(self.epoch), None) is None:
        self.validation_results['epoch_'+str(self.epoch)] = { task: store_dict }
    else:   # Epoch entry already exists, only add the task with its values
        self.validation_results['epoch_'+str(self.epoch)][task] = store_dict
    # -- Empty the accumulators for the next evaluation round -- #
    self.online_eval_foreground_dc = []
    self.online_eval_tp = []
    self.online_eval_fp = []
    self.online_eval_fn = []
    self.subject_names_raw = []  # <-- Subject names necessary to map IoU and dice per subject
#------------------------------------------ Partially copied from original implementation ------------------------------------------#
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):
r"""The Multi Head Trainer needs its own validation, since data from the previous tasks needs to be included in the
validation as well. The validation data from previous tasks will be fully used for the final validation.
NOTE: This function is different from _perform_validation since it uses the parent validate function that perfomrs
validations and saves the results (predicted segmentations) in corresponding folders.
"""
# -- Initialize the variable for all results from the validation -- #
# -- A result is either None or an error --> in case this might be necessary -- #
ret_joined = list()
# -- Extract the information of the current fold -- #
trained_on_folds = self.already_trained_on[str(self.fold)]
# -- Extract the list of tasks the model has already finished training on -- #
trained_on = list(self.mh_network.heads.keys())
# -- If the trained_on_folds raise an error, because at this point the model should have been trained on at least one task -- #
assert len(trained_on) != 0, "Before performing any validation, the model needs to be trained on at least one task."
# -- NOTE: Since the head is an (ordered) ModuleDict, the current task is the last head, so there -- #
# -- is nothing to restore at the end. -- #
# -- For each previously trained task perform the validation on the full validation set -- #
running_task_list = list()
for idx, task in enumerate(trained_on):
# -- Update running task list and create running task which are all (trained tasks and current task joined) for output folder name -- #
running_task_list.append(task)
running_task = join_texts_with_char(running_task_list, '_')
# -- Get default configuration for nnunet/nnunet_ext model (finished training) -- #
plans_file, _, self.dataset_directory, _, stage, \
_ = get_default_configuration(self.network_name, task, running_task, trained_on_folds['prev_trainer'][idx],\
self.tasks_joined_name, self.identifier, extension_type=self.extension)
# -- Load the plans file -- #
self.plans = load_pickle(plans_file)
# -- Update self.gt_niftis_folder that will be used in validation function so the files can be found -- #
self.gt_niftis_folder = join(self.dataset_directory, "gt_segmentations")
# -- Extract the folder with the preprocessed data in it -- #
folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
"_stage%d" % stage)
# -- Load the dataset for the task from the loop and perform the split on it -- #
self.dataset = load_dataset(folder_with_preprocessed_data)
self.do_split()
# -- Update the log -- #
self.print_to_log_file("Performing validation with validation data from task {}.".format(task))
# -- Activate the current task to train on the right model -- #
# -- Set self.network, since the parent classes all use self.network to train -- #
# -- NOTE: self.mh_network.model is also updated to task split ! -- #
self.network = self.mh_network.assemble_model(task)
# -- Before executing validate function, set network in eval mode -- #
self.network.eval()
# -- Perform individual validations with updated self.gt_niftis_folder -- #
ret_joined.append(super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size,
save_softmax=save_softmax, use_gaussian=use_gaussian,
overwrite=overwrite, validation_folder_name=validation_folder_name+task, debug=debug,
all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs,
run_postprocessing_on_folds=run_postprocessing_on_folds))
# -- Add to the already_trained_on that the validation is done for the task the model trained on previously -- #
self.already_trained_on[str(self.fold)]['finished_validation_on'].append(trained_on[-1])
# -- Remove the additional prev_trainer currently existing in self.already_trained_on -- #
#self.already_trained_on[str(self.fold)]['prev_trainer'] | |
print('Finised computing mol2cell accuracies.')
# Saves results in self.df_c2m for top5 accuracy
self.eval_cell2mol_accuracy(mode = mode)
self.get_acc_df_cell2mol()
print('Finished computing cell2mol accuracies.')
# Run KS tests
print('Running KS test...')
self.run_ks_one_vs_all(n_cores,mode=mode)
print('Finished KS test.')
# Run mol2cell above mean
self.eval_m2c_above_mean_all(mode=mode)
# Aggregate metrics
self.eval_summary()
print('Finished pipeline.')
# Plot results !
if plot:
pass
def eval_summary(self, ks_pval_thresh = 1e-8, above_mean_thresh = 95, return_= False):
    """Aggregate all evaluation metrics into one summary dict.

    Assumes eval_pipeline() has been executed to calculate all metrics.
    Stores the result in ``self.summary_stats`` and returns it when
    *return_* is True.
    """
    n_drugs = len(self.test_drugs)
    summary = {
        # Average top-5 cell2mol accuracy
        'cell2mol_top5_acc': np.mean(self.c2m_acc_df.top5_accuracy),
        # Average mol2cell accuracy
        'mol2cell_acc': np.mean(self.m2c_acc),
        # Percentage of molecules whose own-cell similarity distribution is
        # significantly different (KS test, own cells vs all other cells)
        'perc_ks_significant': len(self.drugbank[self.drugbank.ks_pval < ks_pval_thresh]) / n_drugs * 100,
        # Percentage of drugs with enough cells above the mol2cell mean
        'perc_above_mean': len(self.drugbank[self.drugbank.acc_above_mean > above_mean_thresh]) / n_drugs * 100,
    }
    self.summary_stats = summary
    if return_:
        return summary
def get_ix_drug(self, drug_name):
    """Return the index of *drug_name*, or the string 'None' when unknown.

    NOTE: the miss sentinel is the *string* 'None' (not the None object),
    matching what the original implementation returned.
    """
    if drug_name in self.name_to_ix:
        return self.name_to_ix[drug_name]
    return 'None'
def get_ix_cells(self, drug_name, verbose = False):
    """Return the integer ``adata.obs`` indices of cells perturbed by *drug_name*.

    Tries an exact match on the ``drug_name`` obs column first; if that
    fails (e.g. composite names), falls back to a substring match.

    Args:
        drug_name: drug whose cells should be located.
        verbose: when True, print the matched drug name of the first cell.

    Returns:
        np.ndarray of integer indices into ``self.adata.obs``.
    """
    try:
        ix_cells = self.adata[self.adata.obs['drug_name']==drug_name].obs.index.values
    except Exception:  # exact match failed --> substring match (was a bare except)
        ix_cells = self.adata[self.adata.obs['drug_name'].str.contains(drug_name)].obs.index.values
    if verbose:
        # Bug fix: this used to reference the *global* name `adata`, which
        # raised NameError whenever verbose=True; use the instance's adata.
        print('Getting adata cell indices for :%s'%self.adata[ix_cells[0]].obs['drug_name'].values[0])
    return ix_cells.astype(int)
def compute_rdkit_mol_from_smiles(self):
    """Parse every SMILES string in ``self.drugbank`` into an RDKit Mol.

    Adds a 'mol' column to the drugbank dataframe and builds the
    ``self.name_to_mol`` lookup (drug_name -> Mol).
    """
    self.drugbank['mol'] = self.drugbank.SMILES.apply(Chem.MolFromSmiles)
    self.name_to_mol = {name: mol for name, mol in self.drugbank[['drug_name', 'mol']].values}
@torch.no_grad()
def project_molecules(self, _return = False):
    """Embed every drugbank molecule with the model's molecule encoder.

    Stores the embedding matrix (as a CPU numpy array) in
    ``self.mol_embedding`` and returns it when *_return* is True.
    """
    # Make sure the RDKit mols and the name->mol lookup exist first
    self.compute_rdkit_mol_from_smiles()
    indices = torch.arange(len(self.drugbank))
    batch_list = get_drug_batch(indices, self.name_to_mol, self.ix_to_name, cuda=self.cuda)
    self.model.eval()
    embeddings = self.model.molecule_encoder.project(Batch.from_data_list(batch_list))
    # Detach to numpy on the CPU regardless of the device used
    embeddings = embeddings.cpu().numpy() if self.cuda else embeddings.numpy()
    self.mol_embedding = embeddings
    if _return:
        return embeddings
# refactoring
# def compute_cosine_arr(self, return_ = False, project_mols = False, n_dims = 64):
# """
# Computes cosine array. It stores an output array
# """
# if project_mols:
# mol_embedding = self.project_molecules()
# else:
# mol_embedding = self.drugbank[['dim_' + str(i) for i in range(1,n_dims +1)]].values
#
# cell_embedding = self.adata.obs[['dim_' + str(i) for i in range(1, n_dims+1)]].values
#
# #self.mol_embedding = mol_embedding
# mol_embedding = self.mol_embedding
#
# self.cell_embedding = cell_embedding
#
# # Normalize to make row vectors
# mol_embedding_norm = mol_embedding / np.linalg.norm(mol_embedding, axis = 1).reshape(-1,1)
# cell_embedding_norm = cell_embedding / np.linalg.norm(cell_embedding, axis = 1).reshape(-1,1)
#
# # Compute cosine similarity, shape (molecules, cells)
# cosine_arr = np.matmul(mol_embedding_norm, cell_embedding_norm.T)
#
# #print('Shape of cosine similarity array: {0}'.format(cosine_arr.shape))
# self.cosine_arr = cosine_arr
#
# if return_:
# return cosine_arr
def compute_cosine_arr(self, return_ = False):
    """Compute the (molecules x cells) cosine-similarity matrix.

    Uses the cached ``self.mol_embedding`` when available, otherwise
    projects the molecules with the model first. The result is stored in
    ``self.cosine_arr`` (and ``self.cell_embedding`` is cached as well).
    """
    # Reuse cached molecule embeddings when present, otherwise project them
    # (project_molecules fills self.mol_embedding as a side effect).
    if not hasattr(self, 'mol_embedding'):
        print('Projecting molecules using model.')
        self.project_molecules()
    mol_embedding = self.mol_embedding
    dim_cols = ['dim_' + str(i) for i in range(1, self.embedding_dim + 1)]
    cell_embedding = self.adata.obs[dim_cols].values
    self.cell_embedding = cell_embedding
    # Row-normalize both embedding matrices so the dot product is a cosine
    mols_unit = mol_embedding / np.linalg.norm(mol_embedding, axis=1).reshape(-1, 1)
    cells_unit = cell_embedding / np.linalg.norm(cell_embedding, axis=1).reshape(-1, 1)
    cosine_arr = np.matmul(mols_unit, cells_unit.T)
    self.cosine_arr = cosine_arr
    if return_:
        return cosine_arr
def compute_dist_matrix(self, run_with_torch = False, return_=False):
    """Compute the euclidean distance matrix between molecules and cells.

    Saves the matrix in ``self.D`` (and the cell embeddings in
    ``self.cell_embedding``); returns it when *return_* is True.

    Bug fixes vs. the previous version:
      * a missing ``self.mol_embedding`` raises AttributeError, not
        NameError, so the fallback projection was never reached;
      * ``project_molecules()`` returns None by default, so the fallback
        also bound None as the embedding matrix — read the attribute the
        projection fills instead.
    """
    try:
        mol_embedding = self.mol_embedding
    except AttributeError:
        print('Projecting molecules using model.')
        self.project_molecules()  # fills self.mol_embedding as a side effect
        mol_embedding = self.mol_embedding
    try:
        dim_cols = ['dim_' + str(i) for i in range(1, self.embedding_dim + 1)]
        self.cell_embedding = self.adata.obs[dim_cols].values
    except Exception as err:  # was a bare except
        raise ValueError('Could not retrieve cell embeddings from adata, check adata or n_dims arg.') from err
    if run_with_torch:
        self.D = generalized_distance_matrix_torch(
            torch.from_numpy(mol_embedding),
            torch.from_numpy(self.cell_embedding)
        )
    else:
        self.D = generalized_distance_matrix(
            mol_embedding, self.cell_embedding
        )
    if return_:
        return self.D
def get_top_ixs(self, data_type = 'mols', mode = 'cosine', top_k = 15):
    """Return the top-k indices from the similarity/distance matrix.

    For 'cosine' the largest values win; for 'l2' the smallest do. For the
    molecule axis, *top_k* is overridden by the largest per-drug cell count.

    Raises:
        NameError: when *mode* is neither 'cosine' nor 'l2'.
    """
    if data_type == 'mols':
        axis = 1
        # Query as many cells as the biggest sample contains
        top_k = self.sample_counts.max()
    else:
        axis = 0
    take_largest = (mode == 'cosine')
    if mode == 'cosine':
        matrix = self.cosine_arr
    elif mode == 'l2':  # distance matrix --> smallest first
        matrix = self.D
    else:
        raise NameError('Mode %s is not implemented. Choose one of [`cosine`, `l2`.]'%mode)
    return (
        torch.from_numpy(matrix)
        .topk(k=top_k, largest=take_largest, dim=axis)
        .indices.numpy()
    )
def get_similarity_drug_one_vs_all(self, drug_name, mode = 'cosine')->Tuple[np.ndarray, np.ndarray]:
    """Split one drug's similarity row into own cells vs all other cells.

    Returns:
        A tuple of (similarities to the cells perturbed by *drug_name*,
        similarities to every cell coming from other samples), taken from
        the cosine matrix or the L2 distance matrix depending on *mode*.

    Raises:
        NameError: when *mode* is neither 'cosine' nor 'l2'.
    """
    ix_drug = self.get_ix_drug(drug_name)
    ix_cells = self.get_ix_cells(drug_name)
    if mode == 'cosine':
        matrix = self.cosine_arr
    elif mode == 'l2':
        matrix = self.D
    else:
        raise NameError('Mode %s not implemented.'%mode)
    n_cells = matrix.shape[1]
    # Indices of all cells perturbed by other molecules than `drug_name`'s
    other_cells_ix = np.array(list(set(np.arange(n_cells)) - set(ix_cells)))
    # Similarity/distance of the drug to its own cells vs everyone else
    return matrix[ix_drug, ix_cells], matrix[ix_drug, other_cells_ix]
def eval_mol2cell_accuracy(self, mode= 'cosine', return_ = False, k = 15):
    """Molecule->cell retrieval accuracy per test drug (percent of correct cells).

    Stores the per-drug accuracies in ``self.m2c_acc`` and as the
    'm2c_accuracy' column of ``self.drugbank``.
    """
    top_ixs_mols = self.get_top_ixs(data_type = 'mols', mode = mode, top_k = k)
    accs = []
    for retrieved_row, drug_ix in zip(top_ixs_mols, self.test_drugs_ixs):
        # Drug index of each retrieved cell for this molecule query
        retrieved = self.ix_samples_cell[retrieved_row]
        # Keep only as many hits as this drug has sampled cells
        retrieved = retrieved[: int(self.sample_counts_idx[drug_ix])]
        # Accuracy: fraction of retrieved cells with the correct drug
        accs.append(np.sum(retrieved == drug_ix) / (self.sample_counts_idx[drug_ix]) * 100)
    self.m2c_acc = accs
    self.drugbank['m2c_accuracy'] = accs
    if return_:
        return accs
def eval_cell2mol_accuracy(self, mode = 'cosine', k = 15):
    """Cell->molecule retrieval accuracy (top-1/3/5/10/15) per cell.

    Stores the global percentages in ``self.c2m_global_acc`` and appends
    one indicator column per cutoff to ``self.adata.obs``.
    """
    top_ixs_cells = self.get_top_ixs(data_type = 'cells', mode = mode, top_k=k).T
    hits = np.zeros((self.adata.n_obs, 5))
    if isinstance(self.test_drugs_ixs, list):
        self.test_drugs_ixs = np.array(self.test_drugs_ixs)
    cutoffs = [1, 3, 5, 10, 15]
    for i, sample_ix in tqdm.tqdm(enumerate(self.ix_samples_cell)):
        # Indicator per cutoff: is the cell's own drug among the top-k mols?
        for col, cut in enumerate(cutoffs):
            hits[i, col] = 1 if sample_ix in self.test_drugs_ixs[top_ixs_cells[i, :cut]] else 0
    self.c2m_global_acc = hits.sum(axis = 0) / self.adata.n_obs * 100
    df_acc = pd.DataFrame(hits, columns = ['top' + str(c) + '_accuracy' for c in cutoffs])
    self.adata.obs = pd.concat([self.adata.obs, df_acc.set_index(self.adata.obs.index)], axis = 1)
def eval_m2c_mean(self, drug_name, centrality_measure = 'mean', mode = 'cosine'):
    """Percentage of a drug's own cells scoring better than the pooled central value.

    Computes the fraction of cells (perturbed by *drug_name*) whose
    similarity to their own molecule beats the mean/median of the full
    similarity distribution across all cells.

    Args:
        drug_name: drug to evaluate.
        centrality_measure: 'mean' or 'median' of the pooled distribution.
        mode: 'cosine' (higher is better) or 'l2' (lower, i.e. closer, is better).

    Returns:
        float percentage in [0, 100].

    Raises:
        NotImplementedError: for an unknown *centrality_measure*.
    """
    drug_ix = self.get_ix_drug(drug_name)  # kept for parity with the original call flow
    # Similarity distributions for the drug's own cells and all other cells
    sim_distro_drug, sim_distro_others = self.get_similarity_drug_one_vs_all(
        drug_name, mode=mode
    )
    pooled = np.concatenate([sim_distro_drug, sim_distro_others])
    if centrality_measure=='mean':
        central_measure = pooled.mean()
    elif centrality_measure=='median':
        central_measure = np.median(pooled)
    else:
        # Bug fix: the error used to interpolate `mode` instead of the
        # actually unsupported centrality measure.
        raise NotImplementedError('%s is not implemented'%centrality_measure)
    if mode == 'cosine':
        n_significant = (sim_distro_drug > central_measure).sum()
    elif mode == 'l2':
        # Distances: "better" means closer than the central value
        n_significant = (sim_distro_drug < central_measure).sum()
    return n_significant / len(sim_distro_drug) * 100
def eval_m2c_above_mean_all(self, centrality_measure = 'mean', mode = 'cosine',
                            n_cores = 4, return_ = False):
    """Evaluate the above-mean accuracy for every test drug in parallel.

    Runs ``eval_m2c_mean`` over ``self.test_drugs`` with joblib and stores
    the results as the 'acc_above_mean' column of ``self.drugbank``.
    """
    drugs_iter = tqdm.tqdm(self.test_drugs, position = 0, leave = True)
    acc_arr = Parallel(n_jobs = n_cores)(
        delayed(self.eval_m2c_mean)(drug, centrality_measure, mode)
        for drug in drugs_iter
    )
    self.drugbank['acc_above_mean'] = acc_arr
    if return_:
        return acc_arr
def run_ks_test(self, drug_name, mode = 'cosine'):
"Returns statistics of running one vs all test for a given drug."
own, others = self.get_similarity_drug_one_vs_all(drug_name, mode= mode)
if mode == 'cosine':
# Test for true distro of cosine sim having higher values
ks, pval_ks, l1_score = get_stats(own, others)
elif | |
range(0, len(x1), 1):
# If index of element in mask list form 'outliers_filtering' then replace with median
#if i in mask_proc:
# print('Replace with median!')
req_data = np.array([x1[i], y1[i]]).reshape(1, -1)
# Getting number of neighbours
num_nn = vector_start_tree.query_radius(req_data, r=radius, count_only=True)
# Check number of neighboors
'''
if num_nn[0] < total_neighbours:
idx_mask.append(i)
cc[i] = 0.
else:
'''
# Apply median filtering
nn = vector_start_tree.query_radius(req_data, r=radius)
data = np.vstack((uu[nn[0]], vv[nn[0]])).T
####################################################################
# Loop through all found ice drift vectors to filter not homo
####################################################################
for ii in range(num_nn[0]):
# Calculate median
#data[:, 0][ii], data[:, 1][ii]
# Replace raw with median
# If not fast ice (> 5 pixels)
if (np.hypot(uu[i], vv[i]) > fast_ice_th or np.isnan(uu[i]) or np.isnan(vv[i])):
u_median = np.nanmedian(data[:, 0][ii])
v_median = np.nanmedian(data[:, 1][ii])
#u_median = np.nanmean(data[:, 0][ii])
#v_median = np.nanmean(data[:, 1][ii])
uu[i], vv[i] = u_median, v_median
cc[i] = 0
#tt = list(set(idx_mask))
#iidx_mask = np.array(tt)
x1_f = np.array(x1)
y1_f = np.array(y1)
uu_f = np.array(uu)
vv_f = np.array(vv)
cc_f = np.array(cc)
return x1_f, y1_f, uu_f, vv_f, cc_f
def calc_deformations(dx, dy, normalization=False, normalization_time=None, cell_size=1.,
                      invert_meridional=True, out_png_name='test.png'):
    '''
    Calculate deformation invariants (divergence, curl, shear, total
    deformation) from X and Y ice drift components.

    dx, dy             - x and y components of motion (pixels)
    normalization      - normalize displacements to speed (boolean)
    normalization_time - normalization time (in seconds)
    cell_size          - ground meters in a pixel
    invert_meridional  - invert y component (boolean)
    out_png_name       - path for the diagnostic divergence plot

    Returns (mag_speed, divergence, curl, shear, total_deform).
    mag_speed is in cm/s when normalization is True, otherwise in pixels.
    '''
    # Cell size factor (in cm)
    cell_size_cm = cell_size * 100.
    cell_size_factor = 1 / cell_size_cm
    # Output grids initialised to NaN (np.nan: the np.NAN alias was removed
    # in NumPy 2.0)
    shape = (dx.shape[0], dx.shape[1])
    m_div = np.full(shape, np.nan)
    m_curl = np.full(shape, np.nan)
    m_shear = np.full(shape, np.nan)
    m_tdef = np.full(shape, np.nan)
    # Invert meridional component (local rebinding only; caller array intact)
    if invert_meridional:
        dy = dy * (-1)
    # Normalize u and v to speed (cm/s)
    if normalization:
        # Convert to ground distance (pixels * cell size(m) * 100.)
        dx = dx * cell_size_cm  # cm
        dy = dy * cell_size_cm  # cm
        # Get U/V components of speed (cm/s)
        dx = dx / normalization_time
        dy = dy / normalization_time
    # Magnitude (speed module). Computed unconditionally: the original only
    # defined mag_speed inside the normalization branch, which raised
    # NameError at the return statement when normalization=False.
    mag_speed = np.hypot(dx, dy)
    if normalization:
        print('Mean speed: %s [cm/s]' % (np.nanmean(mag_speed)))
    for i in range(1, dx.shape[0] - 1):
        for j in range(1, dx.shape[1] - 1):
            # Divergence: central differences of dx along columns, dy along rows
            if (not np.isnan(dx[i, j + 1]) and not np.isnan(dx[i, j - 1])
                    and not np.isnan(dy[i - 1, j]) and not np.isnan(dy[i + 1, j])
                    and (not np.isnan(dx[i, j]) or not np.isnan(dy[i, j]))):
                m_div[i, j] = cell_size_factor * 0.5 * ((dx[i, j + 1] - dx[i, j - 1])
                                                        + (dy[i - 1, j] - dy[i + 1, j]))
            # Curl
            if (not np.isnan(dy[i, j + 1]) and not np.isnan(dy[i, j - 1]) and
                    not np.isnan(dx[i - 1, j]) and not np.isnan(dx[i + 1, j])
                    and (not np.isnan(dx[i, j]) or not np.isnan(dy[i, j]))):
                # NOTE(review): curl and shear carry an extra /cell_size that
                # divergence does not, despite the "exclude cell size factor"
                # remarks in the original -- kept as-is, confirm intended units.
                m_curl[i, j] = cell_size_factor * 0.5 * (dy[i, j + 1] - dy[i, j - 1]
                                                         - dx[i - 1, j] + dx[i + 1, j]) / cell_size
            # Shear
            if (not np.isnan(dy[i + 1, j]) and not np.isnan(dy[i - 1, j]) and
                    not np.isnan(dx[i, j - 1]) and not np.isnan(dx[i, j + 1]) and
                    not np.isnan(dy[i, j - 1]) and not np.isnan(dy[i, j + 1]) and
                    not np.isnan(dx[i + 1, j]) and not np.isnan(dx[i - 1, j]) and
                    (not np.isnan(dx[i, j]) or not np.isnan(dy[i, j]))):
                dc_dc = cell_size_factor * 0.5 * (dy[i + 1, j] - dy[i - 1, j])
                dr_dr = cell_size_factor * 0.5 * (dx[i, j - 1] - dx[i, j + 1])
                dc_dr = cell_size_factor * 0.5 * (dy[i, j - 1] - dy[i, j + 1])
                dr_dc = cell_size_factor * 0.5 * (dx[i + 1, j] - dx[i - 1, j])
                m_shear[i, j] = np.sqrt(
                    (dc_dc - dr_dr) * (dc_dc - dr_dr) + (dc_dr - dr_dc) * (dc_dr - dr_dc)) / cell_size
            # Total deformation: magnitude of (shear, divergence)
            if not np.isnan(m_shear[i, j]) and not np.isnan(m_div[i, j]):
                m_tdef[i, j] = np.hypot(m_shear[i, j], m_div[i, j])
    # Invert dy back
    if invert_meridional:
        dy = dy * (-1)
    divergence = m_div
    # Diagnostic plot: divergence field with drift arrows on top
    plt.clf()
    plt.gca().invert_yaxis()
    plt.imshow(divergence, cmap='RdBu', vmin=-0.00008, vmax=0.00008,
               interpolation='nearest', zorder=2)
    # Invert meridional component for plotting
    ddy = dy * (-1)
    colors = np.hypot(dx, ddy)
    xxx = range(dx.shape[1])
    yyy = range(dx.shape[0])
    plt.quiver(xxx, yyy, dx, ddy, colors, cmap='Greys', zorder=3)
    plt.savefig(out_png_name, bbox_inches='tight', dpi=800)
    curl = m_curl
    shear = m_shear
    total_deform = m_tdef
    # return mag in cm/s (when normalized)
    return mag_speed, divergence, curl, shear, total_deform
# !TODO:
def make_nc(nc_fname, lons, lats, data):
    """
    Make netcdf4 file for deformation (divergence, shear, total deformation),
    scaled 10^(-4).

    nc_fname   - output NetCDF file name
    lons, lats - 2D longitude/latitude grids (shape defines the y/x dims)
    data       - dict: variable name -> {'data': 2D array,
                 'units': str, 'scale_factor': number}
    """
    print('\nStart making nc for defo...')
    ds = Dataset(nc_fname, 'w', format='NETCDF4_CLASSIC')
    print(ds.file_format)
    # Dimensions
    ds.createDimension('y', lons.shape[0])
    ds.createDimension('x', lons.shape[1])
    ds.createDimension('time', None)
    # Coordinate variables
    times = ds.createVariable('time', np.float64, ('time',))
    latitudes = ds.createVariable('lat', np.float32, ('y', 'x',))
    longitudes = ds.createVariable('lon', np.float32, ('y', 'x',))
    # Data variables: hold the handle in a local instead of writing into
    # globals(), which could silently clobber module-level names.
    for var_name in data.keys():
        nc_var = ds.createVariable(var_name, np.float32, ('y', 'x',))
        # Keep original order: data first, then units and scale_factor.
        nc_var[:, :] = data[var_name]['data']
        nc_var.units = data[var_name]['units']
        nc_var.scale_factor = data[var_name]['scale_factor']
    # Global Attributes
    ds.description = 'Sea ice deformation product'
    ds.history = 'Created ' + time.ctime(time.time())
    ds.source = 'NIERSC/NERSC'
    # Variable Attributes
    latitudes.units = 'degree_north'
    longitudes.units = 'degree_east'
    times.units = 'hours since 0001-01-01 00:00:00'
    times.calendar = 'gregorian'
    # Put variables
    latitudes[:, :] = lats
    longitudes[:, :] = lons
    ds.close()
def _create_geotiff(suffix, Array, NDV, xsize, ysize, GeoT, Projection, deformation):
    """
    Write a single-band GeoTIFF named <suffix>.tif containing `deformation`.

    Array       - array whose dtype selects the GDAL data type
    NDV         - no-data value; NaNs in Array are replaced with it (in place)
    xsize/ysize - raster dimensions
    GeoT        - GDAL geotransform tuple
    Projection  - projection WKT string
    deformation - 2D array written to band 1
    Returns the created file name.
    """
    from osgeo import gdal_array
    DataType = gdal_array.NumericTypeCodeToGDALTypeCode(Array.dtype)
    # NumericTypeCodeToGDALTypeCode may return an int GDAL code or (in older
    # GDAL versions) a 'GDT_*' type name. Resolve names via getattr instead
    # of eval(), and use isinstance(int) instead of the removed np.int alias.
    if not isinstance(DataType, int):
        type_name = str(DataType)
        if not type_name.startswith('gdal.GDT_'):
            DataType = getattr(gdal, 'GDT_' + type_name)
    NewFileName = suffix + '.tif'
    zsize = 1  # single band
    driver = gdal.GetDriverByName('GTiff')
    # Replace NaNs with the no-data value (mutates the caller's array)
    Array[np.isnan(Array)] = NDV
    DataSet = driver.Create(NewFileName, xsize, ysize, zsize, DataType)
    DataSet.SetGeoTransform(GeoT)
    DataSet.SetProjection(Projection)
    DataSet.GetRasterBand(1).WriteArray(deformation)
    DataSet.GetRasterBand(1).SetNoDataValue(NDV)
    DataSet.FlushCache()
    return NewFileName
def create_geotiff(suffix, data, NDV, GeoT, Projection):
''' Create geotiff file (1 band)'''
# Get GDAL data type
dataType = gdal_array.NumericTypeCodeToGDALTypeCode(data.dtype)
# NaNs to the no data value
data[np.isnan(data)] = NDV
if type(dataType) != | |
== b""
assert doc_enc["h"]["data"] == b"header"
assert doc_dec["h"] == "686561646572"
assert doc_enc["t"]["len"] == b""
assert doc_enc["t"]["data"] == b"0210"
assert doc_dec["t"] == "0210"
assert doc_enc["p"]["len"] == b""
assert doc_enc["p"]["data"] == b"0000000000000000"
assert doc_dec["p"] == "0000000000000000"
assert doc_enc.keys() == set(["h", "t", "p"])
assert doc_dec.keys() == set(["h", "t", "p"])
def test_type_ascii_absent():
    """
    ASCII message type is required by spec and not provided
    Note: here parser picks up message type as "0000" and fails at primary bitmap.
    """
    field_setup = {
        "h": {"data_enc": "ascii", "len_type": 0, "max_len": 6},
        "t": {"data_enc": "ascii"},
        "p": {"data_enc": "ascii"},
    }
    for field, options in field_setup.items():
        spec[field].update(options)
    raw = b"header0000000000000000"
    with pytest.raises(
        iso8583.DecodeError,
        match="Field data is 12 bytes, expecting 16: field p pos 10",
    ):
        iso8583.decode(raw, spec=spec)
def test_type_ascii_present():
    """
    ASCII message type is required by spec and provided
    """
    for field in ("h", "t", "p"):
        spec[field]["data_enc"] = "ascii"
    spec["h"]["len_type"] = 0
    spec["h"]["max_len"] = 6
    raw = b"header02100000000000000000"
    doc_dec, doc_enc = iso8583.decode(raw, spec=spec)
    # Expected (encoded bytes, decoded string) per field
    expected = {
        "h": (b"header", "header"),
        "t": (b"0210", "0210"),
        "p": (b"0000000000000000", "0000000000000000"),
    }
    for field, (enc_data, dec_data) in expected.items():
        assert doc_enc[field]["len"] == b""
        assert doc_enc[field]["data"] == enc_data
        assert doc_dec[field] == dec_data
    assert doc_enc.keys() == {"h", "t", "p"}
    assert doc_dec.keys() == {"h", "t", "p"}
def test_type_ebcdic_absent():
    """
    EBCDIC message type is required by spec and not provided
    Note: here parser picks up message type as "0000" and fails at primary bitmap.
    """
    # Header stays ASCII; only the message type field is EBCDIC (cp500).
    spec["h"]["data_enc"] = "ascii"
    spec["h"]["len_type"] = 0
    spec["h"]["max_len"] = 6
    spec["t"]["data_enc"] = "cp500"
    spec["p"]["data_enc"] = "ascii"
    # With the type missing, 4 bytes after the header are consumed as the
    # type, leaving only 12 bytes for the 16-byte primary bitmap.
    s = b"header0000000000000000"
    with pytest.raises(
        iso8583.DecodeError,
        match="Field data is 12 bytes, expecting 16: field p pos 10",
    ):
        iso8583.decode(s, spec=spec)
def test_type_ebcdic_present():
    """
    EBCDIC message type is required by spec and provided
    """
    spec["h"]["data_enc"] = "ascii"
    spec["h"]["len_type"] = 0
    spec["h"]["max_len"] = 6
    spec["t"]["data_enc"] = "cp500"
    spec["p"]["data_enc"] = "ascii"
    # \xf0\xf2\xf1\xf0 is "0210" in EBCDIC (cp500)
    s = b"header\xf0\xf2\xf1\xf00000000000000000"
    doc_dec, doc_enc = iso8583.decode(s, spec=spec)
    assert doc_enc["h"]["len"] == b""
    assert doc_enc["h"]["data"] == b"header"
    assert doc_dec["h"] == "header"
    assert doc_enc["t"]["len"] == b""
    assert doc_enc["t"]["data"] == b"\xf0\xf2\xf1\xf0"
    assert doc_dec["t"] == "0210"
    assert doc_enc["p"]["len"] == b""
    assert doc_enc["p"]["data"] == b"0000000000000000"
    assert doc_dec["p"] == "0000000000000000"
    assert doc_enc.keys() == set(["h", "t", "p"])
    assert doc_dec.keys() == set(["h", "t", "p"])
def test_type_bcd_absent():
    """
    BCD message type is required by spec and not provided
    Note: here parser picks up message type as "\x30\x30" and fails at primary bitmap.
    """
    spec["h"]["data_enc"] = "ascii"
    spec["h"]["len_type"] = 0
    spec["h"]["max_len"] = 6
    spec["t"]["data_enc"] = "b"
    spec["p"]["data_enc"] = "ascii"
    # A BCD type is only 2 bytes, so "00" is consumed as the type and
    # 14 bytes remain for the 16-byte primary bitmap.
    s = b"header0000000000000000"
    with pytest.raises(
        iso8583.DecodeError, match="Field data is 14 bytes, expecting 16: field p pos 8"
    ):
        iso8583.decode(s, spec=spec)
def test_type_bcd_present():
    """
    BCD message type is required by spec and provided
    """
    spec["h"]["data_enc"] = "ascii"
    spec["h"]["len_type"] = 0
    spec["h"]["max_len"] = 6
    spec["t"]["data_enc"] = "b"
    spec["p"]["data_enc"] = "ascii"
    # \x02\x10 is BCD-packed "0210"
    s = b"header\x02\x100000000000000000"
    doc_dec, doc_enc = iso8583.decode(s, spec=spec)
    assert doc_enc["h"]["len"] == b""
    assert doc_enc["h"]["data"] == b"header"
    assert doc_dec["h"] == "header"
    assert doc_enc["t"]["len"] == b""
    assert doc_enc["t"]["data"] == b"\x02\x10"
    assert doc_dec["t"] == "0210"
    assert doc_enc["p"]["len"] == b""
    assert doc_enc["p"]["data"] == b"0000000000000000"
    assert doc_dec["p"] == "0000000000000000"
    assert doc_enc.keys() == set(["h", "t", "p"])
    assert doc_dec.keys() == set(["h", "t", "p"])
def test_type_negative_missing():
    """
    Type is required for all messages
    """
    spec["h"]["data_enc"] = "ascii"
    spec["h"]["len_type"] = 0
    spec["h"]["max_len"] = 6
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    # Message ends right after the 6-byte header: no bytes left for the type
    s = b"header"
    with pytest.raises(
        iso8583.DecodeError, match="Field data is 0 bytes, expecting 4: field t pos 6"
    ):
        iso8583.decode(s, spec=spec)
def test_type_negative_partial():
    """
    Message type is required all messages but partially provided.
    """
    spec["h"].update({"data_enc": "ascii", "len_type": 0, "max_len": 6})
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    # Only 2 of the 4 message type bytes are present
    truncated = b"header02"
    with pytest.raises(
        iso8583.DecodeError, match="Field data is 2 bytes, expecting 4: field t pos 6"
    ):
        iso8583.decode(truncated, spec=spec)
def test_type_negative_incorrect_encoding():
    """
    Message type is required by spec and provided.
    However, the spec encoding is not correct
    """
    spec["h"]["data_enc"] = "ascii"
    spec["h"]["len_type"] = 0
    spec["h"]["max_len"] = 6
    # "invalid" is not a known codec name, so decoding field "t" must fail
    spec["t"]["data_enc"] = "invalid"
    spec["p"]["data_enc"] = "ascii"
    s = b"header02100000000000000000"
    with pytest.raises(
        iso8583.DecodeError,
        match="Failed to decode field, unknown encoding specified: field t pos 6",
    ):
        iso8583.decode(s, spec=spec)
def test_type_negative_incorrect_ascii_data():
    """
    Message type is required by spec and provided.
    However, the data is not ASCII
    """
    spec["h"].update({"data_enc": "ascii", "len_type": 0, "max_len": 6})
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    # 0xFF bytes cannot be decoded as ASCII
    raw = b"header" + b"\xff" * 4 + b"0000000000000000"
    with pytest.raises(
        iso8583.DecodeError,
        match="Failed to decode field, invalid data: field t pos 6",
    ):
        iso8583.decode(raw, spec=spec)
def test_type_negative_incorrect_bcd_data():
    """
    BCD message type is required by spec and provided.
    However, the data is not hex.
    Note: this passes, "ab" is valid hex data when decoding.
    """
    spec["h"]["data_enc"] = "ascii"
    spec["h"]["len_type"] = 0
    spec["h"]["max_len"] = 6
    spec["t"]["data_enc"] = "b"
    spec["p"]["data_enc"] = "ascii"
    s = b"headerab0000000000000000"
    doc_dec, doc_enc = iso8583.decode(s, spec=spec)
    assert doc_enc["h"]["len"] == b""
    assert doc_enc["h"]["data"] == b"header"
    assert doc_dec["h"] == "header"
    assert doc_enc["t"]["len"] == b""
    assert doc_enc["t"]["data"] == b"ab"
    # Bytes 0x61 0x62 ("ab") unpack to the hex digits "6162"
    assert doc_dec["t"] == "6162"
    assert doc_enc["p"]["len"] == b""
    assert doc_enc["p"]["data"] == b"0000000000000000"
    assert doc_dec["p"] == "0000000000000000"
    assert doc_enc.keys() == set(["h", "t", "p"])
    assert doc_dec.keys() == set(["h", "t", "p"])
def util_set2bitmap(bm):
    """
    Enable bits specified in a bm set and return a bitmap bytearray
    """
    bitmap = bytearray(16)
    # Secondary bitmap (field 1) is present only when any of fields 65-128
    # is enabled; keep bm consistent with that (mutates the caller's set).
    if bm.isdisjoint(range(65, 129)):
        bm.discard(1)
    else:
        bm.add(1)
    for field in bm:
        zero_based = field - 1  # bms start at 1; make them zero-bound
        byte_idx, bit_idx = divmod(zero_based, 8)
        # Bit 7 (left-most) corresponds to fields 1, 9, 17, etc.
        bitmap[byte_idx] |= 1 << (7 - bit_idx)
    # Without a secondary bitmap only the first 8 bytes are returned
    return bitmap if 1 in bm else bitmap[:8]
def util_set2field_data(bm, spec, data_enc, len_enc, len_type):
    """
    Create dummy field data for fields specified in a bm set and return a bytearray
    Assume that field data is always 2 or 4 bytes representing field number.
    For example, field #65 is represented as "0065" with length 4 or
    \x00\x65 with length 2.
    """
    out = bytearray()
    for field in sorted(bm):
        key = str(field)
        # Secondary bitmap is already appended
        if key == "1":
            continue
        # BCD data is packed: half the length of ASCII/EBCDIC data
        max_len = 2 if data_enc == "b" else 4
        spec[key]["max_len"] = max_len
        spec[key]["data_enc"] = data_enc
        spec[key]["len_enc"] = len_enc
        spec[key]["len_type"] = len_type
        # Length prefix (only for variable-length fields)
        if len_type > 0:
            if len_enc == "b":
                # A hex string needs an even digit count, so the decimal
                # length is rendered into len_type * 2 characters, e.g.
                # "2" -> "02" -> \x02, "02" -> "0004" -> \x00\x04.
                out += bytearray.fromhex(
                    "{:0{width}d}".format(max_len, width=len_type * 2)
                )
            else:
                out += bytearray(
                    "{:0{width}d}".format(max_len, width=len_type), len_enc
                )
        # Field data: the field number zero-padded to 4 digits
        if data_enc == "b":
            out += bytearray.fromhex("{:04d}".format(field))
        else:
            out += bytearray("{:04d}".format(field), data_enc)
    return out
def test_primary_bitmap_ascii():
    """
    This test will validate bitmap decoding for fields 1-64
    """
    spec["h"]["data_enc"] = "ascii"
    spec["h"]["len_type"] = 0
    spec["h"]["max_len"] = 6
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    # Enable fields 2..64, inserting in ascending order
    bm = set()
    for i in range(2, 65):
        bm.add(i)
    s = bytearray(b"header0210")
    s += bytearray(util_set2bitmap(bm).hex(), "ascii")
    s += util_set2field_data(bm, spec, "ascii", "ascii", 0)
    doc_dec, doc_enc = iso8583.decode(s, spec=spec)
    # Symmetric difference: decoded keys must be exactly the enabled fields
    # plus the mandatory "h", "t" and "p" keys.
    assert doc_enc.keys() ^ set([str(f) for f in bm]) == set(["h", "t", "p"])
    assert doc_dec.keys() ^ set([str(f) for f in bm]) == set(["h", "t", "p"])
    # Same check with descending insertion order (result must not depend on it).
    # NOTE(review): range(64, 2, -1) covers fields 64..3, excluding field 2 --
    # confirm whether field 2 was meant to be included here as well.
    bm = set()
    for i in range(64, 2, -1):
        bm.add(i)
    s = bytearray(b"header0210")
    s += bytearray(util_set2bitmap(bm).hex(), "ascii")
    s += util_set2field_data(bm, spec, "ascii", "ascii", 0)
    doc_dec, doc_enc = iso8583.decode(s, spec=spec)
    assert doc_enc.keys() ^ set([str(f) for f in bm]) == set(["h", "t", "p"])
    assert doc_dec.keys() ^ set([str(f) for f in bm]) == set(["h", "t", "p"])
def test_primary_bitmap_ascii_mixed_case():
"""
This test makes sure that lower, upper and mixed case bitmap is
decoded the same way.
"""
spec["h"]["len_type"] = 0
spec["h"]["max_len"] = 0
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["5"]["len_type"] = 0
spec["5"]["max_len"] = 1
spec["5"]["data_enc"] = "ascii"
spec["7"]["len_type"] = 0
spec["7"]["max_len"] = 1
spec["7"]["data_enc"] = "ascii"
spec["9"]["len_type"] = 0
spec["9"]["max_len"] = 1
spec["9"]["data_enc"] = "ascii"
spec["11"]["len_type"] = 0
spec["11"]["max_len"] = 1
spec["11"]["data_enc"] = "ascii"
# Upper case
s = b"02000AA0000000000000ABCD"
doc_dec, doc_enc = iso8583.decode(s, spec)
assert doc_dec["t"] == "0200"
| |
created_ts = datetime_to_timestamp(instance.created)
for stat in stats:
if stat['from'] >= created_ts and stat['to'] - created_ts < hour / 2 and 'value' not in stat:
stat['value'] = 0
return Response(stats, status=status.HTTP_200_OK)
@detail_route()
def calculated_usage(self, request, uuid):
    """
    Find max or min utilization of cpu, memory and storage of the instance within timeframe.

    Query parameters:
        from, to - timeframe bounds as UNIX timestamps
                   (defaults: one hour ago .. now)
        item     - repeatable list of items to aggregate
        method   - aggregation method understood by CalculatedUsageSerializer
    """
    # XXX: hook. Should be removed after zabbix refactoring
    if not ZABBIX_ENABLED:
        raise Http404()
    instance = self.get_object()
    # Usage comes from the monitoring backend, so an unprovisioned instance
    # (no backend_id) cannot be queried.
    if not instance.backend_id:
        return Response({'detail': 'calculated usage is not available for instance without backend_id'},
                        status=status.HTTP_409_CONFLICT)
    default_start = timezone.now() - datetime.timedelta(hours=1)
    timestamp_interval_serializer = core_serializers.TimestampIntervalSerializer(data={
        'start': request.query_params.get('from', datetime_to_timestamp(default_start)),
        'end': request.query_params.get('to', datetime_to_timestamp(timezone.now()))
    })
    timestamp_interval_serializer.is_valid(raise_exception=True)
    filter_data = timestamp_interval_serializer.get_filter_data()
    start = datetime_to_timestamp(filter_data['start'])
    end = datetime_to_timestamp(filter_data['end'])
    # Drop empty query parameters so serializer defaults apply
    mapped = {
        'items': request.query_params.getlist('item'),
        'method': request.query_params.get('method'),
    }
    serializer = serializers.CalculatedUsageSerializer(data={k: v for k, v in mapped.items() if v})
    serializer.is_valid(raise_exception=True)
    results = serializer.get_stats(instance, start, end)
    return Response(results, status=status.HTTP_200_OK)
@detail_route(methods=['post'])
def assign_floating_ip(self, request, uuid):
    """
    Assign floating IP to the instance.

    Payload is validated first; then membership/instance state checks gate
    the asynchronous Celery task that performs the actual assignment.
    """
    instance = self.get_object()
    serializer = serializers.AssignFloatingIpSerializer(instance, data=request.data)
    serializer.is_valid(raise_exception=True)
    # 409 for infrastructure not ready; IncorrectStateException for the
    # instance itself being in flux.
    if not instance.cloud_project_membership.external_network_id:
        return Response({'detail': 'External network ID of the cloud project membership is missing.'},
                        status=status.HTTP_409_CONFLICT)
    elif instance.cloud_project_membership.state in SynchronizationStates.UNSTABLE_STATES:
        return Response({'detail': 'Cloud project membership of instance should be in stable state.'},
                        status=status.HTTP_409_CONFLICT)
    elif instance.state in models.Instance.States.UNSTABLE_STATES:
        raise core_exceptions.IncorrectStateException(
            detail='Cannot add floating IP to instance in unstable state.')
    # Actual assignment happens asynchronously; 202 signals "scheduled".
    tasks.assign_floating_ip.delay(serializer.validated_data['floating_ip_uuid'], uuid)
    return Response({'detail': 'Assigning floating IP to the instance has been scheduled.'},
                    status=status.HTTP_202_ACCEPTED)
class TemplateFilter(django_filters.FilterSet):
    """Filter set for VM templates (name, OS, application type, activity)."""

    # NOTE(review): `lookup_type` is the pre-1.0 django-filter spelling
    # (later renamed to `lookup_expr`) -- consistent with the rest of this module.
    name = django_filters.CharFilter(
        lookup_type='icontains',
    )
    os_type = CategoryFilter(
        categories=models.Template.OsTypes.CATEGORIES
    )
    application_type = django_filters.CharFilter(
        name='application_type__slug',
    )

    class Meta(object):
        model = models.Template
        fields = (
            'os',
            'os_type',
            'name',
            'type',
            'application_type',
            'is_active',
        )
class TemplateViewSet(viewsets.ModelViewSet):
    """
    List of VM templates that are accessible by this user.

    http://nodeconductor.readthedocs.org/en/latest/api/api.html#templates
    """
    queryset = models.Template.objects.all()
    serializer_class = serializers.TemplateSerializer
    permission_classes = (permissions.IsAuthenticated, permissions.DjangoObjectPermissions)
    lookup_field = 'uuid'
    filter_backends = (DjangoMappingFilterBackend,)
    filter_class = TemplateFilter

    def get_serializer_class(self):
        # Write operations use a dedicated create/update serializer
        if self.request.method in ('POST', 'PUT', 'PATCH'):
            return serializers.TemplateCreateSerializer
        return super(TemplateViewSet, self).get_serializer_class()

    def get_queryset(self):
        queryset = super(TemplateViewSet, self).get_queryset()
        user = self.request.user
        if self.request.method == 'GET':
            # Optional ?cloud=<uuid> narrows templates to those that have
            # images in a cloud the user is allowed to see.
            cloud_uuid = self.request.query_params.get('cloud')
            if cloud_uuid is not None:
                cloud_queryset = filter_queryset_for_user(models.Cloud.objects.all(), user)
                try:
                    cloud = cloud_queryset.get(uuid=cloud_uuid)
                except models.Cloud.DoesNotExist:
                    return queryset.none()
                queryset = queryset.filter(images__cloud=cloud)
        return queryset
class TemplateLicenseViewSet(viewsets.ModelViewSet):
    """List of template licenses that are accessible by this user.

    http://nodeconductor.readthedocs.org/en/latest/api/api.html#template-licenses
    """
    queryset = models.TemplateLicense.objects.all()
    serializer_class = serializers.TemplateLicenseSerializer
    permission_classes = (permissions.IsAuthenticated, permissions.DjangoObjectPermissions)
    lookup_field = 'uuid'

    def initial(self, request, *args, **kwargs):
        # License management is staff-only; the aggregated /stats endpoint
        # stays available to ordinary users (results are filtered per-user).
        super(TemplateLicenseViewSet, self).initial(request, *args, **kwargs)
        if self.action != 'stats' and not self.request.user.is_staff:
            raise Http404

    def get_queryset(self):
        queryset = super(TemplateLicenseViewSet, self).get_queryset()
        if 'customer' in self.request.query_params:
            customer_uuid = self.request.query_params['customer']
            queryset = queryset.filter(templates__images__cloud__customer__uuid=customer_uuid)
        return queryset

    def _filter_queryset(self, queryset):
        # Narrow the InstanceLicense queryset by optional GET parameters.
        if 'customer' in self.request.query_params:
            customer_uuid = self.request.query_params['customer']
            queryset = queryset.filter(instance__cloud_project_membership__project__customer__uuid=customer_uuid)
        if 'name' in self.request.query_params:
            queryset = queryset.filter(template_license__name=self.request.query_params['name'])
        if 'type' in self.request.query_params:
            queryset = queryset.filter(template_license__license_type=self.request.query_params['type'])
        return queryset

    @list_route()
    def stats(self, request):
        """
        Aggregate license usage counts, grouped by the repeatable GET
        parameter ``aggregate`` (project, project_group, customer, type, name).
        """
        queryset = filter_queryset_for_user(models.InstanceLicense.objects.all(), request.user)
        queryset = self._filter_queryset(queryset)
        aggregate_parameters = self.request.query_params.getlist('aggregate', [])
        aggregate_parameter_to_field_map = {
            'project': [
                'instance__cloud_project_membership__project__uuid',
                'instance__cloud_project_membership__project__name',
            ],
            'project_group': [
                'instance__cloud_project_membership__project__project_groups__uuid',
                'instance__cloud_project_membership__project__project_groups__name',
            ],
            'customer': [
                'instance__cloud_project_membership__project__customer__uuid',
                'instance__cloud_project_membership__project__customer__name',
                'instance__cloud_project_membership__project__customer__abbreviation',
            ],
            'type': ['template_license__license_type'],
            'name': ['template_license__name'],
        }
        aggregate_fields = []
        for aggregate_parameter in aggregate_parameters:
            if aggregate_parameter not in aggregate_parameter_to_field_map:
                return Response('Licenses statistics can not be aggregated by %s' % aggregate_parameter,
                                status=status.HTTP_400_BAD_REQUEST)
            aggregate_fields += aggregate_parameter_to_field_map[aggregate_parameter]
        queryset = queryset.values(*aggregate_fields).annotate(count=django_models.Count('id', distinct=True))
        # This hack can be removed when https://code.djangoproject.com/ticket/16735 will be closed
        # Replace databases paths by normal names. Ex: instance__project__uuid is replaced by project_uuid
        name_replace_map = {
            'instance__cloud_project_membership__project__uuid': 'project_uuid',
            'instance__cloud_project_membership__project__name': 'project_name',
            'instance__cloud_project_membership__project__project_groups__uuid': 'project_group_uuid',
            'instance__cloud_project_membership__project__project_groups__name': 'project_group_name',
            'instance__cloud_project_membership__project__customer__uuid': 'customer_uuid',
            'instance__cloud_project_membership__project__customer__name': 'customer_name',
            'instance__cloud_project_membership__project__customer__abbreviation': 'customer_abbreviation',
            'template_license__license_type': 'type',
            'template_license__name': 'name',
        }
        for d in queryset:
            # items() instead of iteritems(): iteritems() is Python-2-only
            # and raises AttributeError on Python 3; items() works on both.
            for db_name, output_name in name_replace_map.items():
                if db_name in d:
                    d[output_name] = d[db_name]
                    del d[db_name]
        # XXX: hack for portal only. (Provide project group data if aggregation was done by project)
        if 'project' in aggregate_parameters and 'project_group' not in aggregate_parameters:
            for item in queryset:
                project = Project.objects.get(uuid=item['project_uuid'])
                if project.project_group is not None:
                    item['project_group_uuid'] = project.project_group.uuid.hex
                    item['project_group_name'] = project.project_group.name
        return Response(queryset)
class ResourceFilter(django_filters.FilterSet):
    """
    Filter set for instances ("resources"): filtering and ordering by
    customer, project, project group, template and SLA fields.
    """
    # NOTE(review): `lookup_type` is the pre-1.0 django-filter spelling
    # (later renamed to `lookup_expr`) -- consistent with this module.
    project_group_name = django_filters.CharFilter(
        name='cloud_project_membership__project__project_groups__name',
        distinct=True,
        lookup_type='icontains',
    )
    project_name = django_filters.CharFilter(
        name='cloud_project_membership__project__name',
        distinct=True,
        lookup_type='icontains',
    )
    project_uuid = django_filters.CharFilter(
        name='cloud_project_membership__project__uuid'
    )
    # FIXME: deprecated, use project_group_name instead
    project_groups = django_filters.CharFilter(
        name='cloud_project_membership__project__project_groups__name',
        distinct=True,
        lookup_type='icontains',
    )
    name = django_filters.CharFilter(lookup_type='icontains')
    customer = django_filters.CharFilter(
        name='cloud_project_membership__project__customer__uuid'
    )
    customer_name = django_filters.CharFilter(
        name='cloud_project_membership__project__customer__name',
        lookup_type='icontains',
    )
    customer_abbreviation = django_filters.CharFilter(
        name='cloud_project_membership__project__customer__abbreviation',
        lookup_type='icontains',
    )
    customer_native_name = django_filters.CharFilter(
        name='cloud_project_membership__project__customer__native_name',
        lookup_type='icontains',
    )
    template_name = django_filters.CharFilter(
        name='template__name',
        lookup_type='icontains',
    )
    agreed_sla = django_filters.NumberFilter()
    actual_sla = django_filters.NumberFilter(
        name='slas__value',
        distinct=True,
    )

    class Meta(object):
        model = models.Instance
        fields = [
            'name',
            'template_name',
            'customer',
            'customer_name',
            'customer_native_name',
            'customer_abbreviation',
            'project_name',
            'project_uuid',
            'project_groups',
            'agreed_sla',
            'actual_sla',
        ]
        # Orderings accepted by the ?o= query parameter
        order_by = [
            'name',
            'template__name',
            'cloud_project_membership__project__customer__name',
            'cloud_project_membership__project__customer__abbreviation',
            'cloud_project_membership__project__customer__native_name',
            'cloud_project_membership__project__name',
            'cloud_project_membership__project__project_groups__name',
            'agreed_sla',
            'slas__value',
            # desc
            '-name',
            '-template__name',
            '-cloud_project_membership__project__customer__name',
            '-cloud_project_membership__project__customer__abbreviation',
            '-cloud_project_membership__project__customer__native_name',
            '-cloud_project_membership__project__name',
            '-cloud_project_membership__project__project_groups__name',
            '-agreed_sla',
            '-slas__value',
        ]
        order_by_mapping = {
            # Proper field naming
            'customer_name': 'cloud_project_membership__project__customer__name',
            'customer_abbreviation': 'cloud_project_membership__project__customer__abbreviation',
            'customer_native_name': 'cloud_project_membership__project__customer__native_name',
            'project_name': 'cloud_project_membership__project__name',
            'project_group_name': 'cloud_project_membership__project__project_groups__name',
            'template_name': 'template__name',
            'actual_sla': 'slas__value',
            # Backwards compatibility
            'project__customer__name': 'cloud_project_membership__project__customer__name',
            'project__name': 'cloud_project_membership__project__name',
            'project__project_groups__name': 'cloud_project_membership__project__project_groups__name',
        }
# XXX: This view has to be rewritten or removed after haystack implementation
class ResourceViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only listing of instances ("resources") with SLA data for a period."""

    queryset = models.Instance.objects.exclude(
        state=models.Instance.States.DELETING,
    )
    serializer_class = ServiceSerializer
    lookup_field = 'uuid'
    filter_backends = (structure_filters.GenericRoleFilter, DjangoMappingFilterBackend)
    filter_class = ResourceFilter

    def get_queryset(self):
        # Limit to instances that already existed by the end of the period.
        period = self._get_period()
        if '-' in period:
            year, month = map(int, period.split('-'))
        else:
            # A bare year means "through the end of December"
            year = int(period)
            month = 12
        last_day = calendar.monthrange(year, month)[1]
        return super(ResourceViewSet, self).get_queryset().filter(created__lte=datetime.date(year, month, last_day))

    def _get_period(self):
        # Period format: "YYYY-M" (no zero padding) or "YYYY";
        # defaults to the current month.
        period = self.request.query_params.get('period')
        if period is None:
            today = datetime.date.today()
            period = '%s-%s' % (today.year, today.month)
        return period

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        """
        context = super(ResourceViewSet, self).get_serializer_context()
        context['period'] = self._get_period()
        return context

    @detail_route()
    def events(self, request, uuid):
        """Return SLA state-change events of the instance for the period."""
        service = self.get_object()
        period = self._get_period()
        # TODO: this should use a generic resource model
        history = get_object_or_404(models.InstanceSlaHistory, instance__uuid=service.uuid, period=period)
        history_events = list(history.events.all().order_by('-timestamp').values('timestamp', 'state'))
        serializer = serializers.SlaHistoryEventSerializer(data=history_events,
                                                           many=True)
        serializer.is_valid(raise_exception=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class ResourceStatsView(views.APIView):
    """Aggregated quota and backend statistics for clouds sharing an auth URL."""

    def _check_user(self, request):
        # Endpoint is restricted to staff users
        if not request.user.is_staff:
            raise exceptions.PermissionDenied()

    def get(self, request, format=None):
        self._check_user(request)
        auth_url = request.query_params.get('auth_url')
        # TODO: auth_url should be coming as a reference to NodeConductor object. Consider introducing this concept.
        if auth_url is None:
            return Response(
                {'detail': 'GET parameter "auth_url" has to be defined'},
                status=status.HTTP_400_BAD_REQUEST,
            )
        cloud = models.Cloud.objects.filter(auth_url=auth_url).first()
        if cloud is None:
            return Response(
                {'detail': 'No clouds with auth url: %s' % auth_url},
                status=status.HTTP_400_BAD_REQUEST,
            )
        memberships = models.CloudProjectMembership.objects.filter(cloud__auth_url=auth_url)
        limits = models.CloudProjectMembership.get_sum_of_quotas_as_dict(
            memberships, ('vcpu', 'ram', 'storage'), fields=['limit'])
        # for backward compatibility we need to use this names:
        stats = cloud.get_statistics()
        stats.update({
            'vcpu_quota': limits['vcpu'],
            'storage_quota': limits['storage'],
            'memory_quota': limits['ram'],
        })
        return Response(sort_dict(stats), status=status.HTTP_200_OK)
class CustomerStatsView(views.APIView):
    """Per-customer counts of projects, project groups and instances visible to the user."""

    def get(self, request, format=None):
        def visible_count(queryset):
            # Count only the objects the requesting user is allowed to see
            return filter_queryset_for_user(queryset, request.user).count()

        statistics = []
        for customer in filter_queryset_for_user(Customer.objects.all(), request.user):
            statistics.append({
                'name': customer.name,
                'abbreviation': customer.abbreviation,
                'projects': visible_count(Project.objects.filter(customer=customer)),
                'project_groups': visible_count(ProjectGroup.objects.filter(customer=customer)),
                'instances': visible_count(models.Instance.objects.filter(
                    cloud_project_membership__project__customer=customer)),
            })
        return Response(statistics, status=status.HTTP_200_OK)
class UsageStatsView(views.APIView):
    """Usage statistics (zabbix-backed) aggregated per customer, project
    group or project."""
    # Maps the 'aggregate' GET parameter onto the model to group by and the
    # ORM lookup path from Instance to that model.
    aggregate_models = {
        'customer': {'model': Customer, 'path': models.Instance.Permissions.customer_path},
        'project_group': {'model': ProjectGroup, 'path': models.Instance.Permissions.project_group_path},
        'project': {'model': Project, 'path': models.Instance.Permissions.project_path},
    }
    def _get_aggregate_queryset(self, request, aggregate_model_name):
        """Aggregate roots of the requested kind, visible to this user."""
        model = self.aggregate_models[aggregate_model_name]['model']
        return filter_queryset_for_user(model.objects.all(), request.user)
    def _get_aggregate_filter(self, aggregate_model_name, obj):
        """Filter kwargs narrowing Instances to one aggregate root."""
        path = self.aggregate_models[aggregate_model_name]['path']
        return {path: obj}
    def get(self, request, format=None):
        """Return time-series usage datapoints for each visible aggregate root.

        GET parameters: aggregate (customer|project_group|project, default
        customer), uuid (optional root filter), from/to (timestamps,
        default last hour), datapoints (default 6), item (zabbix item).
        """
        # XXX: hook. Should be removed after zabbix refactoring
        if not ZABBIX_ENABLED:
            raise Http404()
        usage_stats = []
        aggregate_model_name = request.query_params.get('aggregate', 'customer')
        if aggregate_model_name not in self.aggregate_models.keys():
            return Response(
                'Get parameter "aggregate" can take only this values: %s' % ', '.join(self.aggregate_models.keys()),
                status=status.HTTP_400_BAD_REQUEST)
        # This filters out the things we group by (aka aggregate root) to those that can be seen
        # by currently logged in user.
        aggregate_queryset = self._get_aggregate_queryset(request, aggregate_model_name)
        if 'uuid' in request.query_params:
            aggregate_queryset = aggregate_queryset.filter(uuid=request.query_params['uuid'])
        # This filters out the vm Instances to those that can be seen
        # by currently logged in user. This is done within each aggregate root separately.
        visible_instances = filter_queryset_for_user(models.Instance.objects.all(), request.user)
        for aggregate_object in aggregate_queryset:
            # Narrow down the instance scope to aggregate root.
            instances = visible_instances.filter(
                **self._get_aggregate_filter(aggregate_model_name, aggregate_object))
            if instances:
                hour = 60 * 60
                data = {
                    'start_timestamp': request.query_params.get('from', int(time.time() - hour)),
                    'end_timestamp': request.query_params.get('to', int(time.time())),
                    'segments_count': request.query_params.get('datapoints', 6),
                    'item': request.query_params.get('item'),
                }
                serializer = serializers.UsageStatsSerializer(data=data)
                serializer.is_valid(raise_exception=True)
                stats = serializer.get_stats(instances)
                usage_stats.append({'name': aggregate_object.name, 'datapoints': stats})
            else:
                # roots with no visible instances still appear, with no data
                usage_stats.append({'name': aggregate_object.name, 'datapoints': []})
        return Response(usage_stats, status=status.HTTP_200_OK)
class FlavorViewSet(viewsets.ReadOnlyModelViewSet):
    """List of VM instance flavors that are accessible by this user.

    http://nodeconductor.readthedocs.org/en/latest/api/api.html#flavor-management
    """
    # Read-only on purpose: flavors are imported from the backend, not
    # created through this API.
    queryset = models.Flavor.objects.all()
    serializer_class = serializers.FlavorSerializer
    lookup_field = 'uuid'  # expose flavors by UUID rather than numeric PK
| |
rebooting info printed to the console
logger.info('==== Waiting for messages in rebooting ...')
d1 = Dialog([
['{} login: '.format(self.sm.patterns.chassis_hostname),
'sendline({})'.format(self.sm.patterns.chassis_username),
None, True, False],
['Password:',
'sendline({})'.format(self.sm.patterns.chassis_password), None,
True, False],
['.*Successful login attempts for user.*', None, None, True,
False],
['Sending all processes the KILL signal', None, None, True,
False],
['Please stand by while rebooting the system', None, None, True,
False],
['Use SPACE to begin boot immediately', 'send(" ")', None, True,
False],
['Manager image digital signature verification successful',
None, None, True, False],
['System is coming up', None, None, True, False],
['Finished bcm_attach...', None, None, True, False],
['vdc 1 has come online', None, None, False, False],
[r'Connection to .* closed.', None, None, False, False],
])
d1.process(self.spawn_id, timeout=1800)
logger.info('==== Reconnect after reboot...')
# Wait for reboot to finish and reconnect
time.sleep(480)
self.monitor_installation(version=version, timeout=2400)
# set disconnect timeout to maximum
self.set_default_auth_timeouts()
return True
return True
    def monitor_installation(self, version, timeout=1500):
        """ Check if installation is finished and if the version is correct
        :param version: version string of the bundle package
            e.g. 2.1(1.64)
        :param timeout: Timeout in case the installation takes too long
        :return: None
        """
        # Go to mio_state
        self.spawn_id.sendline()
        self.go_to('any')
        self.go_to('mio_state')
        # Wait for upgrade of fxos to finish
        logger.info('==== Check installation progress..')
        self.wait_till_firmware_monitor_ready(version, timeout=timeout)
    def switch_to_module(self, slot_id, app_identifier=None):
        """Method used to switch between the modules
        :param slot_id: the slot Id to connect to
        :param app_identifier: the app identifier (in case of container deploy)
        :return: None
        """
        # Record which slot/application the state machine should target,
        # then drive the session into the application's fireos prompt.
        self.set_current_slot(slot_id)
        self.set_current_application(app_identifier)
        self.go_to('fireos_state', timeout=240)
    def set_value(self, value):
        """Send 'set value' in the current fxos scope and answer the prompt.

        Needed to add cluster password as below:
        FPR4120-6-Rack10-A /ssa/logical-device* # enter mgmt-bootstrap ftd
        FPR4120-6-Rack10-A /ssa/logical-device/mgmt-bootstrap* # enter bootstrap-key-secret PASSWORD
        FPR4120-6-Rack10-A /ssa/logical-device/mgmt-bootstrap/bootstrap-key-secret* # set value
        Value:
        FPR4120-6-Rack10-A /ssa/logical-device/mgmt-bootstrap/bootstrap-key-secret* # exit
        :param value: the value
        :return: None
        """
        logger.info('=== Set value')
        self.spawn_id.sendline('set value')
        # Answer the 'Value:' prompt, then stop once the mio prompt returns.
        d = Dialog([
            ['(V|v)alue:', 'sendline({})'.format(value), None, True, False],
            [self.sm.get_state('mio_state').pattern, None, None, False,
             False]
        ])
        d.process(self.spawn_id)
def set_key(self, key):
"""
Needed to add cluster password as below:
<PASSWORD>-A# scope ssa
FPR4120-6-Rack10-A /ssa # enter logical-device ftd-logic ftd 1 clustered
FPR4120-6-Rack10-A /ssa/logical-device # enter cluster-bootstrap
PR4120-6-Rack10-A /ssa/logical-device/cluster-bootstrap # set key
Key:
FPR4120-6-Rack10-A /ssa/logical-device/cluster-bootstrap* # commit-buffer
:param key: the key
:return: None
"""
logger.info('=== Set security key')
self.spawn_id.sendline('set key')
d1 = Dialog([
['[K|k]ey:', 'sendline({})'.format(key), None, True, False],
[self.sm.get_state('mio_state').pattern, None, None, False,
False],
])
d1.process(self.spawn_id, timeout=30)
def set_power_bar(self, chassis_power_data):
"""
Sets the chassis power bar information
:param chassis_power_data: a dictionary of the form
{
'power_bar_server': '<the ip of the power bar server>',
'power_bar_port': '<the power port to which the chassis is
connected>',
'power_bar_user': '<the username credential for the power
bar server>'
'power_bar_password': '<the password credential for the power bar
server>'
}
:return:
"""
self.power_bar_server = chassis_power_data.get('power_bar_server', '')
self.power_bar_port = chassis_power_data.get('power_bar_port', '')
self.power_bar_user = chassis_power_data.get('power_bar_user', '')
self.power_bar_pwd = chassis_power_data.get('power_bar_password', '')
    def power_cycle(self, wait_until_device_is_on=True, timeout=60):
        """
        Cycle power to off and then to on for the chassis using the already
        set power bar server information (see set_power_bar()).
        :param wait_until_device_is_on: wait for device to boot up
        :param timeout: the max. time to wait for device boot up
        :return: the result of power_cycle_all_ports()
        """
        result = power_cycle_all_ports(self.power_bar_server, self.power_bar_port, self.power_bar_user,
                                       self.power_bar_pwd)
        logger.info('Wait for device to be up running ...')
        if wait_until_device_is_on:
            self.wait_until_device_on(timeout=timeout)
        return result
def set_current_slot(self, slot):
"""
Sets the id of the current active slot used by the state machine
:param slot: the slot id
:return: None
"""
self.sm.current_slot = str(slot)
def get_current_slot(self):
"""
Gets the id of the current active slot used by the state machine
:return: the slot id
"""
return self.sm.current_slot
def set_current_application(self, application_identifier):
"""
Sets the id of the current active application used by the state machine
:param slot: the application identifier
:return: None
"""
self.sm.current_application = application_identifier
def get_current_application(self):
"""
Gets the id of the current active application used by the state machine
:return: None
"""
return self.sm.current_application
    def go_to(self, state, timeout=30):
        """Transition the state machine to `state` hop-wise, attempting to
        recover when the chassis has silently dropped the session.

        :param state: name of the destination state
        :param timeout: max time (s) for the transition
        """
        to_state = state
        try:
            super().go_to(to_state, hop_wise=True, timeout=timeout)
        except StateMachineError as e:
            # trying to handle session disconnect situations
            # see more details in the member function documentation
            self.__handle_session_disconnect_for_ftd_states(
                destination_state=to_state, state_machine_exception=e,
                timeout=timeout)
    def __handle_session_disconnect_for_ftd_states(self,
                                                   destination_state,
                                                   state_machine_exception,
                                                   timeout=10):
        """
        The following implementation tries to bring back the user to the
        state he was in on the ftd hosted over the chassis if the chassis
        fxos disconnects the current session.
        Example: The user goes in a specific ftd state (let's say expert)
        and does work there in his test script. Maybe after this he does
        some other work and as time passes at some point (depending on the
        chassis fxos default-auth settings) the chassis detects that the
        session timeout has expired and disconnects the user from the
        current session and takes him to the login screen. The user then
        wants to do some work on the ftd again and expects that he is
        in the last state that he was in before. And surprise, he was
        disconnected in the mean time. We thus try to relogin and take
        him back to the previous state he was in. This is valid only for
        ftd application states and only valid for ftds running on top of
        chassis hardware.
        :param destination_state: the state the user wants to go to
        :param state_machine_exception: the exception that helps us
            determine what happened (if a session disconnect happened)
        :param timeout: the timeout from the parent function used for
            state transitions
        If we are in any other state machine error that is caused by
        another reason different from a session disconnect the function
        does not handle it and throws the original error to the user.
        The function determines the states that are taken into account
        for this session disconnect behavior by interrogating the state
        machine ftd_states member defined in the ssp state machine.
        """
        # Only transition failures can indicate a disconnect.
        if re.match('Failed.*bring.*to.*state.*', str(state_machine_exception)):
            # Recovery only applies when we believe we were in an ftd state.
            if self.sm.ftd_states and self.sm.current_state not in \
                    self.sm.ftd_states:
                raise state_machine_exception
            i = 0
            # see if logout occurred and bring login prompt to focus
            while i < 3:
                try:
                    self.spawn_id.sendline()
                    if self.spawn_id.expect('[l|L]ogin: $', timeout=5):
                        break
                except:
                    # best-effort probe; keep retrying up to 3 times
                    pass
                i += 1
            if i >= 3:
                # something other than a logout occurred
                raise state_machine_exception
            self.sm.update_cur_state('prelogin_state')
            # Re-login via mio_state; failures here are tolerated because
            # the subsequent go_to('any') resynchronizes the machine.
            try:
                super().go_to('mio_state', hop_wise=True,
                              timeout=timeout)
            except:
                pass
            super().go_to('any', hop_wise=True, timeout=timeout)
            super().go_to(destination_state, hop_wise=True, timeout=timeout)
        else:
            raise state_machine_exception
    def disconnect(self):
        """Disconnect the Device.

        For ssh sessions the connection is first brought back to mio_state
        so the remote side is left at a known prompt before closing.
        """
        if self.spawn_id is not None:
            if self.type == 'ssh':
                self.go_to('mio_state')
        super().disconnect()
def is_app_instance_ready(self, slot_id, application_name,
app_identifier, in_cluster_mode):
"""
Checks whether the app instance is ready
:param slot_id: the slot id
:param application_name: the application name (ftd)
:param app_identifier: the application identifier (sensor1)
:param in_cluster_mode: device is in cluster mode or not
:return: True or False
"""
logger.info(
"=========== Wait for app_instance: {} {} at slot: {} to be "
"Enabled and Online ===========".format(
application_name, app_identifier, slot_id))
app_instance_list = self.get_app_instance_list()
if app_instance_list == None or len(app_instance_list) == 0:
logger.info('return False in is_app_instance_ready when '
'app_instance_list is empty')
return False
app_instance = [a for a in app_instance_list if
a.application_name == application_name and
a.identifier == app_identifier and
int(a.slot_id) == int(slot_id)]
assert len(app_instance) == 1, "Found {} app instances for app {} {} " \
"in slot {}".format(len(app_instance),
application_name,
app_identifier,
slot_id)
app_instance = app_instance[0]
if app_instance.operational_state == "Install Failed":
raise RuntimeError('Install Failed.')
if in_cluster_mode:
# if the slot is not populated with hardware skip for cluster mode
if app_instance.operational_state == "Not Available":
return True
else:
if (app_instance.admin_state == 'Enabled' and
app_instance.operational_state == 'Online' and
(app_instance.cluster_oper_state == "In Cluster" or
'Master' in app_instance.cluster_oper_state or
'Slave' in app_instance.cluster_oper_state)):
return True
else:
if app_instance.admin_state == 'Enabled' and \
app_instance.operational_state == 'Online':
return True
logger.info('return False in is_app_instance_ready when admin_state '
'is not Enabled or operational_state is not Online')
return False
    def wait_till_app_instance_ready(self, slot_id, application_name,
                                     app_identifier, in_cluster_mode,
                                     wait_for_app_to_start):
        """
        Waits until the application instance is ready
        :param slot_id: the slot id
        :param application_name: the application name (ftd)
        :param app_identifier: the application identifier (sensor1)
        :param in_cluster_mode: device is in clustered mode or not
        :param wait_for_app_to_start: the max. time to wait for the app
            to start
        :return: None
        """
        # Poll is_app_instance_ready until it succeeds or the time budget
        # given by wait_for_app_to_start is exhausted.
        self.wait_till(
            self.is_app_instance_ready,
            (slot_id, application_name, app_identifier, in_cluster_mode),
            wait_upto=wait_for_app_to_start)
def download_fxos(self, | |
key
ket_size = H_sizes[ket_key]
bra_size = H_sizes[bra_key]
self.rho_shapes[key] = (ket_size,bra_size)
    def load_mu(self):
        """Load the precalculated dipole overlaps. The dipole operator must
        be stored as a .npz file, and must contain at least one array, each with three
        indices: (new manifold eigenfunction, old manifold eigenfunction,
        cartesian coordinate)."""
        file_name = os.path.join(self.base_path,'mu.npz')
        file_name_pruned = os.path.join(self.base_path,'mu_pruned.npz')
        file_name_bool = os.path.join(self.base_path,'mu_boolean.npz')
        # Prefer the pruned dipole archive when a boolean-mask archive exists.
        try:
            with np.load(file_name_bool) as mu_boolean_archive:
                self.mu_boolean = {key:mu_boolean_archive[key] for key in mu_boolean_archive.keys()}
            pruned = True
            file_name = file_name_pruned
        except FileNotFoundError:
            pruned = False
        with np.load(file_name) as mu_archive:
            self.mu = {key:mu_archive[key] for key in mu_archive.keys()}
        if pruned == False:
            # No precomputed mask: mark every transition as allowed.
            self.mu_boolean = dict()
            for key in self.mu.keys():
                self.mu_boolean[key] = np.ones(self.mu[key].shape[:2],dtype='bool')
        # Use sparse storage only when *every* dipole matrix is sparse enough.
        sparse_flags = []
        for key in self.mu.keys():
            mu_2D = np.sum(np.abs(self.mu[key])**2,axis=-1)
            sparse_flags.append(self.check_sparsity(mu_2D))
        sparse_flags = np.array(sparse_flags)
        if np.allclose(sparse_flags,True):
            self.sparse_mu_flag = True
        else:
            self.sparse_mu_flag = False
        # Split each 3-index array into per-Cartesian-component matrices.
        for key in self.mu.keys():
            mu_x = self.mu[key][...,0]
            mu_y = self.mu[key][...,1]
            mu_z = self.mu[key][...,2]
            if self.sparse_mu_flag:
                self.mu[key] = [csr_matrix(mu_x),csr_matrix(mu_y),csr_matrix(mu_z)]
            else:
                self.mu[key] = [mu_x,mu_y,mu_z]
def check_sparsity(self,mat):
csr_mat = csr_matrix(mat)
sparsity = csr_mat.nnz / (csr_mat.shape[0]*csr_mat.shape[1])
if sparsity < self.sparsity_threshold:
return True
else:
return False
### Setting the electric field to be used
def set_polarization_sequence(self,polarization_list,*,reset_rhos=True):
"""Sets the sequences used for either parallel or crossed pump and probe
Args:
polarization_list (list): list of strings, can be 'x','y', or
'z' for linear polarizations or 'r' and 'l' for right and
left circularly polarized light, respectively
Returns:
None: sets the attribute polarization sequence
"""
x = np.array([1,0,0])
y = np.array([0,1,0])
z = np.array([0,0,1])
r = np.array([1,-1j,0])/np.sqrt(2)
l = np.conjugate(r)
pol_options = {'x':x,'y':y,'z':z,'r':r,'l':l}
self.polarization_sequence = [pol_options[pol] for pol in polarization_list]
if reset_rhos:
self.rhos = dict()
### Tools for recursively calculating perturbed wavepackets using TDPT
    def dipole_matrix(self,pulse_number,key,ket_flag=True,up_flag=True):
        """Calculates the dipole matrix given the electric field polarization vector,
        if ket_flag = False then uses the bra-interaction"""
        t0 = time.time()
        pol = self.polarization_sequence[pulse_number]
        if up_flag == ket_flag:
            # rotating term
            pass
        else:
            # counter-rotating term
            pol = np.conjugate(pol)
        x = np.array([1,0,0])
        y = np.array([0,1,0])
        z = np.array([0,0,1])
        try:
            mu = self.mu[key]
            boolean_matrix = self.mu_boolean[key]
        except KeyError:
            # No manifold-specific dipole stored: fall back to the generic
            # ket/bra up/down keys.
            if ket_flag:
                key = 'ket'
            else:
                key = 'bra'
            if up_flag:
                key += '_up'
            else:
                key += '_down'
            mu = self.mu[key]
            boolean_matrix = self.mu_boolean[key]
        # For pure x/y/z polarization reuse the stored component directly,
        # avoiding the weighted sum.
        if np.all(pol == x):
            overlap_matrix = mu[0]#.copy()
        elif np.all(pol == y):
            overlap_matrix = mu[1]#.copy()
        elif np.all(pol == z):
            overlap_matrix = mu[2]#.copy()
        else:
            overlap_matrix = mu[0]*pol[0] + mu[1]*pol[1] + mu[2]*pol[2]
        t1 = time.time()
        self.dipole_time += t1-t0
        return boolean_matrix, overlap_matrix
    def electric_field_mask(self,pulse_number,key,conjugate_flag=False):
        """This method determines which molecular transitions will be
        supported by the electric field. We assume that the electric field has
        0 amplitude outside the minimum and maximum frequency implied by the
        choice of dt and num_conv_points. Otherwise we will inadvertently
        alias transitions onto nonzero electric field amplitudes.
        """
        ta = time.time()
        try:
            # Masks are cached per (pulse, transition-key) pair.
            mask = self.efield_masks[pulse_number][key]
        except KeyError:
            starting_key, ending_key = key.split('_to_')
            efield_t = self.efield_times[pulse_number]
            efield_w = self.efield_frequencies[pulse_number]
            if conjugate_flag:
                center = -self.centers[pulse_number]
            else:
                center = self.centers[pulse_number]
            try:
                eig_starting = self.eigenvalues['all_manifolds']
                eig_ending = self.eigenvalues['all_manifolds']
            except KeyError:
                eig_starting = self.eigenvalues[starting_key]
                eig_ending = self.eigenvalues[ending_key]
            # imag part corresponds to the energetic transitions
            diff = np.imag(eig_ending[:,np.newaxis] - eig_starting[np.newaxis,:])
            if efield_t.size == 1:
                # impulsive (single-point) field: all transitions supported
                mask = np.ones(diff.shape,dtype='bool')
            else:
                # The only transitions allowed by the electric field shape are
                inds_allowed = np.where((diff + center > efield_w[0]) & (diff + center < efield_w[-1]))
                mask = np.zeros(diff.shape,dtype='bool')
                mask[inds_allowed] = 1
            self.efield_masks[pulse_number][key] = mask
        tb = time.time()
        self.efield_mask_time += tb - ta
        return mask
def mask_dipole_matrix(self,boolean_matrix,overlap_matrix,
starting_manifold_mask,*,next_manifold_mask = None):
"""Takes as input the boolean_matrix and the overlap matrix that it
corresponds to. Also requires the starting manifold mask, which specifies
which states have non-zero amplitude, given the signal tolerance requested.
Trims off unnecessary starting elements, and ending elements. If
next_manifold_mask is None, then the masking is done automatically
based upon which overlap elements are nonzero. If next_manifold_mask is
a 1D numpy boolean array, it is used as the mask for next manifold."""
t0 = time.time()
if np.all(starting_manifold_mask == True):
pass
else:
boolean_matrix = boolean_matrix[:,starting_manifold_mask]
overlap_matrix = overlap_matrix[:,starting_manifold_mask]
#Determine the nonzero elements of the new psi, in the
#eigenenergy basis, n_nonzero
if type(next_manifold_mask) is np.ndarray:
n_nonzero = next_manifold_mask
else:
n_nonzero = np.any(boolean_matrix,axis=1)
if np.all(n_nonzero == True):
pass
else:
overlap_matrix = overlap_matrix[n_nonzero,:]
t1 = time.time()
self.mask_time += t1-t0
return overlap_matrix, n_nonzero
def manifold_key_to_array(self,key):
"""Key must be a string of exactly 2 integers, the first describing
the ket manifold, the second, the bra manifold. If the density
matrix is represented in the full space, rather than being divided
into manifolds, the first integer reperesents the total number of
excitations to the ket side, and the second integers represents
the sum of all excitations to the bra side."""
if len(key) != 2:
raise Exception('manifold key must be a string of exactly two intgers')
return np.array([int(char) for char in key],dtype=int)
def manifold_array_to_key(self,manifold):
"""Inverse of self.manifold_key_to_array"""
if manifold.size != 2 or manifold.dtype != int:
raise Exception('manifold array must contain exactly 2 integer')
return str(manifold[0]) + str(manifold[1])
def get_output_pdc(self,input_pdc,pulse_number,pm_flag):
output_pdc = input_pdc.copy()
if pm_flag == '+':
output_pdc[pulse_number,0] += 1
elif pm_flag == '-':
output_pdc[pulse_number,1] += 1
else:
raise Exception('Cannot parse pm_flag')
return output_pdc
def next_order(self,rho_in,*,ket_flag=True,up_flag=True,
new_manifold_mask = None,pulse_number = 0):
"""This function connects rho_p to rho_pj^(*) using a DFT convolution algorithm.
Args:
rho_in (rho_container): input density matrix
pulse_number (int): index of optical pulse (0,1,2,...)
new_manifold_mask (np.ndarray): optional - define the states to be considered
in the next manifold
Return:
rho_dict (rho_container): next-order density matrix
"""
if ket_flag == up_flag:
# Rotating term excites the ket and de-excites the bra
conjugate_flag = False
else:
# Counter-rotating term
conjugate_flag = True
pulse_time = self.pulse_times[pulse_number]
t = self.efield_times[pulse_number] + pulse_time
dt = self.dts[pulse_number]
old_manifold_key = rho_in.manifold_key
if up_flag:
change = 1
else:
change = -1
if ket_flag:
manifold_change = np.array([change,0],dtype=int)
else:
manifold_change = np.array([0,change],dtype=int)
old_manifold = self.manifold_key_to_array(old_manifold_key)
new_manifold = old_manifold + manifold_change
# my system breaks above 9. Need to fix this...
if new_manifold[0] > 9:
new_manifold[0] = 9
warnings.warn('manifold_key tracking system breaks down after 9 excitations')
if new_manifold[1] > 9:
new_manifold[1] = 9
warnings.warn('manifold_key tracking system breaks down after 9 excitations')
input_pdc = rho_in.pdc
output_pdc = input_pdc.copy()
if conjugate_flag:
output_pdc[pulse_number][1] += 1
else:
output_pdc[pulse_number][0] += 1
if self.check_for_zero_calculation:
output_pdc_tuple = tuple(tuple(output_pdc[i,:]) for i in range(output_pdc.shape[0]))
# print('Testing',output_pdc_tuple)
if output_pdc_tuple in self.composite_rhos.keys():
# do not redo unnecesary calculations
# print("Don't redo calculations",output_pdc_tuple)
return None
if not np.all(self.pdc - output_pdc >= 0):
# too many interactions with one of the pulses
return None
if np.any(new_manifold < self.minimum_manifold):
return None
elif np.any(new_manifold > self.maximum_manifold):
return None
remaining_pdc = self.pdc - output_pdc
remaining_pulse_interactions = np.sum(remaining_pdc,axis=1)
remaining_pulses = np.zeros(remaining_pulse_interactions.shape,dtype='bool')
remaining_pulses[:] = remaining_pulse_interactions
output_t0 = t[0]
for i in range(remaining_pulses.size):
if remaining_pulses[i]:
test_t = self.efield_times[i] + self.pulse_times[i]
if test_t[-1] < output_t0:
# print('Excluded due to pulse non-overlap',output_pdc)
return None
new_manifold_key = self.manifold_array_to_key(new_manifold)
mu_key = old_manifold_key + '_to_' + new_manifold_key
if conjugate_flag:
center = -self.centers[pulse_number]
else:
center = self.centers[pulse_number]
m_nonzero = rho_in.bool_mask
try:
ev1 = self.eigenvalues['all_manifolds']
ev2 = self.eigenvalues['all_manifolds']
except KeyError:
ev1 = self.eigenvalues[old_manifold_key]
ev2 = self.eigenvalues[new_manifold_key]
exp_factor1 = np.exp( (ev1[m_nonzero,np.newaxis] - 1j*center)*t[np.newaxis,:])
rho = rho_in(t) * exp_factor1
if self.conserve_memory:
# move back to the basis the Liouvillian was written in
if 'all_manifolds' in self.manifolds:
rho = self.eigenvectors['all_manifolds'][:,m_nonzero].dot(rho)
ket_size,bra_size = self.rho_shapes['all_manifolds']
else:
rho = self.eigenvectors[old_manifold_key][:,m_nonzero].dot(rho)
ket_size,bra_size = self.rho_shapes[old_manifold_key]
t_size = t.size
rho = rho.reshape(ket_size,bra_size,t_size)
if ket_flag:
old_ket_key = old_manifold_key[0]
new_ket_key = new_manifold_key[0]
if up_flag:
H_mu_key = old_ket_key + '_to_' + new_ket_key
else:
H_mu_key = new_ket_key + '_to_' + old_ket_key
rotating_flag = up_flag
else:
old_bra_key = old_manifold_key[1]
new_bra_key = new_manifold_key[1]
if up_flag:
H_mu_key = old_bra_key + '_to_' + new_bra_key
else:
H_mu_key = new_bra_key + '_to_' + old_bra_key
rotating_flag = not up_flag
overlap_matrix = self.get_H_mu(pulse_number,H_mu_key,
rotating_flag=rotating_flag)
t0 = time.time()
if ket_flag:
rho = np.einsum('ij,jkl',overlap_matrix,rho)
else:
rho = np.einsum('ijl,jk',rho,overlap_matrix)
t1 = time.time()
rho_vec_size = rho.shape[0]*rho.shape[1]
rho = rho.reshape(rho_vec_size,t_size)
if 'all_manifolds' in self.manifolds:
evl = self.left_eigenvectors['all_manifolds']
else:
evl = self.left_eigenvectors[new_manifold_key]
rho = | |
incre(0)
len(m) == 2 and abs(m[0] - m[1]) == 0 and m[0] not in self.bonus_winds and incre(3)
len(m) == 2 and abs(m[0] - m[1]) == 1 and incre(2 if m[0] % 9 > 0 and m[1] % 9 < 8 else 1)
len(m) == 2 and abs(m[0] - m[1]) == 2 and incre(1)
len(m) == 3 and incre(5 if m[0] == m[1] else 4)
return geo_vec
def qh_type(self):
qh_type = []
if len(self.m34) > 0:
meld_types = []
for m in self.m34:
if m[0] // 9 == 3:
continue
if m[0] // 9 not in meld_types:
meld_types.append(m[0] // 9)
if len(meld_types) > 1:
self.shantins[self.QH] = 10
return []
else:
qh_type = meld_types
if (len(qh_type) == 0 and len(self.m34) > 0) or len(self.m34) == 0:
type_geo = [
len([t for t in self.h34 if 0 <= t < 9]),
len([t for t in self.h34 if 9 <= t < 18]),
len([t for t in self.h34 if 18 <= t < 27])
]
max_num = max(type_geo)
qh_type = [i for i in range(3) if type_geo[i] == max_num]
return qh_type
@staticmethod
def geo_vec_normal(p):
geo_vec = [0] * 6
def incre(set_type):
geo_vec[set_type] += 1
for m in p:
len(m) == 1 and incre(0)
len(m) == 2 and abs(m[0] - m[1]) == 0 and incre(3)
len(m) == 2 and abs(m[0] - m[1]) == 1 and incre(2 if m[0] % 9 > 0 and m[1] % 9 < 8 else 1)
len(m) == 2 and abs(m[0] - m[1]) == 2 and incre(1)
len(m) == 3 and incre(5 if m[0] == m[1] else 4)
return geo_vec
@staticmethod
def geo_vec_no19(p):
geo_vec = [0] * 6
def incre(set_type):
geo_vec[set_type] += 1
for m in p:
if m[0] > 26:
continue
len(m) == 1 and 0 < m[0] % 9 < 8 and incre(0)
len(m) == 2 and abs(m[0] - m[1]) == 0 and 0 < m[0] % 9 < 8 and incre(3)
len(m) == 2 and abs(m[0] - m[1]) == 1 and m[0] % 9 > 1 and m[1] % 9 < 7 and incre(2)
len(m) == 2 and abs(m[0] - m[1]) == 1 and (m[0] % 9 == 1 or m[1] % 9 == 7) and incre(1)
len(m) == 2 and abs(m[0] - m[1]) == 2 and m[0] % 9 > 0 and m[1] % 9 < 8 and incre(1)
len(m) == 3 and m[0] == m[1] and 0 < m[0] % 9 < 8 and incre(5)
len(m) == 3 and m[0] != m[1] and incre(4 if m[0] % 9 > 0 and m[2] % 9 < 8 else 1)
return geo_vec
@staticmethod
def geo_vec_qh(p, tp):
allowed_types = [tp, 3]
geo_vec = [0] * 6
def incre(set_type):
geo_vec[set_type] += 1
for m in p:
if m[0] // 9 in allowed_types:
len(m) == 1 and incre(0)
len(m) == 2 and abs(m[0] - m[1]) == 0 and incre(3)
len(m) == 2 and abs(m[0] - m[1]) == 1 and incre(2 if m[0] % 9 > 0 and m[1] % 9 < 8 else 1)
len(m) == 2 and abs(m[0] - m[1]) == 2 and incre(1)
len(m) == 3 and incre(5 if m[0] == m[1] else 4)
return geo_vec
class HandAnalyser(HandParti):
    def norm_eff_vec(self, bot):
        """Efficiency vector for the normal / seven-pairs forms.

        For every candidate discard, count (weighted) how many remaining
        tiles would reduce the shantin number after that discard.

        :param bot: bot instance, used only to detect red-five tiles
        :return: list of [tile, efficiency] sorted by decreasing efficiency
        """
        total_revealed = deepcopy(self.revealed)
        for tile in self.h34:
            total_revealed[tile] += 1
        current_shantin = self.current_shantin
        res = []
        for to_discard in set(self.h34):
            tmp_h34 = deepcopy(self.h34)
            tmp_h34.remove(to_discard)
            eff = self._eff_nm_p7p(tmp_h34, total_revealed, current_shantin)
            if to_discard in self.bonus_tiles:
                eff *= 0.9  # discourage discarding bonus (dora) tiles
            # a lone middle "5" that is the red bonus copy is worth keeping
            if to_discard < 27 and to_discard % 9 == 4 and self.h34.count(to_discard) == 1:
                if bot.tile_34_to_136(to_discard) in Tile.RED_BONUS:
                    eff *= 0.9
            res.append([to_discard, eff])
        # secondary key: prefer shedding terminals / off-center tiles first
        res = sorted(res, key=lambda x: 0 if x[0] in Tile.ONENINE else 4 - abs(x[0] % 9 - 4))
        res = sorted(res, key=lambda x: -x[1])
        self.set_19_prior(res)
        return res
    def enforce_eff_vec(self, num_turn, bot):
        """Efficiency vector when the bot has committed to a hand form.

        Checks (in priority order) whether a flush (QH), all-triplets (PPH),
        seven pairs (SP) or no-terminals (NO19) plan is decided and returns
        the form-specific efficiency vector; returns None implicitly when no
        form is enforced.

        :param num_turn: current turn number (used by qh_decided)
        :param bot: bot object carrying the decided_* flags
        """
        decided_pph, decided_dy, decided_qh = bot.decided_pph, bot.decided_dy, bot.decided_qh
        def enforce_eff(index):
            # bot.thclient.drawer and bot.thclient.drawer.set_enforce_form(self.names[index])
            return self.spec_eff_vec(index, bot)
        qh_decided = self.qh_decided(num_turn) or decided_qh or self.shantins[self.QH] == self.current_shantin
        pp_decided = self.pp_decided or decided_pph or self.shantins[self.PPH] == self.current_shantin
        if qh_decided and pp_decided:
            # both forms viable: chase whichever is closer to completion
            if self.shantins[self.PPH] < self.shantins[self.QH]:
                return enforce_eff(self.PPH)
            else:
                return enforce_eff(self.QH)
        elif qh_decided:
            return enforce_eff(self.QH)
        elif pp_decided:
            return enforce_eff(self.PPH)
        if self.sp_decided:
            return enforce_eff(self.SP)
        if decided_dy:
            return enforce_eff(self.NO19)
    def deep_eff_vec(self, bot):
        """Two-ply efficiency: rank discards by first-level tile acceptance,
        breaking near-ties with the average second-level efficiency.

        :param bot: bot instance, used only to detect red-five tiles
        :return: list of (tile, acceptance) pairs, best discard first
        """
        deep_eff = {}    # to_discard -> mean efficiency after a useful draw
        normal_eff = {}  # to_discard -> weighted count of useful draws
        total_revealed = deepcopy(self.revealed)
        for tile in self.h34:
            total_revealed[tile] += 1
        current_shantin = self.current_shantin
        for to_discard in set(self.h34):
            tmp_h34 = deepcopy(self.h34)
            tmp_h34.remove(to_discard)
            drawn_sum = 0
            total_eff = 0
            hand_ana = HandAnalyser(tmp_h34, self.m34, [1, 0, 0, 0, 1, 0], self.bonus_winds, self.revealed, self.bonus_tiles)
            # Only explore discards that keep the current shantin number.
            if hand_ana.shantins[self.NORMAL] == current_shantin or hand_ana.shantins[self.SP] == current_shantin:
                for drawn in range(34):
                    if total_revealed[drawn] < 4:
                        tiles_after_drawn = tmp_h34 + [drawn]
                        hand_ana = HandAnalyser(tiles_after_drawn, self.m34, [1, 0, 0, 0, 1, 0], self.bonus_winds, self.revealed, self.bonus_tiles)
                        if hand_ana.shantins[self.NORMAL] < current_shantin or hand_ana.shantins[self.SP] < current_shantin:
                            # `drawn` advances the hand: weight by copies left
                            remain = 4 - total_revealed[drawn]
                            drawn_sum += remain
                            tmp_revealed = deepcopy(total_revealed)
                            tmp_revealed[drawn] += 1
                            eff = hand_ana._eff_nm_p7p(tiles_after_drawn, tmp_revealed, current_shantin - 1)
                            total_eff += eff * remain
            if drawn_sum > 0:
                factor = 1
                if to_discard in self.bonus_tiles:
                    factor *= 0.9  # discourage discarding bonus (dora) tiles
                if to_discard < 27 and to_discard % 9 == 4 and self.h34.count(to_discard) == 1:
                    if bot.tile_34_to_136(to_discard) in Tile.RED_BONUS:
                        factor *= 0.9
                deep_eff[to_discard] = total_eff / drawn_sum
                normal_eff[to_discard] = drawn_sum * factor
            else:
                deep_eff[to_discard] = 0
                normal_eff[to_discard] = 0
        # primary sort: acceptance (with terminal/center tie-break)
        normal_eff = sorted(normal_eff.items(), key=lambda x: 0 if x[0] in Tile.ONENINE else 4 - abs(x[0] % 9 - 4))
        normal_eff = sorted(normal_eff, key=lambda x: - x[1])
        # within groups whose acceptance differs by < 2, order by deep_eff
        index = 0
        res = []
        while True:
            current_index = index + 1
            while current_index < len(normal_eff) and abs(normal_eff[index][1] - normal_eff[current_index][1]) < 2:
                current_index += 1
            tmp_eff = sorted(normal_eff[index:current_index], key=lambda x: - deep_eff[x[0]])
            for pr in tmp_eff:
                res.append(pr)
            if current_index == len(normal_eff):
                break
            else:
                index = current_index
        return res
    def _eff_nm_p7p(self, tiles, total_revealed, current_shantin):
        """Weighted count of draws that would improve `tiles` past
        `current_shantin` for the normal or seven-pairs (p7p) form.

        :param tiles: 34-format hand after a candidate discard
        :param total_revealed: per-tile count of copies already visible
        :param current_shantin: shantin number the draw must beat
        :return: sum over useful draws of (copies left * bonus factor)
        """
        eff = 0
        for drawn in range(34):
            # skip exhausted tiles and tiles with no neighbour in hand
            if total_revealed[drawn] >= 4 or not self._has_adj(drawn):
                continue
            tiles_after = tiles + [drawn]
            forms = [1, 0, 0, 0, 1, 0]  # evaluate NORMAL and SP forms only
            hand_analiser = HandAnalyser(tiles_after, self.m34, forms, self.bonus_winds, self.revealed, self.bonus_tiles)
            if hand_analiser.shantins[self.NORMAL] < current_shantin or \
                    hand_analiser.shantins[self.SP] < current_shantin:
                eff += (4 - total_revealed[drawn]) * self._get_factor(drawn)
        return eff
def spec_eff_vec(self, goal_form, bot):
total_revealed = deepcopy(self.revealed)
for tile in self.h34:
total_revealed[tile] += 1
current_shantin = self.shantins[goal_form]
res = []
for to_discard in set(self.h34):
tmp_h34 = deepcopy(self.h34)
tmp_h34.remove(to_discard)
eff = self._eff_spec(tmp_h34, total_revealed, current_shantin, goal_form)
if to_discard in self.bonus_tiles:
eff *= 0.9
res.append([to_discard, eff])
norm_res = self.norm_eff_vec(bot)
norm_eff = {x[0]: x[1] for x in norm_res}
res = sorted(res, key=lambda x: 0 if x[0] in Tile.ONENINE else 4 - abs(x[0] % 9 - 4))
res = sorted(res, key=lambda x: - norm_eff[x[0]])
res = sorted(res, key=lambda x: -x[1])
self.set_19_prior(res)
return res
def _eff_spec(self, tiles, total_revealed, current_shantin, form):
eff = 0
for drawn in range(34):
if total_revealed[drawn] >= 4 or (form != self.QH and not self._has_adj(drawn)):
continue
forms = [0] * 6
forms[form] = 1
tiles_after = tiles + [drawn]
hand_analiser = HandAnalyser(tiles_after, self.m34, forms, self.bonus_winds, self.revealed, self.bonus_tiles)
if hand_analiser.shantins[form] < current_shantin:
eff += (4 - total_revealed[drawn]) * self._get_factor(drawn)
return eff
def _get_factor(self, tile):
factor = 1
if tile < 27:
if (tile - 2) // 9 == tile // 9 and (tile - 2) in self.bonus_tiles or \
(tile + 2) // 9 == tile // 9 and (tile + 2) in self.bonus_tiles:
factor += 0.2
if (tile - 1) // 9 == tile // 9 and (tile - 1) in self.bonus_tiles or \
(tile + 1) // 9 == tile // 9 and (tile + 1) in self.bonus_tiles:
factor += 0.4
if tile in self.bonus_tiles:
factor += 0.7
return factor
def _has_adj(self, tile):
if tile > 26:
if tile in self.h34:
return True
else:
for diff in range(-2, 3):
if (tile + diff) // 9 == tile // 9 and (tile + diff) in self.h34:
return True
return False
def set_19_prior(self, res_lst):
f19 = -1
for r in res_lst:
if r[0] in Tile.ONENINE:
f19 = res_lst.index(r)
break
while f19 > 0 and abs(res_lst[f19 - 1][1] - | |
# -*- coding: utf-8 -*-
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from odoo.tests import common
def strip_prefix(prefix, names):
    """Return the entries of *names* that start with *prefix*, prefix removed."""
    stripped = []
    for name in names:
        if name.startswith(prefix):
            stripped.append(name[len(prefix):])
    return stripped
class TestOnChange(common.TransactionCase):
    def setUp(self):
        """Cache the test models used throughout the onchange tests."""
        super(TestOnChange, self).setUp()
        self.Discussion = self.env['test_new_api.discussion']
        self.Message = self.env['test_new_api.message']
        self.EmailMessage = self.env['test_new_api.emailmessage']
    def test_default_get(self):
        """ checking values returned by default_get() """
        fields = ['name', 'categories', 'participants', 'messages']
        values = self.Discussion.default_get(fields)
        # none of these fields declares a default, so the result is empty
        self.assertEqual(values, {})
    def test_get_field(self):
        """ checking that accessing an unknown attribute does nothing special """
        # an unknown attribute must raise AttributeError, not be silently created
        with self.assertRaises(AttributeError):
            self.Discussion.not_really_a_method()
    def test_onchange(self):
        """ test the effect of onchange() """
        discussion = self.env.ref('test_new_api.discussion_0')
        BODY = "What a beautiful day!"
        USER = self.env.user
        # all three fields are declared as onchange triggers in the spec
        field_onchange = self.Message._onchange_spec()
        self.assertEqual(field_onchange.get('author'), '1')
        self.assertEqual(field_onchange.get('body'), '1')
        self.assertEqual(field_onchange.get('discussion'), '1')
        # changing 'discussion' should recompute 'name'
        values = {
            'discussion': discussion.id,
            'name': "[%s] %s" % ('', USER.name),
            'body': False,
            'author': USER.id,
            'size': 0,
        }
        self.env.cache.invalidate()
        result = self.Message.onchange(values, 'discussion', field_onchange)
        self.assertIn('name', result['value'])
        self.assertEqual(result['value']['name'], "[%s] %s" % (discussion.name, USER.name))
        # changing 'body' should recompute 'size'
        values = {
            'discussion': discussion.id,
            'name': "[%s] %s" % (discussion.name, USER.name),
            'body': BODY,
            'author': USER.id,
            'size': 0,
        }
        self.env.cache.invalidate()
        result = self.Message.onchange(values, 'body', field_onchange)
        self.assertIn('size', result['value'])
        self.assertEqual(result['value']['size'], len(BODY))
        # changing 'body' should not recompute 'name', even if 'discussion' and
        # 'name' are not consistent with each other
        values = {
            'discussion': discussion.id,
            'name': False,
            'body': BODY,
            'author': USER.id,
            'size': 0,
        }
        self.env.cache.invalidate()
        result = self.Message.onchange(values, 'body', field_onchange)
        self.assertNotIn('name', result['value'])
    def test_onchange_many2one(self):
        """ test that changing a many2one recomputes a dependent many2one """
        Category = self.env['test_new_api.category']
        field_onchange = Category._onchange_spec()
        self.assertEqual(field_onchange.get('parent'), '1')
        root = Category.create(dict(name='root'))
        values = {
            'name': 'test',
            'parent': root.id,
            'root_categ': False,
        }
        self.env.cache.invalidate()
        result = Category.onchange(values, 'parent', field_onchange).get('value', {})
        # many2one values are returned in name_get format: (id, display_name)
        self.assertIn('root_categ', result)
        self.assertEqual(result['root_categ'], root.name_get()[0])
        values.update(result)
        values['parent'] = False
        self.env.cache.invalidate()
        result = Category.onchange(values, 'parent', field_onchange).get('value', {})
        # clearing the parent must clear the derived root category as well
        self.assertIn('root_categ', result)
        self.assertIs(result['root_categ'], False)
    def test_onchange_one2many(self):
        """ test the effect of onchange() on one2many fields """
        USER = self.env.user
        # create an independent message
        message1 = self.Message.create({'body': "ABC"})
        message2 = self.Message.create({'body': "ABC"})
        self.assertEqual(message1.name, "[%s] %s" % ('', USER.name))
        # 'name' and 'messages' are triggers, and the message subfields are
        # part of the onchange spec
        field_onchange = self.Discussion._onchange_spec()
        self.assertEqual(field_onchange.get('name'), '1')
        self.assertEqual(field_onchange.get('messages'), '1')
        self.assertItemsEqual(
            strip_prefix('messages.', field_onchange),
            ['author', 'body', 'name', 'size', 'important'],
        )
        # modify discussion name
        values = {
            'name': "Foo",
            'categories': [],
            'moderator': False,
            'participants': [],
            'messages': [
                (4, message1.id),
                (4, message2.id),
                (1, message2.id, {'body': "XYZ"}),
                (0, 0, {
                    'name': "[%s] %s" % ('', USER.name),
                    'body': "ABC",
                    'author': USER.id,
                    'size': 3,
                    'important': False,
                }),
            ],
        }
        self.env.cache.invalidate()
        result = self.Discussion.onchange(values, 'name', field_onchange)
        self.assertIn('messages', result['value'])
        # the whole list is sent back: (5,) clears it, then existing lines are
        # re-sent as updates (1, id, vals) and the new line as a create (0, 0, vals)
        self.assertEqual(result['value']['messages'], [
            (5,),
            (1, message1.id, {
                'name': "[%s] %s" % ("Foo", USER.name),
                'body': "ABC",
                'author': USER.name_get()[0],
                'size': 3,
                'important': False,
            }),
            (1, message2.id, {
                'name': "[%s] %s" % ("Foo", USER.name),
                'body': "XYZ",  # this must be sent back
                'author': USER.name_get()[0],
                'size': 3,
                'important': False,
            }),
            (0, 0, {
                'name': "[%s] %s" % ("Foo", USER.name),
                'body': "ABC",
                'author': USER.name_get()[0],
                'size': 3,
                'important': False,
            }),
        ])
        # ensure onchange changing one2many without subfield works
        one_level_fields = {k: v for k, v in field_onchange.items() if k.count('.') < 1}
        values = dict(values, name='{generate_dummy_message}')
        result = self.Discussion.with_context(generate_dummy_message=True).onchange(values, 'name', one_level_fields)
        # without subfields in the spec, new lines come back with empty values
        self.assertEqual(result['value']['messages'], [
            (5,),
            (4, message1.id),
            (4, message2.id),
            (0, 0, {}),
            (0, 0, {}),
        ])
    def test_onchange_one2many_reference(self):
        """ test the effect of onchange() on one2many fields with line references """
        BODY = "What a beautiful day!"
        USER = self.env.user
        # client-side virtual id used to identify the not-yet-saved line
        REFERENCE = "virtualid42"
        field_onchange = self.Discussion._onchange_spec()
        self.assertEqual(field_onchange.get('name'), '1')
        self.assertEqual(field_onchange.get('messages'), '1')
        self.assertItemsEqual(
            strip_prefix('messages.', field_onchange),
            ['author', 'body', 'name', 'size', 'important'],
        )
        # modify discussion name, and check that the reference of the new line
        # is returned
        values = {
            'name': "Foo",
            'categories': [],
            'moderator': False,
            'participants': [],
            'messages': [
                (0, REFERENCE, {
                    'name': "[%s] %s" % ('', USER.name),
                    'body': BODY,
                    'author': USER.id,
                    'size': len(BODY),
                    'important': False,
                }),
            ],
        }
        self.env.cache.invalidate()
        result = self.Discussion.onchange(values, 'name', field_onchange)
        self.assertIn('messages', result['value'])
        self.assertItemsEqual(result['value']['messages'], [
            (5,),
            (0, REFERENCE, {
                'name': "[%s] %s" % ("Foo", USER.name),
                'body': BODY,
                'author': USER.name_get()[0],
                'size': len(BODY),
                'important': False,
            }),
        ])
    def test_onchange_one2many_multi(self):
        """ test the effect of multiple onchange methods on one2many fields """
        partner1 = self.env.ref('base.res_partner_1')
        multi = self.env['test_new_api.multi'].create({'partner': partner1.id})
        line1 = multi.lines.create({'multi': multi.id})
        field_onchange = multi._onchange_spec()
        # only 'name' and 'partner' are triggers; line subfields are listed
        # in the spec but are not triggers themselves
        self.assertEqual(field_onchange, {
            'name': '1',
            'partner': '1',
            'lines': None,
            'lines.name': None,
            'lines.partner': None,
            'lines.tags': None,
            'lines.tags.name': None,
        })
        values = multi._convert_to_write({key: multi[key] for key in ('name', 'partner', 'lines')})
        self.assertEqual(values, {
            'name': partner1.name,
            'partner': partner1.id,
            'lines': [(6, 0, [line1.id])],
        })
        # modify 'partner'
        # -> set 'partner' on all lines
        # -> recompute 'name'
        # -> set 'name' on all lines
        partner2 = self.env.ref('base.res_partner_2')
        values = {
            'name': partner1.name,
            'partner': partner2.id,  # this one just changed
            'lines': [(6, 0, [line1.id]),
                      (0, 0, {'name': False, 'partner': False, 'tags': [(5,)]})],
        }
        self.env.cache.invalidate()
        result = multi.onchange(values, 'partner', field_onchange)
        self.assertEqual(result['value'], {
            'name': partner2.name,
            'lines': [
                (5,),
                (1, line1.id, {
                    'name': partner2.name,
                    'partner': (partner2.id, partner2.name),
                    'tags': [(5,)],
                }),
                (0, 0, {
                    'name': partner2.name,
                    'partner': (partner2.id, partner2.name),
                    'tags': [(5,)],
                }),
            ],
        })
        # do it again, but this time with a new tag on the second line
        values = {
            'name': partner1.name,
            'partner': partner2.id,  # this one just changed
            'lines': [(6, 0, [line1.id]),
                      (0, 0, {'name': False,
                              'partner': False,
                              'tags': [(5,), (0, 0, {'name': 'Tag'})]})],
        }
        self.env.cache.invalidate()
        result = multi.onchange(values, 'partner', field_onchange)
        expected_value = {
            'name': partner2.name,
            'lines': [
                (5,),
                (1, line1.id, {
                    'name': partner2.name,
                    'partner': (partner2.id, partner2.name),
                    'tags': [(5,)],
                }),
                (0, 0, {
                    'name': partner2.name,
                    'partner': (partner2.id, partner2.name),
                    'tags': [(5,), (0, 0, {'name': 'Tag'})],
                }),
            ],
        }
        self.assertEqual(result['value'], expected_value)
        # ensure ID is not returned when asked and a many2many record is set to be created
        self.env.cache.invalidate()
        result = multi.onchange(values, 'partner', dict(field_onchange, **{'lines.tags.id': None}))
        self.assertEqual(result['value'], expected_value)
        # ensure inverse of one2many field is not returned
        self.env.cache.invalidate()
        result = multi.onchange(values, 'partner', dict(field_onchange, **{'lines.multi': None}))
        self.assertEqual(result['value'], expected_value)
    def test_onchange_specific(self):
        """ test the effect of field-specific onchange method """
        discussion = self.env.ref('test_new_api.discussion_0')
        demo = self.env.ref('base.user_demo')
        field_onchange = self.Discussion._onchange_spec()
        self.assertEqual(field_onchange.get('moderator'), '1')
        self.assertItemsEqual(
            strip_prefix('participants.', field_onchange),
            ['display_name'],
        )
        # first remove demo user from participants
        discussion.participants -= demo
        self.assertNotIn(demo, discussion.participants)
        # check that demo_user is added to participants when set as moderator
        values = {
            'name': discussion.name,
            'moderator': demo.id,
            'categories': [(4, cat.id) for cat in discussion.categories],
            'messages': [(4, msg.id) for msg in discussion.messages],
            'participants': [(4, usr.id) for usr in discussion.participants],
        }
        self.env.cache.invalidate()
        result = discussion.onchange(values, 'moderator', field_onchange)
        self.assertIn('participants', result['value'])
        self.assertItemsEqual(
            result['value']['participants'],
            [(5,)] + [(4, user.id) for user in discussion.participants + demo],
        )
    def test_onchange_default(self):
        """ test the effect of a conditional user-default on a field """
        Foo = self.env['test_new_api.foo']
        field_onchange = Foo._onchange_spec()
        # 'value1' is a trigger because it is declared with change_default
        self.assertTrue(Foo._fields['value1'].change_default)
        self.assertEqual(field_onchange.get('value1'), '1')
        # create a user-defined default based on 'value1'
        self.env['ir.default'].set('test_new_api.foo', 'value2', 666, condition='value1=42')
        # setting 'value1' to 42 should trigger the change of 'value2'
        self.env.cache.invalidate()
        values = {'name': 'X', 'value1': 42, 'value2': False}
        result = Foo.onchange(values, 'value1', field_onchange)
        self.assertEqual(result['value'], {'value2': 666})
        # setting 'value1' to 24 should not trigger the change of 'value2'
        self.env.cache.invalidate()
        values = {'name': 'X', 'value1': 24, 'value2': False}
        result = Foo.onchange(values, 'value1', field_onchange)
        self.assertEqual(result['value'], {})
    def test_onchange_one2many_value(self):
        """ test the value of the one2many field inside the onchange """
        discussion = self.env.ref('test_new_api.discussion_0')
        demo = self.env.ref('base.user_demo')
        field_onchange = self.Discussion._onchange_spec()
        self.assertEqual(field_onchange.get('messages'), '1')
        self.assertEqual(len(discussion.messages), 3)
        # turn the first line into an update command with a modified body
        messages = [(4, msg.id) for msg in discussion.messages]
        messages[0] = (1, messages[0][1], {'body': 'test onchange'})
        lines = ["%s:%s" % (m.name, m.body) for m in discussion.messages]
        lines[0] = "%s:%s" % (discussion.messages[0].name, 'test onchange')
        values = {
            'name': discussion.name,
            'moderator': demo.id,
            'categories': [(4, cat.id) for cat in discussion.categories],
            'messages': messages,
            'participants': [(4, usr.id) for usr in discussion.participants],
            'message_concat': False,
        }
        # the computed concatenation must see the pending (1, id, vals) change
        result = discussion.onchange(values, 'messages', field_onchange)
        self.assertIn('message_concat', result['value'])
        self.assertEqual(result['value']['message_concat'], "\n".join(lines))
def test_onchange_one2many_with_domain_on_related_field(self):
""" test the value of the one2many field when defined with a domain on a related field"""
discussion = self.env.ref('test_new_api.discussion_0')
demo = self.env.ref('base.user_demo')
# mimic UI behaviour, so we get subfields
# (we need at least subfield: 'important_emails.important')
view_info = self.Discussion.fields_view_get(
view_id=self.env.ref('test_new_api.discussion_form').id,
view_type='form')
field_onchange = self.Discussion._onchange_spec(view_info=view_info)
self.assertEqual(field_onchange.get('messages'), '1')
BODY = "What a beautiful day!"
USER = self.env.user
# create standalone email
email = self.EmailMessage.create({
'discussion': discussion.id,
'name': "[%s] %s" % ('', USER.name),
'body': BODY,
'author': USER.id,
'important': False,
'email_to': demo.email,
})
# check if server-side cache is working correctly
self.env.cache.invalidate()
self.assertIn(email, discussion.emails)
self.assertNotIn(email, discussion.important_emails)
email.important = True
self.assertIn(email, discussion.important_emails)
# check | |
be on the X-axis in comparative plots) in comparative analysis plots is determined by the order in PredictionSets
assert(take_lowest > 0 and (int(take_lowest) == take_lowest))
assert(0 <= burial_cutoff <= 2.0)
assert(stability_classication_experimental_cutoff > 0)
assert(stability_classication_predicted_cutoff > 0)
# assert PredictionSet for PredictionSet in PredictionSets is in the database
# calls get_analysis_dataframe(options) over all PredictionSets
# if output_directory is set, save files
# think about how to handle this in-memory. Maybe return a dict like:
#"run_analyis" -> benchmark_name -> {analysis_type -> object}
#"comparative_analysis" -> (benchmark_name_1, benchmark_name_2) -> {analysis_type -> object}
# comparative analysis
# only compare dataframes with the exact same points
# allow cutoffs, take_lowest to differ but report if they do so
@analysis_api
def determine_best_pair(self, prediction_id, score_method_id = 1):
'''This returns the best wildtype/mutant pair for a prediction given a scoring method. NOTE: Consider generalising this to the n best pairs.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
    @analysis_api
    def create_abacus_graph_for_a_single_structure(self, PredictionSet, scoring_method, scoring_type, graph_title = None, PredictionIDs = None, graph_filename = None, cached_results = None, num_datapoints = 0):
        '''This function creates an abacus graph for one PDB file. It is useful when scanning all mutations at all positions
           on small proteins e.g. ubiquitin to show which mutations at which positions are likely to improve the stability or
           binding affinity.
           The num_datapoints variable is mainly for debugging - I was tuning the resolution/DPI to fit the number of datapoints.'''
        # NOTE(review): this unconditional raise deliberately disables the method;
        # everything below it is unreachable until the raise is removed.
        raise Exception('This should work or nearly work. Test it again when we have real data. Does it assume single point mutations?')
        results = cached_results
        if not results:
            results = self.get_flattened_prediction_results(PredictionSet)
        # The abacus layout only makes sense for a single structure: collect the
        # distinct PDB ids and bail out if there is more than one.
        pdb_ids = set()
        for r in results:
            pdb_ids.add(r['PDBFileID'])
        if len(pdb_ids) != 1:
            raise Exception('This function is only meant to be called when the PredictionSet or the set of results contains records for a single structure. The set of results contains %d structures.' % len(pdb_ids))
        # Key the records by (predicted ddG, experiment id) so iteration below is
        # ordered by predicted score.
        sortable_results = {}
        for r in results:
            if (not PredictionIDs) or (r['PredictionID'] in PredictionIDs):
                sortable_results[(json.loads(r['Scores'])['data'][scoring_method][scoring_type]['ddG'], r['ExperimentID'])] = r
        count = 0
        set_of_mutations = set()
        for k, r in sorted(sortable_results.iteritems()):
            #if r['FlattenedMutations'].find('A E141L') != -1 and r['FlattenedMutations'].find('A S142A') != -1 and r['FlattenedMutations'].find('A L78Y') != -1:
            #    print('%f, %s' % (k[0], r['FlattenedMutations']))
            #if r['FlattenedMutations'].find('A W103M') != -1 and r['FlattenedMutations'].find('A F70Y') != -1:
            #    if r['FlattenedMutations'].find('A E141L') == -1 and r['FlattenedMutations'].find('A S142A') == -1 and r['FlattenedMutations'].find('A L78Y') == -1:
            #        print('%f, %s' % (k[0], r['FlattenedMutations']))
            # NOTE(review): these hard-coded mutation strings look like leftovers
            # from debugging a specific dataset - confirm before reuse.
            if r['FlattenedMutations'].find('A W103M') != -1 and r['FlattenedMutations'].find('A F70Y') != -1:
                if r['FlattenedMutations'].find('A E141L') == -1 and r['FlattenedMutations'].find('A S142A') == -1 and r['FlattenedMutations'].find('A L78Y') == -1:
                    #print('%f, %s' % (k[0], r['FlattenedMutations']))
                    count += 1
            #A E141L, A S142A
            # Collect each individual mutation keyed by residue number for sorting.
            mutations = [m for m in map(string.strip, r['FlattenedMutations'].split(',')) if m]
            for m in mutations:
                set_of_mutations.add((int(m.split()[1][1:-1]), m))
            #if r['FlattenedMutations'].find('A L78Y') == -1:
            #    print('%f, %s' % (k[0], r['FlattenedMutations']))
            #    #count += 1
        # Build one row per record: a 0/1 vector marking which mutations it contains,
        # paired with its predicted ddG.
        pruned_data = []
        for k, r in sorted(sortable_results.iteritems()):
            line = []
            #print(json.loads(r['Scores'])['data'][scoring_method][scoring_type]['ddG'], r['FlattenedMutations'])
            for m in sorted(set_of_mutations):
                if r['FlattenedMutations'].find(m[1]) != -1:
                    line.append(1)
                else:
                    line.append(0)
            pruned_data.append((json.loads(r['Scores'])['data'][scoring_method][scoring_type]['ddG'], line))
        labels = [m[1].split()[1] for m in sorted(set_of_mutations)]
        graph_title = graph_title or r'$\Delta\Delta$G predictions for %s (%s.%s)' % (PredictionSet, scoring_method.replace(',0A', '.0$\AA$').replace('_', ' '), scoring_type)
        # num_datapoints == 0 means "keep everything" (debugging aid, see docstring).
        pruned_data = pruned_data[0:num_datapoints or len(pruned_data)]
        colortext.message('Creating graph with %d datapoints...' % len(pruned_data))
        # The graph needs at least two rows with a marked mutation to be meaningful.
        number_of_non_zero_datapoints = 0
        for p in pruned_data:
            if 1 in p[1]:
                number_of_non_zero_datapoints += 1
                if number_of_non_zero_datapoints > 1:
                    break
        if number_of_non_zero_datapoints < 2:
            raise Exception('The dataset must contain at least two non-zero points.')
        if graph_filename:
            return self.write_abacus_graph(graph_filename, graph_title, labels, pruned_data, scoring_method, scoring_type)
        else:
            return self.create_abacus_graph(graph_title, labels, pruned_data, scoring_method, scoring_type)
################################################################################################
## Application layer
## These functions combine the database and prediction data with useful klab
################################################################################################
#== PyMOL API ===========================================================
@app_pymol
def create_pymol_session_in_memory(self, prediction_id, task_number, pymol_executable = '/var/www/tg2/tg2env/designdb/pymol/pymol/pymol'):
'''Returns (in memory) a PyMOL session for a pair of structures.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
    @app_pymol
    def write_pymol_session(self, prediction_id, task_number, output_filepath, pymol_executable = '/var/www/tg2/tg2env/designdb/pymol/pymol/pymol'):
        '''Writes the PyMOL session for a pair of structures to disk.

           :param output_filepath: Path the binary PSE session file is written to.
        '''
        # Delegate session creation to the subclass implementation, then persist
        # the binary PSE contents.
        PSE_file_contents = self.create_pymol_session_in_memory(prediction_id, task_number, pymol_executable = pymol_executable)
        write_file(output_filepath, PSE_file_contents, 'wb')
@general_data_entry
def associate_pdb_file_with_project(self, pdb_file_id, project_id, notes = None):
tsession = self.get_session(new_session = True)
record = None
try:
record = get_or_create_in_transaction(tsession, dbmodel.ProjectPDBFile, dict(
PDBFileID = pdb_file_id,
ProjectID = project_id,
Notes = notes,
))
tsession.commit()
tsession.close()
except Exception, e:
tsession.rollback()
tsession.close()
raise
return record
    @general_data_entry
    def add_dataset(self, user_id, long_id, short_id, description, has_stability_ddg_records, has_binding_affinity_ddg_records, has_binding_affinity_de_records, ddg_convention, dataset_creation_start_date = None, dataset_creation_end_date = None, publication_ids = [], existing_session = None):
        '''Adds a UserDataSet record. This is typically called before add_user_dataset_case which adds the user dataset
           experiment records (e.g. UserDataSetExperiment or UserPPDataSetExperiment records).
           :param user_id: User ID for the user adding this dataset to the database.
           :param long_id: This should be a descriptive name e.g. "SSM_Psd95-CRIPT_Rama_10.1038/nature11500" which describes the type of dataset (SSM on the Psd95-CRIPT complex) and includes the DOI of the associated publication.
           :param short_id: A short ID which will be used to refer to the dataset by humans e.g. "Psd95-CRIPT".
           :param description: A description of the dataset.
           :param has_stability_ddg_records: Does the dataset contain DDG data for monomeric stability assays?
           :param has_binding_affinity_ddg_records: Does the dataset contain DDG data for binding affinity assays?
           :param has_binding_affinity_de_records: Does the dataset contain DeltaE data for binding affinity assays?
           :param ddg_convention: Either "Rosetta" (negative values indicate higher stability or binding) or "ProTherm" (negative values indicate lower stability or binding).
           :param dataset_creation_start_date: The date when the dataset was first created. For publication datasets, this should be the publication date. For updated resources like ProTherm, this should be the publication date for the first revision.
           :param dataset_creation_end_date: The date when the dataset was last modified or finalized. For publication datasets, this should be the publication date. For updated resources like ProTherm, this should be the publication date for the latest revision.
           :param publication_id: A list of Publication.ID field values from the associated publications.
           :return: The SQLAlchemy DataSet object.
        '''
        # NOTE(review): publication_ids uses a mutable default argument; safe only
        # because it is never mutated here.
        # When an existing session is passed in, the caller owns commit/rollback/close.
        tsession = existing_session or self.get_session(new_session = True)
        try:
            user_record = tsession.query(dbmodel.User).filter(dbmodel.User.ID == user_id).one()
        except:
            raise Exception('Could not retrieve a record for user "{0}".'.format(user_id))
        if not (ddg_convention == 'Rosetta' or ddg_convention == 'ProTherm'):
            raise Exception('The DDG convention should be specified as either "Rosetta" (negative values indicate higher stability or binding) or "ProTherm" (negative values indicate lower stability or binding).')
        # These limits mirror the DataSet.ID / DataSet.ShortID column sizes.
        if (len(long_id) > 128) or (len(short_id) > 32):
            raise Exception('The long ID is limited to 128 characters and the short ID is limited to 32 characters.')
        dataset_dict = {}
        try:
            dataset_dict = dict(
                ID = long_id,
                ShortID = short_id,
                UserID = user_id,
                Description = description,
                DatasetType = self._get_prediction_dataset_type(),
                ContainsStabilityDDG = has_stability_ddg_records,
                ContainsBindingAffinityDDG = has_binding_affinity_ddg_records,
                ContainsBindingAffinityDE = has_binding_affinity_de_records,
                CreationDateStart = dataset_creation_start_date,
                CreationDateEnd = dataset_creation_end_date,
                DDGConvention = ddg_convention,
            )
            data_set = get_or_create_in_transaction(tsession, dbmodel.DataSet, dataset_dict, variable_columns = ['Description', 'CreationDateStart', 'CreationDateEnd'])
            data_set_id = data_set.ID
            # Link the dataset to each associated publication.
            for publication_id in publication_ids:
                dataset_reference = get_or_create_in_transaction(tsession, dbmodel.DataSetReference, dict(
                    DataSetID = data_set_id,
                    Publication = publication_id,
                ))
            if existing_session == None:
                tsession.commit()
                tsession.close()
            return data_set
        except Exception, e:
            colortext.error('An exception occurred while adding the dataset:\n\n{0}\n\n{1}\n{2}'.format(pprint.pformat(dataset_dict), str(e), traceback.format_exc()))
            if existing_session == None:
                tsession.rollback()
                tsession.close()
            raise
    @general_data_entry
    def add_user_dataset(self, user_id, text_id, description, analyze_ddg, analyze_de, existing_session = None):
        '''Adds a UserDataSet record. This is typically called before add_user_dataset_case which adds the user dataset
           experiment records (e.g. UserDataSetExperiment or UserPPDataSetExperiment records).

           :param user_id: User ID for the user adding this dataset to the database.
           :param text_id: Human-readable identifier for the user dataset.
           :param description: A description of the dataset.
           :param analyze_ddg: Whether DDG analysis should be run over this dataset.
           :param analyze_de: Whether DeltaE analysis should be run over this dataset.
           :return: The SQLAlchemy UserDataSet object.
        '''
        # Both timestamps start out equal; LastModified is updated elsewhere.
        dt = datetime.datetime.now()
        # When an existing session is passed in, the caller owns commit/rollback/close.
        tsession = existing_session or self.get_session(new_session = True)
        try:
            user_record = tsession.query(dbmodel.User).filter(dbmodel.User.ID == user_id).one()
        except:
            raise Exception('Could not retrieve a record for user "{0}".'.format(user_id))
        user_dataset_dict = {}
        try:
            user_dataset_dict = dict(
                TextID = text_id,
                UserID = user_id,
                Description = description,
                DatasetType = self._get_prediction_dataset_type(),
                AnalyzeDDG = analyze_ddg,
                AnalyzeDE = analyze_de,
                FirstCreated = dt,
                LastModified = dt,
            )
            user_data_set = get_or_create_in_transaction(tsession, dbmodel.UserDataSet, user_dataset_dict, missing_columns = ['ID'], variable_columns = ['Description', 'FirstCreated', 'LastModified'])
            if existing_session == None:
                tsession.commit()
                tsession.close()
            return user_data_set
        except Exception, e:
            colortext.error('An exception occurred while adding the user dataset:\n\n{0}\n\n{1}\n{2}'.format(pprint.pformat(user_dataset_dict), str(e), traceback.format_exc()))
            if existing_session == None:
                tsession.rollback()
                tsession.close()
            raise
@general_data_entry
def add_ddg_user_dataset(self, user_id, text_id, description, existing_session = None):
'''Convenience wrapper for add_user_dataset for DDG-only user datasets.'''
return self.add_user_dataset(user_id, text_id, description, | |
if 64 - 64: i11iIiiIii
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
if 60 - 60: iIii1I11I1II1 / i1IIi * oO0o - I1ii11iIi11i + o0oOOo0O0Ooo
if 94 - 94: i1IIi % Oo0Ooo
if 68 - 68: Ii1I / O0
if 46 - 46: O0 * II111iiii / IiII * Oo0Ooo * iII111i . I11i
if 62 - 62: i11iIiiIii - II111iiii % I1Ii111 - iIii1I11I1II1 . I1ii11iIi11i . II111iiii
if 61 - 61: oO0o / OoOoOO00 / iII111i * OoO0O00 . II111iiii
if 1 - 1: II111iiii - I1ii11iIi11i % i11iIiiIii + IiII . I1Ii111
if 55 - 55: iIii1I11I1II1 - I1IiiI . Ii1I * IiII * i1IIi / iIii1I11I1II1
if 79 - 79: oO0o + I1Ii111 . ooOoO0o * IiII % I11i . I1IiiI
if 94 - 94: iII111i * Ii1I / IiII . i1IIi * iII111i
if 47 - 47: i1IIi % i11iIiiIii
if 20 - 20: ooOoO0o * II111iiii
if 65 - 65: o0oOOo0O0Ooo * iIii1I11I1II1 * ooOoO0o
if 18 - 18: iIii1I11I1II1 / I11i + oO0o / Oo0Ooo - II111iiii - I11i
if 1 - 1: I11i - OOooOOo % O0 + I1IiiI - iII111i / I11i
if 31 - 31: OoO0O00 + II111iiii
if 13 - 13: OOooOOo * oO0o * I1IiiI
if 55 - 55: II111iiii
if 43 - 43: OoOoOO00 - i1IIi + I1Ii111 + Ii1I
import struct
# Optional psyco JIT acceleration (this is Python 2 code - note the print
# statement; psyco only ever supported Python 2).
try :
    import psyco
    have_psyco = True
    print 'psyco enabled'
except :
    have_psyco = False
if 17 - 17: o0oOOo0O0Ooo
if 64 - 64: Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
class ChaCha ( object ) :
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
if 78 - 78: OoO0O00
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
if 77 - 77: Oo0Ooo . IiII % ooOoO0o
if 42 - 42: oO0o - i1IIi / i11iIiiIii + OOooOOo + OoO0O00
if 17 - 17: oO0o . Oo0Ooo . I1ii11iIi11i
if 3 - 3: OoOoOO00 . Oo0Ooo . I1IiiI / Ii1I
if 38 - 38: II111iiii % i11iIiiIii . ooOoO0o - OOooOOo + Ii1I
if 66 - 66: OoooooooOO * OoooooooOO . OOooOOo . i1IIi - OOooOOo
if 77 - 77: I11i - iIii1I11I1II1
if 82 - 82: i11iIiiIii . OOooOOo / Oo0Ooo * O0 % oO0o % iIii1I11I1II1
if 78 - 78: iIii1I11I1II1 - Ii1I * OoO0O00 + o0oOOo0O0Ooo + iII111i + iII111i
if 11 - 11: iII111i - OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
if 60 - 60: I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
if 83 - 83: OoooooooOO
if 31 - 31: II111iiii - OOooOOo . I1Ii111 % OoOoOO00 - O0
if 4 - 4: II111iiii / ooOoO0o . iII111i
if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
if 50 - 50: I1IiiI
if 34 - 34: I1IiiI * II111iiii % iII111i * OoOoOO00 - I1IiiI
if 33 - 33: o0oOOo0O0Ooo + OOooOOo * OoO0O00 - Oo0Ooo / oO0o % Ii1I
if 21 - 21: OoO0O00 * iIii1I11I1II1 % oO0o * i1IIi
if 16 - 16: O0 - I1Ii111 * iIii1I11I1II1 + iII111i
if 50 - 50: II111iiii - ooOoO0o * I1ii11iIi11i / I1Ii111 + o0oOOo0O0Ooo
if 88 - 88: Ii1I / I1Ii111 + iII111i - II111iiii / ooOoO0o - OoOoOO00
if 15 - 15: I1ii11iIi11i + OoOoOO00 - OoooooooOO / OOooOOo
if 58 - 58: i11iIiiIii % I11i
if 71 - 71: OOooOOo + ooOoO0o % i11iIiiIii + I1ii11iIi11i - IiII
if 88 - 88: OoOoOO00 - OoO0O00 % OOooOOo
if 16 - 16: I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
if 44 - 44: oO0o
if 88 - 88: I1Ii111 % Ii1I . II111iiii
if 38 - 38: o0oOOo0O0Ooo
if 57 - 57: O0 / oO0o * I1Ii111 / OoOoOO00 . II111iiii
if 26 - 26: iII111i
if 91 - 91: OoO0O00 . I1ii11iIi11i + OoO0O00 - iII111i / OoooooooOO
if 39 - 39: I1ii11iIi11i / ooOoO0o - II111iiii
if 98 - 98: I1ii11iIi11i / I11i % oO0o . OoOoOO00
if 91 - 91: oO0o % Oo0Ooo
if 64 - 64: I11i % iII111i - I1Ii111 - oO0o
if 31 - 31: I11i - II111iiii . I11i
if 18 - 18: o0oOOo0O0Ooo
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
if 89 - 89: OoOoOO00
TAU = ( 0x61707865 , 0x3120646e , 0x79622d36 , 0x6b206574 )
SIGMA = ( 0x61707865 , 0x3320646e , 0x79622d32 , 0x6b206574 )
ROUNDS = 8
if 68 - 68: OoO0O00 * OoooooooOO % O0 + OoO0O00 + ooOoO0o
if 4 - 4: ooOoO0o + O0 * OOooOOo
if 55 - 55: Oo0Ooo + iIii1I11I1II1 / OoOoOO00 * oO0o - i11iIiiIii - Ii1I
def __init__(self, key, iv, rounds=ROUNDS):
    """Initialize the cipher: expand *key*, apply the *iv* nonce, and
    record how many mixing rounds to run per block.

    ``key`` must be 16 or 32 bytes (validated by ``_key_setup``).
    """
    self._key_setup(key)
    self.iv_setup(iv)
    self.rounds = rounds
if 67 - 67: I1Ii111 . iII111i . O0
if 10 - 10: I1ii11iIi11i % I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
def _key_setup ( self , key ) :
if len ( key ) not in [ 16 , 32 ] :
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
if 83 - 83: I11i / I1IiiI
if 34 - 34: IiII
if | |
2)).permute(1, 0, 2)
x = self.inner_tf(x)
x = einops.rearrange(x, " inner b (outer mod k) -> b outer inner mod k", outer=self.outer, mod=self.mod,
b=self.batch)
return x
class mod_att(nn.Module):
    """Transformer self-attention across the modality axis.

    Every (batch, outer, inner) position becomes one sequence whose tokens
    are the modalities; each token has ``dmodel`` features.
    """

    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        super().__init__()
        self.pos = pos
        if pos:
            self.pos_mod = PositionalEncoder(d_model=dmodel)
        layer = nn.TransformerEncoderLayer(dmodel, nhead=heads)
        self.inner_mod_tf = nn.TransformerEncoder(layer, num_layers)

    def forward(self, x):
        # Remember the incoming axis sizes so the rearrange can be undone.
        self.batch, self.outer, self.inner, self.mod = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        seq = einops.rearrange(x, "b outer inner mod k -> mod (inner outer b) k")
        if self.pos:
            # PositionalEncoder expects batch-first input; transpose around it.
            seq = self.pos_mod(seq.permute(1, 0, 2)).permute(1, 0, 2)
        seq = self.inner_mod_tf(seq)
        return einops.rearrange(seq, "mod (inner outer b) k-> b outer inner mod k",
                                outer=self.outer, mod=self.mod, b=self.batch)
class mod_att_inner(nn.Module):
    """Modality attention with the inner axis folded into the features.

    Tokens are modalities; each token carries ``dmodel * inner`` features.
    """

    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        super().__init__()
        self.pos = pos
        if pos:
            self.pos_mod = PositionalEncoder(d_model=dmodel*inner)
        layer = nn.TransformerEncoderLayer(dmodel*inner, nhead=heads)
        self.inner_mod_tf = nn.TransformerEncoder(layer, num_layers)

    def forward(self, x):
        # Remember the incoming axis sizes so the rearrange can be undone.
        self.batch, self.outer, self.inner, self.mod = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        seq = einops.rearrange(x, "b outer inner mod k -> mod (outer b) (inner k)")
        if self.pos:
            seq = self.pos_mod(seq.permute(1, 0, 2)).permute(1, 0, 2)
        seq = self.inner_mod_tf(seq)
        return einops.rearrange(seq, " mod (outer b) (inner k)-> b outer inner mod k",
                                outer=self.outer, inner=self.inner, b=self.batch)
class mod_att_outer(nn.Module):
    """Modality attention with the outer axis folded into the features.

    Tokens are modalities; each token carries ``dmodel * outer`` features.
    """

    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        super().__init__()
        self.pos = pos
        if pos:
            self.pos_mod = PositionalEncoder(d_model=dmodel*outer)
        layer = nn.TransformerEncoderLayer(dmodel*outer, nhead=heads)
        self.inner_mod_tf = nn.TransformerEncoder(layer, num_layers)

    def forward(self, x):
        # Remember the incoming axis sizes so the rearrange can be undone.
        self.batch, self.outer, self.inner, self.mod = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        seq = einops.rearrange(x, "b outer inner mod k -> mod (inner b) (outer k)")
        if self.pos:
            seq = self.pos_mod(seq.permute(1, 0, 2)).permute(1, 0, 2)
        seq = self.inner_mod_tf(seq)
        return einops.rearrange(seq, "mod (inner b) (outer k)-> b outer inner mod k",
                                outer=self.outer, inner=self.inner, b=self.batch)
class mod_att_inner_outer(nn.Module):
    """Modality attention with inner AND outer folded into the features.

    Tokens are modalities; each token carries ``dmodel * inner * outer``
    features, so one sequence exists per batch element.
    """

    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        super().__init__()
        self.pos = pos
        if pos:
            self.pos_mod = PositionalEncoder(d_model=dmodel*inner*outer)
        layer = nn.TransformerEncoderLayer(dmodel*inner*outer, nhead=heads)
        self.inner_mod_tf = nn.TransformerEncoder(layer, num_layers)

    def forward(self, x):
        # Remember the incoming axis sizes so the rearrange can be undone.
        self.batch, self.outer, self.inner, self.mod = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        seq = einops.rearrange(x, "b outer inner mod k -> mod b (inner outer k)")
        if self.pos:
            seq = self.pos_mod(seq.permute(1, 0, 2)).permute(1, 0, 2)
        seq = self.inner_mod_tf(seq)
        return einops.rearrange(seq, "mod b (inner outer k)-> b outer inner mod k",
                                outer=self.outer, inner=self.inner, b=self.batch)
class outer_att(nn.Module):
    """Transformer self-attention across the outer (epoch/segment) axis.

    Tokens are outer positions; every (batch, inner, mod) combination is a
    separate sequence with ``dmodel`` features per token.
    """

    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        super().__init__()
        self.pos = pos
        if pos:
            self.pos_outer = PositionalEncoder(d_model=dmodel)
        layer = nn.TransformerEncoderLayer(dmodel, nhead=heads, dim_feedforward=1024)
        self.outer_tf = nn.TransformerEncoder(layer, num_layers)

    def forward(self, x):
        # Remember the incoming axis sizes so the rearrange can be undone.
        self.batch, self.outer, self.inner, self.mod = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        seq = einops.rearrange(x, "b outer inner mod k -> outer (b inner mod) k")
        if self.pos:
            seq = self.pos_outer(seq.permute(1, 0, 2)).permute(1, 0, 2)
        seq = self.outer_tf(seq)
        return einops.rearrange(seq, "outer (b inner mod) k-> b outer inner mod k",
                                mod=self.mod, inner=self.inner, b=self.batch)
class outer_att_mod(nn.Module):
    """Outer-axis attention with modalities folded into the features.

    Tokens are outer positions carrying ``dmodel * modalities`` features.
    """

    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        super().__init__()
        self.pos = pos
        if pos:
            self.pos_outer = PositionalEncoder(d_model=dmodel*modalities)
        layer = nn.TransformerEncoderLayer(dmodel*modalities, nhead=heads)
        self.outer_tf = nn.TransformerEncoder(layer, num_layers)

    def forward(self, x):
        # Remember the incoming axis sizes so the rearrange can be undone.
        self.batch, self.outer, self.inner, self.mod = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        seq = einops.rearrange(x, "b outer inner mod k -> outer (b inner) (mod k)")
        if self.pos:
            seq = self.pos_outer(seq.permute(1, 0, 2)).permute(1, 0, 2)
        seq = self.outer_tf(seq)
        return einops.rearrange(seq, "outer (b inner) (mod k)-> b outer inner mod k",
                                mod=self.mod, inner=self.inner, b=self.batch)
class outer_att_inner(nn.Module):
    """Outer-axis attention with the inner axis folded into the features.

    Tokens are outer positions carrying ``dmodel * inner`` features.
    """

    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        super().__init__()
        self.pos = pos
        if pos:
            self.pos_outer = PositionalEncoder(d_model=dmodel*inner)
        layer = nn.TransformerEncoderLayer(dmodel*inner, nhead=heads)
        self.outer_tf = nn.TransformerEncoder(layer, num_layers)

    def forward(self, x):
        # Remember the incoming axis sizes so the rearrange can be undone.
        self.batch, self.outer, self.inner, self.mod = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        seq = einops.rearrange(x, "b outer inner mod k -> outer (b mod) (inner k)")
        if self.pos:
            seq = self.pos_outer(seq.permute(1, 0, 2)).permute(1, 0, 2)
        seq = self.outer_tf(seq)
        return einops.rearrange(seq, "outer (b mod) (inner k)-> b outer inner mod k",
                                mod=self.mod, inner=self.inner, b=self.batch)
class outer_att_inner_mod(nn.Module):
    """Outer-axis attention with inner AND modality axes folded into features.

    Tokens are outer positions carrying ``dmodel * inner * modalities``
    features, one sequence per batch element.
    """

    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        super().__init__()
        self.pos = pos
        if pos:
            self.pos_outer = PositionalEncoder(d_model=dmodel*inner*modalities)
        layer = nn.TransformerEncoderLayer(dmodel*inner*modalities, nhead=heads)
        self.outer_tf = nn.TransformerEncoder(layer, num_layers)

    def forward(self, x):
        # Remember the incoming axis sizes so the rearrange can be undone.
        self.batch, self.outer, self.inner, self.mod = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        seq = einops.rearrange(x, "b outer inner mod k -> outer b (inner mod k)")
        if self.pos:
            seq = self.pos_outer(seq.permute(1, 0, 2)).permute(1, 0, 2)
        seq = self.outer_tf(seq)
        return einops.rearrange(seq, "outer b (inner mod k) -> b outer inner mod k",
                                mod=self.mod, inner=self.inner, b=self.batch)
class aggregation_att_outer(nn.Module):
    """Attention-weighted aggregation over the ``outer`` axis.

    Uses the project-defined ``Attention`` module (declared elsewhere in
    this file) to score the outer positions.
    """
    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        # Only ``dmodel`` is used; the other arguments keep the constructor
        # signature uniform with the sibling *_att modules.
        super().__init__()
        self.mod_att = Attention(dmodel)
    def forward(self, x):
        x_shape = x.shape
        # Cache axis sizes so the rearrange below can be inverted.
        self.batch, self.outer, self.inner, self.mod = x_shape[0], x_shape[1], x_shape[2], x_shape[3]
        x = einops.rearrange(x,"b outer inner mod k -> outer (b inner mod) k ", mod =self.mod, inner =self.inner, b=self.batch)
        w = self.mod_att(x)
        # NOTE(review): assumes Attention returns a tensor shaped (j, m, i)
        # compatible with this contraction -- confirm against Attention.
        x = torch.einsum("ijk,jmi -> mjk", x, w)
        x = einops.rearrange(x," outer (b inner mod) k -> b outer inner mod k ", b=self.batch, inner=self.inner, mod=self.mod)
        return x
class aggregation_att_inner(nn.Module):
    """Attention-weighted aggregation over the ``inner`` axis.

    Modalities are folded into the feature dimension, the project-defined
    ``Attention`` module scores the inner positions, and the weighted
    result is unfolded back to ``(b, outer, inner', mod, k)``.
    """
    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        # Only ``dmodel`` and ``modalities`` are used; the other arguments
        # keep the constructor signature uniform with the sibling modules.
        super().__init__()
        self.mod_att = Attention(dmodel*modalities)
    def forward(self, x):
        x_shape = x.shape
        # Cache axis sizes so the rearrange below can be inverted.
        self.batch, self.outer, self.inner, self.mod = x_shape[0], x_shape[1], x_shape[2], x_shape[3]
        x = einops.rearrange(x,"b outer inner mod k -> inner (b outer) (mod k) ", mod =self.mod, inner =self.inner, b=self.batch)
        w = self.mod_att(x)
        x = torch.einsum("ijk,jmi -> mjk", x, w)
        # BUG FIX: the inverse rearrange must mirror the forward pattern
        # "inner (b outer) (mod k)".  The previous pattern
        # "inner (b outer mod) k" decomposed the wrong axes and could only
        # match when mod == 1 (cf. aggregation_att_outer, whose inverse
        # pattern mirrors its forward one).
        x = einops.rearrange(x, "inner (b outer) (mod k) -> b outer inner mod k", b=self.batch, outer=self.outer, mod=self.mod)
        return x
class aggregation_att_contx_inner(nn.Module):
    """Context-attention aggregation that collapses the ``inner`` axis.

    Uses the project-defined ``Context_Attention`` (declared elsewhere in
    this file) with a 64-dimensional context over fused (mod*k) features.
    """
    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        # Only ``dmodel`` and ``modalities`` are used; the rest keep the
        # constructor signature uniform with the sibling modules.
        super().__init__()
        self.mod_att = Context_Attention(dmodel*modalities, 64)
    def forward(self, x):
        x_shape = x.shape
        # Cache axis sizes so the rearrange below can be inverted.
        self.batch, self.outer, self.inner, self.mod = x_shape[0], x_shape[1], x_shape[2], x_shape[3]
        x = einops.rearrange(x,"b outer inner mod k -> (b outer) inner (mod k) ", mod =self.mod, inner =self.inner, b=self.batch)
        w = self.mod_att(x)
        # NOTE(review): "ijk,im -> ik" sums over j and m independently
        # (result[i,k] = sum_j x[i,j,k] * sum_m w[i,m]); if w holds
        # per-position weights the intended contraction is likely
        # "ijk,ij -> ik" -- confirm against Context_Attention's output.
        x = torch.einsum("ijk,im -> ik", x, w)
        # inner=1: the inner axis was reduced away, so it is re-introduced
        # as a singleton dimension.
        x = einops.rearrange(x,"(b outer inner) (mod k) -> b outer inner mod k ", b=self.batch, outer=self.outer, mod=self.mod, inner=1)
        return x
class aggregation_att_contx_inner_mod(nn.Module):
    """Context-attention aggregation collapsing inner AND modality axes.

    Uses the project-defined ``Context_Attention`` with a 64-dimensional
    context; the token axis is the fused (inner*mod) positions.
    """
    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        # Only ``dmodel`` is used; the rest keep the constructor signature
        # uniform with the sibling modules.
        super().__init__()
        self.mod_att = Context_Attention(dmodel, 64)
    def forward(self, x):
        x_shape = x.shape
        # Cache axis sizes so the rearrange below can be inverted.
        self.batch, self.outer, self.inner, self.mod = x_shape[0], x_shape[1], x_shape[2], x_shape[3]
        x = einops.rearrange(x,"b outer inner mod k -> (b outer) (inner mod) k", mod =self.mod, inner =self.inner, b=self.batch)
        w = self.mod_att(x)
        # NOTE(review): "ijk,im -> ik" sums over j and m independently --
        # likely intended as "ijk,ij -> ik"; confirm against
        # Context_Attention's output shape.
        x = torch.einsum("ijk,im -> ik", x, w)
        # inner=1, mod=1: both reduced axes come back as singletons.
        x = einops.rearrange(x,"(b outer inner mod) k -> b outer inner mod k ", b=self.batch, outer=self.outer, mod=1, inner=1)
        return x
class aggregation_att_contx_mod(nn.Module):
    """Context-attention aggregation collapsing the modality axis.

    Uses the project-defined ``Context_Attention`` with a 64-dimensional
    context; tokens are the modalities of each (b, outer, inner) position.
    """
    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        # Only ``dmodel`` is used; the rest keep the constructor signature
        # uniform with the sibling modules.
        super().__init__()
        self.mod_att = Context_Attention(dmodel, 64)
    def forward(self, x):
        x_shape = x.shape
        # Cache axis sizes so the rearrange below can be inverted.
        self.batch, self.outer, self.inner, self.mod = x_shape[0], x_shape[1], x_shape[2], x_shape[3]
        x = einops.rearrange(x,"b outer inner mod k -> (b outer inner) mod k ", mod =self.mod, inner =self.inner, b=self.batch)
        w = self.mod_att(x)
        # NOTE(review): "ijk,im -> ik" sums over j and m independently --
        # likely intended as "ijk,ij -> ik"; confirm against
        # Context_Attention's output shape.
        x = torch.einsum("ijk,im -> ik", x, w)
        # mod=1: the modality axis was reduced away and returns as a singleton.
        x = einops.rearrange(x,"(b outer inner mod) k -> b outer inner mod k ", b=self.batch, outer=self.outer, mod=1, inner=self.inner)
        return x
class aggregation_att_mod(nn.Module):
    """Attention-weighted aggregation over the modality axis.

    Uses the project-defined ``Attention`` module to score the modalities
    of every (inner, outer, batch) position.
    """
    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        # Only ``dmodel`` is used; the other arguments keep the constructor
        # signature uniform with the sibling modules.
        super().__init__()
        self.mod_att = Attention(dmodel)
    def forward(self, x):
        x_shape = x.shape
        # Cache axis sizes so the rearrange below can be inverted.
        self.batch, self.outer, self.inner, self.mod = x_shape[0], x_shape[1], x_shape[2], x_shape[3]
        x = einops.rearrange(x,"b outer inner mod k -> mod (inner outer b) k ", mod =self.mod, inner =self.inner, b=self.batch)
        w = self.mod_att(x)
        x = torch.einsum("ijk,jmi -> mjk", x, w)
        # BUG FIX: the pattern previously ended with a stray second "-> "
        # ("... k -> b outer inner mod k -> "), which is not a valid einops
        # expression and made rearrange raise on every call.
        x = einops.rearrange(x, "mod (inner outer b) k -> b outer inner mod k", inner=self.inner, b=self.batch)
        return x
class fourier_pos(nn.Module):
    """Thin wrapper applying the project-defined Fourier sleep positional
    encoder to the input tensor unchanged otherwise."""
    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        # ``pos``, ``num_layers`` and ``heads`` are ignored; the signature
        # is kept uniform with the sibling modules.
        super().__init__()
        self.pos = Fourier_Sleep_PositionalEncoder(dmodel, outer, inner, modalities)
    def forward(self, x):
        return self.pos(x)
class huy_pos_inner(nn.Module):
    """Add sinusoidal (Attention-Is-All-You-Need style) positional encoding
    along the inner axis, treating every (b, outer, mod) slice as one
    sequence of inner positions."""

    def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, heads=8):
        super().__init__()
        self.pos = PositionalEncoding_AIAYN(dmodel)

    def forward(self, x):
        # Remember the incoming axis sizes so the rearrange can be undone.
        self.batch, self.outer, self.inner, self.mod = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        flat = einops.rearrange(x, "b outer inner mod k -> (b outer mod) inner k")
        flat = self.pos(flat)
        return einops.rearrange(flat, "(b outer mod) inner k -> b outer inner mod k",
                                b=self.batch, outer=self.outer, mod=self.mod)
class huy_pos_outer(nn.Module):
def __init__(self, dmodel, pos, inner, outer, modalities, num_layers=1, | |
#!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
from __future__ import division, print_function
# viability imports
import pyviability as viab
from pyviability import helper
from pyviability import libviability as lv
from pyviability import tsm_style as topo
# model imports
import examples.AWModel as awm
import examples.ConsumptionModel as cm
import examples.FiniteTimeLakeModel as ftlm
import examples.FiniteTimeLakeModel2 as ftlm2
import examples.GravityPendulumModel as gpm
import examples.PlantModel as pm
import examples.PopulationAndResourceModel as prm
import examples.SwingEquationModel as sqm
import examples.TechChangeModel as tcm
# other useful stuff
import argparse
try:
import argcomplete
except ImportError:
with_argcomplete = False
else:
with_argcomplete = True
import datetime as dt
import functools as ft
import matplotlib as mpl
import matplotlib.pyplot as plt
import numba as nb
import numpy as np
import scipy.optimize as opt
import time
import sys
PRINT_VERBOSITY = 2
def save_figure(filename, fig=None):
    """Save *fig* to *filename*, defaulting to the current pyplot figure."""
    target = plt.gcf() if fig is None else fig
    print("saving to {!r} ... ".format(filename), end="", flush=True)
    target.savefig(filename)
    print("done")
def plotPhaseSpace( evol, boundaries, steps = 2000, xlabel = "", ylabel = "", colorbar = True, style = {}, alpha = None , maskByCond = None, invertAxes = False, ax = plt, lwspeed = False):
    """Stream-plot the phase space of a 2D flow.

    :param evol: callable(XY, t) -> (dX, dY), evaluated on a meshgrid array
    :param boundaries: (Xmin, Ymin, Xmax, Ymax)
    :param steps: grid resolution per axis
    :param style: dict of ``streamplot`` kwargs, the string "dx" for a
        dX-colored preset, or empty for the speed-colored default
    :param alpha: opacity applied to the stream lines after plotting
    :param maskByCond: optional callable(X, Y) -> bool mask; masked points
        are excluded from the flow evaluation
    :param invertAxes: swap x and y (data and labels)
    :param ax: axes-like object to draw on (default: the pyplot module)
    :param lwspeed: scale a given linewidth by the local flow speed
    """
    # BUG FIX: work on a copy of ``style``.  The parameter defaults to a
    # shared mutable dict, and the code below pops "linestyle" and rewrites
    # "linewidth" -- previously this corrupted the caller's style dict
    # (e.g. topo.styleMod1) across repeated calls.
    if type(style) == dict:
        style = dict(style)
    # separate the boundaries
    Xmin, Ymin, Xmax, Ymax = boundaries
    # check boundaries sanity
    assert Xmin < Xmax
    assert Ymin < Ymax
    # build the grid
    X = np.linspace(Xmin, Xmax, steps)
    Y = np.linspace(Ymin, Ymax, steps)
    XY = np.array(np.meshgrid(X, Y))
    # if a condition is given, mask out every point that fulfills it
    if maskByCond:
        mask = maskByCond(XY[0], XY[1])
        XY[0] = np.ma.array(XY[0], mask = mask)
        XY[1] = np.ma.array(XY[1], mask = mask)
    # calculate the changes ... input is numpy array
    dX, dY = evol(XY,0) # that is where deriv from Vera is mapped to
    if invertAxes:
        data = [Y, X, np.transpose(dY), np.transpose(dX)]
    else:
        data = [X, Y, dX, dY]
    # separate linestyle: streamplot does not accept it as a kwarg, so it
    # is applied to the line collection after plotting
    linestyle = None
    if type(style) == dict and "linestyle" in style.keys():
        linestyle = style["linestyle"]
        style.pop("linestyle")
    # do the actual plot
    if style == "dx":
        c = ax.streamplot(*data, color=dX, linewidth=5*dX/dX.max(), cmap=plt.cm.autumn)
    elif style:
        speed = np.sqrt(data[2]**2 + data[3]**2)
        if "linewidth" in style and style["linewidth"] and lwspeed:
            # modulate the requested linewidth by the local flow speed
            style["linewidth"] = style["linewidth"] * speed/np.nanmax(speed)
        c = ax.streamplot(*data, **style)
    else:
        # default style formatting: color and width follow the flow speed
        speed = np.sqrt(dX**2 + dY**2)
        c = ax.streamplot(*data, color=speed, linewidth=5*speed/speed.max(), cmap=plt.cm.autumn)
    # set opacity of the lines
    if alpha:
        c.lines.set_alpha(alpha)
    # set linestyle
    if linestyle:
        c.lines.set_linestyle(linestyle)
    # add labels if given (swapped along with the data when invertAxes)
    if invertAxes:
        xlabel, ylabel = ylabel, xlabel
    if xlabel:
        if ax == plt:
            ax.xlabel(xlabel)
        else:
            ax.set_xlabel(xlabel)
    if ylabel:
        if ax == plt:
            ax.ylabel(ylabel)
        else:
            ax.set_ylabel(ylabel)
    # add colorbar
    if colorbar:
        assert not "color" in style.keys(), "you want a colorbar for only one color?"
        ax.colorbar()
def generate_example(default_rhss,
                     management_rhss,
                     sunny_fct,
                     boundaries,
                     default_parameters=[],
                     management_parameters=[],
                     periodicity=[],
                     default_rhssPS=None,
                     management_rhssPS=None,
                     out_of_bounds=True,
                     compute_eddies=False,
                     rescaling_epsilon=1e-6,
                     stepsize=None,
                     xlabel=None,
                     ylabel=None,
                     set_ticks=None,
                     ):
    """Generate the example function for each example.

    :param default_rhss: list of callables
        length 1, right-hand-side function of the default option. For future compatibility, this was chosen to be a list already.
    :param management_rhss: list of callables
        right-hand-side functions of the management options
    :param sunny_fct: callable
        function that determines whether a point / an array of points is in the sunny region
    :param boundaries: array-like, shape : (dim, 2)
        for each dimension of the model, give the lower and upper boundary
    :param default_parameters: list of dict, optional
        length 1, the dict contains the parameter values for the default option. For future compatibility, this was chosen to be a list already.
    :param management_parameters: list of dict, optional
        each dict contains the parameter values for the each management option respectively
    :param periodicity: list, optional
        provide the periodicity of the model's phase space
    :param default_rhssPS: list of callables, optional
        if the default_rhss are not callable for arrays (which is necessary for the plotting of the phase space), then provide a corresponding (list of) function(s) here
    :param management_rhssPS: list of callables, optional
        if the management_rhss are not callable for arrays (which is necessary for the plotting of the phase space), then provide a corresponding (list of) function(s) here
    :param out_of_bounds: bool, default : True
        If going out of the boundaries is interpreted as being in the undesirable region.
    :param compute_eddies:
        Should the eddies be computed? (Because the computation of eddies might take long, this is skipped for models where it's known that there are no eddies.)
    :param stepsize:
        step size used during the viability kernel computation
    :param rescaling_epsilon:
        The epsilon for the time homogenization, see https://arxiv.org/abs/1706.04542 for details.
    :param xlabel:
    :param ylabel:
    :param set_ticks:
    :return: callable
        function that when being called computes the specific example
    """
    # Adapter: plotPhaseSpace expects flat (Xmin, Ymin, Xmax, Ymax) bounds.
    plotPS = lambda rhs, boundaries, style: plotPhaseSpace(rhs, [boundaries[0][0], boundaries[1][0], boundaries[0][1], boundaries[1][1]], colorbar=False, style=style)
    # Empty parameter lists default to one empty dict per rhs.
    if not default_parameters:
        default_parameters = [{}] * len(default_rhss)
    if not management_parameters:
        management_parameters = [{}] * len(management_rhss)
    xlim, ylim = boundaries
    if default_rhssPS is None:
        default_rhssPS = default_rhss
    if management_rhssPS is None:
        management_rhssPS = management_rhss
    # NOTE(review): ``mark_fp`` below is accepted but never used in this
    # closure -- confirm whether it was meant to mark fixed points.
    def example_function(example_name,
                         grid_type="orthogonal",
                         backscaling=True,
                         plotting="points",
                         run_type="integration",
                         save_to="",
                         n0=80,
                         hidpi=False,
                         use_numba=True,
                         stop_when_finished="all",
                         flow_only=False,
                         mark_fp=None,
                         ):
        plot_points = (plotting == "points")
        plot_areas = (plotting == "areas")
        grid, scaling_factor, offset, x_step = viab.generate_grid(boundaries,
                                                                  n0,
                                                                  grid_type,
                                                                  periodicity = periodicity) #noqa
        # one state per grid point, filled in by topology_classification
        states = np.zeros(grid.shape[:-1], dtype=np.int16)
        NB_NOPYTHON = False
        default_runs = [viab.make_run_function(
            nb.jit(rhs, nopython=NB_NOPYTHON),
            helper.get_ordered_parameters(rhs, parameters),
            offset,
            scaling_factor,
            returning=run_type,
            rescaling_epsilon=rescaling_epsilon,
            use_numba=use_numba,
        ) for rhs, parameters in zip(default_rhss, default_parameters)] #noqa
        management_runs = [viab.make_run_function(
            nb.jit(rhs, nopython=NB_NOPYTHON),
            helper.get_ordered_parameters(rhs, parameters),
            offset,
            scaling_factor,
            returning=run_type,
            rescaling_epsilon=rescaling_epsilon,
            use_numba=use_numba,
        ) for rhs, parameters in zip(management_rhss, management_parameters)] #noqa
        sunny = viab.scaled_to_one_sunny(sunny_fct, offset, scaling_factor)
        # adding the figure here already in case VERBOSE is set
        # this makes only sense, if backscaling is switched off
        if backscaling:
            figure_size = np.array([7.5, 7.5])
        else:
            figure_size = np.array([7.5, 2.5 * np.sqrt(3) if grid_type == "simplex-based" else 7.5 ])
        if hidpi:
            figure_size = 2 * figure_size
        figure_size = tuple(figure_size.tolist())
        if (not backscaling) and plot_points:
            fig = plt.figure(example_name, figsize=figure_size, tight_layout=True)
        # Choose the viability step size; default scales with the grid.
        if stepsize is None:
            lv.STEPSIZE = 2 * x_step * max([1, np.sqrt( n0 / 30 )]) # prop to 1/sqrt(n0)
        else:
            lv.STEPSIZE = stepsize
        print(lv.STEPSIZE)
        print("STEPSIZE / x_step = {:5.3f}".format(lv.STEPSIZE / x_step))
        start_time = time.time()
        # The actual TSM computation: fills ``states`` in place.
        viab.topology_classification(grid, states, default_runs, management_runs, sunny,
                                     periodic_boundaries = periodicity,
                                     grid_type=grid_type,
                                     compute_eddies=compute_eddies,
                                     out_of_bounds=out_of_bounds,
                                     stop_when_finished=stop_when_finished,
                                     verbosity=PRINT_VERBOSITY,
                                     )
        time_diff = time.time() - start_time
        print("run time: {!s}".format(dt.timedelta(seconds=time_diff)))
        if backscaling:
            grid = viab.backscaling_grid(grid, scaling_factor, offset)
            if plot_points:
                fig = plt.figure(example_name, figsize=figure_size, tight_layout=True)
                if not flow_only:
                    viab.plot_points(grid, states, markersize=30 if hidpi else 15)
                # NOTE(review): ARGS is a module-level namespace presumably
                # set by the command-line parser -- confirm it is defined
                # before example_function is called.
                if ARGS.title:
                    plt.gca().set_title('example: ' + example_name, fontsize=20)
                [plotPS(ft.partial(rhs, **parameters), boundaries, topo.styleDefault) #noqa
                 for rhs, parameters in zip(default_rhssPS, default_parameters)] #noqa
                [plotPS(ft.partial(rhs, **parameters), boundaries, style)
                 for rhs, parameters, style in zip(management_rhssPS, management_parameters, [topo.styleMod1, topo.styleMod2])] #noqa
                if set_ticks is not None:
                    set_ticks()
                else:
                    plt.xlim(xlim)
                    plt.ylim(ylim)
                if xlabel is not None:
                    plt.xlabel(xlabel)
                if ylabel is not None:
                    plt.ylabel(ylabel)
                if save_to:
                    save_figure(save_to)
            if plot_areas:
                fig = plt.figure(example_name, figsize=figure_size, tight_layout=True)
                if not flow_only:
                    viab.plot_areas(grid, states)
                if ARGS.title:
                    plt.gca().set_title('example: ' + example_name, fontsize=20)
                [plotPS(ft.partial(rhs, **parameters), boundaries, topo.styleDefault) #noqa
                 for rhs, parameters in zip(default_rhssPS, default_parameters)] #noqa
                [plotPS(ft.partial(rhs, **parameters), boundaries, style)
                 for rhs, parameters, style in zip(management_rhssPS, management_parameters, [topo.styleMod1, topo.styleMod2])] #noqa
                if set_ticks is not None:
                    set_ticks()
                else:
                    plt.xlim(xlim)
                    plt.ylim(ylim)
                if xlabel is not None:
                    plt.xlabel(xlabel)
                if ylabel is not None:
                    plt.ylabel(ylabel)
                if save_to:
                    save_figure(save_to)
        else:
            # plotting in the rescaled (unit) coordinates
            plot_x_limits = [0, 1.5 if grid_type == "simplex-based" else 1]
            plot_y_limits = [0, np.sqrt(3)/2 if grid_type == "simplex-based" else 1]
            default_PSs = [viab.make_run_function(rhs, helper.get_ordered_parameters(rhs, parameters), offset, scaling_factor, returning="PS") #noqa
                           for rhs, parameters in zip(default_rhssPS, default_parameters)] #noqa
            management_PSs = [viab.make_run_function(rhs, helper.get_ordered_parameters(rhs, parameters), offset, scaling_factor, returning="PS") #noqa
                              for rhs, parameters in zip(management_rhssPS, management_parameters)] #noqa
            if plot_points:
                # figure already created above
                if not flow_only:
                    viab.plot_points(grid, states, markersize=30 if hidpi else 15)
                if ARGS.title:
                    plt.gca().set_title('example: ' + example_name, fontsize=20)
                [plotPS(rhs, [plot_x_limits, plot_y_limits], topo.styleDefault) for rhs, parameters in zip(default_PSs, default_parameters)]
                [plotPS(rhs, [plot_x_limits, plot_y_limits], style) for rhs, parameters, style in zip(management_PSs, management_parameters, [topo.styleMod1, topo.styleMod2])] #noqa
                plt.axis("equal")
                plt.xlim(plot_x_limits)
                plt.ylim(plot_y_limits)
                if save_to:
                    save_figure(save_to)
            if plot_areas:
                fig = plt.figure(example_name, figsize=(15, 15), tight_layout=True)
                if not flow_only:
                    viab.plot_areas(grid, states)
                if ARGS.title:
                    plt.gca().set_title('example: ' + example_name, fontsize=20)
                [plotPS(rhs, [plot_x_limits, plot_y_limits], topo.styleDefault) for rhs, parameters in zip(default_PSs, default_parameters)]
                [plotPS(rhs, [plot_x_limits, plot_y_limits], style) for rhs, parameters, style in zip(management_PSs, management_parameters, [topo.styleMod1, topo.styleMod2])] #noqa
                plt.axis("equal")
                plt.xlim(plot_x_limits)
                plt.ylim(plot_y_limits)
                if save_to:
                    save_figure(save_to)
        print()
        viab.print_evaluation(states)
    return example_function
EXAMPLES = {
"finite-time-lake":
generate_example([ftlm.rhs_default],
[ftlm.rhs_management],
ftlm.sunny,
[[-5, 5],[-5, 5]],
out_of_bounds=True,
default_rhssPS=[ftlm.rhs_default_PS],
management_rhssPS=[ftlm.rhs_management_PS],
),
| |
checked.")
print("Apps in",
[d.replace(os.environ["HOME"], "~") for d in app_dirs], "\n")
maxlen = max(len(x.replace(os.environ["HOME"], "~"))
for x in app_dirs)
if sum(apps_check["cask"].values()) > 0:
print("Installed by Cask:")
for d in app_dirs:
if apps_check["cask"][d] == 0:
continue
print("{0:<{1}s} : {2:d}".format(d.replace(
os.environ["HOME"], "~"), maxlen,
apps_check["cask"][d]))
print("")
if sum(apps_check["cask_obsolete"].values())\
> 0: # pragma: no cover
print("Installed by Cask (New version is availble, " +
"try `brew cask upgrade`):")
for d in app_dirs:
if apps_check["cask_obsolete"][d] == 0:
continue
print("{0:<{1}s} : {2:d}".format(d.replace(
os.environ["HOME"], "~"), maxlen,
apps_check["cask_obsolete"][d]))
print("")
if sum(apps_check["brew"].values()) > 0:
print("Installed by brew install command")
for d in app_dirs:
if apps_check["brew"][d] == 0:
continue
print("{0:<{1}s} : {2:d}".format(d.replace(
os.environ["HOME"], "~"), maxlen,
apps_check["brew"][d]))
print("")
if sum(apps_check["has_cask"].values()) > 0:
print("Installed directly, but casks are available:")
for d in app_dirs:
if apps_check["has_cask"][d] == 0:
continue
print("{0:<{1}s} : {2:d}".format(d.replace(
os.environ["HOME"], "~"), maxlen,
apps_check["has_cask"][d]))
print("")
if sum(apps_check["appstore"].values()) > 0: # pragma: no cover
print("Installed from Appstore")
for d in app_dirs:
if apps_check["appstore"][d] == 0:
continue
print("{0:<{1}s} : {2:d}".format(d.replace(
os.environ["HOME"], "~"), maxlen,
apps_check["appstore"][d]))
print("")
if sum(apps_check["no_cask"].values()) > 0:
print("No casks")
for d in app_dirs:
if apps_check["no_cask"][d] == 0:
continue
print("{0:<{1}s} : {2:d}".format(d.replace(
os.environ["HOME"], "~"), maxlen,
apps_check["no_cask"][d]))
print("")
def make_pack_deps(self):
    """Build the dependency map of installed brew packages.

    Fills ``self.pack_deps`` (package -> direct dependencies that are also
    installed) and ``self.top_packs`` (packages nothing else depends on).
    When verbosity is above 1, prints the dependency tree.
    """
    packs = self.get("brew_list")
    self.pack_deps = {}
    for pack in packs:
        direct = self.proc("brew deps --1 " + pack, False, False)[1]
        # keep only dependencies that are themselves installed
        self.pack_deps[pack] = [d for d in direct if d in packs]
    dep_packs = []
    for deps in self.pack_deps.values():
        dep_packs.extend(deps)
    self.top_packs = [p for p in packs if p not in dep_packs]
    if self.opt["verbose"] > 1:
        def print_dep(name, depth=0):
            # children are prefixed with "#" and indented by their depth
            prefix = "#" + " " * (depth - 2) if depth != 0 else ""
            print(prefix + name)
            for child in self.pack_deps[name]:
                print_dep(child, depth + 2)
        for pack in packs:
            if pack not in dep_packs:
                print_dep(pack)
def my_test(self):
    """Smoke test exercising Tee, file helpers and BrewInfo I/O.

    Side effects only (creates and removes scratch files/directories in
    the working directory); invoked by the ``test`` command in execute().
    """
    self.make_pack_deps()
    # Tee to a file only, then to stdout and a file together.
    out = Tee("test_file")
    out.write("test\n")
    out.close()
    out = Tee(sys.stdout, "test_file")
    out.write("test\n")
    out.flush()
    out.close()
    self.remove("test_file")
    os.mkdir("dir")
    self.remove("dir")
    self.remove("aaa")  # presumably a non-existent path -- exercises the error branch
    self.brewinfo.read()
    print("read input:", len(self.brewinfo.brew_input))
    self.brewinfo.clear()
    print("read input cleared:", len(self.brewinfo.brew_input))
    # Exercise edge/error paths of BrewInfo.
    self.brewinfo.set_file("/test/not/correct/file/path")
    self.brewinfo.read()
    self.brewinfo.check_dir()
    self.brewinfo.set_val("brew_input_opt", {"test_pack": "test opt"})
    self.brewinfo.add("brew_input_opt", {"test_pack2": "test opt2"})
    print(self.brewinfo.get("brew_input_opt"))
    self.brewinfo.read("testfile")
    self.brewinfo.get_tap("/aaa/bbb")
def execute(self):
    """Dispatch ``self.opt["command"]`` to the matching sub-command.

    Every recognized branch terminates the process via ``sys.exit(0)``;
    reaching the bottom means the command was not recognized.  Note the
    order matters: ``check_repo``, ``check_input_file`` and ``get_list``
    run between command groups and later branches depend on them.
    """
    # Cask list check
    if self.opt["command"] == "casklist":
        self.check_cask()
        sys.exit(0)
    # Set BREWFILE repository
    if self.opt["command"] == "set_repo":
        self.set_brewfile_repo()
        sys.exit(0)
    # Set BREWFILE to local file
    if self.opt["command"] == "set_local":
        self.set_brewfile_local()
        sys.exit(0)
    # Change brewfile if it is repository's one.
    self.check_repo()
    # Do pull/push for the repository.
    if self.opt["command"] in ["pull", "push"]:
        self.repomgr(self.opt["command"])
        sys.exit(0)
    # brew command
    if self.opt["command"] == "brew":
        self.brew_cmd()
        sys.exit(0)
    # Initialize
    if self.opt["command"] in ["init", "dump"]:
        self.initialize()
        sys.exit(0)
    # Check input file
    # If the file doesn't exist, initialize it.
    self.check_input_file()
    # Edit
    if self.opt["command"] == "edit":
        self.edit_brewfile()
        sys.exit(0)
    # Cat
    if self.opt["command"] == "cat":
        self.cat_brewfile()
        sys.exit(0)
    # Get files
    if self.opt["command"] == "get_files":
        self.get_files(is_print=True)
        sys.exit(0)
    # Cleanup
    if self.opt["command"] == "clean_non_request":
        self.clean_non_request()
        sys.exit(0)
    # Get list for cleanup/install
    self.get_list()
    # Cleanup
    if self.opt["command"] == "clean":
        self.cleanup()
        sys.exit(0)
    # Install
    if self.opt["command"] == "install":
        self.install()
        sys.exit(0)
    # Update: upgrade everything, sync the repo, install, then clean up.
    if self.opt["command"] == "update":
        if not self.opt["noupgradeatupdate"]:
            self.proc("brew update")
            self.proc("brew upgrade --fetch-HEAD")
            self.proc("brew cask upgrade")
        if self.opt["repo"] != "":
            self.repomgr("pull")
        self.install()
        # dry runs skip the destructive cleanup/re-init/push phase
        if not self.opt["dryrun"]:
            self.cleanup()
            self.initialize(False)
            if self.opt["repo"] != "":
                self.repomgr("push")
        sys.exit(0)
    # test
    if self.opt["command"] == "test":
        self.my_test()
        sys.exit(0)
    # No command found
    self.err("Wrong command: " + self.opt["command"],
             0) # pragma: no cover
    self.err("Execute `" + __prog__ +
             " help` for more information.", 0) # pragma: no cover
    sys.exit(1) # pragma: no cover
def main():
# Prepare BrewFile
b = BrewFile()
import argparse
# Pre Parser
pre_parser = argparse.ArgumentParser(
add_help=False, usage=__prog__+"...")
group = pre_parser.add_mutually_exclusive_group()
group.add_argument("-i", "--init", action="store_const",
dest="command", const="init")
group.add_argument("-s", "--set_repo", action="store_const",
dest="command", const="set_repo")
group.add_argument("--set_local", action="store_const",
dest="command", const="set_local")
group.add_argument("-c", "--clean", action="store_const",
dest="command", const="clean")
group.add_argument("-u", "--update", action="store_const",
dest="command", const="update")
group.add_argument("-e", "--edit", action="store_const",
dest="command", const="edit")
group.add_argument("--cat", action="store_const",
dest="command", const="cat")
group.add_argument("--test", action="store_const",
dest="command", const="test")
group.add_argument("--commands", action="store_const",
dest="command", const="commands")
group.add_argument("-v", "--version", action="store_const",
dest="command", const="version")
# Parent parser
file_parser = argparse.ArgumentParser(add_help=False)
file_parser.add_argument(
"-f", "--file", action="store", dest="input",
default=b.opt["input"],
help="Set input file (default: %(default)s). \n"
"You can set input file by environmental variable,\n"
"HOMEBREW_BREWFILE, like:\n"
" export HOMEBREW_BREWFILE=~/.brewfile")
backup_parser = argparse.ArgumentParser(add_help=False)
backup_parser.add_argument(
"-b", "--backup", action="store", dest="backup",
default=b.opt["backup"],
help="Set backup file (default: %(default)s). \n"
"If it is empty, no backup is made.\n"
"You can set backup file by environmental variable,\n"
" HOMEBREW_BREWFILE_BACKUP, like:\n"
" export HOMEBREW_BREWFILE_BACKUP=~/brewfile.backup")
format_parser = argparse.ArgumentParser(add_help=False)
format_parser.add_argument(
"-F", "--format", "--form", action="store", dest="form",
default=b.opt["form"],
help="Set input file format (default: %(default)s). \n"
"file (or none) : brew vim --HEAD --with-lua\n"
"brewdler or bundle: brew 'vim', args: ['with-lua', 'HEAD']\n"
" Compatible with "
"[homebrew-bundle]"
"(https://github.com/Homebrew/homebrew-bundle).\n"
"command or cmd : brew install vim --HEAD --with-lua\n"
" Can be used as a shell script.\n")
leaves_parser = argparse.ArgumentParser(add_help=False)
leaves_parser.add_argument(
"--leaves", action="store_true", default=b.opt["leaves"],
dest="leaves",
help="Make list only for leaves (taken by `brew leaves`).\n"
"You can set this by environmental variable,"
" HOMEBREW_BREWFILE_LEAVES, like:\n"
" export HOMEBREW_BREWFILE_LEAVES=1")
on_request_parser = argparse.ArgumentParser(add_help=False)
on_request_parser.add_argument(
"--on_request", action="store_true", default=b.opt["on_request"],
dest="on_request",
help="Make list only for packages installed on request.\n"
"This option is given priority over 'leaves'.\n"
"You can set this by environmental variable,"
" HOMEBREW_BREWFILE_ON_REQUEST, like:\n"
" export HOMEBREW_BREWFILE_ON_REQUEST=1")
top_packages_parser = argparse.ArgumentParser(add_help=False)
top_packages_parser.add_argument(
"--top_packages", action="store", default=b.opt["top_packages"],
dest="top_packages",
help="Packages to be listed even if they are under dependencies\n"
"and `leaves`/'on_request' option is used.\n"
"You can set this by environmental variable,\n"
" HOMEBREW_BREWFILE_TOP_PACKAGES (',' separated), like:\n"
" export HOMEBREW_BREWFILE_TOP_PACKAGES=go,coreutils")
noupgradeatupdate_parser = argparse.ArgumentParser(add_help=False)
noupgradeatupdate_parser.add_argument(
"-U", "--noupgrade", action="store_true",
default=b.opt["noupgradeatupdate"], dest="noupgradeatupdate",
help="Do not execute `brew update/brew upgrade`"
" at `brew file update`.")
repo_parser = argparse.ArgumentParser(add_help=False)
repo_parser.add_argument(
"-r", "--repo", action="store", default=b.opt["repo"], dest="repo",
help="Set repository name. Use with set_repo.")
link_parser = argparse.ArgumentParser(add_help=False)
link_parser.add_argument(
"-n", "--nolink", action="store_false", default=b.opt["link"],
dest="link", help="Don't make links for Apps.")
caskonly_parser = argparse.ArgumentParser(add_help=False)
caskonly_parser.add_argument(
"--caskonly", action="store_true", default=b.opt["caskonly"],
dest="caskonly", help="Write out only cask related packages")
appstore_parser = argparse.ArgumentParser(add_help=False)
appstore_parser.add_argument(
"--no_appstore", action="store_false", default=b.opt["appstore"],
dest="appstore", help="Don't check AppStore applications.\n"
"(For other than casklist command.)\n"
"You can set input file by environmental variable:\n"
" export HOMEBREW_BRWEFILE_APPSTORE=0")
dryrun_parser = argparse.ArgumentParser(add_help=False)
dryrun_parser.add_argument(
"-C", action="store_false", default=b.opt["dryrun"],
dest="dryrun", help="Run clean as non dry-run mode.\n"
"Use this option to run clean at update command, too.")
yn_parser = argparse.ArgumentParser(add_help=False)
yn_parser.add_argument(
"-y", "--yes", action="store_true", default=b.opt["yn"],
dest="yn", help="Answer yes to all yes/no questions.")
verbose_parser = argparse.ArgumentParser(add_help=False)
verbose_parser.add_argument("-V", "--verbose", action="store",
default=b.opt["verbose"],
dest="verbose", help="Verbose level 0/1/2")
help_parser = argparse.ArgumentParser(add_help=False)
help_parser.add_argument("-h", "--help", action="store_true",
default=False, dest="help",
help="Print Help (this message) and exit.")
min_parsers = [file_parser, backup_parser, format_parser, leaves_parser,
on_request_parser, top_packages_parser, appstore_parser,
caskonly_parser, yn_parser, verbose_parser]
subparser_options = {
"parents": min_parsers,
"formatter_class": argparse.RawTextHelpFormatter}
# Main parser
parser = argparse.ArgumentParser(
add_help=False, prog=__prog__,
parents=[file_parser, backup_parser, format_parser, leaves_parser,
on_request_parser, top_packages_parser,
noupgradeatupdate_parser, repo_parser, link_parser,
caskonly_parser, appstore_parser, dryrun_parser, yn_parser,
verbose_parser, help_parser],
formatter_class=argparse.RawTextHelpFormatter,
description=__description__)
subparsers = parser.add_subparsers(
title="subcommands", metavar="[command]", help="", dest="command")
help = "Install packages in BREWFILE."
subparsers.add_parser("install", description=help, help=help,
**subparser_options)
help = "Execute brew command, and update BREWFILE."
subparsers.add_parser("brew", description=help, help=help,
parents=min_parsers, add_help=False,
formatter_class=argparse.RawTextHelpFormatter)
help = "or dump/-i/--init\nInitialize/Update BREWFILE "\
"with installed packages."
subparsers.add_parser(
"init", description=help, help=help,
parents=min_parsers+[link_parser, repo_parser],
formatter_class=argparse.RawTextHelpFormatter)
subparsers.add_parser(
"dump",
parents=min_parsers+[link_parser, repo_parser],
formatter_class=argparse.RawTextHelpFormatter)
help = "or -s/--set_repo\nSet BREWFILE repository (e.g. rcmdnk/Brewfile)."
subparsers.add_parser(
"set_repo", description=help, help=help,
parents=min_parsers+[repo_parser],
formatter_class=argparse.RawTextHelpFormatter)
help = "or --set_local\nSet BREWFILE to local file."
subparsers.add_parser(
"set_local", description=help, help=help,
parents=min_parsers,
formatter_class=argparse.RawTextHelpFormatter)
help = "Update BREWFILE from the repository."
subparsers.add_parser("pull", description=help, help=help,
**subparser_options)
help = "Push your BREWFILE to the repository."
subparsers.add_parser("push", description=help, help=help,
**subparser_options)
help = "or -c/--clean\nCleanup.\n"\
"Uninstall packages not in the list.\n"\
"Untap packages not in the list.\n"\
"Cleanup cache (brew cleanup)\n"\
"By default, cleanup runs as dry-run.\n"\
"If you want to enforce cleanup, use '-C' option."
subparsers.add_parser(
"clean", description=help, help=help,
parents=min_parsers+[dryrun_parser],
formatter_class=argparse.RawTextHelpFormatter)
help = "or --clean_non_request.\n"\
"Uninstall packages which were installed as dependencies \n"\
"but parent packages of which were already uninstalled.\n"\
"By default, cleanup runs as dry-run.\n"\
"If you want to enforce cleanup, use '-C' option."
subparsers.add_parser(
"clean_non_request", description=help, help=help,
parents=min_parsers+[dryrun_parser],
formatter_class=argparse.RawTextHelpFormatter)
help = "or -u/--update\nDo brew update/upgrade, cask upgrade, pull, install,\n"\
"init and push.\n"\
"In addition, pull and push\n"\
"will be done if the repository is assigned.\n"\
"'clean' is also executed after install if you give -C option."
subparsers.add_parser(
"update", description=help, help=help,
parents=min_parsers+[link_parser, noupgradeatupdate_parser,
dryrun_parser],
formatter_class=argparse.RawTextHelpFormatter)
help = "or -e/--edit\nEdit input files."
subparsers.add_parser("edit", description=help, help=help,
**subparser_options)
help = "or --cat\nShow contents of | |
centers good"
print "\n%s result:" % algo, dump_json(a1)
# if we want to return the model view like the browser
if 1==0:
# HACK! always do a model view. kmeans last result isn't good? (at least not always)
a = self.kmeans_view(model=a1['model']['_key'], timeoutSecs=30)
verboseprint("\n%s model view result:" % algo, dump_json(a))
else:
a = a1
if (browseAlso | h2o_args.browse_json):
print "Redoing the %s through the browser, no results saved though" % algo
h2b.browseJsonHistoryAsUrlLastMatch(algo)
time.sleep(5)
return a
# params of interest: header=1, header_from_file, separator=1 (hex encode?),
# exclude=
# noise is a 2-tuple: ("StoreView", params_dict)
def parse(self, key, key2=None,
          timeoutSecs=300, retryDelaySecs=0.2, initialDelaySecs=None, pollTimeoutSecs=180,
          noise=None, benchmarkLogging=None, noPoll=False, **kwargs):
    """Parse the dataset stored under 'key' into frame 'key2' via 2/Parse2.

    Returns the polled (final) JSON response, or the initial job response
    when noPoll is set.
    """
    browseAlso = kwargs.pop('browseAlso', False)
    # requests will not retry for us; see
    # https://github.com/kennethreitz/requests/issues/719
    # (closed saying Requests doesn't do retries, despite the docs).
    algo = "2/Parse2"
    verboseprint("\n %s key: %s to key2: %s (if None, means default)" % (algo, key, key2))
    # Every known parse parameter; kwargs may override any of them.
    params_dict = {
        'blocking': None,  # debug only
        'source_key': key,  # can be a regex
        'destination_key': key2,
        'parser_type': None,
        'separator': None,
        'header': None,
        'single_quotes': None,
        'header_from_file': None,
        'exclude': None,
        'delete_on_done': None,
        'preview': None,
    }
    check_params_update_kwargs(params_dict, kwargs, 'parse', print_params=True)
    # h2o requires header=1 whenever header_from_file is used.
    # NOTE(review): this mutates kwargs *after* the merge above -- confirm the
    # forced header value actually reaches the request.
    if kwargs.get('header_from_file'):  # default None
        kwargs['header'] = 1
    if benchmarkLogging:
        import h2o
        h2o.cloudPerfH2O.get_log_save(initOnly=True)
    rv = self.do_json_request(algo + ".json", timeout=timeoutSecs, params=params_dict)
    # The response should steer us to the right Progress url.
    verboseprint(algo + " result:", dump_json(rv))
    if noPoll:
        return rv
    # noise: optional (url, params) request fired during polling; None = quiet.
    verboseprint(algo + ' noise:', noise)
    rv = self.poll_url(rv, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
                       initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs,
                       noise=noise, benchmarkLogging=benchmarkLogging)
    verboseprint("\n" + algo + " result:", dump_json(rv))
    return rv
def netstat(self):
    """Fetch the cloud's network status via Network.json."""
    return self.do_json_request('Network.json')
def linux_info(self, timeoutSecs=30):
    """Collect Linux host info from the node via CollectLinuxInfo.json."""
    return self.do_json_request("CollectLinuxInfo.json", timeout=timeoutSecs)
def jstack(self, timeoutSecs=30):
    """Grab a Java thread dump from the node via JStack.json."""
    return self.do_json_request("JStack.json", timeout=timeoutSecs)
def network_test(self, tdepth=5, timeoutSecs=30):
    """Run 2/NetworkTest and return its JSON result.

    NOTE(review): 'tdepth' is accepted but never forwarded in the request
    params -- confirm whether the endpoint ever took it.
    """
    resp = self.do_json_request("2/NetworkTest.json", params={}, timeout=timeoutSecs)
    verboseprint("\n network test:", dump_json(resp))
    return resp
def jprofile(self, depth=5, timeoutSecs=30):
    """Profile the JVM via 2/JProfile.json with the given stack depth."""
    return self.do_json_request("2/JProfile.json", params={'depth': depth}, timeout=timeoutSecs)
def iostatus(self):
    """Fetch I/O status from the node via IOStatus.json."""
    return self.do_json_request("IOStatus.json")
# turns enums into expanded binary features
def one_hot(self, source, timeoutSecs=30, **kwargs):
    """One-hot encode enum columns of 'source' via 2/OneHot.

    kwargs are accepted for call-site compatibility but not forwarded.
    Checks the sandbox logs for errors before returning.
    """
    resp = self.do_json_request('2/OneHot.json',
                                params={"source": source},
                                timeout=timeoutSecs)
    check_sandbox_for_errors(python_test_name=h2o_args.python_test_name)
    return resp
# &offset=
# &view=
# FIX! need to have max > 1000?
def inspect(self, key, offset=None, view=None, max_column_display=1000, ignoreH2oError=False,
            timeoutSecs=30):
    """Inspect the frame under 'key' via 2/Inspect2.

    'view' and 'max_column_display' are accepted from old tests but are
    not forwarded (the v2 endpoint has no 'view' parameter).
    """
    query = {
        "src_key": key,
        "offset": offset,
    }
    return self.do_json_request('2/Inspect2.json',
                                params=query,
                                ignoreH2oError=ignoreH2oError,
                                timeout=timeoutSecs)
# can take a useful 'filter'
# FIX! current hack to h2o to make sure we get "all" rather than just
# default 20 the browser gets. set to max # by default (1024)
# There is a offset= param that's useful also, and filter=
def store_view(self, timeoutSecs=60, print_params=False, **kwargs):
params_dict = {
# now we should default to a big number, so we see everything
'filter': None,
'view': 10000,
'offset': 0,
}
# no checking on legal kwargs?
params_dict.update(kwargs)
if print_params:
print "\nStoreView params list:", params_dict
a = self.do_json_request('StoreView.json',
params=params_dict,
timeout=timeoutSecs)
return a
def rebalance(self, timeoutSecs=180, **kwargs):
    """Rebalance a frame's chunk layout via 2/ReBalance.

    kwargs (source/after/chunks) are merged in with no validation.
    """
    query = {
        'source': None,
        'after': None,
        'chunks': None,
    }
    query.update(kwargs)
    resp = self.do_json_request('2/ReBalance.json',
                                params=query,
                                timeout=timeoutSecs)
    verboseprint("\n rebalance result:", dump_json(resp))
    return resp
def to_int(self, timeoutSecs=60, **kwargs):
    """Convert a column to integer via 2/ToInt2.

    'column_index' is 1-based. kwargs are merged in with no validation.
    """
    query = {
        'src_key': None,
        'column_index': None,  # ugh. takes 1 based indexing
    }
    query.update(kwargs)
    resp = self.do_json_request('2/ToInt2.json', params=query, timeout=timeoutSecs)
    verboseprint("\n to_int result:", dump_json(resp))
    return resp
def to_enum(self, timeoutSecs=60, **kwargs):
    """Convert a column to enum (factor) via 2/ToEnum2.

    'column_index' is 1-based. kwargs are merged into the request
    parameters with no validation.
    """
    params_dict = {
        'src_key': None,
        'column_index': None,  # ugh. takes 1 based indexing
    }
    params_dict.update(kwargs)
    a = self.do_json_request('2/ToEnum2.json', params=params_dict, timeout=timeoutSecs)
    # fixed debug label: it previously said "to_int result" (copy/paste from to_int)
    verboseprint("\n to_enum result:", dump_json(a))
    return a
def unlock(self, timeoutSecs=30):
    """Unlock all locked keys in the store via 2/UnlockKeys."""
    return self.do_json_request('2/UnlockKeys.json', params=None, timeout=timeoutSecs)
# There is also a RemoveAck in the browser that asks the user for
# confirmation; this endpoint is what runs after that confirmation.
# Errors are ignored: h2o may already have removed the key after parse.
def remove_key(self, key, timeoutSecs=120):
    """Remove 'key' from the store (h2o errors ignored), then unlock."""
    resp = self.do_json_request('Remove.json',
                                params={"key": key}, ignoreH2oError=True, timeout=timeoutSecs)
    self.unlock()
    return resp
# this removes all keys!
def remove_all_keys(self, timeoutSecs=120):
    """Remove every key in the store via 2/RemoveAll. Destructive."""
    return self.do_json_request('2/RemoveAll.json', timeout=timeoutSecs)
# only model keys can be exported?
def export_hdfs(self, source_key, path):
    """Export 'source_key' to HDFS at 'path' via ExportHdfs.json."""
    resp = self.do_json_request('ExportHdfs.json',
                                params={"source_key": source_key, "path": path})
    verboseprint("\nexport_hdfs result:", dump_json(resp))
    return resp
def export_s3(self, source_key, bucket, obj):
    """Export 'source_key' to the S3 bucket/object via ExportS3.json."""
    resp = self.do_json_request('ExportS3.json',
                                params={"source_key": source_key, "bucket": bucket, "object": obj})
    verboseprint("\nexport_s3 result:", dump_json(resp))
    return resp
# the param name for ImportFiles is 'file', but it can take a directory or a file.
# 192.168.0.37:54323/ImportFiles.html?file=%2Fhome%2F0xdiag%2Fdatasets
def import_files(self, path, timeoutSecs=180):
    """Import a file or directory into the cloud via 2/ImportFiles2."""
    resp = self.do_json_request('2/ImportFiles2.json',
                                timeout=timeoutSecs,
                                params={"path": path})
    verboseprint("\nimport_files result:", dump_json(resp))
    return resp
# 'destination_key', 'escape_nan' 'expression'
def exec_query(self, timeoutSecs=20, ignoreH2oError=False, print_params=False, **kwargs):
    """Run an Exec2 expression (v2 only); pass the expression as str=..."""
    params_dict = {
        'str': None,
    }
    browseAlso = kwargs.pop('browseAlso', False)
    check_params_update_kwargs(params_dict, kwargs, 'exec_query', print_params=print_params)
    resp = self.do_json_request('2/Exec2.json',
                                timeout=timeoutSecs, ignoreH2oError=ignoreH2oError,
                                params=params_dict)
    verboseprint("\nexec_query result:", dump_json(resp))
    return resp
def jobs_admin(self, timeoutSecs=120, **kwargs):
    """List cloud jobs via Jobs.json; kwargs merged in with no validation."""
    query = {
        # 'expression': None,
    }
    browseAlso = kwargs.pop('browseAlso', False)
    query.update(kwargs)
    verboseprint("\njobs_admin:", query)
    resp = self.do_json_request('Jobs.json', timeout=timeoutSecs, params=query)
    verboseprint("\njobs_admin result:", dump_json(resp))
    return resp
def jobs_cancel(self, timeoutSecs=120, **kwargs):
params_dict = {
'key': None,
}
browseAlso = kwargs.pop('browseAlso', False)
check_params_update_kwargs(params_dict, kwargs, 'jobs_cancel', print_params=True)
a = self.do_json_request('Cancel.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\njobs_cancel result:", dump_json(a))
print "Cancelled job:", params_dict['key']
return a
def create_frame(self, timeoutSecs=120, **kwargs):
    """Create a synthetic frame via 2/CreateFrame (rows/cols/seed etc.)."""
    params_dict = {
        'key': None,
        'rows': None,
        'cols': None,
        'seed': None,
        'randomize': None,
        'value': None,
        'real_range': None,
        'categorical_fraction': None,
        'factors': None,
        'integer_fraction': None,
        'integer_range': None,
        'binary_fraction': None,
        'binary_ones_fraction': None,
        'missing_fraction': None,
        'response_factors': None,
    }
    browseAlso = kwargs.pop('browseAlso', False)
    check_params_update_kwargs(params_dict, kwargs, 'create_frame', print_params=True)
    resp = self.do_json_request('2/CreateFrame.json', timeout=timeoutSecs, params=params_dict)
    verboseprint("\ncreate_frame result:", dump_json(resp))
    return resp
def insert_missing_values(self, timeoutSecs=120, **kwargs):
    """Inject NAs into a frame via 2/InsertMissingValues (key/seed/fraction)."""
    params_dict = {
        'key': None,
        'seed': None,
        'missing_fraction': None,
    }
    browseAlso = kwargs.pop('browseAlso', False)
    check_params_update_kwargs(params_dict, kwargs, 'insert_missing_values', print_params=True)
    resp = self.do_json_request('2/InsertMissingValues.json', timeout=timeoutSecs, params=params_dict)
    verboseprint("\ninsert_missing_values result:", dump_json(resp))
    return resp
def impute(self, timeoutSecs=120, **kwargs):
    """Impute missing values in a column via 2/Impute.

    'method' is mean, mode or median; 'group_by' is a comma separated
    list of column names.
    """
    params_dict = {
        'source': None,
        'column': None,
        'method': None,  # mean, mode, median
        'group_by': None,  # comma separated column names
    }
    browseAlso = kwargs.pop('browseAlso', False)
    check_params_update_kwargs(params_dict, kwargs, 'impute', print_params=True)
    resp = self.do_json_request('2/Impute.json', timeout=timeoutSecs, params=params_dict)
    verboseprint("\nimpute result:", dump_json(resp))
    return resp
def frame_split(self, timeoutSecs=120, **kwargs):
    """Split a frame by the given 'ratios' via 2/FrameSplitPage."""
    params_dict = {
        'source': None,
        'ratios': None,
    }
    browseAlso = kwargs.pop('browseAlso', False)
    check_params_update_kwargs(params_dict, kwargs, 'frame_split', print_params=True)
    resp = self.do_json_request('2/FrameSplitPage.json', timeout=timeoutSecs, params=params_dict)
    verboseprint("\nframe_split result:", dump_json(resp))
    return resp
def nfold_frame_extract(self, timeoutSecs=120, **kwargs):
    """Extract fold 'afold' of an n-fold split via 2/NFoldFrameExtractPage."""
    params_dict = {
        'source': None,
        'nfolds': None,
        'afold': None,  # Split to extract
    }
    browseAlso = kwargs.pop('browseAlso', False)
    check_params_update_kwargs(params_dict, kwargs, 'nfold_frame_extract', print_params=True)
    resp = self.do_json_request('2/NFoldFrameExtractPage.json', timeout=timeoutSecs, params=params_dict)
    verboseprint("\nnfold_frame_extract result:", dump_json(resp))
    return resp
def gap_statistic(self, timeoutSecs=120, retryDelaySecs=1.0, initialDelaySecs=None, pollTimeoutSecs=180,
                  noise=None, benchmarkLogging=None, noPoll=False,
                  print_params=True, noPrint=False, **kwargs):
    """Run 2/GapStatistic and poll to completion (unless noPoll).

    Adds 'python_elapsed' and 'python_%timeout' timing keys to the result.
    """
    query = {
        'source': None,
        'destination_key': None,
        'k_max': None,
        'b_max': None,
        'bootstrap_fraction': None,
        'seed': None,
        'cols': None,
        'ignored_cols': None,
        'ignored_cols_by_name': None,
    }
    browseAlso = kwargs.pop('browseAlso', False)
    # NOTE(review): the 'print_params' argument is ignored; True is always
    # passed here -- confirm intent.
    check_params_update_kwargs(query, kwargs, 'gap_statistic', print_params=True)
    start = time.time()
    resp = self.do_json_request('2/GapStatistic.json', timeout=timeoutSecs, params=query)
    if noPoll:
        return resp
    resp = self.poll_url(resp, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
                         benchmarkLogging=benchmarkLogging,
                         initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
    verboseprint("\ngap_statistic result:", dump_json(resp))
    resp['python_elapsed'] = time.time() - start
    resp['python_%timeout'] = resp['python_elapsed'] * 100 / timeoutSecs
    return resp
def speedrf(self, data_key, ntrees=50, max_depth=20, timeoutSecs=300,
retryDelaySecs=1.0, initialDelaySecs=None, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, noPoll=False,
print_params=True, noPrint=False, **kwargs):
params_dict = {
'balance_classes': None,
'classification': 1,
'cols': None,
'destination_key': None,
'ignored_cols': None,
'ignored_cols_by_name': None,
'importance': 0,
'keep_cross_validation_splits': None,
'max_after_balance_size': None,
'max_depth': max_depth,
'mtries': -1.0,
'nbins': 1024.0,
'n_folds': None,
'ntrees': ntrees,
'oobee': 0,
'response': None,
'sample_rate': 0.67,
'sampling_strategy': 'RANDOM',
'score_pojo': None, # create the score pojo
'seed': -1.0,
'select_stat_type': 'ENTROPY', # GINI
'source': data_key,
'validation': None,
'verbose': None,
}
check_params_update_kwargs(params_dict, kwargs, 'SpeeDRF', print_params)
if print_params:
print "\n%s parameters:" % "SpeeDRF", params_dict
sys.stdout.flush()
rf = self.do_json_request('2/SpeeDRF.json', timeout=timeoutSecs, params=params_dict)
print "\n%s result:" % "SpeeDRF", dump_json(rf)
if noPoll:
print "Not polling SpeeDRF"
return rf
time.sleep(2)
rfView = self.poll_url(rf, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs,
noise=noise, benchmarkLogging=benchmarkLogging, noPrint=noPrint)
return rfView
# note ntree in kwargs can overwrite trees! (trees is legacy param)
def random_forest(self, data_key, trees=None,
timeoutSecs=300, retryDelaySecs=1.0, initialDelaySecs=None, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, noPoll=False, rfView=True,
print_params=True, noPrint=False, **kwargs):
print "at top of random_forest, timeoutSec: ", timeoutSecs
algo = '2/DRF'
algoView = '2/DRFView'
params_dict = {
# 'model': | |
request.GET.get('sid', None)
from_str = request.GET.get('from', None)
to_str = request.GET.get('to', None)
from_t = request.GET.get('from_t', None)
to_t = request.GET.get('to_t', None)
# Set values from POST and GET
edit = request.POST.get('edit', None)
if not edit:
edit = request.GET.get('edit', None)
data['edit'] = edit
name = request.POST.get('name', None)
if not tid:
return render(request, 'error.html', {'data': {'error': 'No Thing ID provided'}})
if not intaid:
return render(request, 'error.html', {'data': {'error': 'No App ID provided'}})
# Get all Apps
data['apps'] = App.objects.filter(user=request.user, hidden=False)
# Get App
try:
app = App.objects.get(id=intaid, user=request.user)
except App.DoesNotExist:
data['error'] = 'The App with Internal ID "{}" does not exists or you do not have access rights'.format(intaid)
return render(request, 'error.html', {'data': data})
# Get Thing
try:
thing = Thing.objects.get(tid=tid, app=app)
except Thing.DoesNotExist:
data['error'] = 'The Thing with ID "{}" does not exists or you do not have access rights'.format(tid)
return render(request, 'error.html', {'data': data})
data['thing'] = thing
# Check ownership
if thing.app.user != request.user:
data['error'] = 'The Device with ID "{}" does not exists or you do not have access rights'.format(tid)
return render(request, 'error.html', {'data': data})
# Change name if we have to:
if edit =='name' and name:
logger.debug('Changing device "{}" name to: {}'.format(thing.tid, name))
thing.name=name
thing.save()
# Install App if we have to
if data['action'] == 'install' and confirmed:
if thing.app_set_via != 'backend':
data['error'] = 'Error, the App for Thing "{}" was setup on the Thing itself and a new one cannot be installed'.format(thing.tid)
return render(request, 'error.html', {'data': data})
new_app_intaid = confirmed
logger.info('Will install App "{}" on thing "{}"'.format(new_app_intaid, thing.tid))
# Get the App to install:
try:
new_app = App.objects.get(id=new_app_intaid, user=request.user)
except Thing.DoesNotExist:
data['error'] = 'The new App to install with internal ID "{}" does not exists or you do not have access rights'.format(new_app_intaid)
return render(request, 'error.html', {'data': data})
# Set new App on the Thing
thing.app = new_app
thing.pool = new_app.default_pool
thing.save()
# Redirect to a fresh dashboard
return HttpResponseRedirect('/dashboard_thing/?tid={}&intaid={}'.format(thing.tid, new_app_intaid))
# Uninstall App if we have to
if data['action'] == 'uninstall' and confirmed:
if thing.app_set_via != 'backend':
data['error'] = 'Error, the App for Thing "{}" was setup on the Thing itself and cannot be uninstalled'.format(thing.tid)
return render(request, 'error.html', {'data': data})
logger.info('Will uninstall App on thing "{}"'.format(thing.tid))
# Get the "NoneApp" for this user, and if it does not exist, create it.
try:
none_app = App.objects.get(user=request.user, aid='00000000-0000-0000-0000-000000000000')
except App.DoesNotExist:
none_app = create_none_app(request.user)
# Set new (None) App on the Thing
thing.app = none_app
thing.pool = none_app.default_pool
thing.save()
# Redirect to a fresh dashboard
return HttpResponseRedirect('/dashboard_thing/?tid={}&intaid={}'.format(thing.tid, none_app.id))
# Delete (remove) a Thing
if data['action'] == 'remove' and confirmed:
logger.info('Removing TID "{}" '.format(thing.tid))
data['tid'] = thing.tid
try:
WorkerMessageHandler.delete(aid=thing.app.aid, tid=thing.tid)
except Exception as e:
data['error'] = 'Error in deleting Thing with ID "{}": {}'.format(thing.tid, e)
return render(request, 'error.html', {'data': data})
else:
try:
ManagementMessage.objects.filter(tid=thing.tid).delete()
except Exception as e:
logger.error('Error when deleting management messages for tid "{}"'.format(thing.tid))
thing.delete()
return render(request, 'dashboard_thing_deleted.html', {'data': data})
# If a pool is set get it, and change
if pool_name:
try:
thing.pool = Pool.objects.get(app=thing.app, name=pool_name)
thing.save()
except Pool.DoesNotExist:
data['error'] = 'The pool named "{}" does not exists'.format(pool_name)
return render(request, 'error.html', {'data': data})
# Get last worker messages
last_worker_msgs=[]
try:
last_worker_msgs_or = WorkerMessageHandler.get(aid=thing.app.aid, tid=thing.tid, last=3)
for item in last_worker_msgs_or:
# Fix time
item.ts = str(item.ts.astimezone(profile_timezone)).split('.')[0]
# Convert from json to string
item.data = str(item.data)
# Truncate if too long
if len(item.data) >= 150:
item.data = str(item.data[0:150]) + '...'
last_worker_msgs.append(item)
except Exception as e:
logger.error('Error when looping over worker messages for the dashboard: {}'.format(e))
data['last_worker_msgs'] = last_worker_msgs
# Get last management messages
last_management_msgs = []
try:
last_management_msgs = ManagementMessage.objects.filter(tid=thing.tid, aid=thing.app.aid).order_by('ts')[:3].reverse()
for msg in last_management_msgs:
msg.ts = str(msg.ts.astimezone(profile_timezone)).split('.')[0]
except:
logger.error('Error when looping over management messages for the dashboard: {}'.format(e))
data['last_management_msgs'] = last_management_msgs
# Load session
try:
session = Session.objects.filter(thing=thing).latest('last_contact')
data['session'] = session
session.duration = str(session.last_contact-session.started).split('.')[0]
if session.duration.startswith('0'):
session.duration = '0'+session.duration
except:
data['session'] = None
else:
# Compute status
deltatime_from_last_contact_s = time.time() - s_from_dt(session.last_contact) #session.last_contact
data['connection_status'] = '<font color="red">OFFLINE</font>'
data['thing_status'] = 'OFFLINE'
try:
if deltatime_from_last_contact_s < int(thing.pool.settings.management_interval) + settings.CONTACT_TIMEOUT_TOLERANCE:
data['connection_status'] = '<font color="limegreen">ONLINE</font>'
data['thing_status'] = 'ONLINE'
except:
pass
try:
if deltatime_from_last_contact_s < int(thing.pool.settings.worker_interval) + settings.CONTACT_TIMEOUT_TOLERANCE:
data['connection_status'] = '<font color="limegreen">ONLINE</font>'
data['thing_status'] = 'ONLINE'
except:
pass
# Formatting tricks
session.last_contact = str(session.last_contact.astimezone(profile_timezone)).split('.')[0]
if session.last_pythings_status.startswith('Ok:'):
session.last_pythings_status = 'OK'
# Format worker traceback if any
session.last_worker_status_traceback = None
try:
if session.last_worker_status.startswith('KO: '):
pieces = session.last_worker_status.replace('(Traceback', '\nTraceback').split('\n')
sub_pieces = pieces[0].split(' ')
session.last_worker_status = sub_pieces[0] + ' ' + sub_pieces[1] + ': ' + ' '.join(sub_pieces[2:])
session.last_worker_status_traceback = '\n'.join(pieces[1:])[:-1]
except:
pass
# Format management traceback if any
session.last_management_status_traceback = None
try:
if session.last_management_status.startswith('KO: '):
pieces = session.last_management_status.replace('(Traceback', '\nTraceback').split('\n')
sub_pieces = pieces[0].split(' ')
session.last_management_status = sub_pieces[0] + ' ' + sub_pieces[1] + ': ' + ' '.join(sub_pieces[2:])
session.last_management_status_traceback = '\n'.join(pieces[1:])[:-1]
except:
pass
# Prepare data for the plots: parse last messages json contents, and if float add to the data for the plots
if from_str and to_str:
# Parse from
from_str_date_part, from_str_time_part = from_str.split(' ')
from_day, from_month, from_year = from_str_date_part.split('/')
from_hour, from_minute = from_str_time_part.split(':')
from_dt = dt(int(from_year), int(from_month), int(from_day), int(from_hour), int(from_minute), 0, tzinfo=timezonize(request.user.profile.timezone))
# Parse to
to_str_date_part, to_str_time_part = to_str.split(' ')
to_day, to_month, to_year = to_str_date_part.split('/')
to_hour, to_minute = to_str_time_part.split(':')
to_dt = dt(int(to_year), int(to_month), int(to_day), int(to_hour), int(to_minute), 0, tzinfo=timezonize(request.user.profile.timezone))
elif from_t and to_t:
from_dt = dt_from_s(float(from_t))
to_dt = dt_from_s(float(to_t))
else:
# Set "to" to NOW
to_dt = datetime.datetime.now()
if last == '1m':
from_dt = to_dt - datetime.timedelta(minutes=1)
elif last == '10m':
from_dt = to_dt - datetime.timedelta(minutes=10)
elif last == '1h':
from_dt = to_dt - datetime.timedelta(minutes=60)
elif last == '1d':
from_dt = to_dt - datetime.timedelta(days=1)
elif last == '1W':
from_dt = to_dt - datetime.timedelta(days=7)
elif last == '1M':
from_dt = to_dt - datetime.timedelta(days=31)
elif last == '1Y':
from_dt = to_dt - datetime.timedelta(days=365)
else:
# Default to last is 1 hour
last = '1h'
from_dt = to_dt - datetime.timedelta(minutes=60)
# Now set from_t and to_t
data['from_t'] = s_from_dt(from_dt)
data['to_t'] = s_from_dt(to_dt)
# Add timezone if not already present
try:
from_dt = pytz.UTC.localize(from_dt)
except ValueError:
pass
try:
to_dt = pytz.UTC.localize(to_dt)
except ValueError:
pass
# Move to right timezone
from_dt = from_dt.astimezone(profile_timezone)
to_dt = to_dt.astimezone(profile_timezone)
data['last'] = last
data['from_dt'] = from_dt
data['to_dt'] = to_dt
data['from_dt_str'] = str(from_dt)
data['to_dt_str'] = str(to_dt)
data['from_dt_utcfake_str'] = str(from_dt.replace(tzinfo=pytz.UTC))
data['to_dt_utcfake_str'] = str(to_dt.replace(tzinfo=pytz.UTC))
# Get messages from DB
messages = []
try:
messages = WorkerMessageHandler.get(aid=thing.app.aid, tid=thing.tid, from_dt=from_dt, to_dt=to_dt)
except Exception as e:
logger.error(format_exception(e))
# Prepare data for Dygraphs
total_messages = 0
for message in messages:
# Increment message counter
total_messages += 1
# Load content
content = message.data
if not content:
continue
# Load timestamp
ts = message.ts.astimezone(profile_timezone)
timestamp_dygraphs = '{}/{:02d}/{:02d} {:02d}:{:02d}:{:02d}'.format(ts.year, ts.month, ts.day, ts.hour, ts.minute,ts.second)
#timestamp_dygraphs = int(s_from_dt(ts)*1000)
# Porcess all the message keys
for key in content:
# Try loading as numeric value
try:
metric_num_value = float(content[key])
except:
continue
# Append data
try:
data['timeseries'][key].append((timestamp_dygraphs, metric_num_value, ts))
except KeyError:
data['metrics'][key] = key
data['timeseries'][key] = []
data['timeseries'][key].append((timestamp_dygraphs, metric_num_value, ts))
# Set total messages
data['total_messages'] = total_messages
# Do we have to aggregate?
if total_messages > 10000:
logger.debug('Too many messages, we need to aggregate.')
aggrgeate_by = 10**len(str(int(total_messages/10000.0)))
data['aggregated'] = True
data['aggregate_by'] = aggrgeate_by
data['total_messages_aggregated'] = int(total_messages/aggrgeate_by)
data['timeseries_aggregated']={}
for key in data['timeseries']:
if key not in data['timeseries_aggregated']:
data['timeseries_aggregated'][key] = []
# Support vars
metric_avg = 0
metric_min = None
metric_max = None
start_time_dt = None
end_time_dt = None
# Loop and aggregate data
for i, entry in enumerate(data['timeseries'][key]):
# Start time
if start_time_dt is None:
start_time_dt = entry[2]
# Avg
metric_avg += entry[1]
# Min
if metric_min is None or entry[1] < metric_min:
metric_min = entry[1]
# Max
if metric_max is None | |
= self.grid(processor)
Nc = g.Ncells()
cdata = self.celldata(processor)
for i in range(Nc):
fp.write("%.6f %.6f %.6f %.6f %d %d %d %d %d %d %d %d %d %d %.6f %.6f %.6f\n"%tuple(cdata[i,:]))
fp.close()
def Nk(self,proc):
return self.celldata(proc)[:,4].astype(int32)
def Nke(self,proc):
return self.edgedata(proc)[:,6].astype(int32)
def Nkc(self,proc):
return self.edgedata(proc)[:,7].astype(int32)
def edge_bathymetry(self,proc):
""" If edge depths are defined separately, return an
xyz array of them for the given processor. Otherwise,
return None
"""
fn = self.file_path('edgedepths',proc)
if os.path.exists(fn):
return loadtxt(fn)
def read_bathymetry(self,proc):
gr = self.grid(proc)
return self.celldata(proc)[:,3],gr
def depth_for_cell(self,cell_id,proc):
bath,gr = self.read_bathymetry(proc)
return bath[cell_id]
dzmin=0.001 # someday could read this from a file...
_dz = None
def dz(self):
if self._dz is None:
vsp = open(self.file_path('vertspace'),'rt')
self._dz = array(list(map(float,vsp.read().split())))
return self._dz
def z_levels(self):
""" returns bottoms of the z-levels, but as *soundings*, and not
including z=0.
"""
if self._z_levels is None:
dz_list = self.dz()
self._z_levels = cumsum(dz_list)
return self._z_levels
def primary_boundary_datasource(self):
""" return the datasource that forces the largest number of edges.
This is a hack to find the datasource that forces the freesurface on
the ocean boundary.
"""
best_fg = None
for proc in range(self.num_processors()):
f = forcing.read_boundaries_dat(self,proc)
for fg in f.forcing_groups:
if best_fg is None or len(best_fg.edges) < len(fg.edges):
best_fg = fg
ds_index = best_fg.hydro_datasource()
return best_fg.gforce.datasources[ds_index]
def boundary_forcing(self,proc=None):
""" if proc is not given, return forcing from the first processor that has
some forced cells
this could probably get factored into boundary_inputs:BoundaryWriter
"""
if proc is None:
for proc in range(self.num_processors()):
f = forcing.read_boundaries_dat(self,proc)
if f.has_forced_edges():
return f
else:
return forcing.read_boundaries_dat(self,proc)
def topo_edges(self):
if self._topo_edges is None:
self._topo_edges = []
for p in range(self.num_processors()):
fp = open(self.file_path('topology',p),'rt')
def tokgen():
while 1:
buff = fp.readline()
if buff == "":
return
for t in buff.split():
yield int(t)
tok = tokgen()
nprocs = next(tok)
nneighs = next(tok)
for n in range(nneighs):
neigh = next(tok)
if p < neigh:
self._topo_edges.append( (p,neigh) )
return self._topo_edges
    def show_topology(self,procs_per_node=4,topo_edges=None):
        """Plot the inter-processor communication graph with pylab.

        Processors are laid out procs_per_node per row (rows spaced twice
        as far apart, x positions jittered so overlapping edges remain
        visible); each topology edge is drawn as a blue line between its
        two processors.  Draws into the current pylab figure.
        """
        if topo_edges is None:
            topo_edges = self.topo_edges()
        # load the graph:
        # the graph is stored just as a set of edges, with
        # the processors numbered 0-<nprocs-1>
        cla()
        # graph the processor connectivity graph:
        # round up:
        n_nodes = 1 + (self.num_processors()-1)//procs_per_node
        nodes = 2*arange(n_nodes) # space nodes out twice as much as cores
        cores = arange(procs_per_node)
        x,y = meshgrid(cores,nodes)
        # I want an array that maps proc_number to an xy pair
        proc_locs = transpose( array( (x.ravel(),y.ravel()), float64 ))
        # and randomly perturb so we can see all the lines:
        proc_locs[:,:1] = proc_locs[:,:1] + 0.4*(random( proc_locs[:,:1].shape ) - 0.5)
        # now proc 0-3 are on a line, 4-7, etc.
        for i in range(self.num_processors()):
            pylab.annotate( "%i"%i, proc_locs[i] )
        pylab.plot(proc_locs[:,0],proc_locs[:,1],'ro')
        # one blue segment per inter-processor edge
        for e in topo_edges:
            locs = proc_locs[array(e),:]
            pylab.plot( locs[:,0],locs[:,1],'b-' )
        pylab.axis('equal')
        # pad the axis limits by 5% on every side
        x1,x2,y1,y2 = pylab.axis()
        x1 = x1 - 0.05*(x2 - x1)
        x2 = x2 + 0.05*(x2 - x1)
        y1 = y1 - 0.05*(y2 - y1)
        y2 = y2 + 0.05*(y2 - y1)
        pylab.axis( [x1,x2,y1,y2] )
def remap_processors(self,procs_per_node=4,do_plot=False):
import pymetis
# create the adjacency graph in the way that
# pymetis likes it
adj = [None]*s.num_processors()
topo_edges = s.topo_edges()
for a,b in topo_edges:
if adj[a] is None:
adj[a] = []
if adj[b] is None:
adj[b] = []
adj[a].append(b)
adj[b].append(a)
n_nodes = 1 + old_div((s.num_processors() - 1), procs_per_node)
cuts,parts = pymetis.part_graph(n_nodes,adjacency=adj)
print(parts)
# create a mapping of old proc nunmber to new proc number
#parts = array(parts)
mapping = -1*ones(s.num_processors())
# mapping[i] gives the new processor number for proc i
count_per_node = zeros(n_nodes)
for i in range(len(parts)):
# old proc i
my_node = parts[i]
new_proc = my_node * procs_per_node + count_per_node[my_node]
mapping[i] = new_proc
count_per_node[my_node]+=1
# now create a new topo-edges array so we can graph this...
new_topo_edges = mapping[array(s.topo_edges())]
new_topo_edges = new_topo_edges.astype(int32)
if do_plot:
pylab.clf()
pylab.subplot(121)
s.show_topology()
pylab.subplot(122)
s.show_topology(topo_edges=new_topo_edges)
def parse_output(self,output_name=None):
"""
reads the output from a run, hopefully with at least
-vv verbosity.
If the run crashed, sets self.crash to a crash object
Sets self.status to one of 'done','crash','running'
this is a work in progress (but you knew that, right?)
"""
if output_name is None:
output_name = sun_dir+'/output'
run_output = open(output_name)
self.status = 'running'
self.crash = None
while 1:
l = run_output.readline()
if not l:
break
if l.find('Run is blowing up!') >= 0:
self.status = 'crash'
m = re.match(r'Time step (\d+): Processor (\d+), Run is blowing up!',l)
if not m:
print("Failed to match against")
print(l)
else:
# got a crash
crash = SuntansCrash()
crash.sun = self
crash.step = int(m.group(1))
crash.processor = int(m.group(2))
l = run_output.readline()
for i in range(100): # search for CFL details up to 100 lines away
### Vertical Courant number:
m = re.match(r'Courant number problems at \((\d+),(\d+)\), Wmax=([-0-9\.]+), dz=([0-9\.]+) CmaxW=([0-9\.]+) > ([0-9\.]+)',
l)
if m:
crash.cell_id = int(m.group(1))
crash.z_id = int(m.group(2))
crash.w_max = float(m.group(3))
crash.dz = float(m.group(4))
crash.cmax_w = float(m.group(5))
crash.cmax_w_lim = float(m.group(6))
crash.description = SuntansCrash.vertical_courant
break
### Horizontal Courant number:
m = re.match(r'Courant number problems at \((\d+),(\d+)\), Umax=([-0-9\.]+), dx=([0-9\.]+) CmaxU=([0-9\.]+) > ([0-9\.]+)',
l)
if m:
crash.edge_id = int(m.group(1))
crash.z_id = int(m.group(2))
crash.u_max = float(m.group(3))
crash.dx = float(m.group(4))
crash.cmax_u = float(m.group(5))
crash.cmax_u_lim = float(m.group(6))
crash.description = SuntansCrash.horizontal_courant
break
print("Hmm - maybe this isn't a vertical courant number issue")
l = run_output.readline()
self.crash = crash
break
def write_bov(self,label,proc,dims,data):
"""
Write a binary file that will hopefully be readable by
visit through some naming conventions, and can be read
back into sunreader.
label: a name containing no spaces or dashes that describes
what the data is (e.g. m2_fs_amp )
dims: a list identifying the dimensions in order.
[z_level, cell, time_step]
data: an array that matches the described dimensions.
Currently there are only two grids defined in visit:
2D cells
3D cells, with z-level
"""
# Enforce this ordering on the dimensions, which comes from the
# ordering of dimensions in suntans scalar output
required_order = ['time_step','z_level','cell']
given_order = [required_order.index(s) for s in dims]
if sorted(given_order) != given_order:
raise Exception("Order of dimensions must be time_step, cell, z_level")
# Enforce the expected size of each dimension:
g = self.grid(proc)
for i in range(len(dims)):
if dims[i] == 'time_step':
print("Assuming that number of timesteps is okay")
elif dims[i] == 'cell' and data.shape[i] != g.Ncells():
print("WARNING: cell dimension - data shape is %i but grid reports %i"%(data.shape[i],g.Ncells()))
elif dims[i] == 'z_level' and data.shape[i] != self.conf_int('Nkmax'):
print("WARNING: z_level dimension - data shape is %i but Nkmax is %i"%(
data.shape[i],self.conf_int('Nkmax')))
if data.dtype != float64:
print("Converting to 64-bit floats")
data = data.astype(float64)
formatted_name = os.path.join(self.datadir,label + "-" + "-".join(dims) + ".raw.%i"%proc)
print("Writing to %s"%formatted_name)
fp = open(formatted_name,'wb')
fp.write(data.tostring())
fp.close()
def harm_decomposition(self,consts=['constant','M2'],ref_data=None,phase_units='minutes',
skip=0.5):
""" Perform a harmonic decomposition on the freesurface, using
the given constituents, and write the results to
<const name>_<phase or amp>-cell.raw.<proc>
Phase is relative to cos(t), t in simulation time.
At some point ref_data may be used to specify a timeseries that can also
be decomposed, and whose amp/phase will be used as a reference for normalizing
the others...
or set ref_data='forcing' to take the reference to be the forcing on the first forced
cell (i.e. it will loop over processors, and take the first cell with forcing data)
"""
import harm_field
if ref_data == 'forcing':
# this matches the timeline used in harm_field:
# times of freesurface output, using the second half of the run.
# for forcing it would be okay to use the entire run, but I
# think it's more consistent to decompose the forcing at the
# same times as the cell values
t = self.timeline()[self.steps_available()//2:]
forcing = None
for proc in range(self.num_processors()):
forcing = self.boundary_forcing(proc)
if forcing.n_bcells > 0:
print("Getting forcing data for boundary cell 0, processor %s"%proc)
ref_data = forcing.calc_forcing(times=t,units='seconds')
break
if forcing is None:
raise Exception("No forced boundary cells were found")
if ref_data:
ref_t,ref_vals = ref_data
# need to get the right omegas, a bit kludgy.
import harm_plot,harm_decomp
hplot = | |
# Why: #7737 in Alexa global
'http://www.navy.mil/',
# Why: #7738 in Alexa global
'http://www.mg.gov.br/',
# Why: #7739 in Alexa global
'http://gizmodo.uol.com.br/',
# Why: #7740 in Alexa global
'http://www.psychcentral.com/',
# Why: #7741 in Alexa global
'http://www.ultipro.com/',
# Why: #7742 in Alexa global
'http://www.unisa.ac.za/',
# Why: #7743 in Alexa global
'http://www.sooperarticles.com/',
# Why: #7744 in Alexa global
'http://www.wondershare.com/',
# Why: #7745 in Alexa global
'http://www.wholefoodsmarket.com/',
# Why: #7746 in Alexa global
'http://www.dumpaday.com/',
# Why: #7747 in Alexa global
'http://www.littlewoods.com/',
# Why: #7748 in Alexa global
'http://www.carscom.net/',
# Why: #7749 in Alexa global
'http://www.meitu.com/',
# Why: #7750 in Alexa global
'http://www.9lwan.com/',
# Why: #7751 in Alexa global
'http://www.emailmeform.com/',
# Why: #7752 in Alexa global
'http://www.arte.tv/',
# Why: #7753 in Alexa global
'http://www.tribalfootball.com/',
# Why: #7754 in Alexa global
'http://www.howtoforge.com/',
# Why: #7755 in Alexa global
'http://www.cvent.com/',
# Why: #7756 in Alexa global
'http://www.fujitsu.com/',
# Why: #7757 in Alexa global
'http://www.silvergames.com/',
# Why: #7758 in Alexa global
'http://www.tp-link.com.cn/',
# Why: #7759 in Alexa global
'http://www.fatlossfactor.com/',
# Why: #7760 in Alexa global
'http://www.nusport.nl/',
# Why: #7761 in Alexa global
'http://www.todo1.com/',
# Why: #7762 in Alexa global
'http://www.see-tube.com/',
# Why: #7763 in Alexa global
'http://www.lolspots.com/',
# Why: #7764 in Alexa global
'http://www.sucksex.com/',
# Why: #7765 in Alexa global
'http://www.encontreinarede.com/',
# Why: #7766 in Alexa global
'http://www.myarabylinks.com/',
# Why: #7767 in Alexa global
'http://www.v-39.net/',
# Why: #7769 in Alexa global
'http://www.soompi.com/',
# Why: #7770 in Alexa global
'http://www.mltdb.com/',
# Why: #7771 in Alexa global
'http://www.websitetonight.com/',
# Why: #7772 in Alexa global
'http://www.bu.edu/',
# Why: #7773 in Alexa global
'http://www.lazada.co.th/',
# Why: #7774 in Alexa global
'http://www.mature-money.com/',
# Why: #7775 in Alexa global
'http://www.simplemachines.org/',
# Why: #7776 in Alexa global
'http://www.tnt-online.ru/',
# Why: #7777 in Alexa global
'http://www.disput.az/',
# Why: #7779 in Alexa global
'http://www.flirtcafe.de/',
# Why: #7780 in Alexa global
'http://www.d1net.com/',
# Why: #7781 in Alexa global
'http://www.infoplease.com/',
# Why: #7782 in Alexa global
'http://www.unseenimages.co.in/',
# Why: #7783 in Alexa global
'http://www.downloadatoz.com/',
# Why: #7784 in Alexa global
'http://www.norwegian.com/',
# Why: #7785 in Alexa global
'http://www.youtradefx.com/',
# Why: #7786 in Alexa global
'http://www.petapixel.com/',
# Why: #7787 in Alexa global
'http://www.bytes.com/',
# Why: #7788 in Alexa global
'http://ht.ly/',
# Why: #7789 in Alexa global
'http://www.jobberman.com/',
# Why: #7790 in Alexa global
'http://www.xenforo.com/',
# Why: #7791 in Alexa global
'http://www.pomponik.pl/',
# Why: #7792 in Alexa global
'http://www.siambit.org/',
# Why: #7793 in Alexa global
'http://www.twoplustwo.com/',
# Why: #7794 in Alexa global
'http://www.videoslasher.com/',
# Why: #7795 in Alexa global
'http://www.onvista.de/',
# Why: #7796 in Alexa global
'http://www.shopping-search.jp/',
# Why: #7797 in Alexa global
'http://www.canstockphoto.com/',
# Why: #7798 in Alexa global
'http://www.cash4flirt.com/',
# Why: #7799 in Alexa global
'http://www.flashgames.it/',
# Why: #7800 in Alexa global
'http://www.xxxdessert.com/',
# Why: #7801 in Alexa global
'http://www.cda.pl/',
# Why: #7803 in Alexa global
'http://www.costco.ca/',
# Why: #7804 in Alexa global
'http://www.elnuevodiario.com.ni/',
# Why: #7805 in Alexa global
'http://www.svtplay.se/',
# Why: #7806 in Alexa global
'http://www.ftc.gov/',
# Why: #7807 in Alexa global
'http://www.supersonicads.com/',
# Why: #7808 in Alexa global
'http://www.openstreetmap.org/',
# Why: #7809 in Alexa global
'http://www.chinamobile.com/',
# Why: #7810 in Alexa global
'http://www.fastspring.com/',
# Why: #7811 in Alexa global
'http://www.eprice.com.tw/',
# Why: #7813 in Alexa global
'http://www.mcdonalds.com/',
# Why: #7814 in Alexa global
'http://www.egloos.com/',
# Why: #7815 in Alexa global
'http://www.mouser.com/',
# Why: #7816 in Alexa global
'http://livemook.com/',
# Why: #7817 in Alexa global
'http://www.woxiu.com/',
# Why: #7818 in Alexa global
'http://www.pingler.com/',
# Why: #7819 in Alexa global
'http://www.ruelsoft.org/',
# Why: #7820 in Alexa global
'http://www.krone.at/',
# Why: #7821 in Alexa global
'http://www.internetbookshop.it/',
# Why: #7822 in Alexa global
'http://www.alibaba-inc.com/',
# Why: #7823 in Alexa global
'http://www.kimsufi.com/',
# Why: #7824 in Alexa global
'http://www.summitracing.com/',
# Why: #7826 in Alexa global
'http://www.parsfootball.com/',
# Why: #7827 in Alexa global
'http://www.standard.co.uk/',
# Why: #7828 in Alexa global
'http://www.photoblog.pl/',
# Why: #7829 in Alexa global
'http://www.bicaps.com/',
# Why: #7830 in Alexa global
'http://www.digitalplayground.com/',
# Why: #7831 in Alexa global
'http://www.zerochan.net/',
# Why: #7832 in Alexa global
'http://www.whosay.com/',
# Why: #7833 in Alexa global
'http://www.qualityseek.org/',
# Why: #7834 in Alexa global
'http://www.say7.info/',
# Why: #7835 in Alexa global
'http://www.rs.gov.br/',
# Why: #7836 in Alexa global
'http://www.wps.cn/',
# Why: #7837 in Alexa global
'http://www.google.co.mz/',
# Why: #7838 in Alexa global
'http://www.yourlustmovies.com/',
# Why: #7839 in Alexa global
'http://www.zalando.nl/',
# Why: #7840 in Alexa global
'http://www.jn.pt/',
# Why: #7841 in Alexa global
'http://www.homebase.co.uk/',
# Why: #7842 in Alexa global
'http://www.avis.com/',
# Why: #7843 in Alexa global
'http://www.healthboards.com/',
# Why: #7844 in Alexa global
'http://www.filmizlesene.com.tr/',
# Why: #7845 in Alexa global
'http://www.shoutcast.com/',
# Why: #7846 in Alexa global
'http://www.konami.jp/',
# Why: #7847 in Alexa global
'http://www.indiafreestuff.in/',
# Why: #7848 in Alexa global
'http://www.avval.ir/',
# Why: #7849 in Alexa global
'http://www.gamingwonderland.com/',
# Why: #7850 in Alexa global
'http://www.adage.com/',
# Why: #7851 in Alexa global
'http://www.asu.edu/',
# Why: #7852 in Alexa global
'http://www.froma.com/',
# Why: #7853 in Alexa global
'http://www.bezuzyteczna.pl/',
# Why: #7854 in Alexa global
'http://www.workopolis.com/',
# Why: #7855 in Alexa global
'http://extranetinvestment.com/',
# Why: #7856 in Alexa global
'http://www.lablue.de/',
# Why: #7857 in Alexa global
'http://www.geotauaisay.com/',
# Why: #7858 in Alexa global
'http://www.bestchange.ru/',
# Why: #7859 in Alexa global
'http://www.ptp22.com/',
# Why: #7860 in Alexa global
'http://www.tehparadox.com/',
# Why: #7861 in Alexa global
'http://www.ox.ac.uk/',
# Why: #7862 in Alexa global
'http://www.radaris.com/',
# Why: #7863 in Alexa global
'http://www.domdigger.com/',
# Why: #7864 in Alexa global
'http://www.lizads.com/',
# Why: #7865 in Alexa global
'http://www.chatvl.com/',
# Why: #7866 in Alexa global
'http://www.elle.com/',
# Why: #7867 in Alexa global
'http://www.soloaqui.es/',
# Why: #7868 in Alexa global
'http://www.tubejuggs.com/',
# Why: #7869 in Alexa global
'http://www.jsonline.com/',
# Why: #7870 in Alexa global
'http://www.ut.ac.ir/',
# Why: #7871 in Alexa global
'http://www.iitv.info/',
# Why: #7872 in Alexa global
'http://www.runetki.tv/',
# Why: #7873 in Alexa global
'http://www.hyundai.com/',
# Why: #7874 in Alexa global
'http://www.turkiye.gov.tr/',
# Why: #7875 in Alexa global
'http://www.jobstreet.com.sg/',
# Why: #7877 in Alexa global
'http://www.jp-sex.com/',
# Why: #7878 in Alexa global
'http://www.soccer.ru/',
# Why: #7879 in Alexa global
'http://www.slashfilm.com/',
# Why: #7880 in Alexa global
'http://www.couchtuner.eu/',
# Why: #7881 in Alexa global
'http://quanfan.com/',
# Why: #7882 in Alexa global
'http://www.porsche.com/',
# Why: #7883 in Alexa global
'http://www.craftsy.com/',
# Why: #7884 in Alexa global
'http://www.geizhals.at/',
# Why: #7885 in Alexa global
'http://www.spartoo.it/',
# Why: #7886 in Alexa global
'http://yxku.com/',
# Why: #7887 in Alexa global
'http://www.vodonet.net/',
# Why: #7888 in Alexa global
'http://www.photo.net/',
# Why: #7889 in Alexa global
'http://www.raiffeisen.ru/',
# Why: #7890 in Alexa global
'http://www.tablotala.com/',
# Why: #7891 in Alexa global
'http://www.theaa.com/',
# Why: #7892 in Alexa global
'http://www.idownloadblog.com/',
# Why: #7894 in Alexa global
'http://www.rodfile.com/',
# Why: #7895 in Alexa global
'http://www.alabout.com/',
# Why: #7896 in Alexa global
'http://www.f1news.ru/',
# Why: #7897 in Alexa global
'http://www.divxstage.eu/',
# Why: #7898 in Alexa global
'http://www.itusozluk.com/',
# Why: #7899 in Alexa global
'http://www.tokyodisneyresort.co.jp/',
# Why: #7900 in Alexa global
'http://www.hicdma.com/',
# Why: #7901 in Alexa global
'http://www.dota2lounge.com/',
# Why: #7902 in Alexa global
'http://www.meizu.cn/',
# Why: #7903 in Alexa global
'http://www.greensmut.com/',
# Why: #7904 in Alexa global
'http://www.bharatiyamobile.com/',
# Why: #7905 in Alexa global
'http://www.handycafe.com/',
# Why: #7906 in Alexa global
'http://www.regarder-film-gratuit.com/',
# Why: #7907 in Alexa global
'http://www.adultgeek.net/',
# Why: #7908 in Alexa global
'http://www.yintai.com/',
# Why: #7909 in Alexa global
'http://www.brasilescola.com/',
# Why: #7910 in Alexa global
'http://www.verisign.com/',
# Why: #7911 in Alexa global
'http://www.dnslink.com/',
# Why: #7912 in Alexa global
'http://www.standaard.be/',
# Why: #7913 in Alexa global
'http://www.cbengine.com/',
# Why: #7914 in Alexa global
'http://www.pchealthboost.com/',
# Why: #7915 in Alexa global
'http://www.dealdey.com/',
# Why: #7916 in Alexa global
'http://www.cnnturk.com/',
# Why: #7917 in Alexa global
'http://www.trutv.com/',
# Why: #7918 in Alexa global
'http://www.tahrirnews.com/',
# Why: #7919 in Alexa global
'http://www.getit.in/',
# Why: #7920 in Alexa global
'http://www.jquerymobile.com/',
# Why: #7921 in Alexa global
'http://www.girlgames.com/',
# Why: #7922 in Alexa global
'http://www.alhayat.com/',
# Why: #7923 in Alexa global
'http://www.ilpvideo.com/',
# Why: #7924 in Alexa global
'http://www.stihi.ru/',
# Why: #7925 in Alexa global
'http://www.skyscanner.ru/',
# Why: #7926 in Alexa global
| |
if bollo:
_bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
importo_totale += float(_bollo)
importo_totale_da_salvare = importo_totale +imposta_iva
if not "/" in pagamento:
importo_totale = Money(str(importo_totale),"EUR")
importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
fattura.totale(str(ritorna_prezzo_europeo(importo_totale_da_salvare)))
scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
if "r.b." in pagamento.lower() or "riba" in pagamento.lower():
riba=True
else:
riba=False
db.fatture_salvate.insert(scadenza=scadenza,nome_cliente=nome_cliente,data_fattura = start_date,numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = importo_totale_da_salvare,richiede_riba=riba,riba_emessa=False,user_id=auth.user_id)
else:
# Devo mettere due fatture con il pagamento e scadenza corretti
first_half = round(importo_totale_da_salvare / 2,2)
second_half= importo_totale_da_salvare - first_half
s=pagamento
st = int(s[s.index("/")+1:s.index("/")+4]) - int(s[s.index("/")-3:s.index("/")])
second_date = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
first_date = second_date - datetime.timedelta(days = int(st) +1)
if "F.M" in pagamento:
pass
first_date = first_date.strftime("%d/%m/%Y")
# day_start,day_end = monthrange(first_date.year, first_date.month)
# first_date = str(day_end)+"/"+str(first_date.month)+"/"+str(first_date.year)
else:
first_date = first_date.strftime("%d/%m/%Y")
second_date = second_date.strftime("%d/%m/%Y")
if "r.b." in pagamento.lower() or "riba" in pagamento.lower():
riba=True
else:
riba=False
first_date = datetime.datetime.strptime(first_date,"%d/%m/%Y")
second_date = datetime.datetime.strptime(second_date,"%d/%m/%Y")
importo_totale = Money(str(importo_totale),"EUR")
importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
fattura.totale(str(ritorna_prezzo_europeo(importo_totale_da_salvare)))
db.fatture_salvate.insert(scadenza=first_date,nome_cliente=nome_cliente,data_fattura = start_date,numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = first_half,richiede_riba=riba,riba_emessa=False,user_id=auth.user_id)
db.fatture_salvate.insert(scadenza=second_date,nome_cliente=nome_cliente,data_fattura = start_date,numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = second_half,richiede_riba=riba,riba_emessa=False,user_id=auth.user_id)
# print "SCADENZA {0}".format(scadenza)
"""
fattura.foote,Field('nome_cliente')sr("Totale merce","Sconto","Netto merce","spese varie","spese_trasporto","totale_imponibile","Totale imposta")
fattura.footer_2("CodIva","Spese accessorie","Imponibile","Iva","Imposta","Bolli")
fattura.footer_2("CodIva2","Spese accessorie2","Imponibile2","Iva2","Imposta2","Bolli2")
fattura.totale("14567645")
"""
fattura.add_row("","","","","","","","","")
fattura.add_row("",annotazioni,"","","","","","","")
fattura.insert_rows()
fattura.create_pdf()
db(db.fattura).delete()
db.fattura.insert(numero_fattura = numero_fattura_da_salvare)
db(db.ddt_da_fatturare.user_id == auth.user_id).delete()
def return_scadenza(fattura_id):
    """Return the due date ('scadenza') for a saved invoice.

    Walks every DDT referenced by the invoice, looks up the payment terms
    of the originating customer order, and computes the due date as a
    "%d/%m/%Y" string (empty string if no row yields a date).  On an
    unknown payment type, sets response.flash and returns locals()
    (web2py controller idiom).
    """
    ddts = db(db.fatture_salvate.id == fattura_id).select().first()["id_ddt"]
    # id_ddt stores the repr of a python list of DDT ids; eval() rebuilds it.
    # NOTE(review): eval of DB contents is unsafe if the field can be
    # tampered with -- consider storing JSON instead.
    ddts_list = eval(ddts)
    scadenza=""
    # NOTE(review): reference date hard-coded to 28/02/2017 -- confirm this
    # is intentional and not leftover test code.
    start_date = datetime.datetime.strptime("28/02/2017","%d/%m/%Y")
    for ddt in ddts_list:
        rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id ==ddt).select()
        # print "DDT ID : ",ddt
        for row in rows:
            """
            <Row {'n_riga': '3', 'prezzo': '8.9919', 'saved_ddt_id': '21', 'quantita': '11', 'evasione': datetime.datetime(2017, 1, 31, 8, 56), 'id': 10L, 'codice_articolo': '892069925', 'codice_iva': 'Iva 22%', 'descrizione': 'FLANGIA', 'sconti': None, 'u_m': 'Nr', 'user_id': '1', 'codice_ordine': '1/17', 'id_ordine': '26', 'riferimento_ordine': 'fdsfsdf'}>
            """
            """
            La riga del ddt contiene i dati relativi all'ordine (id_ordine)
            siccome il pagamento può essere modificato bisogna risalire all'ordine
            poi al tipo di pagamento, poi ai giorni e calcolare la data
            """
            # The DDT row carries the order id; since payment terms can be
            # edited per order we go order -> payment type -> days -> date.
            id_ordine = row.id_ordine
            try:
                try:
                    pagamento = db(db.ordine_cliente.id == id_ordine).select().first()["pagamento"]
                    # print "pagamento = ",pagamento
                except:
                    # bare except: any lookup failure means "no order-level
                    # payment"; fall back to the customer's default below
                    pagamento = None
                if pagamento is None:
                    # NOTE(review): id_cliente is not defined anywhere in this
                    # function -- this branch raises NameError (silently
                    # swallowed by the outer except). Probably should be
                    # derived from the row/order; confirm.
                    pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
                # "F.M." presumably flags "fine mese" (end-of-month) terms
                if "F.M." in pagamento:
                    fine_mese = True
                else:
                    fine_mese = False
                if not fine_mese:
                    try:
                        # plain terms: today + configured number of days
                        giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                        scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                        scadenza_salvata = scadenza
                        scadenza = scadenza.strftime("%d/%m/%Y")
                    except:
                        response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
                        return locals()
                else:
                    # "M.S." presumably means "mese successivo" (next month)
                    if "M.S." in pagamento:
                        giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                        scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                        # snap to the last day of the resulting month...
                        day_start,day_end = monthrange(scadenza.year, scadenza.month)
                        scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
                        scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
                        # ...then push 10 days into the following month
                        scadenza = scadenza.date() + datetime.timedelta(days = 10)
                        scadenza = scadenza.strftime("%d/%m/%Y")
                    else:
                        # end-of-month terms: add days, then snap to the last
                        # day of that month
                        giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                        scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                        day_start,day_end = monthrange(scadenza.year, scadenza.month)
                        scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
            except Exception,e:
                # print e
                # silent swallow: any failure above leaves the previous (or
                # empty) scadenza in place -- note the NameError case above
                pass
    return scadenza
@service.jsonrpc
@service.jsonrpc2
def crea_fattura_preview(args):
id_cliente=args['0']
# print "ID CLIENTE : ",id_cliente
numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
numero = int(numero_corrente_fattura.split("/")[0])
anno = int(numero_corrente_fattura.split("/")[1])
numero +=1
numero_fattura_da_salvare = str(numero)+"/"+str(anno)
# print "qui"
"""
Dati cliente
"""
dati_cliente = db(db.clienti.id == id_cliente).select().first()
nome_cliente=dati_cliente.nome
citta_cliente = dati_cliente.citta
indirizzo_cliente = dati_cliente.indirizzo
cap_cliente = dati_cliente.cap
provincia_cliente = dati_cliente.provincia
cf_cliente = dati_cliente.codice_fiscale
pi_cliente = dati_cliente.partita_iva
nazione_cliente = dati_cliente.nazione
codice_banca = dati_cliente.codice_banca
iban_cliente = dati_cliente.codice_iban
dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
scritta_esenzione_cliente = dati_cliente.descrizione_esenzione_iva
annotazioni=dati_cliente.annotazioni
ddts_id = db(db.ddt_da_fatturare.user_id == auth.user_id).select()
for r in ddts_id:
data_scelta = r.data_emissione
m = datetime.datetime.strptime(data_scelta,"%d/%m/%Y").date()
# print "MESE : "+str(m.month)
day_start,day_end = monthrange(m.year, m.month)
d = str(day_end)+"/"+str(m.month)+"/"+str(m.year)
start_date = datetime.datetime.strptime(d,"%d/%m/%Y")
print "-- DATE CHECK --"
print start_date
fattura = FATTURA("FATTURA DIFFERITA",start_date.strftime("%d/%m/%Y"),numero_fattura_da_salvare,anteprima=True)
fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
try:
# print "IBAN : ",iban_cliente
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(iban_cliente),"PAGAMENTO","SCADENZA")
except:
response.flash="Controllare il tipo di pagamento in anagrafica"
return locals()
ddts_id = db(db.ddt_da_fatturare.user_id == auth.user_id).select()
fattura.rows=[]
lista_codici_iva = {}
importo_totale = 0
imposta_totale = 0
imposta_iva = 0
lista_ddt = []
for ddt_id in ddts_id:
lista_ddt.append(ddt_id.ddt_id)
riferimento_ddt = "Rif. DDT : " + ddt_id.numero_ddt + " del " + ddt_id.data_emissione
fattura.add_row("",riferimento_ddt,"","","","","","","")
print ddt_id
rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == ddt_id.ddt_id).select()
print "PAst creation ---##"
# print "DDT ID : ",ddt_id.ddt_id
scritta_esenzione = False
for row in rows:
print row
"""
<Row {'n_riga': '3', 'prezzo': '8.9919', 'saved_ddt_id': '21', 'quantita': '11', 'evasione': datetime.datetime(2017, 1, 31, 8, 56), 'id': 10L, 'codice_articolo': '892069925', 'codice_iva': 'Iva 22%', 'descrizione': 'FLANGIA', 'sconti': None, 'u_m': 'Nr', 'user_id': '1', 'codice_ordine': '1/17', 'id_ordine': '26', 'riferimento_ordine': 'fdsfsdf'}>
"""
"""
La riga del ddt contiene i dati relativi all'ordine (id_ordine)
siccome il pagamento può essere modificato bisogna risalire all'ordine
poi al tipo di pagamento, poi ai giorni e calcolare la data
"""
if not "commento" in row.codice_articolo:
id_ordine = row.id_ordine
try:
try:
pagamento = db(db.ordine_cliente.id == id_ordine).select().first()["pagamento"]
# print "pagamento = ",pagamento
except:
pagamento = None
if pagamento is None:
pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
if "F.M." in pagamento:
fine_mese = True
else:
fine_mese = False
if not fine_mese:
try:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
if int(giorni_da_aggiungere)==60:
giorni_da_aggiungere="56"
if int(giorni_da_aggiungere)==90:
giorni_da_aggiungere="86"
if int(giorni_da_aggiungere)==120:
giorni_da_aggiungere="116"
scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
scadenza_salvata = scadenza
scadenza = scadenza.strftime("%d/%m/%Y")
except:
response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
return locals()
else:
if ("M.S." or "ms") in pagamento:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
if int(giorni_da_aggiungere)==60:
giorni_da_aggiungere="56"
if int(giorni_da_aggiungere)==90:
giorni_da_aggiungere="86"
if int(giorni_da_aggiungere)==120:
giorni_da_aggiungere="116"
giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
scadenza = scadenza.strftime("%d/%m/%Y")
else:
# Fine mese senza M.S.
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
if int(giorni_da_aggiungere)==60:
giorni_da_aggiungere="56"
if int(giorni_da_aggiungere)==90:
giorni_da_aggiungere="86"
if int(giorni_da_aggiungere)==120:
giorni_da_aggiungere="116"
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(iban_cliente),pagamento,str(scadenza))
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica"
return locals()
# print "Aggiunta rig"
sconti = row.sconti
if row.sconti is None:
sconti=""
try:
if row.prezzo == "0":
row.prezzo = ""
f = float(row.prezzo)
# print "SONO QUI : PREZZO = ".format(f)
except:
msg = "Prezzo non presente " + riferimento_ddt + " Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
try:
f=float(row.quantita)
except:
msg = "Quantità non valida Cod.Art : " + row.codice_articolo + " Qta : "
response.flash=msg
return locals()
pass
importo = saved_importo = float(row.quantita) * float(row.prezzo)
importo = Money(str(importo),"EUR")
importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
prezzo = str(row.prezzo).replace(".",",")
codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
# print "VALLLLE " + row.codice_iva
descrizione_codice_iva = db(db.righe_in_ordine_cliente.id == row.id_riga_ordine, db.righe_in_ordine_cliente.n_riga==row.n_riga).select().first()["codice_iva"]
codice_iva=db(db.anagrafica_codici_iva.descrizione_codice_iva == descrizione_codice_iva).select().first()["codice_iva"]
row.codice_iva=codice_iva
# print "Nuovo codice iva : "+row.codice_iva
if "Esenzione" in descrizione_codice_iva:
scritta_esenzione = True
percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == descrizione_codice_iva).select().first()["percentuale_iva"]
importo_totale +=saved_importo
imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
if not codice_iva in lista_codici_iva:
lista_codici_iva[codice_iva] = saved_importo
else:
lista_codici_iva[codice_iva] += saved_importo
else:
"""
Passo il commento ma resetto tutti i campi
"""
# print row
row.riferimento_ordine=""
row.u_m=""
row.quantita=""
prezzo=""
sconti=""
importo=""
codice_iva=""
row.codice_articolo=""
# row.descrizione=row.commento
fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.quantita,prezzo,sconti,importo,codice_iva)
# print lista_codici_iva
bollo= dati_cliente.bollo
if bollo:
print "<NAME>"
codice_articolo="BOLLO"
descrizione="art. 15 | |
<reponame>wbiker/rules_dotnet<filename>dotnet/stdlib.net/net472/generated.bzl
load("@io_bazel_rules_dotnet//dotnet/private:rules/stdlib.bzl", "net_stdlib")
def define_stdlib(context_data):
net_stdlib(
name = "accessibility.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Accessibility.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Accessibility.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "custommarshalers.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/CustomMarshalers.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/CustomMarshalers.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "isymwrapper.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/ISymWrapper.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/ISymWrapper.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "microsoft.activities.build.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Activities.Build.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Activities.Build.dll",
deps = [
":mscorlib.dll",
":xamlbuildtask.dll",
":system.xaml.dll",
":system.dll",
":microsoft.build.utilities.v4.0.dll",
":microsoft.build.framework.dll",
":system.activities.dll",
":system.runtime.serialization.dll",
]
)
net_stdlib(
name = "microsoft.build.conversion.v4.0.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Build.Conversion.v4.0.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Build.Conversion.v4.0.dll",
deps = [
":mscorlib.dll",
":microsoft.build.dll",
":system.dll",
":microsoft.build.engine.dll",
":system.core.dll",
]
)
net_stdlib(
name = "microsoft.build.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Build.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Build.dll",
deps = [
":mscorlib.dll",
":system.dll",
":microsoft.build.framework.dll",
":system.core.dll",
":microsoft.build.engine.dll",
]
)
net_stdlib(
name = "microsoft.build.engine.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Build.Engine.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Build.Engine.dll",
deps = [
":mscorlib.dll",
":system.dll",
":microsoft.build.framework.dll",
]
)
net_stdlib(
name = "microsoft.build.framework.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Build.Framework.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Build.Framework.dll",
deps = [
":mscorlib.dll",
":system.xaml.dll",
":system.dll",
]
)
net_stdlib(
name = "microsoft.build.tasks.v4.0.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Build.Tasks.v4.0.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Build.Tasks.v4.0.dll",
deps = [
":mscorlib.dll",
":system.dll",
":microsoft.build.utilities.v4.0.dll",
":microsoft.build.framework.dll",
":system.core.dll",
":system.security.dll",
":system.xaml.dll",
]
)
net_stdlib(
name = "microsoft.build.utilities.v4.0.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Build.Utilities.v4.0.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.Build.Utilities.v4.0.dll",
deps = [
":mscorlib.dll",
":microsoft.build.framework.dll",
":system.dll",
":system.core.dll",
]
)
net_stdlib(
name = "microsoft.csharp.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.CSharp.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.CSharp.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.core.dll",
":system.dynamic.dll",
]
)
net_stdlib(
name = "microsoft.jscript.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.JScript.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.JScript.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "microsoft.visualbasic.compatibility.data.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.VisualBasic.Compatibility.Data.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.VisualBasic.Compatibility.Data.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.drawing.dll",
":microsoft.visualbasic.dll",
":microsoft.visualbasic.compatibility.dll",
":system.security.dll",
]
)
net_stdlib(
name = "microsoft.visualbasic.compatibility.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.VisualBasic.Compatibility.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.VisualBasic.Compatibility.dll",
deps = [
":mscorlib.dll",
":system.drawing.dll",
":system.dll",
":microsoft.visualbasic.dll",
]
)
net_stdlib(
name = "microsoft.visualbasic.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.VisualBasic.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.VisualBasic.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.deployment.dll",
":system.management.dll",
":system.core.dll",
":system.xml.linq.dll",
":system.drawing.dll",
]
)
net_stdlib(
name = "microsoft.visualc.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.VisualC.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.VisualC.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "microsoft.visualc.stlclr.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.VisualC.STLCLR.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/Microsoft.VisualC.STLCLR.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "mscorlib.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/mscorlib.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/mscorlib.dll",
deps = [
]
)
net_stdlib(
name = "presentationbuildtasks.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationBuildTasks.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationBuildTasks.dll",
deps = [
":mscorlib.dll",
":system.dll",
":microsoft.build.utilities.v4.0.dll",
":microsoft.build.framework.dll",
":system.core.dll",
]
)
net_stdlib(
name = "presentationcore.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationCore.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationCore.dll",
deps = [
":mscorlib.dll",
":system.dll",
":windowsbase.dll",
":system.xaml.dll",
":uiautomationtypes.dll",
":system.windows.input.manipulations.dll",
":uiautomationprovider.dll",
":system.deployment.dll",
]
)
net_stdlib(
name = "presentationframework.aero.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.Aero.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.Aero.dll",
deps = [
":mscorlib.dll",
":windowsbase.dll",
":system.dll",
":presentationcore.dll",
":system.xaml.dll",
]
)
net_stdlib(
name = "presentationframework.aero2.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.Aero2.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.Aero2.dll",
deps = [
":mscorlib.dll",
":windowsbase.dll",
":system.dll",
":presentationcore.dll",
":system.xaml.dll",
]
)
net_stdlib(
name = "presentationframework.aerolite.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.AeroLite.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.AeroLite.dll",
deps = [
":mscorlib.dll",
":windowsbase.dll",
":system.dll",
":presentationcore.dll",
":system.xaml.dll",
]
)
net_stdlib(
name = "presentationframework.classic.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.Classic.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.Classic.dll",
deps = [
":mscorlib.dll",
":windowsbase.dll",
":system.dll",
":presentationcore.dll",
":system.xaml.dll",
]
)
net_stdlib(
name = "presentationframework.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.dll",
deps = [
":mscorlib.dll",
":system.xaml.dll",
":windowsbase.dll",
":system.dll",
":presentationcore.dll",
":system.core.dll",
":uiautomationprovider.dll",
":uiautomationtypes.dll",
":reachframework.dll",
":accessibility.dll",
":system.deployment.dll",
]
)
net_stdlib(
name = "presentationframework.luna.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.Luna.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.Luna.dll",
deps = [
":mscorlib.dll",
":windowsbase.dll",
":system.dll",
":presentationcore.dll",
":system.xaml.dll",
]
)
net_stdlib(
name = "presentationframework.royale.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.Royale.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/PresentationFramework.Royale.dll",
deps = [
":mscorlib.dll",
":windowsbase.dll",
":system.dll",
":presentationcore.dll",
":system.xaml.dll",
]
)
net_stdlib(
name = "reachframework.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/ReachFramework.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/ReachFramework.dll",
deps = [
":mscorlib.dll",
":presentationcore.dll",
":windowsbase.dll",
":system.dll",
":system.drawing.dll",
":system.security.dll",
]
)
net_stdlib(
name = "sysglobl.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/sysglobl.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/sysglobl.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.activities.core.presentation.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Activities.Core.Presentation.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Activities.Core.Presentation.dll",
deps = [
":mscorlib.dll",
":system.dll",
":windowsbase.dll",
":system.activities.presentation.dll",
":system.xaml.dll",
":presentationcore.dll",
":system.activities.dll",
":system.servicemodel.activities.dll",
":system.xml.linq.dll",
":system.core.dll",
":system.runtime.serialization.dll",
":system.windows.presentation.dll",
]
)
net_stdlib(
name = "system.activities.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Activities.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Activities.dll",
deps = [
":mscorlib.dll",
":system.xaml.dll",
":system.core.dll",
":system.dll",
":system.xml.linq.dll",
":system.runtime.serialization.dll",
":system.runtime.durableinstancing.dll",
":microsoft.visualbasic.dll",
]
)
net_stdlib(
name = "system.activities.durableinstancing.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Activities.DurableInstancing.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Activities.DurableInstancing.dll",
deps = [
":mscorlib.dll",
":system.runtime.durableinstancing.dll",
":system.xml.linq.dll",
":system.activities.dll",
":system.core.dll",
":system.runtime.serialization.dll",
":system.dll",
]
)
net_stdlib(
name = "system.activities.presentation.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Activities.Presentation.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Activities.Presentation.dll",
deps = [
":mscorlib.dll",
":system.xaml.dll",
":system.dll",
":windowsbase.dll",
":presentationcore.dll",
":system.activities.dll",
":system.core.dll",
":system.xml.linq.dll",
":system.drawing.dll",
":windowsformsintegration.dll",
":uiautomationprovider.dll",
":uiautomationtypes.dll",
":reachframework.dll",
":system.servicemodel.activities.dll",
":system.componentmodel.composition.dll",
]
)
net_stdlib(
name = "system.addin.contract.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.AddIn.Contract.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.AddIn.Contract.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.addin.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.AddIn.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.AddIn.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.addin.contract.dll",
]
)
net_stdlib(
name = "system.componentmodel.composition.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.ComponentModel.Composition.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.ComponentModel.Composition.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
":system.dll",
]
)
net_stdlib(
name = "system.componentmodel.composition.registration.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.ComponentModel.Composition.Registration.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.ComponentModel.Composition.Registration.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.componentmodel.composition.dll",
":system.core.dll",
":system.reflection.context.dll",
]
)
net_stdlib(
name = "system.componentmodel.dataannotations.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.ComponentModel.DataAnnotations.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.ComponentModel.DataAnnotations.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.configuration.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Configuration.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Configuration.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.security.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.configuration.install.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Configuration.Install.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Configuration.Install.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.runtime.serialization.dll",
]
)
net_stdlib(
name = "system.core.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Core.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Core.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.numerics.dll",
":system.security.dll",
]
)
net_stdlib(
name = "system.data.datasetextensions.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.DataSetExtensions.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.DataSetExtensions.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.data.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.numerics.dll",
":system.runtime.caching.dll",
":system.core.dll",
":system.enterpriseservices.dll",
]
)
net_stdlib(
name = "system.data.entity.design.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.Entity.Design.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.Entity.Design.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.data.entity.dll",
":system.core.dll",
":system.xml.linq.dll",
":system.data.datasetextensions.dll",
]
)
net_stdlib(
name = "system.data.entity.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.Entity.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.Entity.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
":system.dll",
":system.runtime.serialization.dll",
":system.componentmodel.dataannotations.dll",
":system.xml.linq.dll",
]
)
net_stdlib(
name = "system.data.linq.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.Linq.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.Linq.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
":system.dll",
":system.runtime.serialization.dll",
":system.xml.linq.dll",
]
)
net_stdlib(
name = "system.data.oracleclient.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.OracleClient.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.OracleClient.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.enterpriseservices.dll",
]
)
net_stdlib(
name = "system.data.services.client.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.Services.Client.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.Services.Client.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.core.dll",
":system.xml.linq.dll",
]
)
net_stdlib(
name = "system.data.services.design.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.Services.Design.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.Services.Design.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.core.dll",
":system.data.entity.dll",
":system.data.services.client.dll",
":system.xml.linq.dll",
":system.web.extensions.dll",
]
)
net_stdlib(
name = "system.data.services.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.Services.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.Services.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
":system.dll",
":system.data.services.client.dll",
":system.servicemodel.web.dll",
":system.servicemodel.activation.dll",
":system.runtime.serialization.dll",
":system.data.entity.dll",
":system.xml.linq.dll",
":system.data.linq.dll",
]
)
net_stdlib(
name = "system.data.sqlxml.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.SqlXml.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Data.SqlXml.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.deployment.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Deployment.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Deployment.dll",
deps = [
":mscorlib.dll",
":system.security.dll",
":system.dll",
":system.core.dll",
":system.drawing.dll",
]
)
net_stdlib(
name = "system.design.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Design.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Design.dll",
deps = [
":mscorlib.dll",
":system.drawing.dll",
":system.dll",
":system.data.oracleclient.dll",
":accessibility.dll",
":system.drawing.design.dll",
":system.web.regularexpressions.dll",
":system.runtime.serialization.formatters.soap.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.device.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Device.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Device.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.diagnostics.tracing.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Diagnostics.Tracing.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.Diagnostics.Tracing.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.directoryservices.accountmanagement.dll",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.DirectoryServices.AccountManagement.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net472.1.0.0//:build/.NETFramework/v4.7.2/System.DirectoryServices.AccountManagement.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.directoryservices.dll",
":system.directoryservices.protocols.dll",
]
)
net_stdlib(
name | |
#
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Copyright (C) 2018-2021 UAVCAN Development Team <uavcan.org>
# This software is distributed under the terms of the MIT License.
#
"""
jinja-based :class:`~nunavut.generators.AbstractGenerator` implementation.
"""
import datetime
import io
import logging
import pathlib
import re
import shutil
import typing
import nunavut.generators
import nunavut.lang
import nunavut.postprocessors
import pydsdl
from nunavut._utilities import YesNoDefault
from yaml import Dumper as YamlDumper
from yaml import dump as yaml_dump
from .environment import CodeGenEnvironment
from .jinja2 import Template
from .loaders import DEFAULT_TEMPLATE_PATH, TEMPLATE_SUFFIX, DSDLTemplateLoader
logger = logging.getLogger(__name__)
# +---------------------------------------------------------------------------+
# | JINJA : CodeGenerator
# +---------------------------------------------------------------------------+
class CodeGenerator(nunavut.generators.AbstractGenerator):
"""
Abstract base class for all Generators that build source code using Jinja templates.
    :param nunavut.Namespace namespace: The top-level namespace to generate code
at and from.
:param YesNoDefault generate_namespace_types: Set to YES to emit files for namespaces.
NO will suppress namespace file generation and DEFAULT will
use the language's preference.
:param templates_dir: Directories containing jinja templates. These will be available along
with any built-in templates provided by the target language. The templates
at these paths will take precedence masking any built-in templates
where the names are the same. See :class:`jinja2.ChoiceLoader` for rules
on the lookup hierarchy.
:type templates_dir: typing.Optional[typing.Union[pathlib.Path,typing.List[pathlib.Path]]]
:param bool followlinks: If True then symbolic links will be followed when
searching for templates.
:param bool trim_blocks: If this is set to True the first newline after a
block is removed (block, not variable tag!).
:param bool lstrip_blocks: If this is set to True leading spaces and tabs
are stripped from the start of a line to a block.
Defaults to False.
:param typing.Dict[str, typing.Callable] additional_filters: typing.Optional jinja filters to add to the
global environment using the key as the filter name
and the callable as the filter.
:param typing.Dict[str, typing.Callable] additional_tests: typing.Optional jinja tests to add to the
global environment using the key as the test name
and the callable as the test.
:param typing.Dict[str, typing.Any] additional_globals: typing.Optional objects to add to the template
environment globals collection.
:param post_processors: A list of :class:`nunavut.postprocessors.PostProcessor`
:type post_processors: typing.Optional[typing.List[nunavut.postprocessors.PostProcessor]]
:param builtin_template_path: If provided overrides the folder name under which built-in templates are loaded from
within a target language's package (i.e. ignored if no target language is
specified). For example, if the target language is ``c`` and this parameter
was set to ``foo`` then built-in templates would be loaded from
``nunavut.lang.c.foo``.
:raises RuntimeError: If any additional filter or test attempts to replace a built-in
or otherwise already defined filter or test.
"""
@staticmethod
def __augment_post_processors_with_ln_limit_empty_lines(
post_processors: typing.Optional[typing.List["nunavut.postprocessors.PostProcessor"]], limit_empty_lines: int
) -> typing.List["nunavut.postprocessors.PostProcessor"]:
"""
Subroutine of _handle_post_processors method.
"""
from nunavut.postprocessors import LimitEmptyLines
if post_processors is None:
post_processors = [LimitEmptyLines(limit_empty_lines)]
else:
found_pp = False
for pp in post_processors:
if isinstance(pp, LimitEmptyLines):
found_pp = True
break
if not found_pp:
post_processors.append(LimitEmptyLines(limit_empty_lines))
return post_processors
@staticmethod
def __augment_post_processors_with_ln_trim_trailing_whitespace(
post_processors: typing.Optional[typing.List["nunavut.postprocessors.PostProcessor"]],
) -> typing.List["nunavut.postprocessors.PostProcessor"]:
"""
Subroutine of _handle_post_processors method.
"""
from nunavut.postprocessors import TrimTrailingWhitespace
if post_processors is None:
post_processors = [TrimTrailingWhitespace()]
else:
found_pp = False
for pp in post_processors:
if isinstance(pp, TrimTrailingWhitespace):
found_pp = True
break
if not found_pp:
post_processors.append(TrimTrailingWhitespace())
return post_processors
@classmethod
def _handle_post_processors(
cls,
post_processors: typing.Optional[typing.List["nunavut.postprocessors.PostProcessor"]],
target_language: typing.Optional["nunavut.lang.Language"],
) -> typing.Optional[typing.List["nunavut.postprocessors.PostProcessor"]]:
"""
Used by constructor to process an optional list of post-processors and to augment or create this list
if needed to support language options.
"""
if target_language is not None:
try:
limit_empty_lines = target_language.get_config_value("limit_empty_lines")
post_processors = cls.__augment_post_processors_with_ln_limit_empty_lines(
post_processors, int(limit_empty_lines)
)
except KeyError:
pass
if target_language.get_config_value_as_bool("trim_trailing_whitespace"):
post_processors = cls.__augment_post_processors_with_ln_trim_trailing_whitespace(post_processors)
return post_processors
def __init__(
self,
namespace: nunavut.Namespace,
generate_namespace_types: YesNoDefault = YesNoDefault.DEFAULT,
templates_dir: typing.Optional[typing.Union[pathlib.Path, typing.List[pathlib.Path]]] = None,
followlinks: bool = False,
trim_blocks: bool = False,
lstrip_blocks: bool = False,
additional_filters: typing.Optional[typing.Dict[str, typing.Callable]] = None,
additional_tests: typing.Optional[typing.Dict[str, typing.Callable]] = None,
additional_globals: typing.Optional[typing.Dict[str, typing.Any]] = None,
post_processors: typing.Optional[typing.List["nunavut.postprocessors.PostProcessor"]] = None,
builtin_template_path: str = DEFAULT_TEMPLATE_PATH,
):
super().__init__(namespace, generate_namespace_types)
if templates_dir is not None and not isinstance(templates_dir, list):
templates_dir = [templates_dir]
language_context = self._namespace.get_language_context()
target_language = language_context.get_target_language()
self._dsdl_template_loader = DSDLTemplateLoader(
templates_dirs=templates_dir,
package_name_for_templates=(
None if target_language is None else target_language.get_templates_package_name()
),
followlinks=followlinks,
builtin_template_path=builtin_template_path,
)
self._post_processors = self._handle_post_processors(post_processors, target_language)
self._env = CodeGenEnvironment(
lctx=language_context,
loader=self._dsdl_template_loader,
lstrip_blocks=lstrip_blocks,
trim_blocks=trim_blocks,
additional_filters=additional_filters,
additional_tests=additional_tests,
additional_globals=additional_globals,
)
@property
def dsdl_loader(self) -> DSDLTemplateLoader:
return self._dsdl_template_loader
@property
def language_context(self) -> nunavut.lang.LanguageContext:
return self._namespace.get_language_context()
# +-----------------------------------------------------------------------+
# | PROTECTED
# +-----------------------------------------------------------------------+
def _handle_overwrite(self, output_path: pathlib.Path, allow_overwrite: bool) -> None:
if output_path.exists():
if allow_overwrite:
output_path.chmod(output_path.stat().st_mode | 0o220)
else:
raise PermissionError("{} exists and allow_overwrite is False.".format(output_path))
# +-----------------------------------------------------------------------+
# | AbstractGenerator
# +-----------------------------------------------------------------------+
def get_templates(self) -> typing.Iterable[pathlib.Path]:
"""
Enumerate all templates found in the templates path.
:data:`~TEMPLATE_SUFFIX` as the suffix for the filename.
:return: A list of paths to all templates found by this Generator object.
"""
return self._dsdl_template_loader.get_templates()
# +-----------------------------------------------------------------------+
# | PRIVATE
# +-----------------------------------------------------------------------+
@staticmethod
def _filter_and_write_line(
line_and_lineend: typing.Tuple[str, str],
output_file: typing.TextIO,
line_pps: typing.List["nunavut.postprocessors.LinePostProcessor"],
) -> None:
for line_pp in line_pps:
line_and_lineend = line_pp(line_and_lineend)
if line_and_lineend is None:
raise ValueError(
"line post processor must return a 2-tuple. To elide a line return a tuple of empty"
"strings. None is not a valid value."
)
output_file.write(line_and_lineend[0])
output_file.write(line_and_lineend[1])
    @classmethod
    def _generate_with_line_buffer(
        cls,
        output_file: typing.TextIO,
        template_gen: typing.Generator[str, None, None],
        line_pps: typing.List["nunavut.postprocessors.LinePostProcessor"],
    ) -> None:
        """
        Drain the template generator into ``output_file`` while re-chunking the
        stream into (line, newline) pairs so each complete line can be run
        through the line post-processors via :meth:`_filter_and_write_line`.

        A line may span multiple generator parts; ``line_buffer`` accumulates
        text until a newline is seen. A trailing fragment with no newline is
        flushed at the end with an empty line-ending string.
        """
        # Matches either a bare LF or a CRLF pair; at a '\r' position only the
        # '\r\n' alternative can match, so CRLF is consumed as one unit.
        newline_pattern = re.compile(r"\n|\r\n", flags=re.MULTILINE)
        line_buffer = io.StringIO()
        for part in template_gen:
            search_pos = 0  # type: int
            match_obj = newline_pattern.search(part, search_pos)
            while True:
                if search_pos < 0 or search_pos >= len(part):
                    break
                if match_obj is None:
                    # No more newlines in this part: buffer the tail and move on
                    # to the next part (the line may continue there).
                    line_buffer.write(part[search_pos:])
                    break
                # We have a newline
                line_buffer.write(part[search_pos : match_obj.start()])
                newline_chars = part[match_obj.start() : match_obj.end()]
                line = line_buffer.getvalue()  # type: str
                # Start a fresh buffer for the next line.
                line_buffer = io.StringIO()
                cls._filter_and_write_line((line, newline_chars), output_file, line_pps)
                search_pos = match_obj.end()
                match_obj = newline_pattern.search(part, search_pos)
        # Flush any final line that was not newline-terminated.
        remainder = line_buffer.getvalue()
        if len(remainder) > 0:
            cls._filter_and_write_line((remainder, ""), output_file, line_pps)
    def _generate_code(
        self,
        output_path: pathlib.Path,
        template: Template,
        template_gen: typing.Generator[str, None, None],
        allow_overwrite: bool,
    ) -> None:
        """
        Logic that should run from _generate_type iff is_dryrun is False.

        Renders ``template_gen`` to ``output_path``, routing output through any
        configured line post-processors and applying file post-processors last.

        :param output_path: Destination file for the rendered template.
        :param template: The template being rendered.
        :param template_gen: Generator yielding chunks of rendered text.
        :param allow_overwrite: Whether an existing output file may be replaced.
        :raises ValueError: If a configured post-processor is of an unknown type.
        :raises PermissionError: If the output exists and overwriting is disallowed.
        """
        # NOTE(review): utcnow() returns a naive datetime; templates presumably
        # treat it as UTC — confirm before switching to timezone-aware times.
        self._env.now_utc = datetime.datetime.utcnow()
        from ..lang._common import UniqueNameGenerator
        # reset the name generator state for this type
        UniqueNameGenerator.reset()
        # Predetermine the post processor types.
        line_pps = []  # type: typing.List['nunavut.postprocessors.LinePostProcessor']
        file_pps = []  # type: typing.List['nunavut.postprocessors.FilePostProcessor']
        if self._post_processors is not None:
            for pp in self._post_processors:
                if isinstance(pp, nunavut.postprocessors.LinePostProcessor):
                    line_pps.append(pp)
                elif isinstance(pp, nunavut.postprocessors.FilePostProcessor):
                    file_pps.append(pp)
                else:
                    raise ValueError("PostProcessor type {} is unknown.".format(type(pp)))
        logger.debug("Using post-processors: %r %r", line_pps, file_pps)
        self._handle_overwrite(output_path, allow_overwrite)
        output_path.parent.mkdir(parents=True, exist_ok=True)
        with open(str(output_path), "w") as output_file:
            if len(line_pps) > 0:
                # The logic gets much more complex when doing line post-processing.
                self._generate_with_line_buffer(output_file, template_gen, line_pps)
            else:
                for part in template_gen:
                    output_file.write(part)
        # File post-processors may rename/move the output; each receives the
        # path produced by the previous one.
        for file_pp in file_pps:
            output_path = file_pp(output_path)
# +---------------------------------------------------------------------------+
# | JINJA : DSDLCodeGenerator
# +---------------------------------------------------------------------------+
class DSDLCodeGenerator(CodeGenerator):
"""
:class:`~CodeGenerator` implementation that generates code for a given set
of DSDL types.
"""
# +-----------------------------------------------------------------------+
# | JINJA : filters
# +-----------------------------------------------------------------------+
@staticmethod
def filter_yamlfy(value: typing.Any) -> str:
"""
Filter to, optionally, emit a dump of the dsdl input as a yaml document.
Available as ``yamlfy`` in all template environments.
Example::
/*
{{ T | yamlfy }}
*/
Result Example (truncated for brevity)::
/*
!!python/object:pydsdl.StructureType
_attributes:
- !!python/object:pydsdl.Field
_serializable: !!python/object:pydsdl.UnsignedIntegerType
_bit_length: 16
_cast_mode: &id001 !!python/object/apply:pydsdl.CastMode
- 0
_name: value
*/
:param value: The input value to parse as yaml.
:return: If a yaml parser is available, a pretty dump of the given value as yaml.
If a yaml parser is not available then an empty string is returned.
"""
return str(yaml_dump(value, Dumper=YamlDumper))
def filter_type_to_template(self, value: typing.Any) -> str:
"""
Template for type resolution as a filter. Available as ``type_to_template``
in all template environments.
Example::
{%- for attribute in T.attributes %}
{%* include attribute.data_type | type_to_template %}
{%- if not loop.last %},{% endif %}
{%- endfor %}
:param value: The input value to change into a template include path.
:return: A path to a template named for the type with :any:`TEMPLATE_SUFFIX`
"""
result = self.dsdl_loader.type_to_template(type(value))
if result is None:
raise RuntimeError("No template found for type {}".format(type(value)))
return result.name
def filter_type_to_include_path(self, value: typing.Any, resolve: bool = False) -> str:
"""
Emits an include path to the output target for a given type.
Example::
# include "{{ T.my_type | type_to_include_path }}"
Result Example:
# include "foo/bar/my_type.h"
:param typing.Any value: The type to emit an include for.
:param bool | |
<reponame>ravwojdyla/transform<filename>tensorflow_transform/saved/saved_transform_io.py
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to build input_fns for use with tf.Learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import six
import tensorflow as tf
from tensorflow_transform.saved import constants
from tensorflow_transform.saved import saved_model_loader
from tensorflow.python.framework import ops
from tensorflow.python.training import saver as tf_saver
def _load_transform_saved_model(transform_savedmodel_dir):
  """Load a SavedModel representing a transform function from disk.

  Args:
    transform_savedmodel_dir: a SavedModel directory.

  Returns:
    A tuple `(meta_graph_def, input_signature, output_signature,
    asset_path_dict)` where the signatures map logical names to tensor names
    and `asset_path_dict` maps tensor names to absolute asset file paths.
  """
  saved_model = saved_model_loader.parse_saved_model(transform_savedmodel_dir)
  meta_graph_def = saved_model_loader.choose_meta_graph_def(
      saved_model, [constants.TRANSFORM_TAG])
  signature = meta_graph_def.signature_def[constants.TRANSFORM_SIGNATURE]

  def _logical_to_tensor_names(tensor_infos):
    # Flatten each TensorInfo down to the underlying tensor name.
    return {logical_name: tensor_info.name
            for logical_name, tensor_info in six.iteritems(tensor_infos)}

  input_signature = _logical_to_tensor_names(signature.inputs)
  output_signature = _logical_to_tensor_names(signature.outputs)
  # asset_path_dict is {string: string}, mapping tensor names to absolute paths.
  asset_path_dict = saved_model_loader.get_asset_tensors(
      transform_savedmodel_dir, meta_graph_def)
  return meta_graph_def, input_signature, output_signature, asset_path_dict
_PARTITIONED_VARIABLE_NAME_RE = re.compile(r'^(.*)/part_(\d*)$')
def _partially_apply_saved_transform_impl(
    saved_model_dir, logical_input_map, tensor_replacement_map=None,
    fetch_tensor_names=None):
  """Shared code for partially_apply_saved_transform and fetch_tensor_values.

  This adds nodes to a graph that already contains Tensors representing the
  inputs. These input Tensors may be placeholders that will be fed when the
  graph is executed, or may be the outputs of some Ops. Most typically, the
  input Tensors are reading and/or parsing Ops, but they could be anything--
  including the outputs of a prior application of this function using another
  transform graph.

  This function operates on the default Graph in the default Session, and so
  must be called within a context where these are provided.

  Args:
    saved_model_dir: A SavedModel directory providing a transform
      graph. The MetaGraphDef and signature are selected from the SavedModel
      using keys defined in `../constants.py` ('transform' and
      'transform_signature', respectively).
    logical_input_map: a dict of logical name to Tensor. The logical names must
      be a subset of those in the input signature of the transform graph, and
      the corresponding Tensors must have the expected types and shapes.
    tensor_replacement_map: a dict of tensor names to `Tensors`.
    fetch_tensor_names: a list of tensor names.

  Returns:
    A tuple of (unbound_inputs, outputs, fetched_tensors) where unbound_inputs
    is a dict of logical name to Tensors that are yet to be mapped or fed,
    outputs is a dict of logical name to Tensor, as provided by the output
    signature of the transform graph, and fetched_tensors is a dict of tensor
    names to `Tensor`s where the tensor names are the names given by
    `fetched_tensor_names`.

  Raises:
    ValueError: if the provided input_tensors dict has keys that are not part
      of the input signature, or any of the provided inputs have the wrong
      type or shape.
    RuntimeError: if there is no default graph available to which to apply the
      transform.
  """
  graph = tf.get_default_graph()
  if graph is None:
    raise RuntimeError('apply_saved_transform() requires a default graph.')
  decomposed_input_tensors = _decompose_sparse_tensors(logical_input_map)
  meta_graph_def, input_signature, output_signature, asset_path_dict = (
      _load_transform_saved_model(saved_model_dir))
  asset_tensor_dict = {k: ops.convert_to_tensor(v)
                       for k, v in asset_path_dict.items()}
  # Check for inputs that were not part of the input signature.
  unexpected_inputs = (set(six.iterkeys(decomposed_input_tensors)) -
                       set(six.iterkeys(input_signature)))
  if unexpected_inputs:
    raise ValueError('Unexpected inputs '
                     'to transform: {}'.format(unexpected_inputs))
  # Create a map from tensor names in the graph to be imported, to the tensors
  # specified in `input_tensors`.
  input_map = {
      input_signature[decomposed_logical_name]:
          decomposed_input_tensors[decomposed_logical_name]
      for decomposed_logical_name in decomposed_input_tensors}
  input_map.update(asset_tensor_dict)
  if tensor_replacement_map:
    input_map.update(tensor_replacement_map)
  # unique_name may produce e.g. transform_5. The result has no trailing slash.
  scope = graph.unique_name('transform', mark_as_used=False)
  # unique_name returns an "absolute" name while we want a name relative to the
  # current scope. Therefore, we check if the current name stack is non-empty,
  # and if so, strip out the existing name scope.
  if graph.get_name_scope():
    current_name_scope = graph.get_name_scope() + '/'
    assert scope.startswith(current_name_scope)
    import_scope = scope[len(current_name_scope):]
  else:
    import_scope = scope
  # Save the ASSET_FILEPATHS before importing the MetaGraphDef
  current_assets = graph.get_collection(tf.GraphKeys.ASSET_FILEPATHS)
  # Warn user if meta_graph_def has saved variables
  if tf.GraphKeys.TRAINABLE_VARIABLES in meta_graph_def.collection_def:
    trainable_vars = meta_graph_def.collection_def[
        tf.GraphKeys.TRAINABLE_VARIABLES].bytes_list.value
    if trainable_vars:
      raise ValueError(
          'The SavedModel contained trainable variables {}. Because this '
          'function is typically called in the input_fn, trainable variables '
          'are disallowed'.format(trainable_vars))
  # Load the transform graph, applying it to existing Tensors via input_map.
  # Throws ValueError if the input_map gives mismatched types or shapes.
  saver = tf_saver.import_meta_graph(meta_graph_def,
                                     import_scope=import_scope,
                                     input_map=input_map)
  # Wipe out AssetFileDef collection; it is obsolete after loading
  graph.clear_collection(tf.saved_model.constants.ASSETS_KEY)
  # The import may have added Tensors to the ASSET_FILEPATHS collection that
  # were substituted via input_map. To account for this, wipe out the
  # collection, restore the preexisting collection values, and then write in
  # the new substituted Tensors.
  graph.clear_collection(tf.GraphKeys.ASSET_FILEPATHS)
  for asset_path_tensor in current_assets:
    graph.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, asset_path_tensor)
  for asset_path_tensor in asset_tensor_dict.values():
    graph.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, asset_path_tensor)
  # `saver` is only returned by import_meta_graph when the MetaGraphDef carries
  # saved variables; in that case restore their values from the checkpoint.
  if saver:
    checkpoint_path = os.path.join(
        tf.compat.as_bytes(saved_model_dir),
        tf.compat.as_bytes(tf.saved_model.constants.VARIABLES_DIRECTORY),
        tf.compat.as_bytes(tf.saved_model.constants.VARIABLES_FILENAME))
    # We can't use the scope rename from init_from_checkpoint because it relies
    # on var scopes not rebuilt by import_meta_graph. So we need to construct it
    # explicitly by iterating over the variables.
    var_map = {}
    for var in tf.global_variables():
      var_name = var.op.name
      # Only variables imported under our fresh scope belong to this transform.
      if not var_name.startswith(scope):
        continue
      # Generate original name before importing into scope.
      original_var_name = var_name[len(scope)+1:]
      match = _PARTITIONED_VARIABLE_NAME_RE.match(original_var_name)
      if match:
        # If the variable is partitioned, extract the base variable name and
        # the index in the partition, then update var_map[base_name] to have
        # var_map[base_name][partition_index] = var.
        base_name = match.group(1)
        partition_index = int(match.group(2))
        if base_name not in var_map:
          var_map[base_name] = []
        # Pad with None placeholders until partition_index is a valid slot;
        # partitions may be encountered out of order.
        while not partition_index < len(var_map[base_name]):
          var_map[base_name].append(None)
        assert var_map[base_name][partition_index] is None
        var_map[base_name][partition_index] = var
      else:
        var_map[original_var_name] = var
    if var_map:
      tf.train.init_from_checkpoint(checkpoint_path, var_map)
  # Add computed output tensors to the output. There are two cases. When the
  # output is not in the input_map, then we look up the tensor in the imported
  # graph by prepending the import scope and looking up the tensor by name.
  # This will fail if the expected output tensor is not now in the graph
  # under the expected name scope. When the output is in the input map, then
  # that tensor will have been re-mapped so we use the tensor given in the
  # input_map.
  def lookup_remapped_tensor(tensor_name):
    if tensor_name in input_map:
      return input_map[tensor_name]
    else:
      return graph.get_tensor_by_name(
          ops.prepend_name_scope(tensor_name, scope))
  decomposed_output_tensors = {
      decomposed_logical_name: lookup_remapped_tensor(tensor_name)
      for decomposed_logical_name, tensor_name
      in six.iteritems(output_signature)
  }
  # Do the same for input tensors, where we assume such tensors are not in the
  # input_map since identical tensors in an input_map would be an error.
  decomposed_unbound_input_tensors = {
      decomposed_logical_name: graph.get_tensor_by_name(
          ops.prepend_name_scope(tensor_name, scope))
      for decomposed_logical_name, tensor_name in six.iteritems(input_signature)
      if decomposed_logical_name not in decomposed_input_tensors
  }
  if fetch_tensor_names is None:
    fetch_tensor_names = []
  fetched_tensors = {
      name: lookup_remapped_tensor(name) for name in fetch_tensor_names}
  outputs = _recompose_sparse_tensors(decomposed_output_tensors)
  unbound_inputs = _recompose_sparse_tensors(decomposed_unbound_input_tensors)
  return unbound_inputs, outputs, fetched_tensors
def fetch_tensor_values(saved_model_dir, tensor_replacement_map,
                        fetch_tensor_names):
  """Fetch tensors from a transform SavedModel without binding any inputs.

  Args:
    saved_model_dir: A SavedModel directory providing a transform graph.
    tensor_replacement_map: a dict of tensor names to `Tensors` substituted
      into the graph before lookup.
    fetch_tensor_names: a list of tensor names to resolve.

  Returns:
    A dict mapping each requested tensor name to its `Tensor`.
  """
  _, _, fetched_tensors = _partially_apply_saved_transform_impl(
      saved_model_dir, {}, tensor_replacement_map, fetch_tensor_names)
  return fetched_tensors
def partially_apply_saved_transform(saved_model_dir, logical_input_map,
                                    tensor_replacement_map=None):
  """Deprecated alias for partially_apply_saved_transform_internal.

  Args:
    saved_model_dir: A SavedModel directory providing a transform graph.
    logical_input_map: a dict of logical name to Tensor.
    tensor_replacement_map: a dict of tensor names to `Tensors`.

  Returns:
    See `partially_apply_saved_transform_internal`.
  """
  # Fix: the warning previously misspelled the class as "TFTrandformOutput".
  tf.logging.warn(
      'partially_apply_saved_transform is deprecated. Use the '
      'transform_raw_features method of the TFTransformOutput class instead.')
  return partially_apply_saved_transform_internal(
      saved_model_dir, logical_input_map, tensor_replacement_map)
def partially_apply_saved_transform_internal(saved_model_dir, logical_input_map,
tensor_replacement_map=None):
"""Apply a transform graph, represented as a SavedModel, to existing Tensors.
For internal use only. Users should use the transform_raw_features method
of the TFTrandformOutput class.
This adds nodes to a graph that already contains Tensors representing the
inputs. These input Tensors may be placeholders that will be fed when the
graph is executed, or may be the outputs of some Ops. Most typically, the
input Tensors are reading and/or parsing Ops, but they could be anything--
including the outputs of a prior application of this function using another
transform graph.
This function operates on the default Graph in the default Session, and so
must be called within a context where these are provided.
Args:
saved_model_dir: A SavedModel directory providing a transform
graph. The MetaGraphDef and signature are | |
60] 4,840
Conv2d-136 [-1, 40, 44, 60] 4,840
BatchNorm2d-137 [-1, 40, 44, 60] 80
Conv2d-138 [-1, 40, 44, 60] 4,840
Conv2d-139 [-1, 40, 44, 60] 4,840
BatchNorm2d-140 [-1, 40, 44, 60] 80
Dropout2d-141 [-1, 40, 44, 60] 0
EDAModule-142 [-1, 450, 44, 60] 0
EDANetX3Block-143 [-1, 450, 44, 60] 0
Conv2d-144 [-1, 11, 44, 60] 4,961
================================================================
Total params: 685,877
Trainable params: 685,877
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 1.93
Forward/backward pass size (MB): 392.58
Params size (MB): 2.62
Estimated Total Size (MB): 397.13
----------------------------------------------------------------
EDANetX3(
2.875 GMac, 100.000% MACs,
(layers): ModuleList(
2.862 GMac, 99.544% MACs,
(0): DownsamplerBlock(
0.016 GMac, 0.555% MACs,
(conv): Conv2d(0.014 GMac, 0.494% MACs, 3, 12, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
(pool): MaxPool2d(0.001 GMac, 0.018% MACs, kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(bn): BatchNorm2d(0.001 GMac, 0.044% MACs, 15, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(1): DownsamplerBlock(
0.067 GMac, 2.314% MACs,
(conv): Conv2d(0.065 GMac, 2.248% MACs, 15, 45, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
(pool): MaxPool2d(0.001 GMac, 0.022% MACs, kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(bn): BatchNorm2d(0.001 GMac, 0.044% MACs, 60, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(2): EDANetX3Block(
1.333 GMac, 46.349% MACs,
(residual_dense_layers): Sequential(
1.333 GMac, 46.349% MACs,
(0): EDAModule(
0.233 GMac, 8.095% MACs,
(conv1x1): Conv2d(0.026 GMac, 0.896% MACs, 60, 40, kernel_size=(1, 1), stride=(1, 1))
(bn0): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_1): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
(conv1x3_1): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
(bn1): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_2): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
(conv1x3_2): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
(bn2): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(dropout): Dropout2d(0.0 GMac, 0.000% MACs, p=0.02, inplace=False)
)
(1): EDAModule(
0.25 GMac, 8.682% MACs,
(conv1x1): Conv2d(0.043 GMac, 1.484% MACs, 100, 40, kernel_size=(1, 1), stride=(1, 1))
(bn0): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_1): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
(conv1x3_1): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
(bn1): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_2): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
(conv1x3_2): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
(bn2): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(dropout): Dropout2d(0.0 GMac, 0.000% MACs, p=0.02, inplace=False)
)
(2): EDAModule(
0.267 GMac, 9.270% MACs,
(conv1x1): Conv2d(0.06 GMac, 2.071% MACs, 140, 40, kernel_size=(1, 1), stride=(1, 1))
(bn0): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_1): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
(conv1x3_1): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
(bn1): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_2): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
(conv1x3_2): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
(bn2): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(dropout): Dropout2d(0.0 GMac, 0.000% MACs, p=0.02, inplace=False)
)
(3): EDAModule(
0.283 GMac, 9.858% MACs,
(conv1x1): Conv2d(0.076 GMac, 2.659% MACs, 180, 40, kernel_size=(1, 1), stride=(1, 1))
(bn0): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_1): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
(conv1x3_1): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
(bn1): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_2): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(2, 0), dilation=(2, 2))
(conv1x3_2): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 2), dilation=(2, 2))
(bn2): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(dropout): Dropout2d(0.0 GMac, 0.000% MACs, p=0.02, inplace=False)
)
(4): EDAModule(
0.3 GMac, 10.445% MACs,
(conv1x1): Conv2d(0.093 GMac, 3.247% MACs, 220, 40, kernel_size=(1, 1), stride=(1, 1))
(bn0): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_1): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
(conv1x3_1): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
(bn1): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_2): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(2, 0), dilation=(2, 2))
(conv1x3_2): Conv2d(0.051 GMac, 1.778% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 2), dilation=(2, 2))
(bn2): BatchNorm2d(0.001 GMac, 0.029% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(dropout): Dropout2d(0.0 GMac, 0.000% MACs, p=0.02, inplace=False)
)
)
)
(3): DownsamplerBlock(
0.804 GMac, 27.967% MACs,
(conv): Conv2d(0.803 GMac, 27.943% MACs, 260, 130, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
(bn): BatchNorm2d(0.001 GMac, 0.024% MACs, 130, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(4): EDANetX3Block(
0.643 GMac, 22.359% MACs,
(residual_dense_layers): Sequential(
0.643 GMac, 22.359% MACs,
(0): EDAModule(
0.066 GMac, 2.281% MACs,
(conv1x1): Conv2d(0.014 GMac, 0.481% MACs, 130, 40, kernel_size=(1, 1), stride=(1, 1))
(bn0): BatchNorm2d(0.0 GMac, 0.007% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_1): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
(conv1x3_1): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
(bn1): BatchNorm2d(0.0 GMac, 0.007% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_2): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(2, 0), dilation=(2, 2))
(conv1x3_2): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 2), dilation=(2, 2))
(bn2): BatchNorm2d(0.0 GMac, 0.007% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(dropout): Dropout2d(0.0 GMac, 0.000% MACs, p=0.02, inplace=False)
)
(1): EDAModule(
0.07 GMac, 2.428% MACs,
(conv1x1): Conv2d(0.018 GMac, 0.628% MACs, 170, 40, kernel_size=(1, 1), stride=(1, 1))
(bn0): BatchNorm2d(0.0 GMac, 0.007% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_1): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
(conv1x3_1): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
(bn1): BatchNorm2d(0.0 GMac, 0.007% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_2): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(2, 0), dilation=(2, 2))
(conv1x3_2): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 2), dilation=(2, 2))
(bn2): BatchNorm2d(0.0 GMac, 0.007% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(dropout): Dropout2d(0.0 GMac, 0.000% MACs, p=0.02, inplace=False)
)
(2): EDAModule(
0.074 GMac, 2.575% MACs,
(conv1x1): Conv2d(0.022 GMac, 0.775% MACs, 210, 40, kernel_size=(1, 1), stride=(1, 1))
(bn0): BatchNorm2d(0.0 GMac, 0.007% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_1): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
(conv1x3_1): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
(bn1): BatchNorm2d(0.0 GMac, 0.007% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_2): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(4, 0), dilation=(4, 4))
(conv1x3_2): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 4), dilation=(4, 4))
(bn2): BatchNorm2d(0.0 GMac, 0.007% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(dropout): Dropout2d(0.0 GMac, 0.000% MACs, p=0.02, inplace=False)
)
(3): EDAModule(
0.078 GMac, 2.721% MACs,
(conv1x1): Conv2d(0.027 GMac, 0.922% MACs, 250, 40, kernel_size=(1, 1), stride=(1, 1))
(bn0): BatchNorm2d(0.0 GMac, 0.007% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_1): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
(conv1x3_1): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
(bn1): BatchNorm2d(0.0 GMac, 0.007% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_2): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(4, 0), dilation=(4, 4))
(conv1x3_2): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, 4), dilation=(4, 4))
(bn2): BatchNorm2d(0.0 GMac, 0.007% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(dropout): Dropout2d(0.0 GMac, 0.000% MACs, p=0.02, inplace=False)
)
(4): EDAModule(
0.082 GMac, 2.868% MACs,
(conv1x1): Conv2d(0.031 GMac, 1.069% MACs, 290, 40, kernel_size=(1, 1), stride=(1, 1))
(bn0): BatchNorm2d(0.0 GMac, 0.007% MACs, 40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3x1_1): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
(conv1x3_1): Conv2d(0.013 GMac, 0.444% MACs, 40, 40, kernel_size=(1, 3), stride=(1, 1), padding=(0, | |
###################################################################
# Melissa #
#-----------------------------------------------------------------#
# COPYRIGHT (C) 2017 by INRIA and EDF. ALL RIGHTS RESERVED. #
# #
# This source is covered by the BSD 3-Clause License. #
# Refer to the LICENCE file for further information. #
# #
#-----------------------------------------------------------------#
# Original Contributors: #
# <NAME>, #
# <NAME>, #
# <NAME>, #
# <NAME>, #
###################################################################
# TODO: check python linter warnings...
# TODO: Refactor: see if we can remove some of the time.sleeps at some points to run even faster
"""
Simulations and server jobs module
"""
import numpy
import os
import time
import subprocess
import logging
import asyncio
from threading import RLock
from ctypes import cdll, create_string_buffer, c_char_p, c_wchar_p, c_int, c_double, POINTER

# The install prefix is required to locate the compiled communication library.
melissa_install_prefix = os.getenv('MELISSA_INSTALL_PREFIX')
if not melissa_install_prefix:
    # An `assert` here would be silently stripped under `python -O`; fail
    # loudly and explain what is missing instead.
    raise RuntimeError('MELISSA_INSTALL_PREFIX environment variable is not set')

c_int_p = POINTER(c_int)
c_double_p = POINTER(c_double)

# C bridge used to exchange messages and jobs with the Melissa server.
melissa_comm4py = cdll.LoadLibrary(melissa_install_prefix + '/lib/libmelissa_comm4py.so')
melissa_comm4py.send_message.argtypes = [c_char_p]
melissa_comm4py.send_job.argtypes = [c_int, c_char_p, c_int, c_double_p]
melissa_comm4py.send_drop.argtypes = [c_int, c_char_p]
melissa_comm4py.send_options.argtypes = [c_char_p]
# Jobs and executions status
NOT_SUBMITTED = -1
PENDING = 0
WAITING = 0  # alias of PENDING: a job waiting in the batch queue
RUNNING = 1
FINISHED = 2
TIMEOUT = 4  # NOTE(review): 3 is unused — presumably reserved; confirm.
# Maps user-facing coupling option names to the integer codes understood by
# the C layer (ZMQ is the default transport).
COUPLING_DICT = {"MELISSA_COUPLING_NONE":0,
                 "MELISSA_COUPLING_DEFAULT":0,
                 "MELISSA_COUPLING_ZMQ":0,
                 "MELISSA_COUPLING_MPI":1,
                 "MELISSA_COUPLING_FLOWVR":2}
class Job(object):
    """
    Base class for jobs managed by the launcher.

    Class-level attributes hold study-wide state shared by every job:
    user-provided callback functions, study options, statistics options and
    the number of uncertain parameters.
    """
    usr_func = {}
    stdy_opt = {}
    ml_stats = {}
    nb_param = 0

    def __init__(self):
        """
        Initialize a job in the not-submitted state.
        """
        self.job_status = NOT_SUBMITTED
        self.job_id = 0
        self.cmd_opt = ''
        self.start_time = 0.0
        self.job_type = 0
        self.node_name = ['']

    @staticmethod
    def set_usr_func(usr_func):
        """Install the study-wide user callback functions."""
        Job.usr_func = usr_func

    @staticmethod
    def set_stdy_opt(stdy_opt):
        """Install the study-wide options."""
        Job.stdy_opt = stdy_opt

    @staticmethod
    def set_ml_stats(ml_stats):
        """Install the study-wide statistics options."""
        Job.ml_stats = ml_stats

    @staticmethod
    def set_nb_param(nb_param):
        """Install the number of uncertain parameters."""
        Job.nb_param = nb_param

    def cancel(self):
        """
        Cancels a job (mandatory)
        """
        cancel_func = Job.usr_func.get('cancel_job')
        if cancel_func:
            self.job_status = FINISHED
            # NOTE(review): the user function is returned, not invoked — the
            # '(self)' call is commented out upstream; confirm this is intended.
            return cancel_func
        logging.error('Error: no \'cancel_job\' function provided')
        exit()

    def finalize(self):
        """
        Finalize a job (optional)
        """
        pass
class Group(Job):
    """
    A batch job holding one or more simulations.

    All the real work is delegated to the user-provided callbacks stored
    in ``Job.usr_func``.
    """
    # class-wide counter: the next group id to hand out
    nb_groups = 0

    def __init__(self):
        """Group constructor: allocate a fresh group id."""
        Job.__init__(self)
        self.nb_param = Job.nb_param
        self.size = 0
        self.server_node_name = ''
        self.nb_restarts = 0
        self.status = NOT_SUBMITTED
        self.lock = RLock()
        self.coupling = 0
        self.group_id = Group.nb_groups
        Group.nb_groups += 1

    @staticmethod
    def reset():
        """Reset the global group counter (start of a new study)."""
        Group.nb_groups = 0

    def create(self):
        """Create the group environment via the optional 'create_group' callback."""
        handler = Job.usr_func.get('create_group')
        if handler:
            handler(self)

    def launch(self):
        """Launch the group via the mandatory 'launch_group' callback."""
        self.cores = Job.stdy_opt['simulation_cores']
        self.nodes = Job.stdy_opt['simulation_nodes']
        handler = Job.usr_func.get('launch_group')
        if not handler:
            logging.error("Error: no 'launch_group' function provided")
            exit()
        handler(self)
        self.job_status = PENDING
        self.status = WAITING

    def check_job(self):
        """Check the batch job status via 'check_group_job' (falling back to 'check_job')."""
        for key in ('check_group_job', 'check_job'):
            handler = Job.usr_func.get(key)
            if handler:
                return handler(self)
        logging.error("Error: no 'check_group_job' function provided")
        exit()

    def cancel(self):
        """
        Return the cancel callback: 'cancel_group_job' when provided,
        otherwise 'cancel_job' (marking the job FINISHED); abort when
        neither exists.
        """
        handler = Job.usr_func.get('cancel_group_job')
        if handler:
            return handler
        handler = Job.usr_func.get('cancel_job')
        if handler:
            self.job_status = FINISHED
            return handler
        logging.error("Error: no 'cancel_job' function provided")
        exit()

    def finalize(self):
        """Finalize the group via the optional 'finalize_group' callback."""
        handler = Job.usr_func.get('finalize_group')
        if handler:
            handler(self)
class MultiSimuGroup(Group):
    """
    Group of several independent simulations (no Sobol' indices).

    Each member simulation gets its own parameter set and a unique
    simulation id drawn from the class-wide ``nb_simu`` counter.
    """
    # class-wide counter: the next unique simulation id
    nb_simu = 0

    def __init__(self, param_sets):
        """Build a group holding one simulation per entry of *param_sets*."""
        Group.__init__(self)
        self.param_set = []
        self.simu_id = []
        self.size = len(param_sets)
        self.simu_status = []
        self.coupling = 0  # members of this group are not coupled
        self.job_type = 3
        for param_set in param_sets:
            # copy so later caller-side mutations cannot leak into the group
            self.param_set.append(numpy.copy(param_set))
            self.simu_id.append(MultiSimuGroup.nb_simu)
            self.simu_status.append(0)
            MultiSimuGroup.nb_simu += 1

    @staticmethod
    def reset():
        """Reset the global simulation counter (start of a new study)."""
        MultiSimuGroup.nb_simu = 0

    def _announce_simulations(self):
        """Send one 'job init' message per member simulation to the server."""
        for simu_id, params in zip(self.simu_id, self.param_set):
            melissa_comm4py.send_job_init(simu_id,
                                          str(self.job_id).encode(),
                                          len(params),
                                          (c_double * len(params))(*params))

    def launch(self):
        """Launch the batch job, then announce every simulation to the server."""
        Group.launch(self)
        self._announce_simulations()

    def restart(self):
        """
        Cancel and restart the simulations (mandatory).

        After more than ``crashes_before_drop`` restarts the parameter sets
        are considered bad: the simulations are dropped server-side and the
        group is marked FINISHED instead of being relaunched.
        """
        crashes_before_drop = 3
        # BUGFIX: Group.cancel() returns the user callback without awaiting
        # anything, so wrapping it in asyncio run_until_complete() could only
        # raise TypeError; call it synchronously, as SobolGroup.restart does.
        self.cancel()
        self.nb_restarts += 1
        if self.nb_restarts > crashes_before_drop:
            logging.warning('Simulation group ' + str(self.group_id) +
                            'crashed '+str(crashes_before_drop)+' times, remove simulation')
            for i in range(self.size):
                logging.warning('Bad parameter set '+str(i)+': ' + str(self.param_set[i]))
                melissa_comm4py.send_drop(self.simu_id[i], str(self.job_id).encode())
            self.job_status = FINISHED
            self.status = FINISHED
            return
        if Job.usr_func.get('restart_group'):
            Job.usr_func['restart_group'](self)
        else:
            logging.warning('warning: no \'restart_group\''
                            +' function provided,'
                            +' using \'launch_group\' instead')
            self.launch()
        self.job_status = PENDING
        self.status = WAITING
        # re-announce the simulations under the (possibly new) batch job id
        self._announce_simulations()
class SobolGroup(Group):
    """
    Group of coupled simulations for Sobol' index computation.

    Holds the two base parameter sets A and B plus, for every parameter i,
    a copy of A whose i-th entry is taken from B — nb_param + 2 coupled
    simulations in total.
    """

    def __init__(self, param_set_a, param_set_b):
        """Build the A / B / A_i parameter sets and their simulation ids."""
        Group.__init__(self)
        self.param_set = []
        self.simu_id = []
        self.job_type = 4
        # BUGFIX: the fallback used to be the *key* string
        # "MELISSA_COUPLING_DEFAULT", so an unknown coupling name yielded a
        # string instead of an int coupling code; fall back to the value (0).
        self.coupling = COUPLING_DICT.get(Job.stdy_opt['coupling'].upper(),
                                          COUPLING_DICT['MELISSA_COUPLING_DEFAULT'])
        # simulation ids are globally unique: nb_param + 2 per group
        base_id = self.group_id * (len(param_set_a) + 2)
        self.param_set.append(numpy.copy(param_set_a))
        self.simu_id.append(base_id)
        self.param_set.append(numpy.copy(param_set_b))
        self.simu_id.append(base_id + 1)
        for i in range(len(param_set_a)):
            self.param_set.append(numpy.copy(self.param_set[0]))
            self.param_set[i+2][i] = numpy.copy(self.param_set[1][i])
            self.simu_id.append(base_id + i + 2)
        self.size = len(self.param_set)

    def _announce_group(self):
        """Send the group's 'job init' message (parameter set A) to the server."""
        melissa_comm4py.send_job_init(self.group_id,
                                      str(self.job_id).encode(),
                                      len(self.param_set[0]),
                                      self.param_set[0].ctypes.data_as(POINTER(c_double)))

    def launch(self):
        """Launch the batch job, then announce the group to the server."""
        Group.launch(self)
        self._announce_group()

    def restart(self):
        """
        Cancel and restart the Sobol' group (mandatory).

        After more than ``crashes_before_drop`` restarts the group is
        dropped server-side and marked FINISHED instead of relaunched.
        """
        crashes_before_drop = 3
        self.cancel()
        self.nb_restarts += 1
        if self.nb_restarts > crashes_before_drop:
            logging.warning('Simulation group ' + str(self.group_id) +
                            'crashed '+str(crashes_before_drop)+' times, remove simulation')
            logging.warning('Bad parameter set A: ' + str(self.param_set[0]))
            logging.warning('Bad parameter set B: ' + str(self.param_set[1]))
            melissa_comm4py.send_drop(self.group_id, str(self.job_id).encode())
            self.job_status = FINISHED
            self.status = FINISHED
            return
        if Job.usr_func.get('restart_group'):
            Job.usr_func['restart_group'](self)
        else:
            logging.warning('warning: no \'restart_group\''
                            +' function provided,'
                            +' using \'launch_group\' instead')
            self.launch()
        self.job_status = PENDING
        self.status = WAITING
        # re-announce the group under the (possibly new) batch job id
        self._announce_group()
# TODO(refactor): it is somewhat illogical that this file is named
# simulation.py yet also contains the Server job class — consider moving it.
class Server(Job):
"""
Server class
"""
    def __init__(self):
        """
        Server constructor: set up paths, status and the restart lock.
        """
        Job.__init__(self)
        self.nb_param = 0
        self.status = WAITING
        # batch id of the very first submission, kept across restarts
        self.first_job_id = ''
        self.directory = "./"
        # self.create_options()
        self.lock = RLock()
        # location of the melissa_server executable
        self.path = melissa_install_prefix+'/bin'
        self.job_type = 1
        self.options = ''
        self.want_stop = False
    def set_path(self, work_dir="./"):
        """Set the working directory where server files are written."""
        self.directory = work_dir
    def set_nb_param(self, nb_param):
        """Set the number of study parameters (passed as -p to the server)."""
        self.nb_param = nb_param
def write_node_name(self):
os.chdir(self.directory)
fichier=open("server_name.txt", "w")
fichier.write(self.node_name[0])
fichier.close()
os.system("chmod 744 server_name.txt")
    def create_options(self):
        """
        Build the Melissa Server command line options (``self.cmd_opt``).

        In assimilation mode the options are positional; otherwise they are
        the classic flag-style options (-o statistics, -p parameters, ...).
        NOTE(review): indentation of this copy of the file was damaged; the
        nesting below was reconstructed — confirm against upstream Melissa.
        """
        buff = create_string_buffer(256)
        melissa_comm4py.get_node_name(buff)
        node_name = buff.value.decode()
        if Job.stdy_opt['assimilation']:
            def mget(what):
                # shorthand for the 'assimilation_*' study options
                op_name = 'assimilation_%s' % what
                return Job.stdy_opt[op_name]
            options_to_mget = ["total_steps", "ensemble_size", "assimilator_type",
                               "max_runner_timeout", "server_slowdown_factor"]
            filling = [mget(x) for x in options_to_mget]
            filling.append(node_name)
            # NOTE(review): leftover debug print — consider logging.debug
            print('filling:', filling)
            self.cmd_opt = '%d %d %d %d %d %s' % tuple(filling)
        else:
            # colon-separated list of the enabled statistics
            op_str = ':'.join([x for x in Job.ml_stats if Job.ml_stats[x]])
            self.options = ':'.join([x for x in Job.ml_stats if Job.ml_stats[x]])
            field_str = ':'.join([x for x in Job.stdy_opt['field_names']])
            self.options += ' '
            self.options += ':'.join([x for x in Job.stdy_opt['field_names']])
            if field_str == '':
                logging.error('error bad option: no field name given')
                return
            quantile_str = '0'
            if Job.ml_stats['quantiles']:
                quantile_str = ':'.join([str(x) for x in Job.stdy_opt['quantile_values']])
                if quantile_str == '':
                    logging.error('error bad option: no quantile value given')
                    return
                self.options += ' '
                self.options += quantile_str
            threshold_str = '0'
            if Job.ml_stats['threshold_exceedances']:
                threshold_str = ':'.join([str(x) for x in Job.stdy_opt['threshold_values']])
                if threshold_str == '':
                    logging.error('error bad option: no threshold value given')
                    return
                self.options += ' '
                self.options += threshold_str
            self.cmd_opt = ' '.join(('-o', op_str,
                                     '-p', str(self.nb_param),
                                     '-s', str(Job.stdy_opt['sampling_size']),
                                     '-t', str(Job.stdy_opt['nb_timesteps']),
                                     '-q', quantile_str,
                                     '-e', threshold_str,
                                     '-c', str(Job.stdy_opt['checkpoint_interval']),
                                     '-w', str(Job.stdy_opt['simulation_timeout']),
                                     '-f', field_str,
                                     '-v', str(Job.stdy_opt['verbosity']),
                                     '--txt_push_port', str(Job.stdy_opt['recv_port']),
                                     '--txt_pull_port', str(Job.stdy_opt['send_port']),
                                     '--txt_req_port', str(Job.stdy_opt['resp_port']),
                                     '--data_port', str(Job.stdy_opt['data_port']),
                                     '-n', node_name))
            if Job.stdy_opt['learning']:
                self.cmd_opt += " -l "+str(Job.stdy_opt['nn_path'])
            else:
                # without learning, at least one statistic must be requested
                if op_str == '':
                    logging.error('error bad option: no operation given')
                    return
            if Job.stdy_opt['disable_fault_tolerance'] == True:
                # NOTE(review): trailing 'l' looks like a typo for
                # "--disable_fault_tolerance" — confirm against the server CLI.
                self.cmd_opt += " --disable_fault_tolerancel"
    def launch(self):
        """
        Launch the server batch job via the mandatory 'launch_server'
        user callback; abort the launcher when none is provided.
        """
        self.cores = Job.stdy_opt['server_cores']
        self.nodes = Job.stdy_opt['server_nodes']
        os.chdir(self.directory)
        logging.info('launch server')
        logging.info('server options: '+self.cmd_opt)
        if "launch_server" in Job.usr_func.keys() \
                and Job.usr_func['launch_server']:
            # Refactor: it would be better to pass a dict of the relevant
            # properties rather than self, to clearly describe the interface
            # (same for simulations).
            Job.usr_func['launch_server'](self)
        else:
            logging.error('Error: no \'launch_server\' function provided')
            exit()
        # remember the very first batch id; job_id changes on restarts
        self.first_job_id = self.job_id
        self.job_status = PENDING
def wait_start(self):
"""
Waits for the server to start
"""
with self.lock:
status = self.status
while status | |
bazbiz():
z = foobar()
lala
''')
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=1)
differ.parse(code1, parsers=2, copies=1)
# NOTE(review): the indentation inside the dedent'ed code samples was lost in
# this copy of the file; the payloads below are reproduced verbatim and must be
# restored from upstream parso's test_diff_parser.py before running.
def test_one_call_in_function_change(differ):
    """Differ regression: a stray token appears inside a call's arguments."""
    code1 = dedent('''\
def f(self):
mro = [self]
for a in something:
yield a
def g(self):
return C(
a=str,
b=self,
)
''')
    code2 = dedent('''\
def f(self):
mro = [self]
def g(self):
return C(
a=str,
t
b=self,
)
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
    differ.parse(code1, parsers=2, copies=1)
def test_function_deletion(differ):
    """Differ regression: a nested function definition is deleted."""
    code1 = dedent('''\
class C(list):
def f(self):
def iterate():
for x in b:
break
return list(iterate())
''')
    code2 = dedent('''\
class C():
def f(self):
for x in b:
break
return list(iterate())
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=1, copies=0, expect_error_leaves=True)
    differ.parse(code1, parsers=1, copies=0)
def test_docstring_removal(differ):
    """Differ regression: lines are removed from a class docstring and a method."""
    code1 = dedent('''\
class E(Exception):
"""
1
2
3
"""
class S(object):
@property
def f(self):
return cmd
def __repr__(self):
return cmd2
''')
    code2 = dedent('''\
class E(Exception):
"""
1
3
"""
class S(object):
@property
def f(self):
return cmd
return cmd2
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=1, copies=2)
    differ.parse(code1, parsers=3, copies=1)
def test_paren_in_strange_position(differ):
    """Differ regression: a lone closing paren appears inside a class body."""
    code1 = dedent('''\
class C:
""" ha """
def __init__(self, message):
self.message = message
''')
    code2 = dedent('''\
class C:
""" ha """
)
def __init__(self, message):
self.message = message
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=1, copies=2, expect_error_leaves=True)
    differ.parse(code1, parsers=0, copies=2)
def insert_line_into_code(code, index, line):
    """Return *code* with *line* inserted before line number *index*."""
    lines = split_lines(code, keepends=True)
    lines.insert(index, line)
    return ''.join(lines)
# NOTE(review): the indentation inside the dedent'ed code samples was lost in
# this copy of the file; the payloads below are reproduced verbatim and must be
# restored from upstream parso's test_diff_parser.py before running.
def test_paren_before_docstring(differ):
    """Differ regression: an unclosed paren line is inserted before a docstring."""
    code1 = dedent('''\
# comment
"""
The
"""
from parso import tree
from parso import python
''')
    code2 = insert_line_into_code(code1, 1, ' ' * 16 + 'raise InternalParseError(\n')
    differ.initialize(code1)
    differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
    differ.parse(code1, parsers=2, copies=1)
def test_parentheses_before_method(differ):
    """Differ regression: an unclosed call expression precedes a method def."""
    code1 = dedent('''\
class A:
def a(self):
pass
class B:
def b(self):
if 1:
pass
''')
    code2 = dedent('''\
class A:
def a(self):
pass
Exception.__init__(self, "x" %
def b(self):
if 1:
pass
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
    differ.parse(code1, parsers=2, copies=1)
def test_indentation_issues(differ):
    """Differ regression: stray prose and removed lines inside method bodies."""
    code1 = dedent('''\
class C:
def f():
1
if 2:
return 3
def g():
to_be_removed
pass
''')
    code2 = dedent('''\
class C:
def f():
1
``something``, very ``weird``).
if 2:
return 3
def g():
to_be_removed
pass
''')
    code3 = dedent('''\
class C:
def f():
1
if 2:
return 3
def g():
pass
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=3, copies=1, expect_error_leaves=True)
    differ.parse(code1, copies=1, parsers=2)
    differ.parse(code3, parsers=2, copies=1)
    differ.parse(code1, parsers=2, copies=1)
def test_error_dedent_issues(differ):
    """Differ regression: duplicated except clauses and inserted statements."""
    code1 = dedent('''\
while True:
try:
1
except KeyError:
if 2:
3
except IndexError:
4
5
''')
    code2 = dedent('''\
while True:
try:
except KeyError:
1
except KeyError:
if 2:
3
except IndexError:
4
something_inserted
5
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=3, copies=0, expect_error_leaves=True)
    differ.parse(code1, parsers=1, copies=0)
def test_random_text_insertion(differ):
    """Differ regression: random prose is inserted between two methods."""
    code1 = dedent('''\
class C:
def f():
return node
def g():
try:
1
except KeyError:
2
''')
    code2 = dedent('''\
class C:
def f():
return node
Some'random text: yeah
for push in plan.dfa_pushes:
def g():
try:
1
except KeyError:
2
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
    differ.parse(code1, parsers=2, copies=1)
def test_many_nested_ifs(differ):
    """Differ regression: broken if/else nesting inside nested functions."""
    code1 = dedent('''\
class C:
def f(self):
def iterate():
if 1:
yield t
else:
yield
return
def g():
3
''')
    code2 = dedent('''\
def f(self):
def iterate():
if 1:
yield t
hahahaha
if 2:
else:
yield
return
def g():
3
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
    differ.parse(code1, parsers=1, copies=1)
@pytest.mark.parametrize('prefix', ['', 'async '])
def test_with_and_funcdef_in_call(differ, prefix):
    """Differ regression: a def line is inserted inside a call, with/async with."""
    code1 = prefix + dedent('''\
with x:
la = C(
a=1,
b=2,
c=3,
)
''')
    code2 = insert_line_into_code(code1, 3, 'def y(self, args):\n')
    differ.initialize(code1)
    differ.parse(code2, parsers=1, expect_error_leaves=True)
    differ.parse(code1, parsers=1)
def test_wrong_backslash(differ):
    """Differ regression: a stray backslash-continuation line is inserted."""
    code1 = dedent('''\
def y():
1
for x in y:
continue
''')
    code2 = insert_line_into_code(code1, 3, '\\.whl$\n')
    differ.initialize(code1)
    differ.parse(code2, parsers=3, copies=1, expect_error_leaves=True)
    differ.parse(code1, parsers=1, copies=1)
# NOTE(review): the indentation inside the dedent'ed code samples was lost in
# this copy of the file; the payloads below are reproduced verbatim and must be
# restored from upstream parso's test_diff_parser.py before running.
def test_random_unicode_characters(differ):
    """
    Those issues were all found with the fuzzer.
    """
    differ.initialize('')
    differ.parse('\x1dĔBϞɛˁşʑ˳˻ȣſéÎ\x90̕ȟòwʘ\x1dĔBϞɛˁşʑ˳˻ȣſéÎ', parsers=1,
                 expect_error_leaves=True)
    differ.parse('\r\r', parsers=1)
    differ.parse("˟Ę\x05À\r rúƣ@\x8a\x15r()\n", parsers=1, expect_error_leaves=True)
    differ.parse('a\ntaǁ\rGĒōns__\n\nb', parsers=1)
    s = ' if not (self, "_fi\x02\x0e\x08\n\nle"):'
    differ.parse(s, parsers=1, expect_error_leaves=True)
    differ.parse('')
    differ.parse(s + '\n', parsers=1, expect_error_leaves=True)
    differ.parse(' result = (\r\f\x17\t\x11res)', parsers=1, expect_error_leaves=True)
    differ.parse('')
    differ.parse(' a( # xx\ndef', parsers=1, expect_error_leaves=True)
def test_dedent_end_positions(differ):
    """Differ regression: unicode garbage plus a stray dict fragment."""
    code1 = dedent('''\
if 1:
if b:
2
c = {
5}
''')
    code2 = dedent('''\
if 1:
if ⌟ഒᜈྡྷṭb:
2
'l': ''}
c = {
5}
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=1, expect_error_leaves=True)
    differ.parse(code1, parsers=1)
def test_special_no_newline_ending(differ):
    """Differ regression: file ending without a trailing newline."""
    code1 = dedent('''\
1
''')
    code2 = dedent('''\
1
is ''')
    differ.initialize(code1)
    differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
    differ.parse(code1, copies=1, parsers=0)
def test_random_character_insertion(differ):
    """Differ regression: a random character is inserted before a statement."""
    code1 = dedent('''\
def create(self):
1
if self.path is not None:
return
# 3
# 4
''')
    code2 = dedent('''\
def create(self):
1
if 2:
x return
# 3
# 4
''')
    differ.initialize(code1)
    differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
    differ.parse(code1, copies=1, parsers=1)
def test_import_opening_bracket(differ):
    """Differ regression: an unclosed import paren at end of file."""
    code1 = dedent('''\
1
2
from bubu import (X,
''')
    code2 = dedent('''\
11
2
from bubu import (X,
''')
    differ.initialize(code1)
    differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True)
    differ.parse(code1, copies=1, parsers=2, expect_error_leaves=True)
def test_opening_bracket_at_end(differ):
    """Differ regression: an unclosed bracket terminating a class body."""
    code1 = dedent('''\
class C:
1
[
''')
    code2 = dedent('''\
3
class C:
1
[
''')
    differ.initialize(code1)
    differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True)
    differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True)
def test_all_sorts_of_indentation(differ):
    """Differ regression: fuzzer-found mixtures of broken indentation."""
    code1 = dedent('''\
class C:
1
def f():
'same'
if foo:
a = b
end
''')
    code2 = dedent('''\
class C:
1
def f(yield await %|(
'same'
\x02\x06\x0f\x1c\x11
if foo:
a = b
end
''')
    differ.initialize(code1)
    differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
    differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True)
    code3 = dedent('''\
if 1:
a
b
c
d
\x00
''')
    differ.parse(code3, parsers=1, expect_error_leaves=True)
    differ.parse('')
def test_dont_copy_dedents_in_beginning(differ):
    """Differ regression: leading dedent tokens must not be copied over."""
    code1 = dedent('''\
a
4
''')
    code2 = dedent('''\
1
2
3
4
''')
    differ.initialize(code1)
    differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
    differ.parse(code1, parsers=1, copies=1)
def test_dont_copy_error_leaves(differ):
    """Differ regression: error leaves must not be reused on reparse."""
    code1 = dedent('''\
def f(n):
x
if 2:
3
''')
    code2 = dedent('''\
def f(n):
def if 1:
indent
x
if 2:
3
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=1, expect_error_leaves=True)
    differ.parse(code1, parsers=1)
def test_error_dedent_in_between(differ):
    """Differ regression: an error dedent appears between two suites."""
    code1 = dedent('''\
class C:
def f():
a
if something:
x
z
''')
    code2 = dedent('''\
class C:
def f():
a
dedent
if other_thing:
b
if something:
x
z
''')
    differ.initialize(code1)
    differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True)
    differ.parse(code1, copies=1, parsers=2)
# NOTE(review): the indentation inside the dedent'ed code samples was lost in
# this copy of the file; the payloads below are reproduced verbatim and must be
# restored from upstream parso's test_diff_parser.py before running.
def test_some_other_indentation_issues(differ):
    """Differ regression: class body replaced by a broken try block."""
    code1 = dedent('''\
class C:
x
def f():
""
copied
a
''')
    code2 = dedent('''\
try:
de
a
b
c
d
def f():
""
copied
a
''')
    differ.initialize(code1)
    differ.parse(code2, copies=0, parsers=1, expect_error_leaves=True)
    differ.parse(code1, copies=1, parsers=1)
def test_open_bracket_case1(differ):
    """Differ regression: open bracket inserted, then more content, then revert."""
    code1 = dedent('''\
class C:
1
2 # ha
''')
    code2 = insert_line_into_code(code1, 2, ' [str\n')
    code3 = insert_line_into_code(code2, 4, ' str\n')
    differ.initialize(code1)
    differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
    differ.parse(code3, copies=1, parsers=1, expect_error_leaves=True)
    differ.parse(code1, copies=1, parsers=1)
def test_open_bracket_case2(differ):
    """Differ regression: unclosed paren spanning two method definitions."""
    code1 = dedent('''\
class C:
def f(self):
(
b
c
def g(self):
d
''')
    code2 = dedent('''\
class C:
def f(self):
(
b
c
self.
def g(self):
d
''')
    differ.initialize(code1)
    differ.parse(code2, copies=0, parsers=1, expect_error_leaves=True)
    differ.parse(code1, copies=0, parsers=1, expect_error_leaves=True)
def test_some_weird_removals(differ):
    """Differ regression: decorator fragments and stray semicolons removed."""
    code1 = dedent('''\
class C:
1
''')
    code2 = dedent('''\
class C:
1
@property
A
return
# x
omega
''')
    code3 = dedent('''\
class C:
1
;
omega
''')
    differ.initialize(code1)
    differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
    differ.parse(code3, copies=1, parsers=3, expect_error_leaves=True)
    differ.parse(code1, copies=1)
def test_async_copy(differ):
    """Differ regression: unclosed paren inside an async function, then closed."""
    code1 = dedent('''\
async def main():
x = 3
print(
''')
    code2 = dedent('''\
async def main():
x = 3
print()
''')
    differ.initialize(code1)
    differ.parse(code2, copies=1, parsers=1)
    differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True)
def test_parent_on_decorator(differ):
    """Differ regression: reparse of a decorated method keeps the class suite intact."""
    code1 = dedent('''\
class AClass:
@decorator()
def b_test(self):
print("Hello")
print("world")
def a_test(self):
pass''')
    code2 = dedent('''\
class AClass:
@decorator()
def b_test(self):
print("Hello")
print("world")
def a_test(self):
pass''')
    differ.initialize(code1)
    module_node = differ.parse(code2, parsers=1)
    cls = module_node.children[0]
    cls_suite = cls.children[-1]
    assert len(cls_suite.children) == 3
def test_wrong_indent_in_def(differ):
    """Differ regression: invalid tokens at the first indent of a def body."""
    code1 = dedent('''\
def x():
a
b
''')
    code2 = dedent('''\
def x():
//
b
c
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=1, expect_error_leaves=True)
    differ.parse(code1, parsers=1)
def test_backslash_issue(differ):
    """Differ regression: stray backslash continuation after a paren group."""
    code1 = dedent('''
pre = (
'')
after = 'instead'
''')
    code2 = dedent('''
pre = (
'')
\\if
''')  # noqa
    differ.initialize(code1)
    differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
    differ.parse(code1, parsers=1, copies=1)
def test_paren_with_indentation(differ):
    """Differ regression: an open paren swallows following indented suites."""
    code1 = dedent('''
class C:
def f(self, fullname, path=None):
x
def load_module(self, fullname):
a
for prefix in self.search_path:
try:
b
except ImportError:
c
else:
raise
def x():
pass
''')
    code2 = dedent('''
class C:
def f(self, fullname, path=None):
x
(
a
for prefix in self.search_path:
try:
b
except ImportError:
c
else:
raise
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
    differ.parse(code1, parsers=3, copies=1)
def test_error_dedent_in_function(differ):
    """Differ regression: an extra error dedent appears inside a function."""
    code1 = dedent('''\
def x():
a
b
c
d
''')
    code2 = dedent('''\
def x():
a
b
c
d
e
''')
    differ.initialize(code1)
    differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
def test_with_formfeed(differ):
code1 | |
<reponame>WildMeOrg/wbia-utool
# -*- coding: utf-8 -*-
"""
TODO: box and whisker
http://tex.stackexchange.com/questions/115210/boxplot-in-latex
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from six.moves import range, map, zip
import os
import re
import textwrap
try:
import numpy as np
except ImportError:
pass
from os.path import join, splitext, dirname # NOQA
from utool import util_num
from utool import util_inject
print, rrr, profile = util_inject.inject2(__name__)
# def ensure_latex_environ():
# paths = os.environ['PATH'].split(os.pathsep)
# mpl.rc('font',**{'family':'serif'})
# mpl.rc('text', usetex=True)
# mpl.rc('text.latex',unicode=True)
# mpl.rc('text.latex',preamble='\usepackage[utf8]{inputenc}')
def find_ghostscript_exe():
    """Return the ghostscript executable: a hardcoded path on win32, 'gs' elsewhere."""
    import utool as ut
    if ut.WIN32:
        return r'C:\Program Files (x86)\gs\gs9.16\bin\gswin32c.exe'
    return 'gs'
def compress_pdf(pdf_fpath, output_fname=None):
    """
    Use ghostscript to rewrite ``pdf_fpath`` as a (usually smaller) PDF.

    The output name defaults to the input name augmented with a
    '_<datestamp>_compressed' suffix; the output path is returned.
    """
    import utool as ut
    ut.assertpath(pdf_fpath)
    suffix = '_' + ut.get_datestamp(False) + '_compressed'
    print('pdf_fpath = %r' % (pdf_fpath,))
    output_pdf_fpath = ut.augpath(pdf_fpath, suffix, newfname=output_fname)
    print('output_pdf_fpath = %r' % (output_pdf_fpath,))
    gs_exe = find_ghostscript_exe()
    # standard ghostscript re-distillation invocation
    cmd_list = (
        gs_exe,
        '-sDEVICE=pdfwrite',
        '-dCompatibilityLevel=1.4',
        '-dNOPAUSE',
        '-dQUIET',
        '-dBATCH',
        '-sOutputFile=' + output_pdf_fpath,
        pdf_fpath,
    )
    ut.cmd(*cmd_list)
    return output_pdf_fpath
def make_full_document(text, title=None, preamp_decl=None, preamb_extra=None):
    r"""
    Wrap a LaTeX fragment in a dummy preamble and document environment.

    Args:
        text (str): LaTeX body fragment.
        title (str): optional document title (also triggers \maketitle).
        preamp_decl (dict): preamble declarations, e.g. {'title': 'foo'};
            a fresh dict is created when None.
        preamb_extra (str or list): extra preamble lines.

    Returns:
        str: a complete, compilable LaTeX document.

    CommandLine:
        python -m utool.util_latex --test-make_full_document

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_latex import *  # NOQA
        >>> text = 'foo'
        >>> title = 'title'
        >>> text_ = make_full_document(text, title)
        >>> result = str(text_)
        >>> print(result)
    """
    import utool as ut
    # BUGFIX: the old signature used the mutable default `preamp_decl={}`,
    # which is mutated below (preamp_decl['title'] = title), so the title of
    # one call leaked into every subsequent call. Create a fresh dict per call.
    if preamp_decl is None:
        preamp_decl = {}
    doc_preamb = ut.codeblock(
        """
%\\documentclass{article}
\\documentclass[10pt,twocolumn,letterpaper]{article}
% \\usepackage[utf8]{inputenc}
\\usepackage[T1]{fontenc}
\\usepackage{times}
\\usepackage{epsfig}
\\usepackage{graphicx}
\\usepackage{amsmath,amsthm,amssymb}
\\usepackage[usenames,dvipsnames,svgnames,table]{xcolor}
\\usepackage{multirow}
\\usepackage{subcaption}
\\usepackage{booktabs}
%\\pagenumbering{gobble}
"""
    )
    if preamb_extra is not None:
        if isinstance(preamb_extra, (list, tuple)):
            preamb_extra = '\n'.join(preamb_extra)
        doc_preamb += '\n' + preamb_extra + '\n'
    if title is not None:
        preamp_decl['title'] = title
    decl_lines = [
        r'\{key}{{{val}}}'.format(key=key, val=val) for key, val in preamp_decl.items()
    ]
    doc_decllines = '\n'.join(decl_lines)
    doc_header = ut.codeblock(
        r"""
\begin{document}
"""
    )
    if preamp_decl.get('title') is not None:
        doc_header += r'\maketitle'
    doc_footer = ut.codeblock(
        r"""
\end{document}
"""
    )
    text_ = '\n'.join((doc_preamb, doc_decllines, doc_header, text, doc_footer))
    return text_
def render_latex_text(
    input_text, nest_in_doc=False, preamb_extra=None, appname='utool', verbose=None
):
    """
    Compile *input_text* with LaTeX in an app resource dir and open the PDF.

    NOTE(review): the `nest_in_doc` parameter is accepted but never used here;
    compile_latex_text autodetects nesting — confirm whether it should be
    forwarded.
    """
    import utool as ut
    if verbose is None:
        verbose = ut.VERBOSE
    dpath = ut.ensure_app_resource_dir(appname, 'latex_tmp')
    # put a latex fragment in a full document
    # print(input_text)
    fname = 'temp_render_latex'
    pdf_fpath = ut.compile_latex_text(
        input_text, dpath=dpath, fname=fname, preamb_extra=preamb_extra, verbose=verbose
    )
    ut.startfile(pdf_fpath)
    return pdf_fpath
def compile_latex_text(
    input_text,
    dpath=None,
    fname=None,
    verbose=True,
    move=True,
    nest_in_doc=None,
    title=None,
    preamb_extra=None,
):
    r"""
    Compile a LaTeX fragment (or full document) to PDF with lualatex.

    Args:
        input_text (str): LaTeX fragment or complete document.
        dpath (str): output directory (default: cwd).
        fname (str): output basename (default: 'temp_latex').
        verbose (bool): verbosity flag.
        move (bool): move the PDF out of the hidden work dir when True.
        nest_in_doc (bool): wrap in a full document; autodetected when None.
        title (str): optional document title.
        preamb_extra (str or list): extra preamble lines.

    Returns:
        str: path to the produced PDF.

    CommandLine:
        python -m utool.util_latex --test-compile_latex_text --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_latex import *  # NOQA
        >>> import utool as ut
        >>> verbose = True
        >>> dpath = dirname(ut.grab_test_imgpath())
        >>> orig_fpaths = ut.list_images(dpath, fullpath=True)
        >>> figure_str = ut.get_latex_figure_str(orig_fpaths, width_str='2.4in', nCols=2)
        >>> input_text = figure_str
        >>> pdf_fpath = ut.compile_latex_text(input_text, dpath=dpath,
        >>>                                   verbose=verbose)
        >>> output_pdf_fpath = ut.compress_pdf(pdf_fpath)
        >>> print(pdf_fpath)
        >>> ut.quit_if_noshow()
        >>> ut.startfile(pdf_fpath)
    """
    import utool as ut
    if verbose:
        print('[ut] compile_latex_text')
    if nest_in_doc is None:
        nest_in_doc = 'documentclass' not in input_text
    # BUGFIX: `text` used to be assigned only inside the nest_in_doc branch,
    # so passing a complete document crashed with NameError at write_to().
    if nest_in_doc:
        text = make_full_document(input_text, title=title, preamb_extra=preamb_extra)
    else:
        text = input_text
    if not dpath:
        dpath = os.getcwd()
    if fname is None:
        fname = 'temp_latex'
    # Compile inside a hidden scratch directory to keep aux files out of dpath
    work_dpath = join(dpath, '.tmptex')
    ut.ensuredir(work_dpath, verbose=verbose > 1)
    fname_tex = ut.ensure_ext(fname, '.tex')
    fname_pdf = ut.ensure_ext(fname, '.pdf')
    tex_fpath = join(work_dpath, fname_tex)
    pdf_fpath_output = join(work_dpath, fname_pdf)
    ut.write_to(tex_fpath, text)
    with ut.ChdirContext(work_dpath, verbose=verbose > 1):
        args = ' '.join(
            [
                'lualatex',
                '-shell-escape',
                '--synctex=-1',
                '-src-specials',
                '-interaction=nonstopmode',
                tex_fpath,
            ]
        )
        info = ut.cmd2(args, verbose=verbose > 1)
        if not ut.checkpath(pdf_fpath_output, verbose=verbose > 1):
            print('Error compiling LaTeX')
            ut.print_code(text, 'latex')
            print(info['out'])
            raise RuntimeError('latex failed ')
    if move:
        pdf_fpath = join(dpath, fname_pdf)
        ut.move(pdf_fpath_output, pdf_fpath, verbose=verbose > 1)
    else:
        pdf_fpath = pdf_fpath_output
    return pdf_fpath
def convert_pdf_to_image(pdf_fpath, ext='.jpg', verbose=1, dpi=300, quality=90):
    """
    Rasterize a PDF next to itself using ImageMagick's ``convert``.

    Returns the path of the produced image (same basename, new extension).
    Raises when convert is missing (UNIX check) or produced no output.
    """
    import utool as ut
    if verbose:
        print('[ut] convert_pdf_to_image.')
    img_fpath = ut.ensure_ext(pdf_fpath, ext)
    if ut.UNIX:
        # fail fast with a clear error if ImageMagick is not installed
        convert_fpath = ut.cmd2('which convert')['out'].strip()
        if not convert_fpath:
            raise Exception('ImageMagik convert was not found')
    args = ' '.join(
        ['convert', '-density', str(dpi), pdf_fpath, '-quality', str(quality), img_fpath]
    )
    info = ut.cmd2(args, verbose=verbose > 1)  # NOQA
    if not ut.checkpath(img_fpath, verbose=verbose > 1):
        print('Failed to convert pdf to ' + ext)
        print(info['out'])
        raise Exception('ImageMagik failed to convert pdf to ' + ext)
    return img_fpath
def render_latex(
    input_text, dpath=None, fname=None, preamb_extra=None, verbose=1, **kwargs
):
    """
    Renders latex text into a jpeg.

    Whitespace that would have appeared in the PDF is removed, so the jpeg is
    cropped only to the relevant part. This is ideal for figures that only
    take a single page.

    Args:
        input_text (str): LaTeX fragment to render.
        dpath (str): directory path (default = None)
        fname (str): file name (default = None)
        preamb_extra (None): (default = None)
        verbose (int): verbosity flag (default = 1)

    Returns:
        str: jpg_fpath - file path string

    CommandLine:
        python -m utool.util_latex render_latex '$O(n^2)$' --fpath=~/slides/tmp.jpg

    Script:
        >>> # SCRIPT
        >>> from utool.util_latex import *  # NOQA
        >>> from os.path import split, expanduser
        >>> import utool as ut
        >>> input_text = ' '.join(ut.get_varargs()[1:])
        >>> dpath, fname = split(ut.argval('--fpath', ''))
        >>> dpath = expanduser(ut.argval('--dpath', dpath))
        >>> fname = ut.argval('--fname', fname)
        >>> kwargs = ut.dict_subset(ut.argparse_funckw(ut.convert_pdf_to_image), ['dpi', 'quality'])
        >>> jpg_fpath = render_latex(input_text, dpath, fname, **kwargs)
        >>> if ut.argflag('--diskshow'):
        >>>     ut.startfile(jpg_fpath)
    """
    import utool as ut
    # BUGFIX: this used to be `try: import vtool as vt` with an except clause
    # that re-imported the very same module, which could only re-raise the
    # original ImportError; a plain import is equivalent and honest.
    import vtool as vt
    # turn off page numbers so the cropped image has no stray digits
    input_text_ = '\\pagenumbering{gobble}\n' + input_text
    img_fname = ut.ensure_ext(fname, ['.jpg'] + list(ut.IMG_EXTENSIONS))
    img_fpath = join(dpath, img_fname)
    pdf_fpath = ut.compile_latex_text(
        input_text_,
        fname=fname,
        dpath=dpath,
        preamb_extra=preamb_extra,
        verbose=verbose,
        move=False,
    )
    ext = splitext(img_fname)[1]
    fpath_in = ut.convert_pdf_to_image(pdf_fpath, ext=ext, verbose=verbose)
    # Clip the white boundaries of the rendered pdf image
    vt.clipwhite_ondisk(fpath_in, fpath_out=img_fpath, verbose=verbose > 1)
    return img_fpath
def latex_multicolumn(data, ncol=2, alignstr='|c|'):
    r"""Wrap LaTeX-escaped *data* in a \multicolumn cell spanning *ncol* columns."""
    escaped = escape_latex(data)
    template = r'\multicolumn{%d}{%s}{%s}'
    return template % (ncol, alignstr, escaped)
def latex_multirow(data, nrow=2):
    r"""Wrap *data* in a LaTeX \multirow cell spanning *nrow* rows."""
    template = r'\multirow{%d}{*}{%s}'
    return template % (nrow, data)
def latex_get_stats(lbl, data, mode=0):
    """
    Return LaTeX tabular rows summarizing min/max/mean/std/shape of *data*.

    mode 0 packs the stats into three rows; mode 1 uses one row per stat
    in math mode. Returns a LaTeX comment when *data* is empty.
    """
    import utool as ut
    stats_ = ut.get_stats(data)
    if stats_.get('empty_list', False):
        return '% NA: latex_get_stats, data=[]'
    try:
        max_ = stats_['max']
        min_ = stats_['min']
        mean = stats_['mean']
        std = stats_['std']
        shape = stats_['shape']
    except KeyError as ex:
        stat_keys = stats_.keys()  # NOQA
        ut.printex(ex, key_list=['stat_keys', 'stats_', 'data'])
        raise
    # int_fmt = lambda num: util.num_fmt(int(num))
    def float_fmt(num):
        return util_num.num_fmt(float(num))
    def tup_fmt(tup):
        return str(tup)
    fmttup = (
        float_fmt(min_),
        float_fmt(max_),
        float_fmt(mean),
        float_fmt(std),
        tup_fmt(shape),
    )
    # spacer aligning the continuation rows under the label column
    lll = ' ' * len(lbl)
    if mode == 0:
        prefmtstr = r"""
{label} stats & min ; max = %s ; %s\\
{space} & mean; std = %s ; %s\\
{space} & shape = %s \\"""
    if mode == 1:
        prefmtstr = r"""
{label} stats & min = $%s$\\
{space} & max = $%s$\\
{space} & mean = $%s$\\
{space} & std = $%s$\\
{space} & shape = $%s$\\"""
    fmtstr = prefmtstr.format(label=lbl, space=lll)
    latex_str = textwrap.dedent(fmtstr % fmttup).strip('\n') + '\n'
    return latex_str
def latex_scalar(lbl, data):
    """Format a single labeled value as one LaTeX table row."""
    row = r'%s & %s\\' % (lbl, util_num.num_fmt(data))
    return row + '\n'
def make_stats_tabular():
    """tabular for displaying statistics (placeholder -- not implemented yet)"""
    pass
def ensure_rowvec(arr):
    """Return a copy of ``arr`` reshaped to a ``(1, N)`` row vector."""
    # np.array() copies, so the caller's array is never mutated.
    return np.array(arr).reshape(1, -1)
def ensure_colvec(arr):
    """Return a copy of ``arr`` reshaped to an ``(N, 1)`` column vector."""
    # np.array() copies, so the caller's array is never mutated.
    return np.array(arr).reshape(-1, 1)
def escape_latex(text):
    r"""
    Escape LaTeX special characters in a plain-text message.

    Args:
        text (str): a plain text message

    Returns:
        str: the message escaped to appear correctly in LaTeX

    References:
        http://stackoverflow.com/questions/16259923/how-can-i-escape-characters
    """
    conv = {
        '&': r'\&',
        '%': r'\%',
        '$': r'\$',
        '#': r'\#',
        '_': r'\_',
        '{': r'\{',
        '}': r'\}',
        '~': r'\textasciitilde{}',
        '^': r'\^{}',
        '\\': r'\textbackslash{}',
        '<': r'\textless',
        '>': r'\textgreater',
    }
    # FIX: dropped the needless runtime dependency on ``six`` -- every key is
    # an ASCII text literal, so re.escape(key) is identical to
    # re.escape(six.text_type(key)) on both Python 2 and 3.
    # Longest keys first so multi-character tokens would win over prefixes.
    ordered_keys = sorted(conv, key=len, reverse=True)
    regex = re.compile('|'.join(re.escape(key) for key in ordered_keys))
    # Each match is replaced in a single pass, so replacements are never
    # re-escaped.
    return regex.sub(lambda match: conv[match.group()], text)
def replace_all(str_, repltups):
    """Apply each ``(pattern, replacement)`` regex pair to ``str_`` in order."""
    result = str_
    for pattern, replacement in repltups:
        # Later pairs see the output of earlier substitutions.
        result = re.sub(pattern, replacement, result)
    return result
def make_score_tabular(
row_lbls,
col_lbls,
values,
title=None,
out_of=None,
bold_best=False,
flip=False,
bigger_is_better=True,
multicol_lbls=None,
FORCE_INT=False,
precision=None,
SHORTEN_ROW_LBLS=False,
col_align='l',
col_sep='|',
multicol_sep='|',
centerline=True,
astable=False,
table_position='',
AUTOFIX_LATEX=True,
**kwargs
):
r"""
makes a LaTeX tabular for displaying scores or errors
Args:
row_lbls (list of str):
col_lbls (list of str):
values (ndarray):
title (str): (default = None)
out_of (None): (default = None)
bold_best (bool): (default = True)
flip (bool): (default = False)
table_position (str) : eg '[h]'
Returns:
str: tabular_str
CommandLine:
python -m utool.util_latex --test-make_score_tabular:0 --show
python -m utool.util_latex --test-make_score_tabular:1 --show
python -m utool.util_latex --test-make_score_tabular:2 --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_latex import * # NOQA
>>> import utool as ut
>>> row_lbls = ['config1', 'config2']
>>> col_lbls = ['score \\leq 1', 'metric2']
>>> values = np.array([[1.2, 2], [3.2, 4]])
>>> title = 'title'
>>> out_of = 10
>>> bold_best | |
import threading
import time
import logging
import signal
import subprocess
import Queue
import collections
import concurrent.futures
import shlex
import json
import tempfile
import os
import re
import builder.futures
from builder.util import arrow_factory as arrow
import builder.build as build
import networkx as nx
from tornado import gen
from tornado import ioloop
from tornado.web import asynchronous, RequestHandler, Application, StaticFileHandler
from tornado.template import Loader, Template
LOG = logging.getLogger(__name__)
PROCESSING_LOG = logging.getLogger("builder.execution.processing")
TRANSITION_LOG = logging.getLogger("builder.execution.transition")
def _interruptable_sleep(seconds):
    """Sleep roughly ``seconds`` in one-second slices (at least one).

    ``time.sleep`` ignores interrupts for its whole duration, so sleeping in
    short increments lets signals break in quickly.
    """
    num_ticks = max(int(seconds), 1)
    for _ in range(num_ticks):
        time.sleep(1)
class ExecutionResult(object):
    """Outcome of a single job execution.

    ``status`` stays ``None`` until :meth:`finish` records one (or a status
    was supplied up front); ``_is_async`` records whether the result is
    completed out-of-band by the executor.
    """

    def __init__(self, is_async, status=None, stdout=None, stderr=None):
        self._is_async = is_async
        self.status = status
        self.stdout = stdout
        self.stderr = stderr

    def finish(self, status, stdout, stderr):
        """Record the final status plus captured output streams."""
        self.status, self.stdout, self.stderr = status, stdout, stderr

    def is_finished(self):
        """A result is finished once any non-None status was recorded."""
        return self.status is not None

    def is_async(self):
        """True when the executor completes this result asynchronously."""
        return self._is_async
class Executor(object):
    """Base class for job executors.

    Subclasses implement :meth:`do_execute`; :meth:`execute` wraps it with
    job preparation and, for synchronous results, completion bookkeeping.
    """

    # Should be False if this executor will handle updating the job state
    should_update_build_graph = True

    def __init__(self, execution_manager, config=None):
        # NOTE(review): ``config`` is accepted but never stored here --
        # presumably only subclass factories use it; confirm.
        self._build_graph = execution_manager.get_build()
        self._execution_manager = execution_manager
        self._initialized = False

    def initialize(self):
        """
        Put any expensive operations here that should only happen right before execution starts
        """
        pass

    def execute(self, job):
        """Execute the specified job.

        Returns None if the job does not execute because it is already running or because its get_should_run method returns False.
        Otherwise, returns an appropriate ExecutionResult object.
        """
        job = self.prepare_job_for_execution(job)
        result = None
        try:
            result = self.do_execute(job)
        finally:
            # Futures and async results are finalized elsewhere; synchronous
            # results are finished here under the manager's build update.
            if result is not None and not isinstance(result, concurrent.futures.Future):
                if not result.is_async():
                    LOG.debug("Finishing job {}".format(job.get_id()))
                    self.get_execution_manager()._update_build(lambda: self.finish_job(job, result, self.should_update_build_graph))
        return result

    def do_execute(self, job):
        # Subclass responsibility: run ``job`` and return an ExecutionResult
        # (or a Future that resolves to one).
        raise NotImplementedError()

    def get_build_graph(self):
        return self._build_graph

    def get_execution_manager(self):
        return self._execution_manager

    def prepare_job_for_execution(self, job):
        # Hook for subclasses; default is a no-op pass-through.
        return job

    def finish_job(self, job, result, update_job_cache=True):
        """Record the outcome of ``job`` and refresh dependent graph state.

        Args:
            job: the job that just finished executing.
            result (ExecutionResult): its recorded outcome.
            update_job_cache (bool): when True, refresh produced targets and
                invalidate dependents so their cached state is recomputed.
        """
        LOG.info("Job {} complete. Status: {}".format(job.get_id(), result.status))
        LOG.debug("{}(stdout): {}".format(job.get_id(), result.stdout))
        LOG.debug("{}(stderr): {}".format(job.get_id(), result.stderr))

        # Mark this job as finished running
        job.last_run = arrow.now()
        job.retries += 1
        job.is_running = False
        job.force = False

        if update_job_cache:
            # Refresh the targets this job produced, then re-evaluate the
            # should-run state for everything depending on them.
            target_ids = self.get_build_graph().get_target_ids(job.get_id())
            self._execution_manager.update_targets(target_ids)
            job_id = job.unique_id
            self.get_execution_manager().update_parents_should_run(job_id)
            # update all of it's dependents
            for target_id in target_ids:
                dependent_ids = self.get_build_graph().get_dependent_ids(target_id)
                for dependent_id in dependent_ids:
                    dependent = self.get_build_graph().get_job(dependent_id)
                    dependent.invalidate()

        # check if it succeeded and set retries to 0
        if not job.get_should_run_immediate():
            job.force = False
            job.retries = 0
        else:
            # Still wants to run: count it as a failure attempt.
            if job.retries >= self.get_execution_manager().max_retries:
                job.set_failed(True)
                job.invalidate()
                LOG.error("Maximum number of retries reached for {}".format(job))
        self.get_execution_manager().add_to_complete_queue(job.get_id())
class LocalExecutor(Executor):
    """Runs a job's command synchronously in a local subprocess."""

    def do_execute(self, job):
        """Run the job's shell command and capture its output streams."""
        command = job.get_command()
        LOG.info("Executing '{}'".format(command))
        process = subprocess.Popen(
            shlex.split(command),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stdout, stderr = process.communicate()
        LOG.info("{} STDOUT: {}".format(command, stdout))
        LOG.info("{} STDERR: {}".format(command, stderr))
        succeeded = process.returncode == 0
        return ExecutionResult(
            is_async=False, status=succeeded, stdout=stdout, stderr=stderr)
class PrintExecutor(Executor):
    """ "Executes" by printing and marking targets as available
    """
    # Simulation only: this executor updates job/target state itself, so the
    # generic completion path must not touch the build graph.
    should_update_build_graph = False

    def do_execute(self, job):
        """Simulate a successful run: print the command and mark outputs built."""
        build_graph = self.get_build_graph()
        command = job.get_command()
        # Pretend the job ran: it no longer needs to run and is not stale.
        job.set_should_run(False)
        job.set_stale(False)
        print "Simulation:", command
        target_relationships = build_graph.get_target_relationships(job.get_id())
        produced_targets = {}
        # Collect every produced target group except "alternates".
        for target_type, target_group in target_relationships.iteritems():
            if target_type == "alternates":
                continue
            produced_targets.update(target_group)
        for target_id in produced_targets:
            # Mark each produced target as existing "now" and invalidate
            # any job that consumes it so its state is recomputed.
            target = build_graph.get_target(target_id)
            target.exists = True
            target.mtime = arrow.get().timestamp
            print "Simulation: Built target {}".format(target.get_id())
            for dependent_job_id in build_graph.get_dependent_ids(target_id):
                dependent_job = build_graph.get_job(dependent_job_id)
                dependent_job.invalidate()
                # dependent_job.set_should_run(True)
        return ExecutionResult(is_async=False, status=True, stdout='', stderr='')
class ExecutionManager(object):
    def __init__(self, build_manager, executor_factory, max_retries=5, job_timeout=30*60, config=None):
        """Coordinate job execution over a build graph.

        Args:
            build_manager: provides the build graph via ``make_build()``.
            executor_factory: callable ``(execution_manager, config=...)``
                returning an Executor instance.
            max_retries (int): attempts allowed before a job is marked failed.
            job_timeout (int): seconds before a running job is considered
                timed out (default: 30 minutes).
            config: opaque configuration forwarded to the executor factory.
        """
        self.build_manager = build_manager
        self.build = build_manager.make_build()
        self.max_retries = max_retries
        self.config = config
        self._build_lock = threading.RLock()  # guards build-graph mutation
        self._work_queue = Queue.Queue()      # job ids waiting to execute
        self._complete_queue = Queue.Queue()  # job ids that finished executing
        self.executor = executor_factory(self, config=self.config)
        # Monitoring/statistics bookkeeping.
        self.execution_times = {}
        self.submitted_jobs = 0
        self.completed_jobs = 0
        self.start_time = None
        self.last_job_executed_on = None
        self.last_job_completed_on = None
        self.last_job_submitted_on = None
        self.last_job_worked_on = None
        self.job_timeout = job_timeout
        self.running = False
def _recursive_invalidate_job(self, job_id):
job = self.build.get_job(job_id)
job.invalidate()
target_ids = self.build.get_target_ids(job_id)
for target_id in target_ids:
self._recursive_invalidate_target(target_id)
def _recursive_invalidate_target(self, target_id):
target = self.build.get_target(target_id)
target.invalidate()
job_ids = self.build.get_dependent_ids(target_id)
for job_id in job_ids:
self._recursive_invalidate_job(job_id)
    def submit(self, job_definition_id, build_context, update_topmost=False, update_all=False, **kwargs):
        """
        Submit the provided job to be built

        Args:
            job_definition_id: id of the job definition (or meta node) to
                expand into the build graph.
            build_context: context used to expand the definition.
            update_topmost (bool): refresh newly added in-degree-0 targets.
            update_all (bool): refresh every newly added target.
            **kwargs: forwarded to ``build.add_job`` / ``build.add_meta``.

        Raises:
            RuntimeError: if the manager is not running.
        """
        if not self.running:
            raise RuntimeError("Cannot submit to a execution manager that "
                               "isn't running")

        def update_build_graph():
            # Add the job
            LOG.debug("SUBMISSION => Expanding build graph for submission {} {}".format(job_definition_id, build_context))
            # Plain job definitions are added directly; anything else is
            # treated as a meta node.
            if self.build.rule_dependency_graph.is_job_definition(job_definition_id):
                build_update = self.build.add_job(job_definition_id, build_context, **kwargs)
            else:
                build_update = self.build.add_meta(job_definition_id, build_context, **kwargs)

            # Optionally refresh top-level (or all) targets introduced by
            # this expansion.
            update_nodes = set()
            if update_topmost or update_all:
                for node_id in build_update.targets:
                    if self.build.in_degree(node_id) == 0 or update_all:
                        if self.build.is_target(node_id):
                            update_nodes.add(node_id)
            LOG.debug("UPDATE_SUBMITTED => {}".format(update_nodes))
            self.external_update_targets(update_nodes)
            LOG.debug("SUBMISSION => Build graph expansion complete")

            # Refresh all uncached existences
            LOG.debug("updating {} targets".format(len(build_update.new_targets)))
            self.update_targets(build_update.new_targets)

            # Invalidate the build graph for all child nodes
            newly_invalidated_job_ids = build_update.new_jobs | build_update.newly_forced
            LOG.debug("SUBMISSION => Newly invlidated job ids: {}".format(newly_invalidated_job_ids))
            LOG.debug("SUBMISSION => Jobs expanded: {}".format(build_update.jobs))
            for newly_invalidated_job_id in newly_invalidated_job_ids:
                # Re-evaluate state, then queue whatever became runnable.
                self.update_parents_should_run(newly_invalidated_job_id)
                next_job_to_run_ids = self.get_next_jobs_to_run(
                    newly_invalidated_job_id)
                LOG.debug("SUBMISSION => Next jobs {} -> {}".format(newly_invalidated_job_id, next_job_to_run_ids))
                for next_job_to_run_id in next_job_to_run_ids:
                    self.add_to_work_queue(next_job_to_run_id)
            self.last_job_submitted_on = arrow.now()
            self.submitted_jobs += 1

        # Run the whole expansion atomically under the build lock.
        self._update_build(update_build_graph)
def _update_parents_should_not_run_recurse(self, job_id):
build_graph = self.build
job = build_graph.get_job(job_id)
if not job.get_parents_should_run or job.should_ignore_parents():
return
job.invalidate()
if job.get_parents_should_run() or job.get_should_run():
return
target_ids = build_graph.get_target_ids(job_id)
for target_id in target_ids:
dependent_ids = build_graph.get_dependent_ids(target_id)
for dependent_id in dependent_ids:
self._update_parents_should_not_run_recurse(dependent_id)
def _update_parents_should_run_recurse(self, job_id):
build_graph = self.build
job = build_graph.get_job(job_id)
if job.get_parents_should_run() or job.should_ignore_parents():
return
job.invalidate()
target_ids = build_graph.get_target_ids(job_id)
for target_id in target_ids:
dependent_ids = build_graph.get_dependent_ids(target_id)
for dependent_id in dependent_ids:
self._update_parents_should_run_recurse(dependent_id)
def update_parents_should_run(self, job_id):
build_graph = self.build
job = build_graph.get_job(job_id)
job.invalidate()
target_ids = build_graph.get_target_ids(job_id)
dependent_ids = []
for target_id in target_ids:
dependent_ids = (dependent_ids +
build_graph.get_dependent_ids(target_id))
if job.get_should_run() or job.get_parents_should_run():
for dependent_id in dependent_ids:
self._update_parents_should_run_recurse(dependent_id)
else:
for dependent_id in dependent_ids:
self._update_parents_should_not_run_recurse(dependent_id)
def external_update_targets(self, target_ids):
"""Updates the state of a single target and updates everything below
it
"""
build_graph = self.build
update_job_ids = set()
self.update_targets(target_ids)
for target_id in target_ids:
add_ids = build_graph.get_creator_ids(target_id)
if not add_ids:
add_ids = build_graph.get_dependent_ids(target_id)
for add_id in add_ids:
update_job_ids.add(add_id)
LOG.debug("after updating targets, {} jobs are being updated".format(len(update_job_ids)))
for update_job_id in update_job_ids:
self.update_parents_should_run(update_job_id)
# Update the upper jobs that states depend on the target
for update_job_id in update_job_ids:
next_job_to_run_ids = self.get_next_jobs_to_run(update_job_id)
update_job = self.build.get_job(update_job_id)
for next_job_to_run_id in next_job_to_run_ids:
self.add_to_work_queue(next_job_to_run_id)
def update_top_most(self):
top_most = []
for node_id in self.build:
if self.build.in_degree(node_id) == 0:
if self.build.is_target(node_id):
top_most.append(node_id)
LOG.debug("TOP_MOST_JOBS => {}".format(top_most))
self.external_update_targets(top_most)
    def update_targets(self, target_ids):
        """Takes in a list of target ids and updates all of their needed
        values
        """
        LOG.debug("updating {} targets".format(len(target_ids)))
        # Group targets by their bulk-update function so each backend is
        # queried once per batch.
        # NOTE(review): grouping by the bound method ``get_bulk_exists_mtime``
        # only batches across targets if that attribute is shared (e.g. a
        # classmethod/staticmethod); plain bound methods are distinct per
        # instance -- confirm against the Target implementation.
        update_function_list = collections.defaultdict(list)
        for target_id in target_ids:
            target = self.build.get_target(target_id)
            func = target.get_bulk_exists_mtime
            update_function_list[func].append(target)
        for update_function, targets in update_function_list.iteritems():
            update_function(targets)
def add_to_work_queue(self, job_id):
job = self.build.get_job(job_id)
if job.is_running:
return
job.is_running = True
self._work_queue.put(job_id)
LOG.info("Adding {} to ExecutionManager's work queue. There are now approximately {} jobs in the queue.".format(job_id, self._work_queue.qsize()))
    def add_to_complete_queue(self, job_id):
        """Hand a finished job id to the completion consumer loop."""
        LOG.info("Adding {} to ExecutionManager's complete queue".format(job_id))
        self._complete_queue.put(job_id)
    def start_execution(self, inline=True):
        """
        Begin executing jobs

        Args:
            inline (bool): when True, run synchronously in this thread and
                drain the completed-job queue between executions; when False,
                spawn background threads for completion handling, timeouts
                and curfews, and loop until :meth:`stop_execution`.
        """
        LOG.info("Starting execution")
        self.running = True
        self.start_time = arrow.now()
        self.executor.initialize()

        # Seed initial jobs
        work_queue = self._work_queue
        next_jobs = self.get_jobs_to_run()
        # NOTE(review): relies on Python 2's eager map(); under Python 3 this
        # lazy map would enqueue nothing.
        map(self.add_to_work_queue, next_jobs)

        # Start completed jobs consumer if not inline
        executor = None
        if not inline:
            executor = builder.futures.ThreadPoolExecutor(max_workers=3)
            executor.submit(self._consume_completed_jobs, block=True)
            executor.submit(self._check_for_timeouts)
            executor.submit(self._check_for_passed_curfews)

        jobs_executed = 0
        ONEYEAR = 365 * 24 * 60 * 60  # NOTE(review): unused here -- confirm
        tick = 0
        # Inline mode exits once the queue drains; async mode loops until
        # ``self.running`` is cleared.
        while (not work_queue.empty() or not inline) and self.running:
            PROCESSING_LOG.debug("EXECUTION_LOOP => Getting job from the work queue. Tick {}".format(tick))
            tick += 1
            try:
                # Poll with a timeout so a stop request is noticed promptly.
                job_id = work_queue.get(True, timeout=1)
            except Queue.Empty:
                continue
            self.last_job_worked_on = arrow.now()
            TRANSITION_LOG.debug("EXECUTION_LOOP => Got job {} from work queue".format(job_id))
            result = self.execute(job_id)
            #LOG.debug("EXECUTION_LOOP => Finished job {} from work queue".format(job_id))
            jobs_executed += 1
            if not isinstance(result, concurrent.futures.Future) and inline:
                # Synchronous result while inline: consume completions now.
                if result.is_async():
                    raise NotImplementedError("Cannot run an async executor inline")
                self._consume_completed_jobs(block=False)
            elif inline:
                TRANSITION_LOG.debug("EXECUTION_LOOP => Waiting on execution to complete")
                result.result() # Wait for job to complete
                self._consume_completed_jobs(block=False)
                TRANSITION_LOG.debug("EXECUTION_LOOP => Finished consuming completed jobs for {}".format(job_id))
            #else: It is an asynchronous result and we're running asynchronously, so let the _consume_completed_jobs
            # thread add new jobs
            LOG.debug("EXECUTION_LOOP => Executed {} jobs".format(jobs_executed))
        LOG.debug("EXECUTION_LOOP[work_queue] => Execution is exiting")
        if executor is not None:
            # Wait for the background completion/timeout threads to drain.
            executor.shutdown(wait=True)
    def stop_execution(self):
        """Signal the execution/completion loops to exit after their current tick."""
        LOG.info("Stopping execution")
        self.running = False
    def _consume_completed_jobs(self, block=False):
        """Drain the complete queue, scheduling any newly runnable jobs.

        Args:
            block (bool): when True keep polling until ``self.running`` is
                cleared; when False stop once the queue is empty.
        """
        LOG.info("COMPLETION_LOOP => Consuming completed jobs")
        complete_queue = self._complete_queue
        tick = 0
        while (not complete_queue.empty() or block) and self.running:
            PROCESSING_LOG.debug("COMPLETION_LOOP => Getting job from the work queue. Tick {}".format(tick))
            tick += 1
            try:
                # Poll with a timeout so a stop request is noticed promptly.
                job_id = complete_queue.get(True, timeout=1)
            except Queue.Empty:
                continue
            self.last_job_completed_on = arrow.now()
            self.completed_jobs += 1
            try:
                # Drop the start-time record used for timeout tracking.
                job = self.build.get_job(job_id)
                del self.execution_times[job]
            except KeyError:
                pass
            TRANSITION_LOG.debug("COMPLETION_LOOP => Completed job {}".format(job_id))
            # Queue follow-up jobs that are not already running.
            next_jobs = self.get_next_jobs_to_run(job_id)
            next_jobs = filter(lambda job_id: not self.build.get_job(job_id).is_running, next_jobs)
            TRANSITION_LOG.debug("COMPLETION_LOOP => Received completed job {}. Next jobs are {}".format(job_id, next_jobs))
            # NOTE(review): relies on Python 2's eager filter()/map(); under
            # Python 3 these lazy iterators would enqueue nothing.
            map(self.add_to_work_queue, next_jobs)
| |
"goh-lng": "Lombardic",
"goi": "Gobasi",
"goj": "Gowlan",
"gol": "Gola",
"gon": "Gondi",
"goo": "Gone Dau",
"gop": "Yeretuar",
"goq": "Gorap",
"gor": "Gorontalo",
"got": "Gothic",
"gou": "Gavar",
"gow": "Gorowa",
"gox": "Gobu",
"goy": "Goundo",
"goz": "Gozarkhani",
"gpa": "Gupa-Abawa",
"gpn": "Taiap",
"gqa": "Ga'anda",
"gqi": "Guiqiong",
"gqn": "Kinikinao",
"gqr": "Gor",
"gqu": "Qau",
"gra": "Rajput Garasia",
"grb": "Grebo",
"grc": "Ancient Greek",
"grc-aeo": "Aeolic Greek",
"grc-arc": "Arcadian Greek",
"grc-arp": "Arcadocypriot Greek",
"grc-att": "Attic Greek",
"grc-boi": "Boeotian Greek",
"grc-dor": "Doric Greek",
"grc-ela": "Elean Greek",
"grc-epc": "Epic Greek",
"grc-ion": "Ionic Greek",
"grc-koi": "Koine Greek",
"grc-kre": "Cretan Ancient Greek",
"grc-pam": "Pamphylian Greek",
"grc-ths": "Thessalian Greek",
"grd": "Guruntum",
"grg": "Madi",
"grh": "Gbiri-Niragu",
"gri": "Ghari",
"grj": "Southern Grebo",
"grk": "Hellenic",
"grk-cal": "Calabrian Greek",
"grk-ita": "Italiot Greek",
"grk-mar": "Mariupol Greek",
"grk-pro": "Proto-Hellenic",
"grm": "Kota Marudu Talantang",
"gro": "Groma",
"grq": "Gorovu",
"grs": "Gresi",
"grt": "Garo",
"gru": "Kistane",
"grv": "Central Grebo",
"grw": "Gweda",
"grx": "Guriaso",
"gry": "Barclayville Grebo",
"grz": "Guramalum",
"gse": "Ghanaian Sign Language",
"gsg": "German Sign Language",
"gsl": "Gusilay",
"gsm": "Guatemalan Sign Language",
"gsn": "Gusan",
"gso": "Southwest Gbaya",
"gsp": "Wasembo",
"gss": "Greek Sign Language",
"gsw": "Alemannic German",
"gta": "Guató",
"gtu": "Aghu Tharrnggala",
"gu": "Gujarati",
"gu-kat": "Kathiyawadi",
"gu-lda": "Lisan ud-Dawat Gujarati",
"gua": "Shiki",
"gub": "Guajajára",
"guc": "Wayuu",
"gud": "<NAME>",
"gue": "Gurindji",
"guf": "Gupapuyngu",
"gug": "Paraguayan Guaraní",
"guh": "Guahibo",
"gui": "Eastern Bolivian Guaraní",
"guk": "Gumuz",
"gul": "Gullah",
"gum": "Guambiano",
"gun": "Mbyá Guaraní",
"guo": "Guayabero",
"gup": "Gunwinggu",
"guq": "Aché",
"gur": "Farefare",
"gus": "Guinean Sign Language",
"gut": "Mal<NAME>",
"guu": "Yanomamö",
"guv": "Gey",
"guw": "Gun",
"gux": "Gourmanchéma",
"guz": "Gusii",
"gv": "Manx",
"gva": "Kaskihá",
"gvc": "Guanano",
"gve": "Duwet",
"gvf": "Golin",
"gvj": "Guajá",
"gvl": "Gulay",
"gvm": "Gurmana",
"gvn": "Kuku-Yalanji",
"gvo": "Gavião do Jiparaná",
"gvp": "Pará Gavião",
"gvr": "Western Gurung",
"gvs": "Gumawana",
"gvy": "Guyani",
"gwa": "Mbato",
"gwb": "Gwa",
"gwc": "Kalami",
"gwd": "Gawwada",
"gwe": "Gweno",
"gwf": "Gowro",
"gwg": "Moo",
"gwi": "Gwich'in",
"gwj": "Gcwi",
"gwm": "Awngthim",
"gwn": "Gwandara",
"gwr": "Gwere",
"gwt": "Gawar-Bati",
"gwu": "Guwamu",
"gww": "Kwini",
"gwx": "Gua",
"gxx": "Wè Southern",
"gya": "Northwest Gbaya",
"gyb": "Garus",
"gyd": "Kayardild",
"gye": "Gyem",
"gyf": "Gungabula",
"gyg": "Gbayi",
"gyi": "Gyele",
"gyl": "Gayil",
"gym": "Ngäbere",
"gyn": "Guyanese Creole English",
"gyo": "Gyalsumdo",
"gyr": "Guarayu",
"gyy": "Gunya",
"gza": "Ganza",
"gzi": "Gazi",
"gzn": "Gane",
"ha": "Hausa",
"haa": "Hän",
"hab": "Hanoi Sign Language",
"hac": "Gurani",
"had": "Hatam",
"hae": "Harar Oromo",
"haf": "Haiphong Sign Language",
"hag": "Hanga",
"hah": "Hahon",
"hai": "Haida",
"haj": "Hajong",
"hak": "Hakka",
"hal": "Halang",
"ham": "Hewa",
"hao": "Hakö",
"hap": "Hupla",
"har": "Harari",
"has": "Haisla",
"hav": "Havu",
"haw": "Hawaiian",
"hax": "Southern Haida",
"hay": "Haya",
"haz": "Hazaragi",
"hba": "Hamba",
"hbb": "Huba",
"hbn": "Heiban",
"hbo": "Biblical Hebrew",
"hbu": "Habu",
"hca": "Andaman Creole Hindi",
"hch": "Huichol",
"hdn": "Northern Haida",
"hds": "Honduras Sign Language",
"hdy": "Hadiyya",
"he": "Hebrew",
"he-IL": "Modern Israeli Hebrew",
"he-med": "Med<NAME>",
"he-mis": "Mish<NAME>",
"hea": "Northern Qiandong Miao",
"hed": "Herdé",
"heg": "Helong",
"heh": "Hehe",
"hei": "Heiltsuk",
"hem": "Hemba",
"hgm": "Haiǁom",
"hgw": "Haigwai",
"hhi": "Hoia Hoia",
"hhr": "Kerak",
"hhy": "Hoyahoya",
"hi": "Hindi",
"hi-mid": "Middle Hindi",
"hi-mum": "Bombay Hindi",
"hia": "Lamang",
"hib": "Hibito",
"hid": "Hidatsa",
"hif": "Fiji Hindi",
"hig": "Kamwe",
"hih": "Pamosu",
"hii": "Hinduri",
"hij": "Hijuk",
"hik": "Seit-Kaitetu",
"hil": "Hiligaynon",
"him": "Western Pahari",
"hio": "Tshwa",
"hir": "Himarimã",
"hit": "Hittite",
"hiw": "Hiw",
"hix": "Hixkaryana",
"hji": "Haji",
"hka": "Kahe",
"hke": "Hunde",
"hkk": "Hunjara-Kaina Ke",
"hkn": "Mel-Khaonh",
"hks": "Hong Kong Sign Language",
"hla": "Halia",
"hlb": "Halbi",
"hld": "Halang Doan",
"hle": "Hlersu",
"hlt": "Nga La",
"hma": "Southern Mashan Hmong",
"hmb": "Humburi Senni",
"hmc": "Central Huishui Hmong",
"hmd": "A-Hmao",
"hme": "Eastern Huishui Hmong",
"hmf": "Hmong Don",
"hmg": "Southwestern Guiyang Hmong",
"hmh": "Southwestern Huishui Hmong",
"hmi": "Northern Huishui Hmong",
"hmj": "Ge",
"hmk": "Maek",
"hml": "Luopohe Hmong",
"hmm": "Central Mashan Hmong",
"hmn": "Hmong",
"hmn-pro": "Proto-Hmong",
"hmp": "Northern Mashan Hmong",
"hmq": "Eastern Qiandong Miao",
"hmr": "Hmar",
"hms": "Southern Qiandong Miao",
"hmt": "Hamtai",
"hmu": "Hamap",
"hmv": "Hmong Dô",
"hmw": "Western Mashan Hmong",
"hmx": "Hmong-Mien",
"hmx-mie": "Mien",
"hmx-mie-pro": "Proto-Mien",
"hmx-pro": "Proto-Hmong-Mien",
"hmy": "Southern Guiyang Hmong",
"hmz": "Hmong Shua",
"hna": "Mina",
"hnd": "Southern Hindko",
"hne": "Chhattisgarhi",
"hnh": "ǁAni",
"hni": "Hani",
"hnj": "Green Hmong",
"hnn": "Hanunoo",
"hno": "Northern Hindko",
"hns": "Caribbean Hindustani",
"hnu": "Hung",
"ho": "<NAME>",
"hoa": "Hoava",
"hob": "Austronesian Mari",
"hoc": "Ho",
"hod": "Holma",
"hoe": "Horom",
"hoh": "Hobyót",
"hoi": "Holikachuk",
"hoj": "Hadothi",
"hok": "Hokan",
"hol": "Holu",
"hom": "Homa",
"hoo": "Holoholo",
"hop": "Hopi",
"hor": "Horo",
"hos": "Ho Chi Minh City Sign Language",
"hot": "Hote",
"hov": "Hovongan",
"how": "Honi",
"hoy": "Holiya",
"hoz": "Hozo",
"hpo": "Hpon",
"hps": "Hawai'i Pidgin Sign Language",
"hra": "Hrangkhol",
"hrc": "<NAME>",
"hre": "Hre",
"hrk": "Haruku",
"hrm": "<NAME>",
"hro": "Haroi",
"hrp": "Nhirrpi",
"hrt": "Hértevin",
"hru": "Hruso",
"hrw": "<NAME>",
"hrx": "Hunsrik",
"hrz": "Harzani",
"hsb": "Upper Sorbian",
"hsh": "Hungarian Sign Language",
"hsl": "Hausa Sign Language",
"hsn": "Xiang",
"hsn-old": "Old Xiang",
"hss": "Harsusi",
"ht": "Haitian Creole",
"ht-sdm": "Saint Dominican Creole French",
"hti": "Hoti",
"hto": "<NAME>",
"hts": "Hadza",
"htu": "Hitu",
"hu": "Hungarian",
"hub": "Huambisa",
"huc": "ǂHoan",
"hud": "Huaulu",
"huf": "Humene",
"hug": "Huachipaeri",
"huh": "Huilliche",
"hui": "Huli",
"huj": "Northern Guiyang Hmong",
"huk": "Hulung",
"hul": "Hula",
"hum": "Hungana",
"huo": "Hu",
"hup": "Hupa",
"huq": "Tsat",
"hur": "Halkomelem",
"hus": "Wastek",
"huu": "<NAME>",
"huv": "Huave",
"huw": "Hukumina",
"hux": "<NAME>",
"huy": "Hulaulá",
"huz": "Hunzib",
"hvc": "Haitian Vodoun Culture Language",
"hvd-dv": "Huvadhu Dhivehi",
"hvk": "Haveke",
"hvn": "Sabu",
"hwa": "Wané",
"hwc": "Hawaiian Creole",
"hwo": "Hwana",
"hy": "Armenian",
"hya": "Hya",
"hyx": "Armenian",
"hyx-pro": "Proto-Armenian",
"hz": "Herero",
"ia": "Interlingua",
"iai": "Iaai",
"ian": "Iatmul",
"iar": "Purari",
"iba": "Iban",
"ibb": "Ibibio",
"ibd": "Iwaidja",
"ibe": "Akpes",
"ibg": "Ibanag",
"ibh": "Bih",
"ibl": "Ibaloi",
"ibm": "Agoi",
"ibn": "Ibino",
"ibr": "Ibuoro",
"ibu": "Ibu",
"iby": "Ibani",
"ica": "Ede Ica",
"ich": "Etkywan",
"icl": "Icelandic Sign Language",
"icr": "Islander Creole English",
"id": "Indonesian",
"ida": "Idakho-Isukha-Tiriki",
"idb": "Indo-Portuguese",
"idc": "Idon",
"idd": "Ede Idaca",
"ide": "Idere",
"idi": "Idi",
"idr": "Indri",
"ids": "Idesa",
"idt": "Idaté",
"idu": "Idoma",
"ie": "Interlingue",
"ifa": "Amganad Ifugao",
"ifb": "Batad Ifugao",
"ife": "Ifè",
"iff": "Ifo",
"ifk": "Tuwali Ifugao",
"ifm": "Teke-Fuumu",
"ifu": "<NAME>",
"ify": "<NAME>",
"ig": "Igbo",
"igb": "Ebira",
"ige": "Igede",
"igg": "Igana",
"igl": "Igala",
"igm": "Kanggape",
"ign": "Ignaciano",
"igo": "Isebe",
"igs": "Glosa",
"igw": "Igwe",
"ihb": "<NAME>",
"ihi": "Ihievbe",
"ihp": "Iha",
"ii": "Sichuan Yi",
"iir": "Indo-Iranian",
"iir-nur": "Nuristani",
"iir-nur-pro": "Proto-Nuristani",
"iir-pro": "Proto-Indo-Iranian",
"ijc": "Izon",
"ije": "Biseni",
"ijj": "Ede Ije",
"ijn": "Kalabari",
"ijo": "Ijoid",
"ijo-pro": "Proto-Ijoid",
"ijs": "Southeast Ijo",
"ik": "Inupiaq",
"ike": "Eastern Canadian Inuktitut",
"iki": "Iko",
"ikk": "Ika",
"ikl": "Ikulu",
"iko": "Olulumo-Ikom",
"ikp": "Ikpeshi",
"ikr": "Ikaranggal",
"iks": "Inuit Sign Language",
"ikt": "Inuvialuktun",
"ikv": "Iku-Gora-Ankwa",
"ikw": "Ikwere",
"ikx": "Ik",
"ikz": "Ikizu",
"ila": "Ile Ape",
"ilb": "Ila",
"ilg": "Ilgar",
"ili": "Ili Turki",
"ilk": "Ilongot",
"ill": "Iranun",
"ilo": "Ilocano",
"ils": "International Sign",
"ilu": "Ili'uun",
"ilv": "Ilue",
"ima": "Mala Malasar",
"imi": "Anamgura",
"iml": "Miluk",
"imn": "Imonda",
"imo": "Imbongu",
"imr": "Imroing",
"ims": "Marsian",
"imy": "Milyan",
"inb": "Inga",
"inc": "Indo-Aryan",
"inc-ash": "Ashokan Prakrit",
"inc-bhi": "Bhil",
"inc-cen": "Central Indo-Aryan",
"inc-cen-pro": "Proto-Central Indo-Aryan",
"inc-dar": "Dardic",
"inc-eas": "Eastern Indo-Aryan",
"inc-elu": "Helu Prakrit",
"inc-gup": "Gurjar Apabhramsa",
"inc-hie": "Eastern Hindi",
"inc-hiw": "Western Hindi",
"inc-hnd": "Hindustani",
"inc-ins": "Insular Indo-Aryan",
"inc-kam": "Kamarupi Prakrit",
"inc-kha": "Khasa Prakrit",
"inc-kho": "Kholosi",
"inc-khs": "Khasa Prakrit",
"inc-mas": "Middle Assamese",
"inc-mbn": "Middle Bengali",
"inc-mgd": "Magadhi Prakrit",
"inc-mgu": "Middle Gujarati",
"inc-mid": "Middle Indo-Aryan",
"inc-mit": "Mitanni",
"inc-mor": "Middle Oriya",
"inc-nor": "Northern Indo-Aryan",
"inc-nwe": "Northwestern Indo-Aryan",
"inc-oas": "Early Assamese",
"inc-obn": "Old Bengali",
"inc-ogu": "Old Gujarati",
"inc-ohi": "Old Hindi",
"inc-old": "Old Indo-Aryan",
"inc-oor": "Old Oriya",
"inc-opa": "Old Punjabi",
"inc-ork": "Old Kamta",
"inc-pah": "Pahari",
"inc-pan": "Punjabi-Lahnda",
"inc-pka": "Ardhamagadhi Prakrit",
"inc-pmg": "Magadhi Prakrit",
"inc-pmh": "Maharastri Prakrit",
"inc-pra": "Prakrit",
"inc-pro": "Proto-Indo-Aryan",
"inc-psc": "Paisaci Prakrit",
"inc-pse": "Sauraseni Prakrit",
"inc-psi": "Paisaci Prakrit",
"inc-psu": "Sauraseni Prakrit",
"inc-rom": "Romani",
"inc-sap": "Sauraseni Apabhramsa",
"inc-snd": "Sindhi",
"inc-sou": "Southern Indo-Aryan",
"inc-tak": "Takka Apabhramsa",
"inc-vra": "Vracada Apabhramsa",
"inc-wes": "Western Indo-Aryan",
"ine": "Indo-European",
"ine-ana": "Anatolian",
"ine-ana-pro": "Proto-Anatolian",
"ine-bsl": "Balto-Slavic",
"ine-bsl-pro": "Proto-Balto-Slavic",
"ine-pae": "Paeonian",
"ine-pro": "Proto-Indo-European",
"ine-toc": "Tocharian",
"ine-toc-pro": "Proto-Tocharian",
"ing": "Deg Xinag",
"inh": "Ingush",
"inj": "Jungle Inga",
"inl": "Indonesian Sign Language",
"inm": "Minaean",
"inn": "Isinai",
"ino": "Inoke-Yate",
"inp": "Iñapari",
"ins": "Indian Sign Language",
"int": "Intha",
"inz": "Ineseño",
"io": "Ido",
"ior": "Inor",
"iou": "Tuma-Irumu",
"iow": "Chiwere",
"ipi": "Ipili",
"ipo": "Ipiko",
"iqu": "Iquito",
"iqw": "Ikwo",
"ira": "Iranian",
"ira-cen": "Central Iranian",
"ira-csp": "Caspian",
"ira-kms": "Komisenian",
"ira-kms-pro": "Proto-Komisenian",
"ira-mid": "Middle Iranian",
"ira-mny": "Munji-Yidgha",
"ira-mny-pro": "Proto-Munji-Yidgha",
"ira-mpr": "Medo-Parthian",
"ira-mpr-pro": "Proto-Medo-Parthian",
"ira-msh": "Mazanderani-Shahmirzadi",
"ira-nei": "Northeastern Iranian",
"ira-nwi": "Northwestern Iranian",
"ira-old": "Old Iranian",
"ira-orp": | |
# Source: ska-telescope/csp-lmc-prototype -- csplmc/CspMaster/CspMaster/CspMaster.py
# -*- coding: utf-8 -*-
#
# This file is part of the CspMaster project
#
#
#
# Distributed under the terms of the GPL license.
# See LICENSE.txt for more info.
""" CspMaster Tango device prototype
CSPMaster TANGO device class for the CSPMaster prototype
"""
# PROTECTED REGION ID (CspMaster.standardlibray_import) ENABLED START #
# Python standard library
from __future__ import absolute_import
import sys
import os
from future.utils import with_metaclass
from collections import defaultdict
# PROTECTED REGION END# //CspMaster.standardlibray_import
# tango imports
import tango
from tango import DebugIt, EventType, DeviceProxy, AttrWriteType
from tango.server import run, DeviceMeta, attribute, command, device_property
# Additional import
# PROTECTED REGION ID(CspMaster.additionnal_import) ENABLED START #
#
from skabase.SKAMaster import SKAMaster
from skabase.auxiliary import utils
# PROTECTED REGION END # // CspMaster.additionnal_import
# PROTECTED REGION ID (CspMaster.add_path) ENABLED START #
# add the path to import global_enum package.
file_path = os.path.dirname(os.path.abspath(__file__))
commons_pkg_path = os.path.abspath(os.path.join(file_path, "../../commons"))
sys.path.insert(0, commons_pkg_path)
import global_enum as const
from global_enum import HealthState, AdminMode
#add the path to import release file (!!)
csplmc_path = os.path.abspath(os.path.join(file_path, "../../"))
sys.path.insert(0, csplmc_path)
import release
# PROTECTED REGION END# //CspMaster.add_path
__all__ = ["CspMaster", "main"]
class CspMaster(with_metaclass(DeviceMeta, SKAMaster)):
"""
CSPMaster TANGO device class for the CSPMaster prototype
"""
# PROTECTED REGION ID(CspMaster.class_variable) ENABLED START #
# ---------------
# Event Callback functions
# ---------------
def __seSCMCallback(self, evt):
"""
Class private method.
Retrieve the values of the sub-element SCM attributes subscribed
for change event at device initialization.
:param evt: The event data
:return: None
"""
if not evt.err:
dev_name = evt.device.dev_name()
try:
if dev_name in self._se_fqdn:
if evt.attr_value.name.lower() == "state":
self._se_state[dev_name] = evt.attr_value.value
elif evt.attr_value.name.lower() == "healthstate":
self._se_healthstate[dev_name] = evt.attr_value.value
elif evt.attr_value.name.lower() == "adminmode":
self._se_adminmode[dev_name] = evt.attr_value.value
else:
log_msg = ("Attribute {} not still "
"handled".format(evt.attr_name))
self.dev_logging(log_msg, tango.LogLevel.LOG_WARN)
else:
log_msg = ("Unexpected change event for"
" attribute: {}".format(str(evt.attr_name)))
self.dev_logging(log_msg, tango.LogLevel.LOG_WARN)
return
log_msg = "New value for {} is {}".format(str(evt.attr_name),
str(evt.attr_value.value))
self.dev_logging(log_msg, tango.LogLevel.LOG_INFO)
# update CSP global state
if evt.attr_value.name.lower() in ["state", "healthstate"]:
self.__set_csp_state()
except tango.DevFailed as df:
self.dev_logging(str(df.args[0].desc), tango.LogLevel.LOG_ERROR)
except Exception as except_occurred:
self.dev_logging(str(except_occurred), tango.LogLevel.LOG_ERROR)
else:
for item in evt.errors:
# API_EventTimeout: if sub-element device not reachable it transitions
# to UNKNOWN state.
if item.reason == "API_EventTimeout":
if evt.attr_name.find(dev_name) > 0:
self._se_state[dev_name] = tango.DevState.UNKNOWN
self._se_healthstate[dev_name] = HealthState.UNKNOWN
if self._se_to_switch_off[dev_name]:
self._se_state[dev_name] = tango.DevState.OFF
# update the State and healthState of the CSP Element
self.__set_csp_state()
log_msg = item.reason + ": on attribute " + str(evt.attr_name)
self.dev_logging(log_msg, tango.LogLevel.LOG_WARN)
# ---------------
# Class private methods
# ---------------
    def __set_csp_state(self):
        """
        Class private method.
        Retrieve the State attribute of the CSP sub-elements and aggregate
        them to build up the CSP global state.
        :param: None
        :return: None
        """
        # refresh the aggregated health state first, since it is derived
        # from the same per-sub-element SCM dictionaries
        self.__set_csp_health_state()
        # CSP state reflects the status of CBF. Only if CBF is present
        # CSP can work. The state of PSS and PST sub-elements only contributes
        # to determine the CSP health state.
        self.set_state(self._se_state[self.CspMidCbf])
def __set_csp_health_state(self):
"""
Class private method.
Retrieve the healthState attribute of the CSP sub-elements and
aggregate them to build up the CSP health state
:param: None
:return: None
"""
# The whole CSP HealthState is OK only if:
# - all sub-elements are available
# - each sub-element HealthState is OK
if (len(self._se_healthstate.values()) == 3 and \
list(self._se_healthstate.values()) == [HealthState.OK,
HealthState.OK,
HealthState.OK
]):
self._se_healthstate = HealthState.OK
# in all other case the HealthState depends on the CBF
# sub-element HealthState
if self._se_healthstate[self.CspMidCbf] == HealthState.UNKNOWN:
self._health_state = HealthState.UNKNOWN
elif self._se_healthstate[self.CspMidCbf] == HealthState.FAILED:
self._health_state = HealthState.FAILED
else:
self._health_state = HealthState.DEGRADED
def __get_maxnum_of_beams_capabilities(self):
"""
Class private method.
Retrieve the max number of CSP Capabilities for each capability
type.\n
The couple [CapabilityType: num] is specified as TANGO Device
Property. Default values for Mid CSP are:\n
- Subarray 16 \n
- SearchBeam 1500 \n
- TimingBeam 16 \n
- VlbiBeam 20 \n
:param: None
:return: None
"""
self._search_beams_maxnum = const.NUM_OF_SEARCH_BEAMS
self._timing_beams_maxnum = const.NUM_OF_TIMING_BEAMS
self._vlbi_beams_maxnum = const.NUM_OF_VLBI_BEAMS
self._subarrays_maxnum = const.NUM_OF_SUBARRAYS
self._available_search_beams_num = const.NUM_OF_SEARCH_BEAMS
self._available_timing_beams_num = const.NUM_OF_TIMING_BEAMS
self._available_vlbi_beams_num = const.NUM_OF_VLBI_BEAMS
self._available_subarrays_num = const.NUM_OF_SUBARRAYS
if self._max_capabilities:
try:
self._search_beams_maxnum = self._max_capabilities["SearchBeam"]
except KeyError: # not found in DB
self._search_beams_maxnum = const.NUM_OF_SEARCH_BEAMS
try:
self._timing_beams_maxnum = self._max_capabilities["TimingBeam"]
except KeyError: # not found in DB
self._timing_beams_maxnum = const.NUM_OF_TIMING_BEAMS
try:
self._vlbi_beams_maxnum = self._max_capabilities["VlbiBeam"]
except KeyError: # not found in DB
self._vlbi_beams_maxnum = const.NUM_OF_VLBI_BEAMS
try:
self._subarrays_maxnum = self._max_capabilities["Subarray"]
except KeyError: # not found in DB
self._subarrays_maxnum = const.NUM_OF_SUBARRAYS
else:
self.dev_logging(("MaxCapabilities device property not defined."
"Use defaul values"), tango.LogLevel.LOG_WARN)
def __get_maxnum_of_receptors(self):
"""
Get the maximum number of receptors that can be used for observations.
This number can be less than 197.
"""
self._receptors_maxnum = const.NUM_OF_RECEPTORS
capability_dict = {}
try:
proxy = self._se_proxies[self.CspMidCbf]
proxy.ping()
vcc_to_receptor = proxy.vccToReceptor
self._vcc_to_receptor_map = dict([int(ID) for ID in pair.split(":")]
for pair in vcc_to_receptor)
# get the number of each Capability type allocated by CBF
cbf_max_capabilities = proxy.maxCapabilities
for capability in cbf_max_capabilities:
cap_type, cap_num = capability.split(':')
capability_dict[cap_type] = int(cap_num)
self._receptors_maxnum = capability_dict["VCC"]
self._receptorsMembership = [0]* self._receptors_maxnum
except KeyError as key_err:
log_msg = "Error: no key found for {}".format(str(key_err))
self.dev_logging(log_msg, int(tango.LogLevel.LOG_ERROR))
except AttributeError as attr_err:
log_msg = "Error reading{}: {}".format(str(attr_err.args[0]),
attr_err.__doc__)
self.dev_logging(log_msg, int(tango.LogLevel.LOG_ERROR))
except tango.DevFailed as df:
log_msg = "Error: " + str(df.args[0].reason)
self.dev_logging(log_msg, int(tango.LogLevel.LOG_ERROR))
def __init_beams_capabilities(self):
"""
Class private method.
Initialize the CSP capabilities State and Modes attributes.
"""
# get the max number of CSP Capabilities for each Csp capability type
self.__get_maxnum_of_beams_capabilities()
# set init values for SCM states of each beam capability type
self._search_beams_state = [tango.DevState.UNKNOWN
for i in range(self._search_beams_maxnum)]
self._timing_beams_state = [tango.DevState.UNKNOWN
for i in range(self._timing_beams_maxnum)
]
self._vlbi_beams_state = [tango.DevState.UNKNOWN
for i in range(self._vlbi_beams_maxnum)
]
self._search_beams_health_state = [HealthState.UNKNOWN
for i in range(self._search_beams_maxnum)
]
self._timing_beams_health_state = [HealthState.UNKNOWN
for i in range(self._timing_beams_maxnum)
]
self._vlbi_beams_health_state = [HealthState.UNKNOWN
for i in range(self._vlbi_beams_maxnum)
]
self._search_beams_admin = [AdminMode.ONLINE
for i in range(self._search_beams_maxnum)
]
self._timing_beams_admin = [AdminMode.ONLINE
for i in range(self._timing_beams_maxnum)
]
self._vlbi_beams_admin = [AdminMode.ONLINE
for i in range(self._vlbi_beams_maxnum)
]
def __connect_to_subelements(self):
"""
Class private method.
Establish connection with each CSP sub-element.
If connection succeeds, the CspMaster device subscribes the State,
healthState and adminMode attributes of each CSP Sub-element and
registers a callback function to handle the events (see __seSCMCallback()).
Exceptions are logged.
Returns:
None
"""
for fqdn in self._se_fqdn:
# initialize the list for each dictionary key-name
self._se_event_id[fqdn] = []
try:
self._se_to_switch_off[fqdn] = False
log_msg = "Trying connection to" + str(fqdn) + " device"
self.dev_logging(log_msg, int(tango.LogLevel.LOG_INFO))
device_proxy = DeviceProxy(fqdn)
device_proxy.ping()
# store the sub-element proxies
self._se_proxies[fqdn] = device_proxy
# Subscription of the sub-element State,healthState and adminMode
ev_id = device_proxy.subscribe_event("State",
EventType.CHANGE_EVENT,
self.__seSCMCallback,
stateless=True)
self._se_event_id[fqdn].append(ev_id)
ev_id = device_proxy.subscribe_event("healthState",
EventType.CHANGE_EVENT,
self.__seSCMCallback,
stateless=True)
self._se_event_id[fqdn].append(ev_id)
ev_id = device_proxy.subscribe_event("adminMode",
EventType.CHANGE_EVENT,
self.__seSCMCallback,
stateless=True)
self._se_event_id[fqdn].append(ev_id)
except tango.DevFailed as df:
#for item in df.args:
log_msg = ("Failure in connection to {}"
" device: {}".format(str(fqdn), str(df.args[0].desc)))
self.dev_logging(log_msg, tango.LogLevel.LOG_ERROR)
# NOTE: if the exception is thrown, the Device server fails
# and exit. In this case we rely on K8s/Docker restart policy
# to restart the Device Server.
def __is_subelement_available(self, subelement_name):
"""
*Class private method.*
Check if the sub-element is exported in the TANGO DB.
If the device is not present in the list of the connected
sub-elements, a connection with the device is performed.
Args:
subelement_name : the FQDN of the sub-element
Returns:
True if the connection with the subarray is established,
False otherwise
"""
try:
proxy = self._se_proxies[subelement_name]
proxy.ping()
except KeyError as key_err:
# Raised when a mapping (dictionary) key is not found in the set
# of existing keys.
# no proxy registered for the subelement device
msg = "Can't retrieve the information of key {}".format(key_err)
self.dev_logging(msg, tango.LogLevel.LOG_ERROR)
proxy = tango.DeviceProxy(subelement_name)
proxy.ping()
self._se_proxies[subelement_name] = proxy
except tango.DevFailed as df:
msg = "Failure reason: {} Desc: {}".format(str(df.args[0].reason), str(df.args[0].desc))
self.dev_logging(msg, tango.LogLevel.LOG_ERROR)
return False
return True
    def __create_search_beam_group(self):
        """
        Class private method.
        Create a TANGO GROUP to get CSP SearchBeams Capabilities
        information

        NOTE: placeholder -- not implemented yet, intentionally a no-op.
        """
        return
    def __create_timing_beam_group(self):
        """
        Class private method.
        Create a TANGO GROUP to get CSP TimingBeams Capabilities
        information

        NOTE: placeholder -- not implemented yet, intentionally a no-op.
        """
        return
    def __create_vlbi_beam_group(self):
        """
        Class private method.
        Create a TANGO GROUP to get CSP Vlbi Beams Capabilities
        information

        NOTE: placeholder -- not implemented yet, intentionally a no-op.
        """
        return
# PROTECTED REGION END # // CspMaster.class_variable
# -----------------
# Device Properties
# -----------------
CspMidCbf = device_property(
dtype='str', default_value="mid_csp_cbf/sub_elt/master",
doc="TANGO Device property.\n\n The Mid CBF sub-element address\n\n *type*: string",
)
"""
*Device | |
<filename>fcts.py
import sys, os, time, datetime, pandas, numpy, pickle, logging, py7zr, base64, io, random
from functools import wraps
from matplotlib import pyplot as plt
def process_figure(out_path, plt):
    """Show, save, or serialize the current matplotlib figure.

    out_path == 'base64' -> return the serialized image,
    out_path == ''       -> show the figure interactively,
    anything else        -> save the figure to that path.
    """
    if out_path == 'base64':
        plt.tight_layout()
        return serialize_image(plt)
    if out_path:
        plt.tight_layout()
        plt.savefig(out_path)
        return out_path
    plt.show()
    return out_path
def text_to_fname(text, suffix_date=False):
    """Build a safe filename: drop special chars, optionally append YYYYMMDD.

    NOTE(review): '_' is not alphanumeric, so the underscores produced from
    spaces are stripped again by the filter -- spaces are effectively removed.
    """
    cleaned = ''.join(ch for ch in text.replace(' ', '_') if ch.isalnum())
    if suffix_date:
        cleaned += datetime.datetime.now().strftime('%Y%m%d')
    return cleaned
def to_zipped_protected_csv(df, out_path, password):
    """Save dataframe to an encrypted 7zip file.

    Writes *df* as a ';'-separated CSV to ``out_path``, wraps it into a
    password-protected archive named ``out_path + '.zip'`` (NOTE: the
    extension says .zip but the payload is 7z format), then deletes the
    plain CSV. Returns the archive path, or '' on any error (logged,
    never raised).
    """
    try:
        df.to_csv(out_path, index=None,sep=";")
        zip_path = out_path+'.zip'
        with py7zr.SevenZipFile(zip_path, 'w', password=password) as zf:
            zf.writeall(out_path)
        time.sleep(0.01)  # give the OS a moment before removing the plain file
        os.remove(out_path)
        return zip_path
    except Exception as err:
        logging.info('Error writing to_zipped_protected_csv (' + str(out_path) + '): ' + str(err))
        return ''
def timed(func):
    """Decorator: log the wall-clock execution time of the decorated function."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        started_at = time.time()
        result = func(*args, **kwargs)
        elapsed = round(time.time() - started_at, 2)
        logging.info("{fct_name} ran in {time_s}s".format(fct_name=func.__name__, time_s=elapsed))
        return result
    return wrapper
def get_trendpoly(xs, ys):
    """Fit a degree-1 polynomial (linear trend) to the series and return it."""
    return numpy.poly1d(numpy.polyfit(xs, ys, 1))
def show_trendpoly(ax, xs, ys, color='red'):
    """Draw the linear trend of (xs, ys) on *ax*; return the fitted polynomial."""
    poly = get_trendpoly(xs, ys)
    endpoints = [xs.min(), xs.max()]
    ax.plot(endpoints, poly(endpoints), color=color)
    return poly
def is_any_in_txt(txt_list, within_txt):
    """
    Within (txt_list), is there one item contained in within_txt ?
    Example: (['a', 'b'], 'ab') --> Yes, a is contained in ab
    """
    return any(item in within_txt for item in txt_list)
def is_in_any_txt(txt, within_txts_list, case_insensitive=False):
    """Return True when *txt* occurs inside any of the given texts."""
    if case_insensitive:  # slower: both sides are normalized
        needle = txt.lower()
        return any(needle in candidate.lower() for candidate in within_txts_list)
    return any(txt in candidate for candidate in within_txts_list)
def try_div(x, y, def_value=0):
    """Divide x/y; return *def_value* on failure (typically division by zero).

    FIX: narrowed the bare ``except:`` to ``except Exception`` so that
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        return x / y
    except Exception:
        return def_value
def list_unique(x):
    """Remove duplicates from a list, preserving first-occurrence order.

    Membership is tested on the output list, so unhashable items work too.
    """
    unique_items = []
    for item in x:
        if item not in unique_items:
            unique_items.append(item)
    return unique_items
def showcount(df, col_name, topn=0, print_count=True, as_percentage=False):
    """Group by a column and show how many items in each group.

    :param df: input DataFrame
    :param col_name: a single column name (str) or a list of column names
    :param topn: if > 0, return only the *topn* largest groups
    :param print_count: print the number of distinct values first
    :param as_percentage: format the '%' column via ``to_pc`` (presumably
        defined elsewhere in this module -- TODO confirm availability)
    :return: DataFrame with the group column(s) plus 'n' (count) and
        '%' (share of total rows), sorted by count descending
    """
    len_tot = len(df)
    # distinct-value count differs for single vs multi-column grouping
    if isinstance(col_name, str):
        n_distinct = len(df[col_name].unique())
    else:
        n_distinct = len(df[col_name].drop_duplicates())
    if print_count:
        print(str(n_distinct) + ' distinct values')
    if isinstance(col_name, str):
        tmp_df = df[[col_name]].copy()
        # transform(len) broadcasts the group size back onto each row
        tmp_df['n'] = tmp_df.groupby(col_name)[col_name].transform(len)
        tmp_df['%'] = tmp_df['n'] / len_tot
        tmp_df = tmp_df[[col_name, 'n', '%']].drop_duplicates().sort_values('n', ascending=False)
    else:
        tmp_df = df[col_name].copy()
        # multi-column grouping: count via the first column of the group key
        tmp_df['n'] = tmp_df.groupby(col_name)[col_name[0]].transform(len)
        tmp_df['%'] = tmp_df['n'] / len_tot
        tmp_df = tmp_df[col_name + ['n', '%']].drop_duplicates().sort_values('n', ascending=False)
    if as_percentage:
        tmp_df['%'] = tmp_df['%'].map(lambda x: to_pc(100*x))
    if topn == 0:
        return tmp_df
    else:
        return tmp_df.head(topn)
def mem_usage(pandas_obj, ret_type='str'):
    """Memory footprint of a DataFrame/Series, as 'X.XX MB' or a float (MB)."""
    if isinstance(pandas_obj, pandas.DataFrame):
        nbytes = pandas_obj.memory_usage(deep=True).sum()
    else:
        # anything that is not a DataFrame is assumed to be a Series
        nbytes = pandas_obj.memory_usage(deep=True)
    usage_mb = nbytes / 1024 ** 2  # bytes -> megabytes
    return "{:03.2f} MB".format(usage_mb) if ret_type == 'str' else usage_mb
def try_to_category(df, col_name):
    """Convert *col_name* to the 'category' dtype when possible; ignore failures.

    FIX: narrowed the bare ``except:`` to ``except Exception`` so that
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        df[col_name] = df[col_name].astype('category')
    except Exception:
        pass
    return df
def categorize_all(all_tx, cat_cols):
    """Convert the listed columns to 'category' dtype, logging each size change."""
    for col in cat_cols:
        try:
            before = mem_usage(all_tx[col])
            all_tx[col] = all_tx[col].astype('category')
            after = mem_usage(all_tx[col])
            logging.info('categorize_all: ' + col + '. Size : ' + str(before) + ' --> ' + str(after))
        except Exception as err:
            logging.info('Error categorizing: ' + str(col) + ': ' + str(err))
    return all_tx
def to_pickle(obj, fpath):
    """Serialize *obj* to the file *fpath* with pickle."""
    with open(fpath, "wb") as fh:
        pickle.dump(obj, fh)
def read_pickle(fpath):
    """Deserialize and return the object stored in *fpath*."""
    with open(fpath, "rb") as fh:
        return pickle.load(fh)
def sign(x):
    """Return 1 for non-negative numbers (zero included), -1 otherwise."""
    if x >= 0:
        return 1
    return -1
def all_same_signs(x_list):
    """True when every item has the same sign (zero counts as positive)."""
    distinct_signs = {1 if x >= 0 else -1 for x in x_list}
    return len(distinct_signs) == 1
def log(msg, logmode='print'):
    """Write *msg* with a timestamp to stdout, or to the logging module.

    BUG FIX: the message used to be concatenated INTO the strftime format
    string, so any '%' inside the message was interpreted as a strftime
    directive. The timestamp is now formatted separately.
    """
    if logmode == 'print':
        stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
        print(stamp + ' ' + str(msg))
    else:
        logging.info(str(msg))
def f_thousands_k(n, sep="'"):
    """Format a number with thousands separators, collapsing >999 to 'Nk'."""
    if not isinstance(n, int):
        if isinstance(n, float) and numpy.isnan(n):
            return 'None'
        n = int(n)
    if n <= 999:
        return f"{n:,d}".replace(',', sep)
    thousands = int(round(n / 1000))
    return f"{thousands:,d}".replace(',', sep) + 'k'
def f_thousands(n, sep="'"):
    """Format an integer with *sep* as thousands separator; NaN -> 'None'."""
    if isinstance(n, int):
        return f"{n:,d}".replace(',', sep)
    if isinstance(n, float) and numpy.isnan(n):
        return 'None'
    return f"{int(n):,d}".replace(',', sep)
def f_big(n, sep="'"):
    """Format a big number with an M/k suffix ('15k', '2.5M', ...)."""
    # (threshold, divisor, suffix, decimals) checked from largest to smallest
    rules = [
        (10 * 1000 * 1000, 1000000, 'M', 0),
        (1 * 1000 * 1000, 1000000, 'M', 1),
        (10 * 1000, 1000, 'k', 0),
        (1000, 1000, 'k', 1),
    ]
    for threshold, divisor, suffix, decimals in rules:
        if n > threshold:
            value = round(n / divisor, decimals)
            return ('{:.%df}' % decimals).format(value) + suffix
    return '{:.0f}'.format(round(n, 0))
def create_missing_dir(dir_path):
    """Create *dir_path* (and missing parents) if needed; return the path.

    FIX: uses ``exist_ok=True`` instead of check-then-create, which removes
    the race where the directory appears between the existence check and the
    ``makedirs`` call.
    """
    os.makedirs(dir_path, exist_ok=True)
    return dir_path
def replace_all_str(x, pattern_list, repl_by):
    """Replace every pattern from *pattern_list* by *repl_by*, in order."""
    result = x
    for pattern in pattern_list:
        result = result.replace(pattern, repl_by)
    return result
def replace_str_all_empty(x, repl_by=''):
    """Map placeholder values ('#', 'n/a', 'unknown', ...) to *repl_by*.

    The input is stripped first; matching is exact (case-sensitive).
    """
    placeholders = ('#', '?', 'n//a', 'n/a', 'not defined', 'unknown', 'nan')
    stripped = x.strip()
    if stripped in placeholders:
        return repl_by
    return stripped
def get_dates_range_str(df, col_name='date'):
    """Format the min/max of a date column as 'yyyy-mm-dd -> yyyy-mm-dd'."""
    start = df[col_name].min().strftime('%Y-%m-%d -> ')
    end = df[col_name].max().strftime('%Y-%m-%d')
    return start + end
def todf(table):
    """2D list to dataframe; the first row supplies the column headers."""
    headers, *rows = table
    return pandas.DataFrame(rows, columns=headers)
def clip(df):
    """Clip DF to clipboard, without index.

    Side effect only: copies *df* to the system clipboard (requires a
    clipboard backend such as xclip/xsel/pbcopy to be available).
    """
    df.to_clipboard(index=False)
def fdict(d, col_like, col_like_2='', show_values=False):
    """Search dict keys containing both substrings (case-insensitive)."""
    pat1 = col_like.lower()
    pat2 = col_like_2.lower()
    matches = [k for k in d if pat1 in k.lower() and pat2 in k.lower()]
    if show_values:
        return [(k, d[k]) for k in matches]
    return matches
def fcol(df, col_like, col_like_2='', **kwargs):
    """Find columns of *df* (or keys of a dict) whose name contains the patterns.

    BUG FIX: *col_like* is now lowercased like *col_like_2*, making the match
    consistently case-insensitive (previously an upper-case *col_like* could
    never match any column).

    :param kwargs: ``nrows`` (default 2) limits the returned sample rows;
        remaining kwargs are forwarded to fdict() for dict inputs.
    """
    if isinstance(df, dict):
        return fdict(df, col_like, col_like_2, **kwargs)
    nrows = kwargs.get('nrows', 2)
    pat1 = col_like.lower()
    pat2 = col_like_2.lower()
    cols = [c for c in df if pat1 in c.lower() and pat2 in c.lower()]
    return df[cols].drop_duplicates().head(nrows)
def tryround(x, *args):
    """Round *x* (forwarding extra round() args); return 0 on failure.

    FIX: narrowed the bare ``except:`` to ``except Exception`` so that
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        return round(x, *args)
    except Exception:
        return 0
def try_strftime(x, *args, **kwargs):
    """Format *x* via strftime; return '' when *x* doesn't support it.

    FIX: narrowed the bare ``except:`` to ``except Exception``.
    """
    try:
        return x.strftime(*args, **kwargs)
    except Exception:
        return ''
def try_to_datetime(x, frmt=''):
    """Parse *x* to a pandas datetime; return numpy.nan on failure.

    :param frmt: optional explicit strptime format; '' lets pandas infer.

    FIX: narrowed the bare ``except:`` to ``except Exception``.
    """
    try:
        if frmt == '':
            return pandas.to_datetime(x)
        return pandas.to_datetime(x, format=frmt)
    except Exception:
        return numpy.nan
def float_to_datetime(x):
    """20190102.0 --> drop the decimals and parse to datetime (nan on failure).

    FIX: narrowed the bare ``except:`` to ``except Exception`` (it guards the
    int() conversion of NaN/None inputs).
    """
    try:
        return try_to_datetime(str(int(x)))
    except Exception:
        return numpy.nan
def capitalize_all(x):
    """Capitalize every space-separated word of a sentence."""
    return ' '.join(word.capitalize() for word in x.split(' '))
def clean_email(x):
"""Simple cleaning on email addresses"""
x = x.replace(' ', '')
x = x.replace('<', '')
x = x.replace('>', '')
x = x.replace("'", "")
x = x.replace('"', '')
return x
def describe_column(c, df, i):
    """Build a one-row summary [index, dtype, name, examples] for column *c*."""
    examples = ', '.join(map(str, df[c].unique()[:3]))[:20] + '...'
    return [i, str(df[c].dtype), str(c).replace('\n', ''), examples]
def describe(df):
    """Summarize every column of *df* into a small overview DataFrame."""
    header = ['Col Nr', 'Type', 'Name', 'Examples']
    rows = [describe_column(c, df, i) for i, c in enumerate(df)]
    return todf([header] + rows)
def showcols(df):
    """Print the DF's column names, five per line.

    BUG FIX: the original iterated ``range(10)`` and therefore silently
    dropped every column past the 50th; this version prints them all.
    """
    cols = list(df.columns)
    for start in range(0, len(cols), 5):
        print(', '.join(cols[start:start + 5]))
def min_nonan(xseries):
    """Minimum value of a series, ignoring NaNs; NaN when empty or all-NaN.

    BUG FIX: the boolean mask is now negated with ``~`` -- the old unary
    ``-`` on a boolean Series raises a TypeError in modern pandas, which
    silently sent every call into the except branch and returned NaN.
    The except is also narrowed from a bare ``except:``.
    """
    try:
        return min(xseries[~xseries.isnull()])
    except Exception:
        return numpy.nan
def max_nonan(xseries):
    """Maximum value of a series, ignoring NaNs; NaN when empty or all-NaN.

    BUG FIX: the boolean mask is now negated with ``~`` -- the old unary
    ``-`` on a boolean Series raises a TypeError in modern pandas, which
    silently sent every call into the except branch and returned NaN.
    The except is also narrowed from a bare ``except:``.
    """
    try:
        return max(xseries[~xseries.isnull()])
    except Exception:
        return numpy.nan
def getfig(x=3,y=3):
    """Clear Matplotlib memory and generate a new figure.

    :param x: figure width in inches
    :param y: figure height in inches
    """
    plt.clf()          # clear the current figure
    plt.close('all')   # release every cached figure to free memory
    plt.figure(figsize=(x,y))
def table_dates(df, date_col, ym='ym', agg_col='count'):
    """Count rows (or sum *agg_col*) grouped by month ('ym') or by year."""
    if agg_col == 'count':
        grouped = df[[date_col]].copy()
    else:
        grouped = df[[date_col, agg_col]].copy()
    # snap each date to the first day of its month (or year)
    snap_fmt = '%Y-%m-01' if ym == 'ym' else '%Y-01-01'
    grouped['date'] = df[date_col].map(
        lambda d: datetime.datetime.strptime(d.strftime(snap_fmt), '%Y-%m-%d'))
    if agg_col == 'count':
        grouped['n'] = grouped.groupby('date')['date'].transform(len)
    else:
        grouped['n'] = grouped.groupby('date')[agg_col].transform(sum)
    return grouped[['date', 'n']].drop_duplicates().sort_values('date')
def plot_dates(df, date_col, ym='ym', agg_col='count'):
"""Simple | |
value, data))
elif comparator == '>':
ok = any(map(lambda x: x is not None and x > value, data))
elif comparator == '<=':
ok = any(map(lambda x: x is not None and x <= value, data))
elif comparator == '>=':
ok = any(map(lambda x: x is not None and x >= value, data))
elif comparator in ('!=', '<>'):
ok = value not in data
elif comparator == 'not in':
ok = all(map(lambda x: x not in data, value))
elif comparator == 'not ilike':
data = [(x or "") for x in data]
ok = all(map(lambda x: value.lower() not in x.lower(), data))
elif comparator == 'ilike':
data = [(x or "").lower() for x in data]
ok = bool(fnmatch.filter(data, '*'+(value_esc or '').lower()+'*'))
elif comparator == 'not like':
data = [(x or "") for x in data]
ok = all(map(lambda x: value not in x, data))
elif comparator == 'like':
data = [(x or "") for x in data]
ok = bool(fnmatch.filter(data, value and '*'+value_esc+'*'))
elif comparator == '=?':
ok = (value in data) or not value
elif comparator in ('=like'):
data = [(x or "") for x in data]
ok = bool(fnmatch.filter(data, value_esc))
elif comparator in ('=ilike'):
data = [(x or "").lower() for x in data]
ok = bool(fnmatch.filter(data, value and value_esc.lower()))
else:
raise ValueError
if ok:
records_ids.add(rec.id)
result.append(self.browse(records_ids))
while len(result)>1:
result.append(result.pop() & result.pop())
return result[0]
def sorted(self, key=None, reverse=False):
"""Return the recordset ``self`` ordered by ``key``.
:param key: either a function of one argument that returns a
comparison key for each record, or a field name, or ``None``, in
which case records are ordered according the default model's order
:type key: callable or str or None
:param bool reverse: if ``True``, return the result in reverse order
.. code-block:: python3
# sort records by name
records.sorted(key=lambda r: r.name)
"""
if key is None:
recs = self.search([('id', 'in', self.ids)])
return self.browse(reversed(recs._ids)) if reverse else recs
if isinstance(key, str):
key = itemgetter(key)
return self.browse(item.id for item in sorted(self, key=key, reverse=reverse))
def update(self, values):
""" Update the records in ``self`` with ``values``. """
for record in self:
for name, value in values.items():
record[name] = value
    @api.model
    def flush(self, fnames=None, records=None):
        """ Process all the pending computations (on all models), and flush all
        the pending updates to the database.
        :param fnames (list<str>): list of field names to flush. If given,
            limit the processing to the given fields of the current model.
        :param records (Model): if given (together with ``fnames``), limit the
            processing to the given records.
        """
        def process(model, id_vals):
            # group record ids by vals, to update in batch when possible
            updates = defaultdict(list)
            for rid, vals in id_vals.items():
                updates[frozendict(vals)].append(rid)
            for vals, ids in updates.items():
                recs = model.browse(ids)
                try:
                    recs._write(vals)
                except MissingError:
                    # some records were deleted concurrently: retry on the
                    # subset that still exists
                    recs.exists()._write(vals)
        if fnames is None:
            # flush everything
            self.recompute()
            # drain the pending-write queue model by model
            while self.env.all.towrite:
                model_name, id_vals = self.env.all.towrite.popitem()
                process(self.env[model_name], id_vals)
        else:
            # flush self's model if any of the fields must be flushed
            self.recompute(fnames, records=records)
            # check whether any of 'records' must be flushed
            if records is not None:
                fnames = set(fnames)
                towrite = self.env.all.towrite.get(self._name)
                if not towrite or all(
                    fnames.isdisjoint(towrite.get(record.id, ()))
                    for record in records
                ):
                    # nothing pending for those records/fields: early exit
                    return
            # DLE P76: test_onchange_one2many_with_domain_on_related_field
            # ```
            # email.important = True
            # self.assertIn(email, discussion.important_emails)
            # ```
            # When a search on a field coming from a related occurs (the domain
            # on discussion.important_emails field), make sure the related field
            # is flushed
            model_fields = {}
            for fname in fnames:
                field = self._fields[fname]
                model_fields.setdefault(field.model_name, []).append(field)
                if field.related_field:
                    model_fields.setdefault(field.related_field.model_name, []).append(field.related_field)
            for model_name, fields in model_fields.items():
                if any(
                    field.name in vals
                    for vals in self.env.all.towrite.get(model_name, {}).values()
                    for field in fields
                ):
                    id_vals = self.env.all.towrite.pop(model_name)
                    process(self.env[model_name], id_vals)
            # missing for one2many fields, flush their inverse
            for fname in fnames:
                field = self._fields[fname]
                if field.type == 'one2many' and field.inverse_name:
                    self.env[field.comodel_name].flush([field.inverse_name])
#
# New records - represent records that do not exist in the database yet;
# they are used to perform onchanges.
#
@api.model
def new(self, values={}, origin=None, ref=None):
""" new([values], [origin], [ref]) -> record
Return a new record instance attached to the current environment and
initialized with the provided ``value``. The record is *not* created
in database, it only exists in memory.
One can pass an ``origin`` record, which is the actual record behind the
result. It is retrieved as ``record._origin``. Two new records with the
same origin record are considered equal.
One can also pass a ``ref`` value to identify the record among other new
records. The reference is encapsulated in the ``id`` of the record.
"""
if origin is not None:
origin = origin.id
record = self.browse([NewId(origin, ref)])
record._update_cache(values, validate=False)
return record
    @property
    def _origin(self):
        """ Return the actual records corresponding to ``self``.

        New-record ids are mapped back to their origin ids; the prefetch
        set is translated lazily with the same mapping.
        """
        ids = tuple(origin_ids(self._ids))
        prefetch_ids = IterableGenerator(origin_ids, self._prefetch_ids)
        return self._browse(self.env, ids, prefetch_ids)
#
# "Dunder" methods
#
    def __bool__(self):
        """ Test whether ``self`` is nonempty.

        ``getattr`` defaults to True so an instance without ``_ids`` (not
        fully initialized yet) is still considered truthy.
        """
        return bool(getattr(self, '_ids', True))
    # Python 2 compatibility alias
    __nonzero__ = __bool__
    def __len__(self):
        """ Return the size of ``self`` (the number of record ids). """
        return len(self._ids)
    def __iter__(self):
        """ Return an iterator over ``self``.

        For very large recordsets (above PREFETCH_MAX) whose prefetch set is
        the recordset itself, iterate in ``split_for_in_conditions`` chunks so
        each yielded record only prefetches its own chunk instead of the
        whole set.
        """
        if len(self._ids) > PREFETCH_MAX and self._prefetch_ids is self._ids:
            for ids in self.env.cr.split_for_in_conditions(self._ids):
                for id_ in ids:
                    yield self._browse(self.env, (id_,), ids)
        else:
            for id in self._ids:
                yield self._browse(self.env, (id,), self._prefetch_ids)
def __contains__(self, item):
""" Test whether ``item`` (record or field name) is an element of ``self``.
In the first case, the test is fully equivalent to::
any(item == record for record in self)
"""
if isinstance(item, BaseModel) and self._name == item._name:
return len(item) == 1 and item.id in self._ids
elif isinstance(item, str):
return item in self._fields
else:
raise TypeError("Mixing apples and oranges: %s in %s" % (item, self))
    def __add__(self, other):
        """ Return the concatenation of two recordsets. """
        # delegates to concat(), which validates the model and keeps order
        return self.concat(other)
def concat(self, *args):
""" Return the concatenation of ``self`` with all the arguments (in
linear time complexity).
"""
ids = list(self._ids)
for arg in args:
if not (isinstance(arg, BaseModel) and arg._name == self._name):
raise TypeError("Mixing apples and oranges: %s.concat(%s)" % (self, arg))
ids.extend(arg._ids)
return self.browse(ids)
def __sub__(self, other):
""" Return the recordset of all the records in ``self`` that are not in
``other``. Note that recordset order is preserved.
"""
if not isinstance(other, BaseModel) or self._name != other._name:
raise TypeError("Mixing apples and oranges: %s - %s" % (self, other))
other_ids = set(other._ids)
return self.browse([id for id in self._ids if id not in other_ids])
def __and__(self, other):
""" Return the intersection of two recordsets.
Note that first occurrence order is preserved.
"""
if not isinstance(other, BaseModel) or self._name != other._name:
raise TypeError("Mixing apples and oranges: %s & %s" % (self, other))
other_ids = set(other._ids)
return self.browse(OrderedSet(id for id in self._ids if id in other_ids))
    def __or__(self, other):
        """ Return the union of two recordsets.
        Note that first occurrence order is preserved.
        """
        # delegates to union(), which validates the model and deduplicates
        return self.union(other)
def union(self, *args):
""" Return the union of ``self`` with all the arguments (in linear time
complexity, with first occurrence order preserved).
"""
ids = list(self._ids)
for arg in args:
if not (isinstance(arg, BaseModel) and arg._name == self._name):
raise TypeError("Mixing apples and oranges: %s.union(%s)" % (self, arg))
ids.extend(arg._ids)
return self.browse(OrderedSet(ids))
    def __eq__(self, other):
        """ Test whether two recordsets are equivalent (up to reordering). """
        if not isinstance(other, BaseModel):
            if other:
                # comparing a recordset against a truthy non-recordset is
                # almost certainly a programming error: log the call site
                filename, lineno = frame_codeinfo(currentframe(), 1)
                _logger.warning("Comparing apples and oranges: %r == %r (%s:%s)",
                                self, other, filename, lineno)
            return NotImplemented
        return self._name == other._name and set(self._ids) == set(other._ids)
    def __lt__(self, other):
        """ Strict-subset comparison on record ids (same model only). """
        if not isinstance(other, BaseModel) or self._name != other._name:
            return NotImplemented
        return set(self._ids) < set(other._ids)
    def __le__(self, other):
        """ Subset comparison on record ids (same model only). """
        if not isinstance(other, BaseModel) or self._name != other._name:
            return NotImplemented
        # these are much cheaper checks than a proper subset check, so
        # optimise for checking if a null or singleton are subsets of a
        # recordset
        if not self or self in other:
            return True
        return set(self._ids) <= set(other._ids)
def __gt__(self, other):
if not isinstance(other, BaseModel) or self._name != other._name:
return NotImplemented
return | |
order='F', copy=copy)
def dot(m1, m2, target=None, beta=0., alpha=1.):
    """
    Find the dot product between m1 and m2 and store in target:
    target = beta*target + alpha*(m1 m2)
    If no target is given, it will be created automatically, but not
    initialized -- so beta should be left at its default value zero.
    """
    if not target:
        rows = _cudanet.get_leading_dimension(m1.p_mat)
        cols = _cudanet.get_nonleading_dimension(m2.p_mat)
        target = empty((rows, cols))
    rc = _cudanet.dot(m1.p_mat, m2.p_mat, target.p_mat, ct.c_float(beta), ct.c_float(alpha))
    if rc:
        raise generate_exception(rc)
    return target
def vdot(m1, m2):
    """
    Compute the vector dot product of matrices m1 and m2.
    """
    status = ct.c_int(0)
    result = _cudanet.vdot(m1.p_mat, m2.p_mat, ct.byref(status))
    if status:
        raise generate_exception(status.value)
    return result
def sigmoid(mat, target=None):
    """Apply the logistic sigmoid elementwise to *mat* (in place when no target)."""
    target = target or mat
    rc = _cudanet.apply_sigmoid(mat.p_mat, target.p_mat)
    if rc:
        raise generate_exception(rc)
    return target
def tanh(mat, target=None):
    """Apply tanh elementwise to *mat* (in place when no target is given)."""
    target = target or mat
    rc = _cudanet.apply_tanh(mat.p_mat, target.p_mat)
    if rc:
        raise generate_exception(rc)
    return target
def clip_range(mat, lower, upper, target=None):
    """Clamp each element of *mat* into [lower, upper] (in place by default)."""
    target = target or mat
    rc = _cudanet.apply_clip_range(mat.p_mat, ct.c_float(lower), ct.c_float(upper), target.p_mat)
    if rc:
        raise generate_exception(rc)
    return target
def soft_threshold(mat, alpha, target = None):
    """
    Apply the soft-threshold (shrinkage) function elementwise:
        mat = sign(mat) * max(0, abs(mat) - alpha)
    Operates in place when no target is given.
    """
    target = target or mat
    status = _cudanet.apply_soft_threshold(
        mat.p_mat, ct.c_float(alpha), target.p_mat)
    if status:
        raise generate_exception(status)
    return target
def abs(mat, target = None):
    """
    Apply abs to each element of the matrix mat.

    NOTE: intentionally shadows the builtin ``abs`` inside this module;
    kept as-is for API compatibility with existing callers.
    """
    # In-place when no explicit target is supplied.
    if not target:
        target = mat
    err_code = _cudanet.apply_abs(mat.p_mat, target.p_mat)
    if err_code:
        raise generate_exception(err_code)
    return target
def log_1_plus_exp(mat, target = None):
    """
    Apply the softplus function log(1 + exp(x)) elementwise; in place
    when no target is given.
    """
    target = target or mat
    status = _cudanet.apply_log_1_plus_exp(mat.p_mat, target.p_mat)
    if status:
        raise generate_exception(status)
    return target
def maximum(mat, mat2, target = None):
    """
    Elementwise maximum of mat and mat2; stored into mat when no target
    is given.
    """
    target = target or mat
    status = _cudanet.maximum(mat.p_mat, mat2.p_mat, target.p_mat)
    if status:
        raise generate_exception(status)
    return target
def minimum(mat, mat2, target = None):
    """
    Compute the element-wise minimum of mat and mat2
    (docstring fixed: it previously said "max", copy-pasted from maximum)
    """
    # In-place when no explicit target is supplied.
    if not target:
        target = mat
    err_code = _cudanet.minimum(mat.p_mat, mat2.p_mat, target.p_mat)
    if err_code:
        raise generate_exception(err_code)
    return target
def maximum_scalar(mat, compval, target = None):
    """
    Elementwise maximum of mat and the scalar compval; in place when no
    target is given.
    """
    target = target or mat
    status = _cudanet.maximum_scalar(
        mat.p_mat, ct.c_float(compval), target.p_mat)
    if status:
        raise generate_exception(status)
    return target
def minimum_scalar(mat, compval, target = None):
    """
    Elementwise minimum of mat and the scalar compval; in place when no
    target is given.
    """
    target = target or mat
    status = _cudanet.minimum_scalar(
        mat.p_mat, ct.c_float(compval), target.p_mat)
    if status:
        raise generate_exception(status)
    return target
def log(mat, target = None):
    """
    Natural logarithm of every element; in place when no target is given.
    """
    target = target or mat
    status = _cudanet.apply_log(mat.p_mat, target.p_mat)
    if status:
        raise generate_exception(status)
    return target
def exp(mat, target = None):
    """
    Exponential of every element; in place when no target is given.
    """
    target = target or mat
    status = _cudanet.apply_exp(mat.p_mat, target.p_mat)
    if status:
        raise generate_exception(status)
    return target
def sqrt(mat, target = None):
    """
    Square root of every element; in place when no target is given.
    """
    target = target or mat
    status = _cudanet.apply_sqrt(mat.p_mat, target.p_mat)
    if status:
        raise generate_exception(status)
    return target
def pow(mat, p, target = None):
    """
    Elementwise power.

    If ``p`` is a scalar, raise every element of ``mat`` to the power ``p``;
    if ``p`` is a CUDAMatrix, raise each element to the power given by the
    corresponding element of ``p``.

    NOTE: intentionally shadows the builtin ``pow`` inside this module.

    Raises:
        ValueError: when ``p`` is neither a CUDAMatrix nor a numeric scalar.
    """
    if not target:
        target = mat
    if isinstance(p, CUDAMatrix):
        err_code = _cudanet.apply_pow_matrix(mat.p_mat, p.p_mat, target.p_mat)
    elif isinstance(p, (int, float, np.integer, np.floating)):
        # Generalized from (np.int32, np.float32, int, float) so that any
        # numpy scalar (np.int64, np.float64, ...) is accepted as well;
        # backward compatible because np.int32/np.float32 are subclasses.
        err_code = _cudanet.apply_pow(mat.p_mat, ct.c_float(p), target.p_mat)
    else:
        raise ValueError("Value must be of type CUDAMatrix, int, or float.")
    if err_code:
        raise generate_exception(err_code)
    return target
def cross_entropy(output, labels, target = None):
    """
    Cross entropy between output and labels, batched over columns.
    Dimensions of output and labels must match; the result collapses
    along the row axis into a (1, n) target.
    """
    if not target:
        cols = _cudanet.get_nonleading_dimension(output.p_mat)
        target = empty((1, cols))
    status = _cudanet.cross_entropy(output.p_mat, labels.p_mat, target.p_mat)
    if status:
        raise generate_exception(status)
    return target
def where(condition_mat, if_mat, else_mat, target = None):
    """
    Elementwise select: target[i, j] = if_mat[i, j] when
    condition_mat[i, j] is true, else else_mat[i, j].
    Stored into condition_mat when no target is given.
    """
    target = target or condition_mat
    status = _cudanet.where(
        condition_mat.p_mat, if_mat.p_mat, else_mat.p_mat, target.p_mat)
    if status:
        raise generate_exception(status)
    return target
def max_pool(imgs, target, channels, sizeX, paddingStart, moduleStride, numModulesX):
    """
    Perform Max Pooling of kernel dimension sizeX on imgs and put result in target
    Images as (CxHxW) Rows x (N) Columns in 'C' order
    Target as (KxPxQ) Rows x (N) Colums in 'C' order
    int imgSizeY, int numModulesX, int paddingStart, int moduleStride,
    """
    # Thin ctypes wrapper; all geometry/stride validation happens in _cudanet.
    err_code = _cudanet.max_pool(imgs.p_mat, target.p_mat, ct.c_int(channels),
                                ct.c_int(sizeX), ct.c_int(paddingStart),
                                ct.c_int(moduleStride), ct.c_int(numModulesX))
    if err_code:
        raise generate_exception(err_code)
    return target
def max_pool_undo(imgs, maxGrads, maxActs, target, sizeX, paddingStart, moduleStride, numModulesX):
    """
    Undo Max Pooling of kernel dimension sizeX on imgs and put result in target
    (backward pass: routes maxGrads to the positions that produced maxActs)
    Images as (CxHxW) Rows x (N) Columns in 'C' order
    Target as (KxPxQ) Rows x (N) Colums in 'C' order
    int imgSizeY, int numModulesX, int paddingStart, int moduleStride
    """
    err_code = _cudanet.max_pool_undo(imgs.p_mat, maxGrads.p_mat, maxActs.p_mat,
                                     target.p_mat, ct.c_int(sizeX),
                                     ct.c_int(paddingStart),
                                     ct.c_int(moduleStride),
                                     ct.c_int(numModulesX))
    if err_code:
        raise generate_exception(err_code)
    return target
def l2_pool(imgs, target, channels, sizeX, paddingStart, moduleStride, numModulesX):
    """
    Perform L2 Pooling of kernel dimension sizeX on imgs and put result in target
    Images as (CxHxW) Rows x (N) Columns in 'C' order
    Target as (KxPxQ) Rows x (N) Colums in 'C' order
    int imgSizeY, int numModulesX, int paddingStart, int moduleStride,
    """
    # Thin ctypes wrapper; mirrors max_pool but with an L2 reduction.
    err_code = _cudanet.l2_pool(imgs.p_mat, target.p_mat, ct.c_int(channels),
                               ct.c_int(sizeX), ct.c_int(paddingStart),
                               ct.c_int(moduleStride), ct.c_int(numModulesX))
    if err_code:
        raise generate_exception(err_code)
    return target
def l2_pool_undo(imgs, l2Grads, l2Acts, target, sizeX, paddingStart, moduleStride, numModulesX):
    """
    Undo L2 Pooling of kernel dimension sizeX on imgs and put result in target
    (backward pass counterpart of l2_pool)
    Images as (CxHxW) Rows x (N) Columns in 'C' order
    Target as (KxPxQ) Rows x (N) Colums in 'C' order
    int imgSizeY, int numModulesX, int paddingStart, int moduleStride
    """
    err_code = _cudanet.l2_pool_undo(imgs.p_mat, l2Grads.p_mat, l2Acts.p_mat,
                                    target.p_mat, ct.c_int(sizeX),
                                    ct.c_int(paddingStart),
                                    ct.c_int(moduleStride),
                                    ct.c_int(numModulesX))
    if err_code:
        raise generate_exception(err_code)
    return target
def avg_pool(imgs, target, channels, sizeX, paddingStart, moduleStride, numModulesX):
    """
    Perform Average Pooling of kernel dimension sizeX on imgs and put result in target
    (docstring fixed: it previously said "Max Pooling", copy-pasted from max_pool)
    Images as (CxHxW) Rows x (N) Columns in 'C' order
    Target as (KxPxQ) Rows x (N) Colums in 'C' order
    int imgSizeY, int numModulesX, int paddingStart, int moduleStride,
    """
    err_code = _cudanet.avg_pool(imgs.p_mat, target.p_mat, ct.c_int(channels),
                                ct.c_int(sizeX), ct.c_int(paddingStart),
                                ct.c_int(moduleStride), ct.c_int(numModulesX))
    if err_code:
        raise generate_exception(err_code)
    return target
def avg_pool_undo(avgGrads, target, sizeX, paddingStart, moduleStride, numModulesX, imgSizeX):
    """
    Undo Avg Pooling of kernel dimension sizeX on imgs and put result in target
    (backward pass: spreads each pooled gradient uniformly over its window)
    average Gradients as (CxHxW) Rows x (N) Columns in 'C' order
    Target as (KxPxQ) Rows x (N) Colums in 'C' order
    int imgSizeY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups
    """
    err_code = _cudanet.avg_pool_undo(avgGrads.p_mat, target.p_mat,
                                     ct.c_int(sizeX), ct.c_int(paddingStart),
                                     ct.c_int(moduleStride),
                                     ct.c_int(numModulesX),
                                     ct.c_int(imgSizeX))
    if err_code:
        raise generate_exception(err_code)
    return target
def unpool_forward(smallMat, largeMat, channels, sizeX, smallX, largeX):
    """
    Forward unpooling: expand smallMat (grid size smallX) into largeMat
    (grid size largeX) with kernel dimension sizeX, returning largeMat.

    NOTE(review): the original docstring here was copy-pasted from
    avg_pool_undo; parameter meanings above are inferred from the names --
    confirm against the _cudanet implementation.
    """
    err_code = _cudanet.unpool_forward(smallMat.p_mat, largeMat.p_mat,
                                      ct.c_int(channels), ct.c_int(sizeX),
                                      ct.c_int(smallX), ct.c_int(largeX))
    if err_code:
        raise generate_exception(err_code)
    return largeMat
def unpool_backward(largeMat, smallMat, channels, sizeX, smallX, largeX):
"""
Undo Avg Pooling of kernel dimension sizeX on imgs and put result in target
average Gradients as (CxHxW) Rows x (N) Columns in 'C' order
Target as (KxPxQ) Rows x (N) Colums in 'C' order
int imgSizeY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, | |
# Repository: lsd-maddrive/adas_system
from utils.augmentations import (
Albumentations,
augment_hsv,
letterbox,
random_perspective,
)
from utils.general import non_max_suppression, scale_coords
import torch
import cv2
import numpy as np
from torch import nn
import random
import pandas as pd
import matplotlib.pyplot as plt
from models.experimental import Ensemble
from models.yolo import Detect, Model
from models.common import Conv
def printProgressEnum(index, length, label=None):
    """Print an in-place (carriage-return) progress line.

    Args:
        index: 0-based index of the item just processed.
        length: total number of items.
        label: optional text prepended to the progress message.
    """
    # Fixed idiom: `label != None` -> `label is not None` (PEP 8).
    prefix = label if label is not None else ""
    print(
        "\r{}Progress: {}/{} ({:.2f}%)".format(
            prefix,
            index + 1,
            length,
            100 * (index + 1) / length,
        ),
        flush=True,
        end="",
    )
def showTensorPicture(tensor_image, label=None):
    """Display a CHW image tensor with matplotlib.

    Permutes CHW -> HWC, converts BGR -> RGB (OpenCV channel order is
    assumed for the input), then shows it, optionally titled with *label*.
    """
    # img = tensor_image.permute(1, 2, 0)
    img = cv2.cvtColor(tensor_image.permute(1, 2, 0).numpy(), cv2.COLOR_BGR2RGB)
    plt.imshow(img)
    if label:
        plt.title(label)
    plt.show()
def letterbox(
    im,
    new_shape=(640, 640),
    color=(114, 114, 114),
    auto=True,
    scaleFill=False,
    scaleup=True,
    stride=32,
):
    """Resize *im* into *new_shape* preserving aspect ratio, padding with *color*.

    Returns ``(image, (rw, rh), (dw, dh))`` -- the padded image, the
    width/height scale ratios, and the one-side padding in pixels.

    NOTE(review): this redefines the `letterbox` imported from
    utils.augmentations at the top of the file -- confirm the shadowing
    is intentional.
    """
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)
    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
    dw /= 2  # divide padding into 2 sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    # +-0.1 rounding splits an odd padding pixel between the two sides
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(
        im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color
    )  # add border
    return im, ratio, (dw, dh)
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    """Convert nx4 normalized [xc, yc, w, h] boxes to absolute [x1, y1, x2, y2].

    xy1 is the top-left corner, xy2 the bottom-right; padw/padh shift the
    result (e.g. for letterboxed images).
    """
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    out[:, 0] = w * (x[:, 0] - half_w) + padw  # top-left x
    out[:, 1] = h * (x[:, 1] - half_h) + padh  # top-left y
    out[:, 2] = w * (x[:, 0] + half_w) + padw  # bottom-right x
    out[:, 3] = h * (x[:, 1] + half_h) + padh  # bottom-right y
    return out
def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
    """Convert nx4 [x1, y1, x2, y2] boxes to normalized [xc, yc, w, h].

    xy1 is the top-left corner, xy2 the bottom-right. With clip=True the
    input boxes are first clipped (in place!) to the image bounds.
    """
    if clip:
        clip_coords(x, (h - eps, w - eps))  # warning: inplace clip
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    x1, y1, x2, y2 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]
    out[:, 0] = ((x1 + x2) / 2) / w  # x center
    out[:, 1] = ((y1 + y2) / 2) / h  # y center
    out[:, 2] = (x2 - x1) / w        # width
    out[:, 3] = (y2 - y1) / h        # height
    return out
def clip_coords(boxes, shape):
    """Clip xyxy boxes in place to the image bounds; shape is (height, width)."""
    height, width = shape[0], shape[1]
    if isinstance(boxes, torch.Tensor):
        # Tensors: clamp each column individually (faster than fancy indexing)
        boxes[:, 0].clamp_(0, width)   # x1
        boxes[:, 1].clamp_(0, height)  # y1
        boxes[:, 2].clamp_(0, width)   # x2
        boxes[:, 3].clamp_(0, height)  # y2
    else:
        # np.ndarray: grouped clip is faster
        boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, width)   # x1, x2
        boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, height)  # y1, y2
class CreateDataSet(torch.utils.data.Dataset):
    """Detection dataset backed by a pandas DataFrame.

    Each row of *df* is expected to provide ``filepath``, ``size`` (w0, h0),
    ``coords`` (normalized xywh boxes) and a ``set`` column used to pick the
    split matching *set_label*.
    """

    def __init__(
        self,
        df,
        set_label,
        hyp_arg,
        img_size=640,
        batch_size=16,
        augment=False,
        hyp=None,
    ):
        # NOTE(review): `batch_size` and `hyp` are accepted but never used;
        # hyperparameters actually come from `hyp_arg` -- confirm with callers.
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp_arg
        self.df = df[df["set"] == set_label]
        self.albumentations = Albumentations() if augment else None

    def loadImage(self, instance):
        """Load one image and pre-scale its longest side to self.img_size.

        Returns (image, (h0, w0) original size, (h, w) resized size).
        """
        path, (w0, h0) = instance["filepath"], instance["size"]
        im = cv2.imread(path)
        assert im is not None, f"Image Not Found {path}"
        r = self.img_size / max(h0, w0)  # ratio
        if r != 1:  # if sizes are not equal
            # INTER_AREA for downscaling (unless augmenting), LINEAR otherwise
            im = cv2.resize(
                im,
                (int(w0 * r), int(h0 * r)),
                interpolation=cv2.INTER_AREA
                if r < 1 and not self.augment
                else cv2.INTER_LINEAR,
            )
        return im, (h0, w0), im.shape[:2]

    def __getitem__(self, index):
        """Return (image CHW uint8 tensor, labels (n, 6), filepath, shapes).

        Label columns are [batch_index, class, xc, yc, w, h] normalized;
        batch_index is filled in later by collate_fn.
        """
        # locate img info from DataFrame
        instance = self.df.iloc[index]
        # get Img, src height, width and resized height, width
        img, (h0, w0), (h, w) = self.loadImage(instance)
        shape = self.img_size
        # make img square
        # print('>', (img>1).sum())
        # print('<=', (img<=1).sum())
        img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
        # print(pad)
        # store core shape info
        shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling
        # add class to labels. We have 1 class, so just add zeros into first column
        labels = np.array(instance["coords"])
        labels = np.c_[np.zeros(labels.shape[0]), labels]
        # print(labels)
        # fix labels location caused by letterbox
        labels[:, 1:] = xywhn2xyxy(
            labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]
        )
        if self.augment:
            img, labels = random_perspective(
                img,
                labels,
                degrees=self.hyp["degrees"],
                translate=self.hyp["translate"],
                scale=self.hyp["scale"],
                shear=self.hyp["shear"],
                perspective=self.hyp["perspective"],
            )
        # back to normalized xywh (labels were absolute xyxy for the warp above)
        labels[:, 1:5] = xyxy2xywhn(
            labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=False, eps=1e-3
        )
        # YOLO augmentation technique (!copy-paste!)
        if self.augment:
            # print('augm for', index, instance['filepath'])
            # Albumentations
            img, labels = self.albumentations(img, labels)
            nl = len(labels)  # update after albumentations
            # HSV color-space
            augment_hsv(
                img,
                hgain=self.hyp["hsv_h"],
                sgain=self.hyp["hsv_s"],
                vgain=self.hyp["hsv_v"],
            )
            # Flip up-down
            if random.random() < self.hyp["flipud"]:
                img = np.flipud(img)
                if nl:
                    labels[:, 2] = 1 - labels[:, 2]
            # Flip left-right
            if random.random() < self.hyp["fliplr"]:
                img = np.fliplr(img)
                if nl:
                    labels[:, 1] = 1 - labels[:, 1]
        nl = len(labels)
        # why out size (?, 6)??
        # (column 0 is the per-batch image index, written by collate_fn)
        labels_out = torch.zeros((nl, 6))
        if nl:
            labels_out[:, 1:] = torch.from_numpy(labels)
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        return torch.from_numpy(img), labels_out, instance["filepath"], shapes

    def __len__(self):
        """Number of rows in the selected split."""
        return len(self.df.index)

    @staticmethod
    def collate_fn(batch):
        """Stack images; concatenate labels, writing the image index into col 0."""
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes
def createDataLoaderAndDataSet(
    df,
    set_label,
    imgsz,
    hyp_arg,
    batch_size,
    hyp=None,
    augment=False,
    shuffle=True,
    nw=0,
):
    """Build a CreateDataSet over the *set_label* split of *df* and wrap it
    in a torch DataLoader. Returns (loader, dataset)."""
    from torch.utils.data import DataLoader

    dataset = CreateDataSet(df, set_label, hyp_arg, img_size=imgsz, augment=augment)
    sampler = None  # distributed.DistributedSampler(dataset, shuffle=shuffle)
    loader = DataLoader(
        dataset,  # InfiniteDataLoader ?
        batch_size=min(batch_size, len(dataset)),
        shuffle=shuffle and sampler is None,
        num_workers=nw,  # doesnt work in Windows
        sampler=sampler,
        pin_memory=True,
        collate_fn=CreateDataSet.collate_fn,
    )
    return loader, dataset
class makeDetectFromModel(nn.Module):
    """Wrap a raw YOLO model for inference-only use.

    Applies the usual torch-version compatibility fixes to the module tree
    and exposes `forward`, `warmup` and `translatePreds` helpers.
    """

    def __init__(self, model, device=None):
        super().__init__()
        ensemble = Ensemble()
        # .float().eval() act in place on the wrapped model as well
        ensemble.append(model).float().eval()
        for m in model.modules():
            if type(m) in [
                nn.Hardswish,
                nn.LeakyReLU,
                nn.ReLU,
                nn.ReLU6,
                nn.SiLU,
                Detect,
                Model,
            ]:
                m.inplace = True  # pytorch 1.7.0 compatibility
            if type(m) is Detect:
                if not isinstance(
                    m.anchor_grid, list
                ):  # new Detect Layer compatibility
                    delattr(m, "anchor_grid")
                    setattr(m, "anchor_grid", [torch.zeros(1)] * m.nl)
            elif type(m) is Conv:
                m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
        self.model = model
        self.device = device

    def forward(self, im):
        """Run the wrapped model and return only the prediction tensor."""
        return self.model(im)[0]

    def warmup(self, imgsz=(1, 3, 640, 640), half=False):
        """Warm up the model by running one dummy inference (GPU devices only).

        BUG FIX: `half` used to be read as an undefined global, raising
        NameError on every GPU warmup; it is now an explicit parameter
        (default False keeps the float32 path).
        """
        if (
            isinstance(self.device, torch.device) and self.device.type != "cpu"
        ):  # only warmup GPU models
            im = (
                torch.zeros(*imgsz)
                .to(self.device)
                .type(torch.half if half else torch.float)
            )  # input image
            self.forward(im)  # warmup

    @staticmethod
    def translatePreds(
        pred,
        nn_img_size,
        source_img_size,
        conf_thres=0.25,
        iou_thres=0.45,
        classes=None,
        agnostic=False,
        multi_label=False,
        labels=(),
        max_det=300,
    ):
        """Apply NMS to raw predictions and rescale boxes to the source image.

        Returns a dict with absolute `coords`, network-space
        `relative_coords`, `class` ids, `confs` and a total `count`.
        """
        pred = non_max_suppression(
            pred,
            conf_thres=conf_thres,
            iou_thres=iou_thres,
            classes=classes,
            agnostic=agnostic,
            multi_label=multi_label,
            labels=labels,
            max_det=max_det,
        )
        ret_dict = {
            "coords": [],
            "relative_coords": [],
            "class": [],
            "confs": [],
            "count": 0,
        }
        for det in pred:  # (loop index was unused; dropped enumerate)
            if len(det):
                ret_dict["relative_coords"].append(det[:, :4])
                det[:, :4] = scale_coords(
                    nn_img_size, det[:, :4], source_img_size
                ).round()
                for *xyxy, conf, cls in reversed(det):
                    ret_dict["coords"].append(list(map(int, xyxy)))
                    ret_dict["confs"].append(float(conf))
                    ret_dict["class"].append(int(cls))
                    ret_dict["count"] += 1
        return ret_dict
def UnmakeRel(coords, w, h):
    """Scale relative [x1, y1, x2, y2] coords up to absolute ints for a w x h image."""
    scale = (w, h, w, h)
    return [int(c * s) for c, s in zip(coords, scale)]
def MakeRel(coords, w, h):
    """Normalize absolute [x1, y1, x2, y2] coords to [0, 1] for a w x h image."""
    scale = (w, h, w, h)
    return [float(c / s) for c, s in zip(coords, scale)]
def ConvertAbsTLWH2CV2Rectangle(coords):
    """Convert absolute top-left [x, y, w, h] to an [x1, y1, x2, y2] rectangle."""
    x, y, w, h = coords[0], coords[1], coords[2], coords[3]
    return [int(x), int(y), int(x + w), int(y + h)]
def ConvertCenterXYWH2CV2Rectangle(coords):
    """Convert center-based [xc, yc, w, h] to an [x1, y1, x2, y2] rectangle."""
    xc, yc, w, h = coords[0], coords[1], coords[2], coords[3]
    half_w = w / 2
    half_h = h / 2
    return [
        int(xc - half_w),
        int(yc - half_h),
        int(xc + half_w),
        int(yc + half_h),
    ]
def ConvertCV2Rectangle2CenterXYWH(coords):
return list(
| |
__init__(
self, symbolic: str = None, nr_components=None,
unit_depth_in_bit: int = None, location: _Location = None):
#
super().__init__(
alignment=_Alignment(unpacked=_DataSize.FLOAT32),
symbolic=symbolic,
nr_components=nr_components,
unit_depth_in_bit=unit_depth_in_bit,
location=location
)
    def expand(self, array: numpy.ndarray) -> numpy.ndarray:
        """Reinterpret the raw buffer as float32 samples (zero-copy view)."""
        return array.view(numpy.float32)
# ----
class _MonoUnpackedUint8(_UnpackedUint8):
    """Single-component (MONO) unpacked uint8 base format."""

    def __init__(self, symbolic: str = None, unit_depth_in_bit: int = None):
        #
        super().__init__(
            symbolic=symbolic,
            nr_components=1.,
            unit_depth_in_bit=unit_depth_in_bit,
            location=_Location.MONO
        )


class _MonoUnpackedInt8(_UnpackedInt8):
    """Single-component (MONO) unpacked int8 base format."""

    def __init__(self, symbolic: str = None, unit_depth_in_bit: int = None):
        #
        super().__init__(
            symbolic=symbolic,
            nr_components=1.,
            unit_depth_in_bit=unit_depth_in_bit,
            location = _Location.MONO
        )


class _MonoUnpackedUint16(_UnpackedUint16):
    """Single-component (MONO) unpacked uint16 base format."""

    def __init__(self, symbolic: str = None, unit_depth_in_bit: int = None):
        #
        super().__init__(
            symbolic=symbolic,
            nr_components=1.,
            unit_depth_in_bit=unit_depth_in_bit,
            location=_Location.MONO
        )


class _MonoUnpackedFloat32(_UnpackedFloat32):
    """Single-component (MONO) unpacked float32 base format (depth fixed at 32)."""

    def __init__(self, symbolic: str = None):
        #
        super().__init__(
            symbolic=symbolic,
            nr_components=1.,
            unit_depth_in_bit=32,
            location=_Location.MONO
        )
# ----
class Mono8(_MonoUnpackedUint8):
    """8-bit monochrome."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Mono8',
            unit_depth_in_bit=8
        )


class Mono8s(_MonoUnpackedInt8):
    """8-bit signed monochrome."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Mono8s',
            unit_depth_in_bit=8
        )


class Mono10(_MonoUnpackedUint16):
    """10-bit monochrome, unpacked into 16-bit words."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Mono10',
            unit_depth_in_bit=10
        )


class Mono12(_MonoUnpackedUint16):
    """12-bit monochrome, unpacked into 16-bit words."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Mono12',
            unit_depth_in_bit=12
        )


class Mono14(_MonoUnpackedUint16):
    """14-bit monochrome, unpacked into 16-bit words."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Mono14',
            unit_depth_in_bit=14
        )


class Mono16(_MonoUnpackedUint16):
    """16-bit monochrome."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Mono16',
            unit_depth_in_bit=16
        )
class R8(_MonoUnpackedUint8):
    """Single-plane 8-bit format, symbolic 'R8'."""

    def __init__(self):
        #
        super().__init__(
            symbolic='R8',
            unit_depth_in_bit=8
        )


class R10(_MonoUnpackedUint16):
    """Single-plane 10-bit format, symbolic 'R10'."""

    def __init__(self):
        #
        super().__init__(
            symbolic='R10',
            unit_depth_in_bit=10
        )


class R12(_MonoUnpackedUint16):
    """Single-plane 12-bit format, symbolic 'R12'."""

    def __init__(self):
        #
        super().__init__(
            symbolic='R12',
            unit_depth_in_bit=12
        )


class R16(_MonoUnpackedUint16):
    """Single-plane 16-bit format, symbolic 'R16'."""

    def __init__(self):
        #
        super().__init__(
            symbolic='R16',
            unit_depth_in_bit=16
        )


class G8(_MonoUnpackedUint8):
    """Single-plane 8-bit format, symbolic 'G8'."""

    def __init__(self):
        #
        super().__init__(
            symbolic='G8',
            unit_depth_in_bit=8
        )


class G10(_MonoUnpackedUint16):
    """Single-plane 10-bit format, symbolic 'G10'."""

    def __init__(self):
        #
        super().__init__(
            symbolic='G10',
            unit_depth_in_bit=10
        )


class G12(_MonoUnpackedUint16):
    """Single-plane 12-bit format, symbolic 'G12'."""

    def __init__(self):
        #
        super().__init__(
            symbolic='G12',
            unit_depth_in_bit=12
        )


class G16(_MonoUnpackedUint16):
    """Single-plane 16-bit format, symbolic 'G16'."""

    def __init__(self):
        #
        super().__init__(
            symbolic='G16',
            unit_depth_in_bit=16
        )


class B8(_MonoUnpackedUint8):
    """Single-plane 8-bit format, symbolic 'B8'."""

    def __init__(self):
        #
        super().__init__(
            symbolic='B8',
            unit_depth_in_bit=8
        )


class B10(_MonoUnpackedUint16):
    """Single-plane 10-bit format, symbolic 'B10'."""

    def __init__(self):
        #
        super().__init__(
            symbolic='B10',
            unit_depth_in_bit=10
        )


class B12(_MonoUnpackedUint16):
    """Single-plane 12-bit format, symbolic 'B12'."""

    def __init__(self):
        #
        super().__init__(
            symbolic='B12',
            unit_depth_in_bit=12
        )


class B16(_MonoUnpackedUint16):
    """Single-plane 16-bit format, symbolic 'B16'."""

    def __init__(self):
        #
        super().__init__(
            symbolic='B16',
            unit_depth_in_bit=16
        )
class Coord3D_A8(_MonoUnpackedUint8):
    """8-bit 3D-coordinate data, component A."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Coord3D_A8',
            unit_depth_in_bit=8
        )


class Coord3D_B8(_MonoUnpackedUint8):
    """8-bit 3D-coordinate data, component B."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Coord3D_B8',
            unit_depth_in_bit=8
        )


class Coord3D_C8(_MonoUnpackedUint8):
    """8-bit 3D-coordinate data, component C."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Coord3D_C8',
            unit_depth_in_bit=8
        )


class Coord3D_A16(_MonoUnpackedUint16):
    """16-bit 3D-coordinate data, component A."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Coord3D_A16',
            unit_depth_in_bit=16
        )


class Coord3D_B16(_MonoUnpackedUint16):
    """16-bit 3D-coordinate data, component B."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Coord3D_B16',
            unit_depth_in_bit=16
        )


class Coord3D_C16(_MonoUnpackedUint16):
    """16-bit 3D-coordinate data, component C."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Coord3D_C16',
            unit_depth_in_bit=16
        )
# ----
class Coord3D_A32f(_MonoUnpackedFloat32):
    """32-bit float 3D-coordinate data, component A."""

    def __init__(self):
        #
        super().__init__(symbolic='Coord3D_A32f')


class Coord3D_B32f(_MonoUnpackedFloat32):
    """32-bit float 3D-coordinate data, component B."""

    def __init__(self):
        #
        super().__init__(symbolic='Coord3D_B32f')


class Coord3D_C32f(_MonoUnpackedFloat32):
    """32-bit float 3D-coordinate data, component C."""

    def __init__(self):
        #
        super().__init__(symbolic='Coord3D_C32f')
# ----
class Confidence1(_MonoUnpackedUint8):
    """Confidence map with 1 significant bit, carried in uint8 units."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Confidence1',
            unit_depth_in_bit=1
        )


class Confidence8(_MonoUnpackedUint8):
    """8-bit confidence map."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Confidence8',
            unit_depth_in_bit=8
        )


class Confidence16(_MonoUnpackedUint16):
    """16-bit confidence map."""

    def __init__(self):
        #
        super().__init__(
            symbolic='Confidence16',
            unit_depth_in_bit=16
        )


class Confidence32f(_MonoUnpackedFloat32):
    """32-bit float confidence map."""

    def __init__(self):
        #
        super().__init__(symbolic='Confidence32f')
# ----
class _Packed(_PixelFormat):
    """Base for packed formats: stored as uint8 bytes, unpacked to uint16."""

    def __init__(
            self, symbolic: str = None, nr_components=None,
            unit_depth_in_bit: int = None, location: _Location = None):
        #
        super().__init__(
            alignment=_Alignment(
                unpacked=_DataSize.UINT16, packed=_DataSize.UINT8
            ),
            symbolic=symbolic,
            nr_components=nr_components,
            unit_depth_in_bit=unit_depth_in_bit,
            location=location
        )
# ----
class _GroupPacked(_PixelFormat):
    """Base for group-packed formats: stored as uint8 bytes, unpacked to uint16."""

    def __init__(
            self, symbolic: str = None, nr_components: float = None,
            unit_depth_in_bit: int = None, location: _Location = None):
        #
        super().__init__(
            alignment=_Alignment(
                unpacked=_DataSize.UINT16, packed=_DataSize.UINT8
            ),
            symbolic=symbolic,
            nr_components=nr_components,
            unit_depth_in_bit=unit_depth_in_bit,
            location=location
        )
# ----
class _GroupPacked_10(_GroupPacked):
    """10-bit group-packed formats: every 3 packed bytes yield 2 pixels."""

    def __init__(
            self, symbolic: str = None, nr_components: float = None,
            location: _Location = None):
        #
        super().__init__(
            symbolic=symbolic,
            nr_components=nr_components,
            unit_depth_in_bit=10,
            location=location
        )

    def expand(self, array: numpy.ndarray) -> numpy.ndarray:
        """Unpack 3-byte groups into 2 uint16 pixels.

        Layout (from the packing below): pixel0 = B0 << 2 | (B1 & 0x3),
        pixel1 = B2 << 2 | ((B1 >> 4) & 0x3) -- the middle byte carries the
        low bits of both pixels.
        """
        nr_packed = 3
        nr_unpacked = 2
        #
        p1st, p2nd, p3rd = numpy.reshape(
            array, (array.shape[0] // nr_packed, nr_packed)
        ).astype(numpy.uint16).T
        #
        mask = 0x3
        up1st = numpy.bitwise_or(
            p1st << 2, numpy.bitwise_and(mask, p2nd)
        )
        up2nd = numpy.bitwise_or(
            p3rd << 2, numpy.bitwise_and(mask, p2nd >> 4)
        )
        # Interleave the two pixel streams back into one flat array
        return numpy.reshape(
            numpy.concatenate(
                (up1st[:, None], up2nd[:, None]), axis=1
            ),
            nr_unpacked * up1st.shape[0]
        )
class _GroupPacked_12(_GroupPacked):
    """12-bit group-packed formats: every 3 packed bytes yield 2 pixels."""

    def __init__(
            self, symbolic: str = None, nr_components: float = None,
            location: _Location = None):
        #
        super().__init__(
            symbolic=symbolic,
            nr_components=nr_components,
            unit_depth_in_bit=12,
            location=location
        )

    def expand(self, array: numpy.ndarray) -> numpy.ndarray:
        """Unpack 3-byte groups into 2 uint16 pixels.

        Layout (from the packing below): pixel0 = B0 << 4 | (B1 & 0xf),
        pixel1 = B2 << 4 | ((B1 >> 4) & 0xf) -- the middle byte carries the
        low nibbles of both pixels.
        """
        nr_packed = 3
        nr_unpacked = 2
        #
        p1st, p2nd, p3rd = numpy.reshape(
            array, (array.shape[0] // nr_packed, nr_packed)
        ).astype(numpy.uint16).T
        #
        mask = 0xf
        up1st = numpy.bitwise_or(
            p1st << 4, numpy.bitwise_and(mask, p2nd)
        )
        up2nd = numpy.bitwise_or(
            p3rd << 4, numpy.bitwise_and(mask, p2nd >> 4)
        )
        # Interleave the two pixel streams back into one flat array
        return numpy.reshape(
            numpy.concatenate(
                (up1st[:, None], up2nd[:, None]), axis=1
            ),
            nr_unpacked * up1st.shape[0]
        )
# ----
class _10p(_PixelFormat):
    """10-bit 'p' (bit-packed) formats: chunks of 5 bytes carry 4 pixels."""

    def __init__(
            self, symbolic: str = None, nr_components: float = None,
            location: _Location = None):
        #
        super().__init__(
            symbolic=symbolic,
            alignment=_Alignment(
                unpacked=_DataSize.UINT16, packed=_DataSize.UINT8
            ),
            nr_components=nr_components,
            unit_depth_in_bit=10,
            location=location
        )

    def expand(self, array: numpy.ndarray) -> numpy.ndarray:
        """Expand the Mono10p format (litte-endian order), where chunks of 5 bytes give 4 pixels.
        Parameters
        ----------
        array : numpy.ndarray
            One-dimensional array of datatype uint8 representing the raw byte sequence.
        Returns
        -------
        numpy.ndarray
            One-dimensional array of datatype uint16 representing the unpacked
            pixels with 10-bit data (values from 0 to 1023).
        """
        assert array.dtype == numpy.uint8
        bytes_packed = 5  # chunks of 5 bytes
        # pixels_unpacked = 4 # give 4 pixels
        # The .T-transpose allows receiving into five named variables v0--v4.
        v0, v1, v2, v3, v4 = array.reshape(
            array.size // bytes_packed,
            bytes_packed
        ).astype(numpy.uint16).T
        """
        See Figure 6-9 on page 34 of
        https://www.emva.org/wp-content/uploads/GenICam_PFNC_2_3.pdf
        Input:         v4       v3        v2       v1       v0
        Byte:          B4       B3        B2       B1       B0
               |........|.. ......|.... ....|...... ..|........|
               |........ ..|...... ....|.... ......|.. ........|
        Pixel:      p3          p2         p1          p0
        Output:     v3          v2         v1          v0
        """
        v0 = numpy.bitwise_or(
            # all the 8 bits of B0 remain as LSB of p0
            v0,
            # 2 LSB from B1 go to MSB of p0
            numpy.bitwise_and(v1 << 8, 0b1100000000)
        )
        v1 = numpy.bitwise_or(
            # 6 MSB from B1 as LSB of p1
            v1 >> 2,
            # 4 LSB from B2 of MSB of p1
            numpy.bitwise_and(v2 << 6, 0b1111000000)
        )
        v2 = numpy.bitwise_or(
            # 4 MSB from B2 as LSB of p2
            v2 >> 4,
            # 6 LSB from B3 as MSB of p2
            numpy.bitwise_and(v3 << 4, 0b1111110000)
        )
        v3 = numpy.bitwise_or(
            # 2 MSB of B3 as LSB of p3
            v3 >> 6,
            # all the 8 bits of B4 as MSB of p3
            v4 << 2
        )
        # Stack the four pixels as columns, i.e. one row per chunk, then
        # flatten to 1D-array
        return numpy.column_stack((v0, v1, v2, v3)).ravel()
class _10p32(_PixelFormat):
    """10-bit packed-in-32-bit formats: chunks of 4 bytes carry 3 pixels."""

    def __init__(
            self, symbolic: str = None, nr_components: float = None,
            location: _Location = None):
        #
        super().__init__(
            symbolic=symbolic,
            alignment=_Alignment(
                unpacked=_DataSize.UINT16, packed=_DataSize.UINT8
            ),
            nr_components=nr_components,
            unit_depth_in_bit=10,
            location=location
        )

    def expand(self, array: numpy.ndarray) -> numpy.ndarray:
        """
        Expand the Mono10c3p32 (or RGB10p32) format, where chunks of 4 bytes
        give 3 pixels.
        """
        nr_packed = 4
        # nr_unpacked = 3
        #
        p1st, p2nd, p3rd, p4th = numpy.reshape(
            array, (array.shape[0] // nr_packed, nr_packed)
        ).astype(numpy.uint16).T
        """
        See Figure 6-6 on page 32 of
        https://www.emva.org/wp-content/uploads/GenICam_PFNC_2_3.pdf
        Byte:      B3        B2       B1       B0
             |XX......|.... ....|...... ..|........|
             | ...... ....|.... ......|.. ........|
        Pixel:     p2         p1          p0
        """
        up1st = numpy.bitwise_or(
            # all the 8 bits of B0 remain as LSB of p0
            p1st,
            # 2 LSB from B1 go to MSB of p0
            numpy.bitwise_and(0x300, p2nd << 8)
        )
        up2nd = numpy.bitwise_or(
            # 6 MSB from B1 as LSB of p1
            numpy.bitwise_and(0x3f, p2nd >> 2),
            # 4 LSB from B2 of MSB of p1.
            numpy.bitwise_and(0x3c0, p3rd << 6)
            # NOTE: 0x3e0 changed to 0x3c0 to make more sense but result is
            # the same (after the << 6 shift the extra bit with 0x3e0 was
            # anyway zeroed).
        )
        up3rd = numpy.bitwise_or(
            # 4 MSB from B2 as LSB of p2
            numpy.bitwise_and(0xf, p3rd >> 4),
            # 6 LSB from B3 as MSB of p2
            numpy.bitwise_and(0x3f0, p4th << 4)
        )
        # One row per 4-byte chunk; flatten back to a 1D pixel stream
        return numpy.column_stack((up1st, up2nd, up3rd)).ravel()
class _12p(_PixelFormat):
    """12-bit 'p' (bit-packed) formats: chunks of 3 bytes carry 2 pixels."""

    def __init__(
            self, symbolic: str = None, nr_components: float = None,
            location: _Location = None):
        #
        super().__init__(
            symbolic=symbolic,
            alignment=_Alignment(
                unpacked=_DataSize.UINT16, packed=_DataSize.UINT8
            ),
            nr_components=nr_components,
            unit_depth_in_bit=12,
            location=location
        )

    def expand(self, array: numpy.ndarray) -> numpy.ndarray:
        """Unpack 3-byte chunks into 2 uint16 pixels (little-endian 12p layout):
        pixel0 = B0 | (B1 & 0xf) << 8, pixel1 = (B1 >> 4) | B2 << 4.
        """
        nr_packed = 3
        nr_unpacked = 2
        #
        p1st, p2nd, p3rd = numpy.reshape(
            array, (array.shape[0] // nr_packed, nr_packed)
        ).astype(numpy.uint16).T
        #
        up1st = numpy.bitwise_or(
            p1st, numpy.bitwise_and(0xf00, p2nd << 8)
        )
        up2nd = numpy.bitwise_or(
            numpy.bitwise_and(0xf, p2nd >> 4),
            numpy.bitwise_and(0xff0, p3rd << 4)
        )
        # Interleave the two pixel streams back into one flat array
        return numpy.reshape(
            numpy.concatenate(
                (up1st[:, None], up2nd[:, None]), axis=1
            ),
            nr_unpacked * up1st.shape[0]
        )
class _14p(_PixelFormat):
def __init__(
self, symbolic: str = None, nr_components: float = None,
location: _Location = None):
#
super().__init__(
symbolic=symbolic,
alignment=_Alignment(
unpacked=_DataSize.UINT16, packed=_DataSize.UINT8
),
nr_components=nr_components,
unit_depth_in_bit=14,
location=location
)
def expand(self, array: numpy.ndarray) -> numpy.ndarray:
nr_packed = 7
nr_unpacked = 4
#
p1st, p2nd, p3rd, p4th, p5th, p6th, p7th = numpy.reshape(
array, (array.shape[0] // nr_packed, nr_packed)
).astype(numpy.uint16).T
#
up1st = numpy.bitwise_or(
p1st, numpy.bitwise_and(0x3f00, p2nd << 8)
)
up2nd = numpy.bitwise_or(
numpy.bitwise_and(0x3, p2nd >> 6),
numpy.bitwise_and(0x3fc, p3rd << 2),
numpy.bitwise_and(0x3c00, p4th << 10),
)
| |
# Deep Learning optimization functions
#
# <NAME>, 2021
# <EMAIL>
import torch
import uproot
import numpy as np
import sklearn
import psutil
from termcolor import colored,cprint
from matplotlib import pyplot as plt
import pdb
from tqdm import tqdm, trange
import torch.optim as optim
from torch.autograd import Variable
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
from icenet.deep.tempscale import ModelWithTemperature
from icenet.tools import aux
from icenet.tools import io
from icefit import mine
# Weight-initialization dispatch table keyed by tensor rank (len(p.shape)).
init_funcs = {
    1: lambda x: torch.nn.init.normal_(x, mean=0.0, std=1.0),  # bias terms
    2: lambda x: torch.nn.init.xavier_normal_(x, gain=1.0),    # weight terms
    3: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.0),   # conv1D filter
    4: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.0),   # conv2D filter
    # FIX: `torch.nn.init.constant` is the long-deprecated alias; the
    # supported in-place initializer is `constant_`.
    "default": lambda x: torch.nn.init.constant_(x, 1.0),      # others
}
def weights_init_all(model, init_funcs):
    """Apply a rank-keyed initializer to every parameter of *model*.

    Examples:
        model = MyNet()
        weights_init_all(model, init_funcs)
    """
    fallback = init_funcs["default"]
    for param in model.parameters():
        init_funcs.get(len(param.shape), fallback)(param)
def weights_init_uniform_rule(m):
    """Initialize Linear-layer weights from U[-a, a] with a = 1/sqrt(fan_in);
    biases are zeroed. Non-Linear modules are left untouched.
    """
    if m.__class__.__name__.find('Linear') == -1:
        return
    bound = 1.0 / np.sqrt(m.in_features)
    m.weight.data.uniform_(-bound, bound)
    m.bias.data.fill_(0)
def weights_init_normal(m):
    """Initialize Linear-layer weights from N(0, sigma) with
    sigma = 1/sqrt(fan_in); biases are zeroed. Other modules untouched.
    """
    if m.__class__.__name__.find('Linear') == -1:
        return
    fan_in = m.in_features
    m.weight.data.normal_(0.0, 1 / np.sqrt(fan_in))
    m.bias.data.fill_(0)
def logsumexp(x, dim=-1):
    """Numerically stable log-sum-exp along *dim*, keeping the dimension.

    https://en.wikipedia.org/wiki/LogSumExp
    """
    xmax = torch.max(x, dim=dim, keepdim=True)[0]
    return xmax + torch.log(torch.exp(x - xmax).sum(dim=dim, keepdim=True))
def log_softmax(x, dim=-1):
    """
    Log of softmax

    Args:
        x : network output without softmax
    Returns:
        logsoftmax values
    """
    # Stable log-normalizer, inlined (same computation as logsumexp above)
    xmax = torch.max(x, dim=dim, keepdim=True)[0]
    log_z = xmax + torch.log(torch.sum(torch.exp(x - xmax), dim=dim, keepdim=True))
    return x - log_z
def multiclass_cross_entropy_logprob(log_phat, y, N_classes, weights):
    """ Per instance weighted cross entropy loss
    (negative log-likelihood)

    Numerically more stable version: consumes log-probabilities directly.

    Args:
        log_phat  : log-probabilities, shape [N, N_classes]
        y         : integer class targets, shape [N]
        N_classes : number of classes
        weights   : per-instance weights, broadcastable against the
                    one-hot targets
    Returns:
        scalar loss (sum over weighted terms, divided by N)
    """
    onehot = F.one_hot(y, N_classes)
    weighted_ll = onehot * log_phat * weights
    return -weighted_ll.sum() / onehot.shape[0]
def multiclass_cross_entropy(phat, y, N_classes, weights, EPS = 1e-30):
    """ Per instance weighted cross entropy loss
    (negative log-likelihood)

    Args:
        phat      : class probabilities, shape [N, N_classes]
        y         : integer class targets, shape [N]
        N_classes : number of classes
        weights   : per-instance weights, broadcastable against the
                    one-hot targets
        EPS       : protection against log(0)
    Returns:
        scalar loss (sum over weighted terms, divided by N)
    """
    onehot = F.one_hot(y, N_classes)
    nll = onehot * torch.log(phat + EPS) * weights
    return -nll.sum() / onehot.shape[0]
def multiclass_focal_entropy_logprob(log_phat, y, N_classes, weights, gamma):
    """ Per instance weighted 'focal entropy loss' on log-probabilities.
    https://arxiv.org/pdf/1708.02002.pdf

    BUGFIX: the body previously referenced undefined names `phat` and `EPS`
    (copy-paste from the probability-space variant below), raising NameError
    on first call. This log-prob version recovers phat = exp(log_phat) for
    the focal modulator and uses log_phat directly (numerically stable,
    mirroring multiclass_cross_entropy_logprob). gamma = 0 reduces to the
    plain weighted cross entropy.

    Args:
        log_phat  : log-probabilities, shape [N, N_classes]
        y         : integer class targets, shape [N]
        N_classes : number of classes
        weights   : per-instance weights, broadcastable against the
                    one-hot targets
        gamma     : focal-loss focusing parameter (>= 0)
    Returns:
        scalar loss (sum over weighted terms, divided by N)
    """
    y = F.one_hot(y, N_classes)
    phat = torch.exp(log_phat)
    loss = -y * torch.pow(1 - phat, gamma) * log_phat * weights
    loss = loss.sum() / y.shape[0]
    return loss
def multiclass_focal_entropy(phat, y, N_classes, weights, gamma, EPS = 1e-30):
    """ Per instance weighted 'focal entropy loss'
    https://arxiv.org/pdf/1708.02002.pdf

    gamma = 0 reduces to the plain weighted cross entropy.

    Args:
        phat      : class probabilities, shape [N, N_classes]
        y         : integer class targets, shape [N]
        N_classes : number of classes
        weights   : per-instance weights, broadcastable against the
                    one-hot targets
        gamma     : focal-loss focusing parameter (>= 0)
        EPS       : protection against log(0)
    Returns:
        scalar loss (sum over weighted terms, divided by N)
    """
    onehot = F.one_hot(y, N_classes)
    # (1 - p)^gamma down-weights well-classified (high-p) instances
    modulator = torch.pow(1 - phat, gamma)
    focal = onehot * modulator * torch.log(phat + EPS) * weights
    return -focal.sum() / onehot.shape[0]
def log_sum_exp(x):
    """Stable log(sum(exp(x))) over dim 1; returns shape [N].

    http://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/
    """
    rowmax = torch.max(x, 1)[0]
    # rowmax.size() = [N, ]; unsqueeze to broadcast over columns
    shifted = x - rowmax.unsqueeze(dim=1).expand_as(x)
    # result keeps shape [N, ], no squeeze needed
    return rowmax + torch.log(torch.exp(shifted).sum(1))
class Dataset(torch.utils.data.Dataset):
    """Simple (X, Y, W) sample container for torch DataLoaders.

    X : input features, first axis indexes samples
    Y : targets, first axis indexes samples
    W : per-sample weight rows (2D: [N, n_weights])
    """
    def __init__(self, X, Y, W):
        """Store the feature, target and weight arrays as-is."""
        self.X = X
        self.Y = Y
        self.W = W

    def __len__(self):
        """Total number of samples."""
        return self.X.shape[0]

    def __getitem__(self, index):
        """Return one (features, target, weights) triple.

        Ellipsis indexing handles scalar/vector/tensor feature layouts alike.
        """
        return self.X[index, ...], self.Y[index], self.W[index, :]
class DualDataset(torch.utils.data.Dataset):
    """Two-branch sample container: per-sample dict {'x', 'u'} plus Y, W.

    X : dict with 'x' (e.g. image tensors) and 'u' (e.g. global features)
    Y : targets, first axis indexes samples
    W : per-sample weight rows (2D: [N, n_weights])
    """
    def __init__(self, X, Y, W):
        """Unpack the two feature branches and store targets/weights."""
        self.x = X['x'] # e.g. image tensors
        self.u = X['u'] # e.g. global features
        self.Y = Y
        self.W = W

    def __len__(self):
        """Total number of samples (taken from the 'x' branch)."""
        return self.x.shape[0]

    def __getitem__(self, index):
        """Return ({'x': ..., 'u': ...}, target, weights) for one sample."""
        sample = {'x': self.x[index, ...], 'u': self.u[index, ...]}
        return sample, self.Y[index], self.W[index, :]
def model_to_cuda(model, device_type='auto'):
    """ Wrapper function to handle CPU/GPU setup

    Args:
        model       : torch.nn.Module to transfer
        device_type : 'auto' picks 'cuda:0' when available, else 'cpu:0';
                      any other value is passed to torch.device() directly
                      (e.g. 'cpu', 'cuda:1')
    Returns:
        (model, device) tuple; model is wrapped in nn.DataParallel when
        more than one GPU is visible
    """
    GPU_chosen = False
    if device_type == 'auto':
        if torch.cuda.is_available():
            device = torch.device('cuda:0')
            GPU_chosen = True
        else:
            device = torch.device('cpu:0')
    else:
        # BUGFIX: this branch previously read an undefined global 'param'
        # (NameError for any explicit device_type); honor the argument.
        device = torch.device(device_type)
    model = model.to(device, non_blocking=True)
    # Multi-GPU setup
    if torch.cuda.device_count() > 1:
        print(__name__ + f'.model_to_cuda: Multi-GPU {torch.cuda.device_count()}')
        model = nn.DataParallel(model)
    print(__name__ + f'.model_to_cuda: Computing device <{device}> chosen')
    if GPU_chosen:
        # VRAM diagnostics only make sense once a CUDA device was selected
        used = io.get_gpu_memory_map()[0]
        total = io.torch_cuda_total_memory(device)
        cprint(__name__ + f'.model_to_cuda: device <{device}> VRAM in use: {used:0.2f} / {total:0.2f} GB', 'yellow')
        print('')
    return model, device
def train(model, X_trn, Y_trn, X_val, Y_val, trn_weights, param, modeldir, clip_gradients=True):
"""
Main training loop
"""
cprint(__name__ + f""".train: Process RAM usage: {io.process_memory_use():0.2f} GB
[total RAM in use {psutil.virtual_memory()[2]} %]""", 'red')
model, device = model_to_cuda(model, param['device'])
# Input checks
# more than 1 class sample required, will crash otherwise
if len(np.unique(Y_trn.detach().cpu().numpy())) <= 1:
raise Exception(__name__ + '.train: Number of classes in ''Y_trn'' <= 1')
if len(np.unique(Y_val.detach().cpu().numpy())) <= 1:
raise Exception(__name__ + '.train: Number of classes in ''Y_val'' <= 1')
# --------------------------------------------------------------------
if type(X_trn) is dict:
print(__name__ + f".train: Training samples = {X_trn['x'].shape[0]}, Validation samples = {X_val['x'].shape[0]}")
else:
print(__name__ + f'.train: Training samples = {X_trn.shape[0]}, Validation samples = {X_val.shape[0]}')
# Prints the weights and biases
print(model)
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
cprint(__name__ + f'.train: Number of free parameters = {params} (requires_grad)', 'yellow')
# --------------------------------------------------------------------
### Weight initialization
#print(__name__ + f'.train: Weight initialization')
#model.apply(weights_init_normal_rule)
# --------------------------------------------------------------------
print('')
# Class fractions
YY = Y_trn.cpu().numpy()
frac = []
for i in range(model.C):
frac.append( sum(YY == i) / YY.size )
print(__name__ + '.train: Class fractions in the training sample: ')
## Classes
for i in range(len(frac)):
print(f' {i:4d} : {frac[i]:5.6f} ({sum(YY == i)} counts)')
print(__name__ + f'.train: Found {len(np.unique(YY))} / {model.C} classes in the training sample')
# Define the optimizer
opt = param['opt_param']['optimizer']
learning_rate = param['opt_param']['learning_rate']
weight_decay = param['opt_param']['weight_decay']
if opt == 'AdamW':
optimizer = torch.optim.AdamW(model.parameters(), lr = learning_rate, weight_decay = weight_decay)
elif opt == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr = learning_rate, weight_decay = weight_decay)
elif opt == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), lr = learning_rate, weight_decay = weight_decay)
else:
raise Exception(__name__ + f'.train: Unknown optimizer {opt} (use "Adam", "AdamW" or "SGD")')
# List to store losses
losses = []
trn_aucs = []
val_aucs = []
print(__name__ + '.train: Training loop ...')
# Change the shape
trn_one_hot_weights = np.zeros((len(trn_weights), model.C))
for i in range(model.C):
trn_one_hot_weights[YY == i, i] = trn_weights[YY == i]
params = {'batch_size': param['opt_param']['batch_size'],
'shuffle' : True,
'num_workers' : param['num_workers'],
'pin_memory' : True}
val_one_hot_weights = np.ones((len(Y_val), model.C))
### Generators
if type(X_trn) is dict:
training_set = DualDataset(X_trn, Y_trn, trn_one_hot_weights)
validation_set = DualDataset(X_val, Y_val, val_one_hot_weights)
else:
training_set = Dataset(X_trn, Y_trn, trn_one_hot_weights)
validation_set = Dataset(X_val, Y_val, val_one_hot_weights)
training_generator = torch.utils.data.DataLoader(training_set, **params)
validation_generator = torch.utils.data.DataLoader(validation_set, **params)
# Training mode on!
model.train()
### Epoch loop
for epoch in tqdm(range(param['opt_param']['epochs']), ncols = 60):
# Minibatch loop
sumloss = 0
nbatch = 0
for batch_x, batch_y, batch_weights in training_generator:
# ----------------------------------------------------------------
# Transfer to (GPU) device memory
if type(batch_x) is dict: # If multiobject type
for key in batch_x.keys():
batch_x[key] = batch_x[key].to(device, non_blocking=True, dtype=torch.float)
else:
batch_x = batch_x.to(device, non_blocking=True, dtype=torch.float)
batch_y = batch_y.to(device, non_blocking=True)
batch_weights = batch_weights.to(device, non_blocking=True)
# ----------------------------------------------------------------
# Noise regularization (NOT ACTIVE)
#if param['noise_reg'] > 0:
# noise = torch.empty(batch_x.shape).normal_(mean=0, std=param['noise_reg']).to(device, dtype=torch.float32, non_blocking=True)
# batch_x = batch_x + noise
# Predict probabilities
log_phat = model.softpredict(batch_x)
# Evaluate loss
loss = 0
if param['opt_param']['lossfunc'] == 'cross_entropy':
loss = multiclass_cross_entropy_logprob(log_phat, batch_y, model.C, batch_weights)
elif param['opt_param']['lossfunc'] == 'focal_entropy':
loss = multiclass_focal_entropy_logprob(log_phat, batch_y, model.C, batch_weights, param['opt_param']['gamma'])
elif param['opt_param']['lossfunc'] == 'inverse_focal':
loss = multiclass_inverse_focal_logprob(log_phat, batch_y, model.C, batch_weights, param['opt_param']['gamma'])
else:
print(__name__ + '.train: Error with unknown lossfunc ')
# ------------------------------------------------------------
# Mutual information regularization for the output independence w.r.t the target variable
"""
if param['opt_param']['MI_reg_on'] == True:
# Input variables, make output orthogonal with respect to 'ortho' variable
x1 = log_phat
x2 = batch_x['ortho']
MI, MI_err = mine.estimate(X=x1, Z=x2, num_iter=2000, loss=method)
# Adaptive gradient clipping
# ...
# Add to the loss
loss += param['opt_param']['MI_reg_beta'] * MI
"""
# ------------------------------------------------------------
optimizer.zero_grad() # Zero gradients
loss.backward() # Compute gradients
if clip_gradients:
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5) # Clip gradient for NaN problems
# Update parameters
optimizer.step()
### Save metrics
sumloss += loss.item() # item() important for performance
nbatch += 1
avgloss = sumloss / nbatch
losses.append(avgloss)
### SAVE MODEL
checkpoint = {'model': model, 'state_dict': | |
'
'output as \'8 20 2\'',
argstr='-roisel %s')
debug = traits.Bool(
desc='print debug information',
argstr='-debug')
quiet = traits.Bool(
desc='execute quietly',
argstr='-quiet')
nomeanout = traits.Bool(
desc='Do not include the (zero-inclusive) mean among computed stats',
argstr='-nomeanout')
nobriklab = traits.Bool(
desc='Do not print the sub-brick label next to its index',
argstr='-nobriklab')
format1D = traits.Bool(
xor=['format1DR'],
desc='Output results in a 1D format that includes commented labels',
argstr='-1Dformat')
format1DR = traits.Bool(
xor=['format1D'],
desc='Output results in a 1D format that includes uncommented '
'labels. May not work optimally with typical 1D functions, '
'but is useful for R functions.',
argstr='-1DRformat')
_stat_names = ['mean', 'sum', 'voxels', 'minmax', 'sigma', 'median',
'mode', 'summary', 'zerominmax', 'zerosigma', 'zeromedian',
'zeromode']
stat = InputMultiObject(
traits.Enum(_stat_names),
desc='statistics to compute. Options include: '
' * mean = Compute the mean using only non_zero voxels.'
' Implies the opposite for the mean computed '
' by default.\n'
' * median = Compute the median of nonzero voxels\n'
' * mode = Compute the mode of nonzero voxels.'
' (integral valued sets only)\n'
' * minmax = Compute the min/max of nonzero voxels\n'
' * sum = Compute the sum using only nonzero voxels.\n'
' * voxels = Compute the number of nonzero voxels\n'
' * sigma = Compute the standard deviation of nonzero'
' voxels\n'
'Statistics that include zero-valued voxels:\n'
' * zerominmax = Compute the min/max of all voxels.\n'
' * zerosigma = Compute the standard deviation of all'
' voxels.\n'
' * zeromedian = Compute the median of all voxels.\n'
' * zeromode = Compute the mode of all voxels.\n'
' * summary = Only output a summary line with the grand '
' mean across all briks in the input dataset.'
' This option cannot be used with nomeanout.\n'
'More that one option can be specified.',
argstr='%s...')
out_file = File(
name_template='%s_roistat.1D',
desc='output file',
keep_extension=False,
argstr='> %s',
name_source='in_file',
position=-1)
class ROIStatsOutputSpec(TraitedSpec):
    """Output spec for ROIStats: the statistics table written by 3dROIstats."""
    out_file = File(desc='output tab-separated values file', exists=True)
class ROIStats(AFNICommandBase):
    """Display statistics over masked regions

    For complete details, see the `3dROIstats Documentation
    <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dROIstats.html>`_

    Examples
    ========
    >>> from nipype.interfaces import afni
    >>> roistats = afni.ROIStats()
    >>> roistats.inputs.in_file = 'functional.nii'
    >>> roistats.inputs.mask_file = 'skeleton_mask.nii.gz'
    >>> roistats.inputs.stat = ['mean', 'median', 'voxels']
    >>> roistats.inputs.nomeanout = True
    >>> roistats.cmdline
    '3dROIstats -mask skeleton_mask.nii.gz -nomeanout -nzmean -nzmedian -nzvoxels functional.nii > functional_roistat.1D'
    >>> res = roistats.run() # doctest: +SKIP
    """
    _cmd = '3dROIstats'
    _terminal_output = 'allatonce'
    input_spec = ROIStatsInputSpec
    output_spec = ROIStatsOutputSpec

    def _format_arg(self, name, spec, value):
        # Translate the user-facing stat names into actual 3dROIstats flags;
        # the '-nz*' variants operate on nonzero voxels only.
        _stat_dict = {
            'mean': '-nzmean',
            'median': '-nzmedian',
            'mode': '-nzmode',
            'minmax': '-nzminmax',
            'sigma': '-nzsigma',
            'voxels': '-nzvoxels',
            'sum': '-nzsum',
            'summary': '-summary',
            'zerominmax': '-minmax',
            'zeromedian': '-median',
            'zerosigma': '-sigma',
            'zeromode': '-mode'
        }
        if name == 'stat':
            value = list(map(_stat_dict.__getitem__, value))
        return super(ROIStats, self)._format_arg(name, spec, value)
class RetroicorInputSpec(AFNICommandInputSpec):
    """Input spec for Retroicor; each trait maps to a 3dretroicor flag
    via its ``argstr``."""
    in_file = File(
        desc='input file to 3dretroicor',
        argstr='%s',
        position=-1,
        mandatory=True,
        exists=True,
        copyfile=False)
    out_file = File(
        name_template='%s_retroicor',
        name_source=['in_file'],
        desc='output image file name',
        argstr='-prefix %s',
        position=1)
    # Physiological waveforms; at least one is needed for a meaningful
    # correction (see Retroicor._format_arg, which drops the input file
    # from the command line when both are undefined).
    card = File(
        desc='1D cardiac data file for cardiac correction',
        argstr='-card %s',
        position=-2,
        exists=True)
    resp = File(
        desc='1D respiratory waveform data for correction',
        argstr='-resp %s',
        position=-3,
        exists=True)
    threshold = traits.Int(
        desc='Threshold for detection of R-wave peaks in input (Make sure it '
        'is above the background noise level, Try 3/4 or 4/5 times range '
        'plus minimum)',
        argstr='-threshold %d',
        position=-4)
    order = traits.Int(
        desc='The order of the correction (2 is typical)',
        argstr='-order %s',
        position=-5)
    cardphase = File(
        desc='Filename for 1D cardiac phase output',
        argstr='-cardphase %s',
        position=-6,
        hash_files=False)
    respphase = File(
        desc='Filename for 1D resp phase output',
        argstr='-respphase %s',
        position=-7,
        hash_files=False)
class Retroicor(AFNICommand):
    """Performs Retrospective Image Correction for physiological
    motion effects, using a slightly modified version of the
    RETROICOR algorithm

    The durations of the physiological inputs are assumed to equal
    the duration of the dataset. Any constant sampling rate may be
    used, but 40 Hz seems to be acceptable. This program's cardiac
    peak detection algorithm is rather simplistic, so you might try
    using the scanner's cardiac gating output (transform it to a
    spike wave if necessary).

    This program uses slice timing information embedded in the
    dataset to estimate the proper cardiac/respiratory phase for
    each slice. It makes sense to run this program before any
    program that may destroy the slice timings (e.g. 3dvolreg for
    motion correction).

    For complete details, see the `3dretroicor Documentation.
    <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dretroicor.html>`_

    Examples
    ========
    >>> from nipype.interfaces import afni
    >>> ret = afni.Retroicor()
    >>> ret.inputs.in_file = 'functional.nii'
    >>> ret.inputs.card = 'mask.1D'
    >>> ret.inputs.resp = 'resp.1D'
    >>> ret.inputs.outputtype = 'NIFTI'
    >>> ret.cmdline
    '3dretroicor -prefix functional_retroicor.nii -resp resp.1D -card mask.1D functional.nii'
    >>> res = ret.run() # doctest: +SKIP
    """
    _cmd = '3dretroicor'
    input_spec = RetroicorInputSpec
    output_spec = AFNICommandOutputSpec

    def _format_arg(self, name, trait_spec, value):
        # Omit the input file from the command line unless at least one
        # physiological waveform (cardiac or respiratory) was supplied.
        if name == 'in_file':
            has_waveform = (isdefined(self.inputs.card)
                            or isdefined(self.inputs.resp))
            if not has_waveform:
                return None
        return super(Retroicor, self)._format_arg(name, trait_spec, value)
class SegInputSpec(CommandLineInputSpec):
    """Input spec for Seg; each trait maps to a 3dSeg flag via its
    ``argstr``."""
    in_file = File(
        desc='ANAT is the volume to segment',
        argstr='-anat %s',
        position=-1,
        mandatory=True,
        exists=True,
        copyfile=True)
    mask = traits.Either(
        traits.Enum('AUTO'),
        File(exists=True),
        desc='only non-zero voxels in mask are analyzed. mask can either be a '
        'dataset or the string "AUTO" which would use AFNI\'s automask '
        'function to create the mask.',
        argstr='-mask %s',
        position=-2,
        mandatory=True)
    # Bias-field estimation controls
    blur_meth = traits.Enum(
        'BFT',
        'BIM',
        argstr='-blur_meth %s',
        desc='set the blurring method for bias field estimation')
    bias_fwhm = traits.Float(
        desc='The amount of blurring used when estimating the field bias with '
        'the Wells method',
        argstr='-bias_fwhm %f')
    classes = Str(
        desc='CLASS_STRING is a semicolon delimited string of class labels',
        argstr='-classes %s')
    bmrf = traits.Float(
        desc='Weighting factor controlling spatial homogeneity of the '
        'classifications',
        argstr='-bmrf %f')
    bias_classes = Str(
        desc='A semicolon delimited string of classes that contribute to the '
        'estimation of the bias field',
        argstr='-bias_classes %s')
    # When set, 3dSeg writes its outputs under <prefix>/ instead of ./Segsy/
    # (see Seg.aggregate_outputs)
    prefix = Str(
        desc='the prefix for the output folder containing all output volumes',
        argstr='-prefix %s')
    mixfrac = Str(
        desc='MIXFRAC sets up the volume-wide (within mask) tissue fractions '
        'while initializing the segmentation (see IGNORE for exception)',
        argstr='-mixfrac %s')
    mixfloor = traits.Float(
        desc='Set the minimum value for any class\'s mixing fraction',
        argstr='-mixfloor %f')
    main_N = traits.Int(
        desc='Number of iterations to perform.', argstr='-main_N %d')
class Seg(AFNICommandBase):
    """3dSeg segments brain volumes into tissue classes. The program allows
    for adding a variety of global and voxelwise priors. However for the
    moment, only mixing fractions and MRF are documented.

    For complete details, see the `3dSeg Documentation.
    <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dSeg.html>`_

    Examples
    ========
    >>> from nipype.interfaces.afni import preprocess
    >>> seg = preprocess.Seg()
    >>> seg.inputs.in_file = 'structural.nii'
    >>> seg.inputs.mask = 'AUTO'
    >>> seg.cmdline
    '3dSeg -mask AUTO -anat structural.nii'
    >>> res = seg.run() # doctest: +SKIP
    """
    _cmd = '3dSeg'
    input_spec = SegInputSpec
    output_spec = AFNICommandOutputSpec

    def aggregate_outputs(self, runtime=None, needed_outputs=None):
        """Pick up the Classes+*.BRIK volume written by 3dSeg."""
        import glob
        outputs = self._outputs()
        # 3dSeg writes into <prefix>/ when one was given, else into ./Segsy/
        folder = self.inputs.prefix if isdefined(self.inputs.prefix) else 'Segsy'
        pattern = os.path.join(os.getcwd(), folder, 'Classes+*.BRIK')
        outputs.out_file = glob.glob(pattern)[0]
        return outputs
class SkullStripInputSpec(AFNICommandInputSpec):
    """Input spec for SkullStrip; traits map to 3dSkullStrip flags via
    ``argstr``."""
    in_file = File(
        desc='input file to 3dSkullStrip',
        argstr='-input %s',
        position=1,
        mandatory=True,
        exists=True,
        copyfile=False)
    out_file = File(
        name_template='%s_skullstrip',
        desc='output image file name',
        argstr='-prefix %s',
        name_source='in_file')
class SkullStrip(AFNICommand):
    """A program to extract the brain from surrounding tissue from MRI
    T1-weighted images.
    TODO Add optional arguments.

    For complete details, see the `3dSkullStrip Documentation.
    <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dSkullStrip.html>`_

    Examples
    ========
    >>> from nipype.interfaces import afni
    >>> skullstrip = afni.SkullStrip()
    >>> skullstrip.inputs.in_file = 'functional.nii'
    >>> skullstrip.inputs.args = '-o_ply'
    >>> skullstrip.cmdline
    '3dSkullStrip -input functional.nii -o_ply -prefix functional_skullstrip'
    >>> res = skullstrip.run() # doctest: +SKIP
    """
    _cmd = '3dSkullStrip'
    _redirect_x = True
    input_spec = SkullStripInputSpec
    output_spec = AFNICommandOutputSpec

    def __init__(self, **inputs):
        super(SkullStrip, self).__init__(**inputs)
        if not no_afni():
            # Between AFNI 16.0.00 and 16.2.07, redirect_x is not needed
            if (2016, 0, 0) <= Info.version() < (2016, 2, 7):
                self._redirect_x = False
class TCorr1DInputSpec(AFNICommandInputSpec):
xset = File(
desc='3d+time dataset input',
argstr=' %s',
position=-2,
mandatory=True,
exists=True,
copyfile=False)
y_1d = File(
desc='1D time series file input',
argstr=' %s',
position=-1,
mandatory=True,
exists=True)
out_file = File(
desc='output filename prefix',
name_template='%s_correlation.nii.gz',
argstr='-prefix %s',
name_source='xset',
keep_extension=True)
pearson = traits.Bool(
desc='Correlation is the normal Pearson correlation coefficient',
argstr=' -pearson',
xor=['spearman', 'quadrant', 'ktaub'],
position=1)
spearman = traits.Bool(
desc='Correlation is the Spearman (rank) correlation coefficient',
argstr=' -spearman',
xor=['pearson', 'quadrant', 'ktaub'],
position=1)
quadrant = traits.Bool(
desc='Correlation is the quadrant |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.